code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.columnar
import java.nio.{ByteBuffer, ByteOrder}
import org.apache.spark.sql.Row
/**
* A stackable trait used for building byte buffer for a column containing null values. Memory
* layout of the final byte buffer is:
* {{{
* .----------------------- Column type ID (4 bytes)
* | .------------------- Null count N (4 bytes)
* | | .--------------- Null positions (4 x N bytes, empty if null count is zero)
* | | | .--------- Non-null elements
* V V V V
* +---+---+-----+---------+
* | | | ... | ... ... |
* +---+---+-----+---------+
* }}}
*/
private[sql] trait NullableColumnBuilder extends ColumnBuilder {
  // Buffer accumulating the ordinals (row positions) of null values, 4 bytes each.
  protected var nulls: ByteBuffer = _
  // Number of nulls recorded so far; written right after the type ID in the final buffer.
  protected var nullCount: Int = _
  // Ordinal of the next row to be appended (counts both null and non-null rows).
  private var pos: Int = _

  // Stackable initializer: resets the null-tracking state, then delegates to the
  // underlying builder via super.
  abstract override def initialize(
      initialSize: Int,
      columnName: String,
      useCompression: Boolean): Unit = {
    nulls = ByteBuffer.allocate(1024)
    nulls.order(ByteOrder.nativeOrder())
    pos = 0
    nullCount = 0
    super.initialize(initialSize, columnName, useCompression)
  }

  // Appends the value at `ordinal` of `row`: null values are tracked by position in
  // `nulls`, non-null values are delegated to the underlying builder.
  abstract override def appendFrom(row: Row, ordinal: Int): Unit = {
    columnStats.gatherStats(row, ordinal)
    if (row.isNullAt(ordinal)) {
      // Grow the positions buffer if needed, then record this row's ordinal.
      nulls = ColumnBuilder.ensureFreeSpace(nulls, 4)
      nulls.putInt(pos)
      nullCount += 1
    } else {
      super.appendFrom(row, ordinal)
    }
    pos += 1
  }

  // Assembles the final buffer in the layout documented on the trait:
  // [type ID (4)][null count (4)][null positions (4 x N)][non-null elements].
  abstract override def build(): ByteBuffer = {
    val nonNulls = super.build()
    // Consume the type ID from the non-null buffer so it can be re-written first.
    val typeId = nonNulls.getInt()
    val nullDataLen = nulls.position()
    // Flip `nulls` so `put(nulls)` below copies exactly the bytes written so far.
    nulls.limit(nullDataLen)
    nulls.rewind()
    val buffer = ByteBuffer
      .allocate(4 + 4 + nullDataLen + nonNulls.remaining())
      .order(ByteOrder.nativeOrder())
      .putInt(typeId)
      .putInt(nullCount)
      .put(nulls)
      .put(nonNulls)
    buffer.rewind()
    buffer
  }

  // Flips the null-positions buffer and returns only the underlying builder's buffer
  // (type ID + non-null elements). NOTE(review): not invoked in this file; presumably
  // used by subclasses that need the raw non-null data — confirm against callers.
  protected def buildNonNulls(): ByteBuffer = {
    nulls.limit(nulls.position()).rewind()
    super.build()
  }
}
| hengyicai/OnlineAggregationUCAS | sql/core/src/main/scala/org/apache/spark/sql/columnar/NullableColumnBuilder.scala | Scala | apache-2.0 | 2,791 |
package gg.uhc.hosts.endpoints.key
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
/**
 * Routes for the API key resource: a GET fetches the current key, a POST
 * regenerates it. Both are served at the bare path (with or without a
 * trailing slash).
 */
class KeyRoute(getApiKey: GetApiKey, regenerateApiKey: RegenerateApiKey) {
  def apply(): Route =
    pathEndOrSingleSlash {
      // `~` chains the alternatives exactly as `concat` would.
      get(getApiKey()) ~ post(regenerateApiKey())
    }
}
| Eluinhost/hosts.uhc.gg | src/main/scala/gg/uhc/hosts/endpoints/key/KeyRoute.scala | Scala | mit | 336 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.admin.AdminUtils
import kafka.cluster.Broker
import kafka.log.LogConfig
import kafka.message.ByteBufferMessageSet
import kafka.api.{OffsetRequest, FetchResponsePartitionData}
import kafka.common.{KafkaStorageException, TopicAndPartition}
/**
 * Fetcher thread that keeps this broker's follower replicas in sync with the
 * partition leader on `sourceBroker`. Fetched message sets are appended to the
 * local log and the follower high watermark is advanced; out-of-range fetch
 * offsets are resolved by truncating or fully resetting the local log.
 */
class ReplicaFetcherThread(name:String,
                           sourceBroker: Broker,
                           brokerConfig: KafkaConfig,
                           replicaMgr: ReplicaManager)
  extends AbstractFetcherThread(name = name,
                                clientId = name,
                                sourceBroker = sourceBroker,
                                socketTimeout = brokerConfig.replicaSocketTimeoutMs,
                                socketBufferSize = brokerConfig.replicaSocketReceiveBufferBytes,
                                fetchSize = brokerConfig.replicaFetchMaxBytes,
                                fetcherBrokerId = brokerConfig.brokerId,
                                maxWait = brokerConfig.replicaFetchWaitMaxMs,
                                minBytes = brokerConfig.replicaFetchMinBytes,
                                isInterruptible = false) {

  // process fetched data: append the fetched messages to the local replica log
  // and advance the follower's high watermark.
  def processPartitionData(topicAndPartition: TopicAndPartition, fetchOffset: Long, partitionData: FetchResponsePartitionData) {
    try {
      val topic = topicAndPartition.topic
      val partitionId = topicAndPartition.partition
      // NOTE(review): `.get` assumes the replica is hosted locally; a fetcher for a
      // partition this broker does not host would throw NoSuchElementException.
      val replica = replicaMgr.getReplica(topic, partitionId).get
      val messageSet = partitionData.messages.asInstanceOf[ByteBufferMessageSet]

      // The fetch must resume exactly at the local log end; anything else means
      // follower state is inconsistent with what was requested.
      if (fetchOffset != replica.logEndOffset)
        throw new RuntimeException("Offset mismatch: fetched offset = %d, log end offset = %d.".format(fetchOffset, replica.logEndOffset))
      trace("Follower %d has replica log end offset %d for partition %s. Received %d messages and leader hw %d"
        .format(replica.brokerId, replica.logEndOffset, topicAndPartition, messageSet.sizeInBytes, partitionData.hw))
      // assignOffsets = false: the leader already assigned offsets; preserve them as-is.
      replica.log.get.append(messageSet, assignOffsets = false)
      trace("Follower %d has replica log end offset %d after appending %d bytes of messages for partition %s"
        .format(replica.brokerId, replica.logEndOffset, messageSet.sizeInBytes, topicAndPartition))
      // The follower HW is capped by its own log end offset — it cannot advance past
      // data it has durably appended.
      val followerHighWatermark = replica.logEndOffset.min(partitionData.hw)
      replica.highWatermark = followerHighWatermark
      trace("Follower %d set replica highwatermark for partition [%s,%d] to %d"
        .format(replica.brokerId, topic, partitionId, followerHighWatermark))
    } catch {
      case e: KafkaStorageException =>
        // A disk error makes local state untrustworthy: halt the whole broker
        // immediately rather than continue replicating onto bad storage.
        fatal("Disk error while replicating data.", e)
        Runtime.getRuntime.halt(1)
    }
  }

  /**
   * Handle a partition whose offset is out of range and return a new fetch offset.
   */
  def handleOffsetOutOfRange(topicAndPartition: TopicAndPartition): Long = {
    val replica = replicaMgr.getReplica(topicAndPartition.topic, topicAndPartition.partition).get

    /**
     * Unclean leader election: A follower goes down, in the meanwhile the leader keeps appending messages. The follower comes back up
     * and before it has completely caught up with the leader's logs, all replicas in the ISR go down. The follower is now uncleanly
     * elected as the new leader, and it starts appending messages from the client. The old leader comes back up, becomes a follower
     * and it may discover that the current leader's end offset is behind its own end offset.
     *
     * In such a case, truncate the current follower's log to the current leader's end offset and continue fetching.
     *
     * There is a potential for a mismatch between the logs of the two replicas here. We don't fix this mismatch as of now.
     */
    val leaderEndOffset = simpleConsumer.earliestOrLatestOffset(topicAndPartition, OffsetRequest.LatestTime, brokerConfig.brokerId)
    if (leaderEndOffset < replica.logEndOffset) {
      // Prior to truncating the follower's log, ensure that doing so is not disallowed by the configuration for unclean leader election.
      // This situation could only happen if the unclean election configuration for a topic changes while a replica is down. Otherwise,
      // we should never encounter this situation since a non-ISR leader cannot be elected if disallowed by the broker configuration.
      if (!LogConfig.fromProps(brokerConfig.props.props, AdminUtils.fetchTopicConfig(replicaMgr.zkClient,
        topicAndPartition.topic)).uncleanLeaderElectionEnable) {
        // Log a fatal error and shutdown the broker to ensure that data loss does not unexpectedly occur.
        fatal("Halting because log truncation is not allowed for topic %s,".format(topicAndPartition.topic) +
          " Current leader %d's latest offset %d is less than replica %d's latest offset %d"
          .format(sourceBroker.id, leaderEndOffset, brokerConfig.brokerId, replica.logEndOffset))
        Runtime.getRuntime.halt(1)
      }

      // Truncation is permitted: discard local messages beyond the leader's end offset.
      replicaMgr.logManager.truncateTo(Map(topicAndPartition -> leaderEndOffset))
      warn("Replica %d for partition %s reset its fetch offset from %d to current leader %d's latest offset %d"
        .format(brokerConfig.brokerId, topicAndPartition, replica.logEndOffset, sourceBroker.id, leaderEndOffset))
      leaderEndOffset
    } else {
      /**
       * The follower could have been down for a long time and when it starts up, its end offset could be smaller than the leader's
       * start offset because the leader has deleted old logs (log.logEndOffset < leaderStartOffset).
       *
       * Roll out a new log at the follower with the start offset equal to the current leader's start offset and continue fetching.
       */
      val leaderStartOffset = simpleConsumer.earliestOrLatestOffset(topicAndPartition, OffsetRequest.EarliestTime, brokerConfig.brokerId)
      replicaMgr.logManager.truncateFullyAndStartAt(topicAndPartition, leaderStartOffset)
      warn("Replica %d for partition %s reset its fetch offset from %d to current leader %d's start offset %d"
        .format(brokerConfig.brokerId, topicAndPartition, replica.logEndOffset, sourceBroker.id, leaderStartOffset))
      leaderStartOffset
    }
  }

  // any logic for partitions whose leader has changed
  def handlePartitionsWithErrors(partitions: Iterable[TopicAndPartition]) {
    // no handler needed since the controller will make the changes accordingly
  }
}
| stealthly/kafka | core/src/main/scala/kafka/server/ReplicaFetcherThread.scala | Scala | apache-2.0 | 7,228 |
/*
* Copyright 2001-2009 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.fixture
import org.scalatest._
import scala.collection.immutable.ListSet
import org.scalatest.Suite.autoTagClassAnnotations
/**
* A sister class to <code>org.scalatest.FunSuite</code> that can pass a fixture object into its tests.
*
* <table><tr><td class="usage">
* <strong>Recommended Usage</strong>:
* Use class <code>fixture.FunSuite</code> in situations for which <a href="../FunSuite.html"><code>FunSuite</code></a>
* would be a good choice, when all or most tests need the same fixture objects
* that must be cleaned up afterwards. <em>Note: <code>fixture.FunSuite</code> is intended for use in special situations, with class <code>FunSuite</code> used for general needs. For
* more insight into where <code>fixture.FunSuite</code> fits in the big picture, see the <a href="../FunSuite.html#withFixtureOneArgTest"><code>withFixture(OneArgTest)</code></a> subsection of the <a href="../FunSuite.html#sharedFixtures">Shared fixtures</a> section in the documentation for class <code>FunSuite</code>.</em>
* </td></tr></table>
*
* <p>
* Class <code>fixture.FunSuite</code> behaves similarly to class <code>org.scalatest.FunSuite</code>, except that tests may have a
* fixture parameter. The type of the
* fixture parameter is defined by the abstract <code>FixtureParam</code> type, which is a member of this class.
* This class also contains an abstract <code>withFixture</code> method. This <code>withFixture</code> method
* takes a <code>OneArgTest</code>, which is a nested trait defined as a member of this class.
* <code>OneArgTest</code> has an <code>apply</code> method that takes a <code>FixtureParam</code>.
* This <code>apply</code> method is responsible for running a test.
* This class's <code>runTest</code> method delegates the actual running of each test to <code>withFixture(OneArgTest)</code>, passing
* in the test code to run via the <code>OneArgTest</code> argument. The <code>withFixture(OneArgTest)</code> method (abstract in this class) is responsible
* for creating the fixture argument and passing it to the test function.
* </p>
*
* <p>
* Subclasses of this class must, therefore, do three things differently from a plain old <code>org.scalatest.FunSuite</code>:
* </p>
*
* <ol>
* <li>define the type of the fixture parameter by specifying type <code>FixtureParam</code></li>
* <li>define the <code>withFixture(OneArgTest)</code> method</li>
* <li>write tests that take a fixture parameter</li>
* <li>(You can also define tests that don't take a fixture parameter.)</li>
* </ol>
*
* <p>
* If the fixture you want to pass into your tests consists of multiple objects, you will need to combine
* them into one object to use this class. One good approach to passing multiple fixture objects is
* to encapsulate them in a case class. Here's an example:
* </p>
*
* <pre class="stHighlight">
* case class FixtureParam(file: File, writer: FileWriter)
* </pre>
*
* <p>
* To enable the stacking of traits that define <code>withFixture(NoArgTest)</code>, it is a good idea to let
* <code>withFixture(NoArgTest)</code> invoke the test function instead of invoking the test
* function directly. To do so, you'll need to convert the <code>OneArgTest</code> to a <code>NoArgTest</code>. You can do that by passing
* the fixture object to the <code>toNoArgTest</code> method of <code>OneArgTest</code>. In other words, instead of
* writing “<code>test(theFixture)</code>”, you'd delegate responsibility for
* invoking the test function to the <code>withFixture(NoArgTest)</code> method of the same instance by writing:
* </p>
*
* <pre>
* withFixture(test.toNoArgTest(theFixture))
* </pre>
*
* <p>
* Here's a complete example:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.funsuite.oneargtest
*
* import org.scalatest.fixture
* import java.io._
*
* class ExampleSuite extends fixture.FunSuite {
*
* case class FixtureParam(file: File, writer: FileWriter)
*
* def withFixture(test: OneArgTest) = {
*
* // create the fixture
* val file = File.createTempFile("hello", "world")
* val writer = new FileWriter(file)
* val theFixture = FixtureParam(file, writer)
*
* try {
* writer.write("ScalaTest is ") // set up the fixture
* withFixture(test.toNoArgTest(theFixture)) // "loan" the fixture to the test
* }
* finally writer.close() // clean up the fixture
* }
*
* test("testing should be easy") { f =>
* f.writer.write("easy!")
* f.writer.flush()
* assert(f.file.length === 18)
* }
*
* test("testing should be fun") { f =>
* f.writer.write("fun!")
* f.writer.flush()
* assert(f.file.length === 17)
* }
* }
* </pre>
*
* <p>
* If a test fails, the <code>OneArgTest</code> function will complete abruptly with an exception describing the failure.
* To ensure clean up happens even if a test fails, you should invoke the test function from inside a <code>try</code> block and do the cleanup in a
* <code>finally</code> clause, as shown in the previous example.
* </p>
*
* <a name="sharingFixturesAcrossClasses"></a><h2>Sharing fixtures across classes</h2>
*
* <p>
* If multiple test classes need the same fixture, you can define the <code>FixtureParam</code> and <code>withFixture(OneArgTest)</code> implementations
* in a trait, then mix that trait into the test classes that need it. For example, if your application requires a database and your integration tests
* use that database, you will likely have many test classes that need a database fixture. You can create a "database fixture" trait that creates a
* database with a unique name, passes the connector into the test, then removes the database once the test completes. This is shown in the following example:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.fixture.funsuite.sharing
*
* import java.util.concurrent.ConcurrentHashMap
* import org.scalatest.fixture
* import DbServer._
* import java.util.UUID.randomUUID
*
* object DbServer { // Simulating a database server
* type Db = StringBuffer
* private val databases = new ConcurrentHashMap[String, Db]
* def createDb(name: String): Db = {
* val db = new StringBuffer
* databases.put(name, db)
* db
* }
* def removeDb(name: String) {
* databases.remove(name)
* }
* }
*
* trait DbFixture { this: fixture.Suite =>
*
* type FixtureParam = Db
*
* // Allow clients to populate the database after
* // it is created
* def populateDb(db: Db) {}
*
* def withFixture(test: OneArgTest) = {
* val dbName = randomUUID.toString
* val db = createDb(dbName) // create the fixture
* try {
* populateDb(db) // setup the fixture
* withFixture(test.toNoArgTest(db)) // "loan" the fixture to the test
* }
* finally removeDb(dbName) // clean up the fixture
* }
* }
*
* class ExampleSuite extends fixture.FunSuite with DbFixture {
*
* override def populateDb(db: Db) { // setup the fixture
* db.append("ScalaTest is ")
* }
*
* test("testing should be easy") { db =>
* db.append("easy!")
* assert(db.toString === "ScalaTest is easy!")
* }
*
* test("testing should be fun") { db =>
* db.append("fun!")
* assert(db.toString === "ScalaTest is fun!")
* }
*
* // This test doesn't need a Db
* test("test code should be clear") { () =>
* val buf = new StringBuffer
* buf.append("ScalaTest code is ")
* buf.append("clear!")
* assert(buf.toString === "ScalaTest code is clear!")
* }
* }
* </pre>
*
* <p>
* Often when you create fixtures in a trait like <code>DbFixture</code>, you'll still need to enable individual test classes
* to "setup" a newly created fixture before it gets passed into the tests. A good way to accomplish this is to pass the newly
* created fixture into a setup method, like <code>populateDb</code> in the previous example, before passing it to the test
* function. Classes that need to perform such setup can override the method, as does <code>ExampleSuite</code>.
* </p>
*
* <p>
* If a test doesn't need the fixture, you can indicate that by providing a no-arg instead of a one-arg function, as is done in the
* third test in the previous example, “<code>test code should be clear</code>”. In other words, instead of starting your function literal
* with something like “<code>db =></code>”, you'd start it with “<code>() =></code>”. For such tests, <code>runTest</code>
* will not invoke <code>withFixture(OneArgTest)</code>. It will instead directly invoke <code>withFixture(NoArgTest)</code>.
* </p>
*
*
* <p>
* Both examples shown above demonstrate the technique of giving each test its own "fixture sandbox" to play in. When your fixtures
* involve external side-effects, like creating files or databases, it is a good idea to give each file or database a unique name as is
* done in these examples. This keeps tests completely isolated, allowing you to run them in parallel if desired. You could mix
* <code>ParallelTestExecution</code> into either of these <code>ExampleSuite</code> classes, and the tests would run in parallel just fine.
* </p>
*
* @author Bill Venners
*/
@Finders(Array("org.scalatest.finders.FunSuiteFinder"))
abstract class FunSuite extends FunSuiteLike {

  /**
   * Returns a user friendly string for this suite, composed of the
   * simple name of the class (possibly simplified further by removing dollar signs if added by the Scala interpreter) and, if this suite
   * contains nested suites, the result of invoking <code>toString</code> on each
   * of the nested suites, separated by commas and surrounded by parentheses.
   *
   * @return a user-friendly string for this suite
   */
  override def toString: String = Suite.suiteToString(None, this)
}
| svn2github/scalatest | src/main/scala/org/scalatest/fixture/FunSuite.scala | Scala | apache-2.0 | 10,591 |
package com.wavesplatform.block.validation
import cats.syntax.either._
import com.wavesplatform.block.Block.{GenerationSignatureLength, GenerationVRFSignatureLength, MaxFeaturesInBlock, ProtoBlockVersion}
import com.wavesplatform.block.{Block, MicroBlock}
import com.wavesplatform.crypto
import com.wavesplatform.crypto.KeyLength
import com.wavesplatform.mining.Miner.MaxTransactionsPerMicroblock
import com.wavesplatform.settings.GenesisSettings
import com.wavesplatform.transaction.GenesisTransaction
import com.wavesplatform.transaction.TxValidationError.GenericError
object Validators {
  /** Result alias: `Right(a)` when valid, `Left(GenericError)` describing the first failure. */
  type Validation[A] = Either[GenericError, A]

  /**
   * Validates structural invariants of a block: reference length, generation
   * signature length (VRF-sized from [[ProtoBlockVersion]] on), generator key
   * length, and feature-vote constraints (only allowed from version 3, no
   * duplicates, sorted from [[ProtoBlockVersion]] on, bounded count).
   */
  def validateBlock(b: Block): Validation[Block] =
    (for {
      _ <- Either.cond(Block.validateReferenceLength(b.header.reference.arr.length), (), "Incorrect reference")
      // Proto blocks carry a VRF-based generation signature of a different length.
      genSigLength = if (b.header.version < ProtoBlockVersion) GenerationSignatureLength else GenerationVRFSignatureLength
      _ <- Either.cond(b.header.generationSignature.arr.length == genSigLength, (), "Incorrect generationSignature")
      _ <- Either.cond(b.header.generator.arr.length == KeyLength, (), "Incorrect signer")
      _ <- Either.cond(
        b.header.version > 2 || b.header.featureVotes.isEmpty,
        (),
        s"Block version ${b.header.version} could not contain feature votes"
      )
      _ <- Either.cond(b.header.featureVotes.distinct.size == b.header.featureVotes.size, (), s"Duplicates in feature votes")
      _ <- Either.cond(b.header.version < ProtoBlockVersion || b.header.featureVotes.sorted == b.header.featureVotes, (), s"Unsorted feature votes")
      _ <- Either.cond(b.header.featureVotes.size <= MaxFeaturesInBlock, (), s"Block could not contain more than $MaxFeaturesInBlock feature votes")
    } yield b).leftMap(GenericError(_))

  /**
   * Validates the genesis block: structural invariants, genesis signature, and
   * that the sum of all [[GenesisTransaction]] amounts equals the configured
   * initial balance.
   */
  def validateGenesisBlock(block: Block, genesisSettings: GenesisSettings): Validation[Block] =
    for {
      // Common validation
      _ <- validateBlock(block)
      // Verify signature
      _ <- Either.cond(
        crypto.verify(block.signature, block.bodyBytes(), block.header.generator),
        (),
        GenericError("Passed genesis signature is not valid")
      )
      // Verify initial balance.
      // foldLeft(0L) instead of reduce: a genesis block containing no
      // GenesisTransaction now produces a sum of 0 and a proper validation error
      // rather than throwing UnsupportedOperationException on an empty collection.
      // Math.addExact still guards against Long overflow of the running sum.
      txsSum = block.transactionData.collect { case tx: GenesisTransaction => tx.amount }.foldLeft(0L)(Math.addExact(_: Long, _: Long))
      _ <- Either.cond(
        txsSum == genesisSettings.initialBalance,
        (),
        GenericError(s"Initial balance ${genesisSettings.initialBalance} did not match the distributions sum $txsSum")
      )
    } yield block

  /**
   * Validates a micro block: reference length, total-result signature length,
   * sender key length, non-emptiness, and the per-microblock transaction cap.
   */
  def validateMicroBlock(mb: MicroBlock): Validation[MicroBlock] =
    (for {
      _ <- Either.cond(
        MicroBlock.validateReferenceLength(mb.version, mb.reference.arr.length),
        (),
        s"Incorrect prevResBlockSig: ${mb.reference.arr.length}"
      )
      _ <- Either.cond(
        mb.totalResBlockSig.arr.length == crypto.SignatureLength,
        (),
        s"Incorrect totalResBlockSig: ${mb.totalResBlockSig.arr.length}"
      )
      _ <- Either.cond(mb.sender.arr.length == KeyLength, (), s"Incorrect generator.publicKey: ${mb.sender.arr.length}")
      _ <- Either.cond(mb.transactionData.nonEmpty, (), "cannot create empty MicroBlock")
      _ <- Either.cond(
        mb.transactionData.size <= MaxTransactionsPerMicroblock,
        (),
        s"too many txs in MicroBlock: allowed: $MaxTransactionsPerMicroblock, actual: ${mb.transactionData.size}"
      )
    } yield mb).leftMap(GenericError(_))
}
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/block/validation/Validators.scala | Scala | mit | 3,497 |
package com.basho.riak.spark.run
import org.apache.spark._
import org.apache.spark.rdd._
object LongJobApp {
  // Fixed: advertise "--duration" (the only spelling the parser accepts — the old
  // banner said "--durtaion") and close the unbalanced "[--partitions" bracket.
  val USAGE = "Usage: LongJobApp.jar [--iter num] [--duration sec] [--master spark://host{ip}:port] [--samples num] [--partitions num]"
  val NUM_SAMPLES = 2048
  val PARTITIONS = 3
  val APP_NAME = "Pi Darts"
  val SPARK_URL = "spark://node1.local:7077"

  /**
   * Estimates Pi by the Monte Carlo "dart throwing" method: for each element of
   * `count`, throw a dart at the unit square and count hits inside the unit circle.
   *
   * @param count   RDD whose size determines the number of darts; element values are ignored.
   * @param samples the number of darts represented by `count`. Defaults to NUM_SAMPLES
   *                for backward compatibility; previously the estimate always divided by
   *                NUM_SAMPLES even when --samples overrode the dart count, skewing the result.
   */
  def calc_pi(count: RDD[Int], samples: Long = NUM_SAMPLES) = {
    val hits = count.map { _ =>
      // Fresh names (the original shadowed the lambda parameter with `val x`).
      val x = Math.random()
      val y = Math.random()
      if (x * x + y * y < 1) 1 else 0
    }.reduce(_ + _)
    println("Pi is roughly " + 4.0 * hits / samples)
  }

  /**
   * Recursively folds the argument list into an option map.
   * Unknown options print USAGE and terminate the JVM with exit code 1.
   */
  def nextOption(map: Map[Symbol, Any], list: List[String]) : Map[Symbol, Any] = {
    list match {
      case Nil => map
      case "--iter" :: value :: tail => nextOption(map ++ Map('iter -> value.toInt), tail)
      case "--duration" :: value :: tail => nextOption(map ++ Map('duration -> value.toInt), tail)
      case "--master" :: value :: tail => nextOption(map ++ Map('master -> value), tail)
      case "--samples" :: value :: tail => nextOption(map ++ Map('samples -> value.toInt), tail)
      case "--partitions" :: value :: tail => nextOption(map ++ Map('partitions -> value.toInt), tail)
      case option :: tail => println("Unknown option " + option)
        println(USAGE)
        sys.exit(1)
    }
  }

  def main(args: Array[String]) = {
    val argList = args.toList
    val options = nextOption(Map(), argList)
    val conf = new SparkConf().setAppName(APP_NAME)
      .setMaster(options.getOrElse('master, SPARK_URL).asInstanceOf[String])
    val sc = new SparkContext(conf)
    // Resolve the sample count once so calc_pi divides by the darts actually thrown.
    val samples = options.getOrElse('samples, NUM_SAMPLES).asInstanceOf[Int]
    val count = sc.parallelize(1 to samples,
      options.getOrElse('partitions, PARTITIONS).asInstanceOf[Int])
    if (options.contains('iter)) {
      // Fixed-iteration mode: run exactly n estimates.
      val n = options.getOrElse('iter, 100).asInstanceOf[Int]
      for (i <- 1 to n) {
        println("======================================")
        println("Iteration " + i + " of " + n)
        println("======================================")
        calc_pi(count, samples)
      }
    } else {
      // Duration mode: keep estimating until at least `sec` seconds have elapsed.
      val sec = options.getOrElse('duration, 60).asInstanceOf[Int]
      println("=====================================================")
      println("Scheduled to run for at least " + sec + " sec")
      println("=====================================================")
      var time = System.currentTimeMillis
      // 1000L forces Long arithmetic so very large --duration values cannot overflow Int.
      val end = time + sec * 1000L
      var i = 0
      while (time < end) {
        i += 1
        time = System.currentTimeMillis()
        println("======================================")
        println("Pre: Iteration " + i + " timestamp: " + time)
        println("======================================")
        calc_pi(count, samples)
        time = System.currentTimeMillis()
        println("======================================")
        println("Post: Iteration " + i + " timestamp: " + time)
        println("======================================")
      }
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.evaluation
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.mllib.util.MLlibTestSparkContext
class MulticlassClassificationEvaluatorSuite
  extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {

  // Checks the common Param contracts (copy/defaults/uid) for the evaluator.
  test("params") {
    ParamsSuite.checkParams(new MulticlassClassificationEvaluator)
  }

  // Round-trips a configured evaluator through ML persistence (save/load).
  test("read/write") {
    val evaluator = new MulticlassClassificationEvaluator()
      .setPredictionCol("myPrediction")
      .setLabelCol("myLabel")
      .setMetricName("accuracy")
    testDefaultReadWrite(evaluator)
  }

  // Verifies the evaluator accepts every NumericType label column and rejects others.
  test("should support all NumericType labels and not support other types") {
    MLTestingUtils.checkNumericTypes(new MulticlassClassificationEvaluator, spark)
  }
}
| bravo-zhang/spark | mllib/src/test/scala/org/apache/spark/ml/evaluation/MulticlassClassificationEvaluatorSuite.scala | Scala | apache-2.0 | 1,664 |
package com.itszuvalex.itszulib.logistics
import com.itszuvalex.itszulib.api.core.Loc4
/**
* Created by Christopher Harris (Itszuvalex) on 4/5/15.
*/
/**
 * A node that can participate in an [[INetwork]] of type `T`.
 *
 * NOTE(review): the contracts below are inferred from names and signatures only —
 * confirm against concrete implementations.
 */
trait INetworkNode[T <: INetwork[_,T]] {
  /** Associates this node with the given network. */
  def setNetwork(network: T)

  /** The network this node currently belongs to. */
  def getNetwork: T

  /** This node's 4D location (presumably dimension + coordinates). */
  def getLoc: Loc4

  /** Whether this node can connect to a node at the given location. */
  def canConnect(loc: Loc4): Boolean

  /** Refreshes this node's cached network state. */
  def refresh(): Unit

  /** Whether this node may be added to the given network. */
  def canAdd(iNetwork: INetwork[_,T]): Boolean

  /** Callback fired after this node has been added to a network. */
  def added(iNetwork: INetwork[_,T]): Unit

  /** Callback fired after this node has been removed from a network. */
  def removed(iNetwork: INetwork[_,T]): Unit

  /** Establishes a connection to the node at the given location. */
  def connect(node: Loc4): Unit

  /** Tears down the connection to the node at the given location. */
  def disconnect(node: Loc4): Unit
}
| BlockWorker/ItszuLib | src/main/scala/com/itszuvalex/itszulib/logistics/INetworkNode.scala | Scala | gpl-2.0 | 538 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle
import java.io.File
import java.io.FileFilter
import scala.collection.JavaConversions.collectionAsScalaIterable
import scala.collection.JavaConversions.seqAsJavaList
// Empty marker class; the companion-style `object Directory` below holds the logic.
class Directory

/**
 * A RealFileSpec that also retains the underlying java.io.File; toString reports
 * the absolute path so scalastyle output can show where a file lives on disk.
 */
class DirectoryFileSpec(name: String, encoding: Option[String], val file: java.io.File) extends RealFileSpec(name, encoding) {
  override def toString: String = file.getAbsolutePath
}
object Directory {
  /** Accepts only files whose absolute path ends with ".scala". */
  val scalaFileFilter = new FileFilter() {
    def accept(file: File): Boolean = file.getAbsolutePath.endsWith(".scala")
  }

  /** Java-friendly wrapper: recursively collects Scala file specs from the given list. */
  def getFilesAsJava(encoding: Option[String], files: java.util.List[File]): java.util.List[FileSpec] =
    seqAsJavaList(privateGetFiles(encoding, collectionAsScalaIterable(files)))

  /**
   * Recursively collects Scala file specs from `files`, skipping any path matched
   * by one of the `excludedFiles` regular expressions.
   */
  def getFiles(encoding: Option[String], files: Iterable[File], excludedFiles: Seq[String] = Nil): List[FileSpec] =
    privateGetFiles(encoding, files, createFileExclusionFilter(excludedFiles)).toList

  // Builds a FileFilter matching any of the exclusion regexes, or None when there
  // are no exclusions (so the common case pays no matching cost).
  private[this] def createFileExclusionFilter(excludedFiles: Seq[String]): Option[FileFilter] =
    if (excludedFiles.isEmpty) None
    else {
      val exclusionPatterns = excludedFiles.map(_.r)
      Some(new FileFilter {
        def accept(file: File): Boolean = {
          val absolutePath = file.getAbsolutePath
          exclusionPatterns.exists(_.findFirstMatchIn(absolutePath).isDefined)
        }
      })
    }

  // Depth-first traversal: excluded entries yield nothing, directories recurse,
  // .scala files become DirectoryFileSpecs, everything else is dropped.
  private[this] def privateGetFiles(encoding: Option[String], files: Iterable[File], excludeFilter: Option[FileFilter] = None): Seq[FileSpec] =
    files.flatMap { f =>
      if (excludeFilter.exists(_.accept(f))) {
        Seq.empty
      } else if (f.isDirectory) {
        privateGetFiles(encoding, f.listFiles, excludeFilter)
      } else if (scalaFileFilter.accept(f)) {
        Seq(new DirectoryFileSpec(f.getAbsolutePath, encoding, f.getAbsoluteFile))
      } else {
        Seq.empty
      }
    }.toSeq
}
| scalastyle/scalastyle | src/main/scala/org/scalastyle/Directory.scala | Scala | apache-2.0 | 2,638 |
package scalariform.formatter
import scalariform.formatter.preferences._
class ParenAndBracketSpacingTest extends AbstractExpressionFormatterTest {

  // Each `"input" ==> "expected"` pair asserts that formatting the left-hand
  // expression under the in-scope implicit preferences yields the right-hand text.

  {
    // With SpaceInsideParentheses, non-empty parentheses gain one space of inner
    // padding; empty "()" stays untouched.
    implicit val formattingPreferences = FormattingPreferences.setPreference(SpaceInsideParentheses, true)
    "()" ==> "()"
    "(a: Int) => 3" ==> "( a: Int ) => 3"
    "(3)" ==> "( 3 )"
    "(3, 4)" ==> "( 3, 4 )"
    "for (n <- 1 to 10) yield foo(n, n)" ==> "for ( n <- 1 to 10 ) yield foo( n, n )"
  }
  {
    // With SpaceInsideBrackets, type/parameter brackets gain one space of inner
    // padding, including chained applications and access modifiers.
    implicit val formattingPreferences = FormattingPreferences.setPreference(SpaceInsideBrackets, true)
    "x: List[String]" ==> "x: List[ String ]"
    "foo[Bar](baz)" ==> "foo[ Bar ](baz)"
    "{ class A[B] { private[this] val bob } }" ==> "{ class A[ B ] { private[ this ] val bob } }"
    "super[X].y" ==> "super[ X ].y"
    "foo[Bar](baz)[Biz]" ==> "foo[ Bar ](baz)[ Biz ]"
    "foo[Bar][Baz][Buz]" ==> "foo[ Bar ][ Baz ][ Buz ]"
  }
} | yu-iskw/scalariform | scalariform/src/test/scala/scalariform/formatter/ParenAndBracketSpacingTest.scala | Scala | mit | 909 |
package com.typesafe.slick.testkit.tests
import com.typesafe.slick.testkit.util.{AsyncTest, RelationalTestDB}
class ColumnDefaultTest extends AsyncTest[RelationalTestDB] {
  import tdb.profile.api._

  // Table with column-level defaults: "a" defaults to "foo" (max length 254),
  // "b" defaults to Some(true).
  class A(tag: Tag) extends Table[(Int, String, Option[Boolean])](tag, "a") {
    def id = column[Int]("id")
    def a = column[String]("a", O Default "foo", O Length 254)
    def b = column[Option[Boolean]]("b", O Default Some(true))
    def * = (id, a, b)
  }
  lazy val as = TableQuery[A]

  // Inserts only the id and verifies the database fills in the declared defaults.
  // Guarded by ifCap so it is skipped on databases without column-default support.
  def test = ifCap(rcap.columnDefaults) {
    for {
      _ <- as.schema.create
      _ <- as.map(_.id) += 42
      _ <- as.result.map(_ shouldBe List((42, "foo", Some(true))))
    } yield ()
  }
}
| jkutner/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/ColumnDefaultTest.scala | Scala | bsd-2-clause | 693 |
package com.rasterfoundry.database
import com.rasterfoundry.datamodel._
import com.rasterfoundry.common.Generators.Implicits._
import doobie.implicits._
import org.scalacheck.Prop.forAll
import org.scalatest._
import org.scalatestplus.scalacheck.Checkers
class UploadDaoSpec
    extends FunSuite
    with Matchers
    with Checkers
    with DBTestConfig
    with PropTestHelpers {

  // Smoke test: listing must succeed even when the uploads table is empty.
  test("list uploads") {
    UploadDao.query.list.transact(xa).unsafeRunSync.length should be >= 0
  }

  // Round-trip property: an inserted upload preserves every user-supplied
  // field after the database round trip.
  test("insert an upload") {
    check {
      forAll {
        (
            user: User.Create,
            org: Organization.Create,
            platform: Platform,
            project: Project.Create,
            upload: Upload.Create
        ) =>
          {
            // Build the owning user/org/platform/project graph, then insert
            // the upload with its foreign keys fixed up to point at them.
            val uploadInsertIO = for {
              (dbUser, _, _, dbProject) <- insertUserOrgPlatProject(
                user,
                org,
                platform,
                project
              )
              datasource <- unsafeGetRandomDatasource
              insertedUpload <- UploadDao.insert(
                fixupUploadCreate(dbUser, dbProject, datasource, upload),
                dbUser
              )
            } yield insertedUpload
            val dbUpload = uploadInsertIO.transact(xa).unsafeRunSync
            // The property holds when every user-controlled field survives
            // insertion unchanged.
            dbUpload.uploadStatus == upload.uploadStatus &&
            dbUpload.fileType == upload.fileType &&
            dbUpload.files == upload.files &&
            dbUpload.metadata == upload.metadata &&
            dbUpload.visibility == upload.visibility &&
            dbUpload.source == upload.source
          }
      }
    }
  }

  // When a project is given but no explicit layer, the inserted upload
  // should be attached to the project's default layer.
  test("insert an upload to a project") {
    check {
      forAll {
        (
            user: User.Create,
            org: Organization.Create,
            platform: Platform,
            project: Project.Create,
            upload: Upload.Create
        ) =>
          {
            val uploadInsertIO = for {
              (dbUser, _, _, dbProject) <- insertUserOrgPlatProject(
                user,
                org,
                platform,
                project
              )
              datasource <- unsafeGetRandomDatasource
              // Note: no layerId set here — only the projectId.
              uploadToInsert = upload.copy(
                owner = Some(user.id),
                datasource = datasource.id,
                projectId = Some(dbProject.id)
              )
              insertedUpload <- UploadDao.insert(uploadToInsert, dbUser)
            } yield (insertedUpload, dbProject)
            val (dbUpload, dbProject) =
              uploadInsertIO.transact(xa).unsafeRunSync
            dbUpload.uploadStatus == upload.uploadStatus &&
            dbUpload.fileType == upload.fileType &&
            dbUpload.files == upload.files &&
            dbUpload.metadata == upload.metadata &&
            dbUpload.visibility == upload.visibility &&
            dbUpload.source == upload.source &&
            dbUpload.projectId == Some(dbProject.id) &&
            // The DAO must fall back to the project's default layer.
            dbUpload.layerId == Some(dbProject.defaultLayerId)
          }
      }
    }
  }

  // When an explicit (non-default) layer id is supplied, the upload should
  // be attached to exactly that layer rather than the project default.
  test("insert an upload to a project's non-default layer") {
    check {
      forAll {
        (
            user: User.Create,
            org: Organization.Create,
            platform: Platform,
            project: Project.Create,
            upload: Upload.Create,
            projectLayerCreate: ProjectLayer.Create
        ) =>
          {
            val uploadInsertIO = for {
              (dbUser, _, _, dbProject) <- insertUserOrgPlatProject(
                user,
                org,
                platform,
                project
              )
              datasource <- unsafeGetRandomDatasource
              // Create a second layer on the project so it is distinct from
              // the project's default layer.
              dbLayer <- ProjectLayerDao.insertProjectLayer(
                projectLayerCreate
                  .copy(projectId = Some(dbProject.id))
                  .toProjectLayer
              )
              uploadToInsert = upload.copy(
                owner = Some(user.id),
                datasource = datasource.id,
                projectId = Some(dbProject.id),
                layerId = Some(dbLayer.id)
              )
              insertedUpload <- UploadDao.insert(uploadToInsert, dbUser)
            } yield (insertedUpload, dbProject, dbLayer)
            val (dbUpload, dbProject, dbLayer) =
              uploadInsertIO.transact(xa).unsafeRunSync
            dbUpload.uploadStatus == upload.uploadStatus &&
            dbUpload.fileType == upload.fileType &&
            dbUpload.files == upload.files &&
            dbUpload.metadata == upload.metadata &&
            dbUpload.visibility == upload.visibility &&
            dbUpload.source == upload.source &&
            dbUpload.projectId == Some(dbProject.id) &&
            dbUpload.layerId == Some(dbLayer.id)
          }
      }
    }
  }

  // Updating an inserted upload should report exactly one affected row and
  // persist every updated field.
  test("update an upload") {
    check {
      forAll {
        (
            user: User.Create,
            org: Organization.Create,
            platform: Platform,
            project: Project.Create,
            insertUpload: Upload.Create,
            updateUpload: Upload.Create
        ) =>
          {
            // First insert an upload, carrying along the inserted entities
            // needed to build the updated record afterwards.
            val uploadInsertWithUserOrgProjectDatasourceIO = for {
              userOrgPlatProject <- insertUserOrgPlatProject(
                user,
                org,
                platform,
                project
              )
              (dbUser, dbOrg, dbPlatform, dbProject) = userOrgPlatProject
              datasource <- unsafeGetRandomDatasource
              insertedUpload <- UploadDao.insert(
                fixupUploadCreate(dbUser, dbProject, datasource, insertUpload),
                dbUser
              )
            } yield
              (insertedUpload, dbUser, dbOrg, dbPlatform, dbProject, datasource)
            // Then overwrite it (keeping the same id) with the second
            // generated upload and read it back.
            val uploadUpdateWithUploadIO = uploadInsertWithUserOrgProjectDatasourceIO flatMap {
              case (
                  dbUpload: Upload,
                  dbUser: User,
                  _: Organization,
                  dbPlatform: Platform,
                  dbProject: Project,
                  dbDatasource: Datasource
                  ) => {
                val uploadId = dbUpload.id
                val fixedUpUpdateUpload =
                  fixupUploadCreate(
                    dbUser,
                    dbProject,
                    dbDatasource,
                    updateUpload
                  ).toUpload(
                    dbUser,
                    (dbPlatform.id, false),
                    Some(dbPlatform.id)
                  )
                UploadDao.update(fixedUpUpdateUpload, uploadId) flatMap {
                  (affectedRows: Int) =>
                    {
                      UploadDao.unsafeGetUploadById(uploadId) map {
                        (affectedRows, _)
                      }
                    }
                }
              }
            }
            val (affectedRows, updatedUpload) =
              uploadUpdateWithUploadIO.transact(xa).unsafeRunSync
            affectedRows == 1 &&
            updatedUpload.uploadStatus == updateUpload.uploadStatus &&
            updatedUpload.fileType == updateUpload.fileType &&
            updatedUpload.uploadType == updateUpload.uploadType &&
            updatedUpload.metadata == updateUpload.metadata &&
            updatedUpload.visibility == updateUpload.visibility &&
            updatedUpload.projectId == updateUpload.projectId &&
            updatedUpload.source == updateUpload.source
          }
      }
    }
  }
}
| aaronxsu/raster-foundry | app-backend/db/src/test/scala/com/azavea/rf/database/UploadDaoSpec.scala | Scala | apache-2.0 | 7,493 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.io.{ByteArrayOutputStream, CharArrayWriter, StringWriter}
import scala.util.parsing.combinator.RegexParsers
import com.fasterxml.jackson.core._
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.json._
import org.apache.spark.sql.catalyst.util.ParseModes
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
// One step of a parsed JSON path (e.g. `$.a[0].*`), produced by
// JsonPathParser and consumed by GetJsonObject.evaluatePath.
private[this] sealed trait PathInstruction
private[this] object PathInstruction {
  // `[...]` array access; always followed by Wildcard or Index.
  private[expressions] case object Subscript extends PathInstruction
  // `*` — matches every element / field at this level.
  private[expressions] case object Wildcard extends PathInstruction
  // `.` field access; always followed by Named.
  private[expressions] case object Key extends PathInstruction
  // A concrete array index inside a subscript, e.g. `[123]`.
  private[expressions] case class Index(index: Long) extends PathInstruction
  // A concrete field name, e.g. `.foo` or `['foo']`.
  private[expressions] case class Named(name: String) extends PathInstruction
}
// How matched values are written to the output generator while evaluating
// a JSON path; see GetJsonObject.evaluatePath for the transitions.
private[this] sealed trait WriteStyle
private[this] object WriteStyle {
  // Top-level scalar results: strings are emitted without surrounding quotes.
  private[expressions] case object RawStyle extends WriteStyle
  // Inside an array: values keep their JSON quoting.
  private[expressions] case object QuotedStyle extends WriteStyle
  // Double-wildcard mode: nested arrays are flattened into the parent.
  private[expressions] case object FlattenStyle extends WriteStyle
}
/**
 * Combinator parser for Hive-style JSON paths (`$.store.book[0].title`,
 * `$[*].a`, `$['name']`, ...). `parse` returns the corresponding
 * [[PathInstruction]] list, or None when the path is malformed.
 */
private[this] object JsonPathParser extends RegexParsers {
  import PathInstruction._

  // the root of every path: `$`
  def root: Parser[Char] = '$'

  // a non-negative integer literal, used for array subscripts
  // (was "\\\\d+", which matched a literal backslash followed by `d`)
  def long: Parser[Long] = "\\d+".r ^? {
    case x => x.toLong
  }

  // parse `[*]` and `[123]` subscripts
  def subscript: Parser[List[PathInstruction]] =
    for {
      operand <- '[' ~> ('*' ^^^ Wildcard | long ^^ Index) <~ ']'
    } yield {
      Subscript :: operand :: Nil
    }

  // parse `.name` or `['name']` child expressions
  // (regexes de-escaped: a name char is anything but `.`/`[`, or anything
  // but `'`/`?` inside the quoted form)
  def named: Parser[List[PathInstruction]] =
    for {
      name <- '.' ~> "[^\\.\\[]+".r | "['" ~> "[^\\'\\?]+".r <~ "']"
    } yield {
      Key :: Named(name) :: Nil
    }

  // child wildcards: `..`, `.*` or `['*']`
  def wildcard: Parser[List[PathInstruction]] =
    (".*" | "['*']") ^^^ List(Wildcard)

  def node: Parser[List[PathInstruction]] =
    wildcard |
      named |
      subscript

  // a complete path: `$` followed by any number of nodes
  val expression: Parser[List[PathInstruction]] = {
    phrase(root ~> rep(node) ^^ (x => x.flatten))
  }

  /** Parses `str` into instructions; None signals a malformed path. */
  def parse(str: String): Option[List[PathInstruction]] = {
    this.parseAll(expression, str) match {
      case Success(result, _) =>
        Some(result)
      case _ =>
        None
    }
  }
}
// Single JsonFactory shared by GetJsonObject and JsonTuple; creating
// parsers/generators from one factory is cheap and thread-safe per Jackson.
private[this] object SharedFactory {
  val jsonFactory = new JsonFactory()

  // Enabled for Hive compatibility
  jsonFactory.enable(JsonParser.Feature.ALLOW_UNQUOTED_CONTROL_CHARS)
}
/**
* Extracts json object from a json string based on json path specified, and returns json string
* of the extracted json object. It will return null if the input json string is invalid.
*/
@ExpressionDescription(
  usage = "_FUNC_(json_txt, path) - Extracts a json object from `path`.",
  extended = """
    Examples:
      > SELECT _FUNC_('{"a":"b"}', '$.a');
       b
  """)
case class GetJsonObject(json: Expression, path: Expression)
  extends BinaryExpression with ExpectsInputTypes with CodegenFallback {

  import com.fasterxml.jackson.core.JsonToken._

  import PathInstruction._
  import SharedFactory._
  import WriteStyle._

  override def left: Expression = json
  override def right: Expression = path
  override def inputTypes: Seq[DataType] = Seq(StringType, StringType)
  override def dataType: DataType = StringType
  override def nullable: Boolean = true
  override def prettyName: String = "get_json_object"

  // Cached parse of the path when it is foldable (the common case of a
  // literal path); evaluated at most once thanks to lazy val.
  @transient private lazy val parsedPath = parsePath(path.eval().asInstanceOf[UTF8String])

  // Interpreted evaluation (CodegenFallback routes codegen here). Returns
  // null for null/invalid JSON, a malformed path, or no match.
  override def eval(input: InternalRow): Any = {
    val jsonStr = json.eval(input).asInstanceOf[UTF8String]
    if (jsonStr == null) {
      return null
    }

    val parsed = if (path.foldable) {
      parsedPath
    } else {
      parsePath(path.eval(input).asInstanceOf[UTF8String])
    }

    if (parsed.isDefined) {
      try {
        Utils.tryWithResource(jsonFactory.createParser(jsonStr.getBytes)) { parser =>
          val output = new ByteArrayOutputStream()
          val matched = Utils.tryWithResource(
            jsonFactory.createGenerator(output, JsonEncoding.UTF8)) { generator =>
            parser.nextToken()
            evaluatePath(parser, generator, RawStyle, parsed.get)
          }
          if (matched) {
            UTF8String.fromBytes(output.toByteArray)
          } else {
            null
          }
        }
      } catch {
        // invalid JSON input yields null rather than an error
        case _: JsonProcessingException => null
      }
    } else {
      null
    }
  }

  // Null-safe wrapper around JsonPathParser.parse.
  private def parsePath(path: UTF8String): Option[List[PathInstruction]] = {
    if (path != null) {
      JsonPathParser.parse(path.toString)
    } else {
      None
    }
  }

  // advance to the desired array index, assumes to start at the START_ARRAY token
  private def arrayIndex(p: JsonParser, f: () => Boolean): Long => Boolean = {
    case _ if p.getCurrentToken == END_ARRAY =>
      // terminate, nothing has been written
      false

    case 0 =>
      // we've reached the desired index
      val dirty = f()

      while (p.nextToken() != END_ARRAY) {
        // advance the token stream to the end of the array
        p.skipChildren()
      }

      dirty

    case i if i > 0 =>
      // skip this token and evaluate the next
      p.skipChildren()
      p.nextToken()
      arrayIndex(p, f)(i - 1)
  }

  /**
   * Evaluate a list of JsonPath instructions, returning a bool that indicates if any leaf nodes
   * have been written to the generator
   */
  private def evaluatePath(
      p: JsonParser,
      g: JsonGenerator,
      style: WriteStyle,
      path: List[PathInstruction]): Boolean = {
    (p.getCurrentToken, path) match {
      case (VALUE_STRING, Nil) if style == RawStyle =>
        // there is no array wildcard or slice parent, emit this string without quotes
        if (p.hasTextCharacters) {
          g.writeRaw(p.getTextCharacters, p.getTextOffset, p.getTextLength)
        } else {
          g.writeRaw(p.getText)
        }
        true

      case (START_ARRAY, Nil) if style == FlattenStyle =>
        // flatten this array into the parent
        var dirty = false
        while (p.nextToken() != END_ARRAY) {
          dirty |= evaluatePath(p, g, style, Nil)
        }
        dirty

      case (_, Nil) =>
        // general case: just copy the child tree verbatim
        g.copyCurrentStructure(p)
        true

      case (START_OBJECT, Key :: xs) =>
        var dirty = false
        while (p.nextToken() != END_OBJECT) {
          if (dirty) {
            // once a match has been found we can skip other fields
            p.skipChildren()
          } else {
            dirty = evaluatePath(p, g, style, xs)
          }
        }
        dirty

      case (START_ARRAY, Subscript :: Wildcard :: Subscript :: Wildcard :: xs) =>
        // special handling for the non-structure preserving double wildcard behavior in Hive
        var dirty = false
        g.writeStartArray()
        while (p.nextToken() != END_ARRAY) {
          dirty |= evaluatePath(p, g, FlattenStyle, xs)
        }
        g.writeEndArray()
        dirty

      case (START_ARRAY, Subscript :: Wildcard :: xs) if style != QuotedStyle =>
        // retain Flatten, otherwise use Quoted... cannot use Raw within an array
        val nextStyle = style match {
          case RawStyle => QuotedStyle
          case FlattenStyle => FlattenStyle
          case QuotedStyle => throw new IllegalStateException()
        }

        // temporarily buffer child matches, the emitted json will need to be
        // modified slightly if there is only a single element written
        val buffer = new StringWriter()

        var dirty = 0
        Utils.tryWithResource(jsonFactory.createGenerator(buffer)) { flattenGenerator =>
          flattenGenerator.writeStartArray()

          while (p.nextToken() != END_ARRAY) {
            // track the number of array elements and only emit an outer array if
            // we've written more than one element, this matches Hive's behavior
            dirty += (if (evaluatePath(p, flattenGenerator, nextStyle, xs)) 1 else 0)
          }
          flattenGenerator.writeEndArray()
        }

        val buf = buffer.getBuffer
        if (dirty > 1) {
          g.writeRawValue(buf.toString)
        } else if (dirty == 1) {
          // remove outer array tokens
          g.writeRawValue(buf.substring(1, buf.length()-1))
        } // else do not write anything

        dirty > 0

      case (START_ARRAY, Subscript :: Wildcard :: xs) =>
        var dirty = false
        g.writeStartArray()
        while (p.nextToken() != END_ARRAY) {
          // wildcards can have multiple matches, continually update the dirty count
          dirty |= evaluatePath(p, g, QuotedStyle, xs)
        }
        g.writeEndArray()
        dirty

      case (START_ARRAY, Subscript :: Index(idx) :: (xs@Subscript :: Wildcard :: _)) =>
        p.nextToken()
        // we're going to have 1 or more results, switch to QuotedStyle
        arrayIndex(p, () => evaluatePath(p, g, QuotedStyle, xs))(idx)

      case (START_ARRAY, Subscript :: Index(idx) :: xs) =>
        p.nextToken()
        arrayIndex(p, () => evaluatePath(p, g, style, xs))(idx)

      case (FIELD_NAME, Named(name) :: xs) if p.getCurrentName == name =>
        // exact field match
        if (p.nextToken() != JsonToken.VALUE_NULL) {
          evaluatePath(p, g, style, xs)
        } else {
          false
        }

      case (FIELD_NAME, Wildcard :: xs) =>
        // wildcard field match
        p.nextToken()
        evaluatePath(p, g, style, xs)

      case _ =>
        p.skipChildren()
        false
    }
  }
}
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(jsonStr, p1, p2, ..., pn) - Return a tuple like the function get_json_object, but it takes multiple names. All the input parameters and output column types are string.",
  extended = """
    Examples:
      > SELECT _FUNC_('{"a":1, "b":2}', 'a', 'b');
       1  2
  """)
// scalastyle:on line.size.limit
// Generator that extracts several top-level fields from a JSON object in a
// single streaming pass; missing/invalid fields come back as nulls.
case class JsonTuple(children: Seq[Expression])
  extends Generator with CodegenFallback {

  import SharedFactory._

  override def nullable: Boolean = {
    // a row is always returned
    false
  }

  // if processing fails this shared value will be returned
  @transient private lazy val nullRow: Seq[InternalRow] =
    new GenericInternalRow(Array.ofDim[Any](fieldExpressions.length)) :: Nil

  // the json body is the first child
  @transient private lazy val jsonExpr: Expression = children.head

  // the fields to query are the remaining children
  @transient private lazy val fieldExpressions: Seq[Expression] = children.tail

  // eagerly evaluate any foldable field names (non-foldable slots are null)
  @transient private lazy val foldableFieldNames: IndexedSeq[String] = {
    fieldExpressions.map {
      case expr if expr.foldable => expr.eval().asInstanceOf[UTF8String].toString
      case _ => null
    }.toIndexedSeq
  }

  // and count the number of foldable fields, we'll use this later to optimize evaluation
  @transient private lazy val constantFields: Int = foldableFieldNames.count(_ != null)

  // output columns are named c0, c1, ... and are always nullable strings
  override def elementSchema: StructType = StructType(fieldExpressions.zipWithIndex.map {
    case (_, idx) => StructField(s"c$idx", StringType, nullable = true)
  })

  override def prettyName: String = "json_tuple"

  override def checkInputDataTypes(): TypeCheckResult = {
    if (children.length < 2) {
      TypeCheckResult.TypeCheckFailure(s"$prettyName requires at least two arguments")
    } else if (children.forall(child => StringType.acceptsType(child.dataType))) {
      TypeCheckResult.TypeCheckSuccess
    } else {
      TypeCheckResult.TypeCheckFailure(s"$prettyName requires that all arguments are strings")
    }
  }

  override def eval(input: InternalRow): TraversableOnce[InternalRow] = {
    val json = jsonExpr.eval(input).asInstanceOf[UTF8String]
    if (json == null) {
      return nullRow
    }

    try {
      Utils.tryWithResource(jsonFactory.createParser(json.getBytes)) {
        parser => parseRow(parser, input)
      }
    } catch {
      case _: JsonProcessingException =>
        nullRow
    }
  }

  // Single pass over the object's top-level fields, filling one output row.
  private def parseRow(parser: JsonParser, input: InternalRow): Seq[InternalRow] = {
    // only objects are supported
    if (parser.nextToken() != JsonToken.START_OBJECT) {
      return nullRow
    }

    // evaluate the field names as String rather than UTF8String to
    // optimize lookups from the json token, which is also a String
    val fieldNames = if (constantFields == fieldExpressions.length) {
      // typically the user will provide the field names as foldable expressions
      // so we can use the cached copy
      foldableFieldNames
    } else if (constantFields == 0) {
      // none are foldable so all field names need to be evaluated from the input row
      fieldExpressions.map(_.eval(input).asInstanceOf[UTF8String].toString)
    } else {
      // if there is a mix of constant and non-constant expressions
      // prefer the cached copy when available
      foldableFieldNames.zip(fieldExpressions).map {
        case (null, expr) => expr.eval(input).asInstanceOf[UTF8String].toString
        case (fieldName, _) => fieldName
      }
    }

    val row = Array.ofDim[Any](fieldNames.length)

    // start reading through the token stream, looking for any requested field names
    while (parser.nextToken() != JsonToken.END_OBJECT) {
      if (parser.getCurrentToken == JsonToken.FIELD_NAME) {
        // check to see if this field is desired in the output
        val idx = fieldNames.indexOf(parser.getCurrentName)
        if (idx >= 0) {
          // it is, copy the child tree to the correct location in the output row
          val output = new ByteArrayOutputStream()

          // write the output directly to UTF8 encoded byte array
          if (parser.nextToken() != JsonToken.VALUE_NULL) {
            Utils.tryWithResource(jsonFactory.createGenerator(output, JsonEncoding.UTF8)) {
              generator => copyCurrentStructure(generator, parser)
            }

            row(idx) = UTF8String.fromBytes(output.toByteArray)
          }
        }
      }

      // always skip children, it's cheap enough to do even if copyCurrentStructure was called
      parser.skipChildren()
    }

    new GenericInternalRow(row) :: Nil
  }

  private def copyCurrentStructure(generator: JsonGenerator, parser: JsonParser): Unit = {
    parser.getCurrentToken match {
      // if the user requests a string field it needs to be returned without enclosing
      // quotes which is accomplished via JsonGenerator.writeRaw instead of JsonGenerator.write
      case JsonToken.VALUE_STRING if parser.hasTextCharacters =>
        // slight optimization to avoid allocating a String instance, though the characters
        // still have to be decoded... Jackson doesn't have a way to access the raw bytes
        generator.writeRaw(parser.getTextCharacters, parser.getTextOffset, parser.getTextLength)

      case JsonToken.VALUE_STRING =>
        // the normal String case, pass it through to the output without enclosing quotes
        generator.writeRaw(parser.getText)

      case JsonToken.VALUE_NULL =>
        // a special case that needs to be handled outside of this method.
        // if a requested field is null, the result must be null. the easiest
        // way to achieve this is just by ignoring null tokens entirely
        throw new IllegalStateException("Do not attempt to copy a null field")

      case _ =>
        // handle other types including objects, arrays, booleans and numbers
        generator.copyCurrentStructure(parser)
    }
  }
}
/**
 * Converts a JSON input string to a [[StructType]] with the specified schema.
 * Invalid JSON input yields null (the expression is nullable).
 */
case class JsonToStruct(schema: StructType, options: Map[String, String], child: Expression)
  extends UnaryExpression with CodegenFallback with ExpectsInputTypes {
  override def nullable: Boolean = true

  // Reused across rows. FAIL_FAST forces parse failures to surface as
  // SparkSQLJsonProcessingException, which nullSafeEval maps to null.
  @transient
  lazy val parser =
    new JacksonParser(
      schema,
      "invalid", // Not used since we force fail fast. Invalid rows will be set to `null`.
      new JSONOptions(options ++ Map("mode" -> ParseModes.FAIL_FAST_MODE)))

  override def dataType: DataType = schema

  override def nullSafeEval(json: Any): Any = {
    // headOption.orNull instead of .head: if the parser legitimately yields
    // no rows (e.g. empty input), return null rather than throwing
    // NoSuchElementException on an expression that is declared nullable.
    try parser.parse(json.toString).headOption.orNull catch {
      case _: SparkSQLJsonProcessingException => null
    }
  }

  override def inputTypes: Seq[AbstractDataType] = StringType :: Nil
}
/**
 * Converts a [[StructType]] to a json output string.
 */
case class StructToJson(options: Map[String, String], child: Expression)
  extends UnaryExpression with CodegenFallback with ExpectsInputTypes {
  override def nullable: Boolean = true

  // Writer and generator are lazily created once and reused for every row;
  // writer.reset() in nullSafeEval keeps the buffer from accumulating output.
  @transient
  lazy val writer = new CharArrayWriter()

  @transient
  lazy val gen =
    new JacksonGenerator(child.dataType.asInstanceOf[StructType], writer)

  override def dataType: DataType = StringType

  // Accept only struct-typed children whose field types the Jackson-based
  // generator can actually serialize.
  override def checkInputDataTypes(): TypeCheckResult = {
    if (StructType.acceptsType(child.dataType)) {
      try {
        JacksonUtils.verifySchema(child.dataType.asInstanceOf[StructType])
        TypeCheckResult.TypeCheckSuccess
      } catch {
        case e: UnsupportedOperationException =>
          TypeCheckResult.TypeCheckFailure(e.getMessage)
      }
    } else {
      TypeCheckResult.TypeCheckFailure(
        s"$prettyName requires that the expression is a struct expression.")
    }
  }

  // Serialize the row, capture the buffered text, then reset the shared
  // buffer for the next invocation.
  override def nullSafeEval(row: Any): Any = {
    gen.write(row.asInstanceOf[InternalRow])
    gen.flush()

    val json = writer.toString
    writer.reset()
    UTF8String.fromString(json)
  }

  override def inputTypes: Seq[AbstractDataType] = StructType :: Nil
}
| kimoonkim/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala | Scala | apache-2.0 | 18,695 |
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package types
import Chisel._
// Shared constants for the HardFloat modules: hardware encodings of the
// rounding modes and tininess-detection options, plus bit flags that let
// rounding units skip work they can prove unnecessary.
object consts {
  /*------------------------------------------------------------------------
  | For rounding to integer values, rounding mode 'odd' rounds to minimum
  | magnitude instead, same as 'minMag'.
  *------------------------------------------------------------------------*/
  def round_near_even = UInt("b000", 3)
  def round_minMag = UInt("b001", 3)
  def round_min = UInt("b010", 3)
  def round_max = UInt("b011", 3)
  def round_near_maxMag = UInt("b100", 3)
  def round_odd = UInt("b101", 3)
  /*------------------------------------------------------------------------
  | Whether tininess (for underflow detection) is decided before or after
  | rounding, per IEEE 754's two permitted conventions.
  *------------------------------------------------------------------------*/
  def tininess_beforeRounding = UInt(0, 1)
  def tininess_afterRounding = UInt(1, 1)
  /*------------------------------------------------------------------------
  | Option flags for rounding units; each asserts an invariant about the
  | input that allows the generated hardware to be simplified.
  *------------------------------------------------------------------------*/
  def flRoundOpt_sigMSBitAlwaysZero = 1
  def flRoundOpt_subnormsAlwaysExact = 2
  def flRoundOpt_neverUnderflows = 4
  def flRoundOpt_neverOverflows = 8
}
// An unpacked ("raw") floating-point value: explicit special-case flags plus
// a signed exponent and a significand with the leading bit made explicit.
// The flag fields form a priority chain, as noted on each line.
class RawFloat(val expWidth: Int, val sigWidth: Int) extends Bundle
{
    val isNaN = Bool()              // overrides all other fields
    val isInf = Bool()              // overrides 'isZero', 'sExp', and 'sig'
    val isZero = Bool()             // overrides 'sExp' and 'sig'
    val sign = Bool()
    val sExp = SInt(width = expWidth + 2)
    val sig = UInt(width = sigWidth + 1)      // 2 m.s. bits cannot both be 0

    override def cloneType =
        new RawFloat(expWidth, sigWidth).asInstanceOf[this.type]
}
//*** CHANGE THIS INTO A '.isSigNaN' METHOD OF THE 'RawFloat' CLASS:
// True when 'in' is a signaling NaN: a NaN whose significand's
// quiet bit (bit sigWidth - 2) is clear.
object isSigNaNRawFloat
{
    def apply(in: RawFloat): Bool = in.isNaN && ! in.sig(in.sigWidth - 2)
}
| stanford-ppl/spatial-lang | spatial/core/resources/chiselgen/template-level/templates/hardfloat/common.scala | Scala | mit | 3,809 |
package org.cloudfun.util
import scala.math._
/**
* A range from some value to some other.
*/
trait ValueRange {
  // Inclusive lower bound of the range.
  def min: Double
  // Inclusive upper bound of the range.
  def max: Double
  // Midpoint of the range (NaN when either endpoint is NaN or infinite in
  // opposite directions).
  def average = (min + max) / 2
  // True when v lies inside [min, max] (endpoints included).
  def contains(v: Double) = v >= min && v <= max
  // True when the other range lies entirely within this one.
  def contains(other: ValueRange) = max >= other.max && min <= other.min
  // True when the two ranges share at least one value.
  def intersects(other: ValueRange) = min <= other.max && max >= other.min
}
// Symmetric range centered on `value`, extending `variance` in each
// direction. Rejects negative variance at construction time.
case class Around(value: Double, variance: Double) extends ValueRange {
  if (variance < 0 ) throw new IllegalArgumentException("Variance should not be negative")
  val min = value - variance
  val max = value + variance
}
// Explicit [min, max] range; rejects inverted bounds at construction time.
case class Between(min: Double, max: Double) extends ValueRange {
  if (min > max) throw new IllegalArgumentException("Min should be smaller than max")
}
// Unbounded-above range: [min, +infinity).
case class Above(min: Double) extends ValueRange {
  val max = Double.PositiveInfinity
}
// Unbounded-below range: (-infinity, max].
case class Below(max: Double) extends ValueRange {
  val min = Double.NegativeInfinity
}
// The full real line: contains every value and intersects every range.
case object AllValues extends ValueRange {
  val min = Double.NegativeInfinity
  val max = Double.PositiveInfinity
}
// The empty range: contains nothing and intersects nothing. The predicate
// overrides are needed because NaN endpoints alone would not make the
// inherited comparisons behave consistently.
case object NoValues extends ValueRange {
  val min = Double.NaN
  val max = Double.NaN
  override def average = Double.NaN
  override def intersects(other: ValueRange) = false
  override def contains(other: ValueRange) = false
  override def contains(v: Double) = false
}
| zzorn/cloudfun | src/main/scala/org/cloudfun/util/ValueRange.scala | Scala | lgpl-3.0 | 1,338 |
// Copyright: 2010 - 2018 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.config
import org.ensime.util.file._
import org.ensime.util.{ EnsimeSpec, EscapingStringInterpolation }
import org.ensime.api._
import org.ensime.config.richconfig._
import scala.util.Properties
class EnsimeConfigSpec extends EnsimeSpec {
  import EscapingStringInterpolation._

  // Parse the raw .ensime S-expression text and hand the resulting
  // EnsimeConfig to the assertion function.
  def test(contents: String, testFn: (EnsimeConfig) => Unit): Unit =
    testFn(EnsimeConfigProtocol.parse(contents))

  "EnsimeConfig" should "parse a simple config" in withTempDir { dir =>
    // Real directories are required because the parser validates paths.
    val abc = dir / "abc"
    val cache = dir / ".ensime_cache"
    val javaHome = File(Properties.javaHome)
    abc.mkdirs()
    cache.mkdirs()

    test(
      s"""
(:name "project"
 :scala-version "2.10.4"
 :java-home "$javaHome"
 :root-dir "$dir"
 :cache-dir "$cache"
 :reference-source-roots ()
 :subprojects ((:name "module1"
                :scala-version "2.10.4"
                :depends-on-modules ()
                :targets ("$abc")
                :test-targets ("$abc")
                :source-roots ()
                :reference-source-roots ()
                :compiler-args ()
                :runtime-deps ()
                :test-deps ()))
 :projects ((:id (:project "module1" :config "compile")
             :depends ()
             :sources ()
             :targets ("$abc")
             :scalac-options ()
             :javac-options ()
             :library-jars ()
             :library-sources ()
             :library-docs ())))""", { implicit config =>
        config.name shouldBe "project"
        config.scalaVersion shouldBe "2.10.4"
        val module1 = config.lookup(EnsimeProjectId("module1", "compile"))
        module1.id.project shouldBe "module1"
        module1.dependencies shouldBe empty
        config.projects.size shouldBe 1
      }
    )
  }

  // Only :projects entries (no :subprojects) — the form produced for
  // projects distributed as binaries.
  it should "parse a minimal config for a binary only project" in withTempDir {
    dir =>
      val abc = dir / "abc"
      val cache = dir / ".ensime_cache"
      val javaHome = File(Properties.javaHome)
      abc.mkdirs()
      cache.mkdirs()
      test(
        s"""
(:name "project"
 :scala-version "2.10.4"
 :java-home "$javaHome"
 :root-dir "$dir"
 :cache-dir "$cache"
 :projects ((:id (:project "module1" :config "compile")
             :depends ()
             :targets ("$abc"))))""", { implicit config =>
          config.name shouldBe "project"
          config.scalaVersion shouldBe "2.10.4"
          val module1 = config.lookup(EnsimeProjectId("module1", "compile"))
          module1.id.project shouldBe "module1"
          module1.dependencies shouldBe empty
          module1.targets should have size 1
        }
      )
  }
}
| yyadavalli/ensime-server | core/src/test/scala/org/ensime/config/EnsimeConfigSpec.scala | Scala | gpl-3.0 | 2,776 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.executionplan.builders
import org.neo4j.cypher.internal.compiler.v2_3._
import org.neo4j.cypher.internal.compiler.v2_3.commands.{RelationshipById, StartItem}
import org.neo4j.cypher.internal.compiler.v2_3.executionplan._
import org.neo4j.cypher.internal.compiler.v2_3.executionplan.builders.GetGraphElements.getElements
import org.neo4j.cypher.internal.compiler.v2_3.pipes.{EntityProducer, PipeMonitor, QueryState, RelationshipStartPipe}
import org.neo4j.cypher.internal.compiler.v2_3.planDescription.InternalPlanDescription.Arguments
import org.neo4j.cypher.internal.compiler.v2_3.spi.PlanContext
import org.neo4j.graphdb.Relationship
import scala.collection.Seq
// Plan builder that consumes unsolved `RelationshipById` start items: it
// prepends a RelationshipStartPipe that looks relationships up by id and
// marks the start item as solved in the remaining query.
class RelationshipByIdBuilder extends PlanBuilder {
  def apply(plan: ExecutionPlanInProgress, ctx: PlanContext)(implicit pipeMonitor: PipeMonitor) = {
    val q = plan.query
    val p = plan.pipe

    // canWorkWith guarantees at least one matching start item exists, so
    // .head and the irrefutable pattern extraction below are safe here.
    val startItemToken = interestingStartItems(q).head
    val Unsolved(RelationshipById(key, expression)) = startItemToken

    // Evaluate the id expression per row and fetch each relationship by id.
    val pipe = new RelationshipStartPipe(p, key,
      EntityProducer[Relationship]("Rels(RelationshipById)", Arguments.LegacyExpression(expression)) {
        (ctx: ExecutionContext, state: QueryState) =>
          getElements[Relationship](expression(ctx)(state), key, (id) => state.query.relationshipOps.getById(id))
      })()

    // Replace the unsolved token with its solved counterpart.
    val remainingQ: Seq[QueryToken[StartItem]] = q.start.filterNot(_ == startItemToken) :+ startItemToken.solve

    plan.copy(pipe = pipe, query = q.copy(start = remainingQ))
  }

  // Applicable whenever the query still has an unsolved RelationshipById.
  def canWorkWith(plan: ExecutionPlanInProgress, ctx: PlanContext)(implicit pipeMonitor: PipeMonitor) =
    interestingStartItems(plan.query).nonEmpty

  private def interestingStartItems(q: PartiallySolvedQuery): Seq[QueryToken[StartItem]] = q.start.filter({
    case Unsolved(RelationshipById(_, expression)) => true
    case _ => false
  })
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/executionplan/builders/RelationshipByIdBuilder.scala | Scala | apache-2.0 | 2,692 |
package cn.edu.sjtu.omnilab.emcbdc
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.mapred.lib.MultipleTextOutputFormat
/**
* Output RDD into multiple files named by the key.
*/
/**
 * Output RDD into multiple files named by the key.
 *
 * Intended for use with `saveAsHadoopFile`: each (key, value) pair is written
 * to a file whose name is derived from the key, and the key itself is removed
 * from the written record.
 */
class RDDMultipleTextOutputFormat extends MultipleTextOutputFormat[Any, Any] {

  /** Drop the key from the output record; only the value is written to the file. */
  override def generateActualKey(key: Any, value: Any): Any =
    NullWritable.get()

  /**
   * Name the output file after the key.
   *
   * Uses `toString` instead of `asInstanceOf[String]` so non-String keys
   * (e.g. numeric or Writable keys) produce a file name instead of throwing
   * a ClassCastException; behavior is unchanged for String keys.
   */
  override def generateFileNameForKeyValue(key: Any, value: Any, name: String): String =
    key.toString
}
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import java.util.{Map => JMap}
import java.util.{HashMap => JHashMap}
import org.scalatest._
import Matchers._
/** Tests for `org.scalactic.Entry`: interoperability with `java.util.Map`
 *  entries and the `loneElement` syntax on Java maps.
 *
 *  NOTE: the SKIP-SCALATESTJS-START/END markers bracket JVM-only tests —
 *  presumably stripped out when building for Scala.js; do not remove them.
 */
class EntrySpec extends FunSpec {
  describe("An org.scalatest.Entry") {
    // SKIP-SCALATESTJS-START
    // Entry must be `equals`-compatible with entries produced by a real java.util.Map.
    it("can be compared against an Entry coming from a java.util.Map") {
      val jmap: JMap[String, Int] = new JHashMap[String, Int]
      jmap.put("one", 1)
      jmap.put("two", 2)
      jmap.put("three", 3)
      jmap.entrySet should contain (Entry("one", 1))
      jmap.entrySet should not contain (Entry("one", 100))
      jmap.entrySet should contain allOf (Entry("one", 1), Entry("two", 2))
    }
    // Java's Map.Entry renders as "key=value"; Entry must match that.
    it("should have a toString consistent with the ones coming from Java") {
      Entry("one", 1).toString should be ("one=1")
      Entry(1, "one").toString should be ("1=one")
    }
    // SKIP-SCALATESTJS-END
  }
  describe("the loneElement method") {
    // SKIP-SCALATESTJS-START
    describe("when used with java.util.Map") {
      // loneElement on a single-entry Java map yields an Entry with key/value accessors.
      it("should return an Entry that has key and value methods") {
        import LoneElement._
        val jmap: JMap[String, Int] = new JHashMap[String, Int]
        jmap.put("one", 1)
        jmap.loneElement.key should be ("one")
        jmap.loneElement.value should be (1)
      }
    }
    // SKIP-SCALATESTJS-END
  }
} | SRGOM/scalatest | scalactic-test/src/test/scala/org/scalactic/EntrySpec.scala | Scala | apache-2.0 | 1907 |
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Iulian Dragos
*/
package scala
package tools.nsc
package transform
import scala.tools.nsc.symtab.Flags
import scala.collection.{ mutable, immutable }
import scala.annotation.tailrec
/** Specialize code on types.
*
* Make sure you've read the thesis:
*
* Iulian Dragos: Compiling Scala for Performance (chapter 4)
*
* There are some things worth noting, (possibly) not mentioned there:
* 0) Make sure you understand the meaning of various `SpecializedInfo` descriptors
* defined below.
*
* 1) Specializing traits by introducing bridges in specialized methods
* of the specialized trait may introduce problems during mixin composition.
* Concretely, it may cause cyclic calls and result in a stack overflow.
* See ticket #4351.
* This was solved by introducing an `Abstract` specialized info descriptor.
* Instead of generating a bridge in the trait, an abstract method is generated.
*
* 2) Specialized private members sometimes have to be switched to protected.
* In some cases, even this is not enough. Example:
*
* {{{
* class A[@specialized T](protected val d: T) {
* def foo(that: A[T]) = that.d
* }
* }}}
*
* Specialization will generate a specialized class and a specialized method:
*
* {{{
* class A$mcI$sp(protected val d: Int) extends A[Int] {
* def foo(that: A[Int]) = foo$mcI$sp(that)
* def foo(that: A[Int]) = that.d
* }
* }}}
*
* Above, `A$mcI$sp` cannot access `d`, so the method cannot be typechecked.
*/
abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
import global._
import definitions._
import Flags._
// Whether closures are expanded inline (-Ydelambdafy:inline).
private val inlineFunctionExpansion = settings.Ydelambdafy.value == "inline"

/** the name of the phase: */
val phaseName: String = "specialize"

/** The following flags may be set by this phase: */
override def phaseNewFlags: Long = notPRIVATE

/** This phase changes base classes. */
override def changesBaseClasses = true
// Specialized symbols keep (a subset of) their type parameters.
override def keepsTypeParams = true

// A specialization environment: binds @specialized type parameters to the
// concrete types they are instantiated at.
type TypeEnv = immutable.Map[Symbol, Type]
def emptyEnv: TypeEnv = Map[Symbol, Type]()

// Orders types by the name of their type symbol, for stable iteration order.
private implicit val typeOrdering: Ordering[Type] = Ordering[String] on ("" + _.typeSymbol.name)

/** TODO - this is a lot of maps.
 */

/** For a given class and concrete type arguments, give its specialized class */
val specializedClass = perRunCaches.newMap[(Symbol, TypeEnv), Symbol]

/** Map a method symbol to a list of its specialized overloads in the same class. */
private val overloads = perRunCaches.newMap[Symbol, List[Overload]]() withDefaultValue Nil

/** Map a symbol to additional information on specialization. */
private val info = perRunCaches.newMap[Symbol, SpecializedInfo]()

/** Map class symbols to the type environments where they were created. */
private val typeEnv = perRunCaches.newMap[Symbol, TypeEnv]() withDefaultValue emptyEnv

// Key: a specialized class or method
// Value: a map from tparams in the original class to tparams in the specialized class.
private val anyrefSpecCache = perRunCaches.newMap[Symbol, mutable.Map[Symbol, Symbol]]()

// holds mappings from members to the type variables in the class
// that they were already specialized for, so that they don't get
// specialized twice (this is for AnyRef specializations)
private val wasSpecializedForTypeVars = perRunCaches.newMap[Symbol, Set[Symbol]]() withDefaultValue Set()

/** Concrete methods that use a specialized type, or override such methods. */
private val concreteSpecMethods = perRunCaches.newWeakSet[Symbol]()
/** The type symbols `sym` is specialized on, read from its @specialized annotation:
 *  - annotation with no arguments: every specializable primitive type;
 *  - an argument that is a `Specializable` group: the members of that group;
 *  - any other argument: the argument's own type symbol;
 *  - no annotation at all: `Nil`.
 */
private def specializedOn(sym: Symbol): List[Symbol] = {
  val GroupOfSpecializable = currentRun.runDefinitions.GroupOfSpecializable
  sym getAnnotation SpecializedClass match {
    case Some(AnnotationInfo(_, Nil, _)) => specializableTypes.map(_.typeSymbol)
    case Some(ann @ AnnotationInfo(_, args, _)) => {
      args map (_.tpe) flatMap { tp =>
        tp baseType GroupOfSpecializable match {
          case TypeRef(_, GroupOfSpecializable, arg :: Nil) =>
            // A group argument expands into all the types it bundles.
            arg.typeArgs map (_.typeSymbol)
          case _ =>
            tp.typeSymbol :: Nil
        }
      }
    }
    case _ => Nil
  }
}
/** Return the first non-`NoSymbol` result of applying `f` to `candidates`,
 *  or `NoSymbol` if every application comes up empty.
 */
@annotation.tailrec private def findSymbol[T](candidates: List[T], f: T => Symbol): Symbol =
  candidates match {
    case Nil => NoSymbol
    case head :: rest =>
      val found = f(head)
      if (found == NoSymbol) findSymbol(rest, f) else found
  }
/** Did the parents of `tree`'s symbol change relative to the previous phase?
 *  Logs the change when they did.
 */
private def hasNewParents(tree: Tree) = {
  val parents = tree.symbol.info.parents
  val prev = enteringPrevPhase(tree.symbol.info.parents)
  (parents != prev) && {
    debuglog(s"$tree parents changed from: $prev to: $parents")
    true
  }
}

// If we replace `isBoundedGeneric` with (tp <:< AnyRefTpe),
// then pos/spec-List.scala fails - why? Does this kind of check fail
// for similar reasons? Does `sym.isAbstractType` make a difference?
/** Is `tp` a valid instantiation for an AnyRef-specialized type parameter `sym`?
 *  True when `sym` is specialized on at least one reference type, `tp` is not a
 *  primitive value class, and `tp` is a bounded generic type.
 */
private def isSpecializedAnyRefSubtype(tp: Type, sym: Symbol) = {
  specializedOn(sym).exists(s => !isPrimitiveValueClass(s)) &&
  !isPrimitiveValueClass(tp.typeSymbol) &&
  isBoundedGeneric(tp)
  //(tp <:< AnyRefTpe)
}
/** Operations on specialization environments (`TypeEnv`). */
object TypeEnv {
  /** Return a new type environment binding specialized type parameters of sym to
   * the given args. Expects the lists to have the same length.
   */
  def fromSpecialization(sym: Symbol, args: List[Type]): TypeEnv = {
    ifDebug(assert(sym.info.typeParams.length == args.length, sym + " args: " + args))
    // Only bindings for @specialized type parameters are kept.
    emptyEnv ++ collectMap2(sym.info.typeParams, args)((k, v) => k.isSpecialized)
  }

  /** Does typeenv `t1` include `t2`? All type variables in `t1`
   * are defined in `t2` and:
   * - are bound to the same type, or
   * - are an AnyRef specialization and `t2` is bound to a subtype of AnyRef
   */
  def includes(t1: TypeEnv, t2: TypeEnv) = t1 forall {
    case (sym, tpe) =>
      t2 get sym exists { t2tp =>
        (tpe == t2tp) || !(isPrimitiveValueType(tpe) || isPrimitiveValueType(t2tp)) // u.t.b. (t2tp <:< AnyRefTpe)
      }
  }

  /** Reduce the given environment to contain mappings only for type variables in tps. */
  def restrict(env: TypeEnv, tps: immutable.Set[Symbol]): TypeEnv =
    env.filterKeys(tps).toMap

  /** Is the given environment a valid specialization for sym?
   * It is valid if each binding is from a @specialized type parameter in sym (or its owner)
   * to a type for which `sym` is specialized.
   */
  def isValid(env: TypeEnv, sym: Symbol): Boolean = {
    env forall { case (tvar, tpe) =>
      tvar.isSpecialized && (concreteTypes(tvar) contains tpe) && {
        (sym.typeParams contains tvar) ||
        (sym.owner != rootMirror.RootClass && (sym.owner.typeParams contains tvar))
      }
    }
  }
}
/** A specialized overload `sym` of some original method, valid in environment `env`. */
case class Overload(sym: Symbol, env: TypeEnv) {
  override def toString = "specialized overload " + sym + " in " + env
  // Same-signature check, used to match existing overloads.
  def matchesSym(sym1: Symbol) = sym.info =:= sym1.info
  // Is this overload applicable in environment `env1`?
  def matchesEnv(env1: TypeEnv) = TypeEnv.includes(env, env1)
}

/** Record `specializedMethod` as a specialized overload of `method` in `env`
 *  and return the new [[Overload]].
 */
private def newOverload(method: Symbol, specializedMethod: Symbol, env: TypeEnv) = {
  assert(!specializedMethod.isOverloaded, specializedMethod.defString)
  val om = Overload(specializedMethod, env)
  overloads(method) ::= om
  om
}
/** Just to mark uncheckable */
override def newPhase(prev: scala.tools.nsc.Phase): StdPhase = new SpecializationPhase(prev)

// Phase subclass whose only purpose is to opt out of tree checking.
class SpecializationPhase(prev: scala.tools.nsc.Phase) extends super.Phase(prev) {
  override def checkable = false
}

// Per-compilation-unit transformer; SpecializationTransformer is defined elsewhere in this file.
protected def newTransformer(unit: CompilationUnit): Transformer =
  new SpecializationTransformer(unit)
/** Describes how a specialized symbol relates to the original symbol it was
 *  derived from. See also the concrete descriptors below.
 */
abstract class SpecializedInfo {
  /** The original (unspecialized) symbol this descriptor refers back to. */
  def target: Symbol

  /** Are type bounds of @specialized type parameters of 'target' now in 'env'? */
  def typeBoundsIn(env: TypeEnv) = false

  /** A degenerated method has @specialized type parameters that appear only in
   * type bounds of other @specialized type parameters (and not in its result type).
   */
  def degenerate = false
}

/** Symbol is a special overloaded method of 'original', in the environment env. */
case class SpecialOverload(original: Symbol, env: TypeEnv) extends SpecializedInfo {
  def target = original
}

/** Symbol is a method that should be forwarded to 't' */
case class Forward(t: Symbol) extends SpecializedInfo {
  def target = t
}

/** Symbol is a specialized abstract method, either specialized or original. The original `t` is abstract. */
case class Abstract(t: Symbol) extends SpecializedInfo {
  def target = t
}

/** Symbol is a special overload of the super accessor. */
case class SpecialSuperAccessor(t: Symbol) extends SpecializedInfo {
  def target = t
}

/** Symbol is a specialized accessor for the `target` field. */
case class SpecializedAccessor(target: Symbol) extends SpecializedInfo { }

/** Symbol is a specialized method whose body should be the target's method body. */
case class Implementation(target: Symbol) extends SpecializedInfo

/** Symbol is a specialized override paired with `target`. */
case class SpecialOverride(target: Symbol) extends SpecializedInfo

/** A specialized inner class that specializes original inner class `target` on a type parameter of the enclosing class, in the typeenv `env`. */
case class SpecializedInnerClass(target: Symbol, env: TypeEnv) extends SpecializedInfo

/** Symbol is a normalized member obtained by specializing 'target'. */
case class NormalizedMember(target: Symbol) extends SpecializedInfo {
  /** Type bounds of a @specialized type var are now in the environment. */
  override def typeBoundsIn(env: TypeEnv): Boolean = {
    target.info.typeParams exists { tvar =>
      tvar.isSpecialized && (specializedTypeVars(tvar.info.bounds) exists env.isDefinedAt)
    }
  }

  // Degenerate iff some specialized type variable of the type parameters
  // does not occur in the result type (see SpecializedInfo.degenerate).
  override lazy val degenerate = {
    val stvTypeParams = specializedTypeVars(target.info.typeParams map (_.info))
    val stvResult = specializedTypeVars(target.info.resultType)
    debuglog("degenerate: " + target + " stv tparams: " + stvTypeParams + " stv info: " + stvResult)
    (stvTypeParams -- stvResult).nonEmpty
  }
}
/** Has `clazz` any type parameters that need be specialized? */
def hasSpecializedParams(clazz: Symbol) =
  clazz.info.typeParams exists (_.isSpecialized)

/** Return specialized type parameters. */
def specializedParams(sym: Symbol): List[Symbol] =
  sym.info.typeParams filter (_.isSpecialized)

/** Given an original class symbol and a list of types its type parameters are instantiated at
 * returns a list of type parameters that should remain in the TypeRef when instantiating a
 * specialized type.
 */
def survivingArgs(sym: Symbol, args: List[Type]): List[Type] =
  // An argument is dropped only when its type parameter is specialized AND
  // the argument is a primitive value type.
  for ((tvar, tpe) <- sym.info.typeParams.zip(args) if !tvar.isSpecialized || !isPrimitiveValueType(tpe))
    yield tpe

/** Is `member` potentially affected by specialization? This is a gross overapproximation,
 * but it should be okay for use outside of specialization.
 */
def possiblySpecialized(sym: Symbol) = specializedTypeVars(sym).nonEmpty

/** Refines possiblySpecialized taking into account the instantiation of the specialized type variables at `site` */
def isSpecializedIn(sym: Symbol, site: Type) =
  specializedTypeVars(sym) exists { tvar =>
    val concretes = concreteTypes(tvar)
    (concretes contains AnyRefClass) || (concretes contains site.memberType(tvar))
  }
/** Map a type to its specialized variant: for an applied TypeRef whose symbol
 *  has a specialized subclass registered for the (normalized) type arguments,
 *  substitute that subclass, keeping only the surviving type arguments.
 *  Types with no matching specialized class are returned unchanged.
 */
val specializedType = new TypeMap {
  override def apply(tp: Type): Type = tp match {
    case TypeRef(pre, sym, args) if args.nonEmpty =>
      val pre1 = this(pre)
      // when searching for a specialized class, take care to map all
      // type parameters that are subtypes of AnyRef to AnyRef
      val args1 = map2(args, sym.info.typeParams)((tp, orig) =>
        if (isSpecializedAnyRefSubtype(tp, orig)) AnyRefTpe
        else tp
      )
      specializedClass.get((sym, TypeEnv.fromSpecialization(sym, args1))) match {
        case Some(sym1) => typeRef(pre1, sym1, survivingArgs(sym, args))
        case None => typeRef(pre1, sym, args)
      }
    case _ => tp
  }
}

/** The name of the specialized subclass of function symbol `sym` for the given
 *  type arguments, or `sym`'s own name when no specialized subclass exists.
 */
def specializedFunctionName(sym: Symbol, args: List[Type]) = exitingSpecialize {
  require(isFunctionSymbol(sym), sym)
  val env: TypeEnv = TypeEnv.fromSpecialization(sym, args)
  specializedClass.get((sym, env)) match {
    case Some(x) =>
      x.name
    case None =>
      sym.name
  }
}
/** Return the specialized name of 'sym' in the given environment. It
 * guarantees the same result regardless of the map order by sorting
 * type variables alphabetically.
 *
 * !!! Is this safe in the face of the following?
 *    scala> trait T { def foo[A] = 0}; object O extends T { override def foo[B] = 0 }
 */
private def specializedName(sym: Symbol, env: TypeEnv): TermName = {
  // For classes, every binding in the environment contributes to the name;
  // for members, only the type variables the member actually uses.
  val tvars = (
    if (sym.isClass) env.keySet
    else specializedTypeVars(sym).intersect(env.keySet)
  )
  specializedName(sym.name, tvars, env)
}

private def specializedName(name: Name, tvars: immutable.Set[Symbol], env: TypeEnv): TermName = {
  // Sort for determinism, then split into method-owned vs. class-owned tvars.
  val (methparams, others) = tvars.toList sortBy ("" + _.name) partition (_.owner.isMethod)
  // debuglog("specName(" + sym + ") env: " + env + " tvars: " + tvars)
  specializedName(name, methparams map env, others map env)
}

/** Specialize name for the two list of types. The first one denotes
 * specialization on method type parameters, the second on outer environment.
 */
private def specializedName(name: Name, types1: List[Type], types2: List[Type]): TermName = (
  if (name == nme.CONSTRUCTOR || (types1.isEmpty && types2.isEmpty))
    name.toTermName
  else if (nme.isSetterName(name))
    // Derive the setter name from the specialized getter name.
    specializedName(name.getterName, types1, types2).setterName
  else if (nme.isLocalName(name))
    specializedName(name.getterName, types1, types2).localName
  else {
    // Encode the types as abbreviation tags: name$mXcY$sp, where X/Y are tag runs.
    val (base, cs, ms) = nme.splitSpecializedName(name)
    newTermName(base.toString + "$"
                + "m" + ms + types1.map(t => abbrvTag(t.typeSymbol)).mkString("", "", "")
                + "c" + cs + types2.map(t => abbrvTag(t.typeSymbol)).mkString("", "", "$sp"))
  }
)
// All primitive value class types, sorted (via typeOrdering) for stable order.
lazy val specializableTypes = ScalaValueClasses.map(_.tpe).sorted

/** If the symbol is the companion of a value class, the value class.
 * Otherwise, AnyRef.
 */
def specializesClass(sym: Symbol): Symbol = {
  val c = sym.companionClass
  if (isPrimitiveValueClass(c)) c else AnyRefClass
}

/** Return the types `sym` should be specialized at. This may be some of the primitive types
 * or AnyRef. AnyRef means that a new type parameter T will be generated later, known to be a
 * subtype of AnyRef (T <: AnyRef).
 * These are in a meaningful order for stability purposes.
 */
def concreteTypes(sym: Symbol): List[Type] = {
  val types = if (!sym.isSpecialized)
    Nil // no @specialized Annotation
  else
    specializedOn(sym).map(s => specializesClass(s).tpe).sorted
  // Specializing a bounded-generic tparam on AnyRef is redundant: warn.
  if (isBoundedGeneric(sym.tpe) && (types contains AnyRefClass))
    reporter.warning(sym.pos, sym + " is always a subtype of " + AnyRefTpe + ".")
  types
}
/** Return a list of all type environments for all specializations
 * of @specialized types in `tps`.
 */
private def specializations(tps: List[Symbol]): List[TypeEnv] = {
  // the keys in each TypeEnv
  val keys: List[Symbol] = tps filter (_.isSpecialized)
  // creating each permutation of concrete types
  def loop(ctypes: List[List[Type]]): List[List[Type]] = ctypes match {
    case Nil => Nil
    case set :: Nil => set map (_ :: Nil)
    case set :: sets => for (x <- set ; xs <- loop(sets)) yield x :: xs
  }
  // zip the keys with each permutation to create a TypeEnv.
  // If we don't exclude the "all AnyRef" specialization, we will
  // incur duplicate members and crash during mixin.
  loop(keys map concreteTypes) filterNot (_ forall (_ <:< AnyRefTpe)) map (xss => Map(keys zip xss: _*))
}

/** Does the given 'sym' need to be specialized in the environment 'env'?
 * Specialization is needed for
 *  - members with specialized type parameters found in the given environment
 *  - constructors of specialized classes
 *  - normalized members whose type bounds appear in the environment
 * But suppressed for:
 *  - any member with the @unspecialized annotation, or which has an
 *    enclosing member with the annotation.
 */
private def needsSpecialization(env: TypeEnv, sym: Symbol): Boolean = (
  !hasUnspecializableAnnotation(sym) && (
       specializedTypeVars(sym).intersect(env.keySet).diff(wasSpecializedForTypeVars(sym)).nonEmpty
    || sym.isClassConstructor && (sym.enclClass.typeParams exists (_.isSpecialized))
    || isNormalizedMember(sym) && info(sym).typeBoundsIn(env)
  )
)

// @unspecialized anywhere in the owner chain suppresses specialization.
private def hasUnspecializableAnnotation(sym: Symbol): Boolean =
  sym.ownerChain.exists(_ hasAnnotation UnspecializedClass)

// A member produced by normalization (see NormalizedMember).
def isNormalizedMember(m: Symbol) = m.isSpecialized && (info get m exists {
  case NormalizedMember(_) => true
  case _ => false
})
/** Union of `specializedTypeVars` over all types in `tpes` (tail-recursive). */
def specializedTypeVars(tpes: List[Type]): immutable.Set[Symbol] = {
  @tailrec def loop(result: immutable.Set[Symbol], xs: List[Type]): immutable.Set[Symbol] = {
    if (xs.isEmpty) result
    else loop(result ++ specializedTypeVars(xs.head), xs.tail)
  }
  loop(immutable.Set.empty, tpes)
}

/** Specialized type variables mentioned by `sym`'s info, computed at phase typer. */
def specializedTypeVars(sym: Symbol): immutable.Set[Symbol] = (
  if (neverHasTypeParameters(sym)) immutable.Set.empty
  else enteringTyper(specializedTypeVars(sym.info))
)

/** Return the set of @specialized type variables mentioned by the given type.
 * It only counts type variables that appear:
 *  - naked
 *  - as arguments to type constructors in @specialized positions
 *    (arrays are considered as Array[@specialized T])
 */
def specializedTypeVars(tpe: Type): immutable.Set[Symbol] = tpe match {
  case TypeRef(pre, sym, args) =>
    if (sym.isAliasType)
      specializedTypeVars(tpe.dealiasWiden)
    else if (sym.isTypeParameter && sym.isSpecialized || (sym.isTypeSkolem && sym.deSkolemize.isSpecialized))
      Set(sym)
    else if (sym == ArrayClass)
      // Array[T] counts as a specialized position for T.
      specializedTypeVars(args)
    else if (args.isEmpty)
      Set()
    else
      // Only arguments sitting in @specialized parameter positions count.
      specializedTypeVars(sym.typeParams zip args collect { case (tp, arg) if tp.isSpecialized => arg })
  case PolyType(tparams, resTpe) => specializedTypeVars(resTpe :: mapList(tparams)(symInfo)) // OPT
  // since this method may be run at phase typer (before uncurry, where NMTs are eliminated)
  case NullaryMethodType(resTpe) => specializedTypeVars(resTpe)
  case MethodType(argSyms, resTpe) => specializedTypeVars(resTpe :: mapList(argSyms)(symTpe)) // OPT
  case ExistentialType(_, res) => specializedTypeVars(res)
  case AnnotatedType(_, tp) => specializedTypeVars(tp)
  case TypeBounds(lo, hi) => specializedTypeVars(lo :: hi :: Nil)
  case RefinedType(parents, _) => parents.flatMap(specializedTypeVars).toSet
  case _ => immutable.Set.empty
}
/** Returns the type parameter in the specialized class `sClass` that corresponds to type parameter
 * `tparam` in the original class. It will create it if needed or use the one from the cache.
 */
private def typeParamSubAnyRef(tparam: Symbol, sClass: Symbol): Type = {
  val sClassMap = anyrefSpecCache.getOrElseUpdate(sClass, mutable.Map[Symbol, Symbol]())
  // Clone tparam as e.g. T$sp with its upper bound narrowed to AnyRef.
  sClassMap.getOrElseUpdate(tparam,
    tparam.cloneSymbol(sClass, tparam.flags, tparam.name append tpnme.SPECIALIZED_SUFFIX)
      modifyInfo (info => TypeBounds(info.bounds.lo, AnyRefTpe))
  ).tpe
}

/** Cleans the anyrefSpecCache of all type parameter symbols of a class.
 */
private def cleanAnyRefSpecCache(clazz: Symbol, decls: List[Symbol]) {
  // remove class type parameters and those of normalized members.
  clazz :: decls foreach (anyrefSpecCache remove _)
}

/** Type parameters that survive when specializing in the specified environment. */
def survivingParams(params: List[Symbol], env: TypeEnv) =
  params filter {
    p =>
      // Survives unless it is specialized, bound in `env`, and bound to a primitive.
      !p.isSpecialized ||
      !env.contains(p) ||
      !isPrimitiveValueType(env(p))
  }
/** Produces the symbols from type parameters `syms` of the original owner,
 * in the given type environment `env`. The new owner is `nowner`.
 *
 * Non-specialized type parameters are cloned into new ones.
 * Type parameters specialized on AnyRef have preexisting symbols.
 *
 * For instance, a @specialized(AnyRef) T, will become T$sp <: AnyRef.
 */
def produceTypeParameters(syms: List[Symbol], nowner: Symbol, env: TypeEnv) = {
  val cloned = for (s <- syms) yield if (!env.contains(s)) s.cloneSymbol(nowner) else env(s).typeSymbol
  // log("producing type params: " + cloned.map(t => (t, t.tpe.bounds.hi)))
  foreach2(syms, cloned) { (orig, cln) =>
    cln.removeAnnotation(SpecializedClass)
    // Parameters bound in `env` get their upper bound narrowed to AnyRef.
    if (env.contains(orig))
      cln modifyInfo (info => TypeBounds(info.bounds.lo, AnyRefTpe))
  }
  // Rewire self-references among the cloned parameters' infos.
  cloned map (_ substInfo (syms, cloned))
}

/** Maps AnyRef bindings from a raw environment (holding AnyRefs) into type parameters from
 * the specialized symbol (class (specialization) or member (normalization)), leaves everything else as-is.
 */
private def mapAnyRefsInSpecSym(env: TypeEnv, origsym: Symbol, specsym: Symbol): TypeEnv = env map {
  case (sym, AnyRefTpe) if sym.owner == origsym => (sym, typeParamSubAnyRef(sym, specsym))
  case x => x
}

/** Maps AnyRef bindings from a raw environment (holding AnyRefs) into type parameters from
 * the original class, leaves everything else as-is.
 */
private def mapAnyRefsInOrigCls(env: TypeEnv, origcls: Symbol): TypeEnv = env map {
  case (sym, AnyRefTpe) if sym.owner == origcls => (sym, sym.tpe)
  case x => x
}
/** Specialize 'clazz', in the environment `outerEnv`. The outer
* environment contains bindings for specialized types of enclosing
* classes.
*
* A class C is specialized w.r.t to its own specialized type params
* `stps`, by specializing its members, and creating a new class for
* each combination of `stps`.
*/
def specializeClass(clazz: Symbol, outerEnv: TypeEnv): List[Symbol] = {
def specializedClass(env0: TypeEnv, normMembers: List[Symbol]): Symbol = {
/* It gets hard to follow all the clazz and cls, and specializedClass
* was both already used for a map and mucho long. So "sClass" is the
* specialized subclass of "clazz" throughout this file.
*/
// SI-5545: Eliminate classes with the same name loaded from the bytecode already present - all we need to do is
// to force .info on them, as their lazy type will be evaluated and the symbols will be eliminated. Unfortunately
// evaluating the info after creating the specialized class will mess the specialized class signature, so we'd
// better evaluate it before creating the new class symbol
val clazzName = specializedName(clazz, env0).toTypeName
val bytecodeClazz = clazz.owner.info.decl(clazzName)
// debuglog("Specializing " + clazz + ", but found " + bytecodeClazz + " already there")
bytecodeClazz.info
val sClass = clazz.owner.newClass(clazzName, clazz.pos, (clazz.flags | SPECIALIZED) & ~CASE)
sClass.setAnnotations(clazz.annotations) // SI-8574 important that the subclass picks up @SerialVersionUID, @strictfp, etc.
def cloneInSpecializedClass(member: Symbol, flagFn: Long => Long, newName: Name = null) =
member.cloneSymbol(sClass, flagFn(member.flags | SPECIALIZED), newName)
sClass.associatedFile = clazz.sourceFile
currentRun.symSource(sClass) = clazz.sourceFile // needed later on by mixin
val env = mapAnyRefsInSpecSym(env0, clazz, sClass)
typeEnv(sClass) = env
this.specializedClass((clazz, env0)) = sClass
val decls1 = newScope // declarations of the newly specialized class 'sClass'
var oldClassTParams: List[Symbol] = Nil // original unspecialized type parameters
var newClassTParams: List[Symbol] = Nil // unspecialized type parameters of 'specializedClass' (cloned)
// has to be a val in order to be computed early. It is later called
// within 'enteringPhase(next)', which would lead to an infinite cycle otherwise
val specializedInfoType: Type = {
oldClassTParams = survivingParams(clazz.info.typeParams, env)
newClassTParams = produceTypeParameters(oldClassTParams, sClass, env) map subst(env)
// log("new tparams " + newClassTParams.zip(newClassTParams map {s => (s.tpe, s.tpe.bounds.hi)}) + ", in env: " + env)
def applyContext(tpe: Type) =
subst(env, tpe).instantiateTypeParams(oldClassTParams, newClassTParams map (_.tpe))
/* Return a list of specialized parents to be re-mixed in a specialized subclass.
* Assuming env = [T -> Int] and
* class Integral[@specialized T] extends Numeric[T]
* and Numeric[U] is specialized on U, this produces List(Numeric$mcI).
*
* so that class Integral$mci extends Integral[Int] with Numeric$mcI.
*/
def specializedParents(parents: List[Type]): List[Type] = {
var res: List[Type] = Nil
// log(specializedClass + ": seeking specialized parents of class with parents: " + parents.map(_.typeSymbol))
for (p <- parents) {
val stp = exitingSpecialize(specializedType(p))
if (stp != p)
if (p.typeSymbol.isTrait) res ::= stp
else if (currentRun.compiles(clazz))
reporter.warning(clazz.pos, p.typeSymbol + " must be a trait. Specialized version of "
+ clazz + " will inherit generic " + p) // TODO change to error
}
res
}
var parents = List(applyContext(enteringTyper(clazz.tpe_*)))
// log("!!! Parents: " + parents + ", sym: " + parents.map(_.typeSymbol))
if (parents.head.typeSymbol.isTrait)
parents = parents.head.parents.head :: parents
val extraSpecializedMixins = specializedParents(clazz.info.parents map applyContext)
if (extraSpecializedMixins.nonEmpty)
debuglog("extra specialized mixins for %s: %s".format(clazz.name.decode, extraSpecializedMixins.mkString(", ")))
// If the class being specialized has a self-type, the self type may
// require specialization. First exclude classes whose self types have
// the same type constructor as the class itself, since they will
// already be covered. Then apply the current context to the self-type
// as with the parents and assign it to typeOfThis.
if (clazz.typeOfThis.typeConstructor ne clazz.typeConstructor) {
sClass.typeOfThis = applyContext(clazz.typeOfThis)
debuglog("Rewriting self-type for specialized class:\\n" +
" " + clazz.defStringSeenAs(clazz.typeOfThis) + "\\n" +
" => " + sClass.defStringSeenAs(sClass.typeOfThis)
)
}
GenPolyType(newClassTParams, ClassInfoType(parents ::: extraSpecializedMixins, decls1, sClass))
}
exitingSpecialize(sClass setInfo specializedInfoType)
val fullEnv = outerEnv ++ env
/* Enter 'sym' in the scope of the current specialized class. Its type is
* mapped through the active environment, binding type variables to concrete
* types. The existing typeEnv for `sym` is composed with the current active
* environment
*/
def enterMember(sym: Symbol): Symbol = {
typeEnv(sym) = fullEnv ++ typeEnv(sym) // append the full environment
sym modifyInfo (_.substThis(clazz, sClass).instantiateTypeParams(oldClassTParams, newClassTParams map (_.tpe)))
// we remove any default parameters. At this point, they have been all
// resolved by the type checker. Later on, erasure re-typechecks everything and
// chokes if it finds default parameters for specialized members, even though
// they are never needed.
mapParamss(sym)(_ resetFlag DEFAULTPARAM)
decls1 enter subst(fullEnv)(sym)
}
/* Create and enter in scope an overridden symbol m1 for `m` that forwards
* to `om`. `om` is a fresh, special overload of m1 that is an implementation
* of `m`. For example, for a
*
* class Foo[@specialized A] {
* def m(x: A) = <body> // m
* }
* , for class Foo$I extends Foo[Int], this method enters two new symbols in
* the scope of Foo$I:
*
* def m(x: Int) = m$I(x) // m1
* def m$I(x: Int) = <body>/adapted to env {A -> Int} // om
*/
def forwardToOverload(m: Symbol): Symbol = {
val specMember = enterMember(cloneInSpecializedClass(m, f => (f | OVERRIDE) & ~(DEFERRED | CASEACCESSOR)))
val om = specializedOverload(sClass, m, env).setFlag(OVERRIDE)
val original = info.get(m) match {
case Some(NormalizedMember(tg)) => tg
case _ => m
}
info(specMember) = Forward(om)
info(om) = if (original.isDeferred) Forward(original) else Implementation(original)
typeEnv(om) = env ++ typeEnv(m) // add the environment for any method tparams
newOverload(specMember, om, typeEnv(om))
enterMember(om)
}
for (m <- normMembers ; if needsSpecialization(outerEnv ++ env, m) && satisfiable(fullEnv)) {
if (!m.isDeferred)
addConcreteSpecMethod(m)
// specialized members have to be overridable.
if (m.isPrivate)
m.resetFlag(PRIVATE).setFlag(PROTECTED)
if (m.isConstructor) {
val specCtor = enterMember(cloneInSpecializedClass(m, x => x))
info(specCtor) = Forward(m)
}
else if (isNormalizedMember(m)) { // methods added by normalization
val NormalizedMember(original) = info(m)
if (nonConflicting(env ++ typeEnv(m))) {
if (info(m).degenerate) {
debuglog("degenerate normalized member " + m.defString)
val specMember = enterMember(cloneInSpecializedClass(m, _ & ~DEFERRED))
info(specMember) = Implementation(original)
typeEnv(specMember) = env ++ typeEnv(m)
} else {
val om = forwardToOverload(m)
debuglog("normalizedMember " + m + " om: " + om + " " + pp(typeEnv(om)))
}
}
else
debuglog("conflicting env for " + m + " env: " + env)
}
else if (m.isDeferred && m.isSpecialized) { // abstract methods
val specMember = enterMember(cloneInSpecializedClass(m, _ | DEFERRED))
// debuglog("deferred " + specMember.fullName + " remains abstract")
info(specMember) = new Abstract(specMember)
// was: new Forward(specMember) {
// override def target = m.owner.info.member(specializedName(m, env))
// }
} else if (!sClass.isTrait && m.isMethod && !m.hasAccessorFlag) { // other concrete methods
// log("other concrete " + m)
forwardToOverload(m)
} else if (!sClass.isTrait && m.isMethod && m.hasFlag(LAZY)) {
forwardToOverload(m)
} else if (m.isValue && !m.isMethod) { // concrete value definition
def mkAccessor(field: Symbol, name: Name) = {
val newFlags = (SPECIALIZED | m.getterIn(clazz).flags) & ~(LOCAL | CASEACCESSOR | PARAMACCESSOR)
// we rely on the super class to initialize param accessors
val sym = sClass.newMethod(name.toTermName, field.pos, newFlags)
info(sym) = SpecializedAccessor(field)
sym
}
def overrideIn(clazz: Symbol, sym: Symbol) = {
val newFlags = (sym.flags | OVERRIDE | SPECIALIZED) & ~(DEFERRED | CASEACCESSOR | PARAMACCESSOR)
val sym1 = sym.cloneSymbol(clazz, newFlags)
sym1 modifyInfo (_ asSeenFrom (clazz.tpe, sym1.owner))
}
val specVal = specializedOverload(sClass, m, env)
addConcreteSpecMethod(m)
specVal.asInstanceOf[TermSymbol].setAlias(m)
enterMember(specVal)
// create accessors
if (m.isLazy) {
// no getters needed (we'll specialize the compute method and accessor separately), can stay private
// m.setFlag(PRIVATE) -- TODO: figure out how to leave the non-specialized lazy var private
// (the implementation needs it to be visible while duplicating and retypechecking,
// but it really could be private in bytecode)
specVal.setFlag(PRIVATE)
}
else if (nme.isLocalName(m.name)) {
val specGetter = mkAccessor(specVal, specVal.getterName) setInfo MethodType(Nil, specVal.info)
val origGetter = overrideIn(sClass, m.getterIn(clazz))
info(origGetter) = Forward(specGetter)
enterMember(specGetter)
enterMember(origGetter)
debuglog("specialize accessor in %s: %s -> %s".format(sClass.name.decode, origGetter.name.decode, specGetter.name.decode))
clazz.caseFieldAccessors.find(_.name.startsWith(m.name)) foreach { cfa =>
val cfaGetter = overrideIn(sClass, cfa)
info(cfaGetter) = SpecializedAccessor(specVal)
enterMember(cfaGetter)
debuglog("override case field accessor %s -> %s".format(m.name.decode, cfaGetter.name.decode))
}
if (specVal.isVariable && m.setterIn(clazz) != NoSymbol) {
val specSetter = mkAccessor(specVal, specGetter.setterName)
.resetFlag(STABLE)
specSetter.setInfo(MethodType(specSetter.newSyntheticValueParams(List(specVal.info)),
UnitTpe))
val origSetter = overrideIn(sClass, m.setterIn(clazz))
info(origSetter) = Forward(specSetter)
enterMember(specSetter)
enterMember(origSetter)
}
}
else { // if there are no accessors, specialized methods will need to access this field in specialized subclasses
m.resetFlag(PRIVATE)
specVal.resetFlag(PRIVATE)
debuglog("no accessors for %s/%s, specialized methods must access field in subclass".format(
m.name.decode, specVal.name.decode))
}
}
else if (m.isClass) {
val specClass: Symbol = cloneInSpecializedClass(m, x => x)
typeEnv(specClass) = fullEnv
specClass setName specializedName(specClass, fullEnv).toTypeName
enterMember(specClass)
debuglog("entered specialized class " + specClass.fullName)
info(specClass) = SpecializedInnerClass(m, fullEnv)
}
}
sClass
}
val decls1 = clazz.info.decls.toList flatMap { m: Symbol =>
if (m.isAnonymousClass) List(m) else {
normalizeMember(m.owner, m, outerEnv) flatMap { normalizedMember =>
val ms = specializeMember(m.owner, normalizedMember, outerEnv, clazz.info.typeParams)
// interface traits have concrete members now
if (ms.nonEmpty && clazz.isTrait && clazz.isInterface)
clazz.resetFlag(INTERFACE)
if (normalizedMember.isMethod) {
val newTpe = subst(outerEnv, normalizedMember.info)
// only do it when necessary, otherwise the method type might be at a later phase already
if (newTpe != normalizedMember.info) {
normalizedMember updateInfo newTpe
}
}
normalizedMember :: ms
}
}
}
val subclasses = specializations(clazz.info.typeParams) filter satisfiable
subclasses foreach {
env =>
val spc = specializedClass(env, decls1)
val existing = clazz.owner.info.decl(spc.name)
// a symbol for the specialized class already exists if there's a classfile for it.
// keeping both crashes the compiler on test/files/pos/spec-Function1.scala
if (existing != NoSymbol)
clazz.owner.info.decls.unlink(existing)
exitingSpecialize(clazz.owner.info.decls enter spc) //!!! assumes fully specialized classes
}
if (subclasses.nonEmpty) clazz.resetFlag(FINAL)
cleanAnyRefSpecCache(clazz, decls1)
decls1
}
/** Expand member `sym` to a set of normalized members. Normalized members
 *  are monomorphic or polymorphic only in non-specialized types.
 *
 *  Given method m[@specialized T, U](x: T, y: U) it returns
 *     m[T, U](x: T, y: U),
 *     m$I[ U](x: Int, y: U),
 *     m$D[ U](x: Double, y: U)
 *     // etc.
 *
 *  The original member is always the head of the returned list. Each normalized
 *  clone is registered in `typeEnv`, `info` (as a NormalizedMember) and in the
 *  overloads table via `newOverload`.
 */
private def normalizeMember(owner: Symbol, sym: Symbol, outerEnv: TypeEnv): List[Symbol] = {
  sym :: (
    // Only polymorphic methods (as seen at typer) can be normalized.
    if (!sym.isMethod || enteringTyper(sym.typeParams.isEmpty)) Nil
    else if (sym.hasDefault) {
      /* Specializing default getters is useless, also see SI-7329 . */
      sym.resetFlag(SPECIALIZED)
      Nil
    } else {
      // debuglog("normalizeMember: " + sym.fullNameAsName('.').decode)
      var specializingOn = specializedParams(sym)
      // Type parameters annotated @specialized but never used in a specializable
      // position are warned about and dropped from the specialization set.
      val unusedStvars = specializingOn filterNot specializedTypeVars(sym.info)
      // I think the last condition should be !sym.isArtifact, but that made the
      // compiler start warning about Tuple1.scala and Tuple2.scala claiming
      // their type parameters are used in non-specializable positions. Why is
      // unusedStvars.nonEmpty for these classes???
      if (unusedStvars.nonEmpty && currentRun.compiles(sym) && !sym.isSynthetic) {
        reporter.warning(sym.pos,
          "%s %s unused or used in non-specializable positions.".format(
            unusedStvars.mkString("", ", ", ""),
            if (unusedStvars.length == 1) "is" else "are")
        )
        unusedStvars foreach (_ removeAnnotation SpecializedClass)
        specializingOn = specializingOn filterNot (unusedStvars contains _)
      }
      // One clone per concrete type assignment that actually needs specialization.
      for (env0 <- specializations(specializingOn) if needsSpecialization(env0, sym)) yield {
        // !!! Can't this logic be structured so that the new symbol's name is
        // known when the symbol is cloned? It is much cleaner not to be mutating
        // names after the fact. And it adds about a billion lines of
        // "Renaming value _1 in class Tuple2 to _1$mcZ$sp" to obscure the small
        // number of other (important) actual symbol renamings.
        val tps = survivingParams(sym.info.typeParams, env0)
        val specMember = sym.cloneSymbol(owner, (sym.flags | SPECIALIZED) & ~DEFERRED) // <-- this needs newName = ...
        val env = mapAnyRefsInSpecSym(env0, sym, specMember)
        val (keys, vals) = env.toList.unzip
        specMember setName specializedName(sym, env) // <-- but the name is calculated based on the cloned symbol
        // debuglog("%s normalizes to %s%s".format(sym, specMember,
        // if (tps.isEmpty) "" else " with params " + tps.mkString(", ")))
        typeEnv(specMember) = outerEnv ++ env
        val tps1 = produceTypeParameters(tps, specMember, env)
        tps1 foreach (_ modifyInfo (_.instantiateTypeParams(keys, vals)))
        // the cloneInfo is necessary so that method parameter symbols are cloned at the new owner
        val methodType = sym.info.resultType.instantiateTypeParams(keys ++ tps, vals ++ tps1.map(_.tpe)).cloneInfo(specMember)
        specMember setInfo GenPolyType(tps1, methodType)
        debuglog("%s expands to %s in %s".format(sym, specMember.name.decode, pp(env)))
        info(specMember) = NormalizedMember(sym)
        newOverload(sym, specMember, env)
        specMember
      }
    }
  )
}
/** Render a type environment compactly for debug output, e.g. `env(T:Int, U)`.
 *  A binding whose value's type symbol equals the key is printed as the bare
 *  key name; bindings are sorted by the key's name for stable output.
 */
private def pp(env: TypeEnv): String = {
  val sortedBindings = env.toList.sortBy(_._1.name)
  val rendered = sortedBindings.map { case (tvar, tpe) =>
    val boundSym = tpe.typeSymbol
    if (tvar == boundSym) "" + tvar.name
    else tvar.name + ":" + boundSym.name
  }
  rendered.mkString("env(", ", ", ")")
}
/** Specialize member `m` w.r.t. to the outer environment and the type
 *  parameters of the innermost enclosing class.
 *
 *  Turns 'private' into 'protected' for members that need specialization.
 *
 *  Return a list of symbols that are specializations of 'sym', owned by 'owner'.
 */
private def specializeMember(owner: Symbol, sym: Symbol, outerEnv: TypeEnv, tps: List[Symbol]): List[Symbol] = {
  // Produce one specialized overload per concrete assignment of `tparams`.
  def specializeOn(tparams: List[Symbol]): List[Symbol] = specializations(tparams) map { spec0 =>
    val spec = mapAnyRefsInOrigCls(spec0, owner)
    if (sym.isPrivate) {
      // NOTE: the log message below says "private[...]" but the flag actually
      // set is PROTECTED, so specialized subclasses can access the member.
      sym.resetFlag(PRIVATE).setFlag(PROTECTED)
      debuglog("Set %s to private[%s]".format(sym, sym.enclosingPackage))
    }
    val specMember = subst(outerEnv)(specializedOverload(owner, sym, spec))
    typeEnv(specMember) = typeEnv(sym) ++ outerEnv ++ spec
    // Remember the type vars whose binding is the identity (s.tpe == tp).
    wasSpecializedForTypeVars(specMember) ++= spec collect { case (s, tp) if s.tpe == tp => s }
    val wasSpec = wasSpecializedForTypeVars(specMember)
    if (wasSpec.nonEmpty)
      debuglog("specialized overload for %s in %s".format(specMember, pp(typeEnv(specMember))))
    newOverload(sym, specMember, spec)
    info(specMember) = SpecialOverload(sym, typeEnv(specMember))
    specMember
  }
  if (sym.isMethod) {
    if (hasUnspecializableAnnotation(sym)) {
      List()
    } else {
      val stvars = specializedTypeVars(sym)
      if (stvars.nonEmpty)
        debuglog("specialized %s on %s".format(sym.fullLocationString, stvars.map(_.name).mkString(", ")))
      // Constructors are only specialized on class type params that occur in
      // their parameter types; everyone else on all enclosing class tparams.
      val tps1 = if (sym.isConstructor) tps filter (sym.info.paramTypes contains _) else tps
      val tps2 = tps1 filter stvars
      if (!sym.isDeferred)
        addConcreteSpecMethod(sym)
      specializeOn(tps2)
    }
  }
  else Nil
}
/** Clone `sym` into `owner` as its specialized overload for environment `env`.
 *  The clone is concrete (DEFERRED and LAZY dropped, CASEACCESSOR cleared),
 *  carries the SPECIALIZED flag, and its info is re-seen from `owner` with
 *  `env` substituted in. `nameSymbol`, when given, seeds the mangled
 *  specialized name instead of `sym` itself.
 */
private def specializedOverload(owner: Symbol, sym: Symbol, env: TypeEnv, nameSymbol: Symbol = NoSymbol): Symbol = {
  val cloneFlags = (sym.flags | SPECIALIZED) & ~(DEFERRED | CASEACCESSOR | LAZY)
  val cloneName  = specializedName(nameSymbol orElse sym, env)
  val clone      = sym.cloneSymbol(owner, cloneFlags, newName = cloneName)
  // cloneSymbol properly duplicates the symbol's info, so it is safe to modify here
  clone modifyInfo (tp => subst(env, tp.asSeenFrom(owner.thisType, sym.owner)))
}
/** For each method m that overrides an inherited method m', add a special
 *  overload method `om` that overrides the corresponding overload in the
 *  superclass. For the following example:
 *
 *  class IntFun extends Function1[Int, Int] {
 *    def apply(x: Int): Int = ..
 *  }
 *
 *  this method will return List('apply$mcII$sp')
 */
private def specialOverrides(clazz: Symbol) = logResultIf[List[Symbol]]("specialized overrides in " + clazz, _.nonEmpty) {
  /* Return the overridden symbol in syms that needs a specialized overriding symbol,
   * together with its specialization environment. The overridden symbol may not be
   * the closest to 'overriding', in a given hierarchy.
   *
   * An method m needs a special override if
   * * m overrides a method whose type contains specialized type variables
   * * there is a valid specialization environment that maps the overridden method type to m's type.
   */
  def needsSpecialOverride(overriding: Symbol): (Symbol, TypeEnv) = {
    // Error if the subclass specializes a tparam on fewer concrete types than the base.
    def checkOverriddenTParams(overridden: Symbol) {
      foreach2(overridden.info.typeParams, overriding.info.typeParams) { (baseTvar, derivedTvar) =>
        val missing = concreteTypes(baseTvar).toSet -- concreteTypes(derivedTvar).toSet
        if (missing.nonEmpty) {
          reporter.error(derivedTvar.pos,
            "Type parameter has to be specialized at least for the same types as in the overridden method. Missing "
            + "types: " + missing.mkString("", ", ", "")
          )
        }
      }
    }
    if (!overriding.isParamAccessor) {
      for (overridden <- overriding.allOverriddenSymbols) {
        val stvars = specializedTypeVars(overridden.info)
        if (stvars.nonEmpty) {
          debuglog("specialized override of %s by %s%s".format(overridden.fullLocationString, overriding.fullLocationString,
            if (stvars.isEmpty) "" else stvars.map(_.name).mkString("(", ", ", ")")))
          if (currentRun compiles overriding)
            checkOverriddenTParams(overridden)
          val env = unify(overridden.info, overriding.info, emptyEnv, false, true)
          // The specialized counterpart must exist in the overridden method's owner after this phase.
          def atNext = exitingSpecialize(overridden.owner.info.decl(specializedName(overridden, env)))
          if (TypeEnv.restrict(env, stvars).nonEmpty && TypeEnv.isValid(env, overridden) && atNext != NoSymbol) {
            debuglog(" " + pp(env) + " found " + atNext)
            // nonlocal return: first suitable overridden symbol wins
            return (overridden, env)
          }
        }
      }
    }
    (NoSymbol, emptyEnv)
  }
  (clazz.info.decls flatMap { overriding =>
    needsSpecialOverride(overriding) match {
      case (NoSymbol, _) =>
        // Super accessors forward through their alias; check the alias too.
        if (overriding.isSuperAccessor) {
          val alias = overriding.alias
          debuglog(s"checking special overload for super accessor: ${overriding.fullName}, alias for ${alias.fullName}")
          needsSpecialOverride(alias) match {
            case nope @ (NoSymbol, _) => None
            case (overridden, env) =>
              val om = specializedOverload(clazz, overriding, env, overridden)
              om.setName(nme.superName(om.name))
              om.asInstanceOf[TermSymbol].setAlias(info(alias).target)
              om.owner.info.decls.enter(om)
              info(om) = SpecialSuperAccessor(om)
              om.makeNotPrivate(om.owner)
              newOverload(overriding, om, env)
              Some(om)
          }
        } else None
      case (overridden, env) =>
        val om = specializedOverload(clazz, overridden, env)
        clazz.info.decls.enter(om)
        foreachWithIndex(om.paramss) { (params, i) =>
          foreachWithIndex(params) { (param, j) =>
            param.name = overriding.paramss(i)(j).name // SI-6555 Retain the parameter names from the subclass.
          }
        }
        debuglog(s"specialized overload $om for ${overriding.name.decode} in ${pp(env)}: ${om.info}")
        if (overriding.isAbstractOverride) om.setFlag(ABSOVERRIDE)
        typeEnv(om) = env
        addConcreteSpecMethod(overriding)
        if (overriding.isDeferred) { // abstract override
          debuglog("abstract override " + overriding.fullName + " with specialized " + om.fullName)
          info(om) = Forward(overriding)
        }
        else {
          // if the override is a normalized member, 'om' gets the
          // implementation from its original target, and adds the
          // environment of the normalized member (that is, any
          // specialized /method/ type parameter bindings)
          info get overriding match {
            case Some(NormalizedMember(target)) =>
              typeEnv(om) = env ++ typeEnv(overriding)
              info(om) = Forward(target)
            case _ =>
              info(om) = SpecialOverride(overriding)
          }
          info(overriding) = Forward(om setPos overriding.pos)
        }
        newOverload(overriding, om, env)
        ifDebug(exitingSpecialize(assert(
          overridden.owner.info.decl(om.name) != NoSymbol,
          "Could not find " + om.name + " in " + overridden.owner.info.decls))
        )
        Some(om)
    }
  }).toList
}
/** Control-flow exception thrown by `unifyError` when two types cannot be unified in strict mode. */
case object UnifyError extends scala.util.control.ControlThrowable
/** Log the failing pair and abort the current unification by throwing [[UnifyError]]. */
private[this] def unifyError(tp1: Any, tp2: Any): Nothing = {
  log("unifyError" + ((tp1, tp2)))
  throw UnifyError
}
/** Return the most general type environment that specializes tp1 to tp2.
 *  It only allows binding of type parameters annotated with @specialized.
 *  Fails if such an environment cannot be found.
 *
 *  If `strict` is true, a UnifyError is thrown if unification is impossible.
 *
 *  If `tparams` is true, then the method tries to unify over type params in polytypes as well.
 */
private def unify(tp1: Type, tp2: Type, env: TypeEnv, strict: Boolean, tparams: Boolean = false): TypeEnv = (tp1, tp2) match {
  case (TypeRef(_, sym1, _), _) if sym1.isSpecialized =>
    debuglog(s"Unify $tp1, $tp2")
    // A @specialized type parameter binds to any primitive value class or to
    // one of its specialized AnyRef subtypes. (The original code repeated the
    // isSpecializedAnyRefSubtype test in a second `else if` that was
    // unreachable -- it was already subsumed by the `||` here -- so that dead
    // branch has been removed.)
    if (isPrimitiveValueClass(tp2.typeSymbol) || isSpecializedAnyRefSubtype(tp2, sym1))
      env + ((sym1, tp2))
    else if (strict)
      unifyError(tp1, tp2)
    else
      env
  case (TypeRef(_, sym1, args1), TypeRef(_, sym2, args2)) =>
    if (args1.nonEmpty || args2.nonEmpty)
      debuglog(s"Unify types $tp1 and $tp2")
    if (strict && args1.length != args2.length) unifyError(tp1, tp2)
    // Unify the type arguments pointwise.
    val e = unify(args1, args2, env, strict)
    if (e.nonEmpty) debuglog(s"unified to: $e")
    e
  case (TypeRef(_, sym1, _), _) if sym1.isTypeParameterOrSkolem =>
    env
  case (MethodType(params1, res1), MethodType(params2, res2)) =>
    if (strict && params1.length != params2.length) unifyError(tp1, tp2)
    debuglog(s"Unify methods $tp1 and $tp2")
    // Unify result types and parameter types together.
    unify(res1 :: (params1 map (_.tpe)), res2 :: (params2 map (_.tpe)), env, strict)
  case (PolyType(tparams1, res1), PolyType(tparams2, res2)) =>
    debuglog(s"Unify polytypes $tp1 and $tp2")
    if (strict && tparams1.length != tparams2.length)
      unifyError(tp1, tp2)
    else if (tparams && tparams1.length == tparams2.length)
      // also unify over the type parameters' infos when requested
      unify(res1 :: tparams1.map(_.info), res2 :: tparams2.map(_.info), env, strict)
    else
      unify(res1, res2, env, strict)
  case (PolyType(_, res), other) => unify(res, other, env, strict)
  case (ThisType(_), ThisType(_)) => env
  case (_, SingleType(_, _)) => unify(tp1, tp2.underlying, env, strict)
  case (SingleType(_, _), _) => unify(tp1.underlying, tp2, env, strict)
  case (ThisType(_), _) => unify(tp1.widen, tp2, env, strict)
  case (_, ThisType(_)) => unify(tp1, tp2.widen, env, strict)
  case (RefinedType(_, _), RefinedType(_, _)) => env
  // note: the arguments are deliberately swapped -- strip the annotation and retry
  case (AnnotatedType(_, tp1), tp2) => unify(tp2, tp1, env, strict)
  case (ExistentialType(_, res1), _) => unify(tp2, res1, env, strict)
  case (TypeBounds(lo1, hi1), TypeBounds(lo2, hi2)) => unify(List(lo1, hi1), List(lo2, hi2), env, strict)
  case _ =>
    debuglog(s"don't know how to unify $tp1 [${tp1.getClass}] with $tp2 [${tp2.getClass}]")
    env
}
/** Unify the corresponding elements of two type lists, folding the bindings
 *  into `env`. In strict mode each pair is unified against the empty
 *  environment and the result must not re-bind an already-bound type variable;
 *  otherwise unification fails via [[unifyError]].
 */
private def unify(tp1: List[Type], tp2: List[Type], env: TypeEnv, strict: Boolean): TypeEnv = {
  if (tp1.isEmpty || tp2.isEmpty) env
  else (tp1 zip tp2).foldLeft(env) { case (acc, (t1, t2)) =>
    if (!strict) unify(t1, t2, acc, strict)
    else {
      val delta = unify(t1, t2, emptyEnv, strict)
      if (acc.keySet.intersect(delta.keySet).isEmpty) acc ++ delta
      else {
        debuglog(s"could not unify: u($t1, $t2) yields $delta, env: $acc")
        unifyError(tp1, tp2)
      }
    }
  }
}
/** Apply the type environment 'env' to the given type. All type
 *  bindings are supposed to be to primitive types. A type variable
 *  that is annotated with 'uncheckedVariance' is mapped to the corresponding
 *  primitive type losing the annotation.
 */
private def subst(env: TypeEnv, tpe: Type): Type = {
  // Substitution map that drops @uncheckedVariance annotations and also
  // descends into ClassInfoTypes (both parents and declarations).
  class FullTypeMap(from: List[Symbol], to: List[Type]) extends SubstTypeMap(from, to) with AnnotationFilter {
    def keepAnnotation(annot: AnnotationInfo) = !(annot matches uncheckedVarianceClass)

    override def mapOver(tp: Type): Type = tp match {
      case ClassInfoType(parents, decls, clazz) =>
        val mappedParents = parents mapConserve this
        val mappedDecls = mapOver(decls)
        // preserve reference identity when nothing changed
        if ((mappedParents eq parents) && (mappedDecls eq decls)) tp
        else ClassInfoType(mappedParents, mappedDecls, clazz)
      case _ =>
        super.mapOver(tp)
    }
  }
  val (keys, values) = env.toList.unzip
  (new FullTypeMap(keys, values))(tpe)
}
/** Destructively substitute `env` into `decl`'s info and return `decl`.
 *  For constructors, the result type is forced to the owner's own type
 *  (`tpe_*`), since specialization must not change what a constructor builds.
 */
private def subst(env: TypeEnv)(decl: Symbol): Symbol =
  decl modifyInfo (info =>
    if (decl.isConstructor) MethodType(subst(env, info).params, decl.owner.tpe_*)
    else subst(env, info)
  )
/** Types whose classes are never specialized: repeated-parameter types
 *  (the original author marked this condition "???"), Java-defined classes,
 *  and package classes.
 */
private def unspecializableClass(tp: Type) = {
  val sym = tp.typeSymbol
  isRepeatedParamType(tp) || sym.isJavaDefined || sym.isPackageClass
}
/** Type transformation. It is applied to all symbols, compiled or loaded.
 *  If it is a 'no-specialization' run, it is applied only to loaded symbols.
 */
override def transformInfo(sym: Symbol, tpe: Type): Type = {
  if (settings.nospecialization && currentRun.compiles(sym)) tpe
  else tpe.resultType match {
    case cinfo @ ClassInfoType(parents, decls, clazz) if !unspecializableClass(cinfo) =>
      val tparams = tpe.typeParams
      // Force parent infos at the post-specialization phase so their
      // specialized members exist before we rewrite this class's scope.
      if (tparams.isEmpty)
        exitingSpecialize(parents map (_.typeSymbol.info))

      val parents1 = parents mapConserve specializedType
      if (parents ne parents1) {
        debuglog("specialization transforms %s%s parents to %s".format(
          if (tparams.nonEmpty) "(poly) " else "", clazz, parents1)
        )
      }
      // New scope = specialized members of the class + special overrides.
      val newScope = newScopeWith(specializeClass(clazz, typeEnv(clazz)) ++ specialOverrides(clazz): _*)
      // If tparams.isEmpty, this is just the ClassInfoType.
      GenPolyType(tparams, ClassInfoType(parents1, newScope, clazz))
    case _ =>
      tpe
  }
}
/** True when every binding in `env` lies within the (env-substituted) bounds
 *  of its type variable. Note that a conflicting type environment could still
 *  be satisfiable.
 */
def nonConflicting(env: TypeEnv) = env.forall { case (tvar, tpe) =>
  val bounds = tvar.info.bounds
  subst(env, bounds.lo) <:< tpe && tpe <:< subst(env, bounds.hi)
}
/** The type environment is sound w.r.t. all type bounds, or only soft
 *  conflicts appear. An environment is sound if all bindings are within
 *  the bounds of the given type variable. A soft conflict is a binding
 *  that does not fall within the bounds, but whose bounds contain
 *  type variables that are @specialized, (that could become satisfiable).
 */
def satisfiable(env: TypeEnv): Boolean = satisfiable(env, warnings = false)
/** As [[satisfiable]], optionally emitting a warning for each type variable
 *  whose bounds prevent specialization. A binding "matches" when it either
 *  conforms after substitution or when either side still contains
 *  @specialized type variables (a soft conflict).
 */
def satisfiable(env: TypeEnv, warnings: Boolean): Boolean = {
  def matches(tpe1: Type, tpe2: Type): Boolean = {
    val t1 = subst(env, tpe1)
    val t2 = subst(env, tpe2)
    ((t1 <:< t2)
      || specializedTypeVars(t1).nonEmpty
      || specializedTypeVars(t2).nonEmpty)
  }
  // Note: warning/logging happens as a side effect inside the forall.
  env forall { case (tvar, tpe) =>
    matches(tvar.info.bounds.lo, tpe) && matches(tpe, tvar.info.bounds.hi) || {
      if (warnings)
        reporter.warning(tvar.pos, s"Bounds prevent specialization of $tvar")
      debuglog("specvars: " +
        tvar.info.bounds.lo + ": " +
        specializedTypeVars(tvar.info.bounds.lo) + " " +
        subst(env, tvar.info.bounds.hi) + ": " +
        specializedTypeVars(subst(env, tvar.info.bounds.hi))
      )
      false
    }
  }
}
/** Compute the extra type-variable bindings (beyond `env`) needed to make
 *  `env` satisfiable w.r.t. each variable's bounds, or None if no such
 *  bindings exist. Returns Some(emptyEnv) when no constraints are needed.
 */
def satisfiabilityConstraints(env: TypeEnv): Option[TypeEnv] = {
  val noconstraints = Some(emptyEnv)
  // Constraints needed so that tpe1 conforms to tpe2 under env, if any.
  def matches(tpe1: Type, tpe2: Type): Option[TypeEnv] = {
    val t1 = subst(env, tpe1)
    val t2 = subst(env, tpe2)
    // log("---------> " + tpe1 + " matches " + tpe2)
    // log(t1 + ", " + specializedTypeVars(t1))
    // log(t2 + ", " + specializedTypeVars(t2))
    // log("unify: " + unify(t1, t2, env, false, false) + " in " + env)
    if (t1 <:< t2) noconstraints
    else if (specializedTypeVars(t1).nonEmpty) Some(unify(t1, t2, env, false, false) -- env.keys)
    else if (specializedTypeVars(t2).nonEmpty) Some(unify(t2, t1, env, false, false) -- env.keys)
    else None
  }
  // Accumulate constraints from lower and upper bounds of every binding.
  env.foldLeft[Option[TypeEnv]](noconstraints) {
    case (constraints, (tvar, tpe)) =>
      val loconstraints = matches(tvar.info.bounds.lo, tpe)
      val hiconstraints = matches(tpe, tvar.info.bounds.hi)
      val allconstraints = for (c <- constraints; l <- loconstraints; h <- hiconstraints) yield c ++ l ++ h
      allconstraints
  }
}
/** This duplicator additionally performs casts of expressions if that is allowed by the `casts` map. */
class Duplicator(casts: Map[Symbol, Type]) extends {
  // early definition: the Duplicators cake needs `global` before its body initializes
  val global: SpecializeTypes.this.global.type = SpecializeTypes.this.global
} with typechecker.Duplicators {
  private val (castfrom, castto) = casts.unzip
  // substitution map from original symbols to their cast-target types
  private object CastMap extends SubstTypeMap(castfrom.toList, castto.toList)

  class BodyDuplicator(_context: Context) extends super.BodyDuplicator(_context) {
    /** If the (fixed) tree type does not conform to `pt`, try inserting a cast:
     *  first to CastMap(tree.tpe) when that conforms to `pt`, else to `pt`
     *  when CastMap(tree.tpe) conforms to CastMap(pt); otherwise leave the
     *  tree alone. The result's type is cleared so the typer re-types it.
     */
    override def castType(tree: Tree, pt: Type): Tree = {
      tree modifyType fixType
      // log(" tree type: " + tree.tpe)
      val ntree = if (tree.tpe != null && !(tree.tpe <:< pt)) {
        val casttpe = CastMap(tree.tpe)
        if (casttpe <:< pt) gen.mkCast(tree, casttpe)
        else if (casttpe <:< CastMap(pt)) gen.mkCast(tree, pt)
        else tree
      } else tree

      ntree.clearType()
    }
  }

  protected override def newBodyDuplicator(context: Context) = new BodyDuplicator(context)
}
/** Introduced to fix SI-7343: Phase ordering problem between Duplicators and Specialization.
 *  brief explanation: specialization rewires class parents during info transformation, and
 *  the new info then guides the tree changes. But if a symbol is created during duplication,
 *  which runs after specialization, its info is not visited and thus the corresponding tree
 *  is not specialized. One manifestation is the following:
 *  ```
 *  object Test {
 *    class Parent[@specialized(Int) T]
 *
 *    def spec_method[@specialized(Int) T](t: T, expectedXSuper: String) = {
 *      class X extends Parent[T]()
 *      // even in the specialized variant, the local X class
 *      // doesn't extend Parent$mcI$sp, since its symbol has
 *      // been created after specialization and was not seen
 *      // by specialization's info transformer.
 *      ...
 *    }
 *  }
 *  ```
 *  We fix this by forcing duplication to take place before specialization.
 *
 *  Note: The constructors phase (which also uses duplication) comes after erasure and uses the
 *  post-erasure typer => we must protect it from the beforeSpecialization phase shifting.
 */
class SpecializationDuplicator(casts: Map[Symbol, Type]) extends Duplicator(casts) {
  // Re-typing is pinned to the specialization phase (see SI-7343 note above).
  override def retyped(context: Context, tree: Tree, oldThis: Symbol, newThis: Symbol, env: scala.collection.Map[Symbol, Type]): Tree =
    enteringSpecialize(super.retyped(context, tree, oldThis, newThis, env))
}
/** A tree symbol substituter that substitutes on type skolems.
 *  If a type parameter is a skolem, it looks for the original
 *  symbol in the 'from' and maps it to the corresponding new
 *  symbol. The new symbol should probably be a type skolem as
 *  well (not enforced).
 *
 *  All private members are made protected in order to be accessible from
 *  specialized classes.
 */
class ImplementationAdapter(from: List[Symbol],
                            to: List[Symbol],
                            targetClass: Symbol,
                            addressFields: Boolean) extends TreeSymSubstituter(from, to) {
  // Match through deskolemization so skolems of 'from' symbols are substituted too.
  override val symSubst = new SubstSymMap(from, to) {
    override def matches(sym1: Symbol, sym2: Symbol) =
      if (sym2.isTypeSkolem) sym2.deSkolemize eq sym1
      else sym1 eq sym2
  }

  // A symbol is accessible if referenced from its own enclosing class,
  // which must not be the target class of the adaptation.
  private def isAccessible(sym: Symbol): Boolean =
    if (currentOwner.isAnonymousFunction) {
      if (inlineFunctionExpansion) devWarning("anonymous function made it to specialization even though inline expansion is set.")
      false
    }
    else (currentClass == sym.owner.enclClass) && (currentClass != targetClass)

  // Local (name-mangled) members are only widened when addressFields is set.
  private def shouldMakePublic(sym: Symbol): Boolean =
    sym.hasFlag(PRIVATE | PROTECTED) && (addressFields || !nme.isLocalName(sym.name))

  /** All private members that are referenced are made protected,
   *  in order to be accessible from specialized subclasses.
   */
  override def transform(tree: Tree): Tree = tree match {
    case Select(qual, name) =>
      val sym = tree.symbol
      if (sym.isPrivate) debuglog(
        "seeing private member %s, currentClass: %s, owner: %s, isAccessible: %b, isLocalName: %b".format(
          sym, currentClass, sym.owner.enclClass, isAccessible(sym), nme.isLocalName(sym.name))
      )
      if (shouldMakePublic(sym) && !isAccessible(sym)) {
        debuglog(s"changing private flag of $sym")
        sym.makeNotPrivate(sym.owner)
      }
      super.transform(tree)

    case _ =>
      super.transform(tree)
  }
}
/** The generic class from which a specialized class was derived, looked up by
 *  splitting the mangled specialized name; NoSymbol when `clazz` is not a
 *  specialized class.
 */
def originalClass(clazz: Symbol): Symbol = {
  if (!clazz.isSpecialized) NoSymbol
  else {
    val (genericName, _, _) = nme.splitSpecializedName(clazz.name)
    clazz.owner.info.decl(genericName) suchThat (_.isClass)
  }
}
/** A specialized class inherits illegally when its generic original has a
 *  non-trait parent that itself declares specialized type parameters.
 */
def illegalSpecializedInheritance(clazz: Symbol): Boolean = {
  def specializedNonTraitParent(p: Symbol) = hasSpecializedParams(p) && !p.isTrait
  clazz.isSpecialized && originalClass(clazz).parentSymbols.exists(specializedNonTraitParent)
}
def specializeCalls(unit: CompilationUnit) = new TypingTransformer(unit) {
/** Map a specializable method to its rhs, when not deferred.
 *  Filled by [[CollectMethodBodies]]; cleared each run (per-run cache). */
val body = perRunCaches.newMap[Symbol, Tree]()

/** Map a specializable method to its value parameter symbols.
 *  Filled by [[CollectMethodBodies]]; cleared each run (per-run cache). */
val parameters = perRunCaches.newMap[Symbol, List[Symbol]]()
/** Collect method bodies that are concrete specialized methods.
 *  Records each matching DefDef's rhs in `body` and its value parameter
 *  symbols in `parameters`, then removes the symbol from
 *  `concreteSpecMethods` so it is only captured once.
 */
class CollectMethodBodies extends Traverser {
  override def traverse(tree: Tree) = tree match {
    case DefDef(_, _, _, vparams :: Nil, _, rhs) =>
      if (concreteSpecMethods(tree.symbol) || tree.symbol.isConstructor) {
        // debuglog("!!! adding body of a defdef %s, symbol %s: %s".format(tree, tree.symbol, rhs))
        body(tree.symbol) = rhs
        // body(tree.symbol) = tree // whole method
        parameters(tree.symbol) = vparams.map(_.symbol)
        concreteSpecMethods -= tree.symbol
      } // no need to descend further down inside method bodies

    case ValDef(mods, name, tpt, rhs) if concreteSpecMethods(tree.symbol) =>
      body(tree.symbol) = rhs
      // log("!!! adding body of a valdef " + tree.symbol + ": " + rhs)
      //super.traverse(tree)
    case _ =>
      super.traverse(tree)
  }
}
/** Does `treeType` conform to `memberType` for rewiring purposes?
 *  Either the types are equal, or (for AnyRef specialization of polymorphic
 *  members) strict unification of the original symbol's type against the
 *  member type yields exactly the bindings in `env`.
 */
def doesConform(origSymbol: Symbol, treeType: Type, memberType: Type, env: TypeEnv) = {
  (treeType =:= memberType) || { // anyref specialization
    memberType match {
      case PolyType(_, resTpe) =>
        debuglog(s"Conformance for anyref - polytype with result type: $resTpe and $treeType\\nOrig. sym.: $origSymbol")
        try {
          val e = unify(origSymbol.tpe, memberType, emptyEnv, true)
          debuglog(s"obtained env: $e")
          e.keySet == env.keySet
        } catch {
          // NOTE(review): the broad Throwable catch looks deliberate here --
          // strict unification signals failure by throwing UnifyError, a
          // ControlThrowable that NonFatal would NOT match. It does, however,
          // also swallow fatal errors; narrowing to `case UnifyError =>` may
          // be safer -- confirm no other throwables are expected.
          case _: Throwable =>
            debuglog("Could not unify.")
            false
        }
      case _ => false
    }
  }
}
/** Evaluate `body`; on a TypeError, report it at its position and return
 *  `handler(te)` instead of propagating the exception.
 */
def reportError[T](body: =>T)(handler: TypeError => T): T =
  try body
  catch {
    case te: TypeError =>
      reporter.error(te.pos, te.msg)
      handler(te)
  }
/** Transform with graceful degradation: if specialization rewiring raises a
 *  TypeError, the error is reported and the original tree is kept unchanged.
 */
override def transform(tree: Tree): Tree =
  reportError { transform1(tree) } {_ => tree}
def transform1(tree: Tree) = {
val symbol = tree.symbol
/* The specialized symbol of 'tree.symbol' for tree.tpe, if there is one */
def specSym(qual: Tree): Symbol = {
val env = unify(symbol.tpe, tree.tpe, emptyEnv, false)
def isMatch(member: Symbol) = {
val memberType = qual.tpe memberType member
val residualTreeType = tree match {
case TypeApply(fun, targs) if fun.symbol == symbol =>
// SI-6308 Handle methods with only some type parameters specialized.
// drop the specialized type parameters from the PolyType, and
// substitute in the type environment.
val GenPolyType(tparams, tpe) = fun.tpe
val (from, to) = env.toList.unzip
val residualTParams = tparams.filterNot(env.contains)
GenPolyType(residualTParams, tpe).substituteTypes(from, to)
case _ => tree.tpe
}
(
doesConform(symbol, residualTreeType, memberType, env)
&& TypeEnv.includes(typeEnv(member), env)
)
}
if (env.isEmpty) NoSymbol
else qual.tpe member specializedName(symbol, env) suchThat isMatch
}
def matchingSymbolInPrefix(pre: Type, member: Symbol, env: TypeEnv): Symbol = {
pre member specializedName(member, env) suchThat (_.tpe matches subst(env, member.tpe))
}
def transformSelect(sel: Select) = {
val Select(qual, name) = sel
debuglog(s"specializing Select(sym=${symbol.defString}, tree.tpe=${tree.tpe})")
val qual1 = transform(qual)
def copySelect = treeCopy.Select(tree, qual1, name)
def newSelect(member: Symbol) = atPos(tree.pos)(Select(qual1, member))
def typedOp(member: Symbol) = localTyper typedOperator newSelect(member)
def typedTree(member: Symbol) = localTyper typed newSelect(member)
val ignoreEnv = specializedTypeVars(symbol.info).isEmpty || name == nme.CONSTRUCTOR
if (ignoreEnv) overloads(symbol) find (_ matchesSym symbol) match {
case Some(Overload(member, _)) => typedOp(member)
case _ => copySelect
}
else {
val env = unify(symbol.tpe, tree.tpe, emptyEnv, false)
overloads(symbol) find (_ matchesEnv env) match {
case Some(Overload(member, _)) => typedOp(member)
case _ =>
matchingSymbolInPrefix(qual1.tpe, symbol, env) match {
case NoSymbol => copySelect
case member if member.isMethod => typedOp(member)
case member => typedTree(member)
}
}
}
}
/** Computes residual type parameters after rewiring, like "String" in the following example:
* ```
* def specMe[@specialized T, U](t: T, u: U) = ???
* specMe[Int, String](1, "2") => specMe$mIc$sp[String](1, "2")
* ```
*/
def computeResidualTypeVars(baseTree: Tree, specMember: Symbol, specTree: Tree, baseTargs: List[Tree], env: TypeEnv): Tree = {
val residualTargs = symbol.info.typeParams zip baseTargs collect {
case (tvar, targ) if !env.contains(tvar) || !isPrimitiveValueClass(env(tvar).typeSymbol) => targ
}
ifDebug(assert(residualTargs.length == specMember.info.typeParams.length,
"residual: %s, tparams: %s, env: %s".format(residualTargs, specMember.info.typeParams, env))
)
val tree1 = gen.mkTypeApply(specTree, residualTargs)
debuglog(s"rewrote $tree to $tree1")
localTyper.typedOperator(atPos(tree.pos)(tree1)) // being polymorphic, it must be a method
}
curTree = tree
tree match {
case Apply(Select(New(tpt), nme.CONSTRUCTOR), args) =>
def transformNew = {
debuglog(s"Attempting to specialize new $tpt(${args.mkString(", ")})")
val found = specializedType(tpt.tpe)
if (found.typeSymbol ne tpt.tpe.typeSymbol) { // the ctor can be specialized
val inst = New(found, transformTrees(args): _*)
reportError(localTyper.typedPos(tree.pos)(inst))(_ => super.transform(tree))
}
else
super.transform(tree)
}
transformNew
case Apply(sel @ Select(sup @ Super(qual, name), name1), args) if hasNewParents(sup) =>
def transformSuperApply = {
val sup1 = Super(qual, name) setPos sup.pos
val tree1 = Apply(Select(sup1, name1) setPos sel.pos, transformTrees(args))
val res = localTyper.typedPos(tree.pos)(tree1)
debuglog(s"retyping call to super, from: $symbol to ${res.symbol}")
res
}
transformSuperApply
// This rewires calls to specialized methods defined in a class (which have a receiver)
// class C {
// def foo[@specialized T](t: T): T = t
// C.this.foo(3) // TypeApply(Select(This(C), foo), List(Int)) => C.this.foo$mIc$sp(3)
// }
case TypeApply(sel @ Select(qual, name), targs)
if (specializedTypeVars(symbol.info).nonEmpty && name != nme.CONSTRUCTOR) =>
debuglog("checking typeapp for rerouting: " + tree + " with sym.tpe: " + symbol.tpe + " tree.tpe: " + tree.tpe)
val qual1 = transform(qual)
log(">>> TypeApply: " + tree + ", qual1: " + qual1)
specSym(qual1) match {
case NoSymbol =>
// See pos/exponential-spec.scala - can't call transform on the whole tree again.
treeCopy.TypeApply(tree, treeCopy.Select(sel, qual1, name), transformTrees(targs))
case specMember =>
debuglog("found " + specMember.fullName)
ifDebug(assert(symbol.info.typeParams.length == targs.length, symbol.info.typeParams + " / " + targs))
val env = typeEnv(specMember)
computeResidualTypeVars(tree, specMember, gen.mkAttributedSelect(qual1, specMember), targs, env)
}
// This rewires calls to specialized methods defined in the local scope. For example:
// def outerMethod = {
// def foo[@specialized T](t: T): T = t
// foo(3) // TypeApply(Ident(foo), List(Int)) => foo$mIc$sp(3)
// }
case TypeApply(sel @ Ident(name), targs) if name != nme.CONSTRUCTOR =>
val env = unify(symbol.tpe, tree.tpe, emptyEnv, false)
if (env.isEmpty) super.transform(tree)
else {
overloads(symbol) find (_ matchesEnv env) match {
case Some(Overload(specMember, _)) => computeResidualTypeVars(tree, specMember, Ident(specMember), targs, env)
case _ => super.transform(tree)
}
}
case Select(Super(_, _), _) if illegalSpecializedInheritance(currentClass) =>
val pos = tree.pos
debuglog(pos.source.file.name+":"+pos.line+": not specializing call to super inside illegal specialized inheritance class.\\n" + pos.lineContent)
tree
case sel @ Select(_, _) =>
transformSelect(sel)
case PackageDef(pid, stats) =>
tree.symbol.info // make sure specializations have been performed
atOwner(tree, symbol) {
val specMembers = implSpecClasses(stats) map localTyper.typed
treeCopy.PackageDef(tree, pid, transformStats(stats ::: specMembers, symbol.moduleClass))
}
case Template(parents, self, body) =>
def transformTemplate = {
val specMembers = makeSpecializedMembers(tree.symbol.enclClass) ::: (implSpecClasses(body) map localTyper.typed)
if (!symbol.isPackageClass)
(new CollectMethodBodies)(tree)
val parents1 = map2(currentOwner.info.parents, parents)((tpe, parent) =>
TypeTree(tpe) setPos parent.pos)
treeCopy.Template(tree,
parents1 /*currentOwner.info.parents.map(tpe => TypeTree(tpe) setPos parents.head.pos)*/ ,
self,
atOwner(currentOwner)(transformTrees(body ::: specMembers)))
}
transformTemplate
case ddef @ DefDef(_, _, _, vparamss, _, _) if info.isDefinedAt(symbol) =>
def transformDefDef = {
if (symbol.isConstructor) {
val t = atOwner(symbol)(forwardCtorCall(tree.pos, gen.mkSuperInitCall, vparamss, symbol.owner))
if (symbol.isPrimaryConstructor)
localTyper.typedPos(symbol.pos)(deriveDefDef(tree)(_ => Block(List(t), Literal(Constant(())))))
else // duplicate the original constructor
reportError(duplicateBody(ddef, info(symbol).target))(_ => ddef)
}
else info(symbol) match {
case Implementation(target) =>
assert(body.isDefinedAt(target), "sym: " + symbol.fullName + " target: " + target.fullName)
// we have an rhs, specialize it
val tree1 = reportError(duplicateBody(ddef, target))(_ => ddef)
debuglog("implementation: " + tree1)
deriveDefDef(tree1)(transform)
case NormalizedMember(target) =>
logResult("constraints")(satisfiabilityConstraints(typeEnv(symbol))) match {
case Some(constraint) if !target.isDeferred =>
// we have an rhs, specialize it
val tree1 = reportError(duplicateBody(ddef, target, constraint))(_ => ddef)
debuglog("implementation: " + tree1)
deriveDefDef(tree1)(transform)
case _ =>
deriveDefDef(tree)(_ => localTyper typed gen.mkSysErrorCall("Fatal error in code generation: this should never be called."))
}
case SpecialOverride(target) =>
assert(body.isDefinedAt(target), "sym: " + symbol.fullName + " target: " + target.fullName)
//debuglog("moving implementation, body of target " + target + ": " + body(target))
log("%s is param accessor? %b".format(ddef.symbol, ddef.symbol.isParamAccessor))
// we have an rhs, specialize it
val tree1 = addBody(ddef, target)
(new ChangeOwnerTraverser(target, tree1.symbol))(tree1.rhs)
debuglog("changed owners, now: " + tree1)
deriveDefDef(tree1)(transform)
case SpecialOverload(original, env) =>
debuglog("completing specialized " + symbol.fullName + " calling " + original)
debuglog("special overload " + original + " -> " + env)
val t = DefDef(symbol, { vparamss: List[List[Symbol]] =>
val fun = Apply(Select(This(symbol.owner), original),
makeArguments(original, vparamss.head))
debuglog("inside defdef: " + symbol + "; type: " + symbol.tpe + "; owner: " + symbol.owner)
gen.maybeMkAsInstanceOf(fun,
symbol.owner.thisType.memberType(symbol).finalResultType,
symbol.owner.thisType.memberType(original).finalResultType)
})
debuglog("created special overload tree " + t)
debuglog("created " + t)
reportError {
localTyper.typed(t)
} {
_ => super.transform(tree)
}
case fwd @ Forward(_) =>
debuglog("forward: " + fwd + ", " + ddef)
val rhs1 = forwardCall(tree.pos, gen.mkAttributedRef(symbol.owner.thisType, fwd.target), vparamss)
debuglog("-->d completed forwarder to specialized overload: " + fwd.target + ": " + rhs1)
reportError {
localTyper.typed(deriveDefDef(tree)(_ => rhs1))
} {
_ => super.transform(tree)
}
case SpecializedAccessor(target) =>
val rhs1 = if (symbol.isGetter)
gen.mkAttributedRef(target)
else
Assign(gen.mkAttributedRef(target), Ident(vparamss.head.head.symbol))
debuglog("specialized accessor: " + target + " -> " + rhs1)
localTyper.typed(deriveDefDef(tree)(_ => rhs1))
case Abstract(targ) =>
debuglog("abstract: " + targ)
localTyper.typed(deriveDefDef(tree)(rhs => rhs))
case SpecialSuperAccessor(targ) =>
debuglog("special super accessor: " + targ + " for " + tree)
localTyper.typed(deriveDefDef(tree)(rhs => rhs))
}
}
expandInnerNormalizedMembers(transformDefDef)
case ddef @ DefDef(_, _, _, _, _, _) =>
val tree1 = expandInnerNormalizedMembers(tree)
super.transform(tree1)
case ValDef(_, _, _, _) if symbol.hasFlag(SPECIALIZED) && !symbol.isParamAccessor =>
def transformValDef = {
assert(body.isDefinedAt(symbol.alias), body)
val tree1 = deriveValDef(tree)(_ => body(symbol.alias).duplicate)
debuglog("now typing: " + tree1 + " in " + tree.symbol.owner.fullName)
val d = new SpecializationDuplicator(emptyEnv)
val newValDef = d.retyped(
localTyper.context1.asInstanceOf[d.Context],
tree1,
symbol.alias.enclClass,
symbol.enclClass,
typeEnv(symbol.alias) ++ typeEnv(tree.symbol)
)
deriveValDef(newValDef)(transform)
}
transformValDef
case _ =>
super.transform(tree)
}
}
/**
* This performs method specialization inside a scope other than a {class, trait, object}: could be another method
* or a value. This specialization is much simpler, since there is no need to record the new members in the class
* signature, their signatures are only visible locally. It works according to the usual logic:
* - we use normalizeMember to create the specialized symbols
* - we leave DefDef stubs in the tree that are later filled in by tree duplication and adaptation
* @see duplicateBody
*/
    private def expandInnerNormalizedMembers(tree: Tree) = tree match {
      // Only a method-local def (owner is a method) that mentions specialized
      // type variables and has not already been expanded qualifies.
      case ddef @ DefDef(_, _, _, vparams :: Nil, _, rhs)
        if ddef.symbol.owner.isMethod &&
        specializedTypeVars(ddef.symbol.info).nonEmpty &&
        !ddef.symbol.hasFlag(SPECIALIZED) =>

        val sym = ddef.symbol
        val owner = sym.owner
        // normalizeMember returns the original symbol plus one symbol per
        // specialized variant; length <= 1 means nothing to specialize.
        val norm = normalizeMember(owner, sym, emptyEnv)

        if (norm.length > 1) {
          // record the body for duplication
          body(sym) = rhs
          parameters(sym) = vparams.map(_.symbol)
          // to avoid revisiting the member, we can set the SPECIALIZED
          // flag. nobody has to see this anyway :)
          sym.setFlag(SPECIALIZED)
          // create empty bodies for specializations; they are filled in later
          // by tree duplication and adaptation (see duplicateBody)
          localTyper.typed(Block(norm.tail.map(sym => DefDef(sym, { vparamss: List[List[Symbol]] => EmptyTree })), ddef))
        } else
          tree
      case _ =>
        tree
    }
/** Duplicate the body of the given method `tree` to the new symbol `source`.
*
* Knowing that the method can be invoked only in the `castmap` type environment,
     *  this method will insert casts for all the expressions of types mapped in the
* `castmap`.
*/
    private def duplicateBody(tree: DefDef, source: Symbol, castmap: TypeEnv = emptyEnv) = {
      val symbol = tree.symbol
      // Graft the body of `source` onto `tree` first (fresh param symbols etc.),
      // then fully retype it below.
      val meth = addBody(tree, source)

      val d = new SpecializationDuplicator(castmap)
      debuglog("-->d DUPLICATING: " + meth)
      // Retype the grafted body under the combined type environments of the
      // source method and the specialized target, rebinding `this` from the
      // source's enclosing class to the target's.
      d.retyped(
        localTyper.context1.asInstanceOf[d.Context],
        meth,
        source.enclClass,
        symbol.enclClass,
        typeEnv(source) ++ typeEnv(symbol)
      )
    }
/** Put the body of 'source' as the right hand side of the method 'tree'.
* The destination method gets fresh symbols for type and value parameters,
* and the body is updated to the new symbols, and owners adjusted accordingly.
* However, if the same source tree is used in more than one place, full re-typing
* is necessary. @see method duplicateBody
*/
private def addBody(tree: DefDef, source: Symbol): DefDef = {
val symbol = tree.symbol
debuglog("specializing body of" + symbol.defString)
val DefDef(_, _, tparams, vparams :: Nil, tpt, _) = tree
val env = typeEnv(symbol)
val boundTvars = env.keySet
val origtparams = source.typeParams.filter(tparam => !boundTvars(tparam) || !isPrimitiveValueType(env(tparam)))
if (origtparams.nonEmpty || symbol.typeParams.nonEmpty)
debuglog("substituting " + origtparams + " for " + symbol.typeParams)
// skolemize type parameters
val oldtparams = tparams map (_.symbol)
val newtparams = deriveFreshSkolems(oldtparams)
map2(tparams, newtparams)(_ setSymbol _)
// create fresh symbols for value parameters to hold the skolem types
val newSyms = cloneSymbolsAtOwnerAndModify(vparams map (_.symbol), symbol, _.substSym(oldtparams, newtparams))
// replace value and type parameters of the old method with the new ones
// log("Adding body for " + tree.symbol + " - origtparams: " + origtparams + "; tparams: " + tparams)
// log("Type vars of: " + source + ": " + source.typeParams)
// log("Type env of: " + tree.symbol + ": " + boundTvars)
// log("newtparams: " + newtparams)
val symSubstituter = new ImplementationAdapter(
parameters(source) ::: origtparams,
newSyms ::: newtparams,
source.enclClass,
false) // don't make private fields public
val newBody = symSubstituter(body(source).duplicate)
tpt modifyType (_.substSym(oldtparams, newtparams))
copyDefDef(tree)(vparamss = List(newSyms map ValDef.apply), rhs = newBody)
}
/** Create trees for specialized members of 'sClass', based on the
* symbols that are already there.
*/
    private def makeSpecializedMembers(sClass: Symbol): List[Tree] = {
      // add special overrides first
//      if (!specializedClass.hasFlag(SPECIALIZED))
//        for (m <- specialOverrides(specializedClass)) specializedClass.info.decls.enter(m)
      val mbrs = new mutable.ListBuffer[Tree]
      var hasSpecializedFields = false

      // Only members created by specialization, originating in source
      // (sourceFile ne null), whose type environment is satisfiable.
      for (m <- sClass.info.decls
             if m.hasFlag(SPECIALIZED)
                 && (m.sourceFile ne null)
                 && satisfiable(typeEnv(m), !sClass.hasFlag(SPECIALIZED))) {
        debuglog("creating tree for " + m.fullName)
        if (m.isMethod)  {
          if (info(m).target.hasAccessorFlag) hasSpecializedFields = true
          if (m.isClassConstructor) {
            // Clone the original constructor's parameters with specialized
            // names and the constructor's (possibly specialized) param types.
            val origParams = parameters(info(m).target)
            val vparams = (
              map2(m.info.paramTypes, origParams)((tp, sym) =>
                m.newValue(specializedName(sym, typeEnv(sClass)), sym.pos, sym.flags) setInfo tp
              )
            )
            // param accessors for private members (the others are inherited from the generic class)
            if (m.isPrimaryConstructor) {
              for (param <- vparams ; if sClass.info.nonPrivateMember(param.name) == NoSymbol) {
                val acc = param.cloneSymbol(sClass, param.flags | PARAMACCESSOR | PRIVATE)
                sClass.info.decls.enter(acc)
                mbrs += ValDef(acc, EmptyTree).setType(NoType).setPos(m.pos)
              }
            }
            // ctor
            mbrs += DefDef(m, Modifiers(m.flags), mmap(List(vparams))(ValDef.apply), EmptyTree)
          } else {
            // Method stub with an empty body; filled in later by the tree transformer.
            mbrs += DefDef(m, { paramss: List[List[Symbol]] => EmptyTree })
          }
        } else if (m.isValue) {
          mbrs += ValDef(m).setType(NoType)
        } else if (m.isClass) {
//           mbrs  +=
//              ClassDef(m, Template(m.info.parents map TypeTree, noSelfType, List())
//                         .setSymbol(m.newLocalDummy(m.pos)))
//            log("created synthetic class: " + m.fullName)
        }
      }
      if (hasSpecializedFields) {
        // Synthesize a marker method (nme.SPECIALIZED_INSTANCE) returning whether
        // this class or one of its parents carries the SPECIALIZED flag.
        val isSpecializedInstance = sClass :: sClass.parentSymbols exists (_ hasFlag SPECIALIZED)
        val sym = sClass.newMethod(nme.SPECIALIZED_INSTANCE, sClass.pos) setInfoAndEnter MethodType(Nil, BooleanTpe)

        mbrs += DefDef(sym, Literal(Constant(isSpecializedInstance)).setType(BooleanTpe)).setType(NoType)
      }
      mbrs.toList
    }
/** Create specialized class definitions */
    def implSpecClasses(trees: List[Tree]): List[Tree] = {
      trees flatMap {
        case tree @ ClassDef(_, _, _, impl) =>
          tree.symbol.info // force specialization
          // One synthetic ClassDef per specialized variant recorded for this class.
          for (((sym1, env), specCls) <- specializedClass if sym1 == tree.symbol) yield {
            debuglog("created synthetic class: " + specCls + " of " + sym1 + " in " + pp(env))
            val parents = specCls.info.parents.map(TypeTree)
            // Empty template: members are generated later (see makeSpecializedMembers).
            ClassDef(specCls, atPos(impl.pos)(Template(parents, noSelfType, List()))
              .setSymbol(specCls.newLocalDummy(sym1.pos))) setPos tree.pos
          }
        case _ => Nil
      } sortBy (_.name.decoded) // deterministic order of the synthetic classes
    }
}
private def forwardCall(pos: scala.reflect.internal.util.Position, receiver: Tree, paramss: List[List[ValDef]]): Tree = {
val argss = mmap(paramss)(x => Ident(x.symbol))
atPos(pos) { (receiver /: argss) (Apply.apply) }
}
/** Forward to the generic class constructor. If the current class initializes
* specialized fields corresponding to parameters, it passes null to the superclass
* constructor.
*
* For example:
* {{{
* case class Tuple2[T, U](x: T, y: U)
*
* class Tuple2$II {
* val _x$I: Int = ..
* def x = _x$I
* // same for y
* def this(x: Int, y: Int) {
* super.this(null.asInstanceOf[Int], null.asInstanceOf[Int])
* }
* }
* }}}
*
* Note that erasure first transforms `null.asInstanceOf[Int]` to `unbox(null)`, which is 0.
* Then it adapts the argument `unbox(null)` of type Int to the erased parameter type of Tuple2,
* which is Object, so it inserts a `box` call and we get `box(unbox(null))`, which is
* `new Integer(0)` (not `null`).
*
* However it does not make sense to create an Integer instance to be stored in the generic field
* of the superclass: that field is never used. Therefore we mark the `null` tree with the
* [[SpecializedSuperConstructorCallArgument]] attachment and special-case erasure to replace
* `box(unbox(null))` by `null` in this case.
*/
private def forwardCtorCall(pos: scala.reflect.internal.util.Position, receiver: Tree, paramss: List[List[ValDef]], clazz: Symbol): Tree = {
log(s"forwardCtorCall($pos, $receiver, $paramss, $clazz)")
/* A constructor parameter `f` initializes a specialized field
* iff:
* - it is specialized itself
* - there is a getter for the original (non-specialized) field in the same class
* - there is a getter for the specialized field in the same class
*/
def initializesSpecializedField(f: Symbol) = (
(f.name endsWith nme.SPECIALIZED_SUFFIX)
&& clazz.info.member(f.unexpandedName).isPublic
&& clazz.info.decl(f.name).suchThat(_.isGetter) != NoSymbol
)
val argss = mmap(paramss)(x =>
if (initializesSpecializedField(x.symbol))
gen.mkAsInstanceOf(Literal(Constant(null)).updateAttachment(SpecializedSuperConstructorCallArgument), x.symbol.tpe)
else
Ident(x.symbol)
)
atPos(pos) { (receiver /: argss) (Apply.apply) }
}
/** Add method m to the set of symbols for which we need an implementation tree
* in the tree transformer.
*
* @note This field is part of the specializeTypes subcomponent, so any symbols
   *        that are added here are not garbage collected at the end of a compiler run!
*/
def addConcreteSpecMethod(m: Symbol) {
if (currentRun.compiles(m)) concreteSpecMethods += m
}
  /** Adapt each forwarded argument to the parameter type expected by `fun`,
   *  inserting an asInstanceOf only when the types differ (maybeMkAsInstanceOf).
   */
  private def makeArguments(fun: Symbol, vparams: List[Symbol]): List[Tree] = (
    //! TODO: make sure the param types are seen from the right prefix
    map2(fun.info.paramTypes, vparams)((tp, arg) => gen.maybeMkAsInstanceOf(Ident(arg), tp, arg.tpe))
  )
  /** The tree transformer that applies the specialization phase to a
   *  compilation unit; a no-op when -no-specialization is set.
   */
  class SpecializationTransformer(unit: CompilationUnit) extends Transformer {
    informProgress("specializing " + unit)
    override def transform(tree: Tree) = {
      val resultTree = if (settings.nospecialization) tree
      else exitingSpecialize(specializeCalls(unit).transform(tree))

      // Remove the final modifier and @inline annotation from anything in the
      // original class (since it's being overridden in at least one subclass).
      //
      // We do this here so that the specialized subclasses will correctly copy
      // final and @inline.
      info.foreach {
        case (sym, SpecialOverload(target, _)) => {
          sym.resetFlag(FINAL)
          target.resetFlag(FINAL)
          sym.removeAnnotation(ScalaInlineClass)
          target.removeAnnotation(ScalaInlineClass)
        }
        case _ => {}
      }
      resultTree
    }
  }
object SpecializedSuperConstructorCallArgument
}
| slothspot/scala | src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala | Scala | bsd-3-clause | 90,053 |
package org.gedanken.farley.parser.modules
/**
*
* parser/module/Module.scala
*
* Copyright 2013, 2015 Logan O'Sullivan Bruns
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import akka.actor.ActorRef
import scala.util.matching.Regex
/** Base trait for parser modules. A module is an ordered list of rules; each
 *  rule pairs regular expressions with a handler that turns a match into a
 *  response string (or null when it declines to answer).
 */
trait Module {

  /** A rule fires when one of `regexes` matches a parse and `process`
   *  produces a non-null response for the match.
   */
  case class Rule(regexes: List[Regex], process: (Regex.Match, ModuleContext) => String)

  /** Rules are consulted in order; the first non-null response wins. */
  val rules : List[Rule]

  /** Evaluate a single parse: lazily try every (rule, regex) pair in order
   *  and return the first non-null response, or null when none fires.
   */
  def evaluate(parse: String, context: ModuleContext) : String = {
    val responses =
      for {
        rule <- rules.iterator
        regex <- rule.regexes.iterator
        m <- regex.findFirstMatchIn(parse).iterator
      } yield rule.process(m, context)
    // Iterators are lazy, so handlers after the first hit are never invoked.
    responses.find(_ != null).orNull
  }

  /** Evaluate each candidate parse in turn; first non-null response wins. */
  def evaluate(parses: Array[String], context: ModuleContext) : String =
    parses.iterator.map(evaluate(_, context)).find(_ != null).orNull
}
| loganbruns/farley | parser/src/main/scala/org/gedanken/farley/parser/modules/Module.scala | Scala | apache-2.0 | 1,460 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.orc
import java.io.File
import org.apache.spark.sql.{AnalysisException, Row}
import org.apache.spark.sql.TestingUDT.{IntervalData, IntervalUDT}
import org.apache.spark.sql.execution.datasources.orc.OrcSuite
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/** Runs the shared OrcSuite test matrix against the Hive-based ORC
 *  implementation, plus Hive-specific regression tests.
 */
class HiveOrcSourceSuite extends OrcSuite with TestHiveSingleton {
  // Selects the "hive" ORC reader/writer in the shared OrcSuite tests.
  override val orcImp: String = "hive"

  override def beforeAll(): Unit = {
    super.beforeAll()

    // External Hive table over the ORC directory prepared by OrcSuite.
    sql(
      s"""CREATE EXTERNAL TABLE normal_orc(
         |  intField INT,
         |  stringField STRING
         |)
         |STORED AS ORC
         |LOCATION '${orcTableAsDir.toURI}'
       """.stripMargin)

    sql(
      s"""INSERT INTO TABLE normal_orc
         |SELECT intField, stringField FROM orc_temp_table
       """.stripMargin)

    // Two temporary views backed by the same path through the Hive ORC source.
    spark.sql(
      s"""CREATE TEMPORARY VIEW normal_orc_source
         |USING org.apache.spark.sql.hive.orc
         |OPTIONS (
         |  PATH '${new File(orcTableAsDir.getAbsolutePath).toURI}'
         |)
       """.stripMargin)

    spark.sql(
      s"""CREATE TEMPORARY VIEW normal_orc_as_source
         |USING org.apache.spark.sql.hive.orc
         |OPTIONS (
         |  PATH '${new File(orcTableAsDir.getAbsolutePath).toURI}'
         |)
       """.stripMargin)
  }

  // Writes CHAR/VARCHAR data with Hive, then verifies both a Hive table and a
  // Spark table over the same files read back identically padded values.
  test("SPARK-19459/SPARK-18220: read char/varchar column written by Hive") {
    val location = Utils.createTempDir()
    val uri = location.toURI
    try {
      hiveClient.runSqlHive("USE default")
      hiveClient.runSqlHive(
        """
          |CREATE EXTERNAL TABLE hive_orc(
          |  a STRING,
          |  b CHAR(10),
          |  c VARCHAR(10),
          |  d ARRAY<CHAR(3)>)
          |STORED AS orc""".stripMargin)
      // Hive throws an exception if I assign the location in the create table statement.
      hiveClient.runSqlHive(
        s"ALTER TABLE hive_orc SET LOCATION '$uri'")
      hiveClient.runSqlHive(
        """
          |INSERT INTO TABLE hive_orc
          |SELECT 'a', 'b', 'c', ARRAY(CAST('d' AS CHAR(3)))
          |FROM (SELECT 1) t""".stripMargin)

      // We create a different table in Spark using the same schema which points to
      // the same location.
      spark.sql(
        s"""
           |CREATE EXTERNAL TABLE spark_orc(
           |  a STRING,
           |  b CHAR(10),
           |  c VARCHAR(10),
           |  d ARRAY<CHAR(3)>)
           |STORED AS orc
           |LOCATION '$uri'""".stripMargin)
      // CHAR columns come back space-padded to their declared length.
      val result = Row("a", "b         ", "c", Seq("d  "))
      checkAnswer(spark.table("hive_orc"), result)
      checkAnswer(spark.table("spark_orc"), result)
    } finally {
      hiveClient.runSqlHive("DROP TABLE IF EXISTS hive_orc")
      hiveClient.runSqlHive("DROP TABLE IF EXISTS spark_orc")
      Utils.deleteRecursively(location)
    }
  }

  // Both write and read paths must reject types ORC cannot represent, with
  // specific error messages.
  test("SPARK-24204 error handling for unsupported data types") {
    withTempDir { dir =>
      val orcDir = new File(dir, "orc").getCanonicalPath

      // write path
      var msg = intercept[AnalysisException] {
        sql("select interval 1 days").write.mode("overwrite").orc(orcDir)
      }.getMessage
      assert(msg.contains("Cannot save interval data type into external storage."))

      msg = intercept[AnalysisException] {
        sql("select null").write.mode("overwrite").orc(orcDir)
      }.getMessage
      assert(msg.contains("ORC data source does not support null data type."))

      msg = intercept[AnalysisException] {
        spark.udf.register("testType", () => new IntervalData())
        sql("select testType()").write.mode("overwrite").orc(orcDir)
      }.getMessage
      assert(msg.contains("ORC data source does not support calendarinterval data type."))

      // read path
      msg = intercept[AnalysisException] {
        val schema = StructType(StructField("a", CalendarIntervalType, true) :: Nil)
        spark.range(1).write.mode("overwrite").orc(orcDir)
        spark.read.schema(schema).orc(orcDir).collect()
      }.getMessage
      assert(msg.contains("ORC data source does not support calendarinterval data type."))

      msg = intercept[AnalysisException] {
        val schema = StructType(StructField("a", new IntervalUDT(), true) :: Nil)
        spark.range(1).write.mode("overwrite").orc(orcDir)
        spark.read.schema(schema).orc(orcDir).collect()
      }.getMessage
      assert(msg.contains("ORC data source does not support calendarinterval data type."))
    }
  }

  // Exercised with and without metastore conversion; the expected bloom filter
  // stream kind differs between Hive 2.3+ and older (pre ORC-101) versions.
  test("Check BloomFilter creation") {
    Seq(true, false).foreach { convertMetastore =>
      withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> s"$convertMetastore") {
        if (HiveUtils.isHive23) {
          testBloomFilterCreation(org.apache.orc.OrcProto.Stream.Kind.BLOOM_FILTER_UTF8)
        } else {
          // Before ORC-101
          testBloomFilterCreation(org.apache.orc.OrcProto.Stream.Kind.BLOOM_FILTER)
        }
      }
    }
  }

  test("Enforce direct encoding column-wise selectively") {
    Seq(true, false).foreach { convertMetastore =>
      withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> s"$convertMetastore") {
        testSelectiveDictionaryEncoding(isSelective = false, isHive23 = HiveUtils.isHive23)
      }
    }
  }
}
| aosagie/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcSourceSuite.scala | Scala | apache-2.0 | 6,098 |
/*
* Feather-Crest is a library that simplifies interaction with the EVE Online API.
* Copyright (C) 2016 Calavoow
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package feather.crest.api
import com.typesafe.scalalogging.LazyLogging
import feather.crest.models._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.async.Async.{async,await}
/** Integration tests for the CREST market models: live orders, history,
 *  market groups and adjusted/average prices.
 */
class MarketSpec extends FlatSpec with Matchers with ScalaFutures with LazyLogging {
  import Authentication.auth

  // Default timeout for whenReady blocks in this suite.
  implicit override val patienceConfig = PatienceConfig(timeout = 10 seconds)

  "marketTypes" should "get market data for itemtype Hammerhead II" in {
    val ham2Orders : Future[(MarketOrders, MarketOrders)] = async {
      // Note: no Futures! But neither will the outer thread block.
      val aRoot: Root = await(Root.public())
      val regions: Regions = await(aRoot.regions.follow())
      // Note that I use {{.get}} here, which could throw an exception,
      // but simplifies this example.
      val forge: Region = await(regions.items.find(_.name == "The Forge").get
        .follow())
      /**
       * From the type of [[Region.marketSellLink]]
       * we see that we need an CrestLink[ItemType],
       * so lets handle that in _parallel_.
       *
       * Async-await will automatically find independent asynchronous requests,
       * and run them in parallel.
       *
       * Note: I know that the item is on the first page of itemtypes,
       * this saves me a little time and simplifies things.
       **/
      val itemTypes = await(aRoot.itemTypes.construct().head)
      // Then we find the respective item in the list.
      val ham2Link = itemTypes.items.find(_.name == "Hammerhead II").get
      // Now we put everything together and get the buy and sell orders.
      val ham2Buy : MarketOrders = await(forge.marketBuyLink(ham2Link)
        .follow())
      val ham2Sell : MarketOrders = await(forge.marketSellLink(ham2Link)
        .follow())
      (ham2Buy, ham2Sell)
    }

    // Invariants every order of this item type must satisfy.
    def invariantOrder(item : MarketOrders.Item): Unit = {
      item.volume should be > 0L
      item.price should be > 0D
      item.minVolume should be > 0L
      item.`type`.name should equal("Hammerhead II")
    }

    whenReady(ham2Orders) {
      case (ham2Buy, ham2Sell) =>
        ham2Buy.items should not be empty // There are always buy order in Jita
        ham2Buy.items.foreach { buyItem =>
          buyItem.buy should be (true)
          invariantOrder(buyItem)
        }
        ham2Sell.items.foreach { sellItem =>
          sellItem.buy should be (false)
          invariantOrder(sellItem)
        }
    }
  }

  it should "get market history for Hammerhead II in Domain" in {
    // Shorter timeout for this test, shadowing the suite-wide 10s default.
    implicit val patienceConfig = PatienceConfig(timeout = 5 seconds)
    val marketHistory = for(
      r <- Root.authed(); // Need auth for history.
      itemLink <- r.itemTypes.follow(auth).map(_.items.find(_.name == "Hammerhead II").get);
      regionLink <- r.regions.follow(auth).map(_.items.find(_.name == "Domain").get);
      history <- MarketHistory.fetch(regionLink, itemLink)(auth)
    ) yield {
      history
    }
    whenReady(marketHistory) { history =>
      history.isDefined should equal(true)
      val h = history.get
      h.items.size should be > 100 // There should be many history items.
      h.totalCount should equal(h.items.size)
    }
  }

  it should "get market groups" in {
    val marketG = for(
      r <- Root.public();
      marketGroups <- r.marketGroups.follow();
      marketGroup <- marketGroups.items.head.follow();
      allMarketTypes <- Future.sequence(marketGroup.types.construct())
    ) yield (marketGroups, allMarketTypes)
    whenReady(marketG) { case(marketGroups, allMarketTypes) =>
      // Check if not paginated
      marketGroups.items.size should equal(marketGroups.totalCount)
      // Check if all market types present.
      val list = allMarketTypes.map(_.items).reduce(_ ++ _)
      list.size should equal(allMarketTypes.head.totalCount)
    }
  }

  it should "get market prices" in {
    val prices = for(
      r <- Root.public();
      marketPrices <- r.marketPrices.follow()
    ) yield marketPrices
    whenReady(prices) { p =>
      // Check if indeed non-paginated and all items are present.
      p.totalCount should equal(p.items.size)
      p.items.foreach { it =>
        // Prices are optional; when present they must be real numbers.
        it.adjustedPrice.foreach(x => x.isNaN should be (false))
        it.averagePrice.foreach(x => x.isNaN should be (false))
        it.`type`.id_str should equal(it.`type`.id.toString)
      }
    }
  }
}
| Calavoow/feather-crest | src/test/scala/feather/crest/api/MarketSpec.scala | Scala | gpl-3.0 | 5,088 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.language.experimental.macros
import scala.reflect.ClassTag
import scala.util.Random
import scala.util.control.NonFatal
import org.scalatest.{Assertions, BeforeAndAfterAll}
import org.scalatest.concurrent.{Eventually, Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.exceptions.TestFailedDueToTimeoutException
import org.scalatest.time.Span
import org.scalatest.time.SpanSugar._
import org.apache.spark.SparkEnv
import org.apache.spark.sql.{Dataset, Encoder, QueryTest, Row}
import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder, RowEncoder}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.plans.physical.AllTuples
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.connector.read.streaming.{Offset => OffsetV2, SparkDataStream}
import org.apache.spark.sql.execution.datasources.v2.StreamingDataSourceV2Relation
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.continuous.{ContinuousExecution, EpochCoordinatorRef, IncrementAndGetEpoch}
import org.apache.spark.sql.execution.streaming.sources.MemorySink
import org.apache.spark.sql.execution.streaming.state.StateStore
import org.apache.spark.sql.streaming.StreamingQueryListener._
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.util.{Clock, SystemClock, Utils}
/**
* A framework for implementing tests for streaming queries and sources.
*
* A test consists of a set of steps (expressed as a `StreamAction`) that are executed in order,
* blocking as necessary to let the stream catch up. For example, the following adds some data to
* a stream, blocking until it can verify that the correct values are eventually produced.
*
* {{{
* val inputData = MemoryStream[Int]
* val mapped = inputData.toDS().map(_ + 1)
*
* testStream(mapped)(
* AddData(inputData, 1, 2, 3),
* CheckAnswer(2, 3, 4))
* }}}
*
* Note that while we do sleep to allow the other thread to progress without spinning,
* `StreamAction` checks should not depend on the amount of time spent sleeping. Instead they
* should check the actual progress of the stream before verifying the required test condition.
*
* Currently it is assumed that all streaming queries will eventually complete in 10 seconds to
* avoid hanging forever in the case of failures. However, individual suites can change this
* by overriding `streamingTimeout`.
*/
trait StreamTest extends QueryTest with SharedSparkSession with TimeLimits with BeforeAndAfterAll {
// Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
implicit val defaultSignaler: Signaler = ThreadSignaler
  /** Stops the StateStore after all tests, even if super.afterAll() throws. */
  override def afterAll(): Unit = {
    try {
      super.afterAll()
    } finally {
      StateStore.stop() // stop the state store maintenance thread and unload store providers
    }
  }
  /** Trigger used by default when running a test stream: ProcessingTime(0),
   *  i.e. no minimum interval between micro-batches.
   */
  protected val defaultTrigger = Trigger.ProcessingTime(0)

  /** How long to wait for an active stream to catch up when checking a result. */
  val streamingTimeout = 60.seconds

  /** A trait for actions that can be performed while testing a streaming DataFrame. */
  trait StreamAction

  /** A trait to mark actions that require the stream to be actively running. */
  trait StreamMustBeRunning
/**
* Adds the given data to the stream. Subsequent check answers will block until this data has
* been processed.
*/
  object AddData {
    // Convenience factory: wraps the source and its data into an AddDataMemory action.
    def apply[A](source: MemoryStreamBase[A], data: A*): AddDataMemory[A] =
      AddDataMemory(source, data)
  }
/**
* Adds data to multiple memory streams such that all the data will be made visible in the
* same batch. This is applicable only to MicroBatchExecution, as this coordination cannot be
* performed at the driver in ContinuousExecutions.
*/
object MultiAddData {
def apply[A]
(source1: MemoryStream[A], data1: A*)(source2: MemoryStream[A], data2: A*): StreamAction = {
apply((source1, data1), (source2, data2))
}
def apply[A](inputs: (MemoryStream[A], Seq[A])*): StreamAction = {
val actions = inputs.map { case (source, data) => AddDataMemory(source, data) }
StreamProgressLockedActions(actions, desc = actions.mkString("[ ", " | ", " ]"))
}
}
/** A trait that can be extended when testing a source. */
  trait AddData extends StreamAction {
    /**
     * Called to add data to a source. It should find the source to add data to from
     * the active query, and then return the source object the data was added to, as well as
     * the offset of the added data.
     */
    def addData(query: Option[StreamExecution]): (SparkDataStream, OffsetV2)
  }
  /** An action that performs an arbitrary external side effect while the test runs. */
  trait ExternalAction extends StreamAction {
    def runAction(): Unit
  }
  /** [[AddData]] implementation for in-memory streams. */
  case class AddDataMemory[A](source: MemoryStreamBase[A], data: Seq[A]) extends AddData {
    override def toString: String = s"AddData to $source: ${data.mkString(",")}"
    // Adding to a MemoryStream does not need the query; the stream itself returns the new offset.
    override def addData(query: Option[StreamExecution]): (SparkDataStream, OffsetV2) = {
      (source, source.addData(data))
    }
  }
/**
* Checks to make sure that the current data stored in the sink matches the `expectedAnswer`.
* This operation automatically blocks until all added data has been processed.
*/
object CheckAnswer {
def apply[A : Encoder](data: A*): CheckAnswerRows = {
val encoder = encoderFor[A]
val toExternalRow = RowEncoder(encoder.schema).resolveAndBind()
CheckAnswerRows(
data.map(d => toExternalRow.fromRow(encoder.toRow(d))),
lastOnly = false,
isSorted = false)
}
def apply(rows: Row*): CheckAnswerRows = CheckAnswerRows(rows, false, false)
def apply(globalCheckFunction: Seq[Row] => Unit): CheckAnswerRowsByFunc =
CheckAnswerRowsByFunc(globalCheckFunction, false)
}
/**
* Checks to make sure that the current data stored in the sink matches the `expectedAnswer`.
* This operation automatically blocks until all added data has been processed.
*/
object CheckLastBatch {
def apply[A : Encoder](data: A*): CheckAnswerRows = {
apply(isSorted = false, data: _*)
}
def apply[A: Encoder](isSorted: Boolean, data: A*): CheckAnswerRows = {
val encoder = encoderFor[A]
val toExternalRow = RowEncoder(encoder.schema).resolveAndBind()
CheckAnswerRows(
data.map(d => toExternalRow.fromRow(encoder.toRow(d))),
lastOnly = true,
isSorted = isSorted)
}
def apply(rows: Row*): CheckAnswerRows = CheckAnswerRows(rows, true, false)
def apply(globalCheckFunction: Seq[Row] => Unit): CheckAnswerRowsByFunc =
CheckAnswerRowsByFunc(globalCheckFunction, true)
}
  /** Asserts the sink's contents (all batches, or only the last) equal `expectedAnswer`. */
  case class CheckAnswerRows(expectedAnswer: Seq[Row], lastOnly: Boolean, isSorted: Boolean)
    extends StreamAction with StreamMustBeRunning {
    override def toString: String = s"$operatorName: ${expectedAnswer.mkString(",")}"
    private def operatorName = if (lastOnly) "CheckLastBatch" else "CheckAnswer"
  }
  /** Asserts the sink's contents contain every row of `expectedAnswer` (subset check). */
  case class CheckAnswerRowsContains(expectedAnswer: Seq[Row], lastOnly: Boolean = false)
    extends StreamAction with StreamMustBeRunning {
    override def toString: String = s"$operatorName: ${expectedAnswer.mkString(",")}"
    private def operatorName = if (lastOnly) "CheckLastBatchContains" else "CheckAnswerContains"
  }
  /** Runs a caller-supplied check function against the sink's contents. */
  case class CheckAnswerRowsByFunc(
      globalCheckFunction: Seq[Row] => Unit,
      lastOnly: Boolean) extends StreamAction with StreamMustBeRunning {
    override def toString: String = if (lastOnly) "CheckLastBatchByFunc" else "CheckAnswerByFunc"
  }
  /** Asserts only on rows written to the sink since the previous check action. */
  case class CheckNewAnswerRows(expectedAnswer: Seq[Row])
    extends StreamAction with StreamMustBeRunning {
    override def toString: String = s"CheckNewAnswer: ${expectedAnswer.mkString(",")}"
  }
  object CheckNewAnswer {
    /** Expects that no new rows were produced since the last check. */
    def apply(): CheckNewAnswerRows = CheckNewAnswerRows(Seq.empty)
    def apply[A: Encoder](data: A, moreData: A*): CheckNewAnswerRows = {
      val encoder = encoderFor[A]
      val toExternalRow = RowEncoder(encoder.schema).resolveAndBind()
      CheckNewAnswerRows((data +: moreData).map(d => toExternalRow.fromRow(encoder.toRow(d))))
    }
    def apply(rows: Row*): CheckNewAnswerRows = CheckNewAnswerRows(rows)
  }
  /** Stops the stream. It must currently be running. */
  case object StopStream extends StreamAction with StreamMustBeRunning
  /** Starts the stream, resuming if data has already been processed. It must not be running. */
  case class StartStream(
      trigger: Trigger = defaultTrigger,
      triggerClock: Clock = new SystemClock,
      additionalConfs: Map[String, String] = Map.empty,
      // A null checkpointLocation falls back to testStream's temporary checkpoint directory.
      checkpointLocation: String = null)
    extends StreamAction
  /** Advance the trigger clock's time manually. */
  case class AdvanceManualClock(timeToAdd: Long) extends StreamAction
/**
* Signals that a failure is expected and should not kill the test.
*
* @param isFatalError if this is a fatal error. If so, the error should also be caught by
* UncaughtExceptionHandler.
* @param assertFailure a function to verify the error.
*/
  case class ExpectFailure[T <: Throwable : ClassTag](
      assertFailure: Throwable => Unit = _ => {},
      isFatalError: Boolean = false) extends StreamAction {
    // Runtime class of T; used to verify the class of the query failure's cause.
    val causeClass: Class[T] = implicitly[ClassTag[T]].runtimeClass.asInstanceOf[Class[T]]
    override def toString(): String =
      s"ExpectFailure[${causeClass.getName}, isFatalError: $isFatalError]"
  }
/**
* Performs multiple actions while locking the stream from progressing.
* This is applicable only to MicroBatchExecution, as progress of ContinuousExecution
* cannot be controlled from the driver.
*/
  case class StreamProgressLockedActions(actions: Seq[StreamAction], desc: String = null)
    extends StreamAction {
    // Prefer the caller-supplied description; fall back to the generated case-class toString.
    override def toString(): String = {
      if (desc != null) desc else super.toString
    }
  }
/** Assert that a body is true */
class Assert(condition: => Boolean, val message: String = "") extends StreamAction {
def run(): Unit = { Assertions.assert(condition) }
override def toString: String = s"Assert(<condition>, $message)"
}
object Assert {
def apply(condition: => Boolean, message: String = ""): Assert = new Assert(condition, message)
def apply(message: String)(body: => Unit): Assert = new Assert( { body; true }, message)
def apply(body: => Unit): Assert = new Assert( { body; true }, "")
}
/** Assert that a condition on the active query is true */
class AssertOnQuery(val condition: StreamExecution => Boolean, val message: String)
extends StreamAction {
override def toString: String = s"AssertOnQuery(<condition>, $message)"
}
object AssertOnQuery {
def apply(condition: StreamExecution => Boolean, message: String = ""): AssertOnQuery = {
new AssertOnQuery(condition, message)
}
def apply(message: String)(condition: StreamExecution => Boolean): AssertOnQuery = {
new AssertOnQuery(condition, message)
}
}
/** Execute arbitrary code */
object Execute {
def apply(name: String)(func: StreamExecution => Any): AssertOnQuery =
AssertOnQuery(query => { func(query); true }, name)
def apply(func: StreamExecution => Any): AssertOnQuery = apply("Execute")(func)
}
  /** Blocks (as an action) until the continuous query has committed the given epoch. */
  object AwaitEpoch {
    def apply(epoch: Long): AssertOnQuery =
      Execute {
        case s: ContinuousExecution => s.awaitEpoch(epoch)
        case _ => throw new IllegalStateException("microbatch cannot await epoch")
      }
  }
  /** Forces the continuous query's epoch coordinator to start a new epoch and waits for it. */
  object IncrementEpoch {
    def apply(): AssertOnQuery =
      Execute {
        case s: ContinuousExecution =>
          val newEpoch = EpochCoordinatorRef.get(s.currentEpochCoordinatorId, SparkEnv.get)
            .askSync[Long](IncrementAndGetEpoch)
          // awaitEpoch takes the last *committed* epoch, hence newEpoch - 1.
          s.awaitEpoch(newEpoch - 1)
        case _ => throw new IllegalStateException("microbatch cannot increment epoch")
      }
  }
/**
* Executes the specified actions on the given streaming DataFrame and provides helpful
* error messages in the case of failures or incorrect answers.
*
* Note that if the stream is not explicitly started before an action that requires it to be
* running then it will be automatically started before performing any other actions.
*/
  def testStream(
      _stream: Dataset[_],
      outputMode: OutputMode = OutputMode.Append)(actions: StreamAction*): Unit = synchronized {
    import org.apache.spark.sql.streaming.util.StreamManualClock
    // `synchronized` is added to prevent the user from calling multiple `testStream`s concurrently
    // because this method assumes there is only one active query in its `StreamingQueryListener`
    // and it may not work correctly when multiple `testStream`s run concurrently.
    val stream = _stream.toDF()
    val sparkSession = stream.sparkSession // use the session in DF, not the default session
    var pos = 0
    var currentStream: StreamExecution = null
    var lastStream: StreamExecution = null
    val awaiting = new mutable.HashMap[Int, OffsetV2]() // source index -> offset to wait for
    val sink = new MemorySink
    val resetConfValues = mutable.Map[String, Option[String]]()
    val defaultCheckpointLocation =
      Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
    var manualClockExpectedTime = -1L
    @volatile
    var streamThreadDeathCause: Throwable = null
    // Set UncaughtExceptionHandler in `onQueryStarted` so that we can ensure catching fatal errors
    // during query initialization.
    val listener = new StreamingQueryListener {
      override def onQueryStarted(event: QueryStartedEvent): Unit = {
        // Note: this assumes there is only one query active in the `testStream` method.
        Thread.currentThread.setUncaughtExceptionHandler(
          (_: Thread, e: Throwable) => streamThreadDeathCause = e)
      }
      override def onQueryProgress(event: QueryProgressEvent): Unit = {}
      override def onQueryTerminated(event: QueryTerminatedEvent): Unit = {}
    }
    sparkSession.streams.addListener(listener)
    // If the test doesn't manually start the stream, we do it automatically at the beginning.
    val startedManually =
      actions.takeWhile(!_.isInstanceOf[StreamMustBeRunning]).exists(_.isInstanceOf[StartStream])
    val startedTest = if (startedManually) actions else StartStream() +: actions
    // Renders the action list with a "=>" marker at the action currently being executed.
    def testActions = actions.zipWithIndex.map {
      case (a, i) =>
        if ((pos == i && startedManually) || (pos == (i + 1) && !startedManually)) {
          "=> " + a.toString
        } else {
          "   " + a.toString
        }
    }.mkString("\n")
    def currentOffsets =
      if (currentStream != null) currentStream.committedOffsets.toString else "not started"
    def threadState =
      if (currentStream != null && currentStream.queryExecutionThread.isAlive) "alive" else "dead"
    def threadStackTrace =
      if (currentStream != null && currentStream.queryExecutionThread.isAlive) {
        s"Thread stack trace: ${currentStream.queryExecutionThread.getStackTrace.mkString("\n")}"
      } else {
        ""
      }
    // Full diagnostic dump included in every test failure message.
    def testState = {
      val sinkDebugString = sink.toDebugString
      s"""
         |== Progress ==
         |$testActions
         |
         |== Stream ==
         |Output Mode: $outputMode
         |Stream state: $currentOffsets
         |Thread state: $threadState
         |$threadStackTrace
         |${if (streamThreadDeathCause != null) stackTraceToString(streamThreadDeathCause) else ""}
         |
         |== Sink ==
         |$sinkDebugString
         |
         |
         |== Plan ==
         |${if (currentStream != null) currentStream.lastExecution else ""}
         """.stripMargin
    }
    def verify(condition: => Boolean, message: String): Unit = {
      if (!condition) {
        failTest(message)
      }
    }
    // Retries `func` until `streamingTimeout`, converting a final failure into a test failure.
    def eventually[T](message: String)(func: => T): T = {
      try {
        Eventually.eventually(Timeout(streamingTimeout)) {
          func
        }
      } catch {
        case NonFatal(e) =>
          failTest(message, e)
      }
    }
    // Fails the test with the given message/cause, appending the full diagnostic dump.
    def failTest(message: String, cause: Throwable = null) = {
      // Recursively pretty print an exception with truncated stacktrace and internal cause
      def exceptionToString(e: Throwable, prefix: String = ""): String = {
        val base = s"$prefix${e.getMessage}" +
          e.getStackTrace.take(10).mkString(s"\n$prefix", s"\n$prefix\t", "\n")
        if (e.getCause != null) {
          base + s"\n$prefix\tCaused by: " + exceptionToString(e.getCause, s"$prefix\t")
        } else {
          base
        }
      }
      val c = Option(cause).map(exceptionToString(_))
      val m = if (message != null && message.size > 0) Some(message) else None
      fail(
        s"""
           |${(m ++ c).mkString(": ")}
           |$testState
         """.stripMargin)
    }
    var lastFetchedMemorySinkLastBatchId: Long = -1
    // Waits for all awaited offsets to be committed, then reads the requested rows from the sink.
    def fetchStreamAnswer(
        currentStream: StreamExecution,
        lastOnly: Boolean = false,
        sinceLastFetchOnly: Boolean = false) = {
      verify(
        !(lastOnly && sinceLastFetchOnly), "both lastOnly and sinceLastFetchOnly cannot be true")
      verify(currentStream != null, "stream not running")
      // Block until all data added has been processed for all the source
      awaiting.foreach { case (sourceIndex, offset) =>
        failAfter(streamingTimeout) {
          currentStream.awaitOffset(sourceIndex, offset, streamingTimeout.toMillis)
          // Make sure all processing including no-data-batches have been executed
          if (!currentStream.triggerClock.isInstanceOf[StreamManualClock]) {
            currentStream.processAllAvailable()
          }
        }
      }
      val lastExecution = currentStream.lastExecution
      if (currentStream.isInstanceOf[MicroBatchExecution] && lastExecution != null) {
        // Verify if stateful operators have correct metadata and distribution
        // This can often catch hard to debug errors when developing stateful operators
        lastExecution.executedPlan.collect { case s: StatefulOperator => s }.foreach { s =>
          assert(s.stateInfo.map(_.numPartitions).contains(lastExecution.numStateStores))
          s.requiredChildDistribution.foreach { d =>
            withClue(s"$s specifies incorrect # partitions in requiredChildDistribution $d") {
              assert(d.requiredNumPartitions.isDefined)
              assert(d.requiredNumPartitions.get >= 1)
              if (d != AllTuples) {
                assert(d.requiredNumPartitions.get == s.stateInfo.get.numPartitions)
              }
            }
          }
        }
      }
      val rows = try {
        if (sinceLastFetchOnly) {
          if (sink.latestBatchId.getOrElse(-1L) < lastFetchedMemorySinkLastBatchId) {
            failTest("MemorySink was probably cleared since last fetch. Use CheckAnswer instead.")
          }
          sink.dataSinceBatch(lastFetchedMemorySinkLastBatchId)
        } else {
          if (lastOnly) sink.latestBatchData else sink.allData
        }
      } catch {
        case e: Exception =>
          failTest("Exception while getting data from sink", e)
      }
      lastFetchedMemorySinkLastBatchId = sink.latestBatchId.getOrElse(-1L)
      rows
    }
    // Interprets a single StreamAction against the current query state.
    def executeAction(action: StreamAction): Unit = {
      logInfo(s"Processing test stream action: $action")
      action match {
        case StartStream(trigger, triggerClock, additionalConfs, checkpointLocation) =>
          verify(currentStream == null || !currentStream.isActive, "stream already running")
          verify(triggerClock.isInstanceOf[SystemClock]
            || triggerClock.isInstanceOf[StreamManualClock],
            "Use either SystemClock or StreamManualClock to start the stream")
          if (triggerClock.isInstanceOf[StreamManualClock]) {
            manualClockExpectedTime = triggerClock.asInstanceOf[StreamManualClock].getTimeMillis()
          }
          val metadataRoot = Option(checkpointLocation).getOrElse(defaultCheckpointLocation)
          // Apply extra configs, remembering the previous value of each so the finally block
          // can restore them after the test.
          additionalConfs.foreach(pair => {
            val value =
              if (sparkSession.conf.contains(pair._1)) {
                Some(sparkSession.conf.get(pair._1))
              } else None
            resetConfValues(pair._1) = value
            sparkSession.conf.set(pair._1, pair._2)
          })
          lastStream = currentStream
          currentStream =
            sparkSession
              .streams
              .startQuery(
                None,
                Some(metadataRoot),
                stream,
                Map(),
                sink,
                outputMode,
                trigger = trigger,
                triggerClock = triggerClock)
              .asInstanceOf[StreamingQueryWrapper]
              .streamingQuery
          // Wait until the initialization finishes, because some tests need to use `logicalPlan`
          // after starting the query.
          try {
            currentStream.awaitInitialization(streamingTimeout.toMillis)
            currentStream match {
              case s: ContinuousExecution => eventually("IncrementalExecution was not created") {
                assert(s.lastExecution != null)
              }
              case _ =>
            }
          } catch {
            case _: StreamingQueryException =>
              // Ignore the exception. `StopStream` or `ExpectFailure` will catch it as well.
          }
        case AdvanceManualClock(timeToAdd) =>
          verify(currentStream != null,
            "can not advance manual clock when a stream is not running")
          verify(currentStream.triggerClock.isInstanceOf[StreamManualClock],
            s"can not advance clock of type ${currentStream.triggerClock.getClass}")
          val clock = currentStream.triggerClock.asInstanceOf[StreamManualClock]
          assert(manualClockExpectedTime >= 0)
          // Make sure we don't advance ManualClock too early. See SPARK-16002.
          eventually("StreamManualClock has not yet entered the waiting state") {
            assert(clock.isStreamWaitingAt(manualClockExpectedTime))
          }
          clock.advance(timeToAdd)
          manualClockExpectedTime += timeToAdd
          verify(clock.getTimeMillis() === manualClockExpectedTime,
            s"Unexpected clock time after updating: " +
              s"expecting $manualClockExpectedTime, current ${clock.getTimeMillis()}")
        case StopStream =>
          verify(currentStream != null, "can not stop a stream that is not running")
          try failAfter(streamingTimeout) {
            currentStream.stop()
            verify(!currentStream.queryExecutionThread.isAlive,
              s"microbatch thread not stopped")
            verify(!currentStream.isActive,
              "query.isActive() is false even after stopping")
            verify(currentStream.exception.isEmpty,
              s"query.exception() is not empty after clean stop: " +
                currentStream.exception.map(_.toString()).getOrElse(""))
          } catch {
            case _: InterruptedException =>
            case e: org.scalatest.exceptions.TestFailedDueToTimeoutException =>
              failTest(
                "Timed out while stopping and waiting for microbatchthread to terminate.", e)
            case t: Throwable =>
              failTest("Error while stopping stream", t)
          } finally {
            lastStream = currentStream
            currentStream = null
          }
        case ef: ExpectFailure[_] =>
          verify(currentStream != null, "can not expect failure when stream is not running")
          try failAfter(streamingTimeout) {
            val thrownException = intercept[StreamingQueryException] {
              currentStream.awaitTermination()
            }
            eventually("microbatch thread not stopped after termination with failure") {
              assert(!currentStream.queryExecutionThread.isAlive)
            }
            verify(currentStream.exception === Some(thrownException),
              s"incorrect exception returned by query.exception()")
            val exception = currentStream.exception.get
            verify(exception.cause.getClass === ef.causeClass,
              "incorrect cause in exception returned by query.exception()\n" +
                s"\tExpected: ${ef.causeClass}\n\tReturned: ${exception.cause.getClass}")
            if (ef.isFatalError) {
              // This is a fatal error, `streamThreadDeathCause` should be set to this error in
              // UncaughtExceptionHandler.
              verify(streamThreadDeathCause != null &&
                streamThreadDeathCause.getClass === ef.causeClass,
                "UncaughtExceptionHandler didn't receive the correct error\n" +
                  s"\tExpected: ${ef.causeClass}\n\tReturned: $streamThreadDeathCause")
              streamThreadDeathCause = null
            }
            ef.assertFailure(exception.getCause)
          } catch {
            case _: InterruptedException =>
            case e: org.scalatest.exceptions.TestFailedDueToTimeoutException =>
              failTest("Timed out while waiting for failure", e)
            case t: Throwable =>
              failTest("Error while checking stream failure", t)
          } finally {
            lastStream = currentStream
            currentStream = null
          }
        case a: AssertOnQuery =>
          verify(currentStream != null || lastStream != null,
            "cannot assert when no stream has been started")
          val streamToAssert = Option(currentStream).getOrElse(lastStream)
          try {
            verify(a.condition(streamToAssert), s"Assert on query failed: ${a.message}")
          } catch {
            case NonFatal(e) =>
              failTest(s"Assert on query failed: ${a.message}", e)
          }
        case a: Assert =>
          val streamToAssert = Option(currentStream).getOrElse(lastStream)
          verify({ a.run(); true }, s"Assert failed: ${a.message}")
        case a: AddData =>
          try {
            // If the query is running with manual clock, then wait for the stream execution
            // thread to start waiting for the clock to increment. This is needed so that we
            // are adding data when there is no trigger that is active. This would ensure that
            // the data gets deterministically added to the next batch triggered after the manual
            // clock is incremented in following AdvanceManualClock. This avoids race conditions
            // between the test thread and the stream execution thread in tests using manual
            // clock.
            if (currentStream != null &&
                currentStream.triggerClock.isInstanceOf[StreamManualClock]) {
              val clock = currentStream.triggerClock.asInstanceOf[StreamManualClock]
              eventually("Error while synchronizing with manual clock before adding data") {
                if (currentStream.isActive) {
                  assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
                }
              }
              if (!currentStream.isActive) {
                failTest("Query terminated while synchronizing with manual clock")
              }
            }
            // Add data
            val queryToUse = Option(currentStream).orElse(Option(lastStream))
            val (source, offset) = a.addData(queryToUse)
            // Maps `source` to its positional index among the streaming sources of `plan`.
            def findSourceIndex(plan: LogicalPlan): Option[Int] = {
              plan
                .collect {
                  // v1 source
                  case r: StreamingExecutionRelation => r.source
                  // v2 source
                  case r: StreamingDataSourceV2Relation => r.stream
                  // We can add data to memory stream before starting it. Then the input plan has
                  // not been processed by the streaming engine and contains `StreamingRelationV2`.
                  case r: StreamingRelationV2 if r.sourceName == "memory" =>
                    r.table.asInstanceOf[MemoryStreamTable].stream
                }
                .zipWithIndex
                .find(_._1 == source)
                .map(_._2)
            }
            // Try to find the index of the source to which data was added. Either get the index
            // from the current active query or the original input logical plan.
            val sourceIndex =
              queryToUse.flatMap { query =>
                findSourceIndex(query.logicalPlan)
              }.orElse {
                findSourceIndex(stream.logicalPlan)
              }.orElse {
                queryToUse.flatMap { q =>
                  findSourceIndex(q.lastExecution.logical)
                }
              }.getOrElse {
                throw new IllegalArgumentException(
                  "Could not find index of the source to which data was added")
              }
            // Store the expected offset of added data to wait for it later
            awaiting.put(sourceIndex, offset)
          } catch {
            case NonFatal(e) =>
              failTest("Error adding data", e)
          }
        case e: ExternalAction =>
          e.runAction()
        case CheckAnswerRows(expectedAnswer, lastOnly, isSorted) =>
          val sparkAnswer = fetchStreamAnswer(currentStream, lastOnly)
          QueryTest.sameRows(expectedAnswer, sparkAnswer, isSorted).foreach {
            error => failTest(error)
          }
        case CheckAnswerRowsContains(expectedAnswer, lastOnly) =>
          val sparkAnswer = currentStream match {
            case null => fetchStreamAnswer(lastStream, lastOnly)
            case s => fetchStreamAnswer(s, lastOnly)
          }
          QueryTest.includesRows(expectedAnswer, sparkAnswer).foreach {
            error => failTest(error)
          }
        case CheckAnswerRowsByFunc(globalCheckFunction, lastOnly) =>
          val sparkAnswer = currentStream match {
            case null => fetchStreamAnswer(lastStream, lastOnly)
            case s => fetchStreamAnswer(s, lastOnly)
          }
          try {
            globalCheckFunction(sparkAnswer)
          } catch {
            case e: Throwable => failTest(e.toString)
          }
        case CheckNewAnswerRows(expectedAnswer) =>
          val sparkAnswer = fetchStreamAnswer(currentStream, sinceLastFetchOnly = true)
          QueryTest.sameRows(expectedAnswer, sparkAnswer).foreach {
            error => failTest(error)
          }
      }
    }
    try {
      startedTest.foreach {
        case StreamProgressLockedActions(actns, _) =>
          // Perform actions while holding the stream from progressing
          assert(currentStream != null,
            s"Cannot perform stream-progress-locked actions $actns when query is not active")
          assert(currentStream.isInstanceOf[MicroBatchExecution],
            s"Cannot perform stream-progress-locked actions on non-microbatch queries")
          currentStream.asInstanceOf[MicroBatchExecution].withProgressLocked {
            actns.foreach(executeAction)
          }
          pos += 1
        case action: StreamAction =>
          executeAction(action)
          pos += 1
      }
      if (streamThreadDeathCause != null) {
        failTest("Stream Thread Died", streamThreadDeathCause)
      }
    } catch {
      case _: InterruptedException if streamThreadDeathCause != null =>
        failTest("Stream Thread Died", streamThreadDeathCause)
      case e: org.scalatest.exceptions.TestFailedDueToTimeoutException =>
        failTest("Timed out waiting for stream", e)
    } finally {
      if (currentStream != null && currentStream.queryExecutionThread.isAlive) {
        currentStream.stop()
      }
      // Rollback prev configuration values
      resetConfValues.foreach {
        case (key, Some(value)) => sparkSession.conf.set(key, value)
        case (key, None) => sparkSession.conf.unset(key)
      }
      sparkSession.streams.removeListener(listener)
    }
  }
/**
* Creates a stress test that randomly starts/stops/adds data/checks the result.
*
* @param ds a dataframe that executes + 1 on a stream of integers, returning the result
* @param addData an add data action that adds the given numbers to the stream, encoding them
* as needed
* @param iterations the iteration number
*/
def runStressTest(
ds: Dataset[Int],
addData: Seq[Int] => StreamAction,
iterations: Int = 100): Unit = {
runStressTest(ds, Seq.empty, (data, running) => addData(data), iterations)
}
/**
* Creates a stress test that randomly starts/stops/adds data/checks the result.
*
* @param ds a dataframe that executes + 1 on a stream of integers, returning the result
* @param prepareActions actions need to run before starting the stress test.
* @param addData an add data action that adds the given numbers to the stream, encoding them
* as needed
* @param iterations the iteration number
*/
def runStressTest(
ds: Dataset[Int],
prepareActions: Seq[StreamAction],
addData: (Seq[Int], Boolean) => StreamAction,
iterations: Int): Unit = {
implicit val intEncoder = ExpressionEncoder[Int]()
var dataPos = 0
var running = true
val actions = new ArrayBuffer[StreamAction]()
actions ++= prepareActions
def addCheck() = { actions += CheckAnswer(1 to dataPos: _*) }
def addRandomData() = {
val numItems = Random.nextInt(10)
val data = dataPos until (dataPos + numItems)
dataPos += numItems
actions += addData(data, running)
}
(1 to iterations).foreach { i =>
val rand = Random.nextDouble()
if(!running) {
rand match {
case r if r < 0.7 => // AddData
addRandomData()
case _ => // StartStream
actions += StartStream()
running = true
}
} else {
rand match {
case r if r < 0.1 =>
addCheck()
case r if r < 0.7 => // AddData
addRandomData()
case _ => // StopStream
addCheck()
actions += StopStream
running = false
}
}
}
if(!running) { actions += StartStream() }
addCheck()
testStream(ds)(actions: _*)
}
object AwaitTerminationTester {
trait ExpectedBehavior
/** Expect awaitTermination to not be blocked */
case object ExpectNotBlocked extends ExpectedBehavior
/** Expect awaitTermination to get blocked */
case object ExpectBlocked extends ExpectedBehavior
/** Expect awaitTermination to throw an exception */
case class ExpectException[E <: Exception]()(implicit val t: ClassTag[E])
extends ExpectedBehavior
private val DEFAULT_TEST_TIMEOUT = 1.second
def test(
expectedBehavior: ExpectedBehavior,
awaitTermFunc: () => Unit,
testTimeout: Span = DEFAULT_TEST_TIMEOUT
): Unit = {
expectedBehavior match {
case ExpectNotBlocked =>
withClue("Got blocked when expected non-blocking.") {
failAfter(testTimeout) {
awaitTermFunc()
}
}
case ExpectBlocked =>
withClue("Was not blocked when expected.") {
intercept[TestFailedDueToTimeoutException] {
failAfter(testTimeout) {
awaitTermFunc()
}
}
}
case e: ExpectException[_] =>
val thrownException =
withClue(s"Did not throw ${e.t.runtimeClass.getSimpleName} when expected.") {
intercept[StreamingQueryException] {
failAfter(testTimeout) {
awaitTermFunc()
}
}
}
assert(thrownException.cause.getClass === e.t.runtimeClass,
"exception of incorrect type was throw")
}
}
}
}
| darionyaphet/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamTest.scala | Scala | apache-2.0 | 37,049 |
package com.twitter.finagle.tracing
import com.twitter.finagle._
import com.twitter.util.{Future, Throw}
object TraceInitializerFilter {
  // Role under which this filter is registered in client/server stacks.
  val role: Stack.Role = Stack.Role("TraceInitializerFilter")
  /**
   * Creates a [[TraceInitializerFilter]].
   *
   * @param newId Set the next TraceId when the tracer is pushed, `true` for clients.
   */
  private[finagle] def apply[Req, Rep](tracer: Tracer, newId: Boolean): Filter[Req, Rep, Req, Rep] =
    new TraceInitializerFilter[Req, Rep](tracer, newId)
  // Variant usable where the request/response types are not yet fixed.
  private[finagle] def typeAgnostic(tracer: Tracer, newId: Boolean): Filter.TypeAgnostic =
    new Filter.TypeAgnostic {
      def toFilter[Req, Rep]: Filter[Req, Rep, Req, Rep] = apply(tracer, newId)
    }
  // Stack module that reads the configured param.Tracer and prepends the filter.
  private[finagle] class Module[Req, Rep](newId: Boolean)
    extends Stack.Module1[param.Tracer, ServiceFactory[Req, Rep]] {
    // Default to client behavior (allocate a new TraceId per request).
    def this() = this(true)
    val role: Stack.Role = TraceInitializerFilter.role
    val description = "Initialize the tracing system"
    def make(_tracer: param.Tracer, next: ServiceFactory[Req, Rep]): ServiceFactory[Req, Rep] = {
      apply(_tracer.tracer, newId).andThen(next)
    }
  }
  /**
   * Create a new stack module for clients. On each request a
   * [[com.twitter.finagle.tracing.Tracer]] will pushed and the next TraceId will be set
   */
  private[finagle] def clientModule[Req, Rep] = new Module[Req, Rep](true)
  /**
   * Create a new stack module for servers. On each request a
   * [[com.twitter.finagle.tracing.Tracer]] will pushed.
   */
  private[finagle] def serverModule[Req, Rep] = new Module[Req, Rep](false)
  // No-op replacement used by stacks that initialize tracing elsewhere.
  private[finagle] def empty[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
    new Stack.Module0[ServiceFactory[Req, Rep]] {
      val role: Stack.Role = TraceInitializerFilter.role
      val description = "Empty Stackable, used Default(Client|Server)"
      def make(next: ServiceFactory[Req, Rep]): ServiceFactory[Req, Rep] = next
    }
}
/**
* The TraceInitializerFilter takes care of span lifecycle events. It is always
* placed first in the service [[com.twitter.finagle.Filter]] chain (or last in
* the [[com.twitter.finagle.Stack]]) so that protocols with trace support will
* override the span resets, and still be properly reported here.
*
 * @note This should be replaced by per-codec trace initializers that
 * are capable of parsing trace information out of the codec.
*
* @param tracer An instance of a tracer to use. Eg: ZipkinTracer
* @param newId Set the next TraceId when the tracer is pushed (used for clients)
*/
class TraceInitializerFilter[Req, Rep](tracer: Tracer, newId: Boolean)
    extends SimpleFilter[Req, Rep] {
  // Dispatch on (is the tracer a no-op?, should a fresh TraceId be pushed?) to select the
  // tracing context in which the request is served.
  def apply(request: Req, service: Service[Req, Rep]): Future[Rep] =
    (tracer.isNull, newId) match {
      case (true, true) => Trace.letId(Trace.nextId) { service(request) }
      case (true, false) => service(request)
      case (false, true) => Trace.letTracerAndNextId(tracer) { service(request) }
      case (false, false) => Trace.letTracer(tracer) { service(request) }
    }
}
/**
* A generic filter that can be used for annotating the Server and Client side
* of a trace. Finagle-specific trace information should live here.
*
* @param label The given name of the service
* @param prefix A prefix for `finagle.version` and `dtab.local`.
* [[com.twitter.finagle.tracing.Annotation Annotation]] keys.
* @param before An [[com.twitter.finagle.tracing.Annotation]] to be recorded
* before the service is called
* @param after An [[com.twitter.finagle.tracing.Annotation]] to be recorded
* after the service's [[com.twitter.util.Future]] is satisfied, regardless of success.
* @param afterFailure Function from String to [[com.twitter.finagle.tracing.Annotation]] to be recorded
* if the service's [[com.twitter.util.Future]] fails.
* @param finagleVersion A thunk that returns the version of finagle. Useful
* for testing.
*/
sealed class AnnotatingTracingFilter[Req, Rep](
  label: String,
  prefix: String,
  before: Annotation,
  after: Annotation,
  afterFailure: String => Annotation,
  finagleVersion: () => String = () => Init.finagleVersion,
  traceMetadata: Boolean = true)
    extends SimpleFilter[Req, Rep] {
  // Secondary constructor: defaults the annotation key prefix to "unknown".
  def this(
    label: String,
    before: Annotation,
    after: Annotation,
    afterFailure: String => Annotation
  ) =
    this(label, "unknown", before, after, afterFailure)
  // Secondary constructor: also defaults the failure annotator.
  def this(label: String, before: Annotation, after: Annotation) = {
    this(label, "unknown", before, after, AnnotatingTracingFilter.defaultAfterFailureTracer)
  }
  // Binary-annotation keys, precomputed once per filter instance.
  private[this] val finagleVersionKey = s"$prefix/finagle.version"
  private[this] val dtabLocalKey = s"$prefix/dtab.local"
  private[this] val labelKey = s"$prefix/finagle.label"
  /**
   * Records `before`, invokes the service, and records `after` when the response is
   * satisfied (plus a failure annotation on Throw). Skips all recording when the
   * current trace is not actively being sampled.
   */
  def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {
    val trace = Trace()
    if (trace.isActivelyTracing) {
      if (traceMetadata) {
        trace.recordServiceName(TraceServiceName() match {
          case Some(l) => l
          case None => label
        })
        trace.recordBinary(labelKey, label)
        trace.recordBinary(finagleVersionKey, finagleVersion())
        // Trace dtab propagation on all requests that have them.
        if (Dtab.local.nonEmpty) {
          trace.recordBinary(dtabLocalKey, Dtab.local.show)
        }
      }
      trace.record(before)
      service(request).respond {
        case Throw(e) =>
          trace.record(afterFailure(s"${e.getClass.getName}: ${e.getMessage}"))
          trace.record(after)
        case _ =>
          trace.record(after)
      }
    } else {
      service(request)
    }
  }
}
object AnnotatingTracingFilter {
  // Default failure annotator: wraps the error description in a trace Message annotation.
  private[finagle] final val defaultAfterFailureTracer = (errorStr: String) =>
    Annotation.Message(s"Error seen in AnnotatingTracingFilter: $errorStr")
}
/**
* Annotate the request with Server specific records (ServerRecv, ServerSend)
*/
object ServerTracingFilter {
  val role = Stack.Role("ServerTracingFilter")
  /** Annotates server-side receive/send events around each request. */
  case class TracingFilter[Req, Rep](
    label: String,
    finagleVersion: () => String = () => Init.finagleVersion)
      extends AnnotatingTracingFilter[Req, Rep](
        label,
        "srv",
        Annotation.ServerRecv,
        Annotation.ServerSend,
        Annotation.ServerSendError(_),
        finagleVersion,
        traceMetadata = false
      )
  /** Stack module installing [[TracingFilter]] unless the configured tracer is a no-op. */
  def module[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
    new Stack.Module2[param.Label, param.Tracer, ServiceFactory[Req, Rep]] {
      val role: Stack.Role = ServerTracingFilter.role
      val description = "Report finagle information and server recv/send events"
      def make(
        _label: param.Label,
        _tracer: param.Tracer,
        next: ServiceFactory[Req, Rep]
      ): ServiceFactory[Req, Rep] =
        if (_tracer.tracer.isNull) next
        else TracingFilter[Req, Rep](_label.label).andThen(next)
    }
}
/**
 * Annotate the request with client-specific trace records: `ClientSend` when
 * the request is dispatched, `ClientRecv` when the response arrives, and
 * `ClientRecvError` (carrying the exception description) on failure.
 */
object ClientTracingFilter {
  val role = Stack.Role("ClientTracingFilter")

  // Binary annotation keys are scoped under the "clnt" prefix. Unlike the
  // server variant, traceMetadata is left at its default (true in the
  // superclass), so this filter also records finagle.version, the label,
  // and any non-empty Dtab.local.
  case class TracingFilter[Req, Rep](
    label: String,
    finagleVersion: () => String = () => Init.finagleVersion)
      extends AnnotatingTracingFilter[Req, Rep](
        label,
        "clnt",
        Annotation.ClientSend,
        Annotation.ClientRecv,
        Annotation.ClientRecvError(_),
        finagleVersion
      )

  /**
   * Stack module inserting [[TracingFilter]]; skipped when the configured
   * tracer is the null tracer.
   */
  def module[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
    new Stack.Module2[param.Label, param.Tracer, ServiceFactory[Req, Rep]] {
      val role: Stack.Role = ClientTracingFilter.role
      val description = "Report finagle information and client send/recv events"
      def make(
        _label: param.Label,
        _tracer: param.Tracer,
        next: ServiceFactory[Req, Rep]
      ): ServiceFactory[Req, Rep] = {
        val param.Tracer(tracer) = _tracer
        if (tracer.isNull) next
        else {
          val param.Label(label) = _label
          TracingFilter[Req, Rep](label).andThen(next)
        }
      }
    }
}
/**
 * Annotate events directly before/after data crosses the wire
 * (`WireSend` / `WireRecv`). Client and server variants differ only in the
 * order of the two annotations, the key prefix, and whether metadata is
 * recorded.
 */
private[finagle] object WireTracingFilter {
  val role = Stack.Role("WireTracingFilter")

  // NOTE(review): failures are always annotated with Annotation.WireRecvError,
  // for both the client ("clnt") and server ("srv") instantiations below --
  // confirm this is intentional rather than a copy of the client case.
  case class TracingFilter[Req, Rep](
    label: String,
    prefix: String,
    before: Annotation,
    after: Annotation,
    traceMetadata: Boolean,
    finagleVersion: () => String = () => Init.finagleVersion)
      extends AnnotatingTracingFilter[Req, Rep](
        label,
        prefix,
        before,
        after,
        Annotation.WireRecvError(_),
        finagleVersion,
        traceMetadata
      )

  // Shared module body parameterized by direction; the filter is omitted
  // when the configured tracer is the null tracer.
  private def module[Req, Rep](
    prefix: String,
    before: Annotation,
    after: Annotation,
    traceMetadata: Boolean
  ): Stackable[ServiceFactory[Req, Rep]] =
    new Stack.Module2[param.Label, param.Tracer, ServiceFactory[Req, Rep]] {
      val role: Stack.Role = WireTracingFilter.role
      val description = "Report finagle information and wire send/recv events"
      def make(
        _label: param.Label,
        _tracer: param.Tracer,
        next: ServiceFactory[Req, Rep]
      ): ServiceFactory[Req, Rep] = {
        val param.Tracer(tracer) = _tracer
        if (tracer.isNull) next
        else {
          val param.Label(label) = _label
          TracingFilter[Req, Rep](label, prefix, before, after, traceMetadata).andThen(next)
        }
      }
    }

  // Client: a request is sent first, then the response is received.
  // Metadata recording is disabled (handled elsewhere in the client stack --
  // TODO confirm).
  def clientModule[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] = module(
    "clnt",
    Annotation.WireSend,
    Annotation.WireRecv,
    traceMetadata = false
  )

  // Server: a request is received first, then the response is sent.
  // Metadata recording enabled here (ServerTracingFilter disables its own).
  def serverModule[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] = module(
    "srv",
    Annotation.WireRecv,
    Annotation.WireSend,
    traceMetadata = true
  )
}
| luciferous/finagle | finagle-core/src/main/scala/com/twitter/finagle/tracing/TraceInitializerFilter.scala | Scala | apache-2.0 | 9,703 |
package org.pico.statsd
import org.pico.disposal.Auto
import org.pico.disposal.std.autoCloseable._
import org.pico.event.Bus
import org.pico.statsd.arb._
import org.pico.statsd.datapoint._
import org.specs2.ScalaCheck
import org.specs2.mutable.Specification
import org.pico.statsd.syntax.event._
/**
 * Verifies that alerts are rendered as DogStatsD event datagrams of the form
 * `_e{titleLen,textLen}:title|text|p:<priority>|t:<alert type>|#tag,...`
 * and, in particular, that alerts are sent even when the client's sample
 * rate would otherwise drop metrics.
 */
class AlertSpec extends Specification with ScalaCheck {
  sequential
  "Should send datagrams" >> {
    // Alert for plain strings: every published String becomes a low-priority
    // "success" event titled "eventTitle".
    implicit val stringAlert = Alert[String](Event("eventTitle").comap(txt => EventData(txt, Priority.Low, AlertType.Success)))
    "via withAlert" in prop { (prefix: Identifier, aspect: Identifier, txts: List[String]) =>
      implicit val client = TestStastDClient(prefix.value, SampleRate.always, "name:John", "lastname:Doe")
      // Auto(...) disposes the bus/subscription when the block exits.
      for {
        bus <- Auto(Bus[String])
        _ <- Auto(bus.withAlert(aspect.value, "extraTag"))
      } {
        txts.foreach(bus.publish)
      }
      val expected = txts.map { txt =>
        // Event title is prefix.aspect.eventTitle, skipping empty segments.
        val title = List(prefix, aspect, Identifier("eventTitle")).map(_.value).filter(_.nonEmpty).mkString(".")
        s"_e{${title.length},${txt.length}}:$title|$txt|p:low|t:success|#name:John,lastname:Doe,extraTag"
      }
      // sentMessages is accumulated newest-first, hence the reverse.
      client.sentMessages.value.reverse ==== expected
    }
    "always via generic sink" in prop { txts: List[Identifier] =>
      // SampleRate(0.01) would drop ~99% of metrics; alerts must ignore it.
      implicit val client = TestStastDClient("prefix", SampleRate(0.01), "name:John", "lastname:Doe")
      for {
        bus <- Auto(Bus[String])
        _ <- Auto(bus.withAlert("aspect", "extraTag"))
      } {
        txts.map(_.value).foreach(bus.publish)
      }
      val title = "prefix.aspect.eventTitle"
      val expected = txts.map(_.value).map { txt =>
        s"_e{${title.length},${txt.length}}:$title|$txt|p:low|t:success|#name:John,lastname:Doe,extraTag"
      }
      val actual = client.sentMessages.value.reverse
      actual.length ==== expected.length
      actual ==== expected
    }
    "always via specific sink" in prop { txts: List[EventData] =>
      // Same sampling-bypass check, but publishing EventData directly into
      // an eventSink instead of going through the Alert type class.
      implicit val client = TestStastDClient("prefix", SampleRate(0.01), "name:John", "lastname:Doe")
      for {
        bus <- Auto(Bus[EventData])
        _ <- Auto(bus into eventSink("aspect.eventTitle", "extraTag"))
      } {
        txts.foreach(bus.publish)
      }
      val title = "prefix.aspect.eventTitle"
      val expected = txts.map { e =>
        s"_e{${title.length},${e.text.length}}:$title|${e.text}|p:${e.priority.toString.toLowerCase}|t:${e.alertType.toString.toLowerCase}|#name:John,lastname:Doe,extraTag"
      }
      val actual = client.sentMessages.value.reverse
      actual.length ==== expected.length
      actual ==== expected
    }
  }
}
| pico-works/pico-statsd | pico-statsd/src/test/scala/org/pico/statsd/AlertSpec.scala | Scala | mit | 2,683 |
package au.gov.dva.sopapi.sopref.parsing
import au.gov.dva.sopapi.exceptions.SopParserRuntimeException
import au.gov.dva.sopapi.sopref.parsing.traits.MiscRegexes
import scala.util.Properties
import scala.util.matching.Regex
object SoPExtractorUtilities extends MiscRegexes
{
  // Federal Register of Legislation IDs: 'F', a 4-digit year in 2000-2099,
  // 'C' (compilation) or 'L' (legislative instrument), then 5 digits.
  private val regexForRegisterId = """F(20[0-9]{2})([LC])([0-9]{5})""".r

  /**
   * Splits a register ID such as "F2014L00933" into its components.
   *
   * @throws SopParserRuntimeException if the ID does not have the expected shape.
   */
  def unpackRegisterId(registerId : String) : RegisterIdInfo = {
    registerId match {
      case regexForRegisterId(year,"C",number) => new RegisterIdInfo(registerId, year.toInt,true,number.toInt)
      case regexForRegisterId(year,"L",number) => new RegisterIdInfo(registerId, year.toInt,false,number.toInt)
      case _ => throw new SopParserRuntimeException("Cannot unpack this register ID: " + registerId)
    }
  }

  /**
   * Splits the cleansed SoP text into sections. Each inner list begins with
   * the section title line, followed by the section's body lines.
   */
  def getSections(cleansedSoPText: String, sectionHeaderLineRegex: Regex): List[List[String]] = {
    val acc = List[List[String]]();
    val lines = cleansedSoPText.split(platformNeutralLineEndingRegex.regex).toList
    divideRecursive(List.empty, sectionHeaderLineRegex, acc, lines)
  }

  // Walks the remaining lines, cutting a section each time a header line is
  // found; the header line itself is carried forward as the title of the
  // *next* section. Sections are prepended to acc, so the result is in
  // reverse document order -- callers search the list rather than index it.
  private def divideRecursive(nextSectionTitle: List[String], sectionHeaderLineRegex: Regex, acc: List[List[String]], remaining: List[String]): List[List[String]] = {
    if (remaining.isEmpty)
      acc
    else {
      // Current line plus everything up to (excluding) the next header line.
      val sectionLinesWithTitleForNextHeading: List[String] = remaining.head :: remaining.tail.takeWhile(l => sectionHeaderLineRegex.findFirstMatchIn(l).isEmpty)
      val headerForNextSection = sectionLinesWithTitleForNextHeading.takeRight(1).map(i => i.trim)
      val sectionLinesWithoutTitleForNextSection = sectionLinesWithTitleForNextHeading match {
        // Last section of the document: keep the final line, there is no
        // following section for it to be the title of.
        case lines if (lines.length == remaining.length) => lines
        case lines => lines.dropRight(1)
      }
      val sectionLines = (nextSectionTitle ++ sectionLinesWithoutTitleForNextSection).map(l => l.trim)
      divideRecursive(headerForNextSection, sectionHeaderLineRegex, sectionLines :: acc, remaining.drop(sectionLinesWithTitleForNextHeading.size))
    }
  }

  /**
   * Parses one section block into (optional section number, title, body).
   * The first line is the title; if the following line starts with
   * "<digits>. " the number is extracted and stripped from the body text.
   * A single-line block yields (None, "", lines) unchanged.
   */
  def parseSectionBlock(sectionBlock: List[String]): (Option[Int], String, List[String]) = {
    val lines = sectionBlock
    val title: String = lines.head
    if (lines.tail.isEmpty) {
      return (None,"",lines)
    }
    val numberedLine: String = lines.tail(0)
    // Triple-quoted strings do no escape processing, so single backslashes
    // reach the regex engine as-is. (The previous "\\.\\s" form required a
    // literal backslash in the input and could never match "3. ...".)
    val sectionHeaderLineRegex = """^([0-9]+)\.\s""".r
    val m = sectionHeaderLineRegex.findFirstMatchIn(numberedLine)
    if (m.isDefined) {
      val sectionNumber = m.get.group(1).toInt
      val bodyTextWithoutSectionNumber: List[String] = List(sectionHeaderLineRegex.replaceFirstIn(numberedLine, "")) ++ lines.tail.drop(1)
      return (Some(sectionNumber), title, bodyTextWithoutSectionNumber)
    }
    else {
      return (None, title, lines.tail)
    }
  }

  /**
   * Finds the single section whose title matches `paragraphLineRegex` and
   * returns its number and body lines.
   *
   * @throws SopParserRuntimeException if no section title matches, or the
   *         matching section carries no "<digits>. " number.
   */
  def getSection(cleansedSopText: String, paragraphLineRegex: Regex): (Int, List[String]) = {
    // See note in parseSectionBlock: "\." / "\s" are correct inside a
    // triple-quoted string.
    val sectionHeaderLineRegex = """^([0-9]+)\.\s""".r
    val allSections: List[(Option[Int], String, List[String])] = getSections(cleansedSopText, sectionHeaderLineRegex).map(s => parseSectionBlock(s))
    val sectionForSpecifiedPara = allSections.find(s => paragraphLineRegex.findFirstIn(s._2).nonEmpty)
    if (sectionForSpecifiedPara.isEmpty)
      throw new SopParserRuntimeException("No section found with title matching regex: " + paragraphLineRegex.regex)
    if (sectionForSpecifiedPara.get._1.isEmpty)
      throw new SopParserRuntimeException("Could not determine section number using regex: " + sectionHeaderLineRegex.regex)
    (sectionForSpecifiedPara.get._1.get, sectionForSpecifiedPara.get._3)
  }

  // "(a)".."(z)" followed by "(aa)".."(zz)" -- the paragraph letters used by
  // SoP factor lists.
  def getMainParaLetterSequence = {
    val aToz = 'a' to 'z'
    val doubled = aToz.map(l => s"$l$l")
    val combined = aToz ++ doubled
    combined.map(i => "(" + i + ")")
  }

  // "(i)".."(xiii)": roman numerals used for sub-paragraphs.
  def getSubParaLetterSequence = {
    List("i", "ii", "iii", "iv", "v", "vi", "vii", "viii", "ix", "x", "xi", "xii", "xiii")
      .map(i => ("(" + i + ")"))
  }

  /**
   * Splits a factors section at the first line starting with "(", i.e. the
   * first lettered factor; everything before it is the section header.
   *
   * @throws SopParserRuntimeException if there is no header (the section
   *         starts directly with a lettered factor).
   */
  def splitFactorsSectionToHeaderAndRest(factorsSection: List[String]): (List[String], List[String]) = {
    val (headerLines, rest) = factorsSection.span(l => !l.startsWith("("))
    if (headerLines.isEmpty ) throw new SopParserRuntimeException(s"Cannot split this factors section to head and then the rest: ${factorsSection.mkString(Properties.lineSeparator)}")
    (headerLines, rest)
  }

  /**
   * Splits a single factor into its header text (joined with spaces) and any
   * sub-paragraph lines (those starting with "(").
   */
  def splitFactorToHeaderAndRest(singleFactor: List[String]): (String, List[String]) = {
    if (singleFactor.size == 1)
      return (singleFactor(0), List.empty)
    else {
      val headerLines = singleFactor.head :: singleFactor.tail.takeWhile(l => !l.startsWith("("))
      val rest = singleFactor.drop(headerLines.size)
      return (headerLines.mkString(" "), rest)
    }
  }

  /**
   * Separates trailing lines (those after the last line ending with ";")
   * from the final sub-paragraph, if any such tail exists.
   */
  def splitOutTailIfAny(lastSubParaWithLineBreaks: String): (String, Option[String]) = {
    val asLines = lastSubParaWithLineBreaks.split(platformNeutralLineEndingRegex.regex).toList
    val reversed = asLines.reverse
    val tail = reversed.takeWhile(l => !l.endsWith(";")).reverse
    if (tail.size < asLines.size) {
      val partWithoutTail = asLines.take(asLines.size - tail.size)
      return (partWithoutTail.mkString(Properties.lineSeparator), Some(tail.mkString(Properties.lineSeparator)))
    }
    else return (lastSubParaWithLineBreaks, None)
  }

  /** Splits a factors section (header removed) into one list per "(a)"-style factor. */
  def splitFactorsSectionByFactor(factorsSectionExcludingHead: List[String]): List[List[String]] = {
    val lettersSequence = getMainParaLetterSequence.toList
    divideSectionRecursively(lettersSequence, 1, List.empty, factorsSectionExcludingHead)
  }

  // Generic splitter: repeatedly cuts `remainingLines` at the line that
  // starts with the next marker in `lettersSequence`.
  def divideSectionRecursively(lettersSequence: List[String], nextLetterIndex: Int, acc: List[List[String]], remainingLines: List[String])
  : List[List[String]] = {
    if (remainingLines.isEmpty)
      acc
    else {
      val (factorLines, rest) = partition(lettersSequence, nextLetterIndex, remainingLines)
      divideSectionRecursively(lettersSequence, nextLetterIndex + 1, acc :+ factorLines, rest)
    }
  }

  /** Splits one factor's lines into its "(i)"-style sub-factors. */
  def splitFactorToSubFactors(factorLines: List[String]): List[List[String]] = {
    val romanNumeralsSequence = getSubParaLetterSequence
    divideSectionRecursively(romanNumeralsSequence, 1, List.empty, factorLines)
  }

  private def partition(letterSequence: List[String], nextLetterIndex: Int, remainingLines: List[String]): (List[String], List[String]) = {
    // Edge case: "(i)" follows "(h)" but "(i)" is also the first roman
    // numeral, so a sub-paragraph "(i)" inside factor (h) must be skipped
    // once before the real factor "(i)" is recognized.
    if (letterSequence(nextLetterIndex - 1) == "(h)" && letterSequence(nextLetterIndex) == "(i)") {
      if (remainingLines.head.endsWith(",")) {
        return splitWithSkip(remainingLines,2, lineStartsWithLetter("(i)"));
      }
    }
    // Same ambiguity for "(ii)" following "(hh)".
    if (letterSequence(nextLetterIndex - 1) == "(hh)" && letterSequence(nextLetterIndex) == "(ii)") {
      if (remainingLines.head.endsWith(",")) {
        return splitWithSkip(remainingLines,2, lineStartsWithLetter("(ii)"));
      }
    }
    val nextLetter = letterSequence(nextLetterIndex)
    remainingLines.span(l => !l.startsWith(nextLetter))
  }

  /** Splits `lines` just before the line on which `test` passes for the Nth time. */
  def splitWithSkip(lines: List[String], numberOfTimes: Int, test: String => Boolean): (List[String],List[String]) = {
    val firstPart = takeUntilTestPassedNTimes(lines,numberOfTimes,test)
    (firstPart,lines.drop(firstPart.size))
  }

  /** Takes lines until `test` has passed `numberOfTimes` times (exclusive of that line). */
  def takeUntilTestPassedNTimes(lines: List[String], numberOfTimes: Int, test: String => Boolean): List[String] = {
    def takeRecursive(remaining: List[String], acc: List[String], timesPassed : Int, maxTimes: Int, test: String => Boolean): List[String] = {
      if (remaining.isEmpty) return acc
      else {
        val passOnCurrent = if (test(remaining.head)) 1 else 0
        if (passOnCurrent + timesPassed == maxTimes) return acc
        else takeRecursive(remaining.tail, acc :+ remaining.head, timesPassed + passOnCurrent, maxTimes, test)
      }
    }
    takeRecursive(lines,List(),0,numberOfTimes,test)
  }

  // Requires a trailing space so "(i)" does not match "(ii)" etc.
  private def lineStartsWithLetter(letter: String) (line: String) = {
    val letterFollowedBySpace = letter + " ";
    line.startsWith(letterFollowedBySpace)
  }
}
| govlawtech/dva-sop-api | app/src/main/scala/au/gov/dva/sopapi/sopref/parsing/SoPExtractorUtilities.scala | Scala | apache-2.0 | 8,141 |
package edu.gemini.spModel.dataset
import org.scalacheck._
import org.scalacheck.Gen._
import org.scalacheck.Arbitrary._
import java.time.Instant
import java.util.UUID
/**
 * ScalaCheck `Arbitrary` instances for the dataset model types, intended to
 * be mixed into property-based test suites.
 */
trait Arbitraries {
  implicit val arbitraryDataset: Arbitrary[Dataset] =
    Arbitrary {
      for {
        label <- arbitrary[DatasetLabel]
        file <- arbitrary[String]
        time <- posNum[Long]  // creation time must be positive
      } yield new Dataset(label, file, time)
    }
  implicit val arbDatasetExecRecord: Arbitrary[DatasetExecRecord] =
    Arbitrary {
      for {
        dataset <- arbitrary[Dataset]
        summit <- arbitrary[SummitState]
        archive <- arbitrary[Option[DatasetGsaState]]
      } yield DatasetExecRecord(dataset, summit, archive)
    }
  implicit val arbDatasetGsaState: Arbitrary[DatasetGsaState] =
    Arbitrary {
      for {
        qa <- arbitrary[DatasetQaState]
        timestamp <- arbitrary[Instant]
        md5 <- arbitrary[DatasetMd5]
      } yield DatasetGsaState(qa, timestamp, md5)
    }
  // Labels look like "ABCD-<obs>-<ds>": four uppercase letters and two
  // positive integers.
  implicit val arbDatasetLabel: Arbitrary[DatasetLabel] =
    Arbitrary {
      for {
        name <- listOfN(4, alphaUpperChar).map(_.mkString)
        obs <- posNum[Int]
        ds <- posNum[Int]
      } yield new DatasetLabel(s"$name-$obs-$ds")
    }
  // An MD5 digest is always exactly 16 bytes.
  implicit val arbDatasetMd5: Arbitrary[DatasetMd5] =
    Arbitrary {
      listOfN(16, arbitrary[Byte]).map(a => new DatasetMd5(a.toArray))
    }
  implicit val arbDatasetQaState: Arbitrary[DatasetQaState] =
    Arbitrary { oneOf(DatasetQaState.values()) }
  implicit val arbInstant: Arbitrary[Instant] =
    Arbitrary { arbitrary[Long].map(Instant.ofEpochMilli) }
  implicit val arbFailedRequestStatus: Arbitrary[QaRequestStatus.Failed] =
    Arbitrary { arbitrary[String].map(QaRequestStatus.Failed) }
  implicit val arbSummitMissing: Arbitrary[SummitState.Missing] =
    Arbitrary { arbitrary[DatasetQaState].map(SummitState.Missing.apply) }
  implicit val arbSummitIdle: Arbitrary[SummitState.Idle] =
    Arbitrary { arbitrary[DatasetGsaState].map(SummitState.Idle.apply) }
  implicit val arbSummitActive: Arbitrary[SummitState.ActiveRequest] =
    Arbitrary {
      for {
        gsa <- arbitrary[DatasetGsaState]
        req <- arbitrary[DatasetQaState]
        id <- arbitrary[UUID]
        status <- arbitrary[QaRequestStatus]
        when <- arbitrary[Instant]
        retry <- arbitrary[Int]
      } yield SummitState.ActiveRequest(gsa, req, id, status, when, retry)
    }
  implicit val arbSummitState: Arbitrary[SummitState] =
    Arbitrary {
      Gen.oneOf(arbitrary[SummitState.Missing], arbitrary[SummitState.Idle], arbitrary[SummitState.ActiveRequest])
    }
  implicit val arbQaRequestStatus: Arbitrary[QaRequestStatus] = {
    import QaRequestStatus._
    Arbitrary {
      Gen.oneOf(PendingPost, ProcessingPost, arbitrary[Failed], Accepted)
    }
  }
  // NOTE(review): Gen(_ => ...) uses the old ScalaCheck Gen.apply, which is
  // gone in newer versions (and is non-deterministic under a fixed seed);
  // modern ScalaCheck offers Gen.uuid instead -- confirm before upgrading.
  implicit val arbUuid: Arbitrary[UUID] =
    Arbitrary { Gen(_ => Some(UUID.randomUUID())) }
}
| arturog8m/ocs | bundle/edu.gemini.pot/src/test/scala/edu/gemini/spModel/dataset/Arbitraries.scala | Scala | bsd-3-clause | 2,954 |
package de.mukis.sbt.slick
import sbt._
/**
* Keys for slick code generation
*/
/**
 * sbt setting/task keys for Slick code generation.
 */
trait Keys {
  /**
   * Builds a JDBC URL from an optional database name.
   */
  type MakeUrl = Option[String] => String
  /**
   * Builds the output package name from a database name.
   */
  type MakePackage = String => String
  val slickDriver = SettingKey[String]("slick-driver", "The slick driver, e.g. 'scala.slick.driver.MySQLDriver'")
  val slickOutputDir = SettingKey[File]("slick-output-dir", "The directory (excluding package) to write the classes to, e.g. 'file(src/main)' or (sourceManaged in Compile).value.getPath")
  val slickJdbcDriver = SettingKey[String]("slick-jdbc-driver", "The jdbc driver, e.g. 'com.mysql.jdbc.Driver'")
  // TaskKey (not SettingKey): the URL may depend on other task results.
  val slickUrl = TaskKey[MakeUrl]("slick-url", "URL to database, e.g. 'jdbc:mysql://localhost:3306/your-db'")
  val slickCallForm = SettingKey[CallForm]("slick-call-form", "The calling form to SourceCodeGenerator, with or without credentials included")
  val slickUser = SettingKey[Option[String]]("slick-user", "User to access database")
  val slickPassword = SettingKey[Option[String]]("slick-password", "Password to access database")
  val slickHostName = SettingKey[String]("slick-host-name", "Database server host name, defaults to 'localhost'")
  val slickPort = TaskKey[Int]("slick-port", "Port to access database")
  val slickDatabases = SettingKey[Seq[String]]("slick-databases", "databases to generate tables for")
  val slickPackage = SettingKey[String]("slick-package", "Output package for Tables.scala")
  val slickMakeDbPackage = TaskKey[MakePackage]("slick-db-package", "Call for each database to generate the package name")
  val slickGenTables = TaskKey[Seq[File]]("slick-gen-tables", "Task to generate the table")
}
| muuki88/sbt-slick | src/main/scala/de/mukis/sbt/slick/Keys.scala | Scala | apache-2.0 | 1,714 |
/**
* (c) Copyright 2013 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.modeling.framework
import scala.collection.JavaConverters.asScalaIteratorConverter
import cascading.tuple.Fields
import org.kiji.annotations.ApiAudience
import org.kiji.annotations.ApiStability
/**
* The TupleUtil object contains various convenience functions for dealing with scala and
* Cascading/Scalding tuples.
*/
@ApiAudience.Framework
@ApiStability.Experimental
object TupleUtil {
  /**
   * Lists the field names contained in a Cascading `Fields` object, in
   * order. Only supports fields without a special [[cascading.tuple.Fields.Kind]].
   *
   * @param fields to convert.
   * @return the sequence of field names.
   */
  def fieldsToSeq(fields: Fields): Seq[String] =
    fields.iterator().asScala.map(_.toString).toSeq

  /**
   * Prepares a tuple for a model phase function. A `Tuple1` is unwrapped so
   * single-argument functions receive the bare value; anything else passes
   * through unchanged.
   *
   * @tparam T type expected by the phase function.
   * @param tuple to convert.
   * @return the (possibly unwrapped) argument.
   */
  def tupleToFnArg[T](tuple: Product): T = tuple match {
    case Tuple1(inner) => inner.asInstanceOf[T]
    case whole => whole.asInstanceOf[T]
  }

  /**
   * Normalizes a phase-function result to a tuple: any TupleN passes through
   * unchanged, while a non-tuple result (including case classes) is wrapped
   * in a `Tuple1`.
   *
   * @param result from a model phase function.
   * @return the result as a tuple.
   */
  def fnResultToTuple(result: Any): Product = result match {
    case t: Tuple1[_] => t
    case t: Tuple2[_, _] => t
    case t: Tuple3[_, _, _] => t
    case t: Tuple4[_, _, _, _] => t
    case t: Tuple5[_, _, _, _, _] => t
    case t: Tuple6[_, _, _, _, _, _] => t
    case t: Tuple7[_, _, _, _, _, _, _] => t
    case t: Tuple8[_, _, _, _, _, _, _, _] => t
    case t: Tuple9[_, _, _, _, _, _, _, _, _] => t
    case t: Tuple10[_, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple11[_, _, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple12[_, _, _, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple13[_, _, _, _, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple14[_, _, _, _, _, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple15[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple16[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple17[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple18[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple19[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple20[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple21[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _] => t
    case t: Tuple22[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _] => t
    case scalar => Tuple1(scalar)
  }

  /**
   * Converts a sequence of 1 to 22 elements into the corresponding TupleN.
   * A sequence outside that range fails with a `MatchError`.
   *
   * @tparam T the tuple type to produce.
   * @param sequence to convert.
   * @return the tuple built from the sequence's elements.
   */
  def seqToTuple[T <: Product](sequence: Seq[_]): T = {
    val tuple: Product = sequence match {
      case Seq(v1) => Tuple1(v1)
      case Seq(v1, v2) => (v1, v2)
      case Seq(v1, v2, v3) => (v1, v2, v3)
      case Seq(v1, v2, v3, v4) => (v1, v2, v3, v4)
      case Seq(v1, v2, v3, v4, v5) => (v1, v2, v3, v4, v5)
      case Seq(v1, v2, v3, v4, v5, v6) => (v1, v2, v3, v4, v5, v6)
      case Seq(v1, v2, v3, v4, v5, v6, v7) => (v1, v2, v3, v4, v5, v6, v7)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8) => (v1, v2, v3, v4, v5, v6, v7, v8)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9) => (v1, v2, v3, v4, v5, v6, v7, v8, v9)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
          v19) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
          v19, v20) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
          v20)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
          v19, v20, v21) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
          v20, v21)
      case Seq(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
          v19, v20, v21, v22) =>
        (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
          v20, v21, v22)
    }
    tuple.asInstanceOf[T]
  }
}
| kijiproject/kiji-modeling | kiji-modeling/src/main/scala/org/kiji/modeling/framework/TupleUtil.scala | Scala | apache-2.0 | 7,635 |
// Copyright (c) 2016 PSForever.net to present
package net.psforever.packet.game
import net.psforever.packet.{GamePacketOpcode, Marshallable, PacketHelpers, PlanetSideGamePacket}
import net.psforever.types.Vector3
import scodec.Codec
import scodec.codecs._
/**
 * An `Enumeration` of the kinds of bugs applicable to the reporting system.
 * The ordinal values are the wire values used by [[BugReportMessage]].
 */
object BugType extends Enumeration {
  type Type = Value
  val CRASH,
      GAMEPLAY,
      ART,
      SOUND,
      HARDWARE,
      OTHER
      = Value
  // Serialized as a 4-bit little-endian field, so at most 16 variants fit.
  implicit val codec = PacketHelpers.createEnumerationCodec(this, uint4L)
}
/**
 * Allow the user to report a bug they have found in the game.<br>
 * <br>
 * Bug reports are prepended by the version of the client on which the player is encountering the issue.
 * The last delivered client by Sony Online Entertainment was `3.15.84` with date `Dec 2 2009`.<br>
 * <br>
 * The path of bug reports submitted to the game's official server is not known.
 * @param version_major the client's major version number
 * @param version_minor the client's minor version number
 * @param version_date the date the client was compiled
 * @param bug_type the kind of bug that took place
 * @param repeatable whether the bug is repeatable
 * @param unk na;
 *            always 0?
 * @param zone which zone the bug took place
 * @param pos the location where the bug took place
 * @param summary a short explanation of the bug
 * @param desc a detailed explanation of the bug
 */
final case class BugReportMessage(version_major : Long,
                                  version_minor : Long,
                                  version_date : String,
                                  bug_type : BugType.Value,
                                  repeatable : Boolean,
                                  unk : Int,
                                  zone : Int,
                                  pos : Vector3,
                                  summary : String,
                                  desc : String)
  extends PlanetSideGamePacket {
  type Packet = BugReportMessage
  def opcode = GamePacketOpcode.BugReportMessage
  // Encoding is delegated to the companion's scodec codec.
  def encode = BugReportMessage.encode(this)
}
object BugReportMessage extends Marshallable[BugReportMessage] {
  // Field order must match the case class declaration exactly.
  implicit val codec : Codec[BugReportMessage] = (
    ("versionMajor" | uint32L) ::
      ("versionMinor" | uint32L) ::
      ("versionDate" | PacketHelpers.encodedString) ::
      ("bug_type" | BugType.codec) ::
      // 3 unused bits between bug_type (4 bits) and the repeatable flag;
      // presumably padding in the original protocol -- always skipped.
      ignore(3) ::
      ("repeatable" | bool) ::
      ("unk" | uint4L) ::
      ("zone" | uint8L) ::
      ("pos" | Vector3.codec_pos) ::
      // The summary string requires a 4-bit alignment adjustment at this
      // point in the bit stream; desc needs none.
      ("summary" | PacketHelpers.encodedWideStringAligned(4)) ::
      ("desc" | PacketHelpers.encodedWideString)
    ).as[BugReportMessage]
}
| Fate-JH/PSF-Server | common/src/main/scala/net/psforever/packet/game/BugReportMessage.scala | Scala | gpl-3.0 | 2,745 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx.lib
import org.apache.spark.SparkFunSuite
import org.apache.spark.graphx._
import org.apache.spark.graphx.util.GraphGenerators
/**
 * Reference PageRank implementation for an nRows x nCols grid graph, used to
 * validate the GraphX implementation. Vertices are numbered in row-major
 * order and edges point only right and down.
 */
object GridPageRank {
  /**
   * @param nRows     grid height
   * @param nCols     grid width
   * @param nIter     number of synchronous PageRank iterations
   * @param resetProb random-reset (teleport) probability
   * @return (vertexId, rank) for every grid cell
   */
  def apply(nRows: Int, nCols: Int, nIter: Int, resetProb: Double): Seq[(VertexId, Double)] = {
    // inNbrs(i): ids with an edge into i; outDegree(i): fan-out of i.
    // NOTE(review): mutable.MutableList is deprecated/removed in Scala 2.13;
    // fine for the 2.x versions this test targets.
    val inNbrs = Array.fill(nRows * nCols)(collection.mutable.MutableList.empty[Int])
    val outDegree = Array.fill(nRows * nCols)(0)
    // Convert row column address into vertex ids (row major order)
    def sub2ind(r: Int, c: Int): Int = r * nCols + c
    // Make the grid graph: each cell links to its right and down neighbors.
    for (r <- 0 until nRows; c <- 0 until nCols) {
      val ind = sub2ind(r, c)
      if (r + 1 < nRows) {
        outDegree(ind) += 1
        inNbrs(sub2ind(r + 1, c)) += ind
      }
      if (c + 1 < nCols) {
        outDegree(ind) += 1
        inNbrs(sub2ind(r, c + 1)) += ind
      }
    }
    // Synchronous power iteration:
    //   pr(i) = resetProb + (1 - resetProb) * sum over in-neighbors of
    //           (their previous rank / their out-degree)
    var pr = Array.fill(nRows * nCols)(resetProb)
    for (iter <- 0 until nIter) {
      val oldPr = pr
      pr = new Array[Double](nRows * nCols)
      for (ind <- 0 until (nRows * nCols)) {
        pr(ind) = resetProb + (1.0 - resetProb) *
          inNbrs(ind).map( nbr => oldPr(nbr) / outDegree(nbr)).sum
      }
    }
    (0L until (nRows * nCols)).zip(pr)
  }
}
class PageRankSuite extends SparkFunSuite with LocalSparkContext {
/** Sum of squared per-vertex rank differences; vertices absent from `b`
 *  contribute their full squared rank (missing treated as 0.0). */
def compareRanks(a: VertexRDD[Double], b: VertexRDD[Double]): Double = {
  a.leftJoin(b) { case (id, a, bOpt) => (a - bOpt.getOrElse(0.0)) * (a - bOpt.getOrElse(0.0)) }
    .map { case (id, error) => error }.sum()
}

test("Star PageRank") {
  withSpark { sc =>
    val nVertices = 100
    // Star graph: every vertex 1..n-1 has a single edge to vertex 0.
    val starGraph = GraphGenerators.starGraph(sc, nVertices).cache()
    val resetProb = 0.15
    val errorTol = 1.0e-5
    val staticRanks1 = starGraph.staticPageRank(numIter = 1, resetProb).vertices
    val staticRanks2 = starGraph.staticPageRank(numIter = 2, resetProb).vertices.cache()
    // Static PageRank should only take 2 iterations to converge
    val notMatching = staticRanks1.innerZipJoin(staticRanks2) { (vid, pr1, pr2) =>
      if (pr1 != pr2) 1 else 0
    }.map { case (vid, test) => test }.sum()
    assert(notMatching === 0)
    // Leaves keep rank resetProb; the center accumulates the leaves' mass.
    val staticErrors = staticRanks2.map { case (vid, pr) =>
      val p = math.abs(pr - (resetProb + (1.0 - resetProb) * (resetProb * (nVertices - 1)) ))
      val correct = (vid > 0 && pr == resetProb) || (vid == 0L && p < 1.0E-5)
      if (!correct) 1 else 0
    }
    assert(staticErrors.sum === 0)
    // Dynamic (tolerance-based) PageRank must agree with the static result.
    val dynamicRanks = starGraph.pageRank(0, resetProb).vertices.cache()
    assert(compareRanks(staticRanks2, dynamicRanks) < errorTol)
  }
} // end of test Star PageRank

test("Star PersonalPageRank") {
  withSpark { sc =>
    val nVertices = 100
    val starGraph = GraphGenerators.starGraph(sc, nVertices).cache()
    val resetProb = 0.15
    val errorTol = 1.0e-5
    val staticRanks1 = starGraph.staticPersonalizedPageRank(0, numIter = 1, resetProb).vertices
    val staticRanks2 = starGraph.staticPersonalizedPageRank(0, numIter = 2, resetProb)
      .vertices.cache()
    // Static PageRank should only take 2 iterations to converge
    val notMatching = staticRanks1.innerZipJoin(staticRanks2) { (vid, pr1, pr2) =>
      if (pr1 != pr2) 1 else 0
    }.map { case (vid, test) => test }.sum
    assert(notMatching === 0)
    // Personalized on vertex 0 (a sink): all mass stays at the source.
    val staticErrors = staticRanks2.map { case (vid, pr) =>
      val correct = (vid > 0 && pr == 0.0) ||
        (vid == 0 && pr == resetProb)
      if (!correct) 1 else 0
    }
    assert(staticErrors.sum === 0)
    val dynamicRanks = starGraph.personalizedPageRank(0, 0, resetProb).vertices.cache()
    assert(compareRanks(staticRanks2, dynamicRanks) < errorTol)
    // The parallel variant computes ranks for several sources at once; each
    // source's column must match the single-source computation.
    val parallelStaticRanks1 = starGraph
      .staticParallelPersonalizedPageRank(Array(0), 1, resetProb).mapVertices {
        case (vertexId, vector) => vector(0)
      }.vertices.cache()
    assert(compareRanks(staticRanks1, parallelStaticRanks1) < errorTol)
    val parallelStaticRanks2 = starGraph
      .staticParallelPersonalizedPageRank(Array(0, 1), 2, resetProb).mapVertices {
        case (vertexId, vector) => vector(0)
      }.vertices.cache()
    assert(compareRanks(staticRanks2, parallelStaticRanks2) < errorTol)
    // We have one outbound edge from 1 to 0
    val otherStaticRanks2 = starGraph.staticPersonalizedPageRank(1, numIter = 2, resetProb)
      .vertices.cache()
    val otherDynamicRanks = starGraph.personalizedPageRank(1, 0, resetProb).vertices.cache()
    val otherParallelStaticRanks2 = starGraph
      .staticParallelPersonalizedPageRank(Array(0, 1), 2, resetProb).mapVertices {
        case (vertexId, vector) => vector(1)
      }.vertices.cache()
    assert(compareRanks(otherDynamicRanks, otherStaticRanks2) < errorTol)
    assert(compareRanks(otherStaticRanks2, otherParallelStaticRanks2) < errorTol)
    assert(compareRanks(otherDynamicRanks, otherParallelStaticRanks2) < errorTol)
  }
} // end of test Star PersonalPageRank

test("Grid PageRank") {
  withSpark { sc =>
    val rows = 10
    val cols = 10
    val resetProb = 0.15
    val tol = 0.0001
    val numIter = 50
    val errorTol = 1.0e-5
    val gridGraph = GraphGenerators.gridGraph(sc, rows, cols).cache()
    val staticRanks = gridGraph.staticPageRank(numIter, resetProb).vertices.cache()
    val dynamicRanks = gridGraph.pageRank(tol, resetProb).vertices.cache()
    // Both implementations are checked against the local reference
    // implementation in GridPageRank.
    val referenceRanks = VertexRDD(
      sc.parallelize(GridPageRank(rows, cols, numIter, resetProb))).cache()
    assert(compareRanks(staticRanks, referenceRanks) < errorTol)
    assert(compareRanks(dynamicRanks, referenceRanks) < errorTol)
  }
} // end of Grid PageRank

test("Chain PageRank") {
  withSpark { sc =>
    // Chain 0 -> 1 -> ... -> 9 built from edge tuples.
    val chain1 = (0 until 9).map(x => (x, x + 1))
    val rawEdges = sc.parallelize(chain1, 1).map { case (s, d) => (s.toLong, d.toLong) }
    val chain = Graph.fromEdgeTuples(rawEdges, 1.0).cache()
    val resetProb = 0.15
    val tol = 0.0001
    val numIter = 10
    val errorTol = 1.0e-5
    val staticRanks = chain.staticPageRank(numIter, resetProb).vertices
    val dynamicRanks = chain.pageRank(tol, resetProb).vertices
    assert(compareRanks(staticRanks, dynamicRanks) < errorTol)
  }
}
test("Chain PersonalizedPageRank") {
withSpark { sc =>
val chain1 = (0 until 9).map(x => (x, x + 1) )
val rawEdges = sc.parallelize(chain1, 1).map { case (s, d) => (s.toLong, d.toLong) }
val chain = Graph.fromEdgeTuples(rawEdges, 1.0).cache()
val resetProb = 0.15
val tol = 0.0001
val numIter = 10
val errorTol = 1.0e-1
val staticRanks = chain.staticPersonalizedPageRank(4, numIter, resetProb).vertices
val dynamicRanks = chain.personalizedPageRank(4, tol, resetProb).vertices
assert(compareRanks(staticRanks, dynamicRanks) < errorTol)
val parallelStaticRanks = chain
.staticParallelPersonalizedPageRank(Array(4), numIter, resetProb).mapVertices {
case (vertexId, vector) => vector(0)
}.vertices.cache()
assert(compareRanks(staticRanks, parallelStaticRanks) < errorTol)
}
}
}
| u2009cf/spark-radar | graphx/src/test/scala/org/apache/spark/graphx/lib/PageRankSuite.scala | Scala | apache-2.0 | 8,046 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
/**
 * Executes a roll up-style query against Apache logs: each line is parsed
 * with a regex, keyed by (ip, user, query), and for every key the number of
 * hits and the total number of response bytes are aggregated and printed.
 *
 * Usage: LogQuery <master> [logFile]
 */
object LogQuery {

  // Two sample Apache access-log lines, used when no log file is supplied
  // on the command line.
  val exampleApacheLogs = List(
    """10.10.10.10 - "FRED" [18/Jan/2013:17:56:07 +1100] "GET http://images.com/2013/Generic.jpg
| HTTP/1.1" 304 315 "http://referall.com/" "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1;
| GTB7.4; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR
| 3.5.21022; .NET CLR 3.0.4506.2152; .NET CLR 1.0.3705; .NET CLR 1.1.4322; .NET CLR
| 3.5.30729; Release=ARP)" "UD-1" - "image/jpeg" "whatever" 0.350 "-" - "" 265 923 934 ""
| 62.24.11.25 images.com 1358492167 - Whatup""".stripMargin.replace("\\n", ""),
    """10.10.10.10 - "FRED" [18/Jan/2013:18:02:37 +1100] "GET http://images.com/2013/Generic.jpg
| HTTP/1.1" 304 306 "http:/referall.com" "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1;
| GTB7.4; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR
| 3.5.21022; .NET CLR 3.0.4506.2152; .NET CLR 1.0.3705; .NET CLR 1.1.4322; .NET CLR
| 3.5.30729; Release=ARP)" "UD-1" - "image/jpeg" "whatever" 0.352 "-" - "" 256 977 988 ""
| 0 73.23.2.15 images.com 1358492557 - Whatup""".stripMargin.replace("\\n", "")
  )

  def main(args: Array[String]) {
    if (args.length == 0) {
      System.err.println("Usage: LogQuery <master> [logFile]")
      System.exit(1)
    }

    val sc = new SparkContext(args(0), "Log Query",
      System.getenv("SPARK_HOME"), SparkContext.jarOfClass(this.getClass))

    // Use the supplied log file when given, otherwise the built-in samples.
    val dataSet =
      if (args.length == 2) sc.textFile(args(1))
      else sc.parallelize(exampleApacheLogs)

    // Capture groups, in order: ip, client identity, user, date/time,
    // request line, status, bytes, referer, user agent.
    val apacheLogRegex =
      """^([\\d.]+) (\\S+) (\\S+) \\[([\\w\\d:/]+\\s[+\\-]\\d{4})\\] "(.+?)" (\\d{3}) ([\\d\\-]+) "([^"]+)" "([^"]+)".*""".r

    /** Tracks the total query count and number of aggregate bytes for a particular group. */
    class Stats(val count: Int, val numBytes: Int) extends Serializable {
      def merge(other: Stats) = new Stats(count + other.count, numBytes + other.numBytes)
      override def toString = "bytes=%s\\tn=%s".format(numBytes, count)
    }

    // Grouping key: (ip, user, query); (null, null, null) for lines that do
    // not parse or that carry the anonymous user "-", so they all collapse
    // into a single group.
    def extractKey(line: String): (String, String, String) = {
      apacheLogRegex.findFirstIn(line) match {
        case Some(apacheLogRegex(ip, _, user, dateTime, query, status, bytes, referer, ua)) =>
          if (user != "\\"-\\"") (ip, user, query)
          else (null, null, null)
        case _ => (null, null, null)
      }
    }

    // Per-line stats: one hit plus the response size (0 for unparsable lines).
    // NOTE(review): the bytes group ([\\d\\-]+) can match "-"; bytes.toInt
    // would then throw NumberFormatException — confirm inputs never use "-".
    def extractStats(line: String): Stats = {
      apacheLogRegex.findFirstIn(line) match {
        case Some(apacheLogRegex(ip, _, user, dateTime, query, status, bytes, referer, ua)) =>
          new Stats(1, bytes.toInt)
        case _ => new Stats(1, 0)
      }
    }

    // Aggregate per key and print one "key<TAB>stats" line per group.
    dataSet.map(line => (extractKey(line), extractStats(line)))
      .reduceByKey((a, b) => a.merge(b))
      .collect().foreach {
        case (user, query) => println("%s\\t%s".format(user, query))
      }
  }
}
| amirhyoussefi/spark-template | src/main/scala/org/apache/spark/examples/LogQuery.scala | Scala | apache-2.0 | 3,844 |
package lila.site
import scala.concurrent.duration._
import akka.actor._
import play.api.libs.json._
import actorApi._
import lila.common.PimpedJson._
import lila.socket._
import lila.socket.actorApi.StartWatching
/** Builds JSON socket handlers for site-level client connections. */
private[site] final class SocketHandler(
    socket: ActorRef,
    hub: lila.hub.Env) {

  /**
   * Creates a handler for one client socket.
   *
   * @param uid    the socket uid of the connecting client
   * @param userId the authenticated user, if any
   * @param flag   an optional connection flag forwarded to the socket actor
   */
  def apply(
      uid: String,
      userId: Option[String],
      flag: Option[String]): Fu[JsSocketHandler] = {

    // Per-member message controller. Only the "startWatching" event is
    // handled: it registers the member as a watcher of the ids carried in
    // the space-separated "d" field of the message.
    def makeController(member: Member): Handler.Controller = {
      case ("startWatching", o) =>
        o.str("d").foreach { ids =>
          hub.actor.moveBroadcast ! StartWatching(uid, member, ids.split(' ').toSet)
        }
    }

    Handler(hub, socket, uid, Join(uid, userId, flag), userId) {
      case Connected(enum, member) => (makeController(member), enum, member)
    }
  }
}
| danilovsergey/i-bur | modules/site/src/main/SocketHandler.scala | Scala | mit | 786 |
package ems.storage
import java.io.{FileInputStream, File}
import ems.{MIMEType, StreamingAttachment, Attachment}
import ems.model.Entity
import org.joda.time.DateTime
/** An attachment whose bytes live in a file on the local filesystem. */
case class FileAttachment(id: Option[String], file: File, name: String, mediaType: Option[MIMEType]) extends Entity[Attachment] with Attachment {

  def withId(id: String) = copy(id = Some(id))

  // Derived from the backing file's modification time.
  def lastModified = new DateTime(file.lastModified)

  // Size in bytes of the backing file.
  def size = Some(file.length())

  // Opens a fresh stream on every call; the caller is responsible for
  // closing it.
  def data = new FileInputStream(file)
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.worker.ui
import java.io.File
import javax.servlet.http.HttpServletRequest
import org.apache.spark.Logging
import org.apache.spark.deploy.worker.Worker
import org.apache.spark.ui.{SparkUI, WebUI}
import org.apache.spark.ui.JettyUtils._
import org.apache.spark.util.RpcUtils
/**
 * Web UI server for the standalone worker.
 *
 * Serves the worker status page, per-executor/driver log pages, the shared
 * static assets, and a raw-log servlet endpoint at /log.
 */
private[worker]
class WorkerWebUI(
    val worker: Worker,
    val workDir: File,
    requestedPort: Int)
  extends WebUI(worker.securityMgr, requestedPort, worker.conf, name = "WorkerUI")
  with Logging {

  // Timeout used by the UI pages when querying the worker over RPC.
  private[ui] val timeout = RpcUtils.askRpcTimeout(worker.conf)

  initialize()

  /** Initialize all components of the server. */
  def initialize() {
    val logPage = new LogPage(this)
    attachPage(logPage)
    attachPage(new WorkerPage(this))
    // Static assets (css/js) shared with the other Spark web UIs.
    attachHandler(createStaticHandler(WorkerWebUI.STATIC_RESOURCE_BASE, "/static"))
    // Raw log content (renderLog) rather than the HTML log page.
    attachHandler(createServletHandler("/log",
      (request: HttpServletRequest) => logPage.renderLog(request),
      worker.securityMgr,
      worker.conf))
  }
}
private[worker] object WorkerWebUI {
  /** Location of the static resources shared by all Spark web UIs. */
  val STATIC_RESOURCE_BASE = SparkUI.STATIC_RESOURCE_DIR
  /** Default cap on how many finished executors the UI retains. */
  val DEFAULT_RETAINED_EXECUTORS = 1000
  /** Default cap on how many finished drivers the UI retains. */
  val DEFAULT_RETAINED_DRIVERS = 1000
}
| chenc10/Spark-PAF | core/src/main/scala/org/apache/spark/deploy/worker/ui/WorkerWebUI.scala | Scala | apache-2.0 | 2,044 |
package scala.models
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
/** Checks the JSON import lines generated for a service's dependencies. */
class JsonImportsSpec extends AnyFunSpec with Matchers {

  it("basic service") {
    val imports = JsonImports(models.TestHelper.referenceApiService)
    imports shouldBe Seq("import io.apibuilder.reference.api.v0.models.json._")
  }

  it("includes imports") {
    // Imported services contribute their own json._ imports as well.
    val imports = JsonImports(models.TestHelper.generatorApiService)
    imports shouldBe Seq(
      "import io.apibuilder.common.v0.models.json._",
      "import io.apibuilder.generator.v0.models.json._",
      "import io.apibuilder.spec.v0.models.json._"
    )
  }
}
| gheine/apidoc-generator | scala-generator/src/test/scala/models/JsonImportsSpec.scala | Scala | mit | 642 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jawa.flow.pta.rfa
import java.util.concurrent.TimeoutException
import org.argus.jawa.flow.Context
import org.argus.jawa.flow.cfg.{ICFGNode, InterProceduralControlFlowGraph}
import org.argus.jawa.flow.dfa._
import org.argus.jawa.flow.interprocedural.CallResolver
import org.argus.jawa.flow.pta.model.ModelCallHandler
import org.argus.jawa.flow.pta._
import org.argus.jawa.core.ast._
import org.argus.jawa.core.util._
import org.argus.jawa.core._
import org.argus.jawa.core.elements.{JavaKnowledge, JawaType}
import org.argus.jawa.flow.summary.SummaryManager
import scala.collection.immutable.BitSet
/**
 * Reaching facts analysis (RFA): a flow-sensitive, inter-procedural
 * points-to analysis built on the monotone data flow framework. Facts
 * ([[RFAFact]]) pair a slot (variable / field) with a points-to instance;
 * they are propagated over the inter-procedural CFG and published into the
 * shared [[PTAResult]].
 *
 * Created by fgwei on 6/29/17.
 */
class ReachingFactsAnalysis(
    global: Global,
    icfg: InterProceduralControlFlowGraph[ICFGNode],
    ptaresult: PTAResult,
    handler: ModelCallHandler,
    sm: SummaryManager,
    clm: ClassLoadManager,
    resolve_static_init: Boolean,
    timeout: Option[MyTimeout]) {

  type Node = ICFGNode

  // Raw result of the monotone framework run; assigned by process() and
  // left partially initialized (null) until then.
  var mdf: MonotoneDataFlowAnalysisResult[ICFGNode, RFAFact] = _

  /**
   * Runs the analysis rooted at the given entry point method.
   *
   * @param entryPointProc method the ICFG is rooted at
   * @param initialFacts   facts assumed to hold at the entry node
   * @param initContext    calling context used for the entry node
   * @param callr          resolver handling call/return edges
   * @return the ICFG together with the populated points-to result
   */
  def process (
      entryPointProc: JawaMethod,
      initialFacts: ISet[RFAFact] = isetEmpty,
      initContext: Context,
      callr: CallResolver[Node, RFAFact]): InterProceduralDataFlowGraph = {
    val gen = new Gen
    val kill = new Kill
    val initial: ISet[RFAFact] = isetEmpty
    val ip = new Ip(icfg)
    icfg.collectCfgToBaseGraph(entryPointProc, initContext, isFirst = true, callr.needReturnNode())
    // Seed the points-to result with the initial facts at the entry node.
    initialFacts.foreach { fact =>
      val entryContext = icfg.entryNode.getContext.copy
      ptaresult.addInstance(entryContext, fact.slot, fact.ins)
    }
    // "iota": the framework's entry fact set; a synthetic "Analysis.RFAiota"
    // fact with an unknown instance is added (presumably so the entry set is
    // never empty — confirm against the framework's requirements).
    val iota: ISet[RFAFact] = initialFacts + RFAFact(StaticFieldSlot("Analysis.RFAiota"), PTAInstance(JavaKnowledge.OBJECT.toUnknown, initContext.copy))
    try {
      mdf = MonotoneDataFlowAnalysisFramework[ICFGNode, RFAFact, Context](icfg,
        forward = true, lub = true, ip, gen, kill, Some(callr), iota, initial)
    } catch {
      case te: TimeoutException =>
        global.reporter.warning("ReachingFactsAnalysis", entryPointProc.getSignature + " " + te.getMessage)
    }
    // NOTE(review): if the framework throws before mdf is assigned, the
    // mdf.entrySet call below will NPE — confirm this is intended.
    // Publish the facts reaching the exit node into the points-to result.
    val finalFacts = mdf.entrySet(icfg.exitNode)
    finalFacts.foreach { fact =>
      val exitContext = icfg.exitNode.getContext.copy
      ptaresult.addInstance(exitContext, fact.slot, fact.ins)
    }
    InterProceduralDataFlowGraph(icfg, ptaresult)
  }

  /** GEN function: facts newly created by a statement. */
  class Gen extends MonotonicFunction[ICFGNode, RFAFact] {

    // Only object-typed assignments (plus a few special expression forms)
    // can create new points-to facts.
    protected def isInterestingAssignment(a: Assignment): Boolean = {
      a match{
        case as: AssignmentStatement =>
          as.rhs match {
            case _: CastExpression => true
            case _: ConstClassExpression => true
            case _: ExceptionExpression => true
            case _: Expression with New => true
            case _: NullExpression => true
            case _ =>
              as.kind == "object"
          }
        case _ => false
      }
    }

    private def handleAssignmentStatement(s: ISet[RFAFact], a: AssignmentStatement, currentNode: ICFGNode): ISet[RFAFact] = {
      var result: ISet[RFAFact] = isetEmpty
      if(isInterestingAssignment(a)) {
        val lhsOpt = a.getLhs
        val rhs = a.getRhs
        val heapUnknownFacts = ReachingFactsAnalysisHelper.getHeapUnknownFacts(rhs, currentNode.getContext, ptaresult)
        result ++= heapUnknownFacts
        // The boolean in the slot map flags "strong" definitions; GEN
        // ignores it (it only matters for KILL below) and assigns the RHS
        // values to every LHS slot.
        val slots: IMap[PTASlot, Boolean] = lhsOpt match {
          case Some(lhs) => ReachingFactsAnalysisHelper.processLHS(lhs, currentNode.getContext, ptaresult)
          case None => imapEmpty
        }
        val (values, extraFacts) = ReachingFactsAnalysisHelper.processRHS(rhs, currentNode.getContext, ptaresult)
        slots.foreach {
          case (slot, _) =>
            result ++= values.map{v => RFAFact(slot, v)}
        }
        result ++= extraFacts
      }
      // Statements that can throw also generate facts for the exception var.
      val exceptionFacts: ISet[RFAFact] = ReachingFactsAnalysisHelper.getExceptionFacts(a, s, currentNode.getContext)
      result ++= exceptionFacts
      result
    }

    def apply(s: ISet[RFAFact], e: Statement, currentNode: ICFGNode): ISet[RFAFact] = {
      var result: ISet[RFAFact] = isetEmpty
      e match{
        case as: AssignmentStatement =>
          result ++= handleAssignmentStatement(s, as, currentNode)
        case ta: ThrowStatement =>
          // Rebind the thrown value to the shared exception variable.
          val slot = VarSlot(ta.varSymbol.varName)
          val value = s.filter(_.s == slot).map(_.v)
          result ++= value.map(RFAFact(VarSlot(ExceptionCenter.EXCEPTION_VAR_NAME), _))
        case _ =>
      }
      result
    }
  }

  /** KILL function: facts invalidated by a statement (strong updates). */
  class Kill extends MonotonicFunction[ICFGNode, RFAFact] {

    private def handleAssignmentStatement(s: ISet[RFAFact], a: AssignmentStatement, currentNode: ICFGNode): ISet[RFAFact] = {
      var result = ReachingFactsAnalysisHelper.aggregate(s)
      val lhsOpt = a.getLhs
      lhsOpt match {
        case Some(lhs) =>
          val slotsWithMark = ReachingFactsAnalysisHelper.processLHS(lhs, currentNode.getContext, ptaresult).toSet
          for (rdf <- s) {
            //if it is a strong definition, we can kill the existing definition
            // (contains(rdf.s, true) auto-tuples into a (slot, true) lookup)
            if (slotsWithMark.contains(rdf.s, true)) {
              result = result - rdf
            }
          }
        case None =>
      }
      result
    }

    def apply(s: ISet[RFAFact], e: Statement, currentNode: ICFGNode): ISet[RFAFact] = {
      e match {
        case as: AssignmentStatement => handleAssignmentStatement(s, as, currentNode)
        case _ => s
      }
    }
  }

  // Loads `me` and all of its super classes (top-down) into the class load
  // manager and, when a static initializer exists and resolve_static_init is
  // set, wires the <clinit> CFG (or its model-call stand-in) into the graph
  // around the current node.
  private def checkAndLoadClassFromHierarchy(me: JawaClass, currentNode: Node): Unit = {
    if(me.hasSuperClass){
      checkAndLoadClassFromHierarchy(me.getSuperClass, currentNode)
    }
    val bitset = currentNode.getLoadedClassBitSet
    if(!clm.isLoaded(me, bitset)) {
      currentNode.setLoadedClassBitSet(clm.loadClass(me, bitset))
      if(me.declaresStaticInitializer) {
        val p = me.getStaticInitializer.get
        if(resolve_static_init) {
          if(handler.isModelCall(p)) {
            ReachingFactsAnalysisHelper.getUnknownObjectForClinit(p, currentNode.getContext)
          } else if(!this.icfg.isProcessed(p.getSignature, currentNode.getContext)) { // for normal call
            val nodes = this.icfg.collectCfgToBaseGraph(p, currentNode.getContext, isFirst = false, needReturnNode = true)
            nodes.foreach{n => n.setLoadedClassBitSet(clm.loadClass(me, bitset))}
            // Splice the <clinit> entry/exit into the graph at this node.
            val clinitVirEntryContext = currentNode.getContext.copy.setContext(p.getSignature, "Entry")
            val clinitVirExitContext = currentNode.getContext.copy.setContext(p.getSignature, "Exit")
            val clinitEntry = this.icfg.getICFGEntryNode(clinitVirEntryContext)
            val clinitExit = this.icfg.getICFGExitNode(clinitVirExitContext)
            this.icfg.addEdge(currentNode, clinitEntry)
            this.icfg.addEdge(clinitExit, currentNode)
          }
        }
      }
    }
  }

  // Resolves the class for `recTyp` and loads it (and its hierarchy).
  private def checkClass(recTyp: JawaType, currentNode: Node): Unit = {
    val rec = global.getClassOrResolve(recTyp)
    checkAndLoadClassFromHierarchy(rec, currentNode)
  }

  /**
   * A.<clinit>() will be called under four kinds of situation: v0 = new A, A.f = v1, v2 = A.f, and A.foo()
   * also for v0 = new B where B is descendant of A, first we call A.<clinit>, later B.<clinit>.
   */
  protected def checkAndLoadClasses(a: Statement, currentNode: Node): Unit = {
    a match {
      case as: AssignmentStatement =>
        // Static field write: load the declaring class.
        as.lhs match {
          case sfae: StaticFieldAccessExpression =>
            val slot = StaticFieldSlot(sfae.name)
            val recTyp = JavaKnowledge.getClassTypeFromFieldFQN(slot.fqn)
            checkClass(recTyp, currentNode)
          case _ =>
        }
        // Object allocation or static field read: load the involved class.
        as.rhs match {
          case ne: Expression with New =>
            val typ = ne.typ
            checkClass(typ, currentNode)
          case sfae: StaticFieldAccessExpression =>
            val slot = StaticFieldSlot(sfae.name)
            val recTyp = JavaKnowledge.getClassTypeFromFieldFQN(slot.fqn)
            checkClass(recTyp, currentNode)
          case _ =>
        }
      case cs: CallStatement =>
        // Static method call: load the receiver class.
        if (cs.kind == "static") {
          val recTyp = a.asInstanceOf[CallStatement].signature.getClassType
          checkClass(recTyp, currentNode)
        }
      case _ =>
    }
  }

  /**
   * Ingredient provider: hooks invoked by the framework around each visited
   * statement. Used here to keep [[ptaresult]] in sync with the current fact
   * set and to maintain the per-node loaded-class bit sets.
   */
  class Ip(icfg: InterProceduralControlFlowGraph[ICFGNode]) extends InterIngredientProvider[RFAFact](global, icfg) {

    override def preProcess(node: ICFGNode, statement: Statement, s: ISet[RFAFact]): Unit = {
      checkAndLoadClasses(statement, node)
      // Mirror the incoming facts into ptaresult for every value the
      // statement may read.
      statement match {
        case a: AssignmentStatement =>
          ReachingFactsAnalysisHelper.updatePTAResultRHS(a.rhs, node.getContext, s, ptaresult)
          ReachingFactsAnalysisHelper.updatePTAResultLHS(a.lhs, node.getContext, s, ptaresult)
        case _: EmptyStatement =>
        case m: MonitorStatement =>
          ReachingFactsAnalysisHelper.updatePTAResultVar(m.varSymbol.varName, node.getContext, s, ptaresult)
        case j: Jump =>
          j match {
            case cs: CallStatement =>
              ReachingFactsAnalysisHelper.updatePTAResultCallJump(cs, node.getContext, s, ptaresult, afterCall = false)
            case _: GotoStatement =>
            case is: IfStatement =>
              ReachingFactsAnalysisHelper.updatePTAResultExp(is.cond, node.getContext, s, ptaresult)
            case rs: ReturnStatement =>
              rs.varOpt match {
                case Some(v) =>
                  ReachingFactsAnalysisHelper.updatePTAResultVar(v.varName, node.getContext, s, ptaresult)
                case None =>
              }
            case ss: SwitchStatement =>
              ReachingFactsAnalysisHelper.updatePTAResultVar(ss.condition.varName, node.getContext, s, ptaresult)
          }
        case t: ThrowStatement =>
          ReachingFactsAnalysisHelper.updatePTAResultVar(t.varSymbol.varName, node.getContext, s, ptaresult)
      }
    }

    override def postProcess(node: ICFGNode, statement: Statement, s: ISet[RFAFact]): Unit = {
      // After a call, also record the facts visible on the return side.
      statement match {
        case cs: CallStatement =>
          ReachingFactsAnalysisHelper.updatePTAResultCallJump(cs, node.getContext, s, ptaresult, afterCall = true)
        case _ =>
      }
      // Record the outgoing facts for the assigned slot, if any.
      statement match {
        case a: Assignment =>
          a.getLhs match {
            case Some(lhs) =>
              lhs match {
                case vne: VariableNameExpression =>
                  val slot = VarSlot(vne.name)
                  s.filter { fact => fact.s == slot }.foreach(f => ptaresult.addInstance(node.getContext, slot, f.v))
                case sfae: StaticFieldAccessExpression =>
                  val slot = StaticFieldSlot(sfae.name)
                  s.filter { fact => fact.s == slot }.foreach(f => ptaresult.addInstance(node.getContext, slot, f.v))
                case _ =>
              }
            case None =>
          }
        case _ =>
      }
    }

    override def onPreVisitNode(node: ICFGNode, preds: CSet[ICFGNode]): Unit = {
      // A class counts as loaded at this node only if it is loaded on every
      // predecessor path (bitwise intersection of the predecessor bit sets).
      val bitset = if(preds.nonEmpty)preds.map{_.getLoadedClassBitSet}.reduce{ (x, y) => x.intersect(y)} else BitSet.empty
      node.setLoadedClassBitSet(bitset)
    }

    override def onPostVisitNode(node: ICFGNode, succs: CSet[ICFGNode]): Unit = {
      // Enforce the analysis-wide time budget, if one was configured.
      timeout foreach (_.timeoutThrow())
    }
  }
}
| arguslab/Argus-SAF | jawa/src/main/scala/org/argus/jawa/flow/pta/rfa/ReachingFactsAnalysis.scala | Scala | apache-2.0 | 11,643 |
package com.github.spirom.sparkflights.fw
import java.text.SimpleDateFormat
import java.util.Calendar
import java.util.Date

import scala.util.control.NonFatal

import org.apache.log4j.Logger
import org.apache.spark.sql.DataFrame
/**
 * Base class for a named, timed experiment run against a Spark DataFrame.
 *
 * Subclasses implement [[runQuery]]; [[run]] wraps it with wall-clock
 * timing, logging, and recording of an [[ExperimentResult]] (success or
 * failure) into the supplied [[Results]].
 */
abstract class Experiment(val name: String) {

  val logger = Logger.getLogger(getClass.getName)

  /** Executes the experiment's query. Implemented by concrete experiments. */
  def runQuery(df: DataFrame, runOutputBase: String, index: Int): Unit

  /**
   * Runs [[runQuery]], measures its duration, and records a result whether
   * it succeeds or fails. Fatal JVM errors are not swallowed.
   */
  def run(df: DataFrame, runOutputBase: String, index: Int, results: Results): Unit = {
    // "HH" (0-23) rather than the original "hh" (1-12): the 12-hour pattern
    // without an AM/PM marker produced ambiguous timestamps in the logs.
    val timeFormat = new SimpleDateFormat("HH:mm:ss")
    val before = Calendar.getInstance().getTime()
    logger.info(s"Running $name at ${timeFormat.format(before)}")
    try {
      runQuery(df, runOutputBase, index)
      record(results, timeFormat, before, None)
    } catch {
      // NonFatal instead of Throwable so truly fatal errors (OutOfMemoryError,
      // InterruptedException, ...) propagate instead of being recorded as an
      // ordinary experiment failure.
      case NonFatal(t) =>
        record(results, timeFormat, before, Some(t))
    }
  }

  /** Computes the elapsed time, stores the result, then logs the outcome. */
  private def record(
      results: Results,
      timeFormat: SimpleDateFormat,
      before: Date,
      failure: Option[Throwable]): Unit = {
    val after = Calendar.getInstance().getTime()
    val diff = after.getTime - before.getTime
    results.add(new ExperimentResult(this, before, after, diff, failure))
    failure match {
      case None =>
        logger.info(s"Completed $name at ${timeFormat.format(after)} after $diff msec")
      case Some(t) =>
        logger.warn(s"Failed $name at ${timeFormat.format(after)} after $diff msec", t)
    }
  }
}
| spirom/SparkFlightExamples | src/main/scala/com/github/spirom/sparkflights/fw/Experiment.scala | Scala | mit | 1,309 |
package challenge
import scala.util.Random
import scala.math.{abs, max, pow}
/**
 * r/dailyprogrammer intermediate #191: find a path through a randomly
 * generated space terrain from a start cell to a goal cell, moving in the
 * 8 king directions, avoiding asteroids and gravity wells.
 */
object Int191 {

  /** A grid coordinate (row, col). */
  case class Pos(row: Int, col: Int)

  /**
   * A randomly generated N x N terrain: ~20% asteroid cells and ~7% gravity
   * wells (whose 8-neighbourhoods are also impassable), plus fixed start and
   * goal positions that are kept clear.
   */
  class Terrain(N: Int, start: Pos, goal: Pos) {
    val AsteroidPercentage = 0.2
    val GravityWellPercentage = 0.07
    val size = pow(N, 2)
    val asteroids = (AsteroidPercentage * size).toInt
    val gravityWells = (GravityWellPercentage * size).toInt

    /** Picks `remaining` distinct random cells, none of them in `forbidden`. */
    def generateGridElement(remaining: Int, forbidden: Set[Pos]): Set[Pos] = {
      def generateGridElementHelper(remaining: Int,
                                    placed: Set[Pos]): Set[Pos] = {
        if (remaining == 0) {
          placed
        } else {
          val newPlace = Pos(Random.nextInt(N), Random.nextInt(N))
          // Re-draw on collision with an already placed or forbidden cell.
          if ((placed contains newPlace) || (forbidden contains newPlace))
            generateGridElementHelper(remaining, placed)
          else
            generateGridElementHelper(remaining - 1, placed + newPlace)
        }
      }
      generateGridElementHelper(remaining, Set())
    }

    val startPosition = start
    val goalPosition = goal
    val allForbidden = Set(start, goal)
    val asteroidPositions = generateGridElement(asteroids, allForbidden)
    // Gravity wells may not overlap asteroids nor touch the start/goal
    // neighbourhoods (a well makes its 8 neighbours inaccessible).
    val gravityForbidden = asteroidPositions ++ adjacentPositions(start) ++
      adjacentPositions(goal) ++ allForbidden
    val gravityWellPositions = generateGridElement(gravityWells,
      gravityForbidden)

    /** The 8 neighbours of `pos` (some may lie outside the grid). */
    def adjacentPositions(pos: Pos): IndexedSeq[Pos] = for {
      i <- -1 to 1
      j <- -1 to 1
      if i != 0 || j != 0
    } yield Pos(pos.row - i, pos.col - j)

    /** True when `pos` is inside the grid, not an asteroid, and not adjacent
      * to any gravity well. */
    def accessiblePosition(pos: Pos): Boolean =
      pos.row >= 0 && pos.row < N && pos.col >= 0 && pos.col < N &&
        !(asteroidPositions contains pos) && (adjacentPositions(pos) forall
        (!gravityWellPositions.contains(_)))

    /** Renders the terrain, marking the cells of `path` with 'O'. */
    def makeGrid(path: Set[Pos]): String = {
      val elemGrid = Vector.tabulate(N, N)((row, col) => {
        val pos = Pos(row, col)
        if (asteroidPositions contains pos) "A"
        else if (gravityWellPositions contains pos) "G"
        else if (startPosition == pos) "S"
        else if (goalPosition == pos) "E"
        else if (path contains pos) "O"
        else "."
      })
      elemGrid map (_.mkString) mkString "\\n"
    }
  }

  def main(args: Array[String]): Unit = {
    val N = 10
    val Start = Pos(0, 0)
    val Goal = Pos(9, 9)
    val terrain = new Terrain(N, Start, Goal)
    val path = closestPath(terrain)
    println(terrain.makeGrid(path))
  }

  /**
   * Breadth-first stream of all simple paths from the start position. Each
   * path is a list of positions in reverse order (head = most recent cell).
   */
  def allPaths(terrain: Terrain): Stream[List[Pos]] = {
    def from(paths: Stream[List[Pos]],
             explored: Set[Pos]): Stream[List[Pos]] = {
      if (paths.isEmpty) {
        Stream()
      } else {
        val more: Stream[List[Pos]] = for {
          path <- paths
          next <- terrain.adjacentPositions(path.head)
          if terrain.accessiblePosition(next)
          if !(explored contains next)
        } yield next :: path
        paths #::: from(more, explored ++ (more map (_.head)))
      }
    }
    // BUG FIX: seed the explored set with the start position. With an empty
    // initial set, the second expansion level could step back onto the start
    // cell (it only entered `explored` one level late), generating redundant
    // non-simple paths.
    from(Stream(List(terrain.startPosition)), Set(terrain.startPosition))
  }

  /** Chebyshev distance: the number of 8-directional moves between cells. */
  def distance(curr: Pos, target: Pos): Int =
    max(abs(curr.row - target.row), abs(curr.col - target.col))

  /** The path whose head is closer to `target`; ties favour `a`. */
  def minDistance(a: List[Pos], b: List[Pos], target: Pos): List[Pos] =
    if (distance(a.head, target) <= distance(b.head, target)) a else b

  /**
   * The set of positions on a path reaching the goal, or — when the goal is
   * unreachable — on the path whose endpoint is closest to it.
   */
  def closestPath(terrain: Terrain): Set[Pos] = {
    val paths = allPaths(terrain)
    val pathsReachingGoal = paths filter (_.head == terrain.goalPosition)
    lazy val pathClosestGoal: List[Pos] = paths reduceLeft
      ((closest, next) => minDistance(closest, next, terrain.goalPosition))
    val path =
      if (pathsReachingGoal.nonEmpty)
        pathsReachingGoal.head // safe: guarded by nonEmpty
      else
        pathClosestGoal
    path.toSet
  }
}
| nichwn/dailyprogrammer-scala | src/main/scala/challenge/Int191.scala | Scala | mit | 3,759 |
package fr.thomasdufour.autodiff.examples
/** Minimal example of semi-automatically deriving a Diff instance. */
object SimpleDerivation {

  def main( args: Array[String] ): Unit = {
    case class Item( description: String )
    case class Bag( items: List[Item] )

    import fr.thomasdufour.autodiff.Diff
    import fr.thomasdufour.autodiff.Pretty
    import fr.thomasdufour.autodiff.derived

    // Semi-automatic derivation of Diff[Bag]: the auto._ import is scoped to
    // this block so automatic derivation only kicks in where we explicitly
    // request it via derived.semi.diff.
    implicit val bagDiff: Diff[Bag] = {
      import derived.auto._
      derived.semi.diff
    }

    // Prints a colorized structural diff of the two bags.
    println(
      Pretty.colorized2.showDiff(
        Bag( Item( "a wombat" ) :: Item( "coffee" ) :: Item( "a green fountain pen" ) :: Nil ),
        Bag( Item( "4 paperclips" ) :: Item( "coffee" ) :: Nil )
      )
    )
  }
}
| chwthewke/auto-diff | auto-diff-examples/src/main/scala/fr/thomasdufour/autodiff/examples/SimpleDerivation.scala | Scala | apache-2.0 | 655 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.sexp.util
/** Mix-in providing a factory for [[ThreadLocal]]s whose per-thread initial
  * value is computed lazily from the given by-name expression. */
trait ThreadLocalSupport {
  protected def local[T](t: => T): ThreadLocal[T] =
    new ThreadLocal[T] {
      override def initialValue: T = t
    }
}
| sugakandrey/ensime-server | s-express/src/main/scala/org/ensime/sexp/util/ThreadLocalSupport.scala | Scala | gpl-3.0 | 283 |
package io.swagger.client.api
import io.swagger.client.model.Inline_response_200_15
import io.swagger.client.model.Inline_response_200_16
import io.swagger.client.model.UnitCategory
import io.swagger.client.model.Inline_response_200_2
import io.swagger.client.ApiInvoker
import io.swagger.client.ApiException
import com.sun.jersey.multipart.FormDataMultiPart
import com.sun.jersey.multipart.file.FileDataBodyPart
import javax.ws.rs.core.MediaType
import java.io.File
import java.util.Date
import scala.collection.mutable.HashMap
/**
 * Swagger-generated client for the /unitCategories endpoints:
 * list (GET), create (POST), read (GET /{id}), update (PUT /{id}),
 * and delete (DELETE /{id}).
 *
 * NOTE(review): generated code. The repeated `String.valueOf(x) != "null"`
 * guards skip null parameters, but they would also skip a parameter whose
 * value is the literal string "null" — confirm that is acceptable.
 */
class UnitCategoryApi(val defBasePath: String = "https://app.quantimo.do/api/v2",
                      defApiInvoker: ApiInvoker = ApiInvoker) {
  var basePath = defBasePath
  var apiInvoker = defApiInvoker

  /** Adds a default header sent with every subsequent request. */
  def addHeader(key: String, value: String) = apiInvoker.defaultHeaders += key -> value

  /**
   * Get all UnitCategories
   * Get all UnitCategories
   * @param name name
   * @param createdAt created_at
   * @param updatedAt updated_at
   * @param limit limit
   * @param offset offset
   * @param sort sort
   * @return Inline_response_200_15, or None on a 404 response
   */
  def unitCategoriesGet (name: String, createdAt: String, updatedAt: String, limit: Integer, offset: Integer, sort: String) : Option[Inline_response_200_15] = {
    // create path and map variables
    val path = "/unitCategories".replaceAll("\\\\{format\\\\}","json")

    val contentTypes = List("application/json", "application/json")
    val contentType = contentTypes(0)

    // query params
    val queryParams = new HashMap[String, String]
    val headerParams = new HashMap[String, String]
    val formParams = new HashMap[String, String]

    // Only non-null parameters are forwarded as query parameters.
    if(String.valueOf(name) != "null") queryParams += "name" -> name.toString
    if(String.valueOf(createdAt) != "null") queryParams += "created_at" -> createdAt.toString
    if(String.valueOf(updatedAt) != "null") queryParams += "updated_at" -> updatedAt.toString
    if(String.valueOf(limit) != "null") queryParams += "limit" -> limit.toString
    if(String.valueOf(offset) != "null") queryParams += "offset" -> offset.toString
    if(String.valueOf(sort) != "null") queryParams += "sort" -> sort.toString

    var postBody: AnyRef = null

    if(contentType.startsWith("multipart/form-data")) {
      val mp = new FormDataMultiPart()
      postBody = mp
    }
    else {
    }

    try {
      apiInvoker.invokeApi(basePath, path, "GET", queryParams.toMap, formParams.toMap, postBody, headerParams.toMap, contentType) match {
        case s: String =>
          Some(ApiInvoker.deserialize(s, "", classOf[Inline_response_200_15]).asInstanceOf[Inline_response_200_15])
        case _ => None
      }
    } catch {
      // A 404 is mapped to None; every other API error propagates.
      case ex: ApiException if ex.code == 404 => None
      case ex: ApiException => throw ex
    }
  }

  /**
   * Store UnitCategory
   * Store UnitCategory
   * @param body UnitCategory that should be stored
   * @return Inline_response_200_16, or None on a 404 response
   */
  def unitCategoriesPost (body: UnitCategory) : Option[Inline_response_200_16] = {
    // create path and map variables
    val path = "/unitCategories".replaceAll("\\\\{format\\\\}","json")

    val contentTypes = List("application/json", "application/json")
    val contentType = contentTypes(0)

    // query params
    val queryParams = new HashMap[String, String]
    val headerParams = new HashMap[String, String]
    val formParams = new HashMap[String, String]

    // The request body is the serialized UnitCategory.
    var postBody: AnyRef = body

    if(contentType.startsWith("multipart/form-data")) {
      val mp = new FormDataMultiPart()
      postBody = mp
    }
    else {
    }

    try {
      apiInvoker.invokeApi(basePath, path, "POST", queryParams.toMap, formParams.toMap, postBody, headerParams.toMap, contentType) match {
        case s: String =>
          Some(ApiInvoker.deserialize(s, "", classOf[Inline_response_200_16]).asInstanceOf[Inline_response_200_16])
        case _ => None
      }
    } catch {
      case ex: ApiException if ex.code == 404 => None
      case ex: ApiException => throw ex
    }
  }

  /**
   * Get UnitCategory
   * Get UnitCategory
   * @param id id of UnitCategory
   * @return Inline_response_200_16, or None on a 404 response
   */
  def unitCategoriesIdGet (id: Integer) : Option[Inline_response_200_16] = {
    // create path and map variables; the {id} placeholder is substituted in.
    val path = "/unitCategories/{id}".replaceAll("\\\\{format\\\\}","json").replaceAll("\\\\{" + "id" + "\\\\}",apiInvoker.escape(id))

    val contentTypes = List("application/json", "application/json")
    val contentType = contentTypes(0)

    // query params
    val queryParams = new HashMap[String, String]
    val headerParams = new HashMap[String, String]
    val formParams = new HashMap[String, String]

    var postBody: AnyRef = null

    if(contentType.startsWith("multipart/form-data")) {
      val mp = new FormDataMultiPart()
      postBody = mp
    }
    else {
    }

    try {
      apiInvoker.invokeApi(basePath, path, "GET", queryParams.toMap, formParams.toMap, postBody, headerParams.toMap, contentType) match {
        case s: String =>
          Some(ApiInvoker.deserialize(s, "", classOf[Inline_response_200_16]).asInstanceOf[Inline_response_200_16])
        case _ => None
      }
    } catch {
      case ex: ApiException if ex.code == 404 => None
      case ex: ApiException => throw ex
    }
  }

  /**
   * Update UnitCategory
   * Update UnitCategory
   * @param id id of UnitCategory
   * @param body UnitCategory that should be updated
   * @return Inline_response_200_2, or None on a 404 response
   */
  def unitCategoriesIdPut (id: Integer, body: UnitCategory) : Option[Inline_response_200_2] = {
    // create path and map variables
    val path = "/unitCategories/{id}".replaceAll("\\\\{format\\\\}","json").replaceAll("\\\\{" + "id" + "\\\\}",apiInvoker.escape(id))

    val contentTypes = List("application/json", "application/json")
    val contentType = contentTypes(0)

    // query params
    val queryParams = new HashMap[String, String]
    val headerParams = new HashMap[String, String]
    val formParams = new HashMap[String, String]

    var postBody: AnyRef = body

    if(contentType.startsWith("multipart/form-data")) {
      val mp = new FormDataMultiPart()
      postBody = mp
    }
    else {
    }

    try {
      apiInvoker.invokeApi(basePath, path, "PUT", queryParams.toMap, formParams.toMap, postBody, headerParams.toMap, contentType) match {
        case s: String =>
          Some(ApiInvoker.deserialize(s, "", classOf[Inline_response_200_2]).asInstanceOf[Inline_response_200_2])
        case _ => None
      }
    } catch {
      case ex: ApiException if ex.code == 404 => None
      case ex: ApiException => throw ex
    }
  }

  /**
   * Delete UnitCategory
   * Delete UnitCategory
   * @param id id of UnitCategory
   * @return Inline_response_200_2, or None on a 404 response
   */
  def unitCategoriesIdDelete (id: Integer) : Option[Inline_response_200_2] = {
    // create path and map variables
    val path = "/unitCategories/{id}".replaceAll("\\\\{format\\\\}","json").replaceAll("\\\\{" + "id" + "\\\\}",apiInvoker.escape(id))

    val contentTypes = List("application/json", "application/json")
    val contentType = contentTypes(0)

    // query params
    val queryParams = new HashMap[String, String]
    val headerParams = new HashMap[String, String]
    val formParams = new HashMap[String, String]

    var postBody: AnyRef = null

    if(contentType.startsWith("multipart/form-data")) {
      val mp = new FormDataMultiPart()
      postBody = mp
    }
    else {
    }

    try {
      apiInvoker.invokeApi(basePath, path, "DELETE", queryParams.toMap, formParams.toMap, postBody, headerParams.toMap, contentType) match {
        case s: String =>
          Some(ApiInvoker.deserialize(s, "", classOf[Inline_response_200_2]).asInstanceOf[Inline_response_200_2])
        case _ => None
      }
    } catch {
      case ex: ApiException if ex.code == 404 => None
      case ex: ApiException => throw ex
    }
  }
}
| QuantiModo/QuantiModo-SDK-Scala | src/main/scala/io/swagger/client/api/UnitCategoryApi.scala | Scala | gpl-2.0 | 8,117 |
package com.example.http4s
package site
import cats.effect.IO
import org.http4s._
import org.http4s.dsl.io._
/** Tiny http4s example service: answers GET /hello with a greeting. */
object HelloBetterWorld {
  // The http4s-dsl extractor `GET -> Root / "hello"` pattern-matches the
  // request method and path; `Ok` is the EntityResponseGenerator that builds
  // the IO[Response] for us, so no manual Response construction is needed.
  val service = HttpService[IO] {
    case GET -> Root / "hello" => Ok("Hello, better world.")
  }
}
| reactormonk/http4s | examples/src/main/scala/com/example/http4s/site/HelloBetterWorld.scala | Scala | apache-2.0 | 445 |
package sparkExample
import org.apache.spark.mllib.classification.SVMWithSGD
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
import org.apache.spark.mllib.optimization.L1Updater
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.{SparkContext, SparkConf}
/**
* Created by root on 15-9-3.
* Project: ${Project_name}.
*
* Just for Matrix.
*/
/**
 * Example: train a linear SVM (SGD) on the bundled LIBSVM sample data,
 * report AUC on a held-out split, then re-train with L1 regularisation.
 */
object LinearModelTest {
  def main (args: Array[String]) {
    // Local, single-threaded Spark context for this self-contained example.
    val conf = new SparkConf()
      .setAppName("LinearModelTest")
      .setMaster("local")
    val sc = new SparkContext(conf)

    val dataDir = "/media/Document/programm/" +
      "GitHub/spark_pre/spark/data/mllib/"
    val examples = MLUtils.loadLibSVMFile(sc, dataDir + "sample_libsvm_data.txt")
    println(examples.count() + " is the count of the data")

    // Random split with a fixed seed for reproducibility (Spark normalises
    // the weights, so 0.8/0.4 behaves as a 2:1 split).
    val parts = examples.randomSplit(Array(0.8, 0.4), seed = 11L)
    println(parts.length + " is the length of the splits")
    val training = parts(0).cache()
    val test = parts(1)

    // Train the SVM, clear the threshold so predict() returns raw scores.
    val numIterations = 10
    val model = SVMWithSGD.train(training, numIterations)
    model.clearThreshold()

    // Score the held-out set: (raw score, true label) pairs for evaluation.
    val scoresWithLabels = test.map { point =>
      (model.predict(point.features), point.label)
    }
    val metrics = new BinaryClassificationMetrics(scoresWithLabels)
    val auROC = metrics.areaUnderROC()
    println("Area under ROC " + auROC)

    // Second model: more iterations plus L1 regularisation.
    val l1Svm = new SVMWithSGD()
    l1Svm.optimizer
      .setNumIterations(200)
      .setRegParam(0.1).setUpdater(new L1Updater)
    val modelL1 = l1Svm.run(training)
    println(modelL1.weights.toString() + " is modelL1 weight")

    sc.stop()
    System.exit(0)
  }
}
| baokunguo/learning-spark-examples | note/process/2015-07-12/scalaTest/src/sparkExample/LinearModelTest.scala | Scala | mit | 1,971 |
package controllers.auth
import javax.inject.Inject
import play.api.mvc.{ActionBuilder,AnyContent,BodyParsers,Request,Result}
import scala.concurrent.{ExecutionContext,Future}
/**
 * Builds Play actions that authenticate API requests via an API token.
 *
 * `apply(authority)` yields an ActionBuilder that asks `apiTokenFactory` for
 * an authorized token on each request and, on success, runs the wrapped block
 * with an `ApiAuthorizedRequest`; on failure it returns the factory's error
 * Result directly.
 */
class ApiAuthorizedAction @Inject() (
  apiTokenFactory: ApiTokenFactory,
  val bodyParser: BodyParsers.Default,
  ec: ExecutionContext
) {
  def apply(authority: Authority) = new ActionBuilder[ApiAuthorizedRequest, AnyContent] {
    override def parser = bodyParser
    // Declared implicit so the flatMap/Future calls below pick it up.
    override implicit def executionContext = ec

    override def invokeBlock[A](request: Request[A], block: (ApiAuthorizedRequest[A] => Future[Result])): Future[Result] = {
      /*
       * We special-case AuthorizedRequest[A] to short-circuit auth, so we can
       * write tests that don't hit UserFactory.
       *
       * We can't use overloading (because Request is a trait) or matching
       * (because of type erasure), but we can prove this is type-safe.
       *
       * [adam, 2017-07-18] check this is still needed; it's for Play 2.3.
       */
      if (request.isInstanceOf[ApiAuthorizedRequest[_]]) {
        block(request.asInstanceOf[ApiAuthorizedRequest[A]])
      } else {
        // Left: an error Result to return as-is (e.g. unauthorized);
        // Right: token accepted — wrap the request and run the block.
        apiTokenFactory.loadAuthorizedApiToken(request, authority).flatMap {
          case Left(plainResult) => Future(plainResult)
          case Right(apiToken) => block(new ApiAuthorizedRequest(request, apiToken))
        }
      }
    }
  }
}
| overview/overview-server | web/app/controllers/auth/ApiAuthorizedAction.scala | Scala | agpl-3.0 | 1,389 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package xml.dtd.impl
import scala.collection.{ immutable, mutable }
import scala.collection.Seq
/**
* A nondeterministic automaton. States are integers, where
* 0 is always the only initial state. Transitions are represented
* in the delta function. Default transitions are transitions that
* are taken when no other transitions can be applied.
* All states are reachable. Accepting states are those for which
* the partial function `finals` is defined.
*/
// TODO: still used in ContentModel -- @deprecated("This class will be removed", "2.10.0")
private[dtd] abstract class NondetWordAutom[T <: AnyRef] {
  // Number of states; state 0 is the initial state (see class doc).
  val nstates: Int
  // Alphabet of transition labels.
  val labels: Seq[T]
  val finals: Array[Int] // 0 means not final
  // delta(q) maps a label to the set of successor states of q.
  val delta: Array[mutable.Map[T, immutable.BitSet]]
  // default(q): successor states taken when no labelled transition applies.
  val default: Array[immutable.BitSet]

  /** @return true if the state is final */
  final def isFinal(state: Int) = finals(state) > 0

  /** @return tag of final state */
  final def finalTag(state: Int) = finals(state)

  /** @return true if the set of states contains at least one final state */
  final def containsFinal(Q: immutable.BitSet): Boolean = Q exists isFinal

  /** @return true if there are no accepting states */
  final def isEmpty = (0 until nstates) forall (x => !isFinal(x))

  /** @return a immutable.BitSet with the next states for given state and label */
  def next(q: Int, a: T): immutable.BitSet = delta(q).getOrElse(a, default(q))

  /** @return a immutable.BitSet with the next states for given state and label */
  def next(Q: immutable.BitSet, a: T): immutable.BitSet = next(Q, next(_, a))

  def nextDefault(Q: immutable.BitSet): immutable.BitSet = next(Q, default)

  // Union of f(q) over all states q in Q.
  private def next(Q: immutable.BitSet, f: (Int) => immutable.BitSet): immutable.BitSet =
    Q.toSet.map(f).foldLeft(immutable.BitSet.empty)(_ ++ _)

  private def finalStates = 0 until nstates filter isFinal
  override def toString = {

    val finalString = Map(finalStates map (j => j -> finals(j)): _*).toString
    val deltaString = (0 until nstates)
      .map(i => " %d->%s\\n _>%s\\n".format(i, delta(i), default(i))).mkString

    "[NondetWordAutom nstates=%d finals=%s delta=\\n%s".format(nstates, finalString, deltaString)
  }
}
| scala/scala-xml | shared/src/main/scala/scala/xml/dtd/impl/NondetWordAutom.scala | Scala | apache-2.0 | 2,509 |
package agilesites.setup
import java.io._
import agilesites.Utils
import sbt.Keys._
import sbt._
/**
* Created by msciab on 01/03/15.
*/
trait SetupSettings extends Utils {
  this: AutoPlugin
  with InstallerSettings
  with ToolsSettings
  with TomcatSettings =>

  import agilesites.config.AgileSitesConfigKeys._
  import agilesites.setup.AgileSitesSetupKeys._

  lazy val asSetupServletRequest = taskKey[Unit]("setup servlet request")

  // Rewrites ServletRequest.properties: inserts AgileSites as the first URI
  // assembler (shifting the existing ones down a slot) and registers each
  // focused site plus the statics path.
  lazy val asSetupServletRequestTask = asSetupServletRequest := {
    val webapp = sitesWebapp.value
    val prpFile = file(webapp) / "WEB-INF" / "classes" / "ServletRequest.properties"
    val prp = new java.util.Properties
    prp.load(new FileReader(prpFile))

    // shift the url assembler to add agilesites as the first
    if (prp.getProperty("uri.assembler.1.shortform") != "agilesites") {
      val p1s = prp.getProperty("uri.assembler.1.shortform")
      val p1c = prp.getProperty("uri.assembler.1.classname")
      val p2s = prp.getProperty("uri.assembler.2.shortform")
      val p2c = prp.getProperty("uri.assembler.2.classname")
      val p3s = prp.getProperty("uri.assembler.3.shortform")
      val p3c = prp.getProperty("uri.assembler.3.classname")
      val p4s = prp.getProperty("uri.assembler.4.shortform")
      val p4c = prp.getProperty("uri.assembler.4.classname")
      if (p4s != null && p4s != "") prp.setProperty("uri.assembler.5.shortform", p4s)
      if (p4c != null && p4c != "") prp.setProperty("uri.assembler.5.classname", p4c)
      // FIX: these two guards previously tested p4s (and p3s for the
      // classname) instead of p3s/p3c, so assembler 3 was silently dropped
      // whenever slot 4 was empty.
      if (p3s != null && p3s != "") prp.setProperty("uri.assembler.4.shortform", p3s)
      if (p3c != null && p3c != "") prp.setProperty("uri.assembler.4.classname", p3c)
      prp.setProperty("uri.assembler.3.shortform", p2s)
      prp.setProperty("uri.assembler.3.classname", p2c)
      prp.setProperty("uri.assembler.2.shortform", p1s)
      prp.setProperty("uri.assembler.2.classname", p1c)
      prp.setProperty("uri.assembler.1.shortform", "agilesites")
      prp.setProperty("uri.assembler.1.classname", "wcs.core.Assembler")
    }

    // Register each focused site under its normalized name.
    for (s <- sitesFocus.value.split(",")) {
      val nsite = normalizeSiteName(s)
      prp.setProperty("agilesites.site." + nsite, "/cs/Satellite/" + nsite)
      prp.setProperty("agilesites.name." + nsite, s)
    }
    prp.setProperty("agilesites.statics", sitesStatics.value)

    // store
    println("~ " + prpFile)
    prp.store(new FileWriter(prpFile),
      "updated by AgileSites setup")
  }

  // configure futurentense.ini
  lazy val asSetupFutureTenseIni = taskKey[Unit]("setup futuretense.ini")

  // Points futuretense.ini at the shared AgileSites jar directory and the
  // Envision (CSDT) folder.
  lazy val asSetupFutureTenseIniTask = asSetupFutureTenseIni := {
    val home = sitesHome.value
    val shared = sitesShared.value
    val version = sitesVersion.value
    val envision = sitesEnvision.value

    val prpFile = file(home) / "futuretense.ini"
    val prp = new java.util.Properties
    prp.load(new FileReader(prpFile))

    val jardir = file(shared) / "agilesites"

    prp.setProperty("agilesites.dir",
      jardir.getAbsolutePath);
    prp.setProperty("agilesites.poll",
      "1000");
    prp.setProperty("cs.csdtfolder",
      file(envision).getParentFile.getAbsolutePath())

    println("~ " + prpFile)
    prp.store(new FileWriter(prpFile),
      "updated by AgileSites setup")
  }

  // select jars for the setup offline
  lazy val asSetupCopyJarsWeb = taskKey[Unit]("setup servlet request")

  // Replaces the agilesites_* jars under WEB-INF/lib with the current "core"
  // classpath.
  lazy val asSetupCopyJarsWebTask = asSetupCopyJarsWeb := {
    val webapp = sitesWebapp.value
    val version = sitesVersion.value

    val destLib = file(webapp) / "WEB-INF" / "lib"
    val addJars = asCoreClasspath.value
    val removeJars = destLib.listFiles.filter(_.getName.startsWith("agilesites"))
    setupCopyJars("agilesites_", destLib, addJars, removeJars)
  }

  // select jars for the setup online
  lazy val asSetupCopyJarsLib = taskKey[Unit]("setup servlet request")

  // Replaces the "api" classpath jars under <shared>/agilesites/lib and
  // removes stale copies left directly in <shared>/agilesites.
  lazy val asSetupCopyJarsLibTask = asSetupCopyJarsLib := {
    val shared = sitesShared.value

    val parentLib = file(shared) / "agilesites"
    val destLib = parentLib / "lib"
    destLib.mkdirs()

    // jars to include when performing a setup
    val addJars = asApiClasspath.value

    // jars to remove when performing a setup
    val removeJars = destLib.listFiles

    setupCopyJars("", destLib, addJars, removeJars)

    // drop any jar shadowing the lib/ copy in the parent directory
    for (file <- destLib.listFiles) {
      val parentFile = parentLib / file.getName
      if (parentFile.exists) {
        parentFile.delete
        println("- " + parentFile.getAbsolutePath)
      }
    }
  }

  /**
   * Copies `addJars` into `destLib` (prefixing each name with `prefix`)
   * after deleting `removeJars`; logs each removal ("-") and addition ("+").
   */
  def setupCopyJars(prefix: String, destLib: File, addJars: Seq[File], removeJars: Seq[File]) {
    // remove jars
    println("** removing old version of files **");
    for (file <- removeJars) {
      val tgt = destLib / file.getName
      tgt.delete
      println("- " + tgt.getAbsolutePath)
    }
    // add jars
    println("** installing new version of files **");
    for (file <- addJars) yield {
      val tgt = destLib / (prefix + file.getName)
      IO.copyFile(file, tgt)
      println("+ " + tgt.getAbsolutePath)
    }
  }

  // Offline half of the setup: rewrite config files and install jars; the
  // app server must then be restarted and "asDeploy" run.
  val asSetupOfflineTask = asSetupOffline := {
    //if (sitesHello.value.nonEmpty)
    //  throw new Exception("Web Center Sites must be offline.")

    println("*** Installing AgileSites for WebCenter Sites ***");

    // configuring
    asSetupServletRequest.value
    asSetupFutureTenseIni.value

    // installing jars
    asSetupCopyJarsWeb.value
    asSetupCopyJarsLib.value

    println( """**** Setup Complete.
               |**** Please restart your application server.
               |**** You need to complete installation with "asDeploy".""".stripMargin)
  }

  //val asSetupOnlineTask = cmov.fullInput(" setup").parsed

  // Wires the tasks together and exposes the ivy configurations used to
  // resolve the "core", "api" and "populate" classpaths.
  val setupSettings = Seq(ivyConfigurations ++= Seq(config("core"), config("api"), config("populate")),
    asCoreClasspath <<= (update) map {
      report => report.select(configurationFilter("core"))
    }, asApiClasspath <<= (update) map {
      report => report.select(configurationFilter("api"))
    }, asPopulateClasspath <<= (update) map {
      report => report.select(configurationFilter("populate"))
    },
    asSetupOfflineTask,
    asSetupServletRequestTask,
    asSetupFutureTenseIniTask,
    asSetupCopyJarsWebTask,
    asSetupCopyJarsLibTask,
    asSetupOnline := {
      cmov.toTask(" setup").value
    },
    asSetupWeblogic := Def.sequential(
      asSetupOffline,
      weblogicRedeployCs,
      asSetupOnline
    ).value,
    asSetup := Def.sequential(
      serverStop,
      asSetupOffline,
      serverStart,
      asSetupOnline).value)
}
| agilesites/agilesites2-build | src/main/scala/agilesites/setup/SetupSettings.scala | Scala | mit | 6,652 |
package mesosphere.marathon.integration
import java.io.File
import mesosphere.marathon.integration.setup._
import org.scalatest.{ BeforeAndAfter, GivenWhenThen, Matchers }
class MarathonStartupIntegrationTest extends IntegrationFunSuite
    with SingleMarathonIntegrationTest
    with Matchers
    with BeforeAndAfter
    with GivenWhenThen {

  test("Marathon should fail during start, if the HTTP port is already bound") {
    Given(s"a Marathon process already running on port ${config.marathonBasePort}")

    When("starting another Marathon process using an HTTP port that is already bound")
    val workingDirectory = new File(".")
    val marathonArgs =
      List("--http_port", config.marathonBasePort.toString, "--zk", config.zk, "--master", config.master)
    // ProcessKeeper waits for the given startup line, so the returned process
    // has already reported the expected fatal error by the time we check it.
    val conflictingMarathon = ProcessKeeper.startMarathon(
      workingDirectory,
      env,
      marathonArgs,
      startupLine = "FATAL Failed to start all services."
    )

    Then("the new process should fail and exit with an error code")
    assert(conflictingMarathon.exitValue() > 0)
  }
}
| EasonYi/marathon | src/test/scala/mesosphere/marathon/integration/MarathonStartupIntegrationTest.scala | Scala | apache-2.0 | 992 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.persistence.cassandra.query
import akka.annotation.InternalApi
/**
* INTERNAL API
*/
@InternalApi private[akka] trait CassandraReadStatements {

  def config: CassandraReadJournalConfig

  // Fully-qualified table names derived from the configured keyspace.
  // Note: the tag view table name "tag_views" is fixed, unlike the main table.
  private def tableName = s"${config.keyspace}.${config.table}"
  private def tagViewTableName = s"${config.keyspace}.tag_views"

  // All persistence id / partition pairs in the journal table.
  def selectDistinctPersistenceIds =
    s"""
      SELECT DISTINCT persistence_id, partition_nr FROM $tableName
     """

  // Events for a tag within one time bucket, after a lower timestamp bound.
  // (stripMargin is a no-op here: the string contains no '|' margin chars.)
  def selectEventsFromTagView =
    s"""
      SELECT * FROM $tagViewTableName WHERE
        tag_name = ? AND
        timebucket = ? AND
        timestamp > ?
        ORDER BY timestamp ASC
     """.stripMargin

  // Same as above, but bounded above as well (exclusive upper timestamp).
  def selectEventsFromTagViewWithUpperBound =
    s"""
      SELECT * FROM $tagViewTableName WHERE
        tag_name = ? AND
        timebucket = ? AND
        timestamp > ? AND
        timestamp < ?
     """.stripMargin

  // Per-persistence-id tag sequence numbers in a (lower, upper] timestamp
  // window of one time bucket.
  def selectTagSequenceNrs =
    s"""
       SELECT persistence_id, tag_pid_sequence_nr, timestamp
       FROM $tagViewTableName WHERE
       tag_name = ? AND
       timebucket = ? AND
       timestamp > ? AND
       timestamp <= ?"""
}
| ktoso/akka-persistence-cassandra | core/src/main/scala/akka/persistence/cassandra/query/CassandraReadStatements.scala | Scala | apache-2.0 | 1,188 |
package feh.tec.nxt
import feh.tec.nxt.LegoRobotRubik.{LightSensorPosition, CubeRotationMotor, LightSensorMotor}
object CubeSideAction{
def foreachCube[R](f: ((Int, Int)) => R)
(implicit lsm: LightSensorMotor, crm: CubeRotationMotor): Seq[R] =
motorPositions.map{
case (p, LightSensorPosition(sa, ra)) =>
lsm.get.rotateTo(sa, true)
crm.get.rotateTo(ra, false)
f(p)
}
private def mpRot0 = 50
private def mpRotC = 165
private def mpRotM = 145
lazy val motorPositionsMap = motorPositions.toMap
lazy val motorPositions = Seq(
(1, 1) -> LightSensorPosition(40, 0),
(1, 0) -> LightSensorPosition(23, mpRot0), // 50
(0, 0) -> LightSensorPosition(17, mpRot0+mpRotC), // 210
(0, 1) -> LightSensorPosition(23, mpRot0+mpRotC+mpRotM), // 350
(0, 2) -> LightSensorPosition(17, mpRot0+mpRotC*2+mpRotM), // 510
(1, 2) -> LightSensorPosition(23, mpRot0+mpRotC*2+mpRotM*2),
(2, 2) -> LightSensorPosition(17, mpRot0+mpRotC*3+mpRotM*2),
(2, 1) -> LightSensorPosition(23, mpRot0+mpRotC*3+mpRotM*3),
(2, 0) -> LightSensorPosition(17, mpRot0+mpRotC*4+mpRotM*3)
)
} | fehu/int-sis--Rubik | nxt/src/main/scala/feh/tec/nxt/CubeSideAction.scala | Scala | mit | 1,166 |
package org.jetbrains.plugins.scala.conversion
package generated
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.settings.ScalaProjectSettings
import org.jetbrains.plugins.scala.util.TypeAnnotationSettings
class JavaToScalaConversionExamplesTest extends JavaToScalaConversionTestBase {
//This class was generated by build script, please don't change this
override def folderPath: String = super.folderPath + "examples/"
def testAnnotated() = doTest()
def testAnonymousClass() = doTest()
def testDeprecated() = doTest()
def testEnum() = doTest()
def testFinalInObjects() = doTest()
def testHelloWorld() = doTest()
def testInterface() = doTest()
def testRightOrder() = doTest()
def testStaticInitializer() = doTest()
def testStaticPrefix() = doTest()
def testThrows() = doTest()
def testTypeParameters() = doTest()
def testVarArgs() = doTest()
def testSCL3899() = doTest()
def testSCL9369() = doTest()
def testSCL11463() = doTest()
def testNeedConstructorsSorting() = doTest()
def testNoOverrideToImplement() = {
val oldValue = ScalaProjectSettings.getInstance(getProjectAdapter).isAddOverrideToImplementInConverter
ScalaProjectSettings.getInstance(getProjectAdapter).setAddOverrideToImplementInConverter(false)
doTest()
ScalaProjectSettings.getInstance(getProjectAdapter).setAddOverrideToImplementInConverter(oldValue)
}
// def testSCL9434() = doTest()
def testSCL9421() = doTest()
def testSCL9375() = doTest()
def testSCL11313() = doTest()
def testSCL11451() = doTest()
def testNoReturnTypeForPublic() =
doTest(TypeAnnotationSettings.noTypeAnnotationForPublic(
TypeAnnotationSettings.alwaysAddType(ScalaCodeStyleSettings.getInstance(getProjectAdapter)))
)
def testNoRetunTypeForLocal() =
doTest(TypeAnnotationSettings.noTypeAnnotationForLocal(
TypeAnnotationSettings.alwaysAddType(ScalaCodeStyleSettings.getInstance(getProjectAdapter))
))
def testImports() = doTest()
def testLambdaExpr() = doTest()
} | loskutov/intellij-scala | test/org/jetbrains/plugins/scala/conversion/generated/JavaToScalaConversionExamplesTest.scala | Scala | apache-2.0 | 2,105 |
package com.outr.stripe.connect
import com.outr.stripe.Money
case class FeeRefund(id: String,
`object`: String,
amount: Money,
balanceTransaction: Option[String],
created: Long,
currency: String,
fee: String,
metadata: Map[String, String]) | outr/scala-stripe | core/jvm/src/main/scala/com/outr/stripe/connect/FeeRefund.scala | Scala | mit | 388 |
package at.fh.swengb.resifoAndroid.activities.list
import android.app.Activity
import android.content.Intent
import android.os.Bundle
import android.view.View
import android.view.View.OnClickListener
import android.widget.ImageView
import at.fh.swengb.resifoAndroid.{MainActivity, R}
/**
* Created by laszlobalo on 31.12.16.
*/
class Credits extends Activity {

  /** Inflates the credits screen and wires the exit button back to MainActivity. */
  override protected def onCreate(savedInstanceState: Bundle) {
    super.onCreate(savedInstanceState)
    setContentView(R.layout.activity_credits)

    // Tapping the exit button returns to the main screen.
    val exitBtn = findViewById(R.id.exitBtn).asInstanceOf[ImageView]
    exitBtn.setOnClickListener(new OnClickListener {
      def onClick(v: View): Unit =
        startActivity(new Intent(getApplicationContext, classOf[MainActivity]))
    })
  }
}
| Gulasch4ever/resifo-android | app/src/main/scala/at/fh/swengb/resifoAndroid/activities/list/Credits.scala | Scala | gpl-3.0 | 795 |
package com.socrata.geoexport.util
import org.geotools.feature.FeatureIterator
import org.opengis.feature.simple.SimpleFeature
/**
 * Adapts a GeoTools FeatureIterator to a Scala Iterator over SimpleFeatures.
 *
 * GeoTools feature iterators hold backing resources that must be released,
 * so callers should close this (it is AutoCloseable) when done.
 */
class GeoIterator(sfi: FeatureIterator[SimpleFeature]) extends Iterator[SimpleFeature] with AutoCloseable {
  def hasNext: Boolean = sfi.hasNext()
  def next(): SimpleFeature = sfi.next()
  // Removal is not meaningful for a read-only feature stream.
  def remove(): Unit = throw new UnsupportedOperationException("remove is not supported")
  // FIX: was `???` (threw NotImplementedError), leaking the underlying
  // iterator's resources; now delegates to FeatureIterator.close().
  def close(): Unit = sfi.close()
}
| socrata-platform/geo-export | src/main/scala/com.socrata.geoexport/util/GeoIterator.scala | Scala | apache-2.0 | 373 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.learning.reproduction
import de.fuberlin.wiwiss.silk.util.DPair
import util.Random
import de.fuberlin.wiwiss.silk.learning.individual.{NodeTraverser, InputNode}
/**
* A crossover operator which combines the transformations of two comparisons.
*/
case class TransformationCrossover() extends NodePairCrossoverOperator[InputNode] {

  // Only inputs from the same side of the comparison may be crossed
  // (source with source, target with target).
  override protected def compatible(nodes: DPair[InputNode]) = {
    nodes.source.isSource == nodes.target.isSource
  }

  override protected def crossover(nodePair: DPair[InputNode]) = {
    // Collect every InputNode reachable from each parent's transformation tree.
    val lowerSourceNodes = NodeTraverser(nodePair.source).iterateAll.withFilter(_.node.isInstanceOf[InputNode]).toIndexedSeq
    val lowerTargetNodes = NodeTraverser(nodePair.target).iterateAll.withFilter(_.node.isInstanceOf[InputNode]).toIndexedSeq

    // Pick one random input node from each parent.
    val lowerSourceNode = lowerSourceNodes(Random.nextInt(lowerSourceNodes.size))
    val lowerTargetNode = lowerTargetNodes(Random.nextInt(lowerTargetNodes.size))

    // Graft the source node into the target's position, then walk back up to
    // the root of the updated tree and return it as the offspring.
    val updatedLowerNode = lowerTargetNode.update(lowerSourceNode.node)
    val updatedUpperNode = updatedLowerNode.iterate(_.moveUp).toTraversable.last

    updatedUpperNode.node.asInstanceOf[InputNode]
  }
}
| fusepoolP3/p3-silk | silk-learning/src/main/scala/de/fuberlin/wiwiss/silk/learning/reproduction/TransformationCrossover.scala | Scala | apache-2.0 | 1,749 |
package com.twitter.finatra.kafkastreams.integration.finatratransformer
import com.twitter.conversions.DurationOps._
import com.twitter.finatra.kafkastreams.test.{FinatraTopologyTester, TopologyFeatureTest}
import org.apache.kafka.common.serialization.Serdes
import org.joda.time.DateTime
// Topology test for WordLengthServer: verifies that each input record is
// transformed onMessage, and that event-time timers fire once the watermark
// advances past them.
class WordLengthServerTopologyFeatureTest extends TopologyFeatureTest {

  override val topologyTester = FinatraTopologyTester(
    kafkaApplicationId = "test-transformer-prod-alice",
    server = new WordLengthServer,
    startingWallClockTime = new DateTime("2018-01-01T00:00:00Z")
  )

  // Input topic: (word, payload) pairs; output topic: transformed strings.
  private val wordAndCountTopic =
    topologyTester.topic(WordLengthServer.stringsAndInputsTopic, Serdes.String(), Serdes.String())

  private val stringAndCountTopic =
    topologyTester.topic(WordLengthServer.StringsAndOutputsTopic, Serdes.String(), Serdes.String())

  test("test inputs get transformed and timers fire") {
    wordAndCountTopic.pipeInput("key", "")
    stringAndCountTopic.assertOutput("key", "onMessage key " + "key".length)
    // advance time
    topologyTester.advanceWallClockTime(6.seconds)
    // send a message to advance the watermark
    wordAndCountTopic.pipeInput("key2", "")
    // advance time again to cause the new watermark to get passed through onWatermark
    topologyTester.advanceWallClockTime(1.seconds)
    stringAndCountTopic.assertOutput("key2", "onMessage key2 " + "key2".length)
    // the first key's event-time timer has now expired
    stringAndCountTopic.assertOutput("key", "onEventTimer key " + "key".length)
  }
}
| twitter/finatra | kafka-streams/kafka-streams/src/test/scala/com/twitter/finatra/kafkastreams/integration/finatratransformer/WordLengthServerTopologyFeatureTest.scala | Scala | apache-2.0 | 1,494 |
/*
Copyright (c) 2013-2016 Karol M. Stasiak
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package io.github.karols.units.internal
object Integers {
import Bools._
import language.higherKinds
	// Type-level integer interface. Succ/Pred are the successor/predecessor;
	// Half/Third are only defined when the corresponding Divisible* member is
	// True (otherwise implementations resolve them to Nothing); ZeroNegPos is
	// a case-analysis eliminator selecting a result by the number's sign.
	sealed trait TInteger {
		type Succ <: TInteger
		type Pred <: TInteger
		type Half <: TInteger
		type Third <: TInteger
		type DivisibleByTwo <: TBool
		type DivisibleByThree <: TBool
		type Negate <: TInteger
		type Add[X<:TInteger] <: TInteger
		type RevSub[X<:TInteger] <: TInteger
		type Sub[X<:TInteger] <: TInteger
		type Mul[X<:TInteger] <: TInteger
		type ZeroNegPos[IfZero<:ResultType, IfNeg[N<:TInteger]<:ResultType, IfPos[N<:TInteger]<:ResultType, ResultType] <: ResultType
		type Equal[X<:TInteger] <: TBool
	}
type LambdaNatTrue[N<:TInteger] = True
type LambdaNatFalse[N<:TInteger] = False
type LambdaNatNothing[N<:TInteger] = Nothing
sealed trait NegZ extends TInteger
sealed trait PosZ extends TInteger
sealed trait _0 extends TInteger with NegZ with PosZ{
type Succ = Inc[_0]
type Pred = Dec[_0]
type Half = _0
type Third = _0
type DivisibleByTwo = True
type DivisibleByThree = True
type Negate = _0
type Add[X<:TInteger] = X
type RevSub[X<:TInteger] = X
type Sub[X<:TInteger] = X#RevSub[_0]
type Mul[X<:TInteger] = _0
type ZeroNegPos[IfZero<:ResultType, IfNeg[N<:TInteger]<:ResultType, IfPos[N<:TInteger]<:ResultType, ResultType] = IfZero
type Equal[X<:TInteger] <: X#ZeroNegPos[True, LambdaNatFalse, LambdaNatFalse, TBool]
}
sealed trait Inc[N <: TInteger] extends TInteger with PosZ {
type Succ = Inc[Inc[N]]
type Pred = N
type Half = If[DivisibleByTwo,
Inc[N#Pred#Half],
Nothing,
TInteger]
type Third = If[DivisibleByThree,
Inc[N#Pred#Pred#Third],
Nothing,
TInteger]
type DivisibleByTwo = N#DivisibleByTwo#Not
type DivisibleByThree = N#ZeroNegPos[
False,
LambdaNatFalse,
({type L[X<:TInteger] = X#Pred#DivisibleByThree})#L,
TBool]
type Negate = Dec[N#Negate]
type Add[X<:TInteger] = N#Add[X#Succ]
type RevSub[X<:TInteger] = N#RevSub[X#Pred]
type Sub[X<:TInteger] = X#RevSub[N#Succ]
type Mul[X<:TInteger] = N#Mul[X]#Add[X]
type ZeroNegPos[IfZero<:ResultType, IfNeg[N<:TInteger]<:ResultType, IfPos[N<:TInteger]<:ResultType, ResultType] = IfPos[N]
type Equal[X<:TInteger] <: X#ZeroNegPos[False, LambdaNatFalse, N#Equal, TBool]
}
sealed trait Dec[N <: TInteger] extends TInteger with NegZ {
type Succ = N
type Pred = Dec[Dec[N]]
type Half = If[DivisibleByTwo,
Dec[N#Succ#Half],
Nothing,
TInteger]
type Third = If[DivisibleByThree,
Dec[N#Succ#Succ#Third],
Nothing,
TInteger]
type DivisibleByTwo = N#DivisibleByTwo#Not
type DivisibleByThree = N#ZeroNegPos[
False,
({type L[X<:TInteger] = X#Succ#DivisibleByThree})#L,
LambdaNatFalse,
TBool]
type Negate = Inc[N#Negate]
type Add[X<:TInteger] = N#Add[X#Pred]
type RevSub[X<:TInteger] = N#RevSub[X#Succ]
type Sub[X<:TInteger] = X#RevSub[N#Pred]
type Mul[X<:TInteger] = N#Mul[X]#Sub[X]
type ZeroNegPos[IfZero<:ResultType, IfNeg[N<:TInteger]<:ResultType, IfPos[N<:TInteger]<:ResultType, ResultType] = IfNeg[N]
type Equal[X<:TInteger] <: X#ZeroNegPos[False, N#Equal, LambdaNatFalse, TBool]
}
type P1 = Inc[_0]
type P2 = Inc[P1]
type P3 = Inc[P2]
type P4 = Inc[P3]
type P5 = Inc[P4]
type P6 = Inc[P5]
type P7 = Inc[P6]
type P8 = Inc[P7]
type P9 = Inc[P8]
type P10 = Inc[P9]
type P11 = Inc[P10]
type P12 = Inc[P11]
type P13 = Inc[P12]
type P14 = Inc[P13]
type P15 = Inc[P14]
type N1 = Dec[_0]
type N2 = Dec[N1]
type N3 = Dec[N2]
type N4 = Dec[N3]
type N5 = Dec[N4]
type N6 = Dec[N5]
type N7 = Dec[N6]
type N8 = Dec[N7]
type N9 = Dec[N8]
type +[A<:TInteger,B<:TInteger] = A#Add[B]
type -[A<:TInteger,B<:TInteger] = A#Sub[B]
type ==[A<:TInteger,B<:TInteger] = A#Equal[B]
class ToInt[N<:TInteger](val toInt:Int) extends AnyVal {
override def toString = toInt.toString
}
def printlnN[N<:TInteger]()(implicit toInt: ToInt[N]) = println(toInt.toInt)
implicit val print0:ToInt[_0] = new ToInt[_0](0)
implicit def printPos[N<:PosZ](implicit toInt: ToInt[N]):ToInt[Inc[N]]= new ToInt[Inc[N]](toInt.toInt + 1)
implicit def printNeg[N<:NegZ](implicit toInt: ToInt[N]):ToInt[Dec[N]]= new ToInt[Dec[N]](toInt.toInt - 1)
} | KarolS/units | units/src/main/scala/io/github/karols/units/internal/Integers.scala | Scala | mit | 5,231 |
import sbt._
import Keys._
import com.typesafe.sbt.SbtMultiJvm
import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys.MultiJvm
import org.scalastyle.sbt.ScalastylePlugin
import pl.project13.scala.sbt.JmhPlugin
import sbtassembly.AssemblyPlugin.autoImport._
/**
* FiloDB modules and dependencies
*/
object FiloBuild extends Build {
import FiloSettings._
lazy val memory = project
.in(file("memory"))
.settings(commonSettings: _*)
.settings(assemblySettings: _*)
.settings(name := "filodb-memory")
.settings(scalacOptions += "-language:postfixOps")
.settings(libraryDependencies ++= memoryDeps)
lazy val core = project
.in(file("core"))
.settings(commonSettings: _*)
.settings(name := "filodb-core")
.settings(scalacOptions += "-language:postfixOps")
.settings(libraryDependencies ++= coreDeps)
.dependsOn(memory % "compile->compile; test->test")
lazy val coordinator = project
.in(file("coordinator"))
.settings(commonSettings: _*)
.settings(multiJvmSettings: _*)
.settings(testMultiJvmToo: _*)
.settings(name := "filodb-coordinator")
.settings(libraryDependencies ++= coordDeps)
.settings(libraryDependencies +=
"com.typesafe.akka" %% "akka-contrib" % akkaVersion exclude(
"com.typesafe.akka", s"akka-persistence-experimental_${scalaBinaryVersion.value}"))
.dependsOn(core % "compile->compile; test->test")
.dependsOn(query % "compile->compile; test->test")
.dependsOn(prometheus % "compile->compile; test->test")
.configs(MultiJvm)
lazy val prometheus = project
.in(file("prometheus"))
.settings(commonSettings: _*)
.settings(name := "filodb-prometheus")
.settings(libraryDependencies ++= promDeps)
.dependsOn(core % "compile->compile; test->test")
.dependsOn(query % "compile->compile; test->test")
// PromQL-style query engine; depends on core (and core's test classes for tests).
lazy val query = project
  .in(file("query"))
  .settings(libraryDependencies ++= queryDeps)
  .settings(commonSettings: _*)
  .settings(scalacOptions += "-language:postfixOps")
  .settings(name := "filodb-query")
  .dependsOn(core % "compile->compile; test->test")
// Cassandra column store backend.
lazy val cassandra = project
  .in(file("cassandra"))
  .settings(commonSettings: _*)
  .settings(name := "filodb-cassandra")
  .settings(libraryDependencies ++= cassDeps)
  .dependsOn(core % "compile->compile; test->test", coordinator)
// Command-line interface; assembled into a fat jar via cliAssemblySettings.
lazy val cli = project
  .in(file("cli"))
  .settings(commonSettings: _*)
  .settings(name := "filodb-cli")
  .settings(libraryDependencies ++= cliDeps)
  .settings(cliAssemblySettings: _*)
  .dependsOn(prometheus % "compile->compile; test->test")
  .dependsOn(core, coordinator % "test->test", cassandra)
// Kafka ingestion source; has integration tests (it) and multi-JVM tests.
lazy val kafka = project
  .in(file("kafka"))
  .settings(name := "filodb-kafka")
  .settings(commonSettings: _*)
  .settings(kafkaSettings: _*)
  .settings(itSettings: _*)
  .settings(assemblySettings: _*)
  .settings(libraryDependencies ++= kafkaDeps)
  .dependsOn(
    core % "compile->compile; it->test",
    coordinator % "compile->compile; test->test")
  .configs(IntegrationTest, MultiJvm)
// Akka cluster bootstrap helper library (published under its own name).
lazy val bootstrapper = project
  .in(file("akka-bootstrapper"))
  .settings(commonSettings: _*)
  .settings(multiJvmMaybeSettings: _*)
  .settings(name := "akka-bootstrapper")
  .settings(libraryDependencies ++= bootstrapperDeps)
  .configs(MultiJvm)
// HTTP API module (akka-http based).
lazy val http = project
  .in(file("http"))
  .settings(commonSettings: _*)
  .settings(name := "http")
  .settings(libraryDependencies ++= httpDeps)
  .dependsOn(core, coordinator % "compile->compile; test->test")
// Standalone server aggregating most modules; gateway only needed for tests.
lazy val standalone = project
  .in(file("standalone"))
  .settings(commonSettings: _*)
  .settings(multiJvmMaybeSettings: _*)
  .settings(assemblySettings: _*)
  .settings(libraryDependencies ++= standaloneDeps)
  .dependsOn(core, prometheus % "test->test", coordinator % "compile->compile; test->test",
    cassandra, kafka, http, bootstrapper, gateway % Test)
  .configs(MultiJvm)
// Spark module is currently disabled (see the commented sparkDeps below as well).
// lazy val spark = project
//   .in(file("spark"))
//   .settings(name := "filodb-spark")
//   .settings(commonSettings: _*)
//   .settings(libraryDependencies ++= sparkDeps)
//   .settings(itSettings: _*)
//   .settings(jvmPerTestSettings: _*)
//   .settings(assemblyExcludeScala: _*)
//   // Disable tests for now since lots of work remaining to enable Spark
//   .settings(test := {})
//   .dependsOn(core % "compile->compile; test->test; it->test",
//     coordinator % "compile->compile; test->test",
//     cassandra % "compile->compile; test->test; it->test")
//   .configs( IntegrationTest )
// JMH micro-benchmarks; note compile->test pulls core's *test* classes into
// jmh's compile classpath (benchmarks reuse test fixtures).
lazy val jmh = project
  .in(file("jmh"))
  .settings(commonSettings: _*)
  .settings(name := "filodb-jmh")
  .settings(libraryDependencies ++= jmhDeps)
  .settings(publish := {})
  .enablePlugins(JmhPlugin)
  .dependsOn(core % "compile->compile; compile->test", gateway)
// Stress-test module disabled (depends on the disabled spark module).
// lazy val stress = project
//   .in(file("stress"))
//   .settings(commonSettings: _*)
//   .settings(name := "filodb-stress")
//   .settings(libraryDependencies ++= stressDeps)
//   .settings(assemblyExcludeScala: _*)
//   .dependsOn(spark)
// Ingestion gateway; assembled into its own fat jar.
lazy val gateway = project
  .in(file("gateway"))
  .settings(commonSettings: _*)
  .settings(name := "filodb-gateway")
  .settings(libraryDependencies ++= gatewayDeps)
  .settings(gatewayAssemblySettings: _*)
  .dependsOn(coordinator % "compile->compile; test->test",
    prometheus, cassandra)
// --- Dependency exclusion rules shared by the module dependency lists below ---
// Zookeeper pulls in slf4j-log4j12 which we DON'T want
val excludeZK = ExclusionRule(organization = "org.apache.zookeeper")
// This one is brought by Spark by default
val excludeSlf4jLog4j = ExclusionRule(organization = "org.slf4j", name = "slf4j-log4j12")
val excludeJersey = ExclusionRule(organization = "com.sun.jersey")
// The default minlog only logs to STDOUT. We want to log to SLF4J.
val excludeMinlog = ExclusionRule(organization = "com.esotericsoftware", name = "minlog")
val excludeOldLz4 = ExclusionRule(organization = "net.jpountz.lz4", name = "lz4")
/* Versions in various modules versus one area of build */
val akkaVersion = "2.5.22" // akka-http/akka-stream compat. TODO when kamon-akka-remote is akka 2.5.4 compat
val akkaHttpVersion = "10.1.8"
val cassDriverVersion = "3.7.1"
val ficusVersion = "1.1.2"
val kamonVersion = "1.1.6"
val monixKafkaVersion = "0.15"
val sparkVersion = "2.0.0"
val sttpVersion = "1.3.3"
/* Dependencies shared */
val logbackDep = "ch.qos.logback" % "logback-classic" % "1.2.3"
val log4jDep = "log4j" % "log4j" % "1.2.17"
val scalaLoggingDep = "com.typesafe.scala-logging" %% "scala-logging" % "3.7.2"
val scalaTest = "org.scalatest" %% "scalatest" % "2.2.6" // TODO upgrade to 3.0.4
val scalaCheck = "org.scalacheck" %% "scalacheck" % "1.11.0"
// akka-http and its JSON (circe) bindings, shared by http and bootstrapper.
val akkaHttp = "com.typesafe.akka" %% "akka-http" % akkaHttpVersion withJavadoc()
val akkaHttpTestkit = "com.typesafe.akka" %% "akka-http-testkit" % akkaHttpVersion withJavadoc()
val akkaHttpCirce = "de.heikoseeberger" %% "akka-http-circe" % "1.21.0"
val circeGeneric = "io.circe" %% "circe-generic" % "0.8.0"
val circeParser = "io.circe" %% "circe-parser" % "0.8.0"
// Baseline dependencies (kamon instrumentation + test libs) included by most modules.
lazy val commonDeps = Seq(
  "io.kamon" %% "kamon-core" % kamonVersion,
  "io.kamon" %% "kamon-akka-2.5" % "1.1.3",
  "io.kamon" %% "kamon-executors" % "1.0.2",
  "io.kamon" %% "kamon-akka-remote-2.5" % "1.1.0",
  logbackDep % Test,
  scalaTest % Test,
  scalaCheck % "test"
)
lazy val scalaxyDep = "com.nativelibs4java" %% "scalaxy-loops" % "0.3.3" % "provided"
lazy val memoryDeps = commonDeps ++ Seq(
  "com.github.jnr" % "jnr-ffi" % "2.1.6",
  "joda-time" % "joda-time" % "2.2" withJavadoc(),
  "org.joda" % "joda-convert" % "1.2",
  "org.lz4" % "lz4-java" % "1.4",
  "org.jctools" % "jctools-core" % "2.0.1" withJavadoc(),
  "org.spire-math" %% "debox" % "0.8.0" withJavadoc(),
  scalaLoggingDep,
  scalaxyDep
)
lazy val coreDeps = commonDeps ++ Seq(
  scalaLoggingDep,
  "io.kamon" %% "kamon-zipkin" % "1.0.0",
  "org.slf4j" % "slf4j-api" % "1.7.10",
  "com.beachape" %% "enumeratum" % "1.5.10",
  "io.monix" %% "monix" % "2.3.0",
  "com.googlecode.concurrentlinkedhashmap" % "concurrentlinkedhashmap-lru" % "1.4",
  "net.ceedubs" %% "ficus" % ficusVersion,
  "io.fastjson" % "boon" % "0.33",
  "com.googlecode.javaewah" % "JavaEWAH" % "1.1.6" withJavadoc(),
  "com.github.rholder.fauxflake" % "fauxflake-core" % "1.1.0",
  "org.scalactic" %% "scalactic" % "2.2.6" withJavadoc(),
  "org.apache.lucene" % "lucene-core" % "7.3.0" withJavadoc(),
  "com.github.alexandrnikitin" %% "bloom-filter" % "0.11.0",
  scalaxyDep
)
lazy val cassDeps = commonDeps ++ Seq(
  // other dependencies separated by commas
  "org.lz4" % "lz4-java" % "1.4",
  "com.datastax.cassandra" % "cassandra-driver-core" % cassDriverVersion,
  logbackDep % Test
)
lazy val queryDeps = commonDeps ++ Seq(
  "com.tdunning" % "t-digest" % "3.1",
  scalaxyDep
)
// Cluster coordinator: akka-cluster plus kryo serialization (minlog/lz4 conflicts excluded).
lazy val coordDeps = commonDeps ++ Seq(
  "com.typesafe.akka" %% "akka-slf4j" % akkaVersion,
  "com.typesafe.akka" %% "akka-cluster" % akkaVersion withJavadoc(),
  "com.github.romix.akka" %% "akka-kryo-serialization" % "0.5.0" excludeAll(excludeMinlog, excludeOldLz4),
  "de.javakaffee" % "kryo-serializers" % "0.42" excludeAll(excludeMinlog),
  "io.kamon" %% "kamon-prometheus" % "1.1.1",
  // Redirect minlog logs to SLF4J
  "com.dorkbox" % "MinLog-SLF4J" % "1.12",
  "com.opencsv" % "opencsv" % "3.3",
  "com.github.TanUkkii007" %% "akka-cluster-custom-downing" % "0.0.12",
  "com.typesafe.akka" %% "akka-testkit" % akkaVersion % Test,
  "com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion % Test
)
lazy val cliDeps = Seq(
  logbackDep,
  "io.kamon" %% "kamon-akka-2.5" % "1.1.3",
  "io.kamon" %% "kamon-executors" % "1.0.2",
  "io.kamon" %% "kamon-akka-remote-2.5" % "1.1.0",
  "com.quantifind" %% "sumac" % "0.3.0"
)
lazy val kafkaDeps = Seq(
  "io.monix" %% "monix-kafka-1x" % monixKafkaVersion,
  "org.apache.kafka" % "kafka-clients" % "1.0.0" % "compile,test" exclude("org.slf4j", "slf4j-log4j12"),
  "com.typesafe.akka" %% "akka-testkit" % akkaVersion % "test,it",
  scalaTest % "test,it",
  logbackDep % "test,it")
lazy val promDeps = Seq(
  "com.google.protobuf" % "protobuf-java" % "2.5.0"
)
lazy val gatewayDeps = commonDeps ++ Seq(
  scalaxyDep,
  "io.monix" %% "monix-kafka-1x" % monixKafkaVersion,
  "org.rogach" %% "scallop" % "3.1.1"
)
lazy val httpDeps = Seq(
  logbackDep,
  akkaHttp,
  akkaHttpCirce,
  circeGeneric,
  circeParser,
  akkaHttpTestkit % Test,
  "org.xerial.snappy" % "snappy-java" % "1.1.7.3"
)
// Standalone server: extras are mostly Test-scoped (sttp client used by multi-JVM tests).
lazy val standaloneDeps = Seq(
  logbackDep,
  "io.kamon" %% "kamon-zipkin" % "1.0.0",
  "net.ceedubs" %% "ficus" % ficusVersion % Test,
  "com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion % Test,
  "com.softwaremill.sttp" %% "circe" % sttpVersion % Test,
  "com.softwaremill.sttp" %% "akka-http-backend" % sttpVersion % Test,
  "com.softwaremill.sttp" %% "core" % sttpVersion % Test,
  "com.typesafe.akka" %% "akka-stream" % "2.5.11" % Test
)
lazy val bootstrapperDeps = Seq(
  logbackDep,
  scalaLoggingDep,
  "com.typesafe.akka" %% "akka-cluster" % akkaVersion,
  // akka http should be a compile time dependency only. Users of this library may want to use a different http server
  akkaHttp % "test; provided",
  akkaHttpCirce % "test; provided",
  circeGeneric % "test; provided",
  circeParser % "test; provided",
  "com.typesafe.akka" %% "akka-slf4j" % akkaVersion,
  "dnsjava" % "dnsjava" % "2.1.8",
  "org.scalaj" %% "scalaj-http" % "2.3.0",
  "com.typesafe.akka" %% "akka-testkit" % akkaVersion % Test,
  "com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion % Test,
  scalaTest % Test
)
// Disabled along with the spark module above.
// lazy val sparkDeps = Seq(
//   // We don't want LOG4J. We want Logback! The excludeZK is to help with a conflict re Coursier plugin.
//   "org.apache.spark" %% "spark-hive" % sparkVersion % "provided" excludeAll(excludeSlf4jLog4j, excludeZK),
//   "org.apache.spark" %% "spark-hive-thriftserver" % sparkVersion % "provided" excludeAll(excludeSlf4jLog4j, excludeZK),
//   "org.apache.spark" %% "spark-streaming" % sparkVersion % "provided",
//   scalaTest % "it"
// )
lazy val jmhDeps = Seq(
  scalaxyDep,
  "org.apache.spark" %% "spark-sql" % sparkVersion excludeAll(excludeSlf4jLog4j, excludeZK, excludeJersey)
)
// Disabled along with the stress module above.
// lazy val stressDeps = Seq(
//   "com.databricks" %% "spark-csv" % "1.3.0",
//   scalaxyDep,
//   "org.apache.spark" %% "spark-sql" % sparkVersion % "provided" excludeAll(excludeZK),
//   "org.apache.spark" %% "spark-streaming" % sparkVersion % "provided" excludeAll(excludeZK)
// )
}
| velvia/FiloDB | project/FiloBuild.scala | Scala | apache-2.0 | 13,983 |
import stainless.lang._
import stainless.proof._
import stainless.annotation._
// NOTE(review): the repo path ("verification/invalid") suggests this is a
// deliberately *invalid* Stainless benchmark — do not "fix" the law violation.
object Laws1 {
  // Base contract: the @law requires every implementation's value to be non-zero.
  abstract class A {
    def value: BigInt
    @law
    def lawNotZero: Boolean = {
      value != 0
    }
  }
  // Overrides the law with a contradictory body (value == 0), which cannot
  // imply the inherited law — Stainless is expected to reject this override.
  abstract class B extends A {
    override def lawNotZero: Boolean = {
      value == 0
    }
  }
  // Concrete instance whose value (0) violates A's original law.
  case class C() extends B {
    def value = 0
  }
}
| epfl-lara/stainless | frontends/benchmarks/verification/invalid/Laws1.scala | Scala | apache-2.0 | 363 |
package org.jetbrains.plugins.scala
package annotator
import org.intellij.lang.annotations.Language
import org.jetbrains.plugins.scala.base.SimpleTestCase
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScMethodCall
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass
import org.jetbrains.plugins.scala.lang.psi.types.Compatibility
/**
* Created by kate on 3/24/16.
*/
trait ApplicationAnnotatorTestBase extends SimpleTestCase {
  final val Header = """
  class Seq[+A]
  object Seq { def apply[A](a: A) = new Seq[A] }
  class A; class B;
  object A extends A; object B extends B
  """

  /**
   * Parses `Header + code`, runs the application annotator over every
   * reference element and method call in the parsed file, and returns the
   * collected annotation messages. `Compatibility.seqClass` is pointed at
   * the stub `Seq` class from `Header` for the duration of the run and is
   * always reset afterwards.
   */
  def messages(@Language(value = "Scala", prefix = Header) code: String): List[Message] = {
    val annotator = new ApplicationAnnotator() {}
    val psiFile = (Header + code).parse
    val holder = new AnnotatorHolderMock(psiFile)
    Compatibility.seqClass = psiFile.depthFirst().findByType[ScClass]
    try {
      psiFile.depthFirst().filterByType[ScReferenceElement].foreach { reference =>
        annotator.annotateReference(reference, holder)
      }
      psiFile.depthFirst().filterByType[ScMethodCall].foreach { call =>
        annotator.annotateMethodInvocation(call, holder)
      }
      holder.annotations
    } finally {
      Compatibility.seqClass = None
    }
  }
}
| loskutov/intellij-scala | test/org/jetbrains/plugins/scala/annotator/ApplicationAnnotatorTestBase.scala | Scala | apache-2.0 | 1,414 |
/*
* Applied Processing
* Copyright 2014 yueh
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package de.orod.minecraft
package appliedprocessing
package utility
import org.apache.logging.log4j.Level
import cpw.mods.fml.common.FMLLog
import reference._
/** Thin convenience wrapper that routes log messages to FML's logger under this mod's name. */
object LogHelper {
  /** Stringifies `o` (null-safe via String.valueOf) and logs it at `logLevel`. */
  def log(logLevel: Level, o: Object) =
    FMLLog.log(Reference.MOD_NAME, logLevel, String.valueOf(o))

  // One shortcut per log4j level, all delegating to log() above.
  def all(o: Object) = log(Level.ALL, o)
  def debug(o: Object) = log(Level.DEBUG, o)
  def error(o: Object) = log(Level.ERROR, o)
  def fatal(o: Object) = log(Level.FATAL, o)
  def info(o: Object) = log(Level.INFO, o)
  def off(o: Object) = log(Level.OFF, o)
  def trace(o: Object) = log(Level.TRACE, o)
  def warn(o: Object) = log(Level.WARN, o)
}
| Mazdallier/AppliedProcessing | src/main/scala/de/orod/minecraft/appliedprocessing/utility/LogHelper.scala | Scala | gpl-2.0 | 1,340 |
package io.hydrosphere.mist.master.interfaces
import org.scalatest.{FunSpec, Matchers}
import org.scalatest.prop.TableDrivenPropertyChecks._
import spray.json._
import scala.language.postfixOps
class AnyJsonTest extends FunSpec with DefaultJsonProtocol with AnyJsonFormat with Matchers {

  // Each row pairs a spray-json value with the plain Scala value it maps to.
  val expected = Table[JsValue, Any](
    ("in", "out"),
    (JsNumber(5), 5),
    (JsNumber(5.5), 5.5),
    (JsNumber(12147483647L), 12147483647L),
    (JsString("str"), "str"),
    (JsTrue, true),
    (JsFalse, false),
    (JsArray(JsString("str"), JsTrue, JsNumber(42)), Seq("str", true, 42)),
    (JsObject("a" -> JsString("b")), Map("a" -> "b"))
  )

  it("should read") {
    // Deserialisation: every JSON value produces the expected Scala value.
    forAll(expected) { (json, value) =>
      AnyFormat.read(json) shouldBe value
    }
  }

  it("should write") {
    // Serialisation: every Scala value produces the expected JSON value.
    forAll(expected) { (json, value) =>
      AnyFormat.write(value) shouldBe json
    }
  }
}
| Hydrospheredata/mist | mist/master/src/test/scala/io/hydrosphere/mist/master/interfaces/AnyJsonTest.scala | Scala | apache-2.0 | 865 |
package proofpeer.proofscript.automation
import proofpeer.proofscript.logic._
import scala.language.implicitConversions
import scalaz.{ Name => _, _}
import Scalaz._
object KernelInstances {
  // Structural equality for terms, delegating to Term's own ==.
  implicit object TermIsEqual extends Equal[Term] {
    override def equal(l: Term, r: Term) = l == r
  }
  // Order namespaces by absoluteness first, then lexicographically by components.
  implicit object NamespaceIsOrdered extends Order[Namespace] {
    override def equal(l: Namespace, r: Namespace) = l == r
    override def order(l: Namespace, r: Namespace) =
      (l.isAbsolute ?|? r.isAbsolute) |+| (l.components ?|? r.components)
  }
  // Order indexed names by base name, breaking ties on the numeric index.
  implicit object IndexedNameIsOrdered extends Order[IndexedName] {
    override def equal(l: IndexedName, r: IndexedName) = l == r
    override def order(l: IndexedName, r: IndexedName) = {
      (l.name ?|? r.name) |+| (l.index ?|? r.index)
    }
  }
  // Order fully-qualified names by namespace, then by the name itself.
  implicit object NameIsOrdered extends Order[Name] {
    override def equal(l: Name, r: Name) = l == r
    override def order(l: Name, r: Name) =
      (l.namespace ?|? r.namespace) |+| (l.name ?|? r.name)
  }
  // TODO: Temporarily poor show implementation
  // Renders the base name immediately followed by its index (no separator).
  implicit object IndexedNameIsShow extends Show[IndexedName] {
    override def show(n: IndexedName) = n.name.shows ++ n.index.shows
  }
  // NOTE(review): drops the namespace entirely — only the local name is shown.
  implicit object NameIsShow extends Show[Name] {
    override def show(n: Name) = n.name.show
  }
  // Wraps a Cord in parentheses; used by the Show instances below.
  def bracket(str: Cord) = Cord("(") ++ str ++ Cord(")")
  // Total order over types: Universe < Prop < Fun, with Fun compared
  // structurally (domain first, then codomain).
  implicit object TypeIsOrdered extends Order[Type] {
    override def equal(l: Type, r: Type) = l == r
    override def order(l: Type, r: Type) =
      (l,r) match {
        case (Type.Universe, Type.Universe) => Ordering.EQ
        case (Type.Universe, _) => Ordering.LT
        case (_, Type.Universe) => Ordering.GT
        case (Type.Prop, Type.Prop) => Ordering.EQ
        case (Type.Prop, _) => Ordering.LT
        case (_, Type.Prop) => Ordering.GT
        case (Type.Fun(ldom,lcodom), Type.Fun(rdom,rcodom)) =>
          (ldom ?|? rdom) |+| (lcodom ?|? rcodom)
        case (Type.Fun(_,_),_) => Ordering.LT
        case (_, Type.Fun(_,_)) => Ordering.GT
      }
  }
  // Total order over terms: PolyConst < Const < Comb < Abs < Var, with each
  // constructor compared componentwise (left component first).
  implicit object TermIsOrdered extends Order[Term] {
    override def equal(l: Term, r: Term) = l == r
    override def order(l: Term, r: Term): Ordering = {
      import Term._
      (l,r) match {
        case (PolyConst(m,ty1), PolyConst(n,ty2)) =>
          (m ?|? n) |+| (ty1 ?|? ty2)
        case (PolyConst(_,_), _) => Ordering.LT
        case (_, PolyConst(_,_)) => Ordering.GT
        case (Const(n1),Const(n2)) => n1 ?|? n2
        case (Const(_),_) => Ordering.LT
        case (_,Const(_)) => Ordering.GT
        case (Comb(f1,x1), Comb(f2,x2)) => (f1 ?|? f2) |+| (x1 ?|? x2)
        case (Comb(_,_), _) => Ordering.LT
        case (_,Comb(_,_)) => Ordering.GT
        case (Abs(n1,ty1,body1), Abs(n2,ty2,body2)) =>
          (n1 ?|? n2) |+| (ty1 ?|? ty2) |+| (body1 ?|? body2)
        case (Abs(_,_,_), _) => Ordering.LT
        case (_, Abs(_,_,_)) => Ordering.GT
        case (Var(n1), Var(n2)) => n1 ?|? n2
        case (Var(_), _) => Ordering.LT
        case (_, Var(_)) => Ordering.GT
      }
    }
  }
  // Pretty-prints types with Unicode glyphs; function types are parenthesised.
  implicit object TypeIsShow extends Show[Type] {
    override def show(ty: Type) =
      ty match {
        case Type.Universe => "𝒰"
        case Type.Prop => "ℙ"
        case Type.Fun(dom,codom) => bracket(dom.show ++ Cord("→") ++ codom.show)
      }
  }
  // Pretty-prints terms: vars as name+index, typed binders as "name:ty",
  // abstraction with ↦, application as "(rator) (rand)".
  implicit object TermIsShow extends Show[Term] {
    override def show(t: Term) =
      t match {
        case Term.Var(IndexedName(name, index)) =>
          name.show ++ index.show
        case Term.PolyConst(name,ty) =>
          name.show ++ Cord(":") ++ ty.show
        case Term.Const(name) => name.shows
        case Term.Abs(name,ty,body) =>
          name.show ++ Cord(":") ++ ty.show ++ Cord(" ↦ ") ++ body.show
        case Term.Comb(rator,rand) =>
          bracket(rator.show) ++ Cord(" ") ++ bracket(rand.show)
      }
  }
}
| proofpeer/proofpeer-proofscript | shared/src/main/scala/proofpeer/proofscript/automation/Instances.scala | Scala | mit | 4,400 |
package ru.tmtool.commons.priority
import junit.framework.TestCase
import org.junit.Test
import org.scalatest._
/**
* User: Sergey Kozlov skozlov@poidem.ru
* Date: 12.08.2014
* Time: 16:49
*/
class PrioritiesSeqTest extends TestCase with MustMatchers {

  /** Builds an anonymous Priority stub with the given characteristics. */
  private def priorityOf(min: Int, count: Int, normalized: Int): Priority =
    new Priority {
      override val minPriority: Int = min
      override val prioritiesCount: Int = count
      override val normalizedPriority: Int = normalized
    }

  /** Builds a PrioritiesSeq backed by the given priorities. */
  private def seqOf(ps: Priority*): PrioritiesSeq =
    new PrioritiesSeq {
      override protected val priorities: Seq[Priority] = ps.toList
    }

  @Test
  def testEmptySeq(): Unit = {
    // An empty sequence contributes neither a count nor a priority.
    val empty = seqOf()
    empty.prioritiesCount mustBe 0
    empty.priority mustBe 0
  }

  @Test
  def testOnePriority(): Unit = {
    // A single priority passes its count and normalized value straight through.
    val single = seqOf(priorityOf(min = 3, count = 10, normalized = 5))
    single.prioritiesCount mustBe 10
    single.priority mustBe 5
  }

  @Test
  def testSeveralPriorities(): Unit = {
    // Two priorities combine multiplicatively (10 * 9 = 90) into one value.
    val combined = seqOf(
      priorityOf(min = 3, count = 10, normalized = 5),
      priorityOf(min = 2, count = 9, normalized = 8)
    )
    combined.prioritiesCount mustBe 90
    combined.priority mustBe 53
  }
}
} | tmtool/commons | src/test/scala/ru/tmtool/commons/priority/PrioritiesSeqTest.scala | Scala | mit | 1,346 |
package cromwell.api.model
import spray.json.{DefaultJsonProtocol, JsNumber, JsValue, RootJsonFormat}
/** Wraps an optional shard index; absence is rendered as the sentinel -1. */
case class ShardIndex(index: Option[Int]) extends AnyVal {
  override def toString: String = index match {
    case Some(i) => i.toString
    case None => "-1"
  }
}
/** spray-json format for [[ShardIndex]]; `None` round-trips through the sentinel -1. */
object ShardIndexFormatter extends DefaultJsonProtocol {
  implicit object ShardIndexJsonFormat extends RootJsonFormat[ShardIndex] {
    // A missing index is serialised as -1.
    def write(si: ShardIndex) = JsNumber(si.index.getOrElse(-1))
    def read(value: JsValue) = value match {
      case JsNumber(i) if i.equals(-1) => ShardIndex(None)
      // Guard is >= 0 (was > 0): shard index 0 is a value `write` can emit,
      // so rejecting it broke write/read round-tripping for the first shard.
      case JsNumber(i) if i.isValidInt && i.intValue >= 0 => ShardIndex(Option(i.intValue()))
      case other => throw new UnsupportedOperationException(s"Cannot deserialize $other into a ShardIndex")
    }
  }
}
| ohsu-comp-bio/cromwell | cromwellApiClient/src/main/scala/cromwell/api/model/ShardIndex.scala | Scala | bsd-3-clause | 744 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.executor
import java.util.concurrent.ThreadPoolExecutor
import scala.collection.JavaConverters._
import com.codahale.metrics.{Gauge, MetricRegistry}
import org.apache.hadoop.fs.FileSystem
import org.apache.spark.metrics.source.Source
// Dropwizard metrics Source exposing one executor's thread-pool state,
// Hadoop filesystem statistics, and per-task counters.
private[spark]
class ExecutorSource(threadPool: ThreadPoolExecutor, executorId: String) extends Source {
  // Finds the Hadoop FileSystem.Statistics entry for the given scheme, if any.
  private def fileStats(scheme: String) : Option[FileSystem.Statistics] =
    FileSystem.getAllStatistics.asScala.find(s => s.getScheme.equals(scheme))
  // Registers a gauge that reads one statistic for the scheme's filesystem,
  // falling back to `defaultValue` when no statistics exist for that scheme.
  private def registerFileSystemStat[T](
        scheme: String, name: String, f: FileSystem.Statistics => T, defaultValue: T) = {
    metricRegistry.register(MetricRegistry.name("filesystem", scheme, name), new Gauge[T] {
      override def getValue: T = fileStats(scheme).map(f).getOrElse(defaultValue)
    })
  }
  override val metricRegistry = new MetricRegistry()
  override val sourceName = "executor"
  // Gauge for executor thread pool's actively executing task counts
  metricRegistry.register(MetricRegistry.name("threadpool", "activeTasks"), new Gauge[Int] {
    override def getValue: Int = threadPool.getActiveCount()
  })
  // Gauge for executor thread pool's approximate total number of tasks that have been completed
  metricRegistry.register(MetricRegistry.name("threadpool", "completeTasks"), new Gauge[Long] {
    override def getValue: Long = threadPool.getCompletedTaskCount()
  })
  // Gauge for executor, number of tasks started
  metricRegistry.register(MetricRegistry.name("threadpool", "startedTasks"), new Gauge[Long] {
    override def getValue: Long = threadPool.getTaskCount()
  })
  // Gauge for executor thread pool's current number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "currentPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getPoolSize()
  })
  // Gauge for executor thread pool's largest number of threads that have ever simultaneously
  // been in the pool
  metricRegistry.register(MetricRegistry.name("threadpool", "maxPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getMaximumPoolSize()
  })
  // Gauge for file system stats of this executor
  for (scheme <- Array("hdfs", "file")) {
    registerFileSystemStat(scheme, "read_bytes", _.getBytesRead(), 0L)
    registerFileSystemStat(scheme, "write_bytes", _.getBytesWritten(), 0L)
    registerFileSystemStat(scheme, "read_ops", _.getReadOps(), 0)
    registerFileSystemStat(scheme, "largeRead_ops", _.getLargeReadOps(), 0)
    registerFileSystemStat(scheme, "write_ops", _.getWriteOps(), 0)
  }
  // Expose executor task metrics using the Dropwizard metrics system.
  // The list of available Task metrics can be found in TaskMetrics.scala
  val SUCCEEDED_TASKS = metricRegistry.counter(MetricRegistry.name("succeededTasks"))
  val METRIC_CPU_TIME = metricRegistry.counter(MetricRegistry.name("cpuTime"))
  val METRIC_RUN_TIME = metricRegistry.counter(MetricRegistry.name("runTime"))
  val METRIC_JVM_GC_TIME = metricRegistry.counter(MetricRegistry.name("jvmGCTime"))
  val METRIC_DESERIALIZE_TIME =
    metricRegistry.counter(MetricRegistry.name("deserializeTime"))
  val METRIC_DESERIALIZE_CPU_TIME =
    metricRegistry.counter(MetricRegistry.name("deserializeCpuTime"))
  val METRIC_RESULT_SERIALIZE_TIME =
    metricRegistry.counter(MetricRegistry.name("resultSerializationTime"))
  val METRIC_SHUFFLE_FETCH_WAIT_TIME =
    metricRegistry.counter(MetricRegistry.name("shuffleFetchWaitTime"))
  val METRIC_SHUFFLE_WRITE_TIME =
    metricRegistry.counter(MetricRegistry.name("shuffleWriteTime"))
  val METRIC_SHUFFLE_TOTAL_BYTES_READ =
    metricRegistry.counter(MetricRegistry.name("shuffleTotalBytesRead"))
  val METRIC_SHUFFLE_REMOTE_BYTES_READ =
    metricRegistry.counter(MetricRegistry.name("shuffleRemoteBytesRead"))
  val METRIC_SHUFFLE_REMOTE_BYTES_READ_TO_DISK =
    metricRegistry.counter(MetricRegistry.name("shuffleRemoteBytesReadToDisk"))
  val METRIC_SHUFFLE_LOCAL_BYTES_READ =
    metricRegistry.counter(MetricRegistry.name("shuffleLocalBytesRead"))
  val METRIC_SHUFFLE_RECORDS_READ =
    metricRegistry.counter(MetricRegistry.name("shuffleRecordsRead"))
  val METRIC_SHUFFLE_REMOTE_BLOCKS_FETCHED =
    metricRegistry.counter(MetricRegistry.name("shuffleRemoteBlocksFetched"))
  val METRIC_SHUFFLE_LOCAL_BLOCKS_FETCHED =
    metricRegistry.counter(MetricRegistry.name("shuffleLocalBlocksFetched"))
  val METRIC_SHUFFLE_BYTES_WRITTEN =
    metricRegistry.counter(MetricRegistry.name("shuffleBytesWritten"))
  val METRIC_SHUFFLE_RECORDS_WRITTEN =
    metricRegistry.counter(MetricRegistry.name("shuffleRecordsWritten"))
  val METRIC_INPUT_BYTES_READ =
    metricRegistry.counter(MetricRegistry.name("bytesRead"))
  val METRIC_INPUT_RECORDS_READ =
    metricRegistry.counter(MetricRegistry.name("recordsRead"))
  val METRIC_OUTPUT_BYTES_WRITTEN =
    metricRegistry.counter(MetricRegistry.name("bytesWritten"))
  val METRIC_OUTPUT_RECORDS_WRITTEN =
    metricRegistry.counter(MetricRegistry.name("recordsWritten"))
  val METRIC_RESULT_SIZE =
    metricRegistry.counter(MetricRegistry.name("resultSize"))
  val METRIC_DISK_BYTES_SPILLED =
    metricRegistry.counter(MetricRegistry.name("diskBytesSpilled"))
  val METRIC_MEMORY_BYTES_SPILLED =
    metricRegistry.counter(MetricRegistry.name("memoryBytesSpilled"))
}
| pgandhi999/spark | core/src/main/scala/org/apache/spark/executor/ExecutorSource.scala | Scala | apache-2.0 | 6,138 |
import leon.annotation._
import leon.lang._
object DaysToYears {
  // Epoch year from which day counts are measured (day 1 = Jan 1, 1980).
  val base : Int = 1980

  // NOTE(review): simplified Gregorian rule — ignores the 100/400-year
  // exceptions, so it is only accurate for years before 2100.
  def isLeapYear(y : Int): Boolean = y % 4 == 0

  /** Converts a positive day count since `base` into the year it falls in. */
  def daysToYears(days : Int): Int = {
    require(days > 0)
    daysToYears1(base, days)._1
  }

  /** Peels whole years off `days`, returning (year, day-within-that-year). */
  def daysToYears1(year : Int, days : Int): (Int, Int) = {
    require(year >= base && days > 0)
    if (days > 366 && isLeapYear(year))
      daysToYears1(year + 1, days - 366)
    else if (days > 365 && !isLeapYear(year))
      daysToYears1(year + 1, days - 365) // fixed: advance the year here too (was missing "+ 1")
    else (year, days)
  } ensuring { res =>
    res._2 <= 366 &&
    res._2 > 0 &&
    res._1 >= base &&
    (((year,days), res) passes {
      case (1999, 14 ) => (1999, 14)
      case (1980, 366) => (1980, 366)
      case (1981, 366) => (1982, 1)
    })
  }

  @ignore
  def main(args : Array[String]) = {
    println(daysToYears1(base, 10593 ))
    println(daysToYears1(base, 366 ))
    println(daysToYears1(base, 1000 ))
  }
}
| epfl-lara/leon | testcases/repair/DaysToYears/DaysToYears1.scala | Scala | gpl-3.0 | 956 |
/**
* Copyright (C) 2014 TU Berlin (peel@dima.tu-berlin.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.peelframework.core.results.model
/** Model class for experiment runs.
  *
  * Captures information for a single [[org.peelframework.core.beans.experiment.Experiment.Run Experiment.Run]].
  *
  * @param experimentID The ID of the parent [[org.peelframework.core.results.model.Experiment Experiment]].
  * @param run The run number.
  * @param exit The exit status recorded for this run (0 presumably means success — confirm against the runner).
  * @param time The wall clock time for this run (in milliseconds).
  */
case class ExperimentRun(
  experimentID: Int,
  run: Int,
  exit: Int,
  time: Long
) {
  // Synthetic primary key derived from (experimentID, run); mirrors the
  // `id` column persisted by the companion below.
  val id = experimentID.## * 31 + run
}
/** [[ExperimentRun]] companion and storage manager. */
object ExperimentRun extends PersistedAPI[ExperimentRun] {
  import java.sql.Connection
  import anorm.SqlParser._
  import anorm._
  override val tableName = "experiment_run"
  // Maps a result-set row onto an ExperimentRun; the stored `id` column is
  // ignored here because the case class re-derives it from (experiment_id, run).
  override val rowParser = {
    get[Int] ("id") ~
    get[Int] ("experiment_id") ~
    get[Int] ("run") ~
    get[Int] ("exit") ~
    get[Long] ("time") map {
      case id ~ experiment_id ~ run ~ exit ~ time => ExperimentRun(experiment_id, run, exit, time)
    }
  }
  // Creates the table on first use; rows are removed automatically when the
  // parent experiment is deleted (ON DELETE CASCADE).
  override def createTable()(implicit conn: Connection): Unit = if (!tableExists) {
    SQL( s"""
    CREATE TABLE experiment_run (
      id INTEGER NOT NULL,
      experiment_id INTEGER NOT NULL,
      run INTEGER NOT NULL,
      exit INTEGER NOT NULL,
      time BIGINT NOT NULL,
      PRIMARY KEY (id),
      FOREIGN KEY (experiment_id) REFERENCES experiment(id) ON DELETE CASCADE
    )""").execute()
  }
  override def insert(x: ExperimentRun)(implicit conn: Connection): Unit = {
    SQL"""
    INSERT INTO experiment_run(id, experiment_id, run, exit, time) VALUES(
      ${x.id},
      ${x.experimentID},
      ${x.run},
      ${x.exit},
      ${x.time}
    )
    """.executeInsert()
  }
  // Updates all mutable columns for the row identified by x.id.
  override def update(x: ExperimentRun)(implicit conn: Connection): Unit = {
    SQL"""
    UPDATE experiment_run SET
      experiment_id = ${x.experimentID},
      run = ${x.run},
      exit = ${x.exit},
      time = ${x.time}
    WHERE
      id = ${x.id}
    """.executeUpdate()
  }
  override def delete(x: ExperimentRun)(implicit conn: Connection): Unit = {
    SQL"""
    DELETE FROM experiment_run WHERE id = ${x.id}
    """.execute()
  }
}
| akunft/peel | peel-core/src/main/scala/org/peelframework/core/results/model/ExperimentRun.scala | Scala | apache-2.0 | 2,973 |
package com.twitter.finagle.stats
/**
* A RollupStatsReceiver reports stats on multiple Counter/Stat/Gauge based on the sequence of
* names you pass.
* e.g.
* counter("errors", "clientErrors", "java_net_ConnectException").incr()
* will actually increment those three counters:
* - "/errors"
* - "/errors/clientErrors"
* - "/errors/clientErrors/java_net_ConnectException"
*/
class RollupStatsReceiver(val self: StatsReceiver)
  extends StatsReceiver with DelegatingStatsReceiver with Proxy
{
  val repr = self.repr
  // Despite its name, this returns the non-empty *prefixes* of `s`,
  // e.g. Seq(a, b, c) => Seq(Seq(a), Seq(a, b), Seq(a, b, c)).
  // NOTE(review): an empty `s` hits neither case and raises MatchError, so
  // every counter/stat/gauge call below requires at least one name.
  private[this] def tails[A](s: Seq[A]): Seq[Seq[A]] = {
    s match {
      case s@Seq(_) =>
        Seq(s)
      case Seq(hd, tl@_*) =>
        Seq(Seq(hd)) ++ (tails(tl) map { t => Seq(hd) ++ t })
    }
  }
  override def toString(): String = self.toString
  // A counter whose incr() fans out to one underlying counter per name prefix.
  def counter(names: String*): Counter = new Counter {
    private[this] val allCounters = BroadcastCounter(
      tails(names) map (self.counter(_: _*))
    )
    def incr(delta: Int) = allCounters.incr(delta)
  }
  // A stat whose add() fans out to one underlying stat per name prefix.
  def stat(names: String*): Stat = new Stat {
    private[this] val allStats = BroadcastStat(
      tails(names) map (self.stat(_: _*))
    )
    def add(value: Float) = allStats.add(value)
  }
  // Registers one gauge per name prefix; remove() detaches all of them.
  def addGauge(names: String*)(f: => Float): Gauge = new Gauge {
    private[this] val underlying = tails(names) map { self.addGauge(_: _*)(f) }
    def remove() = underlying foreach { _.remove() }
  }
  def underlying: Seq[StatsReceiver] = Seq(self)
}
| BuoyantIO/twitter-util | util-stats/src/main/scala/com/twitter/finagle/stats/RollupStatsReceiver.scala | Scala | apache-2.0 | 1,453 |
package org.crudible.lift.binding.util
import org.crudible.core.table.Table
import org.crudible.core.table.TableRepository
import org.crudible.core.table.TableRowModel
import org.crudible.core.table.action.OnEntity
import org.crudible.core.table.action.OnInstance
import org.crudible.lift.ui.ConfirmDialog
import net.liftweb.http.js.JsCmds
import org.crudible.lift.util.EasyJS.jsCmds
import org.crudible.lift.util.EasyJS
import org.crudible.lift.util.LabelCallback
import org.crudible.core.binding.Form
import org.crudible.core.validation.ValidationContext
import org.crudible.lift.markup.LiftTableMarkupFactory
import org.crudible.core.util.DataHelpers
import net.liftweb.http.js.JsCmd
import scala.xml.Text
// Builds the standard table actions (create / edit / delete), each opening a
// dialog through the supplied ActionDialogFactory.
class ActionFactory(factory: ActionDialogFactory) {
  // Entity-level "Create" action: opens a modal on a freshly created row.
  def defaultCreate(form: Form) = {
    def callback(table: Table) = factory.modal("Create", table, form, table.create())
    OnEntity().label("Create").onInvoke(callback _)
  }
  // Row-level "Edit" action without any model transformation.
  def defaultEdit(form: Form) = {
    defaultEditTransformed(form, m => m)
  }
  // Row-level "Edit" action that applies `transform` to the looked-up model
  // before showing the modal.
  // NOTE(review): findByID(id).get throws if the id is unknown — confirm ids
  // passed by the table are always valid.
  def defaultEditTransformed(form: Form, transform: TableRowModel => TableRowModel) = {
    def callback(table: Table, id: String) = factory.modal("Edit", table, form, transform(table.findByID(id).get))
    OnInstance().label("Edit").onInvoke(callback _)
  }
  // Row-level "Edit" action that redirects to an edit page instead of a modal.
  def defaultEditRedirect(createURL: String => String) = {
    def redirectToEdit(table: Table, modelID: String) = EasyJS.prependJs(EasyJS.redirect(createURL(modelID)))
    OnInstance().label("Edit").onInvoke(redirectToEdit)
  }
  // Row-level "Delete" action guarded by a confirmation dialog.
  def defaultDelete(form: Form) = {
    def callback(table: Table, id: String) = factory.confirm("Delete", table, form, table.findByID(id).get)
    OnInstance().label("Delete").onInvoke(callback _)
  }
}
| rehei/crudible | crudible-lift/src/main/scala/org/crudible/lift/binding/util/ActionFactory.scala | Scala | apache-2.0 | 1,719 |
package com.goticks
import scala.concurrent.Future
import scala.util.{Failure, Success}
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging
import akka.util.Timeout
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.typesafe.config.{ Config, ConfigFactory }
// Application entry point: boots the actor system and serves the REST API over HTTP.
object Main extends App
    with RequestTimeout {
  val config = ConfigFactory.load()
  val host = config.getString("http.host") // read the host and port from the configuration
  val port = config.getInt("http.port")
  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher // bindAndHandle is asynchronous and needs an implicit ExecutionContext
  val api = new RestApi(system, requestTimeout(config)).routes // RestApi provides the HTTP routes
  implicit val materializer = ActorMaterializer()
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port) // start the HTTP server with the RestApi routes
  val log = Logging(system.eventStream, "go-ticks")
  bindingFuture.map { serverBinding =>
    log.info(s"RestApi bound to ${serverBinding.localAddress} ")
  }.onComplete {
    case Success(v) => // success is already reported by the map above
    case Failure(ex) =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}
/** Mixin that derives the Akka request timeout from application configuration. */
trait RequestTimeout {
  import scala.concurrent.duration._

  /**
   * Parses `akka.http.server.request-timeout` from `config` (e.g. "20s") and
   * converts it into an Akka [[akka.util.Timeout]].
   */
  def requestTimeout(config: Config): Timeout = {
    val parsed = Duration(config.getString("akka.http.server.request-timeout"))
    FiniteDuration(parsed.length, parsed.unit)
  }
}
| gilbutITbook/006877 | chapter-up-and-running/src/main/scala/com/goticks/Main.scala | Scala | mit | 1,670 |
package net.caoticode.synergy
// Empty placeholder class with no members. Presumably a stub for future worker
// implementations; note it is not declared `abstract` despite its name — TODO
// confirm intent before adding state or subclasses.
class AbstractWorker {
} | mdread/synergy | src/main/scala/net/caoticode/synergy/AbstractWorker.scala | Scala | mit | 60 |
package jp.co.cyberagent.aeromock.template.jade4j
import jp.co.cyberagent.aeromock.AeromockConfigurationException
import jp.co.cyberagent.aeromock.config.Project
import jp.co.cyberagent.aeromock.core.script.GroovyScriptRunner
import de.neuland.jade4j.JadeConfiguration
import de.neuland.jade4j.template.FileTemplateLoader
import groovy.lang.{Binding, GroovyShell}
import jp.co.cyberagent.aeromock.helper._
import scalaz._
import Scalaz._
import scala.collection.JavaConverters._
/**
 * Factory to create [[de.neuland.jade4j.JadeConfiguration]].
* @author stormcat24
*/
object JadeConfigurationFactory {
  /**
   * Create a [[de.neuland.jade4j.JadeConfiguration]] for the given project.
   *
   * Wires up the template loader, optional mode / pretty-print settings,
   * Groovy "function" scripts exposed as shared template variables, and an
   * optional hook script that may further customize the configuration.
   *
   * @param project project settings supplying template root, function scripts and hook script
   * @param config [[Jade4jConfig]] with optional mode and pretty-print flags
   * @return fully configured [[de.neuland.jade4j.JadeConfiguration]]
   * @throws AeromockConfigurationException if the project's function configuration failed validation
   */
  def create(project: Project, config: Jade4jConfig): JadeConfiguration = {
    val configuration = new JadeConfiguration
    // directory path must finish slash (FileTemplateLoader requirement).
    val templateLoader = new FileTemplateLoader(project._template.root.toString + "/", "UTF-8")
    configuration.setTemplateLoader(templateLoader)
    // mode / prettyPrint look like optional values applied only when present —
    // `.map` is used purely for its side effect here.
    config.mode.map(v => configuration.setMode(v))
    config.prettyPrint.map(v => configuration.setPrettyPrint(v))
    // Resolve the function-script root; a validation failure aborts with a
    // configuration exception. The catch-all covers Success(None) (no functions).
    val functionRootDir = project.function match {
      case Success(Some(value)) => value.root.some
      case Failure(errors) => throw new AeromockConfigurationException(project.projectConfig, errors)
      case _ => None
    }
    functionRootDir match {
      case None =>
      case Some(functionRoot) => {
        // Evaluate each *.groovy script once and expose its result to templates
        // under the script's base name (e.g. "foo.groovy" -> "foo").
        val functionMap = (functionRoot.getChildren().map { script =>
          val scriptName = script.getFileName().toString()
          val shell = new GroovyShell()
          (scriptName.replace(".groovy", "") -> shell.run(script.toAbsolutePath.toFile(), Array.empty[String]))
        }).toMap[String, AnyRef]
        configuration.setSharedVariables(functionMap.asJava)
      }
    }
    // hook script: lets the project mutate `configuration` via Groovy before use.
    if (project.templateScript.exists()) {
      val binding = new Binding()
      binding.setProperty("configuration", configuration)
      new GroovyScriptRunner[Unit](project.templateScript).run(binding)
    }
    configuration
  }
}
| CyberAgent/aeromock | aeromock-jade4j/src/main/scala/jp/co/cyberagent/aeromock/template/jade4j/JadeConfigurationFactory.scala | Scala | mit | 2,156 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dsa.mediator.smithwaterman.adam
import org.dsa.mediator.smithwaterman.SmithWaterman
/**
 * Smith-Waterman alignment whose substitution/gap scores come from a caller
 * supplied function.
 *
 * `scoreFn(i, j, xChar, yChar)` scores aligning position i of x against
 * position j of y; a '_' character on either side denotes a gap.
 */
abstract class SmithWatermanGapScoringFromFn(xSequence: String,
                                             ySequence: String,
                                             scoreFn: (Int, Int, Char, Char) => Double)
    extends SmithWaterman(xSequence, ySequence) {

  /**
   * Builds the (|x|+1) x (|y|+1) score and traceback matrices.
   *
   * Moves: 'B' diagonal (match/mismatch), 'J' up (gap in y), 'I' left (gap in
   * x), 'T' terminate. Scores are truncated to Int when stored.
   *
   * @return (bestScore, scoreMatrix, moveMatrix); the Double slot is not
   *         computed by this method and is always 0.0.
   */
  def buildScoringMatrix(): (Double, Array[Array[Int]], Array[Array[Char]]) = {
    val rows = xSequence.length
    val cols = ySequence.length

    // Int cells default to 0, which is exactly the local-alignment boundary value.
    val scores = Array.ofDim[Int](rows + 1, cols + 1)
    val moves = Array.ofDim[Char](rows + 1, cols + 1)

    // First row and column terminate any traceback.
    for (i <- 0 to rows) moves(i)(0) = 'T'
    for (j <- 0 to cols) moves(0)(j) = 'T'

    for (i <- 1 to rows; j <- 1 to cols) {
      val diagScore = scores(i - 1)(j - 1) + scoreFn(i, j, xSequence(i - 1), ySequence(j - 1))
      val upScore = scores(i - 1)(j) + scoreFn(i, j, xSequence(i - 1), '_')
      val leftScore = scores(i)(j - 1) + scoreFn(i, j, '_', ySequence(j - 1))

      // Ties favor the diagonal move, then the up move, matching classic SW.
      if (diagScore >= upScore && diagScore >= leftScore && diagScore > 0) {
        scores(i)(j) = diagScore.toInt
        moves(i)(j) = 'B'
      } else if (upScore >= leftScore && upScore > 0) {
        scores(i)(j) = upScore.toInt
        moves(i)(j) = 'J'
      } else if (leftScore > 0) {
        scores(i)(j) = leftScore.toInt
        moves(i)(j) = 'I'
      } else {
        // All candidate scores non-positive: clamp to zero and terminate here.
        scores(i)(j) = 0
        moves(i)(j) = 'T'
      }
    }

    (0.0, scores, moves)
  }
}
| xubo245/CloudSW | src/main/scala/org/dsa/mediator/smithwaterman/adam/SmithWatermanGapScoringFromFn.scala | Scala | gpl-2.0 | 2,460 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.recommendation
import java.io.File
import java.util.Random
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.WrappedArray
import scala.collection.JavaConverters._
import scala.language.existentials
import com.github.fommil.netlib.BLAS.{getInstance => blas}
import org.apache.commons.io.FileUtils
import org.apache.commons.io.filefilter.TrueFileFilter
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.recommendation.ALS._
import org.apache.spark.ml.recommendation.ALS.Rating
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler.{SparkListener, SparkListenerStageCompleted}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.types._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
/**
 * Test suite for the DataFrame-based ALS implementation: internal block
 * encodings, normal-equation construction and Cholesky solving, synthetic data
 * generators, end-to-end training accuracy, input/ID validation, persistence,
 * cold-start handling and top-k recommendation output.
 */
class ALSSuite
  extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest with Logging {

  // ALS checkpoints intermediate RDDs; give the shared SparkContext a temp dir.
  override def beforeAll(): Unit = {
    super.beforeAll()
    sc.setCheckpointDir(tempDir.getAbsolutePath)
  }

  override def afterAll(): Unit = {
    super.afterAll()
  }

  test("LocalIndexEncoder") {
    val random = new Random
    for (numBlocks <- Seq(1, 2, 5, 10, 20, 50, 100)) {
      val encoder = new LocalIndexEncoder(numBlocks)
      val maxLocalIndex = Int.MaxValue / numBlocks
      // Random (blockId, localIndex) pairs plus the boundary cases.
      val tests = Seq.fill(5)((random.nextInt(numBlocks), random.nextInt(maxLocalIndex))) ++
        Seq((0, 0), (numBlocks - 1, maxLocalIndex))
      tests.foreach { case (blockId, localIndex) =>
        val err = s"Failed with numBlocks=$numBlocks, blockId=$blockId, and localIndex=$localIndex."
        val encoded = encoder.encode(blockId, localIndex)
        assert(encoder.blockId(encoded) === blockId, err)
        assert(encoder.localIndex(encoded) === localIndex, err)
      }
    }
  }

  test("normal equation construction") {
    val k = 2
    val ne0 = new NormalEquation(k)
      .add(Array(1.0f, 2.0f), 3.0)
      .add(Array(4.0f, 5.0f), 6.0, 2.0) // weighted
    assert(ne0.k === k)
    assert(ne0.triK === k * (k + 1) / 2)
    // NumPy code that computes the expected values:
    // A = np.matrix("1 2; 4 5")
    // b = np.matrix("3; 6")
    // C = np.matrix(np.diag([1, 2]))
    // ata = A.transpose() * C * A
    // atb = A.transpose() * C * b
    assert(Vectors.dense(ne0.ata) ~== Vectors.dense(33.0, 42.0, 54.0) relTol 1e-8)
    assert(Vectors.dense(ne0.atb) ~== Vectors.dense(51.0, 66.0) relTol 1e-8)
    val ne1 = new NormalEquation(2)
      .add(Array(7.0f, 8.0f), 9.0)
    ne0.merge(ne1)
    // NumPy code that computes the expected values:
    // A = np.matrix("1 2; 4 5; 7 8")
    // b = np.matrix("3; 6; 9")
    // C = np.matrix(np.diag([1, 2, 1]))
    // ata = A.transpose() * C * A
    // atb = A.transpose() * C * b
    assert(Vectors.dense(ne0.ata) ~== Vectors.dense(82.0, 98.0, 118.0) relTol 1e-8)
    assert(Vectors.dense(ne0.atb) ~== Vectors.dense(114.0, 138.0) relTol 1e-8)
    // Dimension mismatches and negative weights must be rejected.
    intercept[IllegalArgumentException] {
      ne0.add(Array(1.0f), 2.0)
    }
    intercept[IllegalArgumentException] {
      ne0.add(Array(1.0f, 2.0f, 3.0f), 4.0)
    }
    intercept[IllegalArgumentException] {
      ne0.add(Array(1.0f, 2.0f), 0.0, -1.0)
    }
    intercept[IllegalArgumentException] {
      val ne2 = new NormalEquation(3)
      ne0.merge(ne2)
    }
    ne0.reset()
    assert(ne0.ata.forall(_ == 0.0))
    assert(ne0.atb.forall(_ == 0.0))
  }

  test("CholeskySolver") {
    val k = 2
    val ne0 = new NormalEquation(k)
      .add(Array(1.0f, 2.0f), 4.0)
      .add(Array(1.0f, 3.0f), 9.0)
      .add(Array(1.0f, 4.0f), 16.0)
    val ne1 = new NormalEquation(k)
      .merge(ne0)
    val chol = new CholeskySolver
    val x0 = chol.solve(ne0, 0.0).map(_.toDouble)
    // NumPy code that computes the expected solution:
    // A = np.matrix("1 2; 1 3; 1 4")
    // b = np.matrix("4; 9; 16")
    // x0 = np.linalg.lstsq(A, b)[0]
    assert(Vectors.dense(x0) ~== Vectors.dense(-8.333333, 6.0) relTol 1e-6)
    // solve() resets the normal equation in place.
    assert(ne0.ata.forall(_ == 0.0))
    assert(ne0.atb.forall(_ == 0.0))
    val x1 = chol.solve(ne1, 1.5).map(_.toDouble)
    // NumPy code that computes the expected solution, where lambda is scaled by n:
    // x1 = np.linalg.solve(A.transpose() * A + 1.5 * np.eye(2), A.transpose() * b)
    assert(Vectors.dense(x1) ~== Vectors.dense(-0.1155556, 3.28) relTol 1e-6)
  }

  test("RatingBlockBuilder") {
    val emptyBuilder = new RatingBlockBuilder[Int]()
    assert(emptyBuilder.size === 0)
    val emptyBlock = emptyBuilder.build()
    assert(emptyBlock.srcIds.isEmpty)
    assert(emptyBlock.dstIds.isEmpty)
    assert(emptyBlock.ratings.isEmpty)
    val builder0 = new RatingBlockBuilder()
      .add(Rating(0, 1, 2.0f))
      .add(Rating(3, 4, 5.0f))
    assert(builder0.size === 2)
    val builder1 = new RatingBlockBuilder()
      .add(Rating(6, 7, 8.0f))
      .merge(builder0.build())
    assert(builder1.size === 3)
    val block = builder1.build()
    val ratings = Seq.tabulate(block.size) { i =>
      (block.srcIds(i), block.dstIds(i), block.ratings(i))
    }.toSet
    assert(ratings === Set((0, 1, 2.0f), (3, 4, 5.0f), (6, 7, 8.0f)))
  }

  test("UncompressedInBlock") {
    val encoder = new LocalIndexEncoder(10)
    val uncompressed = new UncompressedInBlockBuilder[Int](encoder)
      .add(0, Array(1, 0, 2), Array(0, 1, 4), Array(1.0f, 2.0f, 3.0f))
      .add(1, Array(3, 0), Array(2, 5), Array(4.0f, 5.0f))
      .build()
    assert(uncompressed.length === 5)
    val records = Seq.tabulate(uncompressed.length) { i =>
      val dstEncodedIndex = uncompressed.dstEncodedIndices(i)
      val dstBlockId = encoder.blockId(dstEncodedIndex)
      val dstLocalIndex = encoder.localIndex(dstEncodedIndex)
      (uncompressed.srcIds(i), dstBlockId, dstLocalIndex, uncompressed.ratings(i))
    }.toSet
    val expected =
      Set((1, 0, 0, 1.0f), (0, 0, 1, 2.0f), (2, 0, 4, 3.0f), (3, 1, 2, 4.0f), (0, 1, 5, 5.0f))
    assert(records === expected)
    // Compressing sorts by src id and builds CSR-style dst pointers; round-trip
    // decompression must reproduce the original records.
    val compressed = uncompressed.compress()
    assert(compressed.size === 5)
    assert(compressed.srcIds.toSeq === Seq(0, 1, 2, 3))
    assert(compressed.dstPtrs.toSeq === Seq(0, 2, 3, 4, 5))
    var decompressed = ArrayBuffer.empty[(Int, Int, Int, Float)]
    var i = 0
    while (i < compressed.srcIds.length) {
      var j = compressed.dstPtrs(i)
      while (j < compressed.dstPtrs(i + 1)) {
        val dstEncodedIndex = compressed.dstEncodedIndices(j)
        val dstBlockId = encoder.blockId(dstEncodedIndex)
        val dstLocalIndex = encoder.localIndex(dstEncodedIndex)
        decompressed += ((compressed.srcIds(i), dstBlockId, dstLocalIndex, compressed.ratings(j)))
        j += 1
      }
      i += 1
    }
    assert(decompressed.toSet === expected)
  }

  test("CheckedCast") {
    val checkedCast = new ALS().checkedCast
    val df = spark.range(1)
    withClue("Valid Integer Ids") {
      df.select(checkedCast(lit(123))).collect()
    }
    withClue("Valid Long Ids") {
      df.select(checkedCast(lit(1231L))).collect()
    }
    withClue("Valid Decimal Ids") {
      df.select(checkedCast(lit(123).cast(DecimalType(15, 2)))).collect()
    }
    withClue("Valid Double Ids") {
      df.select(checkedCast(lit(123.0))).collect()
    }
    val msg = "either out of Integer range or contained a fractional part"
    withClue("Invalid Long: out of range") {
      val e: SparkException = intercept[SparkException] {
        df.select(checkedCast(lit(1231000000000L))).collect()
      }
      assert(e.getMessage.contains(msg))
    }
    withClue("Invalid Decimal: out of range") {
      val e: SparkException = intercept[SparkException] {
        df.select(checkedCast(lit(1231000000000.0).cast(DecimalType(15, 2)))).collect()
      }
      assert(e.getMessage.contains(msg))
    }
    withClue("Invalid Decimal: fractional part") {
      val e: SparkException = intercept[SparkException] {
        df.select(checkedCast(lit(123.1).cast(DecimalType(15, 2)))).collect()
      }
      assert(e.getMessage.contains(msg))
    }
    withClue("Invalid Double: out of range") {
      val e: SparkException = intercept[SparkException] {
        df.select(checkedCast(lit(1231000000000.0))).collect()
      }
      assert(e.getMessage.contains(msg))
    }
    withClue("Invalid Double: fractional part") {
      val e: SparkException = intercept[SparkException] {
        df.select(checkedCast(lit(123.1))).collect()
      }
      assert(e.getMessage.contains(msg))
    }
    withClue("Invalid Type") {
      val e: SparkException = intercept[SparkException] {
        df.select(checkedCast(lit("123.1"))).collect()
      }
      assert(e.getMessage.contains("was not numeric"))
    }
  }

  /**
   * Generates an explicit feedback dataset for testing ALS.
   * @param numUsers number of users
   * @param numItems number of items
   * @param rank rank
   * @param noiseStd the standard deviation of additive Gaussian noise on training data
   * @param seed random seed
   * @return (training, test)
   */
  def genExplicitTestData(
      numUsers: Int,
      numItems: Int,
      rank: Int,
      noiseStd: Double = 0.0,
      seed: Long = 11L): (RDD[Rating[Int]], RDD[Rating[Int]]) = {
    val trainingFraction = 0.6
    val testFraction = 0.3
    val totalFraction = trainingFraction + testFraction
    val random = new Random(seed)
    val userFactors = genFactors(numUsers, rank, random)
    val itemFactors = genFactors(numItems, rank, random)
    val training = ArrayBuffer.empty[Rating[Int]]
    val test = ArrayBuffer.empty[Rating[Int]]
    // True rating = dot(userFactor, itemFactor); noise is only added to training.
    for ((userId, userFactor) <- userFactors; (itemId, itemFactor) <- itemFactors) {
      val x = random.nextDouble()
      if (x < totalFraction) {
        val rating = blas.sdot(rank, userFactor, 1, itemFactor, 1)
        if (x < trainingFraction) {
          val noise = noiseStd * random.nextGaussian()
          training += Rating(userId, itemId, rating + noise.toFloat)
        } else {
          test += Rating(userId, itemId, rating)
        }
      }
    }
    logInfo(s"Generated an explicit feedback dataset with ${training.size} ratings for training " +
      s"and ${test.size} for test.")
    (sc.parallelize(training, 2), sc.parallelize(test, 2))
  }

  /**
   * Generates an implicit feedback dataset for testing ALS.
   * @param numUsers number of users
   * @param numItems number of items
   * @param rank rank
   * @param noiseStd the standard deviation of additive Gaussian noise on training data
   * @param seed random seed
   * @return (training, test)
   */
  def genImplicitTestData(
      numUsers: Int,
      numItems: Int,
      rank: Int,
      noiseStd: Double = 0.0,
      seed: Long = 11L): (RDD[Rating[Int]], RDD[Rating[Int]]) = {
    ALSSuite.genImplicitTestData(sc, numUsers, numItems, rank, noiseStd, seed)
  }

  /**
   * Generates random user/item factors, with i.i.d. values drawn from U(a, b).
   * @param size number of users/items
   * @param rank number of features
   * @param random random number generator
   * @param a min value of the support (default: -1)
   * @param b max value of the support (default: 1)
   * @return a sequence of (ID, factors) pairs
   */
  private def genFactors(
      size: Int,
      rank: Int,
      random: Random,
      a: Float = -1.0f,
      b: Float = 1.0f): Seq[(Int, Array[Float])] = {
    ALSSuite.genFactors(size, rank, random, a, b)
  }

  /**
   * Test ALS using the given training/test splits and parameters.
   * @param training training dataset
   * @param test test dataset
   * @param rank rank of the matrix factorization
   * @param maxIter max number of iterations
   * @param regParam regularization constant
   * @param implicitPrefs whether to use implicit preference
   * @param numUserBlocks number of user blocks
   * @param numItemBlocks number of item blocks
   * @param targetRMSE target test RMSE
   */
  def testALS(
      training: RDD[Rating[Int]],
      test: RDD[Rating[Int]],
      rank: Int,
      maxIter: Int,
      regParam: Double,
      implicitPrefs: Boolean = false,
      numUserBlocks: Int = 2,
      numItemBlocks: Int = 3,
      targetRMSE: Double = 0.05): Unit = {
    val spark = this.spark
    import spark.implicits._
    val als = new ALS()
      .setRank(rank)
      .setRegParam(regParam)
      .setImplicitPrefs(implicitPrefs)
      .setNumUserBlocks(numUserBlocks)
      .setNumItemBlocks(numItemBlocks)
      .setSeed(0)
    val alpha = als.getAlpha
    val model = als.fit(training.toDF())
    val predictions = model.transform(test.toDF()).select("rating", "prediction").rdd.map {
      case Row(rating: Float, prediction: Float) =>
        (rating.toDouble, prediction.toDouble)
    }
    val rmse =
      if (implicitPrefs) {
        // TODO: Use a better (rank-based?) evaluation metric for implicit feedback.
        // We limit the ratings and the predictions to interval [0, 1] and compute the weighted RMSE
        // with the confidence scores as weights.
        val (totalWeight, weightedSumSq) = predictions.map { case (rating, prediction) =>
          val confidence = 1.0 + alpha * math.abs(rating)
          val rating01 = math.max(math.min(rating, 1.0), 0.0)
          val prediction01 = math.max(math.min(prediction, 1.0), 0.0)
          val err = prediction01 - rating01
          (confidence, confidence * err * err)
        }.reduce { case ((c0, e0), (c1, e1)) =>
          (c0 + c1, e0 + e1)
        }
        math.sqrt(weightedSumSq / totalWeight)
      } else {
        val mse = predictions.map { case (rating, prediction) =>
          val err = rating - prediction
          err * err
        }.mean()
        math.sqrt(mse)
      }
    logInfo(s"Test RMSE is $rmse.")
    assert(rmse < targetRMSE)
    MLTestingUtils.checkCopyAndUids(als, model)
  }

  test("exact rank-1 matrix") {
    val (training, test) = genExplicitTestData(numUsers = 20, numItems = 40, rank = 1)
    testALS(training, test, maxIter = 1, rank = 1, regParam = 1e-5, targetRMSE = 0.001)
    testALS(training, test, maxIter = 1, rank = 2, regParam = 1e-5, targetRMSE = 0.001)
  }

  test("approximate rank-1 matrix") {
    val (training, test) =
      genExplicitTestData(numUsers = 20, numItems = 40, rank = 1, noiseStd = 0.01)
    testALS(training, test, maxIter = 2, rank = 1, regParam = 0.01, targetRMSE = 0.02)
    testALS(training, test, maxIter = 2, rank = 2, regParam = 0.01, targetRMSE = 0.02)
  }

  test("approximate rank-2 matrix") {
    val (training, test) =
      genExplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
    testALS(training, test, maxIter = 4, rank = 2, regParam = 0.01, targetRMSE = 0.03)
    testALS(training, test, maxIter = 4, rank = 3, regParam = 0.01, targetRMSE = 0.03)
  }

  test("different block settings") {
    val (training, test) =
      genExplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
    for ((numUserBlocks, numItemBlocks) <- Seq((1, 1), (1, 2), (2, 1), (2, 2))) {
      testALS(training, test, maxIter = 4, rank = 3, regParam = 0.01, targetRMSE = 0.03,
        numUserBlocks = numUserBlocks, numItemBlocks = numItemBlocks)
    }
  }

  test("more blocks than ratings") {
    val (training, test) =
      genExplicitTestData(numUsers = 4, numItems = 4, rank = 1)
    testALS(training, test, maxIter = 2, rank = 1, regParam = 1e-4, targetRMSE = 0.002,
      numItemBlocks = 5, numUserBlocks = 5)
  }

  test("implicit feedback") {
    val (training, test) =
      genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
    testALS(training, test, maxIter = 4, rank = 2, regParam = 0.01, implicitPrefs = true,
      targetRMSE = 0.3)
  }

  test("using generic ID types") {
    val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
    val longRatings = ratings.map(r => Rating(r.user.toLong, r.item.toLong, r.rating))
    val (longUserFactors, _) = ALS.train(longRatings, rank = 2, maxIter = 4, seed = 0)
    assert(longUserFactors.first()._1.getClass === classOf[Long])
    val strRatings = ratings.map(r => Rating(r.user.toString, r.item.toString, r.rating))
    val (strUserFactors, _) = ALS.train(strRatings, rank = 2, maxIter = 4, seed = 0)
    assert(strUserFactors.first()._1.getClass === classOf[String])
  }

  test("nonnegative constraint") {
    val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
    val (userFactors, itemFactors) =
      ALS.train(ratings, rank = 2, maxIter = 4, nonnegative = true, seed = 0)
    def isNonnegative(factors: RDD[(Int, Array[Float])]): Boolean = {
      factors.values.map { _.forall(_ >= 0.0) }.reduce(_ && _)
    }
    assert(isNonnegative(userFactors))
    assert(isNonnegative(itemFactors))
    // TODO: Validate the solution.
  }

  test("als partitioner is a projection") {
    for (p <- Seq(1, 10, 100, 1000)) {
      val part = new ALSPartitioner(p)
      var k = 0
      while (k < p) {
        assert(k === part.getPartition(k))
        assert(k === part.getPartition(k.toLong))
        k += 1
      }
    }
  }

  test("partitioner in returned factors") {
    val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
    val (userFactors, itemFactors) = ALS.train(
      ratings, rank = 2, maxIter = 4, numUserBlocks = 3, numItemBlocks = 4, seed = 0)
    for ((tpe, factors) <- Seq(("User", userFactors), ("Item", itemFactors))) {
      assert(userFactors.partitioner.isDefined, s"$tpe factors should have partitioner.")
      val part = userFactors.partitioner.get
      userFactors.mapPartitionsWithIndex { (idx, items) =>
        items.foreach { case (id, _) =>
          if (part.getPartition(id) != idx) {
            throw new SparkException(s"$tpe with ID $id should not be in partition $idx.")
          }
        }
        Iterator.empty
      }.count()
    }
  }

  test("als with large number of iterations") {
    val (ratings, _) = genExplicitTestData(numUsers = 4, numItems = 4, rank = 1)
    ALS.train(ratings, rank = 1, maxIter = 50, numUserBlocks = 2, numItemBlocks = 2, seed = 0)
    ALS.train(ratings, rank = 1, maxIter = 50, numUserBlocks = 2, numItemBlocks = 2,
      implicitPrefs = true, seed = 0)
  }

  test("read/write") {
    val spark = this.spark
    import spark.implicits._
    import ALSSuite._
    val (ratings, _) = genExplicitTestData(numUsers = 4, numItems = 4, rank = 1)
    def getFactors(df: DataFrame): Set[(Int, Array[Float])] = {
      df.select("id", "features").collect().map { case r =>
        (r.getInt(0), r.getAs[Array[Float]](1))
      }.toSet
    }
    def checkModelData(model: ALSModel, model2: ALSModel): Unit = {
      assert(model.rank === model2.rank)
      assert(getFactors(model.userFactors) === getFactors(model2.userFactors))
      assert(getFactors(model.itemFactors) === getFactors(model2.itemFactors))
    }
    val als = new ALS()
    testEstimatorAndModelReadWrite(als, ratings.toDF(), allEstimatorParamSettings,
      allModelParamSettings, checkModelData)
  }

  test("input type validation") {
    val spark = this.spark
    import spark.implicits._
    // check that ALS can handle all numeric types for rating column
    // and user/item columns (when the user/item ids are within Int range)
    val als = new ALS().setMaxIter(1).setRank(1)
    Seq(("user", IntegerType), ("item", IntegerType), ("rating", FloatType)).foreach {
      case (colName, sqlType) =>
        MLTestingUtils.checkNumericTypesALS(als, spark, colName, sqlType) {
          (ex, act) =>
            ex.userFactors.first().getSeq[Float](1) === act.userFactors.first.getSeq[Float](1)
        } { (ex, act, _) =>
          ex.transform(_: DataFrame).select("prediction").first.getDouble(0) ~==
            act.transform(_: DataFrame).select("prediction").first.getDouble(0) absTol 1e-6
        }
    }
    // check user/item ids falling outside of Int range
    val big = Int.MaxValue.toLong + 1
    val small = Int.MinValue.toDouble - 1
    val df = Seq(
      (0, 0L, 0d, 1, 1L, 1d, 3.0),
      (0, big, small, 0, big, small, 2.0),
      (1, 1L, 1d, 0, 0L, 0d, 5.0)
    ).toDF("user", "user_big", "user_small", "item", "item_big", "item_small", "rating")
    val msg = "either out of Integer range or contained a fractional part"
    withClue("fit should fail when ids exceed integer range. ") {
      assert(intercept[SparkException] {
        als.fit(df.select(df("user_big").as("user"), df("item"), df("rating")))
      }.getCause.getMessage.contains(msg))
      assert(intercept[SparkException] {
        als.fit(df.select(df("user_small").as("user"), df("item"), df("rating")))
      }.getCause.getMessage.contains(msg))
      assert(intercept[SparkException] {
        als.fit(df.select(df("item_big").as("item"), df("user"), df("rating")))
      }.getCause.getMessage.contains(msg))
      assert(intercept[SparkException] {
        als.fit(df.select(df("item_small").as("item"), df("user"), df("rating")))
      }.getCause.getMessage.contains(msg))
    }
    withClue("transform should fail when ids exceed integer range. ") {
      val model = als.fit(df)
      assert(intercept[SparkException] {
        model.transform(df.select(df("user_big").as("user"), df("item"))).first
      }.getMessage.contains(msg))
      assert(intercept[SparkException] {
        model.transform(df.select(df("user_small").as("user"), df("item"))).first
      }.getMessage.contains(msg))
      assert(intercept[SparkException] {
        model.transform(df.select(df("item_big").as("item"), df("user"))).first
      }.getMessage.contains(msg))
      assert(intercept[SparkException] {
        model.transform(df.select(df("item_small").as("item"), df("user"))).first
      }.getMessage.contains(msg))
    }
  }

  test("SPARK-18268: ALS with empty RDD should fail with better message") {
    val ratings = sc.parallelize(Array.empty[Rating[Int]])
    intercept[IllegalArgumentException] {
      ALS.train(ratings)
    }
  }

  test("ALS cold start user/item prediction strategy") {
    val spark = this.spark
    import spark.implicits._
    import org.apache.spark.sql.functions._
    val (ratings, _) = genExplicitTestData(numUsers = 4, numItems = 4, rank = 1)
    val data = ratings.toDF
    val knownUser = data.select(max("user")).as[Int].first()
    val unknownUser = knownUser + 10
    val knownItem = data.select(max("item")).as[Int].first()
    val unknownItem = knownItem + 20
    val test = Seq(
      (unknownUser, unknownItem),
      (knownUser, unknownItem),
      (unknownUser, knownItem),
      (knownUser, knownItem)
    ).toDF("user", "item")
    val als = new ALS().setMaxIter(1).setRank(1)
    // default is 'nan'
    val defaultModel = als.fit(data)
    val defaultPredictions = defaultModel.transform(test).select("prediction").as[Float].collect()
    assert(defaultPredictions.length == 4)
    assert(defaultPredictions.slice(0, 3).forall(_.isNaN))
    assert(!defaultPredictions.last.isNaN)
    // check 'drop' strategy should filter out rows with unknown users/items
    val dropPredictions = defaultModel
      .setColdStartStrategy("drop")
      .transform(test)
      .select("prediction").as[Float].collect()
    assert(dropPredictions.length == 1)
    assert(!dropPredictions.head.isNaN)
    assert(dropPredictions.head ~== defaultPredictions.last relTol 1e-14)
  }

  test("case insensitive cold start param value") {
    val spark = this.spark
    import spark.implicits._
    val (ratings, _) = genExplicitTestData(numUsers = 2, numItems = 2, rank = 1)
    val data = ratings.toDF
    val model = new ALS().fit(data)
    Seq("nan", "NaN", "Nan", "drop", "DROP", "Drop").foreach { s =>
      model.setColdStartStrategy(s).transform(data)
    }
  }

  // Builds a small hand-crafted ALSModel whose top-k recommendations can be
  // verified against factor dot products computed by hand.
  private def getALSModel = {
    val spark = this.spark
    import spark.implicits._
    val userFactors = Seq(
      (0, Array(6.0f, 4.0f)),
      (1, Array(3.0f, 4.0f)),
      (2, Array(3.0f, 6.0f))
    ).toDF("id", "features")
    val itemFactors = Seq(
      (3, Array(5.0f, 6.0f)),
      (4, Array(6.0f, 2.0f)),
      (5, Array(3.0f, 6.0f)),
      (6, Array(4.0f, 1.0f))
    ).toDF("id", "features")
    val als = new ALS().setRank(2)
    new ALSModel(als.uid, als.getRank, userFactors, itemFactors)
      .setUserCol("user")
      .setItemCol("item")
  }

  test("recommendForAllUsers with k < num_items") {
    val topItems = getALSModel.recommendForAllUsers(2)
    assert(topItems.count() == 3)
    assert(topItems.columns.contains("user"))
    val expected = Map(
      0 -> Array((3, 54f), (4, 44f)),
      1 -> Array((3, 39f), (5, 33f)),
      2 -> Array((3, 51f), (5, 45f))
    )
    checkRecommendations(topItems, expected, "item")
  }

  test("recommendForAllUsers with k = num_items") {
    val topItems = getALSModel.recommendForAllUsers(4)
    assert(topItems.count() == 3)
    assert(topItems.columns.contains("user"))
    val expected = Map(
      0 -> Array((3, 54f), (4, 44f), (5, 42f), (6, 28f)),
      1 -> Array((3, 39f), (5, 33f), (4, 26f), (6, 16f)),
      2 -> Array((3, 51f), (5, 45f), (4, 30f), (6, 18f))
    )
    checkRecommendations(topItems, expected, "item")
  }

  test("recommendForAllItems with k < num_users") {
    val topUsers = getALSModel.recommendForAllItems(2)
    assert(topUsers.count() == 4)
    assert(topUsers.columns.contains("item"))
    val expected = Map(
      3 -> Array((0, 54f), (2, 51f)),
      4 -> Array((0, 44f), (2, 30f)),
      5 -> Array((2, 45f), (0, 42f)),
      6 -> Array((0, 28f), (2, 18f))
    )
    checkRecommendations(topUsers, expected, "user")
  }

  test("recommendForAllItems with k = num_users") {
    val topUsers = getALSModel.recommendForAllItems(3)
    assert(topUsers.count() == 4)
    assert(topUsers.columns.contains("item"))
    val expected = Map(
      3 -> Array((0, 54f), (2, 51f), (1, 39f)),
      4 -> Array((0, 44f), (2, 30f), (1, 26f)),
      5 -> Array((2, 45f), (0, 42f), (1, 33f)),
      6 -> Array((0, 28f), (2, 18f), (1, 16f))
    )
    checkRecommendations(topUsers, expected, "user")
  }

  // Verifies both the recommendation contents/order and the schema of the
  // nested "recommendations" struct column.
  private def checkRecommendations(
      topK: DataFrame,
      expected: Map[Int, Array[(Int, Float)]],
      dstColName: String): Unit = {
    val spark = this.spark
    import spark.implicits._
    assert(topK.columns.contains("recommendations"))
    topK.as[(Int, Seq[(Int, Float)])].collect().foreach { case (id: Int, recs: Seq[(Int, Float)]) =>
      assert(recs === expected(id))
    }
    topK.collect().foreach { row =>
      val recs = row.getAs[WrappedArray[Row]]("recommendations")
      assert(recs(0).fieldIndex(dstColName) == 0)
      assert(recs(0).fieldIndex("rating") == 1)
    }
  }
}
/**
 * Verifies that ALS's shuffle-dependency cleanup removes intermediate shuffle
 * files from the local directory, both when invoked directly and implicitly
 * during training. Each test runs its own SparkContext against a dedicated
 * temp local dir so the file assertions are not polluted by other suites.
 */
class ALSCleanerSuite extends SparkFunSuite {
  test("ALS shuffle cleanup standalone") {
    val conf = new SparkConf()
    val localDir = Utils.createTempDir()
    val checkpointDir = Utils.createTempDir()
    def getAllFiles: Set[File] =
      FileUtils.listFiles(localDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE).asScala.toSet
    try {
      conf.set("spark.local.dir", localDir.getAbsolutePath)
      val sc = new SparkContext("local[2]", "test", conf)
      try {
        sc.setCheckpointDir(checkpointDir.getAbsolutePath)
        // Test checkpoint and clean parents
        val input = sc.parallelize(1 to 1000)
        val keyed = input.map(x => (x % 20, 1))
        val shuffled = keyed.reduceByKey(_ + _)
        val keysOnly = shuffled.keys
        val deps = keysOnly.dependencies
        keysOnly.count()
        // After cleanup, no shuffle files should remain on disk.
        ALS.cleanShuffleDependencies(sc, deps, true)
        val resultingFiles = getAllFiles
        assert(resultingFiles === Set())
        // Ensure running count again works fine even if we kill the shuffle files.
        keysOnly.count()
      } finally {
        sc.stop()
      }
    } finally {
      Utils.deleteRecursively(localDir)
      Utils.deleteRecursively(checkpointDir)
    }
  }
  test("ALS shuffle cleanup in algorithm") {
    val conf = new SparkConf()
    val localDir = Utils.createTempDir()
    val checkpointDir = Utils.createTempDir()
    def getAllFiles: Set[File] =
      FileUtils.listFiles(localDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE).asScala.toSet
    try {
      conf.set("spark.local.dir", localDir.getAbsolutePath)
      val sc = new SparkContext("local[2]", "test", conf)
      try {
        sc.setCheckpointDir(checkpointDir.getAbsolutePath)
        // Generate test data
        val (training, _) = ALSSuite.genImplicitTestData(sc, 20, 5, 1, 0.2, 0)
        // Implicitly test the cleaning of parents during ALS training
        val spark = SparkSession.builder
          .master("local[2]")
          .appName("ALSCleanerSuite")
          .sparkContext(sc)
          .getOrCreate()
        import spark.implicits._
        // checkpointInterval=1 with maxIter=7 forces frequent checkpointing so
        // that intermediate shuffle dependencies get cleaned during fit().
        val als = new ALS()
          .setRank(1)
          .setRegParam(1e-5)
          .setSeed(0)
          .setCheckpointInterval(1)
          .setMaxIter(7)
        val model = als.fit(training.toDF())
        val resultingFiles = getAllFiles
        // We expect the last shuffles files, block ratings, user factors, and item factors to be
        // around but no more.
        val pattern = "shuffle_(\\\\d+)_.+\\\\.data".r
        val rddIds = resultingFiles.flatMap { f =>
          pattern.findAllIn(f.getName()).matchData.map { _.group(1) } }
        assert(rddIds.size === 4)
      } finally {
        sc.stop()
      }
    } finally {
      Utils.deleteRecursively(localDir)
      Utils.deleteRecursively(checkpointDir)
    }
  }
}
/**
 * Verifies that ALS honors its `intermediateStorageLevel` and `finalStorageLevel`
 * params, both for the persisted factor RDDs and for intermediate RDDs observed
 * through a listener.
 */
class ALSStorageSuite
  extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest with Logging {

  test("invalid storage params") {
    // Unknown storage-level names must be rejected by the setters...
    intercept[IllegalArgumentException] {
      new ALS().setIntermediateStorageLevel("foo")
    }
    // ...and "NONE" is additionally rejected for intermediate storage.
    intercept[IllegalArgumentException] {
      new ALS().setIntermediateStorageLevel("NONE")
    }
    intercept[IllegalArgumentException] {
      new ALS().setFinalStorageLevel("foo")
    }
  }

  test("default and non-default storage params set correct RDD StorageLevels") {
    val spark = this.spark
    import spark.implicits._
    val data = Seq(
      (0, 0, 1.0),
      (0, 1, 2.0),
      (1, 2, 3.0),
      (1, 0, 2.0)
    ).toDF("user", "item", "rating")
    val als = new ALS().setMaxIter(1).setRank(1)
    // add listener to check intermediate RDD default storage levels
    val defaultListener = new IntermediateRDDStorageListener
    sc.addSparkListener(defaultListener)
    val model = als.fit(data)
    // check final factor RDD default storage levels
    val defaultFactorRDDs = sc.getPersistentRDDs.collect {
      case (id, rdd) if rdd.name == "userFactors" || rdd.name == "itemFactors" =>
        rdd.name -> (id, rdd.getStorageLevel)
    }.toMap
    defaultFactorRDDs.foreach { case (_, (id, level)) =>
      assert(level == StorageLevel.MEMORY_AND_DISK)
    }
    defaultListener.storageLevels.foreach(level => assert(level == StorageLevel.MEMORY_AND_DISK))
    // add listener to check intermediate RDD non-default storage levels
    val nonDefaultListener = new IntermediateRDDStorageListener
    sc.addSparkListener(nonDefaultListener)
    val nonDefaultModel = als
      .setFinalStorageLevel("MEMORY_ONLY")
      .setIntermediateStorageLevel("DISK_ONLY")
      .fit(data)
    // check final factor RDD non-default storage levels
    // (factor RDDs persisted by the first fit are excluded by comparing ids)
    val levels = sc.getPersistentRDDs.collect {
      case (id, rdd) if rdd.name == "userFactors" && rdd.id != defaultFactorRDDs("userFactors")._1
        || rdd.name == "itemFactors" && rdd.id != defaultFactorRDDs("itemFactors")._1 =>
        rdd.getStorageLevel
    }
    levels.foreach(level => assert(level == StorageLevel.MEMORY_ONLY))
    nonDefaultListener.storageLevels.foreach(level => assert(level == StorageLevel.DISK_ONLY))
  }
}
/**
 * Spark listener that records the storage level of every intermediate ALS RDD
 * (names containing "Blocks" or "Factors-") seen in completed stages.
 */
private class IntermediateRDDStorageListener extends SparkListener {

  // Levels accumulate across all completed stages, in observation order.
  val storageLevels: mutable.ArrayBuffer[StorageLevel] = mutable.ArrayBuffer()

  override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = {
    def isIntermediate(name: String): Boolean =
      name.contains("Blocks") || name.contains("Factors-")
    val observed = stageCompleted.stageInfo.rddInfos
      .filter(info => isIntermediate(info.name))
      .map(_.storageLevel)
    storageLevels ++= observed
  }
}
object ALSSuite extends Logging {

  /**
   * Mapping from all Params to valid settings which differ from the defaults.
   * This is useful for tests which need to exercise all Params, such as save/load.
   * This excludes input columns to simplify some tests.
   */
  val allModelParamSettings: Map[String, Any] = Map(
    "predictionCol" -> "myPredictionCol"
  )

  /**
   * Mapping from all Params to valid settings which differ from the defaults.
   * This is useful for tests which need to exercise all Params, such as save/load.
   * This excludes input columns to simplify some tests.
   */
  val allEstimatorParamSettings: Map[String, Any] = allModelParamSettings ++ Map(
    "maxIter" -> 1,
    "rank" -> 1,
    "regParam" -> 0.01,
    "numUserBlocks" -> 2,
    "numItemBlocks" -> 2,
    "implicitPrefs" -> true,
    "alpha" -> 0.9,
    "nonnegative" -> true,
    "checkpointInterval" -> 20,
    "intermediateStorageLevel" -> "MEMORY_ONLY",
    "finalStorageLevel" -> "MEMORY_AND_DISK_SER"
  )

  // Helper functions to generate test data we share between ALS test suites

  /**
   * Generates random user/item factors, with i.i.d. values drawn from U(a, b).
   * @param size number of users/items
   * @param rank number of features
   * @param random random number generator
   * @param a min value of the support (default: -1)
   * @param b max value of the support (default: 1)
   * @return a sequence of (ID, factors) pairs
   */
  private def genFactors(
      size: Int,
      rank: Int,
      random: Random,
      a: Float = -1.0f,
      b: Float = 1.0f): Seq[(Int, Array[Float])] = {
    require(size > 0 && size < Int.MaxValue / 3)
    require(b > a)
    // Draw random IDs into a set until `size` distinct values are collected.
    val ids = mutable.Set.empty[Int]
    while (ids.size < size) {
      ids += random.nextInt()
    }
    val width = b - a
    // Sort the IDs (deterministic output order), then attach a uniform random
    // factor vector of length `rank` to each one.
    ids.toSeq.sorted.map(id => (id, Array.fill(rank)(a + random.nextFloat() * width)))
  }

  /**
   * Generates an implicit feedback dataset for testing ALS.
   *
   * @param sc SparkContext
   * @param numUsers number of users
   * @param numItems number of items
   * @param rank rank
   * @param noiseStd the standard deviation of additive Gaussian noise on training data
   * @param seed random seed
   * @return (training, test)
   */
  def genImplicitTestData(
      sc: SparkContext,
      numUsers: Int,
      numItems: Int,
      rank: Int,
      noiseStd: Double = 0.0,
      seed: Long = 11L): (RDD[Rating[Int]], RDD[Rating[Int]]) = {
    // The assumption of the implicit feedback model is that unobserved ratings are more likely to
    // be negatives.
    val positiveFraction = 0.8
    val negativeFraction = 1.0 - positiveFraction
    val trainingFraction = 0.6
    val testFraction = 0.3
    val totalFraction = trainingFraction + testFraction
    val random = new Random(seed)
    val userFactors = genFactors(numUsers, rank, random)
    val itemFactors = genFactors(numItems, rank, random)
    val training = ArrayBuffer.empty[Rating[Int]]
    val test = ArrayBuffer.empty[Rating[Int]]
    for ((userId, userFactor) <- userFactors; (itemId, itemFactor) <- itemFactors) {
      // True preference is the dot product of the latent user/item factors.
      val rating = blas.sdot(rank, userFactor, 1, itemFactor, 1)
      // Positive preferences are observed with higher probability.
      val threshold = if (rating > 0) positiveFraction else negativeFraction
      val observed = random.nextDouble() < threshold
      if (observed) {
        // Split observed ratings into training/test; the remainder is dropped.
        val x = random.nextDouble()
        if (x < totalFraction) {
          if (x < trainingFraction) {
            // Gaussian noise is only added to the training split.
            val noise = noiseStd * random.nextGaussian()
            training += Rating(userId, itemId, rating + noise.toFloat)
          } else {
            test += Rating(userId, itemId, rating)
          }
        }
      }
    }
    logInfo(s"Generated an implicit feedback dataset with ${training.size} ratings for training " +
      s"and ${test.size} for test.")
    (sc.parallelize(training, 2), sc.parallelize(test, 2))
  }
}
| milliman/spark | mllib/src/test/scala/org/apache/spark/ml/recommendation/ALSSuite.scala | Scala | apache-2.0 | 37,092 |
/*
* Copyright (c) 2015.
* CenturyLink Cloud. All rights reserved.
*/
package net.xtof.time
/** Renders a [[Period]] as an ISO-8601-style duration string (e.g. "P1Y2MT3S"). */
trait PeriodWriter {
  /** Returns the textual representation of the given period. */
  def write(p: Period): String
}
object PeriodWriter {

  /**
   * Naive writer: always emits years, months, and the entire sub-month
   * duration expressed as a (possibly fractional) number of seconds,
   * e.g. "P1Y2MT3.5S".
   */
  val basic = new PeriodWriter {
    // `preciseMilliseconds` holds the sub-month duration in milliseconds (the
    // `normalized` writer below takes `% 1000` of it for the fractional
    // part), so the ISO-8601 seconds component is obtained by DIVIDING by
    // 1000 — the previous `* 1000.0` produced a value off by a factor of
    // one million (microseconds labeled as seconds).
    override def write(p: Period): String = s"P${p.years}Y${p.months}MT${p.preciseMilliseconds / 1000.0}S"
  }

  /**
   * Writer that omits zero-valued components, producing a more compact
   * normalized form (weeks/days/hours/minutes/seconds only when present).
   *
   * NOTE(review): when `preciseMilliseconds > 0` but the period contains only
   * whole weeks/days, a trailing "T" with no time components is emitted
   * (e.g. "P1WT"), which is not valid ISO 8601 — confirm whether this is
   * intended before relying on round-tripping the output.
   */
  val normalized = new PeriodWriter {
    override def write(p: Period): String = {
      val sb = new StringBuilder
      sb.append("P")
      // @formatter:off
      val y = p.years; if (y > 0) { sb.append(y); sb.append("Y") }
      val o = p.months; if (o > 0) { sb.append(o); sb.append("M") }
      if (p.preciseMilliseconds > 0) {
        val w = p.weeks; if (w > 0) { sb.append(w); sb.append("W") }
        val d = p.days; if (d > 0) { sb.append(d); sb.append("D") }
        sb.append("T")
        val h = p.hours; if (h > 0) { sb.append(h); sb.append("H") }
        val m = p.minutes; if (m > 0) { sb.append(m); sb.append("M")}
        val s = p.seconds
        val z = p.preciseMilliseconds % 1000
        if (s > 0 || z > 0) {
          // Whole seconds are written as an integer; otherwise seconds and
          // milliseconds are combined into a 3-decimal fixed-point value.
          if (z == 0) {
            sb.append(s)
          } else {
            sb.append(f"${s + z / 1000.0}%.3f")
          }
          sb.append("S")
        }
      }
      // @formatter:on
      sb.mkString
    }
  }
} | xtofs/tiny-time | src/main/scala/net/xtof/time/PeriodWriter.scala | Scala | apache-2.0 | 1,290 |
package mesosphere.marathon.state
import com.google.protobuf.Message
/**
 * A domain object that can be serialized to, and merged from, a protobuf
 * [[Message]].
 *
 * @tparam M the protobuf message type backing this state
 * @tparam T the concrete state type produced by the merge operations
 * @author Tobi Knaup
 */
trait MarathonState[M <: Message, T <: MarathonState[M, _]] {

  /** Returns a state merged from the given protobuf message. */
  def mergeFromProto(message: M): T

  /** Returns a state merged from the given protobuf-encoded bytes. */
  def mergeFromProto(bytes: Array[Byte]): T

  /** Serializes this state to its protobuf representation. */
  def toProto: M

  /** Convenience: serializes this state straight to protobuf-encoded bytes. */
  def toProtoByteArray: Array[Byte] = toProto.toByteArray
} | MiLk/marathon | src/main/scala/mesosphere/marathon/state/MarathonState.scala | Scala | apache-2.0 | 325 |
package reactivemongo.api
/**
 * A MongoDB wire-protocol compression algorithm, identified by the name
 * exchanged during the handshake and the numeric OP_COMPRESSED compressor ID.
 */
sealed trait Compressor {
  /** The compressor [[https://github.com/mongodb/specifications/blob/master/source/compression/OP_COMPRESSED.rst#mongodb-handshake-amendment name]] (e.g `snappy`) */
  def name: String

  /** The compressor [[https://github.com/mongodb/specifications/blob/master/source/compression/OP_COMPRESSED.rst#compressor-ids ID]] (e.g. `1` for snappy) */
  def id: Byte

  @inline override def toString = name
}

object Compressor {

  /**
   * The content of the message is uncompressed.
   * This is realistically only used for testing.
   */
  object Noop extends Compressor {
    val name = "noop"
    val id: Byte = 0
  }

  /** The content of the message is compressed using snappy. */
  object Snappy extends Compressor {
    val name = "snappy"
    val id: Byte = 1
  }

  /**
   * The content of the message is compressed using zlib.
   *
   * @param compressionLevel Zlib compression [[https://github.com/mongodb/specifications/blob/master/source/compression/OP_COMPRESSED.rst#zlibcompressionlevel level]] (from -1 - 9)
   */
  sealed class Zlib private[api] (
    val compressionLevel: Int) extends Compressor {

    val name = Zlib.name
    val id = Zlib.id

    // Two Zlib compressors are equal exactly when they share a level,
    // so the level itself is a valid hash.
    override def hashCode: Int = compressionLevel

    override def equals(that: Any): Boolean = that match {
      case other: Zlib => compressionLevel == other.compressionLevel
      case _           => false
    }

    override def toString = s"Zlib($compressionLevel)"
  }

  object Zlib {
    val name = "zlib"
    val id: Byte = 2

    /** Zlib compressor using the library default level (-1). */
    lazy val DefaultCompressor: Zlib = new Zlib(-1)

    def apply(compressionLevel: Int): Zlib = new Zlib(compressionLevel)

    /** Extracts the compression level when the compressor is a [[Zlib]]. */
    def unapply(compressor: Compressor): Option[Int] = compressor match {
      case zlib: Zlib => Some(zlib.compressionLevel)
      case _          => None
    }
  }

  /** The content of the message is compressed using zstd. */
  object Zstd extends Compressor {
    val name = "zstd"
    val id: Byte = 3
  }
}
| ReactiveMongo/ReactiveMongo | core/src/main/scala/api/Compressor.scala | Scala | apache-2.0 | 2,025 |
package suiryc.scala.javafx.concurrent
import akka.actor.{Actor, ActorRef, Props, Terminated}
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import javafx.application.Platform
import monix.execution.{Cancelable, Scheduler}
import suiryc.scala.Configuration
import suiryc.scala.akka.CoreSystem
import java.util.concurrent.TimeUnit
import scala.concurrent.{Await, ExecutionContextExecutor, Future}
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.util.{Failure, Success}
/**
* JavaFX concurrent helpers.
*
* Mainly delegating action to a dedicated actor running inside the JavaFX
* thread.
*/
object JFXSystem
  extends StrictLogging
{

  // Akka system.
  import CoreSystem.NonBlocking._

  // Notes:
  // We don't want the (core) system to prevent the JVM from exiting. Thus its
  // threads are (configuration) made daemonic so that the application don't
  // have to explicitly terminate it.
  // But there is an additional issue when dealing with JavaFX: when its thread
  // is stopped (exit) actors assigned to the special dispatcher won't work
  // anymore; in particular they cannot be properly stopped anymore.
  // By default, akka systems are automatically terminated upon JVM exit through
  // a shutdown hook (with a 10s timeout).
  // See: https://doc.akka.io/docs/akka/current/coordinated-shutdown.html#coordinated-shutdown
  // So in this case the actual JVM exit is delayed until timeout.
  // There are two ways to prevent this:
  // 1. Explicitly stop those actors (or more generally terminate the system)
  //    before exiting JavaFX; see 'gracefulStop' and 'terminate'
  // 2. Disable the automatic system termination for the (core) system
  // As for daemon threads, 2. is done through configuration by default. The
  // application can still override this behaviour and/or explicitly terminate
  // the actors and system before exiting JavaFX when needed.
  //
  // Akka intercepts thrown exception inside JavaFX actor, thus no need to
  // try/catch when doing 'action'.
  //
  // Some delays related to JavaFX (Java 10; upon showing a stage):
  // 1. queuing a message to process in the JavaFX actor (JFXSystem.schedule)
  // 2. Future.onComplete with JavaFX execution context
  // 3. queuing a Platform.runLater
  // (all triggered at the same time, before or after showing stage)
  //
  // On Windows 10 build 1803:
  // - 2. and 3. are executed ~50ms after 1.
  // - ~400ms to 'show' the stage
  // - if actions are triggered before 'show', 1. is executed right after
  //   (at the same time 'show' returns and 'showing' changes to 'true')
  // - if actions are triggered after 'show', 1. is executed ~100ms later
  //
  // On Gnome 3.28:
  // - 1., 2. and 3. are executed at the same time
  // - ~300ms to 'show' the stage
  // - if actions are triggered before 'show', 1. is executed right after
  //   (at the same time 'show' returns and 'showing' changes to 'true')
  // - if actions are triggered after 'show', 1. is executed ~50ms later

  /** Message to delegate an action to the JavaFX-thread actor. */
  protected case class Action(action: () => Unit)

  /** JavaFX configuration ('javafx' path relative to core config). */
  val config: Config = Configuration.libConfig.getConfig("javafx")

  /** Whether to warn if requesting to schedule action while already in JavaFX thread. */
  private val warnReentrant = config.getBoolean("system.warn-reentrant")

  /** JavaFX actor to which actions are delegated. */
  private val jfxActor = newJFXActor(Props[JFXActor](), "JavaFX-dispatcher")

  // Note:
  // Even though a MessageDispatcher is also an ExecutionContext, we usually
  // only need to directly use the JavaFX executor instead of using a JavaFX
  // dispatcher. The dispatcher (at least the usual one built from Akka system)
  // relies on BatchingExecutor and may trigger a 'silent' "requirement failed"
  // error when concurrent code executions are delegated to JavaFX; this happens
  // when creating more than one modal dialog and Await'ing its result from
  // different threads.
  // Thus we better only use the dispatcher for the dedicated JavaFX actor.
  //val dispatcher: ExecutionContextExecutor = system.dispatchers.lookup("javafx.dispatcher")

  /** The dedicated JavaFX execution context. */
  lazy val executor: ExecutionContextExecutor = JFXExecutor.executor

  /** Scheduler running with JavaFX execution context. */
  lazy val scheduler: Scheduler = Scheduler(executor)

  /** Creates an actor using the JavaFX thread backed dispatcher. */
  def newJFXActor(props: Props): ActorRef =
    actorOf(props.withDispatcher("suiryc-scala.javafx.dispatcher"))

  /** Creates an actor using the JavaFX thread backed dispatcher. */
  def newJFXActor(props: Props, name: String): ActorRef =
    actorOf(props.withDispatcher("suiryc-scala.javafx.dispatcher"), name)

  // Logs (when configured) a warning, with a stack trace pinpointing the
  // caller that delegated to the JavaFX thread while already running on it.
  @inline protected def reentrant(): Unit = {
    if (warnReentrant) {
      val throwable = new Exception("Already using JavaFX thread")
      logger.warn("Caller delegating action to JavaFX thread while already using it",
        throwable)
    }
  }

  /**
   * Delegates action to JavaFX using Platform.runLater.
   *
   * Purposely does not perform the action synchronously if we already are in
   * the JavaFX thread.
   * Execution may be performed before other actions delegated through dedicated
   * actor.
   */
  @inline def runLater(action: => Unit): Unit =
    Platform.runLater(() => action)

  /**
   * Delegates action to JavaFX.
   *
   * Action is performed synchronously if we are in the JavaFX thread.
   * Otherwise it is delegated through 'runLater'.
   */
  @inline def run(action: => Unit): Unit = {
    if (Platform.isFxApplicationThread) action
    else runLater(action)
  }

  /**
   * Delegates action to JavaFX using a Future, and waits for result.
   *
   * Blocks the calling thread (indefinitely) until the action completes;
   * rethrows any exception the action raised.
   */
  def await[T](action: => T, logReentrant: Boolean = true): T = {
    if (Platform.isFxApplicationThread) {
      // We are already in the JavaFX thread. So *DO NOT* create a Future to
      // await on it otherwise we will block the application: the actor uses
      // the same thread.
      if (logReentrant) reentrant()
      action
    } else {
      val f = Future {
        action
      } (executor)
      Await.ready(f, Duration.Inf).value match {
        case None =>
          // should not happen
          throw new Exception("Awaited Future not ready")

        case Some(Failure(ex)) =>
          throw ex

        case Some(Success(v)) =>
          v
      }
    }
  }

  /**
   * Delegates action to JavaFX using dedicated actor.
   *
   * Unless requested to execute 'later' (i.e. not synchronously), executes the
   * action right away if we are in the JavaFX thread.
   */
  def schedule(action: => Unit, later: Boolean = false, logReentrant: Boolean = true): Unit = {
    if (!later && Platform.isFxApplicationThread) {
      if (logReentrant) reentrant()
      action
    }
    else jfxActor ! Action { () => action }
  }

  /** Delegates periodic action to JavaFX using dedicated actor. */
  def schedule(initialDelay: FiniteDuration, interval: FiniteDuration)(action: => Unit): Cancelable =
    scheduler.scheduleWithFixedDelay(initialDelay, interval)(jfxActor ! Action { () => action })

  /** Delegates delayed action to JavaFX using dedicated actor. */
  def scheduleOnce(delay: FiniteDuration)(action: => Unit): Cancelable =
    scheduler.scheduleOnce(delay)(jfxActor ! Action { () => action })

  /** The default (configured) graceful stop timeout. */
  lazy val gracefulStopTimeout: FiniteDuration =
    FiniteDuration(config.getDuration("system.graceful-stop.timeout").toMillis, TimeUnit.MILLISECONDS)

  /**
   * Gracefully stops the internal "JavaFX actor".
   *
   * Must be done before exiting JavaFX (see the notes at the top of this
   * object), otherwise the actor cannot be stopped anymore.
   */
  def gracefulStop(timeout: FiniteDuration): Future[Boolean] = {
    akka.pattern.gracefulStop(jfxActor, timeout)
  }

  /** Terminates the (core) system with JavaFX actors, then JavaFX. */
  def terminate(): Future[Terminated] = {
    system.terminate().map { v =>
      Platform.exit()
      v
    }
  }

  // Runs each delegated action on the JavaFX thread (via its dispatcher).
  private class JFXActor extends Actor {

    override def receive: Receive = {
      case msg: Action =>
        msg.action()
    }
  }
}
| suiryc/suiryc-scala | javafx/src/main/scala/suiryc/scala/javafx/concurrent/JFXSystem.scala | Scala | gpl-3.0 | 8,178 |
package org.monkeynuthead.monkeybarrel.web
import akka.actor.ActorSystem
import akka.stream.scaladsl.Source
import akka.testkit.{TestActorRef, TestKit}
import org.scalatest.{BeforeAndAfterAll, MustMatchers, WordSpec}
import scala.language.{implicitConversions, postfixOps}
/**
 * Testing for the BroadcastMessagesActor.
 */
class BroadcastMessagesActorSpec extends WordSpec with MustMatchers with BeforeAndAfterAll {

  // Actor system shared by every test case in this suite.
  implicit val system = ActorSystem()

  "The Broadcast Actor" must {
    import BroadcastMessagesActor._
    import scala.concurrent.duration._

    // How long to wait before concluding that no message was delivered.
    val NoMsgTimeout = 150 millis

    // Convenience so tests can register actors with plain Int IDs.
    // NOTE(review): assumes ID(value, 0) is the canonical form expected by
    // the actor — confirm against BroadcastMessagesActor.ID.
    implicit def intToID(i: Int) = new ID(i.toLong, 0)

    "Allow 2 actors to be registered and broadcast to all" in {
      val broadcast = TestActorRef(create())
      val a1 = new TestKit(system)
      val a2 = new TestKit(system)
      broadcast ! Register(1, a1.testActor)
      broadcast ! Register(2, a2.testActor)
      // A message without a sender ID is broadcast to every registered actor.
      val source = Source.single("Hello")
      broadcast ! Message(source)
      a1.expectMsg(Message(source))
      a2.expectMsg(Message(source))
    }

    "Allow 2 actors to be registered and messages to be sent between them" in {
      val broadcast = TestActorRef(create())
      val a1 = new TestKit(system)
      val a2 = new TestKit(system)
      broadcast ! Register(1, a1.testActor)
      broadcast ! Register(2, a2.testActor)
      // A message carrying a sender ID is delivered to everyone BUT the sender.
      val hello2 = Message(Source.single("Hello2"), Some(1))
      broadcast ! hello2
      a1.expectNoMsg(NoMsgTimeout)
      a2.expectMsg(hello2)
      val hello1 = Message(Source.single("Hello1"), Some(2))
      broadcast ! hello1
      a1.expectMsg(hello1)
      a2.expectNoMsg(NoMsgTimeout)
    }

    "Allow 3 actors to be registered and broadcast messages between them" in {
      val broadcast = TestActorRef(create())
      val a1 = new TestKit(system)
      val a2 = new TestKit(system)
      val a3 = new TestKit(system)
      broadcast ! Register(1, a1.testActor)
      broadcast ! Register(2, a2.testActor)
      broadcast ! Register(3, a3.testActor)
      val hello2And3 = Message(Source.single("Hello2+3"), Some(1))
      broadcast ! hello2And3
      a1.expectNoMsg(NoMsgTimeout)
      a2.expectMsg(hello2And3)
      a3.expectMsg(hello2And3)
    }

    "Remove actors on unregistration" in {
      val broadcast = TestActorRef(create())
      val a1 = new TestKit(system)
      val a2 = new TestKit(system)
      val a3 = new TestKit(system)
      broadcast ! Register(1, a1.testActor)
      broadcast ! Register(2, a2.testActor)
      broadcast ! Register(3, a3.testActor)
      // After unregistration the actor must no longer receive broadcasts.
      broadcast ! Unregister(2)
      val helloSome = Message(Source.single("HelloSome"), Some(1))
      broadcast ! helloSome
      a1.expectNoMsg(NoMsgTimeout)
      a2.expectNoMsg(NoMsgTimeout)
      a3.expectMsg(helloSome)
    }
  }

  // NOTE(review): `system.shutdown()` is the pre-Akka-2.4 termination API;
  // if the project is on Akka >= 2.4, prefer TestKit.shutdownActorSystem(system).
  override protected def afterAll(): Unit = {
    system.shutdown()
    super.afterAll()
  }
}
| georgenicoll/monkey-barrel | web/src/test/scala/org/monkeynuthead/monkeybarrel/web/BroadcastMessagesActorSpec.scala | Scala | gpl-2.0 | 3,002 |
package pl.bitgrind.messages
import Messages._
/**
 * Validation rules for messages, accumulating all failures via scalaz
 * `ValidationNel` (every rule is evaluated; errors are collected rather than
 * short-circuited).
 */
object MessageValidation {
  import scalaz._
  import scalaz.Scalaz._

  type StringValidation[T] = ValidationNel[String, T]

  // User IDs must be strictly positive.
  private def validUserId(userId: Int): StringValidation[Int] =
    if (userId <= 0) "INVALID_USER_ID".failureNel
    else userId.successNel

  // Content must be between 3 and 200 characters (inclusive).
  // Bug fix: over-long content (> 200 chars) used to be reported with the
  // misleading "CONTENT_TOO_SHORT" code; it now gets its own error code.
  private def validContent(content: String): StringValidation[String] =
    if (content.length < 3) "CONTENT_TOO_SHORT".failureNel
    else if (content.length > 200) "CONTENT_TOO_LONG".failureNel
    else content.successNel

  /**
   * Validates recipient, sender and content of a message; the applicative
   * combination returns either the message itself or the non-empty list of
   * every failed rule.
   */
  def validate[T <: BaseMessage](message: T): StringValidation[T] = {
    (validUserId(message.toUser)
      |@| validUserId(message.fromUser)
      |@| validContent(message.content))((_, _, _) => message)
  }
}
| oegnus/scalaMicroservice | src/main/scala/pl/bitgrind/messages/MessageValidation.scala | Scala | mit | 713 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.lang.Thread.UncaughtExceptionHandler
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.language.experimental.macros
import scala.reflect.ClassTag
import scala.util.Random
import scala.util.control.NonFatal
import org.scalatest.{Assertions, BeforeAndAfterAll}
import org.scalatest.concurrent.{Eventually, Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.exceptions.TestFailedDueToTimeoutException
import org.scalatest.time.Span
import org.scalatest.time.SpanSugar._
import org.apache.spark.SparkEnv
import org.apache.spark.sql.{Dataset, Encoder, QueryTest, Row}
import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder, RowEncoder}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.plans.physical.AllTuples
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.datasources.v2.StreamingDataSourceV2Relation
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.continuous.{ContinuousExecution, EpochCoordinatorRef, IncrementAndGetEpoch}
import org.apache.spark.sql.execution.streaming.sources.MemorySinkV2
import org.apache.spark.sql.execution.streaming.state.StateStore
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.v2.DataSourceOptions
import org.apache.spark.sql.streaming.StreamingQueryListener._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.util.{Clock, SystemClock, Utils}
/**
* A framework for implementing tests for streaming queries and sources.
*
* A test consists of a set of steps (expressed as a `StreamAction`) that are executed in order,
* blocking as necessary to let the stream catch up. For example, the following adds some data to
* a stream, blocking until it can verify that the correct values are eventually produced.
*
* {{{
* val inputData = MemoryStream[Int]
* val mapped = inputData.toDS().map(_ + 1)
*
* testStream(mapped)(
* AddData(inputData, 1, 2, 3),
* CheckAnswer(2, 3, 4))
* }}}
*
* Note that while we do sleep to allow the other thread to progress without spinning,
* `StreamAction` checks should not depend on the amount of time spent sleeping. Instead they
* should check the actual progress of the stream before verifying the required test condition.
*
* Currently it is assumed that all streaming queries will eventually complete in 10 seconds to
* avoid hanging forever in the case of failures. However, individual suites can change this
* by overriding `streamingTimeout`.
*/
trait StreamTest extends QueryTest with SharedSQLContext with TimeLimits with BeforeAndAfterAll {
// Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
implicit val defaultSignaler: Signaler = ThreadSignaler
/** Suite teardown: also stops the shared streaming state store. */
override def afterAll(): Unit = {
  super.afterAll()
  StateStore.stop() // stop the state store maintenance thread and unload store providers
}
protected val defaultTrigger = Trigger.ProcessingTime(0)
protected val defaultUseV2Sink = false
/** How long to wait for an active stream to catch up when checking a result. */
val streamingTimeout = 10.seconds
/** A trait for actions that can be performed while testing a streaming DataFrame. */
trait StreamAction
/** A trait to mark actions that require the stream to be actively running. */
trait StreamMustBeRunning
/**
 * Adds the given data to the stream. Subsequent check answers will block until this data has
 * been processed.
 */
object AddData {
  /** Builds the action that appends `data` to the given in-memory source. */
  def apply[A](source: MemoryStreamBase[A], data: A*): AddDataMemory[A] =
    AddDataMemory(source, data)
}
/**
 * Adds data to multiple memory streams such that all the data will be made visible in the
 * same batch. This is applicable only to MicroBatchExecution, as this coordination cannot be
 * performed at the driver in ContinuousExecutions.
 */
object MultiAddData {
  def apply[A]
    (source1: MemoryStream[A], data1: A*)(source2: MemoryStream[A], data2: A*): StreamAction = {
    // Wrapping both additions in a single progress-locked action guarantees
    // the stream cannot start a batch between the two.
    val actions = Seq(AddDataMemory(source1, data1), AddDataMemory(source2, data2))
    StreamProgressLockedActions(actions, desc = actions.mkString("[ ", " | ", " ]"))
  }
}
/** A trait that can be extended when testing a source. */
trait AddData extends StreamAction {
  /**
   * Called to adding the data to a source. It should find the source to add data to from
   * the active query, and then return the source object the data was added, as well as the
   * offset of added data.
   */
  def addData(query: Option[StreamExecution]): (BaseStreamingSource, Offset)
}
/** A trait that can be extended when testing a source. */
trait ExternalAction extends StreamAction {
  /** Runs an arbitrary side effect outside of the streaming query itself. */
  def runAction(): Unit
}
/** Appends `data` to an in-memory stream source, reporting the resulting offset. */
case class AddDataMemory[A](source: MemoryStreamBase[A], data: Seq[A]) extends AddData {
  override def toString: String = s"AddData to $source: ${data.mkString(",")}"

  override def addData(query: Option[StreamExecution]): (BaseStreamingSource, Offset) = {
    (source, source.addData(data))
  }
}
/**
 * Checks to make sure that the current data stored in the sink matches the `expectedAnswer`.
 * This operation automatically blocks until all added data has been processed.
 */
object CheckAnswer {
  def apply[A : Encoder](data: A*): CheckAnswerRows = {
    // Round-trip each value through its encoder to get the external Row
    // representation the sink stores.
    val encoder = encoderFor[A]
    val toExternalRow = RowEncoder(encoder.schema).resolveAndBind()
    CheckAnswerRows(
      data.map(d => toExternalRow.fromRow(encoder.toRow(d))),
      lastOnly = false,
      isSorted = false)
  }

  def apply(rows: Row*): CheckAnswerRows = CheckAnswerRows(rows, false, false)

  def apply(globalCheckFunction: Seq[Row] => Unit): CheckAnswerRowsByFunc =
    CheckAnswerRowsByFunc(globalCheckFunction, false)
}
/**
 * Checks that the data from the most recently processed batch matches the
 * `expectedAnswer` (the actions it builds use `lastOnly = true`).
 * This operation automatically blocks until all added data has been processed.
 */
object CheckLastBatch {
  def apply[A : Encoder](data: A*): CheckAnswerRows = {
    apply(isSorted = false, data: _*)
  }

  def apply[A: Encoder](isSorted: Boolean, data: A*): CheckAnswerRows = {
    // Round-trip each value through its encoder to get the external Row
    // representation the sink stores.
    val encoder = encoderFor[A]
    val toExternalRow = RowEncoder(encoder.schema).resolveAndBind()
    CheckAnswerRows(
      data.map(d => toExternalRow.fromRow(encoder.toRow(d))),
      lastOnly = true,
      isSorted = isSorted)
  }

  def apply(rows: Row*): CheckAnswerRows = CheckAnswerRows(rows, true, false)

  def apply(globalCheckFunction: Seq[Row] => Unit): CheckAnswerRowsByFunc =
    CheckAnswerRowsByFunc(globalCheckFunction, true)
}
/** Expects the sink (or just its last batch when `lastOnly`) to contain `expectedAnswer`. */
case class CheckAnswerRows(expectedAnswer: Seq[Row], lastOnly: Boolean, isSorted: Boolean)
  extends StreamAction with StreamMustBeRunning {
  override def toString: String = s"$operatorName: ${expectedAnswer.mkString(",")}"

  // Display name mirrors the helper object that built this action.
  private def operatorName = if (lastOnly) "CheckLastBatch" else "CheckAnswer"
}
/** Expects the sink (or its last batch when `lastOnly`) to contain at least `expectedAnswer`. */
case class CheckAnswerRowsContains(expectedAnswer: Seq[Row], lastOnly: Boolean = false)
  extends StreamAction with StreamMustBeRunning {
  override def toString: String = s"$operatorName: ${expectedAnswer.mkString(",")}"

  // Display name mirrors the subset-check variant of the answer checks.
  private def operatorName = if (lastOnly) "CheckLastBatchContains" else "CheckAnswerContains"
}
/** Verifies the sink contents (or its last batch when `lastOnly`) with an arbitrary function. */
case class CheckAnswerRowsByFunc(
    globalCheckFunction: Seq[Row] => Unit,
    lastOnly: Boolean) extends StreamAction with StreamMustBeRunning {
  override def toString: String = if (lastOnly) "CheckLastBatchByFunc" else "CheckAnswerByFunc"
}
/** Expects exactly `expectedAnswer` to have arrived in the sink since the last check. */
case class CheckNewAnswerRows(expectedAnswer: Seq[Row])
  extends StreamAction with StreamMustBeRunning {
  override def toString: String = s"CheckNewAnswer: ${expectedAnswer.mkString(",")}"
}
/** Factories for [[CheckNewAnswerRows]] (data arrived since the previous check). */
object CheckNewAnswer {
  /** Expects no new data to have arrived. */
  def apply(): CheckNewAnswerRows = CheckNewAnswerRows(Seq.empty)

  def apply[A: Encoder](data: A, moreData: A*): CheckNewAnswerRows = {
    // Round-trip each value through its encoder to get the external Row
    // representation the sink stores.
    val encoder = encoderFor[A]
    val toExternalRow = RowEncoder(encoder.schema).resolveAndBind()
    CheckNewAnswerRows((data +: moreData).map(d => toExternalRow.fromRow(encoder.toRow(d))))
  }

  def apply(rows: Row*): CheckNewAnswerRows = CheckNewAnswerRows(rows)
}
/** Stops the stream. It must currently be running. */
case object StopStream extends StreamAction with StreamMustBeRunning
/** Starts the stream, resuming if data has already been processed. It must not be running. */
case class StartStream(
    trigger: Trigger = defaultTrigger,
    triggerClock: Clock = new SystemClock,
    additionalConfs: Map[String, String] = Map.empty,
    checkpointLocation: String = null)
  extends StreamAction
/** Advance the trigger clock's time manually. */
case class AdvanceManualClock(timeToAdd: Long) extends StreamAction
/**
 * Signals that a failure is expected and should not kill the test.
 *
 * @param isFatalError if this is a fatal error. If so, the error should also be caught by
 *                     UncaughtExceptionHandler.
 * @param assertFailure a function to verify the error.
 */
case class ExpectFailure[T <: Throwable : ClassTag](
    assertFailure: Throwable => Unit = _ => {},
    isFatalError: Boolean = false) extends StreamAction {
  // Runtime class of the expected exception, recovered from the ClassTag.
  val causeClass: Class[T] = implicitly[ClassTag[T]].runtimeClass.asInstanceOf[Class[T]]

  override def toString(): String =
    s"ExpectFailure[${causeClass.getName}, isFatalError: $isFatalError]"
}
/**
 * Performs multiple actions while locking the stream from progressing.
 * This is applicable only to MicroBatchExecution, as progress of ContinuousExecution
 * cannot be controlled from the driver.
 */
case class StreamProgressLockedActions(actions: Seq[StreamAction], desc: String = null)
  extends StreamAction {

  // Use the caller-supplied description when given; fall back to the
  // default case-class rendering otherwise.
  override def toString(): String = {
    if (desc != null) desc else super.toString
  }
}
/** Assert that a body is true */
class Assert(condition: => Boolean, val message: String = "") extends StreamAction {
  // `condition` is by-name: it is only evaluated when the harness runs the action.
  def run(): Unit = { Assertions.assert(condition) }

  override def toString: String = s"Assert(<condition>, $message)"
}
/** Factory helpers for [[Assert]] actions. */
object Assert {
  def apply(condition: => Boolean, message: String = ""): Assert = new Assert(condition, message)

  /** Runs `body` for its side effects; any exception it throws fails the assertion. */
  def apply(message: String)(body: => Unit): Assert = new Assert( { body; true }, message)

  def apply(body: => Unit): Assert = new Assert( { body; true }, "")
}
/** Assert that a condition on the active query is true */
class AssertOnQuery(val condition: StreamExecution => Boolean, val message: String)
  extends StreamAction {
  override def toString: String = s"AssertOnQuery(<condition>, $message)"
}
/** Factory helpers for [[AssertOnQuery]] actions. */
object AssertOnQuery {
  def apply(condition: StreamExecution => Boolean, message: String = ""): AssertOnQuery = {
    new AssertOnQuery(condition, message)
  }

  def apply(message: String)(condition: StreamExecution => Boolean): AssertOnQuery = {
    new AssertOnQuery(condition, message)
  }
}
/** Execute arbitrary code */
object Execute {
  // Wraps the function into an always-true AssertOnQuery so it can run as
  // an ordinary stream action against the active query.
  def apply(func: StreamExecution => Any): AssertOnQuery =
    AssertOnQuery(query => { func(query); true }, "Execute")
}
object AwaitEpoch {
  /** Blocks until the continuous query has reached `epoch`; invalid for microbatch queries. */
  def apply(epoch: Long): AssertOnQuery = Execute { query =>
    query match {
      case s: ContinuousExecution => s.awaitEpoch(epoch)
      case _ => throw new IllegalStateException("microbatch cannot await epoch")
    }
  }
}
object IncrementEpoch {
  /** Asks the epoch coordinator for a fresh epoch, then waits for the previous one. */
  def apply(): AssertOnQuery = Execute { query =>
    query match {
      case s: ContinuousExecution =>
        val coordinator = EpochCoordinatorRef.get(s.currentEpochCoordinatorId, SparkEnv.get)
        val newEpoch = coordinator.askSync[Long](IncrementAndGetEpoch)
        s.awaitEpoch(newEpoch - 1)
      case _ => throw new IllegalStateException("microbatch cannot increment epoch")
    }
  }
}
/**
* Executes the specified actions on the given streaming DataFrame and provides helpful
* error messages in the case of failures or incorrect answers.
*
* Note that if the stream is not explicitly started before an action that requires it to be
* running then it will be automatically started before performing any other actions.
*/
def testStream(
    _stream: Dataset[_],
    outputMode: OutputMode = OutputMode.Append,
    useV2Sink: Boolean = defaultUseV2Sink)(actions: StreamAction*): Unit = synchronized {
  import org.apache.spark.sql.streaming.util.StreamManualClock

  // `synchronized` is added to prevent the user from calling multiple `testStream`s concurrently
  // because this method assumes there is only one active query in its `StreamingQueryListener`
  // and it may not work correctly when multiple `testStream`s run concurrently.

  val stream = _stream.toDF()
  val sparkSession = stream.sparkSession // use the session in DF, not the default session
  // Index of the action currently being executed, used only for error reporting.
  var pos = 0
  var currentStream: StreamExecution = null
  var lastStream: StreamExecution = null
  val awaiting = new mutable.HashMap[Int, Offset]() // source index -> offset to wait for
  val sink = if (useV2Sink) new MemorySinkV2
  else new MemorySink(stream.schema, outputMode, DataSourceOptions.empty())
  // Remembers pre-existing conf values so they can be restored in the finally block.
  val resetConfValues = mutable.Map[String, Option[String]]()
  val defaultCheckpointLocation =
    Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
  // -1 means "no manual clock in use"; otherwise the expected current manual-clock time.
  var manualClockExpectedTime = -1L
  @volatile
  var streamThreadDeathCause: Throwable = null
  // Set UncaughtExceptionHandler in `onQueryStarted` so that we can ensure catching fatal errors
  // during query initialization.
  val listener = new StreamingQueryListener {
    override def onQueryStarted(event: QueryStartedEvent): Unit = {
      // Note: this assumes there is only one query active in the `testStream` method.
      Thread.currentThread.setUncaughtExceptionHandler(new UncaughtExceptionHandler {
        override def uncaughtException(t: Thread, e: Throwable): Unit = {
          streamThreadDeathCause = e
        }
      })
    }
    override def onQueryProgress(event: QueryProgressEvent): Unit = {}
    override def onQueryTerminated(event: QueryTerminatedEvent): Unit = {}
  }
  sparkSession.streams.addListener(listener)

  // If the test doesn't manually start the stream, we do it automatically at the beginning.
  val startedManually =
    actions.takeWhile(!_.isInstanceOf[StreamMustBeRunning]).exists(_.isInstanceOf[StartStream])
  val startedTest = if (startedManually) actions else StartStream() +: actions

  // Renders the action list with a "=>" marker on the currently-executing action,
  // for inclusion in failure messages.
  def testActions = actions.zipWithIndex.map {
    case (a, i) =>
      if ((pos == i && startedManually) || (pos == (i + 1) && !startedManually)) {
        "=> " + a.toString
      } else {
        " " + a.toString
      }
  }.mkString("\\n")

  def currentOffsets =
    if (currentStream != null) currentStream.committedOffsets.toString else "not started"

  def threadState =
    if (currentStream != null && currentStream.queryExecutionThread.isAlive) "alive" else "dead"

  def threadStackTrace =
    if (currentStream != null && currentStream.queryExecutionThread.isAlive) {
      s"Thread stack trace: ${currentStream.queryExecutionThread.getStackTrace.mkString("\\n")}"
    } else {
      ""
    }

  // Full diagnostic dump (progress, stream state, sink contents, last plan) used by failTest.
  def testState = {
    val sinkDebugString = sink match {
      case s: MemorySink => s.toDebugString
      case s: MemorySinkV2 => s.toDebugString
    }
    s"""
       |== Progress ==
       |$testActions
       |
       |== Stream ==
       |Output Mode: $outputMode
       |Stream state: $currentOffsets
       |Thread state: $threadState
       |$threadStackTrace
       |${if (streamThreadDeathCause != null) stackTraceToString(streamThreadDeathCause) else ""}
       |
       |== Sink ==
       |$sinkDebugString
       |
       |
       |== Plan ==
       |${if (currentStream != null) currentStream.lastExecution else ""}
       """.stripMargin
  }

  // Fails the whole test (with full diagnostics) when the condition is false.
  def verify(condition: => Boolean, message: String): Unit = {
    if (!condition) {
      failTest(message)
    }
  }

  // Retries `func` until it succeeds or the streaming timeout expires; failure fails the test.
  def eventually[T](message: String)(func: => T): T = {
    try {
      Eventually.eventually(Timeout(streamingTimeout)) {
        func
      }
    } catch {
      case NonFatal(e) =>
        failTest(message, e)
    }
  }

  def failTest(message: String, cause: Throwable = null) = {
    // Recursively pretty print a exception with truncated stacktrace and internal cause
    def exceptionToString(e: Throwable, prefix: String = ""): String = {
      val base = s"$prefix${e.getMessage}" +
        e.getStackTrace.take(10).mkString(s"\\n$prefix", s"\\n$prefix\\t", "\\n")
      if (e.getCause != null) {
        base + s"\\n$prefix\\tCaused by: " + exceptionToString(e.getCause, s"$prefix\\t")
      } else {
        base
      }
    }
    val c = Option(cause).map(exceptionToString(_))
    val m = if (message != null && message.size > 0) Some(message) else None
    fail(
      s"""
         |${(m ++ c).mkString(": ")}
         |$testState
       """.stripMargin)
  }

  // Highest sink batch id observed by the previous fetch; used by CheckNewAnswerRows.
  var lastFetchedMemorySinkLastBatchId: Long = -1

  // Waits for all awaited offsets to be processed, sanity-checks stateful operator metadata,
  // then reads rows from the memory sink (all, last batch only, or since the last fetch).
  def fetchStreamAnswer(
      currentStream: StreamExecution,
      lastOnly: Boolean = false,
      sinceLastFetchOnly: Boolean = false) = {
    verify(
      !(lastOnly && sinceLastFetchOnly), "both lastOnly and sinceLastFetchOnly cannot be true")
    verify(currentStream != null, "stream not running")

    // Block until all data added has been processed for all the source
    awaiting.foreach { case (sourceIndex, offset) =>
      failAfter(streamingTimeout) {
        currentStream.awaitOffset(sourceIndex, offset)
        // Make sure all processing including no-data-batches have been executed
        if (!currentStream.triggerClock.isInstanceOf[StreamManualClock]) {
          currentStream.processAllAvailable()
        }
      }
    }

    val lastExecution = currentStream.lastExecution
    if (currentStream.isInstanceOf[MicroBatchExecution] && lastExecution != null) {
      // Verify if stateful operators have correct metadata and distribution
      // This can often catch hard to debug errors when developing stateful operators
      lastExecution.executedPlan.collect { case s: StatefulOperator => s }.foreach { s =>
        assert(s.stateInfo.map(_.numPartitions).contains(lastExecution.numStateStores))
        s.requiredChildDistribution.foreach { d =>
          withClue(s"$s specifies incorrect # partitions in requiredChildDistribution $d") {
            assert(d.requiredNumPartitions.isDefined)
            assert(d.requiredNumPartitions.get >= 1)
            if (d != AllTuples) {
              assert(d.requiredNumPartitions.get == s.stateInfo.get.numPartitions)
            }
          }
        }
      }
    }

    val rows = try {
      if (sinceLastFetchOnly) {
        if (sink.latestBatchId.getOrElse(-1L) < lastFetchedMemorySinkLastBatchId) {
          failTest("MemorySink was probably cleared since last fetch. Use CheckAnswer instead.")
        }
        sink.dataSinceBatch(lastFetchedMemorySinkLastBatchId)
      } else {
        if (lastOnly) sink.latestBatchData else sink.allData
      }
    } catch {
      case e: Exception =>
        failTest("Exception while getting data from sink", e)
    }
    lastFetchedMemorySinkLastBatchId = sink.latestBatchId.getOrElse(-1L)
    rows
  }

  // Executes a single StreamAction, updating currentStream/lastStream/awaiting as needed.
  def executeAction(action: StreamAction): Unit = {
    logInfo(s"Processing test stream action: $action")
    action match {
      case StartStream(trigger, triggerClock, additionalConfs, checkpointLocation) =>
        verify(currentStream == null, "stream already running")
        verify(triggerClock.isInstanceOf[SystemClock]
          || triggerClock.isInstanceOf[StreamManualClock],
          "Use either SystemClock or StreamManualClock to start the stream")
        if (triggerClock.isInstanceOf[StreamManualClock]) {
          manualClockExpectedTime = triggerClock.asInstanceOf[StreamManualClock].getTimeMillis()
        }
        val metadataRoot = Option(checkpointLocation).getOrElse(defaultCheckpointLocation)

        // Apply per-test conf overrides, remembering previous values for rollback.
        additionalConfs.foreach(pair => {
          val value =
            if (sparkSession.conf.contains(pair._1)) {
              Some(sparkSession.conf.get(pair._1))
            } else None
          resetConfValues(pair._1) = value
          sparkSession.conf.set(pair._1, pair._2)
        })

        lastStream = currentStream
        currentStream =
          sparkSession
            .streams
            .startQuery(
              None,
              Some(metadataRoot),
              stream,
              Map(),
              sink,
              outputMode,
              trigger = trigger,
              triggerClock = triggerClock)
            .asInstanceOf[StreamingQueryWrapper]
            .streamingQuery
        // Wait until the initialization finishes, because some tests need to use `logicalPlan`
        // after starting the query.
        try {
          currentStream.awaitInitialization(streamingTimeout.toMillis)
          currentStream match {
            case s: ContinuousExecution => eventually("IncrementalExecution was not created") {
              assert(s.lastExecution != null)
            }
            case _ =>
          }
        } catch {
          case _: StreamingQueryException =>
            // Ignore the exception. `StopStream` or `ExpectFailure` will catch it as well.
        }

      case AdvanceManualClock(timeToAdd) =>
        verify(currentStream != null,
          "can not advance manual clock when a stream is not running")
        verify(currentStream.triggerClock.isInstanceOf[StreamManualClock],
          s"can not advance clock of type ${currentStream.triggerClock.getClass}")
        val clock = currentStream.triggerClock.asInstanceOf[StreamManualClock]
        assert(manualClockExpectedTime >= 0)

        // Make sure we don't advance ManualClock too early. See SPARK-16002.
        eventually("StreamManualClock has not yet entered the waiting state") {
          assert(clock.isStreamWaitingAt(manualClockExpectedTime))
        }

        clock.advance(timeToAdd)
        manualClockExpectedTime += timeToAdd
        verify(clock.getTimeMillis() === manualClockExpectedTime,
          s"Unexpected clock time after updating: " +
            s"expecting $manualClockExpectedTime, current ${clock.getTimeMillis()}")

      case StopStream =>
        verify(currentStream != null, "can not stop a stream that is not running")
        try failAfter(streamingTimeout) {
          currentStream.stop()
          verify(!currentStream.queryExecutionThread.isAlive,
            s"microbatch thread not stopped")
          verify(!currentStream.isActive,
            "query.isActive() is false even after stopping")
          verify(currentStream.exception.isEmpty,
            s"query.exception() is not empty after clean stop: " +
              currentStream.exception.map(_.toString()).getOrElse(""))
        } catch {
          case _: InterruptedException =>
          case e: org.scalatest.exceptions.TestFailedDueToTimeoutException =>
            failTest(
              "Timed out while stopping and waiting for microbatchthread to terminate.", e)
          case t: Throwable =>
            failTest("Error while stopping stream", t)
        } finally {
          lastStream = currentStream
          currentStream = null
        }

      case ef: ExpectFailure[_] =>
        verify(currentStream != null, "can not expect failure when stream is not running")
        try failAfter(streamingTimeout) {
          val thrownException = intercept[StreamingQueryException] {
            currentStream.awaitTermination()
          }
          eventually("microbatch thread not stopped after termination with failure") {
            assert(!currentStream.queryExecutionThread.isAlive)
          }
          verify(currentStream.exception === Some(thrownException),
            s"incorrect exception returned by query.exception()")
          val exception = currentStream.exception.get
          verify(exception.cause.getClass === ef.causeClass,
            "incorrect cause in exception returned by query.exception()\\n" +
              s"\\tExpected: ${ef.causeClass}\\n\\tReturned: ${exception.cause.getClass}")
          if (ef.isFatalError) {
            // This is a fatal error, `streamThreadDeathCause` should be set to this error in
            // UncaughtExceptionHandler.
            verify(streamThreadDeathCause != null &&
              streamThreadDeathCause.getClass === ef.causeClass,
              "UncaughtExceptionHandler didn't receive the correct error\\n" +
                s"\\tExpected: ${ef.causeClass}\\n\\tReturned: $streamThreadDeathCause")
            streamThreadDeathCause = null
          }
          ef.assertFailure(exception.getCause)
        } catch {
          case _: InterruptedException =>
          case e: org.scalatest.exceptions.TestFailedDueToTimeoutException =>
            failTest("Timed out while waiting for failure", e)
          case t: Throwable =>
            failTest("Error while checking stream failure", t)
        } finally {
          lastStream = currentStream
          currentStream = null
        }

      case a: AssertOnQuery =>
        verify(currentStream != null || lastStream != null,
          "cannot assert when no stream has been started")
        val streamToAssert = Option(currentStream).getOrElse(lastStream)
        try {
          verify(a.condition(streamToAssert), s"Assert on query failed: ${a.message}")
        } catch {
          case NonFatal(e) =>
            failTest(s"Assert on query failed: ${a.message}", e)
        }

      case a: Assert =>
        // NOTE(review): streamToAssert is unused in this branch.
        val streamToAssert = Option(currentStream).getOrElse(lastStream)
        verify({ a.run(); true }, s"Assert failed: ${a.message}")

      case a: AddData =>
        try {
          // If the query is running with manual clock, then wait for the stream execution
          // thread to start waiting for the clock to increment. This is needed so that we
          // are adding data when there is no trigger that is active. This would ensure that
          // the data gets deterministically added to the next batch triggered after the manual
          // clock is incremented in following AdvanceManualClock. This avoid race conditions
          // between the test thread and the stream execution thread in tests using manual
          // clock.
          if (currentStream != null &&
            currentStream.triggerClock.isInstanceOf[StreamManualClock]) {
            val clock = currentStream.triggerClock.asInstanceOf[StreamManualClock]
            eventually("Error while synchronizing with manual clock before adding data") {
              if (currentStream.isActive) {
                assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
              }
            }
            if (!currentStream.isActive) {
              failTest("Query terminated while synchronizing with manual clock")
            }
          }
          // Add data
          val queryToUse = Option(currentStream).orElse(Option(lastStream))
          val (source, offset) = a.addData(queryToUse)

          // Maps a source instance back to its positional index among the plan's sources.
          def findSourceIndex(plan: LogicalPlan): Option[Int] = {
            plan
              .collect {
                case r: StreamingExecutionRelation => r.source
                case r: StreamingDataSourceV2Relation => r.reader
              }
              .zipWithIndex
              .find(_._1 == source)
              .map(_._2)
          }

          // Try to find the index of the source to which data was added. Either get the index
          // from the current active query or the original input logical plan.
          val sourceIndex =
            queryToUse.flatMap { query =>
              findSourceIndex(query.logicalPlan)
            }.orElse {
              findSourceIndex(stream.logicalPlan)
            }.orElse {
              queryToUse.flatMap { q =>
                findSourceIndex(q.lastExecution.logical)
              }
            }.getOrElse {
              throw new IllegalArgumentException(
                "Could not find index of the source to which data was added")
            }

          // Store the expected offset of added data to wait for it later
          awaiting.put(sourceIndex, offset)
        } catch {
          case NonFatal(e) =>
            failTest("Error adding data", e)
        }

      case e: ExternalAction =>
        e.runAction()

      case CheckAnswerRows(expectedAnswer, lastOnly, isSorted) =>
        val sparkAnswer = fetchStreamAnswer(currentStream, lastOnly)
        QueryTest.sameRows(expectedAnswer, sparkAnswer, isSorted).foreach {
          error => failTest(error)
        }

      case CheckAnswerRowsContains(expectedAnswer, lastOnly) =>
        val sparkAnswer = currentStream match {
          case null => fetchStreamAnswer(lastStream, lastOnly)
          case s => fetchStreamAnswer(s, lastOnly)
        }
        QueryTest.includesRows(expectedAnswer, sparkAnswer).foreach {
          error => failTest(error)
        }

      case CheckAnswerRowsByFunc(globalCheckFunction, lastOnly) =>
        val sparkAnswer = fetchStreamAnswer(currentStream, lastOnly)
        try {
          globalCheckFunction(sparkAnswer)
        } catch {
          case e: Throwable => failTest(e.toString)
        }

      case CheckNewAnswerRows(expectedAnswer) =>
        val sparkAnswer = fetchStreamAnswer(currentStream, sinceLastFetchOnly = true)
        QueryTest.sameRows(expectedAnswer, sparkAnswer).foreach {
          error => failTest(error)
        }
    }
  }

  // Main driver: run every action, then clean up streams, confs, and the listener.
  try {
    startedTest.foreach {
      case StreamProgressLockedActions(actns, _) =>
        // Perform actions while holding the stream from progressing
        assert(currentStream != null,
          s"Cannot perform stream-progress-locked actions $actns when query is not active")
        assert(currentStream.isInstanceOf[MicroBatchExecution],
          s"Cannot perform stream-progress-locked actions on non-microbatch queries")
        currentStream.asInstanceOf[MicroBatchExecution].withProgressLocked {
          actns.foreach(executeAction)
        }
        pos += 1
      case action: StreamAction =>
        executeAction(action)
        pos += 1
    }
    if (streamThreadDeathCause != null) {
      failTest("Stream Thread Died", streamThreadDeathCause)
    }
  } catch {
    case _: InterruptedException if streamThreadDeathCause != null =>
      failTest("Stream Thread Died", streamThreadDeathCause)
    case e: org.scalatest.exceptions.TestFailedDueToTimeoutException =>
      failTest("Timed out waiting for stream", e)
  } finally {
    if (currentStream != null && currentStream.queryExecutionThread.isAlive) {
      currentStream.stop()
    }
    // Rollback prev configuration values
    resetConfValues.foreach {
      case (key, Some(value)) => sparkSession.conf.set(key, value)
      case (key, None) => sparkSession.conf.unset(key)
    }
    sparkSession.streams.removeListener(listener)
  }
}
/**
* Creates a stress test that randomly starts/stops/adds data/checks the result.
*
* @param ds a dataframe that executes + 1 on a stream of integers, returning the result
* @param addData an add data action that adds the given numbers to the stream, encoding them
* as needed
* @param iterations the iteration number
*/
def runStressTest(
    ds: Dataset[Int],
    addData: Seq[Int] => StreamAction,
    iterations: Int = 100): Unit =
  // Delegate to the general overload; the "running" flag is irrelevant here.
  runStressTest(ds, Seq.empty, (data, _) => addData(data), iterations)
/**
* Creates a stress test that randomly starts/stops/adds data/checks the result.
*
* @param ds a dataframe that executes + 1 on a stream of integers, returning the result
* @param prepareActions actions need to run before starting the stress test.
* @param addData an add data action that adds the given numbers to the stream, encoding them
* as needed
* @param iterations the iteration number
*/
def runStressTest(
    ds: Dataset[Int],
    prepareActions: Seq[StreamAction],
    addData: (Seq[Int], Boolean) => StreamAction,
    iterations: Int): Unit = {
  implicit val intEncoder = ExpressionEncoder[Int]()
  // Next integer to feed to the stream; all data so far is 1..dataPos (exclusive upper).
  var dataPos = 0
  // Whether the stream is expected to be running at the current point of the script.
  var running = true
  val actions = new ArrayBuffer[StreamAction]()
  actions ++= prepareActions

  // Appends a CheckAnswer over everything added so far.
  def addCheck() = { actions += CheckAnswer(1 to dataPos: _*) }

  // Appends an AddData-style action with a random (0-9) number of consecutive new items.
  def addRandomData() = {
    val numItems = Random.nextInt(10)
    val data = dataPos until (dataPos + numItems)
    dataPos += numItems
    actions += addData(data, running)
  }

  // Randomly interleave add/check/start/stop actions, tracking expected running state.
  (1 to iterations).foreach { i =>
    val rand = Random.nextDouble()
    if(!running) {
      rand match {
        case r if r < 0.7 => // AddData
          addRandomData()
        case _ => // StartStream
          actions += StartStream()
          running = true
      }
    } else {
      rand match {
        case r if r < 0.1 =>
          addCheck()
        case r if r < 0.7 => // AddData
          addRandomData()
        case _ => // StopStream
          addCheck()
          actions += StopStream
          running = false
      }
    }
  }
  // Ensure the stream is running for the final verification.
  if(!running) { actions += StartStream() }
  addCheck()
  testStream(ds)(actions: _*)
}
object AwaitTerminationTester {

  trait ExpectedBehavior

  /** Expect awaitTermination to not be blocked */
  case object ExpectNotBlocked extends ExpectedBehavior

  /** Expect awaitTermination to get blocked */
  case object ExpectBlocked extends ExpectedBehavior

  /** Expect awaitTermination to throw an exception */
  case class ExpectException[E <: Exception]()(implicit val t: ClassTag[E])
    extends ExpectedBehavior

  // How long to wait before concluding whether a call blocks or not.
  private val DEFAULT_TEST_TIMEOUT = 1.second

  // Runs `awaitTermFunc` and verifies it exhibits the expected blocking/throwing
  // behavior within `testTimeout`.
  def test(
      expectedBehavior: ExpectedBehavior,
      awaitTermFunc: () => Unit,
      testTimeout: Span = DEFAULT_TEST_TIMEOUT
    ): Unit = {
    expectedBehavior match {
      case ExpectNotBlocked =>
        withClue("Got blocked when expected non-blocking.") {
          failAfter(testTimeout) {
            awaitTermFunc()
          }
        }
      case ExpectBlocked =>
        // Blocking is detected by expecting the failAfter timeout to fire.
        withClue("Was not blocked when expected.") {
          intercept[TestFailedDueToTimeoutException] {
            failAfter(testTimeout) {
              awaitTermFunc()
            }
          }
        }
      case e: ExpectException[_] =>
        val thrownException =
          withClue(s"Did not throw ${e.t.runtimeClass.getSimpleName} when expected.") {
            intercept[StreamingQueryException] {
              failAfter(testTimeout) {
                awaitTermFunc()
              }
            }
          }
        assert(thrownException.cause.getClass === e.t.runtimeClass,
          "exception of incorrect type was throw")
    }
  }
}
}
| lxsmnv/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamTest.scala | Scala | apache-2.0 | 36,666 |
package com.typesafe.training.scalatrain
/**
* Created by admin on 7/28/14.
*/
import scala.collection.immutable.Seq
/** A train identified by its [[TrainInfo]], together with its timetable. */
case class Train(info: TrainInfo, schedule: Seq[(Time, Station)]) {
  require(schedule.size >= 2, "Schedule must contain at least 2 Station type values")
  //TODO Verify that schedule is strictly increasing in time

  /** All stations served by this train, in schedule order. */
  val stations: Seq[Station] = schedule.map(_._2)

  override def equals(that: Any): Boolean = that match {
    case Train(thatInfo, thatSchedule) => info == thatInfo && schedule == thatSchedule
    case _ => false
  }
}
| praneetloke/FastTrackToScala | src/main/scala/com/typesafe/training/scalatrain/Train.scala | Scala | mit | 712 |
package mesosphere.marathon.core.launcher.impl
import akka.pattern.AskTimeoutException
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.core.launcher.{ TaskOp, OfferProcessor, OfferProcessorConfig, TaskLauncher }
import mesosphere.marathon.core.matcher.base.OfferMatcher
import mesosphere.marathon.core.matcher.base.OfferMatcher.{ MatchedTaskOps, TaskOpWithSource }
import mesosphere.marathon.core.task.TaskStateOp
import mesosphere.marathon.core.task.tracker.TaskCreationHandler
import mesosphere.marathon.metrics.{ MetricPrefixes, Metrics }
import mesosphere.marathon.state.Timestamp
import org.apache.mesos.Protos.{ Offer, OfferID }
import org.slf4j.LoggerFactory
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.control.NonFatal
/**
* Passes processed offers to the offerMatcher and launches the appropriate tasks.
*/
private[launcher] class OfferProcessorImpl(
    conf: OfferProcessorConfig, clock: Clock,
    metrics: Metrics,
    offerMatcher: OfferMatcher,
    taskLauncher: TaskLauncher,
    taskCreationHandler: TaskCreationHandler) extends OfferProcessor {
  import scala.concurrent.ExecutionContext.Implicits.global

  private[this] val log = LoggerFactory.getLogger(getClass)

  // Deadlines (relative durations) taken from configuration: how long matching may take,
  // and how long persisting the matched task ops may take afterwards.
  private[this] val offerMatchingTimeout = conf.offerMatchingTimeout().millis
  private[this] val saveTasksToLaunchTimeout = conf.saveTasksToLaunchTimeout().millis

  // Meters/timers instrumenting each stage of offer processing.
  private[this] val incomingOffersMeter =
    metrics.meter(metrics.name(MetricPrefixes.SERVICE, getClass, "incomingOffers"))
  private[this] val matchTimeMeter =
    metrics.timer(metrics.name(MetricPrefixes.SERVICE, getClass, "matchTime"))
  private[this] val matchErrorsMeter =
    metrics.meter(metrics.name(MetricPrefixes.SERVICE, getClass, "matchErrors"))
  private[this] val savingTasksTimeMeter =
    metrics.timer(metrics.name(MetricPrefixes.SERVICE, getClass, "savingTasks"))
  private[this] val savingTasksTimeoutMeter =
    metrics.meter(metrics.name(MetricPrefixes.SERVICE, getClass, "savingTasksTimeouts"))
  private[this] val savingTasksErrorMeter =
    metrics.meter(metrics.name(MetricPrefixes.SERVICE, getClass, "savingTasksErrors"))

  /**
   * Matches the offer, persists the resulting task ops within the saving deadline, and
   * finally accepts or declines the offer. Matching failures and timeouts degrade to an
   * empty match that requests the offer to be resent.
   */
  override def processOffer(offer: Offer): Future[Unit] = {
    incomingOffersMeter.mark()
    val matchingDeadline = clock.now() + offerMatchingTimeout
    val savingDeadline = matchingDeadline + saveTasksToLaunchTimeout
    val matchFuture: Future[MatchedTaskOps] = matchTimeMeter.timeFuture {
      offerMatcher.matchOffer(matchingDeadline, offer)
    }
    matchFuture
      .recover {
        case e: AskTimeoutException =>
          matchErrorsMeter.mark()
          log.warn(s"Could not process offer '${offer.getId.getValue}' in time. (See --max_offer_matching_timeout)")
          MatchedTaskOps(offer.getId, Seq.empty, resendThisOffer = true)
        case NonFatal(e) =>
          matchErrorsMeter.mark()
          log.error(s"Could not process offer '${offer.getId.getValue}'", e)
          MatchedTaskOps(offer.getId, Seq.empty, resendThisOffer = true)
      }.flatMap {
        case MatchedTaskOps(offerId, tasks, resendThisOffer) =>
          // Persist the matched ops; any op that could not be saved is dropped and the
          // offer flagged for resend.
          savingTasksTimeMeter.timeFuture {
            saveTasks(tasks, savingDeadline).map { savedTasks =>
              def notAllSaved: Boolean = savedTasks.size != tasks.size
              MatchedTaskOps(offerId, savedTasks, resendThisOffer || notAllSaved)
            }
          }
      }.flatMap {
        case MatchedTaskOps(offerId, Nil, resendThisOffer) => declineOffer(offerId, resendThisOffer)
        case MatchedTaskOps(offerId, tasks, _) => acceptOffer(offerId, tasks)
      }
  }

  // Declines the offer towards Mesos.
  private[this] def declineOffer(offerId: OfferID, resendThisOffer: Boolean): Future[Unit] = {
    //if the offer should be resent, than we ignore the configured decline offer duration
    val duration: Option[Long] = if (resendThisOffer) None else conf.declineOfferDuration.get
    taskLauncher.declineOffer(offerId, duration)
    Future.successful(())
  }

  // Accepts the offer and notifies each op's source about success; if the launcher
  // rejects (driver unavailable), the already persisted task state is reverted.
  private[this] def acceptOffer(offerId: OfferID, taskOpsWithSource: Seq[TaskOpWithSource]): Future[Unit] = {
    if (taskLauncher.acceptOffer(offerId, taskOpsWithSource.map(_.op))) {
      log.debug("Offer [{}]. Task launch successful", offerId.getValue)
      taskOpsWithSource.foreach(_.accept())
      Future.successful(())
    }
    else {
      log.warn("Offer [{}]. Task launch rejected", offerId.getValue)
      taskOpsWithSource.foreach(_.reject("driver unavailable"))
      revertTaskOps(taskOpsWithSource.view.map(_.op))
    }
  }

  /**
   * Revert the effects of the task ops on the task state.
   * Ops are reverted sequentially: restore the old task when one existed, otherwise
   * force-expunge the newly created task id.
   */
  private[this] def revertTaskOps(ops: Iterable[TaskOp]): Future[Unit] = {
    ops.foldLeft(Future.successful(())) { (terminatedFuture, nextOp) =>
      terminatedFuture.flatMap { _ =>
        nextOp.oldTask match {
          case Some(existingTask) =>
            taskCreationHandler.created(TaskStateOp.Revert(existingTask)).map(_ => ())
          case None =>
            taskCreationHandler.terminated(TaskStateOp.ForceExpunge(nextOp.taskId)).map(_ => ())
        }
      }
    }.recover {
      case NonFatal(e) =>
        throw new RuntimeException("while reverting task ops", e)
    }
  }

  /**
   * Saves the given tasks sequentially, evaluating before each save whether the given deadline has been reached
   * already.
   */
  private[this] def saveTasks(ops: Seq[TaskOpWithSource], savingDeadline: Timestamp): Future[Seq[TaskOpWithSource]] = {
    // Persists a single op; on storage errors the op is rejected and its effects reverted,
    // so it is excluded from the returned sequence.
    def saveTask(taskOpWithSource: TaskOpWithSource): Future[Option[TaskOpWithSource]] = {
      val taskId = taskOpWithSource.taskId
      log.info("Persisting TaskStateOp for [{}]", taskOpWithSource.taskId)
      taskCreationHandler
        .created(taskOpWithSource.op.stateOp)
        .map(_ => Some(taskOpWithSource))
        .recoverWith {
          case NonFatal(e) =>
            savingTasksErrorMeter.mark()
            taskOpWithSource.reject(s"storage error: $e")
            log.warn(s"error while storing task $taskId for app [${taskId.appId}]", e)
            revertTaskOps(Some(taskOpWithSource.op))
        }.map {
          case Some(savedTask) => Some(taskOpWithSource)
          case None => None
        }
    }

    // Fold over the ops sequentially; once the deadline passes, remaining ops are
    // rejected instead of saved.
    ops.foldLeft(Future.successful(Vector.empty[TaskOpWithSource])) { (savedTasksFuture, nextTask) =>
      savedTasksFuture.flatMap { savedTasks =>
        if (clock.now() > savingDeadline) {
          savingTasksTimeoutMeter.mark(savedTasks.size.toLong)
          nextTask.reject("saving timeout reached")
          log.info(
            s"Timeout reached, skipping launch and save for ${nextTask.op.taskId}. " +
              s"You can reconfigure this with --${conf.saveTasksToLaunchTimeout.name}.")
          Future.successful(savedTasks)
        }
        else {
          val saveTaskFuture = saveTask(nextTask)
          saveTaskFuture.map(task => savedTasks ++ task)
        }
      }
    }
  }
}
| vivekjuneja/marathon | src/main/scala/mesosphere/marathon/core/launcher/impl/OfferProcessorImpl.scala | Scala | apache-2.0 | 6,906 |
package util.uima
import org.apache.uima.jcas.cas.FloatArray
/**
* @author K.Sakamoto
* Created on 2016/09/25
*/
object FloatArrayUtils {
  /** Implicitly enriches a UIMA [[FloatArray]] with the operations of [[FloatArrayUtils]]. */
  implicit def floatArrayToFloatArrayUtils(repr: FloatArray): FloatArrayUtils =
    new FloatArrayUtils(repr)
}
/**
* @author K.Sakamoto
* @param repr float array
*/
class FloatArrayUtils(repr: FloatArray) {
  /** Copies the wrapped UIMA array into a Scala `Seq[Float]`. */
  def toSeq: Seq[Float] = {
    val copied = repr.toArray
    copied.toSeq
  }
}
| ktr-skmt/FelisCatusZero | src/main/scala/util/uima/FloatArrayUtils.scala | Scala | apache-2.0 | 432 |
package lila.coordinate
import com.typesafe.config.Config
import lila.common.PimpedConfig._
final class Env(
    config: Config,
    db: lila.db.Env) {

  // Config key holding the name of the collection that stores coordinate scores.
  private val CollectionScore = config getString "collection.score"

  // Public API over the score collection; built lazily on first use.
  lazy val api = new CoordinateApi(scoreColl = scoreColl)

  lazy val forms = DataForm

  private[coordinate] lazy val scoreColl = db(CollectionScore)
}
object Env {

  // Singleton module instance, wired on first access from the "coordinate" config block.
  lazy val current: Env = "coordinate" boot new Env(
    config = lila.common.PlayApp loadConfig "coordinate",
    db = lila.db.Env.current)
}
| r0k3/lila | modules/coordinate/src/main/Env.scala | Scala | mit | 536 |
/*
* This file is part of the changelogit project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gnieh.changelogit
import scopt.immutable.OptionParser
import org.fusesource.scalate._
import org.fusesource.scalate.support._
import org.fusesource.scalate.util._
import java.io._
/** @author Lucas Satabin
*
*/
object ChangeloGit extends App {

  // Command line parser: each option copies the matching field into the Options value.
  // NOTE(review): both "tickets-url" and "until" register the short flag "u" — looks like
  // a conflict; confirm against the scopt version in use.
  val parser = new OptionParser[Options]("changelogit", true) {
    def options = Seq(
      help("h", "help", "displays this help"),
      opt("p", "project-name", "sets the project name") {
        (s, o) => o.copy(projectName = s)
      },
      opt("u", "tickets-url", "sets the base URL for browsing tickets") {
        (s, o) => o.copy(ticketUrl = Option(s))
      },
      opt("o", "output-file", "specifies the output file (default prints in the console") {
        (s, o) => o.copy(outputFile = Option(s))
      },
      opt("f", "output-format", "specifies the output format (html, dpkg, markdown)") {
        (s, o) => o.copy(format = s)
      },
      opt("s", "since", "the initial commit in the range or none to analyze everything since the beginning of times (default is none)") {
        (s, o) => o.copy(since = Option(s))
      },
      opt("u", "until", "the final commit in the range (default is HEAD)") {
        (s, o) => o.copy(until = s)
      },
      opt("b", "branch", "the branch to checkout (default is master)") {
        (s, o) => o.copy(branch = s)
      },
      opt("d", "template-dir", "the directory in which user defined templates are located (default is current directory)") {
        (s, o) => o.copy(templateDir = s)
      },
      argOpt("<git-repository>", "specifies the git repository") {
        (s, o) => o.copy(directory = s)
      })
  }

  // Runs only when parsing succeeded: analyze the git log, then render the template.
  for (options <- parser.parse(args, Options())) {
    val analyzer = new LogAnalyzer(options)
    val model = analyzer.analyze
    // Write either to the requested file or to stdout.
    val writer = options.outputFile match {
      case Some(f) => new FileWriter(f)
      case None => new PrintWriter(System.out)
    }
    val engine = new TemplateEngine
    // short life-cycle, no need to check for template changes
    engine.allowReload = false
    engine.allowCaching = false
    engine.templateDirectories = List(options.templateDir)
    // the resource loader try first in the template dir, and then in the classpath
    engine.resourceLoader = new FileResourceLoader {
      override def resource(uri: String) = super.resource(uri) match {
        case None =>
          // if it was not found on file system, try in the classpath
          Option(getClass.getResource(uri)).map(Resource.fromURL _)
        case res => res
      }
    }
    try {
      writer.write(engine.layout(options.format + ".mustache", model))
    } finally {
      writer.close
      // XXX horrible but necessary hack to shutdown the compiler at the end
      // see https://www.assembla.com/spaces/scalate/support/tickets/281
      engine.compiler.asInstanceOf[ScalaCompiler].compiler.askShutdown
    }
  }
}
/**
 * Command line options for the changelog generator.
 *
 * @param projectName name of the project, shown in the generated changelog
 * @param ticketUrl   optional base URL used to link ticket references
 * @param outputFile  optional output file; when `None` the result is printed to the console
 * @param format      output format, one of the bundled templates (html, dpkg, markdown)
 * @param directory   path to the git repository to analyze
 * @param since       initial commit of the range, or `None` to start from the first commit
 * @param until       final commit of the range
 * @param branch      branch to check out
 * @param templateDir directory containing user defined templates
 */
case class Options(projectName: String = "<project>",
                   ticketUrl: Option[String] = None,
                   outputFile: Option[String] = None,
                   format: String = "markdown",
                   directory: String = ".",
                   since: Option[String] = None,
                   until: String = "HEAD",
                   branch: String = "master",
                   templateDir: String = ".")
package akka.http.extensions.stubs
import akka.http.extensions.security._
import akka.http.extensions.utils.BiMap
import com.github.t3hnar.bcrypt._
import scala.concurrent.Future
/**
* Inmemory login and registration controller, recommended mostly for testing
*/
class InMemoryLoginController extends FutureLoginController {

  // Users indexed by username and by email; the two maps are kept in sync by
  // addUser/removeUser. Passwords are stored bcrypt-hashed.
  protected var usersByName: Map[String, LoginInfo] = Map.empty
  protected var usersByEmail: Map[String, LoginInfo] = Map.empty

  /**
   * Adds the user to both indexes.
   * @param computeHash when true (default) the password is bcrypt-hashed first
   */
  def addUser(user: LoginInfo, computeHash: Boolean = true): LoginInfo =
    if (computeHash) addUser(this.withHash(user), false)
    else {
      usersByName = usersByName + (user.username -> user)
      usersByEmail = usersByEmail + (user.email -> user)
      user
    }

  /** Removes the user from both indexes. */
  def removeUser(user: LoginInfo) = {
    usersByName = usersByName - user.username
    usersByEmail = usersByEmail - user.email
    this
  }

  // Crude full-match email validation (local part @ domain).
  def isValidEmail(email: String): Boolean = """(\\w+)@([\\w\\.]+)""".r.unapplySeq(email).isDefined

  override def loginByName(username: String, password: String): Future[LoginResult] =
    usersByName.get(username) match {
      case None => Future.successful(UserDoesNotExist(username))
      case Some(user) =>
        Future.successful(
          if (password.isBcrypted(user.password)) LoggedIn(user)
          else PasswordDoesNotMuch(username, password))
    }

  override def loginByEmail(email: String, password: String): Future[LoginResult] =
    usersByEmail.get(email) match {
      case None => Future.successful(EmailDoesNotExist(email))
      case Some(user) =>
        Future.successful(
          if (password.isBcrypted(user.password)) LoggedIn(user)
          else PasswordDoesNotMuch(email, password))
    }

  def exists(user: LoginInfo) = usersByName.values.exists(_ == user)

  def exists(username: String) = usersByName.contains(username)

  /** Drops all registered users. */
  def clean() = {
    usersByName = Map.empty
    usersByEmail = Map.empty
  }

  /**
   * Validates and registers a new user.
   *
   * Fix: previously a second registration with an already-used email silently
   * overwrote the `usersByEmail` index, so `loginByEmail` only authenticated the
   * latest registrant; duplicate emails are now rejected.
   */
  override def register(username: String, password: String, email: String): Future[RegistrationResult] = {
    val registerInfo = LoginInfo(username, password, email)
    usersByName.get(username) match {
      case Some(existed) => Future.successful(UserAlreadyExists(existed))
      case None if password.length < 4 => Future.successful(BadPassword(registerInfo, "password is too short"))
      case None if password == username => Future.successful(BadPassword(registerInfo, "password and username cannot be same"))
      case None if !this.isValidEmail(email) => Future.successful(BadEmail(registerInfo, s"$email is not an email!"))
      case None if usersByEmail.contains(email) =>
        Future.successful(BadEmail(registerInfo, s"user with email $email already exists"))
      case None =>
        Future.successful(UserRegistered(addUser(registerInfo)))
    }
  }
}
package lightning.evaluator.json
import io.shaka.http.Http.http
import io.shaka.http.Request.GET
import io.shaka.http.Status
import lightning.model.SystemName
import lightning.model.Node
/** Fetches `url` and wraps the response for `targetNode`: the body on Status.OK, a failure otherwise. */
case class HttpGraphStatusEvaluator(url: String, targetNode: Node) extends JsonStatusEvaluator {
  def apply() = {
    val response = http(GET(url))
    response.status match {
      case Status.OK => SuccessfulStatusResponse(url, targetNode, response.entityAsString)
      case _         => UnsuccessfulStatusResponse(url, targetNode)
    }
  }
}
/**
* Copyright 2009 Jorge Ortiz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package org.scala_tools.time
import org.joda.time._
/** Enriches a Joda-Time `ReadableInterval` with Scala-style accessors. */
class RichReadableInterval(underlying: ReadableInterval) {
  def chronology: Chronology = underlying.getChronology
  def end: DateTime          = underlying.getEnd
  def start: DateTime        = underlying.getStart
  def duration: Duration     = underlying.toDuration
  def millis: Long           = underlying.toDuration.getMillis
  // TODO: Should > and < be added as aliases for isAfter and isBefore?
  // Could be convenient, or just confusing because this isn't Ordered.
}
| jorgeortiz85/scala-time | src/main/scala/org/scala_tools/time/RichReadableInterval.scala | Scala | apache-2.0 | 1,123 |
/*
* Copyright (C) 2013 Tactix4
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.tactix4.t4openerp.connector
import com.tactix4.t4openerp.connector.codecs.GeneratedEncodeOE._
import scala.language.implicitConversions
/**
* Class to hold the context data used by OpenERP to adjust times and languages as well as to hold other
* arbitrary data in a map
*
* @author max@tactix4.com
* 5/20/13
*/
case class OEContext(
  activeTest: Boolean = true,
  lang: String        = "en_GB",
  timezone: String    = "Europe/London")
/**
 * Companion object providing the implicit encoder used to serialise an
 * OEContext for transport ("tz" is the wire name for `timezone`).
 */
object OEContext {
  implicit val oeContextEncoder =
    encode3M((ctx: OEContext) => (ctx.activeTest, ctx.lang, ctx.timezone))("activeTest", "lang", "tz")
}
| NeovaHealth/t4openerp-connector | src/main/scala/com/tactix4/t4openerp.connector/OEContext.scala | Scala | agpl-3.0 | 1,399 |
package objektwerks
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import scala.collection.JavaConverters._
/** Exercises Spark's built-in accumulator types: long, double, and collection. */
class AccumulatorTest extends AnyFunSuite with Matchers {
  import SparkInstance._

  test("long accumulator") {
    val acc = sparkContext.longAccumulator("longAcc")
    acc.add(1)
    acc.name.get shouldBe "longAcc"
    acc.value shouldBe 1
  }

  test("double accumulator") {
    val acc = sparkContext.doubleAccumulator("doubleAcc")
    acc.add(1.0)
    acc.name.get shouldBe "doubleAcc"
    acc.value shouldBe 1.0
  }

  test("collection accumulator") {
    val acc = sparkContext.collectionAccumulator[Int]("intsAcc")
    Seq(1, 2, 3).foreach(i => acc.add(i))
    acc.name.get shouldBe "intsAcc"
    acc.value.asScala.sum shouldEqual 6
  }
}
package vggames.shared.view
import vggames.shared.GamesConfiguration
import vggames.shared.Game
import scala.collection.concurrent.Map
import scalatags.Text.all._
import vggames.shared.vraptor.GameFactoryCache
import scala.util.Try
import vggames.shared.GameView
import scala.util.Success
import scala.util.Failure
import vggames.shared.task.JudgedTask
import vggames.shared.task.Exercise
/** Renders the page for a single exercise of a game, delegating the body to a per-game view. */
class TaskView extends TypedView[(String, Exercise, Game, Option[JudgedTask], String)] {

  val title = "title".tag[String]

  override def render(t: (String, Exercise, Game, Option[JudgedTask], String)) = {
    val (gameName, task, game, judgedTask, lastAttempt) = t
    html(
      head(
        title(s"Exercício ${task.index} de ${game.name}"),
        meta(name := "robots", "content".attr := "noindex")),
      body(
        raw(renderGameView(game, task, judgedTask, lastAttempt))))
  }

  // Looks up the game-specific view class by naming convention and renders it;
  // on any failure an error message (including the stack trace) is returned instead.
  private def renderGameView(game: Game, task: Exercise, judgedTask: Option[JudgedTask], lastAttempt: String): String = {
    val viewName = s"vggames.${game.path}.${game.path.capitalize}GameView"
    val attempt = Try(Class.forName(viewName).newInstance.asInstanceOf[GameView])
      .map(_.render(game, task, judgedTask, lastAttempt))
    attempt match {
      case Success(markup) => markup
      case Failure(cause) => s"Não foi encontrada view para o jogo ${game.name}. " +
        s"Exceção: ${cause.getClass.getName} ${cause.getMessage} <pre>${cause.getStackTraceString}</pre>"
    }
  }
}
| rustaeja/rustaeja-project-8273 | web/src/main/scala/vggames/shared/view/TaskView.scala | Scala | gpl-3.0 | 1,453 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.event.events
import collection.JavaConverters._
import java.util.{List ⇒ JList}
import org.orbeon.oxf.common.ValidationException
import org.orbeon.oxf.util._
import org.orbeon.oxf.xforms.event.XFormsEvent
import org.orbeon.oxf.xforms.submission.XFormsModelSubmission
import org.orbeon.oxf.xml._
import org.orbeon.saxon.om._
import org.apache.log4j.Level
import org.orbeon.oxf.util.ScalaUtils._
import java.net.URL
import org.orbeon.oxf.xforms.XFormsProperties
import java.io.InputStreamReader
import org.orbeon.oxf.xforms.event.XFormsEvent._
import scala.util.control.NonFatal
// Helper trait for xforms-submit-done/xforms-submit-error
trait SubmitResponseEvent extends XFormsEvent {

  // Result of the submission's connection attempt, if any
  def connectionResult: Option[ConnectionResult]

  // Response headers as a Scala map; the inner Option(c) guards against a null ConnectionResult
  final def headers = connectionResult flatMap (c ⇒ Option(c) map (_.responseHeaders)) map (_.asScala)

  // Log under the submission logging category instead of the default one
  override implicit def indentedLogger = containingDocument.getIndentedLogger(XFormsModelSubmission.LOGGING_CATEGORY)

  // Event properties are resolved lazily through the Getters table
  override def lazyProperties = getters(this, SubmitResponseEvent.Getters)

  // Translate deprecated property names (e.g. "body") before falling back to the default lookup
  override def newPropertyName(name: String) = SubmitResponseEvent.Deprecated.get(name) orElse super.newPropertyName(name)
}
private object SubmitResponseEvent {

  import NamespaceMapping.EMPTY_MAPPING
  import XPathCache._

  // "Zero or more elements, each one representing a content header in the error response received by a
  // failed submission. The returned node-set is empty if the failed submission did not receive an error
  // response or if there were no headers. Each element has a local name of header with no namespace URI and
  // two child elements, name and value, whose string contents are the name and value of the header,
  // respectively."
  //
  // Builds that <headers> document as a string (with XML-escaped names/values) and parses it to a tiny tree.
  def headersDocument(headersOpt: Option[collection.Map[String, JList[String]]]): Option[DocumentInfo] =
    headersOpt filter (_.nonEmpty) map { headers ⇒
      val sb = new StringBuilder
      sb.append("<headers>")
      for ((name, values) ← headers) {
        sb.append("<header><name>")
        sb.append(XMLUtils.escapeXMLMinimal(name))
        sb.append("</name>")
        for (value ← values.asScala) {
          sb.append("<value>")
          sb.append(XMLUtils.escapeXMLMinimal(value))
          sb.append("</value>")
        }
        sb.append("</header>")
      }
      sb.append("</headers>")
      TransformerUtils.stringToTinyTree(XPathCache.getGlobalConfiguration, sb.toString, false, false) // handleXInclude, handleLexical
    }

  // Evaluates "/headers/header" over the generated headers document to produce the property value
  def headerElements(e: SubmitResponseEvent): Option[Seq[Item]] = headersDocument(e.headers) map { document: Item ⇒
    evaluateKeepItems(
      Seq(document).asJava,
      1,
      "/headers/header",
      EMPTY_MAPPING,
      null, null, null, null,
      e.locationData,
      e.containingDocument.getRequestStats.addXPathStat).asScala
  }

  // Response body as either a String (Left) or a parsed document (Right), flattened to AnyRef
  def body(e: SubmitResponseEvent): Option[AnyRef] = {
    implicit val logger = e.indentedLogger
    e.connectionResult flatMap tryToReadBody map {
      case Left(string) ⇒ string
      case Right(document) ⇒ document
    }
  }

  // Reads the response body, preferring an XML parse and falling back to text;
  // returns None for binary content or when reading/parsing fails.
  def tryToReadBody(connectionResult: ConnectionResult)(implicit logger: IndentedLogger): Option[String Either DocumentInfo] = {
    // Log response details if not done already
    connectionResult.logResponseDetailsIfNeeded(logger, Level.ERROR, "xforms-submit-error")

    // Try to add body information if present
    if (connectionResult.hasContent) {
      // "When the error response specifies an XML media type as defined by [RFC 3023], the response body is
      // parsed into an XML document and the root element of the document is returned. If the parse fails, or if
      // the error response specifies a text media type (starting with text/), then the response body is returned
      // as a string. Otherwise, an empty string is returned."

      // Non-fatal errors are logged and turned into None; fatal errors still propagate
      def warn[T](message: String): PartialFunction[Throwable, Option[T]] = {
        case NonFatal(t) ⇒
          logger.logWarning("xforms-submit-error", message, t)
          None
      }

      // Read the whole stream to a temp URI so we can read it more than once if needed
      val tempURIOpt =
        try useAndClose(connectionResult.getResponseInputStream) { is ⇒
          Option(NetUtils.inputStreamToAnyURI(is, NetUtils.REQUEST_SCOPE))
        } catch
          warn("error while reading response body.")

      tempURIOpt flatMap { tempURI ⇒

        def asDocument =
          if (XMLUtils.isXMLMediatype(connectionResult.getResponseMediaType)) {
            // XML content-type
            // Read stream into Document
            // TODO: In case of text/xml, charset is not handled. Should modify readTinyTree() and readDom4j()
            try useAndClose(new URL(tempURI).openStream()) { is ⇒
              val document = TransformerUtils.readTinyTree(XPathCache.getGlobalConfiguration, is, connectionResult.resourceURI, false, true)
              if (XFormsProperties.getErrorLogging.contains("submission-error-body"))
                logger.logError("xforms-submit-error", "setting body document", "body", "\\n" + TransformerUtils.tinyTreeToString(document))
              Some(document)
            } catch
              warn("error while parsing response body as XML, defaulting to plain text.")
          } else
            None

        def asString =
          if (XMLUtils.isTextOrJSONContentType(connectionResult.getResponseMediaType)) {
            // XML parsing failed, or we got a text content-type
            // Read stream into String
            try {
              val charset = NetUtils.getTextCharsetFromContentType(connectionResult.getResponseContentType)
              val is = new URL(tempURI).openStream()
              useAndClose(new InputStreamReader(is, charset)) { reader ⇒
                val string = NetUtils.readStreamAsString(reader)
                if (XFormsProperties.getErrorLogging.contains("submission-error-body"))
                  logger.logError("xforms-submit-error", "setting body string", "body", "\\n" + string)
                Some(string)
              }
            } catch
              warn("error while reading response body ")
          } else {
            // This is binary
            // Don't store anything for now
            None
          }

        asDocument orElse asString match {
          case Some(document: DocumentInfo) ⇒ Some(Right(document))
          case Some(string: String) ⇒ Some(Left(string))
          case _ ⇒ None
        }
      }
    } else
      None
  }

  // Deprecated property name → current name
  val Deprecated = Map(
    "body" → "response-body"
  )

  // Lazy property getters, keyed by property name
  val Getters = Map[String, SubmitResponseEvent ⇒ Option[Any]] (
    "response-headers" → headerElements,
    "response-reason-phrase" → (e ⇒ throw new ValidationException("Property Not implemented yet: " + "response-reason-phrase", e.locationData)),
    "response-body" → body,
    "body" → body,
    "resource-uri" → (e ⇒ e.connectionResult flatMap (c ⇒ Option(c.resourceURI))),
    "response-status-code" → (e ⇒ e.connectionResult flatMap (c ⇒ Option(c.statusCode) filter (_ > 0)))
  )
}
package com.nabijaczleweli.fancymagicks.element.caster
import com.nabijaczleweli.fancymagicks.util.NBTReloadable
/**
 * Lifecycle hooks for casting an element.
 *
 * NOTE(review): the call order start → continue → end is implied by the names
 * only; confirm against callers. Persistence comes from [[NBTReloadable]].
 */
trait ElementCaster extends NBTReloadable {
  def start(): Unit
  def continue(): Unit
  def end(): Unit
}
| nabijaczleweli/Magicks | src/main/scala/com/nabijaczleweli/fancymagicks/element/caster/ElementCaster.scala | Scala | mit | 221 |
package sledtr.section.filter
import net.htmlparser.jericho._
import scala.collection.JavaConversions._
import sledtr.MyPreDef._
// Writes out the HTML <div> structure for investigation/debugging purposes.
object HtmlInfo {

  /** Renders the nested div structure of `src`, marking `target`, as an indented string. */
  def check(src: Source, target: Element): String = {
    val out = new StringBuffer
    for (element <- src.getChildElements) r(element, target, out, 0)
    out.toString
  }

  // Recursively appends one line per <div> that carries an id or class attribute;
  // divs without either (and non-div elements) are descended into transparently.
  def r(elm: Element, target: Element, sb: StringBuffer, depth: Int): Unit = {
    for (child <- elm.getChildElements()) {
      child.getName match {
        case "div" =>
          val id  = Option(child.getAttributeValue("id"))
          val cls = Option(child.getAttributeValue("class"))
          if (id.isDefined || cls.isDefined) {
            val indent = " " * depth * 2
            sb.a(indent + "<div%s%s>%s".format(
              id.map(" id=" + _).getOrElse(""),
              cls.map(" class=" + _).getOrElse(""),
              if (child == target) " < ============ target" else ""))
            r(child, target, sb, depth + 1)
            sb.a(indent + "</div>")
          } else r(child, target, sb, depth + 1)
        case _ => r(child, target, sb, depth)
      }
    }
  }
}
/* ______________ ___ __ ___ _____ _____ ____ __ ____________ *\\
** / __/_ __/ __ \\/ _ \\/ |/ / / __/ |/ / _ \\/ __ \\/ / / /_ __/ __/ **
** _\\ \\ / / / /_/ / , _/ /|_/ / / _// / , _/ /_/ / /_/ / / / / _/ **
** /___/ /_/ \\____/_/|_/_/ /_/ /___/_/|_/_/|_|\\____/\\____/ /_/ /___/ **
** **
** Storm Enroute (c) 2011 **
\\* www.storm-enroute.com */
package org.brijest.storm
package engine
package impl.local
import com.weiglewilczek.slf4s._
import org.h2.jdbc._
import java.io.File
import model.{Area, AreaId, PlayerId}
import Simulators.State
class Database(config: Config) extends Logging {
  import logger._

  // Path of the save file, without the ".h2.db" suffix H2 appends on disk
  val filename = "%s/%s/%s".format(config.basedir, config.savedir, config.savename)
  // Whether a save already exists — decides whether the tables get created below
  val existing = (new File(filename + ".h2.db")).exists
  val url = "jdbc:h2:%s;FILE_LOCK=FS;PAGE_SIZE=1024;CACHE_SIZE=8192".format(filename)

  Class.forName("org.h2.Driver")
  val conn = java.sql.DriverManager.getConnection(url)

  import Database._

  /** Executes a DDL/DML statement. */
  def update(s: String) {
    val stmt = conn.createStatement()
    stmt.executeUpdate(s)
  }

  /** Executes a query and returns its result set. */
  def query(s: String) = {
    val stmt = conn.createStatement()
    stmt.executeQuery(s)
  }

  if (!existing) {
    info("creating new database")
    update("create table %s (%s bigint not null, %s blob, %s blob, primary key (%s))".format(tablename, areaid, data, statedata, areaid))
    update("create table %s (%s bigint not null, %s bigint not null, primary key (%s))".format(playerpos, pidcol, aidcol, pidcol))
  }

  // Java-serializes `obj` into a byte array; the stream is closed even on failure
  private def serialize(obj: AnyRef): Array[Byte] = {
    val bytes = new java.io.ByteArrayOutputStream()
    val oos = new java.io.ObjectOutputStream(bytes)
    try oos.writeObject(obj) finally oos.close()
    bytes.toByteArray
  }

  /** Loads the area and its simulator state stored under `id`, if any. */
  def getInfo(id: AreaId): Option[(Area, State)] = {
    val rs = query("select %s, %s, %s from %s where %s = %d".format(areaid, data, statedata, tablename, areaid, id))
    if (!rs.next()) None
    else {
      // was: read both blobs through asInstanceOf[Area], which would throw when
      // deserializing the State blob; cast directly to the requested type instead
      def deser[T](blob: java.sql.Blob): T = {
        val ois = new java.io.ObjectInputStream(blob.getBinaryStream())
        try ois.readObject().asInstanceOf[T] finally ois.close()
      }
      Some((deser[Area](rs.getBlob(data)), deser[State](rs.getBlob(statedata))))
    }
  }

  /** Stores (replacing any previous row) the area and its simulator state under `id`. */
  def putInfo(id: AreaId, area: Area, state: State) {
    update("delete from %s where %s = %d".format(tablename, areaid, id))
    // was: "values (%d, ?)" formatted with the *column name* "id" instead of the
    // numeric id (runtime format error), supplied only two values for the
    // three-column table, and never persisted `state` at all
    val pstmt = conn.prepareStatement("insert into %s values (%d, ?, ?)".format(tablename, id))
    pstmt.setBinaryStream(1, new java.io.ByteArrayInputStream(serialize(area)))
    pstmt.setBinaryStream(2, new java.io.ByteArrayInputStream(serialize(state)))
    pstmt.execute()
  }

  /** Records (replacing any previous row) the area a player is currently in. */
  def putPlayerPos(pid: PlayerId, areaid: AreaId) {
    update("delete from %s where %s = %d".format(playerpos, pidcol, pid.id))
    update("insert into %s values (%d, %d)".format(playerpos, pid.id, areaid))
  }

  /** All stored (player, area) position pairs. */
  def getPlayerPositions: Seq[(PlayerId, AreaId)] = {
    val rs = query("select * from %s".format(playerpos))
    val res = collection.mutable.ArrayBuffer[(PlayerId, AreaId)]()
    while (rs.next) {
      res += ((PlayerId(rs.getLong(pidcol)), rs.getLong(aidcol)))
    }
    res
  }

  /** Closes the underlying connection. */
  def terminate() {
    conn.close()
  }
}
/** Names of the save database, its tables, and their columns. */
object Database {
  val name = "save"           // NOTE(review): not referenced in the visible class — confirm usage
  val tablename = "areas"     // table holding serialized areas and their states
  val areaid = "id"           // primary-key column of the areas table
  val data = "data"           // blob column: serialized Area
  val statedata = "statedata" // blob column: serialized simulator State
  val playerpos = "playerpos" // table mapping players to their current area
  val pidcol = "pid"          // player-id column of playerpos
  val aidcol = "areaid"       // area-id column of playerpos
}
| axel22/scala-2d-game-editor | tmp/scala/org/brijest/storm/engine/impl/local/Database.scala | Scala | bsd-3-clause | 3,539 |
package org.splink.pagelets
object RequestId {

  private val rnd = new scala.util.Random()

  /**
   * Builds a request id of six random uppercase letters wrapped in square
   * brackets, e.g. "[KQPRMA]". The bound `90 - 65` yields letters 'A'..'Y',
   * matching the original implementation.
   */
  def create: RequestId = {
    val letters = Seq.fill(6)((65 + rnd.nextInt(90 - 65)).toChar)
    RequestId(letters.mkString("[", "", "]"))
  }
}

/** Wraps the id string; `toString` is the bare id so it can be logged directly. */
case class RequestId(id: String) {
  override def toString = id
}
package example
// Application entry point: prints the greeting supplied by the Greeting mixin.
object Hello extends Greeting with App {
  println(greeting)
}
trait Greeting {
  // Lazily evaluated: the value is only initialised on first access
  lazy val greeting: String = "hello world!"
}
| crajyaguru/bigdata | spark/Hello/src/main/scala/example/Hello.scala | Scala | gpl-3.0 | 154 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of Scala code snippets that match specific criteria, giving a basic overview of the dataset's contents without revealing deeper insights.