| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
package sbtrelease
import org.specs2.mutable.Specification
object VersionSpec extends Specification {
def version(v: String) = Version(v) match {
case Some(parsed) => parsed
case None => sys.error("Can't parse version " + v)
}
"Version bumping" should {
def bump(v: String) = version(v).bump.string
"bump the major version if there's only a major version" in {
bump("1") must_== "2"
}
"bump the minor version if there's only a minor version" in {
bump("1.2") must_== "1.3"
}
"bump the bugfix version if there's only a bugfix version" in {
bump("1.2.3") must_== "1.2.4"
}
"bump the nano version if there's only a nano version" in {
bump("1.2.3.4") must_== "1.2.3.5"
}
"drop the qualifier if it's a pre release" in {
bump("1-rc1") must_== "1"
bump("1.2-rc1") must_== "1.2"
bump("1.2.3-rc1") must_== "1.2.3"
bump("1-rc") must_== "1"
bump("1-RC1") must_== "1"
bump("1-M1") must_== "1"
bump("1-rc-1") must_== "1"
bump("1-rc.1") must_== "1"
bump("1-beta") must_== "1"
bump("1-beta-1") must_== "1"
bump("1-beta.1") must_== "1"
bump("1-alpha") must_== "1"
}
"not drop the qualifier if it's not a pre release" in {
bump("1.2.3-Final") must_== "1.2.4-Final"
}
"not drop the post-nano qualifier if it's not a pre release" in {
bump("1.2.3.4-Final") must_== "1.2.3.5-Final"
}
}
"Major Version bumping" should {
def bumpMajor(v: String) = version(v).bumpMajor.string
"bump the major version and reset other versions" in {
bumpMajor("1.2.3.4.5") must_== "2.0.0.0.0"
}
"not drop the qualifier" in {
bumpMajor("1.2.3.4.5-alpha") must_== "2.0.0.0.0-alpha"
}
}
"Minor Version bumping" should {
def bumpMinor(v: String) = version(v).bumpMinor.string
"bump the minor version" in {
bumpMinor("1.2") must_== "1.3"
}
"bump the minor version and reset other subversions" in {
bumpMinor("1.2.3.4.5") must_== "1.3.0.0.0"
}
"not bump the major version when no minor version" in {
bumpMinor("1") must_== "1"
}
"not drop the qualifier" in {
bumpMinor("1.2.3.4.5-alpha") must_== "1.3.0.0.0-alpha"
}
}
"Subversion bumping" should {
def bumpSubversion(v: String)(i: Int) = version(v).maybeBumpSubversion(i).string
"bump the subversion" in {
bumpSubversion("1.2")(0) must_== "1.3"
}
"bump the subversion and reset lower subversions" in {
bumpSubversion("1.2.3.4.5")(0) must_== "1.3.0.0.0"
}
"not change anything with an invalid subversion index" in {
bumpSubversion("1.2-beta")(1) must_== "1.2-beta"
}
"not drop the qualifier" in {
bumpSubversion("1.2.3.4.5-alpha")(2) must_== "1.2.3.5.0-alpha"
}
}
}
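// A minimal usage sketch (not part of the spec above; `BumpExample` is a hypothetical name):
// how the Version API exercised by these tests might be used to compute the next release
// version, falling back to the raw string when parsing fails.
object BumpExample {
def next(v: String): String =
Version(v).map(_.bump.string).getOrElse(v) // e.g. next("1.2.3-rc1") == "1.2.3"
}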
| xuwei-k/sbt-release | src/test/scala/VersionSpec.scala | Scala | apache-2.0 | 2,829 |
package water.app
import org.apache.spark.{SparkContext, SparkConf}
/**
* Publishes useful methods for configuring a Spark context.
*/
trait SparkContextSupport {
def configure(appName: String = "Sparkling Water Demo"): SparkConf = {
val conf = new SparkConf()
.setAppName(appName)
conf.setIfMissing("spark.master", sys.env.getOrElse("spark.master", "local[*]"))
conf
}
def addFiles(sc: SparkContext, files: String*): Unit = {
files.foreach( f => sc.addFile(f) )
}
def absPath(path: String): String = new java.io.File(path).getAbsolutePath
}
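// A minimal usage sketch (not part of the trait above; `DemoApp` and the input path are hypothetical):
// a driver object mixes in SparkContextSupport, builds a SparkContext from the configured
// SparkConf, and ships a local file to the executors.
object DemoApp extends SparkContextSupport {
def main(args: Array[String]): Unit = {
val sc = new SparkContext(configure("Sparkling Water Demo"))
addFiles(sc, absPath("data/input.csv")) // hypothetical input file
sc.stop()
}
}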
| nvoron23/sparkling-water | core/src/main/scala/water/app/SparkContextSupport.scala | Scala | apache-2.0 | 571 |
package nl.svanwouw.trending.types
/**
* Syntactic sugar for a slope value.
* Represents the slope of the frequency of occurrence of a certain topic in a certain period.
* @param v The underlying runtime value.
*/
class Slope(val v: Double) extends AnyVal with Serializable {
override def toString = v.toString
}
| stefanvanwouw/spark-based-trending-topics-extraction | src/main/scala/nl/svanwouw/trending/types/Slope.scala | Scala | mit | 328 |
package com.ecfront.common
import org.scalatest.FunSuite
class ShellSpec extends FunSuite {
test("Shell测试") {
//use linux
assert(ShellHelper.sync("echo hello",returnResult = true)=="echo hello")
}
}
| gudaoxuri/ez-common | src/test/scala/com/ecfront/common/ShellSpec.scala | Scala | apache-2.0 | 221 |
package dotty.tools.dotc
package transform
import core._
import DenotTransformers.SymTransformer
import Contexts.Context
import SymDenotations.SymDenotation
import Types._
import Symbols._
import SymUtils._
import Constants._
import TreeTransforms._
import Flags._
import Decorators._
import ValueClasses._
/** Performs the following rewritings for fields of a class:
*
* <mods> val x: T = e
* --> <mods> <stable> <accessor> def x: T = e
* <mods> var x: T = e
* --> <mods> <accessor> def x: T = e
*
* <mods> val x: T
* --> <mods> <stable> <accessor> def x: T
*
* <mods> lazy val x: T = e
* --> <mods> <accessor> lazy def x: T = e
*
* <mods> var x: T
* --> <mods> <accessor> def x: T
*
* <mods> non-static <module> val x$ = e
* --> <mods> <module> <accessor> def x$ = e
*
* Omitted from the rewritings are
*
* - private[this] fields in classes (excluding traits, value classes)
* - fields generated for static modules (TODO: needed?)
* - parameters, static fields, and fields coming from Java
*
* Furthermore, assignments to mutable vars are replaced by setter calls
*
* p.x = e
* --> p.x_=(e)
*
* No fields are generated yet. This is done later in phase Memoize.
*/
class Getters extends MiniPhaseTransform with SymTransformer { thisTransform =>
import ast.tpd._
override def phaseName = "getters"
override def transformSym(d: SymDenotation)(implicit ctx: Context): SymDenotation = {
def noGetterNeeded =
d.is(NoGetterNeeded) ||
d.initial.asInstanceOf[SymDenotation].is(PrivateLocal) && !d.owner.is(Trait) && !isDerivedValueClass(d.owner) && !d.is(Flags.Lazy) ||
d.is(Module) && d.isStatic ||
d.isSelfSym
if (d.isTerm && (d.is(Lazy) || d.owner.isClass) && d.info.isValueType && !noGetterNeeded) {
val maybeStable = if (d.isStable) Stable else EmptyFlags
d.copySymDenotation(
initFlags = d.flags | maybeStable | AccessorCreationFlags,
info = ExprType(d.info))
}
else d
}
private val NoGetterNeeded = Method | Param | JavaDefined | JavaStatic
override def transformValDef(tree: ValDef)(implicit ctx: Context, info: TransformerInfo): Tree =
if (tree.symbol is Method) DefDef(tree.symbol.asTerm, tree.rhs) else tree
override def transformAssign(tree: Assign)(implicit ctx: Context, info: TransformerInfo): Tree =
if (tree.lhs.symbol is Method) tree.lhs.becomes(tree.rhs) else tree
}
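// Illustrative sketch in plain Scala (not compiler code; `GettersSketch` is a hypothetical name):
// roughly what the rewriting documented above means at source level. Fields become accessor
// methods; the backing storage shown here exists only so the sketch compiles (real fields are
// added later, in the Memoize phase), and assignments such as `p.y = e` go through `y_=`.
object GettersSketch {
class Before { val x: Int = 1; var y: Int = 2 }
class After {
def x: Int = 1                  // <stable> <accessor>
private[this] var _y: Int = 2   // stand-in for the field Memoize would add
def y: Int = _y                 // <accessor>
def y_=(e: Int): Unit = _y = e  // p.y = e  -->  p.y_=(e)
}
}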
| densh/dotty | src/dotty/tools/dotc/transform/Getters.scala | Scala | bsd-3-clause | 2,478 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.io._
import java.nio._
import java.nio.channels._
import java.nio.charset.{Charset, StandardCharsets}
import java.security.cert.X509Certificate
import java.util.{Collections, Properties}
import java.util.concurrent.{Callable, Executors, TimeUnit}
import javax.net.ssl.X509TrustManager
import kafka.api._
import kafka.cluster.{Broker, EndPoint}
import kafka.common.TopicAndPartition
import kafka.consumer.{ConsumerConfig, ConsumerTimeoutException, KafkaStream}
import kafka.log._
import kafka.message._
import kafka.producer._
import kafka.security.auth.{Acl, Authorizer, Resource}
import kafka.serializer.{DefaultEncoder, Encoder, StringEncoder}
import kafka.server._
import kafka.server.checkpoints.OffsetCheckpointFile
import Implicits._
import kafka.controller.LeaderIsrAndControllerEpoch
import kafka.zk.{AdminZkClient, BrokerIdsZNode, BrokerInfo, KafkaZkClient}
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.clients.consumer.{ConsumerRecord, KafkaConsumer, OffsetAndMetadata, RangeAssignor}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.header.Header
import org.apache.kafka.common.internals.Topic
import org.apache.kafka.common.network.{ListenerName, Mode}
import org.apache.kafka.common.record._
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, Deserializer, Serializer}
import org.apache.kafka.common.utils.Time
import org.apache.kafka.common.utils.Utils._
import org.apache.kafka.test.{TestSslUtils, TestUtils => JTestUtils}
import org.apache.zookeeper.ZooDefs._
import org.apache.zookeeper.data.ACL
import org.junit.Assert._
import scala.collection.JavaConverters._
import scala.collection.{Map, mutable}
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
/**
* Utility functions to help with testing
*/
object TestUtils extends Logging {
val random = JTestUtils.RANDOM
/* 0 gives a random port; you can then retrieve the assigned port from the Socket object. */
val RandomPort = 0
/** Port to use for unit tests that mock/don't require a real ZK server. */
val MockZkPort = 1
/** ZooKeeper connection string to use for unit tests that mock/don't require a real ZK server. */
val MockZkConnect = "127.0.0.1:" + MockZkPort
// CN in SSL certificates - this is used for endpoint validation when enabled
val SslCertificateCn = "localhost"
private val transactionStatusKey = "transactionStatus"
private val committedValue : Array[Byte] = "committed".getBytes(StandardCharsets.UTF_8)
private val abortedValue : Array[Byte] = "aborted".getBytes(StandardCharsets.UTF_8)
/**
* Create a temporary directory
*/
def tempDir(): File = JTestUtils.tempDirectory()
def tempTopic(): String = "testTopic" + random.nextInt(1000000)
/**
* Create a temporary relative directory
*/
def tempRelativeDir(parent: String): File = {
val parentFile = new File(parent)
parentFile.mkdirs()
JTestUtils.tempDirectory(parentFile.toPath, null)
}
/**
* Create a random log directory in the format <string>-<int> used for Kafka partition logs.
* It is the responsibility of the caller to set up a shutdown hook for deletion of the directory.
*/
def randomPartitionLogDir(parentDir: File): File = {
val attempts = 1000
val f = Iterator.continually(new File(parentDir, "kafka-" + random.nextInt(1000000)))
.take(attempts).find(_.mkdir())
.getOrElse(sys.error(s"Failed to create directory after $attempts attempts"))
f.deleteOnExit()
f
}
/**
* Create a temporary file
*/
def tempFile(): File = JTestUtils.tempFile()
/**
* Create a temporary file and return an open file channel for this file
*/
def tempChannel(): FileChannel = new RandomAccessFile(tempFile(), "rw").getChannel()
/**
* Create a kafka server instance with appropriate test settings
* USING THIS IS A SIGN YOU ARE NOT WRITING A REAL UNIT TEST
*
* @param config The configuration of the server
*/
def createServer(config: KafkaConfig, time: Time = Time.SYSTEM): KafkaServer = {
val server = new KafkaServer(config, time)
server.startup()
server
}
def boundPort(server: KafkaServer, securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): Int =
server.boundPort(ListenerName.forSecurityProtocol(securityProtocol))
def createBroker(id: Int, host: String, port: Int, securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): Broker =
new Broker(id, host, port, ListenerName.forSecurityProtocol(securityProtocol), securityProtocol)
/**
* Create a test config for the provided parameters.
*
* Note that if `interBrokerSecurityProtocol` is defined, the listener for the `SecurityProtocol` will be enabled.
*/
def createBrokerConfigs(numConfigs: Int,
zkConnect: String,
enableControlledShutdown: Boolean = true,
enableDeleteTopic: Boolean = false,
interBrokerSecurityProtocol: Option[SecurityProtocol] = None,
trustStoreFile: Option[File] = None,
saslProperties: Option[Properties] = None,
enablePlaintext: Boolean = true,
enableSsl: Boolean = false,
enableSaslPlaintext: Boolean = false,
enableSaslSsl: Boolean = false,
rackInfo: Map[Int, String] = Map(),
logDirCount: Int = 1,
enableToken: Boolean = false): Seq[Properties] = {
(0 until numConfigs).map { node =>
createBrokerConfig(node, zkConnect, enableControlledShutdown, enableDeleteTopic, RandomPort,
interBrokerSecurityProtocol, trustStoreFile, saslProperties, enablePlaintext = enablePlaintext, enableSsl = enableSsl,
enableSaslPlaintext = enableSaslPlaintext, enableSaslSsl = enableSaslSsl, rack = rackInfo.get(node), logDirCount = logDirCount, enableToken = enableToken)
}
}
def getBrokerListStrFromServers(servers: Seq[KafkaServer], protocol: SecurityProtocol = SecurityProtocol.PLAINTEXT): String = {
servers.map { s =>
val listener = s.config.advertisedListeners.find(_.securityProtocol == protocol).getOrElse(
sys.error(s"Could not find listener with security protocol $protocol"))
formatAddress(listener.host, boundPort(s, protocol))
}.mkString(",")
}
def bootstrapServers(servers: Seq[KafkaServer], listenerName: ListenerName): String = {
servers.map { s =>
val listener = s.config.advertisedListeners.find(_.listenerName == listenerName).getOrElse(
sys.error(s"Could not find listener with name ${listenerName.value}"))
formatAddress(listener.host, s.boundPort(listenerName))
}.mkString(",")
}
/**
* Shutdown `servers` and delete their log directories.
*/
def shutdownServers(servers: Seq[KafkaServer]) {
servers.par.foreach { s =>
s.shutdown()
CoreUtils.delete(s.config.logDirs)
}
}
/**
* Create a test config for the provided parameters.
*
* Note that if `interBrokerSecurityProtocol` is defined, the listener for the `SecurityProtocol` will be enabled.
*/
def createBrokerConfig(nodeId: Int,
zkConnect: String,
enableControlledShutdown: Boolean = true,
enableDeleteTopic: Boolean = false,
port: Int = RandomPort,
interBrokerSecurityProtocol: Option[SecurityProtocol] = None,
trustStoreFile: Option[File] = None,
saslProperties: Option[Properties] = None,
enablePlaintext: Boolean = true,
enableSaslPlaintext: Boolean = false,
saslPlaintextPort: Int = RandomPort,
enableSsl: Boolean = false,
sslPort: Int = RandomPort,
enableSaslSsl: Boolean = false,
saslSslPort: Int = RandomPort,
rack: Option[String] = None,
logDirCount: Int = 1,
enableToken: Boolean = false): Properties = {
def shouldEnable(protocol: SecurityProtocol) = interBrokerSecurityProtocol.fold(false)(_ == protocol)
val protocolAndPorts = ArrayBuffer[(SecurityProtocol, Int)]()
if (enablePlaintext || shouldEnable(SecurityProtocol.PLAINTEXT))
protocolAndPorts += SecurityProtocol.PLAINTEXT -> port
if (enableSsl || shouldEnable(SecurityProtocol.SSL))
protocolAndPorts += SecurityProtocol.SSL -> sslPort
if (enableSaslPlaintext || shouldEnable(SecurityProtocol.SASL_PLAINTEXT))
protocolAndPorts += SecurityProtocol.SASL_PLAINTEXT -> saslPlaintextPort
if (enableSaslSsl || shouldEnable(SecurityProtocol.SASL_SSL))
protocolAndPorts += SecurityProtocol.SASL_SSL -> saslSslPort
val listeners = protocolAndPorts.map { case (protocol, port) =>
s"${protocol.name}://localhost:$port"
}.mkString(",")
val props = new Properties
if (nodeId >= 0) props.put(KafkaConfig.BrokerIdProp, nodeId.toString)
props.put(KafkaConfig.ListenersProp, listeners)
if (logDirCount > 1) {
val logDirs = (1 to logDirCount).toList.map(i =>
// We would like to allow users to specify both relative and absolute paths as log directories, for backward-compatibility reasons
// We can verify this by using a mixture of relative and absolute paths as log directories in the test
if (i % 2 == 0) TestUtils.tempDir().getAbsolutePath else TestUtils.tempRelativeDir("data")
).mkString(",")
props.put(KafkaConfig.LogDirsProp, logDirs)
} else {
props.put(KafkaConfig.LogDirProp, TestUtils.tempDir().getAbsolutePath)
}
props.put(KafkaConfig.ZkConnectProp, zkConnect)
props.put(KafkaConfig.ZkConnectionTimeoutMsProp, "10000")
props.put(KafkaConfig.ReplicaSocketTimeoutMsProp, "1500")
props.put(KafkaConfig.ControllerSocketTimeoutMsProp, "1500")
props.put(KafkaConfig.ControlledShutdownEnableProp, enableControlledShutdown.toString)
props.put(KafkaConfig.DeleteTopicEnableProp, enableDeleteTopic.toString)
props.put(KafkaConfig.LogDeleteDelayMsProp, "1000")
props.put(KafkaConfig.ControlledShutdownRetryBackoffMsProp, "100")
props.put(KafkaConfig.LogCleanerDedupeBufferSizeProp, "2097152")
props.put(KafkaConfig.LogMessageTimestampDifferenceMaxMsProp, Long.MaxValue.toString)
props.put(KafkaConfig.OffsetsTopicReplicationFactorProp, "1")
if (!props.containsKey(KafkaConfig.OffsetsTopicPartitionsProp))
props.put(KafkaConfig.OffsetsTopicPartitionsProp, "5")
if (!props.containsKey(KafkaConfig.GroupInitialRebalanceDelayMsProp))
props.put(KafkaConfig.GroupInitialRebalanceDelayMsProp, "0")
rack.foreach(props.put(KafkaConfig.RackProp, _))
if (protocolAndPorts.exists { case (protocol, _) => usesSslTransportLayer(protocol) })
props ++= sslConfigs(Mode.SERVER, false, trustStoreFile, s"server$nodeId")
if (protocolAndPorts.exists { case (protocol, _) => usesSaslAuthentication(protocol) })
props ++= JaasTestUtils.saslConfigs(saslProperties)
interBrokerSecurityProtocol.foreach { protocol =>
props.put(KafkaConfig.InterBrokerSecurityProtocolProp, protocol.name)
}
if (enableToken)
props.put(KafkaConfig.DelegationTokenMasterKeyProp, "masterkey")
props
}
/**
* Create a topic in ZooKeeper.
* Wait until the leader is elected and the metadata is propagated to all brokers.
* Return the leader for each partition.
*/
def createTopic(zkClient: KafkaZkClient,
topic: String,
numPartitions: Int = 1,
replicationFactor: Int = 1,
servers: Seq[KafkaServer],
topicConfig: Properties = new Properties): scala.collection.immutable.Map[Int, Int] = {
val adminZkClient = new AdminZkClient(zkClient)
// create topic
adminZkClient.createTopic(topic, numPartitions, replicationFactor, topicConfig)
// wait until the update metadata request for new topic reaches all servers
(0 until numPartitions).map { i =>
TestUtils.waitUntilMetadataIsPropagated(servers, topic, i)
i -> TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, i)
}.toMap
}
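/**
 * Hedged usage sketch (not part of the original utilities; the method name and topic name are
 * illustrative): a typical integration-test flow that combines createBrokerConfigs, createServer,
 * createTopic and shutdownServers from this object.
 */
private def exampleClusterSetup(zkConnect: String, zkClient: KafkaZkClient): Unit = {
val servers = createBrokerConfigs(3, zkConnect).map(props => createServer(KafkaConfig.fromProps(props)))
try createTopic(zkClient, "example-topic", numPartitions = 3, replicationFactor = 3, servers = servers)
finally shutdownServers(servers)
}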
/**
* Create a topic in ZooKeeper using a customized replica assignment.
* Wait until the leader is elected and the metadata is propagated to all brokers.
* Return the leader for each partition.
*/
def createTopic(zkClient: KafkaZkClient, topic: String, partitionReplicaAssignment: collection.Map[Int, Seq[Int]],
servers: Seq[KafkaServer]): scala.collection.immutable.Map[Int, Int] = {
val adminZkClient = new AdminZkClient(zkClient)
// create topic
adminZkClient.createOrUpdateTopicPartitionAssignmentPathInZK(topic, partitionReplicaAssignment)
// wait until the update metadata request for new topic reaches all servers
partitionReplicaAssignment.keySet.map { case i =>
TestUtils.waitUntilMetadataIsPropagated(servers, topic, i)
i -> TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, i)
}.toMap
}
/**
* Create the consumer offsets/group metadata topic and wait until the leader is elected and metadata is propagated
* to all brokers.
*/
def createOffsetsTopic(zkClient: KafkaZkClient, servers: Seq[KafkaServer]): Unit = {
val server = servers.head
createTopic(zkClient, Topic.GROUP_METADATA_TOPIC_NAME,
server.config.getInt(KafkaConfig.OffsetsTopicPartitionsProp),
server.config.getShort(KafkaConfig.OffsetsTopicReplicationFactorProp).toInt,
servers,
server.groupCoordinator.offsetsTopicConfigs)
}
/**
* Create a test config for a consumer
*/
def createConsumerProperties(zkConnect: String, groupId: String, consumerId: String,
consumerTimeout: Long = -1): Properties = {
val props = new Properties
props.put("zookeeper.connect", zkConnect)
props.put("group.id", groupId)
props.put("consumer.id", consumerId)
props.put("consumer.timeout.ms", consumerTimeout.toString)
props.put("zookeeper.session.timeout.ms", "6000")
props.put("zookeeper.sync.time.ms", "200")
props.put("auto.commit.interval.ms", "1000")
props.put("rebalance.max.retries", "4")
props.put("auto.offset.reset", "smallest")
props.put("num.consumer.fetchers", "2")
props
}
/**
* Fail a test case explicitly. Return Nothing so that we are not constrained by the return type.
*/
def fail(msg: String): Nothing = throw new AssertionError(msg)
/**
* Wrap a single record log buffer.
*/
def singletonRecords(value: Array[Byte],
key: Array[Byte] = null,
codec: CompressionType = CompressionType.NONE,
timestamp: Long = RecordBatch.NO_TIMESTAMP,
magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE): MemoryRecords = {
records(Seq(new SimpleRecord(timestamp, key, value)), magicValue = magicValue, codec = codec)
}
def recordsWithValues(magicValue: Byte,
codec: CompressionType,
values: Array[Byte]*): MemoryRecords = {
records(values.map(value => new SimpleRecord(value)), magicValue, codec)
}
def records(records: Iterable[SimpleRecord],
magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE,
codec: CompressionType = CompressionType.NONE,
producerId: Long = RecordBatch.NO_PRODUCER_ID,
producerEpoch: Short = RecordBatch.NO_PRODUCER_EPOCH,
sequence: Int = RecordBatch.NO_SEQUENCE,
baseOffset: Long = 0L): MemoryRecords = {
val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava))
val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, baseOffset,
System.currentTimeMillis, producerId, producerEpoch, sequence)
records.foreach(builder.append)
builder.build()
}
/**
* Generate an array of random bytes
*
* @param numBytes The size of the array
*/
def randomBytes(numBytes: Int): Array[Byte] = JTestUtils.randomBytes(numBytes)
/**
* Generate a random string of letters and digits of the given length
*
* @param len The length of the string
* @return The random string
*/
def randomString(len: Int): String = JTestUtils.randomString(len)
/**
* Check that the contents of the two buffers from position() to limit() are equal
*/
def checkEquals(b1: ByteBuffer, b2: ByteBuffer) {
assertEquals("Buffers should have equal length", b1.limit() - b1.position(), b2.limit() - b2.position())
for(i <- 0 until b1.limit() - b1.position())
assertEquals("byte " + i + " byte not equal.", b1.get(b1.position() + i), b2.get(b1.position() + i))
}
/**
* Throw an exception if the two iterators are of differing lengths or contain
* different messages on their Nth element
*/
def checkEquals[T](expected: Iterator[T], actual: Iterator[T]) {
var length = 0
while(expected.hasNext && actual.hasNext) {
length += 1
assertEquals(expected.next, actual.next)
}
// check if the expected iterator is longer
if (expected.hasNext) {
var length1 = length
while (expected.hasNext) {
expected.next
length1 += 1
}
assertFalse("Iterators have uneven length-- first has more: "+length1 + " > " + length, true)
}
// check if the actual iterator was longer
if (actual.hasNext) {
var length2 = length
while (actual.hasNext) {
actual.next
length2 += 1
}
assertFalse("Iterators have uneven length-- second has more: "+length2 + " > " + length, true)
}
}
/**
* Throw an exception if the iterator has a different length than expected
*
*/
def checkLength[T](s1: Iterator[T], expectedLength:Int) {
var n = 0
while (s1.hasNext) {
n+=1
s1.next
}
assertEquals(expectedLength, n)
}
/**
* Throw an exception if the two iterators are of differing lengths or contain
* different messages on their Nth element
*/
def checkEquals[T](s1: java.util.Iterator[T], s2: java.util.Iterator[T]) {
while(s1.hasNext && s2.hasNext)
assertEquals(s1.next, s2.next)
assertFalse("Iterators have uneven length--first has more", s1.hasNext)
assertFalse("Iterators have uneven length--second has more", s2.hasNext)
}
def stackedIterator[T](s: Iterator[T]*): Iterator[T] = {
new Iterator[T] {
var cur: Iterator[T] = null
val topIterator = s.iterator
def hasNext: Boolean = {
while (true) {
if (cur == null) {
if (topIterator.hasNext)
cur = topIterator.next
else
return false
}
if (cur.hasNext)
return true
cur = null
}
// should never reach here
throw new RuntimeException("should not reach here")
}
def next() : T = cur.next
}
}
/**
* Create a hexadecimal string for the given bytes
*/
def hexString(bytes: Array[Byte]): String = hexString(ByteBuffer.wrap(bytes))
/**
* Create a hexadecimal string for the given bytes
*/
def hexString(buffer: ByteBuffer): String = {
val builder = new StringBuilder("0x")
for(i <- 0 until buffer.limit())
builder.append(String.format("%x", Integer.valueOf(buffer.get(buffer.position() + i))))
builder.toString
}
/**
* Create a producer with a few pre-configured properties.
* If certain properties need to be overridden, they can be provided in producerProps.
*/
@deprecated("This method has been deprecated and it will be removed in a future release.", "0.10.0.0")
def createProducer[K, V](brokerList: String,
encoder: String = classOf[DefaultEncoder].getName,
keyEncoder: String = classOf[DefaultEncoder].getName,
partitioner: String = classOf[DefaultPartitioner].getName,
producerProps: Properties = null): Producer[K, V] = {
val props: Properties = getProducerConfig(brokerList)
//override any explicitly specified properties
if (producerProps != null)
props ++= producerProps
props.put("serializer.class", encoder)
props.put("key.serializer.class", keyEncoder)
props.put("partitioner.class", partitioner)
new Producer[K, V](new kafka.producer.ProducerConfig(props))
}
def securityConfigs(mode: Mode,
securityProtocol: SecurityProtocol,
trustStoreFile: Option[File],
certAlias: String,
certCn: String,
saslProperties: Option[Properties]): Properties = {
val props = new Properties
if (usesSslTransportLayer(securityProtocol))
props ++= sslConfigs(mode, securityProtocol == SecurityProtocol.SSL, trustStoreFile, certAlias, certCn)
if (usesSaslAuthentication(securityProtocol))
props ++= JaasTestUtils.saslConfigs(saslProperties)
props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, securityProtocol.name)
props
}
def producerSecurityConfigs(securityProtocol: SecurityProtocol, trustStoreFile: Option[File], saslProperties: Option[Properties]): Properties =
securityConfigs(Mode.CLIENT, securityProtocol, trustStoreFile, "producer", SslCertificateCn, saslProperties)
/**
* Create a (new) producer with a few pre-configured properties.
*/
def createNewProducer[K, V](brokerList: String,
acks: Int = -1,
maxBlockMs: Long = 60 * 1000L,
bufferSize: Long = 1024L * 1024L,
retries: Int = 0,
lingerMs: Long = 0,
requestTimeoutMs: Long = 10 * 1024L,
securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT,
trustStoreFile: Option[File] = None,
saslProperties: Option[Properties] = None,
keySerializer: Serializer[K] = new ByteArraySerializer,
valueSerializer: Serializer[V] = new ByteArraySerializer,
props: Option[Properties] = None): KafkaProducer[K, V] = {
val producerProps = props.getOrElse(new Properties)
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
producerProps.put(ProducerConfig.ACKS_CONFIG, acks.toString)
producerProps.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, maxBlockMs.toString)
producerProps.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferSize.toString)
producerProps.put(ProducerConfig.RETRIES_CONFIG, retries.toString)
producerProps.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeoutMs.toString)
/* Only use these if not already set */
val defaultProps = Map(
ProducerConfig.RETRY_BACKOFF_MS_CONFIG -> "100",
ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG -> "200",
ProducerConfig.LINGER_MS_CONFIG -> lingerMs.toString
)
defaultProps.foreach { case (key, value) =>
if (!producerProps.containsKey(key)) producerProps.put(key, value)
}
/*
* It uses CommonClientConfigs.SECURITY_PROTOCOL_CONFIG to determine whether
* securityConfigs has been invoked already. For example, we need to
* invoke it before this call in IntegrationTestHarness, otherwise the
* SSL client auth fails.
*/
if (!producerProps.containsKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG))
producerProps ++= producerSecurityConfigs(securityProtocol, trustStoreFile, saslProperties)
new KafkaProducer[K, V](producerProps, keySerializer, valueSerializer)
}
def usesSslTransportLayer(securityProtocol: SecurityProtocol): Boolean = securityProtocol match {
case SecurityProtocol.SSL | SecurityProtocol.SASL_SSL => true
case _ => false
}
def usesSaslAuthentication(securityProtocol: SecurityProtocol): Boolean = securityProtocol match {
case SecurityProtocol.SASL_PLAINTEXT | SecurityProtocol.SASL_SSL => true
case _ => false
}
def consumerSecurityConfigs(securityProtocol: SecurityProtocol, trustStoreFile: Option[File], saslProperties: Option[Properties]): Properties =
securityConfigs(Mode.CLIENT, securityProtocol, trustStoreFile, "consumer", SslCertificateCn, saslProperties)
def adminClientSecurityConfigs(securityProtocol: SecurityProtocol, trustStoreFile: Option[File], saslProperties: Option[Properties]): Properties =
securityConfigs(Mode.CLIENT, securityProtocol, trustStoreFile, "admin-client", SslCertificateCn, saslProperties)
/**
* Create a new consumer with a few pre-configured properties.
*/
def createNewConsumer[K, V](brokerList: String,
groupId: String = "group",
autoOffsetReset: String = "earliest",
partitionFetchSize: Long = 4096L,
partitionAssignmentStrategy: String = classOf[RangeAssignor].getName,
sessionTimeout: Int = 30000,
securityProtocol: SecurityProtocol,
trustStoreFile: Option[File] = None,
saslProperties: Option[Properties] = None,
keyDeserializer: Deserializer[K] = new ByteArrayDeserializer,
valueDeserializer: Deserializer[V] = new ByteArrayDeserializer,
props: Option[Properties] = None) : KafkaConsumer[K, V] = {
import org.apache.kafka.clients.consumer.ConsumerConfig
val consumerProps = props.getOrElse(new Properties())
consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset)
consumerProps.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, partitionFetchSize.toString)
val defaultProps = Map(
ConsumerConfig.RETRY_BACKOFF_MS_CONFIG -> "100",
ConsumerConfig.RECONNECT_BACKOFF_MS_CONFIG -> "200",
ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG -> partitionAssignmentStrategy,
ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG -> sessionTimeout.toString,
ConsumerConfig.GROUP_ID_CONFIG -> groupId)
defaultProps.foreach { case (key, value) =>
if (!consumerProps.containsKey(key)) consumerProps.put(key, value)
}
/*
* It uses CommonClientConfigs.SECURITY_PROTOCOL_CONFIG to determine whether
* securityConfigs has been invoked already. For example, we need to
* invoke it before this call in IntegrationTestHarness, otherwise the
* SSL client auth fails.
*/
if(!consumerProps.containsKey(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG))
consumerProps ++= consumerSecurityConfigs(securityProtocol, trustStoreFile, saslProperties)
new KafkaConsumer[K, V](consumerProps, keyDeserializer, valueDeserializer)
}
/**
* Create a default producer config properties map with the given metadata broker list
*/
def getProducerConfig(brokerList: String): Properties = {
val props = new Properties()
props.put("metadata.broker.list", brokerList)
props.put("message.send.max.retries", "5")
props.put("retry.backoff.ms", "1000")
props.put("request.timeout.ms", "2000")
props.put("request.required.acks", "-1")
props.put("send.buffer.bytes", "65536")
props
}
@deprecated("This method has been deprecated and will be removed in a future release", "0.11.0.0")
def getSyncProducerConfig(port: Int): Properties = {
val props = new Properties()
props.put("host", "localhost")
props.put("port", port.toString)
props.put("request.timeout.ms", "10000")
props.put("request.required.acks", "1")
props.put("serializer.class", classOf[StringEncoder].getName)
props
}
@deprecated("This method has been deprecated and will be removed in a future release.", "0.11.0.0")
def updateConsumerOffset(config : ConsumerConfig, path : String, offset : Long) = {
val zkUtils = ZkUtils(config.zkConnect, config.zkSessionTimeoutMs, config.zkConnectionTimeoutMs, false)
zkUtils.updatePersistentPath(path, offset.toString)
zkUtils.close()
}
def getMessageIterator(iter: Iterator[MessageAndOffset]): Iterator[Message] = {
new IteratorTemplate[Message] {
override def makeNext(): Message = {
if (iter.hasNext)
iter.next.message
else
allDone()
}
}
}
def createBrokersInZk(zkClient: KafkaZkClient, ids: Seq[Int]): Seq[Broker] =
createBrokersInZk(ids.map(kafka.admin.BrokerMetadata(_, None)), zkClient)
def createBrokersInZk(brokerMetadatas: Seq[kafka.admin.BrokerMetadata], zkClient: KafkaZkClient): Seq[Broker] = {
zkClient.makeSurePersistentPathExists(BrokerIdsZNode.path)
val brokers = brokerMetadatas.map { b =>
val protocol = SecurityProtocol.PLAINTEXT
val listenerName = ListenerName.forSecurityProtocol(protocol)
Broker(b.id, Seq(EndPoint("localhost", 6667, listenerName, protocol)), b.rack)
}
brokers.foreach(b => zkClient.registerBrokerInZk(BrokerInfo(Broker(b.id, b.endPoints, rack = b.rack),
ApiVersion.latestVersion, jmxPort = -1)))
brokers
}
def deleteBrokersInZk(zkClient: KafkaZkClient, ids: Seq[Int]): Seq[Broker] = {
val brokers = ids.map(createBroker(_, "localhost", 6667, SecurityProtocol.PLAINTEXT))
ids.foreach(b => zkClient.deletePath(BrokerIdsZNode.path + "/" + b))
brokers
}
def getMsgStrings(n: Int): Seq[String] = {
val buffer = new ListBuffer[String]
for (i <- 0 until n)
buffer += ("msg" + i)
buffer
}
/**
* Create a wired format request based on simple basic information
*/
@deprecated("This method has been deprecated and it will be removed in a future release", "0.10.0.0")
def produceRequest(topic: String,
partition: Int,
message: ByteBufferMessageSet,
acks: Int,
timeout: Int,
correlationId: Int = 0,
clientId: String): ProducerRequest = {
produceRequestWithAcks(Seq(topic), Seq(partition), message, acks, timeout, correlationId, clientId)
}
@deprecated("This method has been deprecated and it will be removed in a future release", "0.10.0.0")
def produceRequestWithAcks(topics: Seq[String],
partitions: Seq[Int],
message: ByteBufferMessageSet,
acks: Int,
timeout: Int,
correlationId: Int = 0,
clientId: String): ProducerRequest = {
val data = topics.flatMap(topic =>
partitions.map(partition => (TopicAndPartition(topic, partition), message))
)
new ProducerRequest(correlationId, clientId, acks.toShort, timeout, collection.mutable.Map(data:_*))
}
def makeLeaderForPartition(zkClient: KafkaZkClient,
topic: String,
leaderPerPartitionMap: scala.collection.immutable.Map[Int, Int],
controllerEpoch: Int) {
val newLeaderIsrAndControllerEpochs = leaderPerPartitionMap.map { case (partition, leader) =>
val topicPartition = new TopicPartition(topic, partition)
val newLeaderAndIsr = zkClient.getTopicPartitionState(topicPartition)
.map(_.leaderAndIsr.newLeader(leader))
.getOrElse(LeaderAndIsr(leader, List(leader)))
topicPartition -> LeaderIsrAndControllerEpoch(newLeaderAndIsr, controllerEpoch)
}
zkClient.setTopicPartitionStatesRaw(newLeaderIsrAndControllerEpochs)
}
/**
* If neither oldLeaderOpt nor newLeaderOpt is defined, wait until the leader of a partition is elected.
* If oldLeaderOpt is defined, it waits until the new leader is different from the old leader.
* If newLeaderOpt is defined, it waits until the new leader becomes the expected new leader.
*
* @return The new leader (note that negative values are used to indicate conditions like NoLeader and
* LeaderDuringDelete).
* @throws AssertionError if the expected condition is not true within the timeout.
*/
def waitUntilLeaderIsElectedOrChanged(zkClient: KafkaZkClient, topic: String, partition: Int, timeoutMs: Long = 30000L,
oldLeaderOpt: Option[Int] = None, newLeaderOpt: Option[Int] = None): Int = {
require(!(oldLeaderOpt.isDefined && newLeaderOpt.isDefined), "Can't define both the old and the new leader")
val startTime = System.currentTimeMillis()
val topicPartition = new TopicPartition(topic, partition)
trace(s"Waiting for leader to be elected or changed for partition $topicPartition, old leader is $oldLeaderOpt, " +
s"new leader is $newLeaderOpt")
var leader: Option[Int] = None
var electedOrChangedLeader: Option[Int] = None
while (electedOrChangedLeader.isEmpty && System.currentTimeMillis() < startTime + timeoutMs) {
// check if leader is elected
leader = zkClient.getLeaderForPartition(topicPartition)
leader match {
case Some(l) => (newLeaderOpt, oldLeaderOpt) match {
case (Some(newLeader), _) if newLeader == l =>
trace(s"Expected new leader $l is elected for partition $topicPartition")
electedOrChangedLeader = leader
case (_, Some(oldLeader)) if oldLeader != l =>
trace(s"Leader for partition $topicPartition is changed from $oldLeader to $l")
electedOrChangedLeader = leader
case (None, None) =>
trace(s"Leader $l is elected for partition $topicPartition")
electedOrChangedLeader = leader
case _ =>
trace(s"Current leader for partition $topicPartition is $l")
}
case None =>
trace(s"Leader for partition $topicPartition is not elected yet")
}
Thread.sleep(math.min(timeoutMs, 100L))
}
electedOrChangedLeader.getOrElse {
val errorMessage = (newLeaderOpt, oldLeaderOpt) match {
case (Some(newLeader), _) =>
s"Timing out after $timeoutMs ms since expected new leader $newLeader was not elected for partition $topicPartition, leader is $leader"
case (_, Some(oldLeader)) =>
s"Timing out after $timeoutMs ms since a new leader that is different from $oldLeader was not elected for partition $topicPartition, " +
s"leader is $leader"
case _ =>
s"Timing out after $timeoutMs ms since a leader was not elected for partition $topicPartition"
}
fail(errorMessage)
}
}
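/**
 * Hedged usage sketch (illustrative method and topic names): wait for leadership of partition 0
 * to move away from broker 0, using the oldLeaderOpt variant documented above.
 */
private def exampleLeaderChange(zkClient: KafkaZkClient): Int =
waitUntilLeaderIsElectedOrChanged(zkClient, "example-topic", partition = 0, oldLeaderOpt = Some(0))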
/**
* Execute the given block. If it throws an assert error, retry. Repeat
* until no error is thrown or the time limit elapses
*/
def retry(maxWaitMs: Long)(block: => Unit) {
var wait = 1L
val startTime = System.currentTimeMillis()
while(true) {
try {
block
return
} catch {
case e: AssertionError =>
val elapsed = System.currentTimeMillis - startTime
if (elapsed > maxWaitMs) {
throw e
} else {
info("Attempt failed, sleeping for " + wait + ", and then retrying.")
Thread.sleep(wait)
wait += math.min(wait, 1000)
}
}
}
}
/**
* Wait until the given condition is true or throw an exception if the given wait time elapses.
*/
def waitUntilTrue(condition: () => Boolean, msg: => String,
waitTime: Long = JTestUtils.DEFAULT_MAX_WAIT_MS, pause: Long = 100L): Unit = {
val startTime = System.currentTimeMillis()
while (true) {
if (condition())
return
if (System.currentTimeMillis() > startTime + waitTime)
fail(msg)
Thread.sleep(waitTime.min(pause))
}
// should never hit here
throw new RuntimeException("unexpected error")
}
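/**
 * Hedged usage sketch (illustrative method name): the typical polling pattern with waitUntilTrue,
 * here waiting until every broker's metadata cache has seen the whole cluster.
 */
private def exampleWaitForBrokers(servers: Seq[KafkaServer]): Unit =
waitUntilTrue(() => servers.forall(_.apis.metadataCache.getAliveBrokers.size == servers.size),
"Timed out waiting for all brokers to appear in every metadata cache")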
/**
* Invoke `compute` until `predicate` is true or `waitTime` elapses.
*
* Return the last `compute` result and a boolean indicating whether `predicate` succeeded for that value.
*
* This method is useful in cases where `waitUntilTrue` makes it awkward to provide good error messages.
*/
def computeUntilTrue[T](compute: => T, waitTime: Long = JTestUtils.DEFAULT_MAX_WAIT_MS, pause: Long = 100L)(
predicate: T => Boolean): (T, Boolean) = {
val startTime = System.currentTimeMillis()
while (true) {
val result = compute
if (predicate(result))
return result -> true
if (System.currentTimeMillis() > startTime + waitTime)
return result -> false
Thread.sleep(waitTime.min(pause))
}
// should never hit here
throw new RuntimeException("unexpected error")
}
def isLeaderLocalOnBroker(topic: String, partitionId: Int, server: KafkaServer): Boolean = {
server.replicaManager.getPartition(new TopicPartition(topic, partitionId)).exists(_.leaderReplicaIfLocal.isDefined)
}
def createRequestByteBuffer(request: RequestOrResponse): ByteBuffer = {
val byteBuffer = ByteBuffer.allocate(request.sizeInBytes + 2)
byteBuffer.putShort(request.requestId.get)
request.writeTo(byteBuffer)
byteBuffer.rewind()
byteBuffer
}
/**
* Wait until all brokers know about each other.
*
* @param servers The Kafka broker servers.
* @param timeout The amount of time to wait for this condition before the assertion fails
*/
def waitUntilBrokerMetadataIsPropagated(servers: Seq[KafkaServer],
timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Unit = {
val expectedBrokerIds = servers.map(_.config.brokerId).toSet
TestUtils.waitUntilTrue(() => servers.forall(server =>
expectedBrokerIds == server.apis.metadataCache.getAliveBrokers.map(_.id).toSet
), "Timed out waiting for broker metadata to propagate to all servers", timeout)
}
/**
* Wait until a valid leader is propagated to the metadata cache in each broker.
* It assumes that the leader propagated to each broker is the same.
*
* @param servers The list of servers that the metadata should reach
* @param topic The topic name
* @param partition The partition Id
* @param timeout The amount of time to wait for this condition before the assertion fails
* @return The leader of the partition.
*/
def waitUntilMetadataIsPropagated(servers: Seq[KafkaServer], topic: String, partition: Int,
timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = {
var leader: Int = -1
TestUtils.waitUntilTrue(() =>
servers.foldLeft(true) {
(result, server) =>
val partitionStateOpt = server.apis.metadataCache.getPartitionInfo(topic, partition)
partitionStateOpt match {
case None => false
case Some(partitionState) =>
leader = partitionState.basePartitionState.leader
result && Request.isValidBrokerId(leader)
}
},
"Partition [%s,%d] metadata not propagated after %d ms".format(topic, partition, timeout),
waitTime = timeout)
leader
}
def waitUntilControllerElected(zkClient: KafkaZkClient, timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = {
val (controllerId, _) = TestUtils.computeUntilTrue(zkClient.getControllerId, waitTime = timeout)(_.isDefined)
controllerId.getOrElse(fail(s"Controller not elected after $timeout ms"))
}
def waitUntilLeaderIsKnown(servers: Seq[KafkaServer], topic: String, partition: Int,
timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Unit = {
val tp = new TopicPartition(topic, partition)
TestUtils.waitUntilTrue(() =>
servers.exists { server =>
server.replicaManager.getPartition(tp).exists(_.leaderReplicaIfLocal.isDefined)
}, s"Partition $tp leaders not made yet after $timeout ms", waitTime = timeout
)
}
def writeNonsenseToFile(fileName: File, position: Long, size: Int) {
val file = new RandomAccessFile(fileName, "rw")
file.seek(position)
for (_ <- 0 until size)
file.writeByte(random.nextInt(255))
file.close()
}
def appendNonsenseToFile(fileName: File, size: Int) {
val file = new FileOutputStream(fileName, true)
for (_ <- 0 until size)
file.write(random.nextInt(255))
file.close()
}
def checkForPhantomInSyncReplicas(zkUtils: ZkUtils, topic: String, partitionToBeReassigned: Int, assignedReplicas: Seq[Int]) {
val inSyncReplicas = zkUtils.getInSyncReplicasForPartition(topic, partitionToBeReassigned)
// in sync replicas should not have any replica that is not in the new assigned replicas
val phantomInSyncReplicas = inSyncReplicas.toSet -- assignedReplicas.toSet
assertTrue("All in sync replicas %s must be in the assigned replica list %s".format(inSyncReplicas, assignedReplicas),
phantomInSyncReplicas.isEmpty)
}
def ensureNoUnderReplicatedPartitions(zkUtils: ZkUtils, topic: String, partitionToBeReassigned: Int, assignedReplicas: Seq[Int],
servers: Seq[KafkaServer]) {
TestUtils.waitUntilTrue(() => {
val inSyncReplicas = zkUtils.getInSyncReplicasForPartition(topic, partitionToBeReassigned)
inSyncReplicas.size == assignedReplicas.size
},
"Reassigned partition [%s,%d] is under replicated".format(topic, partitionToBeReassigned))
var leader: Option[Int] = None
TestUtils.waitUntilTrue(() => {
leader = zkUtils.getLeaderForPartition(topic, partitionToBeReassigned)
leader.isDefined
},
"Reassigned partition [%s,%d] is unavailable".format(topic, partitionToBeReassigned))
TestUtils.waitUntilTrue(() => {
val leaderBroker = servers.filter(s => s.config.brokerId == leader.get).head
leaderBroker.replicaManager.underReplicatedPartitionCount == 0
},
"Reassigned partition [%s,%d] is under-replicated as reported by the leader %d".format(topic, partitionToBeReassigned, leader.get))
}
def verifyNonDaemonThreadsStatus(threadNamePrefix: String) {
val threadCount = Thread.getAllStackTraces.keySet.asScala.count { t =>
!t.isDaemon && t.isAlive && t.getName.startsWith(threadNamePrefix)
}
assertEquals(0, threadCount)
}
/**
* Create new LogManager instance with default configuration for testing
*/
def createLogManager(logDirs: Seq[File] = Seq.empty[File],
defaultConfig: LogConfig = LogConfig(),
cleanerConfig: CleanerConfig = CleanerConfig(enableCleaner = false),
time: MockTime = new MockTime()): LogManager = {
new LogManager(logDirs = logDirs,
initialOfflineDirs = Array.empty[File],
topicConfigs = Map(),
initialDefaultConfig = defaultConfig,
cleanerConfig = cleanerConfig,
recoveryThreadsPerDataDir = 4,
flushCheckMs = 1000L,
flushRecoveryOffsetCheckpointMs = 10000L,
flushStartOffsetCheckpointMs = 10000L,
retentionCheckMs = 1000L,
maxPidExpirationMs = 60 * 60 * 1000,
scheduler = time.scheduler,
time = time,
brokerState = BrokerState(),
brokerTopicStats = new BrokerTopicStats,
logDirFailureChannel = new LogDirFailureChannel(logDirs.size))
}
@deprecated("This method has been deprecated and it will be removed in a future release.", "0.10.0.0")
def sendMessages(servers: Seq[KafkaServer],
topic: String,
numMessages: Int,
partition: Int = -1,
compression: CompressionCodec = NoCompressionCodec): List[String] = {
val header = "test-%d".format(partition)
val props = new Properties()
props.put("compression.codec", compression.codec.toString)
val ms = 0.until(numMessages).map(x => header + "-" + x)
// Specific Partition
if (partition >= 0) {
val producer: Producer[Int, String] =
createProducer(TestUtils.getBrokerListStrFromServers(servers),
encoder = classOf[StringEncoder].getName,
keyEncoder = classOf[IntEncoder].getName,
partitioner = classOf[FixedValuePartitioner].getName,
producerProps = props)
producer.send(ms.map(m => new KeyedMessage[Int, String](topic, partition, m)): _*)
debug("Sent %d messages for partition [%s,%d]".format(ms.size, topic, partition))
producer.close()
ms.toList
} else {
// Use topic as the key to determine partition
val producer: Producer[String, String] = createProducer(
TestUtils.getBrokerListStrFromServers(servers),
encoder = classOf[StringEncoder].getName,
keyEncoder = classOf[StringEncoder].getName,
partitioner = classOf[DefaultPartitioner].getName,
producerProps = props)
producer.send(ms.map(m => new KeyedMessage[String, String](topic, topic, m)): _*)
producer.close()
debug("Sent %d messages for topic [%s]".format(ms.size, topic))
ms.toList
}
}
def produceMessages(servers: Seq[KafkaServer],
topic: String,
numMessages: Int,
acks: Int = -1,
valueBytes: Int = -1): Seq[Array[Byte]] = {
val producer = createNewProducer(
TestUtils.getBrokerListStrFromServers(servers),
retries = 5,
requestTimeoutMs = 2000,
acks = acks
)
val values = (0 until numMessages).map(x => valueBytes match {
case -1 => s"test-$x".getBytes
case _ => new Array[Byte](valueBytes)
})
val futures = values.map { value =>
producer.send(new ProducerRecord(topic, value))
}
futures.foreach(_.get)
producer.close()
debug(s"Sent ${values.size} messages for topic [$topic]")
values
}
def produceMessage(servers: Seq[KafkaServer], topic: String, message: String) {
val producer = createNewProducer(
TestUtils.getBrokerListStrFromServers(servers),
retries = 5,
requestTimeoutMs = 2000
)
producer.send(new ProducerRecord(topic, topic.getBytes, message.getBytes)).get
producer.close()
}
/**
* Consume all messages (or a specific number of messages)
*
* @param topicMessageStreams the Topic Message Streams
* @param nMessagesPerThread an optional field to specify the exact number of messages to be returned.
* ConsumerTimeoutException will be thrown if there are no messages to be consumed.
* If not specified, then all available messages will be consumed, and no exception is thrown.
* @return the list of messages consumed.
*/
@deprecated("This method has been deprecated and will be removed in a future release.", "0.11.0.0")
def getMessages(topicMessageStreams: Map[String, List[KafkaStream[String, String]]],
nMessagesPerThread: Int = -1): List[String] = {
var messages: List[String] = Nil
val shouldGetAllMessages = nMessagesPerThread < 0
for (messageStreams <- topicMessageStreams.values) {
for (messageStream <- messageStreams) {
val iterator = messageStream.iterator()
try {
var i = 0
while ((shouldGetAllMessages && iterator.hasNext()) || (i < nMessagesPerThread)) {
assertTrue(iterator.hasNext)
val message = iterator.next().message // will throw a timeout exception if the message isn't there
messages ::= message
debug("received message: " + message)
i += 1
}
} catch {
case e: ConsumerTimeoutException =>
if (shouldGetAllMessages) {
// swallow the exception
debug("consumer timed out after receiving " + messages.length + " message(s).")
} else {
throw e
}
}
}
}
messages.reverse
}
def verifyTopicDeletion(zkClient: KafkaZkClient, topic: String, numPartitions: Int, servers: Seq[KafkaServer]) {
val topicPartitions = (0 until numPartitions).map(new TopicPartition(topic, _))
// wait until admin path for delete topic is deleted, signaling completion of topic deletion
TestUtils.waitUntilTrue(() => !zkClient.isTopicMarkedForDeletion(topic),
"Admin path /admin/delete_topic/%s path not deleted even after a replica is restarted".format(topic))
TestUtils.waitUntilTrue(() => !zkClient.topicExists(topic),
"Topic path /brokers/topics/%s not deleted after /admin/delete_topic/%s path is deleted".format(topic, topic))
// ensure that the topic-partition has been deleted from all brokers' replica managers
TestUtils.waitUntilTrue(() =>
servers.forall(server => topicPartitions.forall(tp => server.replicaManager.getPartition(tp).isEmpty)),
"Replica manager's should have deleted all of this topic's partitions")
// ensure that logs from all replicas are deleted if delete topic is marked successful in ZooKeeper
assertTrue("Replica logs not deleted after delete topic is complete",
servers.forall(server => topicPartitions.forall(tp => server.getLogManager.getLog(tp).isEmpty)))
// ensure that topic is removed from all cleaner offsets
TestUtils.waitUntilTrue(() => servers.forall(server => topicPartitions.forall { tp =>
val checkpoints = server.getLogManager.liveLogDirs.map { logDir =>
new OffsetCheckpointFile(new File(logDir, "cleaner-offset-checkpoint")).read()
}
checkpoints.forall(checkpointsPerLogDir => !checkpointsPerLogDir.contains(tp))
}), "Cleaner offset for deleted partition should have been removed")
import scala.collection.JavaConverters._
TestUtils.waitUntilTrue(() => servers.forall(server =>
server.config.logDirs.forall { logDir =>
topicPartitions.forall { tp =>
!new File(logDir, tp.topic + "-" + tp.partition).exists()
}
}
), "Failed to soft-delete the data to a delete directory")
TestUtils.waitUntilTrue(() => servers.forall(server =>
server.config.logDirs.forall { logDir =>
topicPartitions.forall { tp =>
!java.util.Arrays.asList(new File(logDir).list()).asScala.exists { partitionDirectoryName =>
partitionDirectoryName.startsWith(tp.topic + "-" + tp.partition) &&
partitionDirectoryName.endsWith(Log.DeleteDirSuffix)
}
}
}
), "Failed to hard-delete the delete directory")
}
/**
* Translate the given buffer into a string
*
* @param buffer The buffer to translate
* @param encoding The encoding to use in translating bytes to characters
*/
def readString(buffer: ByteBuffer, encoding: String = Charset.defaultCharset.toString): String = {
val bytes = new Array[Byte](buffer.remaining)
buffer.get(bytes)
new String(bytes, encoding)
}
def copyOf(props: Properties): Properties = {
val copy = new Properties()
copy ++= props
copy
}
def sslConfigs(mode: Mode, clientCert: Boolean, trustStoreFile: Option[File], certAlias: String,
certCn: String = SslCertificateCn): Properties = {
val trustStore = trustStoreFile.getOrElse {
throw new Exception("SSL enabled but no trustStoreFile provided")
}
val sslConfigs = TestSslUtils.createSslConfig(clientCert, true, mode, trustStore, certAlias, certCn)
val sslProps = new Properties()
sslConfigs.asScala.foreach { case (k, v) => sslProps.put(k, v) }
sslProps
}
// an X509TrustManager to trust self-signed certs for unit tests.
def trustAllCerts: X509TrustManager = {
val trustManager = new X509TrustManager() {
override def getAcceptedIssuers: Array[X509Certificate] = {
null
}
override def checkClientTrusted(certs: Array[X509Certificate], authType: String) {
}
override def checkServerTrusted(certs: Array[X509Certificate], authType: String) {
}
}
trustManager
}
def waitAndVerifyAcls(expected: Set[Acl], authorizer: Authorizer, resource: Resource) = {
TestUtils.waitUntilTrue(() => authorizer.getAcls(resource) == expected,
s"expected acls $expected but got ${authorizer.getAcls(resource)}", waitTime = JTestUtils.DEFAULT_MAX_WAIT_MS)
}
/**
* Verifies that this ACL is the secure one.
*/
def isAclSecure(acl: ACL, sensitive: Boolean): Boolean = {
debug(s"ACL $acl")
acl.getPerms match {
case Perms.READ => !sensitive && acl.getId.getScheme == "world"
case Perms.ALL => acl.getId.getScheme == "sasl"
case _ => false
}
}
/**
* Verifies that the ACL corresponds to the unsecure one that
* provides ALL access to everyone (world).
*/
def isAclUnsecure(acl: ACL): Boolean = {
debug(s"ACL $acl")
acl.getPerms match {
case Perms.ALL => acl.getId.getScheme == "world"
case _ => false
}
}
private def secureZkPaths(zkUtils: ZkUtils): Seq[String] = {
def subPaths(path: String): Seq[String] = {
if (zkUtils.pathExists(path))
path +: zkUtils.getChildren(path).map(c => path + "/" + c).flatMap(subPaths)
else
Seq.empty
}
val topLevelPaths = ZkUtils.SecureZkRootPaths ++ ZkUtils.SensitiveZkRootPaths
topLevelPaths.flatMap(subPaths)
}
/**
* Verifies that all secure paths in ZK are created with the expected ACL.
*/
def verifySecureZkAcls(zkUtils: ZkUtils, usersWithAccess: Int) {
secureZkPaths(zkUtils).foreach(path => {
if (zkUtils.pathExists(path)) {
val sensitive = ZkUtils.sensitivePath(path)
// usersWithAccess have ALL access to path. For paths that are
// not sensitive, world has READ access.
val aclCount = if (sensitive) usersWithAccess else usersWithAccess + 1
val acls = zkUtils.zkConnection.getAcl(path).getKey
assertEquals(s"Invalid ACLs for $path $acls", aclCount, acls.size)
acls.asScala.foreach(acl => isAclSecure(acl, sensitive))
}
})
}
/**
* Verifies that secure paths in ZK have no access control. This is
* the case when zookeeper.set.acl=false and no ACLs have been configured.
*/
def verifyUnsecureZkAcls(zkUtils: ZkUtils) {
secureZkPaths(zkUtils).foreach(path => {
if (zkUtils.pathExists(path)) {
val acls = zkUtils.zkConnection.getAcl(path).getKey
assertEquals(s"Invalid ACLs for $path $acls", 1, acls.size)
acls.asScala.foreach(isAclUnsecure)
}
})
}
/**
* To use this, pass in a sequence of functions that make up your arrange/act/assert test on the SUT.
* They all run at the same time in assertConcurrent, which greatly increases the chances of triggering
* a multithreading bug and thereby failing some assertion.
*/
def assertConcurrent(message: String, functions: Seq[() => Any], timeoutMs: Int) {
def failWithTimeout() {
fail(s"$message. Timed out, the concurrent functions took more than $timeoutMs milliseconds")
}
val numThreads = functions.size
val threadPool = Executors.newFixedThreadPool(numThreads)
val exceptions = ArrayBuffer[Throwable]()
try {
val runnables = functions.map { function =>
new Callable[Unit] {
override def call(): Unit = function()
}
}.asJava
val futures = threadPool.invokeAll(runnables, timeoutMs, TimeUnit.MILLISECONDS).asScala
futures.foreach { future =>
if (future.isCancelled)
failWithTimeout()
else
try future.get()
catch { case e: Exception =>
exceptions += e
}
}
} catch {
case _: InterruptedException => failWithTimeout()
case e: Throwable => exceptions += e
} finally {
threadPool.shutdownNow()
}
assertTrue(s"$message failed with exception(s) $exceptions", exceptions.isEmpty)
}
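/**
 * Hedged usage sketch (illustrative method name): exercise two independent actions concurrently
 * and fail if either throws or the pair does not finish within the timeout.
 */
private def exampleAssertConcurrent(): Unit =
assertConcurrent("concurrent helpers should not fail",
Seq(() => randomString(8), () => randomBytes(8)),
timeoutMs = 15000)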
def consumeTopicRecords[K, V](servers: Seq[KafkaServer],
topic: String,
numMessages: Int,
securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT,
trustStoreFile: Option[File] = None,
waitTime: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Seq[ConsumerRecord[Array[Byte], Array[Byte]]] = {
val consumer = createNewConsumer(TestUtils.getBrokerListStrFromServers(servers, securityProtocol),
securityProtocol = securityProtocol, trustStoreFile = trustStoreFile)
try {
consumer.subscribe(Collections.singleton(topic))
consumeRecords(consumer, numMessages, waitTime)
} finally consumer.close()
}
def consumeRecords[K, V](consumer: KafkaConsumer[K, V], numMessages: Int,
waitTime: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Seq[ConsumerRecord[K, V]] = {
val records = new ArrayBuffer[ConsumerRecord[K, V]]()
waitUntilTrue(() => {
records ++= consumer.poll(50).asScala
records.size >= numMessages
}, s"Consumed ${records.size} records until timeout instead of the expected $numMessages records", waitTime)
assertEquals("Consumed more records than expected", numMessages, records.size)
records
}
/**
   * Consumes records from the given consumer for the specified duration. To drain all the remaining
   * messages in the partitions the consumer is subscribed to, set the duration high enough for the
   * consumer to poll everything; base it on the number of messages expected to be left in the topic,
   * and keep it small (i.e. no more than a second or so) in our tests.
*
* @return All the records consumed by the consumer within the specified duration.
*/
def consumeRecordsFor[K, V](consumer: KafkaConsumer[K, V], duration: Long): Seq[ConsumerRecord[K, V]] = {
val startTime = System.currentTimeMillis()
val records = new ArrayBuffer[ConsumerRecord[K, V]]()
waitUntilTrue(() => {
records ++= consumer.poll(50).asScala
System.currentTimeMillis() - startTime > duration
}, s"The timeout $duration was greater than the maximum wait time.")
records
}
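  // Illustrative call (assumes a consumer already subscribed via the helpers above; the 1000 ms
  // budget and the topic name are arbitrary examples, not recommended values):
  //
  //   val drained = consumeRecordsFor(consumer, duration = 1000L)
  //   assertTrue(drained.forall(_.topic == "my-topic"))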
def createTransactionalProducer(transactionalId: String, servers: Seq[KafkaServer], batchSize: Int = 16384,
transactionTimeoutMs: Long = 60000) = {
val props = new Properties()
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionalId)
props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5")
props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize.toString)
props.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, transactionTimeoutMs.toString)
TestUtils.createNewProducer(TestUtils.getBrokerListStrFromServers(servers), retries = Integer.MAX_VALUE, acks = -1, props = Some(props))
}
  // Seeds the given topic with records whose keys and values are in the range [0..numRecords)
def seedTopicWithNumberedRecords(topic: String, numRecords: Int, servers: Seq[KafkaServer]): Unit = {
val props = new Properties()
props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
val producer = TestUtils.createNewProducer(TestUtils.getBrokerListStrFromServers(servers),
retries = Integer.MAX_VALUE, acks = -1, props = Some(props))
try {
for (i <- 0 until numRecords) {
producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, asBytes(i.toString), asBytes(i.toString)))
}
producer.flush()
} finally {
producer.close()
}
}
private def asString(bytes: Array[Byte]) = new String(bytes, StandardCharsets.UTF_8)
private def asBytes(string: String) = string.getBytes(StandardCharsets.UTF_8)
  // Verifies that the record was intended to be committed by checking its headers for the expected transaction status.
  // If so, returns the record value as a string. The record is expected to have been created by the
  // `producerRecordWithExpectedTransactionStatus` method.
def assertCommittedAndGetValue(record: ConsumerRecord[Array[Byte], Array[Byte]]) : String = {
record.headers.headers(transactionStatusKey).asScala.headOption match {
case Some(header) =>
assertEquals(s"Got ${asString(header.value)} but expected the value to indicate " +
s"committed status.", asString(committedValue), asString(header.value))
case None =>
fail("expected the record header to include an expected transaction status, but received nothing.")
}
recordValueAsString(record)
}
def recordValueAsString(record: ConsumerRecord[Array[Byte], Array[Byte]]) : String = {
asString(record.value)
}
def producerRecordWithExpectedTransactionStatus(topic: String, key: Array[Byte], value: Array[Byte],
willBeCommitted: Boolean) : ProducerRecord[Array[Byte], Array[Byte]] = {
    val header = new Header {
      override def key() = transactionStatusKey
      override def value() = if (willBeCommitted) committedValue else abortedValue
    }
new ProducerRecord[Array[Byte], Array[Byte]](topic, null, key, value, Collections.singleton(header))
}
def producerRecordWithExpectedTransactionStatus(topic: String, key: String, value: String,
willBeCommitted: Boolean) : ProducerRecord[Array[Byte], Array[Byte]] = {
producerRecordWithExpectedTransactionStatus(topic, asBytes(key), asBytes(value), willBeCommitted)
}
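  // Round-trip sketch tying the helpers above to `assertCommittedAndGetValue` (the topic, key and
  // value literals are hypothetical; `producer` and `consumedRecord` are assumed to come from the
  // surrounding test):
  //
  //   producer.send(producerRecordWithExpectedTransactionStatus("txn-topic", "k", "v", willBeCommitted = true))
  //   // ... commit the transaction, then consume the topic ...
  //   val value = assertCommittedAndGetValue(consumedRecord) // fails if the header marks the record as aborted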
  // Collects the current positions for all partitions in the consumer's current assignment.
def consumerPositions(consumer: KafkaConsumer[Array[Byte], Array[Byte]]) : Map[TopicPartition, OffsetAndMetadata] = {
val offsetsToCommit = new mutable.HashMap[TopicPartition, OffsetAndMetadata]()
consumer.assignment.asScala.foreach { topicPartition =>
offsetsToCommit.put(topicPartition, new OffsetAndMetadata(consumer.position(topicPartition)))
}
offsetsToCommit.toMap
}
def pollUntilAtLeastNumRecords(consumer: KafkaConsumer[Array[Byte], Array[Byte]], numRecords: Int): Seq[ConsumerRecord[Array[Byte], Array[Byte]]] = {
val records = new ArrayBuffer[ConsumerRecord[Array[Byte], Array[Byte]]]()
TestUtils.waitUntilTrue(() => {
records ++= consumer.poll(50).asScala
records.size >= numRecords
}, s"Consumed ${records.size} records until timeout, but expected $numRecords records.")
records
}
def resetToCommittedPositions(consumer: KafkaConsumer[Array[Byte], Array[Byte]]) = {
    consumer.assignment.asScala.foreach { topicPartition =>
val offset = consumer.committed(topicPartition)
if (offset != null)
consumer.seek(topicPartition, offset.offset)
else
consumer.seekToBeginning(Collections.singletonList(topicPartition))
}
}
/**
* Capture the console output during the execution of the provided function.
*/
def grabConsoleOutput(f: => Unit) : String = {
val out = new ByteArrayOutputStream
try scala.Console.withOut(out)(f)
finally scala.Console.out.flush()
out.toString
}
/**
* Capture the console error during the execution of the provided function.
*/
def grabConsoleError(f: => Unit) : String = {
val err = new ByteArrayOutputStream
try scala.Console.withErr(err)(f)
finally scala.Console.err.flush()
err.toString
}
/**
* Capture both the console output and console error during the execution of the provided function.
*/
def grabConsoleOutputAndError(f: => Unit) : (String, String) = {
val out = new ByteArrayOutputStream
val err = new ByteArrayOutputStream
try scala.Console.withOut(out)(scala.Console.withErr(err)(f))
finally {
scala.Console.out.flush()
scala.Console.err.flush()
}
(out.toString, err.toString)
}
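  // Example (assumes nothing else writes to stdout/stderr while the thunk runs):
  //
  //   val (out, err) = grabConsoleOutputAndError {
  //     println("to out")
  //     Console.err.println("to err")
  //   }
  //   // out.trim == "to out", err.trim == "to err"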
}
class IntEncoder(props: VerifiableProperties = null) extends Encoder[Int] {
override def toBytes(n: Int) = n.toString.getBytes
}
@deprecated("This class is deprecated and it will be removed in a future release.", "0.10.0.0")
class StaticPartitioner(props: VerifiableProperties = null) extends Partitioner {
def partition(data: Any, numPartitions: Int): Int = {
data.asInstanceOf[String].length % numPartitions
}
}
@deprecated("This class has been deprecated and it will be removed in a future release.", "0.10.0.0")
class FixedValuePartitioner(props: VerifiableProperties = null) extends Partitioner {
def partition(data: Any, numPartitions: Int): Int = data.asInstanceOf[Int]
}
|
MyPureCloud/kafka
|
core/src/test/scala/unit/kafka/utils/TestUtils.scala
|
Scala
|
apache-2.0
| 66,152
|
package katas.scala.fizzbuzz
import org.scalatest.Matchers
import org.junit.Test
class FizzBuzz extends Matchers {
@Test def given_number_one_should_produce_one() {
fizzBuzz(1) should equal("1")
}
@Test def given_number_two_should_produce_two() {
fizzBuzz(2) should equal("2")
}
@Test def given_number_three_should_produce_fizz() {
fizzBuzz(3) should equal("Fizz")
}
@Test def given_number_four_should_produce_four() {
fizzBuzz(4) should equal("4")
}
@Test def given_number_five_should_produce_buzz() {
fizzBuzz(5) should equal("Buzz")
}
@Test def given_number_seven_should_produce_woof() {
fizzBuzzWoof(7) should equal("Woof")
}
@Test def given_number_fifteen_should_produce_fizzBuzz() {
fizzBuzz(15) should equal("FizzBuzz")
}
@Test def given_number_twenty_one_should_produce_fizzWoof() {
fizzBuzzWoof(21) should equal("FizzWoof")
}
@Test def given_number_thirty_five_should_produce_buzzWoof() {
fizzBuzzWoof(35) should equal("BuzzWoof")
}
@Test def given_number_hundred_five_should_produce_fizzBuzzWoof() {
fizzBuzzWoof(105) should equal("FizzBuzzWoof")
}
@Test def output_from_one_to_one_hundred() {
Range(1, 101).map(fizzBuzz).toList.toString should equal("List(1, 2, Fizz, 4, Buzz, Fizz, 7, 8, Fizz, Buzz, 11, Fizz, 13, 14, FizzBuzz, 16, 17, Fizz, 19, Buzz, Fizz, 22, 23, Fizz, Buzz, 26, Fizz, 28, 29, FizzBuzz, 31, 32, Fizz, 34, Buzz, Fizz, 37, 38, Fizz, Buzz, 41, Fizz, 43, 44, FizzBuzz, 46, 47, Fizz, 49, Buzz, Fizz, 52, 53, Fizz, Buzz, 56, Fizz, 58, 59, FizzBuzz, 61, 62, Fizz, 64, Buzz, Fizz, 67, 68, Fizz, Buzz, 71, Fizz, 73, 74, FizzBuzz, 76, 77, Fizz, 79, Buzz, Fizz, 82, 83, Fizz, Buzz, 86, Fizz, 88, 89, FizzBuzz, 91, 92, Fizz, 94, Buzz, Fizz, 97, 98, Fizz, Buzz)")
}
abstract class Multiple(n: Int, val name: String) {
def matches(n: Int): Boolean = n % this.n == 0
}
case class Fizz() extends Multiple(3, "Fizz")
case class Buzz() extends Multiple(5, "Buzz")
case class Woof() extends Multiple(7, "Woof")
def fizzBuzzWoof(n: Int, multiples: Seq[Multiple] = Seq(Fizz(), Buzz(), Woof())): String = {
multiples.filter{_.matches(n)} match {
case Seq() => n.toString
case xs => xs.map{_.name}.mkString("")
}
}
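  // Extension sketch (hypothetical extra rule, not part of the kata): with the Multiple
  // abstraction a new rule is just another case class passed to fizzBuzzWoof.
  //
  //   case class Meow() extends Multiple(11, "Meow")
  //   fizzBuzzWoof(33, Seq(Fizz(), Meow())) // "FizzMeow"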
val FIZZ_MULTIPLE = 3
val BUZZ_MULTIPLE = 5
val WOOF_MULTIPLE = 7
def fizzBuzz(input: Int): String = {
if (input % FIZZ_MULTIPLE == 0 && input % BUZZ_MULTIPLE == 0) "FizzBuzz"
else if (input % FIZZ_MULTIPLE == 0) "Fizz"
else if (input % BUZZ_MULTIPLE == 0) "Buzz"
else input.toString
}
}
|
dkandalov/katas
|
scala/src/katas/scala/fizzbuzz/FizzBuzz.scala
|
Scala
|
unlicense
| 2,505
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.entity.test
import org.apache.openwhisk.core.database.DocumentFactory
import spray.json._
import org.apache.openwhisk.core.entity._
/**
* Contains types which represent former versions of database schemas
* to be able to test migration path
*/
/**
* Old schema of rules, containing the rules' status in the rule record
* itself
*/
case class OldWhiskRule(namespace: EntityPath,
override val name: EntityName,
trigger: EntityName,
action: EntityName,
status: Status,
version: SemVer = SemVer(),
publish: Boolean = false,
annotations: Parameters = Parameters())
extends WhiskEntity(name, "rule") {
def toJson = OldWhiskRule.serdes.write(this).asJsObject
def toWhiskRule = {
WhiskRule(
namespace,
name,
FullyQualifiedEntityName(namespace, trigger),
FullyQualifiedEntityName(namespace, action),
version,
publish,
annotations)
}
}
object OldWhiskRule
extends DocumentFactory[OldWhiskRule]
with WhiskEntityQueries[OldWhiskRule]
with DefaultJsonProtocol {
override val collectionName = "rules"
override implicit val serdes = jsonFormat8(OldWhiskRule.apply)
}
/**
* Old schema of triggers, not containing a map of ReducedRules
*/
case class OldWhiskTrigger(namespace: EntityPath,
override val name: EntityName,
parameters: Parameters = Parameters(),
limits: TriggerLimits = TriggerLimits(),
version: SemVer = SemVer(),
publish: Boolean = false,
annotations: Parameters = Parameters())
extends WhiskEntity(name, "trigger") {
def toJson = OldWhiskTrigger.serdes.write(this).asJsObject
def toWhiskTrigger = WhiskTrigger(namespace, name, parameters, limits, version, publish, annotations)
}
object OldWhiskTrigger
extends DocumentFactory[OldWhiskTrigger]
with WhiskEntityQueries[OldWhiskTrigger]
with DefaultJsonProtocol {
override val collectionName = "triggers"
override implicit val serdes = jsonFormat7(OldWhiskTrigger.apply)
}
|
starpit/openwhisk
|
tests/src/test/scala/org/apache/openwhisk/core/entity/test/MigrationEntities.scala
|
Scala
|
apache-2.0
| 3,102
|
package bc
import factory.VirtualMachineFactory
import org.scalatest.FunSuite
import bc.byteCodes.IConst
class PublicByteCodeParserSuite extends FunSuite with ByteCodeValues {
val bcp: ByteCodeParser = VirtualMachineFactory.byteCodeParser
test("[5] byte code parser should parse a single bytecode") {
val code = Vector(bytecode("iadd"))
val bc = bcp.parse(code)
assert(bc.length == 1, "did not parse one bytecode")
assert(bc(0).code == bytecode("iadd"), "did not have the correct code")
}
test("[6] byte code parser should parse a sequence of bytecode") {
val code = Vector(bytecode("iconst"), 4.toByte, bytecode("iconst"), 5.toByte, bytecode("iadd"))
val bc = bcp.parse(code)
    assert(bc.length == 3, "did not parse three bytecodes")
assert(bc(0).code == bytecode("iconst"))
assert(bc(1).code == bytecode("iconst"))
assert(bc(2).code == bytecode("iadd"))
assert(bc(0).asInstanceOf[IConst].num == 4)
assert(bc(1).asInstanceOf[IConst].num == 5)
}
test("[7] byte code parser should throw if no args are provided to IConst") {
val code1 = Vector(bytecode("iconst"))
val code2 = Vector(bytecode("iconst"), bytecode("iadd"))
intercept[InvalidBytecodeException] {
bcp.parse(code1)
}
}
test("[8] byte code parser should return an empty Vector if no instructions are passed") {
assert(bcp.parse(Vector.empty) == Vector.empty)
}
}
|
BBK-PiJ-2015-67/sdp-portfolio
|
coursework/cw-two/src/test/scala/bc/PublicByteCodeParserSuite.scala
|
Scala
|
unlicense
| 1,417
|
package io.getquill.context.jdbc.h2
import io.getquill.context.sql.EncodingSpec
class JdbcEncodingSpec extends EncodingSpec {
val context = testContext
import testContext._
"encodes and decodes types" in {
testContext.run(delete)
testContext.run(liftQuery(insertValues).foreach(p => insert(p)))
verify(testContext.run(query[EncodingTestEntity]))
}
}
|
getquill/quill
|
quill-jdbc/src/test/scala/io/getquill/context/jdbc/h2/JdbcEncodingSpec.scala
|
Scala
|
apache-2.0
| 374
|
package org.knora.webapi.messages.v1.store.triplestoremessages
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import org.knora.webapi.util.ErrorHandlingMap
import org.knora.webapi.{IRI, InconsistentTriplestoreDataException, TriplestoreResponseException}
import spray.json.{DefaultJsonProtocol, NullOptions, RootJsonFormat}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Messages
sealed trait TriplestoreRequest
/**
* Simple message for initial actor functionality.
*/
case class HelloTriplestore(txt: String) extends TriplestoreRequest
/**
* Simple message for checking the connection to the triplestore.
*/
case object CheckConnection extends TriplestoreRequest
/**
* Represents a SPARQL SELECT query to be sent to the triplestore. A successful response will be a [[SparqlSelectResponse]].
*
* @param sparql the SPARQL string.
*/
case class SparqlSelectRequest(sparql: String) extends TriplestoreRequest
/**
* Represents a response to a SPARQL SELECT query, containing a parsed representation of the response (JSON, etc.)
 * returned by the triplestore.
*
* @param head the header of the response, containing the variable names.
* @param results the body of the response, containing rows of query results.
*/
case class SparqlSelectResponse(head: SparqlSelectResponseHeader, results: SparqlSelectResponseBody) {
/**
* Returns the contents of the first row of results.
*
* @return a [[Map]] representing the contents of the first row of results.
*/
  @throws[TriplestoreResponseException]("if the query returned no results.")
def getFirstRow: VariableResultsRow = {
if (results.bindings.isEmpty) {
throw TriplestoreResponseException(s"A SPARQL query unexpectedly returned an empty result")
}
results.bindings.head
}
}
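// Usage sketch (not part of the original messages): given a `response` of type
// [[SparqlSelectResponse]] obtained for a [[SparqlSelectRequest]], the first binding row is a map
// of variable names to values; `"s"` is a hypothetical variable from the SELECT clause.
//
//   val row: VariableResultsRow = response.getFirstRow
//   val subjectIri: String = row.rowMap("s")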
/**
* Represents the header of a JSON response to a SPARQL SELECT query.
*
* @param vars the names of the variables that were used in the SPARQL SELECT statement.
*/
case class SparqlSelectResponseHeader(vars: Seq[String])
/**
* Represents the body of a JSON response to a SPARQL SELECT query.
*
* @param bindings the bindings of values to the variables used in the SPARQL SELECT statement.
* Empty rows are not allowed.
*/
case class SparqlSelectResponseBody(bindings: Seq[VariableResultsRow]) {
require(bindings.forall(_.rowMap.nonEmpty), "Empty rows are not allowed in a SparqlSelectResponseBody")
}
/**
* Represents a row of results in a JSON response to a SPARQL SELECT query.
*
* @param rowMap a map of variable names to values in the row. An empty string is not allowed as a variable
* name or value.
*/
case class VariableResultsRow(rowMap: ErrorHandlingMap[String, String]) {
require(rowMap.forall {
case (key, value) => key.nonEmpty && value.nonEmpty
}, "An empty string is not allowed as a variable name or value in a VariableResultsRow")
}
/**
* Represents a SPARQL CONSTRUCT query to be sent to the triplestore. A successful response will be a
* [[SparqlConstructResponse]].
*
* @param sparql the SPARQL string.
*/
case class SparqlConstructRequest(sparql: String) extends TriplestoreRequest
/**
* A response to a [[SparqlConstructRequest]].
*
* @param statements a map of subject IRIs to statements about each subject.
*/
case class SparqlConstructResponse(statements: Map[IRI, Seq[(IRI, String)]])
/**
* Represents a SPARQL Update operation to be performed.
*
* @param sparql the SPARQL string.
*/
case class SparqlUpdateRequest(sparql: String) extends TriplestoreRequest
/**
 * Indicates that the requested SPARQL Update was executed and returned no errors.
*/
case class SparqlUpdateResponse()
/**
* Represents a SPARQL ASK query to be sent to the triplestore. A successful response will be a
* [[SparqlAskResponse]].
*
* @param sparql the SPARQL string.
*/
case class SparqlAskRequest(sparql: String) extends TriplestoreRequest
/**
* Represents a response to a SPARQL ASK query, containing the result.
*
* @param result of the query.
*/
case class SparqlAskResponse(result: Boolean)
/**
* Message for resetting the contents of the triplestore and loading a fresh set of data. The data needs to be
* stored in an accessible path and supplied via the [[RdfDataObject]].
*
* @param rdfDataObjects contains a list of [[RdfDataObject]].
*/
case class ResetTriplestoreContent(rdfDataObjects: Seq[RdfDataObject]) extends TriplestoreRequest
/**
* Sent as a response to [[ResetTriplestoreContent]] if the request was processed successfully.
*/
case class ResetTriplestoreContentACK()
/**
* Message for removing all content from the triple store.
*/
case class DropAllTriplestoreContent() extends TriplestoreRequest
/**
* Sent as a response to [[DropAllTriplestoreContent]] if the request was processed successfully.
*/
case class DropAllTriplestoreContentACK()
/**
* Inserts data into the triplestore.
*
* @param rdfDataObjects contains a list of [[RdfDataObject]].
*/
case class InsertTriplestoreContent(rdfDataObjects: Seq[RdfDataObject]) extends TriplestoreRequest
/**
* Sent as a response to [[InsertTriplestoreContent]] if the request was processed successfully.
*/
case class InsertTriplestoreContentACK()
/**
 * Initializes the triplestore. This will initiate the (re)creation of the repository and the loading of data into it.
 *
 * @param rdfDataObject the [[RdfDataObject]] describing the data to load.
*/
case class InitTriplestore(rdfDataObject: RdfDataObject) extends TriplestoreRequest
/**
 * Indicates that initialization ((re)creation of the repository and loading of data) finished successfully.
*/
case class InitTriplestoreACK()
/**
 * Asks the triplestore whether it has finished initialization.
*/
case class Initialized() extends TriplestoreRequest
/**
 * Response indicating whether the triplestore has finished initialization and is ready for processing messages.
*
* @param initFinished indicates if actor initialization has finished
*/
case class InitializedResponse(initFinished: Boolean)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Components of messages
/**
* Contains the path to the 'ttl' file and the name of the named graph it should be loaded in.
*
 * @param path the path to the 'ttl' file.
 * @param name the name of the named graph the data will be loaded into.
*/
case class RdfDataObject(path: String, name: String)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// JSON formatting
/**
* A spray-json protocol for generating Knora API v1 JSON providing data about resources and their properties.
*/
trait TriplestoreJsonProtocol extends SprayJsonSupport with DefaultJsonProtocol with NullOptions {
implicit val rdfDataObjectFormat: RootJsonFormat[RdfDataObject] = jsonFormat2(RdfDataObject)
implicit val resetTriplestoreContentFormat: RootJsonFormat[ResetTriplestoreContent] = jsonFormat1(ResetTriplestoreContent)
}
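// Serialization sketch (assumes a concrete object mixing in TriplestoreJsonProtocol; the file
// path and graph name are made-up examples):
//
//   object Protocol extends TriplestoreJsonProtocol
//   import Protocol._
//   import spray.json._
//   val json = RdfDataObject("_test_data/example-data.ttl", "http://www.example.org/data").toJson
//   // json.compactPrint == """{"path":"_test_data/example-data.ttl","name":"http://www.example.org/data"}"""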
|
nie-ine/Knora
|
webapi/src/main/scala/org/knora/webapi/messages/v1/store/triplestoremessages/TriplestoreMessages.scala
|
Scala
|
agpl-3.0
| 7,197
|
package io.reactivecqrs.core.eventbus
import io.reactivecqrs.api.AggregateVersion
import io.reactivecqrs.api.id.AggregateId
import io.reactivecqrs.core.eventbus.PostgresEventBusState.CacheValue
import io.reactivecqrs.core.projection.OptimisticLockingFailed
import org.postgresql.util.PSQLException
import scalikejdbc._
import scala.collection.mutable
import scala.util.{Failure, Success, Try}
abstract class EventBusState {
def lastPublishedEventForAggregate(aggregateId: AggregateId): AggregateVersion
def eventPublished(aggregateId: AggregateId, lastAggregateVersion: AggregateVersion, aggregateVersion: AggregateVersion): Try[Unit]
def flushUpdates(): Try[Unit]
}
class MemoryEventBusState extends EventBusState {
var state = new mutable.HashMap[AggregateId, AggregateVersion]()
override def lastPublishedEventForAggregate(aggregateId: AggregateId): AggregateVersion = {
state.getOrElse(aggregateId, {
state += aggregateId -> AggregateVersion.ZERO
AggregateVersion.ZERO
})
}
override def eventPublished(aggregateId: AggregateId, lastAggregateVersion: AggregateVersion, aggregateVersion: AggregateVersion): Try[Unit] = {
if (state.get(aggregateId).contains(lastAggregateVersion)) {
state += aggregateId -> aggregateVersion
Success(())
} else {
Failure(new OptimisticLockingFailed)
}
}
override def flushUpdates(): Try[Unit] = {
Success(())
}
}
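// Optimistic-locking sketch (the aggregate id literal is a made-up example and assumes the usual
// single-Long constructor of AggregateId): publishing against a stale version fails.
//
//   val state = new MemoryEventBusState
//   val id = AggregateId(1L)
//   val v0 = state.lastPublishedEventForAggregate(id)  // AggregateVersion.ZERO
//   state.eventPublished(id, v0, AggregateVersion(1))  // Success(())
//   state.eventPublished(id, v0, AggregateVersion(2))  // Failure(OptimisticLockingFailed)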
object PostgresEventBusState {
case class CacheValue(last: AggregateVersion, current: AggregateVersion)
}
class PostgresEventBusState extends EventBusState {
def initSchema(): PostgresEventBusState = {
createEventBusTable()
try {
createEventBusSequence()
} catch {
case e: PSQLException => () //ignore until CREATE SEQUENCE IF NOT EXISTS is available in PostgreSQL
}
try {
createAggregateIdIndex()
} catch {
case e: PSQLException => () //ignore until CREATE UNIQUE INDEX IF NOT EXISTS is available in PostgreSQL
}
try {
createAggregateIdVersionIndex()
} catch {
case e: PSQLException => () //ignore until CREATE UNIQUE INDEX IF NOT EXISTS is available in PostgreSQL
}
this
}
private def createEventBusTable() = DB.autoCommit { implicit session =>
sql"""
CREATE TABLE IF NOT EXISTS event_bus (
id BIGINT NOT NULL PRIMARY KEY,
aggregate_id BIGINT NOT NULL,
aggregate_version INT NOT NULL)
""".execute().apply()
}
private def createEventBusSequence() = DB.autoCommit { implicit session =>
sql"""CREATE SEQUENCE event_bus_seq""".execute().apply()
}
private def createAggregateIdIndex() = DB.autoCommit { implicit session =>
sql"""CREATE UNIQUE INDEX event_bus_agg_id_idx ON event_bus (aggregate_id)""".execute().apply()
}
private def createAggregateIdVersionIndex() = DB.autoCommit { implicit session =>
sql"""CREATE UNIQUE INDEX event_bus_agg_id_version_idx ON event_bus (aggregate_id, aggregate_version)""".execute().apply()
}
override def lastPublishedEventForAggregate(aggregateId: AggregateId): AggregateVersion = {
DB.localTx { implicit session =>
val versionOption = sql"""SELECT aggregate_version FROM event_bus WHERE aggregate_id = ?"""
.bind(aggregateId.asLong).map(rs => AggregateVersion(rs.int(1))).single().apply()
versionOption match {
case Some(version) => version
case None => addAggregateEntry(aggregateId)
}
}
}
private def addAggregateEntry(aggregateId: AggregateId)(implicit session: DBSession): AggregateVersion = {
sql"""INSERT INTO event_bus (id, aggregate_id, aggregate_version) VALUES (nextval('event_bus_seq'), ?, 0)"""
.bind(aggregateId.asLong).executeUpdate().apply()
AggregateVersion.ZERO
}
// aggregate id -> (base version, current version)
private var aggregatesToUpdate = Map[AggregateId, CacheValue]()
override def eventPublished(aggregateId: AggregateId, lastAggregateVersion: AggregateVersion, aggregateVersion: AggregateVersion): Try[Unit] = synchronized {
aggregatesToUpdate.get(aggregateId) match {
case None => aggregatesToUpdate += aggregateId -> CacheValue(lastAggregateVersion, aggregateVersion)
case Some(CacheValue(last, current)) => aggregatesToUpdate += aggregateId -> CacheValue(last, aggregateVersion)
}
if(aggregatesToUpdate.size > 20) {
flushUpdates()
} else {
Success(())
}
// DB.localTx { implicit session =>
// val rowsUpdated = sql"""UPDATE event_bus SET aggregate_version = ? WHERE aggregate_id = ? AND aggregate_version = ?"""
// .bind(aggregateVersion.asInt, aggregateId.asLong, lastAggregateVersion.asInt).map(rs => rs.int(1)).single().executeUpdate().apply()
// if (rowsUpdated == 1) {
// Success(())
// } else {
// Failure(new OptimisticLockingFailed) // TODO handle this
// }
// }
}
//TODO handle optimistic locking!!!!
override def flushUpdates(): Try[Unit] = synchronized {
if(aggregatesToUpdate.nonEmpty) {
try {
DB.localTx { implicit session =>
val params: Seq[Seq[Any]] = aggregatesToUpdate.toSeq.map {
case (key, CacheValue(last, current))=> Seq(current.asInt, key.asLong, last.asInt)
}
aggregatesToUpdate = Map[AggregateId, CacheValue]()
sql"""UPDATE event_bus SET aggregate_version = ? WHERE aggregate_id = ? AND aggregate_version = ?"""
.batch(params: _*).apply()
          // TODO check if all updates occurred
}
} catch {
case e: Exception => e.printStackTrace(); throw e;
}
}
Success(())
}
}
|
marpiec/ReactiveCQRS
|
core/src/main/scala/io/reactivecqrs/core/eventbus/EventBusState.scala
|
Scala
|
apache-2.0
| 5,663
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import scala.collection.JavaConverters._
import org.scalatest.BeforeAndAfter
import org.apache.spark.sql.hive.test.{TestHive, TestHiveQueryExecution}
import org.apache.spark.sql.internal.SQLConf
/**
* A set of test cases that validate partition and column pruning.
*/
class PruningSuite extends HiveComparisonTest with BeforeAndAfter {
private val originalLimitFlatGlobalLimit = TestHive.conf.limitFlatGlobalLimit
override def beforeAll(): Unit = {
super.beforeAll()
TestHive.setCacheTables(false)
TestHive.setConf(SQLConf.LIMIT_FLAT_GLOBAL_LIMIT, false)
// Column/partition pruning is not implemented for `InMemoryColumnarTableScan` yet,
    // need to reset the environment to ensure all referenced tables in this suite are
// not cached in-memory. Refer to https://issues.apache.org/jira/browse/SPARK-2283
// for details.
TestHive.reset()
}
override def afterAll() {
TestHive.setConf(SQLConf.LIMIT_FLAT_GLOBAL_LIMIT, originalLimitFlatGlobalLimit)
super.afterAll()
}
// Column pruning tests
createPruningTest("Column pruning - with partitioned table",
"SELECT key FROM srcpart WHERE ds = '2008-04-08' LIMIT 3",
Seq("key"),
Seq("key"),
Seq(
Seq("2008-04-08", "11"),
Seq("2008-04-08", "12")))
createPruningTest("Column pruning - with non-partitioned table",
"SELECT key FROM src WHERE key > 10 LIMIT 3",
Seq("key"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - with multiple projects",
"SELECT c1 FROM (SELECT key AS c1 FROM src WHERE key > 10) t1 LIMIT 3",
Seq("c1"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - projects alias substituting",
"SELECT c1 AS c2 FROM (SELECT key AS c1 FROM src WHERE key > 10) t1 LIMIT 3",
Seq("c2"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - filter alias in-lining",
"SELECT c1 FROM (SELECT key AS c1 FROM src WHERE key > 10) t1 WHERE c1 < 100 LIMIT 3",
Seq("c1"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - without filters",
"SELECT c1 FROM (SELECT key AS c1 FROM src) t1 LIMIT 3",
Seq("c1"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - simple top project without aliases",
"SELECT key FROM (SELECT key FROM src WHERE key > 10) t1 WHERE key < 100 LIMIT 3",
Seq("key"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - non-trivial top project with aliases",
"SELECT c1 * 2 AS dbl FROM (SELECT key AS c1 FROM src WHERE key > 10) t1 LIMIT 3",
Seq("dbl"),
Seq("key"),
Seq.empty)
// Partition pruning tests
createPruningTest("Partition pruning - non-partitioned, non-trivial project",
"SELECT key * 2 AS dbl FROM src WHERE value IS NOT NULL",
Seq("dbl"),
Seq("key", "value"),
Seq.empty)
createPruningTest("Partition pruning - non-partitioned table",
"SELECT value FROM src WHERE key IS NOT NULL",
Seq("value"),
Seq("value", "key"),
Seq.empty)
createPruningTest("Partition pruning - with filter on string partition key",
"SELECT value, hr FROM srcpart1 WHERE ds = '2008-04-08'",
Seq("value", "hr"),
Seq("value", "hr"),
Seq(
Seq("2008-04-08", "11"),
Seq("2008-04-08", "12")))
createPruningTest("Partition pruning - with filter on int partition key",
"SELECT value, hr FROM srcpart1 WHERE hr < 12",
Seq("value", "hr"),
Seq("value", "hr"),
Seq(
Seq("2008-04-08", "11"),
Seq("2008-04-09", "11")))
createPruningTest("Partition pruning - left only 1 partition",
"SELECT value, hr FROM srcpart1 WHERE ds = '2008-04-08' AND hr < 12",
Seq("value", "hr"),
Seq("value", "hr"),
Seq(
Seq("2008-04-08", "11")))
createPruningTest("Partition pruning - all partitions pruned",
"SELECT value, hr FROM srcpart1 WHERE ds = '2014-01-27' AND hr = 11",
Seq("value", "hr"),
Seq("value", "hr"),
Seq.empty)
createPruningTest("Partition pruning - pruning with both column key and partition key",
"SELECT value, hr FROM srcpart1 WHERE value IS NOT NULL AND hr < 12",
Seq("value", "hr"),
Seq("value", "hr"),
Seq(
Seq("2008-04-08", "11"),
Seq("2008-04-09", "11")))
def createPruningTest(
testCaseName: String,
sql: String,
expectedOutputColumns: Seq[String],
expectedScannedColumns: Seq[String],
expectedPartValues: Seq[Seq[String]]): Unit = {
test(s"$testCaseName - pruning test") {
val plan = new TestHiveQueryExecution(sql).sparkPlan
val actualOutputColumns = plan.output.map(_.name)
val (actualScannedColumns, actualPartValues) = plan.collect {
case p @ HiveTableScanExec(columns, relation, _) =>
val columnNames = columns.map(_.name)
val partValues = if (relation.isPartitioned) {
p.prunePartitions(p.rawPartitions).map(_.getValues)
} else {
Seq.empty
}
(columnNames, partValues)
}.head
assert(actualOutputColumns === expectedOutputColumns, "Output columns mismatch")
// Scanned columns in `HiveTableScanExec` are generated by the `pruneFilterProject` method
// in `SparkPlanner`. This method internally uses `AttributeSet.toSeq`, in which
// the returned output columns are sorted by the names and expression ids.
assert(actualScannedColumns.sorted === expectedScannedColumns.sorted,
"Scanned columns mismatch")
val actualPartitions = actualPartValues.map(_.asScala.mkString(",")).sorted
val expectedPartitions = expectedPartValues.map(_.mkString(",")).sorted
assert(actualPartitions === expectedPartitions, "Partitions selected do not match")
}
// Creates a query test to compare query results generated by Hive and Catalyst.
createQueryTest(s"$testCaseName - query test", sql)
}
}
|
rekhajoshm/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala
|
Scala
|
apache-2.0
| 6,738
|
package org.denigma.nlp
import org.denigma.binding.views.BindableView
import org.scalajs.dom
import org.scalajs.dom.Element
import org.scalajs.dom.raw._
import rx.Var
import org.denigma.binding.extensions._
class ContentManager() extends BindableView
{
override lazy val id: String = "main"
lazy val elem: Element = dom.document.body
val selections = Var(List.empty[org.scalajs.dom.raw.Range])
override def bindView() = {
super.bindView()
dom.window.document.onselectionchange = onSelectionChange _
println("CONTENT script is working!")
    selections.onChange {
      case ss => println("selection changed to: " + ss)
}
}
protected def onSelectionChange(event: Event) = {
val selection: Selection = dom.window.getSelection()
val count = selection.rangeCount
if (count > 0) {
val values = {
for {
i <- 0 until count
range = selection.getRangeAt(i)
} yield range
}.toList
selections() = values
}
}
}
|
antonkulaga/bio-nlp
|
chrome-bio/src/main/scala/org/denigma/nlp/ContentManager.scala
|
Scala
|
mpl-2.0
| 1,002
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.catalog
import org.apache.flink.table.api.config.{ExecutionConfigOptions, TableConfigOptions}
import org.apache.flink.table.api.internal.TableEnvironmentImpl
import org.apache.flink.table.api.{EnvironmentSettings, TableEnvironment, ValidationException}
import org.apache.flink.table.catalog.{CatalogDatabaseImpl, CatalogFunctionImpl, GenericInMemoryCatalog, ObjectPath}
import org.apache.flink.table.planner.expressions.utils.Func0
import org.apache.flink.table.planner.factories.utils.TestCollectionTableFactory
import org.apache.flink.table.planner.runtime.utils.JavaUserDefinedScalarFunctions.JavaFunc0
import org.apache.flink.table.planner.utils.DateTimeTestUtil.localDateTime
import org.apache.flink.test.util.AbstractTestBase
import org.apache.flink.types.Row
import org.apache.flink.util.FileUtils
import org.junit.Assert.{assertEquals, fail}
import org.junit.rules.ExpectedException
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.{Before, Rule, Test}
import java.io.File
import java.math.{BigDecimal => JBigDecimal}
import java.net.URI
import java.util
import scala.collection.JavaConversions._
/** Test cases for catalog table. */
@RunWith(classOf[Parameterized])
class CatalogTableITCase(isStreamingMode: Boolean) extends AbstractTestBase {
//~ Instance fields --------------------------------------------------------
private val settings = if (isStreamingMode) {
EnvironmentSettings.newInstance().inStreamingMode().build()
} else {
EnvironmentSettings.newInstance().inBatchMode().build()
}
private val tableEnv: TableEnvironment = TableEnvironmentImpl.create(settings)
var _expectedEx: ExpectedException = ExpectedException.none
@Rule
def expectedEx: ExpectedException = _expectedEx
@Before
def before(): Unit = {
tableEnv.getConfig.getConfiguration.setBoolean(
TableConfigOptions.TABLE_DYNAMIC_TABLE_OPTIONS_ENABLED,
true)
tableEnv.getConfig
.getConfiguration
.setInteger(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1)
TestCollectionTableFactory.reset()
val func = new CatalogFunctionImpl(
classOf[JavaFunc0].getName)
tableEnv.getCatalog(tableEnv.getCurrentCatalog).get().createFunction(
new ObjectPath(tableEnv.getCurrentDatabase, "myfunc"),
func,
true)
}
//~ Tools ------------------------------------------------------------------
implicit def rowOrdering: Ordering[Row] = Ordering.by((r : Row) => {
val builder = new StringBuilder
0 until r.getArity foreach(idx => builder.append(r.getField(idx)))
builder.toString()
})
def toRow(args: Any*):Row = {
val row = new Row(args.length)
0 until args.length foreach {
i => row.setField(i, args(i))
}
row
}
//~ Tests ------------------------------------------------------------------
private def testUdf(funcPrefix: String): Unit = {
val sinkDDL =
s"""
|create table sinkT(
| a bigint
|) with (
| 'connector' = 'COLLECTION',
| 'is-bounded' = '$isStreamingMode'
|)
""".stripMargin
tableEnv.executeSql(sinkDDL)
tableEnv.executeSql(s"insert into sinkT select ${funcPrefix}myfunc(cast(1 as bigint))").await()
assertEquals(Seq(toRow(2L)), TestCollectionTableFactory.RESULT.sorted)
}
@Test
def testUdfWithFullIdentifier(): Unit = {
testUdf("default_catalog.default_database.")
}
@Test
def testUdfWithDatabase(): Unit = {
testUdf("default_database.")
}
@Test
def testUdfWithNon(): Unit = {
testUdf("")
}
@Test(expected = classOf[ValidationException])
def testUdfWithWrongCatalog(): Unit = {
testUdf("wrong_catalog.default_database.")
}
@Test(expected = classOf[ValidationException])
def testUdfWithWrongDatabase(): Unit = {
testUdf("default_catalog.wrong_database.")
}
@Test
def testInsertInto(): Unit = {
val sourceData = List(
toRow(1, "1000", 2, new JBigDecimal("10.001")),
toRow(2, "1", 3, new JBigDecimal("10.001")),
toRow(3, "2000", 4, new JBigDecimal("10.001")),
toRow(1, "2", 2, new JBigDecimal("10.001")),
toRow(2, "3000", 3, new JBigDecimal("10.001"))
)
TestCollectionTableFactory.initData(sourceData)
val sourceDDL =
"""
|create table t1(
| a int,
| b varchar,
| c int,
| d DECIMAL(10, 3)
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val sinkDDL =
"""
|create table t2(
| a int,
| b varchar,
| c int,
| d DECIMAL(10, 3)
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val query =
"""
|insert into t2
|select t1.a, t1.b, (t1.a + 1) as c , d from t1
""".stripMargin
tableEnv.executeSql(sourceDDL)
tableEnv.executeSql(sinkDDL)
tableEnv.executeSql(query).await()
assertEquals(sourceData.sorted, TestCollectionTableFactory.RESULT.sorted)
}
@Test
def testReadWriteCsvUsingDDL(): Unit = {
val csvRecords = Seq(
"2.02,Euro,2019-12-12 00:00:01.001001",
"1.11,US Dollar,2019-12-12 00:00:02.002001",
"50,Yen,2019-12-12 00:00:04.004001",
"3.1,Euro,2019-12-12 00:00:05.005001",
"5.33,US Dollar,2019-12-12 00:00:06.006001"
)
val tempFilePath = createTempFile(
"csv-order-test",
csvRecords.mkString("#"))
val sourceDDL =
s"""
|CREATE TABLE T1 (
| price DECIMAL(10, 2),
| currency STRING,
| ts6 TIMESTAMP(6),
| ts AS CAST(ts6 AS TIMESTAMP(3)),
| WATERMARK FOR ts AS ts
|) WITH (
| 'connector.type' = 'filesystem',
| 'connector.path' = '$tempFilePath',
| 'format.type' = 'csv',
| 'format.field-delimiter' = ',',
| 'format.line-delimiter' = '#'
|)
""".stripMargin
tableEnv.executeSql(sourceDDL)
val sinkFilePath = getTempFilePath("csv-order-sink")
val sinkDDL =
s"""
|CREATE TABLE T2 (
| window_end TIMESTAMP(3),
| max_ts TIMESTAMP(6),
| counter BIGINT,
| total_price DECIMAL(10, 2)
|) with (
| 'connector.type' = 'filesystem',
| 'connector.path' = '$sinkFilePath',
| 'format.type' = 'csv',
| 'format.field-delimiter' = ','
|)
""".stripMargin
tableEnv.executeSql(sinkDDL)
val query =
"""
|INSERT INTO T2
|SELECT
| TUMBLE_END(ts, INTERVAL '5' SECOND),
| MAX(ts6),
| COUNT(*),
| MAX(price)
|FROM T1
|GROUP BY TUMBLE(ts, INTERVAL '5' SECOND)
""".stripMargin
tableEnv.executeSql(query).await()
val expected =
"2019-12-12 00:00:05.0,2019-12-12 00:00:04.004001,3,50.00\\n" +
"2019-12-12 00:00:10.0,2019-12-12 00:00:06.006001,2,5.33\\n"
assertEquals(expected, FileUtils.readFileUtf8(new File(new URI(sinkFilePath))))
}
@Test
def testReadWriteCsvWithDynamicTableOptions(): Unit = {
val csvRecords = Seq(
"2.02,Euro,2019-12-12 00:00:01.001001",
"1.11,US Dollar,2019-12-12 00:00:02.002001",
"50,Yen,2019-12-12 00:00:04.004001",
"3.1,Euro,2019-12-12 00:00:05.005001",
"5.33,US Dollar,2019-12-12 00:00:06.006001"
)
val tempFilePath = createTempFile(
"csv-order-test",
csvRecords.mkString("#"))
val sourceDDL =
s"""
|CREATE TABLE T1 (
| price DECIMAL(10, 2),
| currency STRING,
| ts6 TIMESTAMP(6),
| ts AS CAST(ts6 AS TIMESTAMP(3)),
| WATERMARK FOR ts AS ts
|) WITH (
| 'connector.type' = 'filesystem',
| 'connector.path' = '$tempFilePath',
| 'format.type' = 'csv',
| 'format.field-delimiter' = ','
|)
""".stripMargin
tableEnv.executeSql(sourceDDL)
val sinkFilePath = getTempFilePath("csv-order-sink")
val sinkDDL =
s"""
|CREATE TABLE T2 (
| window_end TIMESTAMP(3),
| max_ts TIMESTAMP(6),
| counter BIGINT,
| total_price DECIMAL(10, 2)
|) with (
| 'connector.type' = 'filesystem',
| 'connector.path' = '$sinkFilePath',
| 'format.type' = 'csv'
|)
""".stripMargin
tableEnv.executeSql(sinkDDL)
val query =
"""
|INSERT INTO T2 /*+ OPTIONS('format.field-delimiter' = '|') */
|SELECT
| TUMBLE_END(ts, INTERVAL '5' SECOND),
| MAX(ts6),
| COUNT(*),
| MAX(price)
|FROM T1 /*+ OPTIONS('format.line-delimiter' = '#') */
|GROUP BY TUMBLE(ts, INTERVAL '5' SECOND)
""".stripMargin
tableEnv.executeSql(query).await()
val expected =
"2019-12-12 00:00:05.0|2019-12-12 00:00:04.004001|3|50.00\\n" +
"2019-12-12 00:00:10.0|2019-12-12 00:00:06.006001|2|5.33\\n"
assertEquals(expected, FileUtils.readFileUtf8(new File(new URI(sinkFilePath))))
}
@Test
def testInsertSourceTableExpressionFields(): Unit = {
val sourceData = List(
toRow(1, "1000"),
toRow(2, "1"),
toRow(3, "2000"),
toRow(1, "2"),
toRow(2, "3000")
)
val expected = List(
toRow(1, "1000", 2),
toRow(2, "1", 3),
toRow(3, "2000", 4),
toRow(1, "2", 2),
toRow(2, "3000", 3)
)
TestCollectionTableFactory.initData(sourceData)
val sourceDDL =
"""
|create table t1(
| a int,
| b varchar,
| c as a + 1
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val sinkDDL =
"""
|create table t2(
| a int,
| b varchar,
| c int
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val query =
"""
|insert into t2
|select t1.a, t1.b, t1.c from t1
""".stripMargin
tableEnv.executeSql(sourceDDL)
tableEnv.executeSql(sinkDDL)
tableEnv.executeSql(query).await()
assertEquals(expected.sorted, TestCollectionTableFactory.RESULT.sorted)
}
// Test the computation expression in front of referenced columns.
@Test
def testInsertSourceTableExpressionFieldsBeforeReferences(): Unit = {
val sourceData = List(
toRow(1, "1000"),
toRow(2, "1"),
toRow(3, "2000"),
toRow(2, "2"),
toRow(2, "3000")
)
val expected = List(
toRow(101, 1, "1000"),
toRow(102, 2, "1"),
toRow(103, 3, "2000"),
toRow(102, 2, "2"),
toRow(102, 2, "3000")
)
TestCollectionTableFactory.initData(sourceData)
val sourceDDL =
"""
|create table t1(
| c as a + 100,
| a int,
| b varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val sinkDDL =
"""
|create table t2(
| c int,
| a int,
| b varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val query =
"""
|insert into t2
|select t1.c, t1.a, t1.b from t1
""".stripMargin
tableEnv.executeSql(sourceDDL)
tableEnv.executeSql(sinkDDL)
tableEnv.executeSql(query).await()
assertEquals(expected.sorted, TestCollectionTableFactory.RESULT.sorted)
}
@Test
def testInsertSourceTableWithFuncField(): Unit = {
val sourceData = List(
toRow(1, "1990-02-10 12:34:56"),
toRow(2, "2019-09-10 09:23:41"),
toRow(3, "2019-09-10 09:23:42"),
toRow(1, "2019-09-10 09:23:43"),
toRow(2, "2019-09-10 09:23:44")
)
val expected = List(
toRow(1, "1990-02-10 12:34:56", localDateTime("1990-02-10 12:34:56")),
toRow(2, "2019-09-10 09:23:41", localDateTime("2019-09-10 09:23:41")),
toRow(3, "2019-09-10 09:23:42", localDateTime("2019-09-10 09:23:42")),
toRow(1, "2019-09-10 09:23:43", localDateTime("2019-09-10 09:23:43")),
toRow(2, "2019-09-10 09:23:44", localDateTime("2019-09-10 09:23:44"))
)
TestCollectionTableFactory.initData(sourceData)
val sourceDDL =
"""
|create table t1(
| a int,
| b varchar,
| c as to_timestamp(b)
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val sinkDDL =
"""
|create table t2(
| a int,
| b varchar,
| c timestamp(3)
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val query =
"""
|insert into t2
|select t1.a, t1.b, t1.c from t1
""".stripMargin
tableEnv.executeSql(sourceDDL)
tableEnv.executeSql(sinkDDL)
tableEnv.executeSql(query).await()
assertEquals(expected.sorted, TestCollectionTableFactory.RESULT.sorted)
}
@Test
def testInsertSourceTableWithUserDefinedFuncField(): Unit = {
val sourceData = List(
toRow(1, "1990-02-10 12:34:56"),
toRow(2, "2019-09-10 9:23:41"),
toRow(3, "2019-09-10 9:23:42"),
toRow(1, "2019-09-10 9:23:43"),
toRow(2, "2019-09-10 9:23:44")
)
val expected = List(
toRow(1, "1990-02-10 12:34:56", 1, "1990-02-10 12:34:56"),
toRow(2, "2019-09-10 9:23:41", 2, "2019-09-10 9:23:41"),
toRow(3, "2019-09-10 9:23:42", 3, "2019-09-10 9:23:42"),
toRow(1, "2019-09-10 9:23:43", 1, "2019-09-10 9:23:43"),
toRow(2, "2019-09-10 9:23:44", 2, "2019-09-10 9:23:44")
)
TestCollectionTableFactory.initData(sourceData)
tableEnv.registerFunction("my_udf", Func0)
val sourceDDL =
"""
|create table t1(
| a int,
| `time` varchar,
| c as my_udf(a),
| d as `time`
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val sinkDDL =
"""
|create table t2(
| a int,
| `time` varchar,
| c int not null,
| d varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val query =
"""
|insert into t2
|select t1.a, t1.`time`, t1.c, t1.d from t1
""".stripMargin
tableEnv.executeSql(sourceDDL)
tableEnv.executeSql(sinkDDL)
tableEnv.executeSql(query).await()
assertEquals(expected.sorted, TestCollectionTableFactory.RESULT.sorted)
}
@Test
def testInsertSinkTableExpressionFields(): Unit = {
val sourceData = List(
toRow(1, "1000"),
toRow(2, "1"),
toRow(3, "2000"),
toRow(1, "2"),
toRow(2, "3000")
)
val expected = List(
toRow(1, 2),
toRow(1, 2),
toRow(2, 3),
toRow(2, 3),
toRow(3, 4)
)
TestCollectionTableFactory.initData(sourceData)
val sourceDDL =
"""
|create table t1(
| a int,
| b varchar,
| c as a + 1
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val sinkDDL =
"""
|create table t2(
| a int,
| b as c - 1,
| c int
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val query =
"""
|insert into t2
|select t1.a, t1.c from t1
""".stripMargin
tableEnv.executeSql(sourceDDL)
tableEnv.executeSql(sinkDDL)
tableEnv.executeSql(query).await()
assertEquals(expected.sorted, TestCollectionTableFactory.RESULT.sorted)
}
@Test
def testInsertSinkTableWithUnmatchedFields(): Unit = {
val sourceData = List(
toRow(1, "1000"),
toRow(2, "1"),
toRow(3, "2000"),
toRow(1, "2"),
toRow(2, "3000")
)
TestCollectionTableFactory.initData(sourceData)
val sourceDDL =
"""
|create table t1(
| a int,
| b varchar,
| c as a + 1
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val sinkDDL =
"""
|create table t2(
| a int,
| b as cast(a as varchar(20)) || cast(c as varchar(20)),
| c int
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val query =
"""
|insert into t2
|select t1.a, t1.b from t1
""".stripMargin
tableEnv.executeSql(sourceDDL)
tableEnv.executeSql(sinkDDL)
expectedEx.expect(classOf[ValidationException])
expectedEx.expectMessage("Incompatible types for sink column 'c' at position 1.")
tableEnv.executeSql(query).await()
}
@Test
def testInsertWithJoinedSource(): Unit = {
val sourceData = List(
toRow(1, 1000, 2),
toRow(2, 1, 3),
toRow(3, 2000, 4),
toRow(1, 2, 2),
toRow(2, 3000, 3)
)
val expected = List(
toRow(1, 1000, 2, 1),
toRow(1, 2, 2, 1),
toRow(2, 1, 1, 2),
toRow(2, 3000, 1, 2)
)
TestCollectionTableFactory.initData(sourceData)
val sourceDDL =
"""
|create table t1(
| a int,
| b int,
| c int
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val sinkDDL =
"""
|create table t2(
| a int,
| b int,
| c int,
| d int
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val query =
"""
|insert into t2
|select a.a, a.b, b.a, b.b
| from t1 a
| join t1 b
| on a.a = b.b
""".stripMargin
tableEnv.executeSql(sourceDDL)
tableEnv.executeSql(sinkDDL)
tableEnv.executeSql(query).await()
assertEquals(expected.sorted, TestCollectionTableFactory.RESULT.sorted)
}
@Test
def testInsertWithAggregateSource(): Unit = {
if (isStreamingMode) {
return
}
val sourceData = List(
toRow(1, 1000, 2),
toRow(2, 1000, 3),
toRow(3, 2000, 4),
toRow(4, 2000, 5),
toRow(5, 3000, 6)
)
val expected = List(
toRow(3, 1000),
toRow(5, 3000),
toRow(7, 2000)
)
TestCollectionTableFactory.initData(sourceData)
val sourceDDL =
"""
|create table t1(
| a int,
| b int,
| c int
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val sinkDDL =
"""
|create table t2(
| a int,
| b int
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val query =
"""
|insert into t2
|select sum(a), t1.b from t1 group by t1.b
""".stripMargin
tableEnv.executeSql(sourceDDL)
tableEnv.executeSql(sinkDDL)
tableEnv.executeSql(query).await()
assertEquals(expected.sorted, TestCollectionTableFactory.RESULT.sorted)
}
@Test
def testTemporaryTableMaskPermanentTableWithSameName(): Unit = {
val sourceData = List(
toRow(1, "1000", 2),
toRow(2, "1", 3),
toRow(3, "2000", 4),
toRow(1, "2", 2),
toRow(2, "3000", 3))
val permanentTable =
"""
|CREATE TABLE T1(
| a int,
| b varchar,
| d int
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val temporaryTable =
"""
|CREATE TEMPORARY TABLE T1(
| a int,
| b varchar,
| c int,
| d as c+1
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val sinkTable =
"""
|CREATE TABLE T2(
| a int,
| b varchar,
| c int
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val permanentData = List(
toRow(1, "1000", 2),
toRow(2, "1", 3),
toRow(3, "2000", 4),
toRow(1, "2", 2),
toRow(2, "3000", 3))
val temporaryData = List(
toRow(1, "1000", 3),
toRow(2, "1", 4),
toRow(3, "2000", 5),
toRow(1, "2", 3),
toRow(2, "3000", 4))
tableEnv.executeSql(permanentTable)
tableEnv.executeSql(temporaryTable)
tableEnv.executeSql(sinkTable)
TestCollectionTableFactory.initData(sourceData)
val query = "SELECT a, b, d FROM T1"
tableEnv.sqlQuery(query).executeInsert("T2").await()
// temporary table T1 masks permanent table T1
assertEquals(temporaryData.sorted, TestCollectionTableFactory.RESULT.sorted)
TestCollectionTableFactory.reset()
TestCollectionTableFactory.initData(sourceData)
val dropTemporaryTable =
"""
|DROP TEMPORARY TABLE IF EXISTS T1
""".stripMargin
tableEnv.executeSql(dropTemporaryTable)
tableEnv.sqlQuery(query).executeInsert("T2").await()
    // now only the permanent table T1 remains
assertEquals(permanentData.sorted, TestCollectionTableFactory.RESULT.sorted)
}
@Test
def testDropTableWithFullPath(): Unit = {
val ddl1 =
"""
|create table t1(
| a bigint,
| b bigint,
| c varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val ddl2 =
"""
|create table t2(
| a bigint,
| b bigint
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
tableEnv.executeSql(ddl1)
tableEnv.executeSql(ddl2)
assert(tableEnv.listTables().sameElements(Array[String]("t1", "t2")))
tableEnv.executeSql("DROP TABLE default_catalog.default_database.t2")
assert(tableEnv.listTables().sameElements(Array("t1")))
}
@Test
def testDropTableWithPartialPath(): Unit = {
val ddl1 =
"""
|create table t1(
| a bigint,
| b bigint,
| c varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val ddl2 =
"""
|create table t2(
| a bigint,
| b bigint
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
tableEnv.executeSql(ddl1)
tableEnv.executeSql(ddl2)
assert(tableEnv.listTables().sameElements(Array[String]("t1", "t2")))
tableEnv.executeSql("DROP TABLE default_database.t2")
tableEnv.executeSql("DROP TABLE t1")
assert(tableEnv.listTables().isEmpty)
}
@Test(expected = classOf[ValidationException])
def testDropTableWithInvalidPath(): Unit = {
val ddl1 =
"""
|create table t1(
| a bigint,
| b bigint,
| c varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
tableEnv.executeSql(ddl1)
assert(tableEnv.listTables().sameElements(Array[String]("t1")))
tableEnv.executeSql("DROP TABLE catalog1.database1.t1")
}
@Test
def testDropTableWithInvalidPathIfExists(): Unit = {
val ddl1 =
"""
|create table t1(
| a bigint,
| b bigint,
| c varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
tableEnv.executeSql(ddl1)
assert(tableEnv.listTables().sameElements(Array[String]("t1")))
tableEnv.executeSql("DROP TABLE IF EXISTS catalog1.database1.t1")
assert(tableEnv.listTables().sameElements(Array[String]("t1")))
}
@Test
def testDropTableSameNameWithTemporaryTable(): Unit = {
val createTable1 =
"""
|create table t1(
| a bigint,
| b bigint,
| c varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
val createTable2 =
"""
|create temporary table t1(
| a bigint,
| b bigint,
| c varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
tableEnv.executeSql(createTable1)
tableEnv.executeSql(createTable2)
expectedEx.expect(classOf[ValidationException])
expectedEx.expectMessage("Temporary table with identifier "
+ "'`default_catalog`.`default_database`.`t1`' exists. "
+ "Drop it first before removing the permanent table.")
tableEnv.executeSql("drop table t1")
}
@Test
def testDropViewSameNameWithTable(): Unit = {
val createTable1 =
"""
|create table t1(
| a bigint,
| b bigint,
| c varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
tableEnv.executeSql(createTable1)
expectedEx.expect(classOf[ValidationException])
expectedEx.expectMessage("View with identifier "
+ "'default_catalog.default_database.t1' does not exist.")
tableEnv.executeSql("drop view t1")
}
@Test
def testDropViewSameNameWithTableIfNotExists(): Unit = {
val createTable1 =
"""
|create table t1(
| a bigint,
| b bigint,
| c varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
tableEnv.executeSql(createTable1)
tableEnv.executeSql("drop view if exists t1")
assert(tableEnv.listTables().sameElements(Array("t1")))
}
@Test
def testAlterTable(): Unit = {
val ddl1 =
"""
|create table t1(
| a bigint not null,
| b bigint,
| c varchar
|) with (
| 'connector' = 'COLLECTION',
| 'k1' = 'v1'
|)
""".stripMargin
tableEnv.executeSql(ddl1)
tableEnv.executeSql("alter table t1 rename to t2")
assert(tableEnv.listTables().sameElements(Array[String]("t2")))
tableEnv.executeSql("alter table t2 set ('k1' = 'a', 'k2' = 'b')")
val expectedProperties = new util.HashMap[String, String]()
expectedProperties.put("connector", "COLLECTION")
expectedProperties.put("k1", "a")
expectedProperties.put("k2", "b")
val properties = tableEnv.getCatalog(tableEnv.getCurrentCatalog).get()
.getTable(new ObjectPath(tableEnv.getCurrentDatabase, "t2"))
.getProperties
assertEquals(expectedProperties, properties)
val currentCatalog = tableEnv.getCurrentCatalog
val currentDB = tableEnv.getCurrentDatabase
tableEnv.executeSql("alter table t2 add constraint ct1 primary key(a) not enforced")
val tableSchema1 = tableEnv.getCatalog(currentCatalog).get()
.getTable(ObjectPath.fromString(s"${currentDB}.t2"))
.getSchema
assert(tableSchema1.getPrimaryKey.isPresent)
assertEquals("CONSTRAINT ct1 PRIMARY KEY (a)",
tableSchema1.getPrimaryKey.get().asSummaryString())
tableEnv.executeSql("alter table t2 drop constraint ct1")
val tableSchema2 = tableEnv.getCatalog(currentCatalog).get()
.getTable(ObjectPath.fromString(s"${currentDB}.t2"))
.getSchema
assertEquals(false, tableSchema2.getPrimaryKey.isPresent)
}
@Test
def testUseCatalogAndShowCurrentCatalog(): Unit = {
tableEnv.registerCatalog("cat1", new GenericInMemoryCatalog("cat1"))
tableEnv.registerCatalog("cat2", new GenericInMemoryCatalog("cat2"))
tableEnv.executeSql("use catalog cat1")
assertEquals("cat1", tableEnv.getCurrentCatalog)
tableEnv.executeSql("use catalog cat2")
assertEquals("cat2", tableEnv.getCurrentCatalog)
assertEquals("+I[cat2]", tableEnv.executeSql("show current catalog").collect().next().toString)
}
@Test
def testUseDatabaseAndShowCurrentDatabase(): Unit = {
val catalog = new GenericInMemoryCatalog("cat1")
tableEnv.registerCatalog("cat1", catalog)
val catalogDB1 = new CatalogDatabaseImpl(new util.HashMap[String, String](), "db1")
val catalogDB2 = new CatalogDatabaseImpl(new util.HashMap[String, String](), "db2")
catalog.createDatabase("db1", catalogDB1, true)
catalog.createDatabase("db2", catalogDB2, true)
tableEnv.executeSql("use cat1.db1")
assertEquals("db1", tableEnv.getCurrentDatabase)
var currentDatabase = tableEnv.executeSql("show current database").collect().next().toString
assertEquals("+I[db1]", currentDatabase)
tableEnv.executeSql("use db2")
assertEquals("db2", tableEnv.getCurrentDatabase)
currentDatabase = tableEnv.executeSql("show current database").collect().next().toString
assertEquals("+I[db2]", currentDatabase)
}
@Test
def testCreateDatabase(): Unit = {
tableEnv.registerCatalog("cat1", new GenericInMemoryCatalog("default"))
tableEnv.registerCatalog("cat2", new GenericInMemoryCatalog("default"))
tableEnv.executeSql("use catalog cat1")
tableEnv.executeSql("create database db1 ")
tableEnv.executeSql("create database if not exists db1 ")
try {
tableEnv.executeSql("create database db1 ")
fail("ValidationException expected")
} catch {
case _: ValidationException => //ignore
}
tableEnv.executeSql("create database cat2.db1 comment 'test_comment'" +
" with ('k1' = 'v1', 'k2' = 'v2')")
val database = tableEnv.getCatalog("cat2").get().getDatabase("db1")
assertEquals("test_comment", database.getComment)
assertEquals(2, database.getProperties.size())
val expectedProperty = new util.HashMap[String, String]()
expectedProperty.put("k1", "v1")
expectedProperty.put("k2", "v2")
assertEquals(expectedProperty, database.getProperties)
}
@Test
def testDropDatabase(): Unit = {
tableEnv.registerCatalog("cat1", new GenericInMemoryCatalog("default"))
tableEnv.executeSql("use catalog cat1")
tableEnv.executeSql("create database db1")
tableEnv.executeSql("drop database db1")
tableEnv.executeSql("drop database if exists db1")
try {
tableEnv.executeSql("drop database db1")
fail("ValidationException expected")
} catch {
case _: ValidationException => //ignore
}
tableEnv.executeSql("create database db1")
tableEnv.executeSql("use db1")
val ddl1 =
"""
|create table t1(
| a bigint,
| b bigint,
| c varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
tableEnv.executeSql(ddl1)
val ddl2 =
"""
|create table t2(
| a bigint,
| b bigint,
| c varchar
|) with (
| 'connector' = 'COLLECTION'
|)
""".stripMargin
tableEnv.executeSql(ddl2)
try {
tableEnv.executeSql("drop database db1")
fail("ValidationException expected")
} catch {
case _: ValidationException => //ignore
}
tableEnv.executeSql("drop database db1 cascade")
}
@Test
def testAlterDatabase(): Unit = {
tableEnv.registerCatalog("cat1", new GenericInMemoryCatalog("default"))
tableEnv.executeSql("use catalog cat1")
tableEnv.executeSql("create database db1 comment 'db1_comment' with ('k1' = 'v1')")
tableEnv.executeSql("alter database db1 set ('k1' = 'a', 'k2' = 'b')")
val database = tableEnv.getCatalog("cat1").get().getDatabase("db1")
assertEquals("db1_comment", database.getComment)
assertEquals(2, database.getProperties.size())
val expectedProperty = new util.HashMap[String, String]()
expectedProperty.put("k1", "a")
expectedProperty.put("k2", "b")
assertEquals(expectedProperty, database.getProperties)
}
}
object CatalogTableITCase {
@Parameterized.Parameters(name = "{0}")
def parameters(): java.util.Collection[Boolean] = {
util.Arrays.asList(true, false)
}
}
|
aljoscha/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/catalog/CatalogTableITCase.scala
|
Scala
|
apache-2.0
| 32,152
|
package com.typesafe.sbt.packager
import sbt._
import sbt.Keys.TaskStreams
import java.io.File
import com.typesafe.sbt.packager.Compat._
object Stager {
/**
* create a cache and sync files if needed
*
* @param config - create a configuration specific cache directory
* @param cacheDirectory - e.g. streams.value.cacheDirectory
* @param stageDirectory - staging directory
* @param mappings - staging content
*
* @example {{{
*
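   *   // A minimal sketch (the config name, staging directory and mappings values
   *   // are illustrative assumptions, not taken from this file):
   *   val staged: File = Stager.stage("universal")(streams.value, target.value / "stage", mappings)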
* }}}
*/
def stageFiles(config: String)(cacheDirectory: File, stageDirectory: File, mappings: Seq[(File, String)]): File = {
val cache = cacheDirectory / ("packager-mappings-" + config)
val copies = mappings map {
case (file, path) => file -> (stageDirectory / path)
}
Sync(cache, FileInfo.hash, FileInfo.exists)(copies)
// Now set scripts to executable using Java's lack of understanding of permissions.
// TODO - Config file user-readable permissions....
for {
(from, to) <- copies
// Only set executable permission if it needs to be set. Note: calling to.setExecutable(true) if it's already
// executable is undesirable for a developer using inotify to watch this file as it will trigger events again
if from.canExecute && !to.canExecute
} to.setExecutable(true)
stageDirectory
}
/**
* @see stageFiles
*/
def stage(config: String)(streams: TaskStreams, stageDirectory: File, mappings: Seq[(File, String)]): File =
stageFiles(config)(streams.cacheDirectory, stageDirectory, mappings)
}
|
sbt/sbt-native-packager
|
src/main/scala/com/typesafe/sbt/packager/Stager.scala
|
Scala
|
bsd-2-clause
| 1,552
|
package org.apache.spark.sql
import org.apache.spark.sql.snowflake.{SFQueryTest, SFTestData, SFTestSessionBase}
class SFDataFrameWindowFunctionsSuite
extends DataFrameWindowFunctionsSuite
with SFTestSessionBase
with SFQueryTest
with SFTestData {
override def spark: SparkSession = getSnowflakeSession()
override protected def blackList: Seq[String] =
Seq(
// TS - corr, covar_pop, stddev_pop functions in specific window
"corr, covar_pop, stddev_pop functions in specific window",
// TS - covar_samp, var_samp (variance), stddev_samp (stddev) functions in specific window
"covar_samp, var_samp (variance), stddev_samp (stddev) functions in specific window",
// TS - NaN and -0.0 in window partition keys
"NaN and -0.0 in window partition keys"
)
}
|
snowflakedb/spark-snowflakedb
|
src/it/scala/org/apache/spark/sql/SFDataFrameWindowFunctionsSuite.scala
|
Scala
|
apache-2.0
| 816
|
/*
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.extensions.iterativebatch.compiler
package graph
import scala.collection.JavaConversions._
import org.objectweb.asm.Type
import com.asakusafw.lang.compiler.extension.directio.{ DirectFileIoModels, OutputPattern }
import com.asakusafw.lang.compiler.model.graph.ExternalOutput
import com.asakusafw.lang.compiler.planning.SubPlan
import com.asakusafw.spark.compiler._
import com.asakusafw.spark.compiler.graph.{ CacheOnce, DirectOutputSetupClassBuilder }
import com.asakusafw.spark.compiler.planning.{ IterativeInfo, SubPlanInfo }
import com.asakusafw.spark.compiler.spi.NodeCompiler
object DirectOutputPrepareForIterativeCompiler {
def compile(
subplan: SubPlan)(
implicit context: NodeCompiler.Context): Type = {
val subPlanInfo = subplan.getAttribute(classOf[SubPlanInfo])
val primaryOperator = subPlanInfo.getPrimaryOperator
assert(primaryOperator.isInstanceOf[ExternalOutput],
s"The primary operator should be external output: ${primaryOperator} [${subplan}]")
val operator = primaryOperator.asInstanceOf[ExternalOutput]
assert(DirectFileIoModels.isSupported(operator.getInfo),
s"The subplan is not supported: ${subplan}")
val model = DirectFileIoModels.resolve(operator.getInfo)
val dataModelRef = operator.getOperatorPort.dataModelRef
val pattern = OutputPattern.compile(dataModelRef, model.getResourcePattern, model.getOrder)
val builder = new DirectOutputPrepareForIterativeClassBuilder(
operator)(
pattern,
model)(
subplan.label) with CacheOnce
context.addClass(builder)
}
}
|
asakusafw/asakusafw-spark
|
extensions/iterativebatch/compiler/core/src/main/scala/com/asakusafw/spark/extensions/iterativebatch/compiler/graph/DirectOutputPrepareForIterativeCompiler.scala
|
Scala
|
apache-2.0
| 2,221
|
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.graph.internal.ops
import org.trustedanalytics.sparktk.frame.Frame
import org.apache.spark.sql.functions.{ sum, array, col, count, explode, struct }
import org.graphframes.GraphFrame
import org.apache.spark.sql.DataFrame
import org.graphframes.lib.AggregateMessages
import org.apache.spark.sql.Column
import org.apache.spark.sql.functions.lit
import org.trustedanalytics.sparktk.graph.internal.{ GraphState, GraphSummarization, BaseGraph }
trait TriangleCountSummarization extends BaseGraph {
/**
* Returns a frame with the number of triangles each vertex is contained in
*
* @return The dataframe containing the vertices and their corresponding triangle counts
*/
def triangleCount(): Frame = {
execute[Frame](TriangleCount())
}
}
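// A minimal usage sketch; `graph` stands for any previously constructed sparktk Graph:
//   val counts: Frame = graph.triangleCount()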
case class TriangleCount() extends GraphSummarization[Frame] {
override def work(state: GraphState): Frame = {
new Frame(state.graphFrame.triangleCount.run())
}
}
|
dmsuehir/spark-tk
|
sparktk-core/src/main/scala/org/trustedanalytics/sparktk/graph/internal/ops/TriangleCount.scala
|
Scala
|
apache-2.0
| 1,664
|
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle
import java.io.File
import java.io.FileFilter
import scala.collection.JavaConversions.seqAsJavaList
import scala.collection.JavaConversions.collectionAsScalaIterable
class Directory
class DirectoryFileSpec(name: String, encoding: Option[String], val file: java.io.File) extends RealFileSpec(name, encoding) {
override def toString: String = file.getAbsolutePath()
}
object Directory {
val scalaFileFilter = new FileFilter() {
def accept(file: File): Boolean = file.getAbsolutePath().endsWith(".scala")
}
def getFilesAsJava(encoding: Option[String], files: java.util.List[File]): java.util.List[FileSpec] = {
seqAsJavaList(privateGetFiles(encoding, collectionAsScalaIterable(files)))
}
def getFiles(encoding: Option[String], files: Iterable[File]): List[FileSpec] = {
    privateGetFiles(encoding, files)
}
private[this] def privateGetFiles(encoding: Option[String], files: Iterable[File]): List[FileSpec] = {
files.map(f => {
if (f.isDirectory) {
getFiles(encoding, f.listFiles)
} else if (scalaFileFilter.accept(f)) {
List(new DirectoryFileSpec(f.getAbsolutePath(), encoding, f.getAbsoluteFile()))
} else {
List()
}
}).flatten.toList
}
}
|
firebase/scalastyle
|
src/main/scala/org/scalastyle/Directory.scala
|
Scala
|
apache-2.0
| 1,984
|
/*
* MIT License
*
* Copyright (c) 2016 Gonçalo Marques
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.byteslounge.slickrepo.test.sqlserver
import com.byteslounge.slickrepo.test.{JodaTimeVersionedRepositoryTest, SQLServerConfig}
class SQLServerJodaTimeVersionedRepositoryTest extends JodaTimeVersionedRepositoryTest(SQLServerConfig.config)
|
gonmarques/slick-repo
|
src/test/scala/com/byteslounge/slickrepo/test/sqlserver/SQLServerJodaTimeVersionedRepositoryTest.scala
|
Scala
|
mit
| 1,391
|
package org.apress.prospark
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats
import org.json4s.JField
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput
object HttpApp {
def main(args: Array[String]) {
if (args.length != 2) {
System.err.println(
"Usage: HttpApp <appname> <outputPath>")
System.exit(1)
}
val Seq(appName, outputPath) = args.toSeq
val conf = new SparkConf()
.setAppName(appName)
.setJars(SparkContext.jarOfClass(this.getClass).toSeq)
val batchInterval = 10
val ssc = new StreamingContext(conf, Seconds(batchInterval))
HttpUtils.createStream(ssc, url = "https://www.citibikenyc.com/stations/json", interval = batchInterval)
.flatMap(rec => (parse(rec) \\ "stationBeanList").children)
.filter(rec => {
implicit val formats = DefaultFormats
(rec \\ "statusKey").extract[Integer] != 1
})
.map(rec => rec.filterField {
case JField("id", _) => true
case JField("stationName", _) => true
case JField("statusValue", _) => true
case _ => false
})
.map(rec => {
implicit val formats = DefaultFormats
(rec(0)._2.extract[Integer], rec(1)._2.extract[String], rec(2)._2.extract[String])
})
.saveAsTextFiles(outputPath)
ssc.start()
ssc.awaitTermination()
}
}
|
ZubairNabi/prosparkstreaming
|
Chap5/src/main/scala/org/apress/prospark/L5-18Http.scala
|
Scala
|
apache-2.0
| 1,595
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.{util => ju}
import java.io.{DataInput, DataOutput}
import java.nio.charset.StandardCharsets
import scala.collection.JavaConverters._
import org.apache.hadoop.io._
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat
import org.apache.spark.SparkException
import org.apache.spark.api.java.JavaSparkContext
/**
* A class to test Pyrolite serialization on the Scala side, that will be deserialized
* in Python
*/
case class TestWritable(var str: String, var int: Int, var double: Double) extends Writable {
def this() = this("", 0, 0.0)
def getStr: String = str
def setStr(str: String) { this.str = str }
def getInt: Int = int
def setInt(int: Int) { this.int = int }
def getDouble: Double = double
def setDouble(double: Double) { this.double = double }
def write(out: DataOutput): Unit = {
out.writeUTF(str)
out.writeInt(int)
out.writeDouble(double)
}
def readFields(in: DataInput): Unit = {
str = in.readUTF()
int = in.readInt()
double = in.readDouble()
}
}
private[python] class TestInputKeyConverter extends Converter[Any, Any] {
override def convert(obj: Any): Char = {
obj.asInstanceOf[IntWritable].get().toChar
}
}
private[python] class TestInputValueConverter extends Converter[Any, Any] {
override def convert(obj: Any): ju.List[Double] = {
val m = obj.asInstanceOf[MapWritable]
m.keySet.asScala.map(_.asInstanceOf[DoubleWritable].get()).toSeq.asJava
}
}
private[python] class TestOutputKeyConverter extends Converter[Any, Any] {
override def convert(obj: Any): Text = {
new Text(obj.asInstanceOf[Int].toString)
}
}
private[python] class TestOutputValueConverter extends Converter[Any, Any] {
override def convert(obj: Any): DoubleWritable = {
new DoubleWritable(obj.asInstanceOf[java.util.Map[Double, _]].keySet().iterator().next())
}
}
private[python] class DoubleArrayWritable extends ArrayWritable(classOf[DoubleWritable])
private[python] class DoubleArrayToWritableConverter extends Converter[Any, Writable] {
override def convert(obj: Any): DoubleArrayWritable = obj match {
case arr if arr.getClass.isArray && arr.getClass.getComponentType == classOf[Double] =>
val daw = new DoubleArrayWritable
daw.set(arr.asInstanceOf[Array[Double]].map(new DoubleWritable(_)))
daw
case other => throw new SparkException(s"Data of type $other is not supported")
}
}
private[python] class WritableToDoubleArrayConverter extends Converter[Any, Array[Double]] {
override def convert(obj: Any): Array[Double] = obj match {
case daw : DoubleArrayWritable => daw.get().map(_.asInstanceOf[DoubleWritable].get())
case other => throw new SparkException(s"Data of type $other is not supported")
}
}
/**
 * This object contains methods to generate SequenceFile test data and write it to a
 * given directory (probably a temp directory)
*/
object WriteInputFormatTestDataGenerator {
def main(args: Array[String]) {
val path = args(0)
val sc = new JavaSparkContext("local[4]", "test-writables")
generateData(path, sc)
}
def generateData(path: String, jsc: JavaSparkContext) {
val sc = jsc.sc
val basePath = s"$path/sftestdata/"
val textPath = s"$basePath/sftext/"
val intPath = s"$basePath/sfint/"
val doublePath = s"$basePath/sfdouble/"
val arrPath = s"$basePath/sfarray/"
val mapPath = s"$basePath/sfmap/"
val classPath = s"$basePath/sfclass/"
val bytesPath = s"$basePath/sfbytes/"
val boolPath = s"$basePath/sfbool/"
val nullPath = s"$basePath/sfnull/"
/*
* Create test data for IntWritable, DoubleWritable, Text, BytesWritable,
* BooleanWritable and NullWritable
*/
val intKeys = Seq((1, "aa"), (2, "bb"), (2, "aa"), (3, "cc"), (2, "bb"), (1, "aa"))
sc.parallelize(intKeys).saveAsSequenceFile(intPath)
sc.parallelize(intKeys.map{ case (k, v) => (k.toDouble, v) }).saveAsSequenceFile(doublePath)
sc.parallelize(intKeys.map{ case (k, v) => (k.toString, v) }).saveAsSequenceFile(textPath)
sc.parallelize(intKeys.map{ case (k, v) => (k, v.getBytes(StandardCharsets.UTF_8)) }
).saveAsSequenceFile(bytesPath)
val bools = Seq((1, true), (2, true), (2, false), (3, true), (2, false), (1, false))
sc.parallelize(bools).saveAsSequenceFile(boolPath)
sc.parallelize(intKeys).map{ case (k, v) =>
(new IntWritable(k), NullWritable.get())
}.saveAsSequenceFile(nullPath)
// Create test data for ArrayWritable
val data = Seq(
(1, Array.empty[Double]),
(2, Array(3.0, 4.0, 5.0)),
(3, Array(4.0, 5.0, 6.0))
)
sc.parallelize(data, numSlices = 2)
.map{ case (k, v) =>
val va = new DoubleArrayWritable
va.set(v.map(new DoubleWritable(_)))
(new IntWritable(k), va)
}.saveAsNewAPIHadoopFile[SequenceFileOutputFormat[IntWritable, DoubleArrayWritable]](arrPath)
// Create test data for MapWritable, with keys DoubleWritable and values Text
val mapData = Seq(
(1, Map()),
(2, Map(1.0 -> "cc")),
(3, Map(2.0 -> "dd")),
(2, Map(1.0 -> "aa")),
(1, Map(3.0 -> "bb"))
)
sc.parallelize(mapData, numSlices = 2).map{ case (i, m) =>
val mw = new MapWritable()
m.foreach { case (k, v) =>
mw.put(new DoubleWritable(k), new Text(v))
}
(new IntWritable(i), mw)
}.saveAsSequenceFile(mapPath)
// Create test data for arbitrary custom writable TestWritable
val testClass = Seq(
("1", TestWritable("test1", 1, 1.0)),
("2", TestWritable("test2", 2, 2.3)),
("3", TestWritable("test3", 3, 3.1)),
("5", TestWritable("test56", 5, 5.5)),
("4", TestWritable("test4", 4, 4.2))
)
val rdd = sc.parallelize(testClass, numSlices = 2).map{ case (k, v) => (new Text(k), v) }
rdd.saveAsNewAPIHadoopFile(classPath,
classOf[Text], classOf[TestWritable],
classOf[SequenceFileOutputFormat[Text, TestWritable]])
}
}
|
wangyixiaohuihui/spark2-annotation
|
core/src/main/scala/org/apache/spark/api/python/WriteInputFormatTestDataGenerator.scala
|
Scala
|
apache-2.0
| 6,994
|
package com.twitter.finagle.redis.util
import com.twitter.finagle.redis.protocol._
import java.nio.charset.Charset
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
import org.jboss.netty.util.CharsetUtil
import com.twitter.finagle.redis.protocol.Commands.trimList
trait ErrorConversion {
def getException(msg: String): Throwable
def apply(requirement: Boolean, message: String = "Prerequisite failed") {
if (!requirement) {
throw getException(message)
}
}
def safe[T](fn: => T): T = {
try {
fn
} catch {
case e: Throwable => throw getException(e.getMessage)
}
}
}
object BytesToString {
def apply(arg: Array[Byte], charset: Charset = CharsetUtil.UTF_8) = new String(arg, charset)
def fromList(args: Seq[Array[Byte]], charset: Charset = CharsetUtil.UTF_8) =
args.map { arg => BytesToString(arg, charset) }
def fromTuples(args: Seq[(Array[Byte], Array[Byte])], charset: Charset = CharsetUtil.UTF_8) =
args map { arg => (BytesToString(arg._1), BytesToString(arg._2)) }
def fromTuplesWithDoubles(args: Seq[(Array[Byte], Double)],
charset: Charset = CharsetUtil.UTF_8) =
args map { arg => (BytesToString(arg._1, charset), arg._2) }
}
object GetMonadArg {
def apply(args: Seq[Array[Byte]], command: ChannelBuffer): ChannelBuffer =
ChannelBuffers.wrappedBuffer(trimList(args, 1, CBToString(command))(0))
}
object StringToBytes {
def apply(arg: String, charset: Charset = CharsetUtil.UTF_8) = arg.getBytes(charset)
def fromList(args: List[String], charset: Charset = CharsetUtil.UTF_8) =
args.map { arg =>
arg.getBytes(charset)
}
}
object StringToChannelBuffer {
def apply(string: String, charset: Charset = CharsetUtil.UTF_8) = {
ChannelBuffers.wrappedBuffer(string.getBytes(charset))
}
}
object CBToString {
def apply(arg: ChannelBuffer, charset: Charset = CharsetUtil.UTF_8) = {
arg.toString(charset)
}
def fromList(args: Seq[ChannelBuffer], charset: Charset = CharsetUtil.UTF_8) =
args.map { arg => CBToString(arg, charset) }
def fromTuples(args: Seq[(ChannelBuffer, ChannelBuffer)], charset: Charset = CharsetUtil.UTF_8) =
args map { arg => (CBToString(arg._1), CBToString(arg._2)) }
def fromTuplesWithDoubles(args: Seq[(ChannelBuffer, Double)],
charset: Charset = CharsetUtil.UTF_8) =
args map { arg => (CBToString(arg._1, charset), arg._2) }
}
object NumberFormat {
import com.twitter.finagle.redis.naggati.ProtocolError
def toDouble(arg: String): Double = {
try {
arg.toDouble
} catch {
case e: Throwable => throw new ProtocolError("Unable to convert %s to Double".format(arg))
}
}
def toFloat(arg: String): Float = {
try {
arg.toFloat
} catch {
case e: Throwable => throw new ProtocolError("Unable to convert %s to Float".format(arg))
}
}
def toInt(arg: String): Int = {
try {
arg.toInt
} catch {
case e: Throwable => throw new ProtocolError("Unable to convert %s to Int".format(arg))
}
}
def toLong(arg: String): Long = {
try {
arg.toLong
} catch {
case e: Throwable => throw new ProtocolError("Unable to convert %s to Long".format(arg))
}
}
}
object ReplyFormat {
def toString(items: List[Reply]): List[String] = {
items flatMap {
case BulkReply(message) => List(BytesToString(message.array))
case EmptyBulkReply() => EmptyBulkReplyString
case IntegerReply(id) => List(id.toString)
case StatusReply(message) => List(message)
case ErrorReply(message) => List(message)
case MBulkReply(messages) => ReplyFormat.toString(messages)
case EmptyMBulkReply() => EmptyMBulkReplyString
case _ => Nil
}
}
def toChannelBuffers(items: List[Reply]): List[ChannelBuffer] = {
items flatMap {
case BulkReply(message) => List(message)
case EmptyBulkReply() => EmptyBulkReplyChannelBuffer
case IntegerReply(id) => List(ChannelBuffers.wrappedBuffer(Array(id.toByte)))
case StatusReply(message) => List(StringToChannelBuffer(message))
case ErrorReply(message) => List(StringToChannelBuffer(message))
case MBulkReply(messages) => ReplyFormat.toChannelBuffers(messages)
case EmptyMBulkReply() => EmptyBulkReplyChannelBuffer
case _ => Nil
}
}
private val EmptyBulkReplyString = List(RedisCodec.NIL_VALUE.toString)
private val EmptyMBulkReplyString = List(BytesToString(RedisCodec.NIL_VALUE_BA.array))
private val EmptyBulkReplyChannelBuffer = List(RedisCodec.NIL_VALUE_BA)
}
|
firebase/finagle
|
finagle-redis/src/main/scala/com/twitter/finagle/redis/util/Conversions.scala
|
Scala
|
apache-2.0
| 4,619
|
package mesosphere.util.state.zk
import java.util.UUID
import com.fasterxml.uuid.impl.UUIDUtil
import com.google.protobuf.{ ByteString, InvalidProtocolBufferException }
import com.twitter.util.{ Future => TWFuture }
import com.twitter.zk.{ ZNode, ZkClient }
import mesosphere.marathon.{ Protos, StoreCommandFailedException }
import mesosphere.util.ThreadPoolContext
import mesosphere.util.state.zk.ZKStore._
import mesosphere.util.state.{ PersistentEntity, PersistentStore }
import org.apache.log4j.Logger
import org.apache.zookeeper.KeeperException
import org.apache.zookeeper.KeeperException.{ NodeExistsException, NoNodeException }
import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, Future, Promise }
class ZKStore(val client: ZkClient, rootNode: ZNode) extends PersistentStore {
private[this] val log = Logger.getLogger(getClass)
private[this] implicit val ec = ThreadPoolContext.context
val root = createPathBlocking(rootNode)
/**
   * Fetch the data stored under the given key and return the corresponding entity.
   * @return Some(entity) if the node exists in zk, None otherwise.
*/
override def load(key: ID): Future[Option[ZKEntity]] = {
val node = root(key)
require(node.parent == root, s"Nested paths are not supported: $key!")
node.getData().asScala
.map { data => Some(ZKEntity(node, ZKData(data.bytes), Some(data.stat.getVersion))) }
.recover { case ex: NoNodeException => None }
.recover(exceptionTransform(s"Could not load key $key"))
}
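  // Illustrative call; the client and root node values are assumptions, not from this file:
  //   val store = new ZKStore(zkClient, rootNode)
  //   store.load("frameworkId").foreach {
  //     case Some(entity) => // entity.bytes holds the stored value
  //     case None         => // no node exists under the root for this key
  //   }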
override def create(key: ID, content: IndexedSeq[Byte]): Future[ZKEntity] = {
val node = root(key)
require(node.parent == root, s"Nested paths are not supported: $key")
val data = ZKData(key, UUID.randomUUID(), content)
node.create(data.toProto.toByteArray).asScala
.map { n => ZKEntity(n, data, Some(0)) } //first version after create is 0
.recover(exceptionTransform(s"Can not create entity $key"))
}
/**
   * Store a previously fetched entity.
   * The entity must carry the version read from ZooKeeper, so the write can be applied
   * with an optimistic version check; an entity without a version is rejected with a
   * StoreCommandFailedException.
   * @return the updated entity carrying its new version.
*/
override def update(entity: PersistentEntity): Future[ZKEntity] = {
val zk = zkEntity(entity)
val version = zk.version.getOrElse (
throw new StoreCommandFailedException(s"Can not store entity $entity, since there is no version!")
)
zk.node.setData(zk.data.toProto.toByteArray, version).asScala
.map { data => zk.copy(version = Some(data.stat.getVersion)) }
.recover(exceptionTransform(s"Can not update entity $entity"))
}
/**
* Delete an entry with given identifier.
*/
override def delete(key: ID): Future[Boolean] = {
val node = root(key)
require(node.parent == root, s"Nested paths are not supported: $key")
node.exists().asScala
.flatMap { d => node.delete(d.stat.getVersion).asScala.map(_ => true) }
.recover { case ex: NoNodeException => false }
.recover(exceptionTransform(s"Can not delete entity $key"))
}
override def allIds(): Future[Seq[ID]] = {
root.getChildren().asScala
.map(_.children.map(_.name))
.recover(exceptionTransform("Can not list all identifiers"))
}
private[this] def exceptionTransform[T](errorMessage: String): PartialFunction[Throwable, T] = {
case ex: KeeperException => throw new StoreCommandFailedException(errorMessage, ex)
}
private[this] def zkEntity(entity: PersistentEntity): ZKEntity = {
entity match {
case zk: ZKEntity => zk
case _ => throw new IllegalArgumentException(s"Can not handle this kind of entity: ${entity.getClass}")
}
}
private[this] def createPathBlocking(path: ZNode): ZNode = {
def createParent(node: ZNode): ZNode = {
val exists = Await.result(node.exists().asScala.map(_ => true)
.recover { case ex: NoNodeException => false }
.recover(exceptionTransform("Can not query for exists")), Duration.Inf)
if (!exists) {
createParent(node.parent)
Await.result(node.create().asScala
.recover { case ex: NodeExistsException => node }
.recover(exceptionTransform("Can not create")), Duration.Inf)
}
node
}
createParent(path)
}
}
case class ZKEntity(node: ZNode, data: ZKData, version: Option[Int] = None) extends PersistentEntity {
override def id: String = node.name
override def withNewContent(updated: IndexedSeq[Byte]): PersistentEntity = copy(data = data.copy(bytes = updated))
override def bytes: IndexedSeq[Byte] = data.bytes
}
case class ZKData(name: String, uuid: UUID, bytes: IndexedSeq[Byte] = Vector.empty) {
def toProto: Protos.ZKStoreEntry = Protos.ZKStoreEntry.newBuilder()
.setName(name)
.setUuid(ByteString.copyFromUtf8(uuid.toString))
.setValue(ByteString.copyFrom(bytes.toArray))
.build()
}
object ZKData {
def apply(bytes: Array[Byte]): ZKData = {
try {
val proto = Protos.ZKStoreEntry.parseFrom(bytes)
new ZKData(proto.getName, UUIDUtil.uuid(proto.getUuid.toByteArray), proto.getValue.toByteArray)
}
catch {
case ex: InvalidProtocolBufferException =>
        throw new StoreCommandFailedException(s"Can not deserialize Protobuf from ${bytes.length} bytes", ex)
}
}
}
object ZKStore {
implicit class Twitter2Scala[T](val twitterF: TWFuture[T]) extends AnyVal {
def asScala: Future[T] = {
val promise = Promise[T]()
twitterF.onSuccess(promise.success(_))
twitterF.onFailure(promise.failure(_))
promise.future
}
}
}
|
spacejam/marathon
|
src/main/scala/mesosphere/util/state/zk/ZKStore.scala
|
Scala
|
apache-2.0
| 5,599
|
package monocle.function
import monocle.{Iso, Optional}
import scala.util.Try
import cats.data.Validated
/** Typeclass that defines an [[Optional]] from a monomorphic container `S` to a possible value `A`. There must be at
* most one `A` in `S`.
* @tparam S
* source of the [[Optional]]
* @tparam A
* target of the [[Optional]], `A` is supposed to be unique for a given `S`
*/
@deprecated("no replacement", since = "3.0.0-M1")
abstract class Possible[S, A] extends Serializable {
def possible: Optional[S, A]
}
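// A minimal usage sketch based on the Option instance defined below (values are illustrative):
//   Possible.possible[Option[Int], Int].getOption(Some(1)) // Some(1)
//   Possible.possible[Option[Int], Int].getOption(None)    // None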
trait PossibleFunctions {
@deprecated("no replacement", since = "3.0.0-M1")
def possible[S, A](implicit ev: Possible[S, A]): Optional[S, A] = ev.possible
}
object Possible extends PossibleFunctions {
def apply[S, A](optional: Optional[S, A]): Possible[S, A] =
new Possible[S, A] {
override val possible: Optional[S, A] = optional
}
/** lift an instance of [[monocle.Optional]] using an [[Iso]] */
def fromIso[S, A, B](iso: Iso[S, A])(implicit ev: Possible[A, B]): Possible[S, B] =
Possible(iso.andThen(ev.possible))
/** *********************************************************************************************
*/
/** Std instances */
/** *********************************************************************************************
*/
implicit def optionPossible[A]: Possible[Option[A], A] =
new Possible[Option[A], A] {
def possible = monocle.std.option.some
}
implicit def eitherPossible[A, B]: Possible[Either[A, B], B] =
new Possible[Either[A, B], B] {
def possible = monocle.std.either.stdRight
}
implicit def validatedPossible[A, B]: Possible[Validated[A, B], B] =
new Possible[Validated[A, B], B] {
def possible = monocle.std.validated.success
}
implicit def tryPossible[A]: Possible[Try[A], A] =
new Possible[Try[A], A] {
def possible = monocle.std.utilTry.trySuccess
}
}
|
julien-truffaut/Monocle
|
core/shared/src/main/scala/monocle/function/Possible.scala
|
Scala
|
mit
| 1,929
|
package jp.ijufumi.openreports.service.settings
import java.sql.SQLException
import jp.ijufumi.openreports.model.{RMemberGroup, TGroup, TMember}
import jp.ijufumi.openreports.service.HashKey
import jp.ijufumi.openreports.service.enums.StatusCode
import jp.ijufumi.openreports.service.support.{ConnectionFactory, Hash}
import jp.ijufumi.openreports.vo.{GroupInfo, MemberInfo}
import org.joda.time.DateTime
import scalikejdbc.{DB, SQLSyntax}
import skinny.Logging
import scala.collection.mutable
class MemberSettingsService extends Logging {
def getMembers(): Array[MemberInfo] = {
TMember.findAll().map(m => MemberInfo(m)).toArray
}
def getMember(memberId: Long): Option[MemberInfo] = {
val memOpt = TMember.findById(memberId).map(m => MemberInfo(m))
if (memOpt.isEmpty) {
return memOpt
}
val mem = memOpt.get
    // Groups the member belongs to, followed by the groups the member does not belong to.
    val newGroups = mem.groups ++ TGroup
      .findAll()
      .filterNot(g => mem.groups.exists(_.groupId == g.groupId))
      .map(g => GroupInfo(g.groupId, g.groupName, g.versions))
Option.apply(
new MemberInfo(
mem.memberId,
mem.name,
mem.emailAddress,
newGroups,
mem.menus,
mem.versions
)
)
}
def registerMember(name: String,
emailAddress: String,
password: String,
isAdmin: Boolean,
groups: Seq[String]): StatusCode.Value = {
val db = DB(ConnectionFactory.getConnection)
try {
db.begin()
val id = TMember.createWithAttributes(
'emailAddress -> emailAddress,
'password -> Hash.hmacSha256(HashKey, password),
'name -> name
)
groups.foreach(
s => RMemberGroup.createWithAttributes('memberId -> id, 'groupId -> s)
)
db.commit()
} catch {
case e: SQLException => {
db.rollback()
return StatusCode.of(e)
}
case _: Throwable => {
db.rollback()
return StatusCode.OTHER_ERROR
}
}
StatusCode.OK
}
def updateMember(memberId: Long,
name: String,
emailAddress: String,
password: String,
isAdmin: Boolean,
groups: Seq[String],
versions: Long): StatusCode.Value = {
val db = DB(ConnectionFactory.getConnection)
try {
db.begin()
val memberOpt = TMember.findById(memberId)
if (memberOpt.isEmpty) {
return StatusCode.DATA_NOT_FOUND
}
val updateBuilder = TMember
.updateByIdAndVersion(memberId, versions)
.addAttributeToBeUpdated(
(TMember.column.field("name"), name)
)
.addAttributeToBeUpdated(
(TMember.column.field("emailAddress"), emailAddress)
)
if (password.length != 0) {
val hashedPassword = Hash.hmacSha256(HashKey, password)
updateBuilder.addAttributeToBeUpdated(
(TMember.column.field("password"), hashedPassword)
)
}
updateBuilder.withAttributes('updatedAt -> DateTime.now)
RMemberGroup.deleteBy(
SQLSyntax.eq(RMemberGroup.column.field("memberId"), memberId)
)
groups.foreach(
s =>
RMemberGroup
.createWithAttributes('memberId -> memberId, 'groupId -> s)
)
db.commit()
} catch {
case e: SQLException => {
db.rollback()
return StatusCode.of(e)
}
case _: Throwable => {
db.rollback()
return StatusCode.OTHER_ERROR
}
}
StatusCode.OK
}
}
|
ijufumi/openreports_scala
|
src/main/scala/jp/ijufumi/openreports/service/settings/MemberSettingsService.scala
|
Scala
|
mit
| 3,760
|
package controllers
import play.api.mvc._, Results._
import lila.app._
import views._
object Donation extends LilaController {
def index = Open { implicit ctx =>
OptionFuOk(Prismic.getBookmark("donate")) {
case (doc, resolver) => Env.donation.api.list(100) zip
Env.donation.api.top(10) zip
Env.donation.api.progress map {
case ((donations, top), progress) =>
views.html.donation.index(doc, resolver, donations, top, progress)
}
}
}
def thanks = Open { implicit ctx =>
OptionOk(Prismic.getBookmark("donate-thanks")) {
case (doc, resolver) => views.html.site.page(doc, resolver)
}
}
def thanksRedirect = Action(Redirect(routes.Donation.thanks))
def ipn = Action.async { implicit req =>
Env.donation.forms.ipn.bindFromRequest.fold(
err => {
println(err)
fuccess(Ok)
},
ipn => {
val donation = lila.donation.Donation.make(
payPalTnx = ipn.txnId,
payPalSub = ipn.subId,
userId = ipn.userId,
email = ipn.email,
name = ipn.name,
gross = ipn.grossCents,
fee = ipn.feeCents,
message = "")
println(donation)
Env.donation.api create donation inject Ok
})
}
}
|
JimmyMow/lila
|
app/controllers/Donation.scala
|
Scala
|
mit
| 1,291
|
package org.apache.datacommons.prepbuddy.smoothers
import org.apache.spark.rdd.RDD
abstract class SmoothingMethod extends Serializable {
  def prepare(rdd: RDD[String], windowSize: Int): RDD[Double] = {
    // Copy the first (windowSize - 1) rows of every partition except the first to the
    // preceding partition as well, so a sliding window never loses values at a partition boundary.
    val duplicateRDD: RDD[(Int, String)] = rdd.mapPartitionsWithIndex((index: Int, iterator: Iterator[String]) => {
      var list: List[(Int, String)] = iterator.toList.map((index, _))
      if (index != 0) {
        val duplicates: List[(Int, String)] = list.take(windowSize - 1).map((tuple) => (tuple._1 - 1, tuple._2))
        list = list ++ duplicates
      }
      list.iterator
    })
    // Group rows back by their (possibly shifted) partition key and parse the values as doubles.
    duplicateRDD.partitionBy(new KeyPartitioner(duplicateRDD.getNumPartitions)).map(_._2.toDouble)
  }
def smooth(singleColumnDataset: RDD[String]): RDD[Double]
}
|
blpabhishek/prep-buddy
|
src/main/scala/org/apache/datacommons/prepbuddy/smoothers/SmoothingMethod.scala
|
Scala
|
apache-2.0
| 821
|
package models.billing
import scalaz._
import Scalaz._
import scalaz.effect.IO
import scalaz.Validation
import scalaz.Validation.FlatMap._
import scalaz.NonEmptyList._
import cache._
import db._
import models.Constants._
import io.megam.auth.funnel.FunnelErrors._
import com.datastax.driver.core.{ ResultSet, Row }
import com.websudos.phantom.dsl._
import scala.concurrent.{ Future => ScalaFuture }
import com.websudos.phantom.connectors.{ ContactPoint, KeySpaceDef }
import scala.concurrent.Await
import scala.concurrent.duration._
import utils.DateHelper
import io.megam.util.Time
import org.joda.time.{DateTime, DateTimeZone}
import org.joda.time.format.{DateTimeFormat,ISODateTimeFormat}
import io.megam.common.uid.UID
import net.liftweb.json._
import net.liftweb.json.scalaz.JsonScalaz._
import java.nio.charset.Charset
import controllers.stack.ImplicitJsonFormats
/**
* @author ranjitha
*
*/
case class SubscriptionsInput( model: String, license: String, trial_ends: String)
case class SubscriptionsResult(
id: String,
account_id: String,
model: String,
license: String,
trial_ends: String,
json_claz: String,
created_at: DateTime) {
}
sealed class SubscriptionsSacks extends CassandraTable[SubscriptionsSacks, SubscriptionsResult] with ImplicitJsonFormats {
object id extends StringColumn(this) with PartitionKey[String]
object account_id extends StringColumn(this) with PrimaryKey[String]
object model extends StringColumn(this)
object license extends StringColumn(this)
object trial_ends extends StringColumn(this)
object json_claz extends StringColumn(this)
object created_at extends DateTimeColumn(this)
def fromRow(row: Row): SubscriptionsResult = {
SubscriptionsResult(
id(row),
account_id(row),
model(row),
license(row),
trial_ends(row),
json_claz(row),
created_at(row))
}
}
abstract class ConcreteSubscriptions extends SubscriptionsSacks with RootConnector {
override lazy val tableName = "subscriptions"
override implicit def space: KeySpace = scyllaConnection.space
override implicit def session: Session = scyllaConnection.session
def insertNewRecord(ams: SubscriptionsResult): ValidationNel[Throwable, ResultSet] = {
val res = insert.value(_.id, ams.id)
.value(_.account_id, ams.account_id)
.value(_.model, ams.model)
.value(_.license, ams.license)
.value(_.trial_ends, ams.trial_ends)
.value(_.json_claz, ams.json_claz)
.value(_.created_at, ams.created_at)
.future()
Await.result(res, 5.seconds).successNel
}
def getRecords(email: String): ValidationNel[Throwable, Seq[SubscriptionsResult]] = {
val res = select.allowFiltering().where(_.account_id eqs email).fetch()
Await.result(res, 5.seconds).successNel
}
}
object Subscriptions extends ConcreteSubscriptions {
private def mkSubscriptionsSack(email: String, input: String): ValidationNel[Throwable, SubscriptionsResult] = {
val subInput: ValidationNel[Throwable, SubscriptionsInput] = (Validation.fromTryCatchThrowable[SubscriptionsInput, Throwable] {
parse(input).extract[SubscriptionsInput]
} leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel //capture failure
for {
sub <- subInput
uir <- (UID("sub").get leftMap { ut: NonEmptyList[Throwable] => ut })
} yield {
val bvalue = Set(email)
val json = new SubscriptionsResult(uir.get._1 + uir.get._2, email, sub.model, sub.license, sub.trial_ends, "Megam::Subscriptions", DateHelper.now())
json
}
}
def create(email: String, input: String): ValidationNel[Throwable, Option[SubscriptionsResult]] = {
for {
wa <- (mkSubscriptionsSack(email, input) leftMap { err: NonEmptyList[Throwable] => err })
set <- (insertNewRecord(wa) leftMap { t: NonEmptyList[Throwable] => t })
} yield {
play.api.Logger.warn(("%s%s%-20s%s%s").format(Console.GREEN, Console.BOLD, "Subscriptions","|+| ✔", Console.RESET))
wa.some
}
}
def findById(email: String): ValidationNel[Throwable, Seq[SubscriptionsResult]] = {
(getRecords(email) leftMap { t: NonEmptyList[Throwable] =>
new ResourceItemNotFound(email, "Subscriptions = nothing found.")
}).toValidationNel.flatMap { nm: Seq[SubscriptionsResult] =>
if (!nm.isEmpty)
Validation.success[Throwable, Seq[SubscriptionsResult]](nm).toValidationNel
else
Validation.failure[Throwable, Seq[SubscriptionsResult]](new ResourceItemNotFound(email, "Subscriptions = nothing found.")).toValidationNel
}
}
}
|
megamsys/verticegateway
|
app/models/billing/Subscriptions.scala
|
Scala
|
mit
| 4,621
|
package com.twitter.finagle.redis.protocol.commands
import com.twitter.finagle.redis.protocol._
import com.twitter.io.Buf
case class BAdd(key: Buf, field: Buf, value: Buf) extends StrictKeyCommand {
def name: Buf = Command.BADD
override def body: Seq[Buf] = Seq(key, field, value)
}
case class BRem(key: Buf, fields: Seq[Buf]) extends StrictKeyCommand {
def name: Buf = Command.BREM
override def body: Seq[Buf] = key +: fields
}
case class BGet(key: Buf, field: Buf) extends StrictKeyCommand {
def name: Buf = Command.BGET
override def body: Seq[Buf] = Seq(key, field)
}
case class BCard(key: Buf) extends StrictKeyCommand {
def name: Buf = Command.BCARD
}
case class BRange(key: Buf, count: Buf, startField: Option[Buf], endField: Option[Buf])
extends StrictKeyCommand {
def name: Buf = Command.BRANGE
override def body: Seq[Buf] = (startField, endField) match {
case (Some(s), Some(e)) => Seq(key, count, Buf.Utf8("startend"), s, e)
case (None, Some(e)) => Seq(key, count, Buf.Utf8("end"), e)
case (Some(s), None) => Seq(key, count, Buf.Utf8("start"), s)
case (None, None) => Seq(key, count)
}
}
case class BMergeEx(key: Buf, fv: Map[Buf, Buf], milliseconds: Long) extends StrictKeyCommand {
def name: Buf = Command.BMERGEEX
override def body: Seq[Buf] = {
val fvList: Seq[Buf] = fv.flatMap {
case (f, v) =>
f :: v :: Nil
}(collection.breakOut)
key +: (Buf.Utf8(milliseconds.toString) +: fvList)
}
}
|
mkhq/finagle
|
finagle-redis/src/main/scala/com/twitter/finagle/redis/protocol/commands/BtreeSortedSet.scala
|
Scala
|
apache-2.0
| 1,483
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.keras.autograd
import com.intel.analytics.bigdl.dllib.nn.abstractnn._
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.{Shape, T}
import com.intel.analytics.bigdl.dllib.keras.autograd.{AutoGrad => AG}
import com.intel.analytics.bigdl.dllib.keras.Model
import com.intel.analytics.bigdl.dllib.keras.objectives.TensorLossFunction
import scala.reflect.ClassTag
object CustomLoss {
/**
*
* @param lossFunc function to calculate the loss (yTrue, yPred) => loss
* @param yPredShape the pred shape without batch
* @param yTrueShape the target shape without batch which is the same as yPredShape by default.
* @param sizeAverage average the batch result or not
   * @return a TensorLossFunction that evaluates the loss graph built from lossFunc
*/
def apply[T: ClassTag](
lossFunc: (Variable[T], Variable[T]) => Variable[T],
yPredShape: Shape,
yTrueShape: Shape = null,
sizeAverage: Boolean = true)(
implicit ev: TensorNumeric[T]): TensorLossFunction[T] = {
val yTrue = Variable(if (null == yTrueShape) {yPredShape} else {yTrueShape})
val yPred = Variable(yPredShape)
val lossVar = lossFunc (yTrue, yPred)
new CustomLossWithVariable[T](Array(yTrue, yPred), lossVar)
}
}
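// A minimal sketch of a user-defined loss (mean squared error). It assumes Variable's
// elementwise `-` and `*` operators; AG.mean is the reduction already used in this file:
//   val mse = CustomLoss[Float](
//     (yTrue, yPred) => AG.mean((yTrue - yPred) * (yTrue - yPred)),
//     yPredShape = Shape(10))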
class CustomLossWithVariable[T: ClassTag](inputs: Array[Variable[T]], lossVar: Variable[T],
sizeAverage: Boolean = true)(
implicit ev: TensorNumeric[T]) extends CustomLoss[T](sizeAverage = sizeAverage) {
override val loss = this
private val lossInstance = generateLossFromVars(this.inputs, this.lossVar)
override protected def doGetLoss(
inputs: Array[Variable[T]]): AbstractModule[Activity, Activity, T] = lossInstance
override protected def getInputVars(inputShapes: Array[Shape]): Array[Variable[T]] = {
this.inputs
}
}
abstract class CustomLoss[T: ClassTag](sizeAverage: Boolean)(
implicit ev: TensorNumeric[T]) extends TensorLossFunction[T] {
protected def doGetLoss(inputs: Array[Variable[T]]): AbstractModule[Activity, Activity, T]
protected def getInputVars(inputShapes: Array[Shape]): Array[Variable[T]]
final def getLoss(inputShapes: Array[Shape]): AbstractModule[Activity, Activity, T] = {
val inVars = getInputVars(inputShapes)
doGetLoss(inVars)
}
final def generateLossFromVars(inVars: Array[Variable[T]], outVar: Variable[T]): Model[T] = {
if (sizeAverage) {
AG.mean(outVar, axis = 0).toGraph(inVars)
} else {
outVar.toGraph(inVars)
}
}
private def tensorToNonBatchShape(tensor: Tensor[T]) = {
val sizes = tensor.size()
Shape(sizes.slice(1, sizes.length))
}
/**
* Computes the loss using input and objective function. This function
* returns the result which is stored in the output field.
*
* @param yPred input of the criterion
* @param target target or labels
* @return the loss of the criterion
*/
override def updateOutput(yPred: Tensor[T], target: Tensor[T]): T = {
val yPredT = yPred.toTensor
val yTrueT = target.toTensor
val nonBatchShape = tensorToNonBatchShape(yPred)
val loss = getLoss(Array(nonBatchShape, nonBatchShape))
val result = loss.forward(T(yTrueT, yPredT)).toTensor[T]
require(result.isScalar,
s"The loss should be scalar, but got result with shape: [${result.size().mkString(", ")}]")
result.value()
}
/**
* Computing the gradient of the criterion with respect to its own input. This is returned in
* gradInput. Also, the gradInput state variable is updated accordingly.
*
* @param yPred input data
* @param yTrue target data / labels
* @return gradient of input
*/
override def updateGradInput(yPred: Tensor[T], yTrue: Tensor[T]): Tensor[T] = {
val nonBatchShape = tensorToNonBatchShape(yPred)
val loss = getLoss(Array(nonBatchShape, nonBatchShape))
val result = loss.updateGradInput(
T(yTrue, yPred), Tensor[T](1).fill(ev.one))
    // only the gradient w.r.t. yPred (the second element of the input table) is returned
result.toTable.get[Tensor[T]](2).get
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/keras/autograd/CustomLoss.scala
|
Scala
|
apache-2.0
| 4,689
|
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.tailhq.dynaml.kernels
import breeze.linalg.{DenseMatrix, DenseVector}
import io.github.tailhq.dynaml.utils
/**
* @author tailhq date: 2017/03/08
*
* The Neural network kernel.
*
*
* */
class NeuralNetworkKernel(sigma: DenseMatrix[Double]) extends SVMKernel[DenseMatrix[Double]]
with LocalSVMKernel[DenseVector[Double]]
with Serializable {
utils.isSquareMatrix(sigma)
utils.isSymmetricMatrix(sigma)
val dimensions = sigma.rows
state = {
for(i <- 0 until dimensions; j <- 0 until dimensions)
yield (i,j)
}.filter((coup) => coup._1 <= coup._2)
.map(c => "M_"+c._1+"_"+c._2 -> sigma(c._1, c._2))
.toMap
override val hyper_parameters: List[String] = state.keys.toList
def Sigma(config: Map[String, Double]) = DenseMatrix.tabulate[Double](dimensions, dimensions){(i, j) =>
if(i <= j) config("M_"+i+"_"+j)
else config("M_"+j+"_"+i)
}
  override def evaluateAt(config: Map[String, Double])(x: DenseVector[Double], y: DenseVector[Double]) = {
    val s = Sigma(config)
    // Append a bias term to both inputs and evaluate Williams' arc-sine (neural network) kernel:
    // k(x, y) = (2/pi) * asin( 2 xd'S yd / sqrt((1 + 2 xd'S xd)(1 + 2 yd'S yd)) )
    val xd = DenseVector(x.toArray ++ Array(1.0))
    val yd = DenseVector(y.toArray ++ Array(1.0))
    val xx: Double = 2.0 * (xd dot (s*xd))
    val yy: Double = 2.0 * (yd dot (s*yd))
    val xy: Double = 2.0 * (xd dot (s*yd))
    2.0*math.asin(xy/math.sqrt((1.0+xx)*(1.0+yy)))/math.Pi
  }
}
|
mandar2812/DynaML
|
dynaml-core/src/main/scala/io/github/tailhq/dynaml/kernels/NeuralNetworkKernel.scala
|
Scala
|
apache-2.0
| 2,107
|
package ch.wsl.fireindices.app.ui
import javax.swing.JPanel
import scala.swing.Component
import scala.swing.LayoutContainer
import scala.swing.Panel
class MigPanel(layoutConstraints:String="",columnConstraints:String="",rowConstraints:String="") extends Panel with LayoutContainer {
// import BorderPanel._
import net.miginfocom.swing._
type Constraints = String
override lazy val peer = new JPanel(new MigLayout(layoutConstraints,columnConstraints,rowConstraints))
def layoutManager = peer.getLayout.asInstanceOf[MigLayout]
protected def constraintsFor(comp: Component) = layoutManager.getComponentConstraints(comp.peer).asInstanceOf[String]
protected def areValid(c: Constraints): (Boolean, String) = (true, "")
def add(c: Component, l: Constraints ="") {
peer.add(c.peer, l.toString)
}
def addJ(c: javax.swing.JComponent, l: Constraints ="") {
peer.add(c, l.toString)
}
}
|
Insubric/fire-calculator
|
fireindicesui/src/main/scala/ch/wsl/fireindices/app/ui/MigPanel.scala
|
Scala
|
gpl-2.0
| 912
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.freespec
import org.scalatest._
import SharedHelpers.EventRecordingReporter
import scala.concurrent.{ExecutionContext, Promise, Future}
import org.scalatest.concurrent.SleepHelper
import org.scalatest.events.{InfoProvided, MarkupProvided}
import scala.util.Success
import org.scalatest
import org.scalatest.freespec
class FixtureAsyncFreeSpecLikeSpec2 extends scalatest.funspec.AsyncFunSpec {
describe("AsyncFreeSpecLike") {
it("can be used for tests that return Future under parallel async test execution") {
class ExampleSpec extends freespec.FixtureAsyncFreeSpecLike with ParallelTestExecution {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
val a = 1
"test 1" in { fixture =>
Future {
assert(a == 1)
}
}
"test 2" in { fixture =>
Future {
assert(a == 2)
}
}
"test 3" in { fixture =>
Future {
pending
}
}
"test 4" in { fixture =>
Future {
cancel()
}
}
"test 5" ignore { fixture =>
Future {
cancel()
}
}
override def newInstance = new ExampleSpec
}
val rep = new EventRecordingReporter
val spec = new ExampleSpec
val status = spec.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(repo.testStartingEventsReceived.length == 4)
assert(repo.testSucceededEventsReceived.length == 1)
assert(repo.testSucceededEventsReceived(0).testName == "test 1")
assert(repo.testFailedEventsReceived.length == 1)
assert(repo.testFailedEventsReceived(0).testName == "test 2")
assert(repo.testPendingEventsReceived.length == 1)
assert(repo.testPendingEventsReceived(0).testName == "test 3")
assert(repo.testCanceledEventsReceived.length == 1)
assert(repo.testCanceledEventsReceived(0).testName == "test 4")
assert(repo.testIgnoredEventsReceived.length == 1)
assert(repo.testIgnoredEventsReceived(0).testName == "test 5")
}
}
it("can be used for tests that did not return Future under parallel async test execution") {
class ExampleSpec extends freespec.FixtureAsyncFreeSpecLike with ParallelTestExecution {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
val a = 1
"test 1" in { fixture =>
assert(a == 1)
}
"test 2" in { fixture =>
assert(a == 2)
}
"test 3" in { fixture =>
pending
}
"test 4" in { fixture =>
cancel()
}
"test 5" ignore { fixture =>
cancel()
}
override def newInstance = new ExampleSpec
}
val rep = new EventRecordingReporter
val spec = new ExampleSpec
val status = spec.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(repo.testStartingEventsReceived.length == 4)
assert(repo.testSucceededEventsReceived.length == 1)
assert(repo.testSucceededEventsReceived(0).testName == "test 1")
assert(repo.testFailedEventsReceived.length == 1)
assert(repo.testFailedEventsReceived(0).testName == "test 2")
assert(repo.testPendingEventsReceived.length == 1)
assert(repo.testPendingEventsReceived(0).testName == "test 3")
assert(repo.testCanceledEventsReceived.length == 1)
assert(repo.testCanceledEventsReceived(0).testName == "test 4")
assert(repo.testIgnoredEventsReceived.length == 1)
assert(repo.testIgnoredEventsReceived(0).testName == "test 5")
}
}
it("should run tests that return Future in serial by default") {
@volatile var count = 0
class ExampleSpec extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test 1" in { fixture =>
Future {
SleepHelper.sleep(30)
assert(count == 0)
count = 1
Succeeded
}
}
"test 2" in { fixture =>
Future {
assert(count == 1)
SleepHelper.sleep(50)
count = 2
Succeeded
}
}
"test 3" in { fixture =>
Future {
assert(count == 2)
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(repo.testStartingEventsReceived.length == 3)
assert(repo.testSucceededEventsReceived.length == 3)
}
}
it("should run tests that does not return Future in serial by default") {
@volatile var count = 0
class ExampleSpec extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test 1" in { fixture =>
SleepHelper.sleep(30)
assert(count == 0)
count = 1
Succeeded
}
"test 2" in { fixture =>
assert(count == 1)
SleepHelper.sleep(50)
count = 2
Succeeded
}
"test 3" in { fixture =>
assert(count == 2)
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(repo.testStartingEventsReceived.length == 3)
assert(repo.testSucceededEventsReceived.length == 3)
}
}
// SKIP-SCALATESTJS,NATIVE-START
it("should run tests and its future in same main thread when use SerialExecutionContext") {
var mainThread = Thread.currentThread
var test1Thread: Option[Thread] = None
var test2Thread: Option[Thread] = None
var onCompleteThread: Option[Thread] = None
class ExampleSpec extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test 1" in { fixture =>
Future {
test1Thread = Some(Thread.currentThread)
succeed
}
}
"test 2" in { fixture =>
Future {
test2Thread = Some(Thread.currentThread)
succeed
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.whenCompleted { s =>
onCompleteThread = Some(Thread.currentThread)
}
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(test1Thread.isDefined)
assert(test1Thread.get == mainThread)
assert(test2Thread.isDefined)
assert(test2Thread.get == mainThread)
assert(onCompleteThread.isDefined)
assert(onCompleteThread.get == mainThread)
}
}
    it("should run tests and their truly async futures in the same thread when using SerialExecutionContext") {
var mainThread = Thread.currentThread
@volatile var test1Thread: Option[Thread] = None
@volatile var test2Thread: Option[Thread] = None
var onCompleteThread: Option[Thread] = None
class ExampleSpec extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test 1" in { fixture =>
val promise = Promise[Assertion]
val timer = new java.util.Timer
timer.schedule(
new java.util.TimerTask {
def run(): Unit = {
promise.complete(Success(succeed))
}
},
1000
)
promise.future.map { s =>
test1Thread = Some(Thread.currentThread)
s
}
}
"test 2" in { fixture =>
val promise = Promise[Assertion]
val timer = new java.util.Timer
timer.schedule(
new java.util.TimerTask {
def run(): Unit = {
promise.complete(Success(succeed))
}
},
500
)
promise.future.map { s =>
test2Thread = Some(Thread.currentThread)
s
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.whenCompleted { s =>
onCompleteThread = Some(Thread.currentThread)
}
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(test1Thread.isDefined)
assert(test1Thread.get == mainThread)
assert(test2Thread.isDefined)
assert(test2Thread.get == mainThread)
assert(onCompleteThread.isDefined)
assert(onCompleteThread.get == mainThread)
}
}
it("should not run out of stack space with nested futures when using SerialExecutionContext") {
class ExampleSpec extends freespec.FixtureAsyncFreeSpecLike {
// Note we get a StackOverflowError with the following execution
// context.
// override implicit def executionContext: ExecutionContext = new ExecutionContext { def execute(runnable: Runnable) = runnable.run; def reportFailure(cause: Throwable) = () }
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
def sum(xs: List[Int]): Future[Int] =
xs match {
case Nil => Future.successful(0)
case x :: xs => Future(x).flatMap(xx => sum(xs).map(xxx => xx + xxx))
}
"test 1" in { fixture =>
val fut: Future[Int] = sum((1 to 50000).toList)
fut.map(total => assert(total == 1250025000))
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(!rep.testSucceededEventsReceived.isEmpty)
}
}
// SKIP-SCALATESTJS,NATIVE-END
    it("should run tests that return Future and report their results in serial") {
class ExampleSpec extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test 1" in { fixture =>
Future {
SleepHelper.sleep(60)
succeed
}
}
"test 2" in { fixture =>
Future {
SleepHelper.sleep(30)
succeed
}
}
"test 3" in { fixture =>
Future {
succeed
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testStartingEventsReceived(0).testName == "test 1")
assert(rep.testStartingEventsReceived(1).testName == "test 2")
assert(rep.testStartingEventsReceived(2).testName == "test 3")
assert(rep.testSucceededEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testSucceededEventsReceived(1).testName == "test 2")
assert(rep.testSucceededEventsReceived(2).testName == "test 3")
}
}
    it("should run tests that do not return Future and report their results in serial") {
class ExampleSpec extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test 1" in { fixture =>
SleepHelper.sleep(60)
succeed
}
"test 2" in { fixture =>
SleepHelper.sleep(30)
succeed
}
"test 3" in { fixture =>
succeed
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testStartingEventsReceived(0).testName == "test 1")
assert(rep.testStartingEventsReceived(1).testName == "test 2")
assert(rep.testStartingEventsReceived(2).testName == "test 3")
assert(rep.testSucceededEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testSucceededEventsReceived(1).testName == "test 2")
assert(rep.testSucceededEventsReceived(2).testName == "test 3")
}
}
it("should send an InfoProvided event for an info in main spec body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
info(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 1)
assert(infoList(0).message == "hi there")
}
}
it("should send an InfoProvided event for an info in scope body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
info(
"hi there"
)
"test 1" in { fixture => succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 1)
assert(infoList(0).message == "hi there")
}
}
it("should send an InfoProvided event for an info in test body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
"test 1" in { fixture =>
info("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[InfoProvided])
val infoProvided = recordedEvent.asInstanceOf[InfoProvided]
assert(infoProvided.message == "hi there")
}
}
it("should send an InfoProvided event for an info in Future returned by test body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
"test 1" in { fixture =>
Future {
info("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[InfoProvided])
val infoProvided = recordedEvent.asInstanceOf[InfoProvided]
assert(infoProvided.message == "hi there")
}
}
it("should send a NoteProvided event for a note in main spec body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
note(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
}
it("should send a NoteProvided event for a note in scope body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
note(
"hi there"
)
"test 1" in { fixture => succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
}
it("should send a NoteProvided event for a note in test body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
"test 1" in { fixture =>
note("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
}
it("should send a NoteProvided event for a note in Future returned by test body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
"test 1" in { fixture =>
Future {
note("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
}
it("should send an AlertProvided event for an alert in main spec body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
alert(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
}
it("should send an AlertProvided event for an alert in scope body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
alert(
"hi there"
)
"test 1" in { fixture => succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
}
it("should send an AlertProvided event for an alert in test body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
"test 1" in { fixture =>
alert("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
}
it("should send an AlertProvided event for an alert in Future returned by test body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
"test 1" in { fixture =>
Future {
alert("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
}
it("should send a MarkupProvided event for a markup in main spec body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
markup(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 1)
assert(markupList(0).text == "hi there")
}
}
it("should send a MarkupProvided event for a markup in scope body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
markup(
"hi there"
)
"test 1" in { fixture => succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 1)
assert(markupList(0).text == "hi there")
}
}
it("should send a MarkupProvided event for a markup in test body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
"test 1" in { fixture =>
markup("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[MarkupProvided])
val markupProvided = recordedEvent.asInstanceOf[MarkupProvided]
assert(markupProvided.text == "hi there")
}
}
it("should send a MarkupProvided event for a markup in Future returned by test body") {
class MySuite extends freespec.FixtureAsyncFreeSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
"test feature" - {
"test 1" in { fixture =>
Future {
markup("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[MarkupProvided])
val markupProvided = recordedEvent.asInstanceOf[MarkupProvided]
assert(markupProvided.text == "hi there")
}
}
it("should allow other execution context to be used") {
class TestSpec extends freespec.FixtureAsyncFreeSpecLike {
// SKIP-SCALATESTJS,NATIVE-START
override implicit val executionContext = scala.concurrent.ExecutionContext.Implicits.global
// SKIP-SCALATESTJS,NATIVE-END
// SCALATESTJS-ONLY override implicit val executionContext = scala.scalajs.concurrent.JSExecutionContext.runNow
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome = test("testing")
val a = 1
"feature 1" - {
"scenario A" in { fixture =>
Future { assert(a == 1) }
}
}
"feature 2" - {
"scenario B" in { fixture =>
Future { assert(a == 1) }
}
}
"group3" - {
"test C" in { fixture =>
Future { assert(a == 1) }
}
}
}
val suite = new TestSpec
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { r =>
assert(reporter.scopeOpenedEventsReceived.length == 3)
assert(reporter.scopeClosedEventsReceived.length == 3)
assert(reporter.testStartingEventsReceived.length == 3)
assert(reporter.testSucceededEventsReceived.length == 3)
}
}
}
}
|
scalatest/scalatest
|
jvm/freespec-test/src/test/scala/org/scalatest/freespec/FixtureAsyncFreeSpecLikeSpec2.scala
|
Scala
|
apache-2.0
| 31,102
|
package bowhaus
import com.twitter.bijection._
import org.jboss.netty.buffer.{ ChannelBuffer, ChannelBuffers }
object Bijections extends StringInjections {
// this is in bijection (0.4.0)
object ChannelBufferBijection extends Bijection[ChannelBuffer, Array[Byte]] {
override def apply(cb: ChannelBuffer) = {
val dup = cb.duplicate
val result = new Array[Byte](dup.readableBytes)
dup.readBytes(result)
result
}
override def invert(ary: Array[Byte]) = ChannelBuffers.wrappedBuffer(ary)
}
object ByteArrayBijection extends Bijection[Array[Byte], String] {
override def invert(in: String) = in.getBytes("utf-8")
override def apply(ary: Array[Byte]) = new String(ary, "utf-8")
}
implicit val cbs = ChannelBufferBijection
implicit val strs = ByteArrayBijection
implicit val bj = Bijection.connect[ChannelBuffer, Array[Byte], String]
}
|
softprops/bowhaus
|
src/main/scala/bijection.scala
|
Scala
|
mit
| 891
|
package forimpatient.chapter11
/**
* Created by Iryna Kharaborkina on 8/9/16.
*
* Solution to the Chapter 11 Exercise 08 'Scala for the Impatient' by Horstmann C.S.
*
* Provide a class Matrix—you can choose whether you want to implement 2 × 2 matrices, square matrices of any size,
* or m × n matrices. Supply operations + and *. The latter should also work with scalars, for example mat * 2.
* A single element should be accessible as mat(row, col).
*/
object Exercise08 extends App {
println("Chapter 11 Exercise 08")
val m = new Matrix(2, 2, 1)
m(0, 1) = 10
m(1, 0) = 5
println(m)
val n = new Matrix(2, 2, 2)
n(0, 0) = 3
n(1, 0) = 5
n(1, 1) = 3
println(n)
println(m * n)
println(n * m)
println(n * 3)
println(n(0, 0))
class Matrix(val m: Int, val n: Int, default: Int = 0) {
val matrix: Array[Array[Double]] = new Array[Array[Double]](m)
for (i <- 0 until m) matrix(i) = {
val row = new Array[Double](n)
for (j <- 0 until n) row(j) = default
row
}
def apply(i: Int, j: Int) = matrix(i)(j)
def update(i: Int, j: Int, value: Double) = matrix(i)(j) = value
def +(other: Matrix) = {
if (other.m == m && other.n == n) {
val result = new Matrix(m, n)
for (i <- 0 until m; j <- 0 until n) result(i, j) = this(i, j) + other(i, j)
result
} else ()
}
def *(other: Matrix) = {
if (n == other.m) {
        // the product of an (m x n) and an (n x other.n) matrix is (m x other.n)
        val result = new Matrix(m, other.n)
for (i <- 0 until m; j <- 0 until other.n; k <- 0 until n) result(i, j) += this(i, k) * other(k, j)
result
}
else ()
}
def *(num: Double) = {
val result = new Matrix(m, n)
for (i <- 0 until m; j <- 0 until n) result(i, j) = this(i, j) * num
result
}
override def toString = matrix.map(_.mkString("[", " ", "]")).mkString("[", "\n ", "]")
}
}
|
Kiryna/Scala-for-the-Impatient
|
src/forimpatient/chapter11/Exercise08.scala
|
Scala
|
apache-2.0
| 1,893
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import org.apache.spark.annotation.DeveloperApi
/**
* :: DeveloperApi ::
* Information about an [[org.apache.spark.util.AccumulatorV2]] modified during a task or stage.
*
* @param id accumulator ID
* @param name accumulator name
* @param update partial value from a task, may be None if used on driver to describe a stage
 * @param value total accumulated value so far, may be None if used on executors to describe a task
* @param internal whether this accumulator was internal
* @param countFailedValues whether to count this accumulator's partial value if the task failed
* @param metadata internal metadata associated with this accumulator, if any
*
* @note Once this is JSON serialized the types of `update` and `value` will be lost and be
* cast to strings. This is because the user can define an accumulator of any type and it will
* be difficult to preserve the type in consumers of the event log. This does not apply to
* internal accumulators that represent task level metrics.
*/
@DeveloperApi
case class AccumulableInfo private[spark] (
id: Long,
name: Option[String],
update: Option[Any], // represents a partial update within a task
value: Option[Any],
private[spark] val internal: Boolean,
private[spark] val countFailedValues: Boolean,
// TODO: use this to identify internal task metrics instead of encoding it in the name
private[spark] val metadata: Option[String] = None)
/**
* A collection of deprecated constructors. This will be removed soon.
*/
object AccumulableInfo {
@deprecated("do not create AccumulableInfo", "2.0.0")
def apply(
id: Long,
name: String,
update: Option[String],
value: String,
internal: Boolean): AccumulableInfo = {
new AccumulableInfo(
id, Option(name), update, Option(value), internal, countFailedValues = false)
}
@deprecated("do not create AccumulableInfo", "2.0.0")
def apply(id: Long, name: String, update: Option[String], value: String): AccumulableInfo = {
new AccumulableInfo(
id, Option(name), update, Option(value), internal = false, countFailedValues = false)
}
@deprecated("do not create AccumulableInfo", "2.0.0")
def apply(id: Long, name: String, value: String): AccumulableInfo = {
new AccumulableInfo(
id, Option(name), None, Option(value), internal = false, countFailedValues = false)
}
}
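// Illustrative sketch (not part of the Spark sources): how `update` and `value` are
// typically populated per the scaladoc above. The id, name and numbers are made-up
// example values, not real Spark metric names.
private[spark] object AccumulableInfoExamples {
  // executor side, describing a single task: carries a partial update, no total value
  val taskSide: AccumulableInfo =
    new AccumulableInfo(id = 1L, name = Some("example.counter"), update = Some(42L),
      value = None, internal = false, countFailedValues = false)

  // driver side, describing a stage: carries the accumulated total, no partial update
  val stageSide: AccumulableInfo =
    new AccumulableInfo(id = 1L, name = Some("example.counter"), update = None,
      value = Some(1000L), internal = false, countFailedValues = false)
}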
|
ahnqirage/spark
|
core/src/main/scala/org/apache/spark/scheduler/AccumulableInfo.scala
|
Scala
|
apache-2.0
| 3,222
|
package pl.edu.agh.mplt.parser.AMPL.statements.lexpr
import org.scalatest.{Matchers, FlatSpec}
import pl.edu.agh.mplt.parser.{KeywordAMPLParser, IntercodeImplicits}
import pl.edu.agh.mplt.parser.phrase.logical._
import pl.edu.agh.mplt.parser.member.{SetMember, MemberAMPLParser}
import pl.edu.agh.mplt.parser.phrase.set._
import pl.edu.agh.mplt.parser.phrase.expression.{Bin, ExpressionAMPLParser}
import pl.edu.agh.mplt.parser.reference.{SimpleReference, ReferenceParser}
import pl.edu.agh.mplt.parser.phrase.set.SetComprehension
import pl.edu.agh.mplt.parser.phrase.set.ExplicitSet
class LogicalExpressionTest extends FlatSpec with Matchers with IntercodeImplicits {
val parser = new ReferenceParser with KeywordAMPLParser with ExpressionAMPLParser with IndexingAMPLParser
with LogicalExpressionAMPLParser with SetExpressionAMPLParser with MemberAMPLParser
def expr = parser.lexpr
def parse(input: String) = parser.parse(expr, input).get
"Logical Parser" should "parse operator <" in {
parse("1 < 3") should be(Comparision.<(1, 3))
}
  it should "parse operator <=" in {
parse("1 <= 3") should be(Comparision.<=(1, 3))
}
  it should "parse operator >" in {
parse("1 > 3") should be(Comparision.>(1, 3))
}
  it should "parse operator >=" in {
parse("1 >= 3") should be(Comparision.>=(1, 3))
}
  it should "parse operator ==" in {
parse("1 == 3") should be(Comparision.==(1, 3))
}
  it should "parse operator !=" in {
parse("1 != 3") should be(Comparision.!=(1, 3))
}
  it should "parse `<>' same as '!='" in {
parse("1 <> 3") should be(parse("1 != 3"))
}
  it should "parse `=' same as '=='" in {
parse("1 = 3") should be(parse("1 == 3"))
}
it should "parse boolean variables " in {
parse("a > 3") should be(Comparision.>("a", 3))
parse("b == variable") should be(Comparision.==("b", "variable"))
}
  it should "parse compound comparison statements" in {
parse("1 + 3 >= 7 * z - 5") should be(
Comparision.>=(
Bin.+(1, 3),
Bin.-(Bin.*(7, "z"), 5))
)
}
//////////////////////
it should "parse number as logical non-zero check" in {
parse("1") should be(Comparision.!=(1, 0))
}
it should "parse not expr" in {
parse("not x") should be(Logical.Not("x"))
}
it should "parse or expr" in {
parse("x or y") should be(Logical.Or("x", "y"))
}
it should "parse and expr" in {
parse("x and 7") should be(Logical.And("x", Comparision.!=(7, 0)))
}
it should "parse '!' as 'not'" in {
parse("!x") should be(parse("not x"))
}
it should "parse '||' as 'or'" in {
parse("x || y") should be(parse("x or y"))
}
it should "parse '&&' as 'and'" in {
parse("x && 7") should be(parse("x and 7"))
}
it should "maintain left associativity of conjunction" in {
parse("x and y and z") should be(Logical.And(Logical.And("x", "y"), "z"))
}
it should "maintain left associativity of alternative" in {
parse("x or y or z") should be(Logical.Or(Logical.Or("x", "y"), "z"))
}
///////////////////
it should "parse ands with or" in {
parse("x and y and z or a") should be(Logical.Or(Logical.And(Logical.And("x", "y"), "z"), "a"))
parse("x and y or z and a") should be(Logical.Or(Logical.And("x", "y"), Logical.And("z", "a")))
}
it should "parse ands with not" in {
parse("x and y and not z") should be(Logical.And(Logical.And("x", "y"), Logical.Not("z")))
}
it should "parse ands with not and or 1" in {
parse("x and y or not z") should be(Logical.Or(Logical.And("x", "y"), Logical.Not("z")))
parse("x and not y or z") should be(Logical.Or(Logical.And("x", Logical.Not("y")), "z"))
parse("not x and y or z") should be(Logical.Or(Logical.And(Logical.Not("x"), "y"), "z"))
parse("not x or y and z") should be(Logical.Or(Logical.Not("x"), Logical.And("y", "z")))
}
it should "parse simple member inclusion" in {
parse("1 in {1, 2, 3}") should be(
Inclusion.Member(1, ExplicitSet(Set[SetMember](1, 2, 3))))
parse("1 in 1 .. 3") should be(
Inclusion.Member(1, SetComprehension(1, 3)))
}
it should "parse simple member exclusion" in {
parse("1 not in {1, 2, 3}") should be {
Exclusion.Member(1, ExplicitSet(Set[SetMember](1, 2, 3)))
}
}
it should "parse simple subset inclusion" in {
parse("1 .. 5 within {1, 2, 3}") should be {
Inclusion.Subset(SetComprehension(1, 5), ExplicitSet(Set[SetMember](1, 2, 3)))
}
}
it should "parse simple subset exclusion" in {
parse("1 .. 5 not within {1, 2, 3}") should be {
Exclusion.Subset(SetComprehension(1, 5), ExplicitSet(Set[SetMember](1, 2, 3)))
}
}
///////////////////
it should "parse compound logical expression 1" in {
parse("x and y and not z or a and 16 != 0") should be(
Logical.Or(
Logical.And(Logical.And("x", "y"), Logical.Not("z")),
Logical.And("a", Comparision.!=(16, 0))
)
)
}
it should "parse compound logical expression 2" in {
parse("1 in 1 .. 5 and not {1, 2, 3} within 1 .. 4 or {1, 2} not within 1 .. 2 and x > 10 + 5") should be(
Logical.Or(
Logical.And(
Inclusion.Member(1, SetComprehension(1, 5)),
Logical.Not(
Inclusion.Subset(
ExplicitSet(Set[SetMember](1, 2, 3)),
SetComprehension(1, 4)))),
Logical.And(
Exclusion.Subset(
ExplicitSet(Set[SetMember](1, 2)),
SetComprehension(1, 2)),
Comparision.>("x", Bin.+(10, 5))
)
))
}
it should "parse forall reduction" in {
parse("forall {A} 1 == 1") should be(
LogicalReduction.Forall(
Indexing(List(SimpleReference("A"))),
Comparision.==(1, 1)))
}
it should "parse exists reduction" in {
parse("exists {A} 1 == 1") should be(
LogicalReduction.Exists(
Indexing(List(SimpleReference("A"))),
Comparision.==(1, 1)))
}
///////////////////
"conjunction" should "precede alternative" in {
parse("x and y or z") should be(Logical.Or(Logical.And("x", "y"), "z"))
}
"negation" should "precede conjunction" in {
parse("x and not y") should be(Logical.And("x", Logical.Not("y")))
}
}
|
marek1840/MPLT
|
src/test/scala/pl/edu/agh/mplt/parser/AMPL/statements/lexpr/LogicalExpressionTest.scala
|
Scala
|
mit
| 6,459
|
package scala
package reflect.io
import scala.reflect.internal.util.Statistics
// Due to limitations in the Statistics machinery, these are only
// reported if this patch is applied.
//
// --- a/src/reflect/scala/reflect/internal/util/Statistics.scala
// +++ b/src/reflect/scala/reflect/internal/util/Statistics.scala
// @@ -109,7 +109,7 @@ quant)
// * Quantities with non-empty prefix are printed in the statistics info.
// */
// trait Quantity {
// - if (enabled && prefix.nonEmpty) {
// + if (prefix.nonEmpty) {
// val key = s"${if (underlying != this) underlying.prefix else ""}/$prefix"
// qs(key) = this
// }
// @@ -243,7 +243,7 @@ quant)
// *
// * to remove all Statistics code from build
// */
// - final val canEnable = _enabled
// + final val canEnable = true // _enabled
//
// We can't commit this change as the first diff reverts a fix for an IDE memory leak.
private[io] object IOStats {
val fileExistsCount = Statistics.newCounter("# File.exists calls")
val fileIsDirectoryCount = Statistics.newCounter("# File.isDirectory calls")
val fileIsFileCount = Statistics.newCounter("# File.isFile calls")
}
|
felixmulder/scala
|
src/reflect/scala/reflect/io/IOStats.scala
|
Scala
|
bsd-3-clause
| 1,182
|
package hr.element.etb
/*
* Copyright (c) 2008-2009, Matthias Mann
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Matthias Mann nor the names of its contributors may
* be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
object Profiling {
def timed[T](report: Long => Unit)(body: => T) = {
val start = System.nanoTime
val r = body
report(System.nanoTime - start)
r
}
private val timeUnits = List("ns", "us", "ms", "s")
def formatTime(delta: Long) = {
def formatTime(v: Long, units: List[String], tail: List[String]): List[String] = {
def makeTail(what: Long) = (what + units.head) :: tail
if (!units.tail.isEmpty && v >= 1000) {
formatTime(v / 1000, units.tail, makeTail(v % 1000))
} else {
makeTail(v)
}
}
formatTime(delta, timeUnits, Nil).mkString(" ")
}
def printTime(msg: String) = (delta: Long) =>
println(msg + formatTime(delta))
}
|
melezov/etb
|
util/src/main/scala/hr/element/etb/Profiling.scala
|
Scala
|
unlicense
| 2,335
|
// Copyright (c) 2016 PSForever.net to present
package net.psforever.packet.control
import net.psforever.packet.{ControlPacketOpcode, Marshallable, PlanetSideControlPacket}
import scodec.Codec
import scodec.bits._
import scodec.codecs._
final case class ServerStart(clientNonce : Long, serverNonce : Long)
extends PlanetSideControlPacket {
type Packet = ServerStart
def opcode = ControlPacketOpcode.ServerStart
def encode = ServerStart.encode(this)
}
object ServerStart extends Marshallable[ServerStart] {
implicit val codec : Codec[ServerStart] = (
("client_nonce" | uint32L) ::
("server_nonce" | uint32L) ::
("unknown" | constant(hex"000000000001d300000002".bits))
).as[ServerStart]
}
|
Fate-JH/PSF-Server
|
common/src/main/scala/net/psforever/packet/control/ServerStart.scala
|
Scala
|
gpl-3.0
| 719
|
import java.util.concurrent.Executors
import cats.effect.{Blocker, IO}
import org.scalacheck.Arbitrary
import org.scalatest._
import rpm4s.codecs.IndexData.StringData
import rpm4s.data.{Architecture, Name, RpmPrimaryEntry, Version}
import scodec.bits.BitVector
import scodec.{Attempt, Codec}
import fs2.Stream
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import rpm4s.repo.utils.compress.{gunzip, gzip}
import scala.concurrent.ExecutionContext
class GzipSpec
extends AnyFlatSpec
with Matchers
with ScalaCheckPropertyChecks {
"gunzip" should "uncompress correctly" in {
implicit val cs = IO.contextShift(ExecutionContext.global)
val text = Blocker.fromExecutorService(IO(Executors.newCachedThreadPool())).use { blocker =>
val bits = fs2.io.readInputStream(
IO(getClass.getResourceAsStream("/text.gz")), 4096, blocker
)
bits
.through(gunzip())
.through(fs2.text.utf8Decode)
.compile.toVector.map(_.mkString)
}.unsafeRunSync()
    text shouldEqual "hello world!\n"
}
it should "roundtrip" in {
forAll { value: String =>
val r = Stream.emit(value)
.covary[IO]
.through(fs2.text.utf8Encode)
.through(gzip())
.through(gunzip())
.through(fs2.text.utf8Decode)
.compile.toList.map(_.mkString)
.unsafeRunSync()
r shouldEqual value
}
}
}
|
lucidd/rpm4s
|
repo-utils/jvm/src/test/scala/GzipSpec.scala
|
Scala
|
mit
| 1,498
|
package xitrum.handler.inbound
import java.io.File
import io.netty.channel.{ChannelHandler, SimpleChannelInboundHandler, ChannelHandlerContext}
import io.netty.handler.codec.http.{HttpMethod, HttpResponseStatus}
import ChannelHandler.Sharable
import HttpMethod._
import HttpResponseStatus._
import xitrum.Config
import xitrum.handler.HandlerEnv
import xitrum.handler.outbound.XSendFile
import xitrum.etag.NotModified
import xitrum.util.PathSanitizer
/**
* Serves static files in "public" directory.
* See DefaultHttpChannelInitializer, this handler is put after XSendFile.
*/
@Sharable
class PublicFileServer extends SimpleChannelInboundHandler[HandlerEnv] {
override def channelRead0(ctx: ChannelHandlerContext, env: HandlerEnv) {
val request = env.request
if (request.getMethod != GET && request.getMethod != HEAD && request.getMethod != OPTIONS) {
ctx.fireChannelRead(env)
return
}
val pathInfo = request.getUri.split('?')(0)
if (Config.xitrum.staticFile.pathRegex.findFirstIn(pathInfo).isEmpty) {
ctx.fireChannelRead(env)
return
}
val response = env.response
sanitizedAbsStaticPath(pathInfo) match {
case None =>
XSendFile.set404Page(response, false)
ctx.channel.writeAndFlush(env)
case Some(abs) =>
val file = new File(abs)
if (file.isFile && file.exists) {
response.setStatus(OK)
if (request.getMethod == OPTIONS) {
ctx.channel.writeAndFlush(env)
} else {
if (!Config.xitrum.staticFile.revalidate)
NotModified.setClientCacheAggressively(response)
XSendFile.setHeader(response, abs, false)
ctx.channel.writeAndFlush(env)
}
} else {
ctx.fireChannelRead(env)
}
}
}
  /**
   * Sanitizes and returns the absolute path.
   *
   * @param pathInfo Starts with "/"
   */
private def sanitizedAbsStaticPath(pathInfo: String): Option[String] = {
PathSanitizer.sanitize(pathInfo).map { path =>
xitrum.root + "/public" + path
}
}
}
|
georgeOsdDev/xitrum
|
src/main/scala/xitrum/handler/inbound/PublicFileServer.scala
|
Scala
|
mit
| 2,160
|
/**
* __________ .__ __ .__
* \____ /____ ____ |__|/ |_| |__
* / // __ \ / \| \ __\ | \
* / /\ ___/| | \ || | | Y \
* /_______ \___ >___| /__||__| |___| /
* \/ \/ \/ \/
*/
import sbt.Keys._
import sbt._
import sbtrelease.ReleasePlugin.autoImport._
import com.typesafe.sbt.SbtPgp.autoImport._
object Version {
val nscala_time = "2.18.0"
val specs2 = "4.0.2"
val netty = "3.10.3.Final"
val cats = "1.0.0-RC1"
val cats_mtl = "0.1.0"
val circe = "0.9.0-M2"
// compile time plugins
val kind_projector = "0.9.5"
val simulacrum = "0.10.0"
val paradise = "2.1.0"
}
object Dependencies {
val cats = "org.typelevel" %% "cats-core" % Version.cats
val cats_mtl = "org.typelevel" %% "cats-mtl-core" % Version.cats_mtl
val nscala_time = "com.github.nscala-time" %% "nscala-time" % Version.nscala_time
val specs2 = "org.specs2" %% "specs2-core" % Version.specs2 % "test"
val simulacrum = "com.github.mpilquist" %% "simulacrum" % Version.simulacrum
val paradise = "org.scalamacros" %% "paradise" % Version.paradise cross CrossVersion.full
val kind_projector = "org.spire-math" %% "kind-projector" % Version.kind_projector cross CrossVersion.binary
val netty = "io.netty" % "netty" % Version.netty
val circe_core = "io.circe" %% "circe-core" % Version.circe
val circe_generic = "io.circe" %% "circe-generic" % Version.circe
val circe_jawn = "io.circe" %% "circe-jawn" % Version.circe
}
object Resolvers {
val sonatype = "Sonatype" at "https://oss.sonatype.org/content/repositories/releases/"
val sonatype_public = "Sonatype Public" at "https://oss.sonatype.org/content/groups/public/"
val typesafe = "Typesafe" at "http://repo.typesafe.com/typesafe/releases/"
}
object Configurations {
private lazy val compilerOptions =
"-deprecation" ::
"-encoding" :: "UTF-8" ::
"-feature" ::
"-unchecked" ::
"-language:_" ::
"-Yno-adapted-args" ::
"-Yrangepos" ::
"-Ywarn-dead-code" ::
"-Ywarn-numeric-widen" ::
"-Ypartial-unification" ::
"-Xfuture" :: Nil
lazy val buildSettings =
(organization := "io.github.sungiant") ::
(scalaVersion := "2.12.4") ::
(crossScalaVersions := "2.11.12" :: "2.12.4" :: Nil) :: Nil
lazy val commonSettings =
(resolvers += Resolvers.sonatype) ::
(resolvers += Resolvers.sonatype_public) ::
(resolvers += Resolvers.typesafe) ::
(libraryDependencies += Dependencies.cats) ::
(libraryDependencies += Dependencies.cats_mtl) ::
(libraryDependencies += Dependencies.specs2) ::
(libraryDependencies += Dependencies.nscala_time) ::
(libraryDependencies += Dependencies.simulacrum) ::
(addCompilerPlugin (Dependencies.paradise)) ::
(addCompilerPlugin (Dependencies.kind_projector)) ::
(scalacOptions ++= compilerOptions) ::
(parallelExecution in ThisBuild := false) :: Nil
lazy val noPublishSettings =
(publish := {}) ::
(publishArtifact := false) ::
(publishLocal := {}) ::
(publishTo := Some ("foo" at "bar")) :: Nil // I'd like to be able to just write `None` here but when I do sbt pgp kicks off.
lazy val publishSettings =
(releaseCrossBuild := true) ::
(releasePublishArtifactsAction := PgpKeys.publishSigned.value) ::
(homepage := Some (url ("https://github.com/sungiant/zenith"))) ::
(licenses := Seq ("MIT" -> url ("https://raw.githubusercontent.com/sungiant/zenith/master/LICENSE"))) ::
(publishMavenStyle := true) ::
(publishArtifact in Test := false) ::
(pomIncludeRepository := { _ => false }) ::
(publishTo := {
val nexus = "https://oss.sonatype.org/"
if (isSnapshot.value) Some ("snapshots" at nexus + "content/repositories/snapshots")
else Some ("releases" at nexus + "service/local/staging/deploy/maven2")
}) ::
(credentials ++= (for {
username <- Option (System.getenv ().get ("SONATYPE_USERNAME"))
password <- Option (System.getenv ().get ("SONATYPE_PASSWORD"))
} yield Credentials ("Sonatype Nexus Repository Manager", "oss.sonatype.org", username, password)
).toSeq) ::
(autoAPIMappings := true) ::
(scmInfo := Some (ScmInfo (url ("https://github.com/sungiant/zenith"), "scm:git:git@github.com:sungiant/zenith.git"))) ::
(pomExtra := (
<developers>
<developer>
<id>sungiant</id>
<name>Ash Pook</name>
<url>https://github.com/sungiant</url>
</developer>
</developers>
)) :: Nil
}
|
sungiant/zenith
|
project/build.scala
|
Scala
|
mit
| 4,683
|
package com.ponkotuy.restype
import com.netaporter.uri.Uri
import com.ponkotuy.parser.Query
import com.ponkotuy.util.Log
import org.json4s._
import scala.util.matching.Regex
/**
*
* @author ponkotuy
* Date: 14/02/19.
*/
abstract class ResType {
def regexp: Regex
def postables(q: Query): Seq[Result]
implicit def formats: Formats = DefaultFormats
}
object ResType extends Log {
type Req = Map[String, String]
val Api = "/kcsapi"
val AuthMember = s"$Api/api_auth_member"
val GetMaster = s"$Api/api_get_master"
val GetMember = s"$Api/api_get_member"
val ReqKousyou = s"$Api/api_req_kousyou"
val ReqHokyu = s"$Api/api_req_hokyu"
val ReqHensei = s"$Api/api_req_hensei"
val ReqMission = s"$Api/api_req_mission"
val ReqKaisou = s"$Api/api_req_kaisou"
val ReqPractice = s"$Api/api_req_practice"
val ReqMember = s"$Api/api_req_member"
val ReqMap = s"$Api/api_req_map"
val ReqSortie = s"$Api/api_req_sortie"
val ReqQuest = s"$Api/api_req_quest"
val ReqNyukyo = s"$Api/api_req_nyukyo"
val ReqRanking = s"$Api/api_req_ranking"
val ReqCombined = s"$Api/api_req_combined_battle"
val values: Vector[ResType] = Vector(
ApiStart2,
Basic,
LoginCheck,
Port,
Material,
DeckPort,
Deck,
HenseiChange,
PresetSelect,
PictureBook,
NDock,
KDock,
CreateShip,
GetShip,
CreateItem,
MapInfo,
SelectEventmapRank,
QuestList,
RemodelSlot,
RemodelSlotlist,
RemodelSlotlistDetail,
Ship2,
Ship3,
ShipDeck,
ShipSWF,
SoundMP3,
SlotItem,
MapStart,
MapNext,
MapSWF,
    ObfuscatedMapSWF, // because of the matching order, this must come after MapSWF
SortieBattleResult,
CombinedBattleResult,
RequireInfo
)
def fromUri(uri: String): Option[ResType] = {
val path = Uri.parse(uri).pathRaw
println(path)
logger.debug(path)
values.find(_.regexp.findFirstIn(path).isDefined)
}
}
|
b-wind/MyFleetGirls
|
client/src/main/scala/com/ponkotuy/restype/ResType.scala
|
Scala
|
mit
| 1,951
|
import sbt._
import java.io._
import Keys._
import com.typesafe.sbteclipse.plugin.EclipsePlugin.EclipseKeys
import org.seacourt.build._
import sbt.Process._
object RootConfig {
def libraries: Seq[String] = {
val output: String = (Process("root-config" :: "--libs" :: "--noldflags" :: Nil) !!)
output.trim.split(" ").filter(_.startsWith("-l")).map(_.substring(2))
}
def linkFlags: Seq[String] = {
val output: String = (Process("root-config" :: "--libs" :: Nil) !!)
val lib_flags = output.trim.split(" ").filter(!_.startsWith("-l"))
lib_flags ++ Seq("-Wl,--no-as-needed")
}
}
object RawRootPlugin extends NativeDefaultBuild("RawRootPlugin") {
lazy val compileFlags = nativeCXXCompileFlags in Compile ++= {
Seq()
}
// RootWalker and handlers
lazy val walkerSettings = NativeProject.sharedLibrarySettings ++ Seq(
nativeCXXCompileFlags in Compile += "-std=c++11",
nativeIncludeDirectories in Compile += file("/usr/include/root"),
nativeIncludeDirectories in Compile += file("./RootWalker/source"),
nativeIncludeDirectories in Compile += file("/usr/lib/jvm/java-7-openjdk-amd64/include"),
nativeLibraries in Compile ++= RootConfig.libraries,
nativeDynamicLibraryLinkFlags in Compile ++= RootConfig.linkFlags,
EclipseKeys.skipProject := true
)
lazy val rootWalker = NativeProject(
"RootWalker", file("RootWalker/"),
walkerSettings
)
def makeNativeProject(f: File): Project =
NativeProject("Handler_" + f.getName, f, walkerSettings) nativeDependsOn rootWalker
lazy val availableHandlers: Array[Project] =
new File("./RootWalker/handlers").listFiles().filter(_.isDirectory).map(makeNativeProject(_))
// Native part of the wrapper
lazy val pluginNative = NativeProject(
"libRawRootPluginNative", file("RawRootPlugin/src/main/native/"),
walkerSettings
) nativeDependsOn rootWalker
// RAW
lazy val raw = uri("git://github.com/ayllon/raw.git")
// Java/Scala wrapper
lazy val plugin = Project(
id = "RawRootPluginJVM",
base = file("RawRootPlugin")
) dependsOn (pluginNative, raw)
// Overload projects
override def projects: Seq[Project] = {
List(super.projects: _*) ::: List(availableHandlers: _*)
}
}
|
ayllon/raw-root
|
project/Build.scala
|
Scala
|
agpl-3.0
| 2,414
|
package org.apache.mesos.chronos.scheduler.api
import com.fasterxml.jackson.core.JsonGenerator
import com.fasterxml.jackson.databind.{JsonSerializer, SerializerProvider}
import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics
/**
* Serializes a subset of the fields of DescriptiveStatistics
* @author Florian Leibert (flo@leibert.de)
*/
@Deprecated
class DescriptiveStatisticsSerializer extends JsonSerializer[DescriptiveStatistics] {
def serialize(stat: DescriptiveStatistics, json: JsonGenerator, provider: SerializerProvider) {
json.writeStartObject()
json.writeFieldName("75thPercentile")
json.writeNumber(stat.getPercentile(75))
json.writeFieldName("95thPercentile")
json.writeNumber(stat.getPercentile(95))
json.writeFieldName("98thPercentile")
json.writeNumber(stat.getPercentile(98))
json.writeFieldName("99thPercentile")
json.writeNumber(stat.getPercentile(99))
json.writeFieldName("median")
json.writeNumber(stat.getPercentile(50))
json.writeFieldName("mean")
json.writeNumber(stat.getMean)
json.writeFieldName("count")
json.writeNumber(stat.getN)
json.writeEndObject()
json.close()
}
}
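// Illustrative sketch (not part of Chronos): how a custom serializer like the one above
// is typically registered with a Jackson ObjectMapper through a SimpleModule. The object
// and method names below are made up for the example.
object DescriptiveStatisticsSerializerUsage {
  def mapperWithStatsSupport(): com.fasterxml.jackson.databind.ObjectMapper = {
    val module = new com.fasterxml.jackson.databind.module.SimpleModule()
    module.addSerializer(classOf[DescriptiveStatistics], new DescriptiveStatisticsSerializer)
    val mapper = new com.fasterxml.jackson.databind.ObjectMapper()
    mapper.registerModule(module)
    mapper
  }
}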
|
tony-kerz/chronos
|
src/main/scala/org/apache/mesos/chronos/scheduler/api/DescriptiveStatisticsSerializer.scala
|
Scala
|
apache-2.0
| 1,198
|
package blended.security.ssl
import java.security.{KeyPair, SignatureException}
import blended.testsupport.scalatest.LoggingFreeSpec
import org.scalacheck.Gen
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import scala.util.{Success, Try}
class CertificateHolderSpec extends LoggingFreeSpec
with Matchers
with ScalaCheckPropertyChecks
with SecurityTestSupport
with CertificateRequestBuilder
with CertificateSigner {
private def createChain(length : Int) : Try[CertificateHolder] = {
def extendChain(c : CertificateHolder, l : Int) : Try[CertificateHolder] = {
if (l == 0) {
Success(c)
} else {
extendChain(createHostCertificate(s"host${length - l}", c).get, l - 1)
}
}
extendChain(createRootCertificate(cn = "root").get, length - 1)
}
"The certificate holder should" - {
"Refuse an empty chain" in {
val p : KeyPair = kpg.generateKeyPair()
intercept[EmptyCertificateChainException] {
CertificateHolder.create(p, List.empty).get
}
}
"Ensure the certificate chain does have a root certificate" in {
val root : CertificateHolder = createRootCertificate(cn = "root").get
val host : CertificateHolder = createHostCertificate("host", root).get
intercept[MissingRootCertificateException] {
CertificateHolder.create(host.publicKey, host.privateKey, host.chain.head :: Nil).get
}
}
"Ensure the signature links are correct" in {
val root : CertificateHolder = createRootCertificate(cn = "root").get
val host : CertificateHolder = createHostCertificate("host", root).get
val fakeRoot : CertificateHolder = createRootCertificate(cn = "root").get
intercept[SignatureException] {
CertificateHolder.create(host.publicKey, host.chain.head :: fakeRoot.chain.head :: Nil).get
}
}
"Support chains of an arbitrary, yet reasonable length" in {
val maxLength : Int = 10
forAll(Gen.choose(1, maxLength)) { n =>
assert(createChain(n).isSuccess)
}
}
}
}
|
woq-blended/blended
|
blended.security.ssl/src/test/scala/blended/security/ssl/CertificateHolderSpec.scala
|
Scala
|
apache-2.0
| 2,125
|
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package model.command
import org.joda.time.LocalDate
import model.AddressExamples._
import model.{ CivilServiceExperienceDetails, FSACIndicator }
object GeneralDetailsExamples {
val CandidateContactDetailsUK = GeneralDetails("John", "Doe", "johnd", "johndoe@test.com", LocalDate.now().minusYears(20),
outsideUk = false, FullAddress, Some("A1 B23"), Some(FSACIndicator("London", "London")), country = None, phone = "1234567890",
Some(CivilServiceExperienceDetails(applicable = false)), edipCompleted = Some(false), edipYear = None,
otherInternshipCompleted = Some(false), otherInternshipName = None, otherInternshipYear = None, updateApplicationStatus = Some(true))
val CandidateContactDetailsUKSdip = GeneralDetails("John", "Doe", "johnd", "johndoe@test.com", LocalDate.now().minusYears(20),
outsideUk = false, FullAddress, Some("A1 B23"), Some(FSACIndicator("London", "London")), country = None, phone = "1234567890",
Some(CivilServiceExperienceDetails(applicable = false)), edipCompleted = Some(true), edipYear = Some("2020"),
otherInternshipCompleted = Some(false), otherInternshipName = None, otherInternshipYear = None, updateApplicationStatus = Some(true))
}
|
hmrc/fset-faststream
|
test/model/command/GeneralDetailsExamples.scala
|
Scala
|
apache-2.0
| 1,800
|
package controllers
import play.api.mvc._
import views.html
import helper.utils.{SynonymSyntaxValidator, AuthenticatedAction}
import play.api.data.Form
import models.{InputTopListEntry, Synonyms}
import play.api.data.Forms._
import scala.Some
import helper.services.SynonymService
import esclient.Elasticsearch
import play.api.i18n.Messages
import play.api.data.validation.{Invalid, Valid, ValidationError, Constraint}
import models.results.EditSynonymsResult
object Synonym extends Controller {
implicit val context = scala.concurrent.ExecutionContext.Implicits.global
val synonymSyntaxConstraint: Constraint[String] = Constraint("constraints.synonymSyntaxCheck")({
plainText =>
val errors = plainText match {
case syntaxError if SynonymSyntaxValidator.isWrongSyntax(plainText) => Seq(ValidationError(Messages("error.wrongSyntaxInLine", SynonymSyntaxValidator.getIncorrectSyntaxLineNo(plainText), SynonymSyntaxValidator.getIncorrectSyntaxLine(plainText))))
case _ => Nil
}
if (errors.isEmpty) Valid
else Invalid(errors)
})
val synonymForm: Form[Synonyms] = Form(
mapping("synonyms" -> text().verifying(synonymSyntaxConstraint)) {
(synonyms) => Synonyms(synonyms)
} {
synonyms => Some(synonyms.text)
})
def editor(indexName: String) = {
AuthenticatedAction {
Action.async {
implicit request =>
{
val synonymService = new SynonymService(new Elasticsearch)
synonymService.getSynonymsAndTopInputValues(indexName) map {
result => {
if (result.hasError) {
Redirect(routes.ListIndices.index(Option.empty[String]))
}
else {
                  Ok(html.synonym.editor(indexName, synonymForm.fill(Synonyms(result.synonymGroups.mkString("\n"))), result.topTenInputValues))
}
}
} recover {
case e: Throwable => {
Redirect(routes.ListIndices.index(Option.empty[String]))
}
}
}
}
}
}
def submitSynonyms(indexName: String) = {
AuthenticatedAction {
Action.async {
implicit request =>
{
val synonymService = new SynonymService(new Elasticsearch)
synonymForm.bindFromRequest.fold(
errors => {
synonymService.getTopInputValues(indexName) map {
result => {
Ok(html.synonym.editor(indexName, errors, result))
}
} recover {
case _ => {
Ok(html.synonym.editor(indexName, errors, List.empty[InputTopListEntry]))
}
}
},
synonym => {
synonymService.editSynonyms(indexName, synonym.text) map {
synonymsAdded: EditSynonymsResult =>
if (synonymsAdded.hasError) Redirect(routes.Synonym.editor(indexName)).flashing("error" -> synonymsAdded.error)
else Redirect(routes.Synonym.editor(indexName)).flashing("success" -> Messages("success.synonymsAdded", synonymsAdded.reindexResult.succeeded))
}
}
)
}
}
}
}
}
|
MeiSign/Fillable
|
app/controllers/Synonym.scala
|
Scala
|
apache-2.0
| 3,228
|
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import scala.util.{Failure, Try}
import java.util.TimeZone
import java.text.DateFormat
trait DateParser extends java.io.Serializable { self =>
def parse(s: String)(implicit tz: TimeZone): Try[RichDate]
// Map the input before parsing (name from functional programming: contravariant map)
def contramap(fn: String => String): DateParser = new DateParser {
def parse(s: String)(implicit tz: TimeZone): Try[RichDate] = self.parse(fn(s))
}
def rescueWith(second: DateParser): DateParser =
new DateParser {
def parse(s: String)(implicit tz: TimeZone) =
self.parse(s).orElse(second.parse(s))
}
}
object DateParser {
/**
* This is scalding's default date parser. You can choose this by setting an implicit val DateParser. Note
* that DateParsers using SimpleDateFormat from Java are not thread-safe, thus the def here. You can cache
   * the result if you are sure it will only be used from a single thread.
*/
def default: DateParser = new DateParser {
def parse(s: String)(implicit tz: TimeZone) =
DateOps
.getDateParser(s)
.map(p => p.parse(s))
.getOrElse(Failure(new IllegalArgumentException("Could not find parser for: " + s)))
}
/** Try these Parsers in order */
def apply(items: Iterable[DateParser]): DateParser =
items.reduce(_.rescueWith(_))
/** Using the type-class pattern */
def parse(s: String)(implicit tz: TimeZone, p: DateParser): Try[RichDate] = p.parse(s)(tz)
/**
* Note that DateFormats in Java are generally not thread-safe, so you should not share the result here
* across threads
*/
implicit def from(df: DateFormat): DateParser = new DateParser {
def parse(s: String)(implicit tz: TimeZone) = Try {
df.setTimeZone(tz)
RichDate(df.parse(s))
}
}
/**
* This ignores the time-zone assuming it must be in the String
*/
def from(fn: String => RichDate) = new DateParser {
def parse(s: String)(implicit tz: TimeZone) = Try(fn(s))
}
def from(fn: (String, TimeZone) => RichDate) = new DateParser {
def parse(s: String)(implicit tz: TimeZone) = Try(fn(s, tz))
}
}
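// Illustrative usage sketch (not part of the original file): exercises the default parser,
// `contramap` and the `DateFormat` conversion described above. The date strings and the
// object name are made-up example values.
object DateParserUsageExample {
  implicit val utc: TimeZone = TimeZone.getTimeZone("UTC")
  implicit val parser: DateParser = DateParser.default

  // uses the implicit TimeZone and DateParser in scope; unparseable input yields a Failure
  val parsed: Try[RichDate] = DateParser.parse("2015-02-14")

  // pre-process the raw input before parsing, e.g. trim stray whitespace
  val trimming: DateParser = DateParser.default.contramap(_.trim)

  // a parser built from a (non-thread-safe) SimpleDateFormat, falling back to the default
  val withFallback: DateParser =
    DateParser.from(new java.text.SimpleDateFormat("yyyy/MM/dd")).rescueWith(DateParser.default)
}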
/**
 * Scalding used to support Natty; this is removed. To add it back, use something like this in your code,
 * possibly with:
 *   implicit val myParser = DateParser(Seq(DateParser.default, NattyParser))
 *
 * object NattyParser extends DateParser {
 *   def parse(s: String)(implicit tz: TimeZone) = Try {
 *     val timeParser = new natty.Parser(tz)
 *     val dateGroups = timeParser.parse(s)
 *     if (dateGroups.size == 0) {
 *       throw new IllegalArgumentException("Could not convert string: '" + s + "' into a date.")
 *     }
 *     // a DateGroup can have more than one Date (e.g. if you do "Sept. 11th or 12th"),
 *     // but we're just going to take the first
 *     val dates = dateGroups.get(0).getDates()
 *     RichDate(dates.get(0))
 *   }
 * }
 */
|
twitter/scalding
|
scalding-date/src/main/scala/com/twitter/scalding/DateParser.scala
|
Scala
|
apache-2.0
| 3,391
|
package acceptance.support
import cucumber.api.scala.ScalaDsl
trait Env extends ScalaDsl {
val db = Mongo("authorization-acceptance-tests")
val authCollection = db.createCollection("authorization")
val host = "http://localhost:9000"
Before {
s =>
db.removeCollection(authCollection)
println("data reset")
      Context.world.clear() // `.empty` only built a fresh map; `clear()` actually resets the shared state
}
After {
s =>
db.removeCollection(authCollection)
println("data reset")
}
}
object Context {
var world: scala.collection.mutable.Map[String, Any] = scala.collection.mutable.Map.empty[String, Any]
}
|
tvlive/tv-auth
|
test/acceptance/support/Env.scala
|
Scala
|
apache-2.0
| 585
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.table
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{Types, ValidationException}
import org.apache.flink.table.planner.expressions.utils.{Func18, FuncWithOpen, RichFunc2}
import org.apache.flink.table.planner.runtime.utils.StreamingWithStateTestBase.StateBackendMode
import org.apache.flink.table.planner.runtime.utils.TestData._
import org.apache.flink.table.planner.runtime.utils._
import org.apache.flink.table.planner.utils.{PojoTableFunc, RF, RichTableFunc1, TableFunc0, TableFunc2, TableFunc3, TableFunc6, TableFunc7, VarArgsFunc0}
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import java.lang.{Boolean => JBoolean}
import scala.collection.mutable
@RunWith(classOf[Parameterized])
class CorrelateITCase(mode: StateBackendMode) extends StreamingWithStateTestBase(mode) {
@Test
def testCrossJoin(): Unit = {
val t = testData(env).toTable(tEnv).as("a", "b", "c")
val func0 = new TableFunc0
val pojoFunc0 = new PojoTableFunc()
val result = t
.joinLateral(func0('c) as('d, 'e))
.select('c, 'd, 'e)
.joinLateral(pojoFunc0('c))
.where('age > 20)
.select('c, 'name, 'age)
.toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
val expected = mutable.MutableList("Jack#22,Jack,22", "Anna#44,Anna,44")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testLeftOuterJoinWithoutPredicates(): Unit = {
val t = testData(env).toTable(tEnv).as("a", "b", "c")
val func0 = new TableFunc0
val result = t
.leftOuterJoinLateral(func0('c) as('d, 'e))
.select('c, 'd, 'e)
.toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
val expected = mutable.MutableList(
"nosharp,null,null", "Jack#22,Jack,22",
"John#19,John,19", "Anna#44,Anna,44")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
/**
* Common join predicates are temporarily forbidden (see FLINK-7865).
*/
@Test (expected = classOf[ValidationException])
def testLeftOuterJoinWithPredicates(): Unit = {
val t = testData(env).toTable(tEnv).as("a", "b", "c")
val func0 = new TableFunc0
val result = t
.leftOuterJoinLateral(func0('c) as ('s, 'l), 'a === 'l)
.select('c, 's, 'l)
.toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
val expected = "John#19,null,null\n" + "John#22,null,null\n" + "Anna44,null,null\n" +
"nosharp,null,null"
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testUserDefinedTableFunctionWithScalarFunction(): Unit = {
val t = testData(env).toTable(tEnv).as("a", "b", "c")
val func0 = new TableFunc0
val result = t
.joinLateral(func0('c) as('d, 'e))
.where(Func18('d, "J"))
.select('c, 'd, 'e)
.toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
val expected = mutable.MutableList("Jack#22,Jack,22", "John#19,John,19")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testUserDefinedTableFunctionWithParameter(): Unit = {
val tableFunc1 = new RichTableFunc1
tEnv.registerFunction("RichTableFunc1", tableFunc1)
UserDefinedFunctionTestUtils.setJobParameters(env, Map("word_separator" -> " "))
val result = failingDataSource(smallTupleData3)
.toTable(tEnv, 'a, 'b, 'c)
.joinLateral(tableFunc1('c) as 's)
.select('a, 's)
val sink = new TestingAppendSink
result.toAppendStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList("3,Hello", "3,world")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testUserDefinedTableFunctionWithUserDefinedScalarFunction(): Unit = {
val tableFunc1 = new RichTableFunc1
val richFunc2 = new RichFunc2
tEnv.registerFunction("RichTableFunc1", tableFunc1)
tEnv.registerFunction("RichFunc2", richFunc2)
UserDefinedFunctionTestUtils.setJobParameters(
env,
Map("word_separator" -> "#", "string.value" -> "test"))
val result = failingDataSource(smallTupleData3)
.toTable(tEnv, 'a, 'b, 'c)
.joinLateral(tableFunc1(richFunc2('c)) as 's)
.select('a, 's)
val sink = new TestingAppendSink
result.toAppendStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList(
"1,Hi",
"1,test",
"2,Hello",
"2,test",
"3,Hello world",
"3,test")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testTableFunctionConstructorWithParams(): Unit = {
val t = testData(env).toTable(tEnv).as("a", "b", "c")
val config = Map("key1" -> "value1", "key2" -> "value2")
val func30 = new TableFunc3(null)
val func31 = new TableFunc3("OneConf_")
val func32 = new TableFunc3("TwoConf_", config)
val result = t
.joinLateral(func30('c) as('d, 'e))
.select('c, 'd, 'e)
.joinLateral(func31('c) as ('f, 'g))
.select('c, 'd, 'e, 'f, 'g)
.joinLateral(func32('c) as ('h, 'i))
.select('c, 'd, 'f, 'h, 'e, 'g, 'i)
.toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
val expected = mutable.MutableList(
"Anna#44,Anna,OneConf_Anna,TwoConf__key=key1_value=value1_Anna,44,44,44",
"Anna#44,Anna,OneConf_Anna,TwoConf__key=key2_value=value2_Anna,44,44,44",
"Jack#22,Jack,OneConf_Jack,TwoConf__key=key1_value=value1_Jack,22,22,22",
"Jack#22,Jack,OneConf_Jack,TwoConf__key=key2_value=value2_Jack,22,22,22",
"John#19,John,OneConf_John,TwoConf__key=key1_value=value1_John,19,19,19",
"John#19,John,OneConf_John,TwoConf__key=key2_value=value2_John,19,19,19"
)
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testTableFunctionWithVariableArguments(): Unit = {
val varArgsFunc0 = new VarArgsFunc0
tEnv.registerFunction("VarArgsFunc0", varArgsFunc0)
val result = testData(env)
.toTable(tEnv, 'a, 'b, 'c)
.select('c)
.joinLateral(varArgsFunc0("1", "2", 'c))
val sink = new TestingAppendSink
result.toAppendStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList(
"Anna#44,1",
"Anna#44,2",
"Anna#44,Anna#44",
"Jack#22,1",
"Jack#22,2",
"Jack#22,Jack#22",
"John#19,1",
"John#19,2",
"John#19,John#19",
"nosharp,1",
"nosharp,2",
"nosharp,nosharp")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
val result1 = testData(env)
.toTable(tEnv, 'a, 'b, 'c)
.select('c)
.joinLateral(varArgsFunc0("1", "2"))
val sink1 = new TestingAppendSink
result1.toAppendStream[Row].addSink(sink1)
env.execute()
val expected1 = mutable.MutableList(
"Anna#44,1",
"Anna#44,2",
"Jack#22,1",
"Jack#22,2",
"John#19,1",
"John#19,2",
"nosharp,1",
"nosharp,2")
assertEquals(expected1.sorted, sink1.getAppendResults.sorted)
// Test for empty cases
val result2 = testData(env)
.toTable(tEnv, 'a, 'b, 'c)
.select('c)
.joinLateral(varArgsFunc0())
val sink2 = new TestingAppendSink
result2.toAppendStream[Row].addSink(sink2)
env.execute()
assertTrue(sink2.getAppendResults.isEmpty)
}
@Test
def testRowType(): Unit = {
val row = Row.of(
12.asInstanceOf[Integer],
true.asInstanceOf[JBoolean],
Row.of(1.asInstanceOf[Integer], 2.asInstanceOf[Integer], 3.asInstanceOf[Integer])
)
val rowType = Types.ROW(Types.INT, Types.BOOLEAN, Types.ROW(Types.INT, Types.INT, Types.INT))
val in = env.fromElements(row, row)(rowType).toTable(tEnv).as("a", "b", "c")
val tableFunc = new TableFunc6()
val result = in
.joinLateral(tableFunc('c) as ('f0, 'f1, 'f2))
.select('c, 'f2)
val sink = new TestingAppendSink
result.toAppendStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList(
"1,2,3,3",
"1,2,3,3")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testTableFunctionCollectorOpenClose(): Unit = {
val t = testData(env).toTable(tEnv).as("a", "b", "c")
val func0 = new TableFunc0
val func26 = new FuncWithOpen
tEnv.registerFunction("func26", func26)
val result = t
.joinLateral(func0('c) as('d, 'e))
.where(func26('e))
.select('c, 'd, 'e)
.toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
val expected = Seq (
"Jack#22,Jack,22",
"John#19,John,19",
"Anna#44,Anna,44"
)
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testTableFunctionCollectorInit(): Unit = {
val t = testData(env).toTable(tEnv).as("a", "b", "c")
val func0 = new TableFunc0
// this case will generate 'timestamp' member field and 'DateFormatter'
val result = t
.joinLateral(func0('c) as('d, 'e))
.where(dateFormat(currentTimestamp(), "yyyyMMdd") === 'd)
.select('c, 'd, 'e)
.toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
assertTrue(sink.getAppendResults.isEmpty)
}
@Test
def testFlatMap(): Unit = {
val func2 = new TableFunc2
val ds = testData(env).toTable(tEnv, 'a, 'b, 'c)
// test non alias
.flatMap(func2('c))
.select('f0, 'f1)
// test the output field name of flatMap is the same as the field name of the input table
.flatMap(func2(concat('f0, "#")))
.as ("f0", "f1")
.select('f0, 'f1)
val sink = new TestingAppendSink
ds.toAppendStream[Row].addSink(sink)
env.execute()
val expected = mutable.MutableList(
"Jack,4",
"22,2",
"John,4",
"19,2",
"Anna,4",
"44,2")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testMultipleEvals(): Unit = {
val rf = new RF
val tf = new TableFunc7
val row = Row.of(
12.asInstanceOf[Integer],
true.asInstanceOf[JBoolean],
Row.of(1.asInstanceOf[Integer], 2.asInstanceOf[Integer], 3.asInstanceOf[Integer])
)
val rowType = Types.ROW(Types.INT, Types.BOOLEAN, Types.ROW(Types.INT, Types.INT, Types.INT))
val in = env.fromElements(row, row)(rowType).toTable(tEnv).as("a", "b", "c")
val result = in.select(rf('a) as 'd).joinLateral(tf('d) as 'e)
val sink = new TestingAppendSink
result.toAppendStream[Row].addSink(sink)
env.execute()
}
private def testData(
env: StreamExecutionEnvironment)
: DataStream[(Int, Long, String)] = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "Jack#22"))
data.+=((2, 2L, "John#19"))
data.+=((3, 2L, "Anna#44"))
data.+=((4, 3L, "nosharp"))
env.fromCollection(data)
}
}
|
hequn8128/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/stream/table/CorrelateITCase.scala
|
Scala
|
apache-2.0
| 12,143
|
package dotty.tools.dotc
package core
import Symbols._, Types._, util.Positions._, Contexts._, Constants._, ast.tpd._
object Annotations {
abstract class Annotation {
def tree(implicit ctx: Context): Tree
def symbol(implicit ctx: Context): Symbol = tree.tpe.typeSymbol
def matches(cls: Symbol)(implicit ctx: Context): Boolean = symbol.derivesFrom(cls)
def appliesToModule: Boolean = true // for now; see remark in SymDenotations
def derivedAnnotation(tree: Tree)(implicit ctx: Context) =
if (tree eq this.tree) this else Annotation(tree)
}
case class ConcreteAnnotation(t: Tree) extends Annotation {
def tree(implicit ctx: Context): Tree = t
}
case class LazyAnnotation(sym: Symbol)(treeFn: Context => Tree) extends Annotation {
private var myTree: Tree = null
def tree(implicit ctx: Context) = {
if (myTree == null) myTree = treeFn(ctx)
myTree
}
override def symbol(implicit ctx: Context): Symbol = sym
}
object Annotation {
def apply(tree: Tree) = ConcreteAnnotation(tree)
def apply(cls: ClassSymbol, arg: Tree)(implicit ctx: Context): Annotation =
apply(cls, arg :: Nil)
def apply(cls: ClassSymbol, arg1: Tree, arg2: Tree)(implicit ctx: Context): Annotation =
apply(cls, arg1 :: arg2 :: Nil)
def apply(cls: ClassSymbol, args: List[Tree])(implicit ctx: Context): Annotation =
apply(cls.typeRef, args)
def apply(atp: Type, arg: Tree)(implicit ctx: Context): Annotation =
apply(atp, arg :: Nil)
def apply(atp: Type, arg1: Tree, arg2: Tree)(implicit ctx: Context): Annotation =
apply(atp, arg1 :: arg2 :: Nil)
def apply(atp: Type, args: List[Tree])(implicit ctx: Context): Annotation =
apply(New(atp, args))
def deferred(sym: Symbol, treeFn: Context => Tree)(implicit ctx: Context): Annotation =
new LazyAnnotation(sym)(treeFn)
def deferred(atp: Type, args: List[Tree])(implicit ctx: Context): Annotation =
deferred(atp.classSymbol, implicit ctx => New(atp, args))
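    // Illustrative sketch (editor's addition, not part of dotty): a deferred annotation records
    // the class symbol eagerly but only builds its `New(atp, args)` tree the first time `tree`
    // is forced, e.g. (given an implicit Context):
    //   val ann = Annotation.deferred(defn.ThrowsAnnot.typeRef, Nil)
    //   ann.symbol // available without forcing the tree
    //   ann.tree   // runs the deferred construction once and caches it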
def makeAlias(sym: TermSymbol)(implicit ctx: Context) =
apply(defn.AliasAnnot, List(Ident(TermRef.withSig(sym.owner.thisType, sym.name, sym.signature, sym))))
def makeChild(sym: Symbol)(implicit ctx: Context) =
apply(defn.ChildAnnot.typeRef.appliedTo(sym.owner.thisType.select(sym.name, sym)), Nil)
}
def ThrowsAnnotation(cls: ClassSymbol)(implicit ctx: Context) = {
val tref = cls.typeRef
Annotation(defn.ThrowsAnnot.typeRef.appliedTo(tref), Ident(tref))
}
}
|
DarkDimius/dotty
|
src/dotty/tools/dotc/core/Annotations.scala
|
Scala
|
bsd-3-clause
| 2,532
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.funspec
import scala.concurrent.{ExecutionContext, Promise, Future}
import org.scalatest._
import SharedHelpers.EventRecordingReporter
import org.scalatest.concurrent.SleepHelper
import org.scalatest.events.{InfoProvided, MarkupProvided}
import scala.util.Success
import org.scalatest
import org.scalatest.funspec
class FixtureAsyncFunSpecLikeSpec2 extends scalatest.funspec.AsyncFunSpec {
describe("AsyncFunSpecLike") {
it("can be used for tests that return Future under parallel async test execution") {
class ExampleSpec extends funspec.FixtureAsyncFunSpecLike with ParallelTestExecution {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
val a = 1
it("test 1") { fixture =>
Future {
assert(a == 1)
}
}
it("test 2") { fixture =>
Future {
assert(a == 2)
}
}
it("test 3") { fixture =>
Future {
pending
}
}
it("test 4") { fixture =>
Future {
cancel()
}
}
ignore("test 5") { fixture =>
Future {
cancel()
}
}
override def newInstance = new ExampleSpec
}
val rep = new EventRecordingReporter
val spec = new ExampleSpec
val status = spec.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(repo.testStartingEventsReceived.length == 4)
assert(repo.testSucceededEventsReceived.length == 1)
assert(repo.testSucceededEventsReceived(0).testName == "test 1")
assert(repo.testFailedEventsReceived.length == 1)
assert(repo.testFailedEventsReceived(0).testName == "test 2")
assert(repo.testPendingEventsReceived.length == 1)
assert(repo.testPendingEventsReceived(0).testName == "test 3")
assert(repo.testCanceledEventsReceived.length == 1)
assert(repo.testCanceledEventsReceived(0).testName == "test 4")
assert(repo.testIgnoredEventsReceived.length == 1)
assert(repo.testIgnoredEventsReceived(0).testName == "test 5")
}
}
it("can be used for tests that did not return Future under parallel async test execution") {
class ExampleSpec extends funspec.FixtureAsyncFunSpecLike with ParallelTestExecution {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
val a = 1
it("test 1") { fixture =>
assert(a == 1)
}
it("test 2") { fixture =>
assert(a == 2)
}
it("test 3") { fixture =>
pending
}
it("test 4") { fixture =>
cancel()
}
ignore("test 5") { fixture =>
cancel()
}
override def newInstance = new ExampleSpec
}
val rep = new EventRecordingReporter
val spec = new ExampleSpec
val status = spec.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(repo.testStartingEventsReceived.length == 4)
assert(repo.testSucceededEventsReceived.length == 1)
assert(repo.testSucceededEventsReceived(0).testName == "test 1")
assert(repo.testFailedEventsReceived.length == 1)
assert(repo.testFailedEventsReceived(0).testName == "test 2")
assert(repo.testPendingEventsReceived.length == 1)
assert(repo.testPendingEventsReceived(0).testName == "test 3")
assert(repo.testCanceledEventsReceived.length == 1)
assert(repo.testCanceledEventsReceived(0).testName == "test 4")
assert(repo.testIgnoredEventsReceived.length == 1)
assert(repo.testIgnoredEventsReceived(0).testName == "test 5")
}
}
it("should run tests that return Future in serial by default") {
@volatile var count = 0
class ExampleSpec extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
it("test 1") { fixture =>
Future {
SleepHelper.sleep(30)
assert(count == 0)
count = 1
Succeeded
}
}
it("test 2") { fixture =>
Future {
assert(count == 1)
SleepHelper.sleep(50)
count = 2
Succeeded
}
}
it("test 3") { fixture =>
Future {
assert(count == 2)
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(repo.testStartingEventsReceived.length == 3)
assert(repo.testSucceededEventsReceived.length == 3)
}
}
it("should run tests that does not return Future in serial by default") {
@volatile var count = 0
class ExampleSpec extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
it("test 1") { fixture =>
SleepHelper.sleep(30)
assert(count == 0)
count = 1
Succeeded
}
it("test 2") { fixture =>
assert(count == 1)
SleepHelper.sleep(50)
count = 2
Succeeded
}
it("test 3") { fixture =>
assert(count == 2)
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(repo.testStartingEventsReceived.length == 3)
assert(repo.testSucceededEventsReceived.length == 3)
}
}
// SKIP-SCALATESTJS,NATIVE-START
it("should run tests and its future in same main thread when use SerialExecutionContext") {
var mainThread = Thread.currentThread
var test1Thread: Option[Thread] = None
var test2Thread: Option[Thread] = None
var onCompleteThread: Option[Thread] = None
class ExampleSpec extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
it("test 1") { fixture =>
Future {
test1Thread = Some(Thread.currentThread)
succeed
}
}
it("test 2") { fixture =>
Future {
test2Thread = Some(Thread.currentThread)
succeed
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.whenCompleted { s =>
onCompleteThread = Some(Thread.currentThread)
}
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(test1Thread.isDefined)
assert(test1Thread.get == mainThread)
assert(test2Thread.isDefined)
assert(test2Thread.get == mainThread)
assert(onCompleteThread.isDefined)
assert(onCompleteThread.get == mainThread)
}
}
it("should run tests and its true async future in the same thread when use SerialExecutionContext") {
var mainThread = Thread.currentThread
@volatile var test1Thread: Option[Thread] = None
@volatile var test2Thread: Option[Thread] = None
var onCompleteThread: Option[Thread] = None
class ExampleSpec extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
it("test 1") { fixture =>
val promise = Promise[Assertion]
val timer = new java.util.Timer
timer.schedule(
new java.util.TimerTask {
def run(): Unit = {
promise.complete(Success(succeed))
}
},
1000
)
promise.future.map { s =>
test1Thread = Some(Thread.currentThread)
s
}
}
it("test 2") { fixture =>
val promise = Promise[Assertion]
val timer = new java.util.Timer
timer.schedule(
new java.util.TimerTask {
def run(): Unit = {
promise.complete(Success(succeed))
}
},
500
)
promise.future.map { s =>
test2Thread = Some(Thread.currentThread)
s
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.whenCompleted { s =>
onCompleteThread = Some(Thread.currentThread)
}
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(test1Thread.isDefined)
assert(test1Thread.get == mainThread)
assert(test2Thread.isDefined)
assert(test2Thread.get == mainThread)
assert(onCompleteThread.isDefined)
assert(onCompleteThread.get == mainThread)
}
}
it("should not run out of stack space with nested futures when using SerialExecutionContext") {
class ExampleSpec extends funspec.FixtureAsyncFunSpecLike {
// Note we get a StackOverflowError with the following execution
// context.
// override implicit def executionContext: ExecutionContext = new ExecutionContext { def execute(runnable: Runnable) = runnable.run; def reportFailure(cause: Throwable) = () }
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
def sum(xs: List[Int]): Future[Int] =
xs match {
case Nil => Future.successful(0)
case x :: xs => Future(x).flatMap(xx => sum(xs).map(xxx => xx + xxx))
}
it("test 1") { fixture =>
val fut: Future[Int] = sum((1 to 50000).toList)
fut.map(total => assert(total == 1250025000))
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(!rep.testSucceededEventsReceived.isEmpty)
}
}
// SKIP-SCALATESTJS,NATIVE-END
it("should run tests that returns Future and report their result in serial") {
class ExampleSpec extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
it("test 1") { fixture =>
Future {
SleepHelper.sleep(60)
succeed
}
}
it("test 2") { fixture =>
Future {
SleepHelper.sleep(30)
succeed
}
}
it("test 3") { fixture =>
Future {
succeed
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testStartingEventsReceived(0).testName == "test 1")
assert(rep.testStartingEventsReceived(1).testName == "test 2")
assert(rep.testStartingEventsReceived(2).testName == "test 3")
assert(rep.testSucceededEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testSucceededEventsReceived(1).testName == "test 2")
assert(rep.testSucceededEventsReceived(2).testName == "test 3")
}
}
it("should run tests that does not return Future and report their result in serial") {
class ExampleSpec extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
it("test 1") { fixture =>
SleepHelper.sleep(60)
succeed
}
it("test 2") { fixture =>
SleepHelper.sleep(30)
succeed
}
it("test 3") { fixture =>
succeed
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(rep) }
promise.future.map { repo =>
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testStartingEventsReceived(0).testName == "test 1")
assert(rep.testStartingEventsReceived(1).testName == "test 2")
assert(rep.testStartingEventsReceived(2).testName == "test 3")
assert(rep.testSucceededEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testSucceededEventsReceived(1).testName == "test 2")
assert(rep.testSucceededEventsReceived(2).testName == "test 3")
}
}
it("should send an InfoProvided event for an info in main spec body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
info(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 1)
assert(infoList(0).message == "hi there")
}
}
it("should send an InfoProvided event for an info in scope body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
info(
"hi there"
)
it("test 1") { fixture => succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 1)
assert(infoList(0).message == "hi there")
}
}
it("should send an InfoProvided event for an info in test body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
it("test 1") { fixture =>
info("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[InfoProvided])
val infoProvided = recordedEvent.asInstanceOf[InfoProvided]
assert(infoProvided.message == "hi there")
}
}
it("should send an InfoProvided event for an info in Future returned by test body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
it("test 1") { fixture =>
Future {
info("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[InfoProvided])
val infoProvided = recordedEvent.asInstanceOf[InfoProvided]
assert(infoProvided.message == "hi there")
}
}
it("should send a NoteProvided event for a note in main spec body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
note(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
}
it("should send a NoteProvided event for a note in scope body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
note(
"hi there"
)
it("test 1") { fixture => succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
}
it("should send a NoteProvided event for a note in test body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
it("test 1") { fixture =>
note("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
}
it("should send a NoteProvided event for a note in Future returned by test body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
it("test 1") { fixture =>
Future {
note("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
}
it("should send an AlertProvided event for an alert in main spec body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
alert(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
}
it("should send an AlertProvided event for an alert in scope body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
alert(
"hi there"
)
it("test 1") { fixture => succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
}
it("should send an AlertProvided event for an alert in test body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
it("test 1") { fixture =>
alert("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
}
it("should send an AlertProvided event for an alert in Future returned by test body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
it("test 1") { fixture =>
Future {
alert("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
}
it("should send a MarkupProvided event for a markup in main spec body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
markup(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 1)
assert(markupList(0).text == "hi there")
}
}
it("should send a MarkupProvided event for a markup in scope body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
markup(
"hi there"
)
it("test 1") { fixture => succeed }
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 1)
assert(markupList(0).text == "hi there")
}
}
it("should send a MarkupProvided event for a markup in test body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
it("test 1") { fixture =>
markup("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[MarkupProvided])
val markupProvided = recordedEvent.asInstanceOf[MarkupProvided]
assert(markupProvided.text == "hi there")
}
}
it("should send a MarkupProvided event for a markup in Future returned by test body") {
class MySuite extends funspec.FixtureAsyncFunSpecLike {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
describe("test feature") {
it("test 1") { fixture =>
Future {
markup("hi there")
succeed
}
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { repo =>
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[MarkupProvided])
val markupProvided = recordedEvent.asInstanceOf[MarkupProvided]
assert(markupProvided.text == "hi there")
}
}
it("should allow other execution context to be used") {
class TestSpec extends funspec.FixtureAsyncFunSpecLike {
// SKIP-SCALATESTJS,NATIVE-START
override implicit val executionContext = scala.concurrent.ExecutionContext.Implicits.global
// SKIP-SCALATESTJS,NATIVE-END
// SCALATESTJS-ONLY override implicit val executionContext = scala.scalajs.concurrent.JSExecutionContext.runNow
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
val a = 1
describe("feature 1") {
it("test A") { fixture =>
Future { assert(a == 1) }
}
}
describe("feature 2") {
it("test B") { fixture =>
Future { assert(a == 1) }
}
}
describe("group3") {
it("test C") { fixture =>
Future { assert(a == 1) }
}
}
}
val suite = new TestSpec
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
val promise = Promise[EventRecordingReporter]
status whenCompleted { _ => promise.success(reporter) }
promise.future.map { r =>
assert(reporter.scopeOpenedEventsReceived.length == 3)
assert(reporter.scopeClosedEventsReceived.length == 3)
assert(reporter.testStartingEventsReceived.length == 3)
assert(reporter.testSucceededEventsReceived.length == 3)
}
}
}
}
|
scalatest/scalatest
|
jvm/funspec-test/src/test/scala/org/scalatest/funspec/FixtureAsyncFunSpecLikeSpec2.scala
|
Scala
|
apache-2.0
| 31,206
|
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.util
import com.mohiva.play.silhouette.api.util.PasswordInfo
import play.api.test.PlaySpecification
/**
* Test case for the [[BCryptPasswordHasher]] class.
*/
class BCryptPasswordHasherSpec extends PlaySpecification {
"The `hash` method" should {
"hash a password" in {
val password = "my_S3cr3t_p@sswQrd"
val hasher = new BCryptPasswordHasher
val info = hasher.hash(password)
info must beAnInstanceOf[PasswordInfo]
info.hasher must be equalTo BCryptPasswordHasher.ID
info.password must not be equalTo(password)
info.salt must beNone
}
}
"The `matches` method" should {
"return true if a password matches a previous hashed password" in {
val password = "my_S3cr3t_p@sswQrd"
val hasher = new BCryptPasswordHasher
val info = hasher.hash(password)
hasher.matches(info, password) must beTrue
}
"return false if a password doesn't match a previous hashed password" in {
val password = "my_S3cr3t_p@sswQrd"
val hasher = new BCryptPasswordHasher
val info = hasher.hash(password)
hasher.matches(info, "not-equal") must beFalse
}
}
}
|
cemcatik/play-silhouette
|
silhouette/test/com/mohiva/play/silhouette/impl/util/BCryptPasswordHasherSpec.scala
|
Scala
|
apache-2.0
| 1,829
|
/*
*
* * Copyright (c) 2014-2016. National Institute of Advanced Industrial Science and Technology (AIST)
* * All rights reserved.
*
*/
package jp.go.aist.cspe
import jp.go.aist.cspe.CSPE._
private[cspe] class ParamPrefixRelaxed(f0: PartialFunction[AbsEvent, Process], id0 : Int) extends Process {
val id = id0
private val f = f0
override def acceptPrim(e: AbsEvent): ProcessSet =
processSet(List(if (f.isDefinedAt(e)) f(e) else this)) // note 'this' instead of 'Failure'
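  // Editor's note: returning `this` for events outside the partial function means unmatched
  // events are simply ignored (the process stays in place) instead of driving it to Failure;
  // that is the "Relaxed" behaviour this class provides.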
override def canTerminate = false
def canEqual(other: Any): Boolean = other.isInstanceOf[ParamPrefix]
override def equals(other: Any): Boolean = other match {
case that: ParamPrefix =>
(that canEqual this) &&
id == that.id
case _ => false
}
override def hashCode(): Int = {
id
}
}
|
yoriyuki/cspe
|
src/main/scala/jp/go/aist/cspe/ParamPrefixRelaxed.scala
|
Scala
|
bsd-3-clause
| 814
|
package org.globalnames
package matcher
class MatcherSpec extends SpecConfig {
val matcher = Matcher(Seq("Aaadonta angaurana",
"Aaadonta constricta",
"Aaadonta constricta babelthuapi",
"Abacetus cyclomous",
"Abacetus cyclomus"),
canonicalNamesTransducerMaxDistance = 1,
canonicalNamesStemsTransducerMaxDistance = 4)
"find matches correctly" in {
matcher.findMatches("Aaadonta angaurana") should contain only Candidate("Aaadonta angaurana", 0)
matcher.findMatches("Aaadonta angauranA") should contain only Candidate("Aaadonta angaurana", 0)
matcher.findMatches("AaadontX angauranX") should contain only Candidate("Aaadonta angaurana", 2)
matcher.findMatches("Abacetus cyclomoXX") should contain only (
Candidate("Abacetus cyclomus", 3), Candidate("Abacetus cyclomous", 2)
)
}
"handle empty request" in {
matcher.findMatches("") shouldBe empty
}
}
|
GlobalNamesArchitecture/gnmatcher
|
matcher/src/test/scala/org/globalnames/matcher/MatcherSpec.scala
|
Scala
|
mit
| 1,049
|
package reopp.common.benchmarks
import reopp.common.guardedcommands.dataconnectors.ConstraintGen._
import z3.scala.{Z3AST, Z3Config, Z3Context}
import reopp.common.guardedcommands.z3.Z3
import choco.kernel.model.variables.integer.IntegerExpressionVariable
import choco.Choco
import reopp.common.guardedcommands.Formula
import reopp.common.{OptionSol, Solution, IntFunction, IntPredicate}
/**
* Created with IntelliJ IDEA.
*
* Created by jose on 13/06/13.
*/
object AllNewTemp extends App {
/// PARSE ARGUMENTS ///
Warmup.go
val n = if (!args.isEmpty) Integer.parseInt(args(0))
else 3
val satfull = if (args.size > 1) args(1) startsWith "s"
else false
val chocosat = if (args.size > 1) args(1) startsWith "cs"
else false
val choco = if (args.size > 1) (args(1) startsWith "c") && !chocosat
else false
val z3sat = if (args.size > 1) args(1) startsWith "zs"
else false
val z3 = if (args.size > 1) (args(1) startsWith "z") && !z3sat
else false
val quicksat = if (args.size > 1) args(1) startsWith "q"
else false
val lazyy = if (args.size > 1) args(1) startsWith "l"
else false
val justInit = if (args.size > 2) args(2) startsWith "i"
else false
///////////////////
/// The PROBLEM ///
///////////////////
val f2c = new Farhn2Celc
val isF = new IsFarhn
val isC = new IsCelc
val isNight = new IsNight
val getTime = new GetTime
val getTemp = new GetTemp
def genRouters(height:Int): Formula= {
var snks = List("h")
var res = Formula()
for (level <- 1 to height) {
var newsnks = List[String]()
for (x <- snks) {
res ++= exrouter(x,x+"'",x+",")
newsnks :::= List(x+"'",x+",")
}
snks = newsnks
}
for (snk <- snks) {
res ++= transf(snk,snk+"-modified",f2c) ++ reader(snk+"-modified",1)
}
(new scala.util.Random).nextDouble()
res
}
def problem =
transf("a","b",getTime) ++
negfilter("b","c",isNight) ++
transf("a","d",getTemp) ++
filter("d","e",isF) ++
filter("d","g1",isC) ++
transf("e","g2",f2c) ++
merger("g1","g2","g") ++
sync("g","h") ++
sdrain("c","h") ++
writer("a",List(22115)) ++
genRouters(n)
// 15 C @ 8:35am --> (8*60 + 35) + 15*1440 = 22115
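  // Worked decoding of that packed value (editor's note, matching GetTime/GetTemp below):
  //   time = 22115 % 1440 = 515 = 8*60 + 35   (8:35am)
  //   temp = (22115 - 515) / 1440 = 21600 / 1440 = 15   (15 C)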
// val problem =
// nexrouter("start", (for (x <- 1 to n) yield "display"+x).toList) ++
// writer("start",List(1075))
/////////////////////////
/// Running the tests ///
/////////////////////////
if (justInit) problem.justInit = true
if (satfull) {
val time = System.currentTimeMillis()
val res = problem.solveIterative
val spent = System.currentTimeMillis() - time
print(spent)
}
else if (chocosat) {
val time = System.currentTimeMillis()
val res = problem.solveChocoSat
val spent = System.currentTimeMillis() - time
print(spent)
}
else if (choco) {
val time = System.currentTimeMillis()
val res = problem.solveChoco
val spent = System.currentTimeMillis() - time
print(spent)
}
else if (z3sat) {
val z3 = new Z3Context(new Z3Config("MODEL" -> true))
val time = System.currentTimeMillis()
val res = problem.quickDataSolveZ3(z3)
val spent = System.currentTimeMillis() - time
print(spent)
}
else if (z3) {
val z3 = new Z3Context(new Z3Config("MODEL" -> true))
val time = System.currentTimeMillis()
val res = Z3.solvez3(Z3.gc2z3(problem,z3),z3)
val spent = System.currentTimeMillis() - time
print(spent)
}
else if (quicksat) {
val time = System.currentTimeMillis()
val res = problem.quickDataSolveSAT4J
val spent = System.currentTimeMillis() - time
print(spent)
}
else if (lazyy) {
val time = System.currentTimeMillis()
val res = problem.solveChocoPredAbstVarOrdered
val spent = System.currentTimeMillis() - time
print(spent)
}
/// EXPERIMENTS:
else {
// println(" # THE PROBLEM:\\n"+problem.commands.mkString(" - ","\\n - ","\\n"))
var time: Long = 0
var res: OptionSol[Solution[_]] = null
var spent: Long = 0
// //// QUICK-SAT ////
// time = System.currentTimeMillis()
// res = problem.quickDataSolve
// spent = System.currentTimeMillis() - time
// if (res.isDefined) println("quick-sat - solved in "+spent+" ms:\\n"+res.get)
// else println("quick-sat - no solution (in "+spent+" ms)")
// println("quick-sat - "+spent)
//
// //// SAT-FULL ////
// time = System.currentTimeMillis()
// res = problem.solveIterative
// spent = System.currentTimeMillis() - time
// // if (res.isDefined) println("SAT-full - solved in "+spent+" ms:\\n"+res.get.pretty)
// // else println("SAT-full - no solution (in "+spent+" ms)")
// println("SAT-full - "+spent)
//
// //// SATC-FULL ////
// time = System.currentTimeMillis()
// res = problem.solveChocoSat
// spent = System.currentTimeMillis() - time
// // if (res.isDefined) println("SATC-full - solved in "+spent+" ms:\\n"+res.get.pretty)
// // else println("SATC-full - no solution (in "+spent+" ms)")
// println("SATC-full - "+spent)
//
// //// CHOCO ////
// time = System.currentTimeMillis()
// res = problem.solveChoco
// spent = System.currentTimeMillis() - time
// // if (res.isDefined) println("Choco - solved in "+spent+" ms:\\n"+res.get.pretty)
// // else println("Choco - no solution (in "+spent+" ms)")
// println("Choco - "+spent)
/// Z3 ////
val z3 = new Z3Context(new Z3Config("MODEL" -> true))
time = System.currentTimeMillis()
res = Z3.solvez3(Z3.gc2z3(problem,z3),z3)
spent = System.currentTimeMillis() - time
if (res.isDefined) println("Z3 - solved in "+spent+" ms:\\n"+res.get)
else println("Z3 - no solution (in "+spent+" ms)")
println("Z3 - "+spent)
//// QUICK-SAT-Z3 ////
val zz3 = new Z3Context(new Z3Config("MODEL" -> true))
time = System.currentTimeMillis()
res = problem.quickDataSolveZ3(zz3)
spent = System.currentTimeMillis() - time
if (res.isDefined) println("quick-z3 - solved in "+spent+" ms:\\n"+res.get)
else println("quick-z3 - no solution (in "+spent+" ms)")
println("quick-z3 - "+spent)
// //// LAZY-SAT ////
// time = System.currentTimeMillis()
// res = problem.lazyDataSolve
// spent = System.currentTimeMillis() - time
// // if (res.isDefined) println("lazy-sat - solved in "+spent+" ms:\\n"+res.get.pretty)
// // else println("lazy-sat - no solution (in "+spent+" ms)")
// println("lazy-sat - "+spent)
}
class IsFarhn extends IntPredicate {
// >= 500
val funPred = (x:Int) => (x >= 500)
val choPred = (x:IntegerExpressionVariable) => Choco.geq(x,500)
val z3Pred = (z:Z3Context,v:Z3AST) => z.mkGE(v,z.mkInt(500,z.mkIntSort()))
override def toString = "IsFarhn"
}
class IsCelc extends IntPredicate {
// < 500
val funPred = (x:Int) => (x < 500)
val choPred = (x:IntegerExpressionVariable) => Choco.lt(x,500)
val z3Pred = (z:Z3Context,v:Z3AST) => z.mkLT(v,z.mkInt(500,z.mkIntSort()))
override def toString = "IsCelc"
}
class IsNight extends IntPredicate {
// from 7pm (1140) to 7am (420) x>1140 or x<420
val choPred = (x:IntegerExpressionVariable) => Choco.or(Choco.geq(x,1140),Choco.lt(x,420))
val funPred = (x:Int) => (x >= 1140) || (x < 420)
val z3Pred = (z:Z3Context,v:Z3AST) => //z.mkGT(v,z.mkInt(i,z.mkIntSort()))
z.mkOr(
z.mkGE(v,z.mkInt(1140,z.mkIntSort())),
z.mkLT(v,z.mkInt(420,z.mkIntSort())))
override def toString = "isNight"
}
class Farhn2Celc extends IntFunction {
//ºC = (ºF - 32) x 5/9
val funFun = (x:Int) => ((x-1032) * 5 )/9
val choFun = null
val z3Fun = (z:Z3Context,v:List[Z3AST]) =>
z.mkDiv(
z.mkMul(
z.mkSub(
v.head,
z.mkInt(1032,z.mkIntSort())),
z.mkInt(5,z.mkIntSort())),
z.mkInt(9,z.mkIntSort()))
override def toString = "Farhn2Celc"
}
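  // Editor's assumption: Fahrenheit readings appear to be stored offset by 1000 (hence IsFarhn's
  // `>= 500` threshold), so (x - 1032) * 5 / 9 above is the usual (F - 32) * 5/9 applied to the
  // decoded Fahrenheit value. Treat this reading as illustrative, not authoritative.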
class GetTemp extends IntFunction {
//x = temp*1440 + time
// time = x % 1440
  // temp = (x - time) / 1440 = (x - (x % 1440)) / 1440
val funFun = (x:Int) => (x - (x % 1440)) / 1440
val choFun = null
val z3Fun = (z:Z3Context,v:List[Z3AST]) =>
z.mkDiv(
z.mkSub(
v.head,
z.mkMod(
v.head,
z.mkInt(1440,z.mkIntSort())
)
),
z.mkInt(1440,z.mkIntSort())
)
override def toString = "GetTemp"
}
class GetTime extends IntFunction {
//x = temp*1440 + time
// time = x % 1440
  // temp * 1440 = x - time = x - (x % 1440)
val funFun = (x:Int) => x % 1440
val choFun = null
val z3Fun = (z:Z3Context,v:List[Z3AST]) =>
z.mkMod(
v.head,
z.mkInt(1440,z.mkIntSort())
)
override def toString = "GetTime"
}
}
|
joseproenca/ip-constraints
|
code/src/main/scala/reopp/common/benchmarks/AllNewTemp.scala
|
Scala
|
mit
| 8,997
|
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.cassandra
import com.twitter.app.App
import com.twitter.cassie.tests.util.FakeCassandra
import com.twitter.zipkin.storage.util.SpanStoreValidator
import com.twitter.zipkin.cassandra.CassieSpanStoreFactory
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class CassieSpanStoreTest extends FunSuite {
object FakeServer extends FakeCassandra
FakeServer.start()
object CassieStore extends App with CassieSpanStoreFactory
CassieStore.main(Array("-zipkin.store.cassie.dest", "127.0.0.1:%d".format(FakeServer.port.get)))
def newSpanStore = {
FakeServer.reset()
CassieStore.newCassandraStore()
}
test("validate") {
new SpanStoreValidator(newSpanStore).validate
}
}
|
willCode2Surf/zipkin
|
zipkin-cassandra/src/test/scala/com/twitter/zipkin/storage/cassandra/CassieSpanStoreTest.scala
|
Scala
|
apache-2.0
| 1,405
|
/* sbt -- Simple Build Tool
* Copyright 2011 Mark Harrah
*/
package sbt
import java.io.File
import KeyRanks.DSetting
import sbt.io.{ GlobFilter, Path }
import sbt.internal.util.AttributeKey
object BuildPaths {
val globalBaseDirectory = AttributeKey[File](
"global-base-directory",
"The base directory for global sbt configuration and staging.",
DSetting)
val globalPluginsDirectory = AttributeKey[File]("global-plugins-directory",
"The base directory for global sbt plugins.",
DSetting)
val globalSettingsDirectory = AttributeKey[File]("global-settings-directory",
"The base directory for global sbt settings.",
DSetting)
val stagingDirectory =
AttributeKey[File]("staging-directory", "The directory for staging remote projects.", DSetting)
val dependencyBaseDirectory = AttributeKey[File](
"dependency-base-directory",
"The base directory for caching dependency resolution.",
DSetting)
val globalZincDirectory =
AttributeKey[File]("global-zinc-directory", "The base directory for Zinc internals.", DSetting)
import sbt.io.syntax._
def getGlobalBase(state: State): File = {
val default = defaultVersionedGlobalBase(binarySbtVersion(state))
def getDefault = { checkTransition(state, default); default }
getFileSetting(globalBaseDirectory, GlobalBaseProperty, getDefault)(state)
}
private[this] def checkTransition(state: State, versioned: File): Unit = {
val unversioned = defaultGlobalBase
def globalDefined(base: File): Boolean =
getGlobalPluginsDirectory(state, base).exists ||
configurationSources(getGlobalSettingsDirectory(state, base)).exists(_.exists)
val warnTransition = !globalDefined(versioned) && globalDefined(unversioned)
if (warnTransition)
state.log.warn(globalDirTransitionWarning(unversioned, versioned))
}
def getStagingDirectory(state: State, globalBase: File): File =
fileSetting(stagingDirectory, StagingProperty, defaultStaging(globalBase))(state)
def getGlobalPluginsDirectory(state: State, globalBase: File): File =
fileSetting(globalPluginsDirectory, GlobalPluginsProperty, defaultGlobalPlugins(globalBase))(
state)
def getGlobalSettingsDirectory(state: State, globalBase: File): File =
fileSetting(globalSettingsDirectory, GlobalSettingsProperty, globalBase)(state)
def getDependencyDirectory(state: State, globalBase: File): File =
fileSetting(dependencyBaseDirectory,
DependencyBaseProperty,
defaultDependencyBase(globalBase))(state)
def getZincDirectory(state: State, globalBase: File): File =
fileSetting(globalZincDirectory, GlobalZincProperty, defaultGlobalZinc(globalBase))(state)
private[this] def fileSetting(stateKey: AttributeKey[File], property: String, default: File)(
state: State): File =
getFileSetting(stateKey, property, default)(state)
def getFileSetting(stateKey: AttributeKey[File], property: String, default: => File)(
state: State): File =
state get stateKey orElse getFileProperty(property) getOrElse default
def getFileProperty(name: String): Option[File] = Option(System.getProperty(name)) flatMap {
path =>
if (path.isEmpty) None else Some(new File(path))
}
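  // Editor's sketch of the lookup order implemented by getFileSetting above: a value already set
  // on the State wins, then the JVM system property, then the computed default. For example
  // (illustrative path only), launching sbt with -Dsbt.global.base=/tmp/my-sbt-home makes
  // getGlobalBase resolve there unless the State key overrides it.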
def defaultVersionedGlobalBase(sbtVersion: String): File = defaultGlobalBase / sbtVersion
def defaultGlobalBase = Path.userHome / ConfigDirectoryName
private[this] def binarySbtVersion(state: State): String =
sbt.internal.librarymanagement.cross.CrossVersionUtil
.binarySbtVersion(state.configuration.provider.id.version)
private[this] def defaultStaging(globalBase: File) = globalBase / "staging"
private[this] def defaultGlobalPlugins(globalBase: File) = globalBase / PluginsDirectoryName
private[this] def defaultDependencyBase(globalBase: File) = globalBase / "dependency"
private[this] def defaultGlobalZinc(globalBase: File) = globalBase / "zinc"
def configurationSources(base: File): Seq[File] = (base * (GlobFilter("*.sbt") - ".sbt")).get
def pluginDirectory(definitionBase: File) = definitionBase / PluginsDirectoryName
def evalOutputDirectory(base: File) = outputDirectory(base) / "config-classes"
def outputDirectory(base: File) = base / DefaultTargetName
def projectStandard(base: File) = base / "project"
final val PluginsDirectoryName = "plugins"
final val DefaultTargetName = "target"
final val ConfigDirectoryName = ".sbt"
final val GlobalBaseProperty = "sbt.global.base"
final val StagingProperty = "sbt.global.staging"
final val GlobalPluginsProperty = "sbt.global.plugins"
final val GlobalSettingsProperty = "sbt.global.settings"
final val DependencyBaseProperty = "sbt.dependency.base"
final val GlobalZincProperty = "sbt.global.zinc"
def crossPath(base: File, instance: xsbti.compile.ScalaInstance): File =
base / ("scala_" + instance.version)
private[this] def globalDirTransitionWarning(unversioned: File, versioned: File): String =
s"""The global sbt directory is now versioned and is located at $versioned.
You are seeing this warning because there is global configuration in $unversioned but not in $versioned.
The global sbt directory may be changed via the $GlobalBaseProperty system property.
"""
}
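// Hedged sketch (not part of the original file): getFileSetting above resolves a location with a
// fixed precedence -- an explicit value stored in the State wins, then the system property, then
// the computed default. The standalone helper below restates that precedence with plain JDK types;
// its name and existence are hypothetical, for illustration only.
object GlobalBaseLookupSketch {
  import java.io.File
  def resolve(fromState: Option[File], property: String, default: => File): File =
    fromState
      .orElse(Option(System.getProperty(property)).filter(_.nonEmpty).map(new File(_)))
      .getOrElse(default)
}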
|
Duhemm/sbt
|
main/src/main/scala/sbt/BuildPaths.scala
|
Scala
|
bsd-3-clause
| 5,442
|
package s {
sealed trait C[+A]
case class C00[+A]() extends C[A]
case class C10[+A](x: A) extends C[A]
case class C20[+A](x: A, y: A) extends C[A]
case class C01[+A](xs: A*) extends C[A]
case class C11[+A](x: A, ys: A*) extends C[A]
case class C21[+A](x: A, y: A, zs: A*) extends C[A]
object E00 { def unapply[A](x: Any): Boolean = ??? }
object E10 { def unapply[A](x: Any): Option[A] = ??? }
object E20 { def unapply[A](x: Any): Option[(A, A)] = ??? }
object E01 { def unapplySeq[A](x: Any): Option[Seq[A]] = ??? }
object E11 { def unapplySeq[A](x: Any): Option[(A, Seq[A])] = ??? }
object E21 { def unapplySeq[A](x: Any): Option[(A, A, Seq[A])] = ??? }
object F00 { def unapply[A](x: C[A]): Boolean = ??? }
object F10 { def unapply[A](x: C[A]): Option[A] = ??? }
object F20 { def unapply[A](x: C[A]): Option[(A, A)] = ??? }
object F01 { def unapplySeq[A](x: C[A]): Option[Seq[A]] = ??? }
object F11 { def unapplySeq[A](x: C[A]): Option[(A, Seq[A])] = ??? }
object F21 { def unapplySeq[A](x: C[A]): Option[(A, A, Seq[A])] = ??? }
object G00 { def unapply[A](x: C00[A]): Boolean = ??? }
object G10 { def unapply[A](x: C10[A]): Option[A] = ??? }
object G20 { def unapply[A](x: C20[A]): Option[(A, A)] = ??? }
object G01 { def unapplySeq[A](x: C01[A]): Option[Seq[A]] = ??? }
object G11 { def unapplySeq[A](x: C11[A]): Option[(A, Seq[A])] = ??? }
object G21 { def unapplySeq[A](x: C21[A]): Option[(A, A, Seq[A])] = ??? }
}
import s._
package pos {
object Test {
def ga1(x: Any) = x match { case C00() => 1 ; case C10(x) => 2 ; case C20(x, y) => 3 ; case C01(xs) => 4 ; case C11(x, ys) => 5 ; case C21(x, y, zs) => 6 }
def ga2(x: Any) = x match { case C00() => 1 ; case C10(x) => 2 ; case C20(x, y) => 3 ; case C01(xs) => 4 ; case C11(x, ys) => 5 ; case C21(x, y, zs) => 6 }
def ga3(x: Any) = x match { case C00() => 1 ; case C10(x) => 2 ; case C20(x, y) => 3 ; case C01(xs) => 4 ; case C11(x, ys) => 5 ; case C21(x, y, zs) => 6 }
def ga4(x: Any) = x match { case C00() => 1 ; case C10(x) => 2 ; case C20(x, y) => 3 ; case C01(xs) => 4 ; case C11(x, ys) => 5 ; case C21(x, y, zs) => 6 }
def ga5(x: Any) = x match { case C00() => 1 ; case C10(x) => 2 ; case C20(x, y) => 3 ; case C01(xs) => 4 ; case C11(x, ys) => 5 ; case C21(x, y, zs) => 6 }
def ga6(x: Any) = x match { case C00() => 1 ; case C10(x) => 2 ; case C20(x, y) => 3 ; case C01(xs) => 4 ; case C11(x, ys) => 5 ; case C21(x, y, zs) => 6 }
def gb1[A](x: C[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gb2[A](x: C[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gb3[A](x: C[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gb4[A](x: C[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gb5[A](x: C[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gb6[A](x: C[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gc1[A](x: C[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gc2[A](x: C[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gc3[A](x: C[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gc4[A](x: C[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gc5[A](x: C[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gc6[A](x: C[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gd1[A, B <: C[A]](x: B) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gd2[A, B <: C[A]](x: B) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gd3[A, B <: C[A]](x: B) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gd4[A, B <: C[A]](x: B) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gd5[A, B <: C[A]](x: B) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gd6[A, B <: C[A]](x: B) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
}
}
package neg {
object Fail {
def gb1[A](x: C00[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gb2[A](x: C10[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gb3[A](x: C20[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gb4[A](x: C01[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gb5[A](x: C11[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gb6[A](x: C21[A]) = x match { case E00() => ??? ; case E10(x) => x ; case E20(x, y) => x ; case E01(xs @ _*) => xs.head ; case E11(x, ys @ _*) => x ; case E21(x, y, zs @ _*) => x }
def gc1[A](x: C00[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gc2[A](x: C10[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gc3[A](x: C20[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gc4[A](x: C01[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gc5[A](x: C11[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gc6[A](x: C21[A]) = x match { case F00() => ??? ; case F10(x) => x ; case F20(x, y) => x ; case F01(xs @ _*) => xs.head ; case F11(x, ys @ _*) => x ; case F21(x, y, zs @ _*) => x }
def gd1[A](x: C00[A]) = x match { case G00() => ??? ; case G10(x) => x ; case G20(x, y) => x ; case G01(xs @ _*) => xs.head ; case G11(x, ys @ _*) => x ; case G21(x, y, zs @ _*) => x }
def gd2[A](x: C10[A]) = x match { case G00() => ??? ; case G10(x) => x ; case G20(x, y) => x ; case G01(xs @ _*) => xs.head ; case G11(x, ys @ _*) => x ; case G21(x, y, zs @ _*) => x }
def gd3[A](x: C20[A]) = x match { case G00() => ??? ; case G10(x) => x ; case G20(x, y) => x ; case G01(xs @ _*) => xs.head ; case G11(x, ys @ _*) => x ; case G21(x, y, zs @ _*) => x }
def gd4[A](x: C01[A]) = x match { case G00() => ??? ; case G10(x) => x ; case G20(x, y) => x ; case G01(xs @ _*) => xs.head ; case G11(x, ys @ _*) => x ; case G21(x, y, zs @ _*) => x }
def gd5[A](x: C11[A]) = x match { case G00() => ??? ; case G10(x) => x ; case G20(x, y) => x ; case G01(xs @ _*) => xs.head ; case G11(x, ys @ _*) => x ; case G21(x, y, zs @ _*) => x }
def gd6[A](x: C21[A]) = x match { case G00() => ??? ; case G10(x) => x ; case G20(x, y) => x ; case G01(xs @ _*) => xs.head ; case G11(x, ys @ _*) => x ; case G21(x, y, zs @ _*) => x }
}
}
object Test {
def main(args: Array[String]): Unit = {
}
}
|
felixmulder/scala
|
test/files/run/patmat-behavior.scala
|
Scala
|
bsd-3-clause
| 9,573
|
package mesosphere.mesos
import java.time.Clock
import mesosphere.marathon.RichClock
import mesosphere.marathon.state.Timestamp
import org.apache.mesos.Protos.{DurationInfo, Offer}
import scala.concurrent.duration._
object Availability {
def offerAvailable(offer: Offer, drainingTime: FiniteDuration)(implicit clock: Clock): Boolean = {
val now = clock.now()
if (offerHasUnavailability(offer)) {
val start: Timestamp = offer.getUnavailability.getStart
if (currentlyInDrainingState(now, start, drainingTime)) {
isAgentOutsideUnavailabilityWindow(offer, start, now)
} else true
} else true
}
private def currentlyInDrainingState(now: Timestamp, start: Timestamp, drainingTime: FiniteDuration) = {
now.after(start - drainingTime)
}
private def offerHasUnavailability(offer: Offer) = {
offer.hasUnavailability && offer.getUnavailability.hasStart
}
private def isAgentOutsideUnavailabilityWindow(offer: Offer, start: Timestamp, now: Timestamp) = {
offer.getUnavailability.hasDuration && now.after(start + offer.getUnavailability.getDuration.toDuration)
}
/**
* Convert Mesos DurationInfo to FiniteDuration.
*
* @return FiniteDuration for DurationInfo
*/
implicit class DurationInfoHelper(val di: DurationInfo) extends AnyVal {
def toDuration: FiniteDuration = FiniteDuration(di.getNanoseconds, NANOSECONDS)
}
}
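// Hedged illustration (not part of the original file): restates the draining-window arithmetic of
// currentlyInDrainingState and isAgentOutsideUnavailabilityWindow above with plain epoch millis,
// so the decision made in offerAvailable can be read without Mesos/Marathon types. The object and
// method names below are hypothetical.
object AvailabilityWindowSketch {
  import scala.concurrent.duration.FiniteDuration
  // The offer is considered "draining" once `now` falls within `drainingTime` of the unavailability start.
  def inDrainingWindow(nowMillis: Long, startMillis: Long, drainingTime: FiniteDuration): Boolean =
    nowMillis > startMillis - drainingTime.toMillis
  // The agent is usable again once the full unavailability duration has elapsed.
  def outsideUnavailabilityWindow(nowMillis: Long, startMillis: Long, unavailability: FiniteDuration): Boolean =
    nowMillis > startMillis + unavailability.toMillis
}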
|
gsantovena/marathon
|
src/main/scala/mesosphere/mesos/Availability.scala
|
Scala
|
apache-2.0
| 1,409
|
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import enumeratum.Enum
import io.truthencode.ddo.model.effect.features.{Features, FeaturesImpl}
import io.truthencode.ddo.model.schools.School
import io.truthencode.ddo.support.EpicLevels
import io.truthencode.ddo.support.StringUtils.Extensions
import io.truthencode.ddo.support.naming.{FriendlyDisplay, Prefix}
import io.truthencode.ddo.support.requisite._
import scala.collection.immutable
/**
* Created by adarr on 2/14/2017.
*/
sealed trait EpicFeat
extends Feat with FriendlyDisplay with SubFeatInformation with LevelRequisiteImpl
with RequiresCharacterLevel with FeaturesImpl {
self: FeatType with Requisite with Inclusion with EpicFeatCategory with Features =>
/**
* Default Minimum Level for all Epic Feats. Override this with a higher level as needed.
*/
override val requireCharacterLevel: Int = EpicLevels.min
}
// scalastyle:off number.of.methods
object EpicFeat extends Enum[EpicFeat] with FeatSearchPrefix with FeatMatcher {
val matchFeat: PartialFunction[Feat, EpicFeat] = { case x: EpicFeat =>
x
}
  val matchFeatById: PartialFunction[String, EpicFeat] = {
    case x: String if EpicFeat.namesToValuesMap.contains(x) =>
      EpicFeat.namesToValuesMap(x)
  }
def epicSpellFocusAny: immutable.IndexedSeq[EpicSpellFocus] =
for { x <- School.values } yield EpicSpellFocus(x)
override def values: immutable.IndexedSeq[EpicFeat] = findValues
case class EpicSpellFocus(school: School)
extends EpicSpellFocusBase with EpicFeat with SubFeat with Prefix {
/**
* Delimits the prefix and text.
*/
override protected val prefixSeparator: String = ": "
override def prefix: Option[String] = Some("EpicSpellFocus".splitByCase)
override def allOfFeats: Seq[GeneralFeat] = lesser
private def lesser = GeneralFeat.spellFocusAny.filter { x =>
x.school.eq(school)
}
override protected def nameSource: String = school.displayText
}
// General Passive Feats
case object BlindingSpeed
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat with BlindingSpeed
case object BulwarkOfDefense
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat with BulwarkOfDefense
case object EpicDamageReduction
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat
with EpicDamageReduction
case object EpicFortitude
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat with EpicFortitude
case object EpicReflexes
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat with EpicReflexes
case object EpicReputation
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat with EpicReputation
case object EpicSkills
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat with EpicSkills
case object EpicWill
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat with EpicWill
case object GreatAbility
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat with GreatAbility
case object OverwhelmingCritical
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat
with OverwhelmingCritical
case object EpicToughness
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat with EpicToughness
case object WatchfulEye
extends FeatRequisiteImpl with EpicFeat with GeneralPassive with FreeFeat with WatchfulEye
// Ranged Combat Passive
case object CombatArchery
extends FeatRequisiteImpl with EpicFeat with RangedCombatPassive with CombatArchery
case object BurstOfGlacialWrath extends FeatRequisiteImpl with EpicFeat with BurstOfGlacialWrath
case object Ruin extends FeatRequisiteImpl with EpicFeat with Ruin
case object GreaterRuin extends FeatRequisiteImpl with EpicFeat with GreaterRuin
case object EpicMentalToughness extends FeatRequisiteImpl with EpicFeat with EpicMentalToughness
case object EpicSpellFocus extends EpicSpellFocusBase with EpicFeat with ParentFeat {
override val subFeats: Seq[EpicFeat with SubFeat] =
epicSpellFocusAny
}
case object EpicSpellPenetration extends FeatRequisiteImpl with EpicFeat with EpicSpellPenetration
case object ImprovedAugmentSummoning
extends FeatRequisiteImpl with EpicFeat with ImprovedAugmentSummoning
case object MasterOfAir
extends FeatRequisiteImpl with EpicFeat with SpellCastingPassive with FreeFeat with MasterOfAir
case object MasterOfAlignment
extends FeatRequisiteImpl with EpicFeat with SpellCastingPassive with FreeFeat
with MasterOfAlignment
case object MasterOfArtifice
extends FeatRequisiteImpl with EpicFeat with SpellCastingPassive with FreeFeat
with MasterOfArtifice
case object MasterOfEarth
extends FeatRequisiteImpl with EpicFeat with SpellCastingPassive with FreeFeat
with MasterOfEarth
case object MasterOfFire
extends FeatRequisiteImpl with EpicFeat with SpellCastingPassive with FreeFeat with MasterOfFire
case object MasterOfKnowledge
extends FeatRequisiteImpl with EpicFeat with SpellCastingPassive with FreeFeat
with MasterOfKnowledge
case object MasterOfLight
extends FeatRequisiteImpl with EpicFeat with SpellCastingPassive with FreeFeat
with MasterOfLight
case object MasterOfMusic
extends FeatRequisiteImpl with EpicFeat with SpellCastingPassive with FreeFeat
with MasterOfMusic
case object MasterOfWater
extends FeatRequisiteImpl with EpicFeat with SpellCastingPassive with FreeFeat
with MasterOfWater
case object MasterOfTheDead
extends FeatRequisiteImpl with EpicFeat with SpellCastingPassive with FreeFeat
with MasterOfTheDead
case object MasterOfTheWilds
extends FeatRequisiteImpl with EpicFeat with SpellCastingPassive with FreeFeat
with MasterOfTheWilds
case object EmboldenSpell extends FeatRequisiteImpl with EpicFeat with FreeFeat with EmboldenSpell
case object IntensifySpell
extends FeatRequisiteImpl with EpicFeat with FreeFeat with IntensifySpell
case object ConstructExemplar
extends FeatRequisiteImpl with RaceRequisiteImpl with ClassRequisiteImpl with EpicFeat
with FreeFeat with ConstructExemplar
case object InspireExcellence
extends FeatRequisiteImpl with SkillRequisiteImpl with ClassRequisiteImpl with EpicFeat
with InspireExcellence
case object ImprovedMartialArts extends FeatRequisiteImpl with EpicFeat with ImprovedMartialArts
case object VorpalStrikes extends VorpalStrikes with EpicFeat
case object ImprovedSneakAttack extends ImprovedSneakAttack with EpicFeat
case object EpicEldritchBlast extends EpicEldritchBlast with EpicFeat
}
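// Hedged usage sketch (not part of the original file): shows how a caller might resolve an epic
// feat from its entry name via the matchFeatById partial function, and how the per-school
// EpicSpellFocus sub-feats produced by epicSpellFocusAny can be enumerated. The helper object
// below is hypothetical and exists only for illustration.
object EpicFeatLookupSketch {
  def byName(name: String): Option[EpicFeat] =
    EpicFeat.matchFeatById.lift(name)
  def spellFocusSchools: Seq[String] =
    EpicFeat.epicSpellFocusAny.map(_.school.displayText)
}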
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/EpicFeat.scala
|
Scala
|
apache-2.0
| 7,645
|
package net.fwbrasil.smirror
trait Dummy
class SFieldSpecTestClass[X <: Dummy](val m1: String) {
val m2 = "b"
var m3 = 1
var x: Option[X] = None
}
class SFieldSpec extends SMirrorSpec {
"SClass" should "return its fields" in
test[SFieldSpecTestClass[_]] { (sClass, jClass) =>
sClass.fields.map(_.name).toSet should
equal(Set("x", "m1", "m2", "m3"))
}
"Vals" should "reflected" in
test[SFieldSpecTestClass[_]] { (sClass, jClass) =>
val instance = new SFieldSpecTestClass("a")
val vals = sClass.vals.map {
field => (field.name, field.sClass, field.get(instance))
}
vals.toSet should equal(Set(
("m1", sClassOf[String], "a"),
("m2", sClassOf[String], "b")))
}
"Vars" should "reflected" in
test[SFieldSpecTestClass[_]] { (sClass, jClass) =>
val instance = new SFieldSpecTestClass("a")
val vars = sClass.vars.map {
field => (field.name, field.sClass, field.get(instance))
}
vars.toSet should equal(Set(
("m3", sClassOf[Int], 1),
("x", sClassOf[Option[_]], None)))
}
"Vars" should "be modified" in
test[SFieldSpecTestClass[_]] { (sClass, jClass) =>
val instance = new SFieldSpecTestClass("a")
val field = sClass.vars.find(_.name == "m3").get
field.set(instance, 2)
field.get(instance) should equal(2)
field.setter.invoke(instance, 3)
field.getter.invoke(instance) should equal(3)
}
"Vars" should "reflect option type parameter to the upper bound class" in
test[SFieldSpecTestClass[_]] { (sClass, jClass) =>
val field = sClass.vars.find(_.name == "x").get
      field.typeArguments.head should equal(sClassOf[Dummy])
}
}
|
fwbrasil/smirror
|
src/test/scala/net/fwbrasil/smirror/SFieldSpec.scala
|
Scala
|
lgpl-2.1
| 1,933
|
package roller_coaster
// Read inputs from System.in, write outputs using println.
// Your class name has to be Solution
object Solution {
def main(args: Array[String]) {
    // Read the input data
val t = readLine.split(" ").map((e) => e.toInt)
val (l, c, n) = (t(0), t(1), t(2))
val file = new Array[Int](n)
for (i <- 0 until n) file(i) = readInt
    // Compute the total revenue
var r: Long = 0
var idx = 0
for (i <- 1 to c) {
var (a, j) = (0, 0)
var b = true
while (b && j < n) {
val v = file(idx)
b = v <= (l - a)
if (b) {
a += v
idx += 1
if (idx == n) idx = 0
j += 1
}
}
r += a
}
println(r)
}
}
|
bvaudour/codingame
|
level3/Roller_Coaster.scala
|
Scala
|
gpl-2.0
| 734
|
package org.scalatra
trait MacrosCompat extends Internal210 {
type Context = scala.reflect.macros.blackbox.Context
def freshName(name: String): String = c.freshName(name)
def typeName(name: String): c.universe.TypeName = c.universe.TypeName(name)
def termName(name: String): c.universe.TermName = c.universe.TermName(name)
def typecheck(tree: c.universe.Tree): c.universe.Tree = c.typecheck(tree)
def untypecheck(tree: c.universe.Tree): c.universe.Tree = c.untypecheck(tree)
}
|
dozed/scalatra
|
core/src/main/scala-2.11/org/scalatra/MacrosCompat.scala
|
Scala
|
bsd-2-clause
| 498
|
package sbt
import java.io.{ File, FileNotFoundException, IOException }
object exit {
def main(args: Array[String]) {
System.exit(java.lang.Integer.parseInt(args(0)))
}
}
object cat {
def main(args: Array[String]) {
try {
if (args.length == 0)
IO.transfer(System.in, System.out)
else
catFiles(args.toList)
System.exit(0)
} catch {
      case e: Throwable =>
e.printStackTrace()
System.err.println("Error: " + e.toString)
System.exit(1)
}
}
private def catFiles(filenames: List[String]): Option[String] =
{
filenames match {
case head :: tail =>
val file = new File(head)
if (file.isDirectory)
throw new IOException("Is directory: " + file)
else if (file.exists) {
Using.fileInputStream(file) { stream =>
IO.transfer(stream, System.out)
}
catFiles(tail)
} else
throw new FileNotFoundException("No such file or directory: " + file)
case Nil => None
}
}
}
object echo {
def main(args: Array[String]) {
System.out.println(args.mkString(" "))
}
}
|
niktrop/sbt
|
util/process/src/test/scala/TestedProcess.scala
|
Scala
|
bsd-3-clause
| 1,174
|
package com.typesafe.slick.testkit.tests
import org.junit.Assert
import org.junit.Assert._
import slick.jdbc.GetResult
import com.typesafe.slick.testkit.util.{JdbcTestDB, AsyncTest}
class PlainSQLTest extends AsyncTest[JdbcTestDB] {
import tdb.profile.api._
implicit val getUserResult = GetResult(r => new User(r.<<, r.<<))
case class User(id:Int, name:String)
//TODO convert to new API:
/*
def testSimple = ifCap(tcap.plainSql) {
def getUsers(id: Option[Int]) = {
val q = Q[User] + "select id, name from USERS "
id map { q + "where id =" +? _ } getOrElse q
}
def InsertUser(id: Int, name: String) = Q.u + "insert into USERS values (" +? id + "," +? name + ")"
val createTable = Q[Int] + "create table USERS(ID int not null primary key, NAME varchar(255))"
val populateUsers = List(InsertUser(1, "szeiger"), InsertUser(0, "admin"), InsertUser(2, "guest"), InsertUser(3, "foo"))
val allIDs = Q[Int] + "select id from USERS"
val userForID = Q[Int, User] + "select id, name from USERS where id = ?"
val userForIdAndName = Q[(Int, String), User] + "select id, name from USERS where id = ? and name = ?"
implicitSession.withTransaction {
println("Creating user table: "+createTable.first)
println("Inserting users:")
for(i <- populateUsers) println(" "+i.first)
}
println("All IDs:")
for(s <- allIDs.list) println(" "+s)
assertEquals(Set(1,0,2,3), allIDs.list.toSet)
println("All IDs with foreach:")
var s1 = Set[Int]()
allIDs foreach { s =>
println(" "+s)
s1 += s
}
assertEquals(Set(1,0,2,3), s1)
val res = userForID(2).first
println("User for ID 2: "+res)
assertEquals(User(2,"guest"), res)
assertEquals(User(2,"guest"), userForIdAndName(2, "guest").first)
assertEquals(None, userForIdAndName(2, "foo").firstOption)
println("User 2 with foreach:")
var s2 = Set[User]()
userForID(2) foreach { s =>
println(" "+s)
s2 += s
}
assertEquals(Set(User(2,"guest")), s2)
println("User 2 with foreach:")
var s3 = Set[User]()
getUsers(Some(2)) foreach { s =>
println(" "+s)
s3 += s
}
assertEquals(Set(User(2,"guest")), s3)
println("All users with foreach:")
var s4 = Set[User]()
getUsers(None) foreach { s =>
println(" "+s)
s4 += s
}
assertEquals(Set(User(1,"szeiger"), User(2,"guest"), User(0,"admin"), User(3,"foo")), s4)
println("All users with iterator.foreach:")
var s5 = Set[User]()
for(s <- getUsers(None).iterator) {
println(" "+s)
s5 += s
}
assertEquals(Set(User(1,"szeiger"), User(2,"guest"), User(0,"admin"), User(3,"foo")), s5)
if(tdb.canGetLocalTables) {
println("All tables:")
for(t <- tdb.getLocalTables) println(" "+t)
assertEquals(List("users"), tdb.getLocalTables.map(_.toLowerCase))
}
assertUnquotedTablesExist("USERS")
}
*/
def testInterpolation = ifCap(tcap.plainSql) {
def userForID(id: Int) = sql"select id, name from USERS where id = $id".as[User]
def userForIdAndName(id: Int, name: String) = sql"select id, name from USERS where id = $id and name = $name".as[User]
val foo = "foo"
val s1 = sql"select id from USERS where name = ${"szeiger"}".as[Int]
val s2 = sql"select id from USERS where name = '#${"guest"}'".as[Int]
val s3 = sql"select id from USERS where name = $foo".as[Int]
val s4 = sql"select id from USERS where name = '#$foo'".as[Int]
s1.statements.head shouldBe "select id from USERS where name = ?"
s2.statements.head shouldBe "select id from USERS where name = 'guest'"
s3.statements.head shouldBe "select id from USERS where name = ?"
s4.statements.head shouldBe "select id from USERS where name = 'foo'"
val create: DBIO[Int] = sqlu"create table USERS(ID int not null primary key, NAME varchar(255))"
seq(
create.map(_ shouldBe 0),
DBIO.fold((for {
(id, name) <- List((1, "szeiger"), (0, "admin"), (2, "guest"), (3, "foo"))
} yield sqlu"insert into USERS values ($id, $name)"), 0)(_ + _).map(_ shouldBe 4),
sql"select id from USERS".as[Int].map(_.toSet shouldBe Set(0,1,2,3)), //TODO Support `to` in Plain SQL Actions
userForID(2).map(_.head shouldBe User(2,"guest")), //TODO Support `head` and `headOption` in Plain SQL Actions
s1.map(_ shouldBe List(1)),
s2.map(_ shouldBe List(2)),
userForIdAndName(2, "guest").map(_.head shouldBe User(2,"guest")), //TODO Support `head` and `headOption` in Plain SQL Actions
userForIdAndName(2, "foo").map(_.headOption shouldBe None) //TODO Support `head` and `headOption` in Plain SQL Actions
)
}
}
|
nmartynenko/slick
|
slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/PlainSQLTest.scala
|
Scala
|
bsd-2-clause
| 4,723
|
package nsmc.conversion.types
import org.apache.spark.sql.types._
import scala.collection.immutable.HashMap
class InternalAndSchema {
}
object InternalAndSchema {
def toSchema(it: ConversionType) : DataType = {
it match {
case AtomicType(dt: DataType) => dt
case SequenceType(et) => ArrayType(toSchema(et))
case StructureType(fields) => {
val converted = fields.map(kv => makeField(kv._1, toSchema(kv._2)))
val sorted = converted.toSeq.sortBy(sf => sf.name)
StructType(sorted)
}
}
}
private def makeField(k:String, t: DataType) : StructField = {
StructField(k, t, nullable = true)
}
def toInternal(schema: Seq[StructField]) : ConversionType = {
val convertedPairs = schema.toSeq.map(toInternal)
val hm = HashMap[String, ConversionType](convertedPairs:_*)
new StructureType(hm)
}
private def toInternal(sf: StructField) : (String, ConversionType) = {
sf.dataType match {
// TODO: leaving out some of the atomic types
case StringType => (sf.name, AtomicType(StringType))
case IntegerType => (sf.name, AtomicType(IntegerType))
case StructType(s) => (sf.name, toInternal(s))
}
}
}
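// Hedged usage sketch (not part of the original file): round-trips a small Spark SQL schema
// through the internal representation using the two conversions defined above. The field names
// are made up for illustration.
object InternalAndSchemaSketch {
  val sparkFields: Seq[StructField] = Seq(
    StructField("name", StringType, nullable = true),
    StructField("age", IntegerType, nullable = true))
  // Seq[StructField] -> ConversionType (a StructureType keyed by field name)
  val internal: ConversionType = InternalAndSchema.toInternal(sparkFields)
  // ConversionType -> DataType (a StructType with fields sorted by name)
  val backToSchema: DataType = InternalAndSchema.toSchema(internal)
}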
|
shotishu/spark-mongodb-connector
|
src/main/scala/nsmc/conversion/types/InternalAndSchema.scala
|
Scala
|
apache-2.0
| 1,208
|
package hu.frankdavid.ranking.gui
import javafx.event.EventHandler
import javafx.scene.control.ButtonBar.ButtonData
import javafx.scene.control.{ButtonType, Dialog, DialogEvent}
import hu.frankdavid.ranking.TournamentStrategy
import hu.frankdavid.ranking.gui.configs._
import scalafx.Includes._
import scalafx.application.Platform
import scalafx.geometry.Insets
import scalafx.scene.control.ComboBox
import scalafx.scene.layout.{GridPane, Pane, Priority}
import scalafx.util.StringConverter
class StrategyDialog extends Dialog[TournamentStrategy] {
def this(strategy: TournamentStrategy) = {
this()
load(strategy)
}
val Width = 400
val Height = 300
val ConfigurationEditors = Seq(
new RoundRobinConfigurationEditor, new OneByOneConfigurationEditor, new TimSortConfigurationEditor,
new HeapSortConfigurationEditor, new SwissConfigurationEditor, new EmptyConfigurationEditor
)
private val content = new GridPane {
padding = Insets(10)
vgap = 10
hgap = 10
hgrow = Priority.Always
maxWidth = Width
prefWidth = Width
}
getDialogPane.setContent(content)
onShownProperty()() = new EventHandler[DialogEvent] {
def handle(event: DialogEvent) = Platform.runLater {
getDialogPane.getScene.getWindow.sizeToScene()
}
}
private val configPane = new Pane() {hgrow = Priority.Always}
private var configEditor: StrategyConfigurationEditor = new EmptyConfigurationEditor
def strategyConfigPanel(strategy: TournamentStrategy): StrategyConfigurationEditor = {
ConfigurationEditors.find(_.load(strategy)).getOrElse(new EmptyConfigurationEditor)
}
def load(strategy: TournamentStrategy) = {
if (strategy != null) {
strategySelectorBox.value() = strategy
}
}
content.add(configPane, 0, 1)
val distinctTypeOfStrategies = DefaultStrategies.groupBy(_.getClass).values.map(_.head).toSeq.sortBy(_.name)
private val strategySelectorBox = new ComboBox[TournamentStrategy](distinctTypeOfStrategies) {
prefWidth = Width
converter = new StringConverter[TournamentStrategy] {
def fromString(string: String) = ???
def toString(t: TournamentStrategy) = t.typeName
}
value.onChange { (_, _, value) =>
configEditor = strategyConfigPanel(value)
configPane.content = configEditor
Platform.runLater {
getDialogPane.getScene.getWindow.sizeToScene()
}
}
selectionModel().selectFirst()
hgrow = Priority.Always
}
content.add(strategySelectorBox, 0, 0)
private val okButton = new ButtonType("OK", ButtonData.OK_DONE)
private val cancelButton = new ButtonType("Cancel", ButtonData.CANCEL_CLOSE)
getDialogPane.getButtonTypes += okButton
getDialogPane.getButtonTypes += cancelButton
setResultConverter { x: ButtonType =>
x match {
case `okButton` => configEditor.strategy
case _ => null
}
}
}
|
frankdavid/ranking
|
src/main/scala/hu/frankdavid/ranking/gui/StrategyDialog.scala
|
Scala
|
apache-2.0
| 2,886
|
package com.twitter.finagle
import com.twitter.logging.{HasLogLevel, Level}
import com.twitter.util.Duration
import java.net.SocketAddress
/**
* A trait for exceptions that have a source. The name of the source is
* specified as a `serviceName`. The "unspecified" value is used if no
* `serviceName` is provided by the implementation.
*/
trait SourcedException extends Exception {
var serviceName: String = SourcedException.UnspecifiedServiceName
}
object SourcedException {
val UnspecifiedServiceName = "unspecified"
def unapply(t: Throwable): Option[String] = t match {
case sourced: SourcedException
if sourced.serviceName != SourcedException.UnspecifiedServiceName =>
Some(sourced.serviceName)
case sourced: Failure =>
sourced.getSource(Failure.Source.Service).map(_.toString)
case _ =>
None
}
}
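// Hedged usage sketch (not part of the original file): the extractor above lets callers recover
// the originating service name from a Throwable, whether it is a SourcedException with an
// explicit serviceName or a Failure tagged with a Source.Service value. The helper is hypothetical.
private object SourcedExceptionSketch {
  def describe(t: Throwable): String = t match {
    case SourcedException(service) => s"failure from service: $service"
    case _ => "failure from an unspecified service"
  }
}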
/**
* A trait for common exceptions that either
*
* a) don't benefit from full stacktrace information (e.g. stacktraces don't add
* useful information, as in the case of connection closure), or
* b) are thrown frequently enough that stacktrace-creation becomes unacceptably
* expensive.
*
* This trait represents a tradeoff between debugging ease and efficiency.
* Implementers beware.
*/
trait NoStacktrace extends Exception {
override def fillInStackTrace = this
// specs expects non-empty stacktrace array
this.setStackTrace(NoStacktrace.NoStacktraceArray)
}
object NoStacktrace {
val NoStacktraceArray = Array(new StackTraceElement("com.twitter.finagle", "NoStacktrace", null, -1))
}
/**
* A base class for request failures. Indicates that some failure occurred
* before a request could be successfully serviced.
*/
class RequestException(message: String, cause: Throwable)
extends Exception(message, cause)
with NoStacktrace
with SourcedException
{
def this() = this(null, null)
def this(cause: Throwable) = this(null, cause)
override def getStackTrace = if (cause != null) cause.getStackTrace else super.getStackTrace
}
/**
* Indicates that an operation exceeded some timeout duration before completing.
* Differs from [[com.twitter.util.TimeoutException]] in that this trait doesn't
* extend [[java.util.concurrent.TimeoutException]], provides more context in
* its error message (e.g. the source and timeout value), and is only used
* within the confines of Finagle.
*/
trait TimeoutException extends SourcedException { self: Exception =>
protected val timeout: Duration
protected def explanation: String
override def getMessage = s"exceeded $timeout to $serviceName while $explanation"
}
/**
* Indicates that a request timed out. See
* [[com.twitter.finagle.IndividualRequestTimeoutException]] and
* [[com.twitter.finagle.GlobalRequestTimeoutException]] for details on the
* different request granularities that this exception class can pertain to.
*/
class RequestTimeoutException(
protected val timeout: Duration,
protected val explanation: String
) extends RequestException with TimeoutException
/**
* Indicates that a single Finagle-level request timed out. In contrast to
* [[com.twitter.finagle.RequestTimeoutException]], an "individual request"
* could be a single request-retry performed as a constituent of an
* application-level RPC.
*/
class IndividualRequestTimeoutException(timeout: Duration)
extends RequestTimeoutException(
timeout,
"waiting for a response for an individual request, excluding retries")
/**
* Indicates that a request timed out, where "request" comprises a full RPC
* from the perspective of the application. For instance, multiple retried
* Finagle-level requests could constitute the single request that this
* exception pertains to.
*/
class GlobalRequestTimeoutException(timeout: Duration)
extends RequestTimeoutException(
timeout,
"waiting for a response for the request, including retries (if applicable)")
/**
* Indicates that a request failed because no servers were available. The
* Finagle client's internal load balancer was empty. This typically occurs
* under one of the following conditions:
*
* - The cluster is actually down. No servers are available.
* - A service discovery failure. This can be due to a number of causes, such as
* the client being constructed with an invalid cluster destination name [1]
* or a failure in the service discovery system (e.g. DNS, ZooKeeper).
*
* A good way to diagnose NoBrokersAvailableExceptions is to reach out to the
* owners of the service to which the client is attempting to connect and verify
* that the service is operational. If so, then investigate the service
* discovery mechanism that the client is using (e.g. the
 * [[com.twitter.finagle.Resolver]] that it is configured to use and the system
* backing it).
*
* [1] http://twitter.github.io/finagle/guide/Names.html
*/
class NoBrokersAvailableException(
val name: String,
val baseDtab: Dtab,
val localDtab: Dtab
) extends RequestException {
def this(name: String = "unknown") = this(name, Dtab.empty, Dtab.empty)
override def getMessage =
s"No hosts are available for $name, Dtab.base=[${baseDtab.show}], Dtab.local=[${localDtab.show}]"
}
/**
* Indicates that a request was cancelled. Cancellation is propagated between a
* Finagle server and a client intra-process when the server is interrupted by
* an upstream service. In such cases, the pending Future is interrupted with
* this exception. The client will cancel its pending request which will by
* default propagate an interrupt to its downstream, and so on. This is done to
* conserve resources.
*/
class CancelledRequestException(cause: Throwable) extends RequestException(cause) {
def this() = this(null)
override def getMessage = {
if (cause == null)
"request cancelled"
else
"request cancelled due to " + cause
}
}
/**
* Used by [[com.twitter.finagle.pool.WatermarkPool]] to indicate that a request
* failed because too many requests are already waiting for a connection to
* become available from a client's connection pool.
*/
class TooManyWaitersException extends RequestException
/**
* A Future is satisfied with this exception when the process of establishing
* a session is interrupted. Sessions are not preemptively established in Finagle,
* rather requests are taxed with session establishment when necessary.
* For example, this exception can occur if a request is interrupted while waiting for
* an available session or if an interrupt is propagated from a Finagle server
* during session establishment.
*
* @see com.twitter.finagle.CancelledRequestException
*/
class CancelledConnectionException(cause: Throwable) extends RequestException(cause) {
def this() = this(null)
}
/**
* Used by [[com.twitter.finagle.service.FailFastFactory]] to indicate that a
* request failed because all hosts in the cluster to which the client is
* connected have been marked as failed. See FailFastFactory for details on
* this behavior.
*/
class FailedFastException(message: String)
extends RequestException(message, cause = null)
with WriteException
{
def this() = this(null)
}
/**
* Indicates that the request was not servable, according to some policy. See
* [[com.twitter.finagle.service.OptionallyServableFilter]] as an example.
*/
class NotServableException extends RequestException
/**
* Indicates that the client failed to distribute a given request according to
* some sharding strategy. See [[com.twitter.finagle.service.ShardingService]]
* for details on this behavior.
*/
class NotShardableException extends NotServableException
/**
* Indicates that the shard to which a request was assigned was not available.
* See [[com.twitter.finagle.service.ShardingService]] for details on this
* behavior.
*/
class ShardNotAvailableException extends NotServableException
object ChannelException {
def apply(cause: Throwable, remoteAddress: SocketAddress) = {
cause match {
case exc: ChannelException => exc
case _: java.net.ConnectException => new ConnectionFailedException(cause, remoteAddress)
case _: java.nio.channels.UnresolvedAddressException => new ConnectionFailedException(cause, remoteAddress)
case _: java.nio.channels.ClosedChannelException => new ChannelClosedException(cause, remoteAddress)
case e: java.io.IOException
if "Connection reset by peer" == e.getMessage => new ChannelClosedException(cause, remoteAddress)
case e: java.io.IOException
if "Broken pipe" == e.getMessage => new ChannelClosedException(cause, remoteAddress)
case e: java.io.IOException
if "Connection timed out" == e.getMessage => new ConnectionFailedException(cause, remoteAddress)
case e => new UnknownChannelException(cause, remoteAddress)
}
}
}
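// Hedged illustration (not part of the original file): ChannelException.apply above normalizes
// low-level socket errors into the channel exception hierarchy, e.g. a raw java.net.ConnectException
// becomes a ConnectionFailedException that keeps the remote address for diagnostics. The helper
// below is hypothetical.
private object ChannelExceptionSketch {
  def classifyConnectFailure(remote: SocketAddress): ChannelException =
    ChannelException(new java.net.ConnectException("connection refused"), remote)
}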
/**
* An exception encountered within the context of a given socket channel.
*/
class ChannelException(underlying: Throwable, val remoteAddress: SocketAddress)
extends Exception(underlying)
with SourcedException
with HasLogLevel
{
def this(underlying: Throwable) = this(underlying, null)
def this() = this(null, null)
override def getMessage = {
val message = (underlying, remoteAddress) match {
case (_, null) => super.getMessage
case (null, _) => s"ChannelException at remote address: ${remoteAddress.toString}"
case (_, _) => s"${underlying.getMessage} at remote address: ${remoteAddress.toString}"
}
if (serviceName == SourcedException.UnspecifiedServiceName) message
else s"$message from service: $serviceName"
}
def logLevel: Level = Level.DEBUG
}
/**
* Indicates that the client failed to establish a connection. Typically this
* class will be extended to provide additional information relevant to a
* particular category of connection failure.
*/
class ConnectionFailedException(underlying: Throwable, remoteAddress: SocketAddress)
extends ChannelException(underlying, remoteAddress) with NoStacktrace {
def this() = this(null, null)
}
/**
* Indicates that a given channel was closed, for instance if the connection
* was reset by a peer or a proxy.
*/
class ChannelClosedException(underlying: Throwable, remoteAddress: SocketAddress)
extends ChannelException(underlying, remoteAddress) with NoStacktrace {
def this(remoteAddress: SocketAddress) = this(null, remoteAddress)
def this() = this(null, null)
}
/**
* Indicates that a write to a given `remoteAddress` timed out. See
* [[com.twitter.finagle.netty3.channel.WriteCompletionTimeoutHandler]] for details.
*/
class WriteTimedOutException(remoteAddress: SocketAddress) extends ChannelException(null, remoteAddress) {
def this() = this(null)
}
/**
* Indicates that some client state was inconsistent with the observed state of
* some server. For example, the client could receive a channel-connection event
* from a proxy when there is no outstanding connect request.
*/
class InconsistentStateException(remoteAddress: SocketAddress) extends ChannelException(null, remoteAddress) {
def this() = this(null)
}
/**
* A catch-all exception class for uncategorized
* [[com.twitter.finagle.ChannelException ChannelExceptions]].
*/
case class UnknownChannelException(underlying: Throwable, override val remoteAddress: SocketAddress)
extends ChannelException(underlying, remoteAddress) {
def this() = this(null, null)
}
object WriteException {
def apply(underlying: Throwable): WriteException =
ChannelWriteException(underlying)
def unapply(t: Throwable): Option[Throwable] = t match {
case we: WriteException => Some(we.getCause)
case _ => None
}
}
/**
* Marker trait to indicate there was an exception while writing the request.
* These exceptions should generally be retryable as the full request should
* not have reached the other end.
*/
trait WriteException extends Exception with SourcedException
/**
* Default implementation for WriteException that wraps an underlying exception.
*/
case class ChannelWriteException(underlying: Throwable)
extends ChannelException(underlying)
with WriteException
with NoStacktrace
{
override def fillInStackTrace = this
override def getStackTrace = underlying.getStackTrace
}
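// Hedged usage sketch (not part of the original file): WriteException.unapply recovers the wrapped
// cause, which is the usual signal that the request never fully reached the server and may
// therefore be safe to retry. The helper name below is hypothetical.
private object WriteExceptionSketch {
  def retryableCause(t: Throwable): Option[Throwable] = t match {
    case WriteException(cause) => Some(cause)
    case _ => None
  }
}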
/**
* Indicates that an error occurred while an SSL handshake was being performed
* with a server at a given `remoteAddress`.
*/
case class SslHandshakeException(underlying: Throwable, override val remoteAddress: SocketAddress)
extends ChannelException(underlying, remoteAddress) {
def this() = this(null, null)
}
/**
* Indicates that the certificate for a given session was invalidated.
*/
case class SslHostVerificationException(principal: String) extends ChannelException {
def this() = this(null)
}
/**
* Indicates that connecting to a given `remoteAddress` was refused.
*/
case class ConnectionRefusedException(override val remoteAddress: SocketAddress)
extends ChannelException(null, remoteAddress) {
def this() = this(null)
}
/**
* Indicates that requests were failed by a rate-limiter. See
* [[com.twitter.finagle.service.RateLimitingFilter]] for details.
*/
case class RefusedByRateLimiter() extends ChannelException
/**
* A base class for exceptions encountered in the context of a
* [[com.twitter.finagle.transport.Transport]].
*/
class TransportException extends Exception with SourcedException
/**
* Indicates that a request failed because a
* [[com.twitter.finagle.transport.Transport]] write associated with the request
* was cancelled.
*/
class CancelledWriteException extends TransportException
/**
* Indicates that a [[com.twitter.finagle.transport.Transport]] write associated
* with the request was dropped by the transport (usually to respect backpressure).
*/
class DroppedWriteException extends TransportException
/**
* A trait for exceptions related to a [[com.twitter.finagle.Service]].
*/
trait ServiceException extends Exception with SourcedException
/**
* Indicates that a request was applied to a [[com.twitter.finagle.Service]]
* that is closed (i.e. the connection is closed).
*/
class ServiceClosedException extends ServiceException
/**
* Indicates that a request was applied to a [[com.twitter.finagle.Service]]
* that is unavailable. This constitutes a fail-stop condition.
*/
class ServiceNotAvailableException extends ServiceException
/**
* Indicates that the connection was not established within the timeouts.
* This type of exception should generally be safe to retry.
*/
class ServiceTimeoutException(override protected val timeout: Duration)
extends WriteException
with ServiceException
with TimeoutException
{
override protected def explanation =
"creating a service/connection or reserving a service/connection from the service/connection pool " + serviceName
}
/**
* A base class for exceptions encountered on account of incorrect API usage.
*/
class ApiException extends Exception
/**
* Indicates that the client has issued more concurrent requests than are
* allowable, where "allowable" is typically determined based on some
* configurable maximum.
*/
class TooManyConcurrentRequestsException extends ApiException
/**
* Indicates that an error occurred on account of incorrect usage of a
* [[org.jboss.netty.buffer.ChannelBuffer]].
*
* TODO: Probably remove this exception class once we migrate away from Netty
* usage in public APIs.
*/
class ChannelBufferUsageException(description: String) extends Exception(description)
/**
* An exception that is raised on requests that are discarded because
* their corresponding backup requests succeeded first. See
* [[com.twitter.finagle.exp.BackupRequestFilter]] for details.
*/
object BackupRequestLost extends Exception with NoStacktrace with HasLogLevel {
def logLevel: Level = Level.TRACE
}
|
liamstewart/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/Exceptions.scala
|
Scala
|
apache-2.0
| 15,877
|
/*
* Copyright (c) 2013, Scodec
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package scodec
package codecs
import javax.crypto.KeyGenerator
import javax.crypto.spec.IvParameterSpec
import scodec.bits.ByteVector
import org.scalacheck.Prop.forAll
class CipherCodecTest extends CodecSuite:
private val secretKey =
val keyGen = KeyGenerator.getInstance("AES").nn
keyGen.init(128)
keyGen.generateKey.nn
private val iv = new IvParameterSpec(ByteVector.low(16).toArray)
property("roundtrip with AES/ECB/PKCS5Padding") {
testWithCipherFactory(CipherFactory("AES/ECB/PKCS5Padding", secretKey))
}
property("roundtrip with AES/CBC/PKCS5Padding") {
testWithCipherFactory(CipherFactory("AES/CBC/PKCS5Padding", secretKey, iv))
}
protected def testWithCipherFactory(cipherFactory: CipherFactory) =
val codec = encrypted(int32 :: utf8, cipherFactory)
forAll((n: Int, s: String) => roundtrip(codec, (n, s)))
|
scodec/scodec
|
unitTests/src/test/scala/scodec/codecs/CipherCodecTest.scala
|
Scala
|
bsd-3-clause
| 2,440
|
package com.searchlight.khronus.service
import akka.actor.Props
import com.searchlight.khronus.model.MetricBatch
import com.searchlight.khronus.store.CassandraMetricMeasurementStore._
import com.searchlight.khronus.store.MetricMeasurementStoreSupport
import com.searchlight.khronus.util.{ConcurrencySupport, JacksonJsonSupport}
import com.searchlight.khronus.util.log.Logging
import spray.http.StatusCodes._
import spray.httpx.encoding.{ Gzip, NoEncoding }
import spray.routing._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Failure
import scala.util.control.NonFatal
class KhronusActor extends HttpServiceActor with KhronusEnpoint with KhronusHandlerException {
def receive = runRoute(metricsRoute)
}
object KhronusActor {
val Name = "khronus-actor"
val Path = "khronus/metrics"
def props = Props[KhronusActor]
}
trait KhronusEnpoint extends HttpService with MetricMeasurementStoreSupport with JacksonJsonSupport with Logging with ConcurrencySupport {
override def loggerName = classOf[KhronusEnpoint].getName
implicit val executionContext: ExecutionContext = executionContext("metric-receiver-endpoint")
val metricsRoute: Route =
decompressRequest(Gzip, NoEncoding) {
post {
entity(as[MetricBatch]) { metricBatch ⇒
complete {
Future {
metricStore.storeMetricMeasurements(metricBatch.metrics)
}
OK
}
}
}
}
}
object SprayMetrics extends Logging {
import spray.routing.directives.BasicDirectives._
def around(before: RequestContext ⇒ (RequestContext, Any ⇒ Any)): Directive0 =
mapInnerRoute { inner ⇒
ctx ⇒
val (ctxForInnerRoute, after) = before(ctx)
try inner(ctxForInnerRoute.withRouteResponseMapped(after))
catch {
case NonFatal(ex) ⇒ after(Failure(ex))
}
}
  def buildAfter(name: String, start: Long): Any ⇒ Any = { possibleRsp: Any ⇒
    log.info(s"$name time spent ${System.currentTimeMillis() - start} ms")
    possibleRsp
  }
def time(name: String): Directive0 =
around { ctx ⇒
val timerContext = System.currentTimeMillis()
(ctx, buildAfter(name, timerContext))
}
}
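// Hedged usage sketch (not part of the original file): wraps a route in the `time` directive so
// the elapsed handling time is logged under the given name. The trait, path and response below
// are hypothetical and exist only for illustration.
trait TimedRouteSketch extends HttpService {
  val timedRoute: Route =
    SprayMetrics.time("ping-endpoint") {
      path("ping") {
        get {
          complete("pong")
        }
      }
    }
}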
|
despegar/khronus
|
khronus-core/src/main/scala/com/searchlight/khronus/service/KhronusEnpoint.scala
|
Scala
|
apache-2.0
| 2,283
|
import zio._
object layers {
trait Service1
trait Service2
trait Service3
trait Service4
val service1 = ZLayer.succeed(new Service1 {})
val service2 = ZLayer.succeed(new Service2 {})
val service3 = ZLayer.fromService { (_: Service1) => new Service3 {} }
val service4 = ZLayer.succeed(new Service4 {})
val services: ULayer[Has[Service1] with Has[Service2] with Has[Service3] with Has[Service4]] =
service1 ++ service2 >+> service3// ++ service4
}
|
tek/splain
|
core/src/test/resources-2.13.7+/latest/splain/plugin/ZIOSpec/zlayer/code.scala
|
Scala
|
mit
| 475
|
package org.gedanken.farley.parser
/**
*
* parser/Parser.scala
*
* Copyright 2013, 2014, 2015 Logan O'Sullivan Bruns
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import akka.actor.ActorRef
import com.hp.hpl.jena.tdb.TDBFactory
import com.hp.hpl.jena.query.Dataset
import com.typesafe.scalalogging.LazyLogging
import opennlp.tools.parser._
import opennlp.tools.sentdetect._
import opennlp.tools.cmdline.parser.ParserTool
import org.gedanken.farley.parser.modules._
import org.w3.banana.jena.Jena
import java.io.FileInputStream
class Parser(
sentenceModelPath : String,
parserModelPath : String,
dataSetPath : String) extends LazyLogging {
logger.info("Loading sentence model.")
private val sentenceModel = new SentenceModel(new FileInputStream(sentenceModelPath))
private val detector = new SentenceDetectorME(sentenceModel)
logger.info("Done loading sentence model.")
logger.info("Loading parser model.")
private val parserModel = new ParserModel(new FileInputStream(parserModelPath))
private val parser = ParserFactory.create(parserModel)
logger.info("Done loading parser model.")
logger.info("Loading dataset.")
private val dataset = TDBFactory.createDataset(dataSetPath)
logger.info("Done loading dataset.")
logger.info("Loading modules.")
private val modules =
new Help() :: new Meta[Jena, Dataset](dataset) ::
new Scanner() :: new Show[Jena, Dataset](dataset) ::
Nil
logger.info("Done loading modules.")
def process(input: String, context: ActorRef) : String = {
var nlpOnly = false;
val sentences =
if (!input.startsWith("?"))
detector.sentDetect(input)
else {
nlpOnly = true
detector.sentDetect(input.substring(1))
}
val response = new StringBuilder();
val buffer = new StringBuffer();
for (sentence <- sentences) {
val parses = ParserTool.parseLine(sentence, parser, 5)
val variants : Array[String] =
for (parse <- parses) yield {
buffer.setLength(0)
parse.show(buffer);
buffer.toString()
}
if (nlpOnly)
for (variant <- variants) {
response.append(variant)
response.append("\\n")
}
else {
val mc = new ModuleContext(context)
for (module <- modules) {
val result = module.evaluate(variants, mc)
if (result != null) {
response.append(result)
response.append("\\n")
}
}
}
}
if (response.length() > 0)
return response.toString()
else
return "I'm sorry I didn't understand."
}
}
|
loganbruns/farley
|
parser/src/main/scala/org/gedanken/farley/parser/Parser.scala
|
Scala
|
apache-2.0
| 3,033
|
package app
import util.{LockUtil, CollaboratorsAuthenticator, JGitUtil, ReferrerAuthenticator, Notifier, Keys}
import util.Directory._
import util.Implicits._
import util.ControlUtil._
import service._
import org.eclipse.jgit.api.Git
import jp.sf.amateras.scalatra.forms._
import org.eclipse.jgit.transport.RefSpec
import scala.collection.JavaConverters._
import org.eclipse.jgit.lib.{ObjectId, CommitBuilder, PersonIdent}
import service.IssuesService._
import service.PullRequestService._
import util.JGitUtil.DiffInfo
import service.RepositoryService.RepositoryTreeNode
import util.JGitUtil.CommitInfo
import org.slf4j.LoggerFactory
import org.eclipse.jgit.merge.MergeStrategy
import org.eclipse.jgit.errors.NoMergeBaseException
class PullRequestsController extends PullRequestsControllerBase
with RepositoryService with AccountService with IssuesService with PullRequestService with MilestonesService with ActivityService
with ReferrerAuthenticator with CollaboratorsAuthenticator
trait PullRequestsControllerBase extends ControllerBase {
self: RepositoryService with AccountService with IssuesService with MilestonesService with ActivityService with PullRequestService
with ReferrerAuthenticator with CollaboratorsAuthenticator =>
private val logger = LoggerFactory.getLogger(classOf[PullRequestsControllerBase])
val pullRequestForm = mapping(
"title" -> trim(label("Title" , text(required, maxlength(100)))),
"content" -> trim(label("Content", optional(text()))),
"targetUserName" -> trim(text(required, maxlength(100))),
"targetBranch" -> trim(text(required, maxlength(100))),
"requestUserName" -> trim(text(required, maxlength(100))),
"requestBranch" -> trim(text(required, maxlength(100))),
"commitIdFrom" -> trim(text(required, maxlength(40))),
"commitIdTo" -> trim(text(required, maxlength(40)))
)(PullRequestForm.apply)
val mergeForm = mapping(
"message" -> trim(label("Message", text(required)))
)(MergeForm.apply)
case class PullRequestForm(
title: String,
content: Option[String],
targetUserName: String,
targetBranch: String,
requestUserName: String,
requestBranch: String,
commitIdFrom: String,
commitIdTo: String)
case class MergeForm(message: String)
get("/:owner/:repository/pulls")(referrersOnly { repository =>
searchPullRequests(None, repository)
})
get("/:owner/:repository/pulls/:userName")(referrersOnly { repository =>
searchPullRequests(Some(params("userName")), repository)
})
get("/:owner/:repository/pull/:id")(referrersOnly { repository =>
params("id").toIntOpt.flatMap{ issueId =>
val owner = repository.owner
val name = repository.name
getPullRequest(owner, name, issueId) map { case(issue, pullreq) =>
using(Git.open(getRepositoryDir(owner, name))){ git =>
// prepare head branch
val commitIdTo = fetchPullRequest(git, issueId, pullreq.requestUserName, pullreq.requestRepositoryName, pullreq.requestBranch)
updateCommitIdTo(owner, name, issueId, commitIdTo)
val (commits, diffs) = getRequestCompareInfo(owner, name, pullreq.commitIdFrom, owner, name, commitIdTo)
pulls.html.pullreq(
issue, pullreq,
getComments(owner, name, issueId),
(getCollaborators(owner, name) ::: (if(getAccountByUserName(owner).get.isGroupAccount) Nil else List(owner))).sorted,
getMilestonesWithIssueCount(owner, name),
commits,
diffs,
hasWritePermission(owner, name, context.loginAccount),
repository)
}
}
} getOrElse NotFound
})
ajaxGet("/:owner/:repository/pull/:id/mergeguide")(collaboratorsOnly { repository =>
params("id").toIntOpt.flatMap{ issueId =>
val owner = repository.owner
val name = repository.name
getPullRequest(owner, name, issueId) map { case(issue, pullreq) =>
pulls.html.mergeguide(
checkConflictInPullRequest(owner, name, pullreq.branch, pullreq.requestUserName, name, pullreq.requestBranch, issueId),
pullreq,
s"${baseUrl}${context.path}/git/${pullreq.requestUserName}/${pullreq.requestRepositoryName}.git")
}
} getOrElse NotFound
})
post("/:owner/:repository/pull/:id/merge", mergeForm)(collaboratorsOnly { (form, repository) =>
params("id").toIntOpt.flatMap { issueId =>
val owner = repository.owner
val name = repository.name
LockUtil.lock(s"${owner}/${name}/merge"){
getPullRequest(owner, name, issueId).map { case (issue, pullreq) =>
using(Git.open(getRepositoryDir(owner, name))) { git =>
// mark issue as merged and close.
val loginAccount = context.loginAccount.get
createComment(owner, name, loginAccount.userName, issueId, form.message, "merge")
createComment(owner, name, loginAccount.userName, issueId, "Close", "close")
updateClosed(owner, name, issueId, true)
// record activity
recordMergeActivity(owner, name, loginAccount.userName, issueId, form.message)
// merge
val mergeBaseRefName = s"refs/heads/${pullreq.branch}"
val merger = MergeStrategy.RECURSIVE.newMerger(git.getRepository, true)
val mergeBaseTip = git.getRepository.resolve(mergeBaseRefName)
val mergeTip = git.getRepository.resolve(s"refs/pull/${issueId}/head")
val conflicted = try {
!merger.merge(mergeBaseTip, mergeTip)
} catch {
case e: NoMergeBaseException => true
}
if (conflicted) {
throw new RuntimeException("This pull request can't merge automatically.")
}
// creates merge commit
val mergeCommit = new CommitBuilder()
mergeCommit.setTreeId(merger.getResultTreeId)
mergeCommit.setParentIds(Array[ObjectId](mergeBaseTip, mergeTip): _*)
val personIdent = new PersonIdent(loginAccount.fullName, loginAccount.mailAddress)
mergeCommit.setAuthor(personIdent)
mergeCommit.setCommitter(personIdent)
mergeCommit.setMessage(s"Merge pull request #${issueId} from ${pullreq.requestUserName}/${pullreq.requestRepositoryName}\\n\\n" +
form.message)
// insert the merge commit object and obtain its object id
val inserter = git.getRepository.newObjectInserter
val mergeCommitId = inserter.insert(mergeCommit)
inserter.flush()
inserter.release()
// update refs
val refUpdate = git.getRepository.updateRef(mergeBaseRefName)
refUpdate.setNewObjectId(mergeCommitId)
refUpdate.setForceUpdate(false)
refUpdate.setRefLogIdent(personIdent)
refUpdate.setRefLogMessage("merged", true)
refUpdate.update()
val (commits, _) = getRequestCompareInfo(owner, name, pullreq.commitIdFrom,
pullreq.requestUserName, pullreq.requestRepositoryName, pullreq.commitIdTo)
commits.flatten.foreach { commit =>
if(!existsCommitId(owner, name, commit.id)){
insertCommitId(owner, name, commit.id)
}
}
// notifications
Notifier().toNotify(repository, issueId, "merge"){
Notifier.msgStatus(s"${baseUrl}/${owner}/${name}/pull/${issueId}")
}
redirect(s"/${owner}/${name}/pull/${issueId}")
}
}
}
} getOrElse NotFound
})
get("/:owner/:repository/compare")(referrersOnly { forkedRepository =>
(forkedRepository.repository.originUserName, forkedRepository.repository.originRepositoryName) match {
case (Some(originUserName), Some(originRepositoryName)) => {
getRepository(originUserName, originRepositoryName, baseUrl).map { originRepository =>
using(
Git.open(getRepositoryDir(originUserName, originRepositoryName)),
Git.open(getRepositoryDir(forkedRepository.owner, forkedRepository.name))
){ (oldGit, newGit) =>
val oldBranch = JGitUtil.getDefaultBranch(oldGit, originRepository).get._2
val newBranch = JGitUtil.getDefaultBranch(newGit, forkedRepository).get._2
redirect(s"${context.path}/${forkedRepository.owner}/${forkedRepository.name}/compare/${originUserName}:${oldBranch}...${newBranch}")
}
} getOrElse NotFound
}
case _ => {
using(Git.open(getRepositoryDir(forkedRepository.owner, forkedRepository.name))){ git =>
JGitUtil.getDefaultBranch(git, forkedRepository).map { case (_, defaultBranch) =>
redirect(s"${context.path}/${forkedRepository.owner}/${forkedRepository.name}/compare/${defaultBranch}...${defaultBranch}")
} getOrElse {
redirect(s"${context.path}/${forkedRepository.owner}/${forkedRepository.name}")
}
}
}
}
})
get("/:owner/:repository/compare/*...*")(referrersOnly { repository =>
val Seq(origin, forked) = multiParams("splat")
val (originOwner, tmpOriginBranch) = parseCompareIdentifier(origin, repository.owner)
val (forkedOwner, tmpForkedBranch) = parseCompareIdentifier(forked, repository.owner)
(getRepository(originOwner, repository.name, baseUrl),
getRepository(forkedOwner, repository.name, baseUrl)) match {
case (Some(originRepository), Some(forkedRepository)) => {
using(
Git.open(getRepositoryDir(originOwner, repository.name)),
Git.open(getRepositoryDir(forkedOwner, repository.name))
){ case (oldGit, newGit) =>
val originBranch = JGitUtil.getDefaultBranch(oldGit, originRepository, tmpOriginBranch).get._2
val forkedBranch = JGitUtil.getDefaultBranch(newGit, forkedRepository, tmpForkedBranch).get._2
val forkedId = getForkedCommitId(oldGit, newGit,
originOwner, repository.name, originBranch,
forkedOwner, repository.name, forkedBranch)
val oldId = oldGit.getRepository.resolve(forkedId)
val newId = newGit.getRepository.resolve(forkedBranch)
val (commits, diffs) = getRequestCompareInfo(
originOwner, repository.name, oldId.getName,
forkedOwner, repository.name, newId.getName)
pulls.html.compare(
commits,
diffs,
repository.repository.originUserName.map { userName =>
userName :: getForkedRepositories(userName, repository.name)
} getOrElse List(repository.owner),
originBranch,
forkedBranch,
oldId.getName,
newId.getName,
repository,
originRepository,
forkedRepository,
hasWritePermission(repository.owner, repository.name, context.loginAccount))
}
}
case _ => NotFound
}
})
ajaxGet("/:owner/:repository/compare/*...*/mergecheck")(collaboratorsOnly { repository =>
val Seq(origin, forked) = multiParams("splat")
val (originOwner, tmpOriginBranch) = parseCompareIdentifier(origin, repository.owner)
val (forkedOwner, tmpForkedBranch) = parseCompareIdentifier(forked, repository.owner)
(getRepository(originOwner, repository.name, baseUrl),
getRepository(forkedOwner, repository.name, baseUrl)) match {
case (Some(originRepository), Some(forkedRepository)) => {
using(
Git.open(getRepositoryDir(originOwner, repository.name)),
Git.open(getRepositoryDir(forkedOwner, repository.name))
){ case (oldGit, newGit) =>
val originBranch = JGitUtil.getDefaultBranch(oldGit, originRepository, tmpOriginBranch).get._2
val forkedBranch = JGitUtil.getDefaultBranch(newGit, forkedRepository, tmpForkedBranch).get._2
pulls.html.mergecheck(
checkConflict(originOwner, repository.name, originBranch, forkedOwner, repository.name, forkedBranch))
}
}
case _ => NotFound()
}
})
post("/:owner/:repository/pulls/new", pullRequestForm)(referrersOnly { (form, repository) =>
val loginUserName = context.loginAccount.get.userName
val issueId = createIssue(
owner = repository.owner,
repository = repository.name,
loginUser = loginUserName,
title = form.title,
content = form.content,
assignedUserName = None,
milestoneId = None,
isPullRequest = true)
createPullRequest(
originUserName = repository.owner,
originRepositoryName = repository.name,
issueId = issueId,
originBranch = form.targetBranch,
requestUserName = form.requestUserName,
requestRepositoryName = repository.name,
requestBranch = form.requestBranch,
commitIdFrom = form.commitIdFrom,
commitIdTo = form.commitIdTo)
// fetch requested branch
using(Git.open(getRepositoryDir(repository.owner, repository.name))){ git =>
git.fetch
.setRemote(getRepositoryDir(form.requestUserName, repository.name).toURI.toString)
.setRefSpecs(new RefSpec(s"refs/heads/${form.requestBranch}:refs/pull/${issueId}/head"))
.call
}
// record activity
recordPullRequestActivity(repository.owner, repository.name, loginUserName, issueId, form.title)
// notifications
Notifier().toNotify(repository, issueId, form.content.getOrElse("")){
Notifier.msgPullRequest(s"${baseUrl}/${repository.owner}/${repository.name}/pull/${issueId}")
}
redirect(s"/${repository.owner}/${repository.name}/pull/${issueId}")
})
/**
* Checks whether merging would cause a conflict. Returns true if a conflict would occur.
*/
private def checkConflict(userName: String, repositoryName: String, branch: String,
requestUserName: String, requestRepositoryName: String, requestBranch: String): Boolean = {
LockUtil.lock(s"${userName}/${repositoryName}/merge-check"){
using(Git.open(getRepositoryDir(requestUserName, requestRepositoryName))) { git =>
val remoteRefName = s"refs/heads/${branch}"
val tmpRefName = s"refs/merge-check/${userName}/${branch}"
withTmpRefSpec(new RefSpec(s"${remoteRefName}:${tmpRefName}").setForceUpdate(true), git) { ref =>
// fetch objects from origin repository branch
git.fetch
.setRemote(getRepositoryDir(userName, repositoryName).toURI.toString)
.setRefSpecs(ref)
.call
// merge conflict check
val merger = MergeStrategy.RECURSIVE.newMerger(git.getRepository, true)
val mergeBaseTip = git.getRepository.resolve(s"refs/heads/${requestBranch}")
val mergeTip = git.getRepository.resolve(tmpRefName)
try {
!merger.merge(mergeBaseTip, mergeTip)
} catch {
case e: NoMergeBaseException => true
}
}
}
}
}
/**
* Checks whether merging within the pull request would cause a conflict. Returns true if a conflict would occur.
*/
private def checkConflictInPullRequest(userName: String, repositoryName: String, branch: String,
requestUserName: String, requestRepositoryName: String, requestBranch: String,
issueId: Int): Boolean = {
LockUtil.lock(s"${userName}/${repositoryName}/merge") {
using(Git.open(getRepositoryDir(userName, repositoryName))) { git =>
// merge
val merger = MergeStrategy.RECURSIVE.newMerger(git.getRepository, true)
val mergeBaseTip = git.getRepository.resolve(s"refs/heads/${branch}")
val mergeTip = git.getRepository.resolve(s"refs/pull/${issueId}/head")
try {
!merger.merge(mergeBaseTip, mergeTip)
} catch {
case e: NoMergeBaseException => true
}
}
}
}
/**
* Parses a branch identifier and extracts the owner and branch name as a tuple.
*
* - "owner:branch" to ("owner", "branch")
* - "branch" to ("defaultOwner", "branch")
*/
private def parseCompareIdentifier(value: String, defaultOwner: String): (String, String) =
if(value.contains(':')){
val array = value.split(":")
(array(0), array(1))
} else {
(defaultOwner, value)
}
/**
* Extracts all repository names from [[service.RepositoryService.RepositoryTreeNode]] as a flat list.
*/
private def getRepositoryNames(node: RepositoryTreeNode): List[String] =
node.owner :: node.children.map { child => getRepositoryNames(child) }.flatten
/**
* Returns the identifier of the root commit (or latest merge commit) of the specified branch.
*/
private def getForkedCommitId(oldGit: Git, newGit: Git, userName: String, repositoryName: String, branch: String,
requestUserName: String, requestRepositoryName: String, requestBranch: String): String =
JGitUtil.getCommitLogs(newGit, requestBranch, true){ commit =>
existsCommitId(userName, repositoryName, commit.getName) &&
JGitUtil.getBranchesOfCommit(oldGit, commit.getName).contains(branch)
}.head.id
private def getRequestCompareInfo(userName: String, repositoryName: String, branch: String,
requestUserName: String, requestRepositoryName: String, requestCommitId: String): (Seq[Seq[CommitInfo]], Seq[DiffInfo]) = {
using(
Git.open(getRepositoryDir(userName, repositoryName)),
Git.open(getRepositoryDir(requestUserName, requestRepositoryName))
){ (oldGit, newGit) =>
val oldId = oldGit.getRepository.resolve(branch)
val newId = newGit.getRepository.resolve(requestCommitId)
val commits = newGit.log.addRange(oldId, newId).call.iterator.asScala.map { revCommit =>
new CommitInfo(revCommit)
}.toList.splitWith{ (commit1, commit2) =>
view.helpers.date(commit1.time) == view.helpers.date(commit2.time)
}
val diffs = JGitUtil.getDiffs(newGit, oldId.getName, newId.getName, true)
(commits, diffs)
}
}
private def searchPullRequests(userName: Option[String], repository: RepositoryService.RepositoryInfo) =
defining(repository.owner, repository.name){ case (owner, repoName) =>
val filterUser = userName.map { x => Map("created_by" -> x) } getOrElse Map("all" -> "")
val page = IssueSearchCondition.page(request)
val sessionKey = Keys.Session.Pulls(owner, repoName)
// retrieve search condition
val condition = session.putAndGet(sessionKey,
if(request.hasQueryString) IssueSearchCondition(request)
else session.getAs[IssueSearchCondition](sessionKey).getOrElse(IssueSearchCondition())
)
pulls.html.list(
searchIssue(condition, filterUser, true, (page - 1) * PullRequestLimit, PullRequestLimit, owner -> repoName),
getPullRequestCountGroupByUser(condition.state == "closed", owner, Some(repoName)),
userName,
page,
countIssue(condition.copy(state = "open" ), filterUser, true, owner -> repoName),
countIssue(condition.copy(state = "closed"), filterUser, true, owner -> repoName),
countIssue(condition, Map.empty, true, owner -> repoName),
condition,
repository,
hasWritePermission(owner, repoName, context.loginAccount))
}
/**
* Fetch pull request contents into refs/pull/${issueId}/head and return the head commit id of the pull request.
*/
private def fetchPullRequest(git: Git, issueId: Int, requestUserName: String, requestRepositoryName: String, requestBranch: String): String = {
git.fetch
.setRemote(getRepositoryDir(requestUserName, requestRepositoryName).toURI.toString)
.setRefSpecs(new RefSpec(s"refs/heads/${requestBranch}:refs/pull/${issueId}/head").setForceUpdate(true))
.call
git.getRepository.resolve(s"refs/pull/${issueId}/head").getName
}
}
|
michaelpnash/gitbucket
|
src/main/scala/app/PullRequestsController.scala
|
Scala
|
apache-2.0
| 20,126
|
package com.timeout.docless.swagger
case class Paths(get: Seq[Path])
|
timeoutdigital/docless
|
src/main/scala/com/timeout/docless/swagger/Paths.scala
|
Scala
|
mit
| 70
|
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.sbt.routes
import play.routes.compiler.RoutesCompiler.GeneratedSource
import sbt._
import xsbti.{ Maybe, Position }
import scala.language.implicitConversions
/**
* Fix compatibility issues for RoutesCompiler. This is the version compatible with sbt 0.13.
*/
private[routes] trait RoutesCompilerCompat {
val routesPositionMapper: Position => Option[Position] = position => {
position.sourceFile collect {
case GeneratedSource(generatedSource) => {
new xsbti.Position {
override lazy val line: Maybe[Integer] = {
position.line
.flatMap(l => generatedSource.mapLine(l.asInstanceOf[Int]))
.map(l => Maybe.just(l.asInstanceOf[java.lang.Integer]))
.getOrElse(Maybe.nothing[java.lang.Integer])
}
override lazy val lineContent: String = {
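// Read the original routes source file and return the text of the mapped line, falling back to "" when unavailable.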
line flatMap { lineNo =>
sourceFile.flatMap { file =>
IO.read(file).split('\\n').lift(lineNo - 1)
}
} getOrElse ""
}
override val offset: Maybe[Integer] = Maybe.nothing[java.lang.Integer]
override val pointer: Maybe[Integer] = Maybe.nothing[java.lang.Integer]
override val pointerSpace: Maybe[String] = Maybe.nothing[String]
override val sourceFile: Maybe[File] = Maybe.just(generatedSource.source.get)
override val sourcePath: Maybe[String] = Maybe.just(sourceFile.get.getCanonicalPath)
}
}
}
}
}
|
zaneli/playframework
|
framework/src/sbt-plugin/src/main/scala-sbt-0.13/play/sbt/routes/RoutesCompilerCompat.scala
|
Scala
|
apache-2.0
| 1,577
|
package org.pgscala.converters
package test
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FeatureSpec
import org.scalatest.GivenWhenThen
import org.scalatest.Matchers
@RunWith(classOf[JUnitRunner])
class BooleanTest extends FeatureSpec with GivenWhenThen with Matchers {
feature("About to test a boolean converter") {
info("I want to test if PGNullableBooleanConverter works correctly, both in 2 way conversion")
scenario("boolean to string Nr. 1") {
Given("a starting boolean value of true")
val t = true
When("that value is converted to String")
val res = PGNullableBooleanConverter booleanToString t
Then("""It should return a String value "t"""")
res should equal("t")
}
scenario("boolean to string Nr. 2") {
Given("a starting boolean value of false")
val f = false
When("that value is converted to String")
val res = PGNullableBooleanConverter booleanToString f
Then("""It should return a String value "f"""")
res should equal("f")
}
scenario("string to boolean Nr. 1") {
Given("""a starting String value of "t"""")
val t = "t"
When("that value is converted to String")
val res = PGNullableBooleanConverter stringToBoolean t
Then("""It should return a boolean value true""")
res should equal(true)
}
scenario("string to boolean Nr. 2") {
Given("""a starting String value of "f"""")
val f = "f"
When("that value is converted to String")
val res = PGNullableBooleanConverter stringToBoolean f
Then("""It should return a boolean value false""")
res should equal(false)
}
}
}
|
melezov/pgscala
|
converters-java/src/test/scala/org/pgscala/converters/test/BooleanTest.scala
|
Scala
|
bsd-3-clause
| 1,713
|
package pl.newicom.dddd.office
import pl.newicom.dddd.{BusinessEntity, Eventsourced}
import pl.newicom.dddd.aggregate.{Command, EntityId}
import pl.newicom.dddd.cluster.DefaultDistributionStrategy
import scala.reflect.ClassTag
trait OfficeId extends BusinessEntity with Eventsourced {
def messageClass: Option[Class[_]]
def caseRef(caseLocalId: EntityId): CaseRef =
CaseRef(s"$id-$caseLocalId", this, version = None)
def distributionStrategy = new DefaultDistributionStrategy
def handles(command: Command): Boolean = messageClass.exists(_.isAssignableFrom(command.getClass))
}
case class CaseRef(id: EntityId, responsible: Eventsourced, version: Option[Long]) extends BusinessEntity with Eventsourced {
def department: String = responsible.department
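// The case-local part of the id: everything after the last '-' separator, or the whole id if no separator is present.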
def localId: EntityId = if (id.contains('-')) id.split('-').last else id
}
case class RemoteOfficeId[M: ClassTag](id: EntityId, department: String, commandClass: Class[M]) extends OfficeId {
override def messageClass: Option[Class[_]] = Some(commandClass)
}
|
pawelkaczor/akka-ddd
|
akka-ddd-protocol/src/main/scala/pl/newicom/dddd/office/OfficeId.scala
|
Scala
|
mit
| 1,038
|
/**
* This file is part of SensApp [ http://sensapp.modelbased.net ]
*
* Copyright (C) 2011- SINTEF ICT
* Contact: SINTEF ICT <nicolas.ferry@sintef.no>
*
* Module: net.modelbased.sensapp
*
* SensApp is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* SensApp is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General
* Public License along with SensApp. If not, see
* <http://www.gnu.org/licenses/>.
*/
package net.modelbased.sensapp.backyard.apm2import.datasets
/**
* This file is part of SensApp [ http://sensapp.modelbased.net ]
*
* Copyright (C) 2012- SINTEF ICT
* Contact: Sebastien Mosser <sebastien.mosser@sintef.no>
*
* Module: net.modelbased.sensapp.backyard.apm2import
*
* SensApp is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* SensApp is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General
* Public License along with SensApp. If not, see
* <http://www.gnu.org/licenses/>.
*/
/**
* Created by IntelliJ IDEA.
* User: franck
* Date: 01/06/12
* Time: 07:44
* To change this template use File | Settings | File Templates.
*/
import net.modelbased.sensapp.backyard.apm2import._
object EBike1 {
val log_file = "/EBike1.log"
val pwrlog_file = "/EBike1-Power.csv"
val out_folder = "../net.modelbased.sensapp.data.samples/CyclingData/EBike1/"
val name = "EBike1"
val altitude_offset = 0
val ground_altitude = 0
def main(args : Array[String]) {
var data = APMDataParser.parseAPMLog(log_file)
data = APMDataParser.chopDataSet(data, 2500, 8500)
APMDataParser.fixAltitude(data, altitude_offset)
//APMDataParser.fixAltitude(data, -ground_altitude)
APMDataParser.fix10HzTimeIncrements(data)
APMDataParser.setRelativeTime(data)
APMDataParser.printStats(data)
APMDataParser.writeAPMLog(out_folder + "raw/" + name + ".log", data)
APMDataParser.writeCSVLog(out_folder + "raw/" + name + ".csv", data)
APMDataParser.writeSRTFile(out_folder + name + ".srt", data, 5500, 100)
val data1hz = APMDataParser.extract1HzData(data)
APMDataParser.writeCSVLog(out_folder + "raw/" + name + "_1hz.csv", data1hz)
APMDataParser.writeSRTFile(out_folder + name + "_1hz.srt", data1hz, 5500 , 1000)
APMDataParser.writeSenML(out_folder + "raw/" + name + "_1hz.json", data1hz, name , 1339243536)
APMDataParser.writeIndividualSenML(out_folder + "data/" + name + "_1hz", data1hz, name , 1339243536);
var pwrdata = EBikeDataParser.parseEBikeLog(pwrlog_file)
pwrdata = EBikeDataParser.chopDataSet(pwrdata, 50, 2050)
EBikeDataParser.writeCSVLog(out_folder + "raw/" + name + "_power.csv", pwrdata)
var pwrdata1hz = EBikeDataParser.extract1HzData(pwrdata)
EBikeDataParser.writeCSVLog(out_folder + "raw/" + name + "_power_1hz.csv", pwrdata1hz)
val basetime = pwrdata1hz.head.time / 1000
EBikeDataParser.setRelativeTime(pwrdata1hz)
EBikeDataParser.writeIndividualSenML(out_folder + "data/" + name + "_1hz", pwrdata1hz, name , basetime)
}
}
|
SINTEF-9012/sensapp
|
net.modelbased.sensapp.backyard.apm2import/src/main/scala/net/modelbased/sensapp/backyard/apm2import/datasets/EBike1.scala
|
Scala
|
lgpl-3.0
| 3,843
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.optimization.tfocs
import org.scalatest.FunSuite
import org.apache.spark.mllib.linalg.{ BLAS, DenseVector, Vectors }
import org.apache.spark.mllib.optimization.tfocs.DVectorFunctions._
import org.apache.spark.mllib.optimization.tfocs.fs.vector.double._
import org.apache.spark.mllib.optimization.tfocs.fs.dvector.double._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
class ProxCapableFunctionSuite extends FunSuite with MLlibTestSparkContext {
test("The ProxZero implementation should return the expected value and vector") {
val fun = new ProxZero()
val x = new DenseVector(Array(10.0, -20.0, 30.0))
assert(fun(x) == 0.0, "value should be correct")
val ProxValue(Some(f), Some(g)) = fun(x, 1.5, ProxMode(true, true))
assert(f == 0.0, "minimum value should be correct")
assert(g == Vectors.dense(10.0, -20.0, 30.0), "minimizing value should be correct")
}
test("The ProxL1 implementation should return the expected value and vector") {
val fun = new ProxL1(1.1)
val x = new DenseVector(Array(10.0, -20.0, 30.0))
assert(fun(x) == 66.0, "value should be correct")
val ProxValue(Some(f), Some(g)) = fun(x, 1.5, ProxMode(true, true))
assert(f ~= 60.555 relTol 1e-12, "minimum value should be correct")
assert(g ~= Vectors.dense(8.35, -18.35, 28.34999999) relTol 1e-6,
"minimizing value should be correct")
}
test("The ProjRPlus implementation should return the expected value and vector") {
// Already nonnegative.
val fun = new ProjRPlus()
val x1 = new DenseVector(Array(10.0, 20.0, 30.0))
val ProxValue(Some(f1), Some(g1)) = fun(x1, 1.0, ProxMode(true, true))
assert(f1 == 0.0, "value should be correct")
assert(g1 == x1, "vector should be correct")
assert(fun(x1) == 0.0,
"value inside the nonnegative orthant should be correct for function short form")
// Some negative elements.
val x2 = new DenseVector(Array(-10.0, 20.0, -30.0))
val ProxValue(Some(f2), Some(g2)) = fun(x2, 1.0, ProxMode(true, true))
assert(f2 == 0.0, "value should be correct")
assert(g2 == Vectors.dense(0.0, 20.0, 0.0), "vector should be correct")
assert(fun(x2) == Double.PositiveInfinity,
"value outisde the nonnegative orthant should be correct for function short form")
}
test("The ProjBox implementation should return the expected value and vector") {
// Already within box.
val fun1 = new ProjBox(new DenseVector(Array(9, 19, 29)), new DenseVector(Array(11, 21, 31)))
val x1 = new DenseVector(Array(10.0, 20.0, 30.0))
val ProxValue(Some(f1), Some(g1)) = fun1(x1, 1.0, ProxMode(true, true))
assert(f1 == 0.0, "value should be correct")
assert(g1 == x1, "vector should be correct")
assert(fun1(x1) == 0.0, "value within the box should be correct for function short form")
// Some elements outside box.
val fun2 = new ProjBox(new DenseVector(Array(10.5, 19, 29)),
new DenseVector(Array(11, 21, 29.5)))
val x2 = new DenseVector(Array(10.0, 20.0, 30.0))
val ProxValue(Some(f2), Some(g2)) = fun2(x2, 1.0, ProxMode(true, true))
assert(f2 == 0.0, "value should be correct")
assert(g2 == Vectors.dense(10.5, 20, 29.5), "vector should be correct")
// Some elements outside other boxes.
val fun3 = new ProjBox(new DenseVector(Array(10.5, 19, 29)), new DenseVector(Array(11, 21, 31)))
assert(fun3(x2) == Double.PositiveInfinity,
"value outisde the box should be correct for function short form")
val fun4 = new ProjBox(new DenseVector(Array(10, 19, 29)), new DenseVector(Array(11, 21, 29.5)))
assert(fun4(x2) == Double.PositiveInfinity,
"value outisde the box should be correct for function short form")
}
test("The ProxShiftPlus implementation should return the expected value and vector") {
// Already nonnegative.
val c1 = sc.parallelize(Array(new DenseVector(Array(9.0, 19.0)), new DenseVector(Array(29.0))))
val fun1 = new ProxShiftRPlus(c1)
val x1 = sc.parallelize(Array(new DenseVector(Array(10.0, 20.0)), new DenseVector(Array(30.0))))
val expectedEvalF1 = 10 * 9 + 19 * 20 + 29 * 30
assert(fun1(x1) == expectedEvalF1, "eval value should be correct")
val ProxValue(Some(f1), Some(g1)) = fun1(x1, 0.8, ProxMode(true, true))
val expectedG1 = Vectors.dense(10 - .8 * 9, 20 - .8 * 19, 30 - .8 * 29)
val expectedF1 = BLAS.dot(Vectors.dense(c1.flatMap(_.toArray).collect), expectedG1)
assert(f1 == expectedF1, "value should be correct")
assert(Vectors.dense(g1.collectElements) == expectedG1, "vector should be correct")
// Some negative elements.
val c2 = sc.parallelize(Array(new DenseVector(Array(9.0, -19.0)),
new DenseVector(Array(-29.0))))
val fun2 = new ProxShiftRPlus(c2)
val x2 = sc.parallelize(Array(new DenseVector(Array(-10.0, 20.0)),
new DenseVector(Array(-30.0))))
assert(fun2(x2) == Double.PositiveInfinity, "eval value should be correct")
val ProxValue(Some(f2), Some(g2)) = fun2(x2, 0.8, ProxMode(true, true))
val expectedG2 = Vectors.dense(0.0, 20 - .8 * -19, 0.0)
val expectedF2 = BLAS.dot(Vectors.dense(c2.flatMap(_.toArray).collect), expectedG2)
assert(f2 == expectedF2, "value should be correct")
assert(Vectors.dense(g2.collectElements) == expectedG2, "vector should be correct")
}
}
|
databricks/spark-tfocs
|
src/test/scala/org/apache/spark/mllib/optimization/tfocs/ProxCapableFunctionSuite.scala
|
Scala
|
apache-2.0
| 6,209
|
package io.buoyant.linkerd
package admin
import com.twitter.finagle._
import com.twitter.finagle.buoyant.DstBindingFactory
import com.twitter.finagle.naming.NameInterpreter
import com.twitter.server.handler.{ResourceHandler, SummaryHandler => _}
import io.buoyant.admin.Admin.{Handler, NavItem}
import io.buoyant.admin.names.{BoundNamesHandler, DelegateApiHandler, DelegateHandler}
import io.buoyant.admin.{Admin, ConfigHandler, StaticFilter, _}
import io.buoyant.namer.{Delegator, EnumeratingNamer, NamespacedInterpreterConfig}
import io.buoyant.router.RoutingFactory
object LinkerdAdmin {
def boundNames(namers: Seq[Namer]): Seq[Handler] = {
val enumerating = namers.collect { case en: EnumeratingNamer => en }
Seq(Handler("/bound-names.json", new BoundNamesHandler(enumerating)))
}
def config(lc: Linker.LinkerConfig): Seq[Handler] = Seq(
Handler("/config.json", new ConfigHandler(lc, Linker.LoadedInitializers.iter))
)
def delegator(adminHandler: AdminHandler, routers: Seq[Router]): Seq[Handler] = {
val byLabel = routers.map(r => r.label -> r).toMap
val dtabs = byLabel.mapValues { router =>
val RoutingFactory.BaseDtab(dtab) = router.params[RoutingFactory.BaseDtab]
dtab()
}
val interpreters = byLabel.mapValues { router =>
val DstBindingFactory.Namer(namer) = router.params[DstBindingFactory.Namer]
namer
}
def getInterpreter(label: String): NameInterpreter =
interpreters.getOrElse(label, NameInterpreter)
Seq(
Handler("/delegator", new DelegateHandler(adminHandler, dtabs, getInterpreter)),
Handler("/delegator.json", new DelegateApiHandler(getInterpreter))
)
}
def static(adminHandler: AdminHandler): Seq[Handler] = Seq(
Handler("/", new DashboardHandler(adminHandler)),
Handler("/files/", StaticFilter.andThen(ResourceHandler.fromDirectoryOrJar(
baseRequestPath = "/files/",
baseResourcePath = "io/buoyant/admin",
localFilePath = "admin/src/main/resources/io/buoyant/admin"
))),
Handler("/help", new HelpPageHandler(adminHandler)),
Handler("/logging", new LoggingHandler(adminHandler)),
Handler("/logging.json", new LoggingApiHandler())
)
def apply(lc: Linker.LinkerConfig, linker: Linker): Seq[Handler] = {
val navItems = Seq(
NavItem("dtab", "delegator"),
NavItem("logging", "logging")
) ++ Admin.extractNavItems(
linker.namers ++
linker.routers.map(_.interpreter) ++
linker.routers ++
linker.telemeters
) :+ NavItem("help", "help")
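// Removes duplicate items by key f while preserving order, keeping the last occurrence of each duplicated key.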
def uniqBy[T, U](items: Seq[T])(f: T => U): Seq[T] = items match {
case Nil => items
case Seq(t) => items
case t +: rest if rest.map(f).contains(f(t)) => uniqBy(rest)(f)
case t +: rest => t +: uniqBy(rest)(f)
}
val adminHandler = new AdminHandler(uniqBy(navItems)(_.name))
val extHandlers = Admin.extractHandlers(
linker.namers ++
linker.routers ++
linker.routers.map(_.interpreter) ++
linker.telemeters
).map {
case Handler(url, service, css) =>
val adminFilter = new AdminFilter(adminHandler, css)
Handler(url, adminFilter.andThen(service), css)
}
static(adminHandler) ++ config(lc) ++
boundNames(linker.namers.map { case (_, n) => n }) ++
delegator(adminHandler, linker.routers) ++
extHandlers
}
}
|
hhtpcd/linkerd
|
linkerd/admin/src/main/scala/io/buoyant/linkerd/admin/LinkerdAdmin.scala
|
Scala
|
apache-2.0
| 3,402
|
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.ml.crf
object TestDatasets {
def smallText = {
val labels = new TextSentenceLabels(Seq("One", "Two", "One", "Two"))
val sentence = new TextSentenceAttrs(Seq(
new WordAttrs(Seq("attr1" -> "")),
new WordAttrs(Seq("attr1" -> "value1", "attr2" ->"value1", "attr3" -> "")),
new WordAttrs(Seq("attr1" -> "", "attr3" -> "")),
new WordAttrs(Seq("attr1" -> "value1"))
))
Seq(labels -> sentence).toIterator
}
def doubleText = smallText ++ smallText
def small = {
val metadata = new DatasetEncoder()
val (label1, word1) = metadata.getFeatures(metadata.startLabel, "label1",
Seq("one"), Seq(1f, 2f))
val (label2, word2) = metadata.getFeatures("label1", "label2",
Seq("two"), Seq(2f, 3f))
val instance = new Instance(Seq(word1, word2))
val labels = new InstanceLabels(Seq(1, 2))
new CrfDataset(Seq(labels -> instance), metadata.getMetadata)
}
}
|
JohnSnowLabs/spark-nlp
|
src/test/scala/com/johnsnowlabs/ml/crf/TestDatasets.scala
|
Scala
|
apache-2.0
| 1,554
|
package practice
object ArrayTricks extends App {
/**
* We have our lists of orders sorted numerically already, in arrays. Write
* a function to merge our arrays of orders into one sorted array.
*
* For example:
* my_array = [3,4,6,10,11,15]
* alices_array = [1,5,8,12,14,19]
*
* print merge_arrays(my_array, alices_array)
* # prints
* [1,3,4,5,6,8,10,11,12,14,15,19]
*/
def merge(xs: Array[Int], ys: Array[Int]): Array[Int] = {
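// Two-pointer merge: repeatedly append the smaller of the two current heads, then copy whatever remains of either array.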
var i, j = 0
var zs = Array[Int]()
while (i < xs.length && j < ys.length) {
if (xs(i) < ys(j)) {
zs = zs :+ xs(i)
i = i + 1
} else {
zs = zs :+ ys(j)
j = j + 1
}
}
while (i < xs.length) {
zs = zs :+ xs(i)
i = i + 1
}
while (j < ys.length) {
zs = zs :+ ys(j)
j = j + 1
}
zs
}
/**
* Given an array_of_ints, find the highest_product you can get from three
* of the integers. The input array_of_ints will always have at least three
* integers.
*
* Solution - use greedy algorithm
* 1. keep track of top 2 and at each i keep checking if currentMax > top 2 * currentVal
* 2. need to check bottom 2 to check for negative number, e.g [-10, -10, 1, 3, 2]
*
* slightly cleaner variant - keep track of 'product' of top 2 and 'product' of bottom 2
*
*/
def max3(f: Array[Int]): Int = {
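// Greedy single pass: max1/min1 track the largest/smallest value seen so far, max2/min2 the largest/smallest product of
// two values, and max3 the best product of three; tracking the minima covers candidates built from negative numbers.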
var max1 = f(0)
var max2 = f(0) * f(1)
var max3 = f(0) * f(1) * f(2)
var min1 = f(0)
var min2 = f(0) * f(1)
for (i <- 1 to f.length - 1) {
if (i > 2) {
max3 = Math.max(max3, Math.max(f(i) * max2, f(i) * min2))
}
if (i > 1) {
max2 = Math.max(max2, f(i) * max1)
min2 = Math.min(min2, f(i) * min1)
}
max1 = Math.max(max1, f(i))
min1 = Math.min(min1, f(i))
}
max3
}
/**
* You have an array of integers, and for each index you want to find the
* product of every integer except the integer at that index. Write a
* function get_products_of_all_ints_except_at_index() that takes an array
* of integers and returns an array of the products. For example, given: [1,
* 7, 3, 4] your function would return: [84, 12, 28, 21] by calculating:
* [7*3*4, 1*3*4, 1*7*4, 1*7*3] Do not use division in your solution.
*
* Solution - use greedy algorithm - overlapping subproblems
* 1. the value at i = product( product all values before i * product of all values after i)
* 2. get the product of all values before i in 1 pass
* 3. get the product of all values after i in 1 pass
* 4. multiply the resulting array -> this can be done using 1 array
*
* e.g. getProduct(new int[] {1, 2, 6, 5, 9})
*/
def multiply(xs: Array[Int]): Array[Int] = {
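// accLeft(i) holds the product of all elements before index i (scanLeft seeds with 1);
// accRight(i), after dropping the head of scanRight, holds the product of all elements after index i.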
val accLeft = xs.scanLeft(1)(_ * _)
val accRight = xs.scanRight(1)(_ * _).drop(1)
accLeft.zip(accRight).map {
case (a, b) => a * b
}
}
println(s"merge ${merge(Array(3, 4, 6, 10, 11, 15), Array(1, 5, 8, 12, 14, 19)).mkString(",")}")
println(s"max3 ${max3(Array(-10, -10, 1, 3, 2, 7))}")
println(s"multiply ${multiply(Array(1, 7, 3, 4)).mkString(",")}")
}
|
mitochon/hexercise
|
src/practice/src/main/scala/practice/ArrayTricks.scala
|
Scala
|
mit
| 3,146
|
package model
import play.api.libs.json._
/**
* Represents the Swagger definition for QueueItemImpl.
* @param additionalProperties Any additional properties this model may have.
*/
@javax.annotation.Generated(value = Array("org.openapitools.codegen.languages.ScalaPlayFrameworkServerCodegen"), date = "2022-02-13T02:38:35.589632Z[Etc/UTC]")
case class QueueItemImpl(
`class`: Option[String],
expectedBuildNumber: Option[Int],
id: Option[String],
pipeline: Option[String],
queuedTime: Option[Int],
additionalProperties: JsObject
)
object QueueItemImpl {
implicit lazy val queueItemImplJsonFormat: Format[QueueItemImpl] = {
val realJsonFormat = Json.format[QueueItemImpl]
val declaredPropNames = Set("`class`", "expectedBuildNumber", "id", "pipeline", "queuedTime")
Format(
Reads {
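// Split the incoming object: declared keys bind to the case class fields, all remaining keys are collected
// into the additionalProperties object before binding.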
case JsObject(xs) =>
val declaredProps = xs.filterKeys(declaredPropNames)
val additionalProps = JsObject(xs -- declaredPropNames)
val restructuredProps = declaredProps + ("additionalProperties" -> additionalProps)
val newObj = JsObject(restructuredProps)
realJsonFormat.reads(newObj)
case _ =>
JsError("error.expected.jsobject")
},
Writes { queueItemImpl =>
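// On write, flatten the collected additionalProperties back into the top-level JSON object.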
val jsObj = realJsonFormat.writes(queueItemImpl)
val additionalProps = jsObj.value("additionalProperties").as[JsObject]
val declaredProps = jsObj - "additionalProperties"
val newObj = declaredProps ++ additionalProps
newObj
}
)
}
}
|
cliffano/swaggy-jenkins
|
clients/scala-play-server/generated/app/model/QueueItemImpl.scala
|
Scala
|
mit
| 1,548
|
package com.arcusys.valamis.settings.service
import com.arcusys.valamis.settings.model.LRSToActivitySetting
trait LRSToActivitySettingService {
def getAll: Seq[LRSToActivitySetting]
def getByCourseId(courseId: Int): Seq[LRSToActivitySetting]
def create(courseId: Int, title: String, mappedActivity: Option[String], mappedVerb: Option[String]): LRSToActivitySetting
def modify(id: Int, courseId: Int, title: String, mappedActivity: Option[String], mappedVerb: Option[String]): LRSToActivitySetting
def delete(id: Int)
}
|
ViLPy/Valamis
|
valamis-core/src/main/scala/com/arcusys/valamis/settings/service/LRSToActivitySettingService.scala
|
Scala
|
lgpl-3.0
| 531
|
package concrete
import bitvectors.BitVector
import com.typesafe.scalalogging.LazyLogging
import concrete.constraint.{Constraint, StatefulConstraint}
import concrete.util.{IdentityMap, Interval}
import cspom.UNSATException
import scala.annotation.tailrec
import scala.collection.immutable
import scala.collection.immutable.IntMap
sealed trait Outcome {
def tryAssign(variable: Variable, i: Int): Outcome
def andThen(f: ProblemState => Outcome): Outcome
def orElse[A >: ProblemState](f: => A): A
def map[A](f: ProblemState => A): Option[A]
def filterDom(v: Variable)(f: Int => Boolean): Outcome
def filterBounds(v: Variable)(f: Int => Boolean): Outcome
def shaveDom(v: Variable, lb: Int, ub: Int): Outcome
def shaveDom(v: Variable, itv: Interval): Outcome = shaveDom(v, itv.lb, itv.ub)
def intersectDom(v: Variable, d: Domain): Outcome = {
updateDom(v, dom(v) & d)
}
def removeTo(v: Variable, ub: Int): Outcome
def removeFrom(v: Variable, lb: Int): Outcome
def removeUntil(v: Variable, ub: Int): Outcome
def removeAfter(v: Variable, lb: Int): Outcome
def entail(c: Constraint): Outcome
def entail(c: Constraint, i: Int): Outcome
def entailIfFree(c: Constraint): Outcome
def entailIfFree(c: Constraint, doms: Array[Domain]): Outcome
def entailIf(c: Constraint, f: ProblemState => Boolean): Outcome
def updateDom(v: Variable, d: Domain): Outcome
def assign(v: Variable, value: Int): Outcome
def remove(v: Variable, value: Int): Outcome
def doms(vs: Array[Variable]): Array[Domain] = {
val d = new Array[Domain](vs.length)
var i = vs.length - 1
while (i >= 0) {
d(i) = dom(vs(i))
i -= 1
}
d
}
def dom(v: Variable): Domain
def card(v: Variable): Int = dom(v).size
def span(v: Variable): Interval = dom(v).span
//def boolDom(v: Variable): BooleanDomain = dom(v).asInstanceOf[BooleanDomain]
def updateState[S <: AnyRef](c: StatefulConstraint[S], newState: S): Outcome
//def domainsOption: Option[IndexedSeq[Domain]]
//def toString(problem: Problem): String
def toState: ProblemState
//def isEntailed(c: Constraint): Boolean
def activeConstraints(v: Variable): BitVector
def apply[S <: AnyRef](c: StatefulConstraint[S]): S
def isState: Boolean
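// Folds f over the elements, threading the problem state and short-circuiting on the first Contradiction.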
def fold[A](s: Iterable[A])(f: (ProblemState, A) => Outcome): Outcome = {
fold(s.iterator, f)
}
@tailrec
final def fold[A](it: Iterator[A], f: (ProblemState, A) => Outcome): Outcome = {
this match {
case ps: ProblemState if it.hasNext => f(ps, it.next()).fold(it, f)
case e => e
}
}
def dueTo(cause: => (Constraint, Iterable[Variable])): Outcome
}
object Contradiction {
def apply(to: Variable): Contradiction = Contradiction(Seq(to))
def apply(to: Array[Variable]): Contradiction = Contradiction(immutable.ArraySeq.unsafeWrapArray(to))
def apply(to: Seq[Variable]): Contradiction = Contradiction(None, Seq.empty, to)
}
case class Contradiction(cause: Option[Constraint], from: Seq[Variable], to: Seq[Variable]) extends Outcome {
def andThen(f: ProblemState => Outcome): Outcome = this
def orElse[A >: ProblemState](f: => A): A = f
def map[A](f: ProblemState => A): Option[A] = None
def filterDom(v: Variable)(f: Int => Boolean): Outcome = this
def filterBounds(v: Variable)(f: Int => Boolean): Outcome = this
def shaveDom(v: Variable, lb: Int, ub: Int): Outcome = this
def entailIfFree(c: Constraint): Outcome = this
def entailIfFree(c: Constraint, doms: Array[Domain]): Outcome = this
def entailIf(c: Constraint, f: ProblemState => Boolean): Outcome = this
def removeTo(v: Variable, ub: Int): Outcome = this
def removeFrom(v: Variable, lb: Int): Outcome = this
def removeUntil(v: Variable, ub: Int): Outcome = this
def removeAfter(v: Variable, lb: Int): Outcome = this
def updateDom(v: Variable, d: Domain): Outcome = this
def remove(v: Variable, value: Int): Outcome = this
def dom(v: Variable): Domain = throw new UNSATException("Tried to get a domain from a Contradiction")
def toState = throw new UNSATException("Tried to get state from a Contradiction")
def apply[S <: AnyRef](c: StatefulConstraint[S]): S = throw new UNSATException("Tried to get state from a Contradiction")
def assign(v: Variable, value: Int): concrete.Outcome = this
def entail(c: Constraint): concrete.Outcome = this
def entail(c: Constraint, i: Int): concrete.Outcome = this
def activeConstraints(v: Variable): BitVector = throw new UNSATException("Tried to get state from a Contradiction")
def updateState[S <: AnyRef](c: StatefulConstraint[S], newState: S): Outcome = this
def isState = false
def dueTo(cause: => (Constraint, Iterable[Variable])) = Contradiction(Some(cause._1), this.from ++ cause._2, to)
override def tryAssign(variable: Variable, i: Int): Outcome = this
}
object ProblemState {
def apply(problem: Problem, decisionVariables: Set[Variable]): Outcome = {
val doms = problem.variables.zipWithIndex.map { case (x, i) => i -> x.initDomain }
.to(IntMap)
new ProblemState(
domains = doms,
entailed = EntailmentManager(problem.variables.toSeq)
)
.padConstraints(problem.constraints)
}
def isFree(doms: Array[Domain]): Boolean = {
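// True when at most one of the given domains is unassigned.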
var one = false
var i = doms.length - 1
while (i >= 0) {
if (!doms(i).isAssigned) {
if (one) {
return false
}
one = true
}
i -= 1
}
true
}
def singleFree(doms: Array[Domain]): Option[Int] = {
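// Returns the index of the single unassigned domain, or None when zero or more than one domain is unassigned.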
var f = -1
var i = doms.length - 1
while (i >= 0) {
if (!doms(i).isAssigned) {
if (f >= 0) return None
f = i
}
i -= 1
}
if (f < 0) None else Some(f)
}
}
class ProblemState(
private val domains: IntMap[Domain],
private val constraintStates: Map[Int, AnyRef] = Map(),
private val initializedConstraints: Int = 0,
val entailed: EntailmentManager,
val recentUpdates: IntMap[Domain] = IntMap(),
// private val pending: IntMap[Domain] = IntMap(),
data: IdentityMap[AnyRef, Any] = new IdentityMap[AnyRef, Any]()) extends Outcome
with LazyLogging {
// def this(domains: Vector[Domain], constraintStates: Vector[AnyRef], entailed: EntailmentManager)
// = this(domains, constraintStates, entailed, new IdentityMap[AnyRef, Any]())
def clearRecent: ProblemState = {
new ProblemState(domains, constraintStates, initializedConstraints, entailed, IntMap(), data)
}
def wDeg(v: Variable): Int = entailed.wDeg(v)
def isState = true
def andThen(f: ProblemState => Outcome): Outcome = f(this)
def orElse[A >: ProblemState](f: => A): A = this
def map[A](f: ProblemState => A) = Some(f(this))
def apply[S <: AnyRef](c: StatefulConstraint[S]): S =
constraintStates(c.id).asInstanceOf[S]
def updateState[S <: AnyRef](c: StatefulConstraint[S], newState: S): ProblemState = {
val id = c.id
if (constraintStates.get(id).exists(_ eq newState)) {
this
} else {
new ProblemState(domains, constraintStates.updated(id, newState), initializedConstraints, entailed, recentUpdates, data)
}
}
def padConstraints(constraints: Array[Constraint]): Outcome = {
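// Register only the constraints added since the previous call, then let each new constraint initialize its state.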
val newConstraints = constraints.view.drop(initializedConstraints)
new ProblemState(domains, constraintStates, constraints.length,
entailed.addConstraints(newConstraints), recentUpdates, data)
.fold(newConstraints)((p, c) => c.init(p))
}
//
//
// def padConstraints(constraints: Array[Constraint]): Outcome = {
// assert(constraints.sliding(2).forall (array => array(0).id < array(1).id ))
//
// new ProblemState(domains, constraintStates, lastId, entailed.addConstraints(newConstraints), recentUpdates, data)
//
// constraints.reverseIterator.takeWhile(_.id > initializedConstraints)
//
//
// var i = constraints.length - 1
// var ps: Outcome = this
// while (constraints(i).id > initializedConstraints) {
// new ProblemState(domains, constraintStates, lastId, entailed.addConstraints(newConstraints), recentUpdates, data)
// .fold(newConstraints)((p, c) => if (!p.hasStateFor(c)) c.init(p))
// }
// //
// // newConstraints.foldLeft(
// // new ProblemState(domains, padded, entailed.addConstraints(newConstraints), pending, data): Outcome) {
// // case (out, c) => out.andThen(c.init)
// // }
//
// }
//def isEntailed(c: Constraint): Boolean = entailed(c)
def activeConstraints(v: Variable): BitVector = entailed.active(v)
def shaveDomNonEmpty(variable: Variable, itv: Interval): ProblemState = {
updateDomNonEmpty(variable, dom(variable) & itv)
}
def assign(v: Variable, value: Int): ProblemState = {
val assigned = dom(v).assign(value)
assert(assigned.nonEmpty)
updateDomNonEmpty(v, assigned)
}
//def isEntailed(c: Constraint): Boolean = entailed(c.id)
def tryAssign(v: Variable, value: Int): Outcome = {
val d = dom(v)
if (d.contains(value)) {
if (d.isAssigned) {
this
} else {
updateDomNonEmptyNoCheck(v, d.assign(value))
}
} else {
Contradiction(v)
}
}
def remove(v: Variable, value: Int): Outcome = {
updateDom(v, dom(v) - value)
}
def removeIfPresent(v: Variable, value: Int): Outcome = {
val d = dom(v)
if (d.contains(value)) {
updateDom(v, d - value)
} else {
this
}
}
def updateDom(v: Variable, newDomain: Domain): Outcome = {
if (newDomain.isEmpty) {
Contradiction(v)
} else {
updateDomNonEmpty(v, newDomain)
}
}
def updateDomNonEmpty(variable: Variable, newDomain: Domain): ProblemState = {
assert(newDomain.nonEmpty)
val oldDomain = dom(variable)
assert(newDomain subsetOf oldDomain, s"replacing $oldDomain with $newDomain: not a subset!")
if (oldDomain.size == newDomain.size) {
assert(oldDomain.subsetOf(newDomain), s"replacing $oldDomain with $newDomain: same size but not equal")
this
} else {
assert(newDomain.size < oldDomain.size, s"replacing $oldDomain with $newDomain: domain size seems to have increased")
// assert(!oldDomain.isInstanceOf[BooleanDomain] || newDomain.isInstanceOf[BooleanDomain], s"replaced $oldDomain with $newDomain: type changed")
//assert(!oldDomain.isInstanceOf[IntDomain] || newDomain.isInstanceOf[IntDomain], s"replaced $oldDomain with $newDomain: type changed")
updateDomNonEmptyNoCheck(variable, newDomain)
}
}
def updateDomNonEmptyNoCheck(variable: Variable, newDomain: Domain): ProblemState = {
assert(newDomain.nonEmpty)
assert(dom(variable) ne newDomain)
val id = variable.id
assert(id >= 0 || (dom(variable) eq newDomain), s"$variable updated to $newDomain is not a problem variable")
assert(!newDomain.isInstanceOf[BitVectorDomain] || (newDomain.size <= newDomain.last - newDomain.head))
// if (pending.size >= 64) {
// var sendDomains = domains.updated(id, newDomain)
// for ((id, d) <- pending) {
// sendDomains = sendDomains.updated(id, d)
// }
// new ProblemState(sendDomains, constraintStates, initializedConstraints, entailed, IntMap(), data)
// } else {
// new ProblemState(domains, constraintStates, initializedConstraints, entailed, pending.updated(id, newDomain), data)
// }
new ProblemState(domains.updated(id, newDomain),
constraintStates, initializedConstraints, entailed,
recentUpdates.updated(id, newDomain),
data)
}
def dom(v: Variable): Domain = {
val id = v.id
if (id < 0) v.initDomain else dom(id)
}
def filterDom(v: Variable)(f: Int => Boolean): Outcome =
updateDom(v, dom(v).filter(f))
def filterBounds(v: Variable)(f: Int => Boolean): Outcome =
updateDom(v, dom(v).filterBounds(f))
def shaveDom(v: Variable, lb: Int, ub: Int): Outcome =
updateDom(v, dom(v) & (lb, ub))
def removeTo(v: Variable, ub: Int): Outcome =
updateDom(v, dom(v).removeTo(ub))
override def removeFrom(v: Variable, lb: Int): Outcome =
updateDom(v, dom(v).removeFrom(lb))
override def removeUntil(v: Variable, ub: Int): Outcome =
updateDom(v, dom(v).removeUntil(ub))
override def removeAfter(v: Variable, lb: Int): Outcome =
updateDom(v, dom(v).removeAfter(lb))
def currentDomains: Iterator[Domain] = {
Iterator.range(0, domains.size).map(i => dom(i))
}
def dom(i: Int): Domain = {
domains(i) //.getOrElse(i, domains(i))
}
def entailIfFree(c: Constraint): ProblemState = c.singleFree(this).map(entail(c, _)).getOrElse(this)
def entailIfFree(c: Constraint, doms: Array[Domain]): ProblemState = ProblemState.singleFree(doms).map(entail(c, _)).getOrElse(this)
def entail(c: Constraint, i: Int): ProblemState = {
new ProblemState(domains, constraintStates, initializedConstraints, entailed.entail(c, i), recentUpdates, data)
}
def entailIf(c: Constraint, f: ProblemState => Boolean): ProblemState = {
if (f(this)) entail(c) else this
}
def entail(c: Constraint): ProblemState = {
new ProblemState(domains, constraintStates, initializedConstraints, entailed.entail(c, this), recentUpdates, data)
//else this
}
def toState: ProblemState = this
def dueTo(cause: => (Constraint, Iterable[Variable])): ProblemState = this
def updateData(key: AnyRef, value: Any) =
new ProblemState(domains, constraintStates, initializedConstraints, entailed, recentUpdates, data.updated(key, value))
def getData[A](key: AnyRef): A = data(key).asInstanceOf[A]
def sameDomains(ps: ProblemState): Boolean = domains eq ps.domains
override def toString = s"ProblemState($domains, $constraintStates, $entailed)"
}
|
concrete-cp/concrete
|
src/main/scala/concrete/ProblemState.scala
|
Scala
|
lgpl-2.1
| 13,956
|
package sylvestris.core
import scalaz.Equal
object Tag {
implicit val eqInstance = Equal.equalA[Tag]
}
case class Tag(v: String)
|
drostron/sylvestris
|
core/src/main/scala/sylvestris/core/Tag.scala
|
Scala
|
mit
| 134
|
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.actorregistry
import akka.actor._
import org.squbs.actorregistry.ActorRegistryBean._
import org.squbs.unicomplex.Initialized
import org.squbs.unicomplex.JMX._
import scala.jdk.CollectionConverters._
import scala.language.existentials
import scala.util.Success
private[actorregistry] case class StartActorRegister(cubeNameList: Seq[CubeActorInfo], timeout: Int)
private[actorregistry] case class CubeActorInfo(actorPath: String ,messageTypeList: Seq[CubeActorMessageType])
private[actorregistry] case class CubeActorMessageType(requestClassName: Option[String] = None,
responseClassName: Option[String] = None)
private[actorregistry] case class ActorLookupMessage(actorLookup: ActorLookup[_], msg: Any)
private[actorregistry] case object ObtainRegistry
private[actorregistry] object ActorRegistry {
val path = "/user/ActorRegistryCube/ActorRegistry"
val configBean = "org.squbs.unicomplex:type=ActorRegistry"
}
private[actorregistry] class ActorRegistry extends Actor with Stash {
var registry = Map.empty[ActorRef, Seq[CubeActorMessageType]]
var cubeCount = 0
private class ActorRegistryBean(actor: ActorRef) extends ActorRegistryMXBean {
def getPath = actor.path.toString
def getActorMessageTypeList = registry.getOrElse(actor, List.empty[CubeActorMessageType]).map(_.toString).asJava
}
override def postStop(): Unit = {
unregister(prefix + ActorRegistry.configBean)
totalBeans.asScala.foreach(unregister)
}
import ActorRegistry._
import ActorRegistryBean._
def startupReceive: Receive = {
case ActorIdentity(cubeActorInfo : CubeActorInfo, Some(actor))=>
registry += actor -> cubeActorInfo.messageTypeList
register(new ActorRegistryBean(actor) , objName(actor))
context.watch(actor)
cubeCount -= 1
if (cubeCount <= 0) {
context.parent ! Initialized(Success(None))
context.unbecome()
}
case _ => stash()
}
def receive = {
case StartActorRegister(cubeActorInfoList, timeout) =>
register(new ActorRegistryConfigBean(timeout, context), prefix + configBean )
cubeCount = cubeActorInfoList.size
if(cubeCount == 0) // No well-known actors to register
context.parent ! Initialized(Success(None))
else {
cubeActorInfoList.foreach { cubeActorInfo =>
context.actorSelection(cubeActorInfo.actorPath) ! Identify(cubeActorInfo)
}
context.become(startupReceive)
}
case ActorLookupMessage(lookupObj, Identify("ActorLookup")) =>
val result = processActorLookup(lookupObj).keys.headOption
sender() ! ActorIdentity("ActorLookup", result)
case ActorLookupMessage(lookupObj, msg) =>
processActorLookup(lookupObj) match {
case result if result.isEmpty =>
sender() ! org.squbs.actorregistry.ActorNotFound(lookupObj)
case result =>
result.keys foreach { _ forward msg }
}
case Terminated(actor) =>
registry -= actor
unregister(objName(actor))
}
def processActorLookup(lookupObj: ActorLookup[_]) : Map[ActorRef, Seq[CubeActorMessageType]]= {
val requestClass = lookupObj.requestClass map (_.getCanonicalName)
val responseClass = if (lookupObj.explicitType) Option(lookupObj.responseClass.getCanonicalName) else None
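// Resolve by request type alone, by actor name alone, or by response type (optionally narrowed by actor name), in that order.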
(requestClass, responseClass, lookupObj.actorName) match {
case (requestClassName, Some("scala.runtime.Nothing$") | None, None) =>
registry filter { case (_, messageTypes) => messageTypes.exists(_.requestClassName == requestClassName) }
case (_, Some("scala.runtime.Nothing$")| None , Some(actorName)) =>
registry filter { case (actorRef, _) => actorRef.path.name == actorName }
case (_, responseClassName, actorName) =>
registry.filter { case (actorRef, messageTypes) =>
actorRef.path.name == actorName.getOrElse(actorRef.path.name) &&
messageTypes.exists(_.responseClassName == responseClassName)
}
case _ => Map.empty
}
}
}
|
paypal/squbs
|
squbs-actorregistry/src/main/scala/org/squbs/actorregistry/ActorRegistry.scala
|
Scala
|
apache-2.0
| 4,687
|
/*
* Copyright 2001-2012 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import scala.reflect.NameTransformer.decode
private[scalatest] object EncodedOrdering extends Ordering[String] {
def compare(x: String, y: String): Int = {
decode(x) compareTo decode(y)
}
}
|
hubertp/scalatest
|
src/main/scala/org/scalatest/EncodedOrdering.scala
|
Scala
|
apache-2.0
| 823
|
//
// author: Cosmin Basca
//
// Copyright 2010 University of Zurich
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package com.rdftools
import de.l3s.sesame2.tools.RDF2RDF
/**
* Created by basca on 17/09/14.
*/
object Rdf2RdfConverter extends App {
override def main(args: Array[String]) = {
RDF2RDF.main(args)
}
}
|
cosminbasca/jvmrdftools
|
src/main/scala/com/rdftools/Rdf2RdfConverter.scala
|
Scala
|
apache-2.0
| 846
|
package generator
import com.bryzek.apidoc.generator.v0.models.File
import org.scalatest.{FunSpec, ShouldMatchers}
class ServiceFileNamesSpec extends FunSpec with ShouldMatchers {
describe("ServiceFileNames.toFile") {
def toFile(
languages: String,
version: String = "0.0.1"
): File = {
ServiceFileNames.toFile(
namespace = "com.bryzek.apidoc",
organizationKey = "bryzek",
applicationKey = "apidoc",
version = version,
suffix = "Client",
contents = "test",
languages = Some(languages)
)
}
it("ruby is underscored") {
val file = toFile("ruby")
file.name should be("bryzek_apidoc_v0_client.rb")
file.dir should be(Some("com/bryzek/apidoc"))
}
it("scala is camelcased") {
val file = toFile("scala")
file.name should be("BryzekApidocV0Client.scala")
file.dir should be(Some("com/bryzek/apidoc"))
}
}
it("getSuffix for known languages") {
ServiceFileNames.toLanguages("ruby").map(_.extension) should be(Seq("rb"))
ServiceFileNames.toLanguages("ruby,scala").map(_.extension) should be(Seq("rb", "scala"))
ServiceFileNames.toLanguages(" RUBY , SCALA ").map(_.extension) should be(Seq("rb", "scala"))
ServiceFileNames.toLanguages("java").map(_.extension) should be(Seq("java"))
ServiceFileNames.toLanguages("javascript").map(_.extension) should be(Seq("js"))
ServiceFileNames.toLanguages("go").map(_.extension) should be(Seq("go"))
}
it("getSuffix for all known languages") {
ServiceFileNames.Language.All.foreach { l =>
ServiceFileNames.toLanguages(l.name).map(_.extension) should be(Seq(l.extension))
}
}
it("getSuffix for unknown languages") {
ServiceFileNames.toLanguages("") should be(Nil)
ServiceFileNames.toLanguages("foo") should be(Nil)
ServiceFileNames.toLanguages("foo, bar") should be(Nil)
}
}
|
krschultz/apidoc-generator
|
lib/src/test/scala/generator/ServiceFileNamesSpec.scala
|
Scala
|
mit
| 1,921
|
package cz.kamenitxan.jakon.utils
import cz.kamenitxan.jakon.core.database.DBHelper
import cz.kamenitxan.jakon.core.model.JakonObject
import cz.kamenitxan.jakon.core.service.EmailTemplateService
import cz.kamenitxan.jakon.logging.Logger
import cz.kamenitxan.jakon.utils.mail.EmailTemplateEntity
import java.io.{BufferedReader, InputStream, InputStreamReader}
import java.lang.reflect.{Field, ParameterizedType, Type}
import java.net.URLEncoder
import java.util.Locale
import java.util.stream.Collectors
import scala.annotation.tailrec
import scala.collection.immutable.ArraySeq
import scala.io.Source
import scala.language.postfixOps
import scala.util.Try
/**
* Created by TPa on 08.09.16.
*/
object Utils {
implicit class StringImprovements(s: String) {
def toOptInt: Option[Int] = Try(Integer.parseInt(s)).toOption
def toBoolOrFalse: Boolean = {
try {
s.toBoolean
} catch {
case _: IllegalArgumentException => false
}
}
def getOrElse(`else`: String): String = {
if (isEmpty(s)) {
`else`
} else {
s
}
}
def isNullOrEmpty: Boolean = {
if (s == null) {
true
} else {
s.isEmpty
}
}
def urlEncode: String = {
if (s == null) {
null
} else {
URLEncoder.encode(s, "UTF-8")
}
}
}
implicit class FieldImprovements(f: Field) {
def getCollectionGenericType: Type = {
f.getGenericType.asInstanceOf[ParameterizedType].getActualTypeArguments.head
}
def getCollectionGenericTypeClass: Class[_] = {
Class.forName(getCollectionGenericType.getTypeName)
}
}
def getFieldsUpTo(startClass: Class[_], exclusiveParent: Class[_]): Seq[Field] = {
var currentClassFields = getFields(startClass)
val parentClass = startClass.getSuperclass
if (parentClass != null && (exclusiveParent == null || (parentClass != exclusiveParent))) {
val parentClassFields: Seq[Field] = getFieldsUpTo(parentClass, exclusiveParent)
currentClassFields = parentClassFields ++ currentClassFields
}
currentClassFields
}
def getFields(cls: Class[_]): Seq[Field] = {
ArraySeq.unsafeWrapArray(cls.getDeclaredFields)
}
def isJakonObject(cls: Class[_]): Boolean = {
isClassOrChild(cls, classOf[JakonObject])
}
@tailrec
	def isClassOrChild(cls: Class[_], parent: Class[_]): Boolean = {
		if (cls == parent) {
			true
		} else if (cls == classOf[Object] || cls.getSuperclass == null) {
			false
		} else {
			isClassOrChild(cls.getSuperclass, parent)
}
}
@tailrec
def getClassByFieldName(startClass: Class[_], fieldName: String): (Class[_], Field) = {
		val field: Option[Field] =
			try {
				Option(startClass.getDeclaredField(fieldName))
			} catch {
				case _: NoSuchFieldException => Option.empty
			}
if (field.isEmpty) {
getClassByFieldName(startClass.getSuperclass, fieldName)
} else {
startClass -> field.get
}
}
def stringToLocale(s: String): Locale = {
if (s == null) return null
val split = s.split("_")
new Locale(split(0), split(1))
}
def isEmpty(s: String): Boolean = {
s == null || s.isEmpty
}
def nonEmpty(s: String): Boolean = {
!isEmpty(s)
}
def measured[B](logFun: Long => String)(measuredFun: => B): B = {
// TODO: https://stackoverflow.com/questions/33909930/what-is-the-best-way-to-get-the-name-of-the-caller-class-in-an-object/
val startTime = System.currentTimeMillis()
val result = measuredFun
val stopTime = System.currentTimeMillis()
val elapsedTime = stopTime - startTime
Logger.info(logFun.apply(elapsedTime))
result
}
def getInputStreamFromJar(name: String): Option[InputStream] = {
Option.apply(this.getClass.getResourceAsStream(name))
}
def getResourceFromJar(name: String): Option[String] = {
getInputStreamFromJar(name).map(resource => new BufferedReader(new InputStreamReader(resource)).lines().collect(Collectors.joining("\\n")))
}
def loadEmailTemplate(name: String, from: String, subject: String, path: String): EmailTemplateEntity = {
DBHelper.withDbConnection(implicit conn => {
val currentTmpl = EmailTemplateService.getByName(name)
val bufferedSource = Source.fromFile(path)
val template = bufferedSource.getLines().mkString("\\n")
bufferedSource.close
val tmpl = if (currentTmpl == null) new EmailTemplateEntity else currentTmpl
tmpl.name = name
tmpl.template = template
tmpl.from = from
tmpl.subject = subject
if (currentTmpl == null) {
tmpl.create()
} else {
tmpl.update()
}
tmpl
})
}
}
|
kamenitxan/Jakon
|
modules/backend/src/main/scala/cz/kamenitxan/jakon/utils/Utils.scala
|
Scala
|
bsd-3-clause
| 4,472
|
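A brief usage sketch for the helpers above (added for illustration, not part of the repository; it assumes the Jakon backend module and its Logger are available on the classpath):
import cz.kamenitxan.jakon.utils.Utils._
object UtilsUsageDemo extends App {
  // StringImprovements adds safe conversions onto String
  println("42".toOptInt)        // Some(42)
  println("oops".toOptInt)      // None
  println("true".toBoolOrFalse) // true
  println("yes".toBoolOrFalse)  // false — "yes" is not a Boolean literal, so the fallback applies
  // measured wraps an expression and logs its wall-clock duration through Logger
  val total = measured(ms => s"summed in $ms ms") {
    (1L to 1000000L).sum
  }
  println(total)
}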
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.stream
import org.apache.flink.api.dag.Transformation
import org.apache.flink.streaming.api.transformations.OneInputTransformation
import org.apache.flink.table.dataformat.BaseRow
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.codegen.{CodeGeneratorContext, ExpandCodeGenerator}
import org.apache.flink.table.planner.delegation.StreamPlanner
import org.apache.flink.table.planner.plan.nodes.calcite.Expand
import org.apache.flink.table.planner.plan.nodes.exec.{ExecNode, StreamExecNode}
import org.apache.flink.table.runtime.typeutils.BaseRowTypeInfo
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rex.RexNode
import java.util
import scala.collection.JavaConversions._
/**
* Stream physical RelNode for [[Expand]].
*/
class StreamExecExpand(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
outputRowType: RelDataType,
projects: util.List[util.List[RexNode]],
expandIdIndex: Int)
extends Expand(cluster, traitSet, inputRel, outputRowType, projects, expandIdIndex)
with StreamPhysicalRel
with StreamExecNode[BaseRow] {
override def producesUpdates: Boolean = false
override def needsUpdatesAsRetraction(input: RelNode): Boolean = false
override def consumesRetractions: Boolean = false
override def producesRetractions: Boolean = false
override def requireWatermark: Boolean = false
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new StreamExecExpand(cluster, traitSet, inputs.get(0), outputRowType, projects, expandIdIndex)
}
//~ ExecNode methods -----------------------------------------------------------
override def getInputNodes: util.List[ExecNode[StreamPlanner, _]] = {
getInputs.map(_.asInstanceOf[ExecNode[StreamPlanner, _]])
}
override def replaceInputNode(
ordinalInParent: Int,
newInputNode: ExecNode[StreamPlanner, _]): Unit = {
replaceInput(ordinalInParent, newInputNode.asInstanceOf[RelNode])
}
override protected def translateToPlanInternal(
planner: StreamPlanner): Transformation[BaseRow] = {
val config = planner.getTableConfig
val inputTransform = getInputNodes.get(0).translateToPlan(planner)
.asInstanceOf[Transformation[BaseRow]]
val inputType = inputTransform.getOutputType.asInstanceOf[BaseRowTypeInfo].toRowType
val outputType = FlinkTypeFactory.toLogicalRowType(getRowType)
val ctx = CodeGeneratorContext(config)
val operator = ExpandCodeGenerator.generateExpandOperator(
ctx,
inputType,
outputType,
config,
projects,
opName = "StreamExpand",
retainHeader = true)
val transform = new OneInputTransformation(
inputTransform,
getRelDetailedDescription,
operator,
BaseRowTypeInfo.of(outputType),
inputTransform.getParallelism)
if (inputsContainSingleton()) {
transform.setParallelism(1)
transform.setMaxParallelism(1)
}
transform
}
}
|
fhueske/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/stream/StreamExecExpand.scala
|
Scala
|
apache-2.0
| 3,995
|
package streaming
import java.io.File
import org.apache.spark.streaming._
import org.apache.spark.{SparkConf, SparkContext}
import streaming.util.CSVFileStreamGenerator
import scala.util.Random
//
// File based streaming requires files to be atomically created in
// the source directory -- in practice this entails creating them somewhere
// else and renaming them in place. Every batch emitted by the StreamingContext
// produces all the data from all files that have appeared since the last
// batch -- potentially many files are combined into a single RDD each time.
//
object FileBasedStreaming {
def main (args: Array[String]) {
val conf = new SparkConf().setAppName("FileBasedStreaming").setMaster("local[4]")
val sc = new SparkContext(conf)
// streams will produce data every second
val ssc = new StreamingContext(sc, Seconds(1))
val fm = new CSVFileStreamGenerator(10, 100, 500)
// create the stream
val stream = ssc.textFileStream(fm.dest.getAbsolutePath)
// register for data
stream.foreachRDD(r => {
// println(r.count())
r.foreachPartition(it=>{
while (it.hasNext){
println(it.next())
}
})
})
// start streaming
ssc.start()
new Thread("Streaming Termination Monitor") {
override def run() {
try {
ssc.awaitTermination()
} catch {
case e: Exception => {
println("*** streaming exception caught in monitor thread")
e.printStackTrace()
}
}
println("*** streaming terminated")
}
}.start()
println("*** started termination monitor")
    // A curious fact about file-based streaming is that any files written
    // before the first RDD is produced are ignored, so wait longer than
    // that before producing files.
Thread.sleep(2000)
println("*** producing data")
// start producing files
fm.makeFiles()
Thread.sleep(10000)
println("*** stopping streaming")
ssc.stop()
// wait a bit longer for the call to awaitTermination() to return
Thread.sleep(5000)
println("*** done")
}
}
|
chocolateBlack/LearningSpark
|
src/main/scala/streaming/FileBased.scala
|
Scala
|
mit
| 2,152
|
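The comment at the top of the file above is the key operational detail: files must appear atomically in the watched directory. A minimal, self-contained sketch of the produce-then-rename pattern follows (added for illustration; the directory path is an assumption, and the atomic move generally requires source and destination on the same filesystem):
import java.io.{File, PrintWriter}
import java.nio.file.{Files, StandardCopyOption}
object AtomicFileDrop extends App {
  val watchedDir = new File("/tmp/streaming-input") // hypothetical directory monitored by textFileStream
  watchedDir.mkdirs()
  // Write the data outside the watched directory first, on the same filesystem
  val tmp = File.createTempFile("batch-", ".csv", watchedDir.getParentFile)
  val out = new PrintWriter(tmp)
  try (1 to 10).foreach(i => out.println(s"$i,value$i")) finally out.close()
  // The rename is what makes the file visible to the stream as a single atomic event
  Files.move(tmp.toPath, new File(watchedDir, tmp.getName).toPath,
    StandardCopyOption.ATOMIC_MOVE)
}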
package spatutorial.client.modules
import japgolly.scalajs.react._
import japgolly.scalajs.react.extra.OnUnmount
import japgolly.scalajs.react.vdom.prefix_<^._
import rx._
import rx.ops._
import spatutorial.client.components.Icon._
import spatutorial.client.components._
import spatutorial.client.services._
import spatutorial.shared.TodoItem
object MainMenu {
case class Props(activeLocation: MainRouter.Loc, todos: Rx[Seq[TodoItem]])
case class MenuItem(label: (Props) => ReactNode, icon: Icon, location: MainRouter.Loc)
class Backend(t: BackendScope[Props, _]) extends OnUnmount {
def mounted(): Unit = {
// hook up to Todo changes
val obsItems = t.props.todos.foreach { _ => t.forceUpdate()}
onUnmount {
// stop observing when unmounted (= never in this SPA)
obsItems.kill()
}
MainDispatcher.dispatch(RefreshTodos)
}
}
// build the Todo menu item, showing the number of open todos
private def buildTodoMenu(props: Props): ReactNode = {
val todoCount = props.todos().count(!_.completed)
Seq(
<.span("Todo "),
if (todoCount > 0) <.span(^.className := "label label-danger label-as-badge", todoCount) else <.span()
)
}
private val menuItems = Seq(
MenuItem(_ => "Dashboard", Icon.dashboard, MainRouter.dashboardLoc),
MenuItem(buildTodoMenu, Icon.check, MainRouter.todoLoc)
)
private val MainMenu = ReactComponentB[Props]("MainMenu")
.stateless
.backend(new Backend(_))
.render((P, _, B) => {
<.ul(^.className := "nav navbar-nav")(
// build a list of menu items
for (item <- menuItems) yield {
<.li((P.activeLocation == item.location) ?= (^.className := "active"),
MainRouter.routerLink(item.location)(item.icon, " ", item.label(P))
)
}
)
})
.componentDidMount(_.backend.mounted())
.build
def apply(props: Props) = MainMenu(props)
}
|
zoosky/eispoc
|
js/src/main/scala/spatutorial/client/modules/MainMenu.scala
|
Scala
|
agpl-3.0
| 1,923
|
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.feel.context
import org.camunda.feel.syntaxtree.Val
import scala.collection.mutable
trait VariableProvider {
def getVariable(name: String): Option[Any]
def keys: Iterable[String]
def getVariables: Map[String, Any] =
keys
.map(key => key -> getVariable(key).getOrElse(None))
.toMap
}
object VariableProvider {
object EmptyVariableProvider extends VariableProvider {
override def getVariable(name: String): Option[Val] = None
override def keys: Iterable[String] = List.empty
}
case class StaticVariableProvider(variables: Map[String, Any])
extends VariableProvider {
override def getVariable(name: String): Option[Any] = variables.get(name)
override def keys: Iterable[String] = variables.keys
}
case class CacheVariableProvider(provider: VariableProvider)
extends VariableProvider {
private val cache: mutable.Map[String, Any] = mutable.Map.empty
override def getVariable(name: String): Option[Any] =
cache.get(name) match {
case Some(value) => Some(value)
case None =>
provider.getVariable(name) match {
case Some(value) => cache.put(name, value); Some(value)
case None => None
}
}
override def keys: Iterable[String] = cache.keys ++ provider.keys
}
case class CompositeVariableProvider(providers: List[VariableProvider])
extends VariableProvider {
override def getVariable(name: String): Option[Any] = {
for (provider <- providers) {
provider.getVariable(name) match {
case Some(v) => return Some(v)
case _ =>
}
}
None
}
override def keys: Iterable[String] = providers.flatMap(_.keys)
}
}
|
camunda/feel-scala
|
src/main/scala/org/camunda/feel/context/VariableProvider.scala
|
Scala
|
apache-2.0
| 2,577
|
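A short usage sketch for the providers above (added for illustration, not part of the repository; it assumes the feel-scala artifact is on the classpath):
import org.camunda.feel.context.VariableProvider
import org.camunda.feel.context.VariableProvider.{CompositeVariableProvider, StaticVariableProvider}
object VariableProviderDemo extends App {
  val defaults  = StaticVariableProvider(Map("rate" -> 0.2, "region" -> "EU"))
  val overrides = StaticVariableProvider(Map("rate" -> 0.25))
  // CompositeVariableProvider returns the first provider that knows the name, so overrides win
  val combined: VariableProvider = CompositeVariableProvider(List(overrides, defaults))
  println(combined.getVariable("rate"))    // Some(0.25)
  println(combined.getVariable("region"))  // Some(EU)
  println(combined.getVariable("missing")) // None
  println(combined.getVariables)           // Map(rate -> 0.25, region -> EU) (order may vary)
}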