code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package io.buoyant.linkerd.protocol.h2.grpc
import com.twitter.finagle.buoyant.h2.service.H2ReqRepFrame
import com.twitter.finagle.buoyant.h2.{Headers, Request, Response, Stream => FStream}
import com.twitter.finagle.service.ResponseClass
import com.twitter.finagle.util.LoadService
import com.twitter.util.Return
import io.buoyant.config.Parser
import io.buoyant.linkerd.protocol.h2.grpc.GrpcClassifiers.{AlwaysRetryable, NeverRetryable, RetryableStatusCodes}
import io.buoyant.grpc.runtime.GrpcStatus
import io.buoyant.linkerd.RouterConfig
import io.buoyant.linkerd.protocol.h2.H2ClassifierInitializer
import io.buoyant.linkerd.protocol.{H2DefaultSvc, H2Initializer}
import org.scalacheck.Arbitrary
import org.scalatest.FunSuite
import org.scalatest.prop.GeneratorDrivenPropertyChecks
class GrpcClassifierTest extends FunSuite with GeneratorDrivenPropertyChecks {

  // Generates arbitrary gRPC statuses (any Int code, any message) for the property checks.
  implicit val arbitraryStatus: Arbitrary[GrpcStatus] = Arbitrary(for {
    code <- Arbitrary.arbitrary[Int]
    msg <- Arbitrary.arbitrary[String]
  } yield GrpcStatus(code, msg))

  /**
   * Builds a request/response exchange whose response stream terminates with the
   * trailers encoding `status`, which is what the gRPC classifiers inspect.
   * Extracted here because every classification test used the same construction.
   */
  private def reqRepWithStatus(status: GrpcStatus): H2ReqRepFrame =
    H2ReqRepFrame(
      Request(Headers.empty, FStream.empty()),
      Return((
        Response(Headers.empty, FStream.empty()),
        Some(Return(status.toTrailers))
      ))
    )

  test("AlwaysRetryable classifies all errors as retryable") {
    forAll("status") { status: GrpcStatus =>
      val reqrep = reqRepWithStatus(status)
      assert(AlwaysRetryable.streamClassifier.isDefinedAt(reqrep))
      // Code 0 is gRPC OK; every non-zero code must be retryable.
      if (status.code != 0) {
        assert(AlwaysRetryable.streamClassifier(reqrep) == ResponseClass.RetryableFailure)
      } else {
        assert(AlwaysRetryable.streamClassifier(reqrep) == ResponseClass.Success)
      }
    }
  }

  test("NeverRetryable classifies no errors as retryable") {
    forAll("status") { status: GrpcStatus =>
      val reqrep = reqRepWithStatus(status)
      assert(NeverRetryable.streamClassifier.isDefinedAt(reqrep))
      // Non-zero codes are still failures, just never retryable.
      if (status.code != 0) {
        assert(NeverRetryable.streamClassifier(reqrep) == ResponseClass.NonRetryableFailure)
      } else {
        assert(NeverRetryable.streamClassifier(reqrep) == ResponseClass.Success)
      }
    }
  }

  test("RetryableStatusCodes classifies specific codes as retryable") {
    forAll("status", "retryable statuses") { (status: GrpcStatus, codes: Set[Int]) =>
      val reqrep = reqRepWithStatus(status)
      val classifier = new RetryableStatusCodes(codes)
      assert(classifier.streamClassifier.isDefinedAt(reqrep))
      if (status.code == 0) {
        assert(classifier.streamClassifier(reqrep) == ResponseClass.Success)
      } else if (codes.contains(status.code)) {
        assert(classifier.streamClassifier(reqrep) == ResponseClass.RetryableFailure)
      } else {
        assert(classifier.streamClassifier(reqrep) == ResponseClass.NonRetryableFailure)
      }
    }
  }

  /**
   * Parses a router config that references `init` and asserts that an H2 classifier
   * is produced, and that the legacy (non-H2) classifier API is unsupported.
   */
  private def assertRouterParses(yaml: String, init: H2ClassifierInitializer): Unit = {
    val mapper = Parser.objectMapper(yaml, Iterable(Seq(H2Initializer), Seq(init)))
    val router = mapper.readValue[RouterConfig](yaml)
    assert(router.service.get.asInstanceOf[H2DefaultSvc]._h2Classifier.isDefined)
    assertThrows[UnsupportedOperationException] {
      router.service.get.asInstanceOf[H2DefaultSvc].responseClassifierConfig
    }
  }

  // Parameter-free classifiers share identical load/parse tests, generated per initializer.
  for {
    init <- Seq(
      AlwaysRetryableInitializer,
      NeverRetryableInitializer,
      DefaultInitializer
    )
    kind = init.configId
  } {
    test(s"loads $kind") {
      assert(LoadService[H2ClassifierInitializer]().exists(_.configId == kind))
    }

    test(s"parse router with $kind") {
      val yaml =
        s"""|protocol: h2
            |service:
            |  responseClassifier:
            |    kind: $kind
            |servers:
            |- port: 0
            |""".stripMargin
      assertRouterParses(yaml, init)
    }
  }

  test("loads io.l5d.h2.grpc.retryableStatusCodes") {
    assert(LoadService[H2ClassifierInitializer]().exists(_.configId == "io.l5d.h2.grpc.retryableStatusCodes"))
  }

  // retryableStatusCodes takes configuration, so it gets its own parse test with codes.
  test("parse router with io.l5d.h2.grpc.retryableStatusCodes") {
    val yaml =
      s"""|protocol: h2
          |service:
          |  responseClassifier:
          |    kind: io.l5d.h2.grpc.retryableStatusCodes
          |    retryableStatusCodes:
          |    - 1
          |    - 2
          |    - 3
          |servers:
          |- port: 0
          |""".stripMargin
    assertRouterParses(yaml, RetryableStatusCodesInitializer)
  }
}
| denverwilliams/linkerd | linkerd/protocol/h2/src/test/scala/io/buoyant/linkerd/protocol/h2/GrpcClassifierTest.scala | Scala | apache-2.0 | 5,064 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka010
import java.{ util => ju }
import scala.collection.mutable.ArrayBuffer
import org.apache.kafka.clients.consumer.{ ConsumerConfig, ConsumerRecord }
import org.apache.kafka.common.TopicPartition
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.partial.{BoundedDouble, PartialResult}
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler.ExecutorCacheTaskLocation
import org.apache.spark.storage.StorageLevel
/**
* A batch-oriented interface for consuming from Kafka.
* Starting and ending offsets are specified in advance,
* so that you can control exactly-once semantics.
* @param kafkaParams Kafka
* <a href="http://kafka.apache.org/documentation.html#newconsumerconfigs">
* configuration parameters</a>. Requires "bootstrap.servers" to be set
* with Kafka broker(s) specified in host1:port1,host2:port2 form.
* @param offsetRanges offset ranges that define the Kafka data belonging to this RDD
* @param preferredHosts map from TopicPartition to preferred host for processing that partition.
* In most cases, use [[LocationStrategies.PreferConsistent]]
* Use [[LocationStrategies.PreferBrokers]] if your executors are on same nodes as brokers.
* @param useConsumerCache whether to use a consumer from a per-jvm cache
* @tparam K type of Kafka message key
* @tparam V type of Kafka message value
*/
private[spark] class KafkaRDD[K, V](
sc: SparkContext,
val kafkaParams: ju.Map[String, Object],
val offsetRanges: Array[OffsetRange],
val preferredHosts: ju.Map[TopicPartition, String],
useConsumerCache: Boolean
) extends RDD[ConsumerRecord[K, V]](sc, Nil) with Logging with HasOffsetRanges {
// Executors must not auto-reset offsets, or the records fetched could drift
// away from the pre-computed offsetRanges.
require("none" ==
kafkaParams.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG).asInstanceOf[String],
ConsumerConfig.AUTO_OFFSET_RESET_CONFIG +
" must be set to none for executor kafka params, else messages may not match offsetRange")
// Auto-commit would acknowledge offsets before Spark has processed them.
require(false ==
kafkaParams.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG).asInstanceOf[Boolean],
ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG +
" must be set to false for executor kafka params, else offsets may commit before processing")
// TODO is it necessary to have separate configs for initial poll time vs ongoing poll time?
// Poll timeout defaults to the general network timeout when not set explicitly.
private val pollTimeout = conf.getLong("spark.streaming.kafka.consumer.poll.ms",
conf.getTimeAsMs("spark.network.timeout", "120s"))
// Sizing knobs for the per-JVM cached-consumer map.
private val cacheInitialCapacity =
conf.getInt("spark.streaming.kafka.consumer.cache.initialCapacity", 16)
private val cacheMaxCapacity =
conf.getInt("spark.streaming.kafka.consumer.cache.maxCapacity", 64)
private val cacheLoadFactor =
conf.getDouble("spark.streaming.kafka.consumer.cache.loadFactor", 0.75).toFloat
// When true, offsets within a range may be non-consecutive (e.g. compacted topics),
// so counts cannot be derived from the offset arithmetic alone.
private val compacted =
conf.getBoolean("spark.streaming.kafka.allowNonConsecutiveOffsets", false)
// Persisting raw ConsumerRecords would fail at serialization time; warn loudly
// but still delegate so behavior matches the RDD contract.
override def persist(newLevel: StorageLevel): this.type = {
logError("Kafka ConsumerRecord is not serializable. " +
"Use .map to extract fields before calling .persist or .window")
super.persist(newLevel)
}
// One Spark partition per offset range, preserving the range's index.
override def getPartitions: Array[Partition] = {
offsetRanges.zipWithIndex.map { case (o, i) =>
new KafkaRDDPartition(i, o.topic, o.partition, o.fromOffset, o.untilOffset)
}.toArray
}
// For non-compacted topics the count is known from the offset ranges without
// launching a job; compacted topics must actually scan.
override def count(): Long =
if (compacted) {
super.count()
} else {
offsetRanges.map(_.count).sum
}
override def countApprox(
timeout: Long,
confidence: Double = 0.95
): PartialResult[BoundedDouble] =
if (compacted) {
super.countApprox(timeout, confidence)
} else {
// Exact count is free here, so return it with full confidence.
val c = count
new PartialResult(new BoundedDouble(c, 1.0, c, c), true)
}
override def isEmpty(): Boolean =
if (compacted) {
super.isEmpty()
} else {
count == 0L
}
// take() avoids scanning partitions that cannot contribute records.
override def take(num: Int): Array[ConsumerRecord[K, V]] =
if (compacted) {
super.take(num)
} else if (num < 1) {
Array.empty[ConsumerRecord[K, V]]
} else {
val nonEmptyPartitions = this.partitions
.map(_.asInstanceOf[KafkaRDDPartition])
.filter(_.count > 0)
if (nonEmptyPartitions.isEmpty) {
Array.empty[ConsumerRecord[K, V]]
} else {
// Determine in advance how many messages need to be taken from each partition
val parts = nonEmptyPartitions.foldLeft(Map[Int, Int]()) { (result, part) =>
val remain = num - result.values.sum
if (remain > 0) {
val taken = Math.min(remain, part.count)
result + (part.index -> taken.toInt)
} else {
result
}
}
// Run only on the partitions that were assigned a non-zero quota.
context.runJob(
this,
(tc: TaskContext, it: Iterator[ConsumerRecord[K, V]]) =>
it.take(parts(tc.partitionId)).toArray, parts.keys.toArray
).flatten
}
}
// All known executor locations, sorted so index selection below is stable.
private def executors(): Array[ExecutorCacheTaskLocation] = {
val bm = sparkContext.env.blockManager
bm.master.getPeers(bm.blockManagerId).toArray
.map(x => ExecutorCacheTaskLocation(x.host, x.executorId))
.sortWith(compareExecutors)
}
// Total ordering on executors (host, then executorId), used for deterministic placement.
protected[kafka010] def compareExecutors(
a: ExecutorCacheTaskLocation,
b: ExecutorCacheTaskLocation): Boolean =
if (a.host == b.host) {
a.executorId > b.executorId
} else {
a.host > b.host
}
override def getPreferredLocations(thePart: Partition): Seq[String] = {
// The intention is best-effort consistent executor for a given topicpartition,
// so that caching consumers can be effective.
// TODO what about hosts specified by ip vs name
val part = thePart.asInstanceOf[KafkaRDDPartition]
val allExecs = executors()
val tp = part.topicPartition
// preferredHosts is a java.util.Map, so a missing key yields null rather than None.
val prefHost = preferredHosts.get(tp)
val prefExecs = if (null == prefHost) allExecs else allExecs.filter(_.host == prefHost)
val execs = if (prefExecs.isEmpty) allExecs else prefExecs
if (execs.isEmpty) {
Seq.empty
} else {
// execs is sorted, tp.hashCode depends only on topic and partition, so consistent index
val index = Math.floorMod(tp.hashCode, execs.length)
val chosen = execs(index)
Seq(chosen.toString)
}
}
private def errBeginAfterEnd(part: KafkaRDDPartition): String =
s"Beginning offset ${part.fromOffset} is after the ending offset ${part.untilOffset} " +
s"for topic ${part.topic} partition ${part.partition}. " +
"You either provided an invalid fromOffset, or the Kafka topic has been damaged"
// Returns an iterator over the partition's offset range; picks the compacted-aware
// iterator when non-consecutive offsets are allowed.
override def compute(thePart: Partition, context: TaskContext): Iterator[ConsumerRecord[K, V]] = {
val part = thePart.asInstanceOf[KafkaRDDPartition]
require(part.fromOffset <= part.untilOffset, errBeginAfterEnd(part))
if (part.fromOffset == part.untilOffset) {
logInfo(s"Beginning offset ${part.fromOffset} is the same as ending offset " +
s"skipping ${part.topic} ${part.partition}")
Iterator.empty
} else {
logInfo(s"Computing topic ${part.topic}, partition ${part.partition} " +
s"offsets ${part.fromOffset} -> ${part.untilOffset}")
if (compacted) {
new CompactedKafkaRDDIterator[K, V](
part,
context,
kafkaParams,
useConsumerCache,
pollTimeout,
cacheInitialCapacity,
cacheMaxCapacity,
cacheLoadFactor
)
} else {
new KafkaRDDIterator[K, V](
part,
context,
kafkaParams,
useConsumerCache,
pollTimeout,
cacheInitialCapacity,
cacheMaxCapacity,
cacheLoadFactor
)
}
}
}
}
/**
* An iterator that fetches messages directly from Kafka for the offsets in partition.
* Uses a cached consumer where possible to take advantage of prefetching
*/
private class KafkaRDDIterator[K, V](
part: KafkaRDDPartition,
context: TaskContext,
kafkaParams: ju.Map[String, Object],
useConsumerCache: Boolean,
pollTimeout: Long,
cacheInitialCapacity: Int,
cacheMaxCapacity: Int,
cacheLoadFactor: Float
) extends Iterator[ConsumerRecord[K, V]] {
val groupId = kafkaParams.get(ConsumerConfig.GROUP_ID_CONFIG).asInstanceOf[String]
// Ensure any uncached consumer is closed when the task finishes, success or failure.
context.addTaskCompletionListener(_ => closeIfNeeded())
val consumer = if (useConsumerCache) {
CachedKafkaConsumer.init(cacheInitialCapacity, cacheMaxCapacity, cacheLoadFactor)
if (context.attemptNumber >= 1) {
// just in case the prior attempt failures were cache related
CachedKafkaConsumer.remove(groupId, part.topic, part.partition)
}
CachedKafkaConsumer.get[K, V](groupId, part.topic, part.partition, kafkaParams)
} else {
CachedKafkaConsumer.getUncached[K, V](groupId, part.topic, part.partition, kafkaParams)
}
// Next offset to fetch; advances by exactly one per next() call.
var requestOffset = part.fromOffset
// Cached consumers are shared across tasks and must not be closed here.
def closeIfNeeded(): Unit = {
if (!useConsumerCache && consumer != null) {
consumer.close()
}
}
override def hasNext(): Boolean = requestOffset < part.untilOffset
override def next(): ConsumerRecord[K, V] = {
if (!hasNext) {
throw new ju.NoSuchElementException("Can't call getNext() once untilOffset has been reached")
}
val r = consumer.get(requestOffset, pollTimeout)
requestOffset += 1
r
}
}
/**
* An iterator that fetches messages directly from Kafka for the offsets in partition.
* Uses a cached consumer where possible to take advantage of prefetching.
* Intended for compacted topics, or other cases when non-consecutive offsets are ok.
*/
private class CompactedKafkaRDDIterator[K, V](
part: KafkaRDDPartition,
context: TaskContext,
kafkaParams: ju.Map[String, Object],
useConsumerCache: Boolean,
pollTimeout: Long,
cacheInitialCapacity: Int,
cacheMaxCapacity: Int,
cacheLoadFactor: Float
) extends KafkaRDDIterator[K, V](
part,
context,
kafkaParams,
useConsumerCache,
pollTimeout,
cacheInitialCapacity,
cacheMaxCapacity,
cacheLoadFactor
) {
consumer.compactedStart(part.fromOffset, pollTimeout)
// One-record lookahead: because offsets may be non-consecutive, the only way to
// know whether the range is exhausted is to fetch the next record and inspect it.
private var nextRecord = consumer.compactedNext(pollTimeout)
private var okNext: Boolean = true
override def hasNext(): Boolean = okNext
override def next(): ConsumerRecord[K, V] = {
if (!hasNext) {
throw new ju.NoSuchElementException("Can't call getNext() once untilOffset has been reached")
}
val r = nextRecord
if (r.offset + 1 >= part.untilOffset) {
// Current record was the last one that could fit in the range.
okNext = false
} else {
nextRecord = consumer.compactedNext(pollTimeout)
if (nextRecord.offset >= part.untilOffset) {
// The lookahead overshot the range: stop, and push the record back so a
// later iterator over the same consumer can see it.
okNext = false
consumer.compactedPrevious()
}
}
r
}
}
| brad-kaiser/spark | external/kafka-0-10/src/main/scala/org/apache/spark/streaming/kafka010/KafkaRDD.scala | Scala | apache-2.0 | 11,527 |
package plugins
import eu.delving.metadata.Hasher
import java.io.File
import play.api.libs.Files
import org.apache.commons.io.FileUtils
/**
* Wrap the bootstrap directories so it can be used
*
* @author Gerald de Jong <gerald@delving.eu>
*/
/**
 * Copies a bootstrap data directory into a temp "sample" area and ensures
 * every copied file carries a content hash.
 *
 * @param dataDirectory source directory whose files are copied and hashed
 */
class BootstrapSource(dataDirectory: File) {

  val org = "delving"
  val spec = dataDirectory.getName
  val targetRoot = new File(System.getProperty("java.io.tmpdir"), "sample")
  val targetDirectory = new File(targetRoot, dataDirectory.getName)

  init()

  /** Recreates the target directory as a hashed copy of the data directory. */
  def init() {
    FileUtils.deleteQuietly(targetDirectory)
    Files.createDirectory(targetDirectory)
    dataDirectory.listFiles().foreach(file => Files.copyFile(file, new File(targetDirectory, file.getName)))
    targetDirectory.listFiles().foreach(file => Hasher.ensureFileHashed(file))
  }

  def fileList() = targetDirectory.listFiles()

  // mkString is safe on an empty listing; the previous reduceLeft threw
  // UnsupportedOperationException when the directory contained no files.
  def fileNamesString() = fileList().map(file => file.getName).mkString("\\n")

  /** Returns the first file whose name ends with `name`, failing fast if absent. */
  def file(name: String): File =
    fileList().find(file => file.getName.endsWith(name))
      .getOrElse(throw new RuntimeException("Could not find " + name))
}
object BootstrapSource {

  val here = new File(".")

  // Running from the repository root exposes a local conf/ directory; otherwise
  // assume we are one level up and descend into culture-hub.
  val baseDirectory =
    if (here.listFiles().exists(entry => entry.isDirectory && entry.getName == "conf")) here
    else new File(here, "culture-hub")

  val bootstrapDirectory = new File(baseDirectory, "modules/dataset/conf/bootstrap")

  // Every sub-directory of the bootstrap directory is one sample data set.
  val files = bootstrapDirectory.listFiles().filter(_.isDirectory)

  val sources = files.map(new BootstrapSource(_))
} | delving/culture-hub | modules/dataset/app/plugins/BootstrapSource.scala | Scala | apache-2.0 | 1,551 |
package edu.gemini.sp.vcs.log.osgi
import org.osgi.framework.{BundleActivator, BundleContext}
import edu.gemini.sp.vcs.log.VcsLog
import edu.gemini.sp.vcs.log.impl.PersistentVcsLog
import edu.gemini.util.osgi.ExternalStorage.getExternalDataFile
import java.io.File
import java.util.logging.Logger
import edu.gemini.spModel.core.{OcsVersionUtil, Version}
object Activator {
// System property naming the storage root; when unset, start() falls back to
// the bundle's external data area.
val BUNDLE_PROP_DIR = "edu.gemini.spdb.dir" // Same location as the SPDB
val LOG = Logger.getLogger(classOf[Activator].getName)
}
/** OSGi activator that wires a persistent [[VcsLog]] into the service registry. */
class Activator extends BundleActivator {

  import Activator._

  def start(ctx: BundleContext) {
    // Honor the configured storage root if present, otherwise use external bundle storage.
    val configuredDir = Option(ctx.getProperty(BUNDLE_PROP_DIR))
    val root: File = configuredDir.map(new File(_)).getOrElse(getExternalDataFile(ctx, "spdb"))
    val file: File = new File(OcsVersionUtil.getVersionDir(root, Version.current), "vcs")
    file.mkdirs()
    LOG.info(s"VCS log storage is at ${file.getAbsolutePath}")
    ctx.registerService(classOf[VcsLog], new PersistentVcsLog(file), null)
  }

  // Nothing to tear down; the registered service is released by the framework.
  def stop(ctx: BundleContext) {
  }
}
| arturog8m/ocs | bundle/edu.gemini.sp.vcs.log/src/main/scala/edu/gemini/sp/vcs/log/osgi/Activator.scala | Scala | bsd-3-clause | 1,013 |
package org.jetbrains.plugins.scala.failed.resolve
import org.intellij.lang.annotations.Language
import org.jetbrains.plugins.scala.PerfCycleTests
import org.jetbrains.plugins.scala.annotator.{AnnotatorHolderMock, ApplicationAnnotator}
import org.jetbrains.plugins.scala.base.SimpleTestCase
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScReferenceExpression
import org.junit.experimental.categories.Category
/**
* @author Roman.Shein
* @since 25.03.2016.
*/
@Category(Array(classOf[PerfCycleTests]))
class ArgumentTypeMismatchTest extends SimpleTestCase {
// Regression test: expects the annotator to produce no messages, i.e. the
// call goo(foo).m(1) should resolve without an argument-type mismatch.
def testSCL4687() = {
val code =
"""
|object A {
| class Z[T] {
| def m(t: T): T = t
| }
|
| def foo[T]: Z[T] = null.asInstanceOf[Z[T]]
|
| def goo[G](z: Z[G]): Z[G] = z
|
| goo(foo).m(1)
|}
""".stripMargin
assert(messages(code).isEmpty)
}
// Regression test: overriding a method with a default argument through several
// levels should still resolve the no-arg call; expects no annotator messages.
def testSCL9686() = assert(
messages {
"""
|class Scl9686 {
| class A {
| def foo(a: Int = 1): Unit = {}
| }
|
| class B extends A {
| override def foo(a: Int): Unit = {}
| }
|
| class C extends B {
| override def foo(a: Int): Unit = {}
| }
|
| class D extends C {
| override def foo(a: Int): Unit = {}
| }
|
| object Some {
| def main(args: Array[String]) {
| (new B()).foo()
| (new C()).foo() // Error: Cannot resolve reference foo() with such signature
| (new D()).foo() // Error: Cannot resolve reference foo() with such signature
| }
| }
|}""".stripMargin
}.isEmpty
)
// Parses the snippet and runs the application annotator over every reference
// expression, returning whatever annotations (errors/warnings) were produced.
def messages(@Language(value = "Scala") code: String) = {
val annotator = new ApplicationAnnotator {}
val mock = new AnnotatorHolderMock
code.parse.depthFirst.filter(elem => elem.isInstanceOf[ScReferenceExpression]).foreach {
case ref: ScReferenceExpression => annotator.annotateReference(ref, mock)
}
mock.annotations
}
}
| whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/failed/resolve/ArgumentTypeMismatchTest.scala | Scala | apache-2.0 | 2,124 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.stats.buffers
import scala.collection.mutable
import io.gatling.charts.stats.RequestRecord
import io.gatling.commons.shared.unstable.model.stats.Group
/**
 * Accumulates per-error-message counts at three scopes: globally, per group,
 * and per (request, group).
 */
private[stats] trait ErrorsBuffers {

  // One message -> count map per scope; the (None, None) key is the global scope.
  val errorsBuffers = mutable.Map.empty[BufferKey, mutable.Map[String, Int]]

  def getErrorsBuffers(requestName: Option[String], group: Option[Group]): mutable.Map[String, Int] =
    errorsBuffers.getOrElseUpdate(BufferKey(requestName, group, None), mutable.Map.empty[String, Int])

  // Single increment helper replacing the read-modify-write previously duplicated
  // in three places; getOrElse avoids the pointless insert-then-overwrite of
  // getOrElseUpdate since the value is unconditionally reassigned.
  private def incrementError(buffer: mutable.Map[String, Int], errorMessage: String): Unit =
    buffer += errorMessage -> (buffer.getOrElse(errorMessage, 0) + 1)

  /** Counts an error against the global scope only. */
  def updateGlobalError(errorMessage: String): Unit =
    incrementError(getErrorsBuffers(None, None), errorMessage)

  /** Counts the record's error (if any) at the global, group, and request scopes. */
  def updateErrorBuffers(record: RequestRecord): Unit =
    record.errorMessage.foreach { errorMessage =>
      updateGlobalError(errorMessage)
      record.group.foreach(group => incrementError(getErrorsBuffers(None, Some(group)), errorMessage))
      incrementError(getErrorsBuffers(Some(record.name), record.group), errorMessage)
    }
}
| gatling/gatling | gatling-charts/src/main/scala/io/gatling/charts/stats/buffers/ErrorsBuffers.scala | Scala | apache-2.0 | 1,999 |
// goseumdochi: experiments with incarnation
// Copyright 2016 John V. Sichi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.goseumdochi.common
import akka.actor._
import com.typesafe.config._
import java.util.concurrent._
import scala.concurrent.duration._
// Typed view over the application's Typesafe Config; each inner object mirrors
// one sub-section under the "goseumdochi" root key.
class Settings(rootConf : Config)
{
private val conf = rootConf.getConfig("goseumdochi")
// Reads a duration entry as a millisecond TimeSpan.
private def getMillis(subConf: Config, path : String) =
TimeSpan(subConf.getDuration(path, TimeUnit.MILLISECONDS), MILLISECONDS)
// goseumdochi.bluetooth
object Bluetooth
{
val subConf = conf.getConfig("bluetooth")
val debug = subConf.getBoolean("debug")
}
// goseumdochi.sphero
object Sphero
{
val subConf = conf.getConfig("sphero")
val bluetoothId = subConf.getString("bluetooth-id")
}
// goseumdochi.vision — camera input and visual debugging settings.
object Vision
{
val subConf = conf.getConfig("vision")
val inputClass = subConf.getString("input-class-name")
val remoteInputUrl = subConf.getString("remote-input-url")
val throttlePeriod = getMillis(subConf, "throttle-period")
val sensorDelay = getMillis(subConf, "sensor-delay")
val debugDir = subConf.getString("debug-dir")
val crosshairsCircle = subConf.getBoolean("crosshairs-circle")
val transformGuidelineExpiration =
getMillis(subConf, "transform-guideline-expiration")
}
// goseumdochi.control — movement/visibility control loop settings.
object Control
{
val subConf = conf.getConfig("control")
val orient = subConf.getBoolean("orient")
val monitorVisibility = subConf.getBoolean("monitor-visibility")
val visibilityCheckFreq = getMillis(subConf, "visibility-check-freq")
val panicDelay = getMillis(subConf, "panic-delay")
val panicClassName = subConf.getString("panic-class-name")
val panicBeforeOrientation = subConf.getBoolean("panic-before-orientation")
val maxMoveDuration = getMillis(subConf, "max-move-duration")
}
// goseumdochi.behavior
object Behavior
{
val subConf = conf.getConfig("behavior")
val className = subConf.getString("class-name")
}
// goseumdochi.perception
object Perception
{
val subConf = conf.getConfig("perception")
val logFile = subConf.getString("log-file")
}
// goseumdochi.view
object View
{
val subConf = conf.getConfig("view")
val visualizeRetinal = subConf.getBoolean("visualize-retinal")
val className = subConf.getString("class-name")
val playbackRate = subConf.getDouble("playback-rate")
}
// goseumdochi.motor
object Motor
{
val subConf = conf.getConfig("motor")
val defaultSpeed = subConf.getDouble("default-speed")
val fullSpeed = subConf.getDouble("full-speed")
}
// goseumdochi.orientation
object Orientation
{
val subConf = conf.getConfig("orientation")
val className = subConf.getString("class-name")
val localizationClassName = subConf.getString("localization-class-name")
val quietPeriod = getMillis(subConf, "quiet-period")
val persistenceFile = subConf.getString("persistence-file")
val alignmentSmallAngle = subConf.getDouble("alignment-small-angle")
val motionThreshold = subConf.getDouble("motion-threshold")
}
// goseumdochi.body-recognition
object BodyRecognition
{
val subConf = conf.getConfig("body-recognition")
val className = subConf.getString("class-name")
val minRadius = subConf.getInt("min-radius")
val maxRadius = subConf.getInt("max-radius")
}
// goseumdochi.motion-detection
object MotionDetection
{
val subConf = conf.getConfig("motion-detection")
val fineThreshold = subConf.getInt("fine-threshold")
val coarseThreshold = subConf.getInt("coarse-threshold")
}
// goseumdochi.intrusion-detection
object IntrusionDetection
{
val subConf = conf.getConfig("intrusion-detection")
val motionClassName = subConf.getString("motion-class-name")
val pausePeriod = getMillis(subConf, "pause-period")
}
// goseumdochi.test
object Test
{
val subConf = conf.getConfig("test")
val active = subConf.getBoolean("active")
val visualize = subConf.getBoolean("visualize")
val quiescencePeriod = getMillis(subConf, "quiescence-period")
}
// Reflectively instantiates a configured class, passing this Settings instance
// as the first constructor argument followed by any extra args.
def instantiateObject(className : String, args : AnyRef*) =
Class.forName(className).getConstructors.head.
newInstance((Seq(this) ++ args) : _*)
}
object Settings
{
def apply(config : Config) = new Settings(config)
// Fails fast with the standard Typesafe Config exception for a missing path.
def complainMissing(path : String)
{
throw new ConfigException.Missing(path)
}
}
// Settings exposed as an Akka extension so actors can read configuration
// via ActorSettings(system) / ActorSettings(context).
class ActorSettings(rootConf : Config, extendedSystem : ExtendedActorSystem)
extends Settings(rootConf)
with Extension
{
}
object ActorSettings extends ExtensionId[ActorSettings] with ExtensionIdProvider
{
override def lookup = ActorSettings
// Builds the extension from the actor system's own configuration.
override def createExtension(system : ExtendedActorSystem) =
new ActorSettings(system.settings.config, system)
// Convenience lookup from an actor's context.
def apply(context : ActorContext) : ActorSettings = apply(context.system)
}
| lingeringsocket/goseumdochi | base/src/main/scala/org/goseumdochi/common/Settings.scala | Scala | apache-2.0 | 5,091 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.util.concurrent._
/**
* A class used for unit testing things which depend on the Time interface.
*
* This class never manually advances the clock, it only does so when you call
* sleep(ms)
*
* It also comes with an associated scheduler instance for managing background tasks in
* a deterministic way.
*/
class MockTime(@volatile private var currentMs: Long) extends Time {
// Scheduler whose pending tasks fire deterministically when sleep() advances the clock.
val scheduler = new MockScheduler(this)
// Convenience constructor starting the mock clock at the real wall-clock time.
def this() = this(System.currentTimeMillis)
def milliseconds: Long = currentMs
// Derived from the millisecond clock, so effective resolution is 1 ms.
def nanoseconds: Long =
TimeUnit.NANOSECONDS.convert(currentMs, TimeUnit.MILLISECONDS)
// The only way the clock advances; also ticks the scheduler so due tasks run.
def sleep(ms: Long) {
this.currentMs += ms
scheduler.tick()
}
override def toString() = "MockTime(%d)".format(milliseconds)
}
| unix1986/universe | tool/kafka-0.8.1.1-src/core/src/test/scala/unit/kafka/utils/MockTime.scala | Scala | bsd-2-clause | 1,597 |
package uk.gov.homeoffice.rtp.proxy
// Host/port address pair; presumably the proxy's own listening endpoint,
// paired with ProxiedServer for the upstream target — confirm against callers.
case class Server(host: String, port: Int)
case class ProxiedServer(host: String, port: Int) | UKHomeOffice/rtp-proxy-lib | src/main/scala/uk/gov/homeoffice/rtp/proxy/Server.scala | Scala | mit | 130 |
package com.codegans.moodmeter.service
import akka.actor.{Actor, ActorLogging}
import akka.event.LoggingAdapter
import com.codegans.moodmeter.model.Score.Score
import com.codegans.moodmeter.model.{Key, Presentation, Score, Statistics}
import com.codegans.moodmeter.service.StatisticWebService.Message
import com.codegans.moodmeter.util.Library._
import com.codegans.moodmeter.util.LocalJsonProtocol._
import org.squeryl.PrimitiveTypeMode._
import org.squeryl.Query
import org.squeryl.dsl.GroupWithMeasures
import spray.http.MediaTypes._
import spray.http.{HttpEntity, HttpResponse}
import spray.json.pimpAny
import spray.routing.RequestContext
/**
* JavaDoc here
*
* @author Victor Polischuk
* @since 15.03.2015 10:04
*/
trait StatisticWebService {
def log: LoggingAdapter
// Counts votes per score for the requested presentation over the last five
// minutes and completes the HTTP request with the JSON-encoded statistics.
def process(message: Message) = {
log.debug("Load statistics: ", message.key)
val key = message.key
// Lower bound of the counting window: now minus 5 minutes.
val timestamp = new DateType(System.currentTimeMillis() - 5 * 60 * 1000)
transaction {
// Group recent votes for this presentation by score and count each group.
val query: Query[GroupWithMeasures[Score, Long]] = from(votes)(v =>
where(v.presentationKey.value === key.value and v.timestamp >= timestamp)
groupBy (v.score)
compute (count))
val result = query.map(m => (m.key, m.measures)).toMap
// Scores absent from the query result simply had zero votes in the window.
val statistics = Statistics(
result.getOrElse(Score.Good, 0),
result.getOrElse(Score.Ok, 0),
result.getOrElse(Score.Bad, 0)
)
log.info("Query result is: {}", statistics)
message.requestContext.complete(
HttpResponse(entity = HttpEntity(`application/json`, statistics.toJson.toString()))
)
}
}
}
/** Actor facade that forwards every statistics request to the shared service logic. */
class StatisticActor extends Actor with ActorLogging with StatisticWebService {
  def receive = {
    case msg: Message => process(msg)
  }
}
object StatisticWebService {
// Request envelope: the Spray context to complete plus the presentation key to report on.
case class Message(requestContext: RequestContext, key: Key[Presentation])
| victor-cr/mood-meter | server/src/main/scala/com/codegans/moodmeter/service/StatisticWebService.scala | Scala | mit | 1,891 |
package models
import javax.inject.Inject
import anorm.SqlParser._
import anorm._
import play.api.db.Database
/**
* Created by manuel on 19.04.16.
*/
object PaperResult {
// Severity symbol attached to each result row.
val SYMBOL_OK = 0
val SYMBOL_WARNING = 1
val SYMBOL_ERROR = 2
// Result-type codes; the numeric gaps leave room for inserting new checks
// within each category while keeping ORDER BY result_type meaningful.
val TYPE_BASICS = 0
val TYPE_BASICS_SAMPLE_SIZE = 10
val TYPE_BASICS_ERROR_TERMS = 20
val TYPE_BASICS_P_VALUES = 30
val TYPE_BASICS_RANGE_P_VALUES = 40
val TYPE_BASICS_PRECISION_P_VALUES = 50
val TYPE_BASICS_SIDED_DISTRIBUTION = 60
val TYPE_BASICS_MEAN_WITHOUT_VARIANCE = 70
val TYPE_BASICS_VARIANCE_IFNOT_NORMAL = 80
val TYPE_BASICS_FIT_WITHOUT_GOF = 90
val TYPE_BASICS_POWER_EFFECT = 100
val TYPE_M2A = 1000
// statcheck-derived checks per statistic kind.
val TYPE_STATCHECK = 2000
val TYPE_STATCHECK_CHI2 = 2010
val TYPE_STATCHECK_F = 2020
val TYPE_STATCHECK_R = 2030
val TYPE_STATCHECK_T = 2040
val TYPE_STATCHECK_Z = 2050
// Layout-related checks.
val TYPE_LAYOUT = 3000
val TYPE_LAYOUT_BORDER = 3010
val TYPE_LAYOUT_COLORS = 3020
}
// One automated check result for a paper; `resultType` is a PaperResult.TYPE_*
// code and `symbol` a PaperResult.SYMBOL_* severity.
case class PaperResult(id: Option[Long], paperId: Int, resultType: Int, descr: String, result: String, symbol: Int,
position: String) extends Serializable
// CRUD and aggregate queries for the paper_results table, via Anorm.
class PaperResultService @Inject()(db:Database) {
// Maps one paper_results row to a PaperResult instance.
private val answerParser: RowParser[PaperResult] =
get[Option[Long]]("id") ~
get[Int]("paper_id") ~
get[Int]("result_type") ~
get[String]("descr") ~
get[String]("result") ~
get[Int]("symbol") ~
get[String]("position") map {
case id ~ paper_id ~ result_type ~ descr ~ result ~ symbol ~ position =>
PaperResult(id, paper_id, result_type, descr, result, symbol, position)
}
def findById(id: Long): Option[PaperResult] =
db.withConnection { implicit c =>
SQL("SELECT * FROM paper_results WHERE id = {id}").on(
'id -> id
).as(answerParser.singleOpt)
}
// All results for one paper, ordered by the TYPE_* code.
def findByPaperId(paperId: Int): List[PaperResult] =
db.withConnection { implicit c =>
SQL("SELECT * FROM paper_results WHERE paper_id = {paper_id} ORDER BY result_type").on(
'paper_id -> paperId
).as(answerParser *)
}
// Total warning/error results across a conference. The symbol constants are
// interpolated by string concatenation, which is safe only because they are
// internal Int constants, never user input.
def countByConferenceTotal(conferenceId: Int) : Int = {
db.withConnection { implicit c =>
SQL("SELECT count(*) FROM paper_results r, papers p " +
"WHERE r.paper_id = p.id AND (symbol = " + PaperResult.SYMBOL_WARNING + " OR " +
"symbol = " + PaperResult.SYMBOL_ERROR + ") AND p.conference_id = {conference_id}").on(
'conference_id -> conferenceId
).as(scalar[Int].single)
}
}
// Number of distinct papers in a conference that have at least one warning or error.
def countByConferencePapers(conferenceId: Int) : Int = {
db.withConnection { implicit c =>
SQL("SELECT count(DISTINCT r.paper_id) FROM paper_results r, papers p " +
"WHERE r.paper_id = p.id AND (symbol = " + PaperResult.SYMBOL_WARNING + " OR " +
"symbol = " + PaperResult.SYMBOL_ERROR + ") AND p.conference_id = {conference_id}").on(
'conference_id -> conferenceId
).as(scalar[Int].single)
}
}
// Inserts a new result row; returns the generated key (if any) via executeInsert.
def create(paperId: Int, resultType: Int, descr: String, result: String, symbol: Int, position: String) =
db.withConnection { implicit c =>
SQL("INSERT INTO paper_results(paper_id, result_type, descr, result, symbol, position) " +
"VALUES ({paper_id},{result_type},{descr},{result},{symbol},{position})").on(
'paper_id -> paperId,
'result_type -> resultType,
'descr -> descr,
'result -> result,
'symbol -> symbol,
'position -> position
).executeInsert()
}
def delete(id: Long) =
db.withConnection { implicit c =>
SQL("DELETE FROM paper_results WHERE id={id}").on(
'id -> id
).executeUpdate()
}
} | manuelroesch/PaperValidator | app/models/PaperResultService.scala | Scala | mit | 3,451 |
// Fixture for the IDE "extract method" refactoring test: the /*start*/../*end*/
// markers delimit the selection, and the block comment following this object
// encodes the expected refactoring result (including duplicate detection in
// foofoo). NOTE(review): marker positions and the duplicated code shape are
// significant to the test — confirm before reformatting anything below.
object WithDefinition {
  def foo(i: Int) {
    /*start*/
    def bar = {
      val y = 0
      y + 1
    }
    println(bar + i)
    /*end*/
  }
  def foofoo() {
    def baz = {
      val x = 0
      x + 1
    }
    println(baz + 1)
  }
}
/*
object WithDefinition {
def foo(i: Int) {
/*start*/
testMethodName(i)
/*end*/
}
def testMethodName(i: Int) {
def bar = {
val y = 0
y + 1
}
println(bar + i)
}
def foofoo() {
testMethodName(1)
}
}
*/ | consulo/consulo-scala | testdata/extractMethod/duplicates/WithDefinition.scala | Scala | apache-2.0 | 496 |
package beam.agentsim.infrastructure.taz
import java.io._
import java.util
import java.util.zip.GZIPInputStream
import beam.utils.matsim_conversion.ShapeUtils.{CsvTaz, QuadTreeBounds}
import com.vividsolutions.jts.geom.Geometry
import org.matsim.api.core.v01.{Coord, Id}
import org.matsim.core.utils.collections.QuadTree
import org.matsim.core.utils.gis.ShapeFileReader
import org.opengis.feature.simple.SimpleFeature
import org.slf4j.LoggerFactory
import org.supercsv.io._
import org.supercsv.prefs.CsvPreference
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/** Spatial index of traffic-analysis zones, backed by a MATSim quad tree and a
  * secondary hash map for id-based lookup. */
class TAZTreeMap(val tazQuadTree: QuadTree[TAZ]) {

  /** Lookup table from the string form of a TAZ id to the TAZ itself. */
  val stringIdToTAZMapping: mutable.HashMap[String, TAZ] = mutable.HashMap()

  // Index every TAZ already present in the quad tree by its id.
  tazQuadTree.values().asScala.foreach { taz =>
    stringIdToTAZMapping.put(taz.tazId.toString, taz)
  }

  def getTAZs: Iterable[TAZ] = tazQuadTree.values().asScala

  /** Nearest TAZ by stored coordinate; this may differ from the TAZ whose
    * polygon geometrically contains (x, y). */
  def getTAZ(x: Double, y: Double): TAZ = tazQuadTree.getClosest(x, y)

  def getTAZ(tazId: String): Option[TAZ] = stringIdToTAZMapping.get(tazId)

  def getTAZ(tazId: Id[TAZ]): Option[TAZ] = stringIdToTAZMapping.get(tazId.toString)

  /** All TAZs whose stored coordinate lies within `radius` of (x, y). */
  def getTAZInRadius(x: Double, y: Double, radius: Double): util.Collection[TAZ] =
    tazQuadTree.getDisk(x, y, radius)

  def getTAZInRadius(loc: Coord, radius: Double): util.Collection[TAZ] =
    tazQuadTree.getDisk(loc.getX, loc.getY, radius)
}
object TAZTreeMap {

  private val logger = LoggerFactory.getLogger(this.getClass)

  // Sentinel id for locations that could not be matched to a real TAZ.
  val emptyTAZId = Id.create("NA", classOf[TAZ])

  /** Builds a TAZ map from an ESRI shape file; `tazIDFieldName` names the
    * feature attribute that carries the TAZ id. */
  def fromShapeFile(shapeFilePath: String, tazIDFieldName: String): TAZTreeMap = {
    new TAZTreeMap(initQuadTreeFromShapeFile(shapeFilePath, tazIDFieldName))
  }

  // One TAZ per geometry feature, keyed by the geometry's first coordinate and
  // sized by its area; features without a Geometry default-geometry are skipped.
  private def initQuadTreeFromShapeFile(
    shapeFilePath: String,
    tazIDFieldName: String
  ): QuadTree[TAZ] = {
    val shapeFileReader: ShapeFileReader = new ShapeFileReader
    shapeFileReader.readFileAndInitialize(shapeFilePath)
    val features: util.Collection[SimpleFeature] = shapeFileReader.getFeatureSet
    val quadTreeBounds: QuadTreeBounds = quadTreeExtentFromShapeFile(features)
    // The quad tree's extent must be fixed up front, hence the bounds pre-pass.
    val tazQuadTree: QuadTree[TAZ] = new QuadTree[TAZ](
      quadTreeBounds.minx,
      quadTreeBounds.miny,
      quadTreeBounds.maxx,
      quadTreeBounds.maxy
    )
    for (f <- features.asScala) {
      f.getDefaultGeometry match {
        case g: Geometry =>
          val taz = new TAZ(
            f.getAttribute(tazIDFieldName).asInstanceOf[String],
            new Coord(g.getCoordinate.x, g.getCoordinate.y),
            g.getArea
          )
          tazQuadTree.put(taz.coord.getX, taz.coord.getY, taz)
        case _ => // non-geometry feature: ignore
      }
    }
    tazQuadTree
  }

  // Bounding box over the envelopes of all geometry features.
  private def quadTreeExtentFromShapeFile(
    features: util.Collection[SimpleFeature]
  ): QuadTreeBounds = {
    var minX: Double = Double.MaxValue
    var maxX: Double = Double.MinValue
    var minY: Double = Double.MaxValue
    var maxY: Double = Double.MinValue
    for (f <- features.asScala) {
      f.getDefaultGeometry match {
        case g: Geometry =>
          val ca = g.getEnvelope.getEnvelopeInternal
          //val ca = wgs2Utm(g.getEnvelope.getEnvelopeInternal)
          minX = Math.min(minX, ca.getMinX)
          minY = Math.min(minY, ca.getMinY)
          maxX = Math.max(maxX, ca.getMaxX)
          maxY = Math.max(maxY, ca.getMaxY)
        case _ =>
      }
    }
    QuadTreeBounds(minX, minY, maxX, maxY)
  }

  // Bounding box over CSV rows. NOTE(review): an empty `lines` yields inverted
  // MaxValue/MinValue bounds — confirm callers never pass an empty file.
  private def quadTreeExtentFromCsvFile(lines: Seq[CsvTaz]): QuadTreeBounds = {
    var minX: Double = Double.MaxValue
    var maxX: Double = Double.MinValue
    var minY: Double = Double.MaxValue
    var maxY: Double = Double.MinValue
    for (l <- lines) {
      minX = Math.min(minX, l.coordX)
      minY = Math.min(minY, l.coordY)
      maxX = Math.max(maxX, l.coordX)
      maxY = Math.max(maxY, l.coordY)
    }
    QuadTreeBounds(minX, minY, maxX, maxY)
  }

  /** Builds a TAZ map from a CSV file (see readCsvFile for the expected columns). */
  def fromCsv(csvFile: String): TAZTreeMap = {
    val lines = readCsvFile(csvFile)
    val quadTreeBounds: QuadTreeBounds = quadTreeExtentFromCsvFile(lines)
    val tazQuadTree: QuadTree[TAZ] = new QuadTree[TAZ](
      quadTreeBounds.minx,
      quadTreeBounds.miny,
      quadTreeBounds.maxx,
      quadTreeBounds.maxy
    )
    for (l <- lines) {
      val taz = new TAZ(l.id, new Coord(l.coordX, l.coordY), l.area)
      tazQuadTree.put(taz.coord.getX, taz.coord.getY, taz)
    }
    new TAZTreeMap(tazQuadTree)
  }

  // Opens the file as text, transparently decompressing ".gz" inputs.
  private def readerFromFile(filePath: String): java.io.Reader = {
    if (filePath.endsWith(".gz")) {
      new InputStreamReader(
        new GZIPInputStream(new BufferedInputStream(new FileInputStream(filePath)))
      )
    } else {
      new FileReader(filePath)
    }
  }

  // Parses the TAZ CSV. Expects a header row with columns: taz, coord-x,
  // coord-y, area. The reader is closed even when parsing fails.
  private def readCsvFile(filePath: String): Seq[CsvTaz] = {
    var mapReader: ICsvMapReader = null
    val res = ArrayBuffer[CsvTaz]()
    try {
      mapReader = new CsvMapReader(readerFromFile(filePath), CsvPreference.STANDARD_PREFERENCE)
      val header = mapReader.getHeader(true)
      var line: java.util.Map[String, String] = mapReader.read(header: _*)
      while (null != line) {
        val id = line.get("taz")
        val coordX = line.get("coord-x")
        val coordY = line.get("coord-y")
        val area = line.get("area")
        res.append(CsvTaz(id, coordX.toDouble, coordY.toDouble, area.toDouble))
        line = mapReader.read(header: _*)
      }
    } finally {
      if (null != mapReader)
        mapReader.close()
    }
    res
  }

  /** Loads the TAZ map from CSV, falling back to the single-zone
    * defaultTazTreeMap when the file is missing or unreadable. */
  def getTazTreeMap(filePath: String): TAZTreeMap = {
    try {
      TAZTreeMap.fromCsv(filePath)
    } catch {
      case fe: FileNotFoundException =>
        logger.error("No TAZ file found at given file path (using defaultTazTreeMap): %s" format filePath, fe)
        defaultTazTreeMap
      case e: Exception =>
        logger.error(
          "Exception occurred while reading from CSV file from path (using defaultTazTreeMap): %s" format e.getMessage,
          e
        )
        defaultTazTreeMap
    }
  }

  // Fallback map: a single zero-area TAZ "0" at the origin.
  val defaultTazTreeMap: TAZTreeMap = {
    val tazQuadTree: QuadTree[TAZ] = new QuadTree(-1, -1, 1, 1)
    val taz = new TAZ("0", new Coord(0.0, 0.0), 0.0)
    tazQuadTree.put(taz.coord.getX, taz.coord.getY, taz)
    new TAZTreeMap(tazQuadTree)
  }

  /** Uniformly samples a point in a disk centred on the TAZ coordinate.
    * The disk radius is HALF the equivalent-circle radius of the TAZ area
    * (note the "/ 2") — presumably intentional damping; TODO confirm.
    * The default `rand` is re-seeded from the clock on every call. */
  def randomLocationInTAZ(
    taz: TAZ,
    rand: scala.util.Random = new scala.util.Random(System.currentTimeMillis())
  ): Coord = {
    val radius = Math.sqrt(taz.areaInSquareMeters / Math.PI) / 2
    val a = 2 * Math.PI * rand.nextDouble()
    // sqrt of the uniform draw gives an area-uniform radial distribution.
    val r = radius * Math.sqrt(rand.nextDouble())
    val x = r * Math.cos(a)
    val y = r * Math.sin(a)
    new Coord(taz.coord.getX + x, taz.coord.getY + y)
  }
}
| colinsheppard/beam | src/main/scala/beam/agentsim/infrastructure/taz/TAZTreeMap.scala | Scala | gpl-3.0 | 6,900 |
package com.karasiq.bootstrap4.buttons
import scala.language.postfixOps
import com.karasiq.bootstrap.components.BootstrapComponents
import com.karasiq.bootstrap.context.{ClassModifiers, RenderingContext}
/** Render-context-agnostic Bootstrap 4 button implementation. Mixed into a
  * cake with the rendering context and the abstract Buttons API it fulfils. */
trait UniversalButtons extends UniversalButtonStates with UniversalButtonGroups { self: RenderingContext with BootstrapComponents with ClassModifiers with Buttons ⇒

  import scalaTags.all._

  type Button = ButtonBuilder

  object Button extends ButtonFactory {
    /**
      * Shortcut to [[com.karasiq.bootstrap4.buttons.UniversalButtons.ButtonBuilder ButtonBuilder]].
      */
    def apply(style: ButtonStyle = ButtonStyle.default, size: ButtonSize = ButtonSize.default,
              block: Boolean = false, active: Boolean = false, disabled: Boolean = false): ButtonBuilder = {
      ButtonBuilder(style, size, block, active, disabled)
    }
  }

  trait UniversalButton extends AbstractButton {
    // Renders a <button type="button"> carrying "btn" plus the optional
    // state classes, then the style/size modifiers and any extra modifiers.
    override def renderTag(md: ModifierT*): TagT = {
      @inline def optional(flag: Boolean, className: String) = if (flag) Some(className) else None
      val classList = Seq(Some("btn"), optional(block, "btn-block"), optional(active, "active"), optional(disabled, "disabled")).flatten.map(_.addClass)
      scalaTags.tags.button(`type` := "button", classList, style, size)(md:_*)
    }
  }

  /**
    * Button builder
    * @param style Use any of the available button classes to quickly create a styled button
    * @param size Fancy larger or smaller buttons? Add `.btn-lg` or `.btn-sm` for additional
    *             sizes (Bootstrap 4 dropped the v3 `.btn-xs` class)
    * @param block Create block level buttons—those that span the full width of a parent— by adding `.btn-block`
    * @param active Buttons will appear pressed (with a darker background, darker border, and inset shadow) when active
    * @param disabled Make buttons look unclickable by fading them back with `opacity`
    * @see [[https://getbootstrap.com/docs/4.0/components/buttons/]]
    */
  case class ButtonBuilder(style: ButtonStyle = ButtonStyle.default, size: ButtonSize = ButtonSize.default,
                           block: Boolean = false, active: Boolean = false, disabled: Boolean = false)
    extends UniversalButton {

    def withStyle(style: ButtonStyle): ButtonBuilder = copy(style = style)
    def withSize(size: ButtonSize): ButtonBuilder = copy(size = size)
  }
}
| Karasiq/scalajs-bootstrap | library-v4/shared/src/main/scala/com/karasiq/bootstrap4/buttons/UniversalButtons.scala | Scala | mit | 2,312 |
package com.bostontechnologies.quickfixs.messages
import quickfix.Message
import quickfix.field._
import quickfix.fix50.OrderCancelReject
/** Scala-friendly wrapper over a QuickFIX OrderCancelReject message, exposing
  * the commonly used fields as has/get/set accessors that delegate directly to
  * the underlying Message. Construction is restricted to the companion so the
  * message-type invariant below always holds. */
class RichOrderCancelReject private(self: Message) extends RichMessage(self) {
  require(RichMessage.isA(self, RichOrderCancelReject.msgType))

  // OrderID
  def hasOrderId: Boolean = self.isSetField(OrderID.FIELD)
  def orderId: String = self.getString(OrderID.FIELD)
  def orderId_=(value: String) {
    self.setString(OrderID.FIELD, value)
  }

  // OrigClOrdID — the client order id of the request being rejected
  def hasOrigClOrdId: Boolean = self.isSetField(OrigClOrdID.FIELD)
  def origClOrdId: String = self.getString(OrigClOrdID.FIELD)
  def origClOrdId_=(value: String) {
    self.setString(OrigClOrdID.FIELD, value)
  }

  // ClOrdID
  def hasClOrdId: Boolean = self.isSetField(ClOrdID.FIELD)
  def clOrdId: String = self.getString(ClOrdID.FIELD)
  def clOrdId_=(value: String) {
    self.setString(ClOrdID.FIELD, value)
  }

  // OrdStatus
  def hasOrdStatus: Boolean = self.isSetField(OrdStatus.FIELD)
  def ordStatus: Char = self.getChar(OrdStatus.FIELD)
  def ordStatus_=(value: Char) {
    self.setChar(OrdStatus.FIELD, value)
  }

  // CxlRejResponseTo — whether the reject answers a cancel or a cancel/replace
  def hasCancelRejectResponseTo: Boolean = self.isSetField(CxlRejResponseTo.FIELD)
  def cancelRejectResponseTo: Char = self.getChar(CxlRejResponseTo.FIELD)
  def cancelRejectResponseTo_=(value: Char) {
    self.setChar(CxlRejResponseTo.FIELD, value)
  }

  // CxlRejReason
  def hasCancelRejectReason: Boolean = self.isSetField(CxlRejReason.FIELD)
  def cancelRejectReason: Int = self.getInt(CxlRejReason.FIELD)
  def cancelRejectReason_=(value: Int) {
    self.setInt(CxlRejReason.FIELD, value)
  }

  // Free-form Text
  def hasText: Boolean = self.isSetField(Text.FIELD)
  def text: String = self.getString(Text.FIELD)
  def text_=(value: String) {
    self.setString(Text.FIELD, value)
  }
}
/** Factories for wrapped OrderCancelReject messages in FIX 4.4 and FIX 5.0. */
object RichOrderCancelReject extends RichMessageExtractor[RichOrderCancelReject, OrderCancelReject] {
  val msgType = MsgType.ORDER_CANCEL_REJECT

  def apply(message: quickfix.fix50.OrderCancelReject): RichOrderCancelReject =
    new RichOrderCancelReject(message)

  /** Fresh, empty FIX 5.0 message. */
  def new50Message: RichOrderCancelReject = this(new quickfix.fix50.OrderCancelReject)

  def apply(message: quickfix.fix44.OrderCancelReject): RichOrderCancelReject =
    new RichOrderCancelReject(message)

  /** Fresh, empty FIX 4.4 message. */
  def new44Message: RichOrderCancelReject = this(new quickfix.fix44.OrderCancelReject)

  /** Fresh message built through the generic RichMessage factory. */
  def newMessage: RichOrderCancelReject = new RichOrderCancelReject(RichMessage.newMessage(msgType).self)
} | Forexware/quickfixs | src/main/scala/com/bostontechnologies/quickfixs/messages/RichOrderCancelReject.scala | Scala | apache-2.0 | 2,426 |
package edison.search.tree
import edison.search.Samples
import edison.search.serialization.{ JsonSerialization, JsonSerializer, TreeSerializer }
import org.json4s.JsonAST.JObject
import org.json4s.JsonDSL._
object Helpers {

  /** Creates human-readable tree representations */
  implicit class TreePrettyPrinter(tree: Tree) {

    /** Full pretty-printed JSON using the default serializers (samples included). */
    def json: String = {
      import JsonSerialization.DefaultSerializers._
      JsonSerialization.ExtendedJson(JsonSerialization.serialize(tree)).pretty
    }

    /** Tree representation that does not include samples */
    def shortJson: String = {
      // Shadows the default Samples serializer with one that emits only the
      // sample count and mean, keeping the output compact.
      implicit object TreeSerializerNoSamples extends TreeSerializer {
        override def samplesSerializer = new JsonSerializer[Samples] {
          override def serialize(obj: Samples): JObject =
            ("size" -> obj.size) ~ ("mean" -> obj.mean)
        }
      }
      JsonSerialization.ExtendedJson(JsonSerialization.serialize(tree)).pretty
    }
  }
}
| pawel-wiejacha/edison | core/src/main/scala/edison/search/tree/Helpers.scala | Scala | mit | 950 |
/******************************************************************************************************************\
* Rapture Core, version 2.0.0. Copyright 2010-2015 Jon Pretty, Propensive Ltd. *
* *
* The primary distribution site is http://rapture.io/ *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in complance *
* with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License *
* for the specific language governing permissions and limitations under the License. *
\******************************************************************************************************************/
package rapture.core
import scala.util.Try
// Empty companion object. NOTE(review): it declares no members — confirm it is
// still required (e.g. for binary compatibility) before removing.
object ParseException

/** Raised when a string cannot be interpreted as the requested type `typ`. */
case class ParseException(bad: String, typ: String) extends Exception(s"could not parse '$bad' as $typ")
package booleanParsing {
  /** Boolean parsing that accepts exactly the two spellings supplied by the
    * implicit BooleanRepresentation; anything else is an InvalidBoolean. */
  object strict {
    def apply() = implicitBooleanParsing

    implicit def implicitBooleanParsing(implicit br: BooleanRepresentation): BooleanParser =
      new BooleanParser {
        def parse(s: String, mode: Mode[_]): mode.Wrap[Boolean, InvalidBoolean] = mode.wrap {
          if(s == br.trueValue) true
          else if(s == br.falseValue) false
          else mode.exception(InvalidBoolean(s))
        }
      }
  }
object permissive {
def apply(): BooleanParser = implicitBooleanParsing
private val trueValues = List("true", "yes", "on", "1")
private val falseValues = List("false", "no", "off", "0")
implicit val implicitBooleanParsing: BooleanParser = new BooleanParser {
def parse(b: String, mode: Mode[_]): mode.Wrap[Boolean, InvalidBoolean] = mode.wrap {
if(trueValues.contains(b.toLowerCase)) true
else if(falseValues.contains(b.toLowerCase)) false
else mode.exception(ParseException(b, "boolean using permissive parser"))
}
}
}
}
// Default boolean parser: the permissive one.
object BooleanParser { implicit val implicitBooleanParser: BooleanParser = booleanParsing.permissive() }

/** Parses a string to Boolean, reporting failure through the supplied Mode. */
trait BooleanParser { def parse(s: String, mode: Mode[_]): mode.Wrap[Boolean, InvalidBoolean] }

/** Type class for parsing a string into a `T`; the `Throws` member records the
  * exception type produced on failure, surfaced through the Mode's wrapper. */
trait StringParser[T] {
  type Throws <: Exception
  def parse(string: String, mode: Mode[_]): mode.Wrap[T, Throws]
}

/** Error raised when a string is not a valid boolean. */
case class InvalidBoolean(value: String) extends Exception(s"""The value "$value" is not a valid boolean.""")

/** Error raised when a string is not a valid number of the named type. */
case class InvalidNumber(value: String, numberType: String) extends Exception(s"""The value "$value" is not a valid $numberType.""")
/** Default [[StringParser]] instances for the primitive types, plus combinators
  * lifting any parser into `Option` and `Try`. Every instance reports failures
  * through the caller-supplied [[Mode]], and its `Throws` refinement documents
  * exactly which exception a failing parse produces.
  *
  * Fixed: `doubleParser` previously raised the generic ParseException although
  * its declared error type was InvalidNumber; it now raises
  * `InvalidNumber(s, "double")` like the other numeric parsers. The
  * `doubleParser`/`floatParser` declarations also gained the
  * `{ type Throws = InvalidNumber }` refinement carried by every sibling
  * instance, so typed error handling sees the precise type.
  */
object StringParser {

  /** Delegates to whichever [[BooleanParser]] is implicitly in scope. */
  implicit def booleanParser(implicit bp: BooleanParser): StringParser[Boolean] { type Throws = InvalidBoolean } = new StringParser[Boolean] {
    type Throws = InvalidBoolean
    def parse(s: String, mode: Mode[_]): mode.Wrap[Boolean, InvalidBoolean] = bp.parse(s, mode.generic)
  }

  implicit val byteParser: StringParser[Byte] { type Throws = InvalidNumber } = new StringParser[Byte] {
    type Throws = InvalidNumber
    def parse(s: String, mode: Mode[_]): mode.Wrap[Byte, InvalidNumber] = mode.wrap {
      try java.lang.Byte.parseByte(s) catch {
        case e: NumberFormatException => mode.exception(InvalidNumber(s, "byte"))
      }
    }
  }

  /** A valid "character" input is exactly one character long. */
  implicit val charParser: StringParser[Char] { type Throws = InvalidNumber } = new StringParser[Char] {
    type Throws = InvalidNumber
    def parse(s: String, mode: Mode[_]): mode.Wrap[Char, InvalidNumber] = mode.wrap {
      if(s.length == 1) s.charAt(0) else mode.exception(InvalidNumber(s, "character"))
    }
  }

  implicit val shortParser: StringParser[Short] { type Throws = InvalidNumber } = new StringParser[Short] {
    type Throws = InvalidNumber
    def parse(s: String, mode: Mode[_]): mode.Wrap[Short, InvalidNumber] = mode.wrap {
      try java.lang.Short.parseShort(s) catch {
        case e: NumberFormatException => mode.exception(InvalidNumber(s, "short"))
      }
    }
  }

  implicit val intParser: StringParser[Int] { type Throws = InvalidNumber } = new StringParser[Int] {
    type Throws = InvalidNumber
    def parse(s: String, mode: Mode[_]): mode.Wrap[Int, InvalidNumber] = mode.wrap {
      try java.lang.Integer.parseInt(s) catch {
        case e: NumberFormatException => mode.exception(InvalidNumber(s, "integer"))
      }
    }
  }

  implicit val longParser: StringParser[Long] { type Throws = InvalidNumber } = new StringParser[Long] {
    type Throws = InvalidNumber
    def parse(s: String, mode: Mode[_]): mode.Wrap[Long, InvalidNumber] = mode.wrap {
      try java.lang.Long.parseLong(s) catch {
        case e: NumberFormatException => mode.exception(InvalidNumber(s, "long"))
      }
    }
  }

  /** Identity parser: every string is a valid String. */
  implicit val stringParser: StringParser[String] { type Throws = Nothing } = new StringParser[String] {
    type Throws = Nothing
    def parse(s: String, mode: Mode[_]): mode.Wrap[String, Nothing] = mode.wrap(s)
  }

  implicit val doubleParser: StringParser[Double] { type Throws = InvalidNumber } = new StringParser[Double] {
    type Throws = InvalidNumber
    def parse(s: String, mode: Mode[_]): mode.Wrap[Double, InvalidNumber] = mode.wrap {
      try java.lang.Double.parseDouble(s) catch {
        case e: NumberFormatException => mode.exception(InvalidNumber(s, "double"))
      }
    }
  }

  implicit val floatParser: StringParser[Float] { type Throws = InvalidNumber } = new StringParser[Float] {
    type Throws = InvalidNumber
    def parse(s: String, mode: Mode[_]): mode.Wrap[Float, InvalidNumber] = mode.wrap {
      try java.lang.Float.parseFloat(s) catch {
        case e: NumberFormatException => mode.exception(InvalidNumber(s, "float"))
      }
    }
  }

  /** Lifts a parser into an optional one: any parse failure becomes None. */
  implicit def optParser[T: StringParser]: StringParser[Option[T]] { type Throws = Nothing } = new StringParser[Option[T]] {
    type Throws = Nothing
    def parse(s: String, mode: Mode[_]): mode.Wrap[Option[T], Nothing] = mode.wrap {
      try Some(mode.unwrap(?[StringParser[T]].parse(s, mode))) catch {
        case e: Exception => None
      }
    }
  }

  /** Lifts a parser into one that captures the outcome in a `Try`. */
  implicit def tryParser[T: StringParser]: StringParser[Try[T]] { type Throws = Nothing } = new StringParser[Try[T]] {
    type Throws = Nothing
    def parse(s: String, mode: Mode[_]): mode.Wrap[Try[T], Nothing] = mode.wrap {
      ?[StringParser[T]].parse(s, modes.returnTry())
    }
  }
}
}
| utaal/rapture-core | src/parser.scala | Scala | apache-2.0 | 6,863 |
package eventstreams.support
import _root_.core.sysevents.SyseventOps.stringToSyseventOps
import _root_.core.sysevents.WithSyseventPublisher
import _root_.core.sysevents.ref.ComponentWithBaseSysevents
import akka.actor.Props
import eventstreams.Tools.configHelper
import eventstreams._
import eventstreams.core.actors.{ActorWithComposableBehavior, BaseActorSysevents}
import eventstreams.core.components.routing.MessageRouterActor
import play.api.libs.json.{JsValue, Json}
import scalaz.Scalaz._
// Control messages consumed by SubscribingComponentStubActor.
private case class SubscribeTo(subj: Any)
private case class SendCommand(subj: Any, data: Option[JsValue])

/** Sysevents raised by the stub, so tests can assert on exactly what kind of
  * message (stale/update/command ok/command error/other) the subscriber saw. */
trait SubscribingComponentStubSysevents extends ComponentWithBaseSysevents with BaseActorSysevents {
  val StaleReceived = "StaleReceived".info
  val UpdateReceived = "UpdateReceived".info
  val CommandOkReceived = "CommandOkReceived".info
  val CommandErrReceived = "CommandErrReceived".info
  val UnknownMessageReceived = "UnknownMessageReceived".info

  override def componentId: String = "Test.SubscribingComponentStub"
}
/** Test DSL for starting/stopping stub subscriber actors and driving
  * subscriptions and commands from them. The numeric-suffixed variants
  * (…1, …2, …N) address the N-th stub instance by its conventional name. */
trait SubscribingComponentStub extends SubscribingComponentStubSysevents {

  private def props(instanceId: String) = Props(new SubscribingComponentStubActor(instanceId))

  private def startMessageSubscriber(sys: ActorSystemWrapper, id: String) = {
    sys.start(props(id), id)
  }

  // Sends the control messages to an already-started stub by actor selection.
  private def subscribeFrom(sys: ActorSystemWrapper, id: String, subj: Any) = sys.rootUserActorSelection(id) ! SubscribeTo(subj)

  private def commandFrom(sys: ActorSystemWrapper, id: String, subj: Any, data: Option[JsValue]) = sys.rootUserActorSelection(id) ! SendCommand(subj, data)

  def killMessageSubscriberN(sys: ActorSystemWrapper, c: Int) = sys.stopActor(subscriberStubInstanceIdFor(c))
  def killMessageSubscriber1(sys: ActorSystemWrapper) = killMessageSubscriberN(sys, 1)
  def killMessageSubscriber2(sys: ActorSystemWrapper) = killMessageSubscriberN(sys, 2)

  def startMessageSubscriberN(sys: ActorSystemWrapper, c: Int) = startMessageSubscriber(sys, subscriberStubInstanceIdFor(c))
  def startMessageSubscriber1(sys: ActorSystemWrapper) = startMessageSubscriberN(sys, 1)
  def startMessageSubscriber2(sys: ActorSystemWrapper) = startMessageSubscriberN(sys, 2)

  def subscribeFromN(sys: ActorSystemWrapper, c: Int, subj: Any) = subscribeFrom(sys, subscriberStubInstanceIdFor(c), subj)
  def subscribeFrom1(sys: ActorSystemWrapper, subj: Any) = subscribeFromN(sys, 1, subj)
  def subscribeFrom2(sys: ActorSystemWrapper, subj: Any) = subscribeFromN(sys, 2, subj)

  def commandFromN(sys: ActorSystemWrapper, c: Int, subj: Any, data: Option[JsValue]) = commandFrom(sys, subscriberStubInstanceIdFor(c), subj, data)
  def commandFrom1(sys: ActorSystemWrapper, subj: Any, data: Option[JsValue]) = commandFromN(sys, 1, subj, data)
  def commandFrom2(sys: ActorSystemWrapper, subj: Any, data: Option[JsValue]) = commandFromN(sys, 2, subj, data)

  // Naming convention for stub instances: subscriberStub1, subscriberStub2, ...
  def subscriberStubInstanceIdFor(c: Int) = "subscriberStub" + c
}

object SubscribingComponentStub extends SubscribingComponentStub
/** The stub subscriber actor itself: forwards SubscribeTo/SendCommand to the
  * message router and converts every routed reply into a sysevent that tests
  * can observe. */
class SubscribingComponentStubActor(instanceId: String) extends ActorWithComposableBehavior with SubscribingComponentStubSysevents with WithSyseventPublisher {

  override def commonBehavior: Receive = handler orElse super.commonBehavior

  override def commonFields: Seq[(Symbol, Any)] = super.commonFields ++ Seq('InstanceId -> instanceId)

  def handler: Receive = {
    case SubscribeTo(subj) => MessageRouterActor.path(context) ! Subscribe(self, subj)
    case SendCommand(subj, data) =>
      // Register first so the router can address the command reply back here.
      MessageRouterActor.path(context) ! RegisterComponent(ComponentKey(uuid), self)
      MessageRouterActor.path(context) ! Command(subj, Some(LocalSubj(ComponentKey(uuid), TopicKey("_"))), data.map(Json.stringify))
    case x : Stale => StaleReceived >> ('Message -> x)
    case x : Update => x.subj match {
      // Record remote vs local subjects separately; 'Contents is the "msg"
      // field of the JSON payload, falling back to "n/a" when absent.
      case RemoteAddrSubj(addr, LocalSubj(k,t)) =>
        UpdateReceived >>> Seq('Message -> x, 'Subject -> x.subj, 'Address -> addr, 'ComponentKey -> k.key, 'Topic -> t.key, 'Contents -> (Json.parse(x.data) ~> 'msg | "n/a"), 'Data -> x.data)
      case LocalSubj(k,t) =>
        UpdateReceived >>> Seq('Message -> x, 'Subject -> x.subj, 'Address -> "local", 'ComponentKey -> k.key, 'Topic -> t.key, 'Contents -> (Json.parse(x.data) ~> 'msg | "n/a"), 'Data -> x.data)
      case _ =>
        UpdateReceived >>> Seq('Message -> x, 'Subject -> x.subj, 'Contents -> (Json.parse(x.data) ~> 'msg | "n/a"), 'Data -> x.data)
    }
    case x : CommandErr => CommandErrReceived >> ('Message -> x, 'Subject -> x.subj, 'Contents -> ((Json.parse(x.data) #> 'error ~> 'msg) | "n/a"))
    case x : CommandOk => CommandOkReceived >> ('Message -> x, 'Subject -> x.subj, 'Contents -> ((Json.parse(x.data) #> 'ok ~> 'msg) | "n/a"))
    case x => UnknownMessageReceived >> ('Message -> x)
  }
}
| intelix/eventstreams | es-core/es-api/src/test/scala/eventstreams/support/SubscribingComponentStub.scala | Scala | apache-2.0 | 4,790 |
/**
Renderer holds rendering parameters. You must define all parameters even if you don't use them all.
MODE: one of four rendering modes (points, lines, filled and filled_stroked) expressed as an Int. Specify mode via the Renderer Object definitions.
STROKEWIDTH: the width of the stroke expressed as Float.
STROKECOLOR: the stroke color (java.awt.Color)
FILLCOLOR: the stroke color (java.awt.Color)
*/
package org.loom.scene
import java.awt.Color
class Renderer (var mode: Int, var strokeWidth: Float, var strokeColor: Color, var fillColor: Color) {

  /**
    * Clone the Renderer. Produces an independent copy: both Color instances are
    * duplicated, so mutating the clone's fields never affects the original.
    * (Builds a fresh instance directly; does not go through Object.clone.)
    */
  override def clone(): Renderer = {
    // val, not var — the copies are never reassigned.
    val sc: Color = new Color(strokeColor.getRed(), strokeColor.getGreen(), strokeColor.getBlue(), strokeColor.getAlpha())
    val fc: Color = new Color(fillColor.getRed(), fillColor.getGreen(), fillColor.getBlue(), fillColor.getAlpha())
    new Renderer(mode, strokeWidth, sc, fc)
  }

}
/**
Renderer defines the four rendering modes as names so that you can access a mode by a name.
*/
object Renderer {
  // Rendering modes, referenced by Renderer.mode.
  val POINTS: Int = 0
  val LINES: Int = 1
  val FILLED: Int = 2
  val FILLED_STROKED: Int = 3
}
| brogan/Loom | src/org/loom/scene/Renderer.scala | Scala | gpl-3.0 | 1,180 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.integration.torch
import com.intel.analytics.bigdl.nn.AddConstant
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.RandomGenerator._
@com.intel.analytics.bigdl.tags.Serial
/** Integration test comparing BigDL's AddConstant layer against the reference
  * Torch implementation, run through the TH bridge with a shared RNG seed.
  * NOTE(review): the Lua code string must stay exactly in sync with the Scala
  * calls below for the comparison to be meaningful. */
class AddConstantSpec extends TorchSpec {
    "A Add Module " should "generate correct output and grad" in {
    torchCheck()  // skips when no local Torch installation is available
    val inputN = 5
    val seed = 100
    RNG.setSeed(seed)
    // AddConstant(5, inplace = true): adds 5 to every element, mutating input.
    val module = new AddConstant[Double](inputN, true)
    val input = Tensor[Double](1, 5)
    input(Array(1, 1)) = -1
    input(Array(1, 2)) = -2
    input(Array(1, 3)) = -3
    input(Array(1, 4)) = -4
    input(Array(1, 5)) = -5
    val gradOutput = Tensor[Double](1, 5)
    gradOutput(Array(1, 1)) = -2
    gradOutput(Array(1, 2)) = 5
    gradOutput(Array(1, 3)) = -10
    gradOutput(Array(1, 4)) = 17
    gradOutput(Array(1, 5)) = -26

    // Reference computation in Torch; mirrors the Scala forward/backward below.
    val code = "torch.manualSeed(" + seed + ")\n" +
      "module = nn.AddConstant(5, true)\n" +
      "output = module:forward(input)\n" +
      "gradInput = module:backward(input, gradOutput)\n"

    val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
      Array("output", "gradInput"))
    val luaOutput1 = torchResult("output").asInstanceOf[Tensor[Double]]
    val luaOutput2 = torchResult("gradInput").asInstanceOf[Tensor[Double]]

    val start = System.nanoTime()
    val output = module.forward(input)
    val gradInput = module.backward(input, gradOutput)
    val end = System.nanoTime()
    val scalaTime = end - start

    luaOutput1 should be(output)
    luaOutput2 should be(gradInput)
  }
}
| zhangxiaoli73/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/integration/torch/AddConstantSpec.scala | Scala | apache-2.0 | 2,223 |
package kvstore
import akka.testkit.{ TestProbe, TestKit, ImplicitSender }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.FunSuite
import akka.actor.ActorSystem
import scala.concurrent.duration._
import kvstore.Arbiter.{ JoinedSecondary, Join }
import kvstore.Persistence.{ Persisted, Persist }
import scala.util.Random
import scala.util.control.NonFatal
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
/** Exercises a secondary Replica in isolation: arbiter registration and the
  * Snapshot sequence-number protocol (ack current, re-ack old, drop future). */
class Step2_SecondarySpec extends TestKit(ActorSystem("Step2SecondarySpec"))
    with FunSuite
    with BeforeAndAfterAll
    with ShouldMatchers
    with ImplicitSender
    with Tools {

  override def afterAll(): Unit = {
    system.shutdown()
  }

  test("case1: Secondary (in isolation) should properly register itself to the provided Arbiter") {
    val arbiter = TestProbe()
    val secondary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case1-secondary")

    arbiter.expectMsg(Join)
  }

  test("case2: Secondary (in isolation) must handle Snapshots") {
    import Replicator._
    val arbiter = TestProbe()
    val replicator = TestProbe()
    val secondary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case2-secondary")
    val client = session(secondary)

    arbiter.expectMsg(Join)
    arbiter.send(secondary, JoinedSecondary)

    // Sequence numbers 0, 1, 2 in order: insert, update, remove.
    client.get("k1") should be === None

    replicator.send(secondary, Snapshot("k1", None, 0L))
    replicator.expectMsg(SnapshotAck("k1", 0L))
    client.get("k1") should be === None

    replicator.send(secondary, Snapshot("k1", Some("v1"), 1L))
    replicator.expectMsg(SnapshotAck("k1", 1L))
    client.get("k1") should be === Some("v1")

    replicator.send(secondary, Snapshot("k1", None, 2L))
    replicator.expectMsg(SnapshotAck("k1", 2L))
    client.get("k1") should be === None
  }

  test("case3: Secondary should drop and immediately ack snapshots with older sequence numbers") {
    import Replicator._
    val arbiter = TestProbe()
    val replicator = TestProbe()
    val secondary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case3-secondary")
    val client = session(secondary)

    arbiter.expectMsg(Join)
    arbiter.send(secondary, JoinedSecondary)

    client.get("k1") should be === None

    replicator.send(secondary, Snapshot("k1", Some("v1"), 0L))
    replicator.expectMsg(SnapshotAck("k1", 0L))
    client.get("k1") should be === Some("v1")

    // Replays of an already-seen seq are acked but must not change state.
    replicator.send(secondary, Snapshot("k1", None, 0L))
    replicator.expectMsg(SnapshotAck("k1", 0L))
    client.get("k1") should be === Some("v1")

    replicator.send(secondary, Snapshot("k1", Some("v2"), 1L))
    replicator.expectMsg(SnapshotAck("k1", 1L))
    client.get("k1") should be === Some("v2")

    replicator.send(secondary, Snapshot("k1", None, 0L))
    replicator.expectMsg(SnapshotAck("k1", 0L))
    client.get("k1") should be === Some("v2")
  }

  test("case4: Secondary should drop snapshots with future sequence numbers") {
    import Replicator._
    val arbiter = TestProbe()
    val replicator = TestProbe()
    val secondary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case4-secondary")
    val client = session(secondary)

    arbiter.expectMsg(Join)
    arbiter.send(secondary, JoinedSecondary)

    client.get("k1") should be === None

    // seq 1 arrives before seq 0: must be silently ignored (no ack at all).
    replicator.send(secondary, Snapshot("k1", Some("v1"), 1L))
    replicator.expectNoMsg(300.milliseconds)
    client.get("k1") should be === None

    replicator.send(secondary, Snapshot("k1", Some("v2"), 0L))
    replicator.expectMsg(SnapshotAck("k1", 0L))
    client.get("k1") should be === Some("v2")
  }
} | mitochon/hexercise | src/mooc/reactive/week6.kvstore/src/test/scala/kvstore/Step2_SecondarySpec.scala | Scala | mit | 3,743 |
package water.fvec
import java.io.File
import java.net.URI
import water._
import water.parser.ParseSetup
/**
* Wrapper around H2O Frame to provide more Scala-like API.
*/
class H2OFrame private (key: Key[Frame], names: Array[String], vecs: Array[Vec])
  extends Frame(key, names, vecs)
  with FrameOps {

  // Row type
  type T = Array[Option[Any]]

  // Scala DataFrame from a Frame. Simple field copy, so the Frames share
  // underlying arrays. Recommended that the input Java Frame be dead after
  // this call.
  def this(fr : Frame) = this(if (fr._key!=null) fr._key else Key.make("dframe"+Key.rand()).asInstanceOf[Key[Frame]], fr._names, fr.vecs())

  // Create DataFrame from existing Frame.
  // NOTE(review): DKV.get(k).get throws when the key is absent from the
  // distributed K/V store — confirm callers guarantee presence.
  def this(k : Key[Frame]) = this (DKV.get(k).get.asInstanceOf[Frame])

  def this(s : String) = this (Key.make(s).asInstanceOf[Key[Frame]])

  // Uniform call to load and parse any resource referenced by URI; the hex
  // name is derived from the last path segment of the first URI.
  def this(uris: URI*) = this(water.util.FrameUtils.parseFrame(
    Key.make(ParseSetup.createHexName(H2OFrame.baseName(uris(0)))),
    uris : _*))

  // Scala DataFrame by reading a CSV file
  def this(file : File) = this(file.toURI)

  // No-args public constructor for (de)serialization
  def this() = this(null,null,new Array[Vec](0))

  // Constructor side effect: every H2OFrame publishes itself into the K/V
  // store under its key.
  assert(key!=null)
  DKV.put(key, new Value(key, this))

  /** Column-projection returning a new frame sharing the selected vecs. */
  def apply(cols: Array[String]): H2OFrame = new H2OFrame(subframe(cols))

  def apply(cols: Symbol*): H2OFrame = apply(cols.map(_.name).toArray)

  override def toString(): String = super[Frame].toString()

  override def hashCode(): Int = super[Frame].hashCode()
}
/** Companion object providing factory methods to create frame
* from different sources.
*/
/** Companion factory: builds an [[H2OFrame]] from a key, frame, name, file or URI. */
object H2OFrame {
  def apply(key : Key[Frame]) = new H2OFrame(key)
  def apply(f : Frame) = new H2OFrame(f)
  def apply(s : String) = new H2OFrame(s)
  def apply(file : File) = new H2OFrame(file)
  def apply(uri : URI) = new H2OFrame(uri)

  /** Last path segment of the URI (empty when the URI ends with '/'). */
  def baseName(uri: URI) = {
    val uriText = uri.toString
    uriText.drop(uriText.lastIndexOf('/') + 1)
  }
}
| brightchen/h2o-3 | h2o-scala/src/main/scala/water/fvec/H2OFrame.scala | Scala | apache-2.0 | 2,086 |
package com.arcusys.learn.liferay.update.version270
import java.sql.Connection
import com.arcusys.learn.liferay.update.version270.slide.SlideTableComponent
import com.arcusys.valamis.persistence.common.SlickDBInfo
import com.escalatesoft.subcut.inject.NewBindingModule
import org.scalatest.{BeforeAndAfter, FunSuite}
import scala.slick.driver.{H2Driver, JdbcDriver, JdbcProfile}
import scala.slick.jdbc.JdbcBackend
/**
 * Tests for DBUpdater2711: slide-element properties must be generated for
 * slides whose title matches a known template ("Text and image") and must
 * NOT be generated for unknown template names.
 */
class CreatePropertiesForTemplateTest extends FunSuite with BeforeAndAfter {
  val driver = H2Driver
  import driver.simple._

  val db = Database.forURL("jdbc:h2:mem:createproperties", driver = "org.h2.Driver")
  // Held open across each test: closing the last connection to an H2
  // in-memory database discards its contents (see before/after).
  var connection: Connection = _

  val bindingModule = new NewBindingModule({ implicit module =>
    module.bind[SlickDBInfo] toSingle new SlickDBInfo {
      def databaseDef: JdbcBackend#DatabaseDef = db
      def slickDriver: JdbcDriver = driver
      def slickProfile: JdbcProfile = driver
    }
  })

  before {
    connection = db.source.createConnection()
    table.createSchema()
  }

  after {
    connection.close()
  }

  val table = new SlideTableComponent {
    override protected val driver: JdbcProfile = H2Driver
    def createSchema(): Unit = db.withSession { implicit s =>
      import driver.simple._
      (slides.ddl ++ slideSets.ddl ++ slideElements.ddl ++ slideElementProperties.ddl).create
    }
  }
  import table._

  val updater = new DBUpdater2711(bindingModule)
  val slideSet = SlideSet(title = "title", description = "description", courseId = 1L)

  /** Inserts a slide set, a template slide with the given title and a single
    * "text" element; returns the generated id of that element. Shared by both
    * tests below (previously duplicated inline). */
  private def insertTemplateElement(slideTitle: String) = {
    val slide = Slide(title = slideTitle, slideSetId = -1, isTemplate = true)
    val slideElement = SlideElement(None, "1", "old content", "text", -1)
    db.withSession { implicit s =>
      val slideSetId = (slideSets returning slideSets.map(_.id)) += slideSet
      val slideId = (slides returning slides.map(_.id)) += slide.copy(slideSetId = slideSetId)
      (slideElements returning slideElements.map(_.id)) += slideElement.copy(slideId = slideId)
    }
  }

  test("create properties for template <Text and image>") {
    val elementId = insertTemplateElement("Text and image")
    updater.doUpgrade()
    val stored = db.withSession { implicit s =>
      slideElementProperties.filter(_.slideElementId === elementId).list
    }
    // The "Text and image" template is expected to yield exactly 4 properties.
    assert(stored.size == 4)
  }

  test("create properties for non existing template") {
    val elementId = insertTemplateElement("Wrong name")
    updater.doUpgrade()
    val stored = db.withSession { implicit s =>
      slideElementProperties.filter(_.slideElementId === elementId).list
    }
    // Unknown template names must produce no properties at all.
    assert(stored.isEmpty)
  }
}
| igor-borisov/valamis | learn-portlet/src/test/scala/com/arcusys/learn/liferay/update/version270/CreatePropertiesForTemplateTest.scala | Scala | gpl-3.0 | 3,001 |
package com.orbitz.conifer.node.stats
import akka.actor._
import com.google.inject.Inject
import com.google.inject.name.Named
import com.hazelcast.core.{IMap, HazelcastInstance}
import scala.concurrent.duration._
import scala.collection.JavaConversions._
/**
 * Starts a periodic "tick" that drives the injected cache-stats actor.
 *
 * Fixes two defects in the original:
 *  - it built a new actor via `Props(classOf[CacheStatsActor], this)`,
 *    passing the reporter itself where the actor's constructor expects a
 *    HazelcastInstance (actor creation would fail at runtime), while the
 *    injected `statsActor` was never used;
 *  - `scheduler.schedule` requires an implicit ExecutionContext, which was
 *    not in scope.
 */
class CacheStatsReporter @Inject() (system: ActorSystem,
                                    @Named("cacheStatsActor") statsActor: ActorRef) {
  /** Schedules a "tick" message to the stats actor once per second; returns
    * the Cancellable so callers may stop the reporting. */
  def start() = {
    import system.dispatcher // ExecutionContext for the scheduler
    system.scheduler.schedule(0.milliseconds, 1.second, statsActor, "tick")
  }
}
/**
 * On every "tick", snapshots the local statistics of all Hazelcast maps whose
 * name starts with "cache-" into the distributed "stats" map, keyed by this
 * member's endpoint UUID.
 *
 * Fixes the original placeholder bug: `put(uuid, _.getLocalMapStats)` bound
 * the underscore inside `put`, passing a function object as the map value
 * instead of the stats of the iterated map (and the expression did not
 * typecheck as a foreach body). Also replaces the raw `IMap` casts with a
 * typed `collect` and hoists the "stats" map lookup out of the loop.
 */
class CacheStatsActor @Inject() (hazelcast: HazelcastInstance) extends Actor {
  override def receive: Actor.Receive = {
    case "tick" =>
      val statsMap = hazelcast.getMap[String, AnyRef]("stats")
      val localUuid = hazelcast.getLocalEndpoint.getUuid
      hazelcast.getDistributedObjects.toSeq
        .collect { case m: IMap[_, _] if m.getName.startsWith("cache-") => m }
        // NOTE(review): every matching map writes under the same UUID key, so
        // only the last map's stats survive a tick. The key probably should
        // also include the map name — verify the intended schema of "stats"
        // before changing it; the faithful per-map put is kept here.
        .foreach(m => statsMap.put(localUuid, m.getLocalMapStats))
  }
}
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.suiteprop
import org.scalatest._
import prop.TableDrivenPropertyChecks
import org.scalatest.{ freespec, funspec }
/**
 * Example suites (both `path.FunSpec` and `path.FreeSpec` flavors) used by
 * property-driven tests to verify path-trait semantics: only the first test
 * runs in the initially constructed instance, and one additional instance is
 * created (via `newInstance`) per further test. Each example bumps the shared
 * `Counts` on construction, so statement order inside the class bodies is
 * significant — do not reorder.
 */
class OnlyFirstTestExecutedOnCreationExamples extends PathSuiteExamples {

  // Mutable counters shared by all instances of one example suite: how often
  // each test body ran and how many suite instances were constructed.
  case class Counts(
    var firstTestCount: Int,
    var secondTestCount: Int,
    var instanceCount: Int
  )

  trait Services {
    // Expected values asserted by the enclosing property test; overridden by
    // examples that deviate from the common two-test/two-instance shape.
    val expectedInstanceCount = 2
    val expectedTotalTestsCount = 2
    val expectFirstTestToRunInInitialInstance = true
    val counts: Counts
  }

  type FixtureServices = Services

  // ===== path.FunSpec examples =====
  // Each constructor bumps instanceCount (via `import counts._`) before any
  // scopes/tests are registered, so instance creation itself is observable.

  class EmptyPathFunSpecExample(val counts: Counts) extends funspec.PathAnyFunSpec with Services {
    import counts._
    instanceCount += 1
    override def newInstance = new EmptyPathFunSpecExample(counts)
    override val expectedInstanceCount = 1
    override val expectedTotalTestsCount = 0
  }

  class EmptyNestedPathFunSpecExample(val counts: Counts) extends funspec.PathAnyFunSpec with Services {
    import counts._
    instanceCount += 1
    describe("A subject") {
    }
    override def newInstance = new EmptyNestedPathFunSpecExample(counts)
    override val expectedInstanceCount = 1
    override val expectedTotalTestsCount = 0
  }

  class SiblingEmptyNestedPathFunSpecExample(val counts: Counts) extends funspec.PathAnyFunSpec with Services {
    import counts._
    instanceCount += 1
    describe("A subject") {
    }
    describe("Another subject") {
    }
    override def newInstance = new SiblingEmptyNestedPathFunSpecExample(counts)
    override val expectedTotalTestsCount = 0
  }

  class OneTestSiblingEmptyNestedPathFunSpecExample(val counts: Counts) extends funspec.PathAnyFunSpec with Services {
    import counts._
    instanceCount += 1
    describe("A subject") {
    }
    describe("Another subject") {
      it("first test") { firstTestCount += 1 }
    }
    override def newInstance = new OneTestSiblingEmptyNestedPathFunSpecExample(counts)
    override val expectedTotalTestsCount = 1
    override val expectFirstTestToRunInInitialInstance = false
  }

  class OneTestSiblingEmptyDeeplyNestedPathFunSpecExample(val counts: Counts) extends funspec.PathAnyFunSpec with Services {
    import counts._
    instanceCount += 1
    describe("A subject") {
    }
    describe("Another subject") {
      describe("when created") {
        it("first test") { firstTestCount += 1 }
      }
    }
    override def newInstance = new OneTestSiblingEmptyDeeplyNestedPathFunSpecExample(counts)
    override val expectedTotalTestsCount = 1
    override val expectFirstTestToRunInInitialInstance = false
  }

  class PathFunSpecExample(val counts: Counts) extends funspec.PathAnyFunSpec with Services {
    import counts._
    instanceCount += 1
    it("first test") { firstTestCount += 1 }
    it("second test") { secondTestCount += 1 }
    override def newInstance = new PathFunSpecExample(counts)
  }

  class NestedPathFunSpecExample(val counts: Counts) extends funspec.PathAnyFunSpec with Services {
    import counts._
    instanceCount += 1
    describe("A subject") {
      it("should first test") { firstTestCount += 1 }
      it("should second test") { secondTestCount += 1 }
    }
    override def newInstance = new NestedPathFunSpecExample(counts)
  }

  class SiblingNestedPathFunSpecExample(val counts: Counts) extends funspec.PathAnyFunSpec with Services {
    import counts._
    instanceCount += 1
    describe("A subject") {
      it("should first test") { firstTestCount += 1 }
    }
    describe("Another subject") {
      it("should second test") { secondTestCount += 1 }
    }
    override def newInstance = new SiblingNestedPathFunSpecExample(counts)
  }

  class DeeplyNestedPathFunSpecExample(val counts: Counts) extends funspec.PathAnyFunSpec with Services {
    import counts._
    instanceCount += 1
    describe("A subject") {
      describe("when created") {
        it("should first test") { firstTestCount += 1 }
        it("should second test") { secondTestCount += 1 }
      }
    }
    override def newInstance = new DeeplyNestedPathFunSpecExample(counts)
  }

  class SiblingDeeplyNestedPathFunSpecExample(val counts: Counts) extends funspec.PathAnyFunSpec with Services {
    import counts._
    instanceCount += 1
    describe("A subject") {
      describe("when created") {
        it("should first test") { firstTestCount += 1 }
      }
    }
    describe("Another subject") {
      describe("when created") {
        it("should second test") { secondTestCount += 1 }
      }
    }
    override def newInstance = new SiblingDeeplyNestedPathFunSpecExample(counts)
  }

  class AsymetricalDeeplyNestedPathFunSpecExample(val counts: Counts) extends funspec.PathAnyFunSpec with Services {
    import counts._
    instanceCount += 1
    describe("A subject") {
      describe("when created") {
        it("should first test") { firstTestCount += 1 }
      }
      it("should second test") { secondTestCount += 1 }
    }
    override def newInstance = new AsymetricalDeeplyNestedPathFunSpecExample(counts)
  }

  // ===== path.FreeSpec examples (mirror the FunSpec examples above) =====

  class EmptyPathFreeSpecExample(val counts: Counts) extends freespec.PathAnyFreeSpec with Services {
    import counts._
    instanceCount += 1
    override def newInstance = new EmptyPathFreeSpecExample(counts)
    override val expectedInstanceCount = 1
    override val expectedTotalTestsCount = 0
  }

  class EmptyNestedPathFreeSpecExample(val counts: Counts) extends freespec.PathAnyFreeSpec with Services {
    import counts._
    instanceCount += 1
    "A subject" - {
    }
    override def newInstance = new EmptyNestedPathFreeSpecExample(counts)
    override val expectedInstanceCount = 1
    override val expectedTotalTestsCount = 0
  }

  class SiblingEmptyNestedPathFreeSpecExample(val counts: Counts) extends freespec.PathAnyFreeSpec with Services {
    import counts._
    instanceCount += 1
    "A subject" - {
    }
    "Another subject" - {
    }
    override def newInstance = new SiblingEmptyNestedPathFreeSpecExample(counts)
    override val expectedTotalTestsCount = 0
  }

  class OneTestSiblingEmptyNestedPathFreeSpecExample(val counts: Counts) extends freespec.PathAnyFreeSpec with Services {
    import counts._
    instanceCount += 1
    "A subject" - {
    }
    "Another subject" - {
      "first test" in { firstTestCount += 1 }
    }
    override def newInstance = new OneTestSiblingEmptyNestedPathFreeSpecExample(counts)
    override val expectedTotalTestsCount = 1
    override val expectFirstTestToRunInInitialInstance = false
  }

  class OneTestSiblingEmptyDeeplyNestedPathFreeSpecExample(val counts: Counts) extends freespec.PathAnyFreeSpec with Services {
    import counts._
    instanceCount += 1
    "A subject" - {
    }
    "Another subject" - {
      "when created" - {
        "first test" in { firstTestCount += 1 }
      }
    }
    override def newInstance = new OneTestSiblingEmptyDeeplyNestedPathFreeSpecExample(counts)
    override val expectedTotalTestsCount = 1
    override val expectFirstTestToRunInInitialInstance = false
  }

  class PathFreeSpecExample(val counts: Counts) extends freespec.PathAnyFreeSpec with Services {
    import counts._
    instanceCount += 1
    "first test" in { firstTestCount += 1 }
    "second test" in { secondTestCount += 1 }
    override def newInstance = new PathFreeSpecExample(counts)
  }

  class NestedPathFreeSpecExample(val counts: Counts) extends freespec.PathAnyFreeSpec with Services {
    import counts._
    instanceCount += 1
    "A subject" - {
      "should first test" in { firstTestCount += 1 }
      "should second test" in { secondTestCount += 1 }
    }
    override def newInstance = new NestedPathFreeSpecExample(counts)
  }

  class SiblingNestedPathFreeSpecExample(val counts: Counts) extends freespec.PathAnyFreeSpec with Services {
    import counts._
    instanceCount += 1
    "A subject" - {
      "should first test" in { firstTestCount += 1 }
    }
    "Another subject" - {
      "should second test" in { secondTestCount += 1 }
    }
    override def newInstance = new SiblingNestedPathFreeSpecExample(counts)
  }

  class DeeplyNestedPathFreeSpecExample(val counts: Counts) extends freespec.PathAnyFreeSpec with Services {
    import counts._
    instanceCount += 1
    "A subject" - {
      "when created" - {
        "should first test" in { firstTestCount += 1 }
        "should second test" in { secondTestCount += 1 }
      }
    }
    override def newInstance = new DeeplyNestedPathFreeSpecExample(counts)
  }

  class SiblingDeeplyNestedPathFreeSpecExample(val counts: Counts) extends freespec.PathAnyFreeSpec with Services {
    import counts._
    instanceCount += 1
    "A subject" - {
      "when created" - {
        "should first test" in { firstTestCount += 1 }
      }
    }
    "Another subject" - {
      "when created" - {
        "should second test" in { secondTestCount += 1 }
      }
    }
    override def newInstance = new SiblingDeeplyNestedPathFreeSpecExample(counts)
  }

  class AsymetricalDeeplyNestedPathFreeSpecExample(val counts: Counts) extends freespec.PathAnyFreeSpec with Services {
    import counts._
    instanceCount += 1
    "A subject" - {
      "when created" - {
        "should first test" in { firstTestCount += 1 }
      }
      "should second test" in { secondTestCount += 1 }
    }
    override def newInstance = new AsymetricalDeeplyNestedPathFreeSpecExample(counts)
  }

  // Fixtures consumed by PathSuiteExamples; lazy so each example suite is
  // constructed (and starts counting) only when its property row runs.
  lazy val emptyPathFunSpec = new EmptyPathFunSpecExample(Counts(0, 0, 0))
  lazy val emptyNestedPathFunSpec = new EmptyNestedPathFunSpecExample(Counts(0, 0, 0))
  lazy val siblingEmptyNestedPathFunSpec = new SiblingEmptyNestedPathFunSpecExample(Counts(0, 0, 0))
  lazy val oneTestSiblingEmptyNestedPathFunSpec = new OneTestSiblingEmptyNestedPathFunSpecExample(Counts(0, 0, 0))
  lazy val oneTestSiblingEmptyDeeplyNestedPathFunSpec = new OneTestSiblingEmptyDeeplyNestedPathFunSpecExample(Counts(0, 0, 0))
  lazy val pathFunSpec = new PathFunSpecExample(Counts(0, 0, 0))
  lazy val nestedPathFunSpec = new NestedPathFunSpecExample(Counts(0, 0, 0))
  lazy val siblingNestedPathFunSpec = new SiblingNestedPathFunSpecExample(Counts(0, 0, 0))
  lazy val deeplyNestedPathFunSpec = new DeeplyNestedPathFunSpecExample(Counts(0, 0, 0))
  lazy val siblingDeeplyNestedPathFunSpec = new SiblingDeeplyNestedPathFunSpecExample(Counts(0, 0, 0))
  lazy val asymetricalDeeplyNestedPathFunSpec = new AsymetricalDeeplyNestedPathFunSpecExample(Counts(0, 0, 0))
  lazy val emptyPathFreeSpec = new EmptyPathFreeSpecExample(Counts(0, 0, 0))
  lazy val emptyNestedPathFreeSpec = new EmptyNestedPathFreeSpecExample(Counts(0, 0, 0))
  lazy val siblingEmptyNestedPathFreeSpec = new SiblingEmptyNestedPathFreeSpecExample(Counts(0, 0, 0))
  lazy val oneTestSiblingEmptyNestedPathFreeSpec = new OneTestSiblingEmptyNestedPathFreeSpecExample(Counts(0, 0, 0))
  lazy val oneTestSiblingEmptyDeeplyNestedPathFreeSpec = new OneTestSiblingEmptyDeeplyNestedPathFreeSpecExample(Counts(0, 0, 0))
  lazy val pathFreeSpec = new PathFreeSpecExample(Counts(0, 0, 0))
  lazy val nestedPathFreeSpec = new NestedPathFreeSpecExample(Counts(0, 0, 0))
  lazy val siblingNestedPathFreeSpec = new SiblingNestedPathFreeSpecExample(Counts(0, 0, 0))
  lazy val deeplyNestedPathFreeSpec = new DeeplyNestedPathFreeSpecExample(Counts(0, 0, 0))
  lazy val siblingDeeplyNestedPathFreeSpec = new SiblingDeeplyNestedPathFreeSpecExample(Counts(0, 0, 0))
  lazy val asymetricalDeeplyNestedPathFreeSpec = new AsymetricalDeeplyNestedPathFreeSpecExample(Counts(0, 0, 0))
}
| scalatest/scalatest | jvm/scalatest-test/src/test/scala/org/scalatest/suiteprop/OnlyFirstTestExecutedOnCreationExamples.scala | Scala | apache-2.0 | 12,054 |
package utils
/**
* Created by ajay on 21/7/15.
*/
/** Base class for domain-specific exceptions that carry only a message. */
class CustomException(message: String) extends Exception(message)

/** Thrown when creating a user fails; renders as the bare message. */
case class UserCreateException(message: String) extends CustomException(message) {
  override def toString: String = message
}

/** Thrown when updating a user fails; renders as the bare message. */
case class UserUpdateException(message: String) extends CustomException(message) {
  override def toString: String = message
}

/** Thrown when creating an article fails; renders as the bare message. */
case class ArticleCreateException(message: String) extends CustomException(message) {
  override def toString: String = message
}
| ajaygeorge91/play-silhouette-neo4j-angular-seed | app/utils/CustomException.scala | Scala | apache-2.0 | 536 |
/*
* Copyright 2012 Eike Kettner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eknet.publet.gitr.webui.scripts
import org.eknet.publet.engine.scala.ScalaScript
import org.eknet.publet.web.shiro.Security
import org.eknet.publet.gitr.auth.{DefaultRepositoryStore, RepositoryModel, GitAction, RepositoryTag}
import org.eknet.publet.web.util.{PubletWeb, PubletWebContext}
import org.eknet.publet.gitr.GitRequestUtils
import org.eknet.gitr.{GitrMan, RepositoryName}
/**
 * Script endpoint that creates a new git repository for the current user
 * (or a root-level repository when requested) and answers with a JSON
 * success/error payload.
 */
class GitrCreate extends ScalaScript {

  import org.eknet.publet.web.util.RenderUtils.makeJson

  // NOTE(review): as written this pattern matches literal backslashes
  // ("[\\\\w_\\\\-]+" is the regex [\\w_\\-]+). The intent is almost certainly
  // [\w_-]+ (word chars, underscore, hyphen); the doubled escaping may be a
  // copy artifact — verify against the canonical source before changing.
  def checkName(repoName: String): Boolean = repoName.matches("[\\\\w_\\\\-]+")

  /** Validates the requested name, checks create permissions, creates the
    * repository, stores its model, and returns the JSON result. */
  def serve() = {
    // "closed" checkbox decides the repository's visibility tag.
    val tag = PubletWebContext.param("closed") match {
      case Some("on") => RepositoryTag.closed
      case _ => RepositoryTag.open
    }
    PubletWebContext.param("repositoryName") match {
      case Some(r) => {
        if (!checkName(r)) {
          makeJson(Map("success"->false, "message"->"Invalid repository name!"))
        } else {
          val login = Security.username
          // Root repositories live at the top level; user repositories are
          // namespaced as "<login>/<name>".
          val rootRepo = PubletWebContext.param("rootProject").collect({ case "on" => true}).getOrElse(false)
          val normName = if (rootRepo) r else login +"/"+ r
          // Permission checks throw on failure: createRoot additionally for
          // top-level repositories, create always.
          if (rootRepo) {
            GitRequestUtils.checkGitAction(GitAction.createRoot, RepositoryModel(normName, RepositoryTag.open, login))
          }
          GitRequestUtils.checkGitAction(GitAction.create, RepositoryModel(normName, RepositoryTag.open, login))
          val repoName = RepositoryName(normName)
          // If a repository with this name exists, answer with an error;
          // otherwise create it (bare), apply the optional description and
          // persist the model.
          PubletWeb.instance[GitrMan].get.get(repoName).map(x=> error("Repository already exists"))
            .getOrElse {
              val newRepo = PubletWeb.instance[GitrMan].get.create(repoName, true)
              PubletWebContext.param("description")
                .collect({case d if (!d.isEmpty) => d})
                .foreach(desc => newRepo.setDescription(desc))
              PubletWeb.instance[DefaultRepositoryStore].get.updateRepository(RepositoryModel(normName, tag, login))
              makeJson(Map(
                "success" -> true,
                "message" -> "Repository successfully created.",
                "giturl" -> PubletWebContext.urlOf("/git/"+ repoName.nameDotGit)
              ))
            }
        }
      }
      case _ => {
        error("No repository name given.")
      }
    }
  }

  /** JSON error payload helper. */
  def error(str:String) = makeJson(Map("success"->false, "message"->str))
}
package a17
// Puzzle/demo code: a type-level "list" encoded as nested classes. Each level
// exposes a `Head` type member (a single-character class presumably defined
// elsewhere in package a17 — verify) and a `Tail` type member pointing at the
// next nested level. The Chinese class names are identifiers referenced by
// other code and must not be translated or renamed.
class 第七重 {
  type Tail = 开始
  type Head = 我
  class 开始 {
    type Tail = 继续
    type Head = 叼
    class 继续 {
      type Tail = 余根
      type Head = 你
      class 余根 {
        type Tail = 开始辛苦
        type Head = 老
        class 开始辛苦 {
          type Tail = 顶唔顺
          type Head = 味
          class 顶唔顺 {
            type Tail = 每次都重构类型系统_裸命咩
            type Head = 个
            class 每次都重构类型系统_裸命咩 {
              type Tail = 入咗恶人谷扑街
              type Head = 西
              // Terminal level: no further Head/Tail members.
              class 入咗恶人谷扑街
            }
          }
        }
      }
    }
  }
}
| djx314/ubw | a17-432/src/main/scala/第七重.scala | Scala | bsd-3-clause | 713 |
package collection
// Sample data point: grid coordinates, dwell time (mutable) and SKU info.
case class DMPoint1(x: Integer, y: Integer, var stay_seconds: Integer, sku_id: String, sku_action: String)

// Aggregated variant carrying a list of sku -> action maps (unused below).
case class DM(x: Integer, y: Integer, var stay_seconds: Integer, list: List[Map[String, String]])

/** Demo: group points by (x, y) and sum dwell seconds within each group. */
object ListTest extends App {
  val site: List[DMPoint1] = List(
    DMPoint1(2, 3, 3, "1", "3"),
    DMPoint1(2, 3, 3, "1", "3"),
    DMPoint1(2, 4, 3, "1", "3"),
    DMPoint1(2, 4, 3, "1", "3"))

  val groupByXY = site.groupBy(point => (point.x, point.y))
  println(groupByXY)

  // Collapse each group to one point whose stay_seconds is the group's sum;
  // the remaining fields are taken from the group's first element.
  val sum = groupByXY.mapValues { points =>
    points.reduce { (a, b) =>
      DMPoint1(a.x, a.y, a.stay_seconds + b.stay_seconds, a.sku_id, a.sku_action)
    }
  }
  println(sum.values.toList)
}
| chocolateBlack/LearningSpark | src/main/scala/collection/ListTest.scala | Scala | mit | 953 |
/*
* FutureConstant.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.fscape.lucre.graph.impl
import de.sciss.fscape.UGen.Adjunct
import de.sciss.fscape.lucre.stream
import de.sciss.fscape.stream.{BufElem, Control, StreamIn, StreamOut, StreamType, Builder => SBuilder}
import de.sciss.fscape.{UGen, UGenGraph, UGenIn, UGenInLike, UGenSource}
import de.sciss.synth.UGenSource.Vec
import scala.concurrent.Future
/**
 * Zero-input UGen whose single output is produced from a value that becomes
 * available asynchronously (the `fut` function is evaluated against the
 * stream control at expansion time).
 */
final case class FutureConstant[A, E <: BufElem[A]](adj: Adjunct, fut: Control => Future[A])
                                                   (implicit tpe: StreamType[A, E])
  extends UGenSource.SingleOut {

  // No inputs: expand directly with an empty argument vector.
  protected def makeUGens(implicit b: UGenGraph.Builder): UGenInLike = makeUGen(Vector.empty)

  protected def makeUGen(args: Vec[UGenIn])(implicit b: UGenGraph.Builder): UGenInLike =
    UGen.SingleOut(this, args, adjuncts = adj :: Nil, isIndividual = true)

  private[fscape] def makeStream(args: Vec[StreamIn])(implicit b: SBuilder): StreamOut =
    tpe.mkStreamOut(stream.FutureConstant[A, E](fut))
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector
import java.io.File
import java.util
import java.util.OptionalLong
import scala.collection.JavaConverters._
import test.org.apache.spark.sql.connector._
import org.apache.spark.SparkException
import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.catalog.{SupportsRead, Table, TableCapability, TableProvider}
import org.apache.spark.sql.connector.catalog.TableCapability._
import org.apache.spark.sql.connector.expressions.Transform
import org.apache.spark.sql.connector.read._
import org.apache.spark.sql.connector.read.partitioning.{ClusteredDistribution, Distribution, Partitioning}
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, DataSourceV2Relation, DataSourceV2ScanRelation}
import org.apache.spark.sql.execution.exchange.{Exchange, ShuffleExchangeExec}
import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector
import org.apache.spark.sql.functions._
import org.apache.spark.sql.sources.{Filter, GreaterThan}
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{IntegerType, StructType}
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.sql.vectorized.ColumnarBatch
class DataSourceV2Suite extends QueryTest with SharedSparkSession with AdaptiveSparkPlanHelper {
import testImplicits._
private def getBatch(query: DataFrame): AdvancedBatch = {
query.queryExecution.executedPlan.collect {
case d: BatchScanExec =>
d.batch.asInstanceOf[AdvancedBatch]
}.head
}
private def getJavaBatch(query: DataFrame): JavaAdvancedDataSourceV2.AdvancedBatch = {
query.queryExecution.executedPlan.collect {
case d: BatchScanExec =>
d.batch.asInstanceOf[JavaAdvancedDataSourceV2.AdvancedBatch]
}.head
}
test("simplest implementation") {
Seq(classOf[SimpleDataSourceV2], classOf[JavaSimpleDataSourceV2]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, (0 until 10).map(i => Row(i, -i)))
checkAnswer(df.select('j), (0 until 10).map(i => Row(-i)))
checkAnswer(df.filter('i > 5), (6 until 10).map(i => Row(i, -i)))
}
}
}
  // Verifies column pruning and filter push-down reporting for both the
  // Scala and Java "advanced" sources: the batch must expose exactly the
  // pushed filters and the pruned schema for each query shape.
  test("advanced implementation") {
    Seq(classOf[AdvancedDataSourceV2], classOf[JavaAdvancedDataSourceV2]).foreach { cls =>
      withClue(cls.getName) {
        val df = spark.read.format(cls.getName).load()
        checkAnswer(df, (0 until 10).map(i => Row(i, -i)))

        // Projection only: no filters pushed, schema pruned to "j".
        val q1 = df.select('j)
        checkAnswer(q1, (0 until 10).map(i => Row(-i)))
        if (cls == classOf[AdvancedDataSourceV2]) {
          val batch = getBatch(q1)
          assert(batch.filters.isEmpty)
          assert(batch.requiredSchema.fieldNames === Seq("j"))
        } else {
          val batch = getJavaBatch(q1)
          assert(batch.filters.isEmpty)
          assert(batch.requiredSchema.fieldNames === Seq("j"))
        }

        // Filter on "i": the predicate is pushed, full schema retained.
        val q2 = df.filter('i > 3)
        checkAnswer(q2, (4 until 10).map(i => Row(i, -i)))
        if (cls == classOf[AdvancedDataSourceV2]) {
          val batch = getBatch(q2)
          assert(batch.filters.flatMap(_.references).toSet == Set("i"))
          assert(batch.requiredSchema.fieldNames === Seq("i", "j"))
        } else {
          val batch = getJavaBatch(q2)
          assert(batch.filters.flatMap(_.references).toSet == Set("i"))
          assert(batch.requiredSchema.fieldNames === Seq("i", "j"))
        }

        // Projection plus filter on the same column.
        val q3 = df.select('i).filter('i > 6)
        checkAnswer(q3, (7 until 10).map(i => Row(i)))
        if (cls == classOf[AdvancedDataSourceV2]) {
          val batch = getBatch(q3)
          assert(batch.filters.flatMap(_.references).toSet == Set("i"))
          assert(batch.requiredSchema.fieldNames === Seq("i"))
        } else {
          val batch = getJavaBatch(q3)
          assert(batch.filters.flatMap(_.references).toSet == Set("i"))
          assert(batch.requiredSchema.fieldNames === Seq("i"))
        }

        val q4 = df.select('j).filter('j < -10)
        checkAnswer(q4, Nil)
        if (cls == classOf[AdvancedDataSourceV2]) {
          val batch = getBatch(q4)
          // 'j < 10 is not supported by the testing data source.
          assert(batch.filters.isEmpty)
          assert(batch.requiredSchema.fieldNames === Seq("j"))
        } else {
          val batch = getJavaBatch(q4)
          // 'j < 10 is not supported by the testing data source.
          assert(batch.filters.isEmpty)
          assert(batch.requiredSchema.fieldNames === Seq("j"))
        }
      }
    }
  }
test("columnar batch scan implementation") {
Seq(classOf[ColumnarDataSourceV2], classOf[JavaColumnarDataSourceV2]).foreach { cls =>
withClue(cls.getName) {
val df = spark.read.format(cls.getName).load()
checkAnswer(df, (0 until 90).map(i => Row(i, -i)))
checkAnswer(df.select('j), (0 until 90).map(i => Row(-i)))
checkAnswer(df.filter('i > 50), (51 until 90).map(i => Row(i, -i)))
}
}
}
test("schema required data source") {
Seq(classOf[SchemaRequiredDataSource], classOf[JavaSchemaRequiredDataSource]).foreach { cls =>
withClue(cls.getName) {
val e = intercept[IllegalArgumentException](spark.read.format(cls.getName).load())
assert(e.getMessage.contains("requires a user-supplied schema"))
val schema = new StructType().add("i", "int").add("s", "string")
val df = spark.read.format(cls.getName).schema(schema).load()
assert(df.schema == schema)
assert(df.collect().isEmpty)
}
}
}
  // Regression test for SPARK-33369: a provider that supports external
  // metadata must allow save() without schema inference, and a subsequent
  // schema-supplied read must see the written data.
  test("SPARK-33369: Skip schema inference in DataframeWriter.save() if table provider " +
    "supports external metadata") {
    withTempDir { dir =>
      val cls = classOf[SupportsExternalMetadataWritableDataSource].getName
      spark.range(10).select('id as 'i, -'id as 'j).write.format(cls)
        .option("path", dir.getCanonicalPath).mode("append").save()
      val schema = new StructType().add("i", "long").add("j", "long")
      checkAnswer(
        spark.read.format(cls).option("path", dir.getCanonicalPath).schema(schema).load(),
        spark.range(10).select('id, -'id))
    }
  }
  // Verifies that a source reporting a clustered distribution on "i" lets
  // Spark elide shuffles for aggregations keyed (exactly or more finely) on
  // the clustering column, while other grouping keys still require one.
  test("partitioning reporting") {
    import org.apache.spark.sql.functions.{count, sum}
    Seq(classOf[PartitionAwareDataSource], classOf[JavaPartitionAwareDataSource]).foreach { cls =>
      withClue(cls.getName) {
        val df = spark.read.format(cls.getName).load()
        checkAnswer(df, Seq(Row(1, 4), Row(1, 4), Row(3, 6), Row(2, 6), Row(4, 2), Row(4, 2)))

        // Grouping on the clustered column: no shuffle expected.
        val groupByColA = df.groupBy('i).agg(sum('j))
        checkAnswer(groupByColA, Seq(Row(1, 8), Row(2, 6), Row(3, 6), Row(4, 4)))
        assert(collectFirst(groupByColA.queryExecution.executedPlan) {
          case e: ShuffleExchangeExec => e
        }.isEmpty)

        // Grouping on a superset of the clustering key: still no shuffle.
        val groupByColAB = df.groupBy('i, 'j).agg(count("*"))
        checkAnswer(groupByColAB, Seq(Row(1, 4, 2), Row(2, 6, 1), Row(3, 6, 1), Row(4, 2, 2)))
        assert(collectFirst(groupByColAB.queryExecution.executedPlan) {
          case e: ShuffleExchangeExec => e
        }.isEmpty)

        // Grouping on a non-clustered column requires a shuffle.
        val groupByColB = df.groupBy('j).agg(sum('i))
        checkAnswer(groupByColB, Seq(Row(2, 8), Row(4, 2), Row(6, 5)))
        assert(collectFirst(groupByColB.queryExecution.executedPlan) {
          case e: ShuffleExchangeExec => e
        }.isDefined)

        // Grouping on an expression over the key also requires a shuffle.
        val groupByAPlusB = df.groupBy('i + 'j).agg(count("*"))
        checkAnswer(groupByAPlusB, Seq(Row(5, 2), Row(6, 2), Row(8, 1), Row(9, 1)))
        assert(collectFirst(groupByAPlusB.queryExecution.executedPlan) {
          case e: ShuffleExchangeExec => e
        }.isDefined)
      }
    }
  }
  // The optimizer-visible scan relation must surface the row count and size
  // statistics reported by the data source itself.
  test ("statistics report data source") {
    Seq(classOf[ReportStatisticsDataSource], classOf[JavaReportStatisticsDataSource]).foreach {
      cls =>
        withClue(cls.getName) {
          val df = spark.read.format(cls.getName).load()
          val logical = df.queryExecution.optimizedPlan.collect {
            case d: DataSourceV2ScanRelation => d
          }.head

          val statics = logical.computeStats()
          assert(statics.rowCount.isDefined && statics.rowCount.get === 10,
            "Row count statics should be reported by data source")
          assert(statics.sizeInBytes === 80,
            "Size in bytes statics should be reported by data source")
        }
    }
  }
  // Regression test for SPARK-23574: aggregating a single-partition source
  // must not introduce any exchange (shuffle) in the physical plan.
  test("SPARK-23574: no shuffle exchange with single partition") {
    val df = spark.read.format(classOf[SimpleSinglePartitionSource].getName).load().agg(count("*"))
    assert(df.queryExecution.executedPlan.collect { case e: Exchange => e }.isEmpty)
  }
  // End-to-end write-path test: append/overwrite semantics, rejection of
  // unsupported save modes, and transactional rollback on task failure.
  test("simple writable data source") {
    // TODO: java implementation.
    Seq(classOf[SimpleWritableDataSource]).foreach { cls =>
      withTempPath { file =>
        val path = file.getCanonicalPath
        assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)

        spark.range(10).select('id as 'i, -'id as 'j).write.format(cls.getName)
          .option("path", path).mode("append").save()
        checkAnswer(
          spark.read.format(cls.getName).option("path", path).load(),
          spark.range(10).select('id, -'id))

        // default save mode is ErrorIfExists
        intercept[AnalysisException] {
          spark.range(10).select('id as 'i, -'id as 'j).write.format(cls.getName)
            .option("path", path).save()
        }
        spark.range(10).select('id as 'i, -'id as 'j).write.mode("append").format(cls.getName)
          .option("path", path).save()
        checkAnswer(
          spark.read.format(cls.getName).option("path", path).load(),
          spark.range(10).union(spark.range(10)).select('id, -'id))

        spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
          .option("path", path).mode("overwrite").save()
        checkAnswer(
          spark.read.format(cls.getName).option("path", path).load(),
          spark.range(5).select('id, -'id))

        // Ignore and ErrorIfExists are rejected by this source.
        val e = intercept[AnalysisException] {
          spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
            .option("path", path).mode("ignore").save()
        }
        assert(e.message.contains("please use Append or Overwrite modes instead"))

        val e2 = intercept[AnalysisException] {
          spark.range(5).select('id as 'i, -'id as 'j).write.format(cls.getName)
            .option("path", path).mode("error").save()
        }
        assert(e2.getMessage.contains("please use Append or Overwrite modes instead"))

        // test transaction
        val failingUdf = org.apache.spark.sql.functions.udf {
          var count = 0
          (id: Long) => {
            if (count > 5) {
              throw new RuntimeException("testing error")
            }
            count += 1
            id
          }
        }
        // this input data will fail to read middle way.
        val input = spark.range(15).select(failingUdf('id).as('i)).select('i, -'i as 'j)
        val e3 = intercept[SparkException] {
          input.write.format(cls.getName).option("path", path).mode("overwrite").save()
        }
        assert(e3.getMessage.contains("Writing job aborted"))
        // make sure we don't have partial data.
        assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)
      }
    }
  }
test("simple counter in writer with onDataWriterCommit") {
  Seq(classOf[SimpleWritableDataSource]).foreach { cls =>
    withTempPath { file =>
      val path = file.getCanonicalPath
      // The target path starts out empty.
      assert(spark.read.format(cls.getName).option("path", path).load().collect().isEmpty)

      val numPartition = 6
      spark.range(0, 10, 1, numPartition).select('id as 'i, -'id as 'j).write.format(cls.getName)
        .mode("append").option("path", path).save()
      checkAnswer(
        spark.read.format(cls.getName).option("path", path).load(),
        spark.range(10).select('id, -'id))

      // SimpleCounter is bumped once per committed data writer, i.e. once per partition.
      assert(SimpleCounter.getCounter == numPartition,
        "method onDataWriterCommit should be called as many as the number of partitions")
    }
  }
}
test("SPARK-23293: data source v2 self join") {
  // Joining a V2 relation with a projection of itself must resolve columns correctly.
  val base = spark.read.format(classOf[SimpleDataSourceV2].getName).load()
  val shifted = base.select(($"i" + 1).as("k"), $"j")
  val expected = (0 until 10).map(i => Row(-i, i, i + 1))
  checkAnswer(base.join(shifted, "j"), expected)
}
test("SPARK-23301: column pruning with arbitrary expressions") {
  val df = spark.read.format(classOf[AdvancedDataSourceV2].getName).load()

  // Pruning through an expression over a single column keeps only that column.
  val q1 = df.select('i + 1)
  checkAnswer(q1, (1 until 11).map(i => Row(i)))
  val batch1 = getBatch(q1)
  assert(batch1.requiredSchema.fieldNames === Seq("i"))

  // A constant-only projection requires no columns at all.
  val q2 = df.select(lit(1))
  checkAnswer(q2, (0 until 10).map(i => Row(1)))
  val batch2 = getBatch(q2)
  assert(batch2.requiredSchema.isEmpty)

  // 'j === -1 can't be pushed down, but we should still be able to do column pruning
  val q3 = df.filter('j === -1).select('j * 2)
  checkAnswer(q3, Row(-2))
  val batch3 = getBatch(q3)
  assert(batch3.filters.isEmpty)
  assert(batch3.requiredSchema.fieldNames === Seq("j"))

  // column pruning should work with other operators.
  val q4 = df.sort('i).limit(1).select('i + 1)
  checkAnswer(q4, Row(1))
  val batch4 = getBatch(q4)
  assert(batch4.requiredSchema.fieldNames === Seq("i"))
}
test("SPARK-23315: get output from canonicalized data source v2 related plans") {
  // Canonicalized plans must still expose their output; the physical scan's output
  // shrinks when columns are pruned while the logical relation keeps both columns.
  def checkCanonicalizedOutput(
      df: DataFrame, logicalNumOutput: Int, physicalNumOutput: Int): Unit = {
    val logical = df.queryExecution.logical.collect {
      case d: DataSourceV2Relation => d
    }.head
    assert(logical.canonicalized.output.length == logicalNumOutput)

    val physical = df.queryExecution.executedPlan.collect {
      case d: BatchScanExec => d
    }.head
    assert(physical.canonicalized.output.length == physicalNumOutput)
  }

  val df = spark.read.format(classOf[AdvancedDataSourceV2].getName).load()
  checkCanonicalizedOutput(df, 2, 2)
  checkCanonicalizedOutput(df.select('i), 2, 1)
}
test("SPARK-25425: extra options should override sessions options during reading") {
  val prefix = "spark.datasource.userDefinedDataSource."
  val optionName = "optionA"
  withSQLConf(prefix + optionName -> "true") {
    // The per-read option must take precedence over the session-level configuration.
    val df = spark.read
      .option(optionName, false)
      .format(classOf[DataSourceV2WithSessionConfig].getName)
      .load()
    val relationOptions = df.queryExecution.logical.collectFirst {
      case d: DataSourceV2Relation => d.options
    }.get
    assert(relationOptions.get(optionName) === "false")
  }
}
test("SPARK-25425: extra options should override sessions options during writing") {
  withTempPath { path =>
    val sessionPath = path.getCanonicalPath
    withSQLConf("spark.datasource.simpleWritableDataSource.path" -> sessionPath) {
      withTempPath { file =>
        val optionPath = file.getCanonicalPath
        val format = classOf[SimpleWritableDataSource].getName
        val df = Seq((1L, 2L)).toDF("i", "j")
        df.write.format(format).mode("append").option("path", optionPath).save()
        // The explicit write option wins: nothing lands at the session-configured path.
        assert(!new File(sessionPath).exists)
        checkAnswer(spark.read.format(format).option("path", optionPath).load(), df)
      }
    }
  }
}
test("SPARK-27411: DataSourceV2Strategy should not eliminate subquery") {
  withTempView("t1") {
    val t2 = spark.read.format(classOf[SimpleDataSourceV2].getName).load()
    Seq(2, 3).toDF("a").createTempView("t1")
    val df = t2.where("i < (select max(a) from t1)").select('i)
    // The scalar subquery must survive planning of the V2 scan.
    val subqueries = stripAQEPlan(df.queryExecution.executedPlan).collect {
      case p => p.subqueries
    }.flatten
    assert(subqueries.length == 1)
    checkAnswer(df, (0 until 3).map(i => Row(i)))
  }
}
test("SPARK-32609: DataSourceV2 with different pushedfilters should be different") {
  def getScanExec(query: DataFrame): BatchScanExec =
    query.queryExecution.executedPlan.collect { case d: BatchScanExec => d }.head

  for (cls <- Seq(classOf[AdvancedDataSourceV2], classOf[JavaAdvancedDataSourceV2])) {
    withClue(cls.getName) {
      val df = spark.read.format(cls.getName).load()
      // Scans that differ only in the pushed-down predicate must not compare equal.
      val scanGt6 = getScanExec(df.select('i).filter('i > 6))
      val scanGt5 = getScanExec(df.select('i).filter('i > 5))
      assert(!scanGt6.equals(scanGt5))
    }
  }
}
test("SPARK-33267: push down with condition 'in (..., null)' should not throw NPE") {
  for (cls <- Seq(classOf[AdvancedDataSourceV2], classOf[JavaAdvancedDataSourceV2])) {
    withClue(cls.getName) {
      // Before SPARK-33267 this query threw an NPE during filter push-down.
      val df = spark.read.format(cls.getName).load()
      df.select('i).where("i in (1, null)").collect()
    }
  }
}
}
// An input partition that simply carries the half-open row range [start, end).
case class RangeInputPartition(start: Int, end: Int) extends InputPartition
object SimpleReaderFactory extends PartitionReaderFactory {
  /** Builds a row reader that emits (n, -n) for every n in the partition's [start, end) range. */
  override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
    val RangeInputPartition(start, end) = partition
    new PartitionReader[InternalRow] {
      // Positioned one before the first row; next() advances and checks the bound.
      private var cursor = start - 1

      override def next(): Boolean = {
        cursor += 1
        cursor < end
      }

      override def get(): InternalRow = InternalRow(cursor, -cursor)

      override def close(): Unit = {}
    }
  }
}
// Base Table for the test sources: fixed (i, j) schema, batch-read capability only.
abstract class SimpleBatchTable extends Table with SupportsRead  {
  override def schema(): StructType = TestingV2Source.schema

  override def name(): String = this.getClass.toString

  override def capabilities(): util.Set[TableCapability] = Set(BATCH_READ).asJava
}

// Base ScanBuilder that acts as its own Scan and Batch, reading the fixed test
// schema through SimpleReaderFactory.
abstract class SimpleScanBuilder extends ScanBuilder
  with Batch with Scan {

  override def build(): Scan = this

  override def toBatch: Batch = this

  override def readSchema(): StructType = TestingV2Source.schema

  override def createReaderFactory(): PartitionReaderFactory = SimpleReaderFactory
}
// Base TableProvider for the test sources: schema inference always yields the fixed
// (i, j) schema, and the schema/partitioning overload delegates to the options-only one.
trait TestingV2Source extends TableProvider {

  override def inferSchema(options: CaseInsensitiveStringMap): StructType = {
    TestingV2Source.schema
  }

  override def getTable(
      schema: StructType,
      partitioning: Array[Transform],
      properties: util.Map[String, String]): Table = {
    getTable(new CaseInsensitiveStringMap(properties))
  }

  def getTable(options: CaseInsensitiveStringMap): Table
}

object TestingV2Source {
  // Two int columns shared by every testing source in this suite.
  val schema = new StructType().add("i", "int").add("j", "int")
}
// A source whose scan plans exactly one input partition (rows 0 until 5).
class SimpleSinglePartitionSource extends TestingV2Source {

  class MyScanBuilder extends SimpleScanBuilder {
    override def planInputPartitions(): Array[InputPartition] = {
      Array(RangeInputPartition(0, 5))
    }
  }

  override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
    override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
      new MyScanBuilder()
    }
  }
}
// This class is used by pyspark tests. If this class is modified/moved, make sure pyspark
// tests still pass.
// A source that plans two range partitions: [0, 5) and [5, 10).
class SimpleDataSourceV2 extends TestingV2Source {

  class MyScanBuilder extends SimpleScanBuilder {
    override def planInputPartitions(): Array[InputPartition] = {
      Array(RangeInputPartition(0, 5), RangeInputPartition(5, 10))
    }
  }

  override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
    override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
      new MyScanBuilder()
    }
  }
}
// A source whose ScanBuilder supports filter push-down and column pruning.
class AdvancedDataSourceV2 extends TestingV2Source {

  override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
    override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
      new AdvancedScanBuilder()
    }
  }
}

// ScanBuilder supporting push-down of `i > <int>` filters and column pruning.
class AdvancedScanBuilder extends ScanBuilder
  with Scan with SupportsPushDownFilters with SupportsPushDownRequiredColumns {

  // Narrowed by pruneColumns; starts as the full (i, j) schema.
  var requiredSchema = TestingV2Source.schema
  // Filters accepted in pushFilters.
  var filters = Array.empty[Filter]

  override def pruneColumns(requiredSchema: StructType): Unit = {
    this.requiredSchema = requiredSchema
  }

  override def readSchema(): StructType = requiredSchema

  override def pushFilters(filters: Array[Filter]): Array[Filter] = {
    // Only GreaterThan("i", <int>) is handled by the source; everything else is
    // returned so Spark evaluates it after the scan.
    val (supported, unsupported) = filters.partition {
      case GreaterThan("i", _: Int) => true
      case _ => false
    }
    this.filters = supported
    unsupported
  }

  override def pushedFilters(): Array[Filter] = filters

  override def build(): Scan = this

  override def toBatch: Batch = new AdvancedBatch(filters, requiredSchema)
}
class AdvancedBatch(val filters: Array[Filter], val requiredSchema: StructType) extends Batch {

  override def planInputPartitions(): Array[InputPartition] = {
    // Honour a pushed-down `i > v` predicate by shrinking the planned row ranges.
    val lowerBound = filters.collectFirst { case GreaterThan("i", v: Int) => v }
    lowerBound match {
      case None =>
        Array[InputPartition](RangeInputPartition(0, 5), RangeInputPartition(5, 10))
      case Some(v) if v < 4 =>
        Array[InputPartition](RangeInputPartition(v + 1, 5), RangeInputPartition(5, 10))
      case Some(v) if v < 9 =>
        Array[InputPartition](RangeInputPartition(v + 1, 10))
      case _ =>
        // Bound at or above the last row: nothing to scan.
        Array.empty[InputPartition]
    }
  }

  override def createReaderFactory(): PartitionReaderFactory =
    new AdvancedReaderFactory(requiredSchema)
}
// Reader factory honouring column pruning: emits only the requested columns, where
// column "i" is the current row number and "j" its negation.
class AdvancedReaderFactory(requiredSchema: StructType) extends PartitionReaderFactory {

  override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
    val RangeInputPartition(start, end) = partition
    new PartitionReader[InternalRow] {
      private var current = start - 1

      override def next(): Boolean = {
        current += 1
        current < end
      }

      override def get(): InternalRow = {
        // Assemble values in the order dictated by the pruned schema.
        val values = requiredSchema.map(_.name).map {
          case "i" => current
          case "j" => -current
        }
        InternalRow.fromSeq(values)
      }

      override def close(): Unit = {}
    }
  }
}
// A source that cannot infer its schema: callers must supply one, which is then used
// verbatim for both the table and the scan.
class SchemaRequiredDataSource extends TableProvider {

  class MyScanBuilder(schema: StructType) extends SimpleScanBuilder {
    override def planInputPartitions(): Array[InputPartition] = Array.empty
    override def readSchema(): StructType = schema
  }

  override def supportsExternalMetadata(): Boolean = true

  override def inferSchema(options: CaseInsensitiveStringMap): StructType = {
    throw new IllegalArgumentException("requires a user-supplied schema")
  }

  override def getTable(
      schema: StructType,
      partitioning: Array[Transform],
      properties: util.Map[String, String]): Table = {
    val userGivenSchema = schema
    new SimpleBatchTable {
      override def schema(): StructType = userGivenSchema

      override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
        new MyScanBuilder(userGivenSchema)
      }
    }
  }
}
// A source that serves its rows through the columnar read path (see
// ColumnarReaderFactory) over two range partitions.
class ColumnarDataSourceV2 extends TestingV2Source {

  class MyScanBuilder extends SimpleScanBuilder {
    override def planInputPartitions(): Array[InputPartition] = {
      Array(RangeInputPartition(0, 50), RangeInputPartition(50, 90))
    }

    override def createReaderFactory(): PartitionReaderFactory = {
      ColumnarReaderFactory
    }
  }

  override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
    override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
      new MyScanBuilder()
    }
  }
}
// Factory producing columnar-only readers that fill on-heap vectors with (n, -n)
// values in chunks of BATCH_SIZE rows.
object ColumnarReaderFactory extends PartitionReaderFactory {
  private final val BATCH_SIZE = 20

  override def supportColumnarReads(partition: InputPartition): Boolean = true

  // Row-based reads are not supported; only columnar batches are produced.
  override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
    throw new UnsupportedOperationException
  }

  override def createColumnarReader(partition: InputPartition): PartitionReader[ColumnarBatch] = {
    val RangeInputPartition(start, end) = partition
    new PartitionReader[ColumnarBatch] {
      // The same vectors/batch are reset and refilled on every next() call, so get()
      // always returns the most recently filled batch.
      private lazy val i = new OnHeapColumnVector(BATCH_SIZE, IntegerType)
      private lazy val j = new OnHeapColumnVector(BATCH_SIZE, IntegerType)
      private lazy val batch = new ColumnarBatch(Array(i, j))

      private var current = start

      override def next(): Boolean = {
        i.reset()
        j.reset()

        var count = 0
        while (current < end && count < BATCH_SIZE) {
          i.putInt(count, current)
          j.putInt(count, -current)
          current += 1
          count += 1
        }

        if (count == 0) {
          false
        } else {
          batch.setNumRows(count)
          true
        }
      }

      override def get(): ColumnarBatch = batch

      override def close(): Unit = batch.close()
    }
  }
}
// A source that reports its own output partitioning (clustered by column "i"),
// allowing group-by queries on `i` to skip the shuffle exchange.
class PartitionAwareDataSource extends TestingV2Source {

  class MyScanBuilder extends SimpleScanBuilder
    with SupportsReportPartitioning{

    override def planInputPartitions(): Array[InputPartition] = {
      // Note that we don't have same value of column `i` across partitions.
      Array(
        SpecificInputPartition(Array(1, 1, 3), Array(4, 4, 6)),
        SpecificInputPartition(Array(2, 4, 4), Array(6, 2, 2)))
    }

    override def createReaderFactory(): PartitionReaderFactory = {
      SpecificReaderFactory
    }

    override def outputPartitioning(): Partitioning = new MyPartitioning
  }

  override def getTable(options: CaseInsensitiveStringMap): Table = new SimpleBatchTable {
    override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
      new MyScanBuilder()
    }
  }

  // Satisfies any clustered distribution that includes column "i".
  class MyPartitioning extends Partitioning {
    override def numPartitions(): Int = 2

    override def satisfy(distribution: Distribution): Boolean = distribution match {
      case c: ClusteredDistribution => c.clusteredColumns.contains("i")
      case _ => false
    }
  }
}
// An input partition carrying explicit, pre-computed values for columns i and j.
case class SpecificInputPartition(i: Array[Int], j: Array[Int]) extends InputPartition

object SpecificReaderFactory extends PartitionReaderFactory {
  /** Reads the explicit (i, j) value pairs carried by a SpecificInputPartition. */
  override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
    val p = partition.asInstanceOf[SpecificInputPartition]
    new PartitionReader[InternalRow] {
      private var idx = -1

      override def next(): Boolean = {
        idx += 1
        idx < p.i.length
      }

      override def get(): InternalRow = InternalRow(p.i(idx), p.j(idx))

      override def close(): Unit = {}
    }
  }
}
// Thrown when a test table's schema is read even though it should never be needed.
class SchemaReadAttemptException(m: String) extends RuntimeException(m)

// A writable source whose schema accessor always fails, so any read of the schema
// during a pure write is detected.
class SimpleWriteOnlyDataSource extends SimpleWritableDataSource {

  override def getTable(options: CaseInsensitiveStringMap): Table = {
    new MyTable(options) {
      override def schema(): StructType = {
        throw new SchemaReadAttemptException("schema should not be read.")
      }
    }
  }
}
// A writable source declaring external-metadata support; since the writer supplies
// the schema, Spark must never fall back to schema inference.
class SupportsExternalMetadataWritableDataSource extends SimpleWritableDataSource {
  override def supportsExternalMetadata(): Boolean = true

  /** Fails loudly if Spark attempts schema inference despite external metadata support. */
  override def inferSchema(options: CaseInsensitiveStringMap): StructType = {
    throw new IllegalArgumentException(
      "Dataframe writer should not require inferring table schema when the data source" +
        " supports external metadata.")
  }
}
// A source that reports fixed statistics (80 bytes, 10 rows) via SupportsReportStatistics.
class ReportStatisticsDataSource extends SimpleWritableDataSource {

  class MyScanBuilder extends SimpleScanBuilder
    with SupportsReportStatistics {

    override def estimateStatistics(): Statistics = {
      new Statistics {
        override def sizeInBytes(): OptionalLong = OptionalLong.of(80)
        override def numRows(): OptionalLong = OptionalLong.of(10)
      }
    }

    override def planInputPartitions(): Array[InputPartition] = {
      Array(RangeInputPartition(0, 5), RangeInputPartition(5, 10))
    }
  }

  override def getTable(options: CaseInsensitiveStringMap): Table = {
    new SimpleBatchTable {
      override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
        new MyScanBuilder
      }
    }
  }
}
| witgo/spark | sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2Suite.scala | Scala | apache-2.0 | 29,117 |
package com.sksamuel.avro4s.schemas
import com.sksamuel.avro4s.SchemaFor
import com.sksamuel.avro4s.avroutils.SchemaHelper
import com.sksamuel.avro4s.typeutils.{Annotations, SubtypeOrdering}
import magnolia1.SealedTrait
import org.apache.avro.SchemaBuilder
object TypeUnions {

  //  def decoder[T](ctx: SealedTrait[Decoder, T],
  //                 env: DefinitionEnvironment[Decoder],
  //                 update: SchemaUpdate): Decoder[T] = {
  //    // cannot extend the recursive environment with an initial type union decoder with empty union schema, as Avro Schema
  //    // doesn't support this. So we use the original recursive environment to build subtypes, meaning that in case of a
  //    // recursive schema, two identical type union decoders may be created instead of one.
  //    val subtypeDecoders = enrichedSubtypes(ctx, update).map { case (st, u) => new UnionDecoder[T](st)(env, u) }
  //    val schemaFor = buildSchema[T](update, subtypeDecoders.map(_.schema))
  //    val decoderByName = subtypeDecoders.map(decoder => decoder.fullName -> decoder).toMap
  //    new TypeUnionDecoder[T](ctx, schemaFor, decoderByName)
  //  }

  /**
   * Builds the Avro union schema for a sealed trait by combining the schemas of all of
   * its subtypes, ordered by [[SubtypeOrdering]] and merged via
   * [[SchemaHelper.createSafeUnion]].
   */
  def schema[T](ctx: SealedTrait[SchemaFor, T]): SchemaFor[T] = {
    // Note: a previously computed Seq of boolean/int schemas was unused dead code and
    // has been removed.
    val sortedSubtypes = ctx.subtypes.sorted(SubtypeOrdering).map(_.typeclass.schema)
    SchemaFor(SchemaHelper.createSafeUnion(sortedSubtypes: _*))
  }

  //  private def enrichedSubtypes[Typeclass[_], T](ctx: SealedTrait[Typeclass, T],
  //                                                update: SchemaUpdate): Seq[(Subtype[Typeclass, T], SchemaUpdate)] = {
  //    val enrichedUpdate = update match {
  //      case NoUpdate =>
  //        // in case of namespace annotations, pass the namespace update down to all subtypes
  //        val ns = new AnnotationExtractors(ctx.annotations).namespace
  //        ns.fold[SchemaUpdate](NoUpdate)(NamespaceUpdate)
  //      case _ => update
  //    }
  //
  //    def subtypeSchemaUpdate(st: Subtype[Typeclass, T]) = enrichedUpdate match {
  //      case FullSchemaUpdate(schemaFor) =>
  //        val schema = schemaFor.schema
  //        val fieldMapper = schemaFor.fieldMapper
  //        val nameExtractor = NameExtractor(st.typeName, st.annotations ++ ctx.annotations)
  //        val subtypeSchema = SchemaFor(SchemaHelper.extractTraitSubschema(nameExtractor.fullName, schema), fieldMapper)
  //        FullSchemaUpdate(subtypeSchema)
  //      case _ => enrichedUpdate
  //    }
  //
  //    def priority(st: Subtype[Typeclass, T]) = new AnnotationExtractors(st.annotations).sortPriority.getOrElse(0.0f)
  //    val sortedSubtypes = ctx.subtypes.sortWith((l, r) => priority(l) > priority(r))
  //
  //    sortedSubtypes.map(st => (st, subtypeSchemaUpdate(st)))
  //  }
  //
  //  private[avro4s] def validateNewSchema[T](schemaFor: SchemaFor[T]) = {
  //    val newSchema = schemaFor.schema
  //    if (newSchema.getType != Schema.Type.UNION)
  //      throw new Avro4sConfigurationException(s"Schema type for record codecs must be UNION, received $newSchema")
  //  }
}
| sksamuel/avro4s | avro4s-core/src/main/scala/com/sksamuel/avro4s/schemas/unions.scala | Scala | apache-2.0 | 3,144 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.physical.{ClusteredDistribution, Partitioning}
import org.apache.spark.sql.execution.{BinaryNode, SparkPlan}
import org.apache.spark.sql.metric.SQLMetrics
/**
 * :: DeveloperApi ::
 * Performs an inner hash join of two child relations by first shuffling the data using the join
 * keys.
 */
@DeveloperApi
case class ShuffledHashJoin(
    leftKeys: Seq[Expression],
    rightKeys: Seq[Expression],
    buildSide: BuildSide,
    left: SparkPlan,
    right: SparkPlan)
  extends BinaryNode with HashJoin {

  override def outputPartitioning: Partitioning = left.outputPartitioning

  override private[sql] lazy val metrics = Map(
    "numLeftRows" -> SQLMetrics.createLongMetric(sparkContext, "number of left rows"),
    "numRightRows" -> SQLMetrics.createLongMetric(sparkContext, "number of right rows"),
    "numOutputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of output rows"))

  // Both children must be clustered on their respective join keys so matching keys
  // end up in the same partition.
  override def requiredChildDistribution: Seq[ClusteredDistribution] =
    ClusteredDistribution(leftKeys) :: ClusteredDistribution(rightKeys) :: Nil

  protected override def doExecute(): RDD[Row] = {
    // Route each side's row-count metric according to which side is the build side.
    val buildIsLeft = buildSide == BuildLeft
    val numBuildRows = if (buildIsLeft) longMetric("numLeftRows") else longMetric("numRightRows")
    val numStreamedRows = if (buildIsLeft) longMetric("numRightRows") else longMetric("numLeftRows")
    val numOutputRows = longMetric("numOutputRows")

    // Build a hash table from the build side of each co-partitioned pair, then probe
    // it with the streamed side.
    buildPlan.execute().zipPartitions(streamedPlan.execute()) { (buildIter, streamIter) =>
      val hashed = HashedRelation(buildIter, numBuildRows, buildSideKeyGenerator)
      hashJoin(streamIter, numStreamedRows, hashed, numOutputRows)
    }
  }
}
| andrewor14/iolap | sql/core/src/main/scala/org/apache/spark/sql/execution/joins/ShuffledHashJoin.scala | Scala | apache-2.0 | 2,693 |
//
// Scaled - a scalable editor extensible via JVM languages
// http://github.com/scaled/scaled/blob/master/LICENSE
package scaled
import reactual.{Future, Property, OptValue, SignalV, Value, ValueV}
/** Visualizes a single line of text, potentially with style information. */
trait LineView {

  /** The line being displayed by this view. */
  def line :LineV

  // TODO: margin decorations
  // TODO: access to the JavaFX scene graph Node on which to anchor bits?
}
/** The visualization of a text buffer. It also manages the UX for manipulating and editing the
  * buffer. This includes:
  * - a series of [LineView] instances visualizing each line of text
  * - the point, which defines the cursor/insertion point and the point end of the point/mark
  * - the scroll position of the view, which indicates which lines of the buffer are visible
  * Anything other than the data model for the buffer itself (which is encapsulated in [Buffer])
  * will be handled by this view.
  */
abstract class BufferView {

  /** The buffer being displayed by this view. */
  def buffer :Buffer

  /** Views for the lines in this buffer. */
  def lines :Seq[LineView]

  /** The current point (aka the cursor position). */
  def point :Property[Loc]

  /** The width of the view, in characters. */
  def width :Property[Int]

  /** The height of the view, in characters. */
  def height :Property[Int]

  /** The index of the line at the top of the view. */
  def scrollTop :Property[Int]

  /** The column index of the character at the left of the view. */
  def scrollLeft :Property[Int]

  /** Adjusts the scroll position of this view by `delta` lines. The scroll position will be bounded
    * based on the size of the buffer. The point will then be bounded into the visible area of the
    * buffer. */
  def scrollVert (delta :Int) :Unit = {
    val viewHeight = height()
    // bound bottom first, then top; this snaps buffers that are less than one screen tall to top
    // TODO: nix buffer.lines.length, use lines.length when lines is implemented
    val maxTop = buffer.lines.length - viewHeight
    val newTop = math.max(math.min(scrollTop() + delta, maxTop), 0)
    // println(s"Updating scroll top ($delta ${lines.length} $height) => $newTop")
    scrollTop() = newTop

    // keep the point inside the newly visible window
    val cur = point()
    if (cur.row < newTop) point() = cur.atRow(newTop)
    else if (cur.row >= newTop + viewHeight) point() = cur.atRow(newTop + viewHeight - 1)
  }

  /** Displays `popup` in this buffer. */
  def showPopup (popup :Popup) :Unit
}
/** `BufferView` related types and utilities. */
object BufferView {

  /** An event emitted when lines are added to or removed from the buffer view. The removed lines
    * will have already been removed and the added lines added when this edit is dispatched. */
  case class Change (
    /** The row at which the additions or removals start. */
    row :Int,
    /** If positive, the number of rows added, if negative the number of rows deleted. */
    delta :Int,
    /** The buffer view that was edited. */
    view :BufferView)
}
/** A reactive version of [BufferView], used by modes. */
abstract class RBufferView (initWidth :Int, initHeight :Int) extends BufferView {

  /** The (reactive) buffer being displayed by this view. */
  override def buffer :RBuffer

  /** A signal emitted when lines are added to or removed from this view. */
  def changed :SignalV[BufferView.Change]

  /** The current point (aka the cursor position). Updates are clamped into the buffer's
    * bounds (via `buffer.bound`), so the point can never refer to a nonexistent location. */
  val point :Value[Loc] = new Value(Loc(0, 0)) {
    override def update (loc :Loc) :Loc = super.update(buffer.bound(loc))
    override def updateForce (loc :Loc) :Loc = super.update(buffer.bound(loc))
  }

  /** The width of the buffer view, in characters. */
  val width :Value[Int] = Value(initWidth)

  /** The height of the buffer view, in characters. */
  val height :Value[Int] = Value(initHeight)

  /** The index of the line at the top of the view. */
  val scrollTop :Value[Int] = Value(0)

  /** The column index of the character at the left of the view. */
  val scrollLeft :Value[Int] = Value(0)

  /** The popup being displayed by this buffer, if any. */
  val popup :OptValue[Popup] = OptValue()

  override def showPopup (popup :Popup) = this.popup() = popup
}
| swhgoon/scaled | api/src/main/scala/scaled/Views.scala | Scala | bsd-3-clause | 4,185 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.File
import org.junit.{Test, After, Before}
import kafka.zk.ZooKeeperTestHarness
import kafka.utils.TestUtils._
import kafka.producer.KeyedMessage
import kafka.serializer.StringEncoder
import kafka.utils.{TestUtils}
import kafka.common._
/**
 * Verifies that the replica fetcher replicates the leader's log to followers: after
 * producing to two 2-replica topics, every broker's log end offset must converge to
 * the leader's (non-zero) offset.
 */
abstract class BaseReplicaFetchTest extends ZooKeeperTestHarness  {
  var brokers: Seq[KafkaServer] = null
  val topic1 = "foo"
  val topic2 = "bar"

  /* If this is `Some`, SSL will be enabled */
  protected def trustStoreFile: Option[File]

  @Before
  override def setUp() {
    super.setUp()
    brokers = createBrokerConfigs(2, zkConnect, enableControlledShutdown = false, enableSSL = trustStoreFile.isDefined, trustStoreFile = trustStoreFile)
      .map(KafkaConfig.fromProps)
      .map(TestUtils.createServer(_))
  }

  @After
  override def tearDown() {
    brokers.foreach(_.shutdown())
    super.tearDown()
  }

  @Test
  def testReplicaFetcherThread() {
    val partition = 0
    val testMessageList1 = List("test1", "test2", "test3", "test4")
    val testMessageList2 = List("test5", "test6", "test7", "test8")

    // create a topic and partition and await leadership
    for (topic <- List(topic1,topic2)) {
      createTopic(zkUtils, topic, numPartitions = 1, replicationFactor = 2, servers = brokers)
    }

    // send test messages to leader
    val producer = TestUtils.createProducer[String, String](TestUtils.getBrokerListStrFromServers(brokers),
                                                            encoder = classOf[StringEncoder].getName,
                                                            keyEncoder = classOf[StringEncoder].getName)
    val messages = testMessageList1.map(m => new KeyedMessage(topic1, m, m)) ++ testMessageList2.map(m => new KeyedMessage(topic2, m, m))
    producer.send(messages:_*)
    producer.close()

    // True once, for each topic, every broker's log end offset equals the leader's
    // non-zero log end offset. Polled by waitUntilTrue below.
    def logsMatch(): Boolean = {
      List(topic1, topic2).forall { topic =>
        val topicAndPart = TopicAndPartition(topic, partition)
        val expectedOffset = brokers.head.getLogManager().getLog(topicAndPart).get.logEndOffset
        expectedOffset > 0 && brokers.forall { broker =>
          broker.getLogManager().getLog(topicAndPart).get.logEndOffset == expectedOffset
        }
      }
    }
    waitUntilTrue(logsMatch, "Broker logs should be identical")
  }
}
| vkroz/kafka | core/src/test/scala/unit/kafka/server/BaseReplicaFetchTest.scala | Scala | apache-2.0 | 3,176 |
package io.iohk.ethereum.jsonrpc
import akka.actor.ActorSystem
import akka.testkit.{TestKit, TestProbe}
import io.iohk.ethereum.blockchain.sync.regular.RegularSync.NewCheckpoint
import io.iohk.ethereum.domain.{Block, BlockBody, BlockchainImpl}
import io.iohk.ethereum.jsonrpc.CheckpointingService._
import io.iohk.ethereum.{Fixtures, NormalPatience, WithActorSystemShutDown}
import monix.execution.Scheduler.Implicits.global
import org.scalacheck.Gen
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
class CheckpointingServiceSpec
extends TestKit(ActorSystem("CheckpointingServiceSpec_System"))
with AnyFlatSpecLike
with WithActorSystemShutDown
with MockFactory
with ScalaFutures
with NormalPatience
with ScalaCheckPropertyChecks
with Matchers {
"CheckpointService" should "get latest block (at a correct checkpointing interval) from Blockchain" in new TestSetup {
val nums = for {
k <- Gen.choose[Int](1, 10) // checkpointing interval
m <- Gen.choose(0, 1000) // number of checkpoints in the chain
n <- Gen.choose(0, k - 1) // distance from best block to checkpointed block
} yield (k, m, n)
forAll(nums) { case (k, m, n) =>
val checkpointedBlockNum: BigInt = k * m
val bestBlockNum: BigInt = checkpointedBlockNum + n
val block = Block(Fixtures.Blocks.ValidBlock.header.copy(number = checkpointedBlockNum), BlockBody.empty)
val request = GetLatestBlockRequest(k)
val expectedResponse = GetLatestBlockResponse(block.hash, block.number)
(blockchain.getBestBlockNumber _).expects().returning(bestBlockNum)
(blockchain.getBlockByNumber _).expects(checkpointedBlockNum).returning(Some(block))
val result = service.getLatestBlock(request)
result.runSyncUnsafe() shouldEqual Right(expectedResponse)
}
}
it should "send new checkpoint to Sync" in new TestSetup {
val hash = Fixtures.Blocks.ValidBlock.block.hash
val signatures = Nil
val request = PushCheckpointRequest(hash, signatures)
val expectedResponse = PushCheckpointResponse()
val result = service.pushCheckpoint(request).runSyncUnsafe()
syncController.expectMsg(NewCheckpoint(hash, signatures))
result shouldEqual Right(expectedResponse)
}
it should "get latest block in case of blockchain re-org" in new TestSetup {
val block = Fixtures.Blocks.ValidBlock.block
val expectedResponse = GetLatestBlockResponse(block.hash, block.number)
(blockchain.getBestBlockNumber _)
.expects()
.returning(7)
(blockchain.getBlockByNumber _)
.expects(BigInt(4))
.returning(None)
(blockchain.getBestBlockNumber _)
.expects()
.returning(7)
(blockchain.getBlockByNumber _)
.expects(BigInt(4))
.returning(Some(block))
val result = service.getLatestBlock(GetLatestBlockRequest(4))
result.runSyncUnsafe() shouldEqual Right(expectedResponse)
}
// Shared fixture: a mocked blockchain, a probe standing in for the sync
// controller actor, and the service under test wired to both.
trait TestSetup {
  val blockchain = mock[BlockchainImpl]
  val syncController = TestProbe()
  val service = new CheckpointingService(blockchain, syncController.ref)
}
}
| input-output-hk/etc-client | src/test/scala/io/iohk/ethereum/jsonrpc/CheckpointingServiceSpec.scala | Scala | mit | 3,299 |
package fr.acinq.bitcoin
import java.io.{InputStream, OutputStream}
import java.math.BigInteger
import java.nio.ByteOrder
import fr.acinq.bitcoin.Protocol._
import scodec.bits._
object BlockHeader extends BtcSerializer[BlockHeader] {
  /** Reads the fixed 80-byte wire layout: version, previous block hash, merkle root, time, bits, nonce. */
  override def read(input: InputStream, protocolVersion: Long): BlockHeader = {
    val version = uint32(input)
    val hashPreviousBlock = hash(input)
    val hashMerkleRoot = hash(input)
    val time = uint32(input)
    val bits = uint32(input)
    val nonce = uint32(input)
    BlockHeader(version, hashPreviousBlock, hashMerkleRoot, time, bits, nonce)
  }

  /** Writes the header in the same 80-byte layout read by [[read]]. */
  override def write(input: BlockHeader, out: OutputStream, protocolVersion: Long) = {
    writeUInt32(input.version.toInt, out)
    writeBytes(input.hashPreviousBlock.toArray, out)
    writeBytes(input.hashMerkleRoot.toArray, out)
    writeUInt32(input.time.toInt, out)
    writeUInt32(input.bits.toInt, out)
    writeUInt32(input.nonce.toInt, out)
  }

  /**
   * Expands the compact "nBits" encoding into the full difficulty target:
   * the high byte of `bits` is a size, the low 3 bytes a mantissa, and bit
   * 0x00800000 a sign flag.
   */
  def getDifficulty(header: BlockHeader): BigInteger = {
    val nsize = header.bits >> 24
    val isneg = header.bits & 0x00800000
    val nword = header.bits & 0x007fffff
    val result = if (nsize <= 3)
      BigInteger.valueOf(nword).shiftRight(8 * (3 - nsize.toInt))
    else
      BigInteger.valueOf(nword).shiftLeft(8 * (nsize.toInt - 3))
    if (isneg != 0) result.negate() else result
  }

  /**
   *
   * @param bits difficulty target
   * @return the amount of work represented by this difficulty target, as displayed
   *         by bitcoin core
   */
  def blockProof(bits: Long): Double = {
    val (target, negative, overflow) = decodeCompact(bits)
    if (target == BigInteger.ZERO || negative || overflow) 0.0 else {
      // work = 2^256 / (target + 1): the expected number of hashes to find a block
      val work = BigInteger.valueOf(2).pow(256).divide(target.add(BigInteger.ONE))
      work.doubleValue()
    }
  }

  def blockProof(header: BlockHeader): Double = blockProof(header.bits)

  /**
   * Proof of work: hash(header) <= target difficulty
   *
   * @param header block header
   * @return true if the input block header validates its expected proof of work
   */
  def checkProofOfWork(header: BlockHeader): Boolean = {
    val (target, _, _) = decodeCompact(header.bits)
    // blockId interpreted as an unsigned big-endian integer
    val hash = new BigInteger(1, header.blockId.toArray)
    hash.compareTo(target) <= 0
  }

  /**
   * Computes the next compact difficulty target after a retarget period,
   * clamping the observed timespan to [targetTimespan/4, targetTimespan*4] and
   * capping the result at the proof-of-work limit.
   * NOTE(review): the refutable pattern `var (target, false, false) = ...` below
   * throws a MatchError if decodeCompact reports a negative or overflowing
   * target — presumably impossible for a valid stored header; confirm.
   */
  def calculateNextWorkRequired(lastHeader: BlockHeader, lastRetargetTime: Long): Long = {
    var actualTimespan = lastHeader.time - lastRetargetTime
    val targetTimespan = 14 * 24 * 60 * 60 // two weeks
    if (actualTimespan < targetTimespan / 4) actualTimespan = targetTimespan / 4
    if (actualTimespan > targetTimespan * 4) actualTimespan = targetTimespan * 4
    var (target, false, false) = decodeCompact(lastHeader.bits)
    target = target.multiply(BigInteger.valueOf(actualTimespan))
    target = target.divide(BigInteger.valueOf(targetTimespan))
    val powLimit = new BigInteger(1, hex"00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff".toArray)
    target = target.min(powLimit)
    encodeCompact(target)
  }
}
/**
 *
 * @param version           Block version information, based upon the software version creating this block
 * @param hashPreviousBlock The hash value of the previous block this particular block references. Please note that
 *                          this hash is not reversed (as opposed to Block.hash)
 * @param hashMerkleRoot    The reference to a Merkle tree collection which is a hash of all transactions related to this block
 * @param time              A timestamp recording when this block was created (Will overflow in 2106[2])
 * @param bits              The calculated difficulty target being used for this block
 * @param nonce             The nonce used to generate this block… to allow variations of the header and compute different hashes
 */
case class BlockHeader(version: Long, hashPreviousBlock: ByteVector32, hashMerkleRoot: ByteVector32, time: Long, bits: Long, nonce: Long) extends BtcSerializable[BlockHeader] {
  // double SHA-256 of the serialized 80-byte header, internal byte order
  lazy val hash: ByteVector32 = Crypto.hash256(BlockHeader.write(this))

  // hash is reversed here (same as tx id)
  lazy val blockId = hash.reverse

  def blockProof = BlockHeader.blockProof(this)

  override def serializer: BtcSerializer[BlockHeader] = BlockHeader
}
object Block extends BtcSerializer[Block] {
  /** Reads the 80-byte header followed by the transaction list. */
  override def read(input: InputStream, protocolVersion: Long): Block = {
    val raw = bytes(input, 80)
    val header = BlockHeader.read(raw.toArray)
    Block(header, readCollection[Transaction](input, protocolVersion))
  }

  override def write(input: Block, out: OutputStream, protocolVersion: Long) = {
    BlockHeader.write(input.header, out)
    writeCollection(input.tx, out, protocolVersion)
  }

  /**
   * Structural validation: valid header, merkle root matching the transactions,
   * no duplicate txids, and every transaction individually valid.
   */
  override def validate(input: Block): Unit = {
    BlockHeader.validate(input.header)
    require(input.header.hashMerkleRoot === MerkleTree.computeRoot(input.tx.map(_.hash)), "invalid block: merkle root mismatch")
    require(input.tx.map(_.txid).toSet.size == input.tx.size, "invalid block: duplicate transactions")
    input.tx.foreach(Transaction.validate)
  }

  def blockProof(block: Block): Double = BlockHeader.blockProof(block.header)

  // genesis blocks
  val LivenetGenesisBlock = {
    val script = OP_PUSHDATA(writeUInt32(486604799L)) :: OP_PUSHDATA(hex"04") :: OP_PUSHDATA(ByteVector("The Times 03/Jan/2009 Chancellor on brink of second bailout for banks".getBytes("UTF-8"))) :: Nil
    val scriptPubKey = OP_PUSHDATA(hex"04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f") :: OP_CHECKSIG :: Nil
    Block(
      BlockHeader(version = 1, hashPreviousBlock = ByteVector32.Zeroes, hashMerkleRoot = ByteVector32(hex"3ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a"), time = 1231006505, bits = 0x1d00ffff, nonce = 2083236893),
      List(
        Transaction(version = 1,
          txIn = List(TxIn.coinbase(script)),
          txOut = List(TxOut(amount = 50 btc, publicKeyScript = scriptPubKey)),
          lockTime = 0))
    )
  }

  // The other networks' genesis blocks are derived from livenet's by tweaking header fields.
  val SignetGenesisBlock = LivenetGenesisBlock.copy(header = LivenetGenesisBlock.header.copy(bits = 0x1e0377aeL, time = 1598918400, nonce = 52613770))

  val TestnetGenesisBlock = LivenetGenesisBlock.copy(header = LivenetGenesisBlock.header.copy(time = 1296688602, nonce = 414098458))

  val RegtestGenesisBlock = LivenetGenesisBlock.copy(header = LivenetGenesisBlock.header.copy(bits = 0x207fffffL, nonce = 2, time = 1296688602))

  val SegnetGenesisBlock = LivenetGenesisBlock.copy(header = LivenetGenesisBlock.header.copy(bits = 503447551, time = 1452831101, nonce = 0))

  /**
   * Proof of work: hash(block) <= target difficulty
   *
   * @param block
   * @return true if the input block validates its expected proof of work
   */
  def checkProofOfWork(block: Block): Boolean = BlockHeader.checkProofOfWork(block.header)

  /**
   *
   * @param tx coinbase transaction
   * @return the witness reserved value included in the input of this tx if any
   */
  def witnessReservedValue(tx: Transaction): Option[ByteVector] = tx.txIn(0).witness match {
    case ScriptWitness(Seq(nonce)) if nonce.length == 32 => Some(nonce)
    case _ => None
  }

  /**
   *
   * @param tx coinbase transaction
   * @return the witness commitment included in this transaction, if any
   */
  def witnessCommitment(tx: Transaction): Option[ByteVector32] = tx.txOut.map(o => Script.parse(o.publicKeyScript)).reverse.collectFirst {
    // we've reversed the outputs because if there are more than one scriptPubKey matching the pattern, the one with
    // the highest output index is assumed to be the commitment.
    case OP_RETURN :: OP_PUSHDATA(commitmentHeader, _) :: Nil if commitmentHeader.length == 36 && Protocol.uint32(commitmentHeader.take(4).toArray, ByteOrder.BIG_ENDIAN) == 0xaa21a9edL => ByteVector32(commitmentHeader.takeRight(32))
  }

  /**
   * Checks the witness commitment of a block
   *
   * @param block block
   * @return true if the witness commitment for this block is valid, or if this block does not contain a witness commitment
   *         nor any segwit transactions.
   */
  def checkWitnessCommitment(block: Block): Boolean = {
    val coinbase = block.tx.head
    (witnessReservedValue(coinbase), witnessCommitment(coinbase)) match {
      case (Some(nonce), Some(commitment)) =>
        // witness merkle root: the coinbase wtxid slot is all-zeroes
        val rootHash = MerkleTree.computeRoot(ByteVector32.Zeroes +: block.tx.tail.map(tx => tx.whash))
        val commitmentHash = Crypto.hash256(rootHash ++ nonce)
        commitment == commitmentHash
      case _ if block.tx.exists(_.hasWitness) => false // block has segwit transactions but no witness commitment
      case _ => true
    }
  }
}
/**
 * Bitcoin block
 *
 * @param header block header
 * @param tx     transactions
 */
case class Block(header: BlockHeader, tx: Seq[Transaction]) extends BtcSerializable[Block] {
  // delegated to the header: hash (internal byte order) and blockId (reversed, display order)
  lazy val hash = header.hash

  lazy val blockId = header.blockId

  override def serializer: BtcSerializer[Block] = Block
}
| ACINQ/bitcoin-lib | src/main/scala/fr/acinq/bitcoin/Block.scala | Scala | apache-2.0 | 9,023 |
package org.analogweb.scala
import org.analogweb.{
PluginModulesConfig,
InvocationMetadataFactory,
ModulesBuilder,
UserModulesConfig
}
import org.analogweb.core.{
BindAttributeArgumentPreparator,
ConsumesMediaTypeVerifier,
ScopedMapArgumentPreparator
}
import org.analogweb.util.PropertyResourceBundleMessageResource
import org.analogweb.util.logging.Logs
/**
 * User-supplied flavour of [[ScalaModulesConfig]]: identical wiring, but also
 * tagged as a [[UserModulesConfig]] so the framework treats it as user (rather
 * than plugin) configuration.
 */
class ScalaUserModulesConfig(
    invocationMetadataFactory: Option[InvocationMetadataFactory] = None,
    invocationFactory: Option[ScalaInvocationFactory] = None,
    renderableResolver: Option[ScalaRenderableResolver] = None,
    responseHandler: Option[ScalaResponseHandler] = None
) extends ScalaModulesConfig(invocationMetadataFactory,
      invocationFactory,
      renderableResolver,
      responseHandler)
    with UserModulesConfig
/**
 * Plugin module configuration for the Scala flavour of Analogweb. Each
 * collaborator can be supplied as a ready-made instance; when omitted (None),
 * the corresponding implementation class is registered instead and instantiated
 * by the container.
 */
class ScalaModulesConfig(
    invocationMetadataFactory: Option[InvocationMetadataFactory] = None,
    invocationFactory: Option[ScalaInvocationFactory] = None,
    renderableResolver: Option[ScalaRenderableResolver] = None,
    responseHandler: Option[ScalaResponseHandler] = None
) extends PluginModulesConfig {

  // Zero-arg constructor. NOTE(review): presumably required for reflective
  // instantiation of plugin configs — confirm against the module loading code.
  def this() {
    this(None, None, None, None)
  }

  // Message bundle backing the startup log entry emitted in prepare().
  val messageLog =
    new PropertyResourceBundleMessageResource("org.analogweb.scala.analog-messages")

  val log =
    Logs.getLog(classOf[ScalaModulesConfig])

  /**
   * Registers the Scala components on the builder. Registrations are chained:
   * each step configures the builder value returned by the previous one, so the
   * order of the statements below is significant.
   */
  def prepare(builder: ModulesBuilder): ModulesBuilder = {
    log
      .log(messageLog, "ISB000001")
    // instance wins over class registration for each component
    val im = invocationMetadataFactory
      .map(builder.addInvocationMetadataFactories(_))
      .getOrElse(
        builder.addInvocationMetadataFactoriesClass(classOf[ScalaInvocationMetadataFactory])
      )
    val in = invocationFactory
      .map(im.setInvocationFactory(_))
      .getOrElse(
        im.setInvocationFactoryClass(classOf[ScalaInvocationFactory])
      )
    val rr = renderableResolver
      .map(in.setRenderableResolver(_))
      .getOrElse(
        in.setRenderableResolverClass(classOf[ScalaRenderableResolver])
      )
    val rh = responseHandler
      .map(rr.setResponseHandler(_))
      .getOrElse(
        rr.setResponseHandlerClass(classOf[ScalaResponseHandler])
      )
    // ignore ApplicationProcessors for Java.
    rh.ignore(classOf[BindAttributeArgumentPreparator])
      .ignore(classOf[ConsumesMediaTypeVerifier])
      .ignore(classOf[ScopedMapArgumentPreparator])
  }
}
| analogweb/scala-plugin | core/src/main/scala/org/analogweb/scala/ScalaModulesConfig.scala | Scala | mit | 2,441 |
package form.project
import ore.OreConfig
import ore.models.project.factory.ProjectFactory
/**
 * Represents submitted [[ore.models.project.Version]] data.
 *
 * @param unstable        True if the version is marked unstable
 * @param recommended     True if recommended version
 * @param channelName     Name of channel
 * @param channelColorHex Channel color hex
 * @param nonReviewed     True if the channel is flagged non-reviewed (assumed from name — confirm)
 * @param content         Optional version description content
 * @param forumPost       True if a forum post should accompany the version (assumed from name — confirm)
 */
case class VersionData(
    unstable: Boolean,
    recommended: Boolean,
    channelName: String,
    protected val channelColorHex: String,
    nonReviewed: Boolean,
    content: Option[String],
    forumPost: Boolean
)(implicit val config: OreConfig, val factory: ProjectFactory)
    extends TChannelData
| SpongePowered/Ore | ore/app/form/project/VersionData.scala | Scala | mit | 621 |
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.crossdata.models
import org.apache.spark.sql.crossdata
/**
 * Metadata describing a table registered in the Crossdata catalog: its schema,
 * backing data source, optional database, partition columns and options.
 */
case class TableModel(id: String,
                      name: String,
                      schema: String,
                      dataSource: String,
                      database: Option[String] = None,
                      partitionColumns: Seq[String] = Seq.empty,
                      options: Map[String, String] = Map.empty,
                      version: String = crossdata.CrossdataVersion) {

  /** Fully qualified name: "<database>.<name>" when a database is set, the plain name otherwise. */
  def getExtendedName: String = database match {
    case Some(databaseName) => s"$databaseName.$name"
    case None               => name
  }
}
| luismcl/crossdata | core/src/main/scala/org/apache/spark/sql/crossdata/models/TableModel.scala | Scala | apache-2.0 | 1,214 |
package controllers
import play.api.i18n.Messages
import play.api.libs.json._
import security._
/**
* @author zepeng.li@gmail.com
*/
class RegisteredSecured(
    val modules: PermissionCheckable*
) {

  /** Helpers over the registered modules' checked names. */
  object Modules {

    def names: Seq[String] = modules.map(_.checkedModuleName.name)

    /** Pretty-printed JSON object mapping each module name to its localized display name. */
    def toJson(implicit messages: Messages) = Json.prettyPrint(
      JsObject(
        names.map { name =>
          name -> JsString(messages(s"$name.name"))
        }
      )
    )
  }

  /** Access definitions: per module, maps each permission value to its localized label. */
  object AccessDef {

    def toJson(implicit messages: Messages) = Json.prettyPrint(
      Json.toJson(
        modules.map { m =>
          val name = m.checkedModuleName.name
          name ->
            m.AccessDef.values.map { p =>
              // message keys follow the "<module>.ac.<permission>" convention
              p.self.toString -> messages(s"$name.ac.$p")
            }.toMap
        }.toMap
      )
    )
  }
}
package chapter20
import java.io.File
import java.nio.file._
import java.nio.file.attribute.BasicFileAttributes
import akka.actor._
import scala.util.matching.Regex
/*
Write a program that counts how many words match a given regular expression in all files of
all subdirectories of a given directory.
Have one actor per file,
one actor that traverses the subdirectories,
and one actor to accumulate the results.
*/
object Exercise9 extends App {

  /**
   * Deliberately thread-unsafe counter used to demonstrate a lost-update race:
   * `inc` reads, sleeps, then writes, so concurrent increments can overwrite each
   * other. The final "Total" line makes the discrepancy against the actor-local
   * count visible. Do not "fix" this — the race is the point of the exercise.
   */
  class SharedCounter {
    private var value = 0

    def inc(matchesCount: Int) {
      // value += matchesCount // doesn't atomic
      // Might happen:
      val oldValue = value
      Thread.sleep(1L)
      value = oldValue + matchesCount
    }

    override def toString: String = value.toString
  }

  // Poison-pill message telling the accumulator that traversal is finished.
  object NoMoreFiles

  /** Counts regex matches in each file it receives and reports non-zero counts to `sink`. */
  class AnalyzeFileActor(regex: Regex, sink: ActorRef, counter: SharedCounter) extends Actor {
    override def receive = {
      case file: File =>
        val matches = findMatches(file)
        if (matches > 0) {
          println(s"${file.toString}: $matches matches")
          counter.inc(matches) // racy on purpose, see SharedCounter
          sink ! matches
        }
    }

    // number of whitespace-separated words in the file that contain a match
    private def findMatches(file: File): Int = {
      val words = io.Source.fromFile(file).getLines().mkString("\n").split("\\s+")
      words.count(regex.findFirstIn(_).isDefined)
    }
  }

  /** Accumulates per-file counts and prints the grand total on NoMoreFiles. */
  class AccumulateActor(counter: SharedCounter) extends Actor {
    var matchesFound = 0

    override def receive = {
      case count: Int => matchesFound += count
      case NoMoreFiles =>
        // `counter` is the shared, racy total; `matchesFound` is the actor-local, correct one
        println(s"Total: $counter matches (expected $matchesFound)")
        context.system.shutdown()
    }
  }

  val start = Paths.get("./data/")
  val regex = "\\w{6,10}".r
  val counter = new SharedCounter

  val system = ActorSystem("regex-matcher")
  val accumulate = system.actorOf(Props(new AccumulateActor(counter)))
  val analyze1 = system.actorOf(Props(new AnalyzeFileActor(regex, accumulate, counter)))
  val analyze2 = system.actorOf(Props(new AnalyzeFileActor(regex, accumulate, counter)))

  println(s"Searching '$regex' in $start ...")

  // Round-robin .txt files between the two analyzer actors while walking the tree.
  var switch = false
  Files.walkFileTree(start, new SimpleFileVisitor[Path] {
    override def visitFile(file: Path, attrs: BasicFileAttributes) = {
      if (attrs.isRegularFile && file.toString.endsWith(".txt")) {
        val analyze = if (switch) analyze1 else analyze2
        analyze ! file.toFile
        switch = !switch
      }
      FileVisitResult.CONTINUE
    }
  })

  // Crude coordination for the demo: give the analyzers time to drain their
  // mailboxes before signalling completion to the accumulator.
  Thread.sleep(1000L)
  accumulate ! NoMoreFiles
}
| vsuharnikov/books-exercises | scala/scala-for-the-impatient/src/main/scala/chapter20/Exercise9.scala | Scala | mit | 2,511 |
package com.mesosphere.cosmos.jsonschema
import cats.data.Xor
import com.fasterxml.jackson.databind.JsonNode
import com.github.fge.jsonschema.main.JsonSchemaFactory
import com.mesosphere.cosmos.jsonschema.Jackson._
import io.circe.syntax._
import io.circe.{Json, JsonObject}
import scala.collection.JavaConverters._
private[cosmos] object JsonSchemaValidation {

  /** Validates a JSON object against a JSON schema; returns one Json per validation message (empty when valid). */
  private[cosmos] def matchesSchema(document: JsonObject, schema: JsonObject): Iterable[Json] = {
    matchesSchema(Json.fromJsonObject(document), Json.fromJsonObject(schema))
  }

  /**
   * Validates `document` against `schema` via json-schema-validator.
   * NOTE(review): the refutable `Xor.Right(...)` patterns below throw a
   * MatchError if the circe -> Jackson conversion yields a Left; callers
   * presumably guarantee convertible input — confirm.
   */
  private[cosmos] def matchesSchema(document: Json, schema: Json): Iterable[Json] = {
    val Xor.Right(documentNode) = document.as[JsonNode]
    val Xor.Right(schemaNode) = schema.as[JsonNode]

    JsonSchemaFactory
      .byDefault()
      .getValidator
      .validate(schemaNode, documentNode)
      .asScala
      .map { message =>
        // round-trip each validation message: Jackson message -> Jackson JSON -> circe Json
        val jacksonJson = message.asJson
        val circeJson = jacksonJson.asJson
        circeJson
      }
  }
}
| movicha/cosmos | cosmos-server/src/main/scala/com/mesosphere/cosmos/jsonschema/JsonSchemaValidation.scala | Scala | apache-2.0 | 1,001 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.system
import java.util
import org.apache.samza.SamzaException
import org.apache.samza.config.{JobConfig, Config}
import org.apache.samza.config.JobConfig.Config2Job
import scala.collection.JavaConverters._
class RangeSystemStreamPartitionMatcher extends SystemStreamPartitionMatcher {

  /**
   * Keeps only the partitions whose id appears in the configured range spec
   * (e.g. "2,3,7-9,1" or "19"). Overlapping or duplicated ranges are fine.
   *
   * @throws SamzaException if any comma-separated token is neither a single
   *                        non-negative integer nor an inclusive "a-b" range
   */
  override def filter(systemStreamPartitions: util.Set[SystemStreamPartition], config: Config): util.Set[SystemStreamPartition] = {
    val sspRanges = config.getSSPMatcherConfigRanges
    val partitionIds = parsePartitionIds(sspRanges)
    systemStreamPartitions.asScala
      .filter(s => partitionIds.contains(s.partition.getPartitionId))
      .asJava
  }

  /** Expands the comma-separated spec into the set of partition ids it denotes. */
  private def parsePartitionIds(sspRanges: String): Set[Int] = {
    val rxS = "(\\d+)".r // single partition id, e.g. "19"
    val rxR = "(\\d+-\\d+)".r // inclusive range, e.g. "7-9"
    sspRanges.split(",").flatMap {
      case rxS(s) => Seq(s.toInt)
      case rxR(r) =>
        val bounds = r.split("-")
        // a reversed range such as "9-7" yields the empty set, same as before
        bounds(0).toInt to bounds(1).toInt
      case _ =>
        val error = "Invalid partition range configuration '%s': %s"
          .format(JobConfig.SSP_MATCHER_CONFIG_RANGES, sspRanges)
        throw new SamzaException(error)
    }.toSet
  }
}
| InnovaCo/samza | samza-core/src/main/scala/org/apache/samza/system/RangeSystemStreamPartitionMatcher.scala | Scala | apache-2.0 | 2,149 |
package aecor.kafkadistributedprocessing.internal
import aecor.kafkadistributedprocessing.internal
import aecor.kafkadistributedprocessing.internal.Channel.CompletionCallback
import cats.effect.Concurrent
import cats.effect.concurrent.Deferred
import cats.effect.implicits._
import cats.implicits._
/**
 * Hand-off point between two fibers: `watch` yields a callback to run once the
 * watcher's work is done, `close` tears the channel down, and `call` invokes the
 * registered callback and waits until it is completed or the channel is closed.
 * (Semantics per the wiring in [[Channel.create]] — confirm against call sites.)
 */
private[kafkadistributedprocessing] final case class Channel[F[_]](watch: F[CompletionCallback[F]],
                                                                   close: F[Unit],
                                                                   call: F[Unit])
private[kafkadistributedprocessing] object Channel {
  type CompletionCallback[F[_]] = F[Unit]

  /**
   * Builds a channel out of two deferreds:
   *  - `deferredCallback` carries the completion callback from the `call` side to the watcher;
   *  - `closed` lets `close` unblock a pending `call` (raced below).
   * `call` creates a per-invocation completion deferred, publishes a callback that
   * completes it, then waits until either the watcher runs that callback or the
   * channel is closed — whichever happens first.
   */
  def create[F[_]: Concurrent]: F[Channel[F]] =
    for {
      deferredCallback <- Deferred[F, CompletionCallback[F]]
      closed <- Deferred[F, Unit]
      close = closed.complete(())
      watch = deferredCallback.get
      call = Deferred[F, Unit]
        .flatMap { deferredCompletion =>
          // `.attempt.void` makes the callback idempotent: completing an
          // already-completed deferred is swallowed rather than raised
          deferredCallback
            .complete(deferredCompletion.complete(()).attempt.void) >> deferredCompletion.get
        }
        .race(closed.get)
        .void
    } yield internal.Channel(watch, close, call)
}
| notxcain/aecor | modules/kafka-distributed-processing/src/main/scala/aecor/kafkadistributedprocessing/internal/Channel.scala | Scala | mit | 1,179 |
package coder.simon.types.phantom
/**
* @author simon
*/
/**
 * Phantom-type demo: a Service whose lifecycle (Started/Stopped) lives only in
 * its type parameter, so illegal transitions are rejected at compile time.
 */
object P1 {
  // These state classes are never instantiated; they exist purely as type tags.
  sealed trait ServiceState
  final class Started extends ServiceState
  final class Stopped extends ServiceState

  class Service[State <: ServiceState] private () {
    def start(implicit ev: State =:= Stopped): Service[Started] = this.asInstanceOf[Service[Started]]
    def stop(implicit ev: State =:= Started): Service[Stopped] = this.asInstanceOf[Service[Stopped]]
  }

  object Service {
    def create() = new Service[Stopped]
  }

  def main(args: Array[String]): Unit = {
    val stoppedService = Service.create()
    val runningService = stoppedService.start
    // Illegal transitions do not compile, thanks to the =:= evidence:
    // runningService.start   // cannot start a started service
    // stoppedService.stop    // cannot stop a service that was never started
  }
}
package hu.bme.mit.ire.messages
/**
 * Wraps a [[ReteMessage]] to mark it as arriving on the primary input slot
 * (presumably of a two-input Rete node — confirm against the consuming nodes).
 */
case class Primary(value: ReteMessage) extends ReteMessage
| FTSRG/ire | src/main/scala/hu/bme/mit/ire/messages/Primary.scala | Scala | epl-1.0 | 92 |
package knot.core.stream.sources
import knot.core.testKit.CellSpec
import org.scalatest.Matchers._
import scala.collection.immutable
import scala.concurrent.Await
import scala.concurrent.duration.Duration
class IteratorSourceSpec extends CellSpec {

  import knot.core.stream.dsl.Implicits._

  /** Runs `iterable` through a graph source and collects everything it emits. */
  private def test[O](iterable: immutable.Iterable[O]): Seq[O] = {
    val f = iterable.toGraph.seq().run()
    // Unbounded await is acceptable in a spec: a hang is surfaced by the runner.
    Await.result(f, Duration.Inf)
  }

  describe("iterator source") {
    it("range") {
      val actual = test(0 until 2)
      actual should be(Seq(0, 1))
    }
    it("one") {
      val actual = test(0 until 1)
      actual should be(Seq(0))
    }
    it("empty") {
      val actual = test(immutable.Seq.empty[Int])
      actual should be(Seq())
    }
    it("null") {
      // a null iterable must be rejected eagerly when the source is built
      an[IllegalArgumentException] should be thrownBy {
        test[Int](null)
      }
    }
  }
}
| defvar/knot | knot-core/src/test/scala/knot/core/stream/sources/IteratorSourceSpec.scala | Scala | mit | 881 |
import features.{PosBigrams, WordId, WordLength, NaiveHeuristicSyllableCounter, Feature}
import org.scalatest.{FlatSpec, Matchers}
import project.{FeatureMatrix, ProjectLoader, FM_Serializer, AbstractProject, SimpleProject}
import structures._
/**
* Created by mechko on 12/12/14.
*/
class FeatureExtracterTest extends FlatSpec with Matchers {

  /** Crude wall-clock timer: runs `block`, prints elapsed nanoseconds, returns its result. */
  def time[R](block: => R): R = {
    val t0 = System.nanoTime()
    val result = block // call-by-name
    val t1 = System.nanoTime()
    println("Elapsed time: " + (t1 - t0) + "ns")
    result
  }

  // Mutable project under test; reloaded from "testProject" inside several specs below.
  var sp = new SimpleProject()
  // Corpus root; a relative path, so these specs depend on the working directory.
  val tc = "../../5"

  /**
  "A SimpleProject" should "be serializable" in {
    sp.root("testProject")
    sp.addFeature(NaiveHeuristicSyllableCounter)
    sp.addFeature(WordLength)
    sp.addFeature(WordId)
    sp.addFeature(PosBigrams)
    //val corpus = new BFSCorpus("src/test/resources")
    //sp.computeFeatures(corpus)
    sp.save()
  }

  "A SimpleProject" should "be deserializable from a file" in {
    //val ips = new ObjectInputStream(File("testProject/project").inputStream())
    val newObject = ProjectLoader.load[SimpleProject]("testProject")
    //ips.close()
    sp.allFeatures.zip(newObject.allFeatures).foreach({ f1 => println(f1._1, f1._2)})
    //newObject.equals(sp) shouldBe(true)
  }

  "A SimpleProject" should "be able to compute features" in {
    val corpus = new BFSCorpus(tc)
    sp.computeFeatures(corpus)
    sp.save()
  }
  **/

  /** Computes a single-feature matrix over `corpus`, prints diagnostics, and serializes it under the project root. */
  def runFM(proj: AbstractProject, feat: Feature, corpus: Corpus) = {
    printf("computing %s_by_docId.fm\n", feat.name)
    val fm: FeatureMatrix = proj.featureToMatrix(feat, corpus)
    println(fm.featureA.pre)
    println("done.\n")
    println("\tactiveSize", fm.matrix.keys.size)
    println("\tsum", fm.matrix.activeValuesIterator.foldLeft(0.0)((a, b) => a + b))
    FM_Serializer.serialize(fm, proj.root)
  }

  /** Pairwise variant: computes and serializes the featureA-by-featureB matrix. */
  def runFM(proj: AbstractProject, featureA: Feature, featureB: Feature, corpus: Corpus) = {
    val fm: FeatureMatrix = proj.featureToMatrix(featureA, featureB, corpus)
    println(fm.featureA.pre + " by " + fm.featureB.get.pre)
    println("done.\n")
    println("\tactiveSize", fm.matrix.keys.size)
    println("\tsum", fm.matrix.valuesIterator.foldLeft(0.0)((a, b) => a + b))
    FM_Serializer.serialize(fm, proj.root)
  }

  // One generated spec per stored feature: reload the project, then build and
  // serialize that feature's matrix over the corpus.
  sp.storedFeatures.foreach(feature => {
    "A SimpleProject" should "be able to create Feature Matrices of " + feature.pre in {
      sp = ProjectLoader.load[SimpleProject]("testProject")
      sp.countableFeatures.foreach(f => {
        printf("%s %d\n", f.name, f.dimension)
      })
      //sp.storedFeatures.foreach(feature => {
      time {
        val corpus = new BFSCorpus(tc)
        runFM(sp, feature, corpus)
      }
      //})
    }
  })

  // Cartesian product of stored features: one generated spec per (x, y) pair.
  val xs = sp.storedFeatures
  val ys = sp.storedFeatures
  (for (x <- xs; y <- ys) yield (x, y)).foreach(features => {
    "A SimpleProject" should "be able to create feature matrices for " + features._1.pre + " " + features._2.pre in {
      time {
        val corpus = new BFSCorpus(tc)
        runFM(sp, features._1, features._2, corpus)
      }
    }
  })

  // Round-trip check: every matrix serialized above must deserialize again,
  // printing the same diagnostics so totals can be compared by eye.
  "A FeatureMatrix" should "be reloadable" in {
    sp = ProjectLoader.load[SimpleProject]("testProject")
    sp.storedFeatures.foreach(feature => {
      time {
        val fm = FM_Serializer.deserialize(sp.root, feature.name)
        println(fm.featureA.pre)
        println("done.\n")
        println("\tactiveSize", fm.matrix.keys.size)
        println("\tsum", fm.matrix.valuesIterator.foldLeft(0.0)((a, b) => a + b))
        //f.matrix.values.foreach(x => if(x!=0) printf("%d ", x))
      }
    })
    val xs = sp.storedFeatures
    val ys = sp.storedFeatures
    (for (x <- xs; y <- ys) yield (x, y)).foreach(features => {
      time {
        val fm = FM_Serializer.deserialize(sp.root, features._1.name, features._2.name)
        println(fm.featureA.pre + " by " + fm.featureB.get.pre)
        println("done.\n")
        println("\tactiveSize", fm.matrix.keys.size)
        println("\tsum", fm.matrix.valuesIterator.foldLeft(0.0)((a, b) => a + b))
      }
    })
  }
}
| mrmechko/ILF | src/test/scala/FeatureExtracterTest.scala | Scala | mit | 3,989 |
/*
* Copyright (C) 2018 Joan Goyeau.
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.streams.scala
import org.apache.kafka.streams.kstream.JoinWindows
import org.apache.kafka.streams.scala.Serdes._
import org.apache.kafka.streams.scala.ImplicitConversions._
import org.apache.kafka.streams.scala.utils.TestDriver
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FlatSpec, Matchers}
@RunWith(classOf[JUnitRunner])
class KStreamTest extends FlatSpec with Matchers with TestDriver {

  // Each spec builds a small topology, drives it with the topology test driver,
  // and reads the sink topic; a null read means "no record was emitted".

  "filter a KStream" should "filter records satisfying the predicate" in {
    val builder = new StreamsBuilder()
    val sourceTopic = "source"
    val sinkTopic = "sink"

    builder.stream[String, String](sourceTopic).filter((_, value) => value != "value2").to(sinkTopic)

    val testDriver = createTestDriver(builder)

    testDriver.pipeRecord(sourceTopic, ("1", "value1"))
    testDriver.readRecord[String, String](sinkTopic).value shouldBe "value1"
    testDriver.pipeRecord(sourceTopic, ("2", "value2"))
    // the filtered-out record must not reach the sink
    testDriver.readRecord[String, String](sinkTopic) shouldBe null
    testDriver.pipeRecord(sourceTopic, ("3", "value3"))
    testDriver.readRecord[String, String](sinkTopic).value shouldBe "value3"
    testDriver.readRecord[String, String](sinkTopic) shouldBe null

    testDriver.close()
  }

  "filterNot a KStream" should "filter records not satisfying the predicate" in {
    val builder = new StreamsBuilder()
    val sourceTopic = "source"
    val sinkTopic = "sink"

    builder.stream[String, String](sourceTopic).filterNot((_, value) => value == "value2").to(sinkTopic)

    val testDriver = createTestDriver(builder)

    testDriver.pipeRecord(sourceTopic, ("1", "value1"))
    testDriver.readRecord[String, String](sinkTopic).value shouldBe "value1"
    testDriver.pipeRecord(sourceTopic, ("2", "value2"))
    testDriver.readRecord[String, String](sinkTopic) shouldBe null
    testDriver.pipeRecord(sourceTopic, ("3", "value3"))
    testDriver.readRecord[String, String](sinkTopic).value shouldBe "value3"
    testDriver.readRecord[String, String](sinkTopic) shouldBe null

    testDriver.close()
  }

  "foreach a KStream" should "run foreach actions on records" in {
    val builder = new StreamsBuilder()
    val sourceTopic = "source"

    // side effect observed through a local accumulator; foreach is terminal (no sink)
    var acc = ""
    builder.stream[String, String](sourceTopic).foreach((_, value) => acc += value)

    val testDriver = createTestDriver(builder)

    testDriver.pipeRecord(sourceTopic, ("1", "value1"))
    acc shouldBe "value1"
    testDriver.pipeRecord(sourceTopic, ("2", "value2"))
    acc shouldBe "value1value2"

    testDriver.close()
  }

  "peek a KStream" should "run peek actions on records" in {
    val builder = new StreamsBuilder()
    val sourceTopic = "source"
    val sinkTopic = "sink"

    // unlike foreach, peek is pass-through: records continue on to the sink
    var acc = ""
    builder.stream[String, String](sourceTopic).peek((k, v) => acc += v).to(sinkTopic)

    val testDriver = createTestDriver(builder)

    testDriver.pipeRecord(sourceTopic, ("1", "value1"))
    acc shouldBe "value1"
    testDriver.readRecord[String, String](sinkTopic).value shouldBe "value1"
    testDriver.pipeRecord(sourceTopic, ("2", "value2"))
    acc shouldBe "value1value2"
    testDriver.readRecord[String, String](sinkTopic).value shouldBe "value2"

    testDriver.close()
  }

  "selectKey a KStream" should "select a new key" in {
    val builder = new StreamsBuilder()
    val sourceTopic = "source"
    val sinkTopic = "sink"

    // re-key each record by its value
    builder.stream[String, String](sourceTopic).selectKey((_, value) => value).to(sinkTopic)

    val testDriver = createTestDriver(builder)

    testDriver.pipeRecord(sourceTopic, ("1", "value1"))
    testDriver.readRecord[String, String](sinkTopic).key shouldBe "value1"
    testDriver.pipeRecord(sourceTopic, ("1", "value2"))
    testDriver.readRecord[String, String](sinkTopic).key shouldBe "value2"
    testDriver.readRecord[String, String](sinkTopic) shouldBe null

    testDriver.close()
  }

  "join 2 KStreams" should "join correctly records" in {
    val builder = new StreamsBuilder()
    val sourceTopic1 = "source1"
    val sourceTopic2 = "source2"
    val sinkTopic = "sink"

    val stream1 = builder.stream[String, String](sourceTopic1)
    val stream2 = builder.stream[String, String](sourceTopic2)
    // inner join on key within a 1000 ms window
    stream1.join(stream2)((a, b) => s"$a-$b", JoinWindows.of(1000)).to(sinkTopic)

    val testDriver = createTestDriver(builder)

    testDriver.pipeRecord(sourceTopic1, ("1", "topic1value1"))
    testDriver.pipeRecord(sourceTopic2, ("1", "topic2value1"))

    testDriver.readRecord[String, String](sinkTopic).value shouldBe "topic1value1-topic2value1"

    testDriver.readRecord[String, String](sinkTopic) shouldBe null

    testDriver.close()
  }
}
| Esquive/kafka | streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/KStreamTest.scala | Scala | apache-2.0 | 5,514 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.microsoft.azure.powerbi.extensions
import java.sql.Timestamp
import java.util.Date
import com.microsoft.azure.powerbi.authentication.PowerBIAuthentication
import com.microsoft.azure.powerbi.common.PowerBIUtils
import com.microsoft.azure.powerbi.models.{table, PowerBIDatasetDetails}
import org.apache.spark.streaming.dstream.DStream
/**
 * Implicit extension that lets a paired DStream push a "state timeline"
 * (current timestamp, latest value) into a Power BI table.
 */
object PairedDStreamExtensions {

  // Implicit conversion enabling the extension method on any DStream[(A, B)].
  implicit def PowerBIPairedDStream[A, B](dStream: DStream[(A, B)]): PowerBIPairedDStream[A, B]
  = new PowerBIPairedDStream(dStream: DStream[(A, B)])

  class PowerBIPairedDStream[A, B](dStream: DStream[(A, B)]) extends Serializable {

    /**
     * For each non-empty RDD of the stream, pushes one row to the given Power BI
     * table: first column = current wall-clock timestamp, second column = the
     * value of the RDD's first element.
     *
     * Retries a failed push up to `retryCount` times, sleeping
     * `secondsBetweenRetry` seconds and refreshing the access token between
     * attempts.
     *
     * NOTE(review): if all attempts fail, the row is dropped and the failure is
     * only printed to stdout — confirm this best-effort behaviour is intended.
     */
    def stateTimelineToPowerBI(powerbiDatasetDetails: PowerBIDatasetDetails, powerbiTable: table,
                               powerBIAuthentication: PowerBIAuthentication): Unit = {
      var authenticationToken: String = powerBIAuthentication.getAccessToken
      dStream.foreachRDD(x => {
        if (x.count() > 0) {
          val currentTimestamp = new Timestamp(new Date().getTime)
          // Row layout: (head column -> timestamp, second column -> first element's value).
          val powerbiRow = Map(powerbiTable.columns.head.name -> currentTimestamp,
            powerbiTable.columns(1).name -> x.first()._2)
          var attemptCount = 0
          var pushSuccessful = false
          while (!pushSuccessful && attemptCount < this.retryCount) {
            try {
              PowerBIUtils.addRow(powerbiDatasetDetails, powerbiTable,
                powerbiRow, authenticationToken)
              pushSuccessful = true
            }
            catch {
              // On failure: log, back off, then refresh the token before retrying
              // (the failure may have been caused by token expiry).
              case e: Exception => println("Exception inserting row: " + e.getMessage)
                Thread.sleep(secondsBetweenRetry * 1000)
                attemptCount += 1
                authenticationToken = powerBIAuthentication.refreshAccessToken
            }
          }
        }
      })
    }

    // Retry policy for Power BI row pushes.
    private val retryCount: Int = 3
    private val secondsBetweenRetry: Int = 1
  }
}
| hdinsight/spark-powerbi-connector | src/main/scala/com/microsoft/azure/powerbi/extensions/PairedDStreamExtensions.scala | Scala | apache-2.0 | 2,777 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.io.{IOException, File, FileOutputStream, RandomAccessFile}
import java.nio.ByteBuffer
import java.nio.channels.FileChannel.MapMode
import org.apache.spark.Logging
import org.apache.spark.serializer.Serializer
import org.apache.spark.util.Utils
/**
* Stores BlockManager blocks on disk.
* 当MemoryStore没有足够空间时,就会使用DiskStore将块存入磁盘
*/
/**
 * Stores BlockManager blocks on disk.
 *
 * Used when the MemoryStore does not have enough room: blocks are spilled to
 * files obtained from the DiskBlockManager.
 */
private[spark] class DiskStore(blockManager: BlockManager, diskManager: DiskBlockManager)
  extends BlockStore(blockManager) with Logging {

  // Size threshold (bytes) above which reads are memory-mapped instead of being
  // copied into a heap ByteBuffer; configurable, defaults to 2 MB.
  val minMemoryMapBytes = blockManager.conf.getSizeAsBytes("spark.storage.memoryMapThreshold", "2m")

  /** Returns the on-disk size (in bytes) of the file backing the given block. */
  override def getSize(blockId: BlockId): Long = {
    diskManager.getFile(blockId.name).length // length of the backing file
  }

  /**
   * Writes the given byte buffer to the file associated with `blockId`.
   *
   * The caller's buffer is not modified: a duplicate (shared content,
   * independent position/limit) is drained instead.
   */
  override def putBytes(blockId: BlockId, _bytes: ByteBuffer, level: StorageLevel): PutResult = {
    // So that we do not modify the input offsets !
    // duplicate does not copy buffer, so inexpensive
    val bytes = _bytes.duplicate()
    logDebug(s"Attempting to put block $blockId")
    val startTime = System.currentTimeMillis // start of the write
    val file = diskManager.getFile(blockId) // resolves (creating if needed) the backing file
    // NIO channel used to drain the ByteBuffer into the file.
    val channel = new FileOutputStream(file).getChannel
    Utils.tryWithSafeFinally {
      // write() may not consume the whole buffer in one call, hence the loop.
      while (bytes.remaining > 0) {
        channel.write(bytes)
      }
    } {
      channel.close()
    }
    val finishTime = System.currentTimeMillis // end of the write
    logDebug("Block %s stored as %s file on disk in %d ms".format(
      file.getName, Utils.bytesToString(bytes.limit), finishTime - startTime))
    // Hand back a fresh duplicate so the returned buffer has its own position/limit.
    PutResult(bytes.limit(), Right(bytes.duplicate()))
  }

  /**
   * Serializes the given array and stores it in the file associated with
   * `blockId` (delegates to putIterator).
   */
  override def putArray(
      blockId: BlockId,
      values: Array[Any],
      level: StorageLevel,
      returnValues: Boolean): PutResult = {
    putIterator(blockId, values.toIterator, level, returnValues)
  }

  /**
   * Serializes the given iterator and writes it to the file associated with
   * `blockId`. On failure the partially written file is deleted before the
   * error is rethrown.
   */
  override def putIterator(
      blockId: BlockId,
      values: Iterator[Any],
      level: StorageLevel,
      returnValues: Boolean): PutResult = {
    logDebug(s"Attempting to write values for block $blockId")
    val startTime = System.currentTimeMillis
    // Resolve the backing file for this block.
    val file = diskManager.getFile(blockId)
    val outputStream = new FileOutputStream(file)
    try {
      Utils.tryWithSafeFinally {
        // Serialize (and possibly compress) the values directly into the stream.
        blockManager.dataSerializeStream(blockId, outputStream, values)
      } {
        // Close outputStream here because it should be closed before file is deleted.
        outputStream.close()
      }
    } catch {
      case e: Throwable =>
        // Remove the partially written file so a corrupt block is never left behind.
        if (file.exists()) {
          file.delete()
        }
        throw e
    }
    val length = file.length
    // Total time spent writing (for the debug log below).
    val timeTaken = System.currentTimeMillis - startTime
    logDebug("Block %s stored as %s file on disk in %d ms".format(
      file.getName, Utils.bytesToString(length), timeTaken))
    if (returnValues) {
      // Return a byte buffer for the contents of the file
      val buffer = getBytes(blockId).get
      PutResult(length, Right(buffer))
    } else {
      // Caller only needs the length.
      PutResult(length, null)
    }
  }

  /**
   * Reads `length` bytes starting at `offset` from the given file.
   *
   * Reads smaller than `minMemoryMapBytes` are copied into a heap buffer;
   * larger ones are memory-mapped (read-only) to avoid the copy.
   */
  private def getBytes(file: File, offset: Long, length: Long): Option[ByteBuffer] = {
    val channel = new RandomAccessFile(file, "r").getChannel
    Utils.tryWithSafeFinally {
      // For small files, directly read rather than memory map
      if (length < minMemoryMapBytes) {
        // Allocate a heap buffer and fill it from the channel. read() returns -1
        // at end-of-file, which here means the requested range is not available.
        val buf = ByteBuffer.allocate(length.toInt)
        channel.position(offset)
        while (buf.remaining() != 0) {
          if (channel.read(buf) == -1) {
            throw new IOException("Reached EOF before filling buffer\\n" +
              s"offset=$offset\\nfile=${file.getAbsolutePath}\\nbuf.remaining=${buf.remaining}")
          }
        }
        buf.flip() // flip so the caller can read from position 0
        Some(buf)
      } else {
        Some(channel.map(MapMode.READ_ONLY, offset, length))
      }
    } {
      channel.close()
    }
  }

  /** Reads the whole on-disk content associated with `blockId`. */
  override def getBytes(blockId: BlockId): Option[ByteBuffer] = {
    val file = diskManager.getFile(blockId.name)
    getBytes(file, 0, file.length)
  }

  /** Reads the bytes described by a FileSegment (file handle, offset, length). */
  def getBytes(segment: FileSegment): Option[ByteBuffer] = {
    getBytes(segment.file, segment.offset, segment.length)
  }

  /** Reads the block's bytes and deserializes them into an iterator of values. */
  override def getValues(blockId: BlockId): Option[Iterator[Any]] = {
    getBytes(blockId).map(buffer => blockManager.dataDeserialize(blockId, buffer))
  }

  /**
   * A version of getValues that allows a custom serializer. This is used as part of the
   * shuffle short-circuit code.
   */
  def getValues(blockId: BlockId, serializer: Serializer): Option[Iterator[Any]] = {
    // TODO: Should bypass getBytes and use a stream based implementation, so that
    // we won't use a lot of memory during e.g. external sort merge.
    getBytes(blockId).map(bytes => blockManager.dataDeserialize(blockId, bytes, serializer))
  }

  /**
   * Deletes the file backing `blockId`. Returns the result of the delete, or
   * false when no such file exists.
   */
  override def remove(blockId: BlockId): Boolean = {
    val file = diskManager.getFile(blockId.name)
    // If consolidation mode is used With HashShuffleMananger, the physical filename for the block
    // is different from blockId.name. So the file returns here will not be exist, thus we avoid to
    // delete the whole consolidated file by mistake.
    if (file.exists()) {
      file.delete()
    } else {
      false
    }
  }

  /** Whether a file for `blockId` currently exists on disk. */
  override def contains(blockId: BlockId): Boolean = {
    val file = diskManager.getFile(blockId.name)
    file.exists()
  }
}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/storage/DiskStore.scala | Scala | apache-2.0 | 9,071 |
/*
Copyright (c) 2017-2021, Robby, Kansas State University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sireum
/**
 * Common interface of the fixed-width floating-point wrappers (F32, F64).
 * Widths are reported via the accessors below; see the concrete classes for
 * the exact bit-counting convention used.
 */
private[sireum] sealed trait FloatingPoint extends Any with Number {
  def BitWidth: Z            // total width of the representation in bits
  def SignificandBitWidth: Z // significand width (including the implicit/hidden bit)
  def ExponentBitWidth: Z    // exponent width under this library's convention
}
/** Companion of F32: boxing support, string parsing and implicit conversion from scala.Float. */
object F32 {

  /** Boxer used by the runtime to pack/unpack F32 values into primitive float arrays. */
  object Boxer extends $internal.Boxer {
    def box[T](o: scala.Any): T = o match {
      case o: scala.Float => F32(o).asInstanceOf[T]
    }

    def unbox(o: scala.Any): scala.Float = o match {
      case o: F32 => o.value
    }

    override def copyMut(src: AnyRef, srcPos: Z, dest: AnyRef, destPos: Z, length: Z): Unit =
      copy(src, srcPos, dest, destPos, length)

    override def create(length: Z): scala.AnyRef = new Array[scala.Float](length)

    override def lookup[T](a: scala.AnyRef, i: Z): T = a match {
      case a: Array[scala.Float] => box(a(i))
    }

    override def store(a: scala.AnyRef, i: Z, v: scala.Any): Unit = a match {
      case a: Array[scala.Float] => a(i) = unbox(v)
    }
  }

  /** Parses a decimal string into an F32; returns None() on any parse failure. */
  def apply(s: String): Option[F32] = try Some($String(s.value)) catch {
    case _: Throwable => None[F32]()
  }

  // String-based constructor/extractor used by the parser above.
  object $String {
    def apply(s: Predef.String): F32 = s.toFloat
    def unapply(n: F32): scala.Option[Predef.String] = scala.Some(n.toString)
  }

  // A fresh java.util.Random per call; intended for test-data generation.
  def random: F32 = new _root_.java.util.Random().nextFloat

  def unapply(f: F32): scala.Option[scala.Float] = scala.Some(f.value)

  import scala.language.implicitConversions

  // Zero-cost implicit wrap of a primitive float.
  implicit def apply(f: scala.Float): F32 = new F32(f)
}
/**
 * 32-bit floating-point value (wraps scala.Float; allocation-free via AnyVal).
 *
 * Bit-width convention here: BitWidth == 1 (sign) + ExponentBitWidth +
 * SignificandBitWidth, with the significand counted including the hidden bit
 * (1 + 7 + 24 == 32). NOTE(review): IEEE 754 binary32 is usually described as
 * an 8-bit exponent with a 24-bit significand — confirm the 7 below is the
 * intended library convention.
 */
final class F32(val value: scala.Float) extends AnyVal with FloatingPoint with $internal.HasBoxer {
  def BitWidth: Z = 32
  def SignificandBitWidth: Z = 24
  def ExponentBitWidth: Z = 7

  def unary_- : F32 = -value

  // Unwraps to the underlying primitive.
  def native: scala.Float = value

  @inline def <(other: F32): B = value < other.value
  @inline def <=(other: F32): B = value <= other.value
  @inline def >(other: F32): B = value > other.value
  @inline def >=(other: F32): B = value >= other.value
  // Primitive float equality: NaN === NaN is false (IEEE semantics).
  @inline def ===(other: F32): B = value == other.value
  @inline def =!=(other: F32): B = value != other.value
  @inline def +(other: F32): F32 = value + other.value
  @inline def -(other: F32): F32 = value - other.value
  @inline def *(other: F32): F32 = value * other.value
  @inline def /(other: F32): F32 = value / other.value
  @inline def %(other: F32): F32 = value % other.value

  def string: String = toString

  def boxer: $internal.Boxer = F32.Boxer

  override def toString: _root_.java.lang.String = _root_.java.lang.Float.toString(value)
}
/** Companion of F64: boxing support, string parsing and implicit conversion from scala.Double. */
object F64 {

  /** Boxer used by the runtime to pack/unpack F64 values into primitive double arrays. */
  object Boxer extends $internal.Boxer {
    def box[T](o: scala.Any): T = o match {
      case o: scala.Double => F64(o).asInstanceOf[T]
    }

    def unbox(o: scala.Any): scala.Double = o match {
      case o: F64 => o.value
    }

    override def copyMut(src: AnyRef, srcPos: Z, dest: AnyRef, destPos: Z, length: Z): Unit =
      copy(src, srcPos, dest, destPos, length)

    override def create(length: Z): scala.AnyRef = new Array[scala.Double](length)

    override def lookup[T](a: scala.AnyRef, i: Z): T = a match {
      case a: Array[scala.Double] => box(a(i))
    }

    override def store(a: scala.AnyRef, i: Z, v: scala.Any): Unit = a match {
      case a: Array[scala.Double] => a(i) = unbox(v)
    }
  }

  /** Parses a decimal string into an F64; returns None() on any parse failure. */
  def apply(s: String): Option[F64] = try Some($String(s.value)) catch {
    case _: Throwable => None[F64]()
  }

  // String-based constructor/extractor used by the parser above.
  object $String {
    def apply(s: Predef.String): F64 = s.toDouble
    def unapply(n: F64): scala.Option[Predef.String] = scala.Some(n.toString)
  }

  // A fresh java.util.Random per call; intended for test-data generation.
  def random: F64 = new _root_.java.util.Random().nextDouble

  def unapply(d: F64): scala.Option[scala.Double] = scala.Some(d.value)

  import scala.language.implicitConversions

  // Zero-cost implicit wrap of a primitive double.
  implicit def apply(d: scala.Double): F64 = new F64(d)
}
/**
 * 64-bit floating-point value (wraps scala.Double; allocation-free via AnyVal).
 *
 * Same bit-counting convention as F32: BitWidth == 1 (sign) +
 * ExponentBitWidth + SignificandBitWidth, significand including the hidden bit
 * (1 + 10 + 53 == 64). NOTE(review): IEEE 754 binary64 is usually described as
 * an 11-bit exponent with a 53-bit significand — confirm the 10 below is the
 * intended library convention.
 */
final class F64(val value: scala.Double) extends AnyVal with FloatingPoint with $internal.HasBoxer {
  def BitWidth: Z = 64
  def SignificandBitWidth: Z = 53
  def ExponentBitWidth: Z = 10

  def unary_- : F64 = -value

  // Unwraps to the underlying primitive.
  def native: scala.Double = value

  @inline def <(other: F64): B = value < other.value
  @inline def <=(other: F64): B = value <= other.value
  @inline def >(other: F64): B = value > other.value
  @inline def >=(other: F64): B = value >= other.value
  // Primitive double equality: NaN === NaN is false (IEEE semantics).
  @inline def ===(other: F64): B = value == other.value
  @inline def =!=(other: F64): B = value != other.value
  @inline def +(other: F64): F64 = value + other.value
  @inline def -(other: F64): F64 = value - other.value
  @inline def *(other: F64): F64 = value * other.value
  @inline def /(other: F64): F64 = value / other.value
  @inline def %(other: F64): F64 = value % other.value

  def string: String = toString

  def boxer: $internal.Boxer = F64.Boxer

  override def toString: _root_.java.lang.String = _root_.java.lang.Double.toString(value)
}
| sireum/v3-logika-runtime | library/shared/src/main/scala/org/sireum/FloatingPoint.scala | Scala | bsd-2-clause | 6,076 |
package feh.tec.visual
import nicol.{Game => NGame, _}
import feh.tec.visual.api.{AppBasicControlApi, AgentApp}
import feh.tec.util._
import nicol.input.Key._
import org.lwjgl.opengl.Display
/** An application backed by a Nicol game loop. */
trait NicolBasedApp {
  def game: NGame // the underlying Nicol game instance
}
/**
 * Basic start/stop control for a Nicol-backed agent application.
 * Concrete apps provide the initial and base scenes plus the game factory.
 */
trait NicolBasedAgentAppBasicControl extends AgentApp with NicolBasedApp with AppBasicControlApi{
  // Not implemented: invoking this throws scala.NotImplementedError (???).
  def prepareDrawEnvironment(ops: DrawSettings): Unit = ???

  protected def pauseEndApi: PauseEndAppInnerApi
  // Factory for the underlying Nicol game, given the (by-name) initial scene.
  protected def newGame(scene: => Scene/*, pauseScene: Scene, endScene: Scene*/): NGame
  def appExecutionFinished(): Boolean

  // Built lazily so the concrete app's initScene/baseScene are available first;
  // the game runs initScene then transitions to baseScene.
  lazy val game: NGame = newGame(initScene >> baseScene)
  def initScene: Scene
  def baseScene: Scene

  protected def render(): Unit = render(layout)

  /** Marks the app as running and starts the Nicol game loop. */
  def start(): Unit = {
    isRunning = true
    game.start
  }

  /** Marks the app as stopped and stops the Nicol game loop. */
  def stop(): Unit = {
    isRunning = false
    game.stop
  }

  // Mutable flag tracking whether start() has been called (and stop() has not).
  var isRunning = false
}
/**
 * Generic main-loop scene: each frame it synchronises the frame rate, shows
 * FPS, renders, and then transitions — in priority order — to the terminated
 * scene, the exit scene (window close or escape), or the pause scene (space).
 */
class NicolLikeBasicScene(render: () => Unit, exitScene: Lifted[Scene], terminatedScene: Lifted[Scene], terminated_? : () => Boolean, pauseScene: Scene => Scene)
                         (implicit easel: NicolLike2DEasel)
  extends LoopScene with SyncableScene with ShowFPS
{
  def update: Option[Scene] = {
    sync
    showFPS
    render()
    // Termination check first, then OS window-close request, then keyboard input.
    if(terminated_?()) terminatedScene()
    else if(Display.isCloseRequested) exitScene()
    else keyEvent {
      e =>
        // Key releases are intentionally ignored.
        e released {
          case _ =>
        }
        e pressed {
          case "escape" => exitScene()
          case "space" => pauseScene(this) // pause, remembering this scene to resume into
        }
    }
  }
}
package hello
// Positive compiler test: exercises intersection types (`&`), structural
// refinement types and path-dependent abstract type members across nested
// "package" objects. The code must simply typecheck and run.
object world extends App {
  println("hello dotty!")

  // Abstract Animal bounded by the structural type { val age: Int }.
  trait AnimalPackage {
    type Animal <: AnimalU
    type AnimalU = { val age: Int }
    def newAnimal(a: AnimalU): Animal
    def newSubAnimal[T](a: AnimalU & T): Animal & T
  }
  val p: AnimalPackage = new AnimalPackage { p =>
    type Animal = AnimalU
    override def newAnimal(a: AnimalU): Animal = a
    override def newSubAnimal[T](a: AnimalU & T): Animal & T = a
  }
  val lambda: p.Animal = p.newAnimal(new { val age = 1 })

  // Cats refine p.Animal with an extra structural member `meow`.
  trait CatPackage { pc =>
    type Cat <: p.Animal & pc.CatDelta
    type CatDelta = { val meow: Int }
    type CatU = p.AnimalU & pc.CatDelta
    def newCat(c: CatU): Cat
    def newSubCat[T](c: CatU & T): Cat & T
  }
  val pc: CatPackage = new CatPackage { pc =>
    type Cat = p.Animal & pc.CatDelta
    def newCat(c: CatU): Cat = p.newSubAnimal[pc.CatDelta](c)
    def newSubCat[T](c: CatU & T): Cat & T = p.newSubAnimal[pc.CatDelta & T](c)
  }
  val felix: pc.Cat = pc.newCat(new { val age = 1; val meow = 2 })
}
| densh/dotty | tests/pos/i1047.scala | Scala | bsd-3-clause | 1,023 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.builder.query
import com.websudos.diesel.engine.query.AbstractQuery
/**
 * Immutable wrapper around a CQL query string; `toString` yields the raw
 * query text. `create` is the factory hook required by AbstractQuery.
 */
case class CQLQuery(override val queryString: String) extends AbstractQuery[CQLQuery](queryString) {
  def create(str: String): CQLQuery = CQLQuery(str)
  override def toString: String = queryString
}
object CQLQuery {

  /** An empty query, useful as a seed for query builders. */
  def empty: CQLQuery = CQLQuery("")

  /**
   * Wraps `str` in single quotes, doubling any embedded single quote
   * (standard CQL string-literal escaping).
   */
  def escape(str: String): String = s"'${str.replaceAll("'", "''")}'"

  /** Builds a query by joining the given tokens with a comma separator. */
  def apply(collection: TraversableOnce[String]): CQLQuery = CQLQuery(collection.mkString(", "))
}
| levinson/phantom | phantom-dsl/src/main/scala/com/websudos/phantom/builder/query/CQLQuery.scala | Scala | bsd-2-clause | 2,027 |
import sbt._
import Keys._
import play.Project._
/** sbt build definition for a Play 2.1 Java application skeleton. */
object ApplicationBuild extends Build {

  // Placeholder substituted when the skeleton is instantiated into a new app.
  val appName = "%APPLICATION_NAME%"
  val appVersion = "1.0-SNAPSHOT"

  val appDependencies = Seq(
    // Add your project dependencies here,
    javaCore,
    javaJdbc,
    javaEbean
  )

  val main = play.Project(appName, appVersion, appDependencies).settings(
    // Add your own project settings here
  )
}
| noel-yap/setter-for-catan | play-2.1.1/framework/skeletons/java-skel/project/Build.scala | Scala | apache-2.0 | 424 |
package org.jetbrains.plugins.scala.lang.psi.impl.statements
import com.intellij.lang.ASTNode
import com.intellij.openapi.progress.ProgressManager
import com.intellij.psi._
import com.intellij.psi.scope._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.stubs.ScFunctionStub
import org.jetbrains.plugins.scala.lang.psi.types.{Any, ScType}
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypeResult, TypingContext}
/**
* @author Jason Zaugg
*/
/**
 * PSI implementation of a Scala macro definition (`def m = macro impl`).
 *
 * @author Jason Zaugg
 */
class ScMacroDefinitionImpl extends ScFunctionImpl with ScMacroDefinition {
  // AST-backed constructor.
  def this(node: ASTNode) = {
    this(); setNode(node)
  }

  // Stub-backed constructor (used when indexing without parsing bodies).
  def this(stub: ScFunctionStub) = {
    this(); setStub(stub); setNullNode()
  }

  /**
   * Makes this definition's parameters (including synthetic ones) visible to
   * name resolution inside the macro body, but not inside default-parameter
   * expressions (those are handled by ScParameters).
   */
  override def processDeclarations(processor: PsiScopeProcessor,
                                   state: ResolveState,
                                   lastParent: PsiElement,
                                   place: PsiElement): Boolean = {
    //process function's parameters for dependent method types, and process type parameters
    if (!super[ScFunctionImpl].processDeclarations(processor, state, lastParent, place)) return false
    //do not process parameters for default parameters, only for function body
    //processing parameters for default parameters in ScParameters
    val parameterIncludingSynthetic: Seq[ScParameter] = effectiveParameterClauses.flatMap(_.parameters)
    if (getStub == null) {
      // AST-backed path: only offer parameters when resolving from within the body.
      body match {
        case Some(x)
          if lastParent != null &&
            (!needCheckProcessingDeclarationsForBody ||
              x.getStartOffsetInParent == lastParent.getStartOffsetInParent) =>
          for (p <- parameterIncludingSynthetic) {
            ProgressManager.checkCanceled()
            if (!processor.execute(p, state)) return false
          }
        case _ =>
      }
    } else {
      // Stub-backed path.
      if (lastParent != null && lastParent.getContext != lastParent.getParent) {
        for (p <- parameterIncludingSynthetic) {
          ProgressManager.checkCanceled()
          if (!processor.execute(p, state)) return false
        }
      }
    }
    true
  }

  protected def needCheckProcessingDeclarationsForBody = true

  override def toString: String = "ScMacroDefinition: " + name

  // Explicit return type wins; otherwise fall back to the hard-coded stub below.
  def returnTypeInner: TypeResult[ScType] = returnTypeElement match {
    case None => Success(doGetType(), Some(this)) // TODO look up type from the macro impl.
    case Some(rte: ScTypeElement) => rte.getType(TypingContext.empty)
  }

  /** The macro body expression, read from the stub when available. */
  def body: Option[ScExpression] = {
    val stub = getStub
    if (stub != null) stub.asInstanceOf[ScFunctionStub].getBodyExpression else findChild(classOf[ScExpression])
  }

  // Macro definitions always use `=` syntax.
  override def hasAssign: Boolean = true

  override def accept(visitor: ScalaElementVisitor) {
    visitor.visitMacroDefinition(this)
  }

  override def getType(ctx: TypingContext): TypeResult[ScType] = {
    super.getType(ctx)
  }

  // Hard-coded fallback type: a macro named "doMacro" gets (Int, String),
  // everything else Any. NOTE(review): appears to be a test hook — confirm.
  def doGetType() = {
    name match {
      case "doMacro" =>
        ScalaPsiElementFactory.createTypeElementFromText("(Int, String)", getManager).getType().get
      case _ => Any
    }
  }

  override def accept(visitor: PsiElementVisitor) {
    visitor match {
      case s: ScalaElementVisitor => s.visitMacroDefinition(this)
      case _ => super.accept(visitor)
    }
  }
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/statements/ScMacroDefinitionImpl.scala | Scala | apache-2.0 | 3,646 |
package com.phaller.rasync
package bench
import scala.concurrent.Promise
import scala.annotation.tailrec
import org.scalameter.api._
import org.scalameter.picklers.noPickler._
/**
 * ScalaMeter micro-benchmarks measuring the cost of creating, completing and
 * bulk-completing ("refining") scala.concurrent Promises.
 */
object FuturesAndPromisesBenchmarks extends PerformanceTest.Microbenchmark {

  /* configuration */
  override def executor = LocalExecutor(
    new Executor.Warmer.Default,
    Aggregator.min,
    new Measurer.Default)
  override def reporter = new LoggingReporter
  override def persistor = Persistor.None

  // Number of promises handled per benchmark iteration.
  val nrOfPromises = 100000
  val size = Gen.single("Number Of Promises")(nrOfPromises)

  /* creation of promises */
  performance of "Promises" in {
    measure method "creating" in {
      using(size) config (
        exec.benchRuns -> 9) in {
        r => for (i <- 1 to r) Promise[Int]()
      }
    }
  }

  /* creation and completion of futures */
  performance of "Promises" in {
    measure method "creating and completing" in {
      using(size) config (
        exec.benchRuns -> 9) in {
        r =>
          for (i <- 1 to r) {
            val p = Promise[Int]()
            p.success(1)
          }
      }
    }
  }

  /* refinement of promises */
  performance of "Promises" in {
    measure method "refinement" in {
      using(Gen.unit(s"$nrOfPromises promises")) config (
        exec.benchRuns -> 9) in {
        // Parameter renamed from `Unit` (which shadowed the Unit type) to `_`.
        _ =>
          {
            var i = 0
            val promises = createListPromises(nrOfPromises, List.empty)
            for (p <- promises) {
              i = i + 1
              p.success(i)
            }
          }
      }
    }
  }

  /**
   * Prepends exactly `amount` fresh promises onto `promises`.
   *
   * Fixes an off-by-one in the previous version: it allocated a promise before
   * testing `amount == 0` and returned it in the base case, producing
   * `amount + 1` promises — so the refinement benchmark completed 100001
   * promises instead of the advertised 100000.
   *
   * @param amount   number of promises to create (non-positive leaves `promises` unchanged)
   * @param promises accumulator list
   */
  @tailrec
  def createListPromises(amount: Int, promises: List[Promise[Int]]): List[Promise[Int]] =
    if (amount <= 0) promises
    else createListPromises(amount - 1, Promise[Int]() :: promises)
}
| phaller/reactive-async | bench/src/test/scala/com/phaller/rasync/bench/fpbenchmarks.scala | Scala | bsd-2-clause | 1,824 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File
import sbt._
import sbt.Keys._
import sbtsparkpackage.SparkPackagePlugin.autoImport._
/**
 * Root sbt build for the Spark Cassandra Connector: wires together the
 * connector, the embedded test harness, the demo projects and a standalone
 * Cassandra-server project used by the integration tests.
 */
object CassandraSparkBuild extends Build {
  import Settings._
  import sbtassembly.AssemblyPlugin
  import Versions.scalaBinary
  import sbtsparkpackage.SparkPackagePlugin

  val namespace = "spark-cassandra-connector"
  val demosPath = file(s"$namespace-demos")

  // Aggregates all sub-projects; assembly/spark-package plugins disabled at the root.
  lazy val root = RootProject(
    name = "root",
    dir = file("."),
    settings = rootSettings ++ Seq(cassandraServerClasspath := { "" }),
    contains = Seq(embedded, connector, demos)
  ).disablePlugins(AssemblyPlugin, SparkPackagePlugin)

  // Exposes the full IntegrationTest classpath as a path string so the
  // integration tests can spawn a real Cassandra server process.
  lazy val cassandraServerProject = Project(
    id = "cassandra-server",
    base = file("cassandra-server"),
    settings = defaultSettings ++ Seq(
      libraryDependencies ++= Seq(Artifacts.cassandraServer % "it", Artifacts.airlift),
      cassandraServerClasspath := {
        (fullClasspath in IntegrationTest).value.map(_.data.getAbsoluteFile).mkString(File.pathSeparator)
      }
    )
  ) configs IntegrationTest

  // Embedded Cassandra/Kafka/Spark helpers shared by tests and demos.
  lazy val embedded = CrossScalaVersionsProject(
    name = s"$namespace-embedded",
    conf = defaultSettings ++ Seq(
      libraryDependencies
        ++= Dependencies.embedded ++ Seq(
        "org.scala-lang" % "scala-reflect" % scalaVersion.value,
        "org.scala-lang" % "scala-compiler" % scalaVersion.value))
  ).disablePlugins(AssemblyPlugin, SparkPackagePlugin) configs IntegrationTest

  // The connector itself; depends on `embedded` for test scopes only.
  lazy val connector = CrossScalaVersionsProject(
    name = namespace,
    conf = assembledSettings ++ Seq(libraryDependencies ++= Dependencies.connector ++ Seq(
      "org.scala-lang" % "scala-reflect" % scalaVersion.value,
      "org.scala-lang" % "scala-compiler" % scalaVersion.value % "test,it")) ++ pureCassandraSettings
  ).copy(dependencies = Seq(embedded % "test->test;it->it,test;")
  ) configs IntegrationTest

  lazy val demos = RootProject(
    name = "demos",
    dir = demosPath,
    contains = Seq(simpleDemos/*, kafkaStreaming*/, twitterStreaming)
  ).disablePlugins(AssemblyPlugin, SparkPackagePlugin)

  lazy val simpleDemos = Project(
    id = "simple-demos",
    base = demosPath / "simple-demos",
    settings = demoSettings,
    dependencies = Seq(connector, embedded)
  ).disablePlugins(AssemblyPlugin, SparkPackagePlugin)

  /*
  lazy val kafkaStreaming = CrossScalaVersionsProject(
    name = "kafka-streaming",
    conf = demoSettings ++ kafkaDemoSettings ++ Seq(
      libraryDependencies ++= (CrossVersion.partialVersion(scalaVersion.value) match {
        case Some((2, minor)) if minor < 11 => Dependencies.kafka
        case _ => Seq.empty
      }))).copy(base = demosPath / "kafka-streaming", dependencies = Seq(connector, embedded))
  */

  lazy val twitterStreaming = Project(
    id = "twitter-streaming",
    base = demosPath / "twitter-streaming",
    settings = demoSettings ++ Seq(libraryDependencies ++= Dependencies.twitter),
    dependencies = Seq(connector, embedded)
  ).disablePlugins(AssemblyPlugin, SparkPackagePlugin)

  lazy val refDoc = Project(
    id = s"$namespace-doc",
    base = file(s"$namespace-doc"),
    settings = defaultSettings ++ Seq(libraryDependencies ++= Dependencies.spark)
  ) dependsOn connector

  // Per-Scala-binary-version source directory, e.g. <base>/scala-2.11/src.
  def crossBuildPath(base: sbt.File, v: String): sbt.File = base / s"scala-$v" / "src"

  /* templates */
  // Project template that adds version-specific source directories to all
  // relevant source-path settings.
  def CrossScalaVersionsProject(name: String,
                                conf: Seq[Def.Setting[_]],
                                reliesOn: Seq[ClasspathDep[ProjectReference]] = Seq.empty) =
    Project(id = name, base = file(name), dependencies = reliesOn, settings = conf ++ Seq(
      unmanagedSourceDirectories in (Compile, packageBin) +=
        crossBuildPath(baseDirectory.value, scalaBinaryVersion.value),
      unmanagedSourceDirectories in (Compile, doc) +=
        crossBuildPath(baseDirectory.value, scalaBinaryVersion.value),
      unmanagedSourceDirectories in Compile +=
        crossBuildPath(baseDirectory.value, scalaBinaryVersion.value)
    ))

  // Aggregating-project template with the shared parent settings applied.
  def RootProject(
    name: String,
    dir: sbt.File, settings: =>
      scala.Seq[sbt.Def.Setting[_]] = Seq.empty,
    contains: Seq[ProjectReference]): Project =
    Project(
      id = name,
      base = dir,
      settings = parentSettings ++ settings,
      aggregate = contains)
}
/** Artifact coordinates and dependency-exclusion helpers for the build. */
object Artifacts {
  import Versions._

  // Combinators on ModuleID that strip conflicting transitive dependencies.
  implicit class Exclude(module: ModuleID) {
    def guavaExclude: ModuleID =
      module exclude("com.google.guava", "guava")

    def sparkExclusions: ModuleID = module.guavaExclude
      .exclude("org.apache.spark", s"spark-core_$scalaBinary")

    def logbackExclude: ModuleID = module
      .exclude("ch.qos.logback", "logback-classic")
      .exclude("ch.qos.logback", "logback-core")

    def replExclusions: ModuleID = module.guavaExclude
      .exclude("org.apache.spark", s"spark-bagel_$scalaBinary")
      .exclude("org.apache.spark", s"spark-mllib_$scalaBinary")
      .exclude("org.scala-lang", "scala-compiler")

    def kafkaExclusions: ModuleID = module
      .exclude("org.slf4j", "slf4j-simple")
      .exclude("com.sun.jmx", "jmxri")
      .exclude("com.sun.jdmk", "jmxtools")
      .exclude("net.sf.jopt-simple", "jopt-simple")
  }

  val akkaActor           = "com.typesafe.akka"       %% "akka-actor"            % Akka           % "provided"  // ApacheV2
  val akkaRemote          = "com.typesafe.akka"       %% "akka-remote"           % Akka           % "provided"  // ApacheV2
  val akkaSlf4j           = "com.typesafe.akka"       %% "akka-slf4j"            % Akka           % "provided"  // ApacheV2
  val cassandraClient     = "org.apache.cassandra"    % "cassandra-clientutil"   % Cassandra guavaExclude       // ApacheV2
  val cassandraDriver     = "com.datastax.cassandra"  % "cassandra-driver-core"  % CassandraDriver guavaExclude // ApacheV2
  val commonsLang3        = "org.apache.commons"      % "commons-lang3"          % CommonsLang3                 // ApacheV2
  val config              = "com.typesafe"            % "config"                 % Config         % "provided"  // ApacheV2
  val guava               = "com.google.guava"        % "guava"                  % Guava
  val jodaC               = "org.joda"                % "joda-convert"           % JodaC
  val jodaT               = "joda-time"               % "joda-time"              % JodaT
  val lzf                 = "com.ning"                % "compress-lzf"           % Lzf            % "provided"
  val slf4jApi            = "org.slf4j"               % "slf4j-api"              % Slf4j          % "provided"  // MIT
  val jsr166e             = "com.twitter"             % "jsr166e"                % JSR166e                      // Creative Commons
  val airlift             = "io.airlift"              % "airline"                % Airlift

  /* To allow spark artifact inclusion in the demos at runtime, we set 'provided' below. */
  val sparkCore           = "org.apache.spark"        %% "spark-core"            % Spark guavaExclude           // ApacheV2
  val sparkRepl           = "org.apache.spark"        %% "spark-repl"            % Spark guavaExclude           // ApacheV2
  val sparkUnsafe         = "org.apache.spark"        %% "spark-unsafe"          % Spark guavaExclude           // ApacheV2
  val sparkStreaming      = "org.apache.spark"        %% "spark-streaming"       % Spark guavaExclude           // ApacheV2
  val sparkSql            = "org.apache.spark"        %% "spark-sql"             % Spark sparkExclusions        // ApacheV2
  val sparkCatalyst       = "org.apache.spark"        %% "spark-catalyst"        % Spark sparkExclusions        // ApacheV2
  val sparkHive           = "org.apache.spark"        %% "spark-hive"            % Spark sparkExclusions        // ApacheV2

  val cassandraServer     = "org.apache.cassandra"    % "cassandra-all"          % Settings.cassandraTestVersion logbackExclude // ApacheV2

  // Metrics reporting (optional at runtime, hence "provided").
  object Metrics {
    val metricsCore       = "com.codahale.metrics"    % "metrics-core"           % CodaHaleMetrics % "provided"
    val metricsJson       = "com.codahale.metrics"    % "metrics-json"           % CodaHaleMetrics % "provided"
  }

  // Jetty, as shipped with Spark (hence "provided").
  object Jetty {
    val jettyServer       = "org.eclipse.jetty"       % "jetty-server"           % SparkJetty % "provided"
    val jettyServlet      = "org.eclipse.jetty"       % "jetty-servlet"          % SparkJetty % "provided"
  }

  // Dependencies of the embedded test harness.
  object Embedded {
    val akkaCluster       = "com.typesafe.akka"       %% "akka-cluster"          % Akka                         // ApacheV2
    val jopt              = "net.sf.jopt-simple"      % "jopt-simple"            % JOpt
    val kafka             = "org.apache.kafka"        %% "kafka"                 % Kafka kafkaExclusions        // ApacheV2
    val sparkRepl         = "org.apache.spark"        %% "spark-repl"            % Spark % "provided" replExclusions // ApacheV2
    val snappy            = "org.xerial.snappy"       % "snappy-java"            % "1.1.1.7"
  }

  // Dependencies used only by the demo projects.
  object Demos {
    val kafka             = "org.apache.kafka"        % "kafka_2.10"             % Kafka kafkaExclusions                    // ApacheV2
    val kafkaStreaming    = "org.apache.spark"        % "spark-streaming-kafka_2.10" % Spark % "provided" sparkExclusions   // ApacheV2
    val twitterStreaming  = "org.apache.spark"        %% "spark-streaming-twitter" % Spark % "provided" sparkExclusions     // ApacheV2
  }

  // Test-scoped dependencies shared across modules.
  object Test {
    val akkaTestKit       = "com.typesafe.akka"       %% "akka-testkit"          % Akka      % "test,it"        // ApacheV2
    val commonsIO         = "commons-io"              % "commons-io"             % CommonsIO % "test,it"        // ApacheV2
    val scalaCheck        = "org.scalacheck"          %% "scalacheck"            % ScalaCheck % "test,it"       // BSD
    val scalaMock         = "org.scalamock"           %% "scalamock-scalatest-support" % ScalaMock % "test,it"  // BSD
    val scalaTest         = "org.scalatest"           %% "scalatest"             % ScalaTest % "test,it"        // ApacheV2
    val scalactic         = "org.scalactic"           %% "scalactic"             % Scalactic % "test,it"        // ApacheV2
    val sparkCoreT        = "org.apache.spark"        %% "spark-core"            % Spark     % "test,it" classifier "tests"
    val sparkStreamingT   = "org.apache.spark"        %% "spark-streaming"       % Spark     % "test,it" classifier "tests"
    val mockito           = "org.mockito"             % "mockito-all"            % "1.10.19" % "test,it"        // MIT
    val junit             = "junit"                   % "junit"                  % "4.11"    % "test,it"
    val junitInterface    = "com.novocode"            % "junit-interface"        % "0.10"    % "test,it"
    val powerMock         = "org.powermock"           % "powermock-module-junit4" % "1.6.2"  % "test,it"        // ApacheV2
    val powerMockMockito  = "org.powermock"           % "powermock-api-mockito"  % "1.6.2"   % "test,it"        // ApacheV2
  }
}
/** Dependency groupings consumed by the project definitions in this build. */
object Dependencies {
  import BuildUtil._
  import Artifacts._

  val logging = Seq(slf4jApi)

  val metrics = Seq(Metrics.metricsCore, Metrics.metricsJson)

  val jetty = Seq(Jetty.jettyServer, Jetty.jettyServlet)

  // Shared test/it-scoped stack.
  val testKit = Seq(
    sparkRepl % "test,it",
    Test.akkaTestKit,
    Test.commonsIO,
    Test.junit,
    Test.junitInterface,
    Test.scalaCheck,
    Test.scalaMock,
    Test.scalaTest,
    Test.scalactic,
    Test.sparkCoreT,
    Test.sparkStreamingT,
    Test.mockito,
    Test.powerMock,
    Test.powerMockMockito
  )

  val akka = Seq(akkaActor, akkaRemote, akkaSlf4j)

  val cassandra = Seq(cassandraClient, cassandraDriver)

  val spark = Seq(sparkCore, sparkStreaming, sparkSql, sparkCatalyst, sparkHive, sparkUnsafe)

  // Spark artifacts are marked "provided": user deployments supply their own Spark.
  val connector = testKit ++ metrics ++ jetty ++ logging ++ akka ++ cassandra ++ spark.map(_ % "provided") ++ Seq(
    commonsLang3, config, guava, jodaC, jodaT, lzf, jsr166e)

  val embedded = logging ++ spark ++ cassandra ++ Seq(
    cassandraServer % "it,test", Embedded.jopt, Embedded.sparkRepl, Embedded.kafka, Embedded.snappy, guava)

  val kafka = Seq(Demos.kafka, Demos.kafkaStreaming)

  val twitter = Seq(sparkStreaming, Demos.twitterStreaming)

  // External API documentation links used when generating scaladoc.
  val documentationMappings = Seq(
    DocumentationMapping(url(s"http://spark.apache.org/docs/${Versions.Spark}/api/scala/"),
      sparkCore, sparkStreaming, sparkSql, sparkCatalyst, sparkHive
    ),
    DocumentationMapping(url(s"http://doc.akka.io/api/akka/${Versions.Akka}/"),
      akkaActor, akkaRemote, akkaSlf4j
    )
  )
}
| maasg/spark-cassandra-connector | project/SparkCassandraConnectorBuild.scala | Scala | apache-2.0 | 13,412 |
package org.scaladebugger.api.lowlevel.exceptions
import org.scaladebugger.api.lowlevel.RequestInfo
import org.scaladebugger.api.lowlevel.requests.JDIRequestArgument
import ExceptionRequestInfo._
/**
 * Represents information about an exception request.
 *
 * @param requestId The id of the request
 * @param isPending Whether or not this request is pending (not on remote JVM)
 * @param className The full name of the exception class
 * @param notifyCaught Whether or not caught exceptions are reported
 * @param notifyUncaught Whether or not uncaught exceptions are reported
 * @param extraArguments The additional arguments provided to the exception
 *                       request (defaults to none)
 */
case class ExceptionRequestInfo(
requestId: String,
isPending: Boolean,
className: String,
notifyCaught: Boolean,
notifyUncaught: Boolean,
extraArguments: Seq[JDIRequestArgument] = Nil
) extends RequestInfo {
/**
 * Indicates whether or not this exception request was to report all
 * exceptions or a specific exception.
 *
 * @return True if all exceptions are intended to be reported with this
 * request, otherwise false
 */
// A catch-all request carries the sentinel value rather than a real JVM
// class name; see ExceptionRequestInfo.DefaultCatchallExceptionName.
def isCatchall: Boolean = className == DefaultCatchallExceptionName
}
object ExceptionRequestInfo {
/** Represents the standard name for a request to catch all exceptions. */
// Sentinel: angle brackets make this an impossible JVM class name, so it can
// never collide with a request for a real exception class.
val DefaultCatchallExceptionName = "<CATCHALL>"
}
| ensime/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/lowlevel/exceptions/ExceptionRequestInfo.scala | Scala | apache-2.0 | 1,355 |
package com.twitter.finatra.http.integration.startup
import com.twitter.finatra.http.internal.server.BaseHttpServer
import com.twitter.finatra.http.modules.ResponseBuilderModule
import com.twitter.finatra.http.test.EmbeddedHttpServer
import com.twitter.inject.Test
import com.twitter.util.Await
/**
 * Integration tests verifying that a bare [[BaseHttpServer]] starts up
 * cleanly, with and without the admin HTTP server.
 */
class BaseHttpServerStartupIntegrationTest extends Test {

  "BaseHttpServer startup" in {
    val server = new EmbeddedHttpServer(
      twitterServer = new BaseHttpServer {
        override val modules = Seq(ResponseBuilderModule)
      })
    // Close the embedded server even when start()/assertHealthy() throws;
    // otherwise a failing assertion leaks the server (and its bound ports)
    // into subsequent tests.
    try {
      server.start()
      server.assertHealthy()
    } finally {
      server.close()
    }
  }

  "BaseHttpServer startup with only an http external port and no admin port" in {
    val server = new EmbeddedHttpServer(
      twitterServer = new BaseHttpServer {
        // Disable the admin server so only the external HTTP port is bound.
        // (No assertHealthy() here: the health endpoint lives on the admin
        // port, which this configuration does not expose.)
        override val disableAdminHttpServer = true
        override val modules = Seq(ResponseBuilderModule)
      })
    try {
      server.start()
    } finally {
      server.close()
    }
  }
}
| joecwu/finatra | http/src/test/scala/com/twitter/finatra/http/integration/startup/BaseHttpServerStartupIntegrationTest.scala | Scala | apache-2.0 | 939 |
package com.twitter.finagle.stats
import org.scalatest.OneInstancePerTest
import org.scalatest.funsuite.AnyFunSuite
/**
 * Verifies that [[VerbosityAdjustingStatsReceiver]] rewrites the verbosity of
 * every metric created through it to the configured level (Debug here),
 * regardless of the verbosity requested by the caller.
 */
class VerbosityAdjustingStatsReceiverTest extends AnyFunSuite with OneInstancePerTest {

  // Backing receiver used to observe what verbosity each metric was
  // registered with.
  val inMemory = new InMemoryStatsReceiver()

  // Receiver under test: forces everything down to Verbosity.Debug.
  val verbose = new VerbosityAdjustingStatsReceiver(inMemory, Verbosity.Debug)

  test("adjusts the verbosity") {
    // Create one metric of each flavor (counter, stat, gauge), mixing
    // explicit and default verbosities, and going through a scope once.
    verbose.counter(Verbosity.Default, "foo")
    verbose.scope("foo").stat("bar")
    verbose.addGauge(Verbosity.Debug, "baz")(0f)

    // Every metric must have been recorded as Debug in the backing receiver.
    val expectedDebug = Seq(Seq("foo"), Seq("foo", "bar"), Seq("baz"))
    expectedDebug.foreach { name =>
      assert(inMemory.verbosity(name) == Verbosity.Debug)
    }
  }
}
| twitter/util | util-stats/src/test/scala/com/twitter/finagle/stats/VerbosityAdjustingStatsReceiverTest.scala | Scala | apache-2.0 | 698 |
package org.jetbrains.plugins.scala.codeInspection.collections
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
/**
* @author Nikolay.Tropin
*/
// Highlighting tests for `sameElements` calls involving unordered collections.
// START/END mark the span the inspection is expected to highlight; `check`
// asserts the highlight is produced, `checkTextHasNoErrors` asserts it is not.
// (Both helpers come from the OperationsOnCollection test base class —
// presumably; confirm against OperationsOnCollectionInspectionTest.)
class SameElementsUnsortedTest extends OperationsOnCollectionInspectionTest {
override val inspectionClass: Class[_ <: OperationOnCollectionInspection] = classOf[CorrespondsUnsortedInspection]
override def hint: String = InspectionBundle.message("sameElements.unsorted")
// At least one operand is unordered (Set/Map/Iterable): warn.
def testSeqSet(): Unit = {
check(s"Seq(1) ${START}sameElements$END Set(1)")
}
def testSetSet(): Unit = {
check(s"Set(1) ${START}sameElements$END Set(1)")
}
def testSeqMap(): Unit = {
check(s"Map(1) ${START}sameElements$END Seq(1)")
}
def testSeqIterable(): Unit = {
check(s"Seq(1) ${START}sameElements$END Iterable(1)")
}
def testSetSortedSet(): Unit = {
check(s"Set(1).${START}sameElements$END(scala.collection.SortedSet(1))")
}
// Both operands have a defined iteration order (sorted collections, arrays,
// iterators): no warning expected.
def testSeqSortedSet(): Unit = {
checkTextHasNoErrors("Seq(1).sameElements(scala.collection.SortedSet(1))")
}
def testSeqSortedMap(): Unit = {
checkTextHasNoErrors("Seq((1, 1)).sameElements(scala.collection.SortedMap(1 -> 1))")
}
def testSeqArray(): Unit = {
checkTextHasNoErrors("Seq(1).sameElements(Array(1))")
}
def testArrayArray(): Unit = {
checkTextHasNoErrors("Array(1).sameElements(Array(1))")
}
def testIterators(): Unit = {
checkTextHasNoErrors("Iterator(1).sameElements(Iterator(1))")
}
}
// Highlighting tests for `corresponds` calls involving unordered collections;
// same inspection as above, different message key.
class CorrespondsUnsortedTest extends OperationsOnCollectionInspectionTest {
override val inspectionClass: Class[_ <: OperationOnCollectionInspection] = classOf[CorrespondsUnsortedInspection]
override def hint: String = InspectionBundle.message("corresponds.unsorted")
// Iterator compared against an unordered Set: warn.
def testCorrespondsSet(): Unit = {
check(s"Iterator(1).${START}corresponds$END(Set(1))((x, y) => true)")
}
def testCorrespondsSortedSet(): Unit = {
checkTextHasNoErrors("Iterator(1).corresponds(scala.collection.SortedSet(1))((x, y) => true)")
}
def testCorrespondsArray(): Unit = {
checkTextHasNoErrors("Iterator(1).corresponds(Array(1))((x, y) => true)")
}
def testSeqCorrespondsSeq(): Unit = {
checkTextHasNoErrors("Seq(1).corresponds(Seq(1))((x, y) => true)")
}
// NOTE(review): this body is byte-identical to testSeqCorrespondsSeq above —
// it looks like a copy-paste slip; the Set variant
// ("Seq(1).corresponds(Set(1))...") was presumably intended. Confirm before
// changing, since the fixed test may assert a different inspection outcome.
def testSeqCorrespondsSet(): Unit = {
checkTextHasNoErrors("Seq(1).corresponds(Seq(1))((x, y) => true)")
}
}
| LPTK/intellij-scala | test/org/jetbrains/plugins/scala/codeInspection/collections/SameElementsUnsortedTest.scala | Scala | apache-2.0 | 2,357 |
package spark
import scala.collection.mutable.HashMap
// A partition ("split") of a BlockRDD: identified by the BlockManager block id
// it reads from, plus its positional index within the RDD.
class BlockRDDSplit(val blockId: String, idx: Int) extends Split {
val index = idx
}
// An RDD whose partitions are backed by blocks already stored in the
// BlockManager — one partition per entry of `blockIds`.
class BlockRDD[T: ClassManifest](sc: SparkContext, blockIds: Array[String]) extends RDD[T](sc) {
// One split per block id, indexed by position. @transient: rebuilt on each
// node instead of being serialized with the RDD.
@transient
val splits_ = (0 until blockIds.size).map(i => {
new BlockRDDSplit(blockIds(i), i).asInstanceOf[Split]
}).toArray
// Lazily resolved map from block id to the hosts holding that block,
// obtained with a single batched BlockManager lookup.
@transient
lazy val locations_ = {
val blockManager = SparkEnv.get.blockManager
/*val locations = blockIds.map(id => blockManager.getLocations(id))*/
val locations = blockManager.getLocations(blockIds)
HashMap(blockIds.zip(locations):_*)
}
override def splits = splits_
// Computing a partition = fetching its block from the BlockManager. Fails
// hard when the block is missing (e.g. evicted or never stored).
override def compute(split: Split): Iterator[T] = {
val blockManager = SparkEnv.get.blockManager
val blockId = split.asInstanceOf[BlockRDDSplit].blockId
blockManager.get(blockId) match {
case Some(block) => block.asInstanceOf[Iterator[T]]
case None =>
throw new Exception("Could not compute split, block " + blockId + " not found")
}
}
// Prefer scheduling each partition on a host that already holds its block.
override def preferredLocations(split: Split) =
locations_(split.asInstanceOf[BlockRDDSplit].blockId)
// Block data is a root input: no parent RDD dependencies.
override val dependencies: List[Dependency[_]] = Nil
}
| ankurdave/arthur | core/src/main/scala/spark/BlockRDD.scala | Scala | bsd-3-clause | 1,229 |
// Derived from Scalaz - http://scalaz.googlecode.com/svn/continuous/latest/browse.sxr/scalaz/PartialApplys.scala.html
// Compiler regression test fixture (pos/t5075): these shapes exist purely so
// the expressions in Test below exercise type-constructor inference. Do not
// "clean up" — the exact forms are what is being tested.
// Partially applies a binary type constructor T to its first type argument.
trait PartialApply1Of2[T[_, _], A] { type Apply[B] = T[A, B] }
trait HKT[D[_]]
trait HKTBounded[C[X] <: Traversable[X], D[_]]
trait Cov[+T]
// Each call below must compile: the point is that the compiler can infer the
// partially applied type constructor without the explicit type argument.
class Test {
// exercise type constructor inference in different ways
implicit def m[D[_]](t: HKT[D]): Int = 1
def mCov[D[_]](t: Cov[HKT[D]]): Any = ???
def mBounded[C[X] <: Traversable[X], D[_]](t: Cov[HKTBounded[C, D]]): Any = ???
val param: HKT[PartialApply1Of2[Tuple2, Int]#Apply] = ???
m[PartialApply1Of2[Tuple2, Int]#Apply](param): Int // Already compiled
m(param) // Compiles now
param: Int // Compiles now
// Same inference, with the HKT nested under a covariant constructor.
val paramCov: Cov[HKT[PartialApply1Of2[Tuple2, Int]#Apply]] = ???
mCov[PartialApply1Of2[Tuple2, Int]#Apply](paramCov)
mCov(paramCov)
// And with an additional bounded type constructor to infer simultaneously.
val paramBounded: Cov[HKTBounded[Traversable, PartialApply1Of2[Tuple2, Int]#Apply]] = ???
mBounded[Traversable, PartialApply1Of2[Tuple2, Int]#Apply](paramBounded)
mBounded(paramBounded)
}
| lrytz/scala | test/files/pos/t5075.scala | Scala | apache-2.0 | 1,057 |
package com.taig.tmpltr.engine.html
import com.taig.tmpltr._
import play.api.templates.Txt
// The HTML <textarea> element: pre-rendered Txt content plus an attribute
// list. The auxiliary constructor folds an optional `name` attribute into the
// attribute list.
class textarea( val attributes: Attributes, val content: Txt )
extends markup.textarea
with Tag.Body[textarea, Txt]
{
def this( name: Option[String], attributes: Attributes, content: Txt ) =
{
// `~` merges the ("name" -> name) pair into the existing attributes.
this( attributes ~ ( "name" -> name ), content )
}
}
// Companion factory: builds a <textarea> with no name attribute and no extra
// attributes by default; content is supplied in a second parameter list.
object textarea
extends Tag.Body.Appliable[textarea, Txt]
{
def apply( name: Option[String] = None, attributes: Attributes = Attributes.empty )( content: Txt ) =
{
new textarea( name, attributes, content )
}
} | Taig/Play-Tmpltr | app/com/taig/tmpltr/engine/html/textarea.scala | Scala | mit | 560 |
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.invoker
import java.nio.charset.StandardCharsets
import org.junit.runner.RunWith
import org.scalatest.BeforeAndAfter
import org.scalatest.FlatSpec
import org.scalatest.Matchers
import org.scalatest.junit.JUnitRunner
import common.StreamLogging
import spray.json.pimpAny
import whisk.common.TransactionId
import whisk.core.entity.size._
import whisk.http.Messages
@RunWith(classOf[JUnitRunner])
/**
 * Tests for ActionLogDriver: parsing JSON log-driver output, sentinel
 * detection, and byte-accurate log truncation (including multi-byte UTF-8).
 */
class ActionLogDriverTests
extends FlatSpec
with BeforeAndAfter
with Matchers
with ActionLogDriver
with StreamLogging {
// Builds a raw log-driver payload: one compact JSON object per line. When
// addSentinel is set, the activation sentinel is appended on BOTH stdout and
// stderr, mimicking a sentinelled action runtime.
private def makeLogMsgs(lines: Seq[String], stream: String = "stdout", addSentinel: Boolean = true) = {
val msgs = if (addSentinel) {
lines.map((stream, _)) :+
("stdout", s"${ActionLogDriver.LOG_ACTIVATION_SENTINEL}") :+
("stderr", s"${ActionLogDriver.LOG_ACTIVATION_SENTINEL}")
} else {
lines.map((stream, _))
}
msgs.map(p => LogLine("", p._1, p._2).toJson.compactPrint)
.mkString("\\n")
}
// Expected processed output: formatted log lines, with empty entries dropped.
private def makeLogLines(lines: Seq[String], stream: String = "stdout") = {
lines.map(LogLine("", stream, _)).filter(_.log.nonEmpty).map(_.toFormattedString).toVector
}
behavior of "LogLine"
// dropRight truncates by BYTES, so a multi-byte snowflake may be cut
// mid-character, yielding a shorter (possibly invalid) UTF-8 string.
it should "truncate log line" in {
"❄".sizeInBytes shouldBe 3.B
Seq("abcdef", "❄ ☃ ❄").foreach { logline =>
// NOTE(review): `bytes` is never used below — candidate for removal.
val bytes = logline.sizeInBytes
LogLine("", "", logline).dropRight(0.B).log shouldBe logline
LogLine("", "", logline).dropRight(1.B).log shouldBe {
val truncated = logline.getBytes(StandardCharsets.UTF_8).dropRight(1)
new String(truncated, StandardCharsets.UTF_8)
}
}
}
behavior of "ActionLogDriver"
// Sanity-check the payload builder against a literal expected dump.
it should "mock container log drain" in {
makeLogMsgs(Seq("a", "b", "c")) shouldBe {
raw"""|{"time":"","stream":"stdout","log":"a"}
|{"time":"","stream":"stdout","log":"b"}
|{"time":"","stream":"stdout","log":"c"}
|{"time":"","stream":"stdout","log":"${ActionLogDriver.LOG_ACTIVATION_SENTINEL}"}
|{"time":"","stream":"stderr","log":"${ActionLogDriver.LOG_ACTIVATION_SENTINEL}"}""".stripMargin('|')
}
}
// Result tuples read as (isComplete, isTruncated, logLines) — presumably;
// confirm against processJsonDriverLogContents.
it should "handle empty logs" in {
implicit val tid = TransactionId.testing
processJsonDriverLogContents("", true, 0.B) shouldBe {
(false, false, Vector())
}
processJsonDriverLogContents("", false, 0.B) shouldBe {
(true, false, Vector())
}
}
// Payloads whose byte size equals the limit pass through untouched.
it should "not truncate logs within limit" in {
implicit val tid = TransactionId.testing
Seq(
(Seq("\\n"), 1),
(Seq("a"), 1),
(Seq("❄"), 3),
(Seq("", "a", "❄"), 4),
(Seq("abc\\n", "abc\\n"), 8))
.foreach {
case (msgs, l) =>
Seq(false).foreach { sentinel =>
processJsonDriverLogContents(makeLogMsgs(msgs, addSentinel = sentinel), sentinel, l.B) shouldBe {
(true, false, makeLogLines(msgs))
}
}
}
}
// When sentinels ARE present but the caller claims a non-sentinelled
// runtime, the sentinel lines count against the limit and force truncation.
it should "account for sentinels when logs are not from a sentinelled action runtime" in {
implicit val tid = TransactionId.testing
Seq(
(Seq(""), 0),
(Seq("\\n"), 1),
(Seq("a"), 1),
(Seq("❄"), 3),
(Seq("", "a", "❄"), 4),
(Seq("abc\\n", "abc\\n"), 8))
.foreach {
case (msgs, l) =>
processJsonDriverLogContents(makeLogMsgs(msgs, addSentinel = true), false, l.B) shouldBe {
(true, true, makeLogLines(msgs) ++ Vector(Messages.truncateLogs(l.B)))
}
}
}
// Oversized payloads are truncated at the byte limit (a cut-in-half
// snowflake surfaces as the replacement character) and a truncation notice
// is appended.
it should "truncate logs exceeding limit" in {
implicit val tid = TransactionId.testing
Seq(
(Seq("\\n"), Seq(), 0),
(Seq("a"), Seq(), 0),
(Seq("ab"), Seq("a"), 1),
(Seq("❄"), Seq("�"), 1),
(Seq("❄"), Seq("�"), 2),
(Seq("abc\\n", "abc\\n", "abc\\n"), Seq("abc\\n", "abc\\n"), 8))
.foreach {
case (msgs, exp, l) =>
Seq(true, false).foreach { sentinel =>
processJsonDriverLogContents(makeLogMsgs(msgs, addSentinel = sentinel), sentinel, l.B) shouldBe {
(!sentinel, true, makeLogLines(exp) ++ Vector(Messages.truncateLogs(l.B)))
}
}
}
}
}
| xin-cai/openwhisk | tests/src/test/scala/whisk/core/invoker/ActionLogDriverTests.scala | Scala | apache-2.0 | 5,317 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.core
import Predef.{ any2stringadd => _, _ }
import scala.reflect.internal.util.Position
import scala.reflect.internal.util.SourceFile
/**
 * Simulate methods that were added in later versions of the scalac
 * API, or to generate fake methods that we can use in both versions.
 */
trait PresentationCompilerBackCompat {
this: RichPresentationCompiler =>
// Back-compat shim: exposes newer Symbol API names on this compiler version
// by delegating to their older equivalents (isLocal / paramss).
implicit class RichSymbols(sym: Symbol) {
def isLocalToBlock: Boolean = sym.isLocal
def paramLists: List[List[Symbol]] = sym.paramss
}
}
trait PositionBackCompat {
// Back-compat helpers over Position for this compiler version.
implicit class RichPosition(pos: Position) {
// Single-argument withSource: delegates with a zero shift.
def withSource(src: SourceFile): Position =
pos.withSource(src, 0)
// Shift the position within its existing source.
def withShift(shift: Int): Position =
pos.withSource(pos.source, shift)
// I wish we could override `start` and `end`
// Fall back to the point (cursor) when no range start/end is available.
def startOrCursor: Int = pos.startOrPoint
def endOrCursor: Int = pos.endOrPoint
}
}
| VlachJosef/ensime-server | core/src/main/scala-2.10/org/ensime/core/PresentationCompilerBackCompat.scala | Scala | gpl-3.0 | 1,038 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar
import org.specs2.matcher._
// specs2 matchers for quasar.Condition values.
trait ConditionMatchers {
// Matches Condition.Abnormal whose payload satisfies `check`; built on
// specs2's option-like matcher over the `Condition.abnormal` optic.
def beAbnormal[A](check: ValueCheck[A]): Matcher[Condition[A]] =
new OptionLikeCheckedMatcher[Condition, A, A](
"Abnormal",
Condition.abnormal[A].getOption(_),
check)
// Matches the Normal case; there is no payload to inspect.
def beNormal[A]: Matcher[Condition[A]] =
new Matcher[Condition[A]] {
def apply[S <: Condition[A]](value: Expectable[S]) =
result(Condition.normal[A].nonEmpty(value.value),
value.description + " is Normal",
value.description + " is not Normal",
value)
}
}
| slamdata/slamengine | foundation/src/test/scala/quasar/ConditionMatchers.scala | Scala | apache-2.0 | 1,192 |
package com.linkedin.norbert
package network
package common
import logging.Logging
import norbertutils._
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import jmx.JMX.MBean
import cluster.Node
import java.util.{UUID, Map => JMap}
import netty.HealthScoreCalculator
import jmx.{JMX, RequestTimeTracker}
import netty.ClientStatisticsRequestStrategy
object CachedNetworkStatistics {
// Factory: a tracker over a sliding `timeWindow`, with derived statistics
// cached and refreshed at most once per `refreshInterval`.
def apply[GroupIdType, RequestIdType](clock: Clock, timeWindow: Long, refreshInterval: Long): CachedNetworkStatistics[GroupIdType, RequestIdType] = {
new CachedNetworkStatistics(NetworkStatisticsTracker(clock, timeWindow), clock, refreshInterval)
}
}
/**
 * Caches the result of an expensive computation `fn`, recomputing it at most
 * once per `ttl` (in the units of `clock.getCurrentTimeMilliseconds`). Reads
 * are cheap: while one thread refreshes, all other callers keep seeing the
 * previous value.
 */
case class CacheMaintainer[T](clock: Clock, ttl: Long, fn: () => T) {
  // Latch ensuring at most one thread runs a refresh at a time.
  val refreshing = new AtomicBoolean(false)
  val lastUpdateTime = new AtomicLong(0)

  // Most recently computed value; null until the first refresh completes.
  @volatile var item: T = _

  private def refresh {
    val lut = lastUpdateTime.get
    val now = clock.getCurrentTimeMilliseconds
    if (item == null || now - lut > ttl) {
      // Let one thread pass through to update the calculation
      if (refreshing.compareAndSet(false, true)) {
        lastUpdateTime.set(now)
        try {
          refresh0
        } finally {
          // BUGFIX: release the latch even when fn() throws. Previously an
          // exception out of refresh0 left `refreshing` stuck at true,
          // permanently disabling refreshes of this entry.
          refreshing.set(false)
        }
      }
    }
  }

  private def refresh0 {
    item = fn()
  }

  /**
   * Returns the cached value, triggering a (possibly concurrent) refresh when
   * stale or missing. None only before the first successful refresh.
   */
  def get: Option[T] = {
    refresh
    Option(item)
  }
}
// Facade over NetworkStatisticsTracker that memoizes every derived view
// behind a CacheMaintainer so JMX/strategy reads don't recompute percentiles
// on every call.
class CachedNetworkStatistics[GroupIdType, RequestIdType](private val stats: NetworkStatisticsTracker[GroupIdType, RequestIdType], clock: Clock, refreshInterval: Long) {
// Each cached view recomputes from the tracker at most once per
// refreshInterval.
val finishedArray = CacheMaintainer(clock, refreshInterval, () => stats.getFinishedArrays)
val timings = CacheMaintainer(clock, refreshInterval, () => stats.getTimings)
val netty = CacheMaintainer(clock, refreshInterval, () => stats.getNettyTimings)
val pendingTimings = CacheMaintainer(clock, refreshInterval, () => stats.getPendingTimings)
val totalRequests = CacheMaintainer(clock, refreshInterval, () => stats.getTotalRequests )
val finishedQueueTimings = CacheMaintainer(clock, refreshInterval, () => stats.getQueueTimings)
val responseTimings = CacheMaintainer(clock, refreshInterval, () => stats.getResponseTimings)
// Running byte counters; not windowed, only reset explicitly.
val numRequestBytes = new AtomicLong
val numResponseBytes = new AtomicLong
def beginRequest(groupId: GroupIdType, requestId: RequestIdType, queueTime: Long) {
stats.beginRequest(groupId, requestId, queueTime)
}
def beginNetty(groupId: GroupIdType, requestId: RequestIdType, queueTime: Long) {
stats.beginNetty(groupId, requestId, queueTime)
}
def endNetty( groupId: GroupIdType, requestId: RequestIdType) {
stats.endNetty(groupId, requestId)
}
def endRequest(groupId: GroupIdType, requestId: RequestIdType) {
stats.endRequest(groupId, requestId)
}
def increaseNumRequestBytes(bytes: Long): Unit = {
numRequestBytes.addAndGet(bytes)
}
def increaseNumResponseBytes(bytes: Long): Unit = {
numResponseBytes.addAndGet(bytes)
}
def reset {
stats.reset
numRequestBytes.set(0)
numResponseBytes.set(0)
}
// Per-group summary at percentile p: (percentile value, sample count, sum).
private def calculate(map: Map[GroupIdType, Array[Long]], p: Double) = {
map.mapValues { v =>
StatsEntry(calculatePercentile(v, p), v.length, v.sum)
//StatsEntry(calculatePercentile(v,p), v.length, 0)
}
}
// One cached JoinedStatistics snapshot per requested percentile.
val statisticsCache =
new java.util.concurrent.ConcurrentHashMap[Double, CacheMaintainer[JoinedStatistics[GroupIdType]]]
def getStatistics(p: Double) = {
atomicCreateIfAbsent(statisticsCache, p) { k =>
CacheMaintainer(clock, refreshInterval, () => {
JoinedStatistics(
finished = timings.get.map(calculate(_, p)).getOrElse(Map.empty),
nettyTimings = netty.get.map(calculate(_, p)).getOrElse(Map.empty),
pending = pendingTimings.get.map(calculate(_, p)).getOrElse(Map.empty),
totalRequests = () => totalRequests.get.getOrElse(Map.empty),
rps = () => finishedArray.get.map(_.mapValues(rps(_))).getOrElse(Map.empty),
requestQueueSize = () => finishedArray.get.map(_.mapValues(_.length)).getOrElse(Map.empty),
finishedQueueTime = finishedQueueTimings.get.map(calculate(_, p)).getOrElse(Map.empty),
finishedResponse = responseTimings.get.map(calculate(_, p)).getOrElse(Map.empty))
})
}.get
}
// Requests per second: counts finished entries whose completion time falls
// within the last second (1,000,000 µs), located via binary search on the
// time-ordered array.
private def rps(data: Array[(Long, Long)]): Int = {
val now = clock.getCurrentTimeOffsetMicroseconds
implicit val timeOrdering: Ordering[(Long, Long)] = new Ordering[(Long, Long)] {
def compare(x: (Long, Long), y: (Long, Long)) = (x._1 - y._1).asInstanceOf[Int]
}
val bs = binarySearch(data, (now - 1000000L, 0L))
val idx = if (bs < 0) -bs - 1 else bs
data.size - idx
}
}
// Per-group aggregate at one percentile: the percentile value, sample count,
// and sum of all samples.
case class StatsEntry(percentile: Double, size: Int, total: Long)
// Snapshot of all statistics for one percentile. The `() =>` members are
// recomputed on access rather than captured when the snapshot was built.
case class JoinedStatistics[K](finished: Map[K, StatsEntry],
nettyTimings: Map[K, StatsEntry],
pending: Map[K, StatsEntry],
rps: () => Map[K, Int],
totalRequests: () => Map[K, Int],
requestQueueSize: () => Map[K, Int],
finishedQueueTime: Map[K, StatsEntry],
finishedResponse: Map[K, StatsEntry])
// Raw per-group request-time bookkeeping: one RequestTimeTracker per group id,
// created lazily and atomically.
private case class NetworkStatisticsTracker[GroupIdType, RequestIdType](clock: Clock, timeWindow: Long) extends Logging {
private var timeTrackers: java.util.concurrent.ConcurrentMap[GroupIdType, RequestTimeTracker[RequestIdType]] =
new java.util.concurrent.ConcurrentHashMap[GroupIdType, RequestTimeTracker[RequestIdType]]
private def getTracker(groupId: GroupIdType) = {
atomicCreateIfAbsent(timeTrackers, groupId) { k => new RequestTimeTracker(clock, timeWindow) }
}
def beginRequest(groupId: GroupIdType, requestId: RequestIdType, queueTime:Long = 0) {
getTracker(groupId).beginRequest(requestId, queueTime)
}
def beginNetty(groupId: GroupIdType, requestId: RequestIdType, queueTime:Long = 0) {
getTracker(groupId).beginNetty(requestId, queueTime)
}
def endNetty(groupId: GroupIdType, requestId: RequestIdType) {
getTracker(groupId).endNetty(requestId)
}
def endRequest(groupId: GroupIdType, requestId: RequestIdType) {
getTracker(groupId).endRequest(requestId)
}
import scala.collection.JavaConversions._
def reset { timeTrackers.values.foreach(_.reset) }
// NOTE(review): the accessors below use `mapValues`, which in some Scala
// versions returns a non-strict view — values may be recomputed on every
// access of the returned map. Confirm this is acceptable for the callers
// (the CacheMaintainer layer caches the outer map, not the view's values).
def getPendingTimings = {
timeTrackers.toMap.mapValues( _.pendingRequestTimeTracker.getTimings)
}
def getTimings = {
getFinishedArrays.mapValues(array => array.map(_._2).sorted)
}
def getNettyTimings = {
getNettyArrays.mapValues(array => array.map(_._2).sorted)
}
def getFinishedArrays = {
timeTrackers.toMap.mapValues( _.finishedRequestTimeTracker.getArray)
}
def getNettyArrays = {
timeTrackers.toMap.mapValues( _.finishedNettyTimeTracker.getArray)
}
def getTotalRequests = timeTrackers.toMap.mapValues( _.pendingRequestTimeTracker.getTotalNumRequests )
//this does not need to be sorted since we are not doing 90th and 99th percentile
def getQueueTimings = {
timeTrackers.toMap.mapValues(_.queueTimeTracker.getTimings)
}
def getResponseTimings = {
getFinishedRequestTimeArrays.mapValues(array => array.map(_._2).sorted)
}
def getFinishedRequestTimeArrays = {
timeTrackers.toMap.mapValues( _.totalRequestProcessingTimeTracker.getArray)
}
}
// JMX interface for per-client network statistics. Maps are keyed by node id;
// "Cluster" accessors aggregate across all nodes.
trait NetworkClientStatisticsMBean {
// --- per-node views ---
def getNumPendingRequests: JMap[Int, Int]
def getMedianTimes: JMap[Int, Double]
def get75thTimes: JMap[Int, Double]
def get90thTimes: JMap[Int, Double]
def get95thTimes: JMap[Int, Double]
def get99thTimes: JMap[Int, Double]
def getHealthScoreTimings: JMap[Int, Double]
// Netty-level (wire) timings, same percentile breakdown.
def getNettyMedianTimes: JMap[Int, Double]
def getNetty75thTimes: JMap[Int, Double]
def getNetty90thTimes: JMap[Int, Double]
def getNetty95thTimes: JMap[Int, Double]
def getNetty99thTimes: JMap[Int, Double]
def getRPS: JMap[Int, Int]
def getTotalRequests: JMap[Int, Int]
// --- cluster-wide aggregates ---
def getClusterRPS: Int
def getClusterAverageTime: Double
def getClusterPendingTime: Double
def getClusterMedianTime: Double
def getCluster75thTimes: Double
def getCluster90th: Double
def getCluster95th: Double
def getCluster99th: Double
def getClusterHealthScoreTiming: Double
def getClusterNettyMedianTime: Double
def getClusterNetty75thTimes: Double
def getClusterNetty90th: Double
def getClusterNetty95th: Double
def getClusterNetty99th: Double
def getClusterTotalRequests: Int
def getNumResponseBytes: Long
def getNumRequestBytes: Long
def getNumResponseKBytes: Long
def getNumRequestKBytes: Long
def reset
// Jill will be very upset if I break her graphs
// Legacy aliases kept for existing dashboards; do not remove.
def getRequestsPerSecond = getClusterRPS
def getAverageRequestProcessingTime = getClusterAverageTime
def getQueueSize: Int
def getNodesMarkedDown: Long
def getNumReroutedRequests: Long
}
// JMX implementation backed by CachedNetworkStatistics; exposes per-node and
// cluster-wide request/netty timings, health scores and byte counters.
class NetworkClientStatisticsMBeanImpl(clientName: Option[String], serviceName: String, val stats: CachedNetworkStatistics[Node, UUID], strategy: ClientStatisticsRequestStrategy)
extends MBean(classOf[NetworkClientStatisticsMBean], JMX.name(clientName, serviceName)) with HealthScoreCalculator
with NetworkClientStatisticsMBean {
// Convenience extractors over the cached snapshot at percentile p; empty
// maps when the cache has not been populated yet.
private def getPendingStats(p: Double) = stats.getStatistics(p).map(_.pending).getOrElse(Map.empty)
private def getFinishedStats(p: Double) = stats.getStatistics(p).map(_.finished).getOrElse(Map.empty)
private def getNettyFinishedStats(p: Double) = stats.getStatistics(p).map(_.nettyTimings).getOrElse(Map.empty)
// Divides by 1000 — raw timings are presumably microseconds (the tracker
// uses getCurrentTimeOffsetMicroseconds); confirm.
private def toMillis(statsMetric: Double):Double = statsMetric/1000
def getNumPendingRequests = toJMap(getPendingStats(0.5).map(kv => (kv._1.id, kv._2.size)))
def getMedianTimes =
toJMap(getFinishedStats(0.5).map(kv => (kv._1.id, toMillis(kv._2.percentile))))
def get75thTimes =
toJMap(getFinishedStats(0.75).map(kv => (kv._1.id, toMillis(kv._2.percentile))))
def get90thTimes =
toJMap(getFinishedStats(0.90).map(kv => (kv._1.id, toMillis(kv._2.percentile))))
def get95thTimes =
toJMap(getFinishedStats(0.95).map(kv => (kv._1.id, toMillis(kv._2.percentile))))
def get99thTimes =
toJMap(getFinishedStats(0.99).map(kv => (kv._1.id, toMillis(kv._2.percentile))))
def getNettyMedianTimes =
toJMap(getNettyFinishedStats(0.5).map(kv => (kv._1.id, toMillis(kv._2.percentile))))
def getNetty75thTimes =
toJMap(getNettyFinishedStats(0.75).map(kv => (kv._1.id, toMillis(kv._2.percentile))))
def getNetty90thTimes =
toJMap(getNettyFinishedStats(0.90).map(kv => (kv._1.id, toMillis(kv._2.percentile))))
def getNetty95thTimes =
toJMap(getNettyFinishedStats(0.95).map(kv => (kv._1.id, toMillis(kv._2.percentile))))
def getNetty99thTimes =
toJMap(getNettyFinishedStats(0.99).map(kv => (kv._1.id, toMillis(kv._2.percentile))))
// Per-node health score: combines that node's pending and finished stats via
// HealthScoreCalculator.doCalculation.
def getHealthScoreTimings = {
val s = stats.getStatistics(0.5)
val f = s.map(_.finished).getOrElse(Map.empty)
val p = s.map(_.pending).getOrElse(Map.empty)
toJMap(f.map { case (n, nodeN) =>
val nodeP = p.get(n).getOrElse(StatsEntry(0.0, 0, 0))
(n.id, toMillis(doCalculation(Map(0 -> nodeP),Map(0 -> nodeN))))
})
}
def getRPS = toJMap(stats.getStatistics(0.5).map(_.rps().map(kv => (kv._1.id, kv._2))))
def getTotalRequests = toJMap(stats.getStatistics(0.5).map(_.totalRequests().map(kv => (kv._1.id, kv._2))))
// Cluster average = (sum of all timings) / (total sample count).
def getClusterAverageTime = {
val s = getFinishedStats(0.5)
val total = s.values.map(_.total).sum
val size = s.values.map(_.size).sum
toMillis(safeDivide(total, size)(0.0))
}
def getClusterPendingTime = {
val s = getPendingStats(0.5)
toMillis(s.values.map(_.total).sum)
}
// Cluster percentiles are the average of the per-node percentile values.
def getClusterMedianTime = toMillis(averagePercentiles(getFinishedStats(0.5)))
def getCluster75thTimes = toMillis(averagePercentiles(getFinishedStats(0.75)))
def getCluster90th = toMillis(averagePercentiles(getFinishedStats(0.90)))
def getCluster95th = toMillis(averagePercentiles(getFinishedStats(0.95)))
def getCluster99th = toMillis(averagePercentiles(getFinishedStats(0.99)))
def getClusterNettyMedianTime = toMillis(averagePercentiles(getNettyFinishedStats(0.5)))
def getClusterNetty75thTimes = toMillis(averagePercentiles(getNettyFinishedStats(0.75)))
def getClusterNetty90th = toMillis(averagePercentiles(getNettyFinishedStats(0.90)))
def getClusterNetty95th = toMillis(averagePercentiles(getNettyFinishedStats(0.95)))
def getClusterNetty99th = toMillis(averagePercentiles(getNettyFinishedStats(0.99)))
import scala.collection.JavaConversions._
def getClusterRPS = {
getRPS.values.sum
}
def getClusterTotalRequests = getTotalRequests.values.sum
def getClusterHealthScoreTiming = toMillis(doCalculation(getPendingStats(0.5), getFinishedStats(0.5)))
def getQueueSize = stats.getStatistics(0.5).map(_.requestQueueSize().values.sum) getOrElse(0)
def getNodesMarkedDown = strategy.canServeRequests.get.getOrElse(Map.empty).count({ case (node, up) => up == false })
def getNumReroutedRequests = strategy.totalNumReroutes.get
def getNumResponseBytes = stats.numResponseBytes.get
def getNumRequestBytes = stats.numRequestBytes.get
def getNumResponseKBytes = (stats.numResponseBytes.get / 1024).toLong
def getNumRequestKBytes = (stats.numRequestBytes.get / 1024).toLong
def reset = stats.reset
}
| linkedin/norbert | network/src/main/scala/com/linkedin/norbert/network/common/NetworkStatisticsActor.scala | Scala | apache-2.0 | 13,338 |
package roc
package postgresql
package server
import cats.data.Validated._
import cats.data.{NonEmptyList, Validated}
import cats.Semigroup
import org.scalacheck.Prop.forAll
import org.specs2.{ScalaCheck, Specification}
import roc.postgresql.failures.PostgresqlMessageDecodingFailure
import roc.postgresql.server.ErrorNoticeMessageFields._
final class PostgresqlMessageSpec extends Specification with ScalaCheck { def is = s2"""
PostgresqlMessage
must extract the value of a tuple by the Code ${PE().testExtractValueByCode}
must return Right(UnknownMessage(ErrorParams)) when given unknown SQLSTATE Code ${PE().testUnknownMessage}
must return Right(SuccesfulMessage) when given a valid Succes Code ${PE().testSuccessfulMessage}
must return Right(WarningMessage(ErrorParams)) when given a Warning Code ${PE().testWarningMessages}
must return Right(ErrorMessage(ErrorParams)) when given an Error Code ${PE().testErrorMessages}
ValidatePacket
must return RequiredParams when fields are valid ${VP().testAllValid}
must return Invalid when Severity is not present ${VP().testInvalidSeverity}
must return Invalid when SQLSTATECODE is not present ${VP().testInvalidSqlStateCode}
must return Invalid when Message is not present ${VP().testInvalidMessage}
must return Invalid when SQLSTATE Code & Message are not present ${VP().testInvalidSqlStateCodeMessage}
must return Invalid when Severity & Message are not present ${VP().testInvalidSeverityMessage}
must return Invalid when Severity & SQLSTATE Code are not present ${VP().testInvalidSeveritySqlStateCode}
must return Invalid when Severity & SQLSTATE Code & Message are not present ${VP().testInvalidAll}
BuildParamsFromTuples
must return Right(ErrorParams) when given valid Fields ${BPFT().testValidFields}
must return Left(PostgresqlMessageDecodingFailure) when given invalid Fields ${BPFT().testInvalidFields}
must have correct Error Message when Severity is invalid ${BPFT().testSeverityMessage}
must have correct Error Message when SQLSTATECode is invalid ${BPFT().testSqlStateCodeMessage}
must have correct Error Message when Message is invalid ${BPFT().testMessageMessage}
must have correct Error Message when Severity and SQLSTATECode is invalid ${BPFT().testSeveritySqlStateCodeMessage}
must have correct Error Message when Severity and Message is invalid ${BPFT().testSeverityMessageMessage}
must have correct Error Message when SQLSTATECode and Message is invalid ${BPFT().testSqlStateCodeMessageMessage}
must have correct Error Message when no required fields are present ${BPFT().testNoRequiredFieldsFoundMessage}
ErrorParams
PostgresqlMessage must have correct Severity ${EP().testSeverity}
PostgresqlMessage must have correct Code ${EP().testCode}
PostgresqlMessage must have correct Message ${EP().testMessage}
PostgresqlMessage must have correct Detail ${EP().testDetail}
PostgresqlMessage must have correct Hint ${EP().testHint}
PostgresqlMessage must have correct Position ${EP().testPosition}
PostgresqlMessage must have correct InternalPosition ${EP().testInternalPosition}
PostgresqlMessage must have correct InternalQuery ${EP().testInternalQuery}
PostgresqlMessage must have correct Where ${EP().testWhere}
PostgresqlMessage must have correct SchemaName ${EP().testSchemaName}
PostgresqlMessage must have correct TableName ${EP().testTableName}
PostgresqlMessage must have correct ColumnName ${EP().testColumnName}
PostgresqlMessage must have correct DataTypeName ${EP().testDataTypeName}
PostgresqlMessage must have correct ConstraintName ${EP().testConstraintName}
PostgresqlMessage must have correct File ${EP().testFile}
PostgresqlMessage must have correct Line ${EP().testLine}
PostgresqlMessage must have correct Routine ${EP().testRoutine}
ErrorMessage
ErrorMessage must have correct message ${EM().testMessage}
"""
/** Property checks for constructing a `PostgresqlMessage` from decoded
  * error/notice fields. Each generator yields fields whose SQLSTATE code
  * class corresponds to the expected message variant.
  */
case class PE() extends ErrorNoticeGen {
  // extractValueByCode must behave exactly like a linear lookup of the code
  // in the (code, value) tuple list.
  val testExtractValueByCode = forAll { container: ExtractValueByCodeContainer =>
    val code = container.code
    val xs = container.xs
    val expected = xs.find(_._1 === code).map(_._2)
    PostgresqlMessage.extractValueByCode(code, xs) must_== expected
  }
  val testUnknownMessage = forAll(unknownErrorGen) { x: FieldsAndErrorParams =>
    PostgresqlMessage(x.fields) must_== Right(UnknownMessage(x.errorParams))
  }
  val testSuccessfulMessage = forAll(successfulMessageGen) { x: FieldsAndErrorParams =>
    PostgresqlMessage(x.fields) must_== Right(SuccessMessage(x.errorParams))
  }
  val testWarningMessages = forAll(warningMessageGen) { x: FieldsAndErrorParams =>
    PostgresqlMessage(x.fields) must_== Right(WarningMessage(x.errorParams))
  }
  val testErrorMessages = forAll(errorMessageGen) { x: FieldsAndErrorParams =>
    PostgresqlMessage(x.fields) must_== Right(ErrorMessage(x.errorParams))
  }
}
/** Property checks for `PostgresqlMessage.validatePacket`.
  *
  * Every test compares the production method against the reference
  * implementation `validatePacket` defined at the bottom of this class,
  * using generators that omit particular combinations of the three
  * required fields (Severity, SQLSTATE Code, Message).
  */
case class VP() extends ErrorNoticeGen {
  val testAllValid = forAll(validRequiredFieldsGen) { xs: Fields =>
    val severity = extractSeverity(xs)
    val code = extractCode(xs)
    val message = extractMessage(xs)
    val actual = PostgresqlMessage.validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    val expected = validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    actual must_== expected
  }
  val testInvalidSeverity = forAll(invalidSeverityFieldsGen) { xs: Fields =>
    val severity = extractSeverity(xs)
    val code = extractCode(xs)
    val message = extractMessage(xs)
    val actual = PostgresqlMessage.validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    val expected = validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    actual must_== expected
  }
  val testInvalidSqlStateCode = forAll(invalidSqlStateCodeFieldsGen) { xs: Fields =>
    val severity = extractSeverity(xs)
    val code = extractCode(xs)
    val message = extractMessage(xs)
    val actual = PostgresqlMessage.validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    val expected = validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    actual must_== expected
  }
  val testInvalidMessage = forAll(invalidMessageFieldsGen) { xs: Fields =>
    val severity = extractSeverity(xs)
    val code = extractCode(xs)
    val message = extractMessage(xs)
    val actual = PostgresqlMessage.validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    val expected = validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    actual must_== expected
  }
  val testInvalidSqlStateCodeMessage = forAll(invalidSqlStateCodeMessageFieldsGen) { xs: Fields =>
    val severity = extractSeverity(xs)
    val code = extractCode(xs)
    val message = extractMessage(xs)
    val actual = PostgresqlMessage.validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    val expected = validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    actual must_== expected
  }
  val testInvalidSeverityMessage = forAll(invalidSeverityMessageFieldsGen) { xs: Fields =>
    val severity = extractSeverity(xs)
    val code = extractCode(xs)
    val message = extractMessage(xs)
    val actual = PostgresqlMessage.validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    val expected = validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    actual must_== expected
  }
  val testInvalidSeveritySqlStateCode = forAll(invalidSeveritySqlStateCodeFieldsGen) { xs: Fields =>
    val severity = extractSeverity(xs)
    val code = extractCode(xs)
    val message = extractMessage(xs)
    val actual = PostgresqlMessage.validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    val expected = validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    actual must_== expected
  }
  val testInvalidAll = forAll(genOptionalFields) { xs: Fields =>
    val severity = extractSeverity(xs)
    val code = extractCode(xs)
    val message = extractMessage(xs)
    val actual = PostgresqlMessage.validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    val expected = validatePacket(severity.toValidatedNel, code.toValidatedNel,
      message.toValidatedNel)(RequiredParams.apply)
    actual must_== expected
  }

  // Lookup of the Severity field; Invalid carries the same error text the
  // production code is expected to produce.
  private def extractSeverity(xs: Fields): Validated[String, String] =
    xs.find(_._1 === Severity) match {
      case Some(x) => Valid(x._2)
      case None => Invalid("Required Severity Level was not present.")
    }
  // Lookup of the SQLSTATE Code field (qualified to avoid clashing with the
  // cats `Code` import).
  private def extractCode(xs: Fields): Validated[String, String] =
    xs.find(_._1 === ErrorNoticeMessageFields.Code) match {
      case Some(x) => Valid(x._2)
      case None => Invalid("Required SQLSTATE Code was not present.")
    }
  // Lookup of the Message field.
  private def extractMessage(xs: Fields): Validated[String, String] =
    xs.find(_._1 === Message) match {
      case Some(x) => Valid(x._2)
      case None => Invalid("Required Message was not present.")
    }
  // Reference (oracle) implementation: applies `f` when all three inputs are
  // Valid, otherwise accumulates every error via the Semigroup, mirroring
  // the error-accumulating behaviour expected of the production method.
  private def validatePacket[E : Semigroup, A, B, C, D](v1: Validated[E, A], v2: Validated[E, B],
    v3: Validated[E, C]) (f: (A, B, C) => D): Validated[E, D] = (v1, v2, v3) match {
    case (Valid(a), Valid(b), Valid(c)) => Valid(f(a,b,c))
    case (i@Invalid(_), Valid(_), Valid(_)) => i
    case (Valid(_), i@Invalid(_), Valid(_)) => i
    case (Valid(_), Valid(_), i@Invalid(_)) => i
    case (Invalid(e1), Invalid(e2), Valid(_)) => Invalid(Semigroup[E].combine(e1, e2))
    case (Invalid(e1), Valid(_), Invalid(e2)) => Invalid(Semigroup[E].combine(e1, e2))
    case (Valid(_), Invalid(e1), Invalid(e2)) => Invalid(Semigroup[E].combine(e1, e2))
    case (Invalid(e1), Invalid(e2), Invalid(e3)) =>
      Invalid(Semigroup[E].combine(e1, Semigroup[E].combine(e2, e3)))
  }
}
/** Checks for `PostgresqlMessage.buildParamsFromTuples`: valid field lists
  * produce a Right, and each combination of missing required fields produces
  * a Left(PostgresqlMessageDecodingFailure) carrying one error message per
  * missing field, in Severity / SQLSTATE Code / Message order.
  */
case class BPFT() extends ErrorNoticeGen {
  val testValidFields = forAll(validFieldsGen) { xs: Fields =>
    PostgresqlMessage.buildParamsFromTuples(xs).isRight must_== true
  }
  val testInvalidFields = forAll(invalidFieldsGen) { xs: Fields =>
    PostgresqlMessage.buildParamsFromTuples(xs).isLeft must_== true
  }
  // Severity missing.
  val testSeverityMessage = {
    val xs = List((ErrorNoticeMessageFields.Code, "Foo"), (Message, "Bar"))
    val actual = PostgresqlMessage.buildParamsFromTuples(xs)
    val nel = NonEmptyList.of("Required Severity Level was not present.")
    actual must_== Left(new PostgresqlMessageDecodingFailure(nel))
  }
  // SQLSTATE Code missing.
  val testSqlStateCodeMessage = {
    val xs = List((Severity, "Foo"), (Message, "Bar"))
    val actual = PostgresqlMessage.buildParamsFromTuples(xs)
    val nel = NonEmptyList.of("Required SQLSTATE Code was not present.")
    actual must_== Left(new PostgresqlMessageDecodingFailure(nel))
  }
  // Message missing.
  val testMessageMessage = {
    val xs = List((Severity, "Foo"), (ErrorNoticeMessageFields.Code, "Bar"))
    val actual = PostgresqlMessage.buildParamsFromTuples(xs)
    val nel = NonEmptyList.of("Required Message was not present.")
    actual must_== Left(new PostgresqlMessageDecodingFailure(nel))
  }
  // Severity and SQLSTATE Code missing.
  val testSeveritySqlStateCodeMessage = {
    val xs = List((Message, "Foo"))
    val actual = PostgresqlMessage.buildParamsFromTuples(xs)
    val nel = NonEmptyList.of("Required Severity Level was not present.",
      "Required SQLSTATE Code was not present.")
    actual must_== Left(new PostgresqlMessageDecodingFailure(nel))
  }
  // Severity and Message missing.
  val testSeverityMessageMessage = {
    val xs = List((ErrorNoticeMessageFields.Code, "Foo"))
    val actual = PostgresqlMessage.buildParamsFromTuples(xs)
    val nel = NonEmptyList.of("Required Severity Level was not present.",
      "Required Message was not present.")
    actual must_== Left(new PostgresqlMessageDecodingFailure(nel))
  }
  // SQLSTATE Code and Message missing.
  val testSqlStateCodeMessageMessage = {
    val xs = List((Severity, "Foo"))
    val actual = PostgresqlMessage.buildParamsFromTuples(xs)
    val nel = NonEmptyList.of("Required SQLSTATE Code was not present.",
      "Required Message was not present.")
    actual must_== Left(new PostgresqlMessageDecodingFailure(nel))
  }
  // All three required fields missing.
  val testNoRequiredFieldsFoundMessage = {
    val xs = List.empty[Field]
    val actual = PostgresqlMessage.buildParamsFromTuples(xs)
    val nel = NonEmptyList.of("Required Severity Level was not present.",
      "Required SQLSTATE Code was not present.",
      "Required Message was not present.")
    actual must_== Left(new PostgresqlMessageDecodingFailure(nel))
  }
}
/** Field pass-through checks: every `ErrorParams` field must be exposed
  * unchanged through the accessor of the same name on the message
  * (`UnknownMessage` is used as a concrete `PostgresqlMessage` instance).
  */
case class EP() extends ErrorNoticeGen {
  val testSeverity = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.severity must_== ep.severity
  }
  val testCode = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.code must_== ep.code
  }
  val testMessage = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.message must_== ep.message
  }
  val testDetail = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.detail must_== ep.detail
  }
  val testHint = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.hint must_== ep.hint
  }
  val testPosition = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.position must_== ep.position
  }
  val testInternalPosition = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.internalPosition must_== ep.internalPosition
  }
  val testInternalQuery = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.internalQuery must_== ep.internalQuery
  }
  val testWhere = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.where must_== ep.where
  }
  val testSchemaName = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.schemaName must_== ep.schemaName
  }
  val testTableName = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.tableName must_== ep.tableName
  }
  val testColumnName = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.columnName must_== ep.columnName
  }
  val testDataTypeName = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.dataTypeName must_== ep.dataTypeName
  }
  val testConstraintName = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.constraintName must_== ep.constraintName
  }
  val testFile = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.file must_== ep.file
  }
  val testLine = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.line must_== ep.line
  }
  val testRoutine = forAll { ep: ErrorParams =>
    val error = new UnknownMessage(ep)
    error.routine must_== ep.routine
  }
}
/** Rendering check: ErrorMessage.toString must equal the required
  * "severity - message. SQLSTATE: code." line followed by one
  * "Label: value\n" line per optional field that is present.
  */
case class EM() extends ErrorNoticeGen {
  val testMessage = forAll(errMsgAndRequiredFieldsGen) { x: ErrorMessageAndRequiredFields =>
    val xs = x.errorParams
    // NOTE(review): "Constaint Name" (sic) presumably mirrors a typo in the
    // production formatter — confirm and fix both sides together, not here.
    val ys = List(("Detail: ", xs.detail), ("Hint: ", xs.hint), ("Position: ", xs.position),
      ("Internal Position: ", xs.internalPosition), ("Internal Query: ", xs.internalQuery),
      ("Where: ", xs.where), ("Schema Name: ", xs.schemaName), ("Table Name: ", xs.tableName),
      ("Column Name: ", xs.columnName), ("Data Type Name: ", xs.dataTypeName), ("Constaint Name: ",
      xs.constraintName), ("File: ", xs.file), ("Line: ", xs.line), ("Routine: ", xs.routine))
    // Keep only populated optional fields, then fold them into label/value lines.
    val optString = ys.filter(_._2 != None)
      .map(x => (x._1, x._2.getOrElse("")))
      .foldLeft("")((x,y) => x + y._1 + y._2 + "\n")
    val requiredString = s"${xs.severity} - ${xs.message}. SQLSTATE: ${xs.code}."
    val expectedMessage = requiredString + "\n" + optString
    x.error.toString must_== expectedMessage
  }
}
}
| penland365/roc | core/src/test/scala/roc/postgresql/server/PostgresqlMessageSpec.scala | Scala | bsd-3-clause | 17,572 |
package com.seanshubin.iteration.tracker.core.json
import com.fasterxml.jackson.core.{JsonParser, JsonToken}
import com.seanshubin.iteration.tracker.core.json.JsonAbstractSyntaxTree._
/** Recursive-descent reader that turns a Jackson streaming [[JsonParser]]
  * into the immutable `JsonValue` AST defined in the companion object.
  *
  * Every `parse*` method advances the parser with `nextToken` and assumes
  * the parser is positioned just before the construct it consumes.
  */
class JsonAbstractSyntaxTree(parser: JsonParser) {
  /** Consumes and returns the next complete JSON value; fails on any token
    * that cannot start a value. */
  def parseValue(): JsonValue = {
    parser.nextToken match {
      case JsonToken.START_OBJECT =>
        val members = parseObjectMembers(Seq())
        JsonObject(members)
      case JsonToken.START_ARRAY =>
        val values = parseArrayValues(Seq())
        JsonArray(values)
      case JsonToken.VALUE_STRING => extractString()
      case JsonToken.VALUE_NUMBER_INT => extractWholeNumber()
      case JsonToken.VALUE_NUMBER_FLOAT => extractFloatNumber()
      case JsonToken.VALUE_TRUE => extractTrue()
      case JsonToken.VALUE_FALSE => extractFalse()
      case JsonToken.VALUE_NULL => extractNull()
      case x => unexpected(x)
    }
  }

  /** Accumulates (name, value) members until END_OBJECT. Mutually recursive
    * with parseValue, so recursion depth tracks JSON nesting depth. */
  def parseObjectMembers(soFar: Seq[JsonPair]): Seq[JsonPair] = {
    parser.nextToken match {
      case JsonToken.FIELD_NAME =>
        val fieldName = extractFieldName()
        parseObjectMembersAfterFieldName(soFar, fieldName)
      case JsonToken.END_OBJECT => soFar
      case x => unexpected(x)
    }
  }

  /** Parses the value belonging to `fieldName`, appends the pair, and
    * continues with the remaining members. */
  def parseObjectMembersAfterFieldName(soFar: Seq[JsonPair], fieldName: String): Seq[JsonPair] = {
    val value = parseValue()
    val pair = JsonPair(fieldName, value)
    parseObjectMembers(soFar :+ pair)
  }

  /** Accumulates array elements until END_ARRAY. The value cases mirror
    * parseValue but are inlined because the array loop must also see
    * END_ARRAY, which parseValue would reject. */
  def parseArrayValues(soFar: Seq[JsonValue]): Seq[JsonValue] = {
    parser.nextToken match {
      case JsonToken.START_OBJECT => parseArrayValues(soFar :+ JsonObject(parseObjectMembers(Seq())))
      case JsonToken.START_ARRAY => parseArrayValues(soFar :+ JsonArray(parseArrayValues(Seq())))
      case JsonToken.VALUE_STRING => parseArrayValues(soFar :+ extractString())
      case JsonToken.VALUE_NUMBER_INT => parseArrayValues(soFar :+ extractWholeNumber())
      case JsonToken.VALUE_NUMBER_FLOAT => parseArrayValues(soFar :+ extractFloatNumber())
      case JsonToken.VALUE_TRUE => parseArrayValues(soFar :+ extractTrue())
      case JsonToken.VALUE_FALSE => parseArrayValues(soFar :+ extractFalse())
      case JsonToken.VALUE_NULL => parseArrayValues(soFar :+ extractNull())
      case JsonToken.END_ARRAY => soFar
      case x => unexpected(x)
    }
  }

  // Leaf extractors: wrap the parser's current value in the matching AST node.
  def extractString() = JsonString(parser.getValueAsString)
  def extractWholeNumber() = JsonWholeNumber(parser.getValueAsLong)
  def extractFloatNumber() = JsonFloatNumber(parser.getValueAsDouble)
  def extractTrue() = JsonBoolean(value = true)
  def extractFalse() = JsonBoolean(value = false)
  def extractNull() = JsonNull
  def extractFieldName() = parser.getCurrentName

  /** Fails with a readable name for the offending token. */
  def unexpected(x: JsonToken): Nothing = {
    val tokenEnum = JsonTokenEnum.fromToken(x)
    val tokenName = tokenEnum.name
    val message = s"json token $tokenName not expected here"
    throw new RuntimeException(message)
  }
}
/** AST node types produced by [[JsonAbstractSyntaxTree]]. */
object JsonAbstractSyntaxTree {
  /** Base type for all JSON values. The `asX` conversions throw
    * UnsupportedOperationException unless the concrete node overrides them,
    * so callers must know (or check) the node type before converting. */
  trait JsonValue {
    def asString: String = throw new UnsupportedOperationException(s"Cannot convert ${this.getClass.getSimpleName} to a String")
    def asLong: Long = throw new UnsupportedOperationException(s"Cannot convert ${this.getClass.getSimpleName} to a Long")
    def asDouble: Double = throw new UnsupportedOperationException(s"Cannot convert ${this.getClass.getSimpleName} to a Double")
    def asBoolean: Boolean = throw new UnsupportedOperationException(s"Cannot convert ${this.getClass.getSimpleName} to a Boolean")
    // NOTE(review): message reads "to an Seq" — harmless grammar slip in a
    // runtime string, left unchanged here.
    def asSeq: Seq[JsonValue] = throw new UnsupportedOperationException(s"Cannot convert ${this.getClass.getSimpleName} to an Seq")
  }

  /** JSON string literal. */
  case class JsonString(value: String) extends JsonValue {
    override def asString: String = value
  }
  /** Integral JSON number, stored as Long. */
  case class JsonWholeNumber(value: Long) extends JsonValue {
    override def asLong: Long = value
  }
  /** Floating-point JSON number, stored as Double. */
  case class JsonFloatNumber(value: Double) extends JsonValue {
    override def asDouble: Double = value
  }
  /** JSON object as an ordered sequence of name/value pairs. */
  case class JsonObject(members: Seq[JsonPair]) extends JsonValue
  /** JSON array. */
  case class JsonArray(elements: Seq[JsonValue]) extends JsonValue {
    override def asSeq: Seq[JsonValue] = elements
  }
  /** JSON true/false. */
  case class JsonBoolean(value: Boolean) extends JsonValue {
    override def asBoolean: Boolean = value
  }
  /** JSON null literal. */
  case object JsonNull extends JsonValue
  /** A single object member: field name plus its value. */
  case class JsonPair(name: String, value: JsonValue)
}
| SeanShubin/iteration-tracker | core/src/main/scala/com/seanshubin/iteration/tracker/core/json/JsonAbstractSyntaxTree.scala | Scala | unlicense | 4,329 |
package com.megafrock
import com.megafrock.thread.HttpServerThread
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory
import scalikejdbc._
/** Application entry point: initialises the database, then runs the HTTP
  * server thread until it terminates.
  */
object Main {
  // Application-level logger.
  val log = Logger(LoggerFactory.getLogger(this.getClass.getName))

  /** Starts the HTTP server thread and blocks until it exits. */
  def main(args: Array[String]) = {
    initializeDb

    val httpServerThread = new HttpServerThread

    log.info("starting threads...")
    httpServerThread.start()
    httpServerThread.join()
  }

  /** Configures the global connection pool and applies the bundled schema.
    *
    * Credentials and host come from the environment, with development
    * defaults when the variables are unset.
    *
    * @throws RuntimeException if /valyx.sql is missing from the classpath
    */
  def initializeDb = {
    val rolePassword = sys.env.getOrElse("VALYX_MASTER_ROLE_PASSWORD", "test")
    val postgresIPV4 = sys.env.getOrElse("POSTGRES_IPV4", "172.25.3.3")

    ConnectionPool.singleton(
      s"jdbc:postgresql://$postgresIPV4/valyx",
      "valyx-master",
      rolePassword)

    val schemaStream = getClass.getResourceAsStream("/valyx.sql")
    if (schemaStream == null) {
      throw new RuntimeException("failed to load schema on classpath")
    }
    // Close the Source (and the underlying stream) even if reading fails;
    // the original leaked the InputStream.
    val source = io.Source.fromInputStream(schemaStream)
    val schemaText =
      try source.mkString
      finally source.close()

    using(DB(ConnectionPool.borrow())) { db =>
      db autoCommit { session =>
        session.execute(schemaText)
      }
    }
  }
}
| maxdeliso/Valyx | src/main/scala/com/megafrock/Main.scala | Scala | mit | 1,159 |
package org.vitrivr.adampro.utils.ml
import breeze.linalg.DenseVector
import org.vitrivr.adampro.utils.Logging
import scala.util.Random
case class TrainingSample(f: DenseVector[Double], time: Double)
/**
  * Linear model trained with the Pegasos stochastic sub-gradient scheme,
  * using an epsilon-insensitive absolute loss on time measurements.
  *
  * see S. Shalev-Shwartz, Y. Singer, N. Srebro, A. Cotter (2011). Pegasos: Primal Estimated sub-GrAdient SOlver for SVM.
  *
  * ADAMpro
  *
  * Ivan Giangreco
  * November 2016
  *
  * @param dims      dimensionality of the feature vectors
  * @param lambda    regularisation parameter of the Pegasos update
  * @param batchSize number of samples per mini-batch update
  * @param epsilon   half-width of the loss-free band around the prediction
  */
@SerialVersionUID(100L)
class PegasosSVM(private val dims: Int, private val lambda: Double = 1, private val batchSize: Int = 5, private val epsilon: Double = 100) extends Logging with Serializable {
  // Hard cap on the iteration counter in the outer training loop.
  private val MAX_ITER = 5000

  // Weight vector (the learned model) and the 1-based update counter that
  // drives the decaying step size.
  private var w = DenseVector.zeros[Double](dims)
  private var t = 1

  /**
    * Trains the model on the given samples.
    *
    * With more than `batchSize` samples, the data is repeatedly shuffled,
    * split 80/20 into train/test, trained in mini-batches (recursive call
    * into the else-branch) and scored by MSE on the held-out 20%.
    *
    * NOTE(review): the loop condition
    * `(|newMSE - oldMSE| > 0.001 * oldMSE && t < 100) || t < MAX_ITER`
    * keeps looping while `t < MAX_ITER` regardless of convergence (and
    * MAX_ITER > 100), so the relative-MSE early-stopping test can never end
    * the loop before t reaches MAX_ITER — confirm whether the operators
    * were meant to be `(... || t < 100) && t < MAX_ITER`.
    *
    * @param data labelled training samples
    */
  def train(data: Seq[TrainingSample]) {
    if (data.length > batchSize) {
      var oldMSE = Double.MaxValue
      var newMSE = Double.MaxValue

      do {
        val shuffledData = Random.shuffle(data)
        val (trainData, testData) = shuffledData.splitAt((0.8 * data.length).toInt)

        trainData.grouped(batchSize).foreach {
          train(_)
        }

        oldMSE = newMSE
        newMSE = mse(testData)
      } while ((math.abs(newMSE - oldMSE) > 0.001 * oldMSE && t < 100) || t < MAX_ITER)
    } else {
      //do training with mini-batch
      var batchW = DenseVector.zeros[Double](dims)

      data.foreach { datum =>
        val x = datum.f //vector
        val y = datum.time //measurement

        // Epsilon-insensitive loss: zero inside the band, linear outside.
        val loss = math.max(0, math.abs(y - (w.t * x)) - epsilon)

        // NOTE(review): x is scaled by the non-negative loss magnitude; the
        // sign of the residual (y - w.t * x) is never applied, so over- and
        // under-predictions push w in the same direction — verify against
        // the Pegasos sub-gradient for this loss.
        batchW += x * loss
      }

      // Pegasos step: shrink w by (1 - 1/t), add the batch contribution
      // scaled by the decaying rate 1 / (|batch| * t * lambda).
      w = w * (1.0 - 1.0 / t) + batchW * (1.0 / (data.length * t * lambda))
      t += 1
    }
  }

  /** Mean squared error of the current model over `sample`. */
  private def mse(sample: Seq[TrainingSample]): Double = {
    sample.map { datum =>
      val x = datum.f //vector
      val y = datum.time //measurement

      val tested = test(x)
      (y - tested) * (y - tested)
    }.sum / sample.size.toDouble
  }

  /**
    * Predicts the target for a feature vector (dot product with w).
    *
    * @param f feature vector of length `dims`
    * @return predicted value
    */
  def test(f: DenseVector[Double]): Double = w.t * f
} | dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/utils/ml/PegasosSVM.scala | Scala | mit | 2,039 |
package edu.depauw.scales.graphics
/** Enumeration-style marker for AWT font styles.
  *
  * Each concrete object exposes the matching `java.awt.Font` style constant
  * through `value`, keeping the rest of the graphics API decoupled from AWT.
  */
trait FontStyleType {
  /** The `java.awt.Font` style bit mask represented by this style. */
  val value: Int
}

/** Regular (unstyled) text — `java.awt.Font.PLAIN`. */
case object PLAIN extends FontStyleType {
  val value = java.awt.Font.PLAIN
}

/** Bold text — `java.awt.Font.BOLD`. */
case object BOLD extends FontStyleType {
  val value = java.awt.Font.BOLD
}

/** Italic text — `java.awt.Font.ITALIC`. */
case object ITALIC extends FontStyleType {
  val value = java.awt.Font.ITALIC
}

/** Bold and italic combined.
  *
  * The AWT style constants are bit flags, so they are combined with bitwise
  * OR rather than arithmetic addition (same numeric result for these two
  * flags, but OR is the overlap-safe flag idiom).
  */
case object BOLD_ITALIC extends FontStyleType {
  val value = java.awt.Font.BOLD | java.awt.Font.ITALIC
}
/** Renders `g` with a fully specified font (family, style, point size),
  * restoring the previous font afterwards. The font is created at size 1
  * and then derived to the requested float size. */
case class Font(name : String, style : FontStyleType, size : Double, g : Graphic) extends Graphic {
  def render(gc : GraphicsContext) {
    val oldFont = gc.g2d.getFont
    gc.g2d.setFont(new java.awt.Font(name, style.value, 1).deriveFont(size.toFloat))
    g.render(gc)
    gc.g2d.setFont(oldFont)
  }
}

/** Overrides only the font family, keeping the current style and size. */
case class FontName(name : String, g : Graphic) extends Graphic {
  def render(gc : GraphicsContext) {
    val oldFont = gc.g2d.getFont
    gc.g2d.setFont(new java.awt.Font(name, oldFont.getStyle, 1).deriveFont(oldFont.getSize2D))
    g.render(gc)
    gc.g2d.setFont(oldFont)
  }
}

/** Overrides only the font style via deriveFont(int), keeping family/size. */
case class FontStyle(style : FontStyleType, g : Graphic) extends Graphic {
  def render(gc : GraphicsContext) {
    val oldFont = gc.g2d.getFont
    gc.g2d.setFont(oldFont.deriveFont(style.value))
    g.render(gc)
    gc.g2d.setFont(oldFont)
  }
}

/** Overrides only the point size via deriveFont(float), keeping family/style. */
case class FontSize(size : Double, g : Graphic) extends Graphic {
  def render(gc : GraphicsContext) {
    val oldFont = gc.g2d.getFont
    gc.g2d.setFont(oldFont.deriveFont(size.toFloat))
    g.render(gc)
    gc.g2d.setFont(oldFont)
  }
} | bhoward/EscalatorOld | Scales2/src/edu/depauw/scales/graphics/Font.scala | Scala | apache-2.0 | 1,501 |
package turkey.sample
import turkey.tasks._
import scalajs.js.JSApp
/** Scala.js entry point for the client; registers the sample task so the
  * generic TaskDispatcher can launch it by key. */
object Dispatcher extends TaskDispatcher with JSApp {
  // Maps each task key to the thunk that starts it.
  override val taskMapping = Map[String, () => Unit](
    sampleTaskKey -> (() => Client.main())
  )
}
| julianmichael/turkey | sample/js/src/main/scala/turkey/sample/Dispatcher.scala | Scala | mit | 294 |
package common.graphql
import sangria.ast
import sangria.schema.AstSchemaBuilder
import shapes.graphql.GraphQLSchemaExtension
/** Concrete GraphQL schema extension: a parsed Sangria document plus an
  * optional AST schema builder (defaults to Sangria's default builder). */
case class Extension[Ctx](
  override val document: ast.Document,
  override val builder: AstSchemaBuilder[Ctx] = AstSchemaBuilder.default[Ctx]
) extends GraphQLSchemaExtension[Ctx]
| sysgears/apollo-universal-starter-kit | modules/core/server-scala/src/main/scala/common/graphql/Extension.scala | Scala | mit | 314 |
package org.scalaide.core.internal.hyperlink
import org.eclipse.jface.text.IRegion
import org.eclipse.jface.text.hyperlink.IHyperlink
import org.scalaide.logging.HasLogger
import org.scalaide.core.compiler.InteractiveCompilationUnit
import org.scalaide.core.compiler.IScalaPresentationCompiler.Implicits._
/** Computes "Open Declaration" hyperlinks for a position in a Scala
  * compilation unit by asking the presentation compiler for the symbol(s)
  * under the cursor.
  */
class ScalaDeclarationHyperlinkComputer extends HasLogger {
  /** Convenience overload: the displayed region and the region mapped into
    * the underlying source are the same. */
  def findHyperlinks(icu: InteractiveCompilationUnit, wordRegion: IRegion): Option[List[IHyperlink]] = {
    findHyperlinks(icu, wordRegion, wordRegion)
  }

  /** Returns hyperlinks for `mappedRegion`, or None when the region is
    * null/empty or the compiler produces nothing.
    *
    * @param wordRegion   region used for the hyperlink's visible extent
    * @param mappedRegion same position mapped into the Scala source
    */
  def findHyperlinks(icu: InteractiveCompilationUnit, wordRegion: IRegion, mappedRegion: IRegion): Option[List[IHyperlink]] = {
    logger.info("detectHyperlinks: wordRegion = " + mappedRegion)
    icu.withSourceFile({ (sourceFile, compiler) =>
      if (mappedRegion == null || mappedRegion.getLength == 0)
        None
      else {
        val start = mappedRegion.getOffset
        val regionEnd = mappedRegion.getOffset + mappedRegion.getLength
        // removing 1 handles correctly hyperlinking requests @ EOF
        val end = if (sourceFile.length == regionEnd) regionEnd - 1 else regionEnd
        val pos = compiler.rangePos(sourceFile, start, start, end)

        // Wildcard-import the compiler's members, excluding its `log`.
        import compiler.{ log => _, _ }

        val typed = askTypeAt(pos).getOption()
        // Resolve the typed tree at the position to candidate (symbol, label)
        // pairs; runs on the presentation-compiler thread via asyncExec.
        val symsOpt: Option[List[(Symbol,String)]] = compiler.asyncExec {
          val targetsOpt = typed map {
            case Import(expr, sels) =>
              if (expr.pos.includes(pos)) {
                // Cursor inside the import qualifier: walk down the Select
                // chain to the component that actually contains the position.
                @annotation.tailrec
                def locate(p: Position, inExpr: Tree): Symbol = inExpr match {
                  case Select(qualifier, name) =>
                    if (qualifier.pos.includes(p)) locate(p, qualifier)
                    else inExpr.symbol
                  case tree => tree.symbol
                }
                List(locate(pos, expr))
              } else {
                // Cursor on an import selector: resolve the selected name in
                // the qualifier's type, as both a term and a type member.
                sels find (selPos => selPos.namePos >= pos.start && selPos.namePos <= pos.end) map { sel =>
                  val tpe = stabilizedType(expr)
                  List(tpe.member(sel.name), tpe.member(sel.name.toTypeName))
                } getOrElse Nil
              }
            case Annotated(atp, _) => List(atp.symbol)
            // Class literals (ClazzTag): link to the referenced type's symbol.
            case Literal(const) if const.tag == compiler.ClazzTag => List(const.typeValue.typeSymbol)
            // Selection of `apply`: offer both the apply method and its qualifier.
            case ap @ Select(qual, nme.apply) => List(ap.symbol, qual.symbol)
            case st if st.symbol ne null => List(st.symbol)
            case _ => List()
          } map (_.filterNot{ sym => sym == NoSymbol || sym.hasPackageFlag || sym.isJavaDefined })
          for {
            targets <- targetsOpt.toList
            target <- targets
          } yield (target -> target.toString)
        }.getOption()
        // Build one hyperlink per surviving symbol.
        symsOpt map { syms =>
          syms flatMap {
            case (sym, symName) => compiler.mkHyperlink(sym, s"Open Declaration (${symName})", wordRegion, icu.scalaProject.javaProject).toList
          }
        }
      }
    }).flatten
  }
}
| romanowski/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/hyperlink/ScalaDeclarationHyperlinkComputer.scala | Scala | bsd-3-clause | 3,119 |
package edu.gemini.spModel.target.env
import edu.gemini.spModel.core.AlmostEqual.AlmostEqualOps
import edu.gemini.shared.util.immutable.{ImList, ImOption}
import edu.gemini.shared.util.immutable.ScalaConverters._
import edu.gemini.spModel.core.{Angle, Helpers}
import edu.gemini.spModel.guide.{GuideProbeMap, GuideProbe}
import edu.gemini.spModel.pio.xml.PioXmlFactory
import edu.gemini.spModel.target.SPTarget
import org.scalacheck.Prop._
import org.specs2.ScalaCheck
import org.specs2.mutable.Specification
import scala.collection.JavaConverters._
import scalaz._, Scalaz._
class GuideGroupSpec extends Specification with ScalaCheck with Arbitraries with Almosts with Helpers {
import GuideGroupSpec.AllProbes
"GuideGroup name" should {
"always be defined for manual groups, undefined for automatic" in
forAll { (g: GuideGroup) =>
g.grp match {
case a: AutomaticGroup => g.getName == ImOption.empty
case _ => g.getName != ImOption.empty
}
}
"be modifiable for manual groups but updates ignored for automatic groups" in
forAll { (g: GuideGroup, n: String) =>
val nopt = ImOption.apply(n)
g.grp match {
case a: AutomaticGroup => g.setName(nopt) === g
case _ => g.setName(nopt).getName == nopt && g.setName(n).getName == nopt
}
}
}
"GuideGroup contains" should {
"be false for any guide probe for the initial automatic group" in
forAll { (gp: GuideProbe) =>
!GuideGroup(AutomaticGroup.Initial).contains(gp)
}
"be false for any guide probe for the disabled automatic group" in
forAll { (gp: GuideProbe) =>
!GuideGroup(AutomaticGroup.Disabled).contains(gp)
}
"be true iff there are targets associated with the guide probe" in
forAll { (g: GuideGroup) =>
AllProbes.forall { gp =>
g.contains(gp) == g.get(gp).asScalaOpt.exists(_.getTargets.nonEmpty())
}
}
}
"GuideGroup get" should {
"return none or else a non empty list" in
forAll { (g: GuideGroup) =>
AllProbes.forall { gp =>
g.get(gp).asScalaOpt.forall(_.getTargets.nonEmpty)
}
}
"return none or else GuideProbeTargets with a matching probe" in
forAll { (g: GuideGroup) =>
AllProbes.forall { gp =>
g.get(gp).asScalaOpt.forall(_.getGuider === gp)
}
}
"return a non empty GuideProbeTargets iff the group contains the probe" in
forAll { (g: GuideGroup) =>
AllProbes.forall { gp =>
g.get(gp).asScalaOpt.isDefined === g.contains(gp)
}
}
"return a GuideProbeTargets with primary star that matches the options list focus" in
forAll { (g: GuideGroup) =>
AllProbes.forall { gp =>
g.get(gp).asScalaOpt.forall { gpt =>
gpt.getPrimary.asScalaOpt === (g.grp match {
case ManualGroup(_, m) => m.lookup(gp).flatMap(_.focus)
case AutomaticGroup.Active(m, _) => m.lookup(gp)
case AutomaticGroup.Initial => None
case AutomaticGroup.Disabled => None
})
}
}
}
}
"GuideGroup put" should {
"remove all guide stars if GuideProbeTargets is empty" in
forAll { (g: GuideGroup) =>
AllProbes.forall { gp =>
val emptyGpt = GuideProbeTargets.create(gp)
val g2 = g.put(emptyGpt)
!g2.contains(gp) && g2.get(gp).isEmpty
}
}
"remove the primary guide star if there is no primary in GuideProbeTargets" in
forAll { (g: GuideGroup) =>
AllProbes.forall { gp =>
val noPrimaryGpt = GuideProbeTargets.create(gp, new SPTarget()).clearPrimarySelection()
val g2 = g.put(noPrimaryGpt)
g2.get(gp).asScalaOpt.forall(_.getPrimary.isEmpty)
}
}
"remove the guider from automatic groups if there is no primary in GuideProbeTargets" in
forAll { (g: GuideGroup) =>
AllProbes.forall { gp =>
val noPrimaryGpt = GuideProbeTargets.create(gp, new SPTarget()).clearPrimarySelection()
val g2 = g.put(noPrimaryGpt)
val gpt2 = g2.get(gp).asScalaOpt
g2.grp match {
case _: AutomaticGroup => gpt2.isEmpty
case _: ManualGroup => gpt2.exists(_.getPrimary.isEmpty)
}
}
}
"set guide stars associated with a probe but ignore non-primary guide stars for automatic groups" in
forAll { (g: GuideGroup) =>
val primaryTarget = new SPTarget() <| (_.setName("primary"))
val notPrimaryTarget = new SPTarget() <| (_.setName("not primary"))
AllProbes.forall { gp =>
val gpt = GuideProbeTargets.create(gp, primaryTarget, notPrimaryTarget)
val g2 = g.put(gpt)
val lst2 = g2.get(gp).asScalaOpt.toList.flatMap(_.getTargets.asScalaList)
g2.grp match {
case _: AutomaticGroup => lst2 == List(primaryTarget)
case _: ManualGroup => lst2 == List(primaryTarget, notPrimaryTarget)
}
}
}
}
"GuideGroup remove" should {
"remove all targets associated with the given guider" in
forAll { (g: GuideGroup) =>
AllProbes.forall { gp =>
val g2 = g.remove(gp)
g2.get(gp).isEmpty && !g2.contains(gp)
}
}
}
"GuideGroup clear" should {
"remove all targets" in
forAll { (g: GuideGroup) =>
val g2 = g.clear()
g2.getAll.isEmpty && AllProbes.forall { gp =>
g2.get(gp).isEmpty && !g2.contains(gp)
}
}
}
"GuideGroup getAll" should {
"return its results in sorted order" in
forAll { (g: GuideGroup) =>
val probeOrder = AllProbes.zipWithIndex.toMap
val order = g.getAll.asScalaList.map(gpt => probeOrder(gpt.getGuider))
order == order.sorted
}
"return a result for each guider with associated guide stars" in
forAll { (g: GuideGroup) =>
val guiders = g.getAll.asScalaList.map(_.getGuider).toSet
g.grp match {
case AutomaticGroup.Active(m, _) => guiders === m.keySet.toSet
case AutomaticGroup.Initial => guiders.isEmpty
case AutomaticGroup.Disabled => guiders.isEmpty
case ManualGroup(_, m) => guiders === m.keySet.toSet
}
}
"return matching guide probe targets for all guide probes with associated targets" in
forAll { (g: GuideGroup) =>
val gpts1 = g.getAll.asScalaList
val gpts2 = AllProbes.flatMap { gp => g.get(gp).asScalaOpt }
gpts1 == gpts2
}
}
"GuideGroup putAll" should {
"behave the same as if a sequence of individual calls to put were made" in
forAll { (g: GuideGroup, ts: ImList[GuideProbeTargets]) =>
g.putAll(ts) === (g /: ts.asScalaList){(gg,gpt) => gg.put(gpt)}
}
"contain the original primary guide probes and all primary guide probes for which targets have been added" in
forAll { (g: GuideGroup, ts: ImList[GuideProbeTargets]) =>
val allGps = g.getPrimaryReferencedGuiders.asScala.toSet ++ ts.asScalaList.map(_.getGuider)
g.putAll(ts).getPrimaryReferencedGuiders.asScala.toSet == allGps
}
}
"GuideGroup setAll" should {
"produce the same guide group if the group is cleared and the elements are added via putAll" in
forAll { (g: GuideGroup, ts: ImList[GuideProbeTargets]) =>
g.setAll(ts) === g.clear().putAll(ts)
}
"only contain the guide probes in the collection" in
forAll { (g: GuideGroup, ts: ImList[GuideProbeTargets]) =>
val g2 = g.setAll(ts)
val gpOut = g.getReferencedGuiders.asScala.toSet -- g2.getReferencedGuiders.asScala
g2.getReferencedGuiders.asScala.toSet == ts.asScalaList.map(_.getGuider).toSet &&
gpOut.forall(gp => !g2.contains(gp))
}
}
"GuideGroup getPrimaryReferencedGuiders" should {
"contain a guider iff it has a primary target" in
forAll { (g: GuideGroup) =>
g.getAll.asScalaList.collect {
case gpt if gpt.getPrimary.isDefined => gpt.getGuider
}.toSet == g.getPrimaryReferencedGuiders.asScala.toSet
}
}
"GuideGroup getReferencedGuiders" should {
"contain a guider iff it has associated targets" in
forAll { (g: GuideGroup) =>
g.getAll.asScala.map(_.getGuider).toSet == g.getReferencedGuiders.asScala.toSet
}
}
  // Properties for GuideGroup.removeTarget.
  "GuideGroup removeTarget" should {
    "remove an existing target added to all guiders and preserve the order of the original targets" in
      forAll { (g: GuideGroup, target: SPTarget) =>
        val oldGpts = g.getAll
        // Append `target` to every guider's target list, then remove it again;
        // the result must equal the original (order included).
        val newGpts = g.getAll.asScalaList.map(gt => gt.update(OptionsList.UpdateOps.append(target))).asImList
        val remGpts = g.setAll(newGpts).removeTarget(target).getAll
        remGpts == oldGpts
      }
    "remove an empty mapping from guide probe to targets" in
      forAll { (g: GuideGroup) =>
        g.getAll.asScalaList.forall { ts =>
          // Removing every target of a probe should drop the probe mapping itself.
          val gNew = (g/:ts.getTargets.asScalaList){ (gg,t) => gg.removeTarget(t) }
          !gNew.contains(ts.getGuider)
        }
      }
    "maintain the primary target correctly when removing the primary target for a probe" in
      forAll { (name: String, gp: GuideProbe, lefts: List[SPTarget], focus: SPTarget, rights: List[SPTarget]) =>
        // Repeatedly remove the current primary, collecting the successive primaries.
        def unrollGroupOptsList[A](gg2: GuideGroup): List[SPTarget] = gg2.grp match {
          case ManualGroup(_, tm) => tm.lookup(gp).fold(List.empty[SPTarget]) { ol =>
            ol.focus.fold(List.empty[SPTarget])(t => t :: unrollGroupOptsList(gg2.removeTarget(t)))
          }
          case _ => Nil
        }
        val gg = new GuideGroup(ManualGroup(name, ==>>(gp -> OptsList.focused(lefts, focus, rights))))
        // Expected promotion order when the primary is removed over and over.
        val expectedOrder = focus :: (rights ++ lefts.reverse)
        val actualOrder = unrollGroupOptsList(gg)
        expectedOrder === actualOrder
      }
  }
  // getAllMatching must partition the guide probe targets by probe type.
  "GuideGroup getAllMatching" should {
    "partition the collections of guide probe targets by guide probe type" in
      forAll { (g: GuideGroup) =>
        val ptns = g.getReferencedGuiders.asScala.map(gp => g.getAllMatching(gp.getType).asScalaList.toSet).toSet
        (ptns.toList.map(_.size).sum == g.getAll.size) && ptns.forall(p => ptns.forall(q => p == q || p.intersect(q).isEmpty))
      }
  }
  // cloneTargets must deep-copy the SPTargets while keeping structure (~=).
  "GuideGroup cloneTargets" should {
    "create a new GuideGroup with cloned SPTargets but otherwise equivalent in structure" in
      forAll { (g0: GuideGroup) =>
        def targetList(g: GuideGroup): List[SPTarget] = g.getTargets.asScalaList
        val g1 = g0.cloneTargets
        // Structurally equal, but no SPTarget instance is shared between the two.
        (g0 ~= g1) && targetList(g0).zip(targetList(g1)).forall { case (t0, t1) => t0 =/= t1 }
      }
  }
  // getAllContaining: which guide probe targets contain a given SPTarget.
  "GuideGroup getAllContaining" should {
    "return a subset of guide probe targets containing a specific target for manual groups" in
      forAll { (g: GuideGroup, t: SPTarget) =>
        // Add `t` to every odd-indexed guide probe targets entry only.
        val newGpt = g.getAll.asScalaList.zipWithIndex.map { case (gpt, idx) => if (idx % 2 == 0) gpt else gpt.update(OptionsList.UpdateOps.append(t)) }
        val expected = g.setAll(newGpt.asImList).getAllContaining(t).asScalaList.map(_.getGuider).toSet
        val actual = (g.grp match {
          case _: ManualGroup => newGpt.zipWithIndex.collect { case (gpt, idx) if idx % 2 == 1 => gpt }
          case _: AutomaticGroup => Nil
        }).map(_.getGuider).toSet
        expected === actual
      }
    "return nothing for automatic initial groups" in
      forAll { (t: SPTarget) =>
        val gg = GuideGroup(AutomaticGroup.Initial)
        gg.getAllContaining(t).isEmpty
      }
    "return nothing for automatic disabled groups" in
      forAll { (t: SPTarget) =>
        val gg = GuideGroup(AutomaticGroup.Disabled)
        gg.getAllContaining(t).isEmpty
      }
    "return a subset of guide probe targets containing a specific target for automatic, active groups" in
      forAll { (t1Gps: Set[GuideProbe], t1: SPTarget, t2: SPTarget) =>
        // Split all probes between two targets and check each lookup side.
        val t2Gps = AllProbes.toSet.diff(t1Gps)
        val tm = ==>>.fromFoldable(t1Gps.map(_ -> t1) ++ t2Gps.map(_ -> t2))
        val gg = GuideGroup(AutomaticGroup.Active(tm, Angle.zero))
        gg.getAllContaining(t1).asScalaList.map(_.getGuider).toSet === t1Gps &&
        gg.getAllContaining(t2).asScalaList.map(_.getGuider).toSet === t2Gps
      }
  }
  // Round-trip persistence properties.
  "GuideGroup" should {
    "be PIO Externalizable" in
      forAll { (g: GuideGroup) =>
        g ~= GuideGroup.fromParamSet(g.getParamSet(new PioXmlFactory()))
      }
    "be Serializable" in
      forAll { (g: GuideGroup) =>
        canSerializeP(g)(_ ~= _)
      }
  }
}
object GuideGroupSpec {
  // Every known guide probe, sorted so generated test data is deterministic.
  val AllProbes: List[GuideProbe] = GuideProbeMap.instance.values.asScala.toList.sorted
}
| spakzad/ocs | bundle/edu.gemini.pot/src/test/scala/edu/gemini/spModel/target/env/GuideGroupSpec.scala | Scala | bsd-3-clause | 12,846 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import java.io.File
import scala.collection.JavaConverters._
import scala.util.Random
import org.apache.spark.SparkConf
import org.apache.spark.benchmark.{Benchmark, BenchmarkBase}
import org.apache.spark.sql.{DataFrame, DataFrameWriter, Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.plans.SQLHelper
import org.apache.spark.sql.execution.datasources.parquet.{SpecificParquetRecordReaderBase, VectorizedParquetRecordReader}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.ColumnVector
/**
* Benchmark to measure data source read performance.
* To run this benchmark:
* {{{
* 1. without sbt: bin/spark-submit --class <this class>
* --jars <spark core test jar>,<spark catalyst test jar> <spark sql test jar>
* 2. build/sbt "sql/test:runMain <this class>"
* 3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
* Results will be written to "benchmarks/DataSourceReadBenchmark-results.txt".
* }}}
*/
object DataSourceReadBenchmark extends BenchmarkBase with SQLHelper {
  // Single-threaded local Spark so benchmark numbers are comparable across runs.
  val conf = new SparkConf()
    .setAppName("DataSourceReadBenchmark")
    // Since `spark.master` always exists, overrides this value
    .set("spark.master", "local[1]")
    .setIfMissing("spark.driver.memory", "3g")
    .setIfMissing("spark.executor.memory", "3g")
    .setIfMissing("spark.ui.enabled", "false")
  val spark = SparkSession.builder.config(conf).getOrCreate()
  // Set default configs. Individual cases will change them if necessary.
  spark.conf.set(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key, "true")
  spark.conf.set(SQLConf.ORC_COPY_BATCH_TO_SPARK.key, "false")
  spark.conf.set(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key, "true")
  spark.conf.set(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, "true")
  // Runs `f`, always dropping the given temp views afterwards.
  def withTempTable(tableNames: String*)(f: => Unit): Unit = {
    try f finally tableNames.foreach(spark.catalog.dropTempView)
  }
private def prepareTable(dir: File, df: DataFrame, partition: Option[String] = None): Unit = {
val testDf = if (partition.isDefined) {
df.write.partitionBy(partition.get)
} else {
df.write
}
saveAsCsvTable(testDf, dir.getCanonicalPath + "/csv")
saveAsJsonTable(testDf, dir.getCanonicalPath + "/json")
saveAsParquetTable(testDf, dir.getCanonicalPath + "/parquet")
saveAsOrcTable(testDf, dir.getCanonicalPath + "/orc")
}
  // Each saveAsXTable helper persists the data in one format (with the
  // compression codec the benchmark standardizes on) and registers a temp view
  // over the written files so SQL cases can read it back.
  private def saveAsCsvTable(df: DataFrameWriter[Row], dir: String): Unit = {
    df.mode("overwrite").option("compression", "gzip").option("header", true).csv(dir)
    spark.read.option("header", true).csv(dir).createOrReplaceTempView("csvTable")
  }
  private def saveAsJsonTable(df: DataFrameWriter[Row], dir: String): Unit = {
    df.mode("overwrite").option("compression", "gzip").json(dir)
    spark.read.json(dir).createOrReplaceTempView("jsonTable")
  }
  private def saveAsParquetTable(df: DataFrameWriter[Row], dir: String): Unit = {
    df.mode("overwrite").option("compression", "snappy").parquet(dir)
    spark.read.parquet(dir).createOrReplaceTempView("parquetTable")
  }
  private def saveAsOrcTable(df: DataFrameWriter[Row], dir: String): Unit = {
    df.mode("overwrite").option("compression", "snappy").orc(dir)
    spark.read.orc(dir).createOrReplaceTempView("orcTable")
  }
  /**
   * Benchmarks scanning a single numeric column of the given `dataType`:
   * once through SQL for every format, and once by driving the vectorized
   * Parquet record reader directly (batch mode and row-iterator mode).
   */
  def numericScanBenchmark(values: Int, dataType: DataType): Unit = {
    // Benchmarks running through spark sql.
    val sqlBenchmark = new Benchmark(
      s"SQL Single ${dataType.sql} Column Scan",
      values,
      output = output)
    // Benchmarks driving reader component directly.
    val parquetReaderBenchmark = new Benchmark(
      s"Parquet Reader Single ${dataType.sql} Column Scan",
      values,
      output = output)
    withTempPath { dir =>
      withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
        import spark.implicits._
        spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
        prepareTable(dir, spark.sql(s"SELECT CAST(value as ${dataType.sql}) id FROM t1"))
        sqlBenchmark.addCase("SQL CSV") { _ =>
          spark.sql("select sum(id) from csvTable").collect()
        }
        sqlBenchmark.addCase("SQL Json") { _ =>
          spark.sql("select sum(id) from jsonTable").collect()
        }
        sqlBenchmark.addCase("SQL Parquet Vectorized") { _ =>
          spark.sql("select sum(id) from parquetTable").collect()
        }
        sqlBenchmark.addCase("SQL Parquet MR") { _ =>
          withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
            spark.sql("select sum(id) from parquetTable").collect()
          }
        }
        sqlBenchmark.addCase("SQL ORC Vectorized") { _ =>
          spark.sql("SELECT sum(id) FROM orcTable").collect()
        }
        sqlBenchmark.addCase("SQL ORC Vectorized with copy") { _ =>
          withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
            spark.sql("SELECT sum(id) FROM orcTable").collect()
          }
        }
        sqlBenchmark.addCase("SQL ORC MR") { _ =>
          withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
            spark.sql("SELECT sum(id) FROM orcTable").collect()
          }
        }
        sqlBenchmark.run()
        // Driving the parquet reader in batch mode directly.
        val files = SpecificParquetRecordReaderBase.listDirectory(new File(dir, "parquet")).toArray
        val enableOffHeapColumnVector = spark.sessionState.conf.offHeapColumnVectorEnabled
        val vectorizedReaderBatchSize = spark.sessionState.conf.parquetVectorizedReaderBatchSize
        parquetReaderBenchmark.addCase("ParquetReader Vectorized") { _ =>
          var longSum = 0L
          var doubleSum = 0.0
          // Select the per-type accumulator once, outside the hot row loop.
          val aggregateValue: (ColumnVector, Int) => Unit = dataType match {
            case ByteType => (col: ColumnVector, i: Int) => longSum += col.getByte(i)
            case ShortType => (col: ColumnVector, i: Int) => longSum += col.getShort(i)
            case IntegerType => (col: ColumnVector, i: Int) => longSum += col.getInt(i)
            case LongType => (col: ColumnVector, i: Int) => longSum += col.getLong(i)
            case FloatType => (col: ColumnVector, i: Int) => doubleSum += col.getFloat(i)
            case DoubleType => (col: ColumnVector, i: Int) => doubleSum += col.getDouble(i)
          }
          files.map(_.asInstanceOf[String]).foreach { p =>
            val reader = new VectorizedParquetRecordReader(
              null, enableOffHeapColumnVector, vectorizedReaderBatchSize)
            try {
              reader.initialize(p, ("id" :: Nil).asJava)
              val batch = reader.resultBatch()
              val col = batch.column(0)
              while (reader.nextBatch()) {
                val numRows = batch.numRows()
                var i = 0
                while (i < numRows) {
                  if (!col.isNullAt(i)) aggregateValue(col, i)
                  i += 1
                }
              }
            } finally {
              reader.close()
            }
          }
        }
        // Decoding in vectorized but having the reader return rows.
        parquetReaderBenchmark.addCase("ParquetReader Vectorized -> Row") { num =>
          var longSum = 0L
          var doubleSum = 0.0
          val aggregateValue: (InternalRow) => Unit = dataType match {
            case ByteType => (col: InternalRow) => longSum += col.getByte(0)
            case ShortType => (col: InternalRow) => longSum += col.getShort(0)
            case IntegerType => (col: InternalRow) => longSum += col.getInt(0)
            case LongType => (col: InternalRow) => longSum += col.getLong(0)
            case FloatType => (col: InternalRow) => doubleSum += col.getFloat(0)
            case DoubleType => (col: InternalRow) => doubleSum += col.getDouble(0)
          }
          files.map(_.asInstanceOf[String]).foreach { p =>
            val reader = new VectorizedParquetRecordReader(
              null, enableOffHeapColumnVector, vectorizedReaderBatchSize)
            try {
              reader.initialize(p, ("id" :: Nil).asJava)
              val batch = reader.resultBatch()
              while (reader.nextBatch()) {
                val it = batch.rowIterator()
                while (it.hasNext) {
                  val record = it.next()
                  if (!record.isNullAt(0)) aggregateValue(record)
                }
              }
            } finally {
              reader.close()
            }
          }
        }
        parquetReaderBenchmark.run()
      }
    }
  }
  // Scans a two-column (int, string) table across all formats.
  def intStringScanBenchmark(values: Int): Unit = {
    val benchmark = new Benchmark("Int and String Scan", values, output = output)
    withTempPath { dir =>
      withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
        import spark.implicits._
        spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
        prepareTable(
          dir,
          spark.sql("SELECT CAST(value AS INT) AS c1, CAST(value as STRING) AS c2 FROM t1"))
        benchmark.addCase("SQL CSV") { _ =>
          spark.sql("select sum(c1), sum(length(c2)) from csvTable").collect()
        }
        benchmark.addCase("SQL Json") { _ =>
          spark.sql("select sum(c1), sum(length(c2)) from jsonTable").collect()
        }
        benchmark.addCase("SQL Parquet Vectorized") { _ =>
          spark.sql("select sum(c1), sum(length(c2)) from parquetTable").collect()
        }
        benchmark.addCase("SQL Parquet MR") { _ =>
          withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
            spark.sql("select sum(c1), sum(length(c2)) from parquetTable").collect()
          }
        }
        benchmark.addCase("SQL ORC Vectorized") { _ =>
          spark.sql("SELECT sum(c1), sum(length(c2)) FROM orcTable").collect()
        }
        benchmark.addCase("SQL ORC Vectorized with copy") { _ =>
          withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
            spark.sql("SELECT sum(c1), sum(length(c2)) FROM orcTable").collect()
          }
        }
        benchmark.addCase("SQL ORC MR") { _ =>
          withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
            spark.sql("SELECT sum(c1), sum(length(c2)) FROM orcTable").collect()
          }
        }
        benchmark.run()
      }
    }
  }
  // Scans a string column that has only 200 distinct values ((value % 200) + 10000).
  def repeatedStringScanBenchmark(values: Int): Unit = {
    val benchmark = new Benchmark("Repeated String", values, output = output)
    withTempPath { dir =>
      withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
        import spark.implicits._
        spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
        prepareTable(
          dir,
          spark.sql("select cast((value % 200) + 10000 as STRING) as c1 from t1"))
        benchmark.addCase("SQL CSV") { _ =>
          spark.sql("select sum(length(c1)) from csvTable").collect()
        }
        benchmark.addCase("SQL Json") { _ =>
          spark.sql("select sum(length(c1)) from jsonTable").collect()
        }
        benchmark.addCase("SQL Parquet Vectorized") { _ =>
          spark.sql("select sum(length(c1)) from parquetTable").collect()
        }
        benchmark.addCase("SQL Parquet MR") { _ =>
          withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
            spark.sql("select sum(length(c1)) from parquetTable").collect()
          }
        }
        benchmark.addCase("SQL ORC Vectorized") { _ =>
          spark.sql("select sum(length(c1)) from orcTable").collect()
        }
        benchmark.addCase("SQL ORC Vectorized with copy") { _ =>
          withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
            spark.sql("select sum(length(c1)) from orcTable").collect()
          }
        }
        benchmark.addCase("SQL ORC MR") { _ =>
          withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
            spark.sql("select sum(length(c1)) from orcTable").collect()
          }
        }
        benchmark.run()
      }
    }
  }
def partitionTableScanBenchmark(values: Int): Unit = {
val benchmark = new Benchmark("Partitioned Table", values, output = output)
withTempPath { dir =>
withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
import spark.implicits._
spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
prepareTable(dir, spark.sql("SELECT value % 2 AS p, value AS id FROM t1"), Some("p"))
benchmark.addCase("Data column - CSV") { _ =>
spark.sql("select sum(id) from csvTable").collect()
}
benchmark.addCase("Data column - Json") { _ =>
spark.sql("select sum(id) from jsonTable").collect()
}
benchmark.addCase("Data column - Parquet Vectorized") { _ =>
spark.sql("select sum(id) from parquetTable").collect()
}
benchmark.addCase("Data column - Parquet MR") { _ =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("select sum(id) from parquetTable").collect()
}
}
benchmark.addCase("Data column - ORC Vectorized") { _ =>
spark.sql("SELECT sum(id) FROM orcTable").collect()
}
benchmark.addCase("Data column - ORC Vectorized with copy") { _ =>
withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
spark.sql("SELECT sum(id) FROM orcTable").collect()
}
}
benchmark.addCase("Data column - ORC MR") { _ =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("SELECT sum(id) FROM orcTable").collect()
}
}
benchmark.addCase("Partition column - CSV") { _ =>
spark.sql("select sum(p) from csvTable").collect()
}
benchmark.addCase("Partition column - Json") { _ =>
spark.sql("select sum(p) from jsonTable").collect()
}
benchmark.addCase("Partition column - Parquet Vectorized") { _ =>
spark.sql("select sum(p) from parquetTable").collect()
}
benchmark.addCase("Partition column - Parquet MR") { _ =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("select sum(p) from parquetTable").collect()
}
}
benchmark.addCase("Partition column - ORC Vectorized") { _ =>
spark.sql("SELECT sum(p) FROM orcTable").collect()
}
benchmark.addCase("Partition column - ORC Vectorized with copy") { _ =>
withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
spark.sql("SELECT sum(p) FROM orcTable").collect()
}
}
benchmark.addCase("Partition column - ORC MR") { _ =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("SELECT sum(p) FROM orcTable").collect()
}
}
benchmark.addCase("Both columns - CSV") { _ =>
spark.sql("select sum(p), sum(id) from csvTable").collect()
}
benchmark.addCase("Both columns - Json") { _ =>
spark.sql("select sum(p), sum(id) from jsonTable").collect()
}
benchmark.addCase("Both columns - Parquet Vectorized") { _ =>
spark.sql("select sum(p), sum(id) from parquetTable").collect()
}
benchmark.addCase("Both columns - Parquet MR") { _ =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("select sum(p), sum(id) from parquetTable").collect
}
}
benchmark.addCase("Both columns - ORC Vectorized") { _ =>
spark.sql("SELECT sum(p), sum(id) FROM orcTable").collect()
}
benchmark.addCase("Both column - ORC Vectorized with copy") { _ =>
withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
spark.sql("SELECT sum(p), sum(id) FROM orcTable").collect()
}
}
benchmark.addCase("Both columns - ORC MR") { _ =>
withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
spark.sql("SELECT sum(p), sum(id) FROM orcTable").collect()
}
}
benchmark.run()
}
}
}
  // Scans two string columns with the given fraction of NULL values, including
  // one case that drives the vectorized Parquet reader directly.
  def stringWithNullsScanBenchmark(values: Int, fractionOfNulls: Double): Unit = {
    val benchmark = new Benchmark("String with Nulls Scan", values, output = output)
    withTempPath { dir =>
      withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
        spark.range(values).createOrReplaceTempView("t1")
        prepareTable(
          dir,
          spark.sql(
            s"SELECT IF(RAND(1) < $fractionOfNulls, NULL, CAST(id as STRING)) AS c1, " +
            s"IF(RAND(2) < $fractionOfNulls, NULL, CAST(id as STRING)) AS c2 FROM t1"))
        benchmark.addCase("SQL CSV") { _ =>
          spark.sql("select sum(length(c2)) from csvTable where c1 is " +
            "not NULL and c2 is not NULL").collect()
        }
        benchmark.addCase("SQL Json") { _ =>
          spark.sql("select sum(length(c2)) from jsonTable where c1 is " +
            "not NULL and c2 is not NULL").collect()
        }
        benchmark.addCase("SQL Parquet Vectorized") { _ =>
          spark.sql("select sum(length(c2)) from parquetTable where c1 is " +
            "not NULL and c2 is not NULL").collect()
        }
        benchmark.addCase("SQL Parquet MR") { _ =>
          withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
            spark.sql("select sum(length(c2)) from parquetTable where c1 is " +
              "not NULL and c2 is not NULL").collect()
          }
        }
        val files = SpecificParquetRecordReaderBase.listDirectory(new File(dir, "parquet")).toArray
        val enableOffHeapColumnVector = spark.sessionState.conf.offHeapColumnVectorEnabled
        val vectorizedReaderBatchSize = spark.sessionState.conf.parquetVectorizedReaderBatchSize
        benchmark.addCase("ParquetReader Vectorized") { num =>
          var sum = 0
          files.map(_.asInstanceOf[String]).foreach { p =>
            val reader = new VectorizedParquetRecordReader(
              null, enableOffHeapColumnVector, vectorizedReaderBatchSize)
            try {
              reader.initialize(p, ("c1" :: "c2" :: Nil).asJava)
              val batch = reader.resultBatch()
              while (reader.nextBatch()) {
                val rowIterator = batch.rowIterator()
                while (rowIterator.hasNext) {
                  val row = rowIterator.next()
                  // NOTE(review): column 0 (c1) is fetched before the null check
                  // and its byte length is summed, whereas the SQL cases sum
                  // length(c2) — confirm the asymmetry is intentional.
                  val value = row.getUTF8String(0)
                  if (!row.isNullAt(0) && !row.isNullAt(1)) sum += value.numBytes()
                }
              }
            } finally {
              reader.close()
            }
          }
        }
        benchmark.addCase("SQL ORC Vectorized") { _ =>
          spark.sql("SELECT SUM(LENGTH(c2)) FROM orcTable " +
            "WHERE c1 IS NOT NULL AND c2 IS NOT NULL").collect()
        }
        benchmark.addCase("SQL ORC Vectorized with copy") { _ =>
          withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
            spark.sql("SELECT SUM(LENGTH(c2)) FROM orcTable " +
              "WHERE c1 IS NOT NULL AND c2 IS NOT NULL").collect()
          }
        }
        benchmark.addCase("SQL ORC MR") { _ =>
          withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
            spark.sql("SELECT SUM(LENGTH(c2)) FROM orcTable " +
              "WHERE c1 IS NOT NULL AND c2 IS NOT NULL").collect()
          }
        }
        benchmark.run()
      }
    }
  }
  // Scans a single middle column out of a wide table of `width` columns.
  def columnsBenchmark(values: Int, width: Int): Unit = {
    val benchmark = new Benchmark(
      s"Single Column Scan from $width columns",
      values,
      output = output)
    withTempPath { dir =>
      withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
        import spark.implicits._
        val middle = width / 2
        val selectExpr = (1 to width).map(i => s"value as c$i")
        spark.range(values).map(_ => Random.nextLong).toDF()
          .selectExpr(selectExpr: _*).createOrReplaceTempView("t1")
        prepareTable(dir, spark.sql("SELECT * FROM t1"))
        benchmark.addCase("SQL CSV") { _ =>
          spark.sql(s"SELECT sum(c$middle) FROM csvTable").collect()
        }
        benchmark.addCase("SQL Json") { _ =>
          spark.sql(s"SELECT sum(c$middle) FROM jsonTable").collect()
        }
        benchmark.addCase("SQL Parquet Vectorized") { _ =>
          spark.sql(s"SELECT sum(c$middle) FROM parquetTable").collect()
        }
        benchmark.addCase("SQL Parquet MR") { _ =>
          withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
            spark.sql(s"SELECT sum(c$middle) FROM parquetTable").collect()
          }
        }
        benchmark.addCase("SQL ORC Vectorized") { _ =>
          spark.sql(s"SELECT sum(c$middle) FROM orcTable").collect()
        }
        benchmark.addCase("SQL ORC Vectorized with copy") { _ =>
          withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
            spark.sql(s"SELECT sum(c$middle) FROM orcTable").collect()
          }
        }
        benchmark.addCase("SQL ORC MR") { _ =>
          withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> "false") {
            spark.sql(s"SELECT sum(c$middle) FROM orcTable").collect()
          }
        }
        benchmark.run()
      }
    }
  }
override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
runBenchmark("SQL Single Numeric Column Scan") {
Seq(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType).foreach {
dataType => numericScanBenchmark(1024 * 1024 * 15, dataType)
}
}
runBenchmark("Int and String Scan") {
intStringScanBenchmark(1024 * 1024 * 10)
}
runBenchmark("Repeated String Scan") {
repeatedStringScanBenchmark(1024 * 1024 * 10)
}
runBenchmark("Partitioned Table Scan") {
partitionTableScanBenchmark(1024 * 1024 * 15)
}
runBenchmark("String with Nulls Scan") {
for (fractionOfNulls <- List(0.0, 0.50, 0.95)) {
stringWithNullsScanBenchmark(1024 * 1024 * 10, fractionOfNulls)
}
}
runBenchmark("Single Column Scan From Wide Columns") {
for (columnWidth <- List(10, 50, 100)) {
columnsBenchmark(1024 * 1024 * 1, columnWidth)
}
}
}
}
| ahnqirage/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala | Scala | apache-2.0 | 23,537 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.security
import java.io.{Closeable, InputStream, IOException, OutputStream}
import java.nio.ByteBuffer
import java.nio.channels.{ReadableByteChannel, WritableByteChannel}
import java.util.Properties
import java.util.concurrent.TimeUnit
import javax.crypto.KeyGenerator
import javax.crypto.spec.{IvParameterSpec, SecretKeySpec}
import scala.collection.JavaConverters._
import com.google.common.io.ByteStreams
import org.apache.commons.crypto.random._
import org.apache.commons.crypto.stream._
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.network.util.{CryptoUtils, JavaUtils}
/**
* A util class for manipulating IO encryption and decryption streams.
*/
private[spark] object CryptoStreamUtils extends Logging {
  // The initialization vector length in bytes.
  val IV_LENGTH_IN_BYTES = 16
  // The prefix of IO encryption related configurations in Spark configuration.
  val SPARK_IO_ENCRYPTION_COMMONS_CONFIG_PREFIX = "spark.io.encryption.commons.config."
  /**
   * Helper method to wrap `OutputStream` with `CryptoOutputStream` for encryption.
   * The IV is written in the clear at the head of the stream; the matching
   * `createCryptoInputStream` consumes it before decrypting.
   */
  def createCryptoOutputStream(
      os: OutputStream,
      sparkConf: SparkConf,
      key: Array[Byte]): OutputStream = {
    val params = new CryptoParams(key, sparkConf)
    val iv = createInitializationVector(params.conf)
    os.write(iv)
    new ErrorHandlingOutputStream(
      new CryptoOutputStream(params.transformation, params.conf, os, params.keySpec,
        new IvParameterSpec(iv)),
      os)
  }
/**
* Wrap a `WritableByteChannel` for encryption.
*/
  /**
   * Wrap a `WritableByteChannel` for encryption. The IV is written in the
   * clear first, then ciphertext follows.
   */
  def createWritableChannel(
      channel: WritableByteChannel,
      sparkConf: SparkConf,
      key: Array[Byte]): WritableByteChannel = {
    val params = new CryptoParams(key, sparkConf)
    val iv = createInitializationVector(params.conf)
    // CryptoHelperChannel guarantees every byte handed to it is fully written.
    val helper = new CryptoHelperChannel(channel)
    helper.write(ByteBuffer.wrap(iv))
    new ErrorHandlingWritableChannel(
      new CryptoOutputStream(params.transformation, params.conf, helper, params.keySpec,
        new IvParameterSpec(iv)),
      helper)
  }
  /**
   * Helper method to wrap `InputStream` with `CryptoInputStream` for decryption.
   * Reads the cleartext IV written by `createCryptoOutputStream` first.
   */
  def createCryptoInputStream(
      is: InputStream,
      sparkConf: SparkConf,
      key: Array[Byte]): InputStream = {
    val iv = new Array[Byte](IV_LENGTH_IN_BYTES)
    ByteStreams.readFully(is, iv)
    val params = new CryptoParams(key, sparkConf)
    new ErrorHandlingInputStream(
      new CryptoInputStream(params.transformation, params.conf, is, params.keySpec,
        new IvParameterSpec(iv)),
      is)
  }
  /**
   * Wrap a `ReadableByteChannel` for decryption. Reads the cleartext IV first.
   */
  def createReadableChannel(
      channel: ReadableByteChannel,
      sparkConf: SparkConf,
      key: Array[Byte]): ReadableByteChannel = {
    val iv = new Array[Byte](IV_LENGTH_IN_BYTES)
    val buf = ByteBuffer.wrap(iv)
    JavaUtils.readFully(channel, buf)
    val params = new CryptoParams(key, sparkConf)
    new ErrorHandlingReadableChannel(
      new CryptoInputStream(params.transformation, params.conf, channel, params.keySpec,
        new IvParameterSpec(iv)),
      channel)
  }
  // Extracts the commons-crypto properties from the Spark configuration
  // (entries prefixed with SPARK_IO_ENCRYPTION_COMMONS_CONFIG_PREFIX).
  def toCryptoConf(conf: SparkConf): Properties = {
    CryptoUtils.toCryptoConf(SPARK_IO_ENCRYPTION_COMMONS_CONFIG_PREFIX,
      conf.getAll.toMap.asJava.entrySet())
  }
/**
* Creates a new encryption key.
*/
  /**
   * Creates a new encryption key. Key size and keygen algorithm come from the
   * Spark configuration (IO_ENCRYPTION_KEY_SIZE_BITS / IO_ENCRYPTION_KEYGEN_ALGORITHM).
   */
  def createKey(conf: SparkConf): Array[Byte] = {
    val keyLen = conf.get(IO_ENCRYPTION_KEY_SIZE_BITS)
    val ioKeyGenAlgorithm = conf.get(IO_ENCRYPTION_KEYGEN_ALGORITHM)
    val keyGen = KeyGenerator.getInstance(ioKeyGenAlgorithm)
    keyGen.init(keyLen)
    keyGen.generateKey().getEncoded()
  }
  /**
   * This method to generate an IV (Initialization Vector) using secure random.
   */
  private[this] def createInitializationVector(properties: Properties): Array[Byte] = {
    val iv = new Array[Byte](IV_LENGTH_IN_BYTES)
    val initialIVStart = System.nanoTime()
    CryptoRandomFactory.getCryptoRandom(properties).nextBytes(iv)
    val initialIVFinish = System.nanoTime()
    val initialIVTime = TimeUnit.NANOSECONDS.toMillis(initialIVFinish - initialIVStart)
    // Random sources can be slow (e.g. while gathering entropy); warn when IV
    // creation took more than 2 seconds.
    if (initialIVTime > 2000) {
      logWarning(s"It costs ${initialIVTime} milliseconds to create the Initialization Vector " +
        s"used by CryptoStream")
    }
    iv
  }
/**
* This class is a workaround for CRYPTO-125, that forces all bytes to be written to the
* underlying channel. Since the callers of this API are using blocking I/O, there are no
* concerns with regards to CPU usage here.
*/
  /**
   * This class is a workaround for CRYPTO-125, that forces all bytes to be written to the
   * underlying channel. Since the callers of this API are using blocking I/O, there are no
   * concerns with regards to CPU usage here.
   */
  private class CryptoHelperChannel(sink: WritableByteChannel) extends WritableByteChannel {
    override def write(src: ByteBuffer): Int = {
      val count = src.remaining()
      // A single channel write may be partial; loop until the buffer is drained.
      while (src.hasRemaining()) {
        sink.write(src)
      }
      count
    }
    override def isOpen(): Boolean = sink.isOpen()
    override def close(): Unit = sink.close()
  }
/**
* SPARK-25535. The commons-crypto library will throw InternalError if something goes
* wrong, and leave bad state behind in the Java wrappers, so it's not safe to use them
* afterwards. This wrapper detects that situation and avoids further calls into the
* commons-crypto code, while still allowing the underlying streams to be closed.
*
* This should be removed once CRYPTO-141 is fixed (and Spark upgrades its commons-crypto
* dependency).
*/
  /**
   * SPARK-25535. The commons-crypto library will throw InternalError if something goes
   * wrong, and leave bad state behind in the Java wrappers, so it's not safe to use them
   * afterwards. This wrapper detects that situation and avoids further calls into the
   * commons-crypto code, while still allowing the underlying streams to be closed.
   *
   * This should be removed once CRYPTO-141 is fixed (and Spark upgrades its commons-crypto
   * dependency).
   */
  trait BaseErrorHandler extends Closeable {
    // Set once an InternalError escapes the cipher stream; later calls fail fast.
    private var closed = false
    /** The encrypted stream that may get into an unhealthy state. */
    protected def cipherStream: Closeable
    /**
     * The underlying stream that is being wrapped by the encrypted stream, so that it can be
     * closed even if there's an error in the crypto layer.
     */
    protected def original: Closeable
    // Runs `fn`, marking this handler unusable on InternalError and closing
    // the raw stream directly (the cipher stream is no longer trustworthy).
    protected def safeCall[T](fn: => T): T = {
      if (closed) {
        throw new IOException("Cipher stream is closed.")
      }
      try {
        fn
      } catch {
        case ie: InternalError =>
          closed = true
          original.close()
          throw ie
      }
    }
    override def close(): Unit = {
      if (!closed) {
        // On the error path `original` was already closed in safeCall.
        cipherStream.close()
      }
    }
  }
// Visible for testing.
class ErrorHandlingReadableChannel(
protected val cipherStream: ReadableByteChannel,
protected val original: ReadableByteChannel)
extends ReadableByteChannel with BaseErrorHandler {
override def read(src: ByteBuffer): Int = safeCall {
cipherStream.read(src)
}
override def isOpen(): Boolean = cipherStream.isOpen()
}
private class ErrorHandlingInputStream(
protected val cipherStream: InputStream,
protected val original: InputStream)
extends InputStream with BaseErrorHandler {
override def read(b: Array[Byte]): Int = safeCall {
cipherStream.read(b)
}
override def read(b: Array[Byte], off: Int, len: Int): Int = safeCall {
cipherStream.read(b, off, len)
}
override def read(): Int = safeCall {
cipherStream.read()
}
}
private class ErrorHandlingWritableChannel(
protected val cipherStream: WritableByteChannel,
protected val original: WritableByteChannel)
extends WritableByteChannel with BaseErrorHandler {
override def write(src: ByteBuffer): Int = safeCall {
cipherStream.write(src)
}
override def isOpen(): Boolean = cipherStream.isOpen()
}
private class ErrorHandlingOutputStream(
protected val cipherStream: OutputStream,
protected val original: OutputStream)
extends OutputStream with BaseErrorHandler {
override def flush(): Unit = safeCall {
cipherStream.flush()
}
override def write(b: Array[Byte]): Unit = safeCall {
cipherStream.write(b)
}
override def write(b: Array[Byte], off: Int, len: Int): Unit = safeCall {
cipherStream.write(b, off, len)
}
override def write(b: Int): Unit = safeCall {
cipherStream.write(b)
}
}
private class CryptoParams(key: Array[Byte], sparkConf: SparkConf) {
val keySpec = new SecretKeySpec(key, "AES")
val transformation = sparkConf.get(IO_CRYPTO_CIPHER_TRANSFORMATION)
val conf = toCryptoConf(sparkConf)
}
}
| shaneknapp/spark | core/src/main/scala/org/apache/spark/security/CryptoStreamUtils.scala | Scala | apache-2.0 | 9,131 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.layers
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.nn.keras.{Flatten => BigDLFlatten}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.Net
import scala.reflect.ClassTag
/**
 * Flattens the input without affecting the batch size.
 * For example, an inputShape of Shape(2, 3, 4) yields an outputShape of
 * Shape(24), with the batch dimension left unchanged.
 *
 * When this layer is the first layer of a model, the inputShape argument
 * (a Single Shape, excluding the batch dimension) must be provided.
 *
 * @param inputShape A Single Shape, does not include the batch dimension.
 * @tparam T Numeric type of parameter(e.g. weight, bias). Only support float/double now.
 */
class Flatten[T: ClassTag](
    override val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
  extends BigDLFlatten[T](inputShape) with Net {

  override def doBuild(inputShape: Shape): AbstractModule[Tensor[T], Tensor[T], T] = {
    val dims = inputShape.toSingle().toArray
    // Collapse everything except the leading (batch) dimension.
    val flattenedSize = dims.drop(1).product
    val reshape = com.intel.analytics.bigdl.nn.Reshape(
      Array(flattenedSize), batchMode = Some(true))
    reshape.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
  }

  override private[zoo] def toKeras2(): String = {
    // Keras-style definition string: input shape plus the layer name.
    Net.kerasDef(this, Net.inputShapeToString(inputShape) ++ Net.param(getName()))
  }
}
object Flatten {
// Keras-style factory: builds a Flatten layer, with inputShape required only
// when this is the first layer of a model.
def apply[@specialized(Float, Double) T: ClassTag](
inputShape: Shape = null)(implicit ev: TensorNumeric[T]): Flatten[T] = {
new Flatten[T](inputShape)
}
}
| intel-analytics/analytics-zoo | zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/Flatten.scala | Scala | apache-2.0 | 2,404 |
package com.dataintuitive.luciuscore
package model.v4
package lenses
import scalaz.Lens
/**
 * Lenses into a ScoredPerturbation, pre-composed so callers can focus on the
 * nested Perturbation fields in a single step.
 */
object CombinedScoredPerturbationLenses extends Serializable {

  // Every lens below (except scoreLens) first focuses on the wrapped
  // Perturbation, then on one of its fields.
  private val toPerturbation = ScoredPerturbationLenses.perturbationLens

  val scoreLens = ScoredPerturbationLenses.scoresLens

  val idLens = toPerturbation >=> PerturbationLenses.idLens
  val filtersLens = toPerturbation >=> PerturbationLenses.filtersLens
  val filtersMapLens = toPerturbation >=> PerturbationLenses.filtersMapLens

  // Information lenses
  val batchLens = toPerturbation >=> CombinedPerturbationLenses.batchLens
  val plateLens = toPerturbation >=> CombinedPerturbationLenses.plateLens
  val wellLens = toPerturbation >=> CombinedPerturbationLenses.wellLens
  val cellLens = toPerturbation >=> CombinedPerturbationLenses.cellLens
  val yearLens = toPerturbation >=> CombinedPerturbationLenses.yearLens

  // Profile lenses
  val tLens = toPerturbation >=> CombinedPerturbationLenses.tLens
  val pLens = toPerturbation >=> CombinedPerturbationLenses.pLens
  val rLens = toPerturbation >=> CombinedPerturbationLenses.rLens
  val lengthLens = toPerturbation >=> CombinedPerturbationLenses.lengthLens
  val pTypeLens = toPerturbation >=> CombinedPerturbationLenses.pTypeLens
  val profileLens = toPerturbation >=> CombinedPerturbationLenses.profileLens

  // Treatment lenses
  val trtTypeLens = toPerturbation >=> CombinedPerturbationLenses.trtTypeLens
  val trtIdLens = toPerturbation >=> CombinedPerturbationLenses.trtIdLens
  val trtNameLens = toPerturbation >=> CombinedPerturbationLenses.trtNameLens
  val smilesLens = toPerturbation >=> CombinedPerturbationLenses.smilesLens
  val inchikeyLens = toPerturbation >=> CombinedPerturbationLenses.inchikeyLens
  val pubchemIdLens = toPerturbation >=> CombinedPerturbationLenses.pubchemIdLens
  val doseLens = toPerturbation >=> CombinedPerturbationLenses.doseLens
  val doseUnitLens = toPerturbation >=> CombinedPerturbationLenses.doseUnitLens
  val timeLens = toPerturbation >=> CombinedPerturbationLenses.timeLens
  val timeUnitLens = toPerturbation >=> CombinedPerturbationLenses.timeUnitLens
  val targetsLens = toPerturbation >=> CombinedPerturbationLenses.targetsLens

  // "Safe" lenses: compose with OptionLenses to view/set through the Option
  // with a sensible default.
  val safeBatchLens = batchLens >=> OptionLenses.stringLens
  val safePlateLens = plateLens >=> OptionLenses.stringLens
  val safeWellLens = wellLens >=> OptionLenses.stringLens
  val safeCellLens = cellLens >=> OptionLenses.stringLens
  val safeYearLens = yearLens >=> OptionLenses.stringLens
  val safeSmilesLens = smilesLens >=> OptionLenses.stringLens
  val safeInchikeyLens = inchikeyLens >=> OptionLenses.stringLens
  val safePubchemIdLens = pubchemIdLens >=> OptionLenses.stringLens
  val safeDoseLens = doseLens >=> OptionLenses.stringLens
  val safeDoseUnitLens = doseUnitLens >=> OptionLenses.stringLens
  val safeTimeLens = timeLens >=> OptionLenses.stringLens
  val safeTimeUnitLens = timeUnitLens >=> OptionLenses.stringLens
  val safeTargetsLens = targetsLens >=> OptionLenses.listLens
}
| data-intuitive/LuciusCore | src/main/scala/com/dataintuitive/luciuscore/model/v4/lenses/CombinedScoredPerturbationLenses.scala | Scala | apache-2.0 | 3,884 |
/*
* Copyright 2016-2020 47 Degrees Open Source <https://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package microsites
import microsites.layouts.PageLayout
import microsites.util.Arbitraries
import org.scalacheck.Prop._
import org.scalatestplus.scalacheck.Checkers
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
/** Property-based checks for [[PageLayout]]: each rendered fragment must use
  * the expected top-level HTML tag and must not be a void element.
  */
class PageLayoutTest extends AnyFunSuite with Checkers with Matchers with Arbitraries {

  test("render should return a html document") {
    check(forAll { settings: MicrositeSettings =>
      val pageLayout = new PageLayout(settings)
      pageLayout.render.tag shouldBe "html"
      !pageLayout.render.void
    })
  }

  test("homeHeader should return a `header` TypeTag") {
    check(forAll { settings: MicrositeSettings =>
      val pageLayout = new PageLayout(settings)
      pageLayout.pageHeader.tag shouldBe "header"
      !pageLayout.pageHeader.void
    })
  }

  test("pageMain should return a `main` TypeTag") {
    check(forAll { implicit settings: MicrositeSettings =>
      val pageLayout = new PageLayout(settings)
      pageLayout.pageMain.tag shouldBe "main"
      !pageLayout.pageMain.void
    })
  }
}
| 47deg/sbt-microsites | src/test/scala/microsites/PageLayoutTest.scala | Scala | apache-2.0 | 1,773 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import com.twitter.algebird.monad.Reader
import com.twitter.scalding.cascading_interop.FlowListenerPromise
import com.twitter.scalding.Dsl.flowDefToRichFlowDef
import scala.concurrent.{ Await, Future, Promise, ExecutionContext => ConcurrentExecutionContext }
import scala.util.{ Failure, Success, Try }
import cascading.flow.{ FlowDef, Flow }
/**
 * This is a Monad that represents a deferred computation (possibly involving
 * cascading jobs) and its result. Nothing runs until `run`/`waitFor` is called.
 */
sealed trait Execution[+T] {
import Execution.{ Mapped, FactoryExecution, FlatMapped, Zipped }
/*
* First run this Execution, then move to the result
* of the function
*/
def flatMap[U](fn: T => Execution[U]): Execution[U] =
FlatMapped(this, fn)
// Collapse a nested Execution; requires evidence that T is itself an Execution[U].
def flatten[U](implicit ev: T <:< Execution[U]): Execution[U] =
flatMap(ev)
// Transform the result without adding any new job dependency.
def map[U](fn: T => U): Execution[U] =
Mapped(this, fn)
// Interpret this Execution: actually run the work and produce the result.
def run(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext): Future[T]
/**
* This is convenience for when we don't care about the result.
* same as .map(_ => ())
*/
def unit: Execution[Unit] = map(_ => ())
// This waits synchronously on run, using the global execution context
def waitFor(conf: Config, mode: Mode): Try[T] =
Try(Await.result(run(conf, mode)(ConcurrentExecutionContext.global),
scala.concurrent.duration.Duration.Inf))
/*
* run this and that in parallel, without any dependency
*/
def zip[U](that: Execution[U]): Execution[(T, U)] = that match {
// push zips as low as possible
case fact @ FactoryExecution(_) => fact.zip(this).map(_.swap)
case _ => Zipped(this, that)
}
}
object Execution {
// A pure constant: runs no job, just evaluates the thunk on the Future pool.
private case class Const[T](get: () => T) extends Execution[T] {
def run(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) =
Future(get())
override def unit = Const(() => ())
}
// Sequential composition: run prev, then run the Execution produced by fn.
private case class FlatMapped[S, T](prev: Execution[S], fn: S => Execution[T]) extends Execution[T] {
def run(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) = for {
s <- prev.run(conf, mode)
next = fn(s)
t <- next.run(conf, mode)
} yield t
}
// Result transformation only; introduces no new job dependency.
private case class Mapped[S, T](prev: Execution[S], fn: S => T) extends Execution[T] {
def run(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) =
prev.run(conf, mode).map(fn)
// Don't bother applying the function if we are mapped
override def unit = prev.unit
}
// Independent parallel composition: both sides are started by zipping futures.
private case class Zipped[S, T](one: Execution[S], two: Execution[T]) extends Execution[(S, T)] {
def run(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) =
one.run(conf, mode).zip(two.run(conf, mode))
// Make sure we remove any mapping functions on both sides
override def unit = one.unit.zip(two.unit).map(_ => ())
}
/*
* This is the main class the represents a flow without any combinators
*/
private case class FlowDefExecution[T](result: (Config, Mode) => (FlowDef, (JobStats => Future[T]))) extends Execution[T] {
def run(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) = {
for {
(flowDef, fn) <- Future(result(conf, mode))
jobStats <- ExecutionContext.newContext(conf)(flowDef, mode).run
t <- fn(jobStats)
} yield t
}
/*
* Cascading can run parallel Executions in the same flow if they are both FlowDefExecutions
*/
override def zip[U](that: Execution[U]): Execution[(T, U)] =
that match {
case FlowDefExecution(result2) =>
// Merge both FlowDefs into one so cascading schedules them together,
// then fan the resulting JobStats out to both callbacks.
FlowDefExecution({ (conf, m) =>
val (fd1, fn1) = result(conf, m)
val (fd2, fn2) = result2(conf, m)
val merged = fd1.copy
merged.mergeFrom(fd2)
(merged, { (js: JobStats) => fn1(js).zip(fn2(js)) })
})
case _ => super.zip(that)
}
}
// Defers construction of the underlying Execution until config/mode are known.
private case class FactoryExecution[T](result: (Config, Mode) => Execution[T]) extends Execution[T] {
def run(conf: Config, mode: Mode)(implicit cec: ConcurrentExecutionContext) =
unwrap(conf, mode, this).run(conf, mode)
// Repeatedly apply nested factories until a concrete Execution is reached.
@annotation.tailrec
private def unwrap[U](conf: Config, mode: Mode, that: Execution[U]): Execution[U] =
that match {
case FactoryExecution(fn) => unwrap(conf, mode, fn(conf, mode))
case nonFactory => nonFactory
}
/*
* Cascading can run parallel Executions in the same flow if they are both FlowDefExecutions
*/
override def zip[U](that: Execution[U]): Execution[(T, U)] =
that match {
case FactoryExecution(result2) =>
// Unwrap both sides first so FlowDefExecution.zip can merge flows.
FactoryExecution({ (conf, m) =>
val exec1 = unwrap(conf, m, result(conf, m))
val exec2 = unwrap(conf, m, result2(conf, m))
exec1.zip(exec2)
})
case _ =>
FactoryExecution({ (conf, m) =>
val exec1 = unwrap(conf, m, result(conf, m))
exec1.zip(that)
})
}
}
/**
* This makes a constant execution that runs no job.
*/
def from[T](t: => T): Execution[T] = Const(() => t)
private[scalding] def factory[T](fn: (Config, Mode) => Execution[T]): Execution[T] =
FactoryExecution(fn)
/**
* This converts a function into an Execution monad. The flowDef returned
* is never mutated. The returned callback funcion is called after the flow
* is run and succeeds.
*/
def fromFn[T](
fn: (Config, Mode) => ((FlowDef, JobStats => Future[T]))): Execution[T] =
FlowDefExecution(fn)
/**
* This creates a new ExecutionContext, passes to the reader, builds the flow
* and cleans up the state of the FlowDef
*/
def buildFlow[T](conf: Config, mode: Mode)(op: Reader[ExecutionContext, T]): (T, Try[Flow[_]]) = {
val ec = ExecutionContext.newContextEmpty(conf, mode)
try {
// This mutates the newFlowDef in ec
val resultT = op(ec)
(resultT, ec.buildFlow)
} finally {
// Make sure to clean up all state with flowDef
FlowStateMap.clear(ec.flowDef)
}
}
// Build the flow and start it; a failed build becomes a failed Future.
def run[T](conf: Config, mode: Mode)(op: Reader[ExecutionContext, T]): (T, Future[JobStats]) = {
val (t, tryFlow) = buildFlow(conf, mode)(op)
tryFlow match {
case Success(flow) => (t, run(flow))
case Failure(err) => (t, Future.failed(err))
}
}
/*
* This runs a Flow using Cascading's built in threads. The resulting JobStats
* are put into a promise when they are ready
*/
def run[C](flow: Flow[C]): Future[JobStats] =
// This is in Java because of the cascading API's raw types on FlowListener
FlowListenerPromise.start(flow, { f: Flow[C] => JobStats(f.getFlowStats) })
/*
* If you want scalding to fail if the sources cannot be validated, then
* use this.
* Alteratively, in your Reader, call Source.validateTaps(Mode) to
* control which sources individually need validation
* Suggested use:
* for {
* result <- job
* mightErr <- validateSources
* } yield mightErr.map(_ => result)
*/
def validateSources: Reader[ExecutionContext, Try[Unit]] =
Reader { ec => Try(FlowStateMap.validateSources(ec.flowDef, ec.mode)) }
// Build the flow and block until it completes (or fails to build).
def waitFor[T](conf: Config, mode: Mode)(op: Reader[ExecutionContext, T]): (T, Try[JobStats]) = {
val (t, tryFlow) = buildFlow(conf, mode)(op)
(t, tryFlow.flatMap(waitFor(_)))
}
/*
* This blocks the current thread until the job completes with either success or
* failure.
*/
def waitFor[C](flow: Flow[C]): Try[JobStats] =
Try {
flow.complete;
JobStats(flow.getStats)
}
// Convenience zips for 2..5 independent Executions (parallel, no dependency).
def zip[A, B](ax: Execution[A], bx: Execution[B]): Execution[(A, B)] =
ax.zip(bx)
def zip[A, B, C](ax: Execution[A], bx: Execution[B], cx: Execution[C]): Execution[(A, B, C)] =
ax.zip(bx).zip(cx).map { case ((a, b), c) => (a, b, c) }
def zip[A, B, C, D](ax: Execution[A],
bx: Execution[B],
cx: Execution[C],
dx: Execution[D]): Execution[(A, B, C, D)] =
ax.zip(bx).zip(cx).zip(dx).map { case (((a, b), c), d) => (a, b, c, d) }
def zip[A, B, C, D, E](ax: Execution[A],
bx: Execution[B],
cx: Execution[C],
dx: Execution[D],
ex: Execution[E]): Execution[(A, B, C, D, E)] =
ax.zip(bx).zip(cx).zip(dx).zip(ex).map { case ((((a, b), c), d), e) => (a, b, c, d, e) }
/*
* If you have many Executions, it is better to combine them with
* zip than flatMap (which is sequential)
*/
def zipAll[T](exs: Seq[Execution[T]]): Execution[Seq[T]] = {
@annotation.tailrec
def go(xs: List[Execution[T]], acc: Execution[List[T]]): Execution[List[T]] = xs match {
case Nil => acc
case h :: tail => go(tail, h.zip(acc).map { case (y, ys) => y :: ys })
}
// This pushes all of them onto a list, and then reverse to keep order
go(exs.toList, from(Nil)).map(_.reverse)
}
}
| lucamilanesio/scalding | scalding-core/src/main/scala/com/twitter/scalding/Execution.scala | Scala | apache-2.0 | 9,350 |
import uk.gov.gchq.gaffer.graph._
import uk.gov.gchq.gaffer.user._
import uk.gov.gchq.gaffer.store.schema._
import uk.gov.gchq.gaffer.accumulostore._
import uk.gov.gchq.gaffer.data.element._
import uk.gov.gchq.gaffer.data.elementdefinition.view._
import uk.gov.gchq.gaffer.operation.data._
import uk.gov.gchq.gaffer.operation.impl.get._
import uk.gov.gchq.gaffer.spark.operation.scalardd._
import uk.gov.gchq.gaffer.spark.operation.javardd._
import uk.gov.gchq.gaffer.spark.operation.dataframe._
import org.apache.accumulo.core.client.ZooKeeperInstance
import org.apache.accumulo.core.client.security.tokens.PasswordToken
import org.apache.accumulo.core.security.TablePermission
import scala.io.Source
import scala.collection.JavaConversions._
// Load the per-user Gaffer store properties and connect to the backing
// Accumulo instance as root (password read from a local file).
val storeProperties = AccumuloProperties.loadStoreProperties(System.getenv("GAFFER_USER") + ".store.properties")
val accumuloInstance = new ZooKeeperInstance(storeProperties.getInstance, storeProperties.getZookeepers)
val accumulo = accumuloInstance.getConnector("root", new PasswordToken(Source.fromFile("../etc/root.password").mkString.trim))
// Resolve the graph id: taken from $GRAPH_ID when set, otherwise
// auto-discovered when exactly one readable non-system table exists.
var graphId = System.getenv("GRAPH_ID")
if (graphId == null || graphId.isEmpty) {
val tables = accumulo.tableOperations.list.filter(!_.startsWith("accumulo.")).filter(accumulo.securityOperations.hasTablePermission(storeProperties.getUser, _, TablePermission.READ))
if (tables.size == 0) {
println("There are no Accumulo tables that " + storeProperties.getUser + " can access!")
System.exit(1)
} else if (tables.size > 1) {
println("This Accumulo instance contains multiple Gaffer graphs, please specify the graphId that you wish to connect to on the command line")
System.exit(1)
} else {
graphId = tables.head
}
} else if (!accumulo.tableOperations.exists(graphId)) {
println("Accumulo table does not exist for graphId: " + graphId)
System.exit(1)
}
// The schema is stored as ".Schema" properties on the table's iterators;
// all iterator copies must agree on a single schema or we bail out.
val schemas = accumulo.tableOperations.getProperties(graphId).filter(prop => prop.getKey.startsWith("table.iterator.") && prop.getKey.endsWith(".Schema")).map(_.getValue)
if (schemas.toList.distinct.length == 0) {
println("Unable to retrieve Gaffer Graph Schema from Accumulo Table!")
System.exit(1)
} else if (schemas.toList.distinct.length > 1) {
println("There are multiple different schemas stored on the Accumulo Table. Unable to continue!")
System.exit(1)
}
val schema = Schema.fromJson(schemas.head.getBytes)
// Build the Gaffer Graph handle used by the interactive shell session.
val graph = new Graph.Builder().config(new GraphConfig.Builder().graphId(graphId).build()).addSchemas(schema).storeProperties(storeProperties).build()
// Print the banner (fetched over the network) and a connection summary.
println(Source.fromURL("https://raw.githubusercontent.com/gchq/Gaffer/master/logos/asciiLogo.txt").mkString)
println("You are connected to a Gaffer graph backed by an Accumulo Store:")
println("\\tAccumulo Instance: " + storeProperties.getInstance)
println("\\tZooKeepers: " + storeProperties.getZookeepers)
println("\\tGraphId: " + graphId)
println("\\tUsername: " + storeProperties.getUser)
println("Connection to Gaffer available at 'graph'")
println("")
println("Example Query:")
println("val sample = graph.execute(new GetAllElements(), new User())")
println("sample.take(20).foreach(println)")
println("sample.close")
| gchq/gaffer-tools | deployment/aws/core/spark-scripts/gaffer-spark-shell.scala | Scala | apache-2.0 | 3,160 |
package org.puma.analyzer.filter
/**
* Project: puma
* Package: org.puma.analyzer
*
* Author: Sergio Álvarez
* Date: 01/2014
*/
/** Extractor filter for simple (single) terms: it never produces composed
  * terms, so extraction always yields an empty result.
  */
class SimpleTermExtractorFilter extends ExtractorFilter {

  /** Always empty: single-term filtering produces no extracted term groups. */
  def extract(tweet: String): List[List[String]] = Nil

  /** The tweet attribute this filter reads. */
  def field: String = "text"
}
| sergioalvz/puma | src/main/scala/org/puma/analyzer/filter/SimpleTermExtractorFilter.scala | Scala | apache-2.0 | 310 |
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\\
* @ @ *
* # # # # (c) 2017 CAB *
* # # # # # # *
* # # # # # # # # # # # # *
* # # # # # # # # # *
* # # # # # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # # # # # *
* @ @ *
\\* * http://github.com/alexcab * * * * * * * * * * * * * * * * * * * * * * * * * */
package mathact.core.bricks.linking
import mathact.core.bricks.blocks.BlockContext
import mathact.core.bricks.plumbing.fitting.{Plug, Socket}
import mathact.core.sketch.blocks.BlockLike
/** Chain connecting for blocks with a single inflow.
  * Provides the `<~` operators that plug this block's inlet (`in`) into an
  * upstream outlet, link, or pass-through link.
  * Created by CAB on 13.11.2016.
  */
trait LinkIn[H]{ _: BlockLike ⇒
//Inlet producer method
def in: Socket[H]
//Connecting methods
// Plug an upstream link's outlet into this block's inlet.
def <~(linkOut: LinkOut[H])(implicit context: BlockContext): Unit = in.plug(linkOut.out)(context)
// Plug a bare outlet directly into this block's inlet.
def <~(out: Plug[H])(implicit context: BlockContext): Unit = in.plug(out)(context)
// Plug a pass-through link and return it so the chain can continue upstream.
def <~[T](linkThrough: LinkThrough[T,H])(implicit context: BlockContext): LinkThrough[T,H] = {
in.plug(linkThrough.out)(context)
linkThrough}}
| AlexCAB/MathAct | mathact_core/src/main/scala/mathact/core/bricks/linking/LinkIn.scala | Scala | mit | 1,829 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.common.util
import org.scalatest.{FunSuite, Outcome}
import org.apache.carbondata.common.logging.LogServiceFactory
/**
 * Base suite for Carbon's Spark tests: logs the suite and test name
 * before and after every test run.
 */
private[spark] abstract class CarbonFunSuite extends FunSuite {

  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

  /**
   * Log the suite name and the test name before and after each test.
   *
   * Subclasses should never override this method. If they wish to run
   * custom code before and after each test, they should mix in
   * the {{org.scalatest.BeforeAndAfter}} trait instead.
   */
  final protected override def withFixture(test: NoArgTest): Outcome = {
    val testName = test.text
    val suiteName = this.getClass.getName
    // Use literal `replace`, not `replaceAll`: the latter interprets the
    // pattern as a regex, in which each '.' matches any character.
    val shortSuiteName = suiteName.replace("org.apache.spark", "o.a.s")
    try {
      LOGGER.info(s"\\n\\n===== TEST OUTPUT FOR $shortSuiteName: '$testName' =====\\n")
      test()
    } finally {
      LOGGER.info(s"\\n\\n===== FINISHED $shortSuiteName: '$testName' =====\\n")
    }
  }
}
| zzcclp/carbondata | integration/spark-common-cluster-test/src/test/scala/org/apache/spark/sql/common/util/CarbonFunSuite.scala | Scala | apache-2.0 | 1,823 |
import org.scalajs.dom
import co.technius.scalajs.pixi
object ExampleApp {
// Entry point: creates the Pixi renderer, four animated corner boxes and a
// centered label, then starts the animation loop.
def main(args: Array[String]): Unit = {
val renderer = pixi.Pixi.autoDetectRenderer(600, 600)
dom.document.body.appendChild(renderer.view)
val root = new pixi.Container
// Draw a single orange 50x50 square once and bake it into a texture that
// all four sprites share.
val shapeGfx =
new pixi.Graphics()
.beginFill(0xFFAA00, 1)
.drawRect(0, 0, 50, 50)
.endFill()
val shapeTexture = renderer.generateTexture(shapeGfx, pixi.Pixi.ScaleModes.Nearest, 1)
// Corner coordinates the boxes animate between.
val minX, minY = 75
val maxX, maxY = 525
val shapeNW = initBoxAndAdd(shapeTexture, minX, minY, root)
val shapeNE = initBoxAndAdd(shapeTexture, maxX, minY, root)
val shapeSW = initBoxAndAdd(shapeTexture, minX, maxY, root)
val shapeSE = initBoxAndAdd(shapeTexture, maxX, maxY, root)
val text = new pixi.Text("scalajs-pixi", new pixi.TextStyle(fill = "white"))
text.scale.set(1.75)
// Center the label inside the canvas.
text.position.x = (renderer.width - text.width) / 2
text.position.y = (renderer.height - text.height) / 2
root.addChild(text)
// lazy so the function value can reference itself when scheduling the
// next animation frame.
lazy val renderFn: (Double) => Unit = { totalTime =>
// Sine-driven offset oscillating smoothly between 0 and 425.
val moveBy = 425 * (1 + math.sin(totalTime / 700.0)) / 2
shapeNW.x = minX + moveBy
shapeSE.x = maxX - moveBy
shapeNE.y = minY + moveBy
shapeSW.y = maxY - moveBy
renderer.render(root)
dom.window.requestAnimationFrame(renderFn)
}
renderFn(0)
}
// Builds a 50x50 sprite from the texture, pivoted about its center.
def createBox(texture: pixi.Texture): pixi.Sprite = {
val shape = new pixi.Sprite(texture)
shape.width = 50
shape.height = 50
shape.pivot.set(25, 25)
shape
}
// Builds a box sprite, positions it at (x, y), and attaches it to root.
def initBoxAndAdd(texture: pixi.Texture, x: Int, y: Int, root: pixi.Container): pixi.Sprite = {
val s = createBox(texture)
s.position.set(x, y)
root.addChild(s)
s
}
}
| Technius/scalajs-pixi | examples/src/main/scala/ExampleApp.scala | Scala | mit | 1,758 |
package com.twitter.finagle.thrift
import com.twitter.finagle.{Context, ContextHandler}
import com.twitter.io.Buf
import org.jboss.netty.buffer.{ChannelBuffers, ChannelBuffer}
// Constants and helpers for marshalling the ClientId request context.
private[finagle] object ClientIdContext {
// Well-known context key under which the ClientId name travels.
val Key = Buf.Utf8("com.twitter.finagle.thrift.ClientIdContext")
val KeyBytes = Context.keyBytes(Key)
val KeyBytesChannelBuffer = ChannelBuffers.wrappedBuffer(KeyBytes)
/**
* Serialize an `Option[String]` representing an optional ClientId name into
* a tuple of key->value ChannelBuffers. Useful for piecing together context
* pairs to give to the construct of `Tdispatch`.
*/
private[finagle] def newKVTuple(clientIdOpt: Option[String]): (ChannelBuffer, ChannelBuffer) = {
val clientIdBuf = clientIdOpt match {
case Some(clientId) =>
// Copy the UTF-8 encoded name into a fresh byte array, then wrap it.
val vBuf = Buf.Utf8(clientId)
val bytes = new Array[Byte](vBuf.length)
vBuf.write(bytes, 0)
ChannelBuffers.wrappedBuffer(bytes)
// No client id: represent the value as an empty buffer.
case None => ChannelBuffers.EMPTY_BUFFER
}
// Duplicate the shared key buffer so each caller gets its own buffer view.
KeyBytesChannelBuffer.duplicate() -> clientIdBuf
}
}
/**
 * A context handler for ClientIds: decodes an incoming context body into the
 * local ClientId, and emits the current ClientId when serializing context.
 */
private[finagle] class ClientIdContext extends ContextHandler {
  val key = ClientIdContext.Key

  def handle(body: Buf): Unit =
    if (body.length == 0) ClientId.clear()
    else body match {
      case Buf.Utf8(name) => ClientId.set(Some(ClientId(name)))
      case _ => ClientId.clear()
    }

  // It arguably doesn't make sense to pass through ClientIds, since
  // they are meant to identify the immediate client of a downstream
  // system.
  def emit(): Option[Buf] = ClientId.current.map(id => Buf.Utf8(id.name))
}
| JustinTulloss/finagle | finagle-thrift/src/main/scala/com/twitter/finagle/thrift/ClientIdContext.scala | Scala | apache-2.0 | 1,645 |
package iot.pood.integration.actors
/**
* Created by rafik on 8.9.2017.
*/
/** Channel-name constants shared by integration components. */
trait IntegrationComponent {
  /** Name of the data channel. */
  val DATA: String = "data"
  /** Name of the command channel. */
  val COMMAND: String = "command"
}
// A message carrying a numeric identifier.
trait IntegrationMessage {
// Identifier of this message.
def messageId: Long
}
| rafajpet/iot-pood | iot-pood-integration/src/main/scala/iot/pood/integration/actors/IntegrationComponent.scala | Scala | mit | 214 |
/*
* Copyright 2013 - 2020 Outworkers Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.outworkers.phantom.builder.primitives
import java.nio.{BufferUnderflowException, ByteBuffer}
import com.datastax.driver.core._
import com.datastax.driver.core.exceptions.{DriverInternalError, InvalidTypeException}
import com.outworkers.phantom.builder.QueryBuilder
import scala.collection.compat._
// Low-level helpers for encoding CQL collections per native-protocol version.
object Utils {
// Error for protocol versions this encoder does not know how to handle.
private[phantom] def unsupported(version: ProtocolVersion): DriverInternalError = {
new DriverInternalError(s"Unsupported protocol version $version")
}
// Size in bytes of a length field in protocol v3+ (a 32-bit int).
private[this] val baseColSize = 4
// Number of bytes one serialized element occupies, including its length prefix.
private[this] def sizeOfValue(value: ByteBuffer, version: ProtocolVersion): Int = {
version match {
case ProtocolVersion.V1 | ProtocolVersion.V2 =>
val elemSize = value.remaining
// v1/v2 use an unsigned 16-bit length prefix, hence the 65535 cap.
if (elemSize > 65535) {
throw new IllegalArgumentException(
s"Native protocol version $version supports only elements " +
s"with size up to 65535 bytes - but element size is $elemSize bytes"
)
}
2 + elemSize
case ProtocolVersion.V3 | ProtocolVersion.V4 | ProtocolVersion.V5 =>
// Null elements are encoded as a length of -1 with no payload.
if (value == Primitive.nullValue) baseColSize else baseColSize + value.remaining
case _ => throw unsupported(version)
}
}
// Bytes used to encode the element count: 16-bit in v1/v2, 32-bit in v3+.
private[this] def sizeOfCollectionSize(version: ProtocolVersion): Int = version match {
case ProtocolVersion.V1 | ProtocolVersion.V2 => 2
case ProtocolVersion.V3 | ProtocolVersion.V4 | ProtocolVersion.V5 => baseColSize
case _ => throw unsupported(version)
}
/**
* Utility method that "packs" together a list of {@link ByteBuffer}s containing
* serialized collection elements.
* Mainly intended for use with collection codecs when serializing collections.
*
* @param buffers the collection elements
* @param elements the total number of elements
* @param version the protocol version to use
* @return The serialized collection
*/
def pack(
buffers: Array[ByteBuffer],
elements: Int,
version: ProtocolVersion
): ByteBuffer = {
val size = buffers.foldLeft(0)((acc, b) => acc + sizeOfValue(b, version))
val result = ByteBuffer.allocate(sizeOfCollectionSize(version) + size)
CodecUtils.writeSize(result, elements, version)
for (bb <- buffers) CodecUtils.writeValue(result, bb, version)
result.flip.asInstanceOf[ByteBuffer]
}
/**
* Utility method that "packs" together a list of {{java.nio.ByteBuffer}}s containing
* serialized collection elements.
* Mainly intended for use with collection codecs when serializing collections.
*
* NOTE(review): this overload duplicates the Array variant above for Iterable
* inputs; the two must stay byte-compatible.
*
* @param buffers the collection elements
* @param elements the total number of elements
* @param version the protocol version to use
* @return The serialized collection
*/
def pack[M[X] <: Iterable[X]](
buffers: M[ByteBuffer],
elements: Int,
version: ProtocolVersion
): ByteBuffer = {
val size = buffers.foldLeft(0)((acc, b) => acc + sizeOfValue(b, version))
val result = ByteBuffer.allocate(sizeOfCollectionSize(version) + size)
CodecUtils.writeSize(result, elements, version)
for (bb <- buffers) CodecUtils.writeValue(result, bb, version)
result.flip.asInstanceOf[ByteBuffer]
}
}
// Factory methods for collection and Option primitives (CQL codecs).
object Primitives {
// Shared zero-length buffer used to represent an empty collection payload.
private[phantom] def emptyCollection: ByteBuffer = ByteBuffer.allocate(0)
// Builds a Primitive for a collection type M[RR] given its CQL type name and
// a CQL-literal serializer.
// NOTE(review): M is only bounded by IterableOnce, but serialize traverses
// coll.iterator several times (isEmpty check, fold, size). That is safe for
// the List/Set instantiations below, but would break for single-pass
// collections; consider tightening the bound to Iterable.
private[this] def collectionPrimitive[M[X] <: IterableOnce[X], RR](
cType: String,
converter: M[RR] => String
)(
implicit ev: Primitive[RR],
cbf: Factory[RR, M[RR]]
): Primitive[M[RR]] = new Primitive[M[RR]] {
override def frozen: Boolean = true
override def shouldFreeze: Boolean = true
override def asCql(value: M[RR]): String = converter(value)
override val dataType: String = cType
override def serialize(coll: M[RR], version: ProtocolVersion): ByteBuffer = {
coll match {
// null collections serialize to a null buffer.
case Primitive.nullValue => Primitive.nullValue
// Empty collections still need a size header.
case c if c.iterator.isEmpty => Utils.pack(new Array[ByteBuffer](coll.size), coll.size, version)
case _ =>
// Serialize each element, rejecting nulls, then pack with a header.
val bbs = coll.iterator.foldLeft(Seq.empty[ByteBuffer]) { (acc, elt) =>
notNull(elt, "Collection elements cannot be null")
acc :+ ev.serialize(elt, version)
}
Utils.pack(bbs, coll.iterator.size, version)
}
}
override def deserialize(bytes: ByteBuffer, version: ProtocolVersion): M[RR] = {
// null or empty payloads decode to an empty collection.
if (bytes == Primitive.nullValue || bytes.remaining() == 0) {
cbf.newBuilder.result()
} else {
try {
// Work on a duplicate so the caller's buffer indices are untouched.
val input = bytes.duplicate()
val size = CodecUtils.readSize(input, version)
val coll = cbf.newBuilder
coll.sizeHint(size)
for (_ <- 0 until size) {
val databb = CodecUtils.readValue(input, version)
coll += ev.deserialize(databb, version)
}
coll.result()
} catch {
case e: BufferUnderflowException =>
throw new InvalidTypeException("Not enough bytes to deserialize collection", e)
}
}
}
}
// list<T> codec built from the element primitive.
def list[T]()(implicit ev: Primitive[T]): Primitive[List[T]] = {
collectionPrimitive[List, T](
QueryBuilder.Collections.listType(ev.cassandraType).queryString,
value => QueryBuilder.Collections.serialize(value.map(ev.asCql)).queryString
)
}
// set<T> codec built from the element primitive.
def set[T]()(implicit ev: Primitive[T]): Primitive[Set[T]] = {
collectionPrimitive[Set, T](
QueryBuilder.Collections.setType(ev.cassandraType).queryString,
value => QueryBuilder.Collections.serialize(value.map(ev.asCql)).queryString
)
}
// Option codec: None maps to a null buffer / CQL "null", Some delegates to
// the underlying primitive.
def option[T : Primitive]: Primitive[Option[T]] = {
val ev = implicitly[Primitive[T]]
val nullString = "null"
new Primitive[Option[T]] {
def serialize(obj: Option[T], protocol: ProtocolVersion): ByteBuffer = {
obj.fold(
Primitive.nullValue.asInstanceOf[ByteBuffer]
)(ev.serialize(_, protocol))
}
def deserialize(source: ByteBuffer, protocol: ProtocolVersion): Option[T] = {
if (source == Primitive.nullValue) {
None
} else {
Some(ev.deserialize(source, protocol))
}
}
override def dataType: String = ev.dataType
override def asCql(value: Option[T]): String = {
value.map(ev.asCql).getOrElse(nullString)
}
}
}
}
| outworkers/phantom | phantom-dsl/src/main/scala/com/outworkers/phantom/builder/primitives/Primitives.scala | Scala | apache-2.0 | 6,934 |
package pl.touk.nussknacker.engine.flink.util
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.scala._
import pl.touk.nussknacker.engine.util.KeyedValue
// Must be in object because of Java interop (problems with package object) and abstract type StringKeyedValue[V]
object KeyValueHelperTypeInformation {

  /**
   * Builds a `TypeInformation` for `KeyedValue[K, V]` from explicitly supplied key and value
   * type information. Helper for Java interop, e.g. when you want a KeyedEvent[POJO, POJO].
   */
  def typeInformation[K, V](keyTypeInformation: TypeInformation[K], valueTypeInformation: TypeInformation[V]): TypeInformation[KeyedValue[K, V]] = {
    // Expose the arguments implicitly so the derivation for KeyedValue[K, V] can pick them up.
    implicit val keyInfo: TypeInformation[K] = keyTypeInformation
    implicit val valueInfo: TypeInformation[V] = valueTypeInformation
    implicitly[TypeInformation[KeyedValue[K, V]]]
  }

  /**
   * Convenience overload for string-keyed values — Java interop helper, e.g. for a
   * StringKeyedEvent[POJO].
   */
  def typeInformation[V](valueTypeInformation: TypeInformation[V]): TypeInformation[KeyedValue[String, V]] =
    typeInformation(implicitly[TypeInformation[String]], valueTypeInformation)
}
| TouK/nussknacker | engine/flink/components-utils/src/main/scala/pl/touk/nussknacker/engine/flink/util/KeyValueHelperTypeInformation.scala | Scala | apache-2.0 | 1,169 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.ml.attribute.{Attribute, NominalAttribute}
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.{DefaultReadWriteTest, Identifiable, MLTestingUtils}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}
// Test suite for StringIndexer / StringIndexerModel / IndexToString. Labels are indexed
// by descending frequency by default, ties broken alphabetically (see "StringIndexer" test).
class StringIndexerSuite
  extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {

  import testImplicits._

  test("params") {
    ParamsSuite.checkParams(new StringIndexer)
    val model = new StringIndexerModel("indexer", Array("a", "b"))
    val modelWithoutUid = new StringIndexerModel(Array("a", "b"))
    ParamsSuite.checkParams(model)
    ParamsSuite.checkParams(modelWithoutUid)
  }

  test("StringIndexer") {
    val data = Seq((0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c"))
    val df = data.toDF("id", "label")
    val indexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("labelIndex")
    val indexerModel = indexer.fit(df)

    MLTestingUtils.checkCopyAndUids(indexer, indexerModel)

    val transformed = indexerModel.transform(df)
    // The output column carries a NominalAttribute whose values are the ordered labels.
    val attr = Attribute.fromStructField(transformed.schema("labelIndex"))
      .asInstanceOf[NominalAttribute]
    assert(attr.values.get === Array("a", "c", "b"))
    val output = transformed.select("id", "labelIndex").rdd.map { r =>
      (r.getInt(0), r.getDouble(1))
    }.collect().toSet
    // a -> 0, b -> 2, c -> 1
    val expected = Set((0, 0.0), (1, 2.0), (2, 1.0), (3, 0.0), (4, 0.0), (5, 1.0))
    assert(output === expected)
  }

  test("StringIndexerUnseen") {
    val data = Seq((0, "a"), (1, "b"), (4, "b"))
    val data2 = Seq((0, "a"), (1, "b"), (2, "c"), (3, "d"))
    val df = data.toDF("id", "label")
    val df2 = data2.toDF("id", "label")
    val indexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("labelIndex")
      .fit(df)

    // Verify we throw by default with unseen values
    intercept[SparkException] {
      indexer.transform(df2).collect()
    }

    indexer.setHandleInvalid("skip")
    // Verify that we skip the c record
    val transformedSkip = indexer.transform(df2)
    val attrSkip = Attribute.fromStructField(transformedSkip.schema("labelIndex"))
      .asInstanceOf[NominalAttribute]
    assert(attrSkip.values.get === Array("b", "a"))
    val outputSkip = transformedSkip.select("id", "labelIndex").rdd.map { r =>
      (r.getInt(0), r.getDouble(1))
    }.collect().toSet
    // a -> 1, b -> 0
    val expectedSkip = Set((0, 1.0), (1, 0.0))
    assert(outputSkip === expectedSkip)

    indexer.setHandleInvalid("keep")
    // Verify that we keep the unseen records
    val transformedKeep = indexer.transform(df2)
    val attrKeep = Attribute.fromStructField(transformedKeep.schema("labelIndex"))
      .asInstanceOf[NominalAttribute]
    assert(attrKeep.values.get === Array("b", "a", "__unknown"))
    val outputKeep = transformedKeep.select("id", "labelIndex").rdd.map { r =>
      (r.getInt(0), r.getDouble(1))
    }.collect().toSet
    // a -> 1, b -> 0; both unseen values (c and d) map to the single "__unknown" index 2
    val expectedKeep = Set((0, 1.0), (1, 0.0), (2, 2.0), (3, 2.0))
    assert(outputKeep === expectedKeep)
  }

  test("StringIndexer with a numeric input column") {
    val data = Seq((0, 100), (1, 200), (2, 300), (3, 100), (4, 100), (5, 300))
    val df = data.toDF("id", "label")
    val indexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("labelIndex")
      .fit(df)
    val transformed = indexer.transform(df)
    // Numeric labels are converted to their string representation before indexing.
    val attr = Attribute.fromStructField(transformed.schema("labelIndex"))
      .asInstanceOf[NominalAttribute]
    assert(attr.values.get === Array("100", "300", "200"))
    val output = transformed.select("id", "labelIndex").rdd.map { r =>
      (r.getInt(0), r.getDouble(1))
    }.collect().toSet
    // 100 -> 0, 200 -> 2, 300 -> 1
    val expected = Set((0, 0.0), (1, 2.0), (2, 1.0), (3, 0.0), (4, 0.0), (5, 1.0))
    assert(output === expected)
  }

  test("StringIndexer with NULLs") {
    val data: Seq[(Int, String)] = Seq((0, "a"), (1, "b"), (2, "b"), (3, null))
    val data2: Seq[(Int, String)] = Seq((0, "a"), (1, "b"), (3, null))
    val df = data.toDF("id", "label")
    val df2 = data2.toDF("id", "label")

    val indexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("labelIndex")

    withClue("StringIndexer should throw error when setHandleInvalid=error " +
      "when given NULL values") {
      intercept[SparkException] {
        indexer.setHandleInvalid("error")
        indexer.fit(df).transform(df2).collect()
      }
    }

    indexer.setHandleInvalid("skip")
    val transformedSkip = indexer.fit(df).transform(df2)
    val attrSkip = Attribute
      .fromStructField(transformedSkip.schema("labelIndex"))
      .asInstanceOf[NominalAttribute]
    assert(attrSkip.values.get === Array("b", "a"))
    val outputSkip = transformedSkip.select("id", "labelIndex").rdd.map { r =>
      (r.getInt(0), r.getDouble(1))
    }.collect().toSet
    // a -> 1, b -> 0
    val expectedSkip = Set((0, 1.0), (1, 0.0))
    assert(outputSkip === expectedSkip)

    indexer.setHandleInvalid("keep")
    val transformedKeep = indexer.fit(df).transform(df2)
    val attrKeep = Attribute
      .fromStructField(transformedKeep.schema("labelIndex"))
      .asInstanceOf[NominalAttribute]
    assert(attrKeep.values.get === Array("b", "a", "__unknown"))
    val outputKeep = transformedKeep.select("id", "labelIndex").rdd.map { r =>
      (r.getInt(0), r.getDouble(1))
    }.collect().toSet
    // a -> 1, b -> 0, null -> 2
    val expectedKeep = Set((0, 1.0), (1, 0.0), (3, 2.0))
    assert(outputKeep === expectedKeep)
  }

  test("StringIndexerModel should keep silent if the input column does not exist.") {
    val indexerModel = new StringIndexerModel("indexer", Array("a", "b", "c"))
      .setInputCol("label")
      .setOutputCol("labelIndex")
    val df = spark.range(0L, 10L).toDF()
    // Transform is a no-op when the input column is missing.
    assert(indexerModel.transform(df).collect().toSet === df.collect().toSet)
  }

  test("StringIndexerModel can't overwrite output column") {
    val df = Seq((1, 2), (3, 4)).toDF("input", "output")
    intercept[IllegalArgumentException] {
      new StringIndexer()
        .setInputCol("input")
        .setOutputCol("output")
        .fit(df)
    }

    val indexer = new StringIndexer()
      .setInputCol("input")
      .setOutputCol("indexedInput")
      .fit(df)

    intercept[IllegalArgumentException] {
      indexer.setOutputCol("output").transform(df)
    }
  }

  test("StringIndexer read/write") {
    val t = new StringIndexer()
      .setInputCol("myInputCol")
      .setOutputCol("myOutputCol")
      .setHandleInvalid("skip")
    testDefaultReadWrite(t)
  }

  test("StringIndexerModel read/write") {
    val instance = new StringIndexerModel("myStringIndexerModel", Array("a", "b", "c"))
      .setInputCol("myInputCol")
      .setOutputCol("myOutputCol")
      .setHandleInvalid("skip")
    val newInstance = testDefaultReadWrite(instance)
    assert(newInstance.labels === instance.labels)
  }

  test("IndexToString params") {
    val idxToStr = new IndexToString()
    ParamsSuite.checkParams(idxToStr)
  }

  test("IndexToString.transform") {
    val labels = Array("a", "b", "c")
    val df0 = Seq((0, "a"), (1, "b"), (2, "c"), (0, "a")).toDF("index", "expected")

    // Labels supplied explicitly via setLabels.
    val idxToStr0 = new IndexToString()
      .setInputCol("index")
      .setOutputCol("actual")
      .setLabels(labels)
    idxToStr0.transform(df0).select("actual", "expected").collect().foreach {
      case Row(actual, expected) =>
        assert(actual === expected)
    }

    // Labels recovered from the input column's ML attribute metadata.
    val attr = NominalAttribute.defaultAttr.withValues(labels)
    val df1 = df0.select(col("index").as("indexWithAttr", attr.toMetadata()), col("expected"))

    val idxToStr1 = new IndexToString()
      .setInputCol("indexWithAttr")
      .setOutputCol("actual")
    idxToStr1.transform(df1).select("actual", "expected").collect().foreach {
      case Row(actual, expected) =>
        assert(actual === expected)
    }
  }

  test("StringIndexer, IndexToString are inverses") {
    val data = Seq((0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c"))
    val df = data.toDF("id", "label")
    val indexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("labelIndex")
      .fit(df)
    val transformed = indexer.transform(df)
    val idx2str = new IndexToString()
      .setInputCol("labelIndex")
      .setOutputCol("sameLabel")
      .setLabels(indexer.labels)
    idx2str.transform(transformed).select("label", "sameLabel").collect().foreach {
      case Row(a: String, b: String) =>
        assert(a === b)
    }
  }

  test("IndexToString.transformSchema (SPARK-10573)") {
    val idxToStr = new IndexToString().setInputCol("input").setOutputCol("output")
    val inSchema = StructType(Seq(StructField("input", DoubleType)))
    val outSchema = idxToStr.transformSchema(inSchema)
    assert(outSchema("output").dataType === StringType)
  }

  test("IndexToString read/write") {
    val t = new IndexToString()
      .setInputCol("myInputCol")
      .setOutputCol("myOutputCol")
      .setLabels(Array("a", "b", "c"))
    testDefaultReadWrite(t)
  }

  test("SPARK 18698: construct IndexToString with custom uid") {
    val uid = "customUID"
    val t = new IndexToString(uid)
    assert(t.uid == uid)
  }

  test("StringIndexer metadata") {
    val data = Seq((0, "a"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "c"))
    val df = data.toDF("id", "label")
    val indexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("labelIndex")
      .fit(df)
    val transformed = indexer.transform(df)
    val attrs =
      NominalAttribute.decodeStructField(transformed.schema("labelIndex"), preserveName = true)
    assert(attrs.name.nonEmpty && attrs.name.get === "labelIndex")
  }

  test("StringIndexer order types") {
    val data = Seq((0, "b"), (1, "b"), (2, "c"), (3, "a"), (4, "a"), (5, "b"))
    val df = data.toDF("id", "label")
    val indexer = new StringIndexer()
      .setInputCol("label")
      .setOutputCol("labelIndex")
    // One expected mapping per supported ordering (same order as supportedStringOrderType).
    val expected = Seq(Set((0, 0.0), (1, 0.0), (2, 2.0), (3, 1.0), (4, 1.0), (5, 0.0)),
      Set((0, 2.0), (1, 2.0), (2, 0.0), (3, 1.0), (4, 1.0), (5, 2.0)),
      Set((0, 1.0), (1, 1.0), (2, 0.0), (3, 2.0), (4, 2.0), (5, 1.0)),
      Set((0, 1.0), (1, 1.0), (2, 2.0), (3, 0.0), (4, 0.0), (5, 1.0)))
    var idx = 0
    for (orderType <- StringIndexer.supportedStringOrderType) {
      val transformed = indexer.setStringOrderType(orderType).fit(df).transform(df)
      val output = transformed.select("id", "labelIndex").rdd.map { r =>
        (r.getInt(0), r.getDouble(1))
      }.collect().toSet
      assert(output === expected(idx))
      idx += 1
    }
  }
}
| bOOm-X/spark | mllib/src/test/scala/org/apache/spark/ml/feature/StringIndexerSuite.scala | Scala | apache-2.0 | 11,763 |
package edu.msstate.dasi.csb.data.synth
import edu.msstate.dasi.csb.data.distributions.DataDistributions
import edu.msstate.dasi.csb.model.{EdgeData, VertexData}
import edu.msstate.dasi.csb.sc
import org.apache.spark.graphx.{Edge, Graph, VertexId}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import scala.util.Random
/**
* Kronecker based Graph generation given seed matrix.
*/
class StochasticKronecker(partitions: Int, mtxFile: String, genIter: Int) extends GraphSynth {

  /**
   * Parses a whitespace-separated probability matrix from a text file, one row per line.
   *
   * @param mtxFilePath path of the matrix file
   * @return the matrix as an array of rows
   */
  private def parseMtxDataFromFile(mtxFilePath: String): Array[Array[Double]] = {
    sc.textFile(mtxFilePath)
      .map(line => line.split(" "))
      .map(record => record.map(number => number.toDouble))
      .collect()
  }

  /**
   * Generates a small probability matrix from a graph.
   *
   * The KronFit algorithm is a gradient descent based algorithm which ensures that the probability of generating the
   * original graph from the small probability matrix after performing Kronecker multiplications is very high.
   */
  private def kronFit(seed: Graph[VertexData, EdgeData]): Array[Array[Double]] = {
    //inMtx already has a default value
    // Util.time( "KronFit", KroFit.run(seed) )
    // NOTE: KronFit is currently stubbed out; the matrix is read from the supplied file instead.
    parseMtxDataFromFile(mtxFile)
  }

  /**
   * Samples up to `nEdges` Kronecker edges in parallel.
   *
   * Each edge is produced by descending `iter` levels of the recursive Kronecker structure,
   * at every level picking a cell of the seed matrix according to `cumulativeProbMtx`.
   *
   * @param cumulativeProbMtx cumulative probability per matrix cell, as (cumProb, row, col)
   */
  private def getKroRDD(nVerts: Long, nEdges: Long, mtxVerticesNum: Int, iter: Int, cumulativeProbMtx: Array[(Double, VertexId, VertexId)] ): RDD[Edge[EdgeData]] = {
    val localPartitions = math.min(nEdges, partitions).toInt
    // Clamp to Int.MaxValue BEFORE narrowing to Int: the previous
    // `math.min((nEdges / localPartitions).toInt, Int.MaxValue)` applied `.toInt` first,
    // so the clamp never fired and the conversion could silently overflow.
    val recordsPerPartition = math.min(nEdges / localPartitions, Int.MaxValue.toLong).toInt

    val i = sc.parallelize(Seq.empty[Char], localPartitions).mapPartitions( _ => (1 to recordsPerPartition).iterator )

    val edgeList = i.map { _ =>
      var range = nVerts
      var srcId = 0L
      var dstId = 0L

      // Recursive descent: at each level pick a seed-matrix cell by inverse transform sampling.
      for ( _ <- 1 to iter ) {
        val prob = Random.nextDouble()

        var index = 0
        while (prob > cumulativeProbMtx(index)._1) {
          index += 1
        }

        range = range / mtxVerticesNum
        srcId += cumulativeProbMtx(index)._2 * range
        dstId += cumulativeProbMtx(index)._3 * range
      }
      Edge[EdgeData](srcId, dstId)
    }
    edgeList
  }

  /**
   * Computes the RDD of the additional edges that should be added accordingly to the edge distribution.
   *
   * @param edgeList The RDD of the edges returned by the Kronecker algorithm.
   *
   * @return The RDD of the additional edges that should be added to the one returned by Kronecker algorithm.
   */
  private def getMultiEdgesRDD(edgeList: RDD[Edge[EdgeData]], seedDists: DataDistributions): RDD[Edge[EdgeData]] = {
    val dataDistBroadcast = sc.broadcast(seedDists)

    val multiEdgeList = edgeList.flatMap { edge =>
      // Sample how many parallel edges this (src, dst) pair should have; one already exists,
      // so emit (sampled - 1) duplicates.
      val multiEdgesNum = dataDistBroadcast.value.outDegree.sample
      var multiEdges = Array.empty[Edge[EdgeData]]
      for ( _ <- 1 until multiEdgesNum ) {
        multiEdges :+= Edge[EdgeData](edge.srcId, edge.dstId)
      }
      multiEdges
    }
    multiEdgeList
  }

  /** Function to generate and return a kronecker graph
   *
   * @param probMtx Probability Matrix used to generate Kronecker Graph
   *
   * @return Graph containing vertices + VertexData, edges + EdgeData*/
  private def generateKroGraph(probMtx: Array[Array[Double]], seedDists: DataDistributions): Graph[VertexData, EdgeData] = {

    val mtxVerticesNum = probMtx.length
    val mtxSum = probMtx.map(record => record.sum).sum

    // After genIter Kronecker multiplications: |V| = n^genIter, expected |E| = (sum of cells)^genIter.
    val nVerts = math.pow(mtxVerticesNum, genIter).toLong
    val nEdges = math.pow(mtxSum, genIter).toLong
    println("Expected # of Vertices: " + nVerts)
    println("Expected # of Edges: " + nEdges)

    // Flatten the matrix into a cumulative distribution over its cells.
    var cumProb: Double = 0.0
    var cumulativeProbMtx = Array.empty[(Double, VertexId, VertexId)]

    for (i <- 0 until mtxVerticesNum; j <- 0 until mtxVerticesNum) {
      val prob = probMtx(i)(j)
      cumProb += prob
      cumulativeProbMtx :+= (cumProb/mtxSum, i.toLong, j.toLong)
    }

    var startTime = System.nanoTime()

    // Keep sampling batches until enough DISTINCT edges have been produced
    // (duplicates are dropped, so a single pass may fall short).
    var curEdges: Long = 0
    var edgeList = sc.emptyRDD[Edge[EdgeData]]
    while (curEdges < nEdges) {
      println("getKroRDD(" + (nEdges - curEdges) + ")")

      val oldEdgeList = edgeList
      val newRDD = getKroRDD(nVerts, nEdges - curEdges, mtxVerticesNum, genIter, cumulativeProbMtx)
      edgeList = oldEdgeList.union(newRDD).distinct()
        .coalesce(partitions).setName("edgeList#" + curEdges).persist(StorageLevel.MEMORY_AND_DISK)
      curEdges = edgeList.count()

      oldEdgeList.unpersist()
      println(s"Requested/created: $nEdges/$curEdges")
    }

    var timeSpan = (System.nanoTime() - startTime) / 1e9
    println(s"All getKroRDD time: $timeSpan s")
    println("Number of edges before union: " + edgeList.count())

    startTime = System.nanoTime()
    val newEdges = getMultiEdgesRDD(edgeList, seedDists).setName("newEdges")

    // TODO: finalEdgeList should be un-persisted after the next action (but the action will probably be outside this method)
    val finalEdgeList = edgeList.union(newEdges)
      .coalesce(partitions).setName("finalEdgeList").persist(StorageLevel.MEMORY_AND_DISK)
    println("Total # of Edges (including multi edges): " + finalEdgeList.count())

    timeSpan = (System.nanoTime() - startTime) / 1e9
    println(s"MultiEdges time: $timeSpan s")

    Graph.fromEdges(
      finalEdgeList,
      null.asInstanceOf[VertexData],
      StorageLevel.MEMORY_AND_DISK,
      StorageLevel.MEMORY_AND_DISK
    )
  }

  /**
   * Synthesize a graph from a seed graph and its property distributions.
   *
   * @param seed Seed graph object begin generating synthetic graph with.
   * @param seedDists Seed distributions to use when generating the synthetic graph.
   *
   * @return Synthetic graph object containing properties
   */
  protected def genGraph(seed: Graph[VertexData, EdgeData], seedDists: DataDistributions): Graph[VertexData, EdgeData] = {
    //val probMtx: Array[Array[Float]] = Array(Array(0.1f, 0.9f), Array(0.9f, 0.5f))
    val probMtx: Array[Array[Double]] = kronFit(seed)

    println()
    print("Matrix: ")
    probMtx.foreach(_.foreach(record => print(record + " ")))
    println()
    println()

    println()
    println(s"Running Kronecker with $genIter iterations.")
    println()

    //Run Kronecker with the adjacency matrix
    generateKroGraph(probMtx, seedDists)
  }
}
| msstate-dasi/csb | csb/src/main/scala/edu/msstate/dasi/csb/data/synth/StochasticKronecker.scala | Scala | gpl-3.0 | 6,338 |
package com.mthaler.xmlconfect
import scala.util.Try
/**
* Cache for default args
*/
object DefaultArgsCache {

  // Volatile so readers always observe the latest published map without locking.
  @volatile
  private var cache = Map.empty[Class[_], IndexedSeq[Any]]

  /**
   * Gets default args for a given class.
   *
   * If default args for the given class are not cached yet, they are determined using
   * reflection and the result is added to the cache. Classes whose default args cannot
   * be resolved are cached as an empty sequence.
   *
   * Thread-safety: lookups are lock-free; misses are resolved under the object's monitor
   * with a re-check, so concurrent misses can no longer overwrite each other's entries
   * (the previous unsynchronized `cache += ...` was a non-atomic read-modify-write that
   * could silently drop additions and repeat the reflective computation).
   *
   * @param clazz the class whose constructor default arguments are requested
   * @return the default arguments, or an empty sequence if they cannot be determined
   */
  def get(clazz: Class[_]): IndexedSeq[Any] = {
    cache.get(clazz) match {
      case Some(args) => args
      case None =>
        synchronized {
          // Re-check under the lock: another thread may have inserted the entry
          // between the lock-free read above and acquiring the monitor.
          cache.get(clazz) match {
            case Some(args) => args
            case None =>
              val defaultArgs = Try { Classes.defaultArgs(clazz).toIndexedSeq } getOrElse (IndexedSeq.empty)
              cache += clazz -> defaultArgs
              defaultArgs
          }
        }
    }
  }
}
| mthaler/xmlconfect | src/main/scala/com/mthaler/xmlconfect/DefaultArgsCache.scala | Scala | apache-2.0 | 748 |
package euler
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import euler.P09._
@RunWith(classOf[JUnitRunner])
class P09test extends FunSuite with ShouldMatchers{
  // Project Euler problem 9: the unique Pythagorean triplet with a + b + c = 1000.
  // `r` (imported from P09) is presumably the product a * b * c — 31875000 corresponds
  // to the triplet (200, 375, 425); TODO confirm against P09's definition of `r`.
  test("9. Find the only Pythagorean triplet, {a, b, c}, for which a + b + c = 1000.") {
    r should be (31875000)
  }
}
package io.iohk.ethereum.consensus.ethash
import akka.actor.Actor
import io.iohk.ethereum.consensus.ethash.MinerResponses.MinerNotSupport
// Mixin for actors that do not implement the mocked-miner protocol.
trait MinerUtils {
  self: Actor =>

  // Replies to every MockedMinerProtocol message with MinerNotSupport(msg),
  // telling the sender this miner does not handle mocked-miner commands.
  def notSupportedMockedMinerMessages: Receive = { case msg: MockedMinerProtocol =>
    sender() ! MinerNotSupport(msg)
  }
}
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/consensus/ethash/MinerUtils.scala | Scala | mit | 303 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.