code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.transform.vision.image
import com.intel.analytics.bigdl.tensor.Tensor
import org.scalatest.{FlatSpec, Matchers}
class ConvertorSpec extends FlatSpec with Matchers {
  // Directory of test images bundled with the test resources.
  val resource = getClass.getClassLoader.getResource("pascal/")

  "MatToTensor" should "work properly" in {
    val frame = ImageFrame.read(resource.getFile)
    val feature = frame.asInstanceOf[LocalImageFrame].array.head
    // Snapshot the expected tensor from the raw float pixels BEFORE transforming,
    // so we can compare it against what MatToTensor stores on the feature.
    val expected = feature.toTensor(ImageFeature.floats)
    val toTensor = MatToTensor[Float]()
    toTensor(frame)
    val actual = feature[Tensor[Float]](ImageFeature.imageTensor)
    actual should be (expected)
  }
}
| luchy0120/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/transform/vision/image/ConvertorSpec.scala | Scala | apache-2.0 | 1,239 |
package com.bot4s.telegram.methods
import com.bot4s.telegram.models.ChatId
/**
 * Use this method to set a new group sticker set for a supergroup.
 * The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
 * Use the field can_set_sticker_set optionally returned in getChat requests to check if the bot can use this method.
 * Returns True on success.
 *
 * @param chatId         Unique identifier for the target chat or username of the target supergroup
 *                       (in the format @supergroupusername)
 * @param stickerSetName Name of the sticker set to be set as the group sticker set
 */
case class SetChatStickerSet(chatId: ChatId, stickerSetName: String) extends JsonRequest[Boolean]
| mukel/telegrambot4s | core/src/com/bot4s/telegram/methods/SetChatStickerSet.scala | Scala | apache-2.0 | 761 |
/*
* Copyright 2021 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.extra.sparkey
import com.spotify.scio.coders.Coder
import com.spotify.scio.extra.sparkey.instances.{SparkeyMap, SparkeySet}
import com.spotify.scio.values.{SCollection, SideInput}
import com.spotify.sparkey.CompressionType
/**
* Extra functions available on SCollections of (key, value) pairs for hash based joins through an
* implicit conversion, using the Sparkey-backed LargeMapSideInput for dramatic speed increases over
* the in-memory versions for datasets >100MB. As long as the RHS fits on disk, these functions are
* usually much much faster than regular joins and save on shuffling.
*
* Note that these are nearly identical to the functions in PairHashSCollectionFunctions.scala, but
* we can't reuse the implementations there as SideInput[T] is not covariant over T.
*
* @groupname join
* Join Operations
*/
class PairLargeHashSCollectionFunctions[K, V](private val self: SCollection[(K, V)]) {

  implicit private[this] val (keyCoder, valueCoder): (Coder[K], Coder[V]) =
    (self.keyCoder, self.valueCoder)

  /**
   * Perform an inner join by replicating `rhs` to all workers. The right side should be <<10x
   * smaller than the left side, and must fit on disk.
   *
   * @group join
   */
  def largeHashJoin[W](
    rhs: SCollection[(K, W)],
    numShards: Short = DefaultSideInputNumShards,
    compressionType: CompressionType = DefaultCompressionType,
    compressionBlockSize: Int = DefaultCompressionBlockSize
  ): SCollection[(K, (V, W))] = {
    implicit val wCoder: Coder[W] = rhs.valueCoder
    largeHashJoin(rhs.asLargeMultiMapSideInput(numShards, compressionType, compressionBlockSize))
  }

  /**
   * Perform an inner join with a MultiMap `SideInput[SparkeyMap[K, Iterable[V]]`
   *
   * The right side must fit on disk. The SideInput can be used reused for multiple joins.
   *
   * @example
   *   {{{
   *   val si = pairSCollRight.asLargeMultiMapSideInput
   *   val joined1 = pairSColl1Left.hashJoin(si)
   *   val joined2 = pairSColl2Left.hashJoin(si)
   *   }}}
   *
   * @group join
   */
  def largeHashJoin[W: Coder](
    sideInput: SideInput[SparkeyMap[K, Iterable[W]]]
  ): SCollection[(K, (V, W))] =
    self.transform { in =>
      in.withSideInputs(sideInput)
        .flatMap[(K, (V, W))] { (kv, sideInputCtx) =>
          // Inner join: keys absent from the side map produce no output.
          sideInputCtx(sideInput)
            .getOrElse(kv._1, Iterable.empty[W])
            .iterator
            .map(w => (kv._1, (kv._2, w)))
        }
        .toSCollection
    }

  /**
   * Perform a left outer join by replicating `rhs` to all workers. The right side must fit on disk.
   *
   * @example
   *   {{{
   *   val si = pairSCollRight
   *   val joined = pairSColl1Left.largeHashLeftOuterJoin(pairSCollRight)
   *   }}}
   * @group join
   * @param rhs
   *   The SCollection[(K, W)] treated as right side of the join.
   */
  def largeHashLeftOuterJoin[W](
    rhs: SCollection[(K, W)],
    numShards: Short = DefaultSideInputNumShards,
    compressionType: CompressionType = DefaultCompressionType,
    compressionBlockSize: Int = DefaultCompressionBlockSize
  ): SCollection[(K, (V, Option[W]))] = {
    implicit val wCoder: Coder[W] = rhs.valueCoder
    largeHashLeftOuterJoin(
      rhs.asLargeMultiMapSideInput(numShards, compressionType, compressionBlockSize)
    )
  }

  /**
   * Perform a left outer join with a MultiMap `SideInput[SparkeyMap[K, Iterable[V]]`
   *
   * @example
   *   {{{
   *   val si = pairSCollRight.asLargeMultiMapSideInput
   *   val joined1 = pairSColl1Left.hashLeftOuterJoin(si)
   *   val joined2 = pairSColl2Left.hashLeftOuterJoin(si)
   *   }}}
   * @group join
   */
  def largeHashLeftOuterJoin[W: Coder](
    sideInput: SideInput[SparkeyMap[K, Iterable[W]]]
  ): SCollection[(K, (V, Option[W]))] = {
    self.transform { in =>
      in.withSideInputs(sideInput)
        .flatMap[(K, (V, Option[W]))] { case ((k, v), sideInputCtx) =>
          // Using .get here instead of if/else to avoid calling .get twice on a disk-based map.
          sideInputCtx(sideInput)
            .get(k)
            .map(_.iterator.map(w => (k, (v, Some(w)))))
            .getOrElse(Iterator((k, (v, None))))
        }
        .toSCollection
    }
  }

  /**
   * Perform a full outer join by replicating `rhs` to all workers. The right side must fit on disk.
   *
   * @group join
   */
  def largeHashFullOuterJoin[W](
    rhs: SCollection[(K, W)],
    numShards: Short = DefaultSideInputNumShards,
    compressionType: CompressionType = DefaultCompressionType,
    compressionBlockSize: Int = DefaultCompressionBlockSize
  ): SCollection[(K, (Option[V], Option[W]))] = {
    // Explicitly typed: implicit definitions should always carry an explicit type
    // annotation (consistent with the other overloads above).
    implicit val wCoder: Coder[W] = rhs.valueCoder
    largeHashFullOuterJoin(
      rhs.asLargeMultiMapSideInput(numShards, compressionType, compressionBlockSize)
    )
  }

  /**
   * Perform a full outer join with a `SideInput[SparkeyMap[K, Iterable[W]]]`.
   *
   * @example
   *   {{{
   *   val si = pairSCollRight.asLargeMultiMapSideInput
   *   val joined1 = pairSColl1Left.hashFullOuterJoin(si)
   *   val joined2 = pairSColl2Left.hashFullOuterJoin(si)
   *   }}}
   *
   * @group join
   */
  def largeHashFullOuterJoin[W: Coder](
    sideInput: SideInput[SparkeyMap[K, Iterable[W]]]
  ): SCollection[(K, (Option[V], Option[W]))] =
    self.transform { in =>
      // Pass 1: join the left side against the side map, tagging each output with
      // whether its key matched (the Boolean), so pass 2 can find right-only keys.
      val leftHashed = in
        .withSideInputs(sideInput)
        .flatMap { case ((k, v), sideInputCtx) =>
          val rhsSideMap = sideInputCtx(sideInput)
          if (rhsSideMap.contains(k)) {
            rhsSideMap(k).iterator
              .map[(K, (Option[V], Option[W]), Boolean)](w => (k, (Some(v), Some(w)), true))
          } else {
            Iterator((k, (Some(v), None), false))
          }
        }
        .toSCollection

      // Pass 2: emit (None, Some(w)) for every key present only in the right side.
      val rightHashed = leftHashed
        .filter(_._3)
        .map(_._1)
        .aggregate(Set.empty[K])(_ + _, _ ++ _)
        .withSideInputs(sideInput)
        .flatMap { (mk, sideInputCtx) =>
          val m = sideInputCtx(sideInput)
          (m.keySet diff mk)
            .flatMap(k => m(k).iterator.map[(K, (Option[V], Option[W]))](w => (k, (None, Some(w)))))
        }
        .toSCollection

      leftHashed.map(x => (x._1, x._2)) ++ rightHashed
    }

  /**
   * Return an SCollection with the pairs from `this` whose keys are in `rhs` given `rhs` is small
   * enough to fit on disk.
   *
   * Unlike [[SCollection.intersection]] this preserves duplicates in `this`.
   *
   * @group per
   *   key
   */
  def largeHashIntersectByKey(
    rhs: SCollection[K],
    numShards: Short = DefaultSideInputNumShards,
    compressionType: CompressionType = DefaultCompressionType,
    compressionBlockSize: Int = DefaultCompressionBlockSize
  ): SCollection[(K, V)] =
    largeHashIntersectByKey(
      rhs.asLargeSetSideInput(numShards, compressionType, compressionBlockSize)
    )

  /**
   * Return an SCollection with the pairs from `this` whose keys are in the SideSet `rhs`.
   *
   * Unlike [[SCollection.intersection]] this preserves duplicates in `this`.
   *
   * @group per
   *   key
   */
  def largeHashIntersectByKey(sideInput: SideInput[SparkeySet[K]]): SCollection[(K, V)] =
    self
      .withSideInputs(sideInput)
      .filter { case ((k, _), sideInputCtx) => sideInputCtx(sideInput).contains(k) }
      .toSCollection

  /**
   * Return an SCollection with the pairs from `this` whose keys are not in SCollection[V] `rhs`.
   *
   * Rhs must be small enough to fit on disk.
   *
   * @group per
   *   key
   */
  def largeHashSubtractByKey(
    rhs: SCollection[K],
    numShards: Short = DefaultSideInputNumShards,
    compressionType: CompressionType = DefaultCompressionType,
    compressionBlockSize: Int = DefaultCompressionBlockSize
  ): SCollection[(K, V)] =
    largeHashSubtractByKey(
      rhs.asLargeSetSideInput(numShards, compressionType, compressionBlockSize)
    )

  /**
   * Return an SCollection with the pairs from `this` whose keys are not in SideInput[Set] `rhs`.
   *
   * @group per
   *   key
   */
  def largeHashSubtractByKey(sideInput: SideInput[SparkeySet[K]]): SCollection[(K, V)] =
    self
      .withSideInputs(sideInput)
      .filter { case ((k, _), sideInputCtx) => !sideInputCtx(sideInput).contains(k) }
      .toSCollection
}
| spotify/scio | scio-extra/src/main/scala/com/spotify/scio/extra/sparkey/PairLargeHashSCollectionFunctions.scala | Scala | apache-2.0 | 8,807 |
package agilesitesng.deploy.spoon
import agilesites.annotations.{FlexFamily, Site}
import agilesitesng.deploy.model.{Spooler, SpoonModel, Uid}
import org.slf4j.LoggerFactory
import spoon.processing.AbstractAnnotationProcessor
import spoon.reflect.declaration.CtClass
/**
* Created by msciab on 06/08/15.
*/
class FlexFamilyAnnotationProcessor extends AbstractAnnotationProcessor[FlexFamily, CtClass[_]] {

  // Resolve the logger once; the original `def` re-fetched it on every access.
  val logger = LoggerFactory.getLogger(classOf[FlexFamilyAnnotationProcessor])

  /**
   * Queues a FlexFamily deploy-model entry for a class annotated with `@FlexFamily`.
   *
   * @param a  the annotation carrying the flex family member names
   * @param cl the annotated class (only used to trigger processing)
   */
  def process(a: FlexFamily, cl: CtClass[_]): Unit = {
    val attr = a.flexAttribute()
    val content = a.flexContent()
    val parent = a.flexParent()
    val contentDef = a.flexContentDefinition()
    val parentDef = a.flexParentDefinition()
    val filter = a.flexFilter()
    // NOTE: SpoonModel.FlexFamily takes (attribute, contentDef, parentDef, content, parent, filter)
    // — argument order intentionally differs from the declaration order above.
    Spooler.insert(110, SpoonModel.FlexFamily(attr, contentDef, parentDef, content, parent, filter))
    logger.debug("...FlexFamily...")
  }
}
| agilesites/agilesites2-build | src/main/scala/agilesitesng/deploy/spoon/FlexFamilyAnnotationProcessor.scala | Scala | mit | 902 |
import scala.language.experimental.macros
object Macros {
  // Nested object of the same name: NOTE(review) this appears to be a compiler test
  // fixture exercising macro resolution through a nested stable path — keep as-is.
  object Macros {
    // Macro declaration only; the implementation is Impls.foo in a prior compilation run.
    def foo: Unit = macro Impls.foo
  }
}
object Test extends App {
  // Access the macro through an intermediate val prefix rather than directly;
  // NOTE(review): presumably this tests that the macro expands with a non-trivial
  // prefix — confirm against the accompanying check file.
  val outer = Macros
  outer.Macros.foo
}
| scala/scala | test/files/run/macro-term-declared-in-object-object/Macros_Test_2.scala | Scala | apache-2.0 | 187 |
package changecase
// IDE quick-assist test fixture: the miscased calls below are intentional and must
// NOT be "fixed" — they are the input the change-case quick assist operates on.
class ChangeCase {
  class SomeClass {
    def meThod1(): Unit = {}
    def metHod1(): Unit = {}
  }
  val someClass = new SomeClass()
  // Unresolved on purpose: two candidates (`meThod1`/`metHod1`) differ only by case.
  someClass.method1()
  // Wrong-cased stdlib call on purpose (`subSequence` is the real method).
  "str" subsequence(0, 1)
  def meThod2(): Unit = {}
  def metHod2(): Unit = {}
  // Unresolved on purpose: exercises case-correction against local methods.
  method2()
}
| Kwestor/scala-ide | org.scala-ide.sdt.core.tests/test-workspace/quickassist/src/changecase/ChangeCase.scala | Scala | bsd-3-clause | 276 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io._
import java.net.URI
import java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}
import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.immutable
import scala.collection.mutable.HashMap
import scala.language.implicitConversions
import scala.reflect.{classTag, ClassTag}
import scala.util.control.NonFatal
import com.google.common.collect.MapMaker
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}
import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
import org.apache.spark.executor.{ExecutorMetrics, ExecutorMetricsSource}
import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Tests._
import org.apache.spark.internal.config.UI._
import org.apache.spark.internal.plugin.PluginContainer
import org.apache.spark.io.CompressionCodec
import org.apache.spark.metrics.source.JVMCPUSource
import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
import org.apache.spark.rdd._
import org.apache.spark.resource._
import org.apache.spark.resource.ResourceUtils._
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
import org.apache.spark.scheduler.local.LocalSchedulerBackend
import org.apache.spark.shuffle.ShuffleDataIOUtils
import org.apache.spark.shuffle.api.ShuffleDriverComponents
import org.apache.spark.status.{AppStatusSource, AppStatusStore}
import org.apache.spark.status.api.v1.ThreadStackTrace
import org.apache.spark.storage._
import org.apache.spark.storage.BlockManagerMessages.TriggerThreadDump
import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}
import org.apache.spark.util._
import org.apache.spark.util.logging.DriverLogger
/**
* Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
* cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
*
* @note Only one `SparkContext` should be active per JVM. You must `stop()` the
* active `SparkContext` before creating a new one.
* @param config a Spark Config object describing the application configuration. Any settings in
* this config overrides the default configs as well as system properties.
*/
class SparkContext(config: SparkConf) extends Logging {
// The call site where this SparkContext was constructed.
private val creationSite: CallSite = Utils.getCallSite()
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having started construction.
// NOTE: this must be placed at the beginning of the SparkContext constructor.
SparkContext.markPartiallyConstructed(this)
val startTime = System.currentTimeMillis()
private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false)
// Fails fast with a descriptive IllegalStateException when this context has been
// stopped. The error also reports the currently active context's creation site (if
// any), to help diagnose "used the wrong SparkContext" mistakes in multi-context code.
private[spark] def assertNotStopped(): Unit = {
  if (stopped.get()) {
    val activeContext = SparkContext.activeContext.get()
    val activeCreationSite =
      if (activeContext == null) {
        "(No active SparkContext.)"
      } else {
        activeContext.creationSite.longForm
      }
    throw new IllegalStateException(
      s"""Cannot call methods on a stopped SparkContext.
         |This stopped SparkContext was created at:
         |
         |${creationSite.longForm}
         |
         |The currently active SparkContext was created at:
         |
         |$activeCreationSite
       """.stripMargin)
  }
}
/**
 * Create a SparkContext that loads settings from system properties (for instance, when
 * launching with ./bin/spark-submit).
 */
def this() = this(new SparkConf())

/**
 * Alternative constructor that allows setting common Spark properties directly
 *
 * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
 * @param appName A name for your application, to display on the cluster web UI
 * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters
 */
def this(master: String, appName: String, conf: SparkConf) =
  this(SparkContext.updatedConf(conf, master, appName))

/**
 * Alternative constructor that allows setting common Spark properties directly
 *
 * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
 * @param appName A name for your application, to display on the cluster web UI.
 * @param sparkHome Location where Spark is installed on cluster nodes.
 * @param jars Collection of JARs to send to the cluster. These can be paths on the local file
 * system or HDFS, HTTP, HTTPS, or FTP URLs.
 * @param environment Environment variables to set on worker nodes.
 */
def this(
    master: String,
    appName: String,
    sparkHome: String = null,
    jars: Seq[String] = Nil,
    environment: Map[String, String] = Map()) = {
  this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment))
}

// The following constructors are required when Java code accesses SparkContext directly.
// Please see SI-4278 (default arguments are not visible from Java, hence the explicit
// non-defaulted overloads below).

/**
 * Alternative constructor that allows setting common Spark properties directly
 *
 * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
 * @param appName A name for your application, to display on the cluster web UI.
 */
private[spark] def this(master: String, appName: String) =
  this(master, appName, null, Nil, Map())

/**
 * Alternative constructor that allows setting common Spark properties directly
 *
 * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
 * @param appName A name for your application, to display on the cluster web UI.
 * @param sparkHome Location where Spark is installed on cluster nodes.
 */
private[spark] def this(master: String, appName: String, sparkHome: String) =
  this(master, appName, sparkHome, Nil, Map())

/**
 * Alternative constructor that allows setting common Spark properties directly
 *
 * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
 * @param appName A name for your application, to display on the cluster web UI.
 * @param sparkHome Location where Spark is installed on cluster nodes.
 * @param jars Collection of JARs to send to the cluster. These can be paths on the local file
 * system or HDFS, HTTP, HTTPS, or FTP URLs.
 */
private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) =
  this(master, appName, sparkHome, jars, Map())
// log out Spark Version in Spark driver log
logInfo(s"Running Spark version $SPARK_VERSION")

/* ------------------------------------------------------------------------------------- *
 | Private variables. These variables keep the internal state of the context, and are    |
 | not accessible by the outside world. They're mutable since we want to initialize all  |
 | of them to some neutral value ahead of time, so that calling "stop()" while the       |
 | constructor is still running is safe.                                                 |
 * ------------------------------------------------------------------------------------- */

// The `= _` initializers yield the type's default value (null/0/false), which the
// init block below replaces; accessors expose these fields read-only where needed.
private var _conf: SparkConf = _
private var _eventLogDir: Option[URI] = None
private var _eventLogCodec: Option[String] = None
private var _listenerBus: LiveListenerBus = _
private var _env: SparkEnv = _
private var _statusTracker: SparkStatusTracker = _
private var _progressBar: Option[ConsoleProgressBar] = None
private var _ui: Option[SparkUI] = None
private var _hadoopConfiguration: Configuration = _
private var _executorMemory: Int = _
private var _schedulerBackend: SchedulerBackend = _
private var _taskScheduler: TaskScheduler = _
private var _heartbeatReceiver: RpcEndpointRef = _
@volatile private var _dagScheduler: DAGScheduler = _
private var _applicationId: String = _
private var _applicationAttemptId: Option[String] = None
private var _eventLogger: Option[EventLoggingListener] = None
private var _driverLogger: Option[DriverLogger] = None
private var _executorAllocationManager: Option[ExecutorAllocationManager] = None
private var _cleaner: Option[ContextCleaner] = None
private var _listenerBusStarted: Boolean = false
private var _jars: Seq[String] = _
private var _files: Seq[String] = _
private var _shutdownHookRef: AnyRef = _
private var _statusStore: AppStatusStore = _
private var _heartbeater: Heartbeater = _
private var _resources: immutable.Map[String, ResourceInformation] = _
private var _shuffleDriverComponents: ShuffleDriverComponents = _
private var _plugins: Option[PluginContainer] = None
private var _resourceProfileManager: ResourceProfileManager = _

/* ------------------------------------------------------------------------------------- *
 | Accessors and public fields. These provide access to the internal state of the        |
 | context.                                                                              |
 * ------------------------------------------------------------------------------------- */
// Internal mutable conf; external callers must use getConf, which returns a copy.
private[spark] def conf: SparkConf = _conf

/**
 * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be
 * changed at runtime.
 */
def getConf: SparkConf = conf.clone()

def resources: Map[String, ResourceInformation] = _resources

def jars: Seq[String] = _jars

def files: Seq[String] = _files

def master: String = _conf.get("spark.master")

def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE)

def appName: String = _conf.get("spark.app.name")

private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED)
private[spark] def eventLogDir: Option[URI] = _eventLogDir
private[spark] def eventLogCodec: Option[String] = _eventLogCodec

def isLocal: Boolean = Utils.isLocalMaster(_conf)

/**
 * @return true if context is stopped or in the midst of stopping.
 */
def isStopped: Boolean = stopped.get()

private[spark] def statusStore: AppStatusStore = _statusStore

// An asynchronous listener bus for Spark events
private[spark] def listenerBus: LiveListenerBus = _listenerBus
// This function allows components created by SparkEnv to be mocked in unit tests:
// subclasses/test doubles can override it to return a stubbed SparkEnv.
private[spark] def createSparkEnv(
    conf: SparkConf,
    isLocal: Boolean,
    listenerBus: LiveListenerBus): SparkEnv = {
  // Driver core count is derived from the master URL and conf (see numDriverCores).
  SparkEnv.createDriverEnv(conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf))
}
private[spark] def env: SparkEnv = _env

// Used to store a URL for each static file/jar together with the file's local timestamp
private[spark] val addedFiles = new ConcurrentHashMap[String, Long]().asScala
private[spark] val addedJars = new ConcurrentHashMap[String, Long]().asScala

// Keeps track of all persisted RDDs
private[spark] val persistentRdds = {
  // Weak values allow unreferenced RDDs to be garbage-collected while registered here.
  val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]()
  map.asScala
}

def statusTracker: SparkStatusTracker = _statusTracker

private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar

// None when the UI is disabled (e.g. in tests via spark.ui.enabled=false).
private[spark] def ui: Option[SparkUI] = _ui

def uiWebUrl: Option[String] = _ui.map(_.webUrl)

/**
 * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse.
 *
 * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you
 * plan to set some global configurations for all Hadoop RDDs.
 */
def hadoopConfiguration: Configuration = _hadoopConfiguration

private[spark] def executorMemory: Int = _executorMemory

// Environment variables to pass to our executors.
private[spark] val executorEnvs = HashMap[String, String]()

// Set SPARK_USER for user who is running SparkContext.
val sparkUser = Utils.getCurrentUserName()

private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend

private[spark] def taskScheduler: TaskScheduler = _taskScheduler
private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = {
  _taskScheduler = ts
}

private[spark] def dagScheduler: DAGScheduler = _dagScheduler
private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = {
  _dagScheduler = ds
}

private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents

/**
 * A unique identifier for the Spark application.
 * Its format depends on the scheduler implementation.
 * (i.e.
 * in case of local spark app something like 'local-1433865536131'
 * in case of YARN something like 'application_1433865536131_34483'
 * in case of MESOS something like 'driver-20170926223339-0001'
 * )
 */
def applicationId: String = _applicationId
def applicationAttemptId: Option[String] = _applicationAttemptId

private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger

private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] =
  _executorAllocationManager

private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager

private[spark] def cleaner: Option[ContextCleaner] = _cleaner

private[spark] var checkpointDir: Option[String] = None
// Thread Local variable that can be used by users to pass information down the stack
protected[spark] val localProperties = new InheritableThreadLocal[Properties] {
  override protected def childValue(parent: Properties): Properties = {
    // Note: make a clone such that changes in the parent properties aren't reflected in
    // those of the children threads, which has confusing semantics (SPARK-10563).
    Utils.cloneProperties(parent)
  }
  override protected def initialValue(): Properties = new Properties()
}
/* ------------------------------------------------------------------------------------- *
| Initialization. This code initializes the context in a manner that is exception-safe. |
| All internal fields holding state are initialized here, and any error prompts the |
| stop() method to be called. |
* ------------------------------------------------------------------------------------- */
// Logs a deprecation warning for the legacy SPARK_MEM env var and passes the
// value through unchanged (used inline in the executor-memory fallback chain).
private def warnSparkMem(value: String): String = {
  logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " +
    "deprecated, please use spark.executor.memory instead.")
  value
}
/** Control our logLevel. This overrides any user-defined log settings.
 * @param logLevel The desired log level as a string.
 * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
 */
def setLogLevel(logLevel: String): Unit = {
  // Be lenient about casing: normalize to upper case before validating.
  val normalized = logLevel.toUpperCase(Locale.ROOT)
  require(SparkContext.VALID_LOG_LEVELS.contains(normalized),
    s"Supplied level $logLevel did not match one of:" +
      s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}")
  Utils.setLogLevel(org.apache.log4j.Level.toLevel(normalized))
}
try {
_conf = config.clone()
_conf.validateSettings()
if (!_conf.contains("spark.master")) {
throw new SparkException("A master URL must be set in your configuration")
}
if (!_conf.contains("spark.app.name")) {
throw new SparkException("An application name must be set in your configuration")
}
_driverLogger = DriverLogger(_conf)
val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE)
_resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt)
logResourceInfo(SPARK_DRIVER_PREFIX, _resources)
// log out spark.app.name in the Spark driver logs
logInfo(s"Submitted application: $appName")
// System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster
if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) {
throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " +
"Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.")
}
if (_conf.getBoolean("spark.logConf", false)) {
logInfo("Spark configuration:\n" + _conf.toDebugString)
}
// Set Spark driver host and port system properties. This explicitly sets the configuration
// instead of relying on the default value of the config constant.
_conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS))
_conf.setIfMissing(DRIVER_PORT, 0)
_conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER)
_jars = Utils.getUserJars(_conf)
_files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty))
.toSeq.flatten
_eventLogDir =
if (isEventLogEnabled) {
val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/")
Some(Utils.resolveURI(unresolvedDir))
} else {
None
}
_eventLogCodec = {
val compress = _conf.get(EVENT_LOG_COMPRESS)
if (compress && isEventLogEnabled) {
Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName)
} else {
None
}
}
_listenerBus = new LiveListenerBus(_conf)
_resourceProfileManager = new ResourceProfileManager(_conf)
// Initialize the app status store and listener before SparkEnv is created so that it gets
// all events.
val appStatusSource = AppStatusSource.createSource(conf)
_statusStore = AppStatusStore.createLiveStore(conf, appStatusSource)
listenerBus.addToStatusQueue(_statusStore.listener.get)
// Create the Spark execution environment (cache, map output tracker, etc)
_env = createSparkEnv(_conf, isLocal, listenerBus)
SparkEnv.set(_env)
// If running the REPL, register the repl's output dir with the file server.
_conf.getOption("spark.repl.class.outputDir").foreach { path =>
val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path))
_conf.set("spark.repl.class.uri", replUri)
}
_statusTracker = new SparkStatusTracker(this, _statusStore)
_progressBar =
if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) {
Some(new ConsoleProgressBar(this))
} else {
None
}
_ui =
if (conf.get(UI_ENABLED)) {
Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "",
startTime))
} else {
// For tests, do not enable the UI
None
}
// Bind the UI before starting the task scheduler to communicate
// the bound port to the cluster manager properly
_ui.foreach(_.bind())
_hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf)
// Performance optimization: this dummy call to .size() triggers eager evaluation of
// Configuration's internal `properties` field, guaranteeing that it will be computed and
// cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create
// a new per-session Configuration. If `properties` has not been computed by that time
// then each newly-created Configuration will perform its own expensive IO and XML
// parsing to load configuration defaults and populate its own properties. By ensuring
// that we've pre-computed the parent's properties, the child Configuration will simply
// clone the parent's properties.
_hadoopConfiguration.size()
// Add each JAR given through the constructor
if (jars != null) {
jars.foreach(addJar)
}
if (files != null) {
files.foreach(addFile)
}
_executorMemory = _conf.getOption(EXECUTOR_MEMORY.key)
.orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY")))
.orElse(Option(System.getenv("SPARK_MEM"))
.map(warnSparkMem))
.map(Utils.memoryStringToMb)
.getOrElse(1024)
// Convert java options to env vars as a work around
// since we can't set env vars directly in sbt.
for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key))
value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} {
executorEnvs(envKey) = value
}
Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v =>
executorEnvs("SPARK_PREPEND_CLASSES") = v
}
// The Mesos scheduler backend relies on this environment variable to set executor memory.
// TODO: Set this only in the Mesos scheduler.
executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m"
executorEnvs ++= _conf.getExecutorEnv
executorEnvs("SPARK_USER") = sparkUser
_shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(config).driver()
_shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) =>
_conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v)
}
// We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will
// retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640)
_heartbeatReceiver = env.rpcEnv.setupEndpoint(
HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this))
// Initialize any plugins before the task scheduler is initialized.
_plugins = PluginContainer(this, _resources.asJava)
// Create and start the scheduler
val (sched, ts) = SparkContext.createTaskScheduler(this, master, deployMode)
_schedulerBackend = sched
_taskScheduler = ts
_dagScheduler = new DAGScheduler(this)
_heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet)
val _executorMetricsSource =
if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) {
Some(new ExecutorMetricsSource)
} else {
None
}
// create and start the heartbeater for collecting memory metrics
_heartbeater = new Heartbeater(
() => SparkContext.this.reportHeartBeat(_executorMetricsSource),
"driver-heartbeater",
conf.get(EXECUTOR_HEARTBEAT_INTERVAL))
_heartbeater.start()
// start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's
// constructor
_taskScheduler.start()
_applicationId = _taskScheduler.applicationId()
_applicationAttemptId = _taskScheduler.applicationAttemptId()
_conf.set("spark.app.id", _applicationId)
if (_conf.get(UI_REVERSE_PROXY)) {
System.setProperty("spark.ui.proxyBase", "/proxy/" + _applicationId)
}
_ui.foreach(_.setAppId(_applicationId))
_env.blockManager.initialize(_applicationId)
// The metrics system for Driver need to be set spark.app.id to app ID.
// So it should start after we get app ID from the task scheduler and set spark.app.id.
_env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED))
// Attach the driver metrics servlet handler to the web ui after the metrics system is started.
_env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler)))
_eventLogger =
if (isEventLogEnabled) {
val logger =
new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get,
_conf, _hadoopConfiguration)
logger.start()
listenerBus.addToEventLogQueue(logger)
Some(logger)
} else {
None
}
_cleaner =
if (_conf.get(CLEANER_REFERENCE_TRACKING)) {
Some(new ContextCleaner(this, _shuffleDriverComponents))
} else {
None
}
_cleaner.foreach(_.start())
val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf)
_executorAllocationManager =
if (dynamicAllocationEnabled) {
schedulerBackend match {
case b: ExecutorAllocationClient =>
Some(new ExecutorAllocationManager(
schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf,
cleaner = cleaner, resourceProfileManager = resourceProfileManager))
case _ =>
None
}
} else {
None
}
_executorAllocationManager.foreach(_.start())
setupAndStartListenerBus()
postEnvironmentUpdate()
postApplicationStart()
// Post init
_taskScheduler.postStartHook()
_env.metricsSystem.registerSource(_dagScheduler.metricsSource)
_env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager))
_env.metricsSystem.registerSource(new JVMCPUSource())
_executorMetricsSource.foreach(_.register(_env.metricsSystem))
_executorAllocationManager.foreach { e =>
_env.metricsSystem.registerSource(e.executorAllocationManagerSource)
}
appStatusSource.foreach(_env.metricsSystem.registerSource(_))
_plugins.foreach(_.registerMetrics(applicationId))
// Make sure the context is stopped if the user forgets about it. This avoids leaving
// unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM
// is killed, though.
logDebug("Adding shutdown hook") // force eager creation of logger
_shutdownHookRef = ShutdownHookManager.addShutdownHook(
ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () =>
logInfo("Invoking stop() from shutdown hook")
try {
stop()
} catch {
case e: Throwable =>
logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e)
}
}
} catch {
case NonFatal(e) =>
logError("Error initializing SparkContext.", e)
try {
stop()
} catch {
case NonFatal(inner) =>
logError("Error stopping SparkContext after init error.", inner)
} finally {
throw e
}
}
/**
* Called by the web UI to obtain executor thread dumps. This method may be expensive.
* Logs an error and returns None if we failed to obtain a thread dump, which could occur due
* to an executor being dead or unresponsive or due to network issues while sending the thread
* dump message back to the driver.
*/
private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = {
try {
if (executorId == SparkContext.DRIVER_IDENTIFIER) {
Some(Utils.getThreadDump())
} else {
val endpointRef = env.blockManager.master.getExecutorEndpointRef(executorId).get
Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump))
}
} catch {
case e: Exception =>
logError(s"Exception getting thread dump from executor $executorId", e)
None
}
}
  // Returns the thread-local Properties attached to the calling thread's jobs.
  private[spark] def getLocalProperties: Properties = localProperties.get()
  // Replaces the calling thread's job properties wholesale with the given Properties object.
  private[spark] def setLocalProperties(props: Properties): Unit = {
    localProperties.set(props)
  }
/**
* Set a local property that affects jobs submitted from this thread, such as the Spark fair
* scheduler pool. User-defined properties may also be set here. These properties are propagated
* through to worker tasks and can be accessed there via
* [[org.apache.spark.TaskContext#getLocalProperty]].
*
* These properties are inherited by child threads spawned from this thread. This
* may have unexpected consequences when working with thread pools. The standard java
* implementation of thread pools have worker threads spawn other worker threads.
* As a result, local properties may propagate unpredictably.
*/
def setLocalProperty(key: String, value: String): Unit = {
if (value == null) {
localProperties.get.remove(key)
} else {
localProperties.get.setProperty(key, value)
}
}
/**
* Get a local property set in this thread, or null if it is missing. See
* `org.apache.spark.SparkContext.setLocalProperty`.
*/
def getLocalProperty(key: String): String =
Option(localProperties.get).map(_.getProperty(key)).orNull
  /** Set a human readable description of the current job. */
  def setJobDescription(value: String): Unit = {
    // Stored as a thread-local property so it travels with jobs submitted by this thread.
    setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value)
  }
/**
* Assigns a group ID to all the jobs started by this thread until the group ID is set to a
* different value or cleared.
*
* Often, a unit of execution in an application consists of multiple Spark actions or jobs.
* Application programmers can use this method to group all those jobs together and give a
* group description. Once set, the Spark web UI will associate such jobs with this group.
*
* The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all
* running jobs in this group. For example,
* {{{
* // In the main thread:
* sc.setJobGroup("some_job_to_cancel", "some job description")
* sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count()
*
* // In a separate thread:
* sc.cancelJobGroup("some_job_to_cancel")
* }}}
*
* @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()`
* being called on the job's executor threads. This is useful to help ensure that the tasks
* are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS
* may respond to Thread.interrupt() by marking nodes as dead.
*/
def setJobGroup(groupId: String,
description: String, interruptOnCancel: Boolean = false): Unit = {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId)
// Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids
// changing several public APIs and allows Spark cancellations outside of the cancelJobGroup
// APIs to also take advantage of this property (e.g., internal job failures or canceling from
// JobProgressTab UI) on a per-job basis.
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString)
}
/** Clear the current thread's job group ID and its description. */
def clearJobGroup(): Unit = {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null)
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null)
}
  /**
   * Execute a block of code in a scope such that all new RDDs created in this body will
   * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}.
   *
   * @note Return statements are NOT allowed in the given body.
   */
  // Thin delegation: the scope bookkeeping lives in RDDOperationScope.
  private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body)
// Methods for creating RDDs
  /** Distribute a local Scala collection to form an RDD.
   *
   * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call
   * to parallelize and before the first action on the RDD, the resultant RDD will reflect the
   * modified collection. Pass a copy of the argument to avoid this.
   * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an
   * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions.
   * @param seq Scala collection to distribute
   * @param numSlices number of partitions to divide the collection into
   * @return RDD representing distributed collection
   */
  def parallelize[T: ClassTag](
      seq: Seq[T],
      numSlices: Int = defaultParallelism): RDD[T] = withScope {
    assertNotStopped()
    // Empty map: no location preferences for any partition.
    new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]())
  }
  /**
   * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by
   * `step` every element.
   *
   * @note if we need to cache this RDD, we should make sure each partition does not exceed limit.
   *
   * @param start the start value.
   * @param end the end value.
   * @param step the incremental step
   * @param numSlices number of partitions to divide the collection into
   * @return RDD representing distributed range
   */
  def range(
      start: Long,
      end: Long,
      step: Long = 1,
      numSlices: Int = defaultParallelism): RDD[Long] = withScope {
    assertNotStopped()
    // when step is 0, range will run infinitely
    require(step != 0, "step cannot be 0")
    // Element count is computed in BigInt to avoid Long overflow on extreme ranges
    // (e.g. Long.MinValue to Long.MaxValue).
    val numElements: BigInt = {
      val safeStart = BigInt(start)
      val safeEnd = BigInt(end)
      // Second disjunct: if end lies on the "wrong" side of start for this step's sign,
      // the quotient is already the (non-positive) answer and no rounding is needed.
      if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) {
        (safeEnd - safeStart) / step
      } else {
        // the remainder has the same sign with range, could add 1 more
        (safeEnd - safeStart) / step + 1
      }
    }
    // Distribute [0, numSlices) as seeds; each partition computes its own sub-range lazily
    // instead of materializing the values on the driver.
    parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) =>
      // Partition boundaries in BigInt space; converted to Longs with saturation below.
      val partitionStart = (i * numElements) / numSlices * step + start
      val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start
      // Clamp a BigInt boundary into the Long range (saturating at MaxValue/MinValue).
      def getSafeMargin(bi: BigInt): Long =
        if (bi.isValidLong) {
          bi.toLong
        } else if (bi > 0) {
          Long.MaxValue
        } else {
          Long.MinValue
        }
      val safePartitionStart = getSafeMargin(partitionStart)
      val safePartitionEnd = getSafeMargin(partitionEnd)
      // Hand-rolled iterator so overflow of `number += step` can be detected and treated
      // as exhaustion rather than wrapping around.
      new Iterator[Long] {
        private[this] var number: Long = safePartitionStart
        private[this] var overflow: Boolean = false
        override def hasNext =
          if (!overflow) {
            if (step > 0) {
              number < safePartitionEnd
            } else {
              number > safePartitionEnd
            }
          } else false
        override def next() = {
          val ret = number
          number += step
          if (number < ret ^ step < 0) {
            // we have Long.MaxValue + Long.MaxValue < Long.MaxValue
            // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step
            // back, we are pretty sure that we have an overflow.
            overflow = true
          }
          ret
        }
      }
    }
  }
  /** Distribute a local Scala collection to form an RDD.
   *
   * This method is identical to `parallelize`.
   * @param seq Scala collection to distribute
   * @param numSlices number of partitions to divide the collection into
   * @return RDD representing distributed collection
   */
  def makeRDD[T: ClassTag](
      seq: Seq[T],
      numSlices: Int = defaultParallelism): RDD[T] = withScope {
    // Pure alias kept for API compatibility; all behavior lives in parallelize.
    parallelize(seq, numSlices)
  }
/**
* Distribute a local Scala collection to form an RDD, with one or more
* location preferences (hostnames of Spark nodes) for each object.
* Create a new partition for each collection item.
* @param seq list of tuples of data and location preferences (hostnames of Spark nodes)
* @return RDD representing data partitioned according to location preferences
*/
def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope {
assertNotStopped()
val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap
new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs)
}
/**
* Read a text file from HDFS, a local file system (available on all nodes), or any
* Hadoop-supported file system URI, and return it as an RDD of Strings.
* The text files must be encoded as UTF-8.
*
* @param path path to the text file on a supported file system
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of lines of the text file
*/
def textFile(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[String] = withScope {
assertNotStopped()
hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text],
minPartitions).map(pair => pair._2.toString).setName(path)
}
  /**
   * Read a directory of text files from HDFS, a local file system (available on all nodes), or any
   * Hadoop-supported file system URI. Each file is read as a single record and returned in a
   * key-value pair, where the key is the path of each file, the value is the content of each file.
   * The text files must be encoded as UTF-8.
   *
   * <p> For example, if you have the following files:
   * {{{
   *   hdfs://a-hdfs-path/part-00000
   *   hdfs://a-hdfs-path/part-00001
   *   ...
   *   hdfs://a-hdfs-path/part-nnnnn
   * }}}
   *
   * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`,
   *
   * <p> then `rdd` contains
   * {{{
   *   (a-hdfs-path/part-00000, its content)
   *   (a-hdfs-path/part-00001, its content)
   *   ...
   *   (a-hdfs-path/part-nnnnn, its content)
   * }}}
   *
   * @note Small files are preferred, large file is also allowable, but may cause bad performance.
   * @note On some filesystems, `.../path/*` can be a more efficient way to read all files
   *       in a directory rather than `.../path/` or `.../path`
   * @note Partitioning is determined by data locality. This may result in too few partitions
   *       by default.
   *
   * @param path Directory to the input data files, the path can be comma separated paths as the
   *             list of inputs.
   * @param minPartitions A suggestion value of the minimal splitting number for input data.
   * @return RDD representing tuples of file path and the corresponding file content
   */
  def wholeTextFiles(
      path: String,
      minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope {
    assertNotStopped()
    // A throwaway Job is created only to obtain a Configuration with the input paths applied.
    val job = NewHadoopJob.getInstance(hadoopConfiguration)
    // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking
    // comma separated files as input. (see SPARK-7155)
    NewFileInputFormat.setInputPaths(job, path)
    val updateConf = job.getConfiguration
    // Records arrive as (Text path, Text content); convert both to Strings for the caller.
    new WholeTextFileRDD(
      this,
      classOf[WholeTextFileInputFormat],
      classOf[Text],
      classOf[Text],
      updateConf,
      minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path)
  }
  /**
   * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file
   * (useful for binary data)
   *
   * For example, if you have the following files:
   * {{{
   *   hdfs://a-hdfs-path/part-00000
   *   hdfs://a-hdfs-path/part-00001
   *   ...
   *   hdfs://a-hdfs-path/part-nnnnn
   * }}}
   *
   * Do
   * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`,
   *
   * then `rdd` contains
   * {{{
   *   (a-hdfs-path/part-00000, its content)
   *   (a-hdfs-path/part-00001, its content)
   *   ...
   *   (a-hdfs-path/part-nnnnn, its content)
   * }}}
   *
   * @note Small files are preferred; very large files may cause bad performance.
   * @note On some filesystems, `.../path/*` can be a more efficient way to read all files
   *       in a directory rather than `.../path/` or `.../path`
   * @note Partitioning is determined by data locality. This may result in too few partitions
   *       by default.
   *
   * @param path Directory to the input data files, the path can be comma separated paths as the
   *             list of inputs.
   * @param minPartitions A suggestion value of the minimal splitting number for input data.
   * @return RDD representing tuples of file path and corresponding file content
   */
  def binaryFiles(
      path: String,
      minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope {
    assertNotStopped()
    // A throwaway Job is created only to obtain a Configuration with the input paths applied.
    val job = NewHadoopJob.getInstance(hadoopConfiguration)
    // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking
    // comma separated files as input. (see SPARK-7155)
    NewFileInputFormat.setInputPaths(job, path)
    val updateConf = job.getConfiguration
    // Content is exposed lazily as PortableDataStream, so the file bytes are not read eagerly.
    new BinaryFileRDD(
      this,
      classOf[StreamInputFormat],
      classOf[String],
      classOf[PortableDataStream],
      updateConf,
      minPartitions).setName(path)
  }
/**
* Load data from a flat binary file, assuming the length of each record is constant.
*
* @note We ensure that the byte array for each record in the resulting RDD
* has the provided record length.
*
* @param path Directory to the input data files, the path can be comma separated paths as the
* list of inputs.
* @param recordLength The length at which to split the records
* @param conf Configuration for setting up the dataset.
*
* @return An RDD of data with values, represented as byte arrays
*/
def binaryRecords(
path: String,
recordLength: Int,
conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope {
assertNotStopped()
conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path,
classOf[FixedLengthBinaryInputFormat],
classOf[LongWritable],
classOf[BytesWritable],
conf = conf)
br.map { case (k, v) =>
val bytes = v.copyBytes()
assert(bytes.length == recordLength, "Byte array does not have correct length")
bytes
}
}
  /**
   * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other
   * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable),
   * using the older MapReduce API (`org.apache.hadoop.mapred`).
   *
   * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast.
   *             Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
   *             sure you won't modify the conf. A safe approach is always creating a new conf for
   *             a new RDD.
   * @param inputFormatClass storage format of the data to be read
   * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
   * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
   * @param minPartitions Minimum number of Hadoop Splits to generate.
   * @return RDD of tuples of key and corresponding value
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
   * operation will create many references to the same object.
   * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
   * copy them using a `map` function.
   */
  def hadoopRDD[K, V](
      conf: JobConf,
      inputFormatClass: Class[_ <: InputFormat[K, V]],
      keyClass: Class[K],
      valueClass: Class[V],
      minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
    assertNotStopped()
    // This is a hack to enforce loading hdfs-site.xml.
    // See SPARK-11227 for details.
    FileSystem.getLocal(conf)
    // Add necessary security credentials to the JobConf before broadcasting it.
    SparkHadoopUtil.get.addCredentials(conf)
    new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions)
  }
  /** Get an RDD for a Hadoop file with an arbitrary InputFormat
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
   * operation will create many references to the same object.
   * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
   * copy them using a `map` function.
   * @param path directory to the input data files, the path can be comma separated paths
   * as a list of inputs
   * @param inputFormatClass storage format of the data to be read
   * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
   * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
   * @param minPartitions suggested minimum number of partitions for the resulting RDD
   * @return RDD of tuples of key and corresponding value
   */
  def hadoopFile[K, V](
      path: String,
      inputFormatClass: Class[_ <: InputFormat[K, V]],
      keyClass: Class[K],
      valueClass: Class[V],
      minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
    assertNotStopped()
    // This is a hack to enforce loading hdfs-site.xml.
    // See SPARK-11227 for details.
    FileSystem.getLocal(hadoopConfiguration)
    // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it.
    val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration))
    // Input paths are applied lazily on each executor's JobConf copy via this closure.
    val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path)
    new HadoopRDD(
      this,
      confBroadcast,
      Some(setInputPathsFunc),
      inputFormatClass,
      keyClass,
      valueClass,
      minPartitions).setName(path)
  }
/**
* Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
* values and the InputFormat so that users don't need to pass them directly. Instead, callers
* can just write, for example,
* {{{
* val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions)
* }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V, F <: InputFormat[K, V]]
(path: String, minPartitions: Int)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
hadoopFile(path,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]],
minPartitions)
}
  /**
   * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
   * values and the InputFormat so that users don't need to pass them directly. Instead, callers
   * can just write, for example,
   * {{{
   * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path)
   * }}}
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
   * operation will create many references to the same object.
   * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
   * copy them using a `map` function.
   * @param path directory to the input data files, the path can be comma separated paths as
   * a list of inputs
   * @return RDD of tuples of key and corresponding value
   */
  def hadoopFile[K, V, F <: InputFormat[K, V]](path: String)
      (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
    // Delegates with the context's default minimum partition count.
    hadoopFile[K, V, F](path, defaultMinPartitions)
  }
/**
* Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys,
* values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user
* don't need to pass them directly. Instead, callers can just write, for example:
* ```
* val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path)
* ```
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @return RDD of tuples of key and corresponding value
*/
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]
(path: String)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
newAPIHadoopFile(
path,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]])
}
  /**
   * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
   * and extra configuration options to pass to the input format.
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
   * operation will create many references to the same object.
   * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
   * copy them using a `map` function.
   * @param path directory to the input data files, the path can be comma separated paths
   * as a list of inputs
   * @param fClass storage format of the data to be read
   * @param kClass `Class` of the key associated with the `fClass` parameter
   * @param vClass `Class` of the value associated with the `fClass` parameter
   * @param conf Hadoop configuration
   * @return RDD of tuples of key and corresponding value
   */
  def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
      path: String,
      fClass: Class[F],
      kClass: Class[K],
      vClass: Class[V],
      conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope {
    assertNotStopped()
    // This is a hack to enforce loading hdfs-site.xml.
    // See SPARK-11227 for details.
    FileSystem.getLocal(hadoopConfiguration)
    // The call to NewHadoopJob automatically adds security credentials to conf,
    // so we don't need to explicitly add them ourselves
    val job = NewHadoopJob.getInstance(conf)
    // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking
    // comma separated files as input. (see SPARK-7155)
    NewFileInputFormat.setInputPaths(job, path)
    val updatedConf = job.getConfiguration
    new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path)
  }
  /**
   * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
   * and extra configuration options to pass to the input format.
   *
   * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
   *             Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
   *             sure you won't modify the conf. A safe approach is always creating a new conf for
   *             a new RDD.
   * @param fClass storage format of the data to be read
   * @param kClass `Class` of the key associated with the `fClass` parameter
   * @param vClass `Class` of the value associated with the `fClass` parameter
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
   * operation will create many references to the same object.
   * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
   * copy them using a `map` function.
   */
  // NOTE: `conf` is the first parameter but has a default, so callers supplying only the
  // class arguments must use named parameters.
  def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
      conf: Configuration = hadoopConfiguration,
      fClass: Class[F],
      kClass: Class[K],
      vClass: Class[V]): RDD[(K, V)] = withScope {
    assertNotStopped()
    // This is a hack to enforce loading hdfs-site.xml.
    // See SPARK-11227 for details.
    FileSystem.getLocal(conf)
    // Add necessary security credentials to the JobConf. Required to access secure HDFS.
    val jconf = new JobConf(conf)
    SparkHadoopUtil.get.addCredentials(jconf)
    new NewHadoopRDD(this, fClass, kClass, vClass, jconf)
  }
/**
* Get an RDD for a Hadoop SequenceFile with given key and value types.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
* @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V](path: String,
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int
): RDD[(K, V)] = withScope {
assertNotStopped()
val inputFormatClass = classOf[SequenceFileInputFormat[K, V]]
hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions)
}
  /**
   * Get an RDD for a Hadoop SequenceFile with given key and value types.
   *
   * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
   * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
   * operation will create many references to the same object.
   * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
   * copy them using a `map` function.
   * @param path directory to the input data files, the path can be comma separated paths
   * as a list of inputs
   * @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
   * @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
   * @return RDD of tuples of key and corresponding value
   */
  def sequenceFile[K, V](
      path: String,
      keyClass: Class[K],
      valueClass: Class[V]): RDD[(K, V)] = withScope {
    assertNotStopped()
    // Delegates with the context's default minimum partition count.
    sequenceFile(path, keyClass, valueClass, defaultMinPartitions)
  }
/**
* Version of sequenceFile() for types implicitly convertible to Writables through a
* WritableConverter. For example, to access a SequenceFile where the keys are Text and the
* values are IntWritable, you could simply write
* {{{
* sparkContext.sequenceFile[String, Int](path, ...)
* }}}
*
* WritableConverters are provided in a somewhat strange way (by an implicit function) to support
* both subclasses of Writable and types for which we define a converter (e.g. Int to
* IntWritable). The most natural thing would've been to have implicit objects for the
* converters, but then we couldn't have an object for every subclass of Writable (you can't
* have a parameterized singleton object). We use functions instead to create a new converter
* for the appropriate type. In addition, we pass the converter a ClassTag of its type to
* allow it to figure out the Writable class to use in the subclass case.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
  def sequenceFile[K, V]
      (path: String, minPartitions: Int = defaultMinPartitions)
      (implicit km: ClassTag[K], vm: ClassTag[V],
          kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = {
    withScope {
      assertNotStopped()
      // Clean the converter factories (they may close over non-serializable state) and
      // instantiate one converter per type.
      val kc = clean(kcf)()
      val vc = clean(vcf)()
      val format = classOf[SequenceFileInputFormat[Writable, Writable]]
      // Read raw Writables, then convert each record to the requested (K, V) types.
      val writables = hadoopFile(path, format,
        kc.writableClass(km).asInstanceOf[Class[Writable]],
        vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions)
      writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) }
    }
  }
/**
* Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and
* BytesWritable values that contain a serialized partition. This is still an experimental
* storage format and may not be supported exactly as is in future Spark releases. It will also
* be pretty slow if you use the default serializer (Java serialization),
* though the nice thing about it is that there's very little effort required to save arbitrary
* objects.
*
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD representing deserialized data from the file(s)
*/
  def objectFile[T: ClassTag](
      path: String,
      minPartitions: Int = defaultMinPartitions): RDD[T] = withScope {
    assertNotStopped()
    // Each SequenceFile value holds a Java-serialized Array[T] (one partition's worth of
    // elements); deserialize and flatten to recover the original elements.
    sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions)
      .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader))
  }
  // Reload an RDD that was previously checkpointed to `path` via reliable checkpointing.
  protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope {
    new ReliableCheckpointRDD[T](this, path)
  }
/** Build the union of a list of RDDs. */
def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope {
val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty)
val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet
if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) {
new PartitionerAwareUnionRDD(this, nonEmptyRdds)
} else {
new UnionRDD(this, nonEmptyRdds)
}
}
/** Build the union of a list of RDDs passed as variable-length arguments. */
def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope {
union(Seq(first) ++ rest)
}
/** Get an RDD that has no partitions or elements. */
  def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this)  // zero partitions, zero elements
// Methods for creating shared variables
/**
* Register the given accumulator.
*
* @note Accumulators must be registered before use, or it will throw exception.
*/
  def register(acc: AccumulatorV2[_, _]): Unit = {
    // Bind the accumulator to this context; it cannot be used on tasks until registered.
    acc.register(this)
  }
/**
* Register the given accumulator with given name.
*
* @note Accumulators must be registered before use, or it will throw exception.
*/
  def register(acc: AccumulatorV2[_, _], name: String): Unit = {
    // Same as the unnamed variant, but records a display name for the accumulator.
    acc.register(this, name = Option(name))
  }
/**
* Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def longAccumulator: LongAccumulator = {
val acc = new LongAccumulator
register(acc)
acc
}
/**
* Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def longAccumulator(name: String): LongAccumulator = {
val acc = new LongAccumulator
register(acc, name)
acc
}
/**
* Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def doubleAccumulator: DoubleAccumulator = {
val acc = new DoubleAccumulator
register(acc)
acc
}
/**
* Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def doubleAccumulator(name: String): DoubleAccumulator = {
val acc = new DoubleAccumulator
register(acc, name)
acc
}
/**
* Create and register a `CollectionAccumulator`, which starts with empty list and accumulates
* inputs by adding them into the list.
*/
def collectionAccumulator[T]: CollectionAccumulator[T] = {
val acc = new CollectionAccumulator[T]
register(acc)
acc
}
/**
* Create and register a `CollectionAccumulator`, which starts with empty list and accumulates
* inputs by adding them into the list.
*/
def collectionAccumulator[T](name: String): CollectionAccumulator[T] = {
val acc = new CollectionAccumulator[T]
register(acc, name)
acc
}
/**
* Broadcast a read-only variable to the cluster, returning a
* [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions.
* The variable will be sent to each cluster only once.
*
* @param value value to broadcast to the Spark nodes
* @return `Broadcast` object, a read-only variable cached on each machine
*/
  def broadcast[T: ClassTag](value: T): Broadcast[T] = {
    assertNotStopped()
    // Broadcasting an RDD would serialize the RDD handle rather than its data; reject early.
    require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass),
      "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.")
    val bc = env.broadcastManager.newBroadcast[T](value, isLocal)
    val callSite = getCallSite
    logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm)
    // Let the ContextCleaner (if enabled) reclaim the broadcast's blocks once the
    // Broadcast object is garbage collected on the driver.
    cleaner.foreach(_.registerBroadcastForCleanup(bc))
    bc
  }
/**
* Add a file to be downloaded with this Spark job on every node.
*
* If a file is added during execution, it will not be available until the next TaskSet starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(fileName)` to find its download location.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*/
def addFile(path: String): Unit = {
addFile(path, false)
}
/**
* Returns a list of file paths that are added to resources.
*/
  def listFiles(): Seq[String] = addedFiles.keySet.toSeq  // snapshot of keys recorded by addFile()
/**
* Add a file to be downloaded with this Spark job on every node.
*
* If a file is added during execution, it will not be available until the next TaskSet starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(fileName)` to find its download location.
* @param recursive if true, a directory can be given in `path`. Currently directories are
* only supported for Hadoop-supported filesystems.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*/
  def addFile(path: String, recursive: Boolean): Unit = {
    val uri = new Path(path).toUri
    // Normalize the URI: a bare path is resolved against the local filesystem, and
    // 'local:' paths are skipped entirely since they already exist on every node.
    val schemeCorrectedURI = uri.getScheme match {
      case null => new File(path).getCanonicalFile.toURI
      case "local" =>
        logWarning("File with 'local' scheme is not supported to add to file server, since " +
          "it is already available on every node.")
        return
      case _ => uri
    }
    val hadoopPath = new Path(schemeCorrectedURI)
    val scheme = schemeCorrectedURI.getScheme
    if (!Array("http", "https", "ftp").contains(scheme)) {
      // Hadoop-accessible path: validate directory handling up front.
      val fs = hadoopPath.getFileSystem(hadoopConfiguration)
      val isDir = fs.getFileStatus(hadoopPath).isDirectory
      if (!isLocal && scheme == "file" && isDir) {
        // A driver-local directory cannot be served to remote executors.
        throw new SparkException(s"addFile does not support local directories when not running " +
          "local mode.")
      }
      if (!recursive && isDir) {
        throw new SparkException(s"Added file $hadoopPath is a directory and recursive is not " +
          "turned on.")
      }
    } else {
      // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
      Utils.validateURL(uri)
    }
    // The key is what executors use to fetch the file: driver-local files are published via
    // the RPC file server; everything else is fetched directly from its original location.
    val key = if (!isLocal && scheme == "file") {
      env.rpcEnv.fileServer.addFile(new File(uri.getPath))
    } else {
      if (uri.getScheme == null) {
        schemeCorrectedURI.toString
      } else {
        path
      }
    }
    val timestamp = System.currentTimeMillis
    // putIfAbsent makes re-adding the same path a no-op (warned below) rather than an overwrite.
    if (addedFiles.putIfAbsent(key, timestamp).isEmpty) {
      logInfo(s"Added file $path at $key with timestamp $timestamp")
      // Fetch the file locally so that closures which are run on the driver can still use the
      // SparkFiles API to access files.
      Utils.fetchFile(uri.toString, new File(SparkFiles.getRootDirectory()), conf,
        env.securityManager, hadoopConfiguration, timestamp, useCache = false)
      postEnvironmentUpdate()
    } else {
      logWarning(s"The path $path has been added already. Overwriting of added paths " +
        "is not supported in the current version.")
    }
  }
/**
* :: DeveloperApi ::
* Register a listener to receive up-calls from events that happen during execution.
*/
  @DeveloperApi
  def addSparkListener(listener: SparkListenerInterface): Unit = {
    // Listeners added through the public API go to the shared queue of the async bus.
    listenerBus.addToSharedQueue(listener)
  }
/**
* :: DeveloperApi ::
* Deregister the listener from Spark's listener bus.
*/
  @DeveloperApi
  def removeSparkListener(listener: SparkListenerInterface): Unit = {
    // Removes the listener from whichever queue it was attached to.
    listenerBus.removeListener(listener)
  }
private[spark] def getExecutorIds(): Seq[String] = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
b.getExecutorIds()
case _ =>
logWarning("Requesting executors is not supported by current scheduler.")
Nil
}
}
/**
* Get the max number of tasks that can be concurrent launched based on the ResourceProfile
* being used.
* Note that please don't cache the value returned by this method, because the number can change
* due to add/remove executors.
*
* @param rp ResourceProfile which to use to calculate max concurrent tasks.
* @return The max number of tasks that can be concurrent launched currently.
*/
  private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = {
    // Delegates to the scheduler backend, which knows the live executor set and resources.
    schedulerBackend.maxNumConcurrentTasks(rp)
  }
/**
* Update the cluster manager on our scheduling needs. Three bits of information are included
* to help it make decisions. This applies to the default ResourceProfile.
* @param numExecutors The total number of executors we'd like to have. The cluster manager
* shouldn't kill any running executor to reach this number, but,
* if all existing executors were to die, this is the number of executors
* we'd want to be allocated.
* @param localityAwareTasks The number of tasks in all active stages that have a locality
* preferences. This includes running, pending, and completed tasks.
* @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
   *                             that would like to run on that host.
* This includes running, pending, and completed tasks.
* @return whether the request is acknowledged by the cluster manager.
*/
@DeveloperApi
def requestTotalExecutors(
numExecutors: Int,
localityAwareTasks: Int,
hostToLocalTaskCount: immutable.Map[String, Int]
): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
// this is being applied to the default resource profile, would need to add api to support
// others
val defaultProfId = resourceProfileManager.defaultResourceProfile.id
b.requestTotalExecutors(immutable.Map(defaultProfId-> numExecutors),
immutable.Map(localityAwareTasks -> defaultProfId),
immutable.Map(defaultProfId -> hostToLocalTaskCount))
case _ =>
logWarning("Requesting executors is not supported by current scheduler.")
false
}
}
/**
* :: DeveloperApi ::
* Request an additional number of executors from the cluster manager.
* @return whether the request is received.
*/
  @DeveloperApi
  def requestExecutors(numAdditionalExecutors: Int): Boolean = {
    // Only backends implementing ExecutorAllocationClient can grow the executor set.
    schedulerBackend match {
      case b: ExecutorAllocationClient =>
        b.requestExecutors(numAdditionalExecutors)
      case _ =>
        logWarning("Requesting executors is not supported by current scheduler.")
        false
    }
  }
/**
* :: DeveloperApi ::
* Request that the cluster manager kill the specified executors.
*
* This is not supported when dynamic allocation is turned on.
*
* @note This is an indication to the cluster manager that the application wishes to adjust
* its resource usage downwards. If the application wishes to replace the executors it kills
* through this method with new ones, it should follow up explicitly with a call to
   * [[SparkContext#requestExecutors]].
*
* @return whether the request is received.
*/
  @DeveloperApi
  def killExecutors(executorIds: Seq[String]): Boolean = {
    schedulerBackend match {
      case b: ExecutorAllocationClient =>
        // Dynamic allocation would immediately re-request the killed executors, so this
        // manual API is disallowed while it is active.
        require(executorAllocationManager.isEmpty,
          "killExecutors() unsupported with Dynamic Allocation turned on")
        // adjustTargetNumExecutors=true shrinks the app's target so replacements are not
        // launched; the kills are not counted as failures.
        b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false,
          force = true).nonEmpty
      case _ =>
        logWarning("Killing executors is not supported by current scheduler.")
        false
    }
  }
/**
* :: DeveloperApi ::
* Request that the cluster manager kill the specified executor.
*
* @note This is an indication to the cluster manager that the application wishes to adjust
* its resource usage downwards. If the application wishes to replace the executor it kills
* through this method with a new one, it should follow up explicitly with a call to
   * [[SparkContext#requestExecutors]].
*
* @return whether the request is received.
*/
@DeveloperApi
def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId))
/**
* Request that the cluster manager kill the specified executor without adjusting the
* application resource requirements.
*
* The effect is that a new executor will be launched in place of the one killed by
* this request. This assumes the cluster manager will automatically and eventually
* fulfill all missing application resource requests.
*
* @note The replace is by no means guaranteed; another application on the same cluster
* can steal the window of opportunity and acquire this application's resources in the
* mean time.
*
* @return whether the request is received.
*/
  private[spark] def killAndReplaceExecutor(executorId: String): Boolean = {
    schedulerBackend match {
      case b: ExecutorAllocationClient =>
        // adjustTargetNumExecutors=false keeps the app's target count unchanged, so the
        // cluster manager should eventually launch a replacement; the kill counts as a
        // failure (countFailures=true), unlike killExecutors().
        b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true,
          force = true).nonEmpty
      case _ =>
        logWarning("Killing executors is not supported by current scheduler.")
        false
    }
  }
/** The version of Spark on which this application is running. */
  def version: String = SPARK_VERSION  // the Spark version this application is linked against
/**
* Return a map from the slave to the max memory available for caching and the remaining
* memory available for caching.
*/
  def getExecutorMemoryStatus: Map[String, (Long, Long)] = {
    assertNotStopped()
    // Key each entry by "host:port" of the block manager; value is (max, remaining) memory.
    env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) =>
      (blockManagerId.host + ":" + blockManagerId.port, mem)
    }
  }
/**
* :: DeveloperApi ::
* Return information about what RDDs are cached, if they are in mem or on disk, how much space
* they take, etc.
*/
  @DeveloperApi
  def getRDDStorageInfo: Array[RDDInfo] = {
    // No filter: report storage info for every persistent RDD.
    getRDDStorageInfo(_ => true)
  }
  private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = {
    assertNotStopped()
    val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray
    // Enrich each RDDInfo with live storage numbers from the app status store; RDDs the
    // store does not know about get zeros.
    rddInfos.foreach { rddInfo =>
      val rddId = rddInfo.id
      val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId))
      rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0)
      rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L)
      rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L)
    }
    // Only return RDDs that actually have cached data.
    rddInfos.filter(_.isCached)
  }
/**
* Returns an immutable map of RDDs that have marked themselves as persistent via cache() call.
*
* @note This does not necessarily mean the caching or computation was successful.
*/
  def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap  // immutable snapshot
/**
* :: DeveloperApi ::
* Return pools for fair scheduler
*/
  @DeveloperApi
  def getAllPools: Seq[Schedulable] = {
    assertNotStopped()
    // TODO(xiajunluan): We should take nested pools into account
    taskScheduler.rootPool.schedulableQueue.asScala.toSeq
  }
/**
* :: DeveloperApi ::
* Return the pool associated with the given name, if one exists
*/
  @DeveloperApi
  def getPoolForName(pool: String): Option[Schedulable] = {
    assertNotStopped()
    // Option(...) maps the Java map's null (unknown pool name) to None.
    Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool))
  }
/**
* Return current scheduling mode
*/
  def getSchedulingMode: SchedulingMode.SchedulingMode = {
    assertNotStopped()
    // FIFO or FAIR, as configured on the task scheduler.
    taskScheduler.schedulingMode
  }
/**
* Gets the locality information associated with the partition in a particular rdd
* @param rdd of interest
* @param partition to be looked up for locality
* @return list of preferred locations for the partition
*/
  private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = {
    // Locality is computed by the DAG scheduler (it also consults cache locations).
    dagScheduler.getPreferredLocs(rdd, partition)
  }
/**
* Register an RDD to be persisted in memory and/or disk storage
*/
  private[spark] def persistRDD(rdd: RDD[_]): Unit = {
    // Track the RDD by id so storage info and unpersist can find it later.
    persistentRdds(rdd.id) = rdd
  }
/**
* Unpersist an RDD from memory and/or disk storage
*/
  private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = {
    // Drop the RDD's blocks cluster-wide, remove it from the registry, and notify listeners.
    env.blockManager.master.removeRdd(rddId, blocking)
    persistentRdds.remove(rddId)
    listenerBus.post(SparkListenerUnpersistRDD(rddId))
  }
/**
* Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future.
*
* If a jar is added during execution, it will not be available until the next TaskSet starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems),
* an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*/
  def addJar(path: String): Unit = {
    // Publish a driver-local jar through the RPC file server; returns the served key,
    // or null if the jar is missing/invalid (logged, not thrown).
    def addLocalJarFile(file: File): String = {
      try {
        if (!file.exists()) {
          throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found")
        }
        if (file.isDirectory) {
          throw new IllegalArgumentException(
            s"Directory ${file.getAbsoluteFile} is not allowed for addJar")
        }
        env.rpcEnv.fileServer.addJar(file)
      } catch {
        case NonFatal(e) =>
          logError(s"Failed to add $path to Spark environment", e)
          null
      }
    }
    // Validate a remote (Hadoop-accessible) jar path; returns the path itself, or null on
    // failure. http/https/ftp URIs are passed through without an existence check.
    def checkRemoteJarFile(path: String): String = {
      val hadoopPath = new Path(path)
      val scheme = hadoopPath.toUri.getScheme
      if (!Array("http", "https", "ftp").contains(scheme)) {
        try {
          val fs = hadoopPath.getFileSystem(hadoopConfiguration)
          if (!fs.exists(hadoopPath)) {
            throw new FileNotFoundException(s"Jar ${path} not found")
          }
          if (fs.isDirectory(hadoopPath)) {
            throw new IllegalArgumentException(
              s"Directory ${path} is not allowed for addJar")
          }
          path
        } catch {
          case NonFatal(e) =>
            logError(s"Failed to add $path to Spark environment", e)
            null
        }
      } else {
        path
      }
    }
    if (path == null || path.isEmpty) {
      logWarning("null or empty path specified as parameter to addJar")
    } else {
      val key = if (path.contains("\\")) {
        // For local paths with backslashes on Windows, URI throws an exception
        addLocalJarFile(new File(path))
      } else {
        val uri = new Path(path).toUri
        // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
        Utils.validateURL(uri)
        uri.getScheme match {
          // A JAR file which exists only on the driver node
          case null =>
            // SPARK-22585 path without schema is not url encoded
            addLocalJarFile(new File(uri.getPath))
          // A JAR file which exists only on the driver node
          case "file" => addLocalJarFile(new File(uri.getPath))
          // A JAR file which exists locally on every worker node
          case "local" => "file:" + uri.getPath
          case _ => checkRemoteJarFile(path)
        }
      }
      // key == null means validation failed above; the error was already logged.
      if (key != null) {
        val timestamp = System.currentTimeMillis
        // putIfAbsent: re-adding the same jar is a warned no-op, never an overwrite.
        if (addedJars.putIfAbsent(key, timestamp).isEmpty) {
          logInfo(s"Added JAR $path at $key with timestamp $timestamp")
          postEnvironmentUpdate()
        } else {
          logWarning(s"The jar $path has been added already. Overwriting of added jars " +
            "is not supported in the current version.")
        }
      }
    }
  }
/**
* Returns a list of jar files that are added to resources.
*/
  def listJars(): Seq[String] = addedJars.keySet.toSeq  // snapshot of keys recorded by addJar()
/**
* When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark
* may wait for some internal threads to finish. It's better to use this method to stop
* SparkContext instead.
*/
  private[spark] def stopInNewThread(): Unit = {
    // Stopping from a dedicated daemon thread avoids deadlocks when stop() is triggered
    // from inside a Spark internal thread that stop() itself would wait on.
    new Thread("stop-spark-context") {
      setDaemon(true)
      override def run(): Unit = {
        try {
          SparkContext.this.stop()
        } catch {
          case e: Throwable =>
            logError(e.getMessage, e)
            throw e
        }
      }
    }.start()
  }
/**
* Shut down the SparkContext.
*/
  def stop(): Unit = {
    // Stopping from a listener callback would deadlock the listener bus shutdown below.
    if (LiveListenerBus.withinListenerThread.value) {
      throw new SparkException(s"Cannot stop SparkContext within listener bus thread.")
    }
    // Use the stopping variable to ensure no contention for the stop scenario.
    // Still track the stopped variable for use elsewhere in the code.
    if (!stopped.compareAndSet(false, true)) {
      logInfo("SparkContext already stopped.")
      return
    }
    if (_shutdownHookRef != null) {
      ShutdownHookManager.removeShutdownHook(_shutdownHookRef)
    }
    // Post the application-end event first, while the listener bus is still running.
    if (listenerBus != null) {
      Utils.tryLogNonFatalError {
        postApplicationEnd()
      }
    }
    // Each component is stopped under tryLogNonFatalError so one failure cannot
    // prevent the remaining components from shutting down.
    Utils.tryLogNonFatalError {
      _driverLogger.foreach(_.stop())
    }
    Utils.tryLogNonFatalError {
      _ui.foreach(_.stop())
    }
    if (env != null) {
      Utils.tryLogNonFatalError {
        env.metricsSystem.report()
      }
    }
    Utils.tryLogNonFatalError {
      _cleaner.foreach(_.stop())
    }
    Utils.tryLogNonFatalError {
      _executorAllocationManager.foreach(_.stop())
    }
    if (_dagScheduler != null) {
      Utils.tryLogNonFatalError {
        _dagScheduler.stop()
      }
      _dagScheduler = null
    }
    if (_listenerBusStarted) {
      Utils.tryLogNonFatalError {
        listenerBus.stop()
        _listenerBusStarted = false
      }
    }
    Utils.tryLogNonFatalError {
      _plugins.foreach(_.shutdown())
    }
    Utils.tryLogNonFatalError {
      _eventLogger.foreach(_.stop())
    }
    if (_heartbeater != null) {
      Utils.tryLogNonFatalError {
        _heartbeater.stop()
      }
      _heartbeater = null
    }
    if (_shuffleDriverComponents != null) {
      Utils.tryLogNonFatalError {
        _shuffleDriverComponents.cleanupApplication()
      }
    }
    if (env != null && _heartbeatReceiver != null) {
      Utils.tryLogNonFatalError {
        env.rpcEnv.stop(_heartbeatReceiver)
      }
    }
    Utils.tryLogNonFatalError {
      _progressBar.foreach(_.stop())
    }
    _taskScheduler = null
    // TODO: Cache.stop()?
    // SparkEnv goes last among components: everything above may still need it.
    if (_env != null) {
      Utils.tryLogNonFatalError {
        _env.stop()
      }
      SparkEnv.set(null)
    }
    if (_statusStore != null) {
      _statusStore.close()
    }
    // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this
    // `SparkContext` is stopped.
    localProperties.remove()
    ResourceProfile.clearDefaultProfile()
    // Deregister this context as the active one, allowing a new SparkContext (possibly on a
    // different cluster type) to be created afterwards.
    SparkContext.clearActiveContext()
    logInfo("Successfully stopped SparkContext")
  }
/**
* Get Spark's home location from either a value set through the constructor,
* or the spark.home Java property, or the SPARK_HOME environment variable
* (in that order of preference). If neither of these is set, return None.
*/
private[spark] def getSparkHome(): Option[String] = {
conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME")))
}
/**
* Set the thread-local property for overriding the call sites
* of actions and RDDs.
*/
  def setCallSite(shortCallSite: String): Unit = {
    // Only overrides the short form; the long form keeps its automatic value.
    setLocalProperty(CallSite.SHORT_FORM, shortCallSite)
  }
/**
* Set the thread-local property for overriding the call sites
* of actions and RDDs.
*/
  private[spark] def setCallSite(callSite: CallSite): Unit = {
    // Internal variant: overrides both the short and the long call-site forms.
    setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm)
    setLocalProperty(CallSite.LONG_FORM, callSite.longForm)
  }
/**
* Clear the thread-local property for overriding the call sites
* of actions and RDDs.
*/
  def clearCallSite(): Unit = {
    // Setting the properties to null restores the automatically captured call site.
    setLocalProperty(CallSite.SHORT_FORM, null)
    setLocalProperty(CallSite.LONG_FORM, null)
  }
/**
* Capture the current user callsite and return a formatted version for printing. If the user
* has overridden the call site using `setCallSite()`, this will return the user's version.
*/
  private[spark] def getCallSite(): CallSite = {
    // lazy: only walk the stack trace when neither form has been overridden.
    lazy val callSite = Utils.getCallSite()
    CallSite(
      Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm),
      Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm)
    )
  }
/**
* Run a function on a given set of partitions in an RDD and pass the results to the given
* handler function. This is the main entry point for all actions in Spark.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @param resultHandler callback to pass each result to
*/
  def runJob[T, U: ClassTag](
      rdd: RDD[T],
      func: (TaskContext, Iterator[T]) => U,
      partitions: Seq[Int],
      resultHandler: (Int, U) => Unit): Unit = {
    if (stopped.get()) {
      throw new IllegalStateException("SparkContext has been shutdown")
    }
    val callSite = getCallSite
    // Clean the closure before shipping it to executors (fails fast if unserializable).
    val cleanedFunc = clean(func)
    logInfo("Starting job: " + callSite.shortForm)
    if (conf.getBoolean("spark.logLineage", false)) {
      logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString)
    }
    // Blocks until the job completes; results are streamed to resultHandler per partition.
    dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get)
    progressBar.foreach(_.finishAll())
    // Materialize any pending checkpoint for this RDD (and its ancestors) after the job.
    rdd.doCheckpoint()
  }
/**
* Run a function on a given set of partitions in an RDD and return the results as an array.
* The function that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int]): Array[U] = {
val results = new Array[U](partitions.size)
runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res)
results
}
/**
* Run a function on a given set of partitions in an RDD and return the results as an array.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
  def runJob[T, U: ClassTag](
      rdd: RDD[T],
      func: Iterator[T] => U,
      partitions: Seq[Int]): Array[U] = {
    // Clean here so the adapter lambda below captures the cleaned closure.
    val cleanedFunc = clean(func)
    runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions)
  }
/**
* Run a job on all partitions in an RDD and return the results in an array. The function
* that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = {
runJob(rdd, func, 0 until rdd.partitions.length)
}
/**
* Run a job on all partitions in an RDD and return the results in an array.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = {
runJob(rdd, func, 0 until rdd.partitions.length)
}
/**
* Run a job on all partitions in an RDD and pass the results to a handler function. The function
* that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param resultHandler callback to pass each result to
*/
  def runJob[T, U: ClassTag](
      rdd: RDD[T],
      processPartition: (TaskContext, Iterator[T]) => U,
      resultHandler: (Int, U) => Unit): Unit = {
    // All-partitions variant that streams results to the handler instead of collecting.
    runJob[T, U](rdd, processPartition, 0 until rdd.partitions.length, resultHandler)
  }
/**
* Run a job on all partitions in an RDD and pass the results to a handler function.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param resultHandler callback to pass each result to
*/
  def runJob[T, U: ClassTag](
      rdd: RDD[T],
      processPartition: Iterator[T] => U,
      resultHandler: (Int, U) => Unit): Unit = {
    // Adapt the iterator-only function to the (TaskContext, Iterator) shape and delegate.
    val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter)
    runJob[T, U](rdd, processFunc, 0 until rdd.partitions.length, resultHandler)
  }
/**
* :: DeveloperApi ::
* Run a job that can return approximate results.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param evaluator `ApproximateEvaluator` to receive the partial results
* @param timeout maximum time to wait for the job, in milliseconds
* @return partial result (how partial depends on whether the job was finished before or
* after timeout)
*/
  @DeveloperApi
  def runApproximateJob[T, U, R](
      rdd: RDD[T],
      func: (TaskContext, Iterator[T]) => U,
      evaluator: ApproximateEvaluator[U, R],
      timeout: Long): PartialResult[R] = {
    assertNotStopped()
    val callSite = getCallSite
    logInfo("Starting job: " + callSite.shortForm)
    val start = System.nanoTime
    val cleanedFunc = clean(func)
    // Returns after at most `timeout` ms with whatever the evaluator has accumulated.
    val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout,
      localProperties.get)
    logInfo(
      "Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s")
    result
  }
/**
* Submit a job for execution and return a FutureJob holding the result.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @param resultHandler callback to pass each result to
* @param resultFunc function to be executed when the result is ready
*/
  def submitJob[T, U, R](
      rdd: RDD[T],
      processPartition: Iterator[T] => U,
      partitions: Seq[Int],
      resultHandler: (Int, U) => Unit,
      resultFunc: => R): SimpleFutureAction[R] =
  {
    assertNotStopped()
    val cleanF = clean(processPartition)
    val callSite = getCallSite
    // Non-blocking: the DAG scheduler returns a waiter immediately; completion is
    // observed through the returned SimpleFutureAction.
    val waiter = dagScheduler.submitJob(
      rdd,
      (context: TaskContext, iter: Iterator[T]) => cleanF(iter),
      partitions,
      callSite,
      resultHandler,
      localProperties.get)
    new SimpleFutureAction(waiter, resultFunc)
  }
/**
* Submit a map stage for execution. This is currently an internal API only, but might be
* promoted to DeveloperApi in the future.
*/
  private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C])
      : SimpleFutureAction[MapOutputStatistics] = {
    assertNotStopped()
    val callSite = getCallSite()
    var result: MapOutputStatistics = null
    // The callback below fills in `result` when the stage completes.
    val waiter = dagScheduler.submitMapStage(
      dependency,
      (r: MapOutputStatistics) => { result = r },
      callSite,
      localProperties.get)
    // NOTE(review): this appears to rely on SimpleFutureAction taking its second argument
    // by-name, so `result` is read only after the callback has run — confirm against
    // SimpleFutureAction's signature.
    new SimpleFutureAction[MapOutputStatistics](waiter, result)
  }
/**
* Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup`
* for more information.
*/
  def cancelJobGroup(groupId: String): Unit = {
    assertNotStopped()
    // Cancels every active job tagged with this group id (see setJobGroup).
    dagScheduler.cancelJobGroup(groupId)
  }
/** Cancel all jobs that have been scheduled or are running. */
  def cancelAllJobs(): Unit = {
    assertNotStopped()
    // Cancels every scheduled or running job on this context.
    dagScheduler.cancelAllJobs()
  }
/**
* Cancel a given job if it's scheduled or running.
*
* @param jobId the job ID to cancel
* @param reason optional reason for cancellation
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
  def cancelJob(jobId: Int, reason: String): Unit = {
    // Option(reason) tolerates a null reason string.
    dagScheduler.cancelJob(jobId, Option(reason))
  }
/**
* Cancel a given job if it's scheduled or running.
*
* @param jobId the job ID to cancel
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
  def cancelJob(jobId: Int): Unit = {
    // No-reason variant.
    dagScheduler.cancelJob(jobId, None)
  }
/**
* Cancel a given stage and all jobs associated with it.
*
* @param stageId the stage ID to cancel
* @param reason reason for cancellation
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
  def cancelStage(stageId: Int, reason: String): Unit = {
    // Option(reason) tolerates a null reason string.
    dagScheduler.cancelStage(stageId, Option(reason))
  }
/**
 * Cancel a given stage and every job associated with it, without recording a reason.
 *
 * @param stageId the stage ID to cancel
 * @note Throws `InterruptedException` if the cancel message cannot be sent
 */
def cancelStage(stageId: Int): Unit = {
  dagScheduler.cancelStage(stageId, None)
}
/**
 * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI
 * or through SparkListener.onTaskStart.
 *
 * @param taskId the task ID to kill; uniquely identifies the task attempt
 * @param interruptThread whether to interrupt the thread running the task
 * @param reason short string explaining the kill; if a task is killed multiple times
 *               with different reasons, only one reason is reported
 * @return whether the task was successfully killed
 */
def killTaskAttempt(
    taskId: Long,
    interruptThread: Boolean = true,
    reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = {
  // Delegate straight to the DAG scheduler, which owns task lifecycle.
  dagScheduler.killTaskAttempt(taskId, interruptThread, reason)
}
/**
 * Clean a closure so it is ready to be serialized and shipped to tasks
 * (removes unreferenced variables in $outer's, updates REPL variables).
 *
 * If `checkSerializable` is set, the closure is also proactively serialized and a
 * `SparkException` is thrown if that fails.
 *
 * @param f the closure to clean
 * @param checkSerializable whether to immediately check `f` for serializability
 * @throws SparkException if `checkSerializable` is set but `f` is not serializable
 * @return the cleaned closure (same reference, mutated in place by the cleaner)
 */
private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = {
  val closure = f
  ClosureCleaner.clean(closure, checkSerializable)
  closure
}
/**
 * Set the directory under which RDDs are going to be checkpointed.
 *
 * @param directory path to the directory where checkpoint files will be stored
 *                  (must be an HDFS path if running on a cluster)
 */
def setCheckpointDir(directory: String): Unit = {
  // Warn when a non-local deployment is handed what looks like a local path:
  // the driver could later try to reconstruct a checkpointed RDD from its own
  // local filesystem, while the checkpoint files actually live on executors.
  if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) {
    logWarning("Spark is not running in local mode, therefore the checkpoint directory " +
      s"must not be on the local filesystem. Directory '$directory' " +
      "appears to be on the local filesystem.")
  }
  checkpointDir = Option(directory).map { base =>
    // Create a unique subdirectory per context so concurrent apps don't collide.
    val checkpointPath = new Path(base, UUID.randomUUID().toString)
    val fs = checkpointPath.getFileSystem(hadoopConfiguration)
    fs.mkdirs(checkpointPath)
    fs.getFileStatus(checkpointPath).getPath.toString
  }
}
/** The directory where RDD checkpoint data is written, if one has been configured. */
def getCheckpointDir: Option[String] = checkpointDir
/** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
def defaultParallelism: Int = {
  assertNotStopped()
  // Delegated: each scheduler backend decides its own default (cores, cluster size, ...).
  taskScheduler.defaultParallelism
}
/**
 * Default min number of partitions for Hadoop RDDs when not given by user
 * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2.
 * The reasons for this are discussed in https://github.com/mesos/spark/pull/718
 */
def defaultMinPartitions: Int = math.min(defaultParallelism, 2)
// Monotonically increasing ids for shuffles and RDDs created by this context.
// AtomicInteger makes allocation thread-safe without locking.
private val nextShuffleId = new AtomicInteger(0)
private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement()
private val nextRddId = new AtomicInteger(0)
/** Register a new RDD, returning its RDD ID */
private[spark] def newRddId(): Int = nextRddId.getAndIncrement()
/**
 * Registers listeners specified in spark.extraListeners, then starts the listener bus.
 * This should be called after all internal listeners have been registered with the listener bus
 * (e.g. after the web UI and event logging listeners have been registered).
 */
private def setupAndStartListenerBus(): Unit = {
  // Instantiate the user-supplied listener classes and attach them to the shared
  // queue before the bus starts delivering events.
  try {
    conf.get(EXTRA_LISTENERS).foreach { classNames =>
      val extraListeners =
        Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf)
      extraListeners.foreach { listener =>
        listenerBus.addToSharedQueue(listener)
        logInfo(s"Registered listener ${listener.getClass().getName()}")
      }
    }
  } catch {
    case e: Exception =>
      // Tear the context down, but make sure the registration failure (not any
      // failure from stop()) is what propagates to the caller.
      try {
        stop()
      } finally {
        throw new SparkException(s"Exception when registering SparkListener", e)
      }
  }
  listenerBus.start(this, _env.metricsSystem)
  _listenerBusStarted = true
}
/** Post the application start event */
private def postApplicationStart(): Unit = {
  // Note: this code assumes that the task scheduler has been initialized and has contacted
  // the cluster manager to get an application ID (in case the cluster manager provides one).
  listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId),
    startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls,
    schedulerBackend.getDriverAttributes))
  // Start syncing driver logs, if a driver logger was configured.
  _driverLogger.foreach(_.startSync(_hadoopConfiguration))
}
/** Post the application end event, stamped with the current wall-clock time. */
private def postApplicationEnd(): Unit = {
  listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis))
}
/**
 * Post the environment update event once the task scheduler is ready.
 * A no-op until the task scheduler has been created.
 */
private def postEnvironmentUpdate(): Unit = {
  if (taskScheduler != null) {
    val mode = getSchedulingMode.toString
    val jarPaths = addedJars.keys.toSeq
    val filePaths = addedFiles.keys.toSeq
    // Snapshot the full environment (Spark/Hadoop config, scheduling mode,
    // distributed jars/files) and publish it to listeners.
    val details = SparkEnv.environmentDetails(conf, hadoopConfiguration,
      mode, jarPaths, filePaths)
    listenerBus.post(SparkListenerEnvironmentUpdate(details))
  }
}
/**
 * Reports heartbeat metrics for the driver process.
 *
 * @param executorMetricsSource optional metrics source to update with the snapshot
 */
private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = {
  val snapshot = ExecutorMetrics.getCurrentMetrics(env.memoryManager)
  executorMetricsSource.foreach(_.updateMetricsSnapshot(snapshot))
  // The driver does not track per-stage metrics, so everything is keyed by a
  // single dummy "driver stage".
  val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics]
  driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(snapshot))
  // No accumulator updates are reported from the driver heartbeat.
  val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0)
  listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates,
    driverUpdates))
}
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having finished construction.
// NOTE: this must be placed at the end of the SparkContext constructor.
SparkContext.setActiveContext(this)
}
/**
 * The SparkContext object contains a number of implicit conversions and parameters for use with
 * various Spark features.
 */
object SparkContext extends Logging {
  // Accepted values for log-level configuration; presumably consumed by
  // setLogLevel earlier in this file -- confirm.
  private val VALID_LOG_LEVELS =
    Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN")
  /**
   * Lock that guards access to global variables that track SparkContext construction.
   */
  private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object()
  /**
   * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`.
   *
   * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`.
   */
  private val activeContext: AtomicReference[SparkContext] =
    new AtomicReference[SparkContext](null)
  /**
   * Points to a partially-constructed SparkContext if another thread is in the SparkContext
   * constructor, or `None` if no SparkContext is being constructed.
   *
   * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`.
   */
  private var contextBeingConstructed: Option[SparkContext] = None
/**
 * Called to ensure that no other SparkContext is running in this JVM.
 *
 * Throws an exception if a running context is detected and logs a warning if another thread is
 * constructing a SparkContext. This warning is necessary because the current locking scheme
 * prevents us from reliably distinguishing between cases where another context is being
 * constructed and cases where another constructor threw an exception.
 *
 * @param sc the context being constructed; a running context that IS `sc` is allowed
 */
private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    Option(activeContext.get()).filter(_ ne sc).foreach { ctx =>
      // Fixed: the two fragments previously concatenated without a separator,
      // producing "...(see SPARK-2243).The currently running...".
      val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." +
        s" The currently running SparkContext was created at:\n${ctx.creationSite.longForm}"
      throw new SparkException(errMsg)
    }
    contextBeingConstructed.filter(_ ne sc).foreach { otherContext =>
      // Since otherContext might point to a partially-constructed context, guard against
      // its creationSite field being null:
      val otherContextCreationSite =
        Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location")
      val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" +
        " constructor). This may indicate an error, since only one SparkContext should be" +
        " running in this JVM (see SPARK-2243)." +
        s" The other SparkContext was created at:\n$otherContextCreationSite"
      logWarning(warnMsg)
    }
  }
}
/**
 * This function may be used to get or instantiate a SparkContext and register it as a
 * singleton object. Because we can only have one active SparkContext per JVM,
 * this is useful when applications may wish to share a SparkContext.
 *
 * @param config `SparkConfig` that will be used for initialisation of the `SparkContext`
 * @return current `SparkContext` (or a new one if it wasn't created before the function call)
 */
def getOrCreate(config: SparkConf): SparkContext = {
  // Synchronize to ensure that multiple create requests don't trigger an exception
  // from assertNoOtherContextIsRunning within setActiveContext
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    if (activeContext.get() == null) {
      setActiveContext(new SparkContext(config))
    } else {
      // An existing context is reused; the caller's config is silently ignored.
      if (config.getAll.nonEmpty) {
        logWarning("Using an existing SparkContext; some configuration may not take effect.")
      }
    }
    activeContext.get()
  }
}
/**
 * This function may be used to get or instantiate a SparkContext and register it as a
 * singleton object. Because we can only have one active SparkContext per JVM,
 * this is useful when applications may wish to share a SparkContext.
 *
 * This method allows not passing a SparkConf (useful if just retrieving).
 *
 * @return current `SparkContext` (or a new one if wasn't created before the function call)
 */
def getOrCreate(): SparkContext = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    if (activeContext.get() == null) {
      // No config supplied: the new context reads settings from system properties.
      setActiveContext(new SparkContext())
    }
    activeContext.get()
  }
}
/** Return the current active [[SparkContext]] if any. */
private[spark] def getActive: Option[SparkContext] = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    // Option(...) maps the null "no active context" sentinel to None.
    Option(activeContext.get())
  }
}
/**
 * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is
 * running. Throws an exception if a running context is detected and logs a warning if another
 * thread is constructing a SparkContext. This warning is necessary because the current locking
 * scheme prevents us from reliably distinguishing between cases where another context is being
 * constructed and cases where another constructor threw an exception.
 */
private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    assertNoOtherContextIsRunning(sc)
    // Record the in-flight construction; cleared again by setActiveContext.
    contextBeingConstructed = Some(sc)
  }
}
/**
 * Called at the end of the SparkContext constructor to ensure that no other SparkContext has
 * raced with this constructor and started.
 */
private[spark] def setActiveContext(sc: SparkContext): Unit = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    assertNoOtherContextIsRunning(sc)
    // Construction is complete: clear the in-flight marker and publish sc.
    contextBeingConstructed = None
    activeContext.set(sc)
  }
}
/**
 * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's
 * also called in unit tests to prevent a flood of warnings from test suites that don't / can't
 * properly clean up their SparkContexts.
 */
private[spark] def clearActiveContext(): Unit = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    activeContext.set(null)
  }
}
// Local-property keys used to tag jobs submitted through this context.
private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description"
private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id"
private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel"
private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool"
// Local-property keys controlling RDD operation scopes (used for UI grouping).
private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope"
private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride"
/**
 * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was
 * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see
 * SPARK-6716 for more details).
 */
private[spark] val DRIVER_IDENTIFIER = "driver"
// Implicit conversion from a collection of Writables to a Hadoop ArrayWritable,
// preserving the element class via the ClassTag.
private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T])
  : ArrayWritable = {
  // Upcast helper: forces each element to the Writable interface.
  def anyToWritable[U <: Writable](u: U): Writable = u
  new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]],
      arr.map(x => anyToWritable(x)).toArray)
}
/**
 * Find the JAR from which a given class was loaded, to make it easy for users to pass
 * their JARs to SparkContext.
 *
 * @param cls class that should be inside of the jar
 * @return jar that contains the Class, `None` if not found or the class was not
 *         loaded from a jar (e.g. a directory on the classpath)
 */
def jarOfClass(cls: Class[_]): Option[String] = {
  val resource = cls.getResource("/" + cls.getName.replace('.', '/') + ".class")
  Option(resource).map(_.toString).flatMap { uriStr =>
    if (uriStr.startsWith("jar:file:")) {
      // URI is of the form "jar:file:/path/foo.jar!/package/cls.class";
      // extract the "/path/foo.jar" part between the scheme and the '!'.
      Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!')))
    } else {
      None
    }
  }
}
/**
 * Find the JAR that contains the class of a particular object, to make it easy for users
 * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in
 * your driver program.
 *
 * @param obj reference to an instance which class should be inside of the jar
 * @return jar that contains the class of the instance, `None` if not found
 */
def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass)
/**
 * Creates a modified version of a SparkConf with the parameters that can be passed separately
 * to SparkContext, to make it easier to write SparkContext's constructors. This ignores
 * parameters that are passed as the default value of null, instead of throwing an exception
 * like SparkConf would.
 *
 * @param conf base configuration; cloned, never mutated
 * @param master cluster manager URL to set on the clone
 * @param appName application name to set on the clone
 * @param sparkHome Spark installation path, ignored when null
 * @param jars jars to distribute, ignored when null or empty
 * @param environment executor environment variables to set
 * @return a new SparkConf with the overrides applied
 */
private[spark] def updatedConf(
    conf: SparkConf,
    master: String,
    appName: String,
    sparkHome: String = null,
    jars: Seq[String] = Nil,
    environment: Map[String, String] = Map()): SparkConf = {
  val res = conf.clone()
  res.setMaster(master)
  res.setAppName(appName)
  if (sparkHome != null) {
    res.setSparkHome(sparkHome)
  }
  // Keep the explicit null guard: callers may pass null instead of Nil.
  if (jars != null && jars.nonEmpty) {
    res.setJars(jars)
  }
  res.setExecutorEnv(environment.toSeq)
  res
}
/**
 * The number of cores available to the driver to use for tasks such as I/O with Netty
 */
private[spark] def numDriverCores(master: String): Int = {
  // Delegates with a null conf; the conf is only consulted for yarn-cluster mode.
  numDriverCores(master, null)
}
/**
 * The number of cores available to the driver to use for tasks such as I/O with Netty
 */
private[spark] def numDriverCores(master: String, conf: SparkConf): Int = {
  // "*" means "use every available processor on this machine".
  def convertToInt(threads: String): Int = {
    if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt
  }
  master match {
    case "local" => 1
    case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads)
    case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads)
    case "yarn" =>
      // Only yarn-cluster mode runs the driver inside the cluster, where its
      // core count is configurable; client-mode drivers fall through to 0.
      if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") {
        conf.getInt(DRIVER_CORES.key, 0)
      } else {
        0
      }
    case _ => 0 // Either driver is not being used, or its core count will be interpolated later
  }
}
/**
 * Create a task scheduler based on a given master URL.
 * Return a 2-tuple of the scheduler backend and the task scheduler.
 */
private def createTaskScheduler(
    sc: SparkContext,
    master: String,
    deployMode: String): (SchedulerBackend, TaskScheduler) = {
  import SparkMasterRegex._
  // When running locally, don't try to re-execute tasks on failure.
  val MAX_LOCAL_TASK_FAILURES = 1
  // Ensure that default executor's resources satisfies one or more tasks requirement.
  // This function is for cluster managers that don't set the executor cores config, for
  // others its checked in ResourceProfile.
  def checkResourcesPerTask(executorCores: Int): Unit = {
    val taskCores = sc.conf.get(CPUS_PER_TASK)
    if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) {
      validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores)
    }
    val defaultProf = sc.resourceProfileManager.defaultResourceProfile
    ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores))
  }
  master match {
    // "local": single thread, no task retries.
    case "local" =>
      checkResourcesPerTask(1)
      val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
      val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1)
      scheduler.initialize(backend)
      (backend, scheduler)
    // "local[N]" / "local[*]": N worker threads in-process.
    case LOCAL_N_REGEX(threads) =>
      def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
      // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads.
      val threadCount = if (threads == "*") localCpuCount else threads.toInt
      if (threadCount <= 0) {
        throw new SparkException(s"Asked to run locally with $threadCount threads")
      }
      checkResourcesPerTask(threadCount)
      val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
      val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
      scheduler.initialize(backend)
      (backend, scheduler)
    // "local[N, M]": like local[N], but tasks may be retried up to M times.
    case LOCAL_N_FAILURES_REGEX(threads, maxFailures) =>
      def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
      // local[*, M] means the number of cores on the computer with M failures
      // local[N, M] means exactly N threads with M failures
      val threadCount = if (threads == "*") localCpuCount else threads.toInt
      checkResourcesPerTask(threadCount)
      val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true)
      val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
      scheduler.initialize(backend)
      (backend, scheduler)
    // "spark://host:port[,host:port...]": standalone cluster.
    case SPARK_REGEX(sparkUrl) =>
      val scheduler = new TaskSchedulerImpl(sc)
      val masterUrls = sparkUrl.split(",").map("spark://" + _)
      val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
      scheduler.initialize(backend)
      (backend, scheduler)
    // "local-cluster[n, cores, memory]": in-process standalone cluster, for tests.
    case LOCAL_CLUSTER_REGEX(numSlaves, coresPerSlave, memoryPerSlave) =>
      checkResourcesPerTask(coresPerSlave.toInt)
      // Check to make sure memory requested <= memoryPerSlave. Otherwise Spark will just hang.
      val memoryPerSlaveInt = memoryPerSlave.toInt
      if (sc.executorMemory > memoryPerSlaveInt) {
        throw new SparkException(
          "Asked to launch cluster with %d MiB RAM / worker but requested %d MiB/worker".format(
            memoryPerSlaveInt, sc.executorMemory))
      }
      // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED
      // to false because this mode is intended to be used for testing and in this case all the
      // executors are running on the same host. So if host local reading was enabled here then
      // testing of the remote fetching would be secondary as setting this config explicitly to
      // false would be required in most of the unit test (despite the fact that remote fetching
      // is much more frequent in production).
      sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false)
      val scheduler = new TaskSchedulerImpl(sc)
      val localCluster = new LocalSparkCluster(
        numSlaves.toInt, coresPerSlave.toInt, memoryPerSlaveInt, sc.conf)
      val masterUrls = localCluster.start()
      val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
      scheduler.initialize(backend)
      backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => {
        localCluster.stop()
      }
      (backend, scheduler)
    // Anything else: look up a pluggable ExternalClusterManager (e.g. YARN, k8s).
    case masterUrl =>
      val cm = getClusterManager(masterUrl) match {
        case Some(clusterMgr) => clusterMgr
        case None => throw new SparkException("Could not parse Master URL: '" + master + "'")
      }
      try {
        val scheduler = cm.createTaskScheduler(sc, masterUrl)
        val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler)
        cm.initialize(scheduler, backend)
        (backend, scheduler)
      } catch {
        case se: SparkException => throw se
        case NonFatal(e) =>
          throw new SparkException("External scheduler cannot be instantiated", e)
      }
  }
}
// Resolve the single ExternalClusterManager (via java.util.ServiceLoader) that
// claims it can handle the given master URL; None if no provider matches.
private def getClusterManager(url: String): Option[ExternalClusterManager] = {
  val loader = Utils.getContextOrSparkClassLoader
  val serviceLoaders =
    ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url))
  // Ambiguity is a configuration error, not a silent first-wins choice.
  if (serviceLoaders.size > 1) {
    throw new SparkException(
      s"Multiple external cluster managers registered for the url $url: $serviceLoaders")
  }
  serviceLoaders.headOption
}
}
/**
 * A collection of regexes for extracting information from the master string.
 */
private object SparkMasterRegex {
  // Regular expression used for local[N] and local[*] master formats
  val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r
  // Regular expression for local[N, maxRetries], used in tests with failing tasks
  val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r
  // Regular expression for simulating a Spark cluster of [N, cores, memory] locally
  val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r
  // Regular expression for connecting to Spark deploy clusters
  val SPARK_REGEX = """spark://(.*)""".r
}
/**
 * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable`
 * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
 * conversion.
 * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object
 * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to
 * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`).
 */
private[spark] class WritableConverter[T](
    val writableClass: ClassTag[T] => Class[_ <: Writable],
    val convert: Writable => T)
  extends Serializable
object WritableConverter {
  // Helper objects for converting common types to Writable
  private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T)
    : WritableConverter[T] = {
    val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]]
    new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W]))
  }
  // The following implicit functions were in SparkContext before 1.3 and users had to
  // `import SparkContext._` to enable them. Now we move them here to make the compiler find
  // them automatically. However, we still keep the old functions in SparkContext for backward
  // compatibility and forward to the following functions directly.
  // The following implicit declarations have been added on top of the very similar ones
  // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta
  // expansion of zero-arg methods and thus won't match a no-arg method where it expects
  // an implicit that is a function of no args.
  implicit val intWritableConverterFn: () => WritableConverter[Int] =
    () => simpleWritableConverter[Int, IntWritable](_.get)
  implicit val longWritableConverterFn: () => WritableConverter[Long] =
    () => simpleWritableConverter[Long, LongWritable](_.get)
  implicit val doubleWritableConverterFn: () => WritableConverter[Double] =
    () => simpleWritableConverter[Double, DoubleWritable](_.get)
  implicit val floatWritableConverterFn: () => WritableConverter[Float] =
    () => simpleWritableConverter[Float, FloatWritable](_.get)
  implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] =
    () => simpleWritableConverter[Boolean, BooleanWritable](_.get)
  implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = {
    () => simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes method returns array which is longer then data to be returned
      Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
    }
  }
  implicit val stringWritableConverterFn: () => WritableConverter[String] =
    () => simpleWritableConverter[String, Text](_.toString)
  // Identity converter for types that are already Writable subclasses.
  implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] =
    () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
  // These implicits remain included for backwards-compatibility. They fulfill the
  // same role as those above.
  implicit def intWritableConverter(): WritableConverter[Int] =
    simpleWritableConverter[Int, IntWritable](_.get)
  implicit def longWritableConverter(): WritableConverter[Long] =
    simpleWritableConverter[Long, LongWritable](_.get)
  implicit def doubleWritableConverter(): WritableConverter[Double] =
    simpleWritableConverter[Double, DoubleWritable](_.get)
  implicit def floatWritableConverter(): WritableConverter[Float] =
    simpleWritableConverter[Float, FloatWritable](_.get)
  implicit def booleanWritableConverter(): WritableConverter[Boolean] =
    simpleWritableConverter[Boolean, BooleanWritable](_.get)
  implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = {
    simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes method returns array which is longer then data to be returned
      Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
    }
  }
  implicit def stringWritableConverter(): WritableConverter[String] =
    simpleWritableConverter[String, Text](_.toString)
  implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] =
    new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
}
/**
 * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable`
 * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
 * conversion.
 * The `Writable` class will be used in `SequenceFileRDDFunctions`.
 */
private[spark] class WritableFactory[T](
    val writableClass: ClassTag[T] => Class[_ <: Writable],
    val convert: T => Writable) extends Serializable
object WritableFactory {
  // Build a factory from a plain T => W function, capturing W's runtime class.
  private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W)
    : WritableFactory[T] = {
    val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]]
    new WritableFactory[T](_ => writableClass, convert)
  }
  // Implicit factories for the common primitive and String types.
  implicit def intWritableFactory: WritableFactory[Int] =
    simpleWritableFactory(new IntWritable(_))
  implicit def longWritableFactory: WritableFactory[Long] =
    simpleWritableFactory(new LongWritable(_))
  implicit def floatWritableFactory: WritableFactory[Float] =
    simpleWritableFactory(new FloatWritable(_))
  implicit def doubleWritableFactory: WritableFactory[Double] =
    simpleWritableFactory(new DoubleWritable(_))
  implicit def booleanWritableFactory: WritableFactory[Boolean] =
    simpleWritableFactory(new BooleanWritable(_))
  implicit def bytesWritableFactory: WritableFactory[Array[Byte]] =
    simpleWritableFactory(new BytesWritable(_))
  implicit def stringWritableFactory: WritableFactory[String] =
    simpleWritableFactory(new Text(_))
  // Identity factory for values that are already Writable.
  implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] =
    simpleWritableFactory(w => w)
}
| matthewfranglen/spark | core/src/main/scala/org/apache/spark/SparkContext.scala | Scala | mit | 123,235 |
package de.vorb.namcap
import java.util.Locale
/**
 * Localization support: resolves the UI language from the default system locale.
 */
package object l10n {
  // Language used when the system locale has no registered translation.
  val defaultLanguage = en_US
  // Registered translations, keyed by full locale ("en_US") and bare language code ("en").
  val languages = Map(
    "en_US" -> en_US,
    "en" -> en_US,
    "de_DE" -> de_DE,
    "de" -> de_DE)
  // get default system locale and select language accordingly
  val locale = Locale.getDefault.toString
  // Try the full locale first, then fall back to the bare language code so that
  // variants like "de_AT" still resolve to the German translation instead of the
  // default. Previously the "en"/"de" map entries were effectively dead because
  // Locale.getDefault.toString always yields the full locale string.
  val translate = languages.get(locale)
    .orElse(languages.get(Locale.getDefault.getLanguage))
    .getOrElse(defaultLanguage)
}
| pvorb/NamCap | src/main/scala/de/vorb/namcap/l10n/package.scala | Scala | mit | 377 |
package main.scala.com.spark.scala.kafka.prodcons
import kafka.serializer.StringDecoder
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka._
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream.toPairDStreamFunctions
import java.util.Properties
/**
 * Consumes messages from Kafka via Spark Streaming's direct approach and prints a
 * per-batch word count.
 *
 * Expected properties: "AppName", "Master", "frequency" (batch interval, seconds),
 * "topicsSet" (comma-separated topics), "metadata.broker.list".
 */
class ConsumerSparkStreaming(props: Properties) {
  /** Starts the streaming job and blocks until the streaming context terminates. */
  def consume() = {
    // Create context with the configured batch interval.
    val sparkConf = new SparkConf().setAppName(props.getProperty("AppName")).setMaster(props.getProperty("Master"))
    // NOTE(review): getProperty returns null when a key is missing, which would make
    // .toInt / the Map construction below fail -- the required keys must be present.
    val ssc = new StreamingContext(sparkConf, Seconds(props.getProperty("frequency").toInt))
    // Create direct kafka stream with brokers and topics.
    val topicsSet = props.getProperty("topicsSet").split(",").toSet
    val kafkaParams = Map[String, String]("metadata.broker.list" -> props.getProperty("metadata.broker.list"))
    val messages = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, topicsSet)
    // Take the message values, split them into words, count the words and print.
    val lines = messages.map(_._2)
    val words = lines.flatMap(_.split(" "))
    val wordCounts = words.map(x => (x, 1L)).reduceByKey(_ + _)
    wordCounts.print()
    // Start the computation and block until termination.
    ssc.start()
    ssc.awaitTermination()
  }
}
/*
* Copyright (c) 2012, 2013, 2014, 2015, 2016 SURFnet BV
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
* * Neither the name of the SURFnet BV nor the names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package controllers
import akka.actor._
import akka.event.LoggingReceive
import akka.pattern.ask
import akka.util.Timeout
import java.net.URI
import java.time.{ Clock, Instant, ZoneOffset }
import java.time.temporal._
import nl.surfnet.nsiv2.messages._
import nl.surfnet.nsiv2.persistence._
import nl.surfnet.nsiv2.utils._
import nl.surfnet.safnari._
import org.ogf.schemas.nsi._2013._12.connection.types._
import play.Logger
import scala.concurrent._
import scala.concurrent.duration._
import scala.concurrent.stm._
import scala.reflect.ClassTag
import scala.util.{ Failure, Try }
import controllers.ActorSupport._
// Thin typed facade over the actor that owns a single connection's state machine.
case class Connection(actor: ActorRef) {
  /** Fire-and-forget: deliver an operation to the connection actor. */
  def !(operation: Connection.Operation)(implicit sender: ActorRef): Unit = actor ! operation
  /**
   * Ask pattern: deliver an operation and return its typed result. The cast is
   * safe because each Operation carries the ClassTag of its own Result type.
   */
  def ?(operation: Connection.Operation)(implicit timeout: Timeout): Future[operation.Result] =
    (actor ? operation).mapTo(operation.resultClassTag)
}
object Connection {
  // Protocol of messages a connection actor understands. Each operation declares
  // its own Result type plus a ClassTag so `?` can cast the reply safely.
  sealed trait Operation {
    type Result
    def resultClassTag: ClassTag[Result]
  }
  /** Query the connection's summary, pending criteria and last-modified time. */
  case object Query extends Operation {
    final case class Result(summary: QuerySummaryResultType, pendingCriteria: Option[ReservationRequestCriteriaType], lastModifiedAt: Instant)
    final val resultClassTag = implicitly[ClassTag[Result]]
  }
  /** Query the child (segment) connections. */
  case object QuerySegments extends Operation {
    type Result = Seq[ConnectionData]
    final val resultClassTag = implicitly[ClassTag[Result]]
  }
  /** Query the notifications recorded for this connection. */
  case object QueryNotifications extends Operation {
    type Result = Seq[NotificationBaseType]
    final val resultClassTag = implicitly[ClassTag[Result]]
  }
  /** Recursively query this connection and its children on behalf of a requester. */
  case class QueryRecursive(message: FromRequester) extends Operation {
    type Result = ToRequester
    final val resultClassTag = implicitly[ClassTag[Result]]
  }
  /** Query the stored asynchronous query results. */
  case object QueryResults extends Operation {
    type Result = Seq[QueryResultResponseType]
    final val resultClassTag = implicitly[ClassTag[Result]]
  }
  /** Deliver a timestamped protocol message; acknowledged synchronously. */
  case class Command[+T <: Message](timestamp: Instant, message: T) extends Operation {
    type Result = NsiAcknowledgement
    final val resultClassTag = implicitly[ClassTag[Result]]
  }
  /** Replay a recorded message history, e.g. when restoring state from storage. */
  case class Replay(messages: Seq[Command[Message]]) extends Operation {
    type Result = Try[Unit]
    final val resultClassTag = implicitly[ClassTag[Result]]
  }
  /** Delete the connection; no reply is ever produced (Result = Nothing). */
  case object Delete extends Operation {
    type Result = Nothing
    final val resultClassTag = implicitly[ClassTag[Result]]
  }
}
/**
 * Registry of all connections known to this aggregator. Maintains several
 * lookup tables (by connection id, global reservation id, requester
 * correlation id and child connection id) kept consistent with ScalaSTM
 * transactions, and registers per-connection "delete hooks" that undo the
 * bookkeeping when a connection is deleted.
 */
class ConnectionManager(
  connectionFactory: (ConnectionId, NsiProviderMessage[InitialReserve]) => (ActorRef, ConnectionEntity),
  configuration: Configuration,
  val messageStore: MessageStore[Message]
) {
  // Primary index: connection id -> per-connection actor wrapper.
  private val connections = TMap.empty[ConnectionId, Connection]
  // Secondary index: global reservation id -> all connections sharing it.
  private val globalReservationIdsMap = TMap.empty[GlobalReservationId, Set[Connection]]
  // Maps (requester NSA, correlation id) to the connection that handled the
  // request, so retransmitted requests are routed to the same connection.
  private val connectionsByRequesterCorrelationId = TMap.empty[(RequesterNsa, CorrelationId), Connection]
  // Maps a child (downstream) connection id to the aggregated connection owning it.
  private val childConnections = TMap.empty[ConnectionId, Connection]
  // Cleanup actions run when a connection is deleted; hooks compose in registration order.
  private val deleteHooks = TMap.empty[ConnectionId, InTxn => Unit]

  // Append `f` to the connection's delete hook, preserving any hook already registered.
  private def addDeleteHook(connectionId: ConnectionId)(f: InTxn => Unit)(implicit tx: InTxn): Unit = {
    val existing = deleteHooks.getOrElse(connectionId, (_: InTxn) => ())
    deleteHooks.put(connectionId, { txn => existing(txn); f(txn) })
    ()
  }

  // Remove and run the composed delete hook for the connection, if any.
  private def runDeleteHook(connectionId: ConnectionId): Unit = atomic { implicit txn =>
    deleteHooks.remove(connectionId).foreach { hook => hook(txn) }
  }

  /** Register a new connection plus the hooks that unregister it on delete. */
  def add(connectionId: ConnectionId, globalReservationId: Option[GlobalReservationId], connection: Connection): Unit = atomic { implicit txn =>
    connections(connectionId) = connection
    addDeleteHook(connectionId) { implicit txn => connections.remove(connectionId); () }
    globalReservationId foreach { globalReservationId =>
      globalReservationIdsMap(globalReservationId) = globalReservationIdsMap.getOrElse(globalReservationId, Set()) + connection
      addDeleteHook(connectionId) { implicit txn => deleteConnectionFromGlobalReservationIds(connection, globalReservationId) }
    }
  }

  // Drop the connection from the global reservation id index, removing the
  // entry entirely when no connections remain for that id.
  private def deleteConnectionFromGlobalReservationIds(connection: Connection, globalReservationId: GlobalReservationId)(implicit txn: InTxn): Unit = {
    val remainingConnections = globalReservationIdsMap.getOrElse(globalReservationId, Set()) - connection
    remainingConnections match {
      case s if s.nonEmpty => globalReservationIdsMap(globalReservationId) = s
      case _ => globalReservationIdsMap.remove(globalReservationId); ()
    }
  }

  def get(connectionId: ConnectionId): Option[Connection] = connections.single.get(connectionId)

  /** Look up multiple connections from one consistent snapshot; unknown ids are skipped. */
  def find(connectionIds: Seq[ConnectionId]): Seq[Connection] = {
    val connectionsMap = connections.single.snapshot
    connectionIds.flatMap(connectionsMap.get)
  }

  /** All connections registered under any of the given global reservation ids. */
  def findByGlobalReservationIds(globalReservationIds: Seq[GlobalReservationId]): Seq[Connection] = {
    val globalIdsMap = globalReservationIdsMap.single.snapshot
    globalReservationIds.flatMap(globalIdsMap.getOrElse(_, Set()))
  }

  private def findByRequesterCorrelationId(requesterNsa: RequesterNsa, correlationId: CorrelationId): Option[Connection] =
    connectionsByRequesterCorrelationId.single.get((requesterNsa, correlationId))

  // Index a child connection id under its aggregated connection; the mapping
  // is removed when the aggregated connection is deleted.
  private def addChildConnectionId(connection: Connection, aggregatedConnectionId: ConnectionId, childConnectionId: ConnectionId)(implicit txn: InTxn): Unit = {
    childConnections(childConnectionId) = connection
    addDeleteHook(aggregatedConnectionId) { implicit txn =>
      childConnections.remove(childConnectionId)
      ()
    }
  }

  // Remember which connection handled a (requester NSA, correlation id) pair
  // so duplicate requests reach the same connection (idempotency).
  private def registerRequesterAndCorrelationId(requesterNsa: RequesterNsa, correlationId: CorrelationId, connectionId: ConnectionId, connection: Connection): Unit = atomic { implicit txn =>
    val key = (requesterNsa, correlationId)
    connectionsByRequesterCorrelationId(key) = connection
    addDeleteHook(connectionId) { implicit txn => connectionsByRequesterCorrelationId.remove(key); () }
  }

  def findByChildConnectionId(connectionId: ConnectionId): Option[Connection] = childConnections.single.get(connectionId)

  def all: Seq[Connection] = connections.single.values.toSeq

  /**
   * Rebuild all connections from the message store by replaying their stored
   * messages. Only records whose first message is an InitialReserve from the
   * requester are restored. Fails with a single aggregate exception (replay
   * failures attached as suppressed exceptions) if any replay fails.
   */
  def restore(implicit actorSystem: ActorSystem, executionContext: ExecutionContext): Future[Unit] = {
    val replayedConnections = Future.sequence(for {
      (connectionId, records @ (MessageRecord(_, _, _, FromRequester(NsiProviderMessage(headers, initialReserve: InitialReserve))) +: _)) <- messageStore.loadEverything()
    } yield {
      val commands = records.map { record => Connection.Command(record.createdAt, record.message) }
      val connection = createConnection(connectionId, NsiProviderMessage(headers, initialReserve))
      // Re-register correlation ids so retransmits after restart stay idempotent.
      commands.foreach {
        case Connection.Command(_, FromRequester(message)) =>
          registerRequesterAndCorrelationId(message.headers.requesterNSA, message.headers.correlationId, connectionId, connection)
        case _ =>
      }
      (connection ? Connection.Replay(commands))
    }).map(_.collect {
      case Failure(exception) => exception
    })

    replayedConnections.flatMap { exceptions =>
      if (exceptions.isEmpty) {
        Future.successful(())
      } else {
        val exception = new Exception(s"replay failed with exceptions ${exceptions.mkString(", ")}")
        exceptions.foreach(exception.addSuppressed)
        Future.failed(exception)
      }
    }
  }

  /**
   * Return the connection already handling this request (idempotent retry),
   * the existing connection referenced by an update command, or a freshly
   * created connection for an initial reserve. Returns None when an update
   * references an unknown connection id.
   */
  def findOrCreateConnection(request: NsiProviderMessage[NsiProviderCommand])(implicit actorSystem: ActorSystem): Option[Connection] = atomic { implicit txn =>
    findByRequesterCorrelationId(request.headers.requesterNSA, request.headers.correlationId).orElse {
      val result = request match {
        case NsiProviderMessage(headers@_, update: NsiProviderUpdateCommand) =>
          get(update.connectionId).map(update.connectionId -> _)
        case NsiProviderMessage(headers, initialReserve: InitialReserve) =>
          val connectionId = newConnectionId
          val connection = createConnection(connectionId, NsiProviderMessage(headers, initialReserve))
          messageStore.create(connectionId, Instant.now(), headers.requesterNSA)
          Some(connectionId -> connection)
      }
      result.foreach {
        case (connectionId, connection) =>
          registerRequesterAndCorrelationId(request.headers.requesterNSA, request.headers.correlationId, connectionId, connection)
      }
      result.map(_._2)
    }
  }

  // Instantiate the connection entity and its actor, then register the pair.
  private def createConnection(connectionId: ConnectionId, initialReserve: NsiProviderMessage[InitialReserve])(implicit actorSystem: ActorSystem): Connection = {
    val (outputActor, connectionEntity) = connectionFactory(connectionId, initialReserve)
    val connection = Connection(actorSystem.actorOf(ConnectionActor.props(connectionEntity, outputActor), s"con-$connectionId"))
    val globalReservationId = Option(initialReserve.body.body.getGlobalReservationId()).map(URI.create)
    add(connectionId, globalReservationId, connection)
    connection
  }

  private object ConnectionActor {
    def props(connectionEntity: ConnectionEntity, outputActor: ActorRef): Props = Props(new ConnectionActor(connectionEntity, outputActor))
  }

  /**
   * Actor owning a single ConnectionEntity. Serializes all operations on the
   * entity, persists inbound messages together with the outbound messages they
   * produce, and maintains the PassedEndTime and expiration-cleanup timers.
   */
  private class ConnectionActor(connection: ConnectionEntity, output: ActorRef) extends Actor {
    // Inbound pipeline: idempotent handling wrapping child-connection bookkeeping.
    private val process = new IdempotentProvider(connection.aggregatorNsa, ManageChildConnections(connection.process))
    private val uuidGenerator = Uuid.randomUuidGenerator()
    private def newPassedEndTimeCorrelationId = CorrelationId.fromUuid(uuidGenerator())

    // Pending queryRecursive requesters, keyed by correlation id.
    var queryRequesters: Map[CorrelationId, ActorRef] = Map.empty
    // Currently scheduled timers (if any); cancelled on reschedule and on stop.
    var endTimeCancellable: Option[Cancellable] = None
    var expirationCancellable: Option[Cancellable] = None

    override def postStop(): Unit = {
      endTimeCancellable.foreach(_.cancel)
      expirationCancellable.foreach(_.cancel)
    }

    override def receive = LoggingReceive {
      case Connection.Query => sender ! Connection.Query.Result(connection.query, connection.rsm.pendingCriteria, connection.lastUpdatedAt)
      case Connection.QuerySegments => sender ! connection.segments
      case Connection.QueryNotifications => sender ! connection.notifications
      case Connection.QueryResults => sender ! connection.results

      case Connection.QueryRecursive(query @ FromRequester(NsiProviderMessage(_, QueryRecursive(_, _)))) =>
        // Remember who asked so the confirmation below can be routed back.
        queryRequesters += (query.correlationId -> sender)

        for {
          outbounds <- connection.queryRecursive(query)
          outbound <- outbounds
        } output ! outbound

      case Connection.Command(_, inbound @ FromProvider(NsiRequesterMessage(_, QueryRecursiveConfirmed(_)))) =>
        // A child answered a recursive query: forward to the original requester.
        for {
          messages <- connection.queryRecursiveResult(inbound)
          msg <- messages
          requester <- queryRequesters.get(msg.correlationId)
        } {
          queryRequesters -= msg.correlationId
          requester ! msg
        }

      case Connection.Command(timestamp, inbound: InboundMessage) =>
        // Process the message with a clock fixed at its original timestamp and
        // persist it together with any outbound messages it produces.
        val context = ConnectionContext(clock = Clock.fixed(timestamp, ZoneOffset.UTC))
        val result = PersistMessages(timestamp, process.apply)(inbound)(context)

        schedulePassedEndTimeMessage()

        val response: NsiAcknowledgement = result match {
          case Left(error) =>
            ServiceException(error)
          case Right(outbound) =>
            scheduleExpiration(timestamp)
            outbound.foreach(connection.process)
            outbound.foreach(output ! _)

            inbound match {
              case FromRequester(NsiProviderMessage(_, _: Reserve)) => ReserveResponse(connection.id)
              case _ => GenericAck()
            }
        }

        sender ! response

      case Connection.Replay(messages) =>
        // Re-apply stored messages to rebuild in-memory state; nothing is persisted here.
        Logger.info(s"Replaying ${messages.size} messages for connection ${connection.id}")

        val result = Try {
          messages.foreach {
            case Connection.Command(timestamp, inbound: InboundMessage) =>
              val context = ConnectionContext(clock = Clock.fixed(timestamp, ZoneOffset.UTC))
              // Replay failures are logged but intentionally ignored.
              process(inbound)(context).left.foreach { error =>
                Logger.warn(s"Connection ${connection.id} failed to replay message $inbound (ignored): $error")
              }
            case Connection.Command(timestamp, outbound: OutboundMessage) =>
              val context = ConnectionContext(clock = Clock.fixed(timestamp, ZoneOffset.UTC))
              connection.process(outbound)(context)
          }

          schedulePassedEndTimeMessage()
          messages.lastOption.foreach { lastMessage => scheduleExpiration(lastMessage.timestamp) }
        }

        sender ! result

      case Connection.Delete =>
        // NOTE(review): this interpolation renders as "<connection>.id" — probably meant ${connection.id}.
        Logger.info(s"Stopping $connection.id")
        messageStore.delete(connection.id, Instant.now())
        runDeleteHook(connection.id)
        self ! PoisonPill
    }

    /**
     * (Re)schedule delivery of a PassedEndTime message at the reservation's
     * committed end time, but only while the lifecycle state is CREATED.
     */
    private def schedulePassedEndTimeMessage(): Unit = {
      endTimeCancellable.foreach(_.cancel())
      endTimeCancellable = (for {
        criteria <- connection.rsm.committedCriteria
        endTime <- criteria.getSchedule().endTime.toOption(None)
        if connection.lsm.lifecycleState == LifecycleStateEnumType.CREATED
      } yield {
        val delay = (endTime.toEpochMilli - Instant.now().toEpochMilli).milliseconds
        val message = Connection.Command(endTime, PassedEndTime(newPassedEndTimeCorrelationId, connection.id, endTime))
        Logger.debug(s"Scheduling $message for execution after $delay")
        try {
          context.system.scheduler.scheduleOnce(delay) {
            Logger.debug(s"Sending scheduled message $message")
            self ! message
          }(context.dispatcher)
        } catch {
          case e: IllegalArgumentException =>
            // Akka's scheduler currently limits delays to 248 days or less. Retry
            // scheduling later until the real delay fits within that limit.
            context.system.scheduler.scheduleOnce(100.days)(schedulePassedEndTimeMessage)(context.dispatcher)
        }
      })
    }

    /**
     * (Re)schedule deletion of this connection after the configured expiration
     * time, unless it has a provision state machine and is still in lifecycle
     * state CREATED (i.e. an active reservation is never expired).
     */
    private def scheduleExpiration(lastMessageTimestamp: Instant): Unit = {
      expirationCancellable.foreach(_.cancel())

      expirationCancellable = if (connection.psm.isDefined && connection.lsm.lifecycleState == LifecycleStateEnumType.CREATED) None else {
        val expirationTime = lastMessageTimestamp.plus(configuration.ConnectionExpirationTime.toMillis, ChronoUnit.MILLIS)
        val delay = (expirationTime.toEpochMilli - Instant.now().toEpochMilli).milliseconds
        val message = Connection.Delete
        Logger.debug(s"Scheduling $message for execution after $delay")
        Some(context.system.scheduler.scheduleOnce(delay) {
          self ! message
        }(context.dispatcher))
      }
    }

    // Wraps an inbound handler so that, on success, the inbound message and the
    // outbound messages it generated are stored together in the message store.
    private def PersistMessages[E](timestamp: Instant, wrapped: InboundMessage => ConnectionContext => Either[E, Seq[OutboundMessage]])(inbound: InboundMessage)(context: ConnectionContext): Either[E, Seq[OutboundMessage]] = {
      val result = wrapped(inbound)(context)

      result.right.foreach { outbound =>
        messageStore.storeInboundWithOutboundMessages(connection.id, timestamp, inbound, outbound)
      }

      result
    }

    // Wraps a handler so that successfully processed messages also update the
    // child-connection index of the enclosing ConnectionManager.
    private def ManageChildConnections[E, A](wrapped: InboundMessage => ConnectionContext => Either[E, A])(inbound: InboundMessage)(context: ConnectionContext): Either[E, A] = {
      val outbound = wrapped(inbound)(context)
      if (outbound.isRight) {
        updateChildConnection(inbound)
      }
      outbound
    }

    // Register the child connection id announced by a provider reply, if any.
    private def updateChildConnection(message: InboundMessage): Unit = atomic { implicit txn =>
      val childConnectionId = message match {
        case AckFromProvider(NsiProviderMessage(_, ReserveResponse(connectionId))) =>
          Some(connectionId)
        case FromProvider(NsiRequesterMessage(_, ReserveConfirmed(connectionId, _))) =>
          Some(connectionId)
        case FromProvider(NsiRequesterMessage(_, ReserveFailed(body))) =>
          Some(body.getConnectionId)
        case _ =>
          None
      }
      childConnectionId.foreach(addChildConnectionId(Connection(self), connection.id, _))
    }
  }
}
| BandwidthOnDemand/nsi-safnari | app/controllers/ConnectionManager.scala | Scala | bsd-3-clause | 17,733 |
// Negative test for the initialization checker: `val b = new O.B` runs B's
// initializer, which reads `y` before `y` itself has been initialized
// (its definition comes last in class A). The marker on the last line is
// the expected diagnostic location.
class A {
  object O {
    class B {
      val a = y // reads A.this.y, which is still uninitialized at this point
    }
    class C
  }
  class Inner {
    def f(n: String) = new O.C
  }
  val inner = new Inner
  val b = new O.B
  val y = 10 // error
}
} | som-snytt/dotty | tests/init/neg/inner16.scala | Scala | apache-2.0 | 196 |
package org.kangmo.helper
/**
 * Minimal logging facade. A previous revision delegated to Play's
 * `play.Logger.of("application")`; the current implementation intentionally
 * discards messages.
 */
object Logger {
  object log {
    /** Record an error message. Currently a no-op; TODO(BUGBUG): persist to a log file. */
    def error(message: String): Unit = ()
  }
}
| Kangmo/korbit-nodejs-sdk | main/src/main/scala/helper/Logger.scala | Scala | apache-2.0 | 199 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.log4j.{Level, Logger}
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.Vectors
/**
 * An example k-means app. Run with
 * {{{
 * ./bin/run-example org.apache.spark.examples.mllib.DenseKMeans [options] <input>
 * }}}
 * If you use it as a template to create your own app, please use `spark-submit` to submit your app.
 */
object DenseKMeans {

  /** Strategies for picking the initial cluster centers. */
  object InitializationMode extends Enumeration {
    type InitializationMode = Value
    val Random, Parallel = Value
  }

  import InitializationMode._

  /**
   * Command-line parameters for this example.
   *
   * @param input path to the input file (one whitespace-separated dense vector per line)
   * @param k number of clusters to fit
   * @param numIterations maximum number of k-means iterations
   * @param initializationMode strategy for choosing the initial centers
   */
  case class Params(
      input: String = "../data/mllib/kmeans_data.txt",
      k: Int = 3,
      numIterations: Int = 10,
      initializationMode: InitializationMode = Parallel) extends AbstractParams[Params]

  def main(args: Array[String]) {
    val defaults = Params()

    // NOTE: the upstream Spark example marks 'k' and '<input>' as required
    // arguments; this variant falls back to the defaults above instead so it
    // can run unattended.
    val parser = new OptionParser[Params]("DenseKMeans") {
      head("DenseKMeans: an example k-means app for dense data.")
      opt[Int]('k', "k")
        .text(s"number of clusters, required")
        .action((x, c) => c.copy(k = x))
      opt[Int]("numIterations")
        .text(s"number of iterations, default: ${defaults.numIterations}")
        .action((x, c) => c.copy(numIterations = x))
      opt[String]("initMode")
        .text(s"initialization mode (${InitializationMode.values.mkString(",")}), " +
          s"default: ${defaults.initializationMode}")
        .action((x, c) => c.copy(initializationMode = InitializationMode.withName(x)))
    }

    parser.parse(args, defaults) match {
      case Some(params) => run(params)
      case None => sys.exit(1)
    }
  }

  /** Fit a k-means model to the configured input and print the clustering cost. */
  def run(params: Params) {
    val sparkConf = new SparkConf().setAppName(s"DenseKMeans with $params").setMaster("local")
    val sparkContext = new SparkContext(sparkConf)

    // Keep the console output readable for a demo run.
    Logger.getRootLogger.setLevel(Level.WARN)

    // Each input line is parsed into a dense vector; cache for the iterative solver.
    val points = sparkContext.textFile(params.input).map { line =>
      Vectors.dense(line.split(' ').map(_.toDouble))
    }.cache()

    val pointCount = points.count()
    println(s"numExamples = $pointCount.")

    val kMeansInitMode = params.initializationMode match {
      case Random => KMeans.RANDOM
      case Parallel => KMeans.K_MEANS_PARALLEL
    }

    val kMeansModel = new KMeans()
      .setInitializationMode(kMeansInitMode)
      .setK(params.k)
      .setMaxIterations(params.numIterations)
      .run(points)

    // Sum of squared distances from each point to its nearest cluster center.
    val clusteringCost = kMeansModel.computeCost(points)
    println(s"Total cost = $clusteringCost.")

    sparkContext.stop()
  }
}
// scalastyle:on println
| tophua/spark1.52 | examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala | Scala | apache-2.0 | 4,119 |
package SpMVAccel
import Chisel._
import TidbitsOCM._
import TidbitsDMA._
// Direct-mapped vector cache assembled from a tag memory, a dual-ported
// on-chip data memory and a controller that wires both to the external
// single-port cache interface.
class SimpleDMVectorCache(val p: SpMVAccelWrapperParams) extends Module {
  val io = new SinglePortCacheIF(p)
  val tagMem = Module(new CacheTagMemory(p)).io
  // OCM sized for ocmDepth words of opWidth bits, two ports of opWidth each,
  // with the configured read latency.
  val pOCM = new OCMParameters( p.ocmDepth*p.opWidth, p.opWidth, p.opWidth, 2,
                                p.ocmReadLatency)
  // Use the prebuilt (named) OCM primitive when requested, otherwise a
  // generic asymmetric dual-port RAM.
  val dataMem = Module(if (p.ocmPrebuilt) new OnChipMemory(pOCM, p.ocmName) else
                       new AsymDualPortRAM(pOCM)).io
  val ctlM = Module(new CacheController(p, pOCM))
  val ctl = ctlM.io

  // The controller drives the external interface and owns both data-memory
  // ports and both tag-memory ports.
  io <> ctl.externalIF
  ctl.dataPortA <> dataMem.ports(0)
  ctl.dataPortB <> dataMem.ports(1)
  ctl.tagPortA <> tagMem.portA
  ctl.tagPortB <> tagMem.portB
}
| maltanar/spmv-vector-cache | chisel/cache-old/SimpleDMVectorCache.scala | Scala | bsd-3-clause | 723 |
package katas.groovy.gdcr2012
import org.scalatest.Matchers
import org.junit.Test
/**
 * Game-of-life kata. Fixes over the previous revision:
 *  - Cell.nextStep read the enclosing test's `aliveCell` state instead of the
 *    cell's own fields, so it was wrong for any other Cell instance.
 *  - nextStep only implemented the underpopulation rule; it now implements the
 *    full Conway rules (survive on 2-3 neighbours, birth on exactly 3).
 *  - The last two tests computed their expected value inline instead of
 *    exercising nextStep; they now actually call nextStep.
 */
class Conway3 extends Matchers {
  val aliveCell = new Cell(isAlive = true, amountOfNeighbours = 0)

  @Test def liveCellWithFewerThanTwoNeighboursShouldDieOnTheNextStep() {
    aliveCell.amountOfNeighbours = 1
    aliveCell.nextStep()
    aliveCell.isAlive should equal(false)
  }

  @Test def liveCellWithTwoOrThreeLiveNeighboursShouldLiveOnTheNextStep() {
    aliveCell.amountOfNeighbours = 2
    aliveCell.nextStep()
    aliveCell.isAlive should equal(true)
  }

  @Test def liveCellWithMoreThanThreeNeighboursShouldDieOnTheNextStep() {
    aliveCell.amountOfNeighbours = 4
    aliveCell.nextStep()
    aliveCell.isAlive should equal(false)
  }

  @Test def deadCellWithThreeNeighboursShouldBecomeLiveOnTheNextStep() {
    val deadCell = new Cell(isAlive = false, amountOfNeighbours = 3)
    deadCell.nextStep()
    deadCell.isAlive should equal(true)
  }

  class Cell(var isAlive: Boolean, var amountOfNeighbours: Int) {
    /**
     * Advance one generation using this cell's own state: a live cell survives
     * with 2 or 3 neighbours; a dead cell with exactly 3 neighbours is born.
     */
    def nextStep() {
      isAlive =
        if (isAlive) amountOfNeighbours == 2 || amountOfNeighbours == 3
        else amountOfNeighbours == 3
    }
  }
}
| dkandalov/katas | java/src/main/groovy/katas/groovy/gdcr2012/Conway3.scala | Scala | unlicense | 1,219 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.clustering
import breeze.linalg.{DenseVector => BDV}
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.Since
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.impl.Utils.EPSILON
import org.apache.spark.ml.linalg._
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.stat.distribution.MultivariateGaussian
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.Instrumentation.instrumented
import org.apache.spark.mllib.linalg.{Matrices => OldMatrices, Matrix => OldMatrix,
Vector => OldVector, Vectors => OldVectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.storage.StorageLevel
/**
* Common params for GaussianMixture and GaussianMixtureModel
*/
private[clustering] trait GaussianMixtureParams extends Params with HasMaxIter with HasFeaturesCol
  with HasSeed with HasPredictionCol with HasWeightCol with HasProbabilityCol with HasTol
  with HasAggregationDepth {

  /**
   * Number of independent Gaussians in the mixture model. Must be greater than 1. Default: 2.
   *
   * @group param
   */
  @Since("2.0.0")
  final val k = new IntParam(this, "k", "Number of independent Gaussians in the mixture model. " +
    "Must be > 1.", ParamValidators.gt(1))

  /** @group getParam */
  @Since("2.0.0")
  def getK: Int = $(k)

  /**
   * Validates and transforms the input schema.
   *
   * @param schema input schema; the features column must be vector-compatible
   * @return output schema with the prediction (integer) and probability
   *         (vector) columns appended
   */
  protected def validateAndTransformSchema(schema: StructType): StructType = {
    SchemaUtils.validateVectorCompatibleColumn(schema, getFeaturesCol)
    val schemaWithPredictionCol = SchemaUtils.appendColumn(schema, $(predictionCol), IntegerType)
    SchemaUtils.appendColumn(schemaWithPredictionCol, $(probabilityCol), new VectorUDT)
  }
}
/**
* Multivariate Gaussian Mixture Model (GMM) consisting of k Gaussians, where points
* are drawn from each Gaussian i with probability weights(i).
*
* @param weights Weight for each Gaussian distribution in the mixture.
* This is a multinomial probability distribution over the k Gaussians,
* where weights(i) is the weight for Gaussian i, and weights sum to 1.
* @param gaussians Array of `MultivariateGaussian` where gaussians(i) represents
* the Multivariate Gaussian (Normal) Distribution for Gaussian i
*/
@Since("2.0.0")
class GaussianMixtureModel private[ml] (
@Since("2.0.0") override val uid: String,
@Since("2.0.0") val weights: Array[Double],
@Since("2.0.0") val gaussians: Array[MultivariateGaussian])
extends Model[GaussianMixtureModel] with GaussianMixtureParams with MLWritable
with HasTrainingSummary[GaussianMixtureSummary] {
@Since("3.0.0")
lazy val numFeatures: Int = gaussians.head.mean.size
/** @group setParam */
@Since("2.1.0")
def setFeaturesCol(value: String): this.type = set(featuresCol, value)
/** @group setParam */
@Since("2.1.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
/** @group setParam */
@Since("2.1.0")
def setProbabilityCol(value: String): this.type = set(probabilityCol, value)
@Since("2.0.0")
override def copy(extra: ParamMap): GaussianMixtureModel = {
val copied = copyValues(new GaussianMixtureModel(uid, weights, gaussians), extra)
copied.setSummary(trainingSummary).setParent(this.parent)
}
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
val outputSchema = transformSchema(dataset.schema, logging = true)
val vectorCol = DatasetUtils.columnToVector(dataset, $(featuresCol))
var outputData = dataset
var numColsOutput = 0
if ($(probabilityCol).nonEmpty) {
val probUDF = udf((vector: Vector) => predictProbability(vector))
outputData = outputData.withColumn($(probabilityCol), probUDF(vectorCol),
outputSchema($(probabilityCol)).metadata)
numColsOutput += 1
}
if ($(predictionCol).nonEmpty) {
if ($(probabilityCol).nonEmpty) {
val predUDF = udf((vector: Vector) => vector.argmax)
outputData = outputData.withColumn($(predictionCol), predUDF(col($(probabilityCol))),
outputSchema($(predictionCol)).metadata)
} else {
val predUDF = udf((vector: Vector) => predict(vector))
outputData = outputData.withColumn($(predictionCol), predUDF(vectorCol),
outputSchema($(predictionCol)).metadata)
}
numColsOutput += 1
}
if (numColsOutput == 0) {
this.logWarning(s"$uid: GaussianMixtureModel.transform() does nothing" +
" because no output columns were set.")
}
outputData.toDF
}
@Since("2.0.0")
override def transformSchema(schema: StructType): StructType = {
var outputSchema = validateAndTransformSchema(schema)
if ($(predictionCol).nonEmpty) {
outputSchema = SchemaUtils.updateNumValues(outputSchema,
$(predictionCol), weights.length)
}
if ($(probabilityCol).nonEmpty) {
outputSchema = SchemaUtils.updateAttributeGroupSize(outputSchema,
$(probabilityCol), weights.length)
}
outputSchema
}
@Since("3.0.0")
def predict(features: Vector): Int = {
val r = predictProbability(features)
r.argmax
}
@Since("3.0.0")
def predictProbability(features: Vector): Vector = {
val probs: Array[Double] =
GaussianMixtureModel.computeProbabilities(features.asBreeze.toDenseVector, gaussians, weights)
Vectors.dense(probs)
}
/**
* Retrieve Gaussian distributions as a DataFrame.
* Each row represents a Gaussian Distribution.
* Two columns are defined: mean and cov.
* Schema:
* {{{
* root
* |-- mean: vector (nullable = true)
* |-- cov: matrix (nullable = true)
* }}}
*/
@Since("2.0.0")
def gaussiansDF: DataFrame = {
val modelGaussians = gaussians.map { gaussian =>
(OldVectors.fromML(gaussian.mean), OldMatrices.fromML(gaussian.cov))
}
SparkSession.builder().getOrCreate().createDataFrame(modelGaussians).toDF("mean", "cov")
}
/**
* Returns a [[org.apache.spark.ml.util.MLWriter]] instance for this ML instance.
*
* For [[GaussianMixtureModel]], this does NOT currently save the training [[summary]].
* An option to save [[summary]] may be added in the future.
*
*/
@Since("2.0.0")
override def write: MLWriter = new GaussianMixtureModel.GaussianMixtureModelWriter(this)
@Since("3.0.0")
override def toString: String = {
s"GaussianMixtureModel: uid=$uid, k=${weights.length}, numFeatures=$numFeatures"
}
/**
* Gets summary of model on training set. An exception is
* thrown if `hasSummary` is false.
*/
@Since("2.0.0")
override def summary: GaussianMixtureSummary = super.summary
}
@Since("2.0.0")
object GaussianMixtureModel extends MLReadable[GaussianMixtureModel] {
@Since("2.0.0")
override def read: MLReader[GaussianMixtureModel] = new GaussianMixtureModelReader
@Since("2.0.0")
override def load(path: String): GaussianMixtureModel = super.load(path)
/** [[MLWriter]] instance for [[GaussianMixtureModel]] */
private[GaussianMixtureModel] class GaussianMixtureModelWriter(
instance: GaussianMixtureModel) extends MLWriter {
private case class Data(weights: Array[Double], mus: Array[OldVector], sigmas: Array[OldMatrix])
override protected def saveImpl(path: String): Unit = {
// Save metadata and Params
DefaultParamsWriter.saveMetadata(instance, path, sc)
// Save model data: weights and gaussians
val weights = instance.weights
val gaussians = instance.gaussians
val mus = gaussians.map(g => OldVectors.fromML(g.mean))
val sigmas = gaussians.map(c => OldMatrices.fromML(c.cov))
val data = Data(weights, mus, sigmas)
val dataPath = new Path(path, "data").toString
sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class GaussianMixtureModelReader extends MLReader[GaussianMixtureModel] {
/** Checked against metadata when loading model */
private val className = classOf[GaussianMixtureModel].getName
override def load(path: String): GaussianMixtureModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val row = sparkSession.read.parquet(dataPath).select("weights", "mus", "sigmas").head()
val weights = row.getSeq[Double](0).toArray
val mus = row.getSeq[OldVector](1).toArray
val sigmas = row.getSeq[OldMatrix](2).toArray
require(mus.length == sigmas.length, "Length of Mu and Sigma array must match")
require(mus.length == weights.length, "Length of weight and Gaussian array must match")
val gaussians = mus.zip(sigmas).map {
case (mu, sigma) =>
new MultivariateGaussian(mu.asML, sigma.asML)
}
val model = new GaussianMixtureModel(metadata.uid, weights, gaussians)
metadata.getAndSetParams(model)
model
}
}
/**
* Compute the probability (partial assignment) for each cluster for the given data point.
*
* @param features Data point
* @param dists Gaussians for model
* @param weights Weights for each Gaussian
* @return Probability (partial assignment) for each of the k clusters
*/
private[clustering]
def computeProbabilities(
features: BDV[Double],
dists: Array[MultivariateGaussian],
weights: Array[Double]): Array[Double] = {
val p = weights.zip(dists).map {
case (weight, dist) => EPSILON + weight * dist.pdf(features)
}
val pSum = p.sum
var i = 0
while (i < weights.length) {
p(i) /= pSum
i += 1
}
p
}
}
/**
* Gaussian Mixture clustering.
*
* This class performs expectation maximization for multivariate Gaussian
* Mixture Models (GMMs). A GMM represents a composite distribution of
* independent Gaussian distributions with associated "mixing" weights
* specifying each's contribution to the composite.
*
* Given a set of sample points, this class will maximize the log-likelihood
* for a mixture of k Gaussians, iterating until the log-likelihood changes by
* less than convergenceTol, or until it has reached the max number of iterations.
* While this process is generally guaranteed to converge, it is not guaranteed
* to find a global optimum.
*
* @note This algorithm is limited in its number of features since it requires storing a covariance
* matrix which has size quadratic in the number of features. Even when the number of features does
* not exceed this limit, this algorithm may perform poorly on high-dimensional data.
* This is due to high-dimensional data (a) making it difficult to cluster at all (based
* on statistical/theoretical arguments) and (b) numerical issues with Gaussian distributions.
*/
@Since("2.0.0")
class GaussianMixture @Since("2.0.0") (
@Since("2.0.0") override val uid: String)
extends Estimator[GaussianMixtureModel] with GaussianMixtureParams with DefaultParamsWritable {
setDefault(
k -> 2,
maxIter -> 100,
tol -> 0.01)
@Since("2.0.0")
override def copy(extra: ParamMap): GaussianMixture = defaultCopy(extra)
@Since("2.0.0")
def this() = this(Identifiable.randomUID("GaussianMixture"))
/** @group setParam */
@Since("2.0.0")
def setFeaturesCol(value: String): this.type = set(featuresCol, value)
/** @group setParam */
@Since("2.0.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
/** @group setParam */
@Since("2.0.0")
def setProbabilityCol(value: String): this.type = set(probabilityCol, value)
/** @group setParam */
@Since("3.0.0")
def setWeightCol(value: String): this.type = set(weightCol, value)
/** @group setParam */
@Since("2.0.0")
def setK(value: Int): this.type = set(k, value)
/** @group setParam */
@Since("2.0.0")
def setMaxIter(value: Int): this.type = set(maxIter, value)
/** @group setParam */
@Since("2.0.0")
def setTol(value: Double): this.type = set(tol, value)
/** @group setParam */
@Since("2.0.0")
def setSeed(value: Long): this.type = set(seed, value)
/** @group expertSetParam */
@Since("3.0.0")
def setAggregationDepth(value: Int): this.type = set(aggregationDepth, value)
/**
* Number of samples per cluster to use when initializing Gaussians.
*/
private val numSamples = 5
  @Since("2.0.0")
  override def fit(dataset: Dataset[_]): GaussianMixtureModel = instrumented { instr =>
    transformSchema(dataset.schema, logging = true)
    val spark = dataset.sparkSession
    import spark.implicits._
    val numFeatures = MetadataUtils.getNumFeatures(dataset, $(featuresCol))
    // Each component covariance has numFeatures^2 entries, so cap the feature count.
    require(numFeatures < GaussianMixture.MAX_NUM_FEATURES, s"GaussianMixture cannot handle more " +
      s"than ${GaussianMixture.MAX_NUM_FEATURES} features because the size of the covariance" +
      s" matrix is quadratic in the number of features.")
    // Only cache the input ourselves when the caller hasn't already persisted it.
    val handlePersistence = dataset.storageLevel == StorageLevel.NONE
    // Instance weights: use the weight column when configured, otherwise weight each row 1.0.
    val w = if (isDefined(weightCol) && $(weightCol).nonEmpty) {
      col($(weightCol)).cast(DoubleType)
    } else {
      lit(1.0)
    }
    val instances = dataset.select(DatasetUtils.columnToVector(dataset, $(featuresCol)), w)
      .as[(Vector, Double)]
      .rdd
    if (handlePersistence) {
      instances.persist(StorageLevel.MEMORY_AND_DISK)
    }
    val sc = spark.sparkContext
    val numClusters = $(k)
    instr.logPipelineStage(this)
    instr.logDataset(dataset)
    instr.logParams(this, featuresCol, predictionCol, probabilityCol, weightCol, k, maxIter,
      seed, tol, aggregationDepth)
    instr.logNumFeatures(numFeatures)
    // Heuristic: for wide data, run the per-cluster M-step updates as a distributed job.
    val shouldDistributeGaussians = GaussianMixture.shouldDistributeGaussians(
      numClusters, numFeatures)
    // TODO: SPARK-15785 Support users supplied initial GMM.
    val (weights, gaussians) = initRandom(instances, numClusters, numFeatures)
    var logLikelihood = Double.MinValue
    var logLikelihoodPrev = 0.0
    var iter = 0
    // EM loop: stop at maxIter or when the log-likelihood improvement drops below tol.
    while (iter < $(maxIter) && math.abs(logLikelihood - logLikelihoodPrev) > $(tol)) {
      val bcWeights = sc.broadcast(weights)
      val bcGaussians = sc.broadcast(gaussians)
      // E-step: aggregate each cluster's contribution over all sample points.
      val sums = instances.treeAggregate(
        new ExpectationAggregator(numFeatures, bcWeights, bcGaussians))(
        seqOp = (c: ExpectationAggregator, v: (Vector, Double)) => c.add(v._1, v._2),
        combOp = (c1: ExpectationAggregator, c2: ExpectationAggregator) => c1.merge(c2),
        depth = $(aggregationDepth))
      // Release per-iteration broadcasts eagerly.
      bcWeights.destroy()
      bcGaussians.destroy()
      if (iter == 0) {
        instr.logNumExamples(sums.count)
        instr.logSumOfWeights(sums.weights.sum)
      }
      /*
         Create new distributions based on the partial assignments
         (often referred to as the "M" step in literature)
       */
      val sumWeights = sums.weights.sum
      if (shouldDistributeGaussians) {
        // Distributed M-step: one task per cluster (capped at 1024 partitions),
        // results copied back into the driver-side arrays in place.
        val numPartitions = math.min(numClusters, 1024)
        val tuples = Seq.tabulate(numClusters) { i =>
          (sums.means(i), sums.covs(i), sums.weights(i))
        }
        val (ws, gs) = sc.parallelize(tuples, numPartitions).map { case (mean, cov, weight) =>
          GaussianMixture.updateWeightsAndGaussians(mean, cov, weight, sumWeights)
        }.collect().unzip
        Array.copy(ws, 0, weights, 0, ws.length)
        Array.copy(gs, 0, gaussians, 0, gs.length)
      } else {
        // Local M-step: update every cluster on the driver.
        var i = 0
        while (i < numClusters) {
          val (weight, gaussian) = GaussianMixture.updateWeightsAndGaussians(
            sums.means(i), sums.covs(i), sums.weights(i), sumWeights)
          weights(i) = weight
          gaussians(i) = gaussian
          i += 1
        }
      }
      logLikelihoodPrev = logLikelihood // current becomes previous
      logLikelihood = sums.logLikelihood // this is the freshly computed log-likelihood
      iter += 1
    }
    if (handlePersistence) {
      instances.unpersist()
    }
    // Unpack each packed upper-triangular covariance into a full symmetric matrix.
    val gaussianDists = gaussians.map { case (mean, covVec) =>
      val cov = GaussianMixture.unpackUpperTriangularMatrix(numFeatures, covVec.values)
      new MultivariateGaussian(mean, cov)
    }
    val model = copyValues(new GaussianMixtureModel(uid, weights, gaussianDists)).setParent(this)
    val summary = new GaussianMixtureSummary(model.transform(dataset),
      $(predictionCol), $(probabilityCol), $(featuresCol), $(k), logLikelihood, iter)
    instr.logNamedValue("logLikelihood", logLikelihood)
    instr.logNamedValue("clusterSizes", summary.clusterSizes)
    model.setSummary(Some(summary))
  }
  @Since("2.0.0")
  override def transformSchema(schema: StructType): StructType = {
    // Delegates schema validation/augmentation to the shared params-trait helper.
    validateAndTransformSchema(schema)
  }
  /**
   * Initialize weights and corresponding gaussian distributions at random.
   *
   * We start with uniform weights, a random mean from the data, and diagonal covariance matrices
   * using component variances derived from the samples.
   *
   * @param instances The training instances.
   * @param numClusters The number of clusters.
   * @param numFeatures The number of features of training instance.
   * @return The initialized weights and corresponding gaussian distributions. Note the
   *         covariance matrix of multivariate gaussian distribution is symmetric and
   *         we only save the upper triangular part as a dense vector (column major).
   */
  private def initRandom(
      instances: RDD[(Vector, Double)],
      numClusters: Int,
      numFeatures: Int): (Array[Double], Array[(DenseVector, DenseVector)]) = {
    // Draw numSamples points (with replacement) per cluster in a single pass.
    val (samples, sampleWeights) = instances
      .takeSample(withReplacement = true, numClusters * numSamples, $(seed))
      .unzip
    val weights = new Array[Double](numClusters)
    val weightSum = sampleWeights.sum
    val gaussians = Array.tabulate(numClusters) { i =>
      // Cluster i is initialized from its own contiguous slice of the sample.
      val start = i * numSamples
      val end = start + numSamples
      val sampleSlice = samples.view(start, end)
      val weightSlice = sampleWeights.view(start, end)
      val localWeightSum = weightSlice.sum
      // Initial mixing weight: this slice's share of the total sample weight.
      weights(i) = localWeightSum / weightSum
      // Weighted mean of the slice: v = sum(w_j * x_j) / sum(w_j).
      val mean = {
        val v = new DenseVector(new Array[Double](numFeatures))
        var j = 0
        while (j < numSamples) {
          BLAS.axpy(weightSlice(j), sampleSlice(j), v)
          j += 1
        }
        BLAS.scal(1.0 / localWeightSum, v)
        v
      }
      /*
         Construct matrix where diagonal entries are element-wise
         variance of input vectors (computes biased variance).
         Since the covariance matrix of multivariate gaussian distribution is symmetric,
         only the upper triangular part of the matrix (column major) will be saved as
         a dense vector in order to reduce the shuffled data size.
       */
      val cov = {
        val ss = new DenseVector(new Array[Double](numFeatures)).asBreeze
        var j = 0
        while (j < numSamples) {
          val v = sampleSlice(j).asBreeze - mean.asBreeze
          ss += (v * v) * weightSlice(j)
          j += 1
        }
        val diagVec = Vectors.fromBreeze(ss)
        BLAS.scal(1.0 / localWeightSum, diagVec)
        val covVec = new DenseVector(Array.ofDim[Double](numFeatures * (numFeatures + 1) / 2))
        diagVec.toArray.zipWithIndex.foreach { case (v: Double, i: Int) =>
          // i + i * (i + 1) / 2 is the packed (column-major upper-triangular)
          // offset of diagonal element (i, i).
          covVec.values(i + i * (i + 1) / 2) = v
        }
        covVec
      }
      (mean, cov)
    }
    (weights, gaussians)
  }
}
@Since("2.0.0")
object GaussianMixture extends DefaultParamsReadable[GaussianMixture] {
  /** Limit number of features such that numFeatures^2^ < Int.MaxValue */
  private[clustering] val MAX_NUM_FEATURES = math.sqrt(Int.MaxValue).toInt
  @Since("2.0.0")
  override def load(path: String): GaussianMixture = super.load(path)
  /**
   * Heuristic to distribute the computation of the [[MultivariateGaussian]]s, approximately when
   * numFeatures > 25 except for when numClusters is very small.
   *
   * @param numClusters  Number of clusters
   * @param numFeatures  Number of features
   */
  private[clustering] def shouldDistributeGaussians(
      numClusters: Int,
      numFeatures: Int): Boolean = {
    ((numClusters - 1.0) / numClusters) * numFeatures > 25.0
  }
  /**
   * Convert an n * (n + 1) / 2 dimension array representing the upper triangular part of a matrix
   * into an n * n array representing the full symmetric matrix (column major).
   *
   * @param n The order of the n by n matrix.
   * @param triangularValues The upper triangular part of the matrix packed in an array
   *                         (column major).
   * @return A dense matrix which represents the symmetric matrix in column major.
   */
  private[clustering] def unpackUpperTriangularMatrix(
      n: Int,
      triangularValues: Array[Double]): DenseMatrix = {
    val symmetricValues = new Array[Double](n * n)
    var r = 0
    var i = 0
    while (i < n) {
      var j = 0
      while (j <= i) {
        // Mirror each packed value into both (i, j) and (j, i).
        symmetricValues(i * n + j) = triangularValues(r)
        symmetricValues(j * n + i) = triangularValues(r)
        r += 1
        j += 1
      }
      i += 1
    }
    new DenseMatrix(n, n, symmetricValues)
  }
  /**
   * Update the weight, mean and covariance of gaussian distribution.
   *
   * Note: the BLAS calls below update their vector arguments in place, so the
   * `mean` and `cov` passed in are mutated and returned as the new gaussian.
   *
   * @param mean The mean of the gaussian distribution.
   * @param cov The covariance matrix of the gaussian distribution. Note we only
   *            save the upper triangular part as a dense vector (column major).
   * @param weight The weight of the gaussian distribution.
   * @param sumWeights The sum of weights of all clusters.
   * @return The updated weight, mean and covariance.
   */
  private[clustering] def updateWeightsAndGaussians(
      mean: DenseVector,
      cov: DenseVector,
      weight: Double,
      sumWeights: Double): (Double, (DenseVector, DenseVector)) = {
    BLAS.scal(1.0 / weight, mean) // mean := sum(w * x) / sum(w)
    BLAS.spr(-weight, mean, cov) // cov := cov - sum(w) * mean * mean^T (packed)
    BLAS.scal(1.0 / weight, cov) // cov := cov / sum(w)
    val newWeight = weight / sumWeights
    val newGaussian = (mean, cov)
    (newWeight, newGaussian)
  }
}
/**
 * ExpectationAggregator computes the partial expectation results.
 *
 * @param numFeatures The number of features.
 * @param bcWeights The broadcast weights for each Gaussian distribution in the mixture.
 * @param bcGaussians The broadcast array of Multivariate Gaussian (Normal) Distribution
 *                    in the mixture. Note only upper triangular part of the covariance
 *                    matrix of each distribution is stored as dense vector (column major)
 *                    in order to reduce shuffled data size.
 */
private class ExpectationAggregator(
    numFeatures: Int,
    bcWeights: Broadcast[Array[Double]],
    bcGaussians: Broadcast[Array[(DenseVector, DenseVector)]]) extends Serializable {
  private val k: Int = bcWeights.value.length
  private var totalCnt: Long = 0L
  private var newLogLikelihood: Double = 0.0
  // Accumulation buffers are lazy, so they are only allocated where the aggregator
  // is actually used (e.g. after deserialization on an executor).
  private lazy val newWeights: Array[Double] = Array.ofDim[Double](k)
  private lazy val newMeans: Array[DenseVector] = Array.fill(k)(
    new DenseVector(Array.ofDim[Double](numFeatures)))
  private lazy val newCovs: Array[DenseVector] = Array.fill(k)(
    new DenseVector(Array.ofDim[Double](numFeatures * (numFeatures + 1) / 2)))
  // @transient + lazy: the unpacked gaussians are rebuilt from the broadcast on
  // first use rather than serialized with the aggregator.
  @transient private lazy val oldGaussians = {
    bcGaussians.value.map { case (mean, covVec) =>
      val cov = GaussianMixture.unpackUpperTriangularMatrix(numFeatures, covVec.values)
      new MultivariateGaussian(mean, cov)
    }
  }
  def count: Long = totalCnt
  def logLikelihood: Double = newLogLikelihood
  def weights: Array[Double] = newWeights
  def means: Array[DenseVector] = newMeans
  def covs: Array[DenseVector] = newCovs
  /**
   * Add a new training instance to this ExpectationAggregator, update the weights,
   * means and covariances for each distributions, and update the log likelihood.
   *
   * @param instance The instance of data point to be added.
   * @param weight The instance weight.
   * @return This ExpectationAggregator object.
   */
  def add(instance: Vector, weight: Double): this.type = {
    val localWeights = bcWeights.value
    val localOldGaussians = oldGaussians
    // Unnormalized responsibilities: prob(i) = EPSILON + w_i * pdf_i(x).
    // EPSILON keeps probSum strictly positive so math.log below is defined.
    val prob = new Array[Double](k)
    var probSum = 0.0
    var i = 0
    while (i < k) {
      val p = EPSILON + localWeights(i) * localOldGaussians(i).pdf(instance)
      prob(i) = p
      probSum += p
      i += 1
    }
    // Weighted log-likelihood contribution of this instance.
    newLogLikelihood += math.log(probSum) * weight
    val localNewWeights = newWeights
    val localNewMeans = newMeans
    val localNewCovs = newCovs
    i = 0
    while (i < k) {
      // Normalized responsibility of cluster i, scaled by the instance weight.
      val w = prob(i) / probSum * weight
      localNewWeights(i) += w
      BLAS.axpy(w, instance, localNewMeans(i))
      BLAS.spr(w, instance, localNewCovs(i))
      i += 1
    }
    totalCnt += 1
    this
  }
  /**
   * Merge another ExpectationAggregator, update the weights, means and covariances
   * for each distributions, and update the log likelihood.
   * (Note that it's in place merging; as a result, `this` object will be modified.)
   *
   * @param other The other ExpectationAggregator to be merged.
   * @return This ExpectationAggregator object.
   */
  def merge(other: ExpectationAggregator): this.type = {
    // Skip empty aggregators so their lazy buffers are never forced.
    if (other.count != 0) {
      totalCnt += other.totalCnt
      val localThisNewWeights = this.newWeights
      val localOtherNewWeights = other.newWeights
      val localThisNewMeans = this.newMeans
      val localOtherNewMeans = other.newMeans
      val localThisNewCovs = this.newCovs
      val localOtherNewCovs = other.newCovs
      var i = 0
      while (i < k) {
        localThisNewWeights(i) += localOtherNewWeights(i)
        BLAS.axpy(1.0, localOtherNewMeans(i), localThisNewMeans(i))
        BLAS.axpy(1.0, localOtherNewCovs(i), localThisNewCovs(i))
        i += 1
      }
      newLogLikelihood += other.newLogLikelihood
    }
    this
  }
}
/**
 * Summary of GaussianMixture.
 *
 * @param predictions  `DataFrame` produced by `GaussianMixtureModel.transform()`.
 * @param predictionCol  Name for column of predicted clusters in `predictions`.
 * @param probabilityCol  Name for column of predicted probability of each cluster
 *                        in `predictions`.
 * @param featuresCol  Name for column of features in `predictions`.
 * @param k  Number of clusters.
 * @param logLikelihood  Total log-likelihood for this model on the given data.
 * @param numIter  Number of iterations.
 */
@Since("2.0.0")
class GaussianMixtureSummary private[clustering] (
    predictions: DataFrame,
    predictionCol: String,
    @Since("2.0.0") val probabilityCol: String,
    featuresCol: String,
    k: Int,
    @Since("2.2.0") val logLikelihood: Double,
    numIter: Int)
  extends ClusteringSummary(predictions, predictionCol, featuresCol, k, numIter) {
  /**
   * Probability of each cluster, i.e. the `probabilityCol` column of `predictions`
   * (computed lazily; not serialized with the summary).
   */
  @Since("2.0.0")
  @transient lazy val probability: DataFrame = predictions.select(probabilityCol)
}
| jkbradley/spark | mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala | Scala | apache-2.0 | 28,031 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.laws._
import cats.laws.discipline._
import monix.eval.{Coeval, Task}
import monix.execution.cancelables.BooleanCancelable
import monix.execution.exceptions.DummyException
import monix.execution.internal.Platform
import monix.tail.Iterant.Suspend
import monix.tail.batches.{Batch, BatchCursor}
import org.scalacheck.Test
import org.scalacheck.Test.Parameters
object IterantTakeSuite extends BaseTestSuite {
  // Larger ScalaCheck generator size on the JVM; keep it small on JS.
  override lazy val checkConfig: Parameters = {
    if (Platform.isJVM)
      Test.Parameters.default.withMaxSize(256)
    else
      Test.Parameters.default.withMaxSize(32)
  }
  // Property: stream.take(n) produces the same elements as List.take(n).
  test("Iterant[Task].take equivalence with List.take") { implicit s =>
    check3 { (list: List[Int], idx: Int, nr: Int) =>
      val iter = arbitraryListToIterant[Task, Int](list, math.abs(idx) + 1, allowErrors = false)
      val stream = iter ++ Iterant[Task].of(1, 2, 3)
      val length = list.length
      // Clamp the generated count to a non-negative index within range.
      val n =
        if (nr == 0) 0
        else if (length == 0) math.abs(nr)
        else math.abs(math.abs(nr) % length)
      stream.take(n).toListL <-> stream.toListL.map(_.take(n))
    }
  }
  // Property: `take` must trigger the `guarantee` finalizer once done.
  test("Iterant[Coeval].take releases resources") { implicit s =>
    check3 { (list: List[Int], idx: Int, nr: Int) =>
      val cancelable = BooleanCancelable()
      val stream = arbitraryListToIterant[Coeval, Int](list, math.abs(idx) + 1)
        .onErrorIgnore
        .guarantee(Coeval.eval(cancelable.cancel()))
      val length = list.length
      // Pick n in [1, length] (or a large value for empty input) so take terminates.
      var n = if (length == 0) 1000 else Math.floorMod(nr, length + 1)
      if (n <= 0) n = 1
      stream.take(n).toListL.value == list.take(n) &&
        cancelable.isCanceled
    }
  }
  // A batch that throws must surface as a terminal error, not crash the pipeline.
  test("Iterant.take protects against broken batches") { implicit s =>
    check1 { (iter: Iterant[Task, Int]) =>
      val dummy = DummyException("dummy")
      val suffix = Iterant[Task].nextBatchS[Int](new ThrowExceptionBatch(dummy), Task.now(Iterant[Task].empty))
      val stream = iter.onErrorIgnore ++ suffix
      val received = stream.take(Int.MaxValue)
      received <-> iter.onErrorIgnore ++ Iterant[Task].haltS[Int](Some(dummy))
    }
  }
  // Same protection for a throwing cursor.
  test("Iterant.take protects against broken cursors") { implicit s =>
    check1 { (iter: Iterant[Task, Int]) =>
      val dummy = DummyException("dummy")
      val suffix = Iterant[Task].nextCursorS[Int](new ThrowExceptionCursor(dummy), Task.now(Iterant[Task].empty))
      val stream = iter.onErrorIgnore ++ suffix
      val received = stream.take(Int.MaxValue)
      received <-> iter.onErrorIgnore ++ Iterant[Task].haltS[Int](Some(dummy))
    }
  }
  // Even when the stream fails, the guarantee finalizer must still run.
  test("Iterant.take releases resources on exception") { _ =>
    check1 { (iter: Iterant[Coeval, Int]) =>
      val cancelable = BooleanCancelable()
      val dummy = DummyException("dummy")
      val suffix = Iterant[Coeval].nextCursorS[Int](new ThrowExceptionCursor(dummy), Coeval.now(Iterant[Coeval].empty))
      val stream = (iter.onErrorIgnore ++ suffix).guarantee(Coeval.eval(cancelable.cancel()))
      intercept[DummyException] { stream.take(Int.MaxValue).toListL.value() }
      cancelable.isCanceled
    }
  }
  // `take` on NextBatch/NextCursor must not evaluate eagerly.
  test("Iterant.take suspends execution for NextCursor or NextBatch") { _ =>
    val iter1 = Iterant[Coeval].nextBatchS(Batch(1,2,3), Coeval.now(Iterant[Coeval].empty[Int]))
    assert(iter1.take(2).isInstanceOf[Suspend[Coeval, Int]], "NextBatch should be suspended")
    assertEquals(iter1.take(2).toListL.value(), List(1, 2))
    val iter2 = Iterant[Coeval].nextCursorS(BatchCursor(1,2,3), Coeval.now(Iterant[Coeval].empty[Int]))
    assert(iter2.take(2).isInstanceOf[Suspend[Coeval, Int]], "NextCursor should be suspended")
    assertEquals(iter2.take(2).toListL.value(), List(1, 2))
  }
  // The source's finalizer must run exactly once after a full consumption via take.
  test("Iterant.take preserves the source earlyStop") { implicit s =>
    var effect = 0
    val source = Iterant[Coeval].nextCursorS(BatchCursor(1,2,3), Coeval.now(Iterant[Coeval].empty[Int]))
      .guarantee(Coeval.eval(effect += 1))
    val stream = source.take(3)
    stream.completedL.value()
    assertEquals(effect, 1)
  }
}
| Wogan/monix | monix-tail/shared/src/test/scala/monix/tail/IterantTakeSuite.scala | Scala | apache-2.0 | 4,699 |
//
// MessagePack for Java
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package org.msgpack.core
import org.msgpack.value.Value
import org.msgpack.value.holder.ValueHolder
import scala.util.Random
import MessagePack.Code
import java.io.ByteArrayOutputStream
import java.math.BigInteger
import java.nio.CharBuffer
import java.nio.charset.{UnmappableCharacterException, CodingErrorAction}
/**
* Created on 2014/05/07.
*/
class MessagePackTest extends MessagePackSpec {
  /** Returns true when `s` can be encoded as UTF-8 without loss. */
  def isValidUTF8(s: String) = {
    MessagePack.UTF8.newEncoder().canEncode(s)
  }
  /**
   * Returns true when encoding `s` as UTF-8 with REPORT error action raises
   * [[UnmappableCharacterException]]. Any other exception (including malformed
   * input) yields false by design.
   */
  def containsUnmappableCharacter(s: String) : Boolean = {
    try {
      MessagePack.UTF8.newEncoder().onUnmappableCharacter(CodingErrorAction.REPORT).encode(CharBuffer.wrap(s))
      false
    }
    catch {
      case e: UnmappableCharacterException =>
        true
      case _: Exception => false
    }
  }
"MessagePack" should {
"detect fixint values" in {
for (i <- 0 until 0x79) {
Code.isPosFixInt(i.toByte) shouldBe true
}
for (i <- 0x80 until 0xFF) {
Code.isPosFixInt(i.toByte) shouldBe false
}
}
"detect fixint quickly" in {
val N = 100000
val idx = (0 until N).map(x => Random.nextInt(256).toByte).toArray[Byte]
time("check fixint", repeat = 100) {
block("mask") {
var i = 0
var count = 0
while (i < N) {
if ((idx(i) & Code.POSFIXINT_MASK) == 0) {
count += 1
}
i += 1
}
}
block("mask in func") {
var i = 0
var count = 0
while (i < N) {
if (Code.isPosFixInt(idx(i))) {
count += 1
}
i += 1
}
}
block("shift cmp") {
var i = 0
var count = 0
while (i < N) {
if ((idx(i) >>> 7) == 0) {
count += 1
}
i += 1
}
}
}
}
"detect neg fix int values" in {
for (i <- 0 until 0xe0) {
Code.isNegFixInt(i.toByte) shouldBe false
}
for (i <- 0xe0 until 0xFF) {
Code.isNegFixInt(i.toByte) shouldBe true
}
}
    /**
     * Round-trip helper: packs `v` with `pack`, unpacks with `unpack`, and asserts
     * the result equals `v`. On failure, logs the message and a hex dump of the
     * packed bytes before rethrowing so the failing payload is visible.
     */
    def check[A](v: A, pack: MessagePacker => Unit, unpack: MessageUnpacker => A, msgpack:MessagePack = MessagePack.DEFAULT): Unit = {
      var b: Array[Byte] = null
      try {
        val bs = new ByteArrayOutputStream()
        val packer = msgpack.newPacker(bs)
        pack(packer)
        packer.close()
        b = bs.toByteArray
        val unpacker = msgpack.newUnpacker(b)
        val ret = unpack(unpacker)
        ret shouldBe v
      }
      catch {
        case e: Exception =>
          warn(e.getMessage)
          if (b != null)
            warn(s"packed data (size:${b.length}): ${toHex(b)}")
          throw e
      }
    }
    /**
     * Round-trip helper that expects `unpack` to throw: packs `v`, unpacks it, and
     * fails the test if no exception escapes. Callers catch the specific expected
     * exception type around this call.
     */
    def checkException[A](v: A, pack: MessagePacker => Unit, unpack: MessageUnpacker => A, msgpack:MessagePack=MessagePack.DEFAULT) : Unit = {
      var b: Array[Byte] = null
      val bs = new ByteArrayOutputStream()
      val packer = msgpack.newPacker(bs)
      pack(packer)
      packer.close()
      b = bs.toByteArray
      val unpacker = msgpack.newUnpacker(b)
      val ret = unpack(unpacker)
      fail("cannot not reach here")
    }
def checkOverflow[A](v: A, pack: MessagePacker => Unit, unpack: MessageUnpacker => A) {
try {
checkException[A](v, pack, unpack)
}
catch {
case e:MessageIntegerOverflowException => // OK
}
}
"pack/unpack primitive values" taggedAs("prim") in {
forAll { (v: Boolean) => check(v, _.packBoolean(v), _.unpackBoolean)}
forAll { (v: Byte) => check(v, _.packByte(v), _.unpackByte)}
forAll { (v: Short) => check(v, _.packShort(v), _.unpackShort)}
forAll { (v: Int) => check(v, _.packInt(v), _.unpackInt)}
forAll { (v: Float) => check(v, _.packFloat(v), _.unpackFloat)}
forAll { (v: Long) => check(v, _.packLong(v), _.unpackLong)}
forAll { (v: Double) => check(v, _.packDouble(v), _.unpackDouble)}
check(null, _.packNil, _.unpackNil())
}
"pack/unpack integer values" taggedAs("int") in {
val sampleData = Seq[Long](Int.MinValue.toLong - 10, -65535, -8191, -1024, -255, -127, -63, -31, -15, -7, -3, -1, 0, 2, 4, 8, 16, 32, 64, 128, 256, 1024, 8192, 65536, Int.MaxValue.toLong + 10)
for(v <- sampleData) {
check(v, _.packLong(v), _.unpackLong)
if(v.isValidInt) {
val vi = v.toInt
check(vi, _.packInt(vi), _.unpackInt)
}
else {
checkOverflow(v, _.packLong(v), _.unpackInt)
}
if(v.isValidShort) {
val vi = v.toShort
check(vi, _.packShort(vi), _.unpackShort)
}
else {
checkOverflow(v, _.packLong(v), _.unpackShort)
}
if(v.isValidByte) {
val vi = v.toByte
check(vi, _.packByte(vi), _.unpackByte)
}
else {
checkOverflow(v, _.packLong(v), _.unpackByte)
}
}
}
"pack/unpack BigInteger" taggedAs("bi") in {
forAll { (a: Long) =>
val v = BigInteger.valueOf(a)
check(v, _.packBigInteger(v), _.unpackBigInteger)
}
for(bi <- Seq(BigInteger.valueOf(Long.MaxValue).add(BigInteger.valueOf(1)))) {
check(bi, _.packBigInteger(bi), _.unpackBigInteger())
}
for(bi <- Seq(BigInteger.valueOf(Long.MaxValue).shiftLeft(10))) {
try {
checkException(bi, _.packBigInteger(bi), _.unpackBigInteger())
fail("cannot reach here")
}
catch {
case e:IllegalArgumentException => // OK
}
}
}
"pack/unpack strings" taggedAs ("string") in {
forAll { (v: String) =>
whenever(isValidUTF8(v)) {
check(v, _.packString(v), _.unpackString)
}
}
}
"pack/unpack large strings" taggedAs ("large-string") in {
// Large string
val strLen = Seq(1000, 2000, 10000, 50000, 100000, 500000)
for(l <- strLen) {
val v : String = Iterator.continually(Random.nextString(l * 10)).find(isValidUTF8).get
check(v, _.packString(v), _.unpackString)
}
}
"report errors when packing/unpacking malformed strings" taggedAs("malformed") in {
// TODO produce malformed utf-8 strings in Java8"
pending
// Create 100 malformed UTF8 Strings
val r = new Random(0)
val malformedStrings = Iterator.continually {
val b = new Array[Byte](10)
r.nextBytes(b)
b
}
.filter(b => !isValidUTF8(new String(b))).take(100)
for (malformedBytes <- malformedStrings) {
// Pack tests
val malformed = new String(malformedBytes)
try {
checkException(malformed, _.packString(malformed), _.unpackString())
}
catch {
case e: MessageStringCodingException => // OK
}
try {
checkException(malformed, { packer =>
packer.packRawStringHeader(malformedBytes.length)
packer.writePayload(malformedBytes)
},
_.unpackString())
}
catch {
case e: MessageStringCodingException => // OK
}
}
}
"report errors when packing/unpacking strings that contain unmappable characters" taggedAs("unmap") in {
val unmappable = Array[Byte](0xfc.toByte, 0x0a.toByte)
//val unmappableChar = Array[Char](new Character(0xfc0a).toChar)
// Report error on unmappable character
val config = new MessagePack.ConfigBuilder().onMalFormedInput(CodingErrorAction.REPORT).onUnmappableCharacter(CodingErrorAction.REPORT).build()
val msgpack = new MessagePack(config)
for(bytes <- Seq(unmappable)) {
When("unpacking")
try {
checkException(bytes,
{ packer =>
packer.packRawStringHeader(bytes.length)
packer.writePayload(bytes)
},
_.unpackString(),
msgpack)
}
catch {
case e:MessageStringCodingException => // OK
}
// When("packing")
// try {
// val s = new String(unmappableChar)
// checkException(s, _.packString(s), _.unpackString())
// }
// catch {
// case e:MessageStringCodingException => // OK
// }
}
}
"pack/unpack binary" taggedAs ("binary") in {
forAll { (v: Array[Byte]) =>
check(v, { packer => packer.packBinaryHeader(v.length); packer.writePayload(v)}, { unpacker =>
val len = unpacker.unpackBinaryHeader()
val out = new Array[Byte](len)
unpacker.readPayload(out, 0, len)
out
}
)
}
val len = Seq(1000, 2000, 10000, 50000, 100000, 500000)
for(l <- len) {
val v = new Array[Byte](l)
Random.nextBytes(v)
check(v, { packer => packer.packBinaryHeader(v.length); packer.writePayload(v)}, { unpacker =>
val len = unpacker.unpackBinaryHeader()
val out = new Array[Byte](len)
unpacker.readPayload(out, 0, len)
out
}
)
}
}
val testHeaderLength = Seq(1, 2, 4, 8, 16, 17, 32, 64, 255, 256, 1000, 2000, 10000, 50000, 100000, 500000)
"pack/unpack arrays" taggedAs ("array") in {
forAll { (v: Array[Int]) =>
check(v, { packer =>
packer.packArrayHeader(v.length)
v.map(packer.packInt(_))
}, { unpacker =>
val len = unpacker.unpackArrayHeader()
val out = new Array[Int](len)
for (i <- 0 until v.length)
out(i) = unpacker.unpackInt
out
}
)
}
for(l <- testHeaderLength) {
check(l, _.packArrayHeader(l), _.unpackArrayHeader())
}
try {
checkException(0, _.packArrayHeader(-1), _.unpackArrayHeader)
}
catch {
case e: IllegalArgumentException => // OK
}
}
"pack/unpack maps" taggedAs ("map") in {
forAll { (v: Array[Int]) =>
val m = v.map(i => (i, i.toString))
check(m, { packer =>
packer.packMapHeader(v.length)
m.map { case (k: Int, v: String) =>
packer.packInt(k)
packer.packString(v)
}
}, { unpacker =>
val len = unpacker.unpackMapHeader()
val b = Seq.newBuilder[(Int, String)]
for (i <- 0 until len)
b += ((unpacker.unpackInt, unpacker.unpackString))
b.result
}
)
}
for(l <- testHeaderLength) {
check(l, _.packMapHeader(l), _.unpackMapHeader())
}
try {
checkException(0, _.packMapHeader(-1), _.unpackMapHeader)
}
catch {
case e: IllegalArgumentException => // OK
}
}
"pack/unpack extended types" taggedAs("ext") in {
forAll { (dataLen: Int, tpe: Int) =>
val l = Math.abs(dataLen)
val t = Math.abs(tpe) % 128
whenever(l >= 0) {
val ext = new ExtendedTypeHeader(l, t)
check(ext, _.packExtendedTypeHeader(ext.getType, ext.getLength), _.unpackExtendedTypeHeader())
}
}
for(l <- testHeaderLength) {
val ext = new ExtendedTypeHeader(l, Random.nextInt(128))
check(ext, _.packExtendedTypeHeader(ext.getType, ext.getLength), _.unpackExtendedTypeHeader())
}
}
"pack/unpack maps in lists" in {
val aMap = List(Map("f" -> "x"))
check(aMap, { packer =>
packer.packArrayHeader(aMap.size)
for (m <- aMap) {
packer.packMapHeader(m.size)
for ((k, v) <- m) {
packer.packString(k)
packer.packString(v)
}
}
}, { unpacker =>
val holder = new ValueHolder()
unpacker.unpackValue(holder)
val v = holder.get()
v.asArrayValue().toValueArray.map { m =>
val mv = m.asMapValue()
val kvs = mv.toKeyValueSeq
kvs.grouped(2).map({ kvp: Array[Value] =>
val k = kvp(0)
val v = kvp(1)
(k.asString().toString, v.asString().toString)
}).toMap
}.toList
})
}
}
} | xerial/msgpack-java | msgpack-core/src/test/scala/org/msgpack/core/MessagePackTest.scala | Scala | apache-2.0 | 12,832 |
package openstackApi
import akka.actor.{ActorSystem, Props}
import openstackApi.api.AkkaHttpService
object AkkaHttpServiceBoot extends App {
  // Boot the actor system that hosts the HTTP routing layer.
  implicit val system: ActorSystem = ActorSystem("AkkaHttpSystem")

  // Top-level actor responsible for serving incoming HTTP requests.
  val serviceActor =
    system.actorOf(Props(new AkkaHttpService(system)), name = "Akka-Http-Routing")

  // Emit a final log line once the actor system has terminated.
  system.registerOnTermination {
    system.log.info("Actor per request demo shutdown.")
  }
}
| Spirals-Team/ermis | src/main/scala/openstackApi/AkkaHttpServiceBoot.scala | Scala | agpl-3.0 | 530 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.annotation._
import leon.lang._
// GenC negative-test fixture: the `==` comparison between case-class values below
// is the construct this test expects the C code generator to reject. Do not
// "simplify" the comparison away — its presence is the point of the test.
object OperatorEquals {
  case class T(x: Int)
  def _main(): Int = {
    val x = T(42)
    val y = T(42)
    // Custom operator `==` not supported
    if (x == y) 0
    else 1
  }
  // @extern keeps the JVM entry point out of the verified/generated code.
  @extern
  def main(args: Array[String]): Unit = _main()
}
| regb/leon | src/test/resources/regression/genc/invalid/OperatorEquals.scala | Scala | gpl-3.0 | 333 |
package services
import akka.actor._
import pdi.jwt._
import scala.util._
import com.softwaremill.tagging._
import akka.http.scaladsl.model.Uri
import akka.util.Timeout
import scala.concurrent.duration._
import domain.InstanceAggregate
import domain.InstanceAggregate.Start
import akka.event.LoggingReceive
import utils.IdUtils
import domain.SchedulerAggregate
import akka.cluster.sharding.ClusterSharding
import akka.cluster.sharding.ShardRegion
import akka.cluster.sharding.ClusterShardingSettings
import domain.EventBroadcaster
object InstanceSharder {
  /** Messages accepted by the instance shard region. */
  sealed trait Command
  // Creates a brand-new instance aggregate; the shard region assigns a fresh id
  // (see extractEntityId) before forwarding as InstanceAggregate.Start.
  case class StartInstance(
    schedulerId: String,
    clusterId: String,
    parentInstanceId: Option[String],
    accessPassIds: List[String],
    image: String,
    tags: Seq[String]) extends Command
  // Asks an instance aggregate to verify a JWT it issued.
  case class VerifyJwt(token: String) extends Command
  // Routes an arbitrary message to the aggregate identified by instanceId.
  case class Envelope(instanceId: String, msg: Any) extends Command
  /**
   * Starts the InstanceAggregate cluster-shard region and returns its region actor.
   * Also spawns a single status-broadcaster actor wired to the system event stream,
   * which every instance aggregate shares.
   */
  def apply(
    keyringSharder: ActorRef @@ KeyRingSharder.type,
    schedulerSharder: ActorRef @@ SchedulerSharder.type)(implicit system: ActorSystem): ActorRef = {
    val statusBroadcaster = system.actorOf(
      Props(classOf[EventBroadcaster], system.eventStream),
      "instance-status-broadcaster").taggedWith[EventBroadcaster.type]
    ClusterSharding(system).start(
      typeName = "InstanceAggregate",
      entityProps = Props(
        classOf[InstanceAggregate], keyringSharder, schedulerSharder, statusBroadcaster),
      settings = ClusterShardingSettings(system),
      extractEntityId = extractEntityId,
      extractShardId = extractShardId)
  }
  // Because identity can be any valid string, we need the ID to be encoded.
  // StartInstance has no id yet, so a fresh one is generated here; Envelope
  // carries its target id explicitly.
  def extractEntityId(implicit system: ActorSystem): ShardRegion.ExtractEntityId = {
    case s: StartInstance =>
      // unapply on a case class always yields Some, so .get is safe here.
      (newInstanceId, Start.tupled(StartInstance.unapply(s).get))
    case Envelope(instanceId, msg) => (instanceId, msg)
  }
val extractShardId: ShardRegion.ExtractShardId = {
case _: StartInstance => "00" // All instance creation will happen in one shard, but that's OK
case Envelope(userId, _) => userId.reverse.take(2).reverse // Last two characters of aggregate ID (it'll do for now)
}
private def newInstanceId = IdUtils.timePrefix+IdUtils.randomId(16)
def resolveJwtInstanceId(token: String): Either[String, String] =
JwtJson.decode(token, JwtOptions(signature=false))
.toOption.toRight("Unable to decode token")
.right.flatMap { claim =>
val issuerPrefix = "instance-"
claim.issuer match {
case Some(id) if id.startsWith(issuerPrefix) =>
Right(id.stripPrefix(issuerPrefix))
case Some(id) =>
Left("Invalid issuer format")
case None =>
Left("No issuer defined")
}
}
} | dit4c/dit4c | dit4c-portal/app/services/InstanceSharder.scala | Scala | mit | 2,829 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.keras.nn
import com.intel.analytics.bigdl.keras.KerasBaseSpec
import com.intel.analytics.bigdl.dllib.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.dllib.nn.internal.{Convolution2D, Dense, TimeDistributed, Sequential => KSequential}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
import scala.util.Random
class TimeDistributedSpec extends KerasBaseSpec {
  // Compares BigDL's TimeDistributed(Dense) output and gradients against the
  // equivalent Keras model built from the embedded Python snippet.
  "TimeDistributed Dense" should "be the same as Keras" in {
    val kerasCode =
      """
        |input_tensor = Input(shape=[10, 12])
        |input = np.random.random([3, 10, 12])
        |output_tensor = TimeDistributed(Dense(8, activation="relu"))(input_tensor)
        |model = Model(input=input_tensor, output=output_tensor)
      """.stripMargin
    val seq = KSequential[Float]()
    val layer = TimeDistributed[Float](Dense(8, activation = "relu"), inputShape = Shape(10, 12))
    seq.add(layer)
    seq.getOutputShape().toSingle().toArray should be (Array(-1, 10, 8))
    // Maps Keras weights to BigDL: transposes the kernel, keeps the bias as-is.
    def weightConverter(in: Array[Tensor[Float]]): Array[Tensor[Float]] = Array(in(0).t(), in(1))
    checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
      kerasCode, weightConverter)
  }
  // Same comparison for TimeDistributed(Convolution2D) with Theano dim ordering.
  "TimeDistributed Convolution2D" should "be the same as Keras" in {
    val kerasCode =
      """
        |input_tensor = Input(shape=[4, 3, 12, 12])
        |input = np.random.random([2, 4, 3, 12, 12])
        |output_tensor = TimeDistributed(Convolution2D(8, 3, 3, dim_ordering="th"))(input_tensor)
        |model = Model(input=input_tensor, output=output_tensor)
      """.stripMargin
    val seq = KSequential[Float]()
    val layer = TimeDistributed[Float](Convolution2D(8, 3, 3),
      inputShape = Shape(4, 3, 12, 12))
    seq.add(layer)
    seq.getOutputShape().toSingle().toArray should be (Array(-1, 4, 8, 10, 10))
    checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
      kerasCode, precision = 1e-3)
  }
}
}
/**
 * Serialization round-trip test for the Keras-style `TimeDistributed` layer:
 * builds the layer, then verifies it survives module serialization with
 * identical behavior on a random input.
 */
class TimeDistributedSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val timeDistributed = TimeDistributed[Float](Dense(8), inputShape = Shape(10, 12))
    timeDistributed.build(Shape(3, 10, 12))
    val randomInput = Tensor[Float](3, 10, 12).apply1(_ => Random.nextFloat())
    runSerializationTest(timeDistributed, randomInput)
  }
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/keras/nn/TimeDistributedSpec.scala | Scala | apache-2.0 | 3,049 |
package stronghold.phylogeny
import org.scalatest.{FreeSpec, Matchers, Inspectors}
/**
 * Test suite for the Rosalind phylogeny problems: ancestor counting,
 * p-distance matrices, Newick-tree node distances and character tables.
 * All problem inputs are loaded with `isPractice = true`, i.e. the sample
 * datasets shipped with the project.
 */
class PhylogenySuite extends FreeSpec with Matchers with Inspectors {

  object Constants {
    // Absolute tolerance used when comparing floating-point distances.
    val absoluteTolerance: Double = 0.001
  }

  "CountingPhylogeneticAncestors" - {
    import CountingPhylogeneticAncestors.{getData, numberOfInnerNodesInUnrootedBinaryTree}

    "should calculate the number of internal nodes of any unrooted binary tree having n leaves" in {
      val numberOfLeaves: Int = getData(isPractice = true)
      // The sample dataset expects 2 internal nodes.
      numberOfInnerNodesInUnrootedBinaryTree(numberOfLeaves) shouldEqual 2
    }
  }

  "CreatingADistanceMatrix" - {
    import CreatingADistanceMatrix.{getData, calcDistanceMatrix}
    import utils.Dna
    import Constants.absoluteTolerance

    "Should retrieve the distance matrix corresponding to the p-distance on the given string" in {
      val dnas: List[Dna] = getData(isPractice = true)
      // Expected pairwise p-distances for the 4 sample DNA strings.
      val expectedDistanceMatrix: Array[Array[Double]] =
        Array(
          Array(0.0, 0.4, 0.1, 0.1),
          Array(0.4, 0.0, 0.4, 0.3),
          Array(0.1, 0.4, 0.0, 0.2),
          Array(0.1, 0.3, 0.2, 0.0)
        )
      val calculatedDistanceMatrix: Array[Array[Double]] = calcDistanceMatrix(dnas)
      // Compare element-wise within the floating-point tolerance.
      forAll(expectedDistanceMatrix.flatten.toList.zip(calculatedDistanceMatrix.flatten.toList)) {
        case (expected, calculated) => expected shouldBe (calculated +- absoluteTolerance)
      }
    }
  }

  "DistancesInTree" - {
    import DistancesInTrees.{NewickTree, NodePair, getData, calcDistancesInNewickTrees, calcDistanceBetweenNodes}

    "should calculate the distance between two nodes in an unweighted Newick tree for the test cases" in {
      val treesAndNodePairs: List[(NewickTree, NodePair)] = getData(isPractice = true)
      calcDistancesInNewickTrees(treesAndNodePairs) shouldEqual List(1, 2)
    }

    "should calculate the distance between two nodes in an unweighted Newick tree for a larger tree" in {
      // Path a -> x-parent -> root -> y -> e has 5 edges.
      val tree: NewickTree = "(((a,b),(c,d)x),((f,g),e)y);"
      val node1: String = "a"
      val node2: String = "e"
      calcDistanceBetweenNodes(tree, node1, node2) shouldEqual 5
    }
  }

  "CreatingACharacterTable" - {
    import CreatingACharacterTable.{getData, createCharacterTableFromNewickTree}

    "should create a character table having the same splits as the edge splits of the tree" in {
      val newickTreeString: String = getData(isPractice = true)
      // One boolean row per non-trivial edge split of the tree.
      createCharacterTableFromNewickTree(newickTreeString) should contain theSameElementsAs
        List(List(false, false, true, true, false), List(false, false, true, true, true))
    }
  }
}
| ghostrider77/Bioinformatics | Bioinformatics/src/test/scala-2.11/stronghold/phylogeny/PhylogenySuite.scala | Scala | mit | 2,620 |
package io.swagger.client.model
import io.swagger.client.core.ApiModel
import org.joda.time.DateTime
/**
 * Inner `user` field of a successful user-token API response.
 *
 * @param id          WordPress user ID
 * @param accessToken User token
 */
case class UserTokenSuccessfulResponseInnerUserField (
  id: Int,
  accessToken: String)
  extends ApiModel
| QuantiModo/QuantiModo-SDK-Akka-Scala | src/main/scala/io/swagger/client/model/UserTokenSuccessfulResponseInnerUserField.scala | Scala | gpl-2.0 | 260 |
package org.scalajs.testsuite.javalib.time.chrono
import java.time.DateTimeException
import java.time.chrono.IsoEra
import java.time.temporal.ChronoField
import org.junit.Test
import org.junit.Assert._
import org.scalajs.testsuite.javalib.time.TemporalAccessorTest
import org.scalajs.testsuite.utils.AssertThrows._
/**
 * Tests for `java.time.chrono.IsoEra` (the two ISO eras BCE and CE),
 * including the generic `TemporalAccessor` contract inherited from
 * [[TemporalAccessorTest]].
 */
class IsoEraTest extends TemporalAccessorTest {
  import IsoEra._

  val samples = values.toSeq

  // IsoEra only supports the ERA field.
  def isSupported(field: ChronoField): Boolean =
    field == ChronoField.ERA

  @Test def test_getValue(): Unit = {
    for ((era, expected) <- Seq(BCE -> 0, CE -> 1))
      assertEquals(expected, era.getValue)
  }

  @Test def test_getLong(): Unit = {
    samples.foreach { era =>
      assertEquals(era.getValue.toLong, era.getLong(ChronoField.ERA))
    }
  }

  @Test def test_compareTo(): Unit = {
    // Reflexive comparisons are zero; BCE orders strictly before CE.
    assertEquals(0, BCE.compareTo(BCE))
    assertEquals(0, CE.compareTo(CE))
    assertTrue(BCE.compareTo(CE) < 0)
    assertTrue(CE.compareTo(BCE) > 0)
  }

  @Test def test_values(): Unit = {
    assertArrayEquals(Array[AnyRef](BCE, CE), values.asInstanceOf[Array[AnyRef]])
  }

  @Test def test_valueOf(): Unit = {
    assertEquals(BCE, valueOf("BCE"))
    assertEquals(CE, valueOf("CE"))
    expectThrows(classOf[IllegalArgumentException], valueOf(""))
  }

  @Test def test_of(): Unit = {
    assertEquals(BCE, of(0))
    assertEquals(CE, of(1))
    // Any value outside {0, 1} is rejected.
    Seq(Int.MinValue, -1, 2, Int.MaxValue).foreach { n =>
      expectThrows(classOf[DateTimeException], of(n))
    }
  }
}
| ummels/scala-js | test-suite/shared/src/test/require-jdk8/org/scalajs/testsuite/javalib/time/chrono/IsoEraTest.scala | Scala | bsd-3-clause | 1,467 |
package com.hj.examples
import org.apache.spark.{SparkConf, SparkContext}
/**
 * Spark example applying the OWL functional-property inference rule to a
 * whitespace-separated triple file (`input/OWL1.in`) and printing the
 * derived `owl:sameAs` pairs.
 */
object OWL1 {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("OWL1").setMaster("local[2]")
    val sparkContext = new SparkContext(sparkConf)

    // Parse each line "s p o" into a (subject, predicate, object) triple.
    val triples = sparkContext.textFile("input/OWL1.in").map { line =>
      val tokens = line.split(" ")
      (tokens(0), tokens(1), tokens(2))
    }

    /*
      Rule:
        p rdf:type owl:FunctionalProperty
        u p v
        u p w
        =>
        v owl:sameAs w
    */
    val functionalProps = triples
      .filter { case (s, p, o) => p == "rdf:type" && o == "owl:FunctionalProperty" }
      .map { case (s, _, _) => s }
    // Broadcast the (small) set of functional property names to the workers.
    val funcPropSet = sparkContext.broadcast(functionalProps.collect.toSet)

    // Key the triples that use a functional property by (subject, predicate),
    // then self-join: two objects sharing a key must denote the same entity.
    val keyed = triples
      .filter { case (_, p, _) => funcPropSet.value.contains(p) }
      .map { case (s, p, o) => ((s, p), o) }

    val result = keyed.join(keyed)
      .map { case (_, pair) => pair }
      .filter { case (v, w) => v != w }

    result.foreach(x => println(x))
    sparkContext.stop()
  }
}
| huangjuegeek/SparkSRE | src/main/scala/com/hj/examples/OWL1.scala | Scala | apache-2.0 | 937 |
package pl.touk.nussknacker.engine.lite.kafka
import cats.data.NonEmptyList
import org.apache.kafka.common.{Metric, MetricName}
import pl.touk.nussknacker.engine.util.metrics.{Gauge, MetricIdentifier, MetricsProviderForScenario}
import scala.collection.mutable
import scala.jdk.CollectionConverters.mapAsScalaMapConverter
//We have to pass taskId, as we need different tags. `metrics` map passed in constructor is mutable (by Kafka), so we
//create own set of registered metrics to remove them correctly
//We have to pass taskId, as we need different tags. `metrics` map passed in constructor is mutable (by Kafka), so we
//create own set of registered metrics to remove them correctly
private[kafka] class KafkaMetricsRegistrar(taskId: String, metrics: java.util.Map[MetricName, _ <: Metric], metricsProvider: MetricsProviderForScenario) extends AutoCloseable {

  // Identifiers registered by this instance, so close() removes exactly those.
  private val registeredNames = mutable.HashSet.empty[MetricIdentifier]

  /** Expose every Kafka client metric as a gauge on the scenario metrics provider. */
  def registerMetrics(): Unit = {
    metrics.asScala.foreach { case (metricName, metric) =>
      val identifier = prepareMetricIdentifier(metricName)
      registeredNames += identifier
      metricsProvider.registerGauge[AnyRef](identifier, new Gauge[AnyRef] {
        override def getValue: AnyRef = metric.metricValue()
      })
    }
  }

  /** Deregister everything this instance registered. */
  override def close(): Unit =
    registeredNames.foreach(metricsProvider.remove)

  // Kafka's own tags, plus our task id and the metric's Kafka group.
  private def prepareMetricIdentifier(name: MetricName): MetricIdentifier = {
    val tags = name.tags().asScala.toMap ++ Map("taskId" -> taskId, "kafkaGroup" -> name.group())
    MetricIdentifier(NonEmptyList.of(name.name()), tags)
  }
}
| TouK/nussknacker | engine/lite/kafka/runtime/src/main/scala/pl/touk/nussknacker/engine/lite/kafka/KafkaMetricsRegistrar.scala | Scala | apache-2.0 | 1,446 |
package cromwell.backend.sfs
import java.io.{FileNotFoundException, IOException}
import cats.instances.try_._
import cats.syntax.functor._
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import cromwell.backend.io.JobPaths
import cromwell.core.CromwellFatalExceptionMarker
import cromwell.core.path.{DefaultPath, DefaultPathBuilder, Path, PathFactory}
import common.util.TryUtil
import wom.WomFileMapper
import wom.values._
import scala.collection.JavaConverters._
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}
object SharedFileSystem extends StrictLogging {

  /** Result of looking up one output value by name; `value` is a `Failure` when the lookup failed. */
  final case class AttemptedLookupResult(name: String, value: Try[WomValue]) {
    def toPair: (String, Try[WomValue]) = name -> value
  }

  object AttemptedLookupResult {
    implicit class AugmentedAttemptedLookupSequence(s: Seq[AttemptedLookupResult]) {
      /** Keep only the successful lookups, as a name -> value map. */
      def toLookupMap: Map[String, WomValue] = s collect {
        case AttemptedLookupResult(name, Success(value)) => (name, value)
      } toMap
    }
  }

  /** A source path paired with the destination it should be duplicated to. */
  case class PairOfFiles(src: Path, dst: Path)

  /** A strategy that attempts to make the second path a duplicate of the first. */
  type DuplicationStrategy = (Path, Path) => Try[Unit]

  /**
   * Return a `Success` result if the file has already been localized, otherwise `Failure`.
   */
  private def localizePathAlreadyLocalized(originalPath: Path, executionPath: Path): Try[Unit] = {
    // Fixed: the message previously read "$originalPath doesn't exists" — ungrammatical,
    // and it named the source path although the existence check is on the execution path.
    if (executionPath.exists) Success(()) else Failure(new RuntimeException(s"$originalPath not yet localized: $executionPath does not exist"))
  }

  /** Copy `originalPath` to `executionPath` via a temporary sibling, then an atomic-ish move. */
  private def localizePathViaCopy(originalPath: Path, executionPath: Path): Try[Unit] = {
    val action = Try {
      executionPath.parent.createPermissionedDirectories()
      // Copy to a ".tmp" sibling first so a partially written file is never
      // visible under the final name.
      val executionTmpPath = executionPath.plusExt("tmp")
      originalPath.copyTo(executionTmpPath, overwrite = true).moveTo(executionPath, overwrite = true)
    }.void
    logOnFailure(action, "copy")
  }

  /** Hard-link `executionPath` to `originalPath` (same filesystem required). */
  private def localizePathViaHardLink(originalPath: Path, executionPath: Path): Try[Unit] = {
    val action = Try {
      executionPath.parent.createPermissionedDirectories()
      originalPath.linkTo(executionPath)
    }.void
    logOnFailure(action, "hard link")
  }

  /**
   * Symlink `executionPath` to `originalPath`. Rejects directories and
   * missing sources up front so the failure is explicit.
   */
  private def localizePathViaSymbolicLink(originalPath: Path, executionPath: Path): Try[Unit] = {
    if (originalPath.isDirectory) Failure(new UnsupportedOperationException("Cannot localize directory with symbolic links"))
    else if (!originalPath.exists) Failure(new FileNotFoundException(originalPath.pathAsString))
    else {
      val action = Try {
        executionPath.parent.createPermissionedDirectories()
        executionPath.linkTo(originalPath, symbolic = true)
      }.void
      logOnFailure(action, "symbolic link")
    }
  }

  /** Log a warning when a strategy fails, and pass the result through unchanged. */
  private def logOnFailure(action: Try[Unit], actionLabel: String): Try[Unit] = {
    if (action.isFailure) logger.warn(s"Localization via $actionLabel has failed: ${action.failed.get.getMessage}")
    action
  }

  /**
   * Try each strategy in order and stop at the first success. If none succeed,
   * fail with an aggregate of all the individual failures.
   */
  private def duplicate(description: String, source: Path, dest: Path, strategies: Stream[DuplicationStrategy]): Try[Unit] = {
    // `strategies` is a Stream, so later strategies are only attempted when
    // the earlier ones fail.
    val attempts: Stream[Try[Unit]] = strategies.map(_ (source.followSymbolicLinks, dest))
    attempts.find(_.isSuccess) getOrElse {
      TryUtil.sequence(attempts, s"Could not $description $source -> $dest").void
    }
  }
}
/**
 * Mixin implementing file localization and call caching for backends that
 * share a filesystem with Cromwell. The concrete strategies (hard link,
 * soft link, copy) are read from [[sharedFileSystemConfig]].
 */
trait SharedFileSystem extends PathFactory {
  import SharedFileSystem._

  /** Backend configuration section describing localization / caching strategies. */
  def sharedFileSystemConfig: Config

  // Strategy order used when the configuration does not specify one.
  lazy val DefaultStrategies = Seq("hard-link", "soft-link", "copy")

  lazy val LocalizationStrategies: Seq[String] = getConfigStrategies("localization")
  lazy val Localizers: Seq[DuplicationStrategy] = createStrategies(LocalizationStrategies, docker = false)
  lazy val DockerLocalizers: Seq[DuplicationStrategy] = createStrategies(LocalizationStrategies, docker = true)

  lazy val CachingStrategies: Seq[String] = getConfigStrategies("caching.duplication-strategy")
  lazy val Cachers: Seq[DuplicationStrategy] = createStrategies(CachingStrategies, docker = false)

  // Read the strategy names at `configPath`, falling back to DefaultStrategies.
  private def getConfigStrategies(configPath: String): Seq[String] = {
    if (sharedFileSystemConfig.hasPath(configPath)) {
      sharedFileSystemConfig.getStringList(configPath).asScala
    } else {
      DefaultStrategies
    }
  }

  // Turn configured strategy names into DuplicationStrategy functions.
  private def createStrategies(configStrategies: Seq[String], docker: Boolean): Seq[DuplicationStrategy] = {
    // If localizing for a docker job, remove soft-link as an option
    val filteredConfigStrategies = configStrategies filter {
      case "soft-link" if docker => false
      case _ => true
    }

    // Convert the (remaining) config strategies to duplication strategies
    val mappedDuplicationStrategies = filteredConfigStrategies map {
      case "hard-link" => localizePathViaHardLink _
      case "soft-link" => localizePathViaSymbolicLink _
      case "copy" => localizePathViaCopy _
      case unsupported => throw new UnsupportedOperationException(s"Strategy $unsupported is not recognized")
    }

    // Prepend the default duplication strategy, and return the sequence
    localizePathAlreadyLocalized _ +: mappedDuplicationStrategies
  }

  // Resolve `pathString` against `callRoot` unless it is already absolute
  // (or belongs to a non-default path builder, e.g. a cloud filesystem).
  private def hostAbsoluteFilePath(callRoot: Path, pathString: String): Path = {
    val wdlPath = PathFactory.buildPath(pathString, pathBuilders)
    wdlPath match {
      case _: DefaultPath if !wdlPath.isAbsolute => callRoot.resolve(wdlPath).toAbsolutePath
      case _ => wdlPath
    }
  }

  /** Map every `WomFile` inside `womValue` to its host-absolute location under the call root. */
  def outputMapper(job: JobPaths)(womValue: WomValue): Try[WomValue] = {
    WomFileMapper.mapWomFiles(mapJobWomFile(job))(womValue)
  }

  /**
   * Resolve a single output file against the job's call execution root,
   * failing fast when the file does not exist.
   */
  def mapJobWomFile(job: JobPaths)(womFile: WomFile): WomFile = {
    womFile match {
      case fileNotFound: WomFile if !hostAbsoluteFilePath(job.callExecutionRoot, fileNotFound.valueString).exists =>
        throw new RuntimeException("Could not process output, file not found: " +
          s"${hostAbsoluteFilePath(job.callExecutionRoot, fileNotFound.valueString).pathAsString}")
      case _ => WomFile(hostAbsoluteFilePath(job.callExecutionRoot, womFile.valueString).pathAsString)
    }
  }

  /** Duplicate a cached result file to its new location using the caching strategies. */
  def cacheCopy(sourceFilePath: Path, destinationFilePath: Path): Try[Unit] = {
    duplicate("cache", sourceFilePath, destinationFilePath, Cachers.toStream)
  }

  /**
   * Return a possibly altered copy of inputs reflecting any localization of input file paths that might have
   * been performed for this `Backend` implementation.
   */
  def localizeInputs(inputsRoot: Path, docker: Boolean)(inputs: WomEvaluatedCallInputs): Try[WomEvaluatedCallInputs] = {
    // NOTE(review): `mapValues` is a lazy view in Scala 2.12 — assumes
    // TryUtil.sequenceMap forces it exactly once; verify if localization
    // appears to run twice.
    TryUtil.sequenceMap(
      inputs mapValues WomFileMapper.mapWomFiles(localizeWomFile(inputsRoot, docker)),
      "Failures during localization"
    ) recoverWith {
      case e => Failure(new IOException(e.getMessage) with CromwellFatalExceptionMarker)
    }
  }

  /**
   * Localize one input file under `inputsRoot`, choosing the strategy set
   * based on whether the job runs in docker (soft links are excluded there).
   */
  def localizeWomFile(inputsRoot: Path, docker: Boolean)(value: WomFile): WomFile = {
    val strategies = if (docker) DockerLocalizers else Localizers

    // Strip the protocol scheme
    def stripProtocolScheme(path: Path): Path = DefaultPathBuilder.get(path.pathWithoutScheme)

    /*
      * Transform an original input path to a path in the call directory.
      * The new path matches the original path, it only "moves" the root to be the call directory.
      */
    def toCallPath(path: String): Try[PairOfFiles] = Try {
      val src = buildPath(path)
      // Strip out potential prefix protocol
      val localInputPath = stripProtocolScheme(src)
      val dest = if (inputsRoot.isParentOf(localInputPath)) localInputPath
      else {
        // Concatenate call directory with absolute input path
        DefaultPathBuilder.get(inputsRoot.pathAsString, localInputPath.pathAsString)
      }
      PairOfFiles(src, dest)
    }

    // Optional function to adjust the path to "docker path" if the call runs in docker
    localizeWomFile(toCallPath _, strategies.toStream)(value)
  }

  /**
   * Try to localize a WomFile.
   *
   * @param toDestPath function specifying how to generate the destination path from the source path
   * @param strategies strategies to use for localization
   * @param womFile WomFile to localize
   * @return localized wdl file
   */
  private def localizeWomFile(toDestPath: (String => Try[PairOfFiles]), strategies: Stream[DuplicationStrategy])
                             (womFile: WomFile): WomFile = {
    val path = womFile.value
    val result = toDestPath(path) flatMap {
      case PairOfFiles(src, dst) => duplicate("localize", src, dst, strategies) map { _ => WomFile(dst.pathAsString) }
    }
    // Throws if every strategy failed; callers treat this as a fatal localization error.
    result.get
  }
}
| ohsu-comp-bio/cromwell | supportedBackends/sfs/src/main/scala/cromwell/backend/sfs/SharedFileSystem.scala | Scala | bsd-3-clause | 8,473 |
package blended.security.ssl
import java.math.BigInteger
import java.security.cert.X509Certificate
import java.security.{KeyPair, KeyPairGenerator, SecureRandom}
import blended.util.logging.Logger
import org.bouncycastle.cert.X509v3CertificateBuilder
import scala.util.Try
/**
 * Certificate provider issuing self-signed certificates for the configured
 * common name. New certificates get a freshly generated RSA key pair and
 * serial 1; refreshed certificates reuse the existing key pair and bump the
 * previous serial number by one.
 */
class SelfSignedCertificateProvider(cfg: SelfSignedConfig)
  extends CertificateProvider
  with CertificateRequestBuilder
  with CertificateSigner {

  private[this] val log = Logger[SelfSignedCertificateProvider]

  // Fresh RSA key pair with the configured key strength.
  private def generateKeyPair(): KeyPair = {
    val generator = KeyPairGenerator.getInstance("RSA")
    generator.initialize(cfg.keyStrength, new SecureRandom())
    generator.genKeyPair()
  }

  override def refreshCertificate(existing: Option[CertificateHolder], cnProvider : CommonNameProvider): Try[CertificateHolder] = Try {

    val previousCert: Option[X509Certificate] = existing.map(_.chain.head)

    // Reuse the key pair of an existing certificate (which must carry its
    // private key); otherwise create a brand new one.
    val requesterKeypair: KeyPair = existing match {
      case None =>
        generateKeyPair()
      case Some(holder) =>
        holder.privateKey match {
          case Some(pk) => new KeyPair(holder.publicKey, pk)
          case None => throw new NoPrivateKeyException("Existing certificate must have a private key to update")
        }
    }

    // Continue the serial sequence of the previous certificate, or start at 1.
    val serial: BigInteger =
      previousCert.map(_.getSerialNumber().add(BigInteger.ONE)).getOrElse(BigInteger.ONE)

    val certBuilder: X509v3CertificateBuilder = hostCertificateRequest(
      cnProvider = cnProvider,
      serial = serial,
      validDays = cfg.validDays,
      keyPair = requesterKeypair
    ).get

    val cert: X509Certificate = sign(certBuilder, cfg.sigAlg, requesterKeypair.getPrivate()).get
    log.debug(s"Generated certificate [${X509CertificateInfo(cert)}]")

    CertificateHolder.create(requesterKeypair, List(cert)).get
  }
}
| lefou/blended | blended.security.ssl/src/main/scala/blended/security/ssl/SelfSignedCertificateProvider.scala | Scala | apache-2.0 | 1,745 |
package lms.util
import lms._
import scala.lms.common._
import scala.lms.internal.GenericCodegen
import scala.reflect.SourceContext
import java.io.PrintWriter
/**
* A CPS encoding of Pairs
* an alternative to the struct representation
*/
trait PairCPS
  extends Base
  with IfThenElse
  with BooleanOps
  with LiftVariables
  with TupleOps
  /*with ZeroVal*/ {

  /**
   * implicits for creating Type Manifests
   * new boilerplate after the Manifest -> Typ change
   */
  implicit def paircps_typ[A: Typ, B: Typ]: Typ[PairCPS[A, B]]
  implicit def paircps_nul[A: Typ: Nul, B: Typ: Nul]: Nul[PairCPS[A, B]]

  /**
   * CPS encoding of a pair: a pair is represented by its elimination
   * function `apply`, which hands both components to a continuation.
   * (An `isDefined`-style query does not make sense for this encoding.)
   */
  abstract class PairCPS[A: Typ: Nul, B: Typ: Nul] { self =>

    /** Eliminate the pair: feed both components to the continuation `k`. */
    def apply[X: Typ: Nul](k: (Rep[A], Rep[B]) => Rep[X]): Rep[X]

    /** Transform both components without leaving the CPS encoding. */
    def map[C: Typ: Nul, D: Typ: Nul](
      f: Rep[A] => Rep[C],
      g: Rep[B] => Rep[D]) = new PairCPS[C, D] {
      def apply[X: Typ: Nul](k: (Rep[C], Rep[D]) => Rep[X]) =
        self.apply((a, b) => k(f(a), g(b)))
    }

    /** Convert to the staged struct/tuple representation. */
    def toPair: Rep[(A, B)] = self.apply((a, b) => make_tuple2(a, b))
  }

  /**
   * Companion object
   */
  object PairCPS {

    /** Construct a CPS pair from two staged values. */
    def Pair[A: Typ: Nul, B: Typ: Nul](a: Rep[A], b: Rep[B]) = new PairCPS[A, B] {
      def apply[X: Typ: Nul](k: (Rep[A], Rep[B]) => Rep[X]): Rep[X] = k(a, b)
    }

    /**
     * a conditional expression for PairCPS, mixed-stage
     * needs a different name than __ifThenElse because the latter requires
     * Rep `then` and `else` parameters
     */
    def conditional[A: Typ: Nul, B: Typ: Nul](
      cond: Rep[Boolean],
      thenp: => PairCPS[A, B],
      elsep: => PairCPS[A, B]
    ): PairCPS[A, B] = new PairCPS[A, B] {
      def apply[X: Typ: Nul](k: (Rep[A], Rep[B]) => Rep[X]): Rep[X] = {
        // Materialize the chosen branch into a staged variable, then resume
        // the continuation once, after the staged `if`.
        var tmpPair = zeroVal[PairCPS[A, B]]
        val assignK = (a: Rep[A], b: Rep[B]) => { tmpPair = mkPair(a, b) }
        if (cond) thenp.apply(assignK) else elsep.apply(assignK)
        k(readVar(tmpPair)._1, readVar(tmpPair)._2)
      }
    }
  }

  /**
   * Pimping my ride, now I have access to Rep[PairCPS]
   */
  implicit class PairCPSCls[A: Typ: Nul, B: Typ: Nul](pair: Rep[PairCPS[A, B]]) {

    def map[C: Typ: Nul, D: Typ: Nul](f: Rep[A] => Rep[C], g: Rep[B] => Rep[D]) =
      paircps_map(pair, f, g)

    def apply[X: Typ: Nul](k: (Rep[A], Rep[B]) => Rep[X]): Rep[X] =
      paircps_apply(pair, k)

    def toPair: Rep[(A, B)] = paircps_toPair(pair)
    def _1: Rep[A] = paircps_1(pair)
    def _2: Rep[B] = paircps_2(pair)

    /**
     * for now we don't include other operations on pairCPS, we don't
     * seem to need them
     */
  }

  /**
   * interface level functions
   */
  def mkPair[A: Typ: Nul, B: Typ: Nul](a: Rep[A], b: Rep[B]): Rep[PairCPS[A, B]]

  def paircps_map[A: Typ: Nul, B: Typ: Nul, C: Typ: Nul, D: Typ: Nul](
    pair: Rep[PairCPS[A, B]],
    f: Rep[A] => Rep[C],
    g: Rep[B] => Rep[D]): Rep[PairCPS[C, D]]

  def paircps_apply[A: Typ: Nul, B: Typ: Nul, X: Typ: Nul](
    pair: Rep[PairCPS[A, B]],
    k: (Rep[A], Rep[B]) => Rep[X]): Rep[X]

  def pair_conditional[A: Typ: Nul, B: Typ: Nul](
    cond: Rep[Boolean],
    thenp: => Rep[PairCPS[A, B]],
    elsep: => Rep[PairCPS[A, B]]): Rep[PairCPS[A, B]]

  //def __ifThenElse[A: Typ: Nul, B: Typ: Nul](
  //  cond: Rep[Boolean],
  //  thenp: => Rep[PairCPS[A, B]],
  //  elsep: => Rep[PairCPS[A, B]]) = pair_conditional(cond, thenp, elsep)

  def paircps_toPair[A: Typ: Nul, B: Typ: Nul](pair: Rep[PairCPS[A, B]]): Rep[(A, B)]
  def paircps_1[A: Typ: Nul, B: Typ: Nul](pair: Rep[PairCPS[A, B]]): Rep[A]
  def paircps_2[A: Typ: Nul, B: Typ: Nul](pair: Rep[PairCPS[A, B]]): Rep[B]
}
/**
 * Expression-level (IR) implementation of [[PairCPS]]. A CPS pair never
 * becomes a real IR node of its own: `mkPair` wraps the unstaged
 * [[PairCPS]] value in a [[PairWrapper]] `Def`, and the interface functions
 * pattern-match that wrapper back out. Variable handling (`var_new`,
 * `var_assign`, `readVar`) is overridden so a `var` of pair type is split
 * into two ordinary component variables.
 */
trait PairCPSExp
  extends PairCPS
  with BaseExp
  with TupleOpsExp
  with IfThenElseExp
  with BooleanOpsExp
  with OptionOpsExp
  /*with ZeroValExp*/ {

  import PairCPS._

  /**
   * implicits for creating Type Manifests
   * new boilerplate after the Manifest -> Typ change
   */
  implicit def paircps_typ[A: Typ, B: Typ]: Typ[PairCPS[A, B]] = {
    implicit val ManifestTyp(mA) = typ[A]
    implicit val ManifestTyp(mB) = typ[B]
    manifestTyp
  }

  implicit def paircps_nul[A: Typ: Nul, B: Typ: Nul] = new Nul[PairCPS[A, B]] {
    // The "null" pair is the pair of component null values.
    def nullValue = mkPair(zeroVal[A], zeroVal[B])
    def nlArguments = nul[A] :: nul[B] :: Nil
  }

  /**
   * overriding variable assignment specifically for PairCPS:
   * a pair-typed variable is represented by one variable per component.
   */
  case class PairVar[A: Typ: Nul, B: Typ: Nul](
    a: Var[A],
    b: Var[B]) extends Def[Variable[PairCPS[A, B]]]

  def mkPairVar[A: Typ: Nul, B: Typ: Nul](
    a: Var[A],
    b: Var[B]): Exp[Variable[PairCPS[A, B]]] = PairVar(a, b)

  /**
   * Inspired from the Structs variable treatment at https://github.com/TiarkRompf/virtualization-lms-core/blob/develop/src/common/Structs.scala
   * With appropriate ``annotation hacks'' where needed.
   */
  override def var_new[T: Typ: Nul](init: Exp[T])(implicit pos: SourceContext): Var[T] = init match {
    case Def(PairWrapper(PairStruct(a, b))) =>
      // Split the pair initializer into one fresh variable per component.
      val new_a = var_new(a)(a.tp, a.nl, pos)
      val new_b = var_new(b)(b.tp, b.nl, pos)
      /**
       * We know that we will have an Exp of type Var T, but the type system does not,
       * unfortunately, hence the cast.
       */
      val pairVar: Exp[Var[T]] = mkPairVar(new_a, new_b)(
        mtype(a.tp), ntype(a.nl),
        mtype(b.tp), ntype(b.nl)).asInstanceOf[Exp[Var[T]]]
      Variable(pairVar)
    case _ => super.var_new(init)
  }

  /**
   * Inspired from the Structs variable treatment at https://github.com/TiarkRompf/virtualization-lms-core/blob/develop/src/common/Structs.scala
   * With appropriate ``annotation hacks'' where needed.
   */
  override def var_assign[T: Typ: Nul](lhs: Var[T], rhs: Exp[T])
    (implicit pos: SourceContext): Exp[Unit] = {
    (lhs, rhs) match {
      // Assigning a pair to a split variable assigns each component separately.
      case (Variable(Def(Reflect(PairVar(v1, v2), _, _))),
        Def(PairWrapper(p))) =>
        p apply { (a, b) =>
          var_assign(v1, a)(a.tp, a.nl, pos)
          var_assign(v2, b)(b.tp, b.nl, pos)
        }
      case _ => super.var_assign(lhs, rhs)
    }
  }

  override def readVar[T: Typ: Nul](v: Var[T])(implicit pos: SourceContext): Exp[T] = v match {
    // Reading a split pair variable reads both components and re-wraps them.
    case Variable(Def(Reflect(PairVar(v1 @ Variable(a), v2 @ Variable(b)), _, _))) =>
      /**
       * critical to pass the correct implicits to readVar
       * as well as to makePair. `makePair` only needs A, B in PairCPS[A]
       * and so we must deconstruct as appropriate.
       *
       * Since we are in the PairCPS[A, B] case there will always be
       * two elements in typ[T]
       */
      val ta :: tb :: _ = typ[T].typeArguments
      val na :: nb :: _ = nul[T].nlArguments
      mkPair(
        readVar(v1)(
          mtype(a.tp.typeArguments.head),
          ntype(a.nl.nlArguments.head),
          pos
        ),
        readVar(v2)(
          mtype(b.tp.typeArguments.head),
          ntype(b.nl.nlArguments.head),
          pos
        )
      )(mtype(ta), ntype(na), mtype(tb), ntype(nb)).asInstanceOf[Exp[T]]
    case _ => super.readVar(v)
  }

  /**
   * The wrapper acting as Rep[PairCPS[A]]
   */
  case class PairWrapper[A: Typ: Nul, B: Typ: Nul](p: PairCPS[A, B])
    extends Def[PairCPS[A, B]]

  /**
   * An extra wrapper in the Exp world to make handling variables a bit easier:
   * a pair whose components are plain expressions (see var_new above).
   */
  case class PairStruct[A: Typ: Nul, B: Typ: Nul](
    a: Rep[A], b: Rep[B]) extends PairCPS[A, B] {
    def apply[X: Typ: Nul](k: (Rep[A], Rep[B]) => Rep[X]): Rep[X] = k(a, b)
  }

  /**
   * dummy typeclasses to satisfy using `unit(PairStruct(..))`
   */
  implicit def pairstruct_typ[A: Typ, B: Typ]: Typ[PairStruct[A, B]] = {
    implicit val ManifestTyp(mA) = typ[A]
    implicit val ManifestTyp(mB) = typ[B]
    manifestTyp
  }

  implicit def pairstruct_nul[A: Typ: Nul, B: Typ: Nul] = new Nul[PairStruct[A, B]] {
    def nullValue = null
    def nlArguments = Nil
  }

  def mkPair[A: Typ: Nul, B: Typ: Nul](a: Rep[A], b: Rep[B]): Exp[PairCPS[A, B]]
    = PairWrapper(PairStruct(a, b))

  def paircps_map[A: Typ: Nul, B: Typ: Nul, C: Typ: Nul, D: Typ: Nul](
    pair: Rep[PairCPS[A, B]],
    f: Rep[A] => Rep[C],
    g: Rep[B] => Rep[D]): Rep[PairCPS[C, D]] = pair match {
    case Def(PairWrapper(pair)) => PairWrapper(pair.map(f, g))
  }

  def paircps_apply[A: Typ: Nul, B: Typ: Nul, X: Typ: Nul](
    pair: Rep[PairCPS[A, B]],
    k: (Rep[A], Rep[B]) => Rep[X]): Rep[X] = pair match {
    case Def(PairWrapper(pair)) => pair(k)
  }

  /**
   * a 'conditional' pair
   * lifts conditional expressions to pair level
   *
   * Note: this implementation works only because we are
   * evaluating `thenp` and `elsep` here, and they are simple expressions
   * If they are blocks, the pattern match will fail.
   */
  def pair_conditional[A: Typ: Nul, B: Typ: Nul](
    cond: Rep[Boolean],
    thenp: => Rep[PairCPS[A, B]],
    elsep: => Rep[PairCPS[A, B]]
  ): Rep[PairCPS[A, B]] = (thenp, elsep) match { //stricting them here
    case (Const(t), Const(e)) =>
      PairWrapper(conditional(cond, t, e))
  }

  def paircps_toPair[A: Typ: Nul, B: Typ: Nul](
    pair: Rep[PairCPS[A, B]]): Rep[(A, B)] = pair match {
    case Def(PairWrapper(pair)) => pair.toPair
  }

  // Component projections: the PairStruct cases avoid re-running the
  // continuation when the components are already plain expressions.
  def paircps_1[A: Typ: Nul, B: Typ: Nul](
    pair: Rep[PairCPS[A, B]]): Rep[A] = pair match {
    case Def(PairWrapper(PairStruct(a, _))) => a
    case Def(PairWrapper(p)) => p.apply((a, _) => a)
  }

  def paircps_2[A: Typ: Nul, B: Typ: Nul](
    pair: Rep[PairCPS[A, B]]): Rep[B] = pair match {
    case Def(PairWrapper(PairStruct(_, b))) => b
    case Def(PairWrapper(p)) => p.apply((_, b) => b)
  }
}
/**
* Specific code generator for PairCPS that doesn't generate PairVars
*/
/**
 * Code-generation mix-in for PairCPS: a `PairVar` node only bundles the two
 * component variables (which are emitted as ordinary variables on their
 * own), so no code is generated for the node itself.
 */
trait PairCPSGenBase extends GenericCodegen {
  val IR: PairCPSExp
  import IR._

  override def emitNode(sym: Sym[Any], rhs: Def[Any]) =
    rhs match {
      case _: PairVar[_, _] => () // nothing to emit for a pair variable
      case other => super.emitNode(sym, other)
    }
}
| manojo/lms-utils | util/src/main/scala/lms/util/PairCPS.scala | Scala | mit | 9,974 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.ops
import com.intel.analytics.bigdl.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import scala.util.Random
/**
 * Serialization round-trip test for `DepthwiseConv2DBackpropFilter`
 * (stride 1, no padding, NHWC layout) on random input data.
 */
class DepthwiseConv2DBackpropFilterSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val module = DepthwiseConv2DBackpropFilter[Float](1, 1, 0, 0, DataFormat.NHWC)
      .setName("depWiseConv2dBackProp")
    // (input activations, filter sizes, output gradients)
    val inputTable = T(
      Tensor[Float](4, 24, 24, 3).apply1(_ => Random.nextFloat()),
      Tensor[Int](T(2, 2, 3, 1)),
      Tensor[Float](4, 23, 23, 3).apply1(_ => Random.nextFloat())
    )
    runSerializationTest(module, inputTable)
  }
}
| wzhongyuan/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/ops/DepthwiseConv2DBackpropFilterSpec.scala | Scala | apache-2.0 | 1,382 |
package teleporter.integration.protocol.fbs
import java.nio.ByteBuffer
import java.sql.{Date, Timestamp, Types}
import com.google.flatbuffers.FlatBufferBuilder
import teleporter.integration.component.jdbc._
import teleporter.integration.core.{TId, TransferMessage}
import teleporter.integration.protocol.fbs.generate._
/**
* Author: kui.dai
* Date: 2016/4/8.
*/
/**
 * (De)serialization of single SQL parameter values to and from their
 * FlatBuffers representation (`JdbcParam`): each value is stored as a raw
 * byte vector tagged with a `java.sql.Types` code. Byte conversions are
 * provided by the implicit converters in `Bytes`.
 */
object FbsJdbcParam {

  import teleporter.integration.utils.Bytes._

  /**
   * Serialize a value into the builder, unwrapping `Option`s: `None` is
   * encoded as `Types.NULL` with an empty payload.
   *
   * @return offset of the created `JdbcParam` inside the builder
   */
  def apply(builder: FlatBufferBuilder, value: Any): Int = {
    value match {
      case Some(v) ⇒ build(builder, v)
      case None ⇒ JdbcParam.createJdbcParam(builder, Types.NULL, JdbcParam.createValueVector(builder, Array.emptyByteArray))
      case x ⇒ build(builder, x)
    }
  }

  // Encode one concrete (non-Option) value with its SQL type tag.
  private def build(builder: FlatBufferBuilder, value: Any): Int = {
    value match {
      case v: Boolean ⇒ JdbcParam.createJdbcParam(builder, Types.BOOLEAN, JdbcParam.createValueVector(builder, v))
      case v: Char ⇒ JdbcParam.createJdbcParam(builder, Types.CHAR, JdbcParam.createValueVector(builder, v))
      case v: Short ⇒ JdbcParam.createJdbcParam(builder, Types.SMALLINT, JdbcParam.createValueVector(builder, v))
      case v: Int ⇒ JdbcParam.createJdbcParam(builder, Types.INTEGER, JdbcParam.createValueVector(builder, v))
      case v: Long ⇒ JdbcParam.createJdbcParam(builder, Types.BIGINT, JdbcParam.createValueVector(builder, v))
      case v: Float ⇒ JdbcParam.createJdbcParam(builder, Types.FLOAT, JdbcParam.createValueVector(builder, v))
      case v: Double ⇒ JdbcParam.createJdbcParam(builder, Types.DOUBLE, JdbcParam.createValueVector(builder, v))
      // NOTE(review): both BigDecimal variants are tagged Types.DOUBLE, and
      // unapply decodes DOUBLE with toDouble — confirm the Bytes converters
      // round-trip BigDecimal payloads through toDouble without corruption.
      case v: BigDecimal ⇒ JdbcParam.createJdbcParam(builder, Types.DOUBLE, JdbcParam.createValueVector(builder, v))
      case v: java.math.BigDecimal ⇒ JdbcParam.createJdbcParam(builder, Types.DOUBLE, JdbcParam.createValueVector(builder, toBytes(v)))
      // Dates and timestamps travel as their toString representation and are
      // parsed back with valueOf in unapply.
      case v: Date ⇒ JdbcParam.createJdbcParam(builder, Types.DATE, JdbcParam.createValueVector(builder, v.toString))
      case v: Timestamp ⇒ JdbcParam.createJdbcParam(builder, Types.TIMESTAMP, JdbcParam.createValueVector(builder, v.toString))
      case v: String ⇒ JdbcParam.createJdbcParam(builder, Types.VARCHAR, JdbcParam.createValueVector(builder, v))
      case v: Array[Byte] ⇒ JdbcParam.createJdbcParam(builder, Types.BLOB, JdbcParam.createValueVector(builder, v))
      case null ⇒ JdbcParam.createJdbcParam(builder, Types.NULL, JdbcParam.createValueVector(builder, Array.emptyByteArray))
      case v ⇒ throw new IllegalArgumentException(s"Unsupport data type $v, ${v.getClass.getSimpleName}")
    }
  }

  /** Decode every parameter of a FlatBuffers statement, in declaration order. */
  def unapply(statement: JdbcStatement): Seq[Any] = scala.collection.Seq.tabulate(statement.paramsLength())(statement.params).map(FbsJdbcParam.unapply)

  /** Decode one parameter according to its SQL type tag. */
  def unapply(field: JdbcParam): Any = {
    // Copy the FlatBuffers byte vector into a plain array first.
    val b = Array.tabulate(field.valueLength())(field.value)
    field.`type`() match {
      case Types.NULL ⇒ null
      case Types.BOOLEAN ⇒ toBoolean(b)
      case Types.CHAR ⇒ toChar(b)
      case Types.SMALLINT ⇒ toShort(b)
      case Types.INTEGER ⇒ toInt(b)
      case Types.BIGINT ⇒ toLong(b)
      case Types.FLOAT ⇒ toFloat(b)
      case Types.DOUBLE ⇒ toDouble(b)
      // valueOf takes a String here — relies on an implicit bytes-to-string
      // conversion from Bytes._
      case Types.DATE ⇒ Date.valueOf(b)
      case Types.TIMESTAMP ⇒ Timestamp.valueOf(b)
      case Types.VARCHAR ⇒ toStr(b)
      case Types.BLOB ⇒ b
      case _ ⇒ throw new IllegalArgumentException(s"Unsupport type:${field.`type`()}")
    }
  }
}
/**
 * (De)serialization of whole JDBC transfer messages (`Update` / `Upsert`
 * actions with their prepared statements) to and from FlatBuffers.
 */
object FbsJdbc {

  // Lets FlatBuffers `JdbcStatement`s be used directly where a PreparedSql is
  // expected (e.g. in the Update/Upsert constructors below).
  implicit def asPreparedSql(statement: JdbcStatement): PreparedSql = PreparedSql(statement.sql(), FbsJdbcParam.unapply(statement))

  // Decode one FlatBuffers action; Update carries one statement, Upsert two
  // (the update statement and the insert statement).
  private def apply(action: JdbcAction): Action = {
    action.`type`() match {
      case ActionType.Update ⇒
        Update(action.statements(0))
      case ActionType.Upsert ⇒
        Upsert(action.statements(0), action.statements(1))
    }
  }

  /** Decode a single FlatBuffers message into a transfer message of actions. */
  def apply(message: generate.JdbcMessage): TransferMessage[Seq[Action]] = {
    val tId = TId.keyFromBytes(Array.tabulate(message.tidLength())(message.tid))
    val jdbcRecord = scala.collection.immutable.Seq.tabulate(message.actionsLength())(message.actions).map(apply)
    TransferMessage[Seq[Action]](id = tId, data = jdbcRecord)
  }

  /** Decode a buffer containing a batch of messages. */
  def apply(byteBuffer: ByteBuffer): Seq[TransferMessage[Seq[Action]]] = {
    val messages = JdbcMessages.getRootAsJdbcMessages(byteBuffer)
    scala.collection.immutable.Seq.tabulate(messages.messagesLength())(messages.messages).map(apply)
  }

  /** Decode a byte array containing a batch of messages. */
  def apply(bytes: Array[Byte]): Seq[TransferMessage[Seq[Action]]] = {
    val messages = JdbcMessages.getRootAsJdbcMessages(ByteBuffer.wrap(bytes))
    scala.collection.immutable.Seq.tabulate(messages.messagesLength())(messages.messages).map(apply)
  }

  // Serialize one SQL (NameSql is first compiled down to a PreparedSql).
  private def unapply(builder: FlatBufferBuilder, sql: Sql): Int = {
    val preparedSql = sql match {
      case nameSql: NameSql ⇒ nameSql.toPreparedSql
      case preparedSql: PreparedSql ⇒ preparedSql
    }
    val params = JdbcStatement.createParamsVector(builder, preparedSql.params.map(param ⇒ FbsJdbcParam(builder, param)).toArray)
    JdbcStatement.createJdbcStatement(builder, builder.createString(preparedSql.sql), params)
  }

  /** Serialize one transfer message (its TId plus all of its actions). */
  def unapply(record: TransferMessage[Seq[Action]], builder: FlatBufferBuilder): Int = {
    val tId = JdbcMessage.createTidVector(builder, record.id.toBytes)
    val jdbcActions = record.data.map {
      case update: Update ⇒
        val statements = JdbcAction.createStatementsVector(builder, Array(unapply(builder, update.sql)))
        JdbcAction.createJdbcAction(builder, ActionType.Update, statements)
      case upsert: Upsert ⇒
        val statements = JdbcAction.createStatementsVector(builder, Array(unapply(builder, upsert.up), unapply(builder, upsert.sert)))
        JdbcAction.createJdbcAction(builder, ActionType.Upsert, statements)
    }
    val actions = JdbcMessage.createActionsVector(builder, jdbcActions.toArray)
    JdbcMessage.createJdbcMessage(builder, tId, actions)
  }

  /**
   * Serialize a batch of transfer messages into a finished builder.
   *
   * @param initialCapacity initial size hint for the FlatBufferBuilder
   */
  def unapply(records: Seq[TransferMessage[Seq[Action]]], initialCapacity: Int): FlatBufferBuilder = {
    val builder = new FlatBufferBuilder(initialCapacity)
    val messages = JdbcMessages.createMessagesVector(builder, records.map(unapply(_, builder)).toArray)
    val root = JdbcMessages.createJdbcMessages(builder, messages)
    builder.finish(root)
    builder
  }
}
} | huanwuji/teleporter | src/main/scala/teleporter/integration/protocol/fbs/FbsJdbc.scala | Scala | agpl-3.0 | 6,314 |
package zzb.domain
import akka.actor._
import akka.pattern._
import akka.util.Timeout
import shapeless.{::, HNil}
import spray.http.StatusCodes._
import spray.json.{DefaultJsonProtocol, JsArray}
import zzb.datatype._
import zzb.domain.directive._
import zzb.rest.{StatusCode, _}
import zzb.rest.directives.{MethodDirectives, OnCompleteFutureMagnet, OnSuccessFutureMagnet}
import zzb.storage._
import zzb.util._
import scala.collection.immutable
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.control.NonFatal
import scala.util.{Failure, Random, Success, Try}
/**
* Created by Simon on 2014/4/16
*
* 当一个 MultiQuoteActor 被创建时,有两种可能
* 1. 用户请求创建一个新DomainActor (isNewCreate = true)
* 2. 用户需要访问一个已存在的DomainActor (isNewCreate = false)
*
* 如果是第二种情况,会立即装载这个DomainActor,
* 如果不存在,该Actor收到的所有请求都会响应404
* 如果存在,对每一个请求要检查操作者,如果是用户操作,不是该多方的所有人响应403(禁止访问)
* 如果是业管操作则通过(先不检查业管的具体权限)
*
* URL段说明
* 1. /latest/{/...} 【Get】 对最新版本的操作请求,一般只有Get动作,不支持Put修改
* 2. /ver/{versionNum}/{/...} 【Get】 对指定版本的操作请求,也是只读(Get)
* /tag/{tagStr}/{/...} 【Get】 对指定Tag的操作请求,也是只读(Get)
* 3. /versions 【Get】 获得版本列表
* 4. /alter{/...} 【Put/Delete】 直接修改指定路径的数据(会将变更会话的 “创建”、“修改”、“提交”一次完成
* 5. /alter{/...} 【Post】 申请修改指定路径,返回用于修改的URL(包含一个修改会话码,alterSid),如果已经有人在修改,返回修改者的工号或业管号
* /alter/{alterSid}{/...} 【Get】 正在修改的版本的数据,包含已经修改的内容
* /alter/{alterSid}{/...} 【Put】 修改文档指定路径的内容
* /alter/{alterSid}{/...} 【Delete】 删除文档指定路径的内容
* /alter/{alterSid} 【Delete】 放弃修改内容
* /alter/{alterSid} 【Post】 提交本次修改,alterSid将会失效,/latest/... 将会得到最新版本的内容
* /alter/{alterSid}?action={actionName} 【Post】 提交本次修改,同时执行处理动作 . alterSid将会失效,/latest/... 将会得到最新版本的内容
* 6. /action/{actionName} 【Post】 执行动作,如果有未提交的变更会话,会报409错误
* /action/{actionName}?force=true 【Post】 强制执行动作,所有有未提交的变更会话会被废弃
* 7. 【Delete】 将文档从缓存中清理,并将actor销毁(并没有从缓存中删除文档)
*
*/
trait DomainActor[K, KT <: DataType[K], T <: TStorable[K, KT]] extends RestServiceActor with DocProcessor[K, KT, T]
with AuthorizeDirectives with DomainDirectives with DomainLogging {
val docType = specStorage.storage.driver.docType
/**
* 领域对象主键
*/
def domId: K
private val hlog = HeritLog("dom")
//获取命名超时
val getTimeout: String => Timeout = str => Timeout(2000.milli)
//是否每次保存数据时都立即存到数据库
def alwaysflush = true
//自动保存到数据库的时间间隔(秒)
def autoFlushIntervalSeconds = 60
/**
* 创建时的所有者
*/
val createOwner: Option[String]
/**
* 是否是新创建
*/
val isNewCreate: Boolean
val domainType: T
def docState: Int
def getOwnerFromDoc(doc: T#Pack): String
override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
hlog(log.error(reason, "preRestart"))
super.preRestart(reason, message)
}
override def postStop(): Unit = {
hlog(log.info("domain actor stoped!"))
flush()
}
override def receive: Receive = msgReceive orElse restReceive
protected def restReceive = runRoute(domainRoute)
def domainRoute: Route =
pathEndOrSingleSlash {
MethodDirectives.delete {
onComplete(specStorage.release()) {
case Success(Some(d)) =>
self ! PoisonPill
complete(s"Actor $domId Released")
case Success(None) =>
self ! PoisonPill
complete(s"Actor $domId Released")
case Failure(e) =>
self ! PoisonPill
complete(InternalServerError, s"Actor已销毁,但从缓存清理失败:${e.getMessage}")
}
} ~ docFutureRoute(latest)
} ~
pathPrefix("ver" / IntNumber) {
verNum: Int =>
docFutureRoute(load(verNum))
} ~ pathPrefix("tag" / Segment) {
tag =>
docFutureRoute(load(tag))
} ~ pathPrefix(Segment) {
case "latest" => docFutureRoute(latest)
case "alter" => alterRoute
case "versions" => versionsRoute
case "action" => actionRoute
case _ => reject
}
import zzb.domain.DomainActor._
private var initWaits = List[ActorRef]()
private var ctxWhenIniting = List[RestReqContext]()
import zzb.domain.DocLoadStatus._
private var docLoadStatus = DocUnload
private var initResult: InitDocOver[_] = _
/**
 * Actor-message handling (non-REST): document-load lifecycle, periodic
 * flushes, and alter-session bookkeeping. REST requests arriving while the
 * document is still loading are queued and replayed once loading finishes.
 */
protected def msgReceive: Receive = {
  // periodic trigger: persist the cached document
  case AutoFlush => flush()
  case StartInit =>
    docLoadStatus match {
      case DocUnload =>
        // first request: kick off the load and remember the asker
        docLoadStatus = DocLoading
        initLoadDoc
        if (sender() != self) initWaits = sender :: initWaits
      case DocLoading =>
        // load already in flight: just remember the asker
        if (sender() != self) initWaits = sender :: initWaits
      case _ =>
        // load already finished (ok or failed): reply with the cached result
        if (initResult != null) sender ! initResult
    }
  case res: InitDocOver[_] =>
    res match {
      case m@InitDocOver(Right(doc)) =>
        docLoadStatus = DocLoaded
        hlog(sysopt)(log.info(s"doc loaded!"))
        // schedule periodic flushes only when saves are not flushed eagerly
        if(!alwaysflush && autoFlushIntervalSeconds > 0)
          context.system.scheduler.schedule(autoFlushIntervalSeconds.seconds,autoFlushIntervalSeconds.seconds,self,AutoFlush)
      case m@InitDocOver(Left(e@RestException(_))) =>
        // expected REST-level failure (e.g. 404): log and stop this actor
        docLoadStatus = DocLoadFailed
        hlog(sysopt)(log.info(e.err.value))
        self ! PoisonPill
      case m@InitDocOver(Left(e)) =>
        // unexpected failure: log and stop this actor
        docLoadStatus = DocLoadFailed
        hlog(sysopt)(log.error(s"doc load failed!"))
        self ! PoisonPill
    }
    initResult = res
    // initDocOver must be invoked before notifying the waiters (initWaits.map(_ ! res))
    initDocOver(res.asInstanceOf[InitDocOver[T]])
    if (initWaits.size > 0) {
      initWaits.map(_ ! res)
      initWaits = Nil
    }
    // replay REST requests that were queued while the document was loading
    if (ctxWhenIniting.size > 0) {
      ctxWhenIniting.map(self ! _)
      ctxWhenIniting = Nil
    }
  case AlterSessionsChanged(sessions, Some(oldDoc), Some(newDoc), change) =>
    // a session submitted changes: record sessions and fire change monitors
    as = sessions
    DocChanged(oldDoc, newDoc, change)
  case AlterSessionsChanged(sessions, _, _, _) => as = sessions
  case ctx: RestReqContext if docLoadStatus == DocLoading =>
    // buffer REST requests until the document load completes
    ctxWhenIniting = ctx :: ctxWhenIniting
}
private def initLoadDoc() = {
hlog(sysopt)(log.info("fsm starting by load latest doc ... "))
latest.onComplete {
case Success(Some(mq)) =>
self ! InitDocOver(Right(mq))
//hlog(sysopt)(log.info(s"${if (isNewCreate) "create" else "load"} [${docType.t_memo_}] success!"))
case Success(None) =>
// self ! InitDocOver(Left(new ResourceNotFound(domId.toString)))
self ! InitDocOver(Left(new RestException(StatusCodes.NotFound.copy()(reason = s"request resource ${domId.toString} not found!", defaultMessage = StatusCodes.NotFound.defaultMessage))))
//hlog(sysopt)(log.warning(s"${if (isNewCreate) "create" else "load"} [${docType.t_memo_}] failed!"))
case Failure(e) =>
self ! InitDocOver(Left(e))
//hlog(sysopt)(log.error(s"${if (isNewCreate) "create" else "load"} [${docType.t_memo_}] error!"))
}
}
def initDocOver(res: InitDocOver[T]) {}
def startInit() {
self ! StartInit
}
def alterVerify(curDoc: T#Pack, path: StructPath, fieldValue: Option[ValuePack[Any]],
operator: AuthorizedOperator): Either[(StatusCode, String), Option[ValuePack[Any]]]
def alterPathVerify(path: StructPath, operator: AuthorizedOperator): Option[String] = None
def submitAlter(doc: T#Pack, operator: AuthorizedOperator, seq: Int,
action: Option[String] = None, params: Map[String, String] = Map[String, String]()): Future[(StatusCode, ActionResult)] = {
if (action.isDefined)
hlog(operator)(log.info("alter session {} submitting with action '{}' ...", seq, action.get))
else
hlog(operator)(log.info("alter session {} submitting... ", seq))
val p = Promise[(StatusCode, ActionResult)]()
val newTag = params.get("tag")
val doFlush = params.contains("flush") || alwaysflush
val f1 = save(doc,operator.id,operator.isManager,doFlush,newTag.getOrElse("")).map { savedDocOpt =>
val savedDoc = savedDocOpt.get
hlog(log.info("saved! ver = {} revise = {}", savedDoc.version, savedDoc.revise))
(StatusCodes.OK, ActionResult(0, "AlterOK", VersionRevise(savedDoc.version, savedDoc.revise)))
}
val res_f = if (action.isDefined) {
for {
v1 <- f1
v2 <- self.ask(Action(action.get, operator, params, None))(getTimeout(s"action-${action.get}"))
} yield v2
}
else f1
res_f.onComplete {
case Success(v) => p.success(anyToActionResult(v))
case Failure(e) => p.failure(e)
}
p.future
}
val owner: Future[Option[String]] = latest.map {
case None => None
case Some(v: T#Pack) => Some(getOwnerFromDoc(v))
}
protected def DocChanged(oldDoc: T#Pack, newDoc: T#Pack, change: List[StructPath]) {
val oldVer = oldDoc.version
val newVer = newDoc.version
val invokedPath = scala.collection.mutable.Set[StructPath]() //记录已经激活的监听器,确保监听器不会被调用多次
change.foreach { changedPath =>
alterMonitors.foreach { monitor =>
if (!invokedPath.contains(monitor.path) && (
monitor.path.contains(changedPath) || changedPath.contains(monitor.path)
)
) {
val notifyPath = if (monitor.onlyChangedPath) changedPath else monitor.path
val oldData = notifyPath.getDomainData(oldDoc)
val newData = notifyPath.getDomainData(newDoc)
invokedPath.add(monitor.path)
if (oldData != newData) {
monitor.handler(notifyPath, newData, newVer, oldData, oldVer)
}
}
}
}
}
implicit def dtToPath(dt: DataType[Any]): Path = Path(dt.path)
implicit def dtFunToPath(dtFun: () => DataType[Any]): Path = Path(dtFun().path)
implicit def dtIdxToPath(dtIdx: (DataType[Any], Int)): Path = Path(dtIdx._1.path + "/" + dtIdx._2)
implicit def dtFunIdxToPath(dtFunIdx: (() => DataType[Any], Int)): Path = Path(dtFunIdx._1().path + "/" + dtFunIdx._2)
implicit def dtKeyToPath(dtKey: (DataType[Any], String)): Path = Path(dtKey._1.path + "/" + dtKey._2)
implicit def dtKeyFuncToPath(dtKey: (() => DataType[Any], String)): Path = Path(dtKey._1().path + "/" + dtKey._2)
case class Path(value: String)
type AlterMonitorHandler = (StructPath, Option[Any], Int, Option[Any], Int) => Unit
case class AlterMonitor(path: StructPath, handler: AlterMonitorHandler,onlyChangedPath:Boolean)
private var alterMonitors = List[AlterMonitor]()
/**
*
* @param path 监控的路径
* @param monitorHandler 数据变更的处理函数
* @param notifyRealChangedPath 如果为 true ,monitorHandler 被调用时,传入的额路径是实际变更的路径;
* 如果为false 传入的路径为监控的路径
*/
final def monitorAlter(path: StructPath, monitorHandler: AlterMonitorHandler,notifyRealChangedPath :Boolean = false): Unit =
alterMonitors :+= AlterMonitor(path, monitorHandler,notifyRealChangedPath)
var as = List[AlterSession]()
val actionBuilder: ActionBuilder = null
def actionRoute: Route =
if (actionBuilder == null)
reject
else
post {
innerActionRoute(actionBuilder.actions.toMap)
}
private def innerActionRoute(actionDefs: Map[String, ActionDefine]): Route = {
val routes = for ((actName, define) <- actionDefs) yield {
define.entityType match {
case None =>
path(actName) {
operator {
opt =>
if (supportAction(opt, actName, withEntity = false))
parameterMap { params =>
val action = new Action(actName, opt, params, None)
actionExeRouteCheckSessionConflict(action)
}
else {
val msg = s"${if (opt.isManager) "manager" else "user"} can't execute action $actName"
hlog(opt)(log.warning(msg))
complete(Forbidden, ActionResult(docState, msg, VersionRevise(-1, -1)))
}
}
}
case Some(dt) =>
path(actName) {
operator {
opt =>
if (supportAction(opt, actName, withEntity = true))
entity(unpack(dt)) { pk =>
parameterMap { params =>
val action = new Action(actName, opt, params, Some(pk))
actionExeRouteCheckSessionConflict(action)
}
}
else {
val msg = s"${if (opt.isManager) "manager" else "user"} can't execute action $actName"
hlog(opt)(log.warning(msg))
complete(Forbidden, ActionResult(docState, msg, VersionRevise(-1, -1)))
}
}
}
}
}
routes.reduce(_ ~ _) ~ path(Segment) {
actName => complete(NotFound, s"action '$actName' not exist!")
}
}
// Rejects an action with 409 Conflict when unsubmitted alter sessions exist,
// unless the request carries force=true (which abandons those sessions first).
def actionExeRouteCheckSessionConflict(a: Action): Route = {
  val params = a.params
  val force = params.contains("force") && params("force").toLowerCase == "true"
  (as.size, force) match {
    case (0, _) => clearSessionBeforeActionExeRoute(a)
    case (_, true) => clearSessionBeforeActionExeRoute(a)
    case (n, _) =>
      // open sessions and no force flag: refuse, returning the session list
      // (seq is masked to -1 so session ids are not leaked)
      hlog(a.opt)(log.warning("refuse execute action '{}' for unsubmit({}) alter session ", a.name, n))
      val msg = JsArray(as.map(_.copy(seq = -1)).map(AlterSession.format.write):_*)
      complete(Conflict, msg)
  }
}
def clearSessionBeforeActionExeRoute(a: Action): Route = {
if (as.size > 0) {
hlog(a.opt)(log.info("force close all({}) alter sessions ", as.size))
onSuccess(context.child("alter").get.ask(AlterHalt)(500 milliseconds)) {
case _ =>
hlog(a.opt)(log.info("request execute action '{}' ", a.name))
actionExeRoute(a)
}
} else {
hlog(a.opt)(log.info("request execute action '{}' ", a.name))
actionExeRoute(a)
}
}
//实际启动Action执行
def actionExeRoute(a: Action): Route =
onComplete(self.ask(a)(getTimeout(s"action-${a.name}"))) {
case Success(v) =>
val res = anyToActionResult(v)
complete(res._1, res._2)
case Failure(e) => failWith(e)
}
/**
 * Normalizes the heterogeneous replies produced by action handlers into a
 * (StatusCode, ActionResult) pair for the HTTP layer.
 *
 * NOTE(review): `case doc: T#Pack` and `case Some(doc: T#Pack)` are subject
 * to type erasure at runtime and may match more broadly than the static type
 * suggests — confirm callers only send the expected shapes.
 */
protected def anyToActionResult(any: Any): (StatusCode, ActionResult) = any match {
  case as: ActionResult =>
    (OK, as)
  case (sc: StatusCode, ar: ActionResult) =>
    (sc, ar)
  case (sc: StatusCode, ae: ActionError) =>
    (sc, ActionResult(ae.intValue, ae.reason, VersionRevise(-1, -1)))
  case (sc: StatusCode, ar: String) =>
    (sc, ActionResult(-1, ar, VersionRevise(-1, -1)))
  case doc: T#Pack =>
    (OK, ActionResult(-1, "OK2", VersionRevise(doc.version, doc.revise)))
  case Some(doc: T#Pack) =>
    (OK, ActionResult(-1, "OK1", VersionRevise(doc.version, doc.revise)))
  case e: Throwable =>
    (InternalServerError, ActionResult(-1, e.getMessage, VersionRevise(-1, -1)))
  case _ =>
    (InternalServerError, ActionResult(-1, "Failed", VersionRevise(-1, -1)))
}
/**
* 检查请求执行的动作是否存在,并且操作者是否有权限执行这个动作,并且满足数据实体要求。
* 检查结果与状态无关。 通过了这个检查,只是表明命令存在且操作者具有执行这个命令的权
* 限,但也有可能数据状态不允许执行这个命令。
* @param opt 操作者
* @param action 动作名称
* @return
*/
def supportAction(opt: AuthorizedOperator, action: String, withEntity: Boolean): Boolean =
if (actionBuilder == null) false
else {
actionBuilder.actions.get(action) match {
case Some(actDef) => //
actDef.roleCheck(opt) && actDef.entityType.isDefined == withEntity
case None => false
}
}
def docFutureRoute(docFuture: Future[Option[T#Pack]]): Route = {
val docNodeRoute: DomainRoute = {
path =>
get {
onSuccess(docFuture) {
case Some(d) =>
if (path.inStructPath.size == 1) complete(d)
else {
path.getDomainData(d) match {
case Some(v) =>
complete(v)
case None =>
complete(NotFound)
}
}
case None =>
complete(NotFound)
}
}
}
handDoc(domainType, docNodeRoute)
}
def versionsRoute: Route = get {
onSuccess(versions) {
case vers: Seq[_] => complete(VersionInfos(vers.toList))
}
}
def alterRoute: Route = ctx => {
context.child("alter").getOrElse(
context.actorOf(Props(new AlterManagerActor), "alter")
).forward(ctx)
}
case object AlterHalt
case class AlterSessionsChanged(sessions: List[AlterSession], oldDoc: Option[T#Pack], newDoc: Option[T#Pack], change: List[StructPath])
class AlterManagerActor extends RestServiceActor with AuthorizeDirectives {
case class AlterOver(seq: Int, oldDoc: T#Pack, newDoc: Option[T#Pack] = None, change: List[StructPath] = Nil)
def receive: Receive = rcv orElse runRoute(alterRoute)
val random = new Random(java.lang.System.currentTimeMillis)
var curAlterInfo: AlterSession = _
val alterSessions = scala.collection.mutable.Map[Int, AlterSession]()
/**
 * Generates the next alter-session sequence number: a random value in
 * [0, 10000000) that is not already used by a live session.
 *
 * Fixes: the previous `random.nextInt().abs % 10000000` could produce a
 * negative seq, because `Int.MinValue.abs == Int.MinValue`. The bounded
 * `nextInt(n)` overload guarantees a non-negative, uniformly distributed
 * result.
 *
 * @return an unused, non-negative session sequence number
 */
@scala.annotation.tailrec
private def nextSeq: Int = {
  val seq = random.nextInt(10000000)
  if (!alterSessions.contains(seq)) seq else nextSeq
}
/** Looks up a child actor by name; absence maps to a 404-style Left. */
def childByName(name: String) =
  context.child(name).toRight((NotFound, "not found"))
def rcv: Receive = {
case AlterOver(seq, oldDoc, newDoc, change) =>
alterSessions.remove(seq)
context.parent ! AlterSessionsChanged(alterSessions.values.toList, Some(oldDoc), newDoc, change)
case AlterHalt =>
val children = context.children
context.actorOf(Props(new AlterTerminatorActor(children, sender())))
children.foreach { c => c ! AlterHalt}
}
def alterRoute: Route =
pathPrefixTest(IntNumber) { seq =>
forwardChild(childByName, context)
} ~ handDoc(domainType, alterRequestRoute)
//申请启动一个变更会话,返回变更会话序号,或返回409冲突的错误
def alterRequestRoute(path: StructPath): Route =
operator {
opt =>
onSuccess(latest) {
case None => reject
case Some(doc) =>
alterPathVerify(path, opt) match {
case Some(msg) =>
ctx =>
ctx.complete(Forbidden, ActionResult(-1, msg, VersionRevise(doc.version, doc.revise)))
case None =>
alterSessions.values.find(s => s.path.startsWith(path.toString) || path.toString.startsWith(s.path)) match {
case Some(s) =>
ctx =>
//变更请求冲突
val msg = AlterSession.format.write(s.copy(seq = -1)).toString()
hlog(opt)(log.info("alter request reused for conflict,request {},unsubmit {}", path, s.path))
ctx.complete(Conflict, ActionResult(docState, msg, VersionRevise(doc.version, doc.revise)))
case None => //没有冲突的变更会话,可以执行新的变更
path.inStructPath.through match {
case listType: TList[_] => //pathGet(path) ~ pathPut(path) ~ listPost(path) ~ pathDelete(path)
directAlterPut(opt, doc, path) ~ directAlterDelete(opt, doc, path) ~ post {
hasEntity {
case true =>
val listPath = ListPath(path.inStructPath, -1)
mapRequest(_.copy(method = RestMethods.PUT)) {
directAlterPut(opt, doc, listPath)
}
case false =>
newAlterSession(opt, doc, path) ~ directAlterDelete(opt, doc, path)
}
}
case _ => newAlterSession(opt, doc, path) ~ directAlterPut(opt, doc, path) ~ directAlterDelete(opt, doc, path)
}
}
}
}
}
def newAlterSession(opt: AuthorizedOperator, doc: T#Pack, path: StructPath): Route = post { ctx =>
val seq = nextSeq
val newSession = AlterSession(seq, opt, VersionRevise(doc.version, doc.revise), path.toString)
context.actorOf(Props(new AlterActor(path, doc, newSession)), seq.toString)
hlog(opt)(log.info("alter session {} created,{}", seq, path))
alterSessions(seq) = newSession
context.parent ! AlterSessionsChanged(alterSessions.values.toList, None, None, Nil)
ctx.complete(ActionResult(seq, "", VersionRevise(doc.version, doc.revise)))
}
//执行直接修改的 put 动作
def directAlterPut(opt: AuthorizedOperator, doc: T#Pack, path: StructPath): Route = put {
parameterMap { params =>
val merge = params.get("merge")
val action = params.get("action")
entity(unpack(path.targetType)) {
pk =>
onComplete(execDirectAlter(doc, Some(pk), opt, path, merge, action, params)) {
case Success(res) =>
ctx =>
if (res._1 == OK) hlog(opt)(log.info("direct alter success,{} = {}", path, pk))
ctx.complete(res._1, res._2)
case Failure(e: Throwable) =>
ctx =>
hlog(opt)(log.error("direct alter data failed,{}. reason:{}", path, e.stackTrace))
ctx.complete(InternalServerError, ActionResult(docState, e.getMessage, VersionRevise(doc.version, doc.revise)))
}
}
}
}
//执行直接修改的 delete 动作
def directAlterDelete(opt: AuthorizedOperator, doc: T#Pack, path: StructPath): Route = delete {
//parameters("action".as[String] ?) { action =>
parameterMap { params =>
val action = params.get("action")
onComplete(execDirectAlter(doc, None, opt, path, Some(MergeManner.Replace.toString), action, params)) {
case Success(res) => complete(res._1, res._2)
case Failure(e: RequiredFieldNotSetException) =>
complete(BadRequest, ActionResult(docState, e.getMessage, VersionRevise(doc.version, doc.revise)))
case Failure(e:AlterDataFailed) =>
ctx =>
hlog(opt)(log.warning("direct delete data failed,{}. reason:{}", path, e.cause.stackTrace))
ctx.complete(BadRequest, ActionResult(docState, e.cause.getMessage, VersionRevise(doc.version, doc.revise)))
case Failure(e: Throwable) =>
ctx =>
hlog(opt)(log.warning("direct delete data failed,{}. reason:{}", path, e.stackTrace))
ctx.complete(InternalServerError, ActionResult(docState, e.getMessage, VersionRevise(doc.version, doc.revise)))
}
}
}
//不创建 AlterActor 直接修改数据,在这一个函数中完成变更会话的创建、执行、提交/放弃
def execDirectAlter(orientDoc: T#Pack, data: Option[ValuePack[Any]], opt: AuthorizedOperator, path: StructPath,
merge: Option[String], action: Option[String], params: Map[String, String]): Future[(StatusCode, ActionResult)] = {
val promise = Promise[(StatusCode, ActionResult)]()
if (path.inStructPath.length == 1 && data.isEmpty) {
promise.success((Forbidden, ActionResult(docState, "can't delete total document", VersionRevise(orientDoc.version, orientDoc.revise))))
} else if (action.isDefined && !supportAction(opt, action.get, withEntity = false)) {
val msg = s"${if (opt.isManager) "manager" else "user"} can't execute action ${action.get}"
hlog(opt)(log.warning(msg))
promise.success((Forbidden, ActionResult(docState, msg, VersionRevise(orientDoc.version, orientDoc.revise))))
} else {
//校验能否进行数据修改
alterVerify(orientDoc, path, data, opt) match {
//可以修改数据
case Right(goodPk) =>
//创建变更会话
val seq = nextSeq
val newSession = AlterSession(seq, opt, VersionRevise(orientDoc.version, orientDoc.revise), path.toString)
//context.actorOf(Props(new AlterActor(path, doc, newSession)), seq.toString)
alterSessions(seq) = newSession
context.parent ! AlterSessionsChanged(alterSessions.values.toList, None, None, Nil)
try {
//执行数据修改
val alteredDoc = path.alterDomainData(orientDoc, goodPk, MergeManner.fromString(merge))
//提交数据修改
submitAlter(alteredDoc, opt, seq, action, params).onComplete {
case Success(res) =>
self ! AlterOver(seq, orientDoc, Some(alteredDoc), List(path))
promise.success(res)
case Failure(e: Throwable) =>
self ! AlterOver(seq, orientDoc)
promise.failure(e)
}
} catch {
case e: Throwable =>
self ! AlterOver(seq, orientDoc)
promise.failure(AlterDataFailed("",e))
}
//无权修改数据
case Left((statusCode, errMsg)) =>
hlog(opt)(log.info("refuse direct alter {}", path.toString))
promise.success((statusCode, ActionResult(docState, errMsg, VersionRevise(orientDoc.version, orientDoc.revise))))
}
}
promise.future
}
case object GetAlterSessions
class AlterActor(val allowPath: StructPath, val orientDoc: T#Pack, alter: AlterSession) extends RestServiceActor with AuthorizeDirectives with DomainDirectives {
def receive: Receive = innerReceive orElse runRoute(route)
var alteredDoc = orientDoc
//变更列表,只记路径,后修改的路径在头部
var changeList = List[StructPath]()
def innerReceive: Receive = {
case AlterHalt =>
context.parent ! AlterOver(alter.seq, orientDoc)
self ! PoisonPill
}
def route: Route =
operatorIs(alter.opt.roles.keySet) {
handDoc(domainType, domainRoute)
}
//获取指定路径的数据
private def pathGet(path: StructPath): Route = get {
ctx =>
path.getDomainData(alteredDoc) match {
case Some(v) => ctx.complete(v)
case None => ctx.complete(NotFound)
}
}
//修改指定路径的数据,pathIn 用来检查要修改的路径是否在允许的范围(创建变更会话时指定的)之内
private def pathAlter(path: StructPath): Route = {
def checkAbandon(abandon: Boolean, opt: AuthorizedOperator) {
if (abandon) {
hlog(opt)(log.warning("alter session {} force abandon for alter error", alter.seq))
context.parent ! AlterOver(alter.seq, orientDoc)
self ! PoisonPill
}
}
pathIn(allowPath, path) {
operator {
opt =>
parameters("merge".as[String] ?, 'errorThenAbandon.as[Boolean] ?) {
(merge, errorThenAbandon) =>
entity(unpack(path.targetType, () => {
errorThenAbandon.asInstanceOf[Option[Boolean]].map(checkAbandon(_, opt))
})) {
pk => ctx =>
alterVerify(alteredDoc, path, Some(pk), opt) match {
case Right(goodPk) =>
try {
alteredDoc = path.alterDomainData(alteredDoc, goodPk, MergeManner.fromString(merge))
changeList = path :: changeList
hlog(opt)(log.info("alter session {} set data success,{} = {}", alter.seq, path, goodPk.map(_.toJsValue).getOrElse("")))
ctx.complete(ActionResult(0, "", VersionRevise(alteredDoc.version, alteredDoc.revise)))
} catch {
case e: Throwable =>
hlog(opt)(log.warning("alter session {} set data failed,{} = {}", alter.seq, path, goodPk.map(_.toJsValue).getOrElse("")))
val msg = e.getMessage
errorThenAbandon.asInstanceOf[Option[Boolean]].map(checkAbandon(_, opt))
ctx.complete(BadRequest, msg)
}
case Left((statusCode, errMsg)) =>
errorThenAbandon.asInstanceOf[Option[Boolean]].map(checkAbandon(_, opt))
ctx.complete(statusCode, errMsg)
}
}
}
}
}
}
private def pathDelete(path: StructPath): Route = {
def checkAbandon(abandon: Boolean, opt: AuthorizedOperator) {
if (abandon) {
hlog(opt)(log.warning("alter session {} force abandon for alter error", alter.seq))
context.parent ! AlterOver(alter.seq, orientDoc)
self ! PoisonPill
}
}
delete {
pathIn(allowPath, path) {
operator {
opt =>
parameters("errorThenAbandon".as[Boolean] ?) { errorThenAbandon =>
ctx =>
alterVerify(alteredDoc, path, None, opt) match {
case Right(goodPk) =>
try {
alteredDoc = path.alterDomainData(alteredDoc, goodPk, MergeManner.Replace)
changeList = path :: changeList
ctx.complete(ActionResult(0, "", VersionRevise(alteredDoc.version, alteredDoc.revise)))
} catch {
case e: Throwable =>
hlog(opt)(log.warning("alter session {} delete data failed,{}", alter.seq, path))
errorThenAbandon.map(checkAbandon(_, opt))
ctx.complete(BadRequest, e.getMessage)
}
case Left((statusCode, errMsg)) =>
ctx.complete(statusCode, errMsg)
}
}
}
}
}
}
// Put方法修改指定路径的数据
private def pathPut(path: StructPath): Route = put {
pathAlter(path)
}
//Post方法向列表末尾添加数据,转换成与 Put 方法相同的处理模式,但是在路径上把列表的索引值设置为-1
private def listPost(path: StructPath): Route = post {
pathAlter(ListPath(path.inStructPath, -1))
}
//提交变更会话
private def alterSubmit: Route = post {
operator {
opt =>
// 检查有没有同时提交的动作请求
parameterMap { params =>
val action: Option[String] = params.get("action")
if (action.isDefined && !supportAction(opt, action.get, withEntity = false)) {
val msg = s"${if (opt.isManager) "manager" else "user"} can't execute action ${action.get}"
hlog(opt)(log.warning("alter session {} submit failed,force abandon {}! reason:{}", alter.seq, alter.path, msg))
context.parent ! AlterOver(alter.seq, orientDoc)
complete(Forbidden, ActionResult(docState, msg, VersionRevise(orientDoc.version, orientDoc.revise)))
} else {
onComplete(submitAlter(alteredDoc, opt, alter.seq, action, params)) {
//结束修改,提交文档
case Success(res) =>
ctx =>
hlog(opt)(log.info("alter session {} submited,{}", alter.seq, alter.path))
context.parent ! AlterOver(alter.seq, orientDoc, Some(alteredDoc), changeList)
self ! PoisonPill
ctx.complete(res._1, res._2)
case Failure(e: Throwable) =>
ctx =>
hlog(opt)(log.error("alter session {} submit failed,force abandon {}! reason:{}", alter.seq, alter.path, e.stackTrace))
context.parent ! AlterOver(alter.seq, orientDoc)
complete(InternalServerError, ActionResult(docState, e.getMessage, VersionRevise(alteredDoc.version, alteredDoc.revise)))
}
}
}
}
}
//放弃变更会话
private def alterAbandon: Route = delete {
operator {
opt =>
ctx =>
hlog(opt)(log.info("alter session {} abandon", alter.seq))
context.parent ! AlterOver(alter.seq, orientDoc)
self ! PoisonPill
ctx.complete(ActionResult(docState, "", VersionRevise(orientDoc.version, orientDoc.revise)))
}
}
private def domainRoute(path: StructPath): Route =
if (path.inStructPath.size == 1) {
pathGet(path) ~ pathPut(path) ~ alterSubmit ~ alterAbandon
} else {
path.inStructPath.through match {
case listType: TList[_] => pathGet(path) ~ pathPut(path) ~ listPost(path) ~ pathDelete(path)
case _ => pathGet(path) ~ pathPut(path) ~ pathDelete(path)
}
}
}
}
}
/**
 * Companion: messages and directive adapters for driving a DomainActor's
 * initial document load.
 */
object DomainActor {
  // operator identity used for system-initiated operations (doc load, etc.)
  val sysopt = AuthorizedOperator("system", isManager = true)
  // message: ask the domain actor to start loading its document
  case object StartInit
  // message: periodic trigger for flushing the cached document to storage
  case object AutoFlush
  // outcome of the initial load: Right(doc) on success, Left(error) otherwise
  case class InitDocOver[T <: TStruct](result: Either[Throwable, T#Pack])
  // Enriches an ActorRef with a typed ask for StartInit.
  implicit class WaitInit(domainActor: ActorRef) {
    def startInit[T <: TStruct](implicit timeout: Timeout): Future[InitDocOver[T]] = {
      (domainActor ? StartInit).asInstanceOf[Future[InitDocOver[T]]]
    }
  }
  // Magnet-pattern adapter: lets a Future[InitDocOver] be used with the
  // onSuccess directive, exposing the Either result to the inner route.
  implicit def checkSuccess[T <: TStruct](future: ⇒ Future[InitDocOver[T]])(implicit hl: HListable[Either[Throwable, T#Pack]], ec: ExecutionContext) =
    new Directive[hl.Out] with OnSuccessFutureMagnet {
      type Out = hl.Out
      def get = this
      def happly(f: Out ⇒ Route) = ctx ⇒ future.onComplete {
        case Success(t) ⇒
          // run the inner route; route-construction failures go to failWith
          try f(hl(t.result))(ctx)
          catch {
            case NonFatal(error) ⇒ ctx.failWith(error)
          }
        case Failure(error) ⇒ ctx.failWith(error)
      }
    }
  // Magnet-pattern adapter for the onComplete directive (Try[InitDocOver]).
  implicit def checkComplete[T <: TStruct](future: ⇒ Future[InitDocOver[T]])(implicit ec: ExecutionContext) =
    new OnCompleteFutureMagnet[InitDocOver[T]] {
      def happly(f: (Try[InitDocOver[T]] :: HNil) ⇒ Route): (RestReqContext) ⇒ Unit = ctx ⇒
        try future.onComplete(t ⇒ f(t :: HNil)(ctx))
        catch {
          case NonFatal(error) ⇒ ctx.failWith(error)
        }
    }
}
case class VersionRevise(version: Int, revise: Int)
/**
 * Watches a set of alter-session actors and reports AlterAbandoned to
 * `reportTo` once every one of them has terminated, then stops itself.
 *
 * Fixes: when `alters` was empty the original never received a Terminated
 * message, so `reportTo` was never notified and this actor leaked. We now
 * report (and stop) immediately in that case. `count` is also initialized
 * directly from the collection size instead of being incremented in a loop.
 */
class AlterTerminatorActor(alters: immutable.Iterable[ActorRef], reportTo: ActorRef) extends Actor {
  // number of watched actors still alive
  private var count = alters.size
  val ss = alters.size
  alters.foreach(context.watch)
  // nothing to wait for: report straight away instead of hanging forever
  if (count == 0) {
    reportTo ! AlterAbandoned
    self ! PoisonPill
  }
  def receive = {
    case Terminated(_) =>
      count = count - 1
      if (count == 0) {
        reportTo ! AlterAbandoned
        self ! PoisonPill
      }
  }
}
// spray-json format for VersionRevise
object VersionRevise extends DefaultJsonProtocol {
  implicit val format = jsonFormat2(VersionRevise.apply)
}
case object AlterAbandoned
// An in-flight alter (edit) session: its sequence number, the operator who
// opened it, the document version it started from, and the path it may modify.
case class AlterSession(seq: Int, opt: AuthorizedOperator, initVer: VersionRevise, path: String)
// spray-json format for AlterSession
object AlterSession extends DefaultJsonProtocol {
  implicit val format = jsonFormat4(AlterSession.apply)
}
// raised when the requested domain document does not exist
case class ResourceNotFound(id: String) extends Exception(s"request resource $id not found!")
// wraps a REST status code as an exception (message is the code's reason text)
case class RestException(err: StatusCode) extends Exception(err.value)
// signals a failure while applying an alter to the document
case class AlterDataFailed(message:String,cause:Throwable) extends Exception(message,cause)
// Lifecycle states of the cached domain document.
// NOTE(review): scala.Enumeration is generally discouraged in favor of a
// sealed ADT (compile-time exhaustiveness); left as-is since call sites
// match with catch-all `_` cases.
private object DocLoadStatus extends Enumeration {
  val DocUnload = Value(1, "Unload")       // load not yet requested
  val DocLoading = Value(2, "Loading")     // load in flight
  val DocLoaded = Value(3, "Loaded")       // load succeeded
  val DocNotFound = Value(4, "NotFound")   // declared but unused in this file — TODO confirm
  val DocLoadFailed = Value(5, "Failed")   // load failed; actor poisons itself
}
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils.classpath
import java.io.{File, FileFilter, FilenameFilter}
import java.net.{URLClassLoader, URLDecoder}
import com.typesafe.scalalogging.LazyLogging
import scala.collection.mutable.ArrayBuffer
object ClassPathUtils extends LazyLogging {
// accepts real jar files, excluding sources and javadoc artifacts
private val jarFileFilter = new FilenameFilter() {
  override def accept(dir: File, name: String) =
    name.endsWith(".jar") && !name.endsWith("-sources.jar") && !name.endsWith("-javadoc.jar")
}
// accepts directories only (used for recursive descent)
private val folderFileFilter = new FileFilter() {
  override def accept(pathname: File) = pathname.isDirectory
}
// accepts plain files only
private val fileFilter = new FileFilter() {
  override def accept(pathname: File) = pathname.isFile()
}
/**
 * Resolves jar-name prefixes against an ordered sequence of search locations.
 * Locations are queried lazily, in order, and only until every prefix has
 * matched; a prefix may match several files, all of which are returned
 * (de-duplicated, in discovery order). Unresolved prefixes log a warning.
 *
 * @param jars jar file-name prefixes to look for
 * @param searchPath lazily evaluated providers of candidate files
 * @return the distinct files whose names start with a requested prefix
 */
def findJars(jars: Seq[String], searchPath: Iterator[() => Seq[File]]): Seq[File] = {
  val found = ArrayBuffer.empty[File]
  var unresolved = jars
  while (unresolved.nonEmpty && searchPath.hasNext) {
    val candidates = searchPath.next()()
    unresolved = unresolved.filterNot { prefix =>
      val hits = candidates.filter(f => f.getName.startsWith(prefix))
      found ++= hits
      hits.nonEmpty
    }
  }
  if (unresolved.nonEmpty) {
    logger.warn(s"Could not find requested jars: $unresolved")
  }
  found.distinct.toSeq
}
/**
 * Finds jar files under a directory named by an environment variable.
 *
 * @param home name of the environment variable pointing at a directory
 * @return jars found recursively under that directory, or empty when the
 *         variable is unset or does not reference a directory
 */
def getJarsFromEnvironment(home: String): Seq[File] = {
  val dir = sys.env.get(home).map(new File(_)).filter(_.isDirectory)
  dir match {
    case Some(d) => loadJarsFromFolder(d)
    case None    => Seq.empty
  }
}
/**
 * Finds jar files under `$home/path`, where the base directory comes from an
 * environment variable.
 *
 * @param home name of the environment variable pointing at the base directory
 * @param path sub-path appended to the base directory
 * @return jars found recursively under the resulting directory, or empty when
 *         the variable is unset or the directory does not exist
 */
def getJarsFromEnvironment(home: String, path: String): Seq[File] =
  sys.env.get(home)
    .map(h => new File(new File(h), path))
    .filter(_.isDirectory)
    .map(loadJarsFromFolder)
    .getOrElse(Seq.empty)
/**
 * Finds jar/classpath entries from the classloader of the given class.
 * Only URLClassLoaders can be inspected; any other loader type logs a
 * warning and yields an empty result.
 *
 * NOTE(review): on Java 9+ the application classloader is typically not a
 * URLClassLoader, so this will often hit the warning branch — confirm the
 * supported JVM range.
 *
 * @param clas class whose classloader should be inspected
 * @return classpath entries as files
 */
def getJarsFromClasspath(clas: Class[_]): Seq[File] = {
  clas.getClassLoader match {
    case cl: URLClassLoader => cl.getURLs.map(u => new File(cleanClassPathURL(u.getFile)))
    case cl =>
      logger.warn(s"Can't load jars from classloader of type ${cl.getClass.getCanonicalName}")
      Seq.empty
  }
}
// noinspection AccessorLikeMethodIsEmptyParen
def getJarsFromSystemClasspath(): Seq[File] = {
val urls = ClassLoader.getSystemClassLoader.asInstanceOf[URLClassLoader].getURLs
urls.map(u => new File(cleanClassPathURL(u.getFile)))
}
/**
* Recursively searches folders for jar files
*
* @param dir
* @return
*/
def loadJarsFromFolder(dir: File): Seq[File] = {
val files = Option(dir.listFiles(jarFileFilter)).toSeq.flatten
val children = Option(dir.listFiles(folderFileFilter)).toSeq.flatten.flatMap(loadJarsFromFolder)
files ++ children
}
/**
* Finds URLs of files based on a system property
*
* @param prop
* @return
*/
def getFilesFromSystemProperty(prop: String): Seq[File] = {
Option(System.getProperty(prop)) match {
case Some(path) => path.toString().split(":").map(new File(_)).toSeq.flatMap(loadFiles)
case None =>
logger.debug(s"No files loaded onto classpath from system property: ${prop}")
Seq.empty
}
}
/**
* Recursively searches file for all files. Accepts file or dir.
*
* @param file
* @return
*/
def loadFiles(file: File): Seq[File] = {
if (file.isDirectory) {
val files = Option(file.listFiles(fileFilter)).toSeq.flatten
val childDirs = Option(file.listFiles(folderFileFilter)).toSeq.flatten.flatMap(loadFiles)
files ++ childDirs
} else {
Option(file).toSeq
}
}
def cleanClassPathURL(url: String): String =
URLDecoder.decode(url, "UTF-8").replace("file:", "").replace("!", "")
/**
* <p>Load files (jars, resources, configuration, etc) from a classpath defined by an environmental
* variable following these rules:
* <ul>
* <li>Entries are colon (:) separated</li>
* <li>If the entry ends with "/*", treat it as a directory, and list jars in that
* directory...no recursion</li>
* <li>If the entry is a file then add it</li>
* <li>If the entry is a directory list all files (jars and files) in the directory</li>
* </ul>
* </p>
*
* @param prop - environmental variable
* @return a list of files found in the classpath
*/
def loadClassPathFromEnv(prop: String): Seq[File] = {
val files = sys.env.get(prop).toSeq.flatMap(_.split(':').toSeq).flatMap { entry =>
if (entry.endsWith("/*")) {
new File(entry.dropRight(2)).listFiles(jarFileFilter)
} else {
val f = new File(entry)
if (f.isDirectory) {
Option(f.listFiles).toSeq.flatten
} else {
Seq(f)
}
}
}
logger.debug(s"Loaded env classpath '$prop': ${files.map(_.getAbsolutePath).mkString(":")}")
files
}
}
| tkunicki/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/classpath/ClassPathUtils.scala | Scala | apache-2.0 | 5,598 |
package sampleclean.crowd
import java.net.InetSocketAddress
import java.util.concurrent.ConcurrentHashMap
import com.twitter.finagle.Service
import com.twitter.finagle.builder.{ClientBuilder, Server, ServerBuilder}
import com.twitter.finagle.http._
import com.twitter.finagle.http.service.RoutingService
import com.twitter.util.{Await, Future => TFuture}
import org.apache.spark.rdd.RDD
import org.jboss.netty.handler.codec.http.{HttpRequest, HttpResponse, HttpResponseStatus}
import org.json4s.JsonDSL._
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.json4s.native.Serialization
import org.json4s.native.Serialization.{write => swrite}
import sampleclean.activeml.utils
import sampleclean.crowd.context.{PointLabelingContext, GroupLabelingContext}
import scala.collection.mutable
/**
 * An abstract individual result from the crowd.
 *
 * Concrete subclasses (e.g. [[CrowdResultDouble]]) narrow the type of `value`.
 */
abstract class CrowdResultItem {
  // unique identifier of the data point this result refers to
  val identifier:String
  // the value the crowd produced for the point; type is narrowed by subclasses
  val value:Any
}
/**
 * A crowd result whose value is a double (used for the "er" and "sa" task types).
 *
 * @param identifier the id of the result point.
 * @param value the value of the result point.
 */
case class CrowdResultDouble(identifier:String, value:Double) extends CrowdResultItem
/**
 * A batch of results from the crowd. Deserialized from the JSON the crowd server
 * POSTs back to [[CrowdHTTPServer]].
 *
 * @param group_id the unique identifier for the batch.
 * @param answers a list of individual results in the batch.
 * @tparam T the type of individual results.
 */
case class CrowdResult[T <: CrowdResultItem](group_id: String, answers: List[T])
/**
 * A webserver that requests data from the crowd and waits for results asynchronously.
 *
 * Task groups are registered under a random id before being posted to the crowd server;
 * the crowd server later POSTs results back here, which are routed to the registered
 * result object and cleaned up once every point in the group has been answered.
 */
object CrowdHTTPServer {
  private final val crowdJobURL = "crowds/%s/tasks/"

  // maps task type codes to the class of value the crowd returns for that task type
  private final val taskTypeMap = Map[String, Class[_]](
    "er" -> classOf[Double],
    "sa" -> classOf[Double]
  )

  // Store result objects for in-process groups
  private val results = new ConcurrentHashMap[String, AsyncCrowdResult[_]]()

  // Remember which point ids were in which group.
  private val groupMap = new ConcurrentHashMap[String, Set[String]]()

  // Remember what type of task each group is. Written from the caller's thread in
  // makeRequest and read from finagle worker threads, so this must be a concurrent map
  // (it was previously an unsynchronized mutable.HashMap - a data race).
  private val groupTypes = new ConcurrentHashMap[String, String]()

  // read outside the synchronized blocks in makeRequest, so it must be volatile
  @volatile private var running = false
  private var server: Server = null

  // Build the finagle service to process incoming data.
  private val crowdResultService = new Service[Request, Response] {
    def apply(req: Request): TFuture[Response] = {

      // parse the request JSON
      //implicit val formats = DefaultFormats
      implicit val format = Serialization.formats(NoTypeHints)
      val rawJSON = req.getParam("data")
      println("[SampleClean] Received CrowdLabels")
      // println("GOT DATA FROM THE CROWD! Data: " + rawJSON)

      // get the group id out to figure out the data type
      val parsedJSON = parse(rawJSON)
      val groupId = (parsedJSON \\ "group_id").extract[String]
      val taskType = Option(groupTypes.get(groupId)).getOrElse("")
      val resultType = taskTypeMap.get(taskType)

      // handle the result
      resultType match {
        case Some(d) if d == classOf[Double] => handleResult(parsedJSON.extract[CrowdResult[CrowdResultDouble]])
        case None => throw new RuntimeException("Unknown groupID: " + groupId + " or task type: " + taskType)
        case _ => throw new RuntimeException("Invalid crowd datatype: " + resultType)
      }

      // acknowledge the response
      val res = Response(req)
      res.setStatus(HttpResponseStatus.OK)
      TFuture.value(res)
    }
  }

  // Accept data at the top-level path.
  private val service = RoutingService.byPath {
    case "/" => crowdResultService
  }

  // create the server.
  private val builder = ServerBuilder()
    .codec(new RichHttp[Request](Http.get()))
    .name("CrowdHttpServer")
    //.daemon(true)

  /**
   * Send a group of points to the crowd service for asynchronous processing.
   * @param inputData the points to label, with enough context to label them.
   * @param groupContext context shared by all points in the group.
   * @param crowdConfiguration configuration for the crowd service.
   * @param taskConfiguration configuration options for this group of tasks.
   * @tparam C class for individual point context.
   * @tparam G class for group context.
   * @tparam O type of data returned by the crowd.
   * @return a future that will eventually hold the crowd's response.
   */
  def makeRequest[C <: PointLabelingContext, G <: GroupLabelingContext, O]
    (inputData:RDD[(String, C)], groupContext: G,
     crowdConfiguration: CrowdConfiguration,
     taskConfiguration: CrowdTaskConfiguration): AsyncCrowdResult[O] = {

    // Make sure the server is running
    if (!running) {
      start(crowdConfiguration.responseServerPort)
    }

    // collect the points but save their SparkContext
    implicit val sc = inputData.sparkContext
    val points = inputData.collect()

    // Generate a random id for the group and register it with a result object
    val groupId = utils.randomUUID()
    //val resultObjectClass = classOf[AsyncCrowdResult[O]]
    //val resultObject = resultObjectClass.newInstance()
    val resultObject = new AsyncCrowdResult[O]()
    results.put(groupId, resultObject)
    groupTypes.put(groupId, groupContext.taskType)

    // Register the point ids in the group map.
    groupMap.put(groupId, points.map(_._1).toSet)

    // Generate JSON according to the crowd server's API
    implicit val formats = Serialization.formats(NoTypeHints)
    val pointsJSON = (points map {point => point._1 -> parse(swrite(point._2.content))}).toMap
    val groupContextJSON = parse(swrite(groupContext.data))
    val crowdConfigJSON = parse(swrite(taskConfiguration.crowdTaskOptions))
    val requestData = compact(render(
      ("configuration" ->
        ("task_type" -> groupContext.taskType) ~
          ("task_batch_size" -> taskConfiguration.maxPointsPerTask) ~
          ("num_assignments" -> taskConfiguration.votesPerPoint) ~
          (crowdConfiguration.crowdName -> crowdConfigJSON) ~
          ("callback_url" -> ("http://" + crowdConfiguration.responseServerHost + ":" + crowdConfiguration.responseServerPort))) ~
        ("group_id" -> groupId) ~
        ("group_context" -> groupContextJSON) ~
        ("content" -> pointsJSON)))
    //println("Request JSON: " + requestData)

    // Send the request to the crowd server.
    //println("Issuing request...")
    val use_ssl = sys.env.getOrElse("SSL", "0") == "1"
    val builder = ClientBuilder()
      .codec(Http())
      .hosts(crowdConfiguration.crowdServerHost + ":" + crowdConfiguration.crowdServerPort)
      .hostConnectionLimit(1)
    val client: Service[HttpRequest, HttpResponse] = if (use_ssl) builder.tlsWithoutValidation().build() else builder.build()
    val url_scheme = if (use_ssl) "https" else "http"
    val request = RequestBuilder()
      .url(url_scheme + "://" + crowdConfiguration.crowdServerHost + ":"
        + crowdConfiguration.crowdServerPort + "/"
        + crowdJobURL format crowdConfiguration.crowdName)
      .addHeader("Charset", "UTF-8")
      .addFormElement(("data", requestData))
      .buildFormPost()
    val responseFuture = client(request)

    // Check that our crowd request was successful. The response data will be handled by handleResponse()
    responseFuture onSuccess { resp: HttpResponse =>
      val responseData = resp.getContent.toString("UTF-8")
      println(responseData)
      resp.getStatus match {
        case HttpResponseStatus.OK =>
          implicit val formats = DefaultFormats
          (parse(responseData) \\ "status").extract[String] match {
            case "ok" => println("[SampleClean] Created AMT HIT")
            case other: String => println("Error! Bad request: " + other)
          }
        case other: HttpResponseStatus =>
          println("Error! Got unexpected response status " + other.getCode + ". Data: " + responseData)
      }
    } onFailure { exc: Throwable =>
      println("Failure!")
      throw exc
    }

    Await.result(responseFuture)
    resultObject
  }

  /**
   * Stores new crowd responses in the result object and cleans up once the whole group has been labeled.
   * @param newResult new crowd response.
   */
  def handleResult[T <: CrowdResultItem](newResult: CrowdResult[T]) {
    // Look up the result object for the new result's group
    val groupId = newResult.group_id
    if (!(groupMap containsKey groupId)) throw new RuntimeException("Invalid groupID from crowd: " + groupId)

    // Scary scala reflection to add the new result to the group object
    val resultObject = results.get(groupId)
    val updateFunc = resultObject.getClass.getMethods.filter(m => m.getName == "tupleProcessed").head
    newResult.answers map {
      answer => updateFunc.invoke(resultObject, answer.identifier -> answer.value)
    }

    // Delete ids with results from the groupMap to track progress
    val newIds = (newResult.answers map { r => r.identifier}).toSet
    groupMap.replace(groupId, groupMap.get(groupId) &~ newIds)

    // if we have results for every point in the group, clean up.
    if (groupMap.get(groupId).isEmpty) {
      resultObject.complete()
      groupMap.remove(groupId)
      results.remove(groupId)
      // previously leaked: completed groups were never removed from groupTypes
      groupTypes.remove(groupId)
    }
  }

  /**
   * Starts the server if it isn't already running.
   * @param port the port on which to listen.
   */
  def start(port: Int) {
    synchronized {
      if (!running) {
        running = true
        server = builder
          .bindTo(new InetSocketAddress(port))
          .build(CrowdHTTPServer.service)
      }
    }
  }

  /** Stops the server if it is running. */
  def stop() {
    synchronized {
      if (server != null) {
        server.close()
        running = false
      }
    }
  }
}
| sjyk/sampleclean-async | src/main/scala/sampleclean/crowd/CrowdHTTPServer.scala | Scala | apache-2.0 | 9,665 |
package patmat
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import patmat.Huffman._
// Allows the ScalaTest suite to be executed via a JUnit runner.
@RunWith(classOf[JUnitRunner])
class HuffmanSuite extends FunSuite {
  // Shared fixtures: t1 is a fork of leaves a(2)/b(3) with total weight 5;
  // t2 wraps t1 with an additional leaf d(4) for a total weight of 9.
  trait TestTrees {
    val t1 = Fork(Leaf('a',2), Leaf('b',3), List('a','b'), 5)
    val t2 = Fork(Fork(Leaf('a',2), Leaf('b',3), List('a','b'), 5), Leaf('d',4), List('a','b','d'), 9)
  }
  test("weight of a larger tree") {
    new TestTrees {
      assert(weight(t1) === 5)
    }
  }
  test("chars of a larger tree") {
    new TestTrees {
      assert(chars(t2) === List('a','b','d'))
    }
  }
  test("string2chars(\\"hello, world\\")") {
    assert(string2Chars("hello, world") === List('h', 'e', 'l', 'l', 'o', ',', ' ', 'w', 'o', 'r', 'l', 'd'))
  }
  // note: this pins a specific pair ordering, i.e. times is expected to list
  // characters in order of first occurrence
  test("times") {
    assert(times(List('a', 'b', 'a')) === List(('a', 2), ('b', 1)))
  }
  test("makeOrderedLeafList for some frequency table") {
    assert(makeOrderedLeafList(List(('t', 2), ('e', 1), ('x', 3))) === List(Leaf('e',1), Leaf('t',2), Leaf('x',3)))
  }
  test("combine of some leaf list") {
    val leaflist = List(Leaf('e', 1), Leaf('t', 2), Leaf('x', 4))
    assert(combine(leaflist) === List(Fork(Leaf('e',1),Leaf('t',2),List('e', 't'),3), Leaf('x',4)))
  }
  // round-trip property: decoding an encoding must reproduce the input text
  test("decode and encode a very short text should be identity") {
    new TestTrees {
      assert(decode(t1, encode(t1)("ab".toList)) === "ab".toList)
    }
  }
  test("decode and quick encode a very short text should be identity") {
    new TestTrees {
      assert(decode(t1, quickEncode(t1)("ab".toList)) === "ab".toList)
    }
  }
}
| eguneys/coursera-notes | reactive/assignments/functional/patmat/src/test/scala/patmat/HuffmanSuite.scala | Scala | mit | 1,591 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.net.URI
import scala.collection.JavaConverters._
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.execution.datasources.{DataSource, InMemoryFileIndex, LogicalRelation}
import org.apache.spark.sql.types.StructType
/**
 * A very simple source that reads files from the given directory as they appear.
 *
 * Selected files are recorded in a write-ahead [[FileStreamSourceLog]] (so each file is
 * assigned to exactly one batch, surviving restarts) and tracked in a
 * [[FileStreamSource.SeenFilesMap]] so that already-processed files can be skipped cheaply.
 */
class FileStreamSource(
    sparkSession: SparkSession,
    path: String,
    fileFormatClassName: String,
    override val schema: StructType,
    partitionColumns: Seq[String],
    metadataPath: String,
    options: Map[String, String]) extends Source with Logging {

  import FileStreamSource._

  private val sourceOptions = new FileStreamOptions(options)

  private val hadoopConf = sparkSession.sessionState.newHadoopConf()

  @transient private val fs = new Path(path).getFileSystem(hadoopConf)

  private val qualifiedBasePath: Path = {
    fs.makeQualified(new Path(path)) // can contain glob patterns
  }

  // options used when resolving the batch relation; 'basePath' is pinned for non-glob
  // paths so partition discovery treats the user-supplied path as the table root
  private val optionsWithPartitionBasePath = sourceOptions.optionMapWithoutPath ++ {
    if (!SparkHadoopUtil.get.isGlobPath(new Path(path)) && options.contains("path")) {
      Map("basePath" -> path)
    } else {
      Map()
    }}

  private val metadataLog =
    new FileStreamSourceLog(FileStreamSourceLog.VERSION, sparkSession, metadataPath)
  // latest batch id recorded in the metadata log, or -1 if the log is empty
  private var metadataLogCurrentOffset = metadataLog.getLatest().map(_._1).getOrElse(-1L)

  /** Maximum number of new files to be considered in each batch */
  private val maxFilesPerBatch = sourceOptions.maxFilesPerTrigger

  // ordering of files by modification time when selecting a batch;
  // newest-first when 'latestFirst' is enabled
  private val fileSortOrder = if (sourceOptions.latestFirst) {
      logWarning(
        """'latestFirst' is true. New files will be processed first, which may affect the watermark
          |value. In addition, 'maxFileAge' will be ignored.""".stripMargin)
      implicitly[Ordering[Long]].reverse
    } else {
      implicitly[Ordering[Long]]
    }

  // with latestFirst plus a per-batch cap, age-based expiry must be disabled: old files
  // may legitimately be selected in later batches
  private val maxFileAgeMs: Long = if (sourceOptions.latestFirst && maxFilesPerBatch.isDefined) {
    Long.MaxValue
  } else {
    sourceOptions.maxFileAgeMs
  }

  private val fileNameOnly = sourceOptions.fileNameOnly
  if (fileNameOnly) {
    logWarning("'fileNameOnly' is enabled. Make sure your file names are unique (e.g. using " +
      "UUID), otherwise, files with the same name but under different paths will be considered " +
      "the same and causes data lost.")
  }

  /** A mapping from a file that we have processed to some timestamp it was last modified. */
  // Visible for testing and debugging in production.
  val seenFiles = new SeenFilesMap(maxFileAgeMs, fileNameOnly)

  // replay the metadata log on startup so restarts do not re-process known files
  metadataLog.allFiles().foreach { entry =>
    seenFiles.add(entry.path, entry.timestamp)
  }
  seenFiles.purge()

  logInfo(s"maxFilesPerBatch = $maxFilesPerBatch, maxFileAgeMs = $maxFileAgeMs")

  /**
   * Returns the maximum offset that can be retrieved from the source.
   *
   * `synchronized` on this method is for solving race conditions in tests. In the normal usage,
   * there is no race here, so the cost of `synchronized` should be rare.
   */
  private def fetchMaxOffset(): FileStreamSourceOffset = synchronized {
    // All the new files found - ignore aged files and files that we have seen.
    val newFiles = fetchAllFiles().filter {
      case (path, timestamp) => seenFiles.isNewFile(path, timestamp)
    }

    // Obey user's setting to limit the number of files in this batch trigger.
    val batchFiles =
      if (maxFilesPerBatch.nonEmpty) newFiles.take(maxFilesPerBatch.get) else newFiles
    batchFiles.foreach { file =>
      seenFiles.add(file._1, file._2)
      logDebug(s"New file: $file")
    }
    val numPurged = seenFiles.purge()

    logTrace(
      s"""
         |Number of new files = ${newFiles.size}
         |Number of files selected for batch = ${batchFiles.size}
         |Number of seen files = ${seenFiles.size}
         |Number of files purged from tracking map = $numPurged
       """.stripMargin)

    // only advance the offset (and write a log entry) when there is something new
    if (batchFiles.nonEmpty) {
      metadataLogCurrentOffset += 1
      metadataLog.add(metadataLogCurrentOffset, batchFiles.map { case (p, timestamp) =>
        FileEntry(path = p, timestamp = timestamp, batchId = metadataLogCurrentOffset)
      }.toArray)
      logInfo(s"Log offset set to $metadataLogCurrentOffset with ${batchFiles.size} new files")
    }

    FileStreamSourceOffset(metadataLogCurrentOffset)
  }

  /**
   * For test only. Run `func` with the internal lock to make sure when `func` is running,
   * the current offset won't be changed and no new batch will be emitted.
   */
  def withBatchingLocked[T](func: => T): T = synchronized {
    func
  }

  /** Return the latest offset in the [[FileStreamSourceLog]] */
  def currentLogOffset: Long = synchronized { metadataLogCurrentOffset }

  /**
   * Returns the data that is between the offsets (`start`, `end`].
   */
  override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
    val startOffset = start.map(FileStreamSourceOffset(_).logOffset).getOrElse(-1L)
    val endOffset = FileStreamSourceOffset(end).logOffset

    assert(startOffset <= endOffset)
    // look up the files assigned to batches (startOffset, endOffset] in the log
    val files = metadataLog.get(Some(startOffset + 1), Some(endOffset)).flatMap(_._2)
    logInfo(s"Processing ${files.length} files from ${startOffset + 1}:$endOffset")
    logTrace(s"Files are:\\n\\t" + files.mkString("\\n\\t"))
    val newDataSource =
      DataSource(
        sparkSession,
        paths = files.map(f => new Path(new URI(f.path)).toString),
        userSpecifiedSchema = Some(schema),
        partitionColumns = partitionColumns,
        className = fileFormatClassName,
        options = optionsWithPartitionBasePath)
    Dataset.ofRows(sparkSession, LogicalRelation(newDataSource.resolveRelation(
      checkFilesExist = false), isStreaming = true))
  }

  /**
   * If the source has a metadata log indicating which files should be read, then we should use it.
   * Only when user gives a non-glob path that will we figure out whether the source has some
   * metadata log
   *
   * None        means we don't know at the moment
   * Some(true)  means we know for sure the source DOES have metadata
   * Some(false) means we know for sure the source DOSE NOT have metadata
   */
  @volatile private[sql] var sourceHasMetadata: Option[Boolean] =
    if (SparkHadoopUtil.get.isGlobPath(new Path(path))) Some(false) else None

  // lists files by scanning the file system directly (no sink metadata available)
  private def allFilesUsingInMemoryFileIndex() = {
    val globbedPaths = SparkHadoopUtil.get.globPathIfNecessary(fs, qualifiedBasePath)
    val fileIndex = new InMemoryFileIndex(sparkSession, globbedPaths, options, Some(new StructType))
    fileIndex.allFiles()
  }

  // lists files recorded by an upstream FileStreamSink's metadata log
  private def allFilesUsingMetadataLogFileIndex() = {
    // Note if `sourceHasMetadata` holds, then `qualifiedBasePath` is guaranteed to be a
    // non-glob path
    new MetadataLogFileIndex(sparkSession, qualifiedBasePath, None).allFiles()
  }

  /**
   * Returns a list of files found, sorted by their timestamp.
   */
  private def fetchAllFiles(): Seq[(String, Long)] = {
    val startTime = System.nanoTime

    var allFiles: Seq[FileStatus] = null
    sourceHasMetadata match {
      case None =>
        if (FileStreamSink.hasMetadata(Seq(path), hadoopConf)) {
          sourceHasMetadata = Some(true)
          allFiles = allFilesUsingMetadataLogFileIndex()
        } else {
          allFiles = allFilesUsingInMemoryFileIndex()
          if (allFiles.isEmpty) {
            // we still cannot decide
          } else {
            // decide what to use for future rounds
            // double check whether source has metadata, preventing the extreme corner case that
            // metadata log and data files are only generated after the previous
            // `FileStreamSink.hasMetadata` check
            if (FileStreamSink.hasMetadata(Seq(path), hadoopConf)) {
              sourceHasMetadata = Some(true)
              allFiles = allFilesUsingMetadataLogFileIndex()
            } else {
              sourceHasMetadata = Some(false)
              // `allFiles` have already been fetched using InMemoryFileIndex in this round
            }
          }
        }
      case Some(true) => allFiles = allFilesUsingMetadataLogFileIndex()
      case Some(false) => allFiles = allFilesUsingInMemoryFileIndex()
    }

    val files = allFiles.sortBy(_.getModificationTime)(fileSortOrder).map { status =>
      (status.getPath.toUri.toString, status.getModificationTime)
    }
    val endTime = System.nanoTime
    val listingTimeMs = (endTime.toDouble - startTime) / 1000000
    if (listingTimeMs > 2000) {
      // Output a warning when listing files uses more than 2 seconds.
      logWarning(s"Listed ${files.size} file(s) in $listingTimeMs ms")
    } else {
      logTrace(s"Listed ${files.size} file(s) in $listingTimeMs ms")
    }
    logTrace(s"Files are:\\n\\t" + files.mkString("\\n\\t"))
    files
  }

  override def getOffset: Option[Offset] = Some(fetchMaxOffset()).filterNot(_.logOffset == -1)

  override def toString: String = s"FileStreamSource[$qualifiedBasePath]"

  /**
   * Informs the source that Spark has completed processing all data for offsets less than or
   * equal to `end` and will only request offsets greater than `end` in the future.
   */
  override def commit(end: Offset): Unit = {
    // No-op for now; FileStreamSource currently garbage-collects files based on timestamp
    // and the value of the maxFileAge parameter.
  }

  override def stop() {}
}
}
object FileStreamSource {

  /** Timestamp for file modification time, in ms since January 1, 1970 UTC. */
  type Timestamp = Long

  /** A single tracked file, tagged with the batch id that first selected it. */
  case class FileEntry(path: String, timestamp: Timestamp, batchId: Long) extends Serializable

  /**
   * A custom hash map used to track the list of files seen. This map is not thread-safe.
   *
   * To keep the map from growing indefinitely, `purge()` drops entries more than
   * `maxAgeMs` older than the newest file observed so far.
   */
  class SeenFilesMap(maxAgeMs: Long, fileNameOnly: Boolean) {
    require(maxAgeMs >= 0)

    /** Mapping from tracking key (full path or bare file name) to modification timestamp. */
    private val fileToTimestamp = new java.util.HashMap[String, Timestamp]

    /** Timestamp of the newest file observed so far. */
    private var latestTimestamp: Timestamp = 0L

    /** Cut-off used by the most recent purge; older files are no longer tracked. */
    private var lastPurgeTimestamp: Timestamp = 0L

    /** Key under which a path is tracked: just the file name when `fileNameOnly` is set. */
    @inline private def trackingKey(path: String) =
      if (fileNameOnly) new Path(new URI(path)).getName else path

    /** Add a new file to the map. */
    def add(path: String, timestamp: Timestamp): Unit = {
      fileToTimestamp.put(trackingKey(path), timestamp)
      latestTimestamp = math.max(latestTimestamp, timestamp)
    }

    /**
     * Returns true if we should consider this file a new file. The file is only considered
     * "new" if it is new enough that we are still tracking, and we have not seen it before.
     */
    def isNewFile(path: String, timestamp: Timestamp): Boolean = {
      // Testing against lastPurgeTimestamp (rather than latestTimestamp - maxAgeMs) ensures
      // we never miss a file that is old but has not actually been purged yet.
      timestamp >= lastPurgeTimestamp && !fileToTimestamp.containsKey(trackingKey(path))
    }

    /** Removes aged entries and returns the number of files removed. */
    def purge(): Int = {
      lastPurgeTimestamp = latestTimestamp - maxAgeMs
      val sizeBefore = fileToTimestamp.size()
      val entries = fileToTimestamp.entrySet().iterator()
      while (entries.hasNext) {
        if (entries.next().getValue < lastPurgeTimestamp) {
          entries.remove()
        }
      }
      sizeBefore - fileToTimestamp.size()
    }

    def size: Int = fileToTimestamp.size()
  }
}
| lvdongr/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala | Scala | apache-2.0 | 12,629 |
/*
* Copyright 2015 Data Artisans GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dataartisans.flink.example.eventpattern
import org.apache.flink.util.Collector
/**
 * Base for standalone generators that use the state machine to create event
 * sequences and push them for example into Kafka.
 */
abstract class StandaloneGeneratorBase {

  /**
   * Starts one [[GeneratorThread]] per collector (each covering a disjoint slice of the
   * address space) and runs an interactive command loop on stdin until 'q' is entered
   * or the input stream is closed.
   *
   * @param collectors the sinks the generated events are pushed into; must be non-empty
   */
  def runGenerator(collectors: Array[_ <: Collector[Event]]): Unit = {

    val threads = new Array[GeneratorThread](collectors.length)
    // partition the address space evenly across the generator threads
    val range = Integer.MAX_VALUE / collectors.length

    // create the generator threads
    for (i <- threads.indices) {
      val min = range * i
      val max = min + range
      val thread = new GeneratorThread(collectors(i), min, max)

      threads(i) = thread
      thread.setName("Generator " + i)
    }

    var delay: Long = 2L
    var nextErroneous: Int = 0
    var running: Boolean = true

    threads.foreach( _.setDelay(delay) )
    threads.foreach( _.start() )

    // val throughputLogger = new ThroughputLogger(threads)
    // throughputLogger.start()

    println("Commands:")
    println(" -> q : Quit")
    println(" -> + : increase latency")
    println(" -> - : decrease latency")
    println(" -> e : inject invalid state transition")

    // input loop
    while (running) {
      val next: Int = System.in.read()

      next match {
        case -1 =>
          // BUG FIX: read() returns -1 at end-of-stream; previously this fell into the
          // catch-all case and busy-looped forever. Treat a closed stdin like quitting.
          println("Input closed, quitting...")
          running = false

        case 'q' =>
          println("Quitting...")
          running = false

        case 'e' =>
          // erroneous transitions are injected round-robin across the generators
          println("Injecting erroneous transition ...")
          threads(nextErroneous).sendInvalidStateTransition()
          nextErroneous = (nextErroneous + 1) % threads.length

        case '+' =>
          // double the delay; max(..., 1) recovers from a delay of 0
          delay = Math.max(delay * 2, 1)
          println("Delay is " + delay)
          threads.foreach( _.setDelay(delay) )

        case '-' =>
          delay /= 2
          println("Delay is " + delay)
          threads.foreach( _.setDelay(delay) )

        case _ =>
      }
    }

    // shutdown
    // throughputLogger.shutdown()
    threads.foreach( _.shutdown() )
    threads.foreach( _.join() )
  }
}
/**
 * A thread running a [[EventsGenerator]] and pushes generated events to the given collector
 * (such as Kafka / Socket / ...)
 *
 * @param out The collector to push the generated records to.
 * @param minAddress The lower bound for the range from which a new IP address may be picked.
 * @param maxAddress The upper bound for the range from which a new IP address may be picked.
 */
class GeneratorThread(private[this] val out: Collector[Event],
                      private[this] val minAddress: Int,
                      private[this] val maxAddress: Int) extends Thread {

  // BUG FIX: these fields are written by controlling threads (setDelay / shutdown /
  // sendInvalidStateTransition) and read by the generator thread itself (and 'count'
  // the other way around via currentCount). Without @volatile there is no visibility
  // guarantee, so e.g. shutdown() might never be observed by the run loop.
  @volatile private[this] var runningThread: Thread = _
  @volatile private[this] var delay: Long = 0
  @volatile private[this] var count: Long = 0
  @volatile private[this] var running: Boolean = true
  @volatile private[this] var injectInvalid: Boolean = false

  override def run(): Unit = {
    runningThread = Thread.currentThread()
    val generator = new EventsGenerator()

    while (running) {
      if (injectInvalid) {
        injectInvalid = false
        // emit a single invalid state transition, if the generator can produce one
        generator.nextInvalid() match {
          case Some(evt) => out.collect(evt)
          case None =>
        }
      }
      else {
        out.collect(generator.next(minAddress, maxAddress))
      }
      count += 1

      // sleep the delay to throttle
      if (delay > 0) {
        try {
          Thread.sleep(delay)
        }
        catch {
          // interrupted by shutdown(): the loop condition will see running == false
          case e: InterruptedException =>
        }
      }
    }
  }

  /** Number of events emitted so far. */
  def currentCount: Long = count

  /** Stops the generator loop, interrupting a pending throttle sleep if necessary. */
  def shutdown(): Unit = {
    running = false
    if (runningThread != null) {
      runningThread.interrupt()
    }
  }

  /** Sets the per-event throttle delay in milliseconds. */
  def setDelay(delay: Long): Unit = {
    this.delay = delay
  }

  /** Requests that the next emitted event be an invalid state transition. */
  def sendInvalidStateTransition(): Unit = {
    this.injectInvalid = true
  }
}
/**
 * Thread that periodically print the number of elements generated per second.
 *
 * @param generators The generator threads whose aggregate throughput should be logged.
 */
class ThroughputLogger(private[this] val generators: Array[GeneratorThread]) extends Thread {

  // BUG FIX: written by shutdown() from another thread; must be volatile so the
  // logging loop is guaranteed to observe the update and terminate.
  @volatile private[this] var running = true

  override def run(): Unit = {
    var lastCount: Long = 0L
    var lastTimeStamp: Long = System.currentTimeMillis()

    while (running) {
      Thread.sleep(1000)

      val ts = System.currentTimeMillis()
      val currCount: Long = generators.foldLeft(0L)( (v, gen) => v + gen.currentCount)
      // BUG FIX: use floating-point division. The previous integer division '/ 1000'
      // truncated the elapsed time to whole seconds, and produced 0 (and thus an
      // Infinity rate) whenever the interval was shorter than one second.
      val factor: Double = (ts - lastTimeStamp) / 1000.0
      val perSec: Double = (currCount - lastCount) / factor

      lastTimeStamp = ts
      lastCount = currCount

      System.out.println(perSec + " / sec")
    }
  }

  /** Signals the logging loop to stop after its current sleep. */
  def shutdown(): Unit = {
    running = false
  }
}
| StephanEwen/flink-demos | streaming-state-machine/src/main/scala/com/dataartisans/flink/example/eventpattern/StandaloneGeneratorBase.scala | Scala | apache-2.0 | 5,302 |
import java.util.EnumSet
import javax.servlet._
import gitbucket.core.controller._
import gitbucket.core.plugin.PluginRegistry
import gitbucket.core.service.SystemSettingsService
import gitbucket.core.servlet._
import gitbucket.core.util.Directory
import org.scalatra._
class ScalatraBootstrap extends LifeCycle with SystemSettingsService {

  /** Wires up servlet filters, controllers, and plugin routes when the context starts. */
  override def init(context: ServletContext) {
    val settings = loadSystemSettings()
    if(settings.baseUrl.exists(_.startsWith("https://"))) {
      // mark session cookies as secure only when the configured base URL uses https
      context.getSessionCookieConfig.setSecure(true)
    }

    // Register TransactionFilter and BasicAuthenticationFilter at first
    context.addFilter("transactionFilter", new TransactionFilter)
    context.getFilterRegistration("transactionFilter").addMappingForUrlPatterns(EnumSet.allOf(classOf[DispatcherType]), true, "/*")
    context.addFilter("gitAuthenticationFilter", new GitAuthenticationFilter)
    context.getFilterRegistration("gitAuthenticationFilter").addMappingForUrlPatterns(EnumSet.allOf(classOf[DispatcherType]), true, "/git/*")
    context.addFilter("apiAuthenticationFilter", new ApiAuthenticationFilter)
    context.getFilterRegistration("apiAuthenticationFilter").addMappingForUrlPatterns(EnumSet.allOf(classOf[DispatcherType]), true, "/api/v3/*")
    context.addFilter("ghCompatRepositoryAccessFilter", new GHCompatRepositoryAccessFilter)
    context.getFilterRegistration("ghCompatRepositoryAccessFilter").addMappingForUrlPatterns(EnumSet.allOf(classOf[DispatcherType]), true, "/*")

    // Register controllers
    context.mount(new AnonymousAccessController, "/*")

    // plugin-provided controllers are mounted at the path each plugin declares
    PluginRegistry().getControllers.foreach { case (controller, path) =>
      context.mount(controller, path)
    }

    context.mount(new IndexController, "/")
    context.mount(new ApiController, "/api/v3")
    context.mount(new FileUploadController, "/upload")
    context.mount(new SystemSettingsController, "/admin")
    context.mount(new DashboardController, "/*")
    context.mount(new AccountController, "/*")
    context.mount(new RepositoryViewerController, "/*")
    context.mount(new WikiController, "/*")
    context.mount(new LabelsController, "/*")
    context.mount(new MilestonesController, "/*")
    context.mount(new IssuesController, "/*")
    context.mount(new PullRequestsController, "/*")
    context.mount(new RepositorySettingsController, "/*")

    // Create GITBUCKET_HOME directory if it does not exist
    val dir = new java.io.File(Directory.GitBucketHome)
    if(!dir.exists){
      dir.mkdirs()
    }
  }

  /** Releases the database connection pool when the servlet context shuts down. */
  override def destroy(context: ServletContext): Unit = {
    Database.closeDataSource()
  }
}
| shiena/gitbucket | src/main/scala/ScalatraBootstrap.scala | Scala | apache-2.0 | 2,623 |
package reactive.api
import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.stream.FlowMaterializer
import akka.stream.actor.ActorPublisher
import akka.stream.scaladsl.Source
import akka.testkit.{TestKit, TestProbe}
import akka.util.Timeout
import org.scalatest.{FlatSpecLike, Matchers}
import reactive.WebSocketMessage._
import reactive.socket.ReactiveServer
import reactive.socket.ReactiveServer.{ResourceSubscription, SubscribeForResource}
import reactive.{WebSocketActorClient, WebSocketClose, WebSocketSend}
import scala.concurrent.Future
import scala.concurrent.duration._
/**
 * Integration tests for the reactive websocket client/server pair:
 * messages published by one side must arrive at the other, and (in the
 * second test) open/close lifecycle events must be observed in order.
 */
class ReactiveWebsocketTest extends TestKit(ActorSystem("Websockets"))
  with FlatSpecLike with Matchers{

  implicit val materializer = FlowMaterializer()
  implicit val exec = system.dispatcher
  implicit val timeout = Timeout(3.seconds)

  "The websocket" should "exchange basic messages between client and server" in {
    val probe = TestProbe()
    // NOTE(review): both tests bind a ReactiveServer to port 8080 and neither
    // server is stopped afterwards; the second bind may fail if the port is
    // still held — TODO confirm how ReactiveServer handles rebinding.
    val server = system.actorOf(Props(classOf[ReactiveServer], 8080))
    // Subscribe for connections on /somepath; each new connection acts as a
    // publisher of incoming text frames.
    (server ? SubscribeForResource("/somepath"))
      .onSuccess{ case ResourceSubscription(routeSource) =>
        routeSource.foreach(connection => {
          Source(ActorPublisher[String](connection))
            .foreach(str => {
              // Echo back a fixed reply and record what the server saw.
              probe.ref ! s"server received: $str"
              connection ! WebSocketSend("server message")
            })
        })
      }
    val client = system.actorOf(Props(classOf[WebSocketActorClient], "ws://localhost:8080/somepath"))
    Source(ActorPublisher(client))
      .foreach { any : String =>
        probe.ref ! s"client received: $any"
      }
    client ! WebSocketSend("client message")
    probe.expectMsg("server received: client message")
    probe.expectMsg("client received: server message")
  }

  "The websocket" should "exchange basic messages between client and server3" in {
    val serverProbe = TestProbe()
    val clientProbe = TestProbe()
    val server = system.actorOf(Props(classOf[ReactiveServer], 8080))
    (server ? SubscribeForResource("/somepath"))
      .onSuccess{ case ResourceSubscription(routeSource) =>
        routeSource.foreach(connection => {
          // Subscribing yields a Future that completes when the corresponding
          // lifecycle event (open/close) fires on the server side.
          (connection ? SubscribeOpen)
            .mapTo[Future[ServerOpen]]
            .map(fut=> fut.onSuccess{ case open =>
              serverProbe.ref ! "open" })
          (connection ? SubscribeClose)
            .mapTo[Future[Close]]
            .map(fut=> fut.onSuccess{ case close =>
              serverProbe.ref ! "close" })
          Source(ActorPublisher[String](connection))
            .foreach(str => {
              connection ! WebSocketSend("server message")
              serverProbe.ref ! s"server received: $str"
            })
        })
      }
    val client = system.actorOf(Props(classOf[WebSocketActorClient], "ws://localhost:8080/somepath"))
    // Mirror the same open/close subscriptions on the client side.
    (client ? SubscribeOpen)
      .mapTo[Future[ClientOpen]]
      .map(fut=> fut.onSuccess{ case open => clientProbe.ref ! "open" })
    (client ? SubscribeClose)
      .mapTo[Future[Close]]
      .map(fut=> fut.onSuccess{ case close => clientProbe.ref ! "close" })
    Source(ActorPublisher(client))
      .foreach { any : String =>
        clientProbe.ref ! s"client received: $any"
      }
    client ! WebSocketSend("client message")
    // NOTE(review): fixed sleep gives the round-trip time to finish before
    // closing; prefer probe expectations over Thread.sleep if this flakes.
    Thread.sleep(500.millis.toMillis)
    client ! WebSocketClose
    clientProbe.expectMsg("open")
    clientProbe.expectMsg("client received: server message")
    clientProbe.expectMsg("close")
    serverProbe.expectMsg("open")
    serverProbe.expectMsg("server received: client message")
    serverProbe.expectMsg("close")
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.util
import scala.collection.JavaConverters._
import org.apache.spark.sql.hive.{CarbonMetaData, DictionaryMap}
import org.apache.carbondata.core.metadata.encoder.Encoding
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.util.CarbonUtil
case class TransformHolder(rdd: Any, mataData: CarbonMetaData)
object CarbonSparkUtil {

  /**
   * Builds the Spark-side metadata wrapper for a Carbon table: the dimension
   * and measure column names plus, for each dimension, whether it uses a
   * plain dictionary encoding (dictionary-encoded, not direct-dictionary,
   * and not a complex data type).
   */
  def createSparkMeta(carbonTable: CarbonTable): CarbonMetaData = {
    val factTableName = carbonTable.getFactTableName
    val dimensions = carbonTable.getDimensionByTableName(factTableName).asScala
    val dimensionNames = dimensions.map(_.getColName)
    val measureNames = carbonTable.getMeasureByTableName(factTableName).asScala.map(_.getColName)
    // Lower-cased column name -> "uses a plain dictionary" flag.
    val dictionaryFlags = dimensions.map { dim =>
      dim.getColName.toLowerCase ->
        (dim.hasEncoding(Encoding.DICTIONARY) &&
          !dim.hasEncoding(Encoding.DIRECT_DICTIONARY) &&
          !CarbonUtil.hasComplexDataType(dim.getDataType))
    }
    CarbonMetaData(dimensionNames, measureNames, carbonTable, DictionaryMap(dictionaryFlags.toMap))
  }
}
| ksimar/incubator-carbondata | integration/spark2/src/main/scala/org/apache/carbondata/spark/util/CarbonSparkUtil.scala | Scala | apache-2.0 | 1,983 |
package pt.tecnico.dsi.akkastrator
/**
 * Strategy that, given the number of destinations in a quorum, computes
 * how many votes are required before the quorum is considered reached.
 */
trait MinimumVotes extends (Int => Int) {
  def apply(numberOfDestinations: Int): Int
}

/** Quorum is reached once more than half of the destinations have voted. */
object Majority extends MinimumVotes {
  def apply(numberOfDestinations: Int): Int = numberOfDestinations / 2 + 1
}

/** Quorum is reached after `n` votes, capped at the number of destinations. */
case class AtLeast(n: Int) extends MinimumVotes {
  def apply(numberOfDestinations: Int): Int = n min numberOfDestinations
}

/** Quorum requires every destination to vote. */
object All extends MinimumVotes {
  def apply(numberOfDestinations: Int): Int = numberOfDestinations
}
/*
* Copyright © 2015 Lukas Rosenthaler, Benjamin Geer, Ivan Subotic,
* Tobias Schweizer, André Kilchenmann, and Sepideh Alassi.
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi.messages.v1.responder.storemessages
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import org.knora.webapi.messages.v1.responder.{KnoraRequestV1, KnoraResponseV1}
import org.knora.webapi.messages.v1.store.triplestoremessages.{RdfDataObject, TriplestoreJsonProtocol}
import spray.json._
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Messages
// Marker for every request message handled by the store responder.
sealed trait StoreResponderRequestV1 extends KnoraRequestV1

/**
 * Requests to load the triplestore with data referenced inside [[RdfDataObject]]. Any data contained inside the
 * triplestore will be deleted first.
 *
 * @param rdfDataObjects a sequence of [[RdfDataObject]] objects containing the path to the data and the name of
 *                       the named graph into which the data should be loaded.
 */
case class ResetTriplestoreContentRequestV1(rdfDataObjects: Seq[RdfDataObject]) extends StoreResponderRequestV1
/**
 * Confirms that the triplestore content has been reset.
 *
 * @param message a human-readable status message.
 */
case class ResetTriplestoreContentResponseV1(message: String) extends KnoraResponseV1 with StoreV1JsonProtocol {
  // Serialises this response using the implicit format defined in StoreV1JsonProtocol.
  def toJsValue = resetTriplestoreContentResponseV1Format.write(this)
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// JSON formatting
/**
 * A spray-json protocol for generating Knora API v1 JSON for property values.
 */
trait StoreV1JsonProtocol extends SprayJsonSupport with DefaultJsonProtocol with NullOptions with TriplestoreJsonProtocol {

  // Uses the spray-json `jsonFormat` overload that takes the companion's apply
  // function plus explicit field names; the two type parameters pin the overload.
  // Presumably the explicit form is needed because the automatic jsonFormatN
  // field-name extraction does not work for this case class — TODO confirm
  // against the spray-json documentation.
  implicit val resetTriplestoreContentResponseV1Format: RootJsonFormat[ResetTriplestoreContentResponseV1] = jsonFormat[String, ResetTriplestoreContentResponseV1](ResetTriplestoreContentResponseV1, "message")
}
package com.bwsw.cloudstack.pulse.influx
object CounterField {

  /** Factory mirroring the class constructor; `modifier` defaults to empty. */
  def apply(name: String, aggregation: String, modifier: String = "") =
    new CounterField(name, aggregation, modifier)

  /**
   * Converts an InfluxDB-style duration literal into a number of seconds.
   * Accepts `<digits><unit>` where unit is s, m, h, d or w (seconds support
   * is a backward-compatible addition).
   *
   * @throws IllegalArgumentException for any malformed value; previously such
   *         input escaped as an opaque scala.MatchError, and an unknown unit
   *         hit a second non-exhaustive match.
   */
  private[pulse] def transformAggregationToSeconds(value: String): Int = {
    val Pattern = "^([0-9]+)([smhdw])$".r
    value match {
      case Pattern(number, scale) =>
        val unitSeconds = scale match {
          case "s" => 1
          case "m" => 60
          case "h" => 3600
          case "d" => 3600 * 24
          case "w" => 3600 * 24 * 7
        }
        number.toInt * unitSeconds
      case _ =>
        throw new IllegalArgumentException(
          s"Invalid aggregation interval '$value': expected <number><s|m|h|d|w>")
    }
  }
}
/**
 * InfluxQL projection for a monotonically increasing counter: the mean over
 * each aggregation window is differentiated (never negative) and normalised
 * to a per-second rate by dividing by the window length in seconds.
 */
class CounterField(name: String, aggregation: String, modifier: String) extends Field {

  // Seconds represented by one aggregation window.
  val aggregationSeconds = CounterField.transformAggregationToSeconds(aggregation)

  override def toString() =
    s"""NON_NEGATIVE_DERIVATIVE(MEAN("$name"), $aggregation)$modifier / $aggregationSeconds"""
}
| bwsw/cs-pulse-server | src/main/scala-2.12/com/bwsw/cloudstack/pulse/influx/CounterField.scala | Scala | apache-2.0 | 870 |
package com.github.rgafiyatullin.creek_xml.utf8
import java.nio.charset.StandardCharsets
import scala.collection.immutable.Queue
/** Marker for all errors the incremental UTF-8 decoder can produce. */
trait Utf8Error extends Exception

object Utf8Error {
  /** A byte was fed in while a fully decoded character was still pending consumption. */
  case object CharComplete extends Utf8Error

  /** The byte cannot start a UTF-8 sequence (e.g. a stray continuation byte 10xxxxxx). */
  final case class InvalidLeadingByte(b: Byte) extends Utf8Error

  /** The byte is not a valid UTF-8 continuation byte (must match 10xxxxxx). */
  final case class InvalidNonLeadingByte(b: Byte) extends Utf8Error
}

/**
 * Push-based incremental UTF-8 decoder. Feed bytes with `in`; poll decoded
 * UTF-16 chars with `out`. Instances are immutable: each call returns the
 * follow-up state.
 */
trait Utf8InputStream {
  /** The next decoded char (if one is ready) together with the follow-up state. */
  def out: (Option[Char], Utf8InputStream)

  /** Consumes one input byte, or reports why it is not acceptable in this state. */
  def in(b: Byte): Either[Utf8Error, Utf8InputStream]
}

object Utf8InputStream {
  def empty: Utf8InputStream = Empty

  /** Initial state: waiting for the leading byte of the next character. */
  case object Empty extends Utf8InputStream {
    override def in(b: Byte): Either[Utf8Error, Utf8InputStream] =
      bytesExpectedByFirstByte(b) match {
        case 0 =>
          // Single-byte (ASCII) character: complete immediately.
          Right(CompleteChar(b.toChar))
        case more if more >= 1 && more <= 3 =>
          Right(ExpectBytes(more, Queue(b)))
        case _ =>
          Left(Utf8Error.InvalidLeadingByte(b))
      }

    override def out: (Option[Char], Utf8InputStream) = (None, this)
  }

  /** A single decoded UTF-16 code unit ready to be read. */
  final case class CompleteChar(c: Char) extends Utf8InputStream {
    override def in(b: Byte): Either[Utf8Error, Utf8InputStream] =
      Left(Utf8Error.CharComplete)

    override def out: (Option[Char], Utf8InputStream) = (Some(c), Empty)
  }

  /**
   * Two or more decoded UTF-16 code units pending: a supplementary-plane
   * character (4-byte UTF-8) decodes to a surrogate pair, which is emitted
   * one `Char` at a time. BUG FIX: the previous implementation kept only
   * `charAt(0)`, silently dropping the low surrogate of every 4-byte sequence.
   */
  final case class CompleteChars(chars: String) extends Utf8InputStream {
    override def in(b: Byte): Either[Utf8Error, Utf8InputStream] =
      Left(Utf8Error.CharComplete)

    override def out: (Option[Char], Utf8InputStream) =
      if (chars.length <= 1) (chars.headOption, Empty)
      else (Some(chars.head), CompleteChars(chars.tail))
  }

  /** Mid-sequence state: `bytesLeft` continuation bytes still expected. */
  final case class ExpectBytes(bytesLeft: Int, acc: Queue[Byte]) extends Utf8InputStream {
    override def in(b: Byte): Either[Utf8Error, Utf8InputStream] =
      bytesExpectedByFirstByte(b) match {
        case -1 if bytesLeft == 1 =>
          Right(decoded(acc.enqueue(b)))
        case -1 if bytesLeft > 1 =>
          Right(copy(bytesLeft = bytesLeft - 1, acc = acc.enqueue(b)))
        case _ =>
          Left(Utf8Error.InvalidNonLeadingByte(b))
      }

    override def out: (Option[Char], Utf8InputStream) = (None, this)
  }

  /** Decodes a complete byte sequence; multi-code-unit results (surrogate pairs) are preserved. */
  private def decoded(acc: Queue[Byte]): Utf8InputStream = {
    val s = new String(acc.toArray, StandardCharsets.UTF_8)
    if (s.length == 1) CompleteChar(s.charAt(0)) else CompleteChars(s)
  }

  // Bit patterns for classifying a byte. Both operands of `&` are sign-extended
  // to Int consistently, so the comparisons below also hold for "negative" bytes.
  val query0: Byte = 0xc0.toByte // 1100 0000
  val query1: Byte = 0x80.toByte // 1000 0000
  val query2: Byte = 0xe0.toByte // 1110 0000
  val query3: Byte = 0xf0.toByte // 1111 0000
  val query4: Byte = 0xf8.toByte // 1111 1000

  val mask0: Byte = 0x80.toByte // 1000 0000
  val mask1: Byte = 0x00.toByte // 0000 0000
  val mask2: Byte = 0xc0.toByte // 1100 0000
  val mask3: Byte = 0xe0.toByte // 1110 0000
  val mask4: Byte = 0xf0.toByte // 1111 0000

  /**
   * Number of continuation bytes implied by a candidate leading byte:
   * 0 for ASCII, 1-3 for multi-byte leads, -1 for a continuation byte,
   * -2 for a byte that can never occur in UTF-8.
   */
  private def bytesExpectedByFirstByte(b: Byte): Int = {
    if ((b & query1) == mask1) 0
    else if ((b & query0) == mask0) -1
    else if ((b & query2) == mask2) 1
    else if ((b & query3) == mask3) 2
    else if ((b & query4) == mask4) 3
    else -2
  }
}
/*
* Copyright 2001-2009 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package samples
/*
ScalaTest facilitates different styles of testing by providing traits you can mix
together to get the behavior and syntax you prefer. A few examples are
included here. For more information, visit:
http://www.scalatest.org/
One way to use ScalaTest is to help make JUnit or TestNG tests more
clear and concise. Here's an example:
*/
import scala.collection.mutable.Stack
import org.scalatest.{Spec, FunSpec, Assertions}
import org.junit.Test
// Example of using ScalaTest assertions inside plain JUnit test methods.
class StackSuite extends Assertions {

  // Pushed values must come back in LIFO order.
  // (The "Iin" typo is kept: renaming would change the reported test name.)
  @Test def stackShouldPopValuesIinLastInFirstOutOrder() {
    val stack = new Stack[Int]
    stack.push(1)
    stack.push(2)
    assert(stack.pop() === 2)
    assert(stack.pop() === 1)
  }

  // Popping an empty stack must raise NoSuchElementException.
  @Test def stackShouldThrowNoSuchElementExceptionIfAnEmptyStackIsPopped() {
    val emptyStack = new Stack[String]
    intercept[NoSuchElementException] {
      emptyStack.pop()
    }
  }
}
/*
Here's an example of a FunSuite with ShouldMatchers mixed in:
*/
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
// Example FunSuite with ShouldMatchers, runnable under JUnit via JUnitRunner.
@RunWith(classOf[JUnitRunner])
class ListSuite extends FunSuite with ShouldMatchers {

  test("An empty list should be empty") {
    List() should be ('empty)
    Nil should be ('empty)
  }

  test("A non-empty list should not be empty") {
    List(1, 2, 3) should not be ('empty)
    List("fee", "fie", "foe", "fum") should not be ('empty)
  }

  test("A list's length should equal the number of elements it contains") {
    List() should have length (0)
    List(1, 2) should have length (2)
    List("fee", "fie", "foe", "fum") should have length (4)
  }
}
/*
ScalaTest also supports the behavior-driven development style, in which you
combine tests with text that specifies the behavior being tested. Here's
an example whose text output when run looks like:
A Map
- should only contain keys and values that were added to it
- should report its size as the number of key/value pairs it contains
*/
import org.scalatest.matchers.MustMatchers
// Example behavior-driven spec (FunSpec) using MustMatchers.
class MapSpec extends FunSpec with MustMatchers{

  describe("A Map") {

    // Membership checks must reflect exactly the inserted entries.
    it("should only contain keys and values that were added to it") {
      Map("ho" -> 12) must (not contain key ("hi") and not contain value (13))
      Map("hi" -> 13) must (contain key ("hi") and contain value (13))
    }

    // Size equals the number of key/value pairs.
    it("should report its size as the number of key/value pairs it contains") {
      Map("ho" -> 12) must have size (1)
      Map("hi" -> 13, "ho" -> 12) must have size (2)
    }
  }
}
| iPomme/scanao | server/src/test/scala/samples/scalatest.scala | Scala | mit | 3,244 |
package code
package snippet
import java.text.Collator
import _root_.net.liftweb.util.CssSel
import code.model._
import code.service._
import net.liftweb.common.{Box, Empty, Full}
import net.liftweb.http.js.JE.JsRaw
import net.liftweb.http.js.JsCmd
import net.liftweb.http.js.JsCmds.SetHtml
import net.liftweb.http.{S, SHtml, _}
import net.liftweb.mapper.By
import net.liftweb.util.Helpers._
import scala.util.Sorting
import scala.xml.NodeSeq
/**
 * Task administration snippet.
 *
 * Renders the project/task tree and provides AJAX-driven create, edit,
 * delete, select, move and merge operations via modal dialogs.
 *
 * @author David Csakvari
 */
class ProjectsSnippet {

  // Locale-aware comparator used to order sibling tasks by name.
  private val collator: Collator = Collator.getInstance(S.locale)

  // Session flag: when true, inactive tasks are also rendered.
  object showInactiveTasks extends SessionVar(false)

  // Session-scoped selection: the task acting as source of move/merge operations.
  object selectedTask extends SessionVar[Box[Task]](Empty)

  /** Button that flips the inactive-tasks flag and reloads the page. */
  def toggleInactiveView: CssSel = {
    "type=submit [value]" #> (if (showInactiveTasks.get) S.?("projects.hide_inactive") else S.?("projects.show_inactive")) &
      "type=submit" #> SHtml.onSubmitUnit(() => {
        showInactiveTasks.set(!showInactiveTasks.get)
        net.liftweb.http.js.JsCmds.Reload
      })
  }

  /** Binds the "add root" control to the create dialog with no parent task. */
  def addRoot(): CssSel = {
    ".add-root [onclick]" #> SHtml.ajaxInvoke(() => addChild(Empty)).toJsCmd
  }

  /** Moves the currently selected task (if any) to the root level, then re-renders the tree. */
  def moveToRoot: CssSel = {
    def submit: JsCmd = {
      selectedTask.is.flatMap { sp =>
        TaskService.moveToRoot(sp)
        selectedTask.set(Empty)
      }
      rerenderTree
    }
    "a [onclick]" #> SHtml.ajaxInvoke(submit _).toJsCmd
  }

  /** Template entry point: renders the whole task tree. */
  def tasks(in: NodeSeq): NodeSeq = {
    taskTemplate
  }

  // Recursive markup skeleton for one task node. The hidden "parentId" div
  // carries the parent's id into the nested lift:projectsSnippet.taskList call,
  // which expands the children.
  private val taskTemplate: NodeSeq =
    <div class="lift:projectsSnippet.taskList">
      <div class="task-root">
        <div class="parentId" style="display:none;"></div>
        <span class="task-inner">
          <span class="dropdown">
            <a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">
              <span class="name"></span>
              <span class="caret"></span>
            </a>
            <ul class="dropdown-menu">
              <li><a class="edit"><lift:loc>projects.edit</lift:loc></a></li>
              <li><a class="add-subtask"><lift:loc>projects.add_task</lift:loc></a></li>
              <li><a class="delete"><lift:loc>projects.delete</lift:loc></a></li>
              <li><a class="select"><lift:loc>projects.select</lift:loc></a></li>
              <li><a class="moveto"><lift:loc>projects.moveto</lift:loc></a></li>
              <li><a class="merge"><lift:loc>projects.mergeinto</lift:loc></a></li>
            </ul>
          </span>
        </span>
        <div class="subtasks"></div>
      </div>
    </div>

  /**
   * Renders one level of the tree: reads the parent id embedded in the markup
   * (-1 means root level), fetches the children (filtered by the inactive
   * flag), sorts them and renders each via renderTask.
   */
  def taskList(in: NodeSeq): NodeSeq = {
    // Parent id smuggled in via the hidden "parentId" div; -1 if absent/unparsable.
    val parentId: Long = {
      try {
        (in \\ "div" \\ "div").filter(_.attribute("class").get.text == "parentId").text.toLong
      } catch {
        case _: Exception => -1
      }
    }

    val tasks = if (parentId != -1L) {
      val parent = Task.find(By(Task.id, parentId))
      if (!showInactiveTasks.get) {
        Task.findAll(By(Task.parent, parent), By(Task.active, true))
      } else {
        Task.findAll(By(Task.parent, parent))
      }
    } else {
      if (!showInactiveTasks.get) {
        Task.findAll(By(Task.parent, Empty), By(Task.active, true))
      } else {
        Task.findAll(By(Task.parent, Empty))
      }
    }

    // Sort: non-selectable entries (projects) before selectable ones (tasks),
    // then alphabetically using the locale-aware collator.
    val data = tasks.toArray
    Sorting.quickSort(data)(new Ordering[Task] {
      def compare(x: Task, y: Task): Int = {
        if (x.selectable.get && !y.selectable.get) {
          1
        } else if (!x.selectable.get && y.selectable.get) {
          -1
        } else {
          collator.compare(x.name.get, y.name.get)
        }
      }
    })

    data.toSeq.flatMap(task => renderTask(task)(in))
  }

  /** Binds one task's data and menu actions into the task template. */
  private def renderTask(task: Task):CssSel = {
    // Inactive tasks get an "(inactive)" suffix in their display name.
    val displayName =
      if (task.active.get)
        task.name.get
      else
        task.name.get + " (" + S.?("projects.inactive") + ")"

    // Highlight the currently selected task.
    val rootClass =
      if (selectedTask.get === task)
        "task selected"
      else
        "task"

    // CSS class encodes both the task/project distinction and active state.
    val innerClass =
      if (task.active.get)
        if (task.selectable.get)
          "taskName"
        else
          "projectName"
      else
        if (task.selectable.get)
          "taskName inactive"
        else
          "projectName inactive"

    // Children are rendered by re-applying the template with this task's id as parent.
    val subsCssSel:CssSel = ".parentId *" #> task.id.toString

    ".name" #> displayName &
      ".task-root [class]" #> rootClass &
      ".task-inner [class]" #> innerClass &
      ".edit [onclick]" #> SHtml.ajaxInvoke(() => editor(task)).toJsCmd &
      ".delete [onclick]" #> SHtml.ajaxInvoke(() => deleteTask(task)).toJsCmd &
      ".select [onclick]" #> SHtml.ajaxInvoke(() => selectTask(task)).toJsCmd &
      ".add-subtask [onclick]" #> SHtml.ajaxInvoke(() => addChild(Full(task))).toJsCmd &
      ".merge [onclick]" #> SHtml.ajaxInvoke(() => mergeTask(task)).toJsCmd &
      ".moveto [onclick]" #> SHtml.ajaxInvoke(() => moveTo(task)).toJsCmd &
      ".subtasks *" #> subsCssSel(taskTemplate)
  }

  /** Opens the edit dialog for a task and saves the edited fields on submit. */
  private def editor(task: Task): JsCmd = {
    // Request-scoped holders pre-filled with the task's current values.
    object name extends TransientRequestVar(task.name.get)
    object description extends TransientRequestVar(task.description.get)
    object color extends TransientRequestVar(task.color.get)
    object active extends TransientRequestVar(task.active.get)
    object specifiable extends TransientRequestVar(task.specifiable.get)
    object selectable extends TransientRequestVar(task.selectable.get)
    // Radio selection: an empty stored color means "use generated color".
    object useGeneratedColor extends TransientRequestVar(
      if (!task.color.get.isEmpty)
        S.?("projects.popup.use_custom_color")
      else
        S.?("projects.popup.use_generated_color")
    )

    def submit: JsCmd = {
      // An empty color string signals that the color should be generated.
      val selectedColor = if (useGeneratedColor.get == S.?("projects.popup.use_custom_color")) color.get else ""
      Task.findByKey(task.id.get).openOrThrowException("Item must be defined!")
        .name(name.get)
        .description(description.get)
        .color(selectedColor)
        .active(active.get)
        .specifiable(specifiable.get)
        .selectable(selectable.get)
        .save
      rerenderTree &
        closeDialog
    }

    val defaultFieldBindings =
      renderProperty(
        ".name *" #> S.?("projects.popup.name") &
        ".field" #> SHtml.textElem(name, "class" -> "form-control")) ++
      renderProperty(
        ".name *" #> S.?("projects.popup.description") &
        ".field" #> SHtml.textElem(description, "class" -> "form-control")) ++
      renderProperty(
        ".name *" #> S.?("projects.popup.color") &
        ".field" #> (
          <br/> ++
          SHtml.radioElem(List(
            S.?("projects.popup.use_generated_color"),
            S.?("projects.popup.use_custom_color")),
            Full(useGeneratedColor.get)) {
            _.map(v => useGeneratedColor.set(v))
          }.toForm ++
          SHtml.textElem(color, "type" -> "color"))
      )

    // The "active" checkbox is only offered for tasks that are already inactive.
    val fieldbindingsWithActive =
      if (task.active.get)
        defaultFieldBindings
      else
        defaultFieldBindings ++
        renderProperty(
          ".name *" #> S.?("projects.popup.active") &
          ".field" #> SHtml.checkboxElem(active))

    val fieldBindings =
      fieldbindingsWithActive ++
      renderProperty(
        ".name *" #> S.?("projects.popup.specifiable") &
        ".field" #> SHtml.checkboxElem(specifiable)) ++
      renderProperty(
        ".name *" #> S.?("projects.popup.selectable") &
        ".field" #> SHtml.checkboxElem(selectable))

    SetHtml("inject",
      (
        ".fields *" #> fieldBindings &
        ".title *" #> S.?("projects.edit") &
        ".submit-button" #> SHtml.ajaxSubmit(S.?("button.save"), submit _, "class" -> "btn btn-primary") &
        ".close-button" #> SHtml.ajaxSubmit(S.?("button.close"), closeDialog _, "class" -> "btn btn-default")
      )(editorTemplate)
    ) &
      openDialog
  }

  /** Applies a label/field binding to the single-property template. */
  private def renderProperty(cssSel: CssSel): NodeSeq = {
    cssSel(editorPropertyTemplate)
  }

  /** Opens the create dialog; the new task is attached under `parent` (root if Empty). */
  private def addChild(parent: Box[Task]): JsCmd = {
    object name extends TransientRequestVar("")
    object description extends TransientRequestVar("")
    object specifiable extends TransientRequestVar(true)
    object selectable extends TransientRequestVar(true)

    def submit: JsCmd = {
      Task.create
        .parent(parent)
        .name(name.get)
        .description(description.get)
        .active(true)
        .specifiable(specifiable.get)
        .selectable(selectable.get)
        .save
      rerenderTree &
        closeDialog
    }

    SetHtml("inject",
      (
        ".fields *" #>
          (
            renderProperty(
              ".name *" #> S.?("projects.popup.name") &
              ".field" #> SHtml.textElem(name, "class" -> "form-control")) ++
            renderProperty(
              ".name" #> S.?("projects.popup.description") &
              ".field" #> SHtml.textElem(description, "class" -> "form-control")) ++
            renderProperty(
              ".name *" #> S.?("projects.popup.specifiable") &
              ".field" #> SHtml.checkboxElem(specifiable)) ++
            renderProperty(
              ".name *" #> S.?("projects.popup.selectable") &
              ".field" #> SHtml.checkboxElem(selectable))
          ) &
        ".title *" #> S.?("projects.add_task") &
        ".submit-button" #> SHtml.ajaxSubmit(S.?("button.save"), submit _, "class" -> "btn btn-primary") &
        ".close-button" #> SHtml.ajaxSubmit(S.?("button.close"), closeDialog _, "class" -> "btn btn-default")
      )(editorTemplate)
    ) &
      openDialog
  }

  /** Re-parents the selected task under `task`, clearing the selection. */
  private def moveTo(task: Task): JsCmd = {
    selectedTask.is.flatMap { st =>
      TaskService.move(st, task)
      selectedTask.set(Empty)
    }
    rerenderTree
  }

  /** Merges the selected task into `task`, clearing the selection. */
  private def mergeTask(task: Task): JsCmd = {
    selectedTask.is.flatMap { st =>
      TaskService.merge(st, task)
      selectedTask.set(Empty)
    }
    rerenderTree
  }

  /** Toggles the session selection: clicking the selected task deselects it. */
  private def selectTask(task: Task): JsCmd = {
    selectedTask.is match {
      case Full(st) =>
        if (st == task) {
          selectedTask.set(Empty)
        } else {
          selectedTask.set(Some(task))
        }
      case Empty => selectedTask.set(Some(task))
    }
    rerenderTree
  }

  /** Opens a confirmation dialog and deletes the task on submit. */
  private def deleteTask(task: Task): JsCmd = {
    def submit: JsCmd = {
      try {
        TaskService.delete(task)
        rerenderTree &
          closeDialog
      } catch {
        // Surface service-level refusals (e.g. from TaskService.delete) to the user.
        case e: Exception => net.liftweb.http.js.JsCmds.Alert(e.getMessage)
      }
    }

    SetHtml("inject",
      (
        ".fields *" #>
          (
            renderProperty(
              ".name *" #> S.?("projects.popup.name") &
              ".field" #> task.name) ++
            renderProperty(
              ".name" #> S.?("projects.popup.description") &
              ".field" #> task.description)) &
        ".title *" #> S.?("projects.delete") &
        ".submit-button" #> SHtml.ajaxSubmit(S.?("button.delete"), submit _, "class" -> "btn btn-primary") &
        ".close-button" #> SHtml.ajaxSubmit(S.?("button.close"), closeDialog _, "class" -> "btn btn-default")
      )(editorTemplate)
    ) &
      openDialog
  }

  // Bootstrap modal helpers and tree refresh.
  def closeDialog: JsCmd = JsRaw("$('.modal').modal('hide')").cmd

  def openDialog: JsCmd = JsRaw("$('.modal').modal()").cmd

  def rerenderTree: JsCmd = SetHtml("project-tree", tasks(NodeSeq.Empty))

  // Shared modal dialog skeleton used by the edit/create/delete dialogs.
  val editorTemplate: NodeSeq =
    <div class="modal fade" data-backdrop="static" data-keyboard="false">
      <div class="modal-dialog">
        <form class="lift:form.ajax" role="form">
          <div class="modal-content">
            <div class="modal-header">
              <h4 class="modal-title title"></h4>
            </div>
            <div class="modal-body fields"></div>
            <div class="modal-footer">
              <input class="submit-button"/>
              <input class="close-button"/>
            </div>
          </div>
        </form>
      </div>
    </div>

  // Skeleton for a single labelled form field inside a dialog.
  val editorPropertyTemplate: NodeSeq =
    <div class="form-group">
      <label class="name"></label>
      <input class="field"/>
    </div>
}
| dodie/time-admin | src/main/scala/code/snippet/admin/ProjectsSnippet.scala | Scala | apache-2.0 | 12,015 |
/*
* Copyright 2015 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.knutwalker.ntparser.model
/** Common supertype of everything that can appear in an N-Triples statement. */
sealed trait Node {
  /** The N-Triples (N3) serialisation of this node. */
  def n3: String
}

/** An IRI node, serialised as `<uri>`; `toString` yields the bare IRI. */
case class Resource(uri: String) extends Node {
  lazy final val n3 = "<" + uri + ">"
  override def toString: String = uri
}

/**
 * A literal with an optional language tag and a datatype, serialised as
 * `"lexical"@lang^^<datatype>`; `toString` yields the bare lexical form.
 */
case class Literal(lexical: String, lang: Option[String], dt: Resource) extends Node {
  lazy final val n3 = {
    val langTag = lang.map("@" + _).getOrElse("")
    "\"" + lexical + "\"" + langTag + "^^" + dt.n3
  }
  override def toString: String = lexical
}

object Literal {
  private final val SimpleStringType = Resource("http://www.w3.org/2001/XMLSchema#string")
  private final val LangStringType = Resource("http://www.w3.org/1999/02/22-rdf-syntax-ns#langString")

  /** A plain literal, typed as xsd:string. */
  def simple(lexical: String): Literal =
    Literal(lexical, None, SimpleStringType)

  /** A language-tagged literal, typed as rdf:langString. */
  def tagged(lexical: String, lang: String): Literal =
    Literal(lexical, Some(lang), LangStringType)

  /** A literal with an explicit datatype. */
  def typed(lexical: String, dt: Resource): Literal =
    Literal(lexical, None, dt)
}

/** A blank node, serialised as `_:nodeId`; `toString` yields the bare id. */
case class BNode(nodeId: String) extends Node {
  lazy final val n3 = "_:" + nodeId
  override def toString: String = nodeId
}

/**
 * Subject/predicate/object statement. `n3` joins the serialised components
 * and terminates with " ."; `toString` joins the plain values.
 */
sealed abstract class Statement(val s: Node, val p: Resource, val o: Node) extends Node {
  lazy final val n3 = s.n3 + " " + p.n3 + " " + o.n3 + " ."
  override def toString: String = s"$s $p $o"
}

object Statement {
  /** A placeholder statement whose components are all empty. */
  val Empty: Statement = Triple(BNode(""), Resource(""), BNode(""))
}

case class Triple(override val s: Node, override val p: Resource, override val o: Node) extends Statement(s, p, o)
| knutwalker/NtParser | models/nt/src/main/scala/de/knutwalker/ntparser/model/types.scala | Scala | apache-2.0 | 2,350 |
package io.igu.cityindex.accountInformation.model
/** Placeholder response model for the "multiple users details" endpoint; carries no fields yet. */
case class MultipleUsersDetailsResponse()
| deadcore/city-index-scala-api | src/main/scala/io/igu/cityindex/accountInformation/model/MultipleUsersDetailsResponse.scala | Scala | apache-2.0 | 98 |
package ecommerce.system.infrastructure.process
import akka.actor.{ ActorSystem, ActorRef }
import ddd.support.domain.event.DomainEvent
import ddd.support.domain._
import ecommerce.system.infrastructure.events.ForwardingConsumer
import ecommerce.system.infrastructure.office.{ Office, OfficeFactory }
import infrastructure.actor.CreationSupport
object SagaSupport {

  // Name of a message exchange.
  type ExchangeName = String
  // Per saga type: the event classes subscribed to, keyed by exchange name.
  type ExchangeSubscriptions[A <: Saga[_]] = Map[ExchangeName, Array[Class[_ <: DomainEvent]]]

  // Fallback case-id resolution for sagas that do not provide their own.
  implicit def defaultCaseIdResolution[A <: Saga[_]]() = new EntityIdResolution[A]

  /**
   * Creates the office actor for saga type A and wires up one forwarding
   * consumer per subscribed exchange. Returns the office ActorRef.
   */
  def registerSaga[A <: Saga[_] : ExchangeSubscriptions : IdResolution : OfficeFactory : BusinessEntityActorFactory](implicit system: ActorSystem, creator: CreationSupport): ActorRef = {
    val sagaOffice = Office.office[A]
    registerEventListeners(sagaOffice)
    sagaOffice
  }

  // NOTE(review): the subscribed event classes (`events`) are currently unused —
  // a ForwardingConsumer is created per exchange regardless of the event list.
  private def registerEventListeners[A <: Saga[_]](sagaOffice: ActorRef)(implicit es: ExchangeSubscriptions[_], creator: CreationSupport) {
    for ((exchangeName, events) <- es) {
      ForwardingConsumer(exchangeName, sagaOffice)
    }
  }
}
| pawelkaczor/ddd-leaven-akka | src/main/scala/ecommerce/system/infrastructure/process/SagaSupport.scala | Scala | mit | 1,106 |
package com.example
import org.jooq.{Allow, SQL, Support};
// Fixture whose `foo` carries jOOQ's @Support marker; the body is irrelevant —
// only the annotation matters for the dialect-checker test below.
object API {
  @Support
  def foo: SQL = ???
}
// Call site annotated with @Allow: exercises that the checker permits calling
// a @Support-annotated API from an @Allow-annotated scope.
object SQLDialectWartTest {
  @Allow
  def bar(): Unit = API.foo
}
| kxbmap/sbt-jooq | checker/src/sbt-test/jooq-checker/test-scope/changes/AllowAllDialects.scala | Scala | apache-2.0 | 180 |
package model.battle
/**
 * Possible outcomes of a single battle round.
 *
 * Created by salim on 12/09/2016.
 */
object BattleRoundResult extends Enumeration {
  val Won = Value
  val Lost = Value
  val Fled = Value
}
| salimfadhley/scalamoo | src/main/scala/model/battle/BattleRoundResult.scala | Scala | mit | 146 |
// Copyright 2014 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.common.scala
/**
 * Memoising wrapper for lazily evaluated values used *within methods*.
 *
 * A `lazy val` declared inside a method synchronizes on the enclosing object
 * instance rather than on the method scope, so multiple threads can block on
 * evaluating the same method-local lazy val.
 *
 * <b>ALWAYS USE THIS IF YOU WANT TO USE A lazy val IN A METHOD</b>, e.g.
 *
 * <tt>
 * def myLazilyEvaluatedFunc(..) = {..}
 * val l = LazyLocal(myLazilyEvaluatedFunc)
 * ...
 * l.value
 * </tt>
 */
class LazyLocal[T](f: => T) {
  /** The memoised result; `f` is evaluated at most once, on first access. */
  lazy val value: T = f
}

object LazyLocal {
  /** Convenience factory: `LazyLocal { expensive() }`. */
  def apply[T](f: => T): LazyLocal[T] = new LazyLocal(f)
}
| foursquare/fsqio | src/jvm/io/fsq/common/scala/LazyLocal.scala | Scala | apache-2.0 | 661 |
object Lib {
  // Concatenates the literal "hello" with the string exposed by Libj2
  // (a Java class defined elsewhere in this build's library sources).
  def hello = "hello" + Libj2.s
}
| markhibberd/scx | test/java/src.lib/Lib.scala | Scala | bsd-3-clause | 47 |
package reactify.bind
// Marker for the initial-synchronisation strategies below.
sealed trait BindSet

/**
 * BindSet defines how a binding should be applied when first defined:
 * which side's current value (if any) is copied to the other at bind time.
 */
object BindSet {
  /**
   * The left value is assigned to the right
   */
  case object LeftToRight extends BindSet

  /**
   * The right value is assigned to the left
   */
  case object RightToLeft extends BindSet

  /**
   * Values are not modified at bind-time
   */
  case object None extends BindSet
}
package com.dominikgruber.fpinscala.chapter07
import org.scalatest._
import java.util.concurrent.Executors
/**
 * Tests for Par's chooser/choiceN/choice combinators.
 *
 * Each test runs its Par computation on a single-thread executor.
 */
class Exercise13Spec extends FlatSpec with Matchers with BeforeAndAfter {
  import Par._

  // ScalaTest runs all tests of a suite on one instance, and the `after` hook
  // shuts this pool down after every test. Without re-creating it in `before`,
  // every test after the first would submit work to a terminated executor and
  // fail with RejectedExecutionException.
  var pool = Executors.newFixedThreadPool(1)

  before {
    if (pool.isShutdown) {
      pool = Executors.newFixedThreadPool(1)
    }
  }

  after {
    pool.shutdown()
  }

  "chooser" should "return the correct result" in {
    chooser(unit(1))((a: Int) => unit(a + 1))(pool).get should be (2)
  }

  "choiceN" should "return the correct result" in {
    choiceN_2(unit(1))(List(unit(1), unit(2), unit(3)))(pool).get should be (2)
  }

  "choice" should "pick true" in {
    choice_2(unit(true))(unit(1), unit(2))(pool).get should be (1)
  }

  it should "pick false" in {
    choice_2(unit(false))(unit(1), unit(2))(pool).get should be (2)
  }
}
| TheDom/functional-programming-in-scala | src/test/scala/com/dominikgruber/fpinscala/chapter07/Exercise13Spec.scala | Scala | mit | 755 |
package org.http4s
package syntax
/** Aggregates every http4s syntax enrichment into a single mix-in trait. */
trait AllSyntax
    extends AnyRef
    with AsyncSyntax
    with EffectResponseSyntax
    with EffectRequestSyntax
    with KleisliSyntax
    with NonEmptyListSyntax
    with StringSyntax
    with LiteralsSyntax | reactormonk/http4s | core/src/main/scala/org/http4s/syntax/AllSyntax.scala | Scala | apache-2.0 | 247 |
/**
* **************************************************************
* Licensed to the AOS Community (AOS) under one or more *
* contributor license agreements. See the NOTICE file *
* distributed with this work for additional information *
* regarding copyright ownership. The AOS licenses this file *
* to you under the Apache License, Version 2.0 (the *
* "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, *
* software distributed under the License is distributed on an *
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
* KIND, either express or implied. See the License for the *
* specific language governing permissions and limitations *
* under the License. *
* **************************************************************
*/
package io.datalayer.controlchart
import java.io.File
import org.sameersingh.scalaplot.MemXYSeries
import org.sameersingh.scalaplot.XYPlotStyle
import org.sameersingh.scalaplot.Implicits._
/** Small numeric helpers shared by the control-chart code below. */
object Stat {

  /**
   * Arithmetic mean of `input`.
   *
   * Replaces the original `reduceLeft`/`return` form: `sum` folds left the
   * same way for non-empty input, and an empty array now yields Float.NaN
   * (0f / 0) instead of throwing UnsupportedOperationException.
   */
  def computeMean(input: Array[Float]): Float =
    input.sum / input.length

  /** Unbiased (n - 1) sample variance of `input` around `mean`. */
  def computeVariance(input: Array[Float], mean: Float): Float =
    (input.map(x => Math.pow(x - mean, 2)).sum / (input.length - 1)).toFloat

  /**
   * Rectangle-rule approximation of the area under the curve, assuming the
   * x axis is evenly spaced (step derived from the first two x values).
   *
   * @throws IllegalArgumentException if fewer than two x values are given
   *         (clearer than the original ArrayIndexOutOfBoundsException).
   */
  def computeIntegral(xAxis: Array[Float], yAxis: Array[Float]): Float = {
    require(xAxis.length >= 2, "need at least two x values to derive a step")
    val step = Math.abs(xAxis(1) - xAxis(0))
    yAxis.map(_ * step).sum
  }

  /** Absolute deviation of `value` from `mean`. */
  def computeStdDev(value: Float, mean: Float): Float =
    Math.abs(value - mean)
}
/** File reader utility
* @param filePath the absolute/relative path of the csv.
* */
/** File reader utility
  * Eagerly reads a comma-separated file: the first line becomes `colNames`,
  * every following line is parsed into floats and appended to `event`.
  * Cleanups vs. original: removed non-idiomatic `return`s and the no-op
  * `drop(0)`; behaviour is otherwise unchanged.
  * @param filePath the absolute/relative path of the csv.
  */
class ReadCSV(filePath: String) {
  /** Parses one row of string cells into floats (throws on non-numeric cells). */
  def arToFloat(ar: Array[String]): Array[Float] = ar.map(_.toFloat)

  /** Column `col` across all data rows (header excluded). */
  def getColumn(col: Int): Array[Float] = event.map(_(col)).toArray

  /** Data row `col` (zero-based, header excluded). */
  def getLine(col: Int): Array[Float] = event(col)

  val file = scala.io.Source.fromFile(filePath)
  val parserIt = file.getLines().map(_.split(","))
  // First row is the header.
  val colNames = parserIt.next()
  val event = scala.collection.mutable.ArrayBuffer.empty[Array[Float]]
  parserIt.foreach(a => event.append(arToFloat(a)))
  // Iterator fully consumed above, so the handle can be released immediately.
  file.close()
}
/** Loads every `.csv` file in `folderPath` (sorted by name) as a [[ReadCSV]]. */
class ReadCSVFolder(folderPath: String) {
  /** Directory entries of `f`, sorted by natural (path) order. */
  def listFiles(f: File): Array[File] = f.listFiles.sorted

  val folder = new File(folderPath)
  val files = listFiles(folder).filter(_.toString.endsWith(".csv"))
  val data = files.map(csvFile => new ReadCSV(csvFile.toString))
}
/** The Control Chart class perform a basic control chart. It simply oulines
* data which are 2 times greater than the standard deviation.
* @param data An array of float representing the data to analyze. */
class ControlChart(data: Array[Float]) {
  val mean = Stat.computeMean(data)
  // Sample standard deviation: sqrt of the (n - 1) variance.
  val stdDev = Math.sqrt(Stat.computeVariance(data, mean)).toFloat
  // Per-point absolute deviation from the mean.
  val localDev = data.map(x => Stat.computeStdDev(x, mean))
  // Indices of points more than two standard deviations from the mean.
  // (localDev is already non-negative, so the extra Math.abs is a no-op.)
  val outliers = localDev.map(x => Math.abs(x)).zipWithIndex.filter(_._1 > 2*stdDev).map(_._2)

  // Prints summary statistics and each outlier (value and index) to stdout.
  def summary() = {
    println("Mean = " + mean)
    println("Standard Deviation = " + stdDev)
    println("Sample size = " + data.length)
    print("\\n")
    println("Outliers:")
    outliers.foreach(x => print("Data value : \\t" + data(x) + " \\t-- " + x + "\\n"))
  }

  // Scatter series: 1-based x positions against the raw data values.
  val series = new MemXYSeries((1 to data.length).toList.map(_.toDouble).toSeq, data.map(_.toDouble).toSeq)
  series.plotStyle = XYPlotStyle.Points

  // Renders an ASCII scatter plot of the series to stdout via scalaplot.
  def plotASCII() = {
    output(ASCII, plot(series))
  }
}
| echalkpad/t4f-data | algorithm/src/main/scala/io/datalayer/controlchart/ControlChart.scala | Scala | apache-2.0 | 4,170 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.actormonitor.testcube
import akka.actor.Actor
// Echo actor: replies to the sender with whatever message it receives.
class TestActor extends Actor {
  def receive = {
    case x => sender() ! x
  }
}
// Same echo behaviour as TestActor; a distinct type so the test suite can
// presumably distinguish routed actors — verify against the suite.
class TestActorWithRoute extends Actor {
  def receive = {
    case x => sender() ! x
  }
}
// Stops itself on any message (the message itself is ignored).
class TestActor1 extends Actor {
  def receive = {
    case x =>context.stop(self)
  }
}
| akara/squbs | squbs-actormonitor/src/test/scala/org/squbs/actormonitor/TestCube/TestCube.scala | Scala | apache-2.0 | 930 |
package io.plasmap.pamphlet
import io.plasmap.util.Helpers.JSOptionBuilder
import scala.scalajs.js
/**
* Created by mark on 26.02.15.
*/
/**
 * Scala.js facade for Leaflet's `L.marker`. Every member is implemented by
 * the underlying JavaScript library (`js.native`); most mutators return the
 * marker itself so calls can be chained.
 */
trait Marker extends ILayer {
  def addTo(map:LeafletMap):Marker = js.native
  def getLatLng():LeafletLatLng = js.native
  def setLatLng(latlng:LeafletLatLng):Marker = js.native
  def setIcon(icon:Icon):Marker = js.native
  def setZIndexOffset(offset:Int):Marker = js.native
  def setOpacity(opacity:Double):Marker = js.native
  def update():Marker = js.native
  def bindPopup(html:String, popupOptions:LeafletPopupOptions = ???):Marker = js.native
  def unbindPopup():Marker = js.native
  def openPopup():Marker = js.native
  def getPopup():LeafletPopup = js.native
  def closePopup():Marker = js.native
  def togglePopup():Marker = js.native
  def toGeoJSON():js.Object = js.native
  // Registers a Leaflet event handler (e.g. "click") on this marker.
  def on(`type`:String, fn:js.Function1[LeafletEvents.LeafletEvent,Unit]):Marker = js.native
}
/** Options bag passed to marker creation; build values via [[MarkerOptions]]. */
trait MarkerOptions extends js.Object
// Entry point: an empty builder to start chaining options from.
object MarkerOptions extends MarkerOptionBuilder(Map.empty[String, Any])
/** Immutable builder: each call returns a new builder with that option set. */
class MarkerOptionBuilder(val dict:Map[String, Any]) extends
  JSOptionBuilder[MarkerOptions, MarkerOptionBuilder](new MarkerOptionBuilder(_)) {
  def icon(v:Icon) = jsOpt("icon", v)
  def clickable(v:Boolean) = jsOpt("clickable", v)
  def draggable(v:Boolean) = jsOpt("draggable", v)
  def keyboard(v:Boolean) = jsOpt("keyboard", v)
  def title(v:String) = jsOpt("title", v)
  def alt(v:String) = jsOpt("alt", v)
  def zIndexOffset(v:Int) = jsOpt("zIndexOffset", v)
  def opacity(v:Double) = jsOpt("opacity", v)
  def riseOnHover(v:Boolean) = jsOpt("riseOnHover", v)
  def riseOffset(v:Int) = jsOpt("riseOffset", v)
}
| CapeSepias/pamphlet | src/main/scala/io/plasmap/pamphlet/Marker.scala | Scala | apache-2.0 | 1,652 |
trait Applicative[F[_]] extends Functor[F] {

  /**
   * Combines two effectful values with `f`.
   *
   * Implemented via `apply`: currying `f` gives `A => B => C`; mapping that
   * over `fa` yields `F[B => C]`, which `apply` then applies to `fb`.
   *
   * Fix: the result type `C` must be declared as a type parameter of `map2`
   * itself — the original `map2[A,B](...)(f: (A, B) => C)` referenced an
   * undeclared `C` and did not compile.
   */
  def map2[A,B,C](fa: F[A], fb: F[B])(f: (A, B) => C): F[C] =
    apply(map(fa)(f.curried), fb)

  /**
   * Applies an effectful function to an effectful argument.
   *
   * `_(_)` is shorthand for `(g, x) => g(x)`, so the function value must be
   * the FIRST argument to `map2`. Fix: the original passed `(fa, fab)`,
   * which does not type-check.
   */
  def apply[A,B](fab: F[A => B])(fa: F[A]): F[B] =
    map2(fab, fa)(_(_))

  /** Lifts a (by-name) value into the effect. */
  def unit[A](a: => A): F[A]

  /** `map` expressed with `unit` and `apply`, as the applicative laws require. */
  def map[A,B](fa: F[A])(f: A => B): F[B] =
    apply(unit(f))(fa)
} | galarragas/FpInScala | answerkey/applicative/1.answer.scala | Scala | mit | 1,021 |
/*
* Copyright 2012 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.json4s
package mongo
import JsonDSL._
import org.bson.types.ObjectId
import org.specs2.mutable.Specification
import scala.collection.JavaConverters._
import com.mongodb.DBObject
import scala.util.control.Exception._
// Verifies that JObjectParser's string processor converts ObjectId-shaped
// strings to ObjectId by default, and can be reconfigured to pass them through.
object JObjectParserSpec extends Specification {
  title("JObjectParser Specification")
  // Tests mutate the shared stringProcessor setting, so run them in order.
  sequential

  // Parses a one-field JObject whose value is an ObjectId rendered as a
  // string; returns the source oid alongside the resulting DBObject.
  def buildTestData: (ObjectId, DBObject) = {
    val oid = ObjectId.get
    val dbo = JObjectParser.parse(("x" -> oid.toString))(DefaultFormats)
    (oid, dbo)
  }
  "JObjectParser" should {
    "convert strings to ObjectId by default" in {
      val (oid, dbo) = buildTestData
      // allCatch.opt: a failed cast yields None instead of throwing.
      val xval = allCatch.opt(dbo.get("x").asInstanceOf[ObjectId])
      xval.isDefined must_== true
      xval.toList map { x =>
        x must_== oid
      } reduce (_ and _)
    }
    "not convert strings to ObjectId when configured not to" in {
      // Identity processor: leave the raw string untouched.
      JObjectParser.stringProcessor.set((s: String) => s)
      val (oid, dbo) = buildTestData
      val xval = allCatch.opt(dbo.get("x").asInstanceOf[String])
      xval.isDefined must_== true
      xval.toList map { x =>
        x must_== oid.toString
      } reduce (_ and _)
    }
  }
}
| geggo98/json4s | tests/src/test/scala/org/json4s/mongo/JObjectParserSpec.scala | Scala | apache-2.0 | 1,771 |
package ch.awae.mcddpui.ui
import FunctionalAction.Implicit.bodyToFunction
import ch.awae.mcddpui.api.CommandExecutor
import ch.awae.mcddpui.api.ManagerCommand
import ch.awae.mcddpui.api.MutationCommand
import ch.awae.mcddpui.api.ReadCommand
import ch.awae.mcddpui.api.functionNameTuple2mutationCommand
import ch.awae.mcddpui.api.mutationCommand2function
import ch.awae.mcddpui.exceptions.ManagerCommandExecutionException
import ch.awae.mcddpui.exceptions.MutationCommandExecutionException
import ch.awae.mcddpui.exceptions.ReadCommandExecutionException
/**
 * Factories for [[FunctionalAction]]s that run MCDDPUI commands through a
 * lazily obtained, implicitly supplied [[CommandExecutor]].
 */
object CommandAction {
  /** Action that executes a manager command. */
  def apply(name: String, desc: String, cmd: ManagerCommand)(implicit ex: () => CommandExecutor[_]) =
    FunctionalAction(name, desc)(ex()(cmd))
  /** Action that executes a read command. */
  def apply[T](name: String, desc: String, cmd: ReadCommand[T])(implicit ex: () => CommandExecutor[T]) =
    FunctionalAction(name, desc)(ex()(cmd))
  /** Action that executes a mutation command. An unnamed command (name == null)
   *  is re-wrapped as a (function, desc) tuple — via the imported implicit
   *  conversions — so the resulting command carries `desc` as its name. */
  def apply[T](name: String, desc: String, cmd: MutationCommand[T, T])(implicit ex: () => CommandExecutor[T]): FunctionalAction =
    if (cmd.name == null)
      apply(name, desc, (cmd: T => T, desc))
    else
      FunctionalAction(name, desc)(ex()(cmd))
} | ksmonkey123/mcddpui | src/main/scala/ch/awae/mcddpui/ui/CommandAction.scala | Scala | mit | 1,169 |
package com.twitter.summingbird.example
import backtype.storm.spout.SpoutOutputCollector
import backtype.storm.spout.ISpout
import backtype.storm.task.TopologyContext
import backtype.storm.topology.OutputFieldsDeclarer
import backtype.storm.utils.Time
import backtype.storm.topology.base.BaseRichSpout
import backtype.storm.tuple.{Tuple, Fields, Values}
import com.twitter.tormenta.spout.Spout
import java.util
import scala.collection.{TraversableOnce, mutable}
//import scala.TraversableOnce
//import java.util._
//import util.Random
import java.util.{Map => JMap, Random}
import collection.mutable.{Map, HashMap}
import util.Random
import util.List
import java.util.concurrent.{BlockingQueue, LinkedBlockingQueue}
//import twitter4j._
//(fn:Tuple3<String> => TraversableOnce[T])
// Companion factory: builds a String-emitting spout on the default field name.
object RandomSentenceSpout
{
  //val QUEUE_LIMIT = 1000 // default max queue size.
  val FIELD_NAME = "tweet" // default output field name.
  //def apply(fieldName: String)
  // Identity flatMap function: each sentence is emitted as-is.
  def apply(fieldName: String = FIELD_NAME): RandomSentenceSpout[String] =
    new RandomSentenceSpout(fieldName)(i => Some(i))
  //def apply(fieldName: String):RandomSentenceSpout[String] = new RandomSentenceSpout(fieldName)(i => Some(i))
  //(i => Some(i))
}
//(fn: Tuple => TraversableOnce[T])
// Storm spout that feeds sentences from an in-memory queue. Clearly
// experimental code: large regions are commented out and two review notes
// below flag likely defects.
class RandomSentenceSpout[+T](fieldName:String)(fn: String => TraversableOnce[T])extends BaseRichSpout with Spout[T] {
  //val sentences = mutable.Queue("tweet"->"the cow jumped over the moon",
  // "tweet"->"an apple a day keeps the doctor away",
  // "tweet"->"four score and seven years ago",
  // "tweet"->"snow white and the seven dwarfs",
  // "tweet"->"i am at two with nature")
  val limit = 1000 // default max queue size.
  val FIELD_NAME = "tweet" // default output field name.
  //val stream: Tuple3<String> = null
  var collector: SpoutOutputCollector = null
  // Bounded queue; offer() silently drops once `limit` entries are pending.
  lazy val queue = new LinkedBlockingQueue[Map[String,String]](limit)

  // NOTE(review): this loop never terminates (`a` only grows, condition is
  // a > 0) and it is called synchronously from open(), so the spout blocks
  // forever while opening — confirm whether this was meant to run on its own
  // thread or with a finite bound.
  def AddItem()
  {
    val sentence = Map("tweet"->"Sentence")
    var a:Int = 1
    while(a>0)
    {
      Time.sleep(10)
      queue.offer(sentence)
      //println(sentence)
      a = a+1
    }
  }
  def onException(ex: Exception) {}
  def getSpout = this
  override def open(conf: JMap[_,_], context:TopologyContext, coll: SpoutOutputCollector)
  {
    collector = coll
    AddItem()
  }
  override def declareOutputFields(declarer: OutputFieldsDeclarer)
  {
    declarer.declare(new Fields(FIELD_NAME))
  }
  // Back-off used when the queue is empty (currently unreferenced; see note).
  def onEmpty: Unit = Time.sleep(50)

  // NOTE(review): the None branch is commented out, so an empty queue makes
  // Option(queue.poll) produce None and this match throws MatchError —
  // presumably `case None => onEmpty` should be restored.
  override def nextTuple()
  {
    //val q0 = collection.immutable.Queue("1","Two","iii")
    println("#####POLLING#######")
    Option(queue.poll) match{
      // case None => onEmpty
      case Some(items) => items.foreach{item => collector.emit(new Values(item.asInstanceOf[AnyRef]))}
    }
  }
  // val sentences = List("tweet"->"the cow jumped over the moon",
  // "tweet"->"an apple a day keeps the doctor away",
  // "tweet"->"four score and seven years ago",
  // "tweet"->"snow white and the sfneven dwarfs",
  // "tweet"->"i am at two with nature")
  //println(sentences)
  // Time.sleep(100)
  //for( sentence <- sentences ){
  //println("###"+sentence+"####")
  // (1, "hello").productIterator
  // .foreach {
  // case s: String => println("string: " + s)
  // case i: Int => println("int: " + i)
  // }
  // Thread sleep 10
  // collector.emit(new Values(sentence.asInstanceOf[AnyRef]))
  //println("###EMITTING####")
  // collector.emit(sentences(Random.nextInt(sentences.length)))
  //val sentence:Option[String] = null
  //sentence match {
  // case None => onEmpty
  // case Some(sentences) => sentences.foreach(sentence => collector.emit(new Values(sentence.asInstanceOf[AnyRef])))
  // println("###EMITTING####")
  // }
  // val sentence:Option[] = null
  // sentence match{
  // case None => onEmpty
  // case Some(items) => items.foreach{item => collector.emit(new Values(item.asInstanceOf[AnyRef]))}
  //WORKS FROM HERE:
  //Time.sleep(100)
  //val sentence:String = null
  //sentences.foreach(sentences => collector.emit(new Values(sentence.asInstanceOf[AnyRef])))
  // println("###EMITTING####")
  //sentences.productIterator.map{case Some(sentences) => sentences.productIterator.foeach{sentence => collector.emit(new Values(item.asInstanceOf[AnyRef]))
  // val sentences = ("tweet"->"dsjfka sdfaf","tweet"->"adfjhadfja fdss","tweet"->"fdsafdasfd fdsfd")
  // Time.sleep(100)
  // println("PRODUCT ITERATOR")
  // sentences.productIterator.map{sentence => collector.emit(new Values(sentence.asInstanceOf[AnyRef]))}
  //var sentence = sentences(Random.nextInt(sentences.length))
  //collector.emit(new Values(sentence))
  // Composes a further flatMap step onto this spout's transformation.
  override def flatMap[U](newFn: T => TraversableOnce[U]) = new RandomSentenceSpout(fieldName)(fn(_).flatMap(newFn))
}
| surabhiiyer/summingbird | summingbird-example/src/main/scala/com/twitter/summingbird/example/RandomSentenceSpout.scala | Scala | apache-2.0 | 4,800 |
/**
* Copyright (C) 2013 Carnegie Mellon University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tdb.ddg
import scala.collection.mutable.Buffer
import tdb.Constants.ModId
/**
 * Order-maintenance structure over [[Timestamp]]s: a circular doubly linked
 * list of [[Sublist]]s anchored at the `base` sentinel, where each sublist
 * holds a bounded number of timestamps. Sublists are split and renumbered as
 * they fill so timestamp comparisons stay cheap.
 */
class Ordering {
  // Upper bound used by the (disabled) amortized relabeling scheme kept in
  // comments inside sublistAfter.
  val maxSize = Int.MaxValue / 2
  // Sentinel sublist; the sublist ring is circular through `base`.
  val base = new Sublist(0, null)
  base.next = new Sublist(1, base)
  base.previous = base.next
  base.next.base.end = base.base

  /**
   * Inserts `node` immediately after timestamp `t` (or into the first sublist
   * when `t` is null) and returns the new timestamp. If the affected sublist
   * grows past 63 entries it is split in half into a freshly inserted sublist.
   */
  def after(t: Timestamp, node: Node): Timestamp = {
    val previousSublist =
      if (t == null) {
        base.next
      } else {
        t.sublist
      }
    val newTimestamp = previousSublist.after(t, node)
    if (previousSublist.size > 63) {
      val newSublist = sublistAfter(previousSublist)
      assert(previousSublist.id != newSublist.id)
      previousSublist.split(newSublist)
    }
    newTimestamp
  }

  /**
   * Appends `node` at the very end of the ordering, starting a fresh sublist
   * once the last one holds more than 31 entries.
   */
  def append(node: Node): Timestamp = {
    val newTimestamp =
      if (base.previous.size > 31) {
        val newSublist = sublistAppend()
        newSublist.append(node)
      } else {
        base.previous.append(node)
      }
    newTimestamp
  }

  // Links a new, empty sublist at the tail of the ring and returns it.
  private def sublistAppend(): Sublist = {
    val previous = base.previous
    val newSublist = new Sublist(previous.id + 1, base)
    newSublist.previous = previous
    previous.next = newSublist
    base.previous = newSublist
    newSublist
  }

  // Inserts a new sublist right after `s`, shifting the ids of every later
  // sublist up by one to keep ids strictly increasing along the ring.
  // The commented block below is an earlier amortized relabeling variant.
  private def sublistAfter(s: Sublist): Sublist = {
    var node = s.next
    while (node != base) {
      node.id += 1
      node = node.next
    }
    val newSublist = new Sublist(s.id + 1, s.next)
    newSublist.previous = s
    s.next = newSublist
    newSublist.next.previous = newSublist
    newSublist
    /*val previous =
      if (s == null) {
        base
      } else {
        s
      }
    val v0 = previous.id
    var j = 1
    var vj = previous.next
    var wj =
      if (vj == base) {
        maxSize
      } else {
        (vj.id - v0) % maxSize
      }
    while (wj <= j * j) {
      vj = vj.next
      j += 1
      wj =
        if (vj == base) {
          maxSize
        } else {
          (vj.id - v0) % maxSize
        }
    }
    var sx = previous.next
    for (i <- 1 to j - 1) {
      sx.id = (wj * (i / j) + v0) % maxSize
      sx = sx.next
    }
    val nextId =
      if (previous.next == base) {
        maxSize
      } else {
        previous.next.id
      }
    val newSublist = new Sublist((v0 + nextId) / 2, previous.next)
    previous.next = newSublist
    newSublist*/
  }

  // Removes a timestamp; an emptied sublist is unlinked from the ring.
  def remove(t: Timestamp) {
    t.sublist.remove(t)
    if (t.sublist.size == 0) {
      t.sublist.previous.next = t.sublist.next
      t.sublist.next.previous = t.sublist.previous
    }
  }

  // Collects the mod ids of every live ModNode between the first and last
  // timestamps of the ordering.
  def getMods(): Iterable[ModId] = {
    val mods = Buffer[ModId]()
    var time = base.next.base.next
    while (time < base.previous.base.previous) {
      val node = time.node
      if (time.end != null) {
        node match {
          case modNode: ModNode =>
            if (modNode.modId1 != -1) {
              mods += modNode.modId1
            }
            if (modNode.modId2 != -1) {
              mods += modNode.modId2
            }
          case _ =>
        }
      }
      time = time.getNext()
    }
    mods
  }

  /**
   * Removes the timestamps in (start, end): per node, detaches it from the
   * relevant DDG bookkeeping in context `c`, then relinks the timestamp lists
   * (and sublists, when the range spans more than one) around the cut.
   */
  def splice(start: Timestamp, end: Timestamp, c: tdb.Context) {
    var time = start
    while (time < end) {
      val node = time.node
      if (time.end != null) {
        // Per-node-type cleanup of the DDG state referencing this timestamp.
        node match {
          case readNode: ReadNode =>
            c.ddg.reads(readNode.modId) -= time
            readNode.updated = false
          case read2Node: Read2Node =>
            c.ddg.reads(read2Node.modId1) -= time
            c.ddg.reads(read2Node.modId2) -= time
            read2Node.updated = false
          case memoNode: MemoNode =>
            memoNode.memoizer.removeEntry(time, memoNode.signature)
          case modNode: ModNode =>
            if (modNode.modId1 != -1) {
              c.remove(modNode.modId1)
            }
            if (modNode.modId2 != -1) {
              c.remove(modNode.modId2)
            }
          case parNode: ParNode =>
            parNode.updated = false
          case putNode: PutNode =>
            c.buffers(putNode.input)
              .removeAll(Iterable((putNode.key, putNode.value)))
          case putNode: PutAllNode =>
            c.buffers(putNode.input).removeAll(putNode.values)
          case putNode: PutInNode =>
            c.bufs(putNode.traceable.inputId).remove(putNode.parameters)
          case getNode: GetNode =>
            getNode.updated = false
            c.ddg.keys(getNode.input.inputId)(getNode.key) -= time
          case x => println("Tried to splice unknown node type " + x)
        }
        // Matching end timestamps lying beyond the cut must go too.
        if (time.end > end) {
          remove(time.end)
        }
      }
      time = time.getNext()
    }
    if (start.sublist == end.sublist) {
      // Cut is inside a single sublist: bridge the gap and recount its size.
      start.previous.next = end
      end.previous = start.previous
      var size = 0
      var stamp = start.sublist.base.next
      while (stamp != start.sublist.base) {
        size += 1
        stamp = stamp.next
      }
      start.sublist.size = size
    } else {
      // Cut spans sublists: truncate start's sublist after `start` (dropping
      // it entirely if `start` was its first entry), truncate end's sublist
      // before `end`, then relink the two sublists to each other.
      val startSublist =
        if (start.previous == start.sublist.base) {
          start.sublist.previous
        } else {
          start.previous.next = start.sublist.base
          start.sublist.base.previous = start.previous
          var size = 0
          var stamp = start.sublist.base.next
          while (stamp != start.sublist.base) {
            size += 1
            stamp = stamp.next
          }
          start.sublist.size = size
          start.sublist
        }
      end.previous = end.sublist.base
      end.sublist.base.next = end
      var size = 0
      var stamp = end.sublist.base.next
      while (stamp != end.sublist.base) {
        size += 1
        stamp = stamp.next
      }
      end.sublist.size = size
      startSublist.next = end.sublist
      end.sublist.previous = startSublist
    }
  }

  // Direct children of the (start, end) span: each child's subtree is skipped
  // by jumping from its start timestamp to just past its end timestamp.
  def getChildren(start: Timestamp, end: Timestamp): Buffer[Timestamp] = {
    val children = Buffer[Timestamp]()
    var time = start.getNext()
    while (time != end) {
      children += time
      time = time.end.getNext()
    }
    children
  }

  // Debug rendering; note it also prints each sublist as a side effect.
  override def toString = {
    var node = base.next
    var ret = base.toString
    while (node != base) {
      print(node + " ")
      ret += ", " + node
      node = node.next
    }
    ret
  }
}
| twmarshall/tdb | core/src/main/scala/tdb/ddg/Ordering.scala | Scala | apache-2.0 | 6,834 |
package monocle.function
import monocle.Iso
import scala.annotation.implicitNotFound
@implicitNotFound("Could not find an instance of Reverse[${S},${A}], please check Monocle instance location policy to " +
  "find out which import is necessary")
abstract class Reverse[S, A] extends Serializable {
  /** Creates an Iso from S to a reversed S */
  def reverse: Iso[S, A]
}
trait ReverseFunctions {
  // Builds a Reverse instance from a self-inverse reversing function.
  def reverseFromReverseFunction[S](_reverse: S => S): Reverse[S, S] = new Reverse[S, S] {
    val reverse = Iso(_reverse)(_reverse)
  }
  // Summons the Iso of an in-scope Reverse instance.
  def reverse[S, A](implicit ev: Reverse[S, A]): Iso[S, A] = ev.reverse
  // Applies the reversal directly to a value.
  def _reverse[S](s: S)(implicit ev: Reverse[S, S]): S = ev.reverse.get(s)
}
object Reverse extends ReverseFunctions {
  /************************************************************************************************/
  /** Std instances */
  /************************************************************************************************/
  implicit def listReverse[A]: Reverse[List[A], List[A]] =
    reverseFromReverseFunction(_.reverse)
  implicit def streamReverse[A]: Reverse[Stream[A], Stream[A]] =
    reverseFromReverseFunction(_.reverse)
  implicit val stringReverse: Reverse[String, String] =
    reverseFromReverseFunction(_.reverse)
  // A 1-tuple is its own reversal.
  implicit def tuple1Reverse[A]: Reverse[Tuple1[A], Tuple1[A]] = new Reverse[Tuple1[A], Tuple1[A]] {
    val reverse = Iso.id[Tuple1[A]]
  }
  // Tuple instances reverse the order of the components (and hence the type).
  implicit def tuple2Reverse[A, B]: Reverse[(A, B), (B, A)] = new Reverse[(A, B), (B, A)] {
    val reverse = Iso[(A, B), (B, A)](_.swap)(_.swap)
  }
  implicit def tuple3Reverse[A, B, C]: Reverse[(A, B, C), (C, B, A)] = new Reverse[(A, B, C), (C, B, A)] {
    val reverse = Iso{t: (A, B, C) => (t._3, t._2, t._1)}(t => (t._3, t._2, t._1))
  }
  implicit def tuple4Reverse[A, B, C, D]: Reverse[(A, B, C, D), (D, C, B, A)] = new Reverse[(A, B, C, D), (D, C, B, A)] {
    val reverse = Iso{t: (A, B, C, D) => (t._4, t._3, t._2, t._1)}(t => (t._4, t._3, t._2, t._1))
  }
  implicit def tuple5Reverse[A, B, C, D, E]: Reverse[(A, B, C, D, E), (E, D, C, B, A)] = new Reverse[(A, B, C, D, E), (E, D, C, B, A)] {
    val reverse = Iso{t: (A, B, C, D, E) => (t._5, t._4, t._3, t._2, t._1)}(t => (t._5, t._4, t._3, t._2, t._1))
  }
  implicit def tuple6Reverse[A, B, C, D, E, F]: Reverse[(A, B, C, D, E, F), (F, E, D, C, B, A)] = new Reverse[(A, B, C, D, E, F), (F, E, D, C, B, A)] {
    val reverse = Iso{t: (A, B, C, D, E, F) => (t._6, t._5, t._4, t._3, t._2, t._1)}(t => (t._6, t._5, t._4, t._3, t._2, t._1))
  }
  implicit def vectorReverse[A]: Reverse[Vector[A], Vector[A]] =
    reverseFromReverseFunction(_.reverse)
  /************************************************************************************************/
  /** Scalaz instances */
  /************************************************************************************************/
  import scalaz.{IList, NonEmptyList, Tree}
  implicit def iListReverse[A]: Reverse[IList[A], IList[A]] =
    reverseFromReverseFunction(_.reverse)
  implicit def nelReverse[A]: Reverse[NonEmptyList[A], NonEmptyList[A]] =
    reverseFromReverseFunction(_.reverse)
  // Reverses a rose tree by recursively reversing each node's child list.
  implicit def treeReverse[A]: Reverse[Tree[A], Tree[A]] = new Reverse[Tree[A], Tree[A]] {
    val reverse = Iso[Tree[A], Tree[A]](reverseTree)(reverseTree)
    private def reverseTree(tree: Tree[A]): Tree[A] = Tree.Node(tree.rootLabel, tree.subForest.reverse.map(reverseTree))
  }
} | rperry/Monocle | core/shared/src/main/scala/monocle/function/Reverse.scala | Scala | mit | 3,560 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.util.concurrent.ConcurrentHashMap
import scala.collection.mutable
import org.eclipse.jetty.util.ConcurrentHashSet
import org.scalatest.concurrent.Eventually
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.concurrent.Timeouts._
import org.scalatest.time.SpanSugar._
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.streaming.ProcessingTime
import org.apache.spark.sql.streaming.util.StreamManualClock
/**
 * Tests for ProcessingTimeExecutor: next-batch-time arithmetic, trigger
 * timing against a manual clock, batch termination, and the falling-behind
 * notification.
 */
class ProcessingTimeExecutorSuite extends SparkFunSuite {

  // Upper bound for every blocking wait in this suite.
  val timeout = 10.seconds

  test("nextBatchTime") {
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(100))
    // Times strictly inside an interval round up to its end; an exact
    // boundary advances to the following interval.
    assert(processingTimeExecutor.nextBatchTime(0) === 100)
    assert(processingTimeExecutor.nextBatchTime(1) === 100)
    assert(processingTimeExecutor.nextBatchTime(99) === 100)
    assert(processingTimeExecutor.nextBatchTime(100) === 200)
    assert(processingTimeExecutor.nextBatchTime(101) === 200)
    assert(processingTimeExecutor.nextBatchTime(150) === 200)
  }

  test("trigger timing") {
    val triggerTimes = new ConcurrentHashSet[Int]
    val clock = new StreamManualClock()
    @volatile var continueExecuting = true
    // How far each trigger advances the manual clock, simulating batch cost.
    @volatile var clockIncrementInTrigger = 0L
    val executor = ProcessingTimeExecutor(ProcessingTime("1000 milliseconds"), clock)
    val executorThread = new Thread() {
      override def run(): Unit = {
        executor.execute(() => {
          // Record the trigger time, advance the clock by the simulated batch
          // duration, and report whether execution should continue.
          triggerTimes.add(clock.getTimeMillis.toInt)
          clock.advance(clockIncrementInTrigger)
          clockIncrementInTrigger = 0 // reset this so that there are no runaway triggers
          continueExecuting
        })
      }
    }
    executorThread.start()
    // First batch should execute immediately, then executor should wait for next one
    eventually {
      assert(triggerTimes.contains(0))
      assert(clock.isStreamWaitingAt(0))
      assert(clock.isStreamWaitingFor(1000))
    }
    // Second batch should execute when clock reaches the next trigger time.
    // If next trigger takes less than the trigger interval, executor should wait for next one
    clockIncrementInTrigger = 500
    clock.setTime(1000)
    eventually {
      assert(triggerTimes.contains(1000))
      assert(clock.isStreamWaitingAt(1500))
      assert(clock.isStreamWaitingFor(2000))
    }
    // If next trigger takes less than the trigger interval, executor should immediately execute
    // another one
    clockIncrementInTrigger = 1500
    clock.setTime(2000) // allow another trigger by setting clock to 2000
    eventually {
      // Since the next trigger will take 1500 (which is more than trigger interval of 1000)
      // executor will immediately execute another trigger
      assert(triggerTimes.contains(2000) && triggerTimes.contains(3500))
      assert(clock.isStreamWaitingAt(3500))
      assert(clock.isStreamWaitingFor(4000))
    }
    continueExecuting = false
    clock.advance(1000)
    waitForThreadJoin(executorThread)
  }

  test("calling nextBatchTime with the result of a previous call should return the next interval") {
    val intervalMS = 100
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMS))
    val ITERATION = 10
    var nextBatchTime: Long = 0
    for (it <- 1 to ITERATION) {
      nextBatchTime = processingTimeExecutor.nextBatchTime(nextBatchTime)
    }
    // nextBatchTime should be 1000
    assert(nextBatchTime === intervalMS * ITERATION)
  }

  // Runs an executor whose batch function stops after three batches and
  // verifies `execute` honors the returned false.
  private def testBatchTermination(intervalMs: Long): Unit = {
    var batchCounts = 0
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMs))
    processingTimeExecutor.execute(() => {
      batchCounts += 1
      // If the batch termination works correctly, batchCounts should be 3 after `execute`
      batchCounts < 3
    })
    assert(batchCounts === 3)
  }

  test("batch termination") {
    testBatchTermination(0)
    testBatchTermination(10)
  }

  test("notifyBatchFallingBehind") {
    val clock = new StreamManualClock()
    @volatile var batchFallingBehindCalled = false
    val t = new Thread() {
      override def run(): Unit = {
        // Batch takes 200ms against a 100ms interval, so the executor must
        // report that the batch is falling behind.
        val processingTimeExecutor = new ProcessingTimeExecutor(ProcessingTime(100), clock) {
          override def notifyBatchFallingBehind(realElapsedTimeMs: Long): Unit = {
            batchFallingBehindCalled = true
          }
        }
        processingTimeExecutor.execute(() => {
          clock.waitTillTime(200)
          false
        })
      }
    }
    t.start()
    // Wait until the batch is running so that we don't call `advance` too early
    eventually { assert(clock.isStreamWaitingFor(200)) }
    clock.advance(200)
    waitForThreadJoin(t)
    assert(batchFallingBehindCalled === true)
  }

  // Retries `body` until it passes or the suite timeout elapses.
  private def eventually(body: => Unit): Unit = {
    Eventually.eventually(Timeout(timeout)) { body }
  }

  // Joins `thread`, failing the test rather than hanging past the timeout.
  private def waitForThreadJoin(thread: Thread): Unit = {
    failAfter(timeout) { thread.join() }
  }
}
| aokolnychyi/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/ProcessingTimeExecutorSuite.scala | Scala | apache-2.0 | 5,882 |
/*
* Copyright (C) 2016 Language Technology Group and Interactive Graphics Systems Group, Technische Universität Darmstadt, Germany
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/** Defines the controller instances of the application. */
// Intentionally empty: exists only to anchor the package-level Scaladoc above.
package object controllers
| thorbenwiese/newsleak-frontend | app/controllers/package.scala | Scala | agpl-3.0 | 894 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database
import akka.actor.ActorSystem
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.cli.{CommandError, CommandMessages, IllegalState, WhiskCommand}
import org.apache.openwhisk.core.database.LimitsCommand.LimitEntity
import org.apache.openwhisk.core.entity.types.AuthStore
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.http.Messages
import org.apache.openwhisk.spi.SpiLoader
import org.rogach.scallop.{ScallopConfBase, Subcommand}
import spray.json.{JsObject, JsString, JsValue, RootJsonFormat}
import scala.concurrent.{ExecutionContext, Future}
import scala.language.reflectiveCalls
import scala.reflect.classTag
import scala.util.{Properties, Try}
/**
 * Implements the `limits` CLI subcommand with nested `set`, `get` and `delete`
 * subcommands for managing namespace-specific limits (throttles, allowed
 * runtime kinds, activation persistence) stored as documents in the auth store.
 */
class LimitsCommand extends Subcommand("limits") with WhiskCommand {
  descr("manage namespace-specific limits")

  /** `limits set <namespace>` — create or update the limit document for a namespace. */
  val set = new Subcommand("set") {
    descr("set limits for a given namespace")

    val namespace = trailArg[String](descr = "the namespace to set limits for")

    //name is explicitly mentioned for backward compatibility
    //otherwise scallop would convert it to - separated names
    val invocationsPerMinute =
      opt[Int](
        descr = "invocations per minute allowed",
        argName = "INVOCATIONSPERMINUTE",
        validate = _ >= 0,
        name = "invocationsPerMinute",
        noshort = true)

    val firesPerMinute =
      opt[Int](
        descr = "trigger fires per minute allowed",
        argName = "FIRESPERMINUTE",
        validate = _ >= 0,
        name = "firesPerMinute",
        noshort = true)

    val concurrentInvocations =
      opt[Int](
        descr = "concurrent invocations allowed for this namespace",
        argName = "CONCURRENTINVOCATIONS",
        validate = _ >= 0,
        name = "concurrentInvocations",
        noshort = true)

    val allowedKinds =
      opt[List[String]](
        descr = "list of runtime kinds allowed in this namespace",
        argName = "ALLOWEDKINDS",
        name = "allowedKinds",
        noshort = true,
        default = None)

    val storeActivations =
      opt[String](
        descr = "enable or disable storing of activations to datastore for this namespace",
        argName = "STOREACTIVATIONS",
        name = "storeActivations",
        noshort = true,
        default = None)

    /** Limit entity assembled lazily from the parsed CLI options. */
    lazy val limits: LimitEntity =
      new LimitEntity(
        EntityName(namespace()),
        UserLimits(
          invocationsPerMinute.toOption,
          concurrentInvocations.toOption,
          firesPerMinute.toOption,
          allowedKinds.toOption.map(_.toSet),
          storeActivations.toOption.map(_.toBoolean)))
  }
  addSubcommand(set)

  /** `limits get <namespace>` — print the stored limits, or note that defaults apply. */
  val get = new Subcommand("get") {
    descr("get limits for a given namespace (if none exist, system defaults apply)")
    // Fixed: removed a stray trailing backtick from the help text.
    val namespace = trailArg[String](descr = "the namespace to get limits for")
  }
  addSubcommand(get)

  /** `limits delete <namespace>` — remove the limit document so system defaults apply. */
  val delete = new Subcommand("delete") {
    descr("delete limits for a given namespace (system defaults apply)")
    val namespace = trailArg[String](descr = "the namespace to delete limits for")
  }
  addSubcommand(delete)

  /**
   * Dispatches the parsed subcommand against a freshly created auth store and
   * shuts the store down once the operation completes (success or failure).
   */
  def exec(cmd: ScallopConfBase)(implicit system: ActorSystem,
                                 logging: Logging,
                                 transid: TransactionId): Future[Either[CommandError, String]] = {
    implicit val executionContext = system.dispatcher
    val authStore = LimitsCommand.createDataStore()
    val result = cmd match {
      case `set` => setLimits(authStore)
      case `get` => getLimits(authStore)
      case `delete` => delLimits(authStore)
    }
    result.onComplete { _ =>
      authStore.shutdown()
    }
    result
  }

  /**
   * Upserts the limit document: if one already exists its revision is carried
   * over and the document updated, otherwise a new document is inserted.
   */
  def setLimits(authStore: AuthStore)(implicit transid: TransactionId,
                                      ec: ExecutionContext): Future[Either[CommandError, String]] = {
    authStore
      .get[LimitEntity](set.limits.docinfo)
      .flatMap { limits =>
        val newLimits = set.limits.revision[LimitEntity](limits.rev)
        authStore.put(newLimits).map(_ => Right(CommandMessages.limitsSuccessfullyUpdated(limits.name.asString)))
      }
      .recoverWith {
        case _: NoDocumentException =>
          authStore.put(set.limits).map(_ => Right(CommandMessages.limitsSuccessfullySet(set.limits.name.asString)))
      }
  }

  /**
   * Reads the limit document and renders each defined field on its own line.
   * Absence of a document is not an error: the default-limits message is
   * returned instead.
   */
  def getLimits(authStore: AuthStore)(implicit transid: TransactionId,
                                      ec: ExecutionContext): Future[Either[CommandError, String]] = {
    val info = DocInfo(LimitsCommand.limitIdOf(EntityName(get.namespace())))
    authStore
      .get[LimitEntity](info)
      .map { le =>
        val l = le.limits
        val msg = Seq(
          l.concurrentInvocations.map(ci => s"concurrentInvocations = $ci"),
          l.invocationsPerMinute.map(i => s"invocationsPerMinute = $i"),
          l.firesPerMinute.map(i => s"firesPerMinute = $i"),
          l.allowedKinds.map(k => s"allowedKinds = ${k.mkString(", ")}"),
          l.storeActivations.map(sa => s"storeActivations = $sa")).flatten.mkString(Properties.lineSeparator)
        Right(msg)
      }
      .recover {
        case _: NoDocumentException =>
          Right(CommandMessages.defaultLimits)
      }
  }

  /**
   * Deletes the limit document. Unlike `get`, deleting limits that were never
   * set is reported as an error.
   */
  def delLimits(authStore: AuthStore)(implicit transid: TransactionId,
                                      ec: ExecutionContext): Future[Either[CommandError, String]] = {
    val info = DocInfo(LimitsCommand.limitIdOf(EntityName(delete.namespace())))
    authStore
      .get[LimitEntity](info)
      .flatMap { l =>
        authStore.del(l.docinfo).map(_ => Right(CommandMessages.limitsDeleted))
      }
      .recover {
        case _: NoDocumentException =>
          Left(IllegalState(CommandMessages.limitsNotFound(delete.namespace())))
      }
  }
}
object LimitsCommand {

  /** Document id under which a namespace's limits are stored: "<namespace>/limits". */
  def limitIdOf(name: EntityName) = DocId(s"${name.name}/limits")

  /** Creates an auth store backed by the SPI-configured ArtifactStoreProvider. */
  def createDataStore()(implicit system: ActorSystem, logging: Logging): ArtifactStore[WhiskAuth] =
    SpiLoader
      .get[ArtifactStoreProvider]
      .makeStore[WhiskAuth]()(classTag[WhiskAuth], LimitsFormat, WhiskDocumentReader, system, logging)

  /**
   * A limits document masquerading as a WhiskAuth entity so it can be written
   * through the existing auth-store APIs; the auth JSON is replaced by the
   * serialized limits.
   */
  class LimitEntity(val name: EntityName, val limits: UserLimits) extends WhiskAuth(Subject(), Set.empty) {
    override def docid: DocId = limitIdOf(name)

    //There is no api to write limits. So piggy back on WhiskAuth but replace auth json
    //with limits!
    override def toJson: JsObject = UserLimits.serdes.write(limits).asJsObject
  }

  /** JSON (de)serializer for LimitEntity documents stored as WhiskAuth records. */
  private object LimitsFormat extends RootJsonFormat[WhiskAuth] {

    // Rebuilds a LimitEntity from the stored JSON: the limits fields plus the
    // CouchDB-style `_id` ("<namespace>/limits") and `_rev` metadata.
    override def read(json: JsValue): WhiskAuth = {
      val r = Try[LimitEntity] {
        val limits = UserLimits.serdes.read(json)
        val JsString(id) = json.asJsObject.fields("_id")
        val JsString(rev) = json.asJsObject.fields("_rev")
        val Array(name, _) = id.split('/')
        new LimitEntity(EntityName(name), limits).revision[LimitEntity](DocRevision(rev))
      }
      // Any failure (missing fields, malformed id, bad limits) is surfaced
      // uniformly as a corrupted-entity error.
      if (r.isSuccess) r.get else throw DocumentUnreadable(Messages.corruptedEntity)
    }

    override def write(obj: WhiskAuth): JsValue = obj.toDocumentRecord
  }
}
| style95/openwhisk | tools/admin/src/main/scala/org/apache/openwhisk/core/database/LimitsCommand.scala | Scala | apache-2.0 | 7,936 |
package io.digitallibrary.bookapi.service
import io.digitallibrary.bookapi.{TestData, TestEnvironment, UnitSuite}
import io.digitallibrary.bookapi.model._
import io.digitallibrary.bookapi.model.api.{Contributor, ValidationException}
import io.digitallibrary.bookapi.model.domain.Translation
import org.mockito.ArgumentMatchers.{any, anyString, eq => eqTo}
import org.mockito.Mockito._
import scalikejdbc.{AutoSession, DBSession}
/**
 * Unit tests for `ImportService`: category validation, contributor persistence,
 * chapter updates and publisher persistence, with all repositories mocked.
 */
class ImportServiceTest extends UnitSuite with TestEnvironment {
  val service = new ImportService
  // Fresh mocks per test so interaction counts do not leak across tests.
  override def beforeEach = {
    resetMocks()
  }
  // Each unknown category should produce its own validation message.
  test("that validCategories returns Failure with validation messages for each invalid category") {
    val invalidCategory1 = api.Category(2, 1, "This is invalid")
    val invalidCategory2 = api.Category(3, 1, "This is also invalid")
    val book = TestData.Internal.DefaultInternalBook.copy(categories = Seq(invalidCategory1, invalidCategory2))
    when(categoryRepository.withName(anyString())(any[DBSession])).thenReturn(None)
    val result = service.validCategories(book)
    result should be a 'Failure
    val validationException = result.failed.get.asInstanceOf[ValidationException]
    validationException.errors.head.message should equal("This is invalid is not a valid category.")
    validationException.errors.last.message should equal("This is also invalid is not a valid category.")
  }
  test("that validCategories returns Success when all categories are valid") {
    val category1 = api.Category(2, 1, "category1")
    val category2 = api.Category(3, 1, "category2")
    val book = TestData.Internal.DefaultInternalBook.copy(categories = Seq(category1, category2))
    when(categoryRepository.withName(eqTo("category1"))(any[DBSession])).thenReturn(Some(TestData.Domain.DefaultCategory.copy(name = "Category 1")))
    when(categoryRepository.withName(eqTo("category2"))(any[DBSession])).thenReturn(Some(TestData.Domain.DefaultCategory.copy(name = "Category 1")))
    val result = service.validCategories(book)
    result should be a 'Success
  }
  // A person unknown by name is added once; a contributor row is added for both.
  test("that persistContributors adds persons with new names") {
    val existingPersonContributor = TestData.Api.author1
    val nonExistingPersonContributor = TestData.Api.author1.copy(name = "Does not exist")
    val contributors: Seq[Contributor] = Seq(existingPersonContributor, nonExistingPersonContributor)
    val translation: Translation = TestData.Domain.DefaultTranslation
    when(personRepository.withName(eqTo(existingPersonContributor.name))(any[DBSession])).thenReturn(Some(TestData.Domain.DefaultPerson))
    when(personRepository.withName(eqTo(nonExistingPersonContributor.name))(any[DBSession])).thenReturn(None)
    when(personRepository.add(any[domain.Person])(any[DBSession])).thenReturn(TestData.Domain.DefaultPerson)
    when(contributorRepository.add(any[domain.Contributor])(any[DBSession])).thenReturn(TestData.Domain.DefaultContributor)
    val result = service.persistContributors(contributors, translation)
    result should be a 'Success
    result.get.size should be(2)
    verify(personRepository, times(1)).add(any[domain.Person])(any[DBSession])
    verify(contributorRepository, times(2)).add(any[domain.Contributor])(any[DBSession])
  }
  test("that persistContributorsUpdate removes contributors that no longer are part of book") {
    val translation = TestData.Domain.DefaultTranslation.copy(contributors = Seq(TestData.Domain.DefaultContributor))
    val book = TestData.Internal.DefaultInternalBook.copy(contributors = Seq())
    service.persistContributorsUpdate(translation, book)
    verify(contributorRepository).remove(any[domain.Contributor])(any[DBSession])
  }
  test("that persistContributorsUpdate adds contributors that are part of book") {
    val translation = TestData.Domain.DefaultTranslation.copy(contributors = Seq())
    val book = TestData.Internal.DefaultInternalBook.copy(contributors = Seq(TestData.Api.author1))
    when(personRepository.withName(anyString())(any[DBSession])).thenReturn(Some(TestData.Domain.DefaultPerson))
    when(contributorRepository.add(any[domain.Contributor])(any[DBSession])).thenReturn(TestData.Domain.DefaultContributor)
    service.persistContributorsUpdate(translation, book)
    verify(contributorRepository, never()).remove(any[domain.Contributor])(any[DBSession])
    verify(contributorRepository).add(any[domain.Contributor])(any[DBSession])
  }
  // Existing chapter (matched by translation id + seqNo) gets updated in place.
  test("that persistChapterUpdates updates existing chapters") {
    val translation = TestData.Domain.DefaultTranslation
    val book = TestData.Internal.DefaultInternalBook.copy(chapters = Seq(TestData.Api.Chapter1))
    when(chapterRepository.forTranslationWithSeqNo(eqTo(translation.id.get), eqTo(TestData.Api.Chapter1.seqNo.toLong))(any[DBSession])).thenReturn(Some(TestData.Domain.DefaultChapter))
    when(chapterRepository.updateChapter(any[domain.Chapter])(any[DBSession])).thenReturn(TestData.Domain.DefaultChapter)
    val result = service.persistChapterUpdates(book, translation)
    result should be a 'Success
    verify(chapterRepository).updateChapter(any[domain.Chapter])(any[DBSession])
  }
  // Missing chapter is added; chapters with seq numbers not in the book are deleted.
  test("that persistChapterUpdates adds new chapters and deletes redundant chapters") {
    val translation = TestData.Domain.DefaultTranslation
    val book = TestData.Internal.DefaultInternalBook.copy(chapters = Seq(TestData.Api.Chapter1))
    when(chapterRepository.forTranslationWithSeqNo(eqTo(translation.id.get), eqTo(TestData.Api.Chapter1.seqNo.toLong))(any[DBSession])).thenReturn(None)
    when(chapterRepository.updateChapter(any[domain.Chapter])(any[DBSession])).thenReturn(TestData.Domain.DefaultChapter)
    when(converterService.toDomainChapter(any[api.Chapter], any[Long])).thenReturn(TestData.Domain.DefaultChapter)
    when(chapterRepository.add(any[domain.Chapter])(any[DBSession])).thenReturn(TestData.Domain.DefaultChapter)
    val result = service.persistChapterUpdates(book, translation)
    result should be a 'Success
    verify(chapterRepository).add(any[domain.Chapter])(any[DBSession])
    verify(chapterRepository).deleteChaptersExceptGivenSeqNumbers(translation.id.get, Seq(TestData.Api.Chapter1.seqNo))(AutoSession)
  }
  // A publisher without an id is treated as new and inserted.
  test("that persistPublisher creates a new publisher when no id") {
    val publisher = TestData.Domain.DefaultPublisher.copy(id = None, revision = None)
    when(publisherRepository.add(any[domain.Publisher])(any[DBSession])).thenReturn(TestData.Domain.DefaultPublisher)
    val result = service.persistPublisher(publisher)
    result should be a 'Success
    verify(publisherRepository).add(any[domain.Publisher])(any[DBSession])
  }
}
| GlobalDigitalLibraryio/book-api | src/test/scala/io/digitallibrary/bookapi/service/ImportServiceTest.scala | Scala | apache-2.0 | 6,622 |
package examples
import anyfin.data._
/** Algebra of system-call operations; `A` is the result type of each call. */
sealed trait SystemCall[A]
object SystemCall {
  // @constr (from anyfin.data) turns each abstract declaration below into a
  // data constructor of SystemCall — TODO confirm the exact macro expansion.
  @constr def ReadLine: SystemCall[String]
  @constr def PrintLine (value: String): SystemCall[Unit]
  @constr def Signal (process: Int, signal: Int): SystemCall[Unit]
}
| 4lex1v/anyfin | Examples/src/main/scala/examples/SystemCall.scala | Scala | apache-2.0 | 258 |
package org.joda.time.chrono
import org.joda.time.Chronology
import org.joda.time.DateTimeConstants
import org.joda.time.DateTimeZone
import GregorianChronology._
import org.joda.time.chrono.AssembledChronology.Fields
import scala.scalajs.js
object GregorianChronology {

  // Cache of chronology instances keyed by zone; the array is indexed by
  // (minDaysInFirstWeek - 1).
  private final val cCache =
    new collection.mutable.HashMap[DateTimeZone,
                                   js.Array[GregorianChronology]]()

  // Mean Gregorian year/month lengths in milliseconds (365.2425 days/year).
  private final val MILLIS_PER_YEAR =
    (365.2425 * DateTimeConstants.MILLIS_PER_DAY).toLong

  private final val MILLIS_PER_MONTH =
    (365.2425 * DateTimeConstants.MILLIS_PER_DAY / 12).toLong

  // Days from year 0 to the Unix epoch (1970-01-01) in this calendar.
  private final val DAYS_0000_TO_1970 = 719527

  // Supported proleptic year range.
  private final val MIN_YEAR = -292275054

  private final val MAX_YEAR = 292278993

  private final val INSTANCE_UTC = getInstance(DateTimeZone.UTC)

  /** Cached chronology in UTC with the default min-days-in-first-week of 4. */
  def getInstanceUTC(): GregorianChronology = INSTANCE_UTC

  /** Chronology in the default zone with min-days-in-first-week of 4. */
  def getInstance(): GregorianChronology =
    getInstance(DateTimeZone.getDefault, 4)

  def getInstance(zone: DateTimeZone): GregorianChronology =
    getInstance(zone, 4)

  /**
   * Returns a cached chronology for the zone and minimum days in the first
   * week; a null zone falls back to the default zone. Non-UTC instances are
   * built by zoning the UTC instance.
   */
  def getInstance(zone: DateTimeZone,
                  minDaysInFirstWeek: Int): GregorianChronology = {
    var _zone: DateTimeZone = zone
    if (_zone == null) {
      _zone = DateTimeZone.getDefault
    }
    var chrono: GregorianChronology = null
    var chronos = cCache.get(_zone).orNull
    if (chronos == null) {
      chronos = js.Array[GregorianChronology]()
      // Install the freshly created array unless one already exists for the
      // zone (under Scala.js this runs single-threaded, so it is effectively
      // a plain insert).
      val oldChronos = if (cCache.get(_zone).isEmpty) {
        cCache(_zone) = chronos
        chronos
      } else {
        chronos
      }
      if (oldChronos != null) {
        chronos = oldChronos
      }
    }
    // NOTE(review): reading index (minDaysInFirstWeek - 1) of a shorter
    // js.Array yields `undefined`; the null check below assumes that reads as
    // "missing" — TODO confirm under Scala.js semantics.
    chrono = chronos(minDaysInFirstWeek - 1)
    if (chrono == null) {
      chronos.synchronized {
        chrono = chronos(minDaysInFirstWeek - 1)
        if (chrono == null) {
          if (_zone == DateTimeZone.UTC) {
            chrono = new GregorianChronology(null, null, minDaysInFirstWeek)
          } else {
            chrono = getInstance(DateTimeZone.UTC, minDaysInFirstWeek)
            chrono = new GregorianChronology(
              ZonedChronology.getInstance(chrono, _zone),
              null,
              minDaysInFirstWeek)
          }
          chronos(minDaysInFirstWeek - 1) = chrono
        }
      }
    }
    chrono
  }
}
@SerialVersionUID(-861407383323710522L)
class GregorianChronology private (base: Chronology,
                                   param: AnyRef,
                                   minDaysInFirstWeek: Int)
    extends BasicGJChronology(base, param, minDaysInFirstWeek) {

  // Deserialization hook: resolve to a cached instance, restoring the default
  // of 4 when minDays deserializes as 0.
  private def readResolve(): AnyRef = {
    val base = getBase
    var minDays = getMinimumDaysInFirstWeek
    minDays = if (minDays == 0) 4 else minDays
    if (base == null) getInstance(DateTimeZone.UTC, minDays)
    else getInstance(base.getZone, minDays)
  }

  /** Returns the shared UTC instance. */
  def withUTC(): Chronology = INSTANCE_UTC

  /** Returns an instance in the given zone (this instance if zones match). */
  def withZone(zone: DateTimeZone): Chronology = {
    var _zone = zone
    if (_zone == null) {
      _zone = DateTimeZone.getDefault
    }
    if (_zone == getZone) {
      return this
    }
    getInstance(_zone)
  }

  // Only the base (UTC) chronology assembles fields; zoned wrappers delegate.
  override protected def assemble(fields: Fields) {
    if (getBase == null) {
      super.assemble(fields)
    }
  }

  /** Standard Gregorian rule: divisible by 4, except centuries not divisible by 400. */
  def isLeapYear(year: Int): Boolean = {
    ((year & 3) == 0) && ((year % 100) != 0 || (year % 400) == 0)
  }

  /**
   * Millis at the start (UTC) of the given year: 365 days per year plus the
   * count of leap days, shifted so 1970-01-01 is zero. The branches count
   * leap years differently for negative vs non-negative years, and exclude
   * the current year's own leap day.
   */
  def calculateFirstDayOfYearMillis(year: Int): Long = {
    var leapYears = year / 100
    if (year < 0) {
      leapYears = ((year + 3) >> 2) - leapYears + ((leapYears + 3) >> 2) -
        1
    } else {
      leapYears = (year >> 2) - leapYears + (leapYears >> 2)
      if (isLeapYear(year)) {
        leapYears -= 1
      }
    }
    (year * 365L + (leapYears - DAYS_0000_TO_1970)) * DateTimeConstants.MILLIS_PER_DAY
  }

  def getMinYear(): Int = MIN_YEAR

  def getMaxYear(): Int = MAX_YEAR

  def getAverageMillisPerYear(): Long = MILLIS_PER_YEAR

  def getAverageMillisPerYearDividedByTwo(): Long = MILLIS_PER_YEAR / 2

  def getAverageMillisPerMonth(): Long = MILLIS_PER_MONTH

  def getApproxMillisAtEpochDividedByTwo(): Long =
    (1970L * MILLIS_PER_YEAR) / 2
}
| mdedetrich/soda-time | js/src/main/scala/org/joda/time/chrono/GregorianChronology.scala | Scala | bsd-2-clause | 4,094 |
package koncept.http.web.auth
import com.sun.net.httpserver.Authenticator
import com.sun.net.httpserver.HttpExchange
import koncept.http.web.sessions.Sessions
import koncept.http.web.sessions.Session
import koncept.http.web.cookie.CookieHelper
import com.sun.net.httpserver.HttpContext
/**
 * com.sun.net.httpserver Authenticator that tracks logged-in users through a
 * session-id cookie. Session state lives in the HttpContext attributes; stale
 * sessions are evicted by `cleanup` after `minutesTimeout` minutes.
 */
class SessionCookieAuthenticator(val sessionCookieName: String = "sessionId", var minutesTimeout: Int = 20) extends Authenticator {

  /**
   * Destroys any current session, then creates a new session for the given
   * username/realm, sets the session cookie and caches the session on the
   * exchange under the "session" attribute.
   */
  def login(username: String, realm: String)(implicit exchange: HttpExchange): Session = {
    ensureNoExistingSession
    val principal = new RichPrincipal(username, realm)
    val session = Sessions(exchange.getHttpContext().getAttributes()).create(principal)
    CookieHelper(exchange).create(sessionCookieName, session.id)
    exchange.setAttribute("session", session)
    session
  }

  /** Destroys any current session, deletes the cookie and clears the exchange attribute. */
  def logout(implicit exchange: HttpExchange) {
    ensureNoExistingSession
    CookieHelper(exchange).delete(sessionCookieName)
    exchange.setAttribute("session", null)
  }

  /** Evicts sessions older than `minutesTimeout` from the context's session store. */
  def cleanup(httpContext: HttpContext) {
    Sessions(httpContext.getAttributes()).cleanup(minutesTimeout)
  }

  // NOTE(review): despite its name, this *destroys* an existing session rather
  // than asserting there is none.
  private def ensureNoExistingSession(implicit exchange: HttpExchange) {
    exchange.getAttribute("session") match {
      case null =>{}
      case session: Session => {
        Sessions(exchange.getHttpContext().getAttributes()).destroy(session.id)
      }
    }
  }

  /**
   * Resolves the session cookie to a live session and attaches it to the
   * exchange. A missing cookie or unknown session id yields Success(null) —
   * a workaround meaning "not authenticated" without failing the exchange.
   */
  override def authenticate (exchange: HttpExchange): Authenticator.Result = {
    val sessionCookie = CookieHelper(exchange).getCookie(sessionCookieName)
    if (sessionCookie.isEmpty)
      return new Authenticator.Success(null) //work around - no principal = not authenticated
    val session = Sessions(exchange.getHttpContext().getAttributes()).get(sessionCookie.get)
    if (session == null)
      return new Authenticator.Success(null)
    //consider adding in extra 'IP' or 'UserAgent' code
    //    exchange.getRemoteAddress().getAddress().getAddress() //byte array
    session.principal.asInstanceOf[RichPrincipal].touch
    exchange.setAttribute("session", session)
    return new Authenticator.Success(session.principal)
  }
}
/** Runnable adapter that triggers one session-cleanup pass for a fixed context. */
class SessionCookieAuthenticatorCleaner(authenticator: SessionCookieAuthenticator, httpContext: HttpContext) extends Runnable {
  override def run(): Unit = authenticator.cleanup(httpContext)
}
package mesosphere.marathon.api.v2.json
import mesosphere.marathon.{ MarathonTestHelper, MarathonSpec }
import org.scalatest.GivenWhenThen
/**
* Tests that test that the given JSON is rejected by the JSON schema.
*
* Since the JSON is not representable by an V2AppDefinition,
* JSON is used directly.
*/
/**
 * Verifies the app-definition JSON schema's handling of COMMAND health checks:
 * the "command" field must be an object with a nested "value", not a bare string.
 */
class V2AppDefinitionSchemaJSONTest extends MarathonSpec with GivenWhenThen {
  // A bare string for "command" violates the schema.
  test("command health checks WITHOUT a nested value should be rejected") {
    Given("an app definition WITHOUT a nested value in command section of a health check")
    val json =
      """
        |{
        |  "id": "/test",
        |  "cmd": "echo hi",
        |  "healthChecks": [
        |    {
        |      "protocol": "COMMAND",
        |      "command": "curl -f -X GET http://$HOST:$PORT0/health"
        |    }
        |  ]
        |}
      """.stripMargin
    Then("validation should fail")
    MarathonTestHelper.validateJsonSchemaForString(json, valid = false)
  }
  // The object form { "value": ... } is the accepted shape.
  test("command health checks WITH a nested value should be accepted") {
    Given("an app definition WITH a nested value in command section of a health check")
    val json =
      """
        |{
        |  "id": "/test",
        |  "cmd": "echo hi",
        |  "healthChecks": [
        |    {
        |      "protocol": "COMMAND",
        |      "command": { "value": "curl -f -X GET http://$HOST:$PORT0/health" }
        |    }
        |  ]
        |}
      """.stripMargin
    Then("validation should succeed")
    MarathonTestHelper.validateJsonSchemaForString(json, valid = true)
  }
}
| EasonYi/marathon | src/test/scala/mesosphere/marathon/api/v2/json/V2AppDefinitionSchemaJSONTest.scala | Scala | apache-2.0 | 1,572 |
package org.jetbrains.plugins.scala.compilationCharts.ui
import junit.framework.TestCase
import org.junit.Assert.assertEquals
import scala.concurrent.duration.{DurationInt, FiniteDuration}
/**
 * Pins the duration-formatting behaviour of segment tooltips: per the
 * assertions below, values are rendered with h/m/s/ms units, millisecond
 * detail is rounded away once minutes are present, and bare seconds keep a
 * single decimal of sub-second precision.
 */
class DiagramsComponentTest extends TestCase {
  def testStringifyForSegmentTooltip(): Unit = {
    // Asserts the rendered text for one duration.
    def doTest(duration: FiniteDuration, expectedText: String): Unit = {
      assertEquals(expectedText, DiagramsComponent.stringifyForSegmentTooltip(duration))
    }
    // single units
    doTest(10.hours, "10 h")
    doTest(2.hours, "2 h")
    doTest(1.hours, "1 h")
    doTest(59.minutes, "59 m")
    doTest(2.minutes, "2 m")
    doTest(1.minutes, "1 m")
    doTest(59.seconds, "59 s")
    doTest(2.seconds, "2 s")
    doTest(1.seconds, "1 s")
    doTest(999.millis, "999 ms")
    doTest(99.millis, "99 ms")
    doTest(2.millis, "2 ms")
    doTest(1.millis, "1 ms")
    // mixed units
    doTest(1.hours + 1.minutes + 1.seconds + 1.millis, "1 h 1 m 1 s")
    doTest(1.hours + 1.minutes + 1.seconds + 99.millis, "1 h 1 m 1 s")
    doTest(1.hours + 1.minutes + 1.seconds + 199.millis, "1 h 1 m 1 s")
    doTest(1.hours + 1.minutes + 1.seconds + 499.millis, "1 h 1 m 1 s")
    doTest(1.hours + 1.minutes + 1.seconds + 501.millis, "1 h 1 m 2 s")
    doTest(1.hours + 1.minutes + 1.seconds + 999.millis, "1 h 1 m 2 s")
    doTest(1.minutes + 1.seconds + 1.millis, "1 m 1 s")
    doTest(1.minutes + 1.seconds + 99.millis, "1 m 1 s")
    doTest(1.minutes + 1.seconds + 199.millis, "1 m 1 s")
    doTest(1.minutes + 1.seconds + 499.millis, "1 m 1 s")
    doTest(1.minutes + 1.seconds + 501.millis, "1 m 2 s")
    doTest(1.minutes + 1.seconds + 999.millis, "1 m 2 s")
    // seconds keep one decimal of sub-second precision; 59.999 s rounds up to a minute
    doTest(59.seconds + 1.millis, "59 s")
    doTest(59.seconds + 99.millis, "59.1 s")
    doTest(59.seconds + 199.millis, "59.2 s")
    doTest(59.seconds + 499.millis, "59.5 s")
    doTest(59.seconds + 501.millis, "59.5 s")
    doTest(59.seconds + 999.millis, "1 m")
    doTest(1.seconds + 1.millis, "1 s")
    doTest(1.seconds + 99.millis, "1.1 s")
    doTest(1.seconds + 199.millis, "1.2 s")
    doTest(1.seconds + 499.millis, "1.5 s")
    doTest(1.seconds + 501.millis, "1.5 s")
    doTest(1.seconds + 999.millis, "2 s")
    doTest(0.seconds + 1.millis, "1 ms")
    doTest(0.seconds + 99.millis, "99 ms")
    doTest(0.seconds + 199.millis, "199 ms")
    doTest(0.seconds + 499.millis, "499 ms")
    doTest(0.seconds + 501.millis, "501 ms")
    doTest(0.seconds + 999.millis, "999 ms")
  }
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/compilationCharts/ui/DiagramsComponentTest.scala | Scala | apache-2.0 | 2,441 |
package pl.touk.nussknacker.engine.lite.metrics.dropwizard.influxdb
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging
import io.dropwizard.metrics5.{MetricName, MetricRegistry}
import io.dropwizard.metrics5.influxdb.InfluxDbReporter
import java.util.concurrent.TimeUnit
import scala.util.Try
import scala.util.control.NonFatal
/** Default stateless instance of the reporter factory. */
object LiteEngineInfluxDbReporter extends LiteEngineInfluxDbReporter

/**
 * Factory for a Dropwizard InfluxDbReporter driven by an optional "influx"
 * section of the given Typesafe config.
 */
trait LiteEngineInfluxDbReporter extends LazyLogging {
  import net.ceedubs.ficus.Ficus._
  import net.ceedubs.ficus.readers.ArbitraryTypeReader._

  /**
   * Starts a reporter if a parseable "influx" section exists; returns None
   * otherwise. A malformed section is logged and treated as absent — note
   * that in that case both the warning and the "config not found" info
   * message below are emitted.
   */
  def createAndRunReporterIfConfigured(metricRegistry: MetricRegistry, prefix: MetricName, config: Config): Option[InfluxDbReporter] = {
    Try(config.getAs[InfluxSenderConfig]("influx")).recover {
      case NonFatal(ex) =>
        logger.warn(s"Error while parsing influx configuration: ${ex.getMessage}. InfluxDb Reported will be disabled.")
        None
    }.get.map { influxSenderConfig =>
      createAndRunReporter(metricRegistry, prefix, influxSenderConfig)
    } orElse {
      logger.info("Influxdb metrics reporter config not found")
      None
    }
  }

  /** Builds the HTTP reporter and starts it at the configured polling interval. */
  protected def createAndRunReporter(metricRegistry: MetricRegistry, prefix: MetricName, influxSenderConfig: InfluxSenderConfig): InfluxDbReporter = {
    logger.info("Found Influxdb metrics reporter config, starting reporter")
    val reporter = InfluxDbHttpReporter.build(metricRegistry, prefix, influxSenderConfig)
    reporter.start(influxSenderConfig.reporterPolling.toSeconds, TimeUnit.SECONDS)
    reporter
  }
}
| TouK/nussknacker | engine/lite/runtime/src/main/scala/pl/touk/nussknacker/engine/lite/metrics/dropwizard/influxdb/LiteEngineInfluxDbReporter.scala | Scala | apache-2.0 | 1,568 |
package extruder
package object meta {
  /** Importable bundle exposing the instances defined by `TypeableInstances`. */
  object typeable extends TypeableInstances
}
| janstenpickle/extruder | core/src/main/scala/extruder/meta/package.scala | Scala | mit | 86 |
package com.karasiq.shadowcloud.storage.repository
import akka.stream.scaladsl.Source
import scala.collection.immutable.TreeSet
/** Repository whose keys can be enumerated and range-filtered by an Ordering. */
trait SeqRepository[Key] extends Repository[Key] {

  /** Emits every key exactly once, in ascending order according to `ord`. */
  def sortedKeys(implicit ord: Ordering[Key]): Source[Key, Result] =
    keys.fold(TreeSet.empty[Key])((sorted, key) => sorted + key).mapConcat(identity)

  /** Emits only the keys strictly smaller than `id`. */
  def keysBefore(id: Key)(implicit ord: Ordering[Key]): Source[Key, Result] =
    keys.filter(key => ord.lt(key, id))

  /** Emits only the keys strictly greater than `id`. */
  def keysAfter(id: Key)(implicit ord: Ordering[Key]): Source[Key, Result] =
    keys.filter(key => ord.gt(key, id))
}
| Karasiq/shadowcloud | storage/parent/src/main/scala/com/karasiq/shadowcloud/storage/repository/SeqRepository.scala | Scala | apache-2.0 | 551 |
/*
* Copyright 2015 Tsukasa Kitachi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sbtjooq.codegen
import sbt._
import scala.language.implicitConversions
import scala.xml.{Elem, Node, NodeBuffer}
/** A jOOQ code-generation configuration: a single source or a sequence of them. */
sealed trait CodegenConfig
object CodegenConfig {

  /** One configuration source: a file, a classpath resource or inline XML. */
  sealed trait Single extends CodegenConfig

  case class FromFile(file: File) extends Single

  case class FromResource(resource: String) extends Single

  case class FromXML(xml: Node) extends Single

  /** An ordered collection of single configuration sources. */
  case class Sequence(seq: Seq[Single]) extends CodegenConfig

  implicit class CodegenConfigOps(config: CodegenConfig) {
    /** Flattens this config into its constituent single sources. */
    def toSeq: Seq[Single] =
      config match {
        case single: Single => Seq(single)
        case Sequence(seq) => seq
      }

    /** True only for an empty Sequence; a Single is never empty. */
    def isEmpty: Boolean =
      config match {
        case Sequence(seq) => seq.isEmpty
        case _ => false
      }

    def +(other: Single): CodegenConfig =
      Sequence(toSeq :+ other)

    def ++(other: CodegenConfig): CodegenConfig =
      Sequence(toSeq ++ other.toSeq)
  }

  def empty: CodegenConfig = Sequence(Seq.empty)

  /** Maps "classpath:" URIs to resources, "file:" URIs to files; other schemes are rejected. */
  def fromURI(uri: URI): Single =
    uri.getScheme match {
      case "classpath" => FromResource(uri.getSchemeSpecificPart)
      case "file" => FromFile(new File(uri))
      case _ => throw new IllegalArgumentException(s"Unknown scheme: $uri")
    }

  def fromURIString(uri: String): Single = fromURI(sbt.uri(uri))

  // Implicit lifts so files, XML literals, URIs and sequences thereof can be
  // used directly wherever a CodegenConfig is expected.
  implicit def fileToCodegenConfig(file: File): Single = FromFile(file)
  implicit def xmlElemToCodegenConfig(xml: Elem): Single = FromXML(xml)
  implicit def uriToCodegenConfig(uri: URI): Single = fromURI(uri)
  implicit def seqToCodegenConfig[A](seq: Seq[A])(implicit ev: A => CodegenConfig): Sequence =
    Sequence(seq.flatMap(ev(_).toSeq))
  implicit def nodeBufferToCodegenConfig(buffer: NodeBuffer): Sequence =
    Sequence(buffer.map(FromXML))

  // sbt Append instances so CodegenConfig settings support `+=` / `++=`.
  implicit val appendCodegenConfigToCodegenConfig: Append.Values[CodegenConfig, CodegenConfig] = _ ++ _
  implicit def appendSingleToCodegenConfig[A](implicit ev: A => Single): Append.Value[CodegenConfig, A] = _ + _
  implicit def appendSequenceToCodegenConfig[A](implicit ev: A => Sequence): Append.Values[CodegenConfig, A] = _ ++ _
}
| kxbmap/sbt-jooq | codegen/src/main/scala/sbtjooq/codegen/CodegenConfig.scala | Scala | apache-2.0 | 2,691 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.avro.serde
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import org.apache.avro.io.Decoder
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.text.WKTUtils
/**
* Version 1 AvroSimpleFeature encodes fields as WKT (Well Known Text) in an Avro String
*/
object Version1Deserializer {
}
object Version1Deserializer extends ASFDeserializer {

  /**
   * Reads the next Avro byte sequence, decodes it as a UTF-8 WKT string and
   * stores the parsed geometry at `field` (without attribute conversion).
   *
   * The (ByteBuffer, Array[Byte]) scratch pair is taken from `buffers` and
   * written back after potentially growing the byte array, so allocations are
   * amortised across calls. NOTE(review): `buffers` is defined outside this
   * view (presumably a cache supplied by ASFDeserializer) — confirm its
   * sharing/threading guarantees before relying on them.
   */
  override def setGeometry(sf: ScalaSimpleFeature, field: Int, in:Decoder): Unit = {
    var (bb, bytes) = buffers.getOrElseUpdate((ByteBuffer.allocate(16), Array.empty))
    bb = in.readBytes(bb)
    val length = bb.remaining
    if (bytes.length < length) {
      bytes = Array.ofDim(length)
    }
    buffers.put((bb, bytes))
    bb.get(bytes, 0, length)
    sf.setAttributeNoConvert(field, WKTUtils.read(new String(bytes, 0, length, StandardCharsets.UTF_8)))
  }

  /** Skips over a serialized geometry without materialising it. */
  override def consumeGeometry(in: Decoder): Unit = in.skipBytes()
}
| locationtech/geomesa | geomesa-features/geomesa-feature-avro/src/main/scala/org/locationtech/geomesa/features/avro/serde/Version1Deserializer.scala | Scala | apache-2.0 | 1,424 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.geotools
import org.locationtech.geomesa.utils.collection.TieredOrdering
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.expression.PropertyName
import org.opengis.filter.sort.{SortBy, SortOrder}
import scala.math.Ordering
/**
* Ordering for simple features. Assumes that any attributes implement `Comparable`
*/
object SimpleFeatureOrdering {

  // Pre-built orderings for the first 16 attribute indices, to avoid
  // re-allocating an AttributeOrdering for common cases.
  private val cached = Array.tabulate(16)(new AttributeOrdering(_))

  /**
    * Sort on the ith attribute of a simple feature
    *
    * @param i attribute to sort on
    * @return
    */
  def apply(i: Int): Ordering[SimpleFeature] =
    if (i < cached.length) { cached(i) } else { new AttributeOrdering(i) }

  /**
    * Sort on an attribute by name. `null`, `"id"` or an empty string can be used to indicate
    * 'natural' ordering by feature ID
    *
    * @param sft simple feature type
    * @param sortBy attribute to sort by
    * @return
    */
  def apply(sft: SimpleFeatureType, sortBy: String): Ordering[SimpleFeature] = apply(sft, sortBy, reverse = false)

  /**
    * Sort on an attribute by name. `null`, `"id"` or an empty string can be used to indicate
    * * 'natural' ordering by feature ID
    *
    * @param sft simple feature type
    * @param sortBy attribute to sort by
    * @param reverse reverse the sort (from ascending to descending)
    * @return
    */
  def apply(sft: SimpleFeatureType, sortBy: String, reverse: Boolean): Ordering[SimpleFeature] = {
    val sort = if (sortBy == null || sortBy.isEmpty || sortBy.equalsIgnoreCase("id")) { fid } else {
      val i = sft.indexOf(sortBy)
      if (i == -1) {
        throw new IllegalArgumentException(s"Trying to sort by an attribute that is not in the schema: $sortBy")
      }
      apply(i)
    }
    if (reverse) { sort.reverse } else { sort }
  }

  /**
    * Sort by multiple attributes by name
    *
    * @param sft simple feature type
    * @param sortBy pairs of (attribute name, reverse ordering)
    * @return
    */
  def apply(sft: SimpleFeatureType, sortBy: Seq[(String, Boolean)]): Ordering[SimpleFeature] = {
    if (sortBy.lengthCompare(1) == 0) {
      apply(sft, sortBy.head._1, sortBy.head._2)
    } else {
      // Ties on the first sort field fall through to subsequent fields.
      TieredOrdering(sortBy.map { case (field, reverse) => apply(sft, field, reverse) })
    }
  }

  /**
    * Sort on a geotools SortBy instance
    *
    * @param sft simple feature type
    * @param sortBy sort by
    * @return
    */
  def apply(sft: SimpleFeatureType, sortBy: SortBy): Ordering[SimpleFeature] = {
    // A null property name means 'natural' (feature ID) ordering.
    val name = Option(sortBy.getPropertyName).map(_.getPropertyName).orNull
    apply(sft, name, sortBy.getSortOrder == SortOrder.DESCENDING)
  }

  /**
    * Sort on a geotools SortBy array
    *
    * @param sft simple feature type
    * @param sortBy sort by
    * @return
    */
  def apply(sft: SimpleFeatureType, sortBy: Array[SortBy]): Ordering[SimpleFeature] = {
    if (sortBy.length == 1) {
      apply(sft, sortBy.head)
    } else {
      TieredOrdering(sortBy.map(apply(sft, _)))
    }
  }

  /**
    * Sort based on the feature ID ('natural' ordering)
    *
    * @return
    */
  def fid: Ordering[SimpleFeature] = Fid

  private object Fid extends Ordering[SimpleFeature] {
    override def compare(x: SimpleFeature, y: SimpleFeature): Int = x.getID.compareTo(y.getID)
  }

  // Compares features on the value of attribute i; assumes attribute values
  // implement Comparable (see class note above).
  private class AttributeOrdering(i: Int) extends Ordering[SimpleFeature] {
    override def compare(x: SimpleFeature, y: SimpleFeature): Int =
      nullCompare(x.getAttribute(i).asInstanceOf[Comparable[Any]], y.getAttribute(i))
  }

  // NOTE(review): PropertyOrdering and UserDataOrdering are not referenced
  // anywhere within this object — verify whether they are still needed.
  private class PropertyOrdering(property: PropertyName) extends Ordering[SimpleFeature] {
    override def compare(x: SimpleFeature, y: SimpleFeature): Int =
      nullCompare(property.evaluate(x).asInstanceOf[Comparable[Any]], property.evaluate(y))
  }

  private class UserDataOrdering(key: String) extends Ordering[SimpleFeature] {
    override def compare(x: SimpleFeature, y: SimpleFeature): Int =
      nullCompare(x.getUserData.get(key).asInstanceOf[Comparable[Any]], y.getUserData.get(key))
  }

  /**
    * Compares two values, nulls are ordered first
    *
    * @param x left value
    * @param y right value
    * @return
    */
  def nullCompare(x: Comparable[Any], y: Any): Int = {
    if (x == null) {
      if (y == null) { 0 } else { -1 }
    } else if (y == null) {
      1
    } else {
      x.compareTo(y)
    }
  }
}
| elahrvivaz/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/geotools/SimpleFeatureOrdering.scala | Scala | apache-2.0 | 4,900 |
package org.jetbrains.plugins.scala
package lang
package completion
package filters.expression
import com.intellij.psi.filters.ElementFilter
import com.intellij.psi.{PsiElement, _}
import org.jetbrains.annotations.NonNls
import org.jetbrains.plugins.scala.lang.completion.ScalaCompletionUtil._
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.ScDocComment
/**
* @author Alexander Podkhalyuzin
* Date: 22.05.2008
*/
class FinallyFilter extends ElementFilter{
  // Accepts a completion position only when the enclosing `try` statement has no
  // `finally` block yet and is not already followed by a `catch`/`finally` keyword,
  // i.e. where completing the `finally` keyword makes sense.
  def isAcceptable(element: Object, context: PsiElement): Boolean = {
    if (context.isInstanceOf[PsiComment]) return false
    val leaf = getLeafByOffset(context.getTextRange.getStartOffset, context)
    if (leaf != null) {
      // NOTE(review): `parent` is never used below — looks like leftover code
      val parent = leaf.getParent
      var i = getPrevNotWhitespaceAndComment(context.getTextRange.getStartOffset - 1, context)
      var leaf1 = getLeafByOffset(i, context)
      // walk up from the previous significant leaf to the enclosing try statement
      while (leaf1 != null && !leaf1.isInstanceOf[ScTryStmt]) leaf1 = leaf1.getParent
      if (leaf1 == null) return false
      // a try that already has a finally block cannot take another one
      if (leaf1.getNode.getChildren(null).exists(_.getElementType == ScalaElementTypes.FINALLY_BLOCK)) return false
      // don't offer `finally` right before an existing catch/finally keyword
      i = getNextNotWhitespaceAndComment(context.getTextRange.getEndOffset, context)
      if (Array("catch", "finally").contains(getLeafByOffset(i, context).getText)) return false
      return true
    }
    false
  }
  def isClassAcceptable(hintClass: java.lang.Class[_]): Boolean = {
    true
  }
  @NonNls
  override def toString: String = {
    "statements keyword filter"
  }
  // Offset of the previous character that is neither whitespace nor inside a
  // (doc) comment, scanning backwards from `index`; clamps at 0.
  def getPrevNotWhitespaceAndComment(index: Int, context: PsiElement): Int = {
    var i = index
    if (i < 0) return 0
    while (i > 0 && (context.getContainingFile.getText.charAt(i) == ' ' ||
      context.getContainingFile.getText.charAt(i) == '\\n')) i = i - 1
    val leaf = getLeafByOffset(i, context)
    // landed inside a comment: skip over the whole comment and keep scanning
    if (leaf.isInstanceOf[PsiComment] || leaf.isInstanceOf[ScDocComment])
      return getPrevNotWhitespaceAndComment(leaf.getTextRange.getStartOffset - 1, context)
    i
  }
  // Offset of the next character that is neither whitespace nor inside a
  // (doc) comment, scanning forwards from `index`; clamps at end of file.
  def getNextNotWhitespaceAndComment(index: Int, context: PsiElement): Int = {
    var i = index
    if (i >= context.getContainingFile.getTextLength - 1) return context.getContainingFile.getTextLength - 2
    while (i < context.getContainingFile.getText.length - 1 && (context.getContainingFile.getText.charAt(i) == ' ' ||
      context.getContainingFile.getText.charAt(i) == '\\n')) i = i + 1
    val leaf = getLeafByOffset(i, context)
    // landed inside a comment: skip over the whole comment and keep scanning
    if (leaf.isInstanceOf[PsiComment] || leaf.isInstanceOf[ScDocComment])
      return getNextNotWhitespaceAndComment(leaf.getTextRange.getEndOffset, context)
    i
  }
} | double-y/translation-idea-plugin | src/org/jetbrains/plugins/scala/lang/completion/filters/expression/FinallyFilter.scala | Scala | apache-2.0 | 2,724 |
package uk.co.turingatemyhamster.shortbol.ops
import scalaz.Scalaz._
import shapeless._
import uk.co.turingatemyhamster.shortbol.sharedAst._
import uk.co.turingatemyhamster.shortbol.shorthandAst
import uk.co.turingatemyhamster.shortbol.longhandAst
import uk.co.turingatemyhamster.shortbol.ops.Eval.EvalState
/**
*
*
* @author Matthew Pocock
*/
// Rewrites every Identifier inside a value of type T, threading the evaluation state.
trait ChangeIdentifiers[T] {
  def apply(t: T): EvalState[T]
}
object ChangeIdentifiers {
  // `transform` is applied to each Identifier encountered; instances for all other
  // AST node shapes are derived generically via shapeless.
  case class at(transform: Identifier => EvalState[Identifier]) extends TypeClassCompanion[ChangeIdentifiers]
  {
    self =>
    override val typeClass: TypeClass[ChangeIdentifiers] = new TypeClass[ChangeIdentifiers] {
      // Coproducts (sealed trait alternatives): recurse into whichever branch is present.
      override def coproduct[L, R <: Coproduct](cl: => ChangeIdentifiers[L],
                                                cr: => ChangeIdentifiers[R]) =
        new ChangeIdentifiers[:+:[L, R]] {
          override def apply(t: :+:[L, R]) = t match {
            case Inl(l) => for {
              cll <- cl(l)
            } yield Inl(cll)
            case Inr(r) => for {
              crr <- cr(r)
            } yield Inr(crr)
          }
        }
      // CNil is uninhabited, so this can never actually be called.
      override def emptyCoproduct = new ChangeIdentifiers[CNil] {
        override def apply(t: CNil) = ???
      }
      // Products (case class fields): rewrite head and tail, then recombine.
      override def product[H, T <: HList](ch: ChangeIdentifiers[H],
                                          ct: ChangeIdentifiers[T]) = new ChangeIdentifiers[::[H, T]] {
        override def apply(t: ::[H, T]) = for {
          chh <- ch(t.head)
          ctt <- ct(t.tail)
        } yield chh :: ctt
      }
      override def emptyProduct = miss[HNil]
      // Adapts an instance for G to work on F via an isomorphism (to/from).
      override def project[F, G](instance: => ChangeIdentifiers[G],
                                 to: (F) => G,
                                 from: (G) => F) = new ChangeIdentifiers[F] {
        override def apply(t: F) = for {
          tot <- instance(to(t))
        } yield from(tot)
      }
    }
    // Derived instance for AST nodes; copies the source region onto the rewritten node.
    implicit def deriveNodeInstance[F <: AstNode, G]
    (implicit gen: Generic.Aux[F, G], cg: Lazy[ChangeIdentifiers[G]]): ChangeIdentifiers[F] = {
      val fg = typeClass.project(cg.value, gen.to _, gen.from _)
      new ChangeIdentifiers[F] {
        override def apply(t: F) = for {
          ff <- fg.apply(t)
        } yield {
          // preserve the original source-position metadata on the rewritten node
          ff.region = t.region
          ff
        }
      }
    }
    // Sequences are rewritten element-wise (via the List instance).
    implicit def seq[T](implicit e: ChangeIdentifiers[T]): ChangeIdentifiers[Seq[T]] =
      typeClass.project[Seq[T], List[T]](implicitly[ChangeIdentifiers[List[T]]], _.to[List], identity)
    // Leaf types that contain no identifiers are left untouched.
    implicit val missString = miss[String]
    implicit val missInt = miss[Int]
    implicit val missBoolean = miss[Boolean]
    // The one interesting case: identifiers are passed through `transform`.
    implicit val identifier: ChangeIdentifiers[Identifier] = new ChangeIdentifiers[Identifier] {
      override def apply(t: Identifier) = transform(t)
    }
    // Explicit derivations for the AST node types used by the evaluator.
    implicit val style = self[StringLiteral.Style]
    implicit val literal = self[Literal]
    implicit val assignment = self[shorthandAst.Assignment]
    implicit val tpeConstructor = self[shorthandAst.TpeConstructor]
    implicit val instanceExp = self[longhandAst.InstanceExp]
    implicit val constructorApp = self[shorthandAst.ConstructorApp]
    implicit val constructorDef = self[shorthandAst.ConstructorDef]
    implicit val valueExp = self[shorthandAst.ValueExp]
    implicit val propertyValue = self[shorthandAst.PropertyValue]
    implicit val propertyExp = self[shorthandAst.PropertyExp]
    implicit val bodyStmt = self[shorthandAst.BodyStmt]
    implicit val topLevel = self[shorthandAst.TopLevel]
    implicit val sbFile = self[shorthandAst.SBFile]
    // Identity instance: returns the value unchanged in the evaluation state.
    def miss[T]: ChangeIdentifiers[T] = new MissIdentifiers[T]
    class MissIdentifiers[T] extends ChangeIdentifiers[T] {
      override def apply(t: T) = t.point[EvalState]
    }
  }
} | drdozer/shortbol | shortbol/core/shared/src/main/scala/uk/co/turingatemyhamster/shortbol/ops/ChangeIdentifiers.scala | Scala | apache-2.0 | 3,756 |
package com.github.mdr.mash.view.render.help
import com.github.mdr.mash.functions.Parameter
import com.github.mdr.mash.screen.Line
import com.github.mdr.mash.screen.Style._
import com.github.mdr.mash.utils.Utils._
object ParameterHelpRenderer extends AbstractHelpRenderer {

  /** Renders the "PARAMETERS" help section; empty when there are no parameters. */
  def renderSection(parameters: Seq[Parameter]): Seq[Line] =
    if (parameters.isEmpty)
      Seq()
    else {
      val heading = Seq(Line.Empty, Line(SectionTitleStyle("PARAMETERS")))
      val body = parameters.zipWithIndex.flatMap { case (param, index) ⇒
        // blank separator line between consecutive parameters
        val separator = if (index > 0) Seq(Line.Empty) else Seq()
        separator ++ renderParameterHelp(param)
      }
      heading ++ body
    }

  /** One parameter: a summary line plus optional indented description lines. */
  private def renderParameterHelp(param: Parameter): Seq[Line] = {
    val qualifiers = getParamQualifiers(param)
    val qualifierText = if (qualifiers.isEmpty) "" else qualifiers.mkString(" [", ", ", "]")
    val displayName = param.nameOpt.getOrElse(Parameter.AnonymousParamName)
    val styledName = ParamNameStyle(if (param.isFlag) "--" + displayName else displayName)
    val shortFlag = param.shortFlagOpt.fold("")(f ⇒ s" | -$f").style(ParamNameStyle)
    val summary = param.summaryOpt.fold("")(" - " + _).style
    val summaryLine = Line(IndentSpace + styledName + shortFlag + qualifierText.style + summary)
    val descriptionLines = param.descriptionOpt.toSeq.flatMap(renderDescription(_, indentLevel = 2))
    summaryLine +: descriptionLines
  }

  /** Labels describing the parameter's modifiers, in a fixed display order. */
  private def getParamQualifiers(param: Parameter): Seq[String] = {
    val labelled = Seq(
      "lazy" -> param.isLazy,
      "namedArgs" -> param.isNamedArgsParam,
      "allArgs" -> param.isAllArgsParam,
      "safe" -> param.isSafe,
      "optional" -> param.hasDefault,
      "variadic" -> param.isVariadic,
      "at least one" -> param.variadicAtLeastOne,
      "flatten" -> param.variadicFlatten)
    labelled.collect { case (label, true) ⇒ label }
  }
}
| mdr/mash | src/main/scala/com/github/mdr/mash/view/render/help/ParameterHelpRenderer.scala | Scala | mit | 1,989 |
// Solution-2.scala
// Solution to Exercise 2 in "Functions as Objects"
import com.atomicscala.AtomicTest._

val numberV = Vector(1, 2, 3, 4)
// Build the comma-terminated string with a fold instead of mutating a var.
val str1 = numberV.foldLeft("")((acc, n) => acc + n + ",")
str1 is "1,2,3,4,"

/* OUTPUT_SHOULD_BE
1,2,3,4,
*/
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/31_FunctionsasObjects/Solution-2.scala | Scala | apache-2.0 | 246 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.partest.nest
import language.postfixOps
// Command-line option specification for the partest console runner. Each `val opt...`
// declares a flag (`--?` = boolean switch, `--|` = option taking a value) via the
// Spec DSL; declaration order determines the order in the generated help text.
trait RunnerSpec extends Spec with Meta.StdOpts with Interpolation {
  def referenceSpec = RunnerSpec
  def programInfo = Spec.Info(
      "console-runner",
      "Usage: ConsoleRunner [options] [test test ...]",
      "scala.tools.partest.nest.ConsoleRunner")
  heading("Test categories:")
  val optPos = "pos" / "run compilation tests (success)" --?
  val optNeg = "neg" / "run compilation tests (failure)" --?
  val optRun = "run" / "run interpreter and backend tests" --?
  val optJvm = "jvm" / "run JVM backend tests" --?
  val optRes = "res" / "run resident compiler tests" --?
  val optScalap = "scalap" / "run scalap tests" --?
  val optSpecialized = "specialized" / "run specialization tests" --?
  val optInstrumented = "instrumented" / "run instrumented tests" --?
  val optPresentation = "presentation" / "run presentation compiler tests" --?
  heading("Test runner options:")
  val optFailed = "failed" / "run only those tests that failed during the last run" --?
  val optTimeout = "timeout" / "aborts the test suite after the given amount of time" --|
  val optPack = "pack" / "pick compiler/reflect/library in build/pack, and run all tests" --?
  val optGrep = "grep" / "run all tests whose source file contains the expression given to grep" --|
  val optUpdateCheck = "update-check" / "instead of failing tests with output change, update checkfile (use with care!)" --?
  val optNoExec = "no-exec" / "instead of running tests, stop after dry-run compilation" --?
  val optBuildPath = "buildpath" / "set (relative) path to build jars (ex.: --buildpath build/pack)" --|
  val optClassPath = "classpath" / "set (absolute) path to build classes" --|
  val optSourcePath = "srcpath" / "set (relative) path to test source files (ex.: --srcpath pending)" --|
  heading("Test output options:")
  val optShowDiff = "show-diff" / "show diffs for failed tests" --?
  val optShowLog = "show-log" / "show log files for failed tests" --?
  val optVerbose = "verbose" / "show verbose progress information" --?
  val optTerse = "terse" / "show terse progress information" --?
  val optDebug = "debug" / "enable debugging output, preserve generated files" --?
  heading("Other options:")
  val optVersion = "version" / "show Scala version and exit" --?
  val optHelp = "help" / "show this page and exit" --?
}
// Reference instance used to parse an actual argument list into a Config.
object RunnerSpec extends RunnerSpec with Reference {
  trait Config extends RunnerSpec with Instance
  type ThisCommandLine = CommandLine
  def creator(args: List[String]): ThisCommandLine = new CommandLine(RunnerSpec, args)
  // Parses the given args eagerly into a Config instance.
  def forArgs(args: Array[String]): Config = new { val parsed = creator(args.toList) } with Config
}
| martijnhoekstra/scala | src/partest/scala/tools/partest/nest/RunnerSpec.scala | Scala | apache-2.0 | 3,590 |
package org.psliwa.idea.composerJson.composer.model.repository
import org.psliwa.idea.composerJson.composer.model.PackageName
// A repository that delegates to a list of underlying repositories, concatenating
// their results in list order.
private class ComposedRepository[Package](repositories: List[Repository[Package]]) extends Repository[Package] {

  override def getPackages: Seq[Package] =
    for {
      repository <- repositories
      pkg <- repository.getPackages
    } yield pkg

  override def getPackageVersions(packageName: PackageName): Seq[String] =
    for {
      repository <- repositories
      version <- repository.getPackageVersions(packageName)
    } yield version

  override def map[NewPackage](f: Package => NewPackage): Repository[NewPackage] =
    new ComposedRepository(repositories.map(repository => repository map f))
}
| psliwa/idea-composer-plugin | src/main/scala/org/psliwa/idea/composerJson/composer/model/repository/ComposedRepository.scala | Scala | mit | 619 |
package lichess
// Minimal projection of a user: id, display name and optional title.
case class LightUser(id: String, name: String, title: Option[String] = None)
// The two players of a game; apply resolves the player for the given chess color.
case class Users(white: LightUser, black: LightUser) {
  def apply(color: chess.Color) = color.fold(white, black)
}
| ornicar/lichess-db | src/main/scala/LightUser.scala | Scala | agpl-3.0 | 212 |
package org.bitcoins.testkit.chain.fixture
import org.bitcoins.chain.blockchain.ChainHandler
import org.bitcoins.chain.models.BlockHeaderDAO
/**
 * This ADT represents all Chain test fixtures. If you set this type to be your
 * FixtureParam and override withFixture to be withChainFixture, then simply tag
 * tests to specify which fixture that test should receive, and then use inFixtured,
 * which takes a PartialFunction[ChainFixture, Future[Assertion]] (i.e. just
 * specify the relevant case for your expected fixture).
 */
sealed trait ChainFixture
object ChainFixture {
  // No chain infrastructure required by the test
  case object Empty extends ChainFixture
  // A BlockHeaderDAO containing only the genesis header
  case class GenisisBlockHeaderDAO(dao: BlockHeaderDAO) extends ChainFixture
  // A BlockHeaderDAO pre-populated with a chain of headers
  case class PopulatedBlockHeaderDAO(dao: BlockHeaderDAO) extends ChainFixture
  // A ChainHandler starting from the genesis block
  case class GenisisChainHandler(chainHandler: ChainHandler)
      extends ChainFixture
  // A ChainHandler backed by a pre-populated chain
  case class PopulatedChainHandler(chainHandler: ChainHandler)
      extends ChainFixture
  // A chain handler wired to a bitcoind instance via ZMQ notifications
  case class BitcoindZmqChainHandlerWithBlock(
      bitcoindChainHandler: BitcoindChainHandlerViaZmq)
      extends ChainFixture
}
| bitcoin-s/bitcoin-s-core | testkit/src/main/scala/org/bitcoins/testkit/chain/fixture/ChainFixture.scala | Scala | mit | 1,096 |
package org.jetbrains.plugins.scala
package testingSupport.test.specs2
import com.intellij.execution._
import com.intellij.execution.actions.ConfigurationContext
import com.intellij.execution.configurations.RunConfiguration
import com.intellij.psi._
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.extensions.PsiElementExt
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScExpression, ScInfixExpr}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.testingSupport.test.structureView.TestNodeProvider
import org.jetbrains.plugins.scala.testingSupport.test.{ClassTestData, SingleTestData, TestConfigurationProducer, TestConfigurationUtil}
/**
* User: Alexander Podkhalyuzin
* Date: 04.05.2009
*/
// Produces IntelliJ run configurations for specs2 tests from an editor/project-view
// location: a whole package/directory, a specification class, or a single example.
class Specs2ConfigurationProducer extends {
  val confType = new Specs2ConfigurationType
  val confFactory = confType.confFactory
} with TestConfigurationProducer(confType) {
  // Marker traits identifying specs2 specifications (old and new module layout).
  override def suitePaths = List("org.specs2.specification.SpecificationStructure",
    "org.specs2.specification.core.SpecificationStructure")
  override def findExistingByElement(location: Location[_ <: PsiElement],
                                     existingConfigurations: Array[RunnerAndConfigurationSettings],
                                     context: ConfigurationContext): RunnerAndConfigurationSettings = {
    super.findExistingByElement(location, existingConfigurations, context)
  }
  // Builds a run configuration for the PSI element under `location`, returning the
  // element the configuration is anchored to plus the settings, or None when the
  // location is not inside a specs2 specification.
  override def createConfigurationByLocation(location: Location[_ <: PsiElement]): Option[(PsiElement, RunnerAndConfigurationSettings)] = {
    val element = location.getPsiElement
    if (element == null) return None
    // package or directory: create an "all tests in scope" configuration
    if (element.isInstanceOf[PsiPackage] || element.isInstanceOf[PsiDirectory]) {
      val name = element match {
        case p: PsiPackage => p.getName
        case d: PsiDirectory => d.getName
      }
      return Some((element, TestConfigurationUtil.packageSettings(element, location, confFactory, ScalaBundle.message("test.in.scope.specs2.presentable.text", name))))
    }
    val parent: ScTypeDefinition = PsiTreeUtil.getParentOfType(element, classOf[ScTypeDefinition], false)
    if (parent == null) return None
    val settings = RunManager.getInstance(location.getProject).createRunConfiguration(parent.name, confFactory)
    val runConfiguration = settings.getConfiguration.asInstanceOf[Specs2RunConfiguration]
    val (testClass, testName) = getLocationClassAndTest(location)
    if (testClass == null) return None
    val testClassPath = testClass.qualifiedName
    runConfiguration.initWorkingDir()
    // If the selected element is a non-empty string literal, we assume that this
    // is the name of an example to be filtered.
    if (testName != null) {
      // NOTE(review): get/set of the same java options looks like a no-op — verify intent
      val options = runConfiguration.getJavaOptions
      runConfiguration.setJavaOptions(options)
      val testNamePrefixed = testClassPath + "::" + testName
      runConfiguration.setGeneratedName(testNamePrefixed)
      runConfiguration.setName(testNamePrefixed)
    }
    runConfiguration.setTestConfigurationData(ClassTestData(runConfiguration, testClassPath, testName))
    try {
      val module = ScalaPsiUtil.getModule(element)
      if (module != null) {
        runConfiguration.setModule(module)
      }
    }
    catch {
      case _: Exception =>
    }
    JavaRunConfigurationExtensionManager.getInstance.extendCreatedConfiguration(runConfiguration, location)
    Some((testClass, settings))
  }
  // True when `configuration` already corresponds to the test at `location`
  // (same package scope, or same class / class+example pair).
  override def isConfigurationByLocation(configuration: RunConfiguration, location: Location[_ <: PsiElement]): Boolean = {
    val element = location.getPsiElement
    if (element == null) return false
    if (element.isInstanceOf[PsiPackage] || element.isInstanceOf[PsiDirectory]) {
      if (!configuration.isInstanceOf[Specs2RunConfiguration]) return false
      return TestConfigurationUtil.isPackageConfiguration(element, configuration)
    }
    val parent: ScTypeDefinition = PsiTreeUtil.getParentOfType(element, classOf[ScTypeDefinition], false)
    if (parent == null) return false
    val suiteClasses = suitePaths.flatMap {
      parent.elementScope.getCachedClass(_)
    }
    if (suiteClasses.isEmpty) return false
    val suiteClazz = suiteClasses.head
    if (!ScalaPsiUtil.isInheritorDeep(parent, suiteClazz)) return false
    val (testClass, testName) = getLocationClassAndTest(location)
    if (testClass == null) return false
    val testClassPath = testClass.qualifiedName
    configuration match {
      case configuration: Specs2RunConfiguration =>
        configuration.testConfigurationData match {
          case testData: SingleTestData => testData.testClassPath == testClassPath && testData.testName == testName
          case classData: ClassTestData => classData.testClassPath == testClassPath && testName == null
          case _ => false
        }
      case _ => false
    }
  }
  // Extracts a statically-known example name from a specs2 infix expression
  // (e.g. `"does X" in { ... }`), when the name is a constant.
  private def extractStaticTestName(testDefExpr: ScInfixExpr): Option[String] = {
    testDefExpr.getChildren.filter(_.isInstanceOf[ScExpression]).map(_.asInstanceOf[ScExpression]).headOption.
      flatMap(TestConfigurationUtil.getStaticTestName(_))
  }
  // Resolves the specification class and, when the caret is on an example, the
  // example's name; returns (null, null) when the location is not a specs2 spec.
  def getLocationClassAndTest(location: Location[_ <: PsiElement]): (ScTypeDefinition, String) = {
    val element = location.getPsiElement
    val testClassDef: ScTypeDefinition = PsiTreeUtil.getParentOfType(element, classOf[ScTypeDefinition], false)
    if (testClassDef == null) return (null, null)
    val suiteClasses = suitePaths.flatMap {
      element.elementScope.getCachedClass(_)
    }
    if (suiteClasses.isEmpty) return (null, null)
    val suiteClazz = suiteClasses.head
    if (!ScalaPsiUtil.isInheritorDeep(testClassDef, suiteClazz)) return (null, null)
    ScalaPsiUtil.getParentWithProperty(element, strict = false, e => TestNodeProvider.isSpecs2TestExpr(e)) match {
      case Some(infixExpr: ScInfixExpr) => (testClassDef, extractStaticTestName(infixExpr).orNull)
      case _ => (testClassDef, null)
    }
  }
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/testingSupport/test/specs2/Specs2ConfigurationProducer.scala | Scala | apache-2.0 | 6,054 |
/*
* MilmSearch is a mailing list searching system.
*
* Copyright (C) 2013 MilmSearch Project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 3
* of the License, or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*
* You can contact MilmSearch Project at mailing list
* milm-search-public@lists.sourceforge.jp.
*/
package org.milmsearch.core.test.util
import org.scalamock.Mock
import org.scalamock.ProxyMockFactory
import org.scalamock.scalatest.MockFactory
trait MockCreatable {
  // Supplied by the concrete mock factory mixin (ScalaMock).
  protected def mock[T: ClassManifest]: T with Mock

  /** Creates a mock of T, lets `f` configure expectations on it, then returns it. */
  def createMock[T: ClassManifest](f: T with Mock => Unit): T = {
    val mocked = mock[T]
    f(mocked)
    mocked
  }
} | mzkrelx/milm-search-core | src/test/scala/org/milmsearch/core/test/util/MockCreatable.scala | Scala | gpl-3.0 | 1,164 |
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui.rendering.actor.mixin
import scalismo.ui.model.properties._
import scalismo.ui.rendering.actor.{ActorEvents, SingleActor}
// Mixin that keeps the actor's rendered opacity in sync with an OpacityProperty.
trait ActorOpacity extends SingleActor with ActorEvents {
  def opacity: OpacityProperty

  listenTo(opacity)

  reactions += {
    case NodeProperty.event.PropertyChanged(property) if property eq opacity => applyOpacity()
  }

  // Pushes the current opacity value onto the actor's property and notifies listeners.
  private def applyOpacity(): Unit = {
    GetProperty().SetOpacity(opacity.value)
    actorChanged()
  }

  // Apply the initial value once at construction time.
  applyOpacity()
}
| unibas-gravis/scalismo-ui | src/main/scala/scalismo/ui/rendering/actor/mixin/ActorOpacity.scala | Scala | gpl-3.0 | 1,224 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import org.scalatest.{FlatSpec, Matchers}
import scala.util.Random
// Unit tests for the Mean layer, averaging over the requested tensor dimension.
class MeanSpec extends FlatSpec with Matchers {
  "mean" should "work properly" in {
    val input = Tensor[Float](T(
      T(1.0f, 2.0f),
      T(3.0f, 4.0f)
    ))
    // averaging over dimension 2 collapses each row to its mean
    val layer = Mean[Float](dimension = 2)
    val expect = Tensor[Float](T(1.5f, 3.5f))
    layer.forward(input) should be(expect)
  }
  "mean" should "work correctly without squeeze" in {
    val input = Tensor[Float](T(
      T(1.0f, 2.0f),
      T(3.0f, 4.0f)
    ))
    // squeeze = false keeps the reduced dimension with size 1
    val layer = Mean[Float](dimension = 2, squeeze = false)
    val expect = Tensor[Float](T(T(1.5f), T(3.5f)))
    layer.forward(input) should be(expect)
  }
}
// Runs the shared serialization round-trip test on a Mean module with random input.
class MeanSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val mean = Mean[Float](2).setName("mean")
    val input = Tensor[Float](5, 5).apply1(_ => Random.nextFloat())
    runSerializationTest(mean, input)
  }
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/MeanSpec.scala | Scala | apache-2.0 | 1,724 |
package org.jetbrains.plugins.scala.lang.psi.api.base.types
/**
 * PSI element for a type wrapped in parentheses.
 *
 * @author Alexander Podkhalyuzin
 * Date: 13.03.2008
 */
trait ScParenthesisedTypeElement extends ScTypeElement {
  override protected val typeName = "TypeInParenthesis"
  // The wrapped type element, if present (the parentheses may be empty/incomplete)
  def typeElement: Option[ScTypeElement] = findChild(classOf[ScTypeElement])
}
object ScParenthesisedTypeElement {
  // Extractor that succeeds when the parenthesised element contains an inner type element
  def unapply(e: ScParenthesisedTypeElement): Option[ScTypeElement] = e.typeElement
} | gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/api/base/types/ScParenthesisedTypeElement.scala | Scala | apache-2.0 | 436 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package toplevel
import com.intellij.psi.PsiClass
import org.jetbrains.plugins.scala.caches.BlockModificationTracker
import org.jetbrains.plugins.scala.lang.psi.api.PropertyMethods.DefinitionRole
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScClassParameter
import org.jetbrains.plugins.scala.lang.psi.light.{PsiClassWrapper, PsiTypedDefinitionWrapper, StaticPsiTypedDefinitionWrapper}
import org.jetbrains.plugins.scala.lang.psi.types.result.Typeable
import org.jetbrains.plugins.scala.macroAnnotations.Cached
/**
* Member definitions, classes, named patterns which have types
*/
trait ScTypedDefinition extends ScNamedElement with Typeable {
  /**
   * @return true - if the definition has a stable type<br>
   * false - otherwise
   *
   * This particular method is about "stable type"
   */
  def isStable = true
  def isVar: Boolean = false
  def isVal: Boolean = false
  // TODO Add ScMember.isAbstract, also see isAbstarct in ScValue / ScVariable
  // A definition is abstract unless it is a function/pattern/variable *definition*
  // (i.e. has a body) or a class parameter.
  def isAbstractMember: Boolean = nameContext match {
    case _: ScFunctionDefinition | _: ScPatternDefinition | _: ScVariableDefinition => false
    case _: ScClassParameter => false
    case _ => true
  }
  // Cached per code-block modification: Java-facing light wrapper for this definition.
  @Cached(BlockModificationTracker(this), this)
  def getTypedDefinitionWrapper(isStatic: Boolean, isAbstract: Boolean, role: DefinitionRole,
                                cClass: Option[PsiClass] = None): PsiTypedDefinitionWrapper = {
    new PsiTypedDefinitionWrapper(this, isStatic, isAbstract, role, cClass)
  }
  // Cached static variant of the wrapper, anchored to the given wrapper class.
  @Cached(BlockModificationTracker(this), this)
  def getStaticTypedDefinitionWrapper(role: DefinitionRole, cClass: PsiClassWrapper): StaticPsiTypedDefinitionWrapper = {
    new StaticPsiTypedDefinitionWrapper(this, role, cClass)
  }
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/toplevel/ScTypedDefinition.scala | Scala | apache-2.0 | 1,894 |
package jmh
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra._
import java.util.concurrent.TimeUnit
import scala.offheap._
import scala.offheap.internal.SunMisc.UNSAFE
@State(Scope.Thread)
class Array {
  implicit val alloc = malloc
  implicit val props = Region.Props(Pool(malloc, pageSize = 81920, chunkSize = 81920))

  // On-heap baseline: 10000 longs in a regular JVM array.
  val jarr: scala.Array[Long] = (0 to 9999).toArray.map(_.toLong)
  // Off-heap array allocated through scala-offheap.
  val arr: offheap.Array[Long] = {
    val arr = offheap.Array.uninit[Long](10000)
    for (i <- 0 to 9999) arr(i) = i
    arr
  }
  // Raw off-heap buffer of 10000 longs managed directly through Unsafe.
  val uarr: Long = {
    val addr = UNSAFE.allocateMemory(10000 * 8)
    var i = 0
    // FIX: was `i <= 10000`, which wrote one long past the end of the allocation
    while (i < 10000) {
      UNSAFE.putLong(addr + i * 8, i)
      i += 1
    }
    addr
  }
  val _0 = 0
  val _42 = 42

  @Benchmark
  def offheapAccess = arr(_0)

  @Benchmark
  def onheapAccess = jarr(_0)

  @Benchmark
  def unsafeAccess = UNSAFE.getLong(uarr + _0)

  @Benchmark
  def offheapUpdate = { arr(_0) = _42; _42 }

  @Benchmark
  def onheapUpdate = { jarr(_0) = _42; _42 }

  @Benchmark
  def unsafeUpdate = { UNSAFE.putLong(uarr + _0, _42); _42 }

  @Benchmark
  def offheapSum = {
    val len = arr.length
    var sum = 0L
    var i = 0
    while (i < len) {
      sum += arr(i)
      i += 1
    }
    sum
  }

  @Benchmark
  def unsafeSum = {
    val len = arr.length
    // FIX: was an Int accumulator over UNSAFE.getInt at a 4-byte stride, which read
    // only half of the buffer and summed the wrong element type; the buffer holds
    // 8-byte longs, matching the other sum benchmarks.
    var sum = 0L
    var i = 0
    while (i < len) {
      sum += UNSAFE.getLong(uarr + 8 * i)
      i += 1
    }
    sum
  }

  @Benchmark
  def onheapSum = {
    val len = jarr.length
    var sum = 0L
    var i = 0
    while (i < len) {
      sum += jarr(i)
      i += 1
    }
    sum
  }

  @Benchmark
  def offheapForeach(bh: Blackhole) = arr.foreach { v => bh.consume(v) }

  @Benchmark
  def onheapForeach(bh: Blackhole) = jarr.foreach { v => bh.consume(v) }

  @Benchmark
  def offheapMap = Region { r => arr.map(_ * 2)(r) }

  @Benchmark
  def onheapMap = jarr.map(_ * 2)
}
| arosenberger/scala-offheap | jmh/src/main/scala/Array.scala | Scala | bsd-3-clause | 1,889 |
/*
Copyright (c) 2010 ymnk, JCraft,Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
3. The names of the authors may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT,
INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.jcraft.dpfwds
import java.net.InetAddress
import java.io.{InputStream, OutputStream}
// Singleton exception thrown on unexpected end of stream.
object IOHandlerException extends Exception

// Low-level helpers for reading SOCKS-style protocol fields from a stream.
trait IOHandler {
  val in: InputStream
  val out: OutputStream

  // Shared scratch buffer; large enough for any length-prefixed field (max 255 bytes).
  protected val buf = new Array[Byte](1024)

  /** Reads exactly `l` bytes into `buf` at offset `s`, throwing on end of stream. */
  protected def read(buf: Array[Byte], s: Int, l: Int): Unit = {
    var offset = s
    var remaining = l
    while (remaining > 0) {
      in.read(buf, offset, remaining) match {
        case -1 => throw IOHandlerException
        case n =>
          offset += n
          remaining -= n
      }
    }
  }

  /** Reads a single byte, throwing on end of stream. */
  protected def read: Byte = {
    in.read match {
      case -1 => throw IOHandlerException
      case c => (c & 0xff).asInstanceOf[Byte]
    }
  }

  // reading ipv4.address
  protected def readIPV4Address: String = {
    read(buf, 0, 4) // FIX: dropped the unused `val i =` binding (read returns Unit)
    val tmp = new Array[Byte](4)
    System.arraycopy(buf, 0, tmp, 0, 4)
    InetAddress.getByAddress(tmp).getHostAddress
  }

  // reading string, which starts with its length
  protected def readNString: String = {
    val n = read & 0xff // FIX: mask so lengths 128..255 are not negative
    read(buf, 0, n)
    new String(buf, 0, n)
  }

  // reading null terminated string.
  protected def readString: String = {
    val str = new StringBuilder
    var c = read
    while (c != 0) {
      // FIX: was `c % 0xff`, which mangled bytes >= 0x80 into negative values
      str.append((c & 0xff).toChar)
      c = read
    }
    str.toString
  }

  // reading a big-endian unsigned 16-bit value.
  protected def readShort: Int = {
    read(buf, 0, 2)
    ((buf(0) << 8) & 0xff00) | (buf(1) & 0xff)
  }

  // reading a length byte (0-255) followed by that many raw bytes.
  protected def readBytes: Array[Byte] = {
    val n = read & 0xff // FIX: mask so lengths 128..255 are not negative
    val result = new Array[Byte](n)
    read(result, 0, n)
    result
  }
}
| ymnk/dpfwds | src/main/scala/com/jcraft/dpfwds/IOHandler.scala | Scala | bsd-3-clause | 3,110 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.internal.collection.queues
import org.jctools.queues.MessagePassingQueue.Consumer
import scala.collection.mutable
/** A JCTools [[Consumer]] that drains queue elements into `buffer`,
  * keeping a running tally of how many elements it has accepted.
  */
private[internal] final class QueueDrain[A](buffer: mutable.Buffer[A])
  extends Consumer[A] {

  // Number of elements accepted so far; exposed read-only via `count`.
  private[this] var accepted = 0

  def count: Int = accepted

  def accept(e: A): Unit = {
    accepted += 1
    buffer += e
  }
}
| Wogan/monix | monix-execution/jvm/src/main/scala/monix/execution/internal/collection/queues/QueueDrain.scala | Scala | apache-2.0 | 1,036 |
package p04Euler
object smallestMultiple {
// evenly divisible signifie divisible par tous les nombres de la liste, rien à voir avec nombre pair
// on peut commencer par chercher à partir du produit des nombres premiers
/** True when `n` is divisible by every element of `listDiv` ("evenly
  * divisible" in the Project Euler sense — nothing to do with even numbers).
  * Vacuously true for an empty list. Replaces the hand-rolled recursion
  * with the equivalent, idiomatic `forall`.
  */
def isEvDiv(n: Long, listDiv: List[Long]): Boolean =
  listDiv.forall(d => n % d == 0)
def biggestEvDiv(n: Long, listDiv: List[Long]): Long = if (isEvDiv(n, listDiv)) n else biggestEvDiv(n + 1, listDiv)
/** Project Euler 5: smallest number evenly divisible by all of 1..20.
  * The search starts at the product of all primes <= 20, since the answer
  * must be divisible by each prime and no smaller candidate can qualify.
  */
def main(args: Array[String]): Unit = {
  // Parenthesized call instead of the discouraged postfix `1L to 20L toList`.
  val listDiv = (1L to 20L).toList
  val listPrime = List[Long](2, 3, 5, 7, 11, 13, 17, 19)
  println(biggestEvDiv(listPrime.product, listDiv))
}
} | vkubicki/ScalaTest | src/main/scala/p04Euler/005 - smallestMultiple.scala | Scala | mit | 685 |
/*
* Copyright 2013 - 2020 Outworkers Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.outworkers.phantom
import java.util.concurrent.TimeUnit
import com.datastax.driver.core.VersionNumber
import com.outworkers.phantom.database.DatabaseProvider
import com.outworkers.phantom.dsl.UUID
import com.outworkers.phantom.tables.TestDatabase
import com.outworkers.util.samplers._
import io.circe.{Encoder, Json}
import org.joda.time.{DateTime, DateTimeZone, LocalDate}
import org.json4s.Formats
import org.scalatest._
import org.scalatest.concurrent.{PatienceConfiguration, ScalaFutures}
import org.scalatest.time.{Millis, Seconds, Span}
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent.{Await, Future}
/** Common base mixed into every phantom test suite: ScalaTest matchers,
  * future support, JSON serializers and default timeouts/samplers.
  * NOTE: several vals below depend on earlier ones (e.g. defaultTimeoutSpan),
  * so declaration order matters — do not reorder.
  */
trait PhantomBaseSuite extends Suite with Matchers
with BeforeAndAfterAll
with ScalaFutures
with JsonFormats
with OptionValues {
// json4s formats extended with the file's custom DateTime and UUID serializers.
implicit val formats: Formats = org.json4s.DefaultFormats + new DateTimeSerializer + new UUIDSerializer
// Default timeout (seconds) used for both Scala futures and ScalaTest patience.
protected[this] val defaultScalaTimeoutSeconds = 25L
// Polling interval (milliseconds) for ScalaTest's eventually/whenReady checks.
private[this] val defaultScalaInterval = 50L
implicit val defaultScalaTimeout: FiniteDuration = {
scala.concurrent.duration.Duration(defaultScalaTimeoutSeconds, TimeUnit.SECONDS)
}
// Span derived from the same seconds value; reused by the two implicits below.
private[this] val defaultTimeoutSpan = Span(defaultScalaTimeoutSeconds, Seconds)
implicit val defaultTimeout: PatienceConfiguration.Timeout = timeout(defaultTimeoutSpan)
// Sampler producing the current UTC instant whenever a DateTime is generated.
implicit object JodaTimeSampler extends Sample[DateTime] {
override def sample: DateTime = DateTime.now(DateTimeZone.UTC)
}
// Sampler producing today's UTC date whenever a LocalDate is generated.
implicit object JodaLocalDateSampler extends Sample[LocalDate] {
override def sample: LocalDate = LocalDate.now(DateTimeZone.UTC)
}
// ScalaFutures patience: same overall timeout, polling every 50ms.
override implicit val patienceConfig: PatienceConfig = PatienceConfig(
timeout = defaultTimeoutSpan,
interval = Span(defaultScalaInterval, Millis)
)
// Syntax: `value.asCql()` renders any value with a phantom Primitive as CQL text.
implicit class CqlConverter[T](val obj: T) {
def asCql()(implicit primitive: com.outworkers.phantom.builder.primitives.Primitive[T]): String = {
primitive.asCql(obj)
}
}
// Syntax: `future.block(timeout)` awaits a future synchronously (test-only).
implicit class BlockHelper[T](val f: Future[T]) {
def block(timeout: Duration): T = Await.result(f, timeout)
}
}
/** Binds the DatabaseProvider cake to the shared [[TestDatabase]] singleton. */
trait TestDatabaseProvider extends DatabaseProvider[TestDatabase] {
override val database: TestDatabase = TestDatabase
}
/** FlatSpec-flavoured base suite wired to the test database, with circe
  * encoders for types that appear in JSON-backed columns.
  */
trait PhantomSuite extends FlatSpec with PhantomBaseSuite with TestDatabaseProvider {

  // DateTime encodes as epoch milliseconds; UUID as its canonical string form.
  implicit val datetimeEncoder: Encoder[DateTime] =
    Encoder.instance { dt => Json.fromLong(dt.getMillis) }

  implicit val uuidEncoder: Encoder[UUID] =
    Encoder.instance { uuid => Json.fromString(uuid.toString) }

  /** Evaluates `fn` (for its side effects only) when the connected Cassandra
    * version is at least `v`; otherwise does nothing.
    */
  def requireVersion[T](v: VersionNumber)(fn: => T): Unit = {
    if (cassandraVersion.value.compareTo(v) >= 0) {
      val _ = fn
    }
  }
}
trait PhantomFreeSuite extends FreeSpec with PhantomBaseSuite with TestDatabaseProvider
| outworkers/phantom | phantom-dsl/src/test/scala/com/outworkers/phantom/PhantomSuite.scala | Scala | apache-2.0 | 3,336 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without surfacing deeper insights.