| code (string, lengths 5-1M) | repo_name (string, lengths 5-109) | path (string, lengths 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M) |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.common.engine.core.managment
import com.bwsw.sj.common.dal.model.instance.{BatchInstanceDomain, ExecutionPlan}
import com.bwsw.sj.common.dal.model.module.BatchSpecificationDomain
import com.bwsw.sj.common.dal.model.stream.StreamDomain
import com.bwsw.sj.common.engine.StreamingExecutor
import com.bwsw.sj.common.engine.core.batch.{BatchCollector, BatchStreamingPerformanceMetrics}
import com.bwsw.sj.common.engine.core.environment.{EnvironmentManager, ModuleEnvironmentManager}
import com.bwsw.sj.common.si.model.instance.{BatchInstance, RegularInstance}
import com.bwsw.sj.common.utils.{EngineLiterals, StreamLiterals}
import com.bwsw.tstreams.agents.producer.Producer
import scaldi.Injector
import scala.collection.mutable
/**
* Class allows to manage an environment of [[com.bwsw.sj.common.utils.EngineLiterals.regularStreamingType]] or
* [[com.bwsw.sj.common.utils.EngineLiterals.batchStreamingType]] task
*
* @author Kseniya Mikhaleva
*/
class CommonTaskManager(implicit injector: Injector) extends TaskManager {
val inputs: mutable.Map[StreamDomain, Array[Int]] = getInputs(getExecutionPlan())
val outputProducers: Map[String, Producer] = createOutputProducers()
require(numberOfAgentsPorts >=
inputs.count(x => x._1.streamType == StreamLiterals.tstreamsType),
"Not enough ports for t-stream consumers." +
s"${inputs.count(x => x._1.streamType == StreamLiterals.tstreamsType)} ports are required")
def getExecutor(environmentManager: EnvironmentManager): StreamingExecutor = {
logger.debug(s"Task: $taskName. Start loading an executor class from module jar.")
val executor = executorClass
.getConstructor(classOf[ModuleEnvironmentManager])
.newInstance(environmentManager)
.asInstanceOf[StreamingExecutor]
logger.debug(s"Task: $taskName. Load an executor class.")
executor
}
def getBatchCollector(instance: BatchInstanceDomain,
performanceMetrics: BatchStreamingPerformanceMetrics,
inputStreams: Array[String]): BatchCollector = {
instance match {
case _: BatchInstanceDomain =>
logger.info(s"Task: $taskName. Getting a batch collector class from jar of file: " +
instance.moduleType + "-" + instance.moduleName + "-" + instance.moduleVersion + ".")
val inputs = inputStreams.map(x => streamRepository.get(x).get)
val batchCollectorClassName = fileMetadata.specification.asInstanceOf[BatchSpecificationDomain].batchCollectorClass
val batchCollector = moduleClassLoader
.loadClass(batchCollectorClassName)
.getConstructor(
classOf[BatchInstanceDomain],
classOf[BatchStreamingPerformanceMetrics],
classOf[Array[StreamDomain]])
.newInstance(instance, performanceMetrics, inputs)
.asInstanceOf[BatchCollector]
batchCollector
case _ =>
logger.error("A batch collector exists only for batch engine.")
throw new RuntimeException("A batch collector exists only for batch engine.")
}
}
private def getExecutionPlan(): ExecutionPlan = {
logger.debug("Get an execution plan of instance.")
instance match {
case regularInstance: RegularInstance =>
regularInstance.executionPlan
case batchInstance: BatchInstance =>
batchInstance.executionPlan
case _ =>
logger.error(s"CommonTaskManager can be used only for ${EngineLiterals.regularStreamingType} or ${EngineLiterals.batchStreamingType} engine.")
throw new RuntimeException("CommonTaskManager can be used only for ${EngineLiterals.regularStreamingType} or ${EngineLiterals.batchStreamingType} engine.")
}
}
}
| bwsw/sj-platform | core/sj-common/src/main/scala/com/bwsw/sj/common/engine/core/managment/CommonTaskManager.scala | Scala | apache-2.0 | 4,536 |
package liang.don.dzimageconverter.log
/**
* Log level.<br>
* Level priority:
* Fatal > Error > Info > Debug
*
* @author Don Liang
* @version 0.1, 22/09/2011
*/
object LogLevel extends Enumeration {
val Debug = Value(0)
val Info = Value(1)
val Error = Value(2)
val Fatal = Value(3)
}
| dl2k84/DeepZoomImageConverter | src/liang/don/dzimageconverter/log/LogLevel.scala | Scala | mit | 302 |
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.connector.cassandra
import org.apache.spark.sql.crossdata.ExecutionType._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class CassandraFunctionIT extends CassandraWithSharedContext {
val execTypes: List[ExecutionType] = Native::Spark::Nil
execTypes.foreach { exec =>
"The Cassandra connector" should s"be able to ${exec.toString}ly select the built-in functions `now`, `dateOf` and `unixTimeStampOf`" in {
assumeEnvironmentIsUpAndRunning
val query = s"SELECT cassandra_now() as t, cassandra_now() as a, cassandra_dateOf(cassandra_now()) as dt, cassandra_unixTimestampOf(cassandra_now()) as ut FROM $Table"
sql(query).collect(exec) should have length 10
}
}
it should s"be able to resolve non-duplicates functions automatically without specifying the datasource" in {
assumeEnvironmentIsUpAndRunning
val query = s"SELECT unixTimestampOf(now()) as ut FROM $Table"
sql(query).collect(Native) should have length 10
}
}
| luismcl/crossdata | cassandra/src/test/scala/com/stratio/crossdata/connector/cassandra/CassandraFunctionIT.scala | Scala | apache-2.0 | 1,680 |
package collins.solr
import java.util.Collections
import java.util.Date
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicReference
import scala.collection.JavaConverters.asScalaSetConverter
import play.api.Logger
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import collins.models.Asset
import collins.models.AssetLog
import akka.actor.Actor
/**
* The SolrUpdater queues asset updates for batch updating. Most importantly,
* if it receives multiple requests of the same asset, it will only update the
* asset once per batch. This is to avoid reindexing an asset many times
* during large updates (such as updating lshw/lldp, which triggers dozens of
* callbacks)
*/
class AssetSolrUpdater extends Actor {
private[this] def newAssetTagSet = Collections.newSetFromMap[String](
new ConcurrentHashMap[String, java.lang.Boolean]())
private[this] val assetTagsRef = new AtomicReference(newAssetTagSet)
private[this] val logger = Logger("SolrUpdater")
//mutex to prevent multiple concurrent scheduler calls
val scheduled = new AtomicBoolean(false)
case object Reindex
/**
* Note: even though the callback checks whether the asset is deleted, we will still
* get index requests from the deleted asset's meta value deletions.
*
* Note: we also re-fetch the asset from MySQL to avoid a possible race
* condition where an asset is deleted immediately after it is updated.
*/
def receive = {
case asset: Asset =>
assetTagsRef.get.add(asset.tag)
if (scheduled.compareAndSet(false, true)) {
logger.debug("Scheduling reindex of %s within %s".format(asset.tag, SolrConfig.assetBatchUpdateWindow))
context.system.scheduler.scheduleOnce(SolrConfig.assetBatchUpdateWindow, self, Reindex)
} else {
logger.trace("Ignoring already scheduled reindex of %s".format(asset.tag))
}
case Reindex =>
if (scheduled.get == true) {
val assetTags = assetTagsRef.getAndSet(newAssetTagSet).asScala.toSeq
val indexTime = new Date
val assets = assetTags.flatMap(Asset.findByTag(_))
logger.debug("Got Reindex task, working on %d assets".format(assetTags.size))
SolrHelper.updateAssets(assets, indexTime)
scheduled.set(false)
}
}
}
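// A minimal usage sketch (assuming an ActorSystem named `system` and an `asset: Asset` in scope,
// with akka.actor.Props imported):
//   val updater = system.actorOf(Props[AssetSolrUpdater])
//   updater ! asset  // queues asset.tag; a single Reindex batch fires after SolrConfig.assetBatchUpdateWindow
// Repeated sends for the same asset inside that window are coalesced into one reindex.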
class AssetLogSolrUpdater extends Actor {
def receive = {
case log: AssetLog => SolrHelper.updateAssetLogs(List(log), new Date)
}
}
| discordianfish/collins | app/collins/solr/SolrUpdater.scala | Scala | apache-2.0 | 2,516 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import scala.collection.JavaConverters._
import org.apache.hadoop.fs.FileStatus
import org.apache.spark.sql.{AnalysisException, SparkSession}
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources.v2.{DataSourceOptions, SupportsBatchRead, SupportsBatchWrite, Table}
import org.apache.spark.sql.types.StructType
abstract class FileTable(
sparkSession: SparkSession,
options: DataSourceOptions,
userSpecifiedSchema: Option[StructType])
extends Table with SupportsBatchRead with SupportsBatchWrite {
lazy val fileIndex: PartitioningAwareFileIndex = {
val filePaths = options.paths()
val hadoopConf =
sparkSession.sessionState.newHadoopConfWithOptions(options.asMap().asScala.toMap)
val rootPathsSpecified = DataSource.checkAndGlobPathIfNecessary(filePaths, hadoopConf,
checkEmptyGlobPath = true, checkFilesExist = options.checkFilesExist())
val fileStatusCache = FileStatusCache.getOrCreate(sparkSession)
new InMemoryFileIndex(sparkSession, rootPathsSpecified,
options.asMap().asScala.toMap, userSpecifiedSchema, fileStatusCache)
}
lazy val dataSchema: StructType = userSpecifiedSchema.orElse {
inferSchema(fileIndex.allFiles())
}.getOrElse {
throw new AnalysisException(
s"Unable to infer schema for $name. It must be specified manually.")
}.asNullable
override def schema(): StructType = {
val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
PartitioningUtils.mergeDataAndPartitionSchema(dataSchema,
fileIndex.partitionSchema, caseSensitive)._1
}
/**
* When possible, this method should return the schema of the given `files`. When the format
* does not support inference, or no valid files are given, it should return None. In these cases,
* Spark will require that the user specify the schema manually.
*/
def inferSchema(files: Seq[FileStatus]): Option[StructType]
}
| WindCanDie/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileTable.scala | Scala | apache-2.0 | 2,788 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2013-2014 Alexey Aksenov ezh@ezh.msk.ru
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Public License.
*
* In accordance with Section 7(b) of the GNU Affero General Public License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: ezh@ezh.msk.ru
*/
package org.digimead.tabuddy.desktop.core.ui.definition.widget
import akka.actor.{ ActorRef, actorRef2Scala }
import com.google.common.collect.MapMaker
import java.util.UUID
import org.digimead.tabuddy.desktop.core.definition.Context
import org.digimead.tabuddy.desktop.core.support.App
import org.digimead.tabuddy.desktop.core.ui.block.{ Configuration, View }
import org.eclipse.e4.core.di.InjectionException
import org.eclipse.swt.custom.ScrolledComposite
import org.eclipse.swt.events.{ DisposeEvent, DisposeListener }
import org.eclipse.swt.widgets.Composite
import scala.collection.JavaConverters.mapAsScalaMapConverter
/**
* View composite that contains an additional reference to the content actor.
* The content view actor is bound to the root component because changing an actor's parent is unsupported in Akka.
*/
class VComposite(val id: UUID, val ref: ActorRef, val contentRef: ActorRef, val factory: Configuration.Factory,
parent: ScrolledComposite, style: Int)
extends Composite(parent, style) with View.ViewMapDisposer with SComposite {
initialize
/** Get view context. */
// There are many situations in which an application tries to access the context after the WComposite is disposed.
def getContext(): Option[Context] = VComposite.contextMap.get(this)
/** Get content context. */
def getContentContext(): Option[Context] = factory().withContexts(_.get(contentRef.path.name)).map(_._2)
/** Returns the receiver's parent, which must be a ScrolledComposite. */
override def getParent(): ScrolledComposite = super.getParent.asInstanceOf[ScrolledComposite]
/** Initialize current view composite. */
protected def initialize() {
addDisposeListener(new DisposeListener {
def widgetDisposed(e: DisposeEvent) {
VComposite.contextMap.remove(VComposite.this).foreach { context ⇒
try context.dispose()
catch { case e: InjectionException ⇒ context.dispose() } // Yes, there is a bug in org.eclipse.e4.core.internal.contexts.EclipseContext.dispose
}
viewRemoveFromCommonMap()
ref ! App.Message.Destroy(None, ref)
}
})
}
override lazy val toString = s"VComposite{${factory().name.name}}[%08X]".format(id.hashCode())
}
object VComposite {
protected val contextMap = new MapMaker().weakKeys().makeMap[VComposite, Context]().asScala
/** Set context for VComposite. */
trait ContextSetter {
def setVCompositeContext(vComposite: VComposite, context: Context) = contextMap(vComposite) = context
}
}
| digimead/digi-TABuddy-desktop | part-core-ui/src/main/scala/org/digimead/tabuddy/desktop/core/ui/definition/widget/VComposite.scala | Scala | agpl-3.0 | 4,737 |
package implicits
class Implicits {
implicit def listToString[T](l: List[T]): String = {
l.mkString("List(", " ", ")")
}
val str: String = List(1,2,3) /*<*/
println(List(1,2): String) /*<*/
}
| Kwestor/scala-ide | org.scala-ide.sdt.core.tests/test-workspace/implicits-highlighting/src/implicit-highlighting/Implicits.scala | Scala | bsd-3-clause | 206 |
package blended.streams.dispatcher.cbe
import java.util.Date
import blended.streams.transaction.{EventSeverity, FlowTransactionStateStarted}
import blended.testsupport.scalatest.LoggingFreeSpec
import blended.util.XMLSupport
import org.scalatest.matchers.should.Matchers
import scala.concurrent.duration._
class CbeEventSpec extends LoggingFreeSpec
with Matchers {
val headers : Map[String, String] = Map(
"foo" -> "bar",
"Application" -> "XX",
"Module" -> "YY"
)
//scalastyle:off magic.number
val comp : CbeComponent = CbeComponent(
"SIB-2.0",
"TestComponent",
"cc-9999",
"Shop",
"TestRoute",
"ResourceType",
"Route",
9999
)
// scalastyle:on magic.number
private[this] def validateCBE(event : CbeTransactionEvent, xml : String) : XMLSupport = {
val component = event.component
val xmlSupport = new XMLSupport(xml)
xmlSupport.validate("cbe_1.0.1_kl.xsd")
xmlSupport.applyXPath("//*[local-name() = 'sourceComponentId']/@component") should be(component.component)
xmlSupport.applyXPath("//*[local-name() = 'sourceComponentId']/@subComponent") should be(component.subComponent)
xmlSupport.applyXPath("//*[local-name() = 'sourceComponentId']/@instanceId") should be(component.instanceId.toString)
xmlSupport.applyXPath("//*[local-name() = 'children' and ./@name = 'TRANSACTION_STATUS']/*/text()") should be(event.state.get.toString)
xmlSupport.applyXPath("//*[local-name() = 'children' and ./@name = 'TRANSACTION_ID']/*/text()") should be(event.id)
xmlSupport.applyXPath("//*[local-name() = 'extendedDataElements' and ./@name = 'Application']/*/text()") should be("SIB-2.0")
xmlSupport.applyXPath("//*[local-name() = 'extendedDataElements' and ./@name = 'Module']/*/text()") should be(component.subComponent)
xmlSupport
}
"A Transaction Event event" - {
"be representable as a CBE XML" in {
val event = CbeTransactionEvent(
id = "myId",
severity = EventSeverity.Information,
component = comp,
state = Some(FlowTransactionStateStarted),
properties = headers,
closeProcess = false,
timeout = 1.second,
timestamp = new Date()
)
val xml = event.asCBE()
validateCBE(event, xml)
}
"populate ModuleLast if the process is to be closed" in {
val event = CbeTransactionEvent(
id = "myId",
severity = EventSeverity.Information,
component = comp,
state = Some(FlowTransactionStateStarted),
properties = headers,
closeProcess = true,
timeout = 1.second,
timestamp = new Date()
)
val xml = event.asCBE()
val xmlSupport = validateCBE(event, xml)
xmlSupport.applyXPath("//*[local-name() = 'extendedDataElements' and ./@name = 'ModuleLast']/*/text()") should be(comp.subComponent)
}
}
}
| woq-blended/blended | blended.streams.dispatcher/src/test/scala/blended/streams/dispatcher/cbe/CbeTransactionEventSpec.scala | Scala | apache-2.0 | 2,959 |
import org.specs._
import com.redis._
import org.specs.mock.Mockito
import org.mockito.Mock._
import org.mockito.Mockito._
import org.mockito.Mockito.doNothing
object SortedSetOperationsSpec extends Specification with Mockito {
"Redis Client Sorted Set Operations" should {
var client: RedisTestClient = null
var connection: Connection = null
doBefore{
connection = mock[Connection]
client = new RedisTestClient(connection)
}
"add a member to a sorted set" in {
connection.readBoolean returns true
client.zSetAdd("set", 0, "value") must beTrue
connection.write("ZADD set 0 5\\r\\nvalue\\r\\n") was called
}
"delete a member of a sorted set" in {
connection.readBoolean returns true
client.zSetDelete("set","value") must beTrue
connection.write("ZREM set 5\\r\\nvalue\\r\\n") was called
}
"increment by score" in {
connection.readString returns Some("3")
client.zSetIncrementBy("set", 3, "value") mustEqual Some(3)
connection.write("ZINCRBY set 3 5\\r\\nvalue\\r\\n") was called
}
"return a range" in {
connection.readSet returns Some(Set("a", "b", "c"))
client.zSetRange("set", 0, 10) mustEqual Some(Set("a", "b", "c"))
connection.write("ZRANGE set 0 10\\r\\n") was called
}
"return a reversed range" in {
connection.readSet returns Some(Set("c", "b", "a"))
client.zSetReverseRange("set", 0, 10) mustEqual Some(Set("c", "b", "a"))
connection.write("ZREVRANGE set 0 10\\r\\n") was called
}
"return a range by score" in {
connection.readSet returns Some(Set("c", "b", "a"))
client.zSetRangeByScore("set", 0, 10) mustEqual Some(Set("c", "b", "a"))
connection.write("ZRANGEBYSCORE set 0 10\\r\\n") was called
}
"return a range by score with offset and limit" in {
connection.readSet returns Some(Set("b", "a"))
client.zSetRangeByScore("set", 0, 10, 1, 2) mustEqual Some(Set("b", "a"))
connection.write("ZRANGEBYSCORE set 0 10 LIMIT 1 2\\r\\n") was called
}
"return the count" in {
connection.readInt returns Some(2)
client.zSetCount("set") mustEqual Some(2)
connection.write("ZCARD set\\r\\n") was called
}
"return the score of an element" in {
connection.readString returns Some("2")
client.zSetScore("set", "element") mustEqual Some(2)
connection.write("ZSCORE set 7\\r\\nelement\\r\\n") was called
}
}
}
| acrosa/scala-redis | src/test/scala/com/redis/operations/SortedSetOperationsSpec.scala | Scala | mit | 2,495 |
package com.nexelem.graph.gremlins
import com.ansvia.graph.BlueprintsWrapper._
import com.ansvia.graph.annotation.Persistent
import com.tinkerpop.blueprints.Direction.{IN, OUT}
class HelloGodsSpec extends GraphSpec {
"blueprints-scala simple access" should {
"properly store & read simple classes" in {
val greekRealm = connector.save(Realm("Greek"))
val hercules = connector.save(God("Hercules", "Demigod"))
val ares = connector.save(God("Ares", "God"))
val realms = connector.findAll[Realm]
realms should have size 1
val gods = connector.findAll[God]
gods should have size 2
}
"properly delete classes" in {
val mesopotamianRealm = connector.save(Realm("Mesopotamian"))
val cthulhuRealm = connector.save(Realm("Cthulhu"))
val shintoRealm = connector.save(Realm("Shinto"))
connector.findAll[Realm] should contain theSameElementsAs Seq(mesopotamianRealm, cthulhuRealm, shintoRealm)
connector.delete(mesopotamianRealm)
connector.findAll[Realm] should contain theSameElementsAs Seq(cthulhuRealm, shintoRealm)
connector.delete(shintoRealm)
connector.findAll[Realm] should contain theSameElementsAs Seq(cthulhuRealm)
}
"properly associate edges with vertices" in {
val hinduRealm = connector.save(Realm("Hindu"))
val vishnu = connector.save(God("Vishnu", "Deity"))
connector.getLinked(hinduRealm, classOf[God], IN, "livesIn") should have size 0
vishnu.getVertex --> "livesIn" --> hinduRealm.getVertex
connector.getLinked(hinduRealm, classOf[God], IN, "livesIn") should have size 1
val shakti = connector.save(God("Shakti", "Deity"))
connector.getLinked(hinduRealm, classOf[God], IN, "livesIn") should have size 1
connector.findAll[God] should have size 2
}
"properly handle linking vertices" in {
val chineseRealm = connector.save(Realm("Chinese"))
val hundun = connector.save(God("Hundun", "Abstract"))
hundun.getVertex --> "livesIn" --> chineseRealm.getVertex
val taotie = connector.save(God("Taotie", "Abstract"))
hundun.getVertex --> "livesIn" --> chineseRealm.getVertex
connector.getLinked(chineseRealm, classOf[God], IN, "livesIn") should have size 2
val jiaolong = connector.save(God("Jiaolong", "Dragon"))
val mizuchi = connector.save(God("Mizuchi", "Dragon"))
jiaolong.getVertex --> "livesIn" --> chineseRealm.getVertex <-- "livesIn" <-- mizuchi.getVertex
connector.getLinked(chineseRealm, classOf[God], IN, "livesIn") should have size 4
jiaolong.getVertex <--> "aliasOf" <--> mizuchi.getVertex
connector.getLinked(jiaolong, classOf[God], OUT, "aliasOf") should contain(mizuchi)
connector.getLinked(mizuchi, classOf[God], OUT, "aliasOf") should contain(jiaolong)
jiaolong.getVertex.mutual("aliasOf").printDump("Mutual aliases:", "name")
}
}
}
case class God (name: String, godType: String) extends BaseEntity
case class Realm (name: String) extends BaseEntity
case class Hero(name: String) extends BaseEntity {
@Persistent
var born = ""
}
//val achilles = new Hero("Achilles")
//achilles.born = "1250 BC"
//achilles.save()
| nexelem/graphs-gremlins | src/test/scala/com/nexelem/graph/gremlins/HelloGodsSpec.scala | Scala | apache-2.0 | 3,214 |
package immortan.utils
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.bitcoin.DeterministicWallet.ExtendedPublicKey
import fr.acinq.bitcoin.{ByteVector32, Satoshi}
import fr.acinq.eclair.MilliSatoshi
import fr.acinq.eclair.blockchain.electrum.db.{ChainWalletInfo, SigningWallet, WatchingWallet}
import fr.acinq.eclair.blockchain.fee._
import fr.acinq.eclair.wire.ChannelCodecs.extendedPublicKeyCodec
import fr.acinq.eclair.wire.ChannelUpdate
import fr.acinq.eclair.wire.CommonCodecs._
import fr.acinq.eclair.wire.LightningMessageCodecs._
import immortan._
import immortan.crypto.Tools.Fiat2Btc
import immortan.fsm.SplitInfo
import immortan.utils.FiatRates.{BitpayItemList, CoinGeckoItemMap}
import scodec.bits.BitVector
import spray.json._
object ImplicitJsonFormats extends DefaultJsonProtocol {
val json2String: JsValue => String = (_: JsValue).convertTo[String]
final val TAG = "tag"
def writeExt[T](ext: (String, JsValue), base: JsValue): JsObject = JsObject(base.asJsObject.fields + ext)
def to[T : JsonFormat](raw: String): T = raw.parseJson.convertTo[T]
def taggedJsonFmt[T](base: JsonFormat[T], tag: String): JsonFormat[T] = new JsonFormat[T] {
def write(unserialized: T): JsValue = writeExt(TAG -> JsString(tag), base write unserialized)
def read(serialized: JsValue): T = base read serialized
}
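// Illustration of the tagging scheme (hypothetical values): a format built with
// taggedJsonFmt(base, tag = "message") writes the base JSON object plus the extra "tag" field,
// e.g. MessageAction(Some("example.com"), "hi") becomes
// {"domain":"example.com","message":"hi","tag":"message"} (field order may vary).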
def json2BitVec(json: JsValue): Option[BitVector] = BitVector fromHex json2String(json)
def sCodecJsonFmt[T](codec: scodec.Codec[T] = null): JsonFormat[T] = new JsonFormat[T] {
def read(serialized: JsValue): T = codec.decode(json2BitVec(serialized).get).require.value
def write(unserialized: T): JsValue = codec.encode(unserialized).require.toHex.toJson
}
implicit val publicKeyFmt: JsonFormat[PublicKey] = sCodecJsonFmt(publicKey)
implicit val byteVector32Fmt: JsonFormat[ByteVector32] = sCodecJsonFmt(bytes32)
implicit val channelUpdateFmt: JsonFormat[ChannelUpdate] = sCodecJsonFmt(channelUpdateCodec)
implicit val milliSatoshiFmt: JsonFormat[MilliSatoshi] = jsonFormat[Long, MilliSatoshi](MilliSatoshi.apply, "underlying")
implicit val satoshiFmt: JsonFormat[Satoshi] = jsonFormat[Long, Satoshi](Satoshi.apply, "underlying")
implicit val extendedPublicKeyFmt: JsonFormat[ExtendedPublicKey] = sCodecJsonFmt(extendedPublicKeyCodec)
// Chain wallet types
implicit object ChainWalletInfoFmt extends JsonFormat[ChainWalletInfo] {
def read(raw: JsValue): ChainWalletInfo = raw.asJsObject.fields(TAG) match {
case JsString("WatchingWallet") => raw.convertTo[WatchingWallet]
case JsString("SigningWallet") => raw.convertTo[SigningWallet]
case _ => throw new Exception
}
def write(internal: ChainWalletInfo): JsValue = internal match {
case walletInfo: WatchingWallet => walletInfo.toJson
case walletInfo: SigningWallet => walletInfo.toJson
case _ => throw new Exception
}
}
implicit val signingWalletFmt: JsonFormat[SigningWallet] =
taggedJsonFmt(jsonFormat[String, Boolean, SigningWallet](SigningWallet.apply, "walletType", "isRemovable"), tag = "SigningWallet")
implicit val watchingWalletFmt: JsonFormat[WatchingWallet] =
taggedJsonFmt(jsonFormat[String, Option[Long], ExtendedPublicKey, Boolean,
WatchingWallet](WatchingWallet.apply, "walletType", "masterFingerprint", "xPub", "isRemovable"), tag = "WatchingWallet")
// PaymentInfo stuff
implicit val semanticOrderFmt: JsonFormat[SemanticOrder] = jsonFormat[String, Long, SemanticOrder](SemanticOrder.apply, "id", "order")
implicit val lNUrlDescription: JsonFormat[LNUrlDescription] =
jsonFormat[Option[String], Option[SemanticOrder], String, ByteVector32, ByteVector32, MilliSatoshi,
LNUrlDescription](LNUrlDescription.apply, "label", "semanticOrder", "privKey", "lastHash", "lastSecret", "lastMsat")
implicit object TxDescriptionFmt extends JsonFormat[TxDescription] {
def read(raw: JsValue): TxDescription = raw.asJsObject.fields(TAG) match {
case JsString("PlainTxDescription") => raw.convertTo[PlainTxDescription]
case JsString("OpReturnTxDescription") => raw.convertTo[OpReturnTxDescription]
case JsString("ChanFundingTxDescription") => raw.convertTo[ChanFundingTxDescription]
case JsString("ChanRefundingTxDescription") => raw.convertTo[ChanRefundingTxDescription]
case JsString("HtlcClaimTxDescription") => raw.convertTo[HtlcClaimTxDescription]
case JsString("PenaltyTxDescription") => raw.convertTo[PenaltyTxDescription]
case _ => throw new Exception
}
def write(internal: TxDescription): JsValue = internal match {
case txDescription: PlainTxDescription => txDescription.toJson
case txDescription: OpReturnTxDescription => txDescription.toJson
case txDescription: ChanFundingTxDescription => txDescription.toJson
case txDescription: ChanRefundingTxDescription => txDescription.toJson
case txDescription: HtlcClaimTxDescription => txDescription.toJson
case txDescription: PenaltyTxDescription => txDescription.toJson
case _ => throw new Exception
}
}
implicit val rbfParams: JsonFormat[RBFParams] = jsonFormat[ByteVector32, Long, RBFParams](RBFParams.apply, "ofTxid", "mode")
implicit val plainTxDescriptionFmt: JsonFormat[PlainTxDescription] =
taggedJsonFmt(jsonFormat[List[String], Option[String], Option[SemanticOrder], Option[ByteVector32], Option[ByteVector32], Option[RBFParams],
PlainTxDescription](PlainTxDescription.apply, "addresses", "label", "semanticOrder", "cpfpBy", "cpfpOf", "rbf"), tag = "PlainTxDescription")
implicit val opReturnTxDescriptionFmt: JsonFormat[OpReturnTxDescription] =
taggedJsonFmt(jsonFormat[List[ByteVector32], Option[String], Option[SemanticOrder], Option[ByteVector32], Option[ByteVector32], Option[RBFParams],
OpReturnTxDescription](OpReturnTxDescription.apply, "preimages", "label", "semanticOrder", "cpfpBy", "cpfpOf", "rbf"), tag = "OpReturnTxDescription")
implicit val chanFundingTxDescriptionFmt: JsonFormat[ChanFundingTxDescription] =
taggedJsonFmt(jsonFormat[PublicKey, Option[String], Option[SemanticOrder], Option[ByteVector32], Option[ByteVector32], Option[RBFParams],
ChanFundingTxDescription](ChanFundingTxDescription.apply, "nodeId", "label", "semanticOrder", "cpfpBy", "cpfpOf", "rbf"), tag = "ChanFundingTxDescription")
implicit val chanRefundingTxDescriptionFmt: JsonFormat[ChanRefundingTxDescription] =
taggedJsonFmt(jsonFormat[PublicKey, Option[String], Option[SemanticOrder], Option[ByteVector32], Option[ByteVector32], Option[RBFParams],
ChanRefundingTxDescription](ChanRefundingTxDescription.apply, "nodeId", "label", "semanticOrder", "cpfpBy", "cpfpOf", "rbf"), tag = "ChanRefundingTxDescription")
implicit val htlcClaimTxDescriptionFmt: JsonFormat[HtlcClaimTxDescription] =
taggedJsonFmt(jsonFormat[PublicKey, Option[String], Option[SemanticOrder], Option[ByteVector32], Option[ByteVector32], Option[RBFParams],
HtlcClaimTxDescription](HtlcClaimTxDescription.apply, "nodeId", "label", "semanticOrder", "cpfpBy", "cpfpOf", "rbf"), tag = "HtlcClaimTxDescription")
implicit val penaltyTxDescriptionFmt: JsonFormat[PenaltyTxDescription] =
taggedJsonFmt(jsonFormat[PublicKey, Option[String], Option[SemanticOrder], Option[ByteVector32], Option[ByteVector32], Option[RBFParams],
PenaltyTxDescription](PenaltyTxDescription.apply, "nodeId", "label", "semanticOrder", "cpfpBy", "cpfpOf", "rbf"), tag = "PenaltyTxDescription")
implicit val splitInfoFmt: JsonFormat[SplitInfo] = jsonFormat[MilliSatoshi, MilliSatoshi, SplitInfo](SplitInfo.apply, "totalSum", "myPart")
implicit val paymentDescriptionFmt: JsonFormat[PaymentDescription] =
jsonFormat[Option[SplitInfo], Option[String], Option[SemanticOrder], String, Option[String], Option[String], Option[Long], Option[ByteVector32],
PaymentDescription](PaymentDescription.apply, "split", "label", "semanticOrder", "invoiceText", "proofTxid", "meta", "holdPeriodSec", "toSelfPreimage")
// Payment action
implicit object PaymentActionFmt extends JsonFormat[PaymentAction] {
def read(raw: JsValue): PaymentAction = raw.asJsObject.fields(TAG) match {
case JsString("message") => raw.convertTo[MessageAction]
case JsString("aes") => raw.convertTo[AESAction]
case JsString("url") => raw.convertTo[UrlAction]
case _ => throw new Exception
}
def write(internal: PaymentAction): JsValue = internal match {
case paymentAction: MessageAction => paymentAction.toJson
case paymentAction: UrlAction => paymentAction.toJson
case paymentAction: AESAction => paymentAction.toJson
case _ => throw new Exception
}
}
implicit val aesActionFmt: JsonFormat[AESAction] = taggedJsonFmt(jsonFormat[Option[String], String, String, String, AESAction](AESAction.apply, "domain", "description", "ciphertext", "iv"), tag = "aes")
implicit val messageActionFmt: JsonFormat[MessageAction] = taggedJsonFmt(jsonFormat[Option[String], String, MessageAction](MessageAction.apply, "domain", "message"), tag = "message")
implicit val urlActionFmt: JsonFormat[UrlAction] = taggedJsonFmt(jsonFormat[Option[String], String, String, UrlAction](UrlAction.apply, "domain", "description", "url"), tag = "url")
// LNURL
implicit object LNUrlDataFmt extends JsonFormat[LNUrlData] {
def write(unserialized: LNUrlData): JsValue = throw new RuntimeException
def read(serialized: JsValue): LNUrlData = serialized.asJsObject fields TAG match {
case JsString("hostedChannelRequest") => serialized.convertTo[HostedChannelRequest]
case JsString("channelRequest") => serialized.convertTo[NormalChannelRequest]
case JsString("withdrawRequest") => serialized.convertTo[WithdrawRequest]
case JsString("payRequest") => serialized.convertTo[PayRequest]
case _ => throw new Exception
}
}
// Note: tag on these MUST start with lower case because it is defined that way on protocol level
implicit val normalChannelRequestFmt: JsonFormat[NormalChannelRequest] = taggedJsonFmt(jsonFormat[String, String, String,
NormalChannelRequest](NormalChannelRequest.apply, "uri", "callback", "k1"), tag = "channelRequest")
implicit val hostedChannelRequestFmt: JsonFormat[HostedChannelRequest] = taggedJsonFmt(jsonFormat[String, Option[String], String,
HostedChannelRequest](HostedChannelRequest.apply, "uri", "alias", "k1"), tag = "hostedChannelRequest")
implicit val withdrawRequestFmt: JsonFormat[WithdrawRequest] = taggedJsonFmt(jsonFormat[String, String, Long, String, Option[Long], Option[Long], Option[String], Option[String],
WithdrawRequest](WithdrawRequest.apply, "callback", "k1", "maxWithdrawable", "defaultDescription", "minWithdrawable", "balance", "balanceCheck", "payLink"), tag = "withdrawRequest")
implicit val payRequestFmt: JsonFormat[PayRequest] = taggedJsonFmt(jsonFormat[String, Long, Long, String, Option[Int],
PayRequest](PayRequest.apply, "callback", "maxSendable", "minSendable", "metadata", "commentAllowed"), tag = "payRequest")
implicit val payRequestFinalFmt: JsonFormat[PayRequestFinal] = jsonFormat[Option[PaymentAction], Option[Boolean], String, PayRequestFinal](PayRequestFinal.apply, "successAction", "disposable", "pr")
// Fiat feerates
implicit val blockchainInfoItemFmt: JsonFormat[BlockchainInfoItem] = jsonFormat[Double, BlockchainInfoItem](BlockchainInfoItem.apply, "last")
implicit val bitpayItemFmt: JsonFormat[BitpayItem] = jsonFormat[String, Double, BitpayItem](BitpayItem.apply, "code", "rate")
implicit val coinGeckoItemFmt: JsonFormat[CoinGeckoItem] = jsonFormat[Double, CoinGeckoItem](CoinGeckoItem.apply, "value")
implicit val coinGeckoFmt: JsonFormat[CoinGecko] = jsonFormat[CoinGeckoItemMap, CoinGecko](CoinGecko.apply, "rates")
implicit val bitpayFmt: JsonFormat[Bitpay] = jsonFormat[BitpayItemList, Bitpay](Bitpay.apply, "data")
implicit val fiatRatesInfoFmt: JsonFormat[FiatRatesInfo] = jsonFormat[Fiat2Btc, Fiat2Btc, Long, FiatRatesInfo](FiatRatesInfo.apply, "rates", "oldRates", "stamp")
// Chain feerates
implicit val bitGoFeeRateStructureFmt: JsonFormat[BitGoFeeRateStructure] = jsonFormat[Map[String, Long], Long, BitGoFeeRateStructure](BitGoFeeRateStructure.apply, "feeByBlockTarget", "feePerKb")
implicit val feeratePerKBFmt: JsonFormat[FeeratePerKB] = jsonFormat[Satoshi, FeeratePerKB](FeeratePerKB.apply, "feerate")
implicit val feeratesPerKBFmt: JsonFormat[FeeratesPerKB] =
jsonFormat[FeeratePerKB, FeeratePerKB, FeeratePerKB, FeeratePerKB, FeeratePerKB, FeeratePerKB, FeeratePerKB, FeeratePerKB, FeeratePerKB,
FeeratesPerKB](FeeratesPerKB.apply, "mempoolMinFee", "block_1", "blocks_2", "blocks_6", "blocks_12", "blocks_36", "blocks_72", "blocks_144", "blocks_1008")
implicit val feeratePerKwFmt: JsonFormat[FeeratePerKw] = jsonFormat[Satoshi, FeeratePerKw](FeeratePerKw.apply, "feerate")
implicit val feeratesPerKwFmt: JsonFormat[FeeratesPerKw] =
jsonFormat[FeeratePerKw, FeeratePerKw, FeeratePerKw, FeeratePerKw, FeeratePerKw, FeeratePerKw, FeeratePerKw, FeeratePerKw, FeeratePerKw,
FeeratesPerKw](FeeratesPerKw.apply, "mempoolMinFee", "block_1", "blocks_2", "blocks_6", "blocks_12", "blocks_36", "blocks_72", "blocks_144", "blocks_1008")
implicit val feeRatesInfoFmt: JsonFormat[FeeRatesInfo] = jsonFormat[FeeratesPerKw, List[FeeratesPerKB], Long, FeeRatesInfo](FeeRatesInfo.apply, "smoothed", "history", "stamp")
}
| btcontract/wallet | app/src/main/java/immortan/utils/ImplicitJsonFormats.scala | Scala | apache-2.0 | 13,474 |
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.zeromq
import scala.collection.immutable
import akka.util.ByteString
/**
* Deserializes ZeroMQ messages into an immutable sequence of frames
*/
class ZMQMessageDeserializer extends Deserializer {
def apply(frames: immutable.Seq[ByteString]): ZMQMessage = ZMQMessage(frames)
}
| Fincore/org.spark-project.akka | zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala | Scala | mit | 370 |
import java.net.URI
import scala.tools.nsc.doc.base._
import scala.tools.nsc.doc.model._
import scala.tools.partest.ScaladocModelTest
object Test extends ScaladocModelTest {
override def code = """
package scala.test.scaladoc.T8857
/**
* A link:
*
* [[scala.Option$ object Option]].
*/
class A
"""
def scalaURL = "https://www.scala-lang.org/api/current/"
// a non-canonical path to scala-library.jar should still work
override def scaladocSettings = {
val samplePath = getClass.getClassLoader.getResource("scala/Function1.class").getPath.replace('\\', '/')
val scalaLibPath = if(samplePath.contains("!")) { // in scala-library.jar
val scalaLibUri = samplePath.split("!")(0)
val p = new URI(scalaLibUri).getPath
// this is a bit fragile (depends on the scala library being in build/pack)
p.replace("/pack/lib/scala-library.jar", "/pack/bin/../lib/scala-library.jar")
} else { // individual class files on disk
val p = samplePath.dropRight("scala/Function1.class".length + 1)
p + "/.." + p.takeRight(p.length - p.lastIndexOf('/'))
}
s"-doc-external-doc $scalaLibPath#$scalaURL"
}
def testModel(rootPackage: Package) = {
// get the quick access implicit defs in scope (_package(s), _class(es), _trait(s), object(s) _method(s), _value(s))
import access._
val a = rootPackage._package("scala")._package("test")._package("scaladoc")._package("T8857")._class("A")
val links = countLinks(a.comment.get, _.link.isInstanceOf[LinkToExternalTpl])
assert(links == 1, s"$links == 1 (the links to external in class A)")
}
}
| scala/scala | test/scaladoc/run/t8557.scala | Scala | apache-2.0 | 1,660 |
/* Copyright 2009-2017 EPFL, Lausanne */
import leon.annotation._
import leon.lang._
object Functions7 {
def index(xs: Array[Int]): Unit = {
var i = 0
(while (i < xs.length) {
xs(i) = i + 1
i += 1
}) invariant (0 <= i && i <= xs.length)
}
def apply(fun: Array[Int] => Unit, xs: Array[Int]): Unit = fun(xs)
@extern
def main(args: Array[String]): Unit = _main()
def _main(): Int = {
val xs = Array(-1, -1, -1)
apply(index, xs)
xs(2) - xs(1) - xs(0)
} ensuring { _ == 0 }
}
| epfl-lara/leon | src/test/resources/regression/genc/valid/Functions7.scala | Scala | gpl-3.0 | 532 |
/* Copyright (C) 2016 Tomáš Janoušek
* This file is a part of locus-rflkt-addon.
* See the COPYING and LICENSE files in the project root directory.
*/
package cz.nomi.locusRflktAddon
object Formatters {
import RflktApi.Str
private val timeFormat = new java.text.SimpleDateFormat("HH:mm:ss")
def formatString(s: Option[String]): Str = Str(s.getOrElse("--"))
def formatTime(t: java.util.Date): Str = Str(timeFormat.format(t))
def formatDuration(totalSecondsOpt: Option[Long]): Str =
formatString {
totalSecondsOpt.map { totalSeconds =>
val seconds = totalSeconds % 60
val totalMinutes = totalSeconds / 60
val minutes = totalMinutes % 60
val totalHours = totalMinutes / 60
f"$totalHours%02d:$minutes%02d:$seconds%02d"
}
}
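// Worked example: formatDuration(Some(3725L)) splits 3725 s into 1 h, 2 min, 5 s and
// yields Str("01:02:05"); formatDuration(None) falls back to Str("--").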
def formatInt(i: Option[Int]): Str =
formatString(i.map(v => f"$v%d"))
def formatFloatRound(f: Option[Float]): Str =
formatString(f.map(v => f"$v%.0f"))
def formatDoubleRound(d: Option[Double]): Str =
formatString(d.map(v => f"$v%.0f"))
def formatFloatFixed(f: Option[Float]): Str =
formatString(f.map(v => f"$v%.1f"))
def formatDoubleFixed(d: Option[Double]): Str =
formatString(d.map(v => f"$v%.1f"))
def formatDouble(d: Option[Double]): Str =
formatString {
d.map { v =>
if (v.abs > 99) {
f"$v%.0f"
} else if (v.abs > 9) {
f"$v%.1f"
} else {
f"$v%.2f"
}
}
}
def normalizeString(s: String): String = {
import java.text.Normalizer
val split = Normalizer.normalize(s, Normalizer.Form.NFD)
"\\p{M}".r.replaceAllIn(split, "")
}
}
| liskin/locus-rflkt-addon | src/main/scala/cz/nomi/locusRflktAddon/Formatters.scala | Scala | gpl-3.0 | 1,666 |
package com.mfglabs.stream
import akka.NotUsed
import akka.stream._
import akka.stream.scaladsl._
import akka.stream.stage._
import akka.util.ByteString
import scala.concurrent._
import scala.concurrent.duration._
import akka.stream.scaladsl._
trait FlowExt {
/**
* Create a Flow which debounces messages with similar hashes (see the usage sketch below)
*/
def debounce[A](per: FiniteDuration, toHash: A => String): Flow[A, A, NotUsed] = {
Flow[A]
.via(Debounce(per, toHash))
.collect { case Debounced.Ok(elem) => elem }
}
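// Usage sketch (assuming the Debounce stage drops elements whose hash was already seen within `per`):
//   Flow[String].via(FlowExt.debounce(1.second, identity[String]))
// would emit "a" and "b" once each for the upstream "a", "a", "b" arriving within one second.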
/**
* Create a Flow whose creation depends on the first element of the upstream.
* @param includeHeadInUpStream true if we want the first element of the upstream to be included in the downstream.
* @param f takes the first element of upstream in input and returns the resulting flow
* @tparam A
* @tparam B
* @tparam M
* @return the flow returned by f
*/
def withHead[A, B, M](includeHeadInUpStream: Boolean)(f: A => Flow[A, B, M]): Flow[A, B, NotUsed] = {
Flow[A]
.prefixAndTail(1)
.map {
case (Seq(), _) => Source.empty
case (head +: _, tailStream) =>
if (includeHeadInUpStream) Source.combine(Source.single(head), tailStream)(Concat(_)).via(f(head))
else tailStream.via(f(head))
}
.flatMapConcat(identity)
}
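// Usage sketch: FlowExt.withHead(includeHeadInUpStream = true) { (first: Int) => Flow[Int].map(_ - first) }
// builds the rest of the flow from the first element; here every value is rebased against the head,
// so the input 3, 5, 9 yields 0, 2, 6.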
/**
* Zip a stream with the indices of its elements.
* @return
*/
def zipWithIndex[A]: Flow[A, (A, Long), NotUsed] = {
withHead(includeHeadInUpStream = false) { head =>
Flow[A].scan((head, 0L)) { case ((_, n), el) => (el, n + 1) }
}
}
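// Example: Source(List("a", "b", "c")).via(FlowExt.zipWithIndex) emits ("a", 0L), ("b", 1L), ("c", 2L);
// the first upstream element seeds the scan as its zero value, so it still appears downstream with index 0.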
/**
* Rechunk a stream of bytes according to a separator (see the usage sketch below).
* @param separator the separator to split the stream on. For example ByteString("\n") to split a stream by lines.
* @param maximumChunkBytes the maximum possible size of a split to send downstream (in bytes). If no separator is found
* before reaching this limit, the stream fails.
* @return
*/
def rechunkByteStringBySeparator(separator: ByteString, maximumChunkBytes: Int): Flow[ByteString, ByteString, NotUsed] = {
def stage = new PushPullStage[ByteString, ByteString] {
private val separatorBytes = separator
private val firstSeparatorByte = separatorBytes.head
private var buffer = ByteString.empty
private var nextPossibleMatch = 0
override def onPush(chunk: ByteString, ctx: Context[ByteString]): SyncDirective = {
buffer ++= chunk
emitChunkOrPull(ctx)
}
override def onPull(ctx: Context[ByteString]): SyncDirective = emitChunkOrPull(ctx)
private def emitChunkOrPull(ctx: Context[ByteString]): SyncDirective = {
val possibleMatchPos = buffer.indexOf(firstSeparatorByte, from = nextPossibleMatch)
if (possibleMatchPos == -1) {
// No matching character, we need to accumulate more bytes into the buffer
nextPossibleMatch = buffer.size
pushIfLastChunkOrElsePull(ctx)
} else if (possibleMatchPos + separatorBytes.size > buffer.size) {
// We have found a possible match (we found the first character of the terminator
// sequence) but we don't have yet enough bytes. We remember the position to
// retry from next time.
nextPossibleMatch = possibleMatchPos
pushIfLastChunkOrElsePull(ctx)
} else {
if (buffer.slice(possibleMatchPos, possibleMatchPos + separatorBytes.size) == separatorBytes) {
// Found a match
val nextChunk = buffer.slice(0, possibleMatchPos)
buffer = buffer.drop(possibleMatchPos + separatorBytes.size)
nextPossibleMatch -= possibleMatchPos + separatorBytes.size
ctx.push(nextChunk)
} else {
nextPossibleMatch += 1
pushIfLastChunkOrElsePull(ctx)
}
}
}
private def pushIfLastChunkOrElsePull(ctx: Context[ByteString]) = {
if (ctx.isFinishing) {
if (buffer.isEmpty) {
ctx.finish()
} else {
ctx.pushAndFinish(buffer) // last uncompleted line
}
}
else {
if (buffer.size > maximumChunkBytes)
ctx.fail(new IllegalStateException(s"Read ${buffer.size} bytes " +
s"which is more than $maximumChunkBytes without seeing a line terminator"))
else
ctx.pull()
}
}
override def onUpstreamFinish(ctx: Context[ByteString]): TerminationDirective = ctx.absorbTermination()
}
Flow[ByteString].transform(() => stage)
}
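// Usage sketch: FlowExt.rechunkByteStringBySeparator(ByteString("\n"), maximumChunkBytes = 8192)
// re-splits arbitrary chunks into lines without the separator: upstream chunks ByteString("foo\nba")
// and ByteString("r\n") yield ByteString("foo") and ByteString("bar"); a trailing unterminated
// line is flushed when the upstream completes.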
/**
* Limit downstream rate to one element every 'interval' by applying back-pressure on upstream.
* @param interval time interval to send one element downstream
* @tparam A
* @return
*/
def rateLimiter[A](interval: FiniteDuration): Flow[A, A, NotUsed] = {
case object Tick
val flow = Flow.fromGraph( GraphDSL.create() { implicit builder =>
import GraphDSL.Implicits._
val rateLimiter = Source.tick(0 second, interval, Tick)
val zip = builder.add(Zip[A, Tick.type]())
rateLimiter ~> zip.in1
FlowShape(zip.in0, zip.out)
}).map(_._1)
// We need to limit input buffer to 1 to guarantee the rate limiting feature
flow.withAttributes(Attributes.inputBuffer(initial = 1, max = 1))
}
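// Usage sketch: source.via(FlowExt.rateLimiter[Int](500.millis)) emits at most one element every
// 500 ms; back-pressure reaches upstream because each element must be zipped with a tick and the
// input buffer is capped at one element.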
/**
* Rechunk a stream of bytes according to a chunk size.
* @param chunkSize the new chunk size
* @return
*/
def rechunkByteStringBySize(chunkSize: Int): Flow[ByteString, ByteString, NotUsed] = {
def stage = new PushPullStage[ByteString, ByteString] {
private var buffer = ByteString.empty
override def onPush(elem: ByteString, ctx: Context[ByteString]): SyncDirective = {
buffer ++= elem
emitChunkOrPull(ctx)
}
override def onPull(ctx: Context[ByteString]): SyncDirective = emitChunkOrPull(ctx)
private def emitChunkOrPull(ctx: Context[ByteString]): SyncDirective = {
if (ctx.isFinishing) {
if (buffer.isEmpty) {
ctx.finish()
} else if (buffer.length < chunkSize) {
ctx.pushAndFinish(buffer)
} else {
val (emit, nextBuffer) = buffer.splitAt(chunkSize)
buffer = nextBuffer
ctx.push(emit)
}
} else {
if (buffer.length < chunkSize) {
ctx.pull()
} else {
val (emit, nextBuffer) = buffer.splitAt(chunkSize)
buffer = nextBuffer
ctx.push(emit)
}
}
}
override def onUpstreamFinish(ctx: Context[ByteString]): TerminationDirective = ctx.absorbTermination()
}
Flow[ByteString].transform(() => stage)
}
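// Example: FlowExt.rechunkByteStringBySize(4) turns the upstream chunks ByteString("abcdef") and
// ByteString("gh") into ByteString("abcd") and ByteString("efgh"); a remainder shorter than the
// chunk size is emitted when the upstream completes.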
/**
* Fold and/or unfold the stream with a user-defined function.
* @param zero initial state
* @param f takes current state and current elem, returns a seq of C elements to push downstream and the next state b
* if we want the stream to continue (if no new state b, the stream ends).
* @param lastPushIfUpstreamEnds if the upstream ends (before customStatefulProcessor decides to end the stream),
* this function is called on the last b state and the resulting c elements
* are pushed downstream as the last elements of the stream.
* @return
*/
def customStatefulProcessor[A, B, C](zero: => B)
(f: (B, A) => (Option[B], IndexedSeq[C]),
lastPushIfUpstreamEnds: B => IndexedSeq[C] = {_: B => IndexedSeq.empty}): Flow[A, C, NotUsed] = {
def stage = new PushPullStage[A, C] {
private var state: B = _
private var buffer = Vector.empty[C]
private var finishing = false
override def onPush(elem: A, ctx: Context[C]): SyncDirective = {
if (state == null) state = zero // to keep the laziness of zero
f(state, elem) match {
case (Some(b), cs) =>
state = b
buffer ++= cs
emitChunkOrPull(ctx)
case (None, cs) =>
buffer ++= cs
finishing = true
emitChunkOrPull(ctx)
}
}
override def onPull(ctx: Context[C]): SyncDirective = {
if (state == null) state = zero // to keep the laziness of zero
emitChunkOrPull(ctx)
}
private def emitChunkOrPull(ctx: Context[C]): SyncDirective = {
if (finishing) { // customProcessor is ending
buffer match {
case Seq() => ctx.finish()
case elem +: nextBuffer =>
buffer = nextBuffer
ctx.push(elem)
}
} else if (ctx.isFinishing) { // upstream ended
buffer match {
case Seq() =>
lastPushIfUpstreamEnds(state) match {
case Seq() => ctx.finish()
case elem +: nextBuffer =>
finishing = true
buffer = nextBuffer.toVector
ctx.push(elem)
}
case elem +: nextBuffer =>
buffer = nextBuffer
ctx.push(elem)
}
} else {
buffer match {
case Seq() => ctx.pull()
case elem +: nextBuffer =>
buffer = nextBuffer
ctx.push(elem)
}
}
}
override def onUpstreamFinish(ctx: Context[C]): TerminationDirective = ctx.absorbTermination()
}
Flow[A].transform(() => stage)
}
/**
* Unfold a stream with a user-defined function.
* @param f takes the current elem and returns a seq of B elems with a stop boolean (true means that we want the stream to stop after sending
* the joined seq of B elems)
* @tparam A
* @tparam B
* @return
*/
def customStatelessProcessor[A, B](f: A => (IndexedSeq[B], Boolean)): Flow[A, B, NotUsed] = {
customStatefulProcessor[A, Unit, B](())(
(_, a) => {
val (bs, stop) = f(a)
(if (stop) None else Some(()), bs)
}
)
}
/**
* Fold the stream and push the last B to downstream when upstream finishes.
* @param zero
* @param f
* @tparam A
* @tparam B
* @return
*/
def fold[A, B](zero: => B)(f: (B, A) => B): Flow[A, B, NotUsed] = {
customStatefulProcessor[A, B, B](zero)(
(b, a) => (Some(f(b, a)), Vector.empty),
b => Vector(b)
)
}
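// Example: Source(1 to 3).via(FlowExt.fold(0)((acc: Int, x: Int) => acc + x)) emits a single
// element, 6, and only after the upstream completes (via lastPushIfUpstreamEnds).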
/**
* Consume the stream while condition is true.
* @param f condition
* @tparam A
* @return
*/
def takeWhile[A](f: A => Boolean): Flow[A, A, NotUsed] = {
customStatelessProcessor { a =>
if (!f(a)) (Vector.empty, true)
else (Vector(a), false)
}
}
/**
* Zip a stream with a lazy future that will be evaluated only when the stream is materialized.
* @param futB
* @tparam A
* @tparam B
* @return
*/
def zipWithConstantLazyAsync[A, B](futB: => Future[B]): Flow[A, (A, B), NotUsed] = {
Flow.fromGraph( GraphDSL.create() { implicit builder =>
import GraphDSL.Implicits._
val zip = builder.add(Zip[A, B]())
SourceExt.constantLazyAsync(futB) ~> zip.in1
FlowShape(zip.in0, zip.out)
})
}
/**
* Repeat each element of the source 'nb' times.
* @param nb the number of repetitions
* @tparam A
* @return
*/
def repeatEach[A](nb: Int): Flow[A, A, NotUsed] = Flow[A].mapConcat(a => Vector.fill(nb)(a))
}
object FlowExt extends FlowExt
| MfgLabs/akka-stream-extensions | commons/src/main/scala/FlowExt.scala | Scala | apache-2.0 | 11,481 |
package byteR.cfg
import toplev.GenericPrintable
case class BBPred(val preds: List[BBStart]) extends GenericPrintable {
def prettyPrint = preds.mkString(", ")
}
| j-c-w/mlc | src/main/scala/byteR/cfg/BBPred.scala | Scala | gpl-3.0 | 165 |
/**
* Copyright (C) 2014-2015 Really Inc. <http://really.io>
*/
package io.really.model.loader
import java.io.{ File, FileInputStream }
import java.nio.file.{ Path, Paths }
import java.util
import java.util.LinkedHashMap
import akka.actor.ActorSystem
import _root_.io.really.model._
import _root_.io.really.R
import org.yaml.snakeyaml.Yaml
import scala.collection.JavaConversions._
import scala.collection.immutable.TreeMap
import scala.io.Source
case class InvalidField(reason: String) extends Exception(reason)
case class InvalidReferenceField(reason: String) extends Exception(reason)
case class InvalidModelFile(reason: String) extends Exception(reason)
class ModelLoader(dir: String, actorSystem: ActorSystem) {
private val yaml = new Yaml
private val modelFileName = "model.yaml"
private val directoryPath = Paths.get(dir)
private val mainDirectoryFile = new File(dir)
private val log = akka.event.Logging(actorSystem, "ModelLoader")
private val nameRegx = """\p{javaJavaIdentifierStart}[\p{javaJavaIdentifierPart}-]*""".r
private val migrationFileNameRegx = """evolution-\d+.\w+$"""
/**
* Load the models
* @return list of model objects
*/
private val modelsRegistry: Map[R, ModelInfo] = walkFilesTree(mainDirectoryFile).toMap
lazy val models: List[Model] = modelsRegistry.values.map {
modelInfo =>
Model(
modelInfo.r,
modelInfo.collectionMeta,
getFields(modelInfo.fields),
modelInfo.jsHooks,
modelInfo.migrationPlan,
modelInfo.subCollectionsR
)
}.toList
/**
* Walk through a directory and return modelInfo
* @param file
* @return
*/
private def walkFilesTree(file: File): Iterable[(R, ModelInfo)] = {
val children = new Iterable[File] {
def iterator = if (file.isDirectory) file.listFiles.iterator else Iterator.empty
}
if (file.isFile && file.getName == modelFileName) {
val modelInfo = readModelFile(file)
Seq((modelInfo.r, modelInfo))
} else if (file.isDirectory) children.flatMap(walkFilesTree(_))
else Seq.empty
}
/**
* Parse yaml file to get the model Info
* @param file yaml file
* @return model info
*/
def readModelFile(file: File): ModelInfo = {
// list of subCollectionsR; it tells us which collections are children of this collection
val subCollectionPaths = file.toPath.getParent.toFile
.listFiles.filter(_.isDirectory).map(f => getR(f.toPath)).toList
try {
val obj = yaml.load(new FileInputStream(file)).asInstanceOf[LinkedHashMap[String, AnyRef]]
val fields = obj.get("fields").asInstanceOf[LinkedHashMap[String, Object]]
val version = obj.get("version").toString.toLong
val parentPath = file.toPath.getParent
ModelInfo(getR(file.toPath.getParent), CollectionMetadata(version),
fields, getJsHooks(parentPath),
getMigrationPlan(parentPath),
subCollectionPaths)
} catch {
case e: Exception =>
log.error(e, s"Invalid yaml file; An error occurred while parsing this file $file")
throw new InvalidModelFile(s"Invalid yaml file; An error occurred while parsing this file $file")
}
}
/**
* takes the full path of a file and returns the R that represents this path (see the worked example below)
* @param modelFilePath
* @return
*/
private def getR(modelFilePath: Path): R = {
val relativePath = modelFilePath.toString.split(directoryPath.toString)(1)
R(relativePath.replaceAll("(?!^)/", "/*/"))
}
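// Worked example: with dir = "/models", the collection directory "/models/users/posts" gives
// relativePath "/users/posts"; every "/" except the leading one becomes "/*/", producing R("/users/*/posts").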
/**
* returns jsHooks object
* @param parent
* @return
*/
private def getJsHooks(parent: Path): JsHooks = {
def file(name: String): Option[File] = {
val files = parent.toFile.listFiles.filter(_.getName.matches(name))
if (!files.isEmpty) Some(files(0)) else None
}
JsHooks(
readJsFile(file("""on-validate.\\w+""")),
readJsFile(file("""pre-get.\\w+""")),
readJsFile(file("""/pre-delete.\\w+""")),
readJsFile(file("""/pre-update.\\w+""")),
readJsFile(file("""/post-create.\\w+""")),
readJsFile(file("""/post-update.\\w+""")),
readJsFile(file(""""/post-delete.\\w+"""))
)
}
/**
* reads a JavaScript file and returns it as a string
* @param file
* @return option of JsScript
*/
private def readJsFile(file: Option[File]): Option[JsScript] =
file match {
case Some(f) => Some(Source.fromFile(f).mkString)
case None => None
}
private def getMigrationPlan(parent: Path): MigrationPlan = {
val evolutionFiles = parent.toFile.listFiles.filter(_.getName.matches(migrationFileNameRegx)).iterator
val scripts = evolutionFiles map {
file =>
val matches = """\d+""".r.findAllIn(file.getName).matchData
val version = matches.mkString
readJsFile(Some(file)) match {
case Some(f) => Some((version.toLong, f))
case None => None
}
}
MigrationPlan(scripts.flatten.toMap)
}
private def getFields(fieldsMap: LinkedHashMap[String, Object]): Map[FieldKey, Field[_]] = {
val fields = fieldsMap.partition {
case (fieldKey, value) =>
if (!value.isInstanceOf[util.LinkedHashMap[_, _]]) {
throw new InvalidModelFile(s"No field configuration specified for key '$fieldKey'")
} else {
val field = value.asInstanceOf[LinkedHashMap[String, String]]
isValueField(field.get("type"))
}
}
val valueFields = parseValueFields(fields._1.toMap)
val otherFields = fields._2.map {
case (key, value) if (key.matches(nameRegx.toString)) =>
(key, getFieldObject(valueFields, key, value.asInstanceOf[LinkedHashMap[String, String]]))
case (key, value) =>
throw new InvalidField(s"Field name $key didn't match $nameRegx")
}
valueFields ++ TreeMap(otherFields.toArray: _*)(Ordering.by(_.toLowerCase))
}
private lazy val dataTypes: Map[String, DataType[_]] = Map(
"string" -> DataType.RString,
"double" -> DataType.RDouble,
"long" -> DataType.RLong,
"boolean" -> DataType.RBoolean
)
private def isValueField(kind: String) = dataTypes.contains(kind.toLowerCase)
private def parseValueFields(fields: Map[String, Object]): Map[FieldKey, ValueField[_]] = {
val valueFields = fields map {
case (k, v) =>
if (k.matches(nameRegx.toString)) {
val field = v.asInstanceOf[LinkedHashMap[String, String]]
val required = field.get("required").asInstanceOf[Boolean]
val default = Option(field.get("default"))
val validation = Option(field.get("validation"))
val kind = field.get("type")
(k, ValueField(k, dataTypes(kind.toLowerCase), validation, default, required))
} else
throw new InvalidField(s"Field name $k didn't match $nameRegx")
}
TreeMap(valueFields.toArray: _*)(Ordering.by(_.toLowerCase))
}
/**
* Validate the reference field data; make sure that the referenced collection exists
* and that its fields exist too
* @param r
* @param fields
* @return
*/
private def isValidReference(r: R, fields: List[String]): Boolean = {
modelsRegistry.get(r) match {
case Some(modelInfo) =>
modelInfo.fields.keySet().containsAll(fields)
case None =>
log.error(s"Invalid collectionR $r value for the reference field")
false
}
}
/**
* Reads the field's data and returns a Field object
* @param valueFields all ValueField objects, required to generate calculated fields
* @param fieldKey the Key name of this field
* @param field the field's data as Map
* @return Field Object
*/
protected def getFieldObject(valueFields: Map[FieldKey, ValueField[_]], fieldKey: String,
field: LinkedHashMap[String, String]): Field[_] = {
val required = field.get("required").asInstanceOf[Boolean]
field.get("type").toLowerCase match {
case "reference" =>
val fields = field.get("fields").asInstanceOf[util.ArrayList[String]].toList
val r = R(field.get("collectionR"))
if (isValidReference(r, fields))
ReferenceField(fieldKey, required, r, fields)
else
throw new InvalidReferenceField(s"Invalid Reference field $fieldKey, please check that you have provided" +
" a valid schema for this field")
case "calculated" =>
getCalculatedField(valueFields, fieldKey, field)
case other =>
throw new DataTypeException(s"Unsupported data type [$other] for $fieldKey field")
}
}
/**
* Parses field data and returns a calculated field
* @param valueFields
* @param fieldKey
* @param field
* @return
*/
private def getCalculatedField(valueFields: Map[FieldKey, ValueField[_]], fieldKey: String,
field: LinkedHashMap[String, String]): Field[_] = {
val dependencies = field.get("dependsOn").split(",")
dependencies.length match {
case 1 =>
val dep1 = valueFields(dependencies(0).trim)
CalculatedField1(fieldKey, dataTypes(field.get("valueType").toLowerCase), field.get("value"), dep1)
case 2 =>
val dep1 = valueFields(dependencies(0).trim)
val dep2 = valueFields(dependencies(1).trim)
CalculatedField2(fieldKey, dataTypes(field.get("valueType").toLowerCase), field.get("value"), dep1, dep2)
case 3 =>
val dep1 = valueFields(dependencies(0).trim)
val dep2 = valueFields(dependencies(1).trim)
val dep3 = valueFields(dependencies(2).trim)
CalculatedField3(fieldKey, dataTypes(field.get("valueType").toLowerCase), field.get("value"), dep1, dep2, dep3)
case _ =>
throw new DataTypeException(s"Unsupported valueType of calculated field; Maximum length " +
s"of field's dependencies is 3")
}
}
}
/**
* Define the model info
* @param r
* @param collectionMeta
* @param fields
* @param jsHooks
* @param migrationPlan
* @param subCollectionsR
*/
case class ModelInfo(
r: R,
collectionMeta: CollectionMetadata,
fields: LinkedHashMap[String, Object],
jsHooks: JsHooks,
migrationPlan: MigrationPlan,
subCollectionsR: List[R]
)
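// A minimal sketch (not part of the loader above) of the YAML shape that readModelFile
// expects: a top-level "version" and a "fields" map, parsed with SnakeYAML as in the
// loader. The object name and sample document below are illustrative assumptions only.
object ModelYamlSketch extends App {
  import java.util.{LinkedHashMap => JLinkedHashMap}
  import org.yaml.snakeyaml.Yaml

  val sampleModelYaml =
    """version: 1
      |fields:
      |  name:
      |    type: string
      |    required: true
      |""".stripMargin

  val yaml = new Yaml()
  // A YAML mapping loads as a java.util.LinkedHashMap, matching the casts used above.
  val obj = yaml.load(sampleModelYaml).asInstanceOf[JLinkedHashMap[String, AnyRef]]
  val version = obj.get("version").toString.toLong
  val fields = obj.get("fields").asInstanceOf[JLinkedHashMap[String, AnyRef]]
  println(s"version=$version, fieldKeys=${fields.keySet()}")
}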
|
reallylabs/really
|
modules/really-core/src/main/scala/io/really/model/loader/ModelLoader.scala
|
Scala
|
apache-2.0
| 10,148
|
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend.component.highlevelserver.marshaller
import com.github.dnvriend.component.highlevelserver.dto.{ Person, PersonWithId }
import spray.json.DefaultJsonProtocol
trait Marshaller extends DefaultJsonProtocol {
// the jsonFormats for Person and PersonWithId
implicit val personJsonFormat = jsonFormat3(Person)
implicit val personWithIdJsonFormat = jsonFormat4(PersonWithId)
}
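// A self-contained sketch of how spray-json's jsonFormatN derivation is used, analogous
// to the formats above. Address is a hypothetical case class for illustration only;
// Person's actual fields are not shown in this file.
object MarshallerSketch extends App {
  import spray.json._
  import spray.json.DefaultJsonProtocol._

  case class Address(street: String, city: String)
  // Derive a format from the case class constructor, as jsonFormat3/jsonFormat4 do above.
  implicit val addressJsonFormat: RootJsonFormat[Address] = jsonFormat2(Address)

  val json = Address("Main St 1", "Utrecht").toJson // a JsValue like {"street":"Main St 1","city":"Utrecht"}
  val back = json.convertTo[Address]                // round-trips back to the case class
  println(json.compactPrint + " -> " + back)
}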
|
dnvriend/akka-http-test
|
app/com/github/dnvriend/component/highlevelserver/marshaller/Marshaller.scala
|
Scala
|
apache-2.0
| 1,003
|
package dao
import com.google.inject.ImplementedBy
import dao.impl.ComputerDAOImpl
import model.{Computer, ComputerState, ConnectedUser}
import services.state.ActionState
import scala.concurrent.Future
/**
* Performs Computer database actions.
*
* @author Camilo Sampedro <camilo.sampedro@udea.edu.co>
*/
@ImplementedBy(classOf[ComputerDAOImpl])
trait ComputerDAO {
def get(severalComputers: List[String]): Future[Seq[Computer]]
def listAllSimple: Future[Seq[Computer]]
/**
* Adds a new computer
*
* @param computer Computer to add
* @return Result String message
*/
def add(computer: Computer): Future[ActionState]
/**
* Gets a computer based on its IP
*
* @param ip Computer's IP
* @return Some Computer found or None if it's not found.
*/
def get(ip: String): Future[Option[Computer]]
def getWithStatus(severalIps: List[String]): Future[Seq[(Computer, Option[ComputerState], Option[ConnectedUser])]]
def getWithStatus(ip: String): Future[Seq[(Computer, Option[ComputerState], Option[ConnectedUser])]]
/**
* Deletes a computer from database
*
* @param ip Computer's IP
* @return Operation result
*/
def delete(ip: String): Future[ActionState]
/**
* Lists all computers in the database.
*
* @return All computers found.
*/
def listAll: Future[Seq[(Computer, Option[ComputerState], Option[ConnectedUser])]]
def edit(computer: Computer): Future[ActionState]
}
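// A hedged usage sketch of how a service might consume ComputerDAO. The injected
// instance and the execution context are assumptions; this file only defines the
// trait, while the actual wiring is done by the @ImplementedBy binding.
import javax.inject.Inject
import scala.concurrent.ExecutionContext

class ComputerServiceSketch @Inject()(computerDAO: ComputerDAO)(implicit ec: ExecutionContext) {

  // Look a computer up by IP and fall back to a summary of the full listing if absent.
  def describe(ip: String): Future[String] =
    computerDAO.get(ip).flatMap {
      case Some(computer) => Future.successful(s"Found: $computer")
      case None           => computerDAO.listAllSimple.map(all => s"Not found; ${all.size} computers known")
    }
}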
|
ProjectAton/AtonLab
|
app/dao/ComputerDAO.scala
|
Scala
|
gpl-3.0
| 1,481
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.features
import org.apache.spark.deploy.k8s.{KubernetesConf, SparkPod}
import org.apache.spark.deploy.k8s.Config.{KUBERNETES_DRIVER_SERVICE_ACCOUNT_NAME, KUBERNETES_EXECUTOR_SERVICE_ACCOUNT_NAME}
import org.apache.spark.deploy.k8s.KubernetesUtils.buildPodWithServiceAccount
private[spark] class ExecutorKubernetesCredentialsFeatureStep(kubernetesConf: KubernetesConf)
extends KubernetesFeatureConfigStep {
private lazy val driverServiceAccount = kubernetesConf.get(KUBERNETES_DRIVER_SERVICE_ACCOUNT_NAME)
private lazy val executorServiceAccount =
kubernetesConf.get(KUBERNETES_EXECUTOR_SERVICE_ACCOUNT_NAME)
override def configurePod(pod: SparkPod): SparkPod = {
pod.copy(
// If the service account is not set by the pod template, fall back to the executor's;
// if the executor's is not set either, the last option is the driver's.
pod = if (Option(pod.pod.getSpec.getServiceAccount).isEmpty) {
buildPodWithServiceAccount(executorServiceAccount
.orElse(driverServiceAccount), pod).getOrElse(pod.pod)
} else {
pod.pod
})
}
}
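// A minimal sketch (plain Options, not the Spark/Kubernetes API) of the fallback order
// the step above implements: keep the pod template's service account if present,
// otherwise prefer the executor's, and finally the driver's. Names are illustrative.
object ServiceAccountFallbackSketch extends App {
  def chooseServiceAccount(
      podTemplateSa: Option[String],
      executorSa: Option[String],
      driverSa: Option[String]): Option[String] =
    podTemplateSa.orElse(executorSa).orElse(driverSa)

  println(chooseServiceAccount(None, None, Some("driver-sa")))             // Some(driver-sa)
  println(chooseServiceAccount(None, Some("exec-sa"), Some("driver-sa")))  // Some(exec-sa)
  println(chooseServiceAccount(Some("tpl-sa"), Some("exec-sa"), None))     // Some(tpl-sa)
}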
|
maropu/spark
|
resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/ExecutorKubernetesCredentialsFeatureStep.scala
|
Scala
|
apache-2.0
| 1,934
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import javax.annotation.Nullable
import scala.collection.Map
import com.fasterxml.jackson.annotation.JsonTypeInfo
import org.apache.spark.TaskEndReason
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.executor.{ExecutorMetrics, TaskMetrics}
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.storage.{BlockManagerId, BlockUpdatedInfo}
@DeveloperApi
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "Event")
trait SparkListenerEvent {
/* Whether output this event to the event log */
protected[spark] def logEvent: Boolean = true
}
@DeveloperApi
case class SparkListenerStageSubmitted(stageInfo: StageInfo, properties: Properties = null)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerStageCompleted(stageInfo: StageInfo) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerTaskStart(stageId: Int, stageAttemptId: Int, taskInfo: TaskInfo)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerTaskGettingResult(taskInfo: TaskInfo) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerSpeculativeTaskSubmitted(
stageId: Int,
stageAttemptId: Int = 0)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerTaskEnd(
stageId: Int,
stageAttemptId: Int,
taskType: String,
reason: TaskEndReason,
taskInfo: TaskInfo,
taskExecutorMetrics: ExecutorMetrics,
// may be null if the task has failed
@Nullable taskMetrics: TaskMetrics)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerJobStart(
jobId: Int,
time: Long,
stageInfos: Seq[StageInfo],
properties: Properties = null)
extends SparkListenerEvent {
// Note: this is here for backwards-compatibility with older versions of this event which
// only stored stageIds and not StageInfos:
val stageIds: Seq[Int] = stageInfos.map(_.stageId)
}
@DeveloperApi
case class SparkListenerJobEnd(
jobId: Int,
time: Long,
jobResult: JobResult)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerEnvironmentUpdate(environmentDetails: Map[String, Seq[(String, String)]])
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerBlockManagerAdded(
time: Long,
blockManagerId: BlockManagerId,
maxMem: Long,
maxOnHeapMem: Option[Long] = None,
maxOffHeapMem: Option[Long] = None) extends SparkListenerEvent {
}
@DeveloperApi
case class SparkListenerBlockManagerRemoved(time: Long, blockManagerId: BlockManagerId)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerUnpersistRDD(rddId: Int) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorAdded(time: Long, executorId: String, executorInfo: ExecutorInfo)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorRemoved(time: Long, executorId: String, reason: String)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorBlacklisted(
time: Long,
executorId: String,
taskFailures: Int)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorBlacklistedForStage(
time: Long,
executorId: String,
taskFailures: Int,
stageId: Int,
stageAttemptId: Int)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerNodeBlacklistedForStage(
time: Long,
hostId: String,
executorFailures: Int,
stageId: Int,
stageAttemptId: Int)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorUnblacklisted(time: Long, executorId: String)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerNodeBlacklisted(
time: Long,
hostId: String,
executorFailures: Int)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerNodeUnblacklisted(time: Long, hostId: String)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerBlockUpdated(blockUpdatedInfo: BlockUpdatedInfo) extends SparkListenerEvent
/**
* Periodic updates from executors.
* @param execId executor id
* @param accumUpdates sequence of (taskId, stageId, stageAttemptId, accumUpdates)
* @param executorUpdates executor level per-stage metrics updates
*/
@DeveloperApi
case class SparkListenerExecutorMetricsUpdate(
execId: String,
accumUpdates: Seq[(Long, Int, Int, Seq[AccumulableInfo])],
executorUpdates: Map[(Int, Int), ExecutorMetrics] = Map.empty)
extends SparkListenerEvent
/**
* Peak metric values for the executor for the stage, written to the history log at stage
* completion.
* @param execId executor id
* @param stageId stage id
* @param stageAttemptId stage attempt
* @param executorMetrics executor level metrics peak values
*/
@DeveloperApi
case class SparkListenerStageExecutorMetrics(
execId: String,
stageId: Int,
stageAttemptId: Int,
executorMetrics: ExecutorMetrics)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerApplicationStart(
appName: String,
appId: Option[String],
time: Long,
sparkUser: String,
appAttemptId: Option[String],
driverLogs: Option[Map[String, String]] = None,
driverAttributes: Option[Map[String, String]] = None) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerApplicationEnd(time: Long) extends SparkListenerEvent
/**
* An internal class that describes the metadata of an event log.
*/
@DeveloperApi
case class SparkListenerLogStart(sparkVersion: String) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerResourceProfileAdded(resourceProfile: ResourceProfile)
extends SparkListenerEvent
/**
* Interface for listening to events from the Spark scheduler. Most applications should probably
* extend SparkListener or SparkFirehoseListener directly, rather than implementing this class.
*
* Note that this is an internal interface which might change in different Spark releases.
*/
private[spark] trait SparkListenerInterface {
/**
* Called when a stage completes successfully or fails, with information on the completed stage.
*/
def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit
/**
* Called when a stage is submitted
*/
def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit
/**
* Called when a task starts
*/
def onTaskStart(taskStart: SparkListenerTaskStart): Unit
/**
* Called when a task begins remotely fetching its result (will not be called for tasks that do
* not need to fetch the result remotely).
*/
def onTaskGettingResult(taskGettingResult: SparkListenerTaskGettingResult): Unit
/**
* Called when a task ends
*/
def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit
/**
* Called when a job starts
*/
def onJobStart(jobStart: SparkListenerJobStart): Unit
/**
* Called when a job ends
*/
def onJobEnd(jobEnd: SparkListenerJobEnd): Unit
/**
* Called when environment properties have been updated
*/
def onEnvironmentUpdate(environmentUpdate: SparkListenerEnvironmentUpdate): Unit
/**
* Called when a new block manager has joined
*/
def onBlockManagerAdded(blockManagerAdded: SparkListenerBlockManagerAdded): Unit
/**
* Called when an existing block manager has been removed
*/
def onBlockManagerRemoved(blockManagerRemoved: SparkListenerBlockManagerRemoved): Unit
/**
* Called when an RDD is manually unpersisted by the application
*/
def onUnpersistRDD(unpersistRDD: SparkListenerUnpersistRDD): Unit
/**
* Called when the application starts
*/
def onApplicationStart(applicationStart: SparkListenerApplicationStart): Unit
/**
* Called when the application ends
*/
def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit
/**
* Called when the driver receives task metrics from an executor in a heartbeat.
*/
def onExecutorMetricsUpdate(executorMetricsUpdate: SparkListenerExecutorMetricsUpdate): Unit
/**
* Called with the peak memory metrics for a given (executor, stage) combination. Note that this
* is only present when reading from the event log (as in the history server), and is never
* called in a live application.
*/
def onStageExecutorMetrics(executorMetrics: SparkListenerStageExecutorMetrics): Unit
/**
* Called when the driver registers a new executor.
*/
def onExecutorAdded(executorAdded: SparkListenerExecutorAdded): Unit
/**
* Called when the driver removes an executor.
*/
def onExecutorRemoved(executorRemoved: SparkListenerExecutorRemoved): Unit
/**
* Called when the driver blacklists an executor for a Spark application.
*/
def onExecutorBlacklisted(executorBlacklisted: SparkListenerExecutorBlacklisted): Unit
/**
* Called when the driver blacklists an executor for a stage.
*/
def onExecutorBlacklistedForStage(
executorBlacklistedForStage: SparkListenerExecutorBlacklistedForStage): Unit
/**
* Called when the driver blacklists a node for a stage.
*/
def onNodeBlacklistedForStage(nodeBlacklistedForStage: SparkListenerNodeBlacklistedForStage): Unit
/**
* Called when the driver re-enables a previously blacklisted executor.
*/
def onExecutorUnblacklisted(executorUnblacklisted: SparkListenerExecutorUnblacklisted): Unit
/**
* Called when the driver blacklists a node for a Spark application.
*/
def onNodeBlacklisted(nodeBlacklisted: SparkListenerNodeBlacklisted): Unit
/**
* Called when the driver re-enables a previously blacklisted node.
*/
def onNodeUnblacklisted(nodeUnblacklisted: SparkListenerNodeUnblacklisted): Unit
/**
* Called when the driver receives a block update info.
*/
def onBlockUpdated(blockUpdated: SparkListenerBlockUpdated): Unit
/**
* Called when a speculative task is submitted
*/
def onSpeculativeTaskSubmitted(speculativeTask: SparkListenerSpeculativeTaskSubmitted): Unit
/**
* Called when other events like SQL-specific events are posted.
*/
def onOtherEvent(event: SparkListenerEvent): Unit
/**
* Called when a Resource Profile is added to the manager.
*/
def onResourceProfileAdded(event: SparkListenerResourceProfileAdded): Unit
}
/**
* :: DeveloperApi ::
* A default implementation for `SparkListenerInterface` that has no-op implementations for
* all callbacks.
*
* Note that this is an internal interface which might change in different Spark releases.
*/
@DeveloperApi
abstract class SparkListener extends SparkListenerInterface {
override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = { }
override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit = { }
override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { }
override def onTaskGettingResult(taskGettingResult: SparkListenerTaskGettingResult): Unit = { }
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = { }
override def onJobStart(jobStart: SparkListenerJobStart): Unit = { }
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = { }
override def onEnvironmentUpdate(environmentUpdate: SparkListenerEnvironmentUpdate): Unit = { }
override def onBlockManagerAdded(blockManagerAdded: SparkListenerBlockManagerAdded): Unit = { }
override def onBlockManagerRemoved(
blockManagerRemoved: SparkListenerBlockManagerRemoved): Unit = { }
override def onUnpersistRDD(unpersistRDD: SparkListenerUnpersistRDD): Unit = { }
override def onApplicationStart(applicationStart: SparkListenerApplicationStart): Unit = { }
override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = { }
override def onExecutorMetricsUpdate(
executorMetricsUpdate: SparkListenerExecutorMetricsUpdate): Unit = { }
override def onStageExecutorMetrics(
executorMetrics: SparkListenerStageExecutorMetrics): Unit = { }
override def onExecutorAdded(executorAdded: SparkListenerExecutorAdded): Unit = { }
override def onExecutorRemoved(executorRemoved: SparkListenerExecutorRemoved): Unit = { }
override def onExecutorBlacklisted(
executorBlacklisted: SparkListenerExecutorBlacklisted): Unit = { }
override def onExecutorBlacklistedForStage(
executorBlacklistedForStage: SparkListenerExecutorBlacklistedForStage): Unit = { }
override def onNodeBlacklistedForStage(
nodeBlacklistedForStage: SparkListenerNodeBlacklistedForStage): Unit = { }
override def onExecutorUnblacklisted(
executorUnblacklisted: SparkListenerExecutorUnblacklisted): Unit = { }
override def onNodeBlacklisted(
nodeBlacklisted: SparkListenerNodeBlacklisted): Unit = { }
override def onNodeUnblacklisted(
nodeUnblacklisted: SparkListenerNodeUnblacklisted): Unit = { }
override def onBlockUpdated(blockUpdated: SparkListenerBlockUpdated): Unit = { }
override def onSpeculativeTaskSubmitted(
speculativeTask: SparkListenerSpeculativeTaskSubmitted): Unit = { }
override def onOtherEvent(event: SparkListenerEvent): Unit = { }
override def onResourceProfileAdded(event: SparkListenerResourceProfileAdded): Unit = { }
}
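// A hedged sketch of a custom listener: extend the no-op SparkListener above and
// override only the callbacks of interest. Registration (for example via
// SparkContext.addSparkListener or the spark.extraListeners configuration) is
// outside the scope of this file; the class name is illustrative.
class StageTimingListenerSketch extends SparkListener {
  override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit = {
    // Log the stage id when it is submitted.
    println(s"Stage ${stageSubmitted.stageInfo.stageId} submitted")
  }

  override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = {
    // Report how many tasks the completed stage ran.
    val info = stageCompleted.stageInfo
    println(s"Stage ${info.stageId} completed with ${info.numTasks} tasks")
  }
}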
|
spark-test/spark
|
core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala
|
Scala
|
apache-2.0
| 14,025
|
package breeze.optimize.linear
import collection.mutable.ArrayBuffer
import breeze.linalg._
import org.apache.commons.math3.optim.linear._
import org.apache.commons.math3.optim.nonlinear.scalar._
import scala.collection.JavaConverters._
/**
* DSL for linear programs. Instances are not thread-safe; create a separate instance per thread.
*
* Basic example:
* {{{
* val lp = new LinearProgram()
* import lp._
* val x = Real("x")
* val y = Real("y")
*
* val result = maximize ( (x * 3 + y * 4)
* subjectTo( x <= 3, y <= 1))
*
* result.valueOf(x) // 3
*
* }}}
* @author dlwh
*/
class LinearProgram {
private var _nextId = 0
private def nextId = {
_nextId += 1
_nextId - 1
}
private val variables = new ArrayBuffer[Variable]()
sealed trait Problem { outer =>
def objective: Expression
def constraints: IndexedSeq[Constraint]
def subjectTo(constraints : Constraint*):Problem = {
val cons = constraints
new Problem {
def objective = outer.objective
def constraints = outer.constraints ++ cons
}
}
override def toString = (
"maximize " + objective + {
if(constraints.nonEmpty) {
"\nsubject to " + constraints.mkString("\n" + " " * "subject to ".length)
} else ""
}
)
}
/**
* Anything that can be built up from adding/subtracting/dividing and multiplying by constants
*/
sealed trait Expression extends Problem{ outer =>
def coefficients: Vector[Double]
def scalarComponent: Double = 0
def objective = this
def constraints: IndexedSeq[Constraint] = IndexedSeq.empty
def +(other: Expression):Expression = new Expression {
def coefficients: Vector[Double] = outer.coefficients + other.coefficients
override def scalarComponent: Double = outer.scalarComponent + other.scalarComponent
override def toString = outer.toString + " + " + other
}
def +(other: Double):Expression = new Expression {
def coefficients: Vector[Double] = outer.coefficients
override def scalarComponent: Double = outer.scalarComponent + other
override def toString = outer.toString + " + " + other
}
def -(other: Expression):Expression = new Expression {
def coefficients: Vector[Double] = outer.coefficients - other.coefficients
override def scalarComponent: Double = outer.scalarComponent - other.scalarComponent
override def toString = outer.toString + " - " + other
}
def -(other: Double):Expression = new Expression {
def coefficients: Vector[Double] = outer.coefficients
override def scalarComponent: Double = outer.scalarComponent - other
override def toString = outer.toString + " - " + other
}
def unary_- :Expression = new Expression {
def coefficients: Vector[Double] = outer.coefficients * -1.0
override def scalarComponent: Double = -outer.scalarComponent
override def toString = s"-($outer)"
}
def <=(rhs_ : Expression):Constraint = new Constraint {
def relation: Relation = LTE
def lhs = outer
def rhs = rhs_
}
def <=(c: Double):Constraint = new Constraint {
def relation: Relation = LTE
def lhs = outer
def rhs = new Expression {
def coefficients = SparseVector.zeros[Double](variables.length)
override def scalarComponent = c
override def toString = c.toString
}
}
def >=(rhs_ : Expression):Constraint = new Constraint {
def relation: Relation = GTE
def lhs = outer
def rhs = rhs_
}
def >=(c: Double):Constraint = new Constraint {
def relation: Relation = GTE
def lhs = outer
def rhs = new Expression {
def coefficients = SparseVector.zeros[Double](variables.length)
override def scalarComponent = c
override def toString = c.toString
}
}
def =:=(rhs_ : Expression):Constraint = new Constraint {
def relation: Relation = EQ
def lhs = outer
def rhs = rhs_
}
def =:=(c: Double):Constraint = new Constraint {
def relation: Relation = EQ
def lhs = outer
def rhs = new Expression {
def coefficients = SparseVector.zeros[Double](variables.length)
override def scalarComponent = c
override def toString = c.toString
}
}
def *(c: Double) = new Expression {
def coefficients = outer.coefficients * c
override def scalarComponent = outer.scalarComponent * c
override def toString = s"($outer) * $c"
}
def *:(c: Double) = new Expression {
def coefficients = outer.coefficients * c
override def scalarComponent = outer.scalarComponent * c
override def toString = s"$c * ($outer)"
}
}
sealed abstract class Relation(val operator: String)
case object LTE extends Relation("<=")
case object GTE extends Relation(">=")
case object EQ extends Relation("=:=")
sealed trait Constraint { outer =>
def lhs: Expression
def rhs: Expression
def relation: Relation
override def toString() = s"$lhs ${relation.operator} $rhs"
def standardize: Constraint = new Constraint {
def relation: Relation = outer.relation
def lhs = new Expression {
def coefficients = outer.lhs.coefficients - outer.rhs.coefficients
override def scalarComponent = 0.0
}
def rhs = new Expression {
def coefficients = SparseVector.zeros[Double](variables.length)
override def scalarComponent = outer.rhs.scalarComponent - outer.lhs.scalarComponent
}
}
}
sealed trait Variable extends Expression {
def name: String
def id : Int
def size: Int = 1
override def toString = name
}
case class Real(name: String = "x_" + nextId) extends Variable { variable =>
val id = variables.length
variables += this
def coefficients = {
val v = SparseVector.zeros[Double](variables.length)
for(i <- 0 until size) v(id + i) = 1.0
v
}
}
case class Integer(name: String = "x_" + nextId) extends Variable { variable =>
val id = variables.length
variables += this
def coefficients = {
val v = SparseVector.zeros[Double](variables.length)
for(i <- 0 until size) v(id + i) = 1.0
v
}
}
case class Binary(name: String = "x_" + nextId) extends Variable { variable =>
val id = variables.length
variables += this
def coefficients = {
val v = SparseVector.zeros[Double](variables.length)
for(i <- 0 until size) v(id + i) = 1.0
v
}
}
/* I thought that interior point defaulted to requiring all variables to be positive. I appear to be wrong.
case class Real(name: String="x_" + nextId) extends Variable {
val id = variables.length
variables += this
variables += this
def coefficients = {
val v = SparseVector.zeros[Double](variables.length)
v(id) = 1
v(id+1) = -1
v
}
}
*/
case class Result(result: DenseVector[Double], problem: Problem) {
def valueOf(x: Expression):Double = {(result dot x.coefficients) + x.scalarComponent}
def value = valueOf(problem.objective)
}
def maximize(objective: Problem)(implicit solver: LinearProgram.Solver) = solver.maximize(this)(objective)
}
object LinearProgram {
trait Solver {
def maximize(lp: LinearProgram)(obj: lp.Problem):lp.Result
}
implicit val mySolver = try {
// NativeLPSolver
// } catch {
// case ex: SecurityException =>
ApacheSimplexSolver
// case ex: UnsatisfiedLinkError =>
// ApacheSimplexSolver
}
object ApacheSimplexSolver extends Solver {
def maximize(lp: LinearProgram)(objective: lp.Problem):lp.Result = {
import lp._
def relationToConstraintType(r: Relation) = r match {
case LTE => Relationship.LEQ
case GTE => Relationship.GEQ
case EQ => Relationship.EQ
}
val obj = new LinearObjectiveFunction(objective.objective.coefficients.toDenseVector.data, objective.objective.scalarComponent)
for(v <- variables) if(!v.isInstanceOf[lp.Real]) throw new UnsupportedOperationException("Apache Solver can only handle real-valued lps!")
val constraints = for( c: Constraint <- objective.constraints) yield {
val cs = c.standardize
new LinearConstraint(cs.lhs.coefficients.toDenseVector.data, relationToConstraintType(c.relation), cs.rhs.scalarComponent)
}
val sol = new SimplexSolver().optimize(obj, new LinearConstraintSet(constraints.asJava), GoalType.MAXIMIZE)
Result(new DenseVector(sol.getPoint),objective)
}
}
/*
object NativeLPSolver extends Solver {
LpSolve.lpSolveVersion()
def maximize(lp: LinearProgram)(objective: lp.Problem): lp.Result = {
val lpsol = LpSolve.makeLp(0, lp.variables.length)
try {
import lp._
def relationToConstraintType(r: Relation) = r match {
case LTE => LpSolve.LE
case GTE => LpSolve.GE
case EQ => LpSolve.EQ
}
lpsol.setVerbose(LpSolve.IMPORTANT)
for( (v, i) <- variables.zipWithIndex) {
v match {
case x: Real =>
case x: Integer => lpsol.setInt(i+1, true)
case x: Binary => lpsol.setBinary(i+1, true)
}
}
for( c <- objective.constraints) yield {
val cs = c.standardize
lpsol.addConstraint(0.0 +: cs.lhs.coefficients.toDenseVector.data, relationToConstraintType(cs.relation), cs.rhs.scalarComponent)
}
lpsol.setObjFn(objective.objective.scalarComponent +: objective.objective.coefficients.toDenseVector.data)
lpsol.setMaxim()
val status = lpsol.solve()
val result = status match {
case 0 =>
val result = lp.Result(new DenseVector(lpsol.getPtrVariables), objective)
result
case LpSolve.UNBOUNDED =>
throw new UnboundedSolutionException
case LpSolve.INFEASIBLE =>
throw new InfeasibleProblem(objective)
case _ =>
throw new RuntimeException("Optimization failed with status: " + lpStatusToString(status) +"(" + status +")")
}
result
} finally {
lpsol.deleteLp()
}
}
def lpStatusToString(status: Int) = status match {
case -5 => "UnknownError"
case -4 => "DataIgnored"
case -3 => "NoBfp"
case -2 => "NoMemory"
case -1 => "NotRun"
case 0 => "Optimal"
case 1 => "Suboptimal"
case 2 => "Infeasible"
case 3 => "Unbounded"
case 4 => "Degenerate"
case 5 => "NumFailure"
case 6 => "UserAbort"
case 7 => "TimeOut"
case 8 => "Running"
case 9 => "FutureStatus"
case _ => "Unknown"
}
}
*/
}
case class InfeasibleProblem(prob: LinearProgram#Problem) extends RuntimeException
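// A hedged usage sketch for the DSL above, mirroring the class-level example:
// maximize 3x + 4y subject to x <= 3 and y <= 1 (expected optimum around 13.0).
// The object name is illustrative; the implicit Apache simplex solver is picked up
// from the LinearProgram companion object.
object LinearProgramSketch extends App {
  val lp = new LinearProgram()
  import lp._

  val x = Real()
  val y = Real()

  val result = maximize((x * 3 + y * 4) subjectTo (x <= 3.0, y <= 1.0))
  println(result.valueOf(x)) // ~3.0
  println(result.value)      // ~13.0
}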
|
wavelets/breeze
|
src/main/scala/breeze/optimize/linear/LinearProgram.scala
|
Scala
|
apache-2.0
| 10,954
|
package org.bitcoins.core.protocol.script
import org.bitcoins.core.script.bitwise.OP_EQUALVERIFY
import org.bitcoins.core.script.constant._
import org.bitcoins.core.script.crypto.{OP_CHECKSIG, OP_HASH160}
import org.bitcoins.core.script.stack.OP_DUP
import org.bitcoins.core.util.BitcoinScriptUtil
import org.bitcoins.crypto.{CryptoUtil, ECPublicKey}
import org.bitcoins.testkitcore.Implicits._
import org.bitcoins.testkitcore.gen.CryptoGenerators
import org.bitcoins.testkitcore.util.{BitcoinSUnitTest, TestUtil}
import scodec.bits.ByteVector
/** Created by chris on 1/14/16.
*/
class ScriptPubKeyTest extends BitcoinSUnitTest {
val expectedAsm: Seq[ScriptToken] =
List(OP_DUP,
OP_HASH160,
BytesToPushOntoStack(20),
ScriptConstant("31a420903c05a0a7de2de40c9f02ebedbacdc172"),
OP_EQUALVERIFY,
OP_CHECKSIG)
//from b30d3148927f620f5b1228ba941c211fdabdae75d0ba0b688a58accbf018f3cc
val rawScriptPubKey: String = TestUtil.rawP2PKHScriptPubKey
val scriptPubKey: ScriptPubKey = ScriptPubKey(rawScriptPubKey)
"ScriptPubKey" must "give the expected asm from creating a scriptPubKey from hex" in {
scriptPubKey.asm must be(expectedAsm)
}
it must "determine if we have a witness program inside of the scriptPubKey" in {
val pubKeyHash =
CryptoUtil.sha256Hash160(CryptoGenerators.publicKey.sampleSome.bytes)
val witnessProgram = Seq(ScriptConstant(pubKeyHash.bytes))
val asm = OP_0 +: BytesToPushOntoStack(20) +: witnessProgram
val witnessScriptPubKey = WitnessScriptPubKey(asm)
witnessScriptPubKey.witnessVersion must be(WitnessVersion0)
witnessScriptPubKey.witnessProgram must be(witnessProgram)
}
it must "construct valid witness spk v1 for taproot" in {
val pubKey = CryptoGenerators.schnorrPublicKey.sample.get
val witSPKV1 = WitnessScriptPubKeyV1.fromPubKey(pubKey)
assert(witSPKV1.pubKey == pubKey)
}
it must "fail to construct a valid witness spk v1 when the coordinate is not on the curve" in {
//all zeroes
val pubKey = ByteVector.fill(32)(0.toByte)
//reconstruct asm
val asm = OP_1 +: (BitcoinScriptUtil.calculatePushOp(pubKey) ++ Vector(
ScriptConstant(pubKey)))
assertThrows[IllegalArgumentException] {
WitnessScriptPubKeyV1.fromAsm(asm)
}
}
it must "determine the correct descriptors" in {
val key = ECPublicKey(
"02c48670493ca813cd2d1bf8177df3d3d7c8e97fc7eb74cd21f71ea2ba416aee54")
// p2pk
val p2pk = P2PKScriptPubKey(key)
assert(p2pk.toString == s"pk(${key.hex})")
// p2pkh
val p2pkh = P2PKHScriptPubKey(key)
assert(p2pkh.toString == "pkh(63fe7c47cf475802b1c4ec2d34d1ef33e6b0fc63)")
// multi
val multi = MultiSignatureScriptPubKey(2, Seq(key, key))
assert(
multi.toString == "multi(2,02c48670493ca813cd2d1bf8177df3d3d7c8e97fc7eb74cd21f71ea2ba416aee54,02c48670493ca813cd2d1bf8177df3d3d7c8e97fc7eb74cd21f71ea2ba416aee54)")
// p2sh
val p2sh = P2SHScriptPubKey(p2pkh)
assert(p2sh.toString == "sh(2a941c7a3e92c7f5fe149a641cae6b417989c411)")
//p2wpkh
val p2wpkh = P2WPKHWitnessSPKV0(key)
assert(p2wpkh.toString == "wpkh(63fe7c47cf475802b1c4ec2d34d1ef33e6b0fc63)")
// p2wsh
val wsh = P2WSHWitnessSPKV0(p2pkh)
assert(
wsh.toString == "wsh(c0ad050ea2824ca0b938dd1c998f7160793034f321a307aae990786c0c029317)")
}
}
|
bitcoin-s/bitcoin-s
|
core-test/src/test/scala/org/bitcoins/core/protocol/script/ScriptPubKeyTest.scala
|
Scala
|
mit
| 3,376
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.io.{BufferedOutputStream, ByteArrayInputStream, ByteArrayOutputStream, File,
FileOutputStream, OutputStreamWriter}
import java.net.URI
import java.nio.charset.StandardCharsets
import java.util.concurrent.TimeUnit
import java.util.zip.{ZipInputStream, ZipOutputStream}
import scala.concurrent.duration._
import scala.language.postfixOps
import com.google.common.io.{ByteStreams, Files}
import org.apache.hadoop.hdfs.DistributedFileSystem
import org.json4s.jackson.JsonMethods._
import org.mockito.Matchers.any
import org.mockito.Mockito.{mock, spy, verify}
import org.scalatest.BeforeAndAfter
import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually._
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.internal.Logging
import org.apache.spark.io._
import org.apache.spark.scheduler._
import org.apache.spark.util.{Clock, JsonProtocol, ManualClock, Utils}
class FsHistoryProviderSuite extends SparkFunSuite with BeforeAndAfter with Matchers with Logging {
private var testDir: File = null
before {
testDir = Utils.createTempDir()
}
after {
Utils.deleteRecursively(testDir)
}
/** Create a fake log file using the new log format used in Spark 1.3+ */
private def newLogFile(
appId: String,
appAttemptId: Option[String],
inProgress: Boolean,
codec: Option[String] = None): File = {
val ip = if (inProgress) EventLoggingListener.IN_PROGRESS else ""
val logUri = EventLoggingListener.getLogPath(testDir.toURI, appId, appAttemptId)
val logPath = new URI(logUri).getPath + ip
new File(logPath)
}
test("Parse application logs") {
val provider = new FsHistoryProvider(createTestConf())
// Write a new-style application log.
val newAppComplete = newLogFile("new1", None, inProgress = false)
writeFile(newAppComplete, true, None,
SparkListenerApplicationStart(newAppComplete.getName(), Some("new-app-complete"), 1L, "test",
None),
SparkListenerApplicationEnd(5L)
)
// Write a new-style application log.
val newAppCompressedComplete = newLogFile("new1compressed", None, inProgress = false,
Some("lzf"))
writeFile(newAppCompressedComplete, true, None,
SparkListenerApplicationStart(newAppCompressedComplete.getName(), Some("new-complete-lzf"),
1L, "test", None),
SparkListenerApplicationEnd(4L))
// Write an unfinished app, new-style.
val newAppIncomplete = newLogFile("new2", None, inProgress = true)
writeFile(newAppIncomplete, true, None,
SparkListenerApplicationStart(newAppIncomplete.getName(), Some("new-incomplete"), 1L, "test",
None)
)
// Force a reload of data from the log directory, and check that logs are loaded.
// Take the opportunity to check that the offset checks work as expected.
updateAndCheck(provider) { list =>
list.size should be (3)
list.count(_.attempts.head.completed) should be (2)
def makeAppInfo(
id: String,
name: String,
start: Long,
end: Long,
lastMod: Long,
user: String,
completed: Boolean): ApplicationHistoryInfo = {
ApplicationHistoryInfo(id, name,
List(ApplicationAttemptInfo(None, start, end, lastMod, user, completed)))
}
list(0) should be (makeAppInfo("new-app-complete", newAppComplete.getName(), 1L, 5L,
newAppComplete.lastModified(), "test", true))
list(1) should be (makeAppInfo("new-complete-lzf", newAppCompressedComplete.getName(),
1L, 4L, newAppCompressedComplete.lastModified(), "test", true))
list(2) should be (makeAppInfo("new-incomplete", newAppIncomplete.getName(), 1L, -1L,
newAppIncomplete.lastModified(), "test", false))
// Make sure the UI can be rendered.
list.foreach { case info =>
val appUi = provider.getAppUI(info.id, None)
appUi should not be null
appUi should not be None
}
}
}
test("SPARK-3697: ignore directories that cannot be read.") {
val logFile1 = newLogFile("new1", None, inProgress = false)
writeFile(logFile1, true, None,
SparkListenerApplicationStart("app1-1", Some("app1-1"), 1L, "test", None),
SparkListenerApplicationEnd(2L)
)
val logFile2 = newLogFile("new2", None, inProgress = false)
writeFile(logFile2, true, None,
SparkListenerApplicationStart("app1-2", Some("app1-2"), 1L, "test", None),
SparkListenerApplicationEnd(2L)
)
logFile2.setReadable(false, false)
val provider = new FsHistoryProvider(createTestConf())
updateAndCheck(provider) { list =>
list.size should be (1)
}
}
test("history file is renamed from inprogress to completed") {
val provider = new FsHistoryProvider(createTestConf())
val logFile1 = newLogFile("app1", None, inProgress = true)
writeFile(logFile1, true, None,
SparkListenerApplicationStart("app1", Some("app1"), 1L, "test", None),
SparkListenerApplicationEnd(2L)
)
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.head.asInstanceOf[FsApplicationAttemptInfo].logPath should
endWith(EventLoggingListener.IN_PROGRESS)
}
logFile1.renameTo(newLogFile("app1", None, inProgress = false))
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.head.asInstanceOf[FsApplicationAttemptInfo].logPath should not
endWith(EventLoggingListener.IN_PROGRESS)
}
}
test("Parse logs that application is not started") {
val provider = new FsHistoryProvider(createTestConf())
val logFile1 = newLogFile("app1", None, inProgress = true)
writeFile(logFile1, true, None,
SparkListenerLogStart("1.4")
)
updateAndCheck(provider) { list =>
list.size should be (0)
}
}
test("SPARK-5582: empty log directory") {
val provider = new FsHistoryProvider(createTestConf())
val logFile1 = newLogFile("app1", None, inProgress = true)
writeFile(logFile1, true, None,
SparkListenerApplicationStart("app1", Some("app1"), 1L, "test", None),
SparkListenerApplicationEnd(2L))
val oldLog = new File(testDir, "old1")
oldLog.mkdir()
provider.checkForLogs()
val appListAfterRename = provider.getListing()
appListAfterRename.size should be (1)
}
test("apps with multiple attempts with order") {
val provider = new FsHistoryProvider(createTestConf())
val attempt1 = newLogFile("app1", Some("attempt1"), inProgress = true)
writeFile(attempt1, true, None,
SparkListenerApplicationStart("app1", Some("app1"), 1L, "test", Some("attempt1"))
)
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.size should be (1)
}
val attempt2 = newLogFile("app1", Some("attempt2"), inProgress = true)
writeFile(attempt2, true, None,
SparkListenerApplicationStart("app1", Some("app1"), 2L, "test", Some("attempt2"))
)
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.size should be (2)
list.head.attempts.head.attemptId should be (Some("attempt2"))
}
val attempt3 = newLogFile("app1", Some("attempt3"), inProgress = false)
writeFile(attempt3, true, None,
SparkListenerApplicationStart("app1", Some("app1"), 3L, "test", Some("attempt3")),
SparkListenerApplicationEnd(4L)
)
updateAndCheck(provider) { list =>
list should not be (null)
list.size should be (1)
list.head.attempts.size should be (3)
list.head.attempts.head.attemptId should be (Some("attempt3"))
}
val app2Attempt1 = newLogFile("app2", Some("attempt1"), inProgress = false)
writeFile(app2Attempt1, true, None,
SparkListenerApplicationStart("app2", Some("app2"), 5L, "test", Some("attempt1")),
SparkListenerApplicationEnd(6L)
)
updateAndCheck(provider) { list =>
list.size should be (2)
list.head.attempts.size should be (1)
list.last.attempts.size should be (3)
list.head.attempts.head.attemptId should be (Some("attempt1"))
list.foreach { case app =>
app.attempts.foreach { attempt =>
val appUi = provider.getAppUI(app.id, attempt.attemptId)
appUi should not be null
}
}
}
}
test("log cleaner") {
val maxAge = TimeUnit.SECONDS.toMillis(10)
val clock = new ManualClock(maxAge / 2)
val provider = new FsHistoryProvider(
createTestConf().set("spark.history.fs.cleaner.maxAge", s"${maxAge}ms"), clock)
val log1 = newLogFile("app1", Some("attempt1"), inProgress = false)
writeFile(log1, true, None,
SparkListenerApplicationStart("app1", Some("app1"), 1L, "test", Some("attempt1")),
SparkListenerApplicationEnd(2L)
)
log1.setLastModified(0L)
val log2 = newLogFile("app1", Some("attempt2"), inProgress = false)
writeFile(log2, true, None,
SparkListenerApplicationStart("app1", Some("app1"), 3L, "test", Some("attempt2")),
SparkListenerApplicationEnd(4L)
)
log2.setLastModified(clock.getTimeMillis())
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.size should be (2)
}
// Move the clock forward so log1 exceeds the max age.
clock.advance(maxAge)
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.size should be (1)
list.head.attempts.head.attemptId should be (Some("attempt2"))
}
assert(!log1.exists())
// Do the same for the other log.
clock.advance(maxAge)
updateAndCheck(provider) { list =>
list.size should be (0)
}
assert(!log2.exists())
}
test("Event log copy") {
val provider = new FsHistoryProvider(createTestConf())
val logs = (1 to 2).map { i =>
val log = newLogFile("downloadApp1", Some(s"attempt$i"), inProgress = false)
writeFile(log, true, None,
SparkListenerApplicationStart(
"downloadApp1", Some("downloadApp1"), 5000 * i, "test", Some(s"attempt$i")),
SparkListenerApplicationEnd(5001 * i)
)
log
}
provider.checkForLogs()
(1 to 2).foreach { i =>
val underlyingStream = new ByteArrayOutputStream()
val outputStream = new ZipOutputStream(underlyingStream)
provider.writeEventLogs("downloadApp1", Some(s"attempt$i"), outputStream)
outputStream.close()
val inputStream = new ZipInputStream(new ByteArrayInputStream(underlyingStream.toByteArray))
var totalEntries = 0
var entry = inputStream.getNextEntry
entry should not be null
while (entry != null) {
val actual = new String(ByteStreams.toByteArray(inputStream), StandardCharsets.UTF_8)
val expected =
Files.toString(logs.find(_.getName == entry.getName).get, StandardCharsets.UTF_8)
actual should be (expected)
totalEntries += 1
entry = inputStream.getNextEntry
}
totalEntries should be (1)
inputStream.close()
}
}
test("SPARK-8372: new logs with no app ID are ignored") {
val provider = new FsHistoryProvider(createTestConf())
// Write a new log file without an app id, to make sure it's ignored.
val logFile1 = newLogFile("app1", None, inProgress = true)
writeFile(logFile1, true, None,
SparkListenerLogStart("1.4")
)
updateAndCheck(provider) { list =>
list.size should be (0)
}
}
test("provider correctly checks whether fs is in safe mode") {
val provider = spy(new FsHistoryProvider(createTestConf()))
val dfs = mock(classOf[DistributedFileSystem])
// Asserts that safe mode is false because we can't really control the return value of the mock,
// since the API is different between hadoop 1 and 2.
assert(!provider.isFsInSafeMode(dfs))
}
test("provider waits for safe mode to finish before initializing") {
val clock = new ManualClock()
val provider = new SafeModeTestProvider(createTestConf(), clock)
val initThread = provider.initialize()
try {
provider.getConfig().keys should contain ("HDFS State")
clock.setTime(5000)
provider.getConfig().keys should contain ("HDFS State")
provider.inSafeMode = false
clock.setTime(10000)
eventually(timeout(1 second), interval(10 millis)) {
provider.getConfig().keys should not contain ("HDFS State")
}
} finally {
provider.stop()
}
}
test("provider reports error after FS leaves safe mode") {
testDir.delete()
val clock = new ManualClock()
val provider = new SafeModeTestProvider(createTestConf(), clock)
val errorHandler = mock(classOf[Thread.UncaughtExceptionHandler])
val initThread = provider.startSafeModeCheckThread(Some(errorHandler))
try {
provider.inSafeMode = false
clock.setTime(10000)
eventually(timeout(1 second), interval(10 millis)) {
verify(errorHandler).uncaughtException(any(), any())
}
} finally {
provider.stop()
}
}
/**
* Asks the provider to check for logs and calls a function to perform checks on the updated
* app list. Example:
*
* updateAndCheck(provider) { list =>
* // asserts
* }
*/
private def updateAndCheck(provider: FsHistoryProvider)
(checkFn: Seq[ApplicationHistoryInfo] => Unit): Unit = {
provider.checkForLogs()
provider.cleanLogs()
checkFn(provider.getListing().toSeq)
}
private def writeFile(file: File, isNewFormat: Boolean, codec: Option[CompressionCodec],
events: SparkListenerEvent*) = {
val fstream = new FileOutputStream(file)
val cstream = codec.map(_.compressedOutputStream(fstream)).getOrElse(fstream)
val bstream = new BufferedOutputStream(cstream)
if (isNewFormat) {
EventLoggingListener.initEventLog(new FileOutputStream(file))
}
val writer = new OutputStreamWriter(bstream, StandardCharsets.UTF_8)
Utils.tryWithSafeFinally {
events.foreach(e => writer.write(compact(render(JsonProtocol.sparkEventToJson(e))) + "\n"))
} {
writer.close()
}
}
private def createEmptyFile(file: File) = {
new FileOutputStream(file).close()
}
private def createTestConf(): SparkConf = {
new SparkConf().set("spark.history.fs.logDirectory", testDir.getAbsolutePath())
}
private class SafeModeTestProvider(conf: SparkConf, clock: Clock)
extends FsHistoryProvider(conf, clock) {
@volatile var inSafeMode = true
// Skip initialization so that we can manually start the safe mode check thread.
private[history] override def initialize(): Thread = null
private[history] override def isFsInSafeMode(): Boolean = inSafeMode
}
}
|
gioenn/xSpark
|
core/src/test/scala/org/apache/spark/deploy/history/FsHistoryProviderSuite.scala
|
Scala
|
apache-2.0
| 15,635
|
package io.rout
import com.twitter.finagle.http.Request
import com.twitter.util.{Await, Future}
import io.rout.items._
import org.scalatest.{FlatSpec, Matchers}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ReqReadCompanionSpec extends FlatSpec with Matchers {
"The ReqReadCompanion" should "support a factory method based on a function that reads from the request" in {
val request: Request = Request(("foo", "5"))
val futureResult: Future[Option[String]] = ReqRead[Option[String]](_ => Some("5"))(request)
Await.result(futureResult) shouldBe Some("5")
}
it should "support a factory method based on a constant Future" in {
val request: Request = Request(("foo", ""))
val futureResult: Future[Int] = ReqRead.const(Future.value(1))(request)
Await.result(futureResult) shouldBe 1
}
it should "support a factory method based on a constant value" in {
val request: Request = Request(("foo", ""))
val futureResult: Future[Int] = ReqRead.value(1)(request)
Await.result(futureResult) shouldBe 1
}
it should "support a factory method based on a constant exception" in {
val request: Request = Request(("foo", ""))
val futureResult: Future[Int] = ReqRead.exception(Error.NotPresent(BodyItem))(request)
an [Error.NotPresent] shouldBe thrownBy(Await.result(futureResult))
}
}
|
teodimoff/rOut
|
core/test/scala/io/rout/RequestReaderCompanionSpec.scala
|
Scala
|
apache-2.0
| 1,405
|
package io.skysail.domain.resources
import akka.actor.ActorContext
trait ActorContextAware {
var actorContext: ActorContext = null
def setActorContext(context: ActorContext) = this.actorContext = context
def getSender() = this.actorContext.sender
def getSystem() = this.actorContext.system
}
|
evandor/skysail-server
|
skysail.domain/src/io/skysail/domain/resources/ActorContextAware.scala
|
Scala
|
apache-2.0
| 302
|
package com.joypeg.scamandrill.models
/**
* The export information
* @param key - a valid API key
* @param id - an export job identifier
*/
case class MExportInfo(key: String = DefaultConfig.defaultKeyFromConfig,
id: String) extends MandrillRequest
/**
* The export notify info
* @param key - a valid API key
* @param notify_email - an optional email address to notify when the export job has finished
*/
case class MExportNotify(key: String = DefaultConfig.defaultKeyFromConfig,
notify_email: String) extends MandrillRequest
/**
* The export activity
* @param key - a valid API key
* @param notify_email - an optional email address to notify when the export job has finished
* @param date_from - start date as a UTC string in YYYY-MM-DD HH:MM:SS format
* @param date_to - end date as a UTC string in YYYY-MM-DD HH:MM:SS format
* @param tags - an array of tag names to narrow the export to; will match messages that contain ANY of the tags
* @param senders - an array of senders to narrow the export to
* @param states - an array of states to narrow the export to; messages with ANY of the states will be included
* @param api_keys - an array of api keys to narrow the export to; messages sent with ANY of the keys will be included
*/
case class MExportActivity(key: String = DefaultConfig.defaultKeyFromConfig,
notify_email: String,
date_from: String,
date_to: String,
tags: List[String],
senders: List[String],
states: List[String],
api_keys: List[String]) extends MandrillRequest
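// A hedged construction sketch for the request above; the concrete values are
// illustrative only, and the key falls back to DefaultConfig as declared above.
object MExportActivitySketch {
  val exportRequest = MExportActivity(
    notify_email = "ops@example.com",
    date_from = "2016-01-01 00:00:00",
    date_to = "2016-01-31 23:59:59",
    tags = List("newsletter"),
    senders = List("noreply@example.com"),
    states = List("sent"),
    api_keys = Nil
  )
}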
|
dzsessona/scamandrill
|
src/main/scala/com/joypeg/scamandrill/models/MandrillExportsRequests.scala
|
Scala
|
apache-2.0
| 1,743
|
package main
import org.scalajs.dom._
import org.scalajs.dom
import scala.scalajs.js
import scala.scalajs.js.annotation.JSExport
import japgolly.scalajs.react.vdom.prefix_<^.{<, ^, _}
import japgolly.scalajs.react._
import diode.react.ModelProxy
import main.components.ReqBox
import main.components.SideViewTopHeader
import modals.NewModelModal
import scalacss.ScalaCssReact._
import scalacss.Defaults._
import scala.collection.immutable.Queue
import shared._
@JSExport
object webApp extends js.JSApp {
val contentDivStyle = Seq(
^.className := "container",
^.width := "100%",
^.height := "100%",
^.overflow.hidden,
^.paddingRight := "5px",
^.paddingLeft := "5px"
)
val ListTerminalDivStyle = Seq(
^.className := "col-1",
^.float.left,
^.width := "30%",
^.height := "100%",
^.paddingRight := "9px"
)
val cachedModelsDivStyle = Seq(
^.className := "col-2",
^.width := "70%",
^.height := "100%",
^.float.left
)
val cachedModelsPreStyle = Seq(
^.padding := "5px",
^.paddingRight := "5px",
^.height := "5%",
^.minHeight := "40px",
^.overflow.hidden,
^.position.relative
)
val addCachedModelsButtonStyle = Seq(
^.className := "glyphicon glyphicon-plus",
^.color := "green",
^.position.absolute,
^.top := "0%",
^.width := "5%",
^.height := "105%",
^.marginLeft := "-6px",
^.marginTop := "-2px",
^.outline := "none"
)
val cachedModelsDiv1Style = Seq(
^.overflowX.auto,
^.left := "5%",
^.height := "91%",
^.overflowX.auto,
^.overflowY.hidden,
^.width := "95%",
^.position.absolute
)
val modelTabsStyle = Seq(
^.className := "navpill",
^.display.flex,
^.flexDirection.row,
^.whiteSpace.nowrap,
^.position.relative,
^.marginLeft := "5px",
^.marginRight := "5px",
^.padding := "5px",
^.float.left,
^.overflow.hidden,
^.borderRadius := "5px",
^.top := "0px",
^.width := "200px",
^.background := "#CFEADD"
)
val modelTabsSpanStyle = Seq(
^.className := "col",
^.width := "80%",
^.height := "100%",
^.paddingTop := "2px",
^.paddingLeft := "5px",
^.cursor.pointer,
^.display.flex,
^.alignSelf.center,
^.alignItems.center,
^.fontSize.medium
)
val modelTabsButtonStyle = Seq(
^.className := "col",
^.position.absolute,
^.width := "20%",
^.height := "100%",
^.left := "80%",
^.top := "0%",
^.paddingTop := "5px"
)
val cachedModelsRowStyle = Seq(
^.whiteSpace.nowrap,
^.position.absolute,
^.height := "100%",
^.className := "clickable-row"
)
case class Props(proxy: ModelProxy[Tree])
case class CachedModel(name: String, model: Tree, selected: Boolean, uUID: UUID)
case class State(cachedModels: Queue[CachedModel] = Queue(CachedModel("untitled", emptyTree, selected = true, uUID = UUID.random())),
isNewModelModalOpen: Boolean = false, saveModelType: String = "rec",
isMethodStarted: Boolean = false, scrollPosition: Double = 0, newModel: Tree = emptyTree,
method: Seq[String] = Seq(), topSideView: SideViewTop.Value = SideViewTop.EntityListView)
object SideViewTop extends Enumeration {
val EntityListView, ReqBoxView = Value
}
val emptyTree = Tree(Seq())
def elemToTreeItem(elems: Seq[Elem]): TreeItem = {
TreeItem("Model", UUID.model(), elems.map(elem => convert(elem)), None)
}
def convert(elem: Elem): TreeItem = elem match {
case relation: Relation => TreeItem(relation.entity, relation.entity.uuid, relation.submodel.children.map(convert), Some(relation.link))
case node: shared.Node => TreeItem(node, node.uuid, Seq(), None)
}
class Backend($: BackendScope[Props, State]) {
def saveScrollPosition(position: Double): Callback = {
if ($.accessDirect.state.scrollPosition != position)
$.modState(_.copy(scrollPosition = position))
else
Callback()
}
def closeNewModelModal: Callback = $.modState(_.copy(isNewModelModalOpen = false))
def openNewModelModal(newSaveModelType: String, newModel: Tree): Callback = $.modState(_.copy(isNewModelModalOpen = true,
saveModelType = newSaveModelType, newModel = newModel))
val treeView = ReactComponentB[ModelProxy[Tree]]("treeView")
.render(P => <.pre(
^.className := "zoomViewport",
GlobalStyle.treeView,
^.border := "1px solid #ccc",
^.id := "treeView",
<.div(
^.width := "100%",
^.height := "100%",
ReactTreeView(
root = elemToTreeItem(P.props.value.children),
modelProxy = P.props,
showSearchBox = false,
saveScrollPosition = saveScrollPosition
),
<.strong(
^.id := "treeviewcontent"
)
)
))
.build
def setScroll(scrollPosition: Double): Callback = {
val pre = document.getElementById("treeView").asInstanceOf[dom.html.Pre]
Callback(pre.scrollTop = scrollPosition)
}
def getScroll: Callback = $.modState(_.copy(scrollPosition = document.getElementById("treeView").scrollTop))
def saveModel(name: String, model: Tree, P: Props, S: State): Callback = {
val m = CachedModel(name, model, selected = true, UUID.random())
setActiveModel(m, P, S) >>
$.modState(s => s.copy(cachedModels = s.cachedModels :+ m))
}
def sendMethod(currentMethod: Seq[String]): Callback = $.modState(_.copy(method = currentMethod, isMethodStarted = true))
def methodDone: Callback = $.modState(_.copy(isMethodStarted = false))
def render(P: Props, S: State) = {
val sc = AppCircuit.connect(_.tree)
val sideViewTopHeader =
SideViewTopHeader(
setEntityListView = $.modState(_.copy(topSideView = SideViewTop.EntityListView)),
setReqBoxView = $.modState(_.copy(topSideView = SideViewTop.ReqBoxView)))
<.div(
NewModelModal(
isOpen = S.isNewModelModalOpen,
onClose = closeNewModelModal,
saveModel = saveModel(_, _, P, S),
S.newModel, S.saveModelType
),
contentDivStyle,
<.div(
^.className := "header",
Header(P.proxy, openNewModelModal, sendMethod, getActiveModelName)
),
<.div(
ListTerminalDivStyle,
sideViewTopHeader,
S.topSideView match {
case SideViewTop.EntityListView => ElementList()
case SideViewTop.ReqBoxView => ReqBox()
},
ReqTLog(P.proxy, openNewModelModal, () => S.method, S.isMethodStarted, methodDone)
),
<.div(
cachedModelsDivStyle,
cachedModels((P, S)),
sc(proxy => treeView(proxy))
)
)
}
val cachedModels = ReactComponentB[(Props, State)]("cachedModelsComponent")
.render($ => <.pre(
cachedModelsPreStyle,
<.button(
addCachedModelsButtonStyle,
^.onClick --> openNewModelModal("save", $.props._1.proxy.value)
),
<.div(
cachedModelsDiv1Style,
<.div(
cachedModelsRowStyle,
<.ul(
^.display.flex,
^.height := "100%",
^.paddingBottom := "5px",
^.className := "nav nav-pills",
^.listStyleType.none,
$.props._2.cachedModels.map(s => listModels((s, $.props._1, $.props._2)))
)
)
)
)
).build
val listModels = ReactComponentB[(CachedModel, Props, State)]("listElem")
.render($ => <.li(
modelTabsStyle,
^.opacity := {
if ($.props._1.selected) "1" else "0.7"
},
^.backgroundColor := {
if ($.props._1.selected) "#90C4AB" else "#CFEADD"
},
<.span(
modelTabsSpanStyle,
$.props._1.name
),
^.onClick --> setActiveModel($.props._1, $.props._2, $.props._3),
<.button(
modelTabsButtonStyle,
GlobalStyle.removeButtonSimple,
^.outline := "none",
^.onClick ==> removeCachedModel($.props._1, $.props._2, $.props._3)
)
)).build
def getActiveModelName: Option[CachedModel] = $.accessDirect.state.cachedModels.find(_.selected)
def setActiveModel(cachedModel: CachedModel, P: Props, S: State): Callback = {
updateActiveModel(cachedModel, P, S) >> P.proxy.dispatchCB(SetModel(cachedModel.model.children))
}
def updateActiveModel(cachedModel: CachedModel, P: Props, S: State): Callback = {
val newModels: Queue[CachedModel] = S.cachedModels.map(model =>
if (model.selected)
model.copy(model = P.proxy.value, selected = model.uUID.equals(cachedModel.uUID))
else
model.copy(selected = model.uUID.equals(cachedModel.uUID))
)
$.modState(_.copy(cachedModels = newModels))
}
def removeCachedModel(modelToRemove: CachedModel, P: Props, S: State)(e: ReactEventI): Callback = {
e.stopPropagation()
val index = S.cachedModels.indexWhere(_.equals(modelToRemove))
val beginning = S.cachedModels.take(index)
val end = S.cachedModels.drop(index + 1)
// if the removed model was selected, choose a new active model
if (modelToRemove.selected && end.nonEmpty) {
// try to select next model first (like Chrome tabs)
val selectedEnd = end.map(m =>
m.copy(selected = m.uUID.equals(end.head.uUID)))
setActiveModel(selectedEnd.head, P, S) >>
$.modState(_.copy(cachedModels = beginning ++ selectedEnd))
} else if (modelToRemove.selected && beginning.nonEmpty) {
// otherwise try to select previous model
val selectedBeginning = beginning.map(m =>
m.copy(selected = m.uUID.equals(beginning.last.uUID)))
setActiveModel(selectedBeginning.last, P, S) >>
$.modState(_.copy(cachedModels = selectedBeginning ++ end))
} else if (beginning.isEmpty && end.isEmpty) {
// queue is empty - create a default Untitled model
val untitledModel = CachedModel("untitled", emptyTree, selected = true, UUID.random())
setActiveModel(untitledModel, P, S) >>
$.modState(_.copy(cachedModels = Queue(untitledModel)))
} else {
// else just remove the target model from state
$.modState(_.copy(cachedModels = beginning ++ end))
}
}
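    // Worked example (illustrative, not from the original file): with tabs Queue(A, B*, C)
    // where B is selected, removing B selects C, the next tab to the right (Chrome-like).
    // With Queue(A, B*), removing B falls back to selecting A. Removing the only remaining
    // tab recreates a fresh "untitled" model, and removing an unselected tab simply drops
    // it without changing the selection.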
}
val pageContent = ReactComponentB[Props]("Content")
.initialState(State())
.renderBackend[Backend]
.componentWillReceiveProps(x => x.$.backend.setScroll(x.currentState.scrollPosition))
.componentDidUpdate(x => {
x.$.backend.setScroll(x.currentState.scrollPosition)
})
.build
val dc = AppCircuit.connect(_.tree)
def main(): Unit = {
AppCss.load
window.onbeforeunload = {beforeUnloadEvent: BeforeUnloadEvent => "Leave?"}
ReactDOM.render(dc(proxy => pageContent(Props(proxy))), document.getElementById("content"))
}
}
|
reqT/reqT-webapp
|
client/src/main/scala/main/webApp.scala
|
Scala
|
apache-2.0
| 11,084
|
package com.arcusys.valamis.web.portlet
import javax.portlet.{RenderRequest, RenderResponse}
import com.arcusys.valamis.web.portlet.base.{OAuthPortlet, PortletBase}
class AchievedCertificatesView extends OAuthPortlet with PortletBase {
override def doView(request: RenderRequest, response: RenderResponse) {
implicit val out = response.getWriter
val securityScope = getSecurityData(request)
val data = securityScope.data
sendTextFile("/templates/2.0/achieved_certificates_templates.html")
sendTextFile("/templates/2.0/common_templates.html")
sendMustacheFile(data, "achieved_certificates.html")
}
}
|
igor-borisov/JSCORM
|
valamis-portlets/src/main/scala/com/arcusys/valamis/web/portlet/AchievedCertificatesView.scala
|
Scala
|
gpl-3.0
| 632
|
package com.searchlight.khronus.cluster
import akka.actor.ActorRef
import com.searchlight.khronus.model.Metric
import com.searchlight.khronus.util.Settings
import com.searchlight.khronus.util.log.Logging
import scala.util.hashing.MurmurHash3
class AffinityConsistentHashRing extends Logging {
private val tokens = collection.mutable.SortedSet[Token]()(Ordering.by(_.hash))
private val tokensByWorker = collection.mutable.Map[String, Seq[Token]]()
private var metricsByWorker: Map[String, MetricsQueue] = Map[String, MetricsQueue]()
private def virtualTokens(actor: String, count: Int = 256) = (1 to count).map(id ⇒ Token(hash(s"$actor-$id"), actor)).toSeq
private def hash(string: String) = MurmurHash3.arrayHash(string.toArray)
private def clockwiseToken(metric: Metric) = {
tokens.from(Token(hash(metric.name))).headOption.getOrElse(tokens.head)
}
def addWorker(worker: ActorRef): Unit = {
val workerKey = key(worker)
if (!tokensByWorker.contains(workerKey)) {
val workerTokens = virtualTokens(workerKey)
tokensByWorker += ((workerKey, workerTokens))
tokens ++= workerTokens
}
}
def removeWorker(worker: ActorRef): Unit = {
tokensByWorker.remove(key(worker)).foreach { workerTokens ⇒
tokens --= workerTokens
}
}
private def key(actor: ActorRef) = actor.path.parent.toString
def assignWorkers(metrics: Seq[Metric]) = {
if (tokens.nonEmpty) {
metricsByWorker = metrics.groupBy(clockwiseToken(_).worker).map { case (workerKey, groupedMetrics) ⇒ (workerKey, MetricsQueue(groupedMetrics)) }
}
}
def nextMetrics(worker: ActorRef): Seq[Metric] = metricsByWorker.get(key(worker)).map(_.next).getOrElse(Seq())
def hasPendingMetrics(worker: ActorRef) = metricsByWorker.get(key(worker)).exists(_.hasNext)
def remainingMetrics(): Seq[Metric] = {
metricsByWorker.values flatMap (_.remaining) toSeq
}
}
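// Hypothetical usage sketch (workerRef: ActorRef and metrics: Seq[Metric] are assumed
// to exist; they are not defined in this file):
//
//   val ring = AffinityConsistentHashRing()
//   ring.addWorker(workerRef)        // registers 256 virtual tokens for this worker
//   ring.assignWorkers(metrics)      // buckets each metric by its clockwise-nearest token
//   while (ring.hasPendingMetrics(workerRef)) {
//     val batch = ring.nextMetrics(workerRef)  // at most Settings.Master.WorkerBatchSize metrics
//     // ... dispatch batch to the worker
//   }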
object AffinityConsistentHashRing {
def apply() = new AffinityConsistentHashRing
}
case class Token(hash: Int, worker: String = "")
case class MetricsQueue(metrics: Seq[Metric]) {
private val m = metrics.grouped(Settings.Master.WorkerBatchSize)
def next = if (m.hasNext) m.next() else Seq()
def hasNext = m.hasNext
def remaining = m.toList.flatten
}
|
despegar/khronus
|
khronus-core/src/main/scala/com/searchlight/khronus/cluster/AffinityConsistentHashRing.scala
|
Scala
|
apache-2.0
| 2,274
|
/*
* Copyright 2012 Eike Kettner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eknet.publet.vfs.fs
import java.io._
import org.eknet.publet.vfs._
import com.google.common.eventbus.EventBus
import org.eknet.publet.vfs.events.{ContentWrittenEvent, ContentCreatedEvent}
import java.nio.file.{FileVisitResult, Path => NioPath, SimpleFileVisitor, Files}
import org.eknet.publet.vfs.Path
import java.nio.file.attribute.BasicFileAttributes
/**
*
* @author <a href="mailto:eike.kettner@gmail.com">Eike Kettner</a>
* @since 01.04.12 14:06
*/
class FileResource(f: File, root: Path, bus: EventBus)
extends AbstractLocalResource(f, root, bus) with ContentResource with Modifyable with Writeable {
def inputStream = new FileInputStream(file)
def writeFrom(in: InputStream, changeInfo: Option[ChangeInfo]) {
val out = new CloseEventOutStream(new FileOutputStream(file), bus, this, changeInfo)
Content.copy(in, out, closeIn = false)
}
def outputStream: OutputStream = new CloseEventOutStream(new FileOutputStream(file), bus, this, None)
override def lastModification = Some(file.lastModified())
def create() {
file.createNewFile()
bus.post(ContentCreatedEvent(this))
}
override def length = Some(file.length())
def contentType = ContentType(f)
override def toString = "File[" + f.toString + "]"
}
private[fs] class CloseEventOutStream(out: OutputStream, bus: EventBus, resource: FileResource, changeInfo: Option[ChangeInfo]) extends OutputStream {
def write(b: Int) {
out.write(b)
}
override def write(b: Array[Byte]) {
out.write(b)
}
override def write(b: Array[Byte], off: Int, len: Int) {
out.write(b, off, len)
}
override def flush() {
out.flush()
}
override def close() {
out.close()
bus.post(ContentWrittenEvent(resource, changeInfo))
}
}
object FileResource {
private[this] def deleteDirectory(root: File, keepRoot: Boolean) {
Files.walkFileTree(root.toPath, new SimpleFileVisitor[NioPath] {
override def visitFile(file: NioPath, attrs: BasicFileAttributes) = {
Files.delete(file)
FileVisitResult.CONTINUE
}
override def postVisitDirectory(dir: NioPath, exc: IOException) = {
if (exc == null) {
if (!keepRoot || !root.equals(dir.toFile)) {
Files.delete(dir)
}
FileVisitResult.CONTINUE
} else {
FileVisitResult.TERMINATE
}
}
})
}
/**
* Recursively deletes the given directory.
*
* @param root the directory to delete
*/
def deleteDirectory(root: File) {
deleteDirectory(root, keepRoot = false)
}
/**
* Recursively cleans the given directory. The contents
* of the directory are deleted, but not the directory itself.
*
* @param root the directory to clean
*/
def cleanDirectory(root: File) {
deleteDirectory(root, keepRoot = true)
}
}
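// Hypothetical usage sketch (tmpDir is an assumed java.io.File, not defined here):
//
//   FileResource.cleanDirectory(tmpDir)   // deletes the contents but keeps tmpDir itself
//   FileResource.deleteDirectory(tmpDir)  // deletes tmpDir and everything below it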
|
eikek/publet
|
publet/src/main/scala/org/eknet/publet/vfs/fs/FileResource.scala
|
Scala
|
apache-2.0
| 3,390
|
package jsky.app.ot.visitlog
import edu.gemini.pot.sp.SPObservationID
import edu.gemini.spModel.obsrecord.ObsVisit
import javax.swing.table.AbstractTableModel
import java.util.Date
object VisitTableModel {
sealed abstract class Column[+T <% AnyRef : Manifest](val name: String) {
def value(visit: ObsVisit): T
def clazz: Class[_] = manifest.runtimeClass
}
case object IdColumn extends Column[SPObservationID]("Observation") {
def value(visit: ObsVisit): SPObservationID = visit.getObsId
}
case object Datasets extends Column[String]("Datasets") {
def value(visit: ObsVisit): String = {
val indices = visit.getAllDatasetLabels.toList.map(_.getIndex).sorted
val groups = (List.empty[Range]/:indices) { (lst, index) =>
lst match {
case Nil => List(Range(index, index))
case h :: t => if (h.end + 1 == index) Range(h.start, index) :: t
else if (h.end == index) lst
else Range(index, index) :: lst
}
}
groups.reverse.map { r =>
if (r.start == r.end) r.start.toString
else s"${r.start}-${r.end}"
}.mkString(",")
}
}
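  // Worked example (illustrative): for sorted dataset indices List(1, 2, 3, 5, 7, 8) the
  // fold above builds the ranges 1-3, 5-5 and 7-8 (head-first, then reversed), so
  // value(visit) renders the string "1-3,5,7-8".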
case object StartTime extends Column[Date]("Start") {
def value(visit: ObsVisit): Date = new Date(visit.getStartTime)
}
case object Duration extends Column[String]("Duration (mins)") {
def value(visit: ObsVisit): String = f"${visit.getTotalTime/60000d}%1.2f"
}
val columns = List(IdColumn, Datasets, StartTime, Duration)
}
import VisitTableModel._
class VisitTableModel extends AbstractTableModel {
private var visitList: List[ObsVisit] = Nil
def getRowCount: Int = visitList.length
def getColumnCount: Int = columns.length
def getValueAt(r: Int, c: Int): Object = columns(c).value(visitList(r))
override def getColumnName(c: Int): String = columns(c).name
override def getColumnClass(c: Int): Class[_] = columns(c).clazz
def visits: List[ObsVisit] = visitList
def visits_=(visits: List[ObsVisit]): Unit = {
visitList = visits
fireTableDataChanged()
}
}
|
arturog8m/ocs
|
bundle/jsky.app.ot.visitlog/src/main/scala/jsky/app/ot/visitlog/VisitTableModel.scala
|
Scala
|
bsd-3-clause
| 2,093
|
/*
* Copyright (c) 2015 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.huskimo
package channels
package singular
// Joda-Time
import org.joda.time.DateTime
// AWS
import com.amazonaws.services.s3.AmazonS3Client
// Scalaz
import scalaz._
import Scalaz._
// Spray
import spray.httpx.unmarshalling.FromResponseUnmarshaller
// This project
import ApiProtocol._
import tasks.{
FileTasks,
RedshiftTasks
}
import utils.ConversionUtils
object Singular {
private case class Resource(apiSlug: String, filePrefix: String, tableName: String)
private object Resources {
private val tablePrefix = "huskimo.singular_"
val campaigns = Resource("stats", "campaigns", s"${tablePrefix}campaigns")
val creatives = Resource("creative_stats", "creatives", s"${tablePrefix}creatives")
}
/**
* Fetch from API and write to file.
*
* @param config The Config for the HuskimoApp
* @param channel The Channel we are fetching from
* @param resource The resource we are fetching
* @param lookupDate The date we are fetching
*/
def fetchAndWrite[T <: List[Tsvable] : FromResponseUnmarshaller](config: AppConfig.Config,
channel: AppConfig.Channel, channelIndex: Int, resource: Resource,
lookupDate: DateTime)(implicit s3: AmazonS3Client) {
ApiClient.getStatistics[T](channel.api_key, resource.apiSlug, lookupDate) match {
case Success(records) => {
val filename = FileTasks.getTemporaryFile(channelIndex, resource.filePrefix, lookupDate)
FileTasks.writeFile(filename, records, channel.name, ConversionUtils.now())
FileTasks.uploadToS3(s3, config.s3.bucket, config.s3.folder_path, filename)
}
case Failure(err) => throw new Exception(s"Error fetching ${resource.apiSlug} from Singular (${channel.name}): ${err}") // TODO: send event to Snowplow & non-0 system exit
}
}
/**
* Run the fetch process for all Singular resources
* that we care about.
*
* @param config The Config for the HuskimoApp
* @param endDate The last day to retrieve
* campaign statistics for
*/
def fetch(config: AppConfig.Config, endDate: DateTime) {
// 1. Setup
// TODO: initialize for each database
implicit val s3Client = FileTasks.initializeS3Client(config.s3.access_key_id, config.s3.secret_access_key)
FileTasks.deleteFromS3(s3Client, config.s3.bucket, Left(config.s3.folder_path))
// 2. Pagination
// TODO: this should be in parallel
val singularChannels = config.channels.filter(_.`type` == "singular")
for ((chn, idx) <- singularChannels.zipWithIndex) {
// Loop through all days
for (daysAgo <- 0 to config.fetch.lookback) {
val lookupDate = endDate.minusDays(daysAgo)
// Lookup the resources and write to a temporary file
fetchAndWrite[CampaignStatisticsResult](config, chn, idx, Resources.campaigns, lookupDate)
fetchAndWrite[CreativeStatisticsResult](config, chn, idx, Resources.creatives, lookupDate)
}
// TODO: this should be in parallel
for (tgt <- config.targets) {
RedshiftTasks.initializeConnection(tgt)
RedshiftTasks.loadTable(config.s3, Resources.campaigns.filePrefix, Resources.campaigns.tableName)
RedshiftTasks.loadTable(config.s3, Resources.creatives.filePrefix, Resources.creatives.tableName)
}
}
ApiClient.shutdown()
}
}
|
snowplow/huskimo
|
src/main/scala/com.snowplowanalytics/huskimo/channels/singular/Singular.scala
|
Scala
|
apache-2.0
| 4,025
|
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5
import com.ibm.spark.kernel.BuildInfo
object SparkKernelInfo {
/**
* Represents the protocol version (IPython) supported by this kernel.
*/
val protocolVersion = "5.0"
/**
* Represents what the kernel implements.
*/
val implementation = "spark"
/**
* Represents the kernel version.
*/
val implementationVersion = BuildInfo.version
/**
* Represents the language supported by the kernel.
*/
val language_info = Map("name" -> "scala")
/**
* Represents the language version supported by the kernel.
*/
val languageVersion = BuildInfo.scalaVersion
/**
* Represents the displayed name of the kernel.
*/
val banner = "IBM Spark Kernel"
/**
* Represents the name of the user who started the kernel process.
*/
val username = System.getProperty("user.name")
/**
* Represents the unique session id used by this instance of the kernel.
*/
val session = java.util.UUID.randomUUID.toString
}
|
malcolmgreaves/spark-kernel
|
protocol/src/main/scala/com/ibm/spark/kernel/protocol/v5/SparkKernelInfo.scala
|
Scala
|
apache-2.0
| 1,686
|
package com.kodekutters.czml
import java.awt.Color
import com.kodekutters.czml.CzmlImplicits._
import com.kodekutters.czml.czmlCore._
import com.kodekutters.czml.czmlProperties._
import com.kodekutters.czml.czmlCustom.{CustomBasic, CustomInterval, CustomList, CustomMap}
import play.api.libs.json.Json
import scala.collection.mutable.{HashSet, ListBuffer, ListMap}
/**
* an example using custom properties
*/
object ExampleCustom {
def main(args: Array[String]) {
// create an empty czml object
val czml = CZML[CZMLPacket]()
// add a typical first packet
czml.packets += new CZMLPacket(id = "document", version = "1.0")
// create a positions property
val pos = new CzmlPositions(new CzmlPosition(cartographicDegrees = Cartographic[DEGREE](151.12, -33.52, 123.0)))
// create a billboard property
val bill = new Billboard(image = "https://upload.wikimedia.org/wikipedia/commons/c/c4/PM5544_with_non-PAL_signals.png", color = Color.red, show = true, scale = 0.2)
// create a label with some text
val label = new Label(eyeOffset = CzmlCartesian(5, 6, 7), text = "some text here", font = "11pt Lucida Console", outlineColor = Color.orange)
// a custom property consisting of a map of key=field name, value=various types including another map
val customMap = new CustomMap(ListMap(
"some-string" -> "xxxx",
"some-int" -> 123,
"some-map" -> Map("children" -> ListBuffer("one", "two", "three", 123))).toMap)
// a list/map of custom property (key=field name, value=various types)
val theList = ListMap("children" -> customMap,
"custom-array" -> new CustomList(List(1, 2, 3, 4, 5)),
"basic-double" -> new CustomBasic(23.4),
"basic-string" -> new CustomBasic("some-text"),
"change-name" -> new CustomInterval("2007-03-02T13:00:00Z/2008-05-11T15:30:00Z", "XYZ"),
"basic-array" -> new CustomBasic(ListBuffer(9, 8, 7)))
// create a custom properties czml property
val custom = new CustomProperties(theList.toMap)
// create a czml packet with all the czml properties
val packet = new CZMLPacket("test packet", HashSet[CzmlProperty](pos, bill, label, custom))
// add the packet to the czml object
czml.packets += packet
// convert the czml object to json
val jsczml = Json.toJson(czml)
// print the json representation
println(Json.prettyPrint(jsczml))
// alternatively, write the czml (as json) directly to file (here to System.out)
// Util.writeCzmlToFile(czml)
}
}
|
workingDog/scalaczml
|
src/main/scala/com/kodekutters/czml/ExampleCustom.scala
|
Scala
|
bsd-3-clause
| 2,521
|
/**
* Created by Xavier on 2016/8/19.
*/
object AnonymousFunction {
var inc = (x : Int) => x + 1
}
|
xydaxia0/scala-gradle
|
src/main/scala/AnonymousFunction.scala
|
Scala
|
apache-2.0
| 107
|
/**
* Copyright 2015 Devon Miller
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package im
package vdom
import scala.concurrent.{ Promise, ExecutionContext, Future }
import scala.util.{ Try, Success, Failure }
import scala.util.control.NonFatal
/**
* A Patch holds "difference" information that can be applied
* to another object (the target) to make it look like another
* (the source) object. The "difference" information must
* be interpreted for each backend environment.
*
* You can create patches either from diffing two virtual DOM
* trees or you can just manually create the patches and compose
* them using a sequence or `andThen` essentially creating a
* patch stream or template language of DOM "updates."
*
* You can create your own DSL to create patches if you want.
*/
sealed trait Patch {
/**
* Route this patch to another part of the sub-tree when it is run.
* The first sequence position represents the first level
* of children that the patch is applied to and so on.
* Nil means to act on the node that the patch is applied to.
*/
def applyTo(path: Seq[Int] = Nil) = PathPatch(this, path)
/**
* Route this patch to a specific child index when it is run.
* The index is zero-based. An index outside the child
* list range results in an error.
*/
def applyTo(path: Int) = PathPatch(this, Seq(path))
/**
* Sequence a patch before another.
*/
def andThen(right: Patch) = AndThenPatch(this, right)
}
/**
* Does nothing. This may translate into some type of "dummy" element.
*/
case object EmptyPatch extends Patch
/**
* Apply a patch to a particular tree child. Indexes navigate children.
*/
case class PathPatch(patch: Patch, path: Seq[Int] = Nil) extends Patch
/**
* Remove a node.
*/
case object RemovePatch extends Patch
/**
* Replace a node. Access to the parent will be required.
*/
case class ReplacePatch(replacement: VNode) extends Patch
/** Insert a new child at the specified index, or append if index is not specified. */
case class InsertPatch(vnode: VNode, index: Option[Int] = None) extends Patch
/** Create a text node. */
case class TextPatch(content: String) extends Patch
/**
* Apply attributes/properties to an element.
*/
case class KeyValuePatch(elActions: Seq[KeyValue[_]]) extends Patch
/** Manipulate children. */
case class OrderChildrenPatch(i: ReorderInstruction) extends Patch
/**
* Combine two patches in sequence.
*/
case class AndThenPatch(left: Patch, right: Patch) extends Patch
/**
* Instruction to re-order children. Removes are processed first,
* then the moves; move indexes should therefore reflect the state
* after the removes have been applied. Duplicate indexes in either
* structure could produce surprises.
*/
case class ReorderInstruction(moves: Seq[(Int, Int)] = Seq(), removes: Seq[Int] = Seq())
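// Hypothetical composition sketch (newChild and replacement are assumed VNode values,
// not defined in this file):
//
//   val patch =
//     InsertPatch(newChild).applyTo(Seq(0, 2))                            // insert under the 3rd child of the 1st child
//       .andThen(ReplacePatch(replacement).applyTo(1))                    // then replace the 2nd child of the root
//       .andThen(OrderChildrenPatch(ReorderInstruction(removes = Seq(4))))
//
// Each step is routed through PathPatch/AndThenPatch; a backend interprets the resulting
// Patch tree against a concrete DOM.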
|
aappddeevv/scala-vdom
|
shared/src/main/scala/im/vdom/Patch.scala
|
Scala
|
apache-2.0
| 3,348
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming.sources
import java.util
import java.util.Collections
import scala.collection.JavaConverters._
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.sql.connector.catalog.{SessionConfigSupport, SupportsRead, SupportsWrite, Table, TableCapability, TableProvider}
import org.apache.spark.sql.connector.catalog.TableCapability._
import org.apache.spark.sql.connector.read.{InputPartition, PartitionReaderFactory, Scan, ScanBuilder}
import org.apache.spark.sql.connector.read.streaming.{ContinuousPartitionReaderFactory, ContinuousStream, MicroBatchStream, Offset, PartitionOffset}
import org.apache.spark.sql.connector.write.{WriteBuilder, WriterCommitMessage}
import org.apache.spark.sql.connector.write.streaming.{StreamingDataWriterFactory, StreamingWrite}
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.execution.streaming.{ContinuousTrigger, RateStreamOffset, Sink, StreamingQueryWrapper}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.{DataSourceRegister, StreamSinkProvider}
import org.apache.spark.sql.streaming.{OutputMode, StreamingQuery, StreamTest, Trigger}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.Utils
class FakeDataStream extends MicroBatchStream with ContinuousStream {
override def deserializeOffset(json: String): Offset = RateStreamOffset(Map())
override def commit(end: Offset): Unit = {}
override def stop(): Unit = {}
override def initialOffset(): Offset = RateStreamOffset(Map())
override def latestOffset(): Offset = RateStreamOffset(Map())
override def mergeOffsets(offsets: Array[PartitionOffset]): Offset = RateStreamOffset(Map())
override def planInputPartitions(start: Offset, end: Offset): Array[InputPartition] = {
throw new IllegalStateException("fake source - cannot actually read")
}
override def planInputPartitions(start: Offset): Array[InputPartition] = {
throw new IllegalStateException("fake source - cannot actually read")
}
override def createReaderFactory(): PartitionReaderFactory = {
throw new IllegalStateException("fake source - cannot actually read")
}
override def createContinuousReaderFactory(): ContinuousPartitionReaderFactory = {
throw new IllegalStateException("fake source - cannot actually read")
}
}
class FakeScanBuilder extends ScanBuilder with Scan {
override def build(): Scan = this
override def readSchema(): StructType = StructType(Seq())
override def toMicroBatchStream(checkpointLocation: String): MicroBatchStream = new FakeDataStream
override def toContinuousStream(checkpointLocation: String): ContinuousStream = new FakeDataStream
}
class FakeWriteBuilder extends WriteBuilder with StreamingWrite {
override def buildForStreaming(): StreamingWrite = this
override def createStreamingWriterFactory(): StreamingDataWriterFactory = {
throw new IllegalStateException("fake sink - cannot actually write")
}
override def commit(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {
throw new IllegalStateException("fake sink - cannot actually write")
}
override def abort(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {
throw new IllegalStateException("fake sink - cannot actually write")
}
}
trait FakeStreamingWriteTable extends Table with SupportsWrite {
override def name(): String = "fake"
override def schema(): StructType = StructType(Seq())
override def capabilities(): util.Set[TableCapability] = {
Set(STREAMING_WRITE).asJava
}
override def newWriteBuilder(options: CaseInsensitiveStringMap): WriteBuilder = {
new FakeWriteBuilder
}
}
class FakeReadMicroBatchOnly
extends DataSourceRegister
with TableProvider
with SessionConfigSupport {
override def shortName(): String = "fake-read-microbatch-only"
override def keyPrefix: String = shortName()
override def getTable(options: CaseInsensitiveStringMap): Table = {
LastReadOptions.options = options
new Table with SupportsRead {
override def name(): String = "fake"
override def schema(): StructType = StructType(Seq())
override def capabilities(): util.Set[TableCapability] = {
Set(MICRO_BATCH_READ).asJava
}
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new FakeScanBuilder
}
}
}
}
class FakeReadContinuousOnly
extends DataSourceRegister
with TableProvider
with SessionConfigSupport {
override def shortName(): String = "fake-read-continuous-only"
override def keyPrefix: String = shortName()
override def getTable(options: CaseInsensitiveStringMap): Table = {
LastReadOptions.options = options
new Table with SupportsRead {
override def name(): String = "fake"
override def schema(): StructType = StructType(Seq())
override def capabilities(): util.Set[TableCapability] = {
Set(CONTINUOUS_READ).asJava
}
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new FakeScanBuilder
}
}
}
}
class FakeReadBothModes extends DataSourceRegister with TableProvider {
override def shortName(): String = "fake-read-microbatch-continuous"
override def getTable(options: CaseInsensitiveStringMap): Table = {
new Table with SupportsRead {
override def name(): String = "fake"
override def schema(): StructType = StructType(Seq())
override def capabilities(): util.Set[TableCapability] = {
Set(MICRO_BATCH_READ, CONTINUOUS_READ).asJava
}
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new FakeScanBuilder
}
}
}
}
class FakeReadNeitherMode extends DataSourceRegister with TableProvider {
override def shortName(): String = "fake-read-neither-mode"
override def getTable(options: CaseInsensitiveStringMap): Table = {
new Table {
override def name(): String = "fake"
override def schema(): StructType = StructType(Nil)
override def capabilities(): util.Set[TableCapability] = Collections.emptySet()
}
}
}
class FakeWriteOnly
extends DataSourceRegister
with TableProvider
with SessionConfigSupport {
override def shortName(): String = "fake-write-microbatch-continuous"
override def keyPrefix: String = shortName()
override def getTable(options: CaseInsensitiveStringMap): Table = {
LastWriteOptions.options = options
new Table with FakeStreamingWriteTable {
override def name(): String = "fake"
override def schema(): StructType = StructType(Nil)
}
}
}
class FakeNoWrite extends DataSourceRegister with TableProvider {
override def shortName(): String = "fake-write-neither-mode"
override def getTable(options: CaseInsensitiveStringMap): Table = {
new Table {
override def name(): String = "fake"
override def schema(): StructType = StructType(Nil)
override def capabilities(): util.Set[TableCapability] = Collections.emptySet()
}
}
}
case class FakeWriteV1FallbackException() extends Exception
class FakeSink extends Sink {
override def addBatch(batchId: Long, data: DataFrame): Unit = {}
}
class FakeWriteSupportProviderV1Fallback extends DataSourceRegister
with TableProvider with StreamSinkProvider {
override def createSink(
sqlContext: SQLContext,
parameters: Map[String, String],
partitionColumns: Seq[String],
outputMode: OutputMode): Sink = {
new FakeSink()
}
override def shortName(): String = "fake-write-v1-fallback"
override def getTable(options: CaseInsensitiveStringMap): Table = {
new Table with FakeStreamingWriteTable {
override def name(): String = "fake"
override def schema(): StructType = StructType(Nil)
}
}
}
object LastReadOptions {
var options: CaseInsensitiveStringMap = _
def clear(): Unit = {
options = null
}
}
object LastWriteOptions {
var options: CaseInsensitiveStringMap = _
def clear(): Unit = {
options = null
}
}
class StreamingDataSourceV2Suite extends StreamTest {
override def beforeAll(): Unit = {
super.beforeAll()
val fakeCheckpoint = Utils.createTempDir()
spark.conf.set(SQLConf.CHECKPOINT_LOCATION.key, fakeCheckpoint.getCanonicalPath)
}
override def afterEach(): Unit = {
LastReadOptions.clear()
LastWriteOptions.clear()
}
val readFormats = Seq(
"fake-read-microbatch-only",
"fake-read-continuous-only",
"fake-read-microbatch-continuous",
"fake-read-neither-mode")
val writeFormats = Seq(
"fake-write-microbatch-continuous",
"fake-write-neither-mode")
val triggers = Seq(
Trigger.Once(),
Trigger.ProcessingTime(1000),
Trigger.Continuous(1000))
private def testPositiveCase(readFormat: String, writeFormat: String, trigger: Trigger): Unit = {
testPositiveCaseWithQuery(readFormat, writeFormat, trigger)(() => _)
}
private def testPositiveCaseWithQuery(
readFormat: String,
writeFormat: String,
trigger: Trigger)(check: StreamingQuery => Unit): Unit = {
val query = spark.readStream
.format(readFormat)
.load()
.writeStream
.format(writeFormat)
.trigger(trigger)
.start()
check(query)
query.stop()
}
private def testNegativeCase(
readFormat: String,
writeFormat: String,
trigger: Trigger,
errorMsg: String) = {
val ex = intercept[UnsupportedOperationException] {
testPositiveCase(readFormat, writeFormat, trigger)
}
assert(ex.getMessage.contains(errorMsg))
}
private def testPostCreationNegativeCase(
readFormat: String,
writeFormat: String,
trigger: Trigger,
errorMsg: String) = {
val query = spark.readStream
.format(readFormat)
.load()
.writeStream
.format(writeFormat)
.trigger(trigger)
.start()
eventually(timeout(streamingTimeout)) {
assert(query.exception.isDefined)
assert(query.exception.get.cause != null)
assert(query.exception.get.cause.getMessage.contains(errorMsg))
}
}
test("disabled v2 write") {
// Ensure the V2 path works normally and generates a V2 sink.
testPositiveCaseWithQuery(
"fake-read-microbatch-continuous", "fake-write-v1-fallback", Trigger.Once()) { v2Query =>
assert(v2Query.asInstanceOf[StreamingQueryWrapper].streamingQuery.sink
.isInstanceOf[Table])
}
// Ensure we create a V1 sink with the config. Note the config is a comma separated
// list, including other fake entries.
val fullSinkName = classOf[FakeWriteSupportProviderV1Fallback].getName
withSQLConf(SQLConf.DISABLED_V2_STREAMING_WRITERS.key -> s"a,b,c,test,$fullSinkName,d,e") {
testPositiveCaseWithQuery(
"fake-read-microbatch-continuous", "fake-write-v1-fallback", Trigger.Once()) { v1Query =>
assert(v1Query.asInstanceOf[StreamingQueryWrapper].streamingQuery.sink
.isInstanceOf[FakeSink])
}
}
}
Seq(
Tuple2(classOf[FakeReadMicroBatchOnly], Trigger.Once()),
Tuple2(classOf[FakeReadContinuousOnly], Trigger.Continuous(1000))
).foreach { case (source, trigger) =>
test(s"SPARK-25460: session options are respected in structured streaming sources - $source") {
// `keyPrefix` and `shortName` are the same in this test case
val readSource = source.getConstructor().newInstance().shortName()
val writeSource = "fake-write-microbatch-continuous"
val readOptionName = "optionA"
withSQLConf(s"spark.datasource.$readSource.$readOptionName" -> "true") {
testPositiveCaseWithQuery(readSource, writeSource, trigger) { _ =>
eventually(timeout(streamingTimeout)) {
// Write options should not be set.
assert(!LastWriteOptions.options.containsKey(readOptionName))
assert(LastReadOptions.options.getBoolean(readOptionName, false))
}
}
}
val writeOptionName = "optionB"
withSQLConf(s"spark.datasource.$writeSource.$writeOptionName" -> "true") {
testPositiveCaseWithQuery(readSource, writeSource, trigger) { _ =>
eventually(timeout(streamingTimeout)) {
// Read options should not be set.
assert(!LastReadOptions.options.containsKey(writeOptionName))
assert(LastWriteOptions.options.getBoolean(writeOptionName, false))
}
}
}
}
}
// Get a list of (read, write, trigger) tuples for test cases.
val cases = readFormats.flatMap { read =>
writeFormats.flatMap { write =>
triggers.map(t => (write, t))
}.map {
case (write, t) => (read, write, t)
}
}
for ((read, write, trigger) <- cases) {
testQuietly(s"stream with read format $read, write format $write, trigger $trigger") {
val sourceTable = DataSource.lookupDataSource(read, spark.sqlContext.conf).getConstructor()
.newInstance().asInstanceOf[TableProvider].getTable(CaseInsensitiveStringMap.empty())
val sinkTable = DataSource.lookupDataSource(write, spark.sqlContext.conf).getConstructor()
.newInstance().asInstanceOf[TableProvider].getTable(CaseInsensitiveStringMap.empty())
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Implicits._
trigger match {
// Invalid - can't read at all
case _ if !sourceTable.supportsAny(MICRO_BATCH_READ, CONTINUOUS_READ) =>
testNegativeCase(read, write, trigger,
s"Data source $read does not support streamed reading")
// Invalid - can't write
case _ if !sinkTable.supports(STREAMING_WRITE) =>
testNegativeCase(read, write, trigger,
s"Data source $write does not support streamed writing")
case _: ContinuousTrigger =>
if (sourceTable.supports(CONTINUOUS_READ)) {
// Valid continuous queries.
testPositiveCase(read, write, trigger)
} else {
// Invalid - trigger is continuous but reader is not
testNegativeCase(
read, write, trigger, s"Data source $read does not support continuous processing")
}
case microBatchTrigger =>
if (sourceTable.supports(MICRO_BATCH_READ)) {
// Valid microbatch queries.
testPositiveCase(read, write, trigger)
} else {
// Invalid - trigger is microbatch but reader is not
testPostCreationNegativeCase(read, write, trigger,
s"Data source $read does not support microbatch processing")
}
}
}
}
}
|
bdrillard/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/sources/StreamingDataSourceV2Suite.scala
|
Scala
|
apache-2.0
| 15,534
|
/*
* Copyright 2014 Aurel Paulovic (aurel.paulovic@gmail.com) (aurelpaulovic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.aurelpaulovic.crdt
import org.scalatest._
abstract class TestSpec extends WordSpec with Assertions with OptionValues with Inside with Inspectors {
}
|
AurelPaulovic/crdt
|
src/test/scala/com/aurelpaulovic/crdt/TestSpec.scala
|
Scala
|
apache-2.0
| 811
|
/*
* Copyright 2016 Frugal Mechanic (http://frugalmechanic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fm.common.rich
import fm.common.Implicits._
import fm.common.NodeType
import org.scalajs.dom.raw.{Element, NodeList, NodeSelector}
import scala.reflect.{classTag, ClassTag}
final class RichNodeSelector(val self: NodeSelector) extends AnyVal {
/** Typesafe helper on top of querySelector */
def selectFirst[T : NodeType : ClassTag](selector: String): T = selectFirstOption[T](selector).getOrElse{ throw new NoSuchElementException(s"No Such Element: $selector") }
/** Alias for selectFirst[T]("*") */
def selectFirst[T : NodeType : ClassTag]: T = selectFirst[T]("*")
/** Typesafe helper on top of querySelector */
def selectFirstOption[T : NodeType : ClassTag](selector: String): Option[T] = {
val targetClass: Class[_] = classTag[T].runtimeClass
val elem: Element = try{ self.querySelector(selector) } catch { case ex: Exception => throw new IllegalArgumentException("Invalid Selector for querySelector: "+selector) }
Option(elem).filter{ targetClass.isInstance }.map{ _.asInstanceOf[T] }
}
/** Alias for selectFirstOption[T]("*") */
def selectFirstOption[T : NodeType : ClassTag]: Option[T] = selectFirstOption[T]("*")
/** Typesafe helper on top of querySelectorAll */
def selectAll[T : NodeType : ClassTag](selector: String): IndexedSeq[T] = {
val targetClass: Class[_] = classTag[T].runtimeClass
val results: NodeList = try{ self.querySelectorAll(selector) } catch { case ex: Exception => throw new IllegalArgumentException("Invalid Selector for querySelectorAll: "+selector) }
results.filter{ targetClass.isInstance }.map{ _.asInstanceOf[T] }
}
/** Alias for selectAll[T]("*") */
def selectAll[T : NodeType : ClassTag]: IndexedSeq[T] = selectAll[T]("*")
}
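// Hypothetical usage sketch (assumes the implicit enrichment from fm.common.Implicits and
// NodeType instances for these element types are in scope, and that matching elements
// exist on the page):
//
//   import org.scalajs.dom.document
//   import org.scalajs.dom.raw.{HTMLAnchorElement, HTMLInputElement}
//
//   val email: Option[HTMLInputElement]      = document.selectFirstOption[HTMLInputElement]("#email")
//   val links: IndexedSeq[HTMLAnchorElement] = document.selectAll[HTMLAnchorElement]("a.external")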
|
frugalmechanic/fm-common
|
js/src/main/scala/fm/common/rich/RichNodeSelector.scala
|
Scala
|
apache-2.0
| 2,362
|
// Copyright (C) 2014 Dmitry Yakimenko (detunized@gmail.com).
// Licensed under the terms of the MIT license. See LICENCE for details.
package net.detunized.iteratorz
class ZippingIterator[A](i1: Iterator[A], i2: Iterator[A])
(takeFirst: (A, A) => Boolean) extends Iterator[A] {
private[this] var nextA: A = _
private[this] var nextDefined = false
override def hasNext: Boolean = nextDefined || i1.hasNext || i2.hasNext
override def next(): A = {
if (nextDefined) {
nextDefined = false
nextA
} else {
val have1 = i1.hasNext
val have2 = i2.hasNext
if (have1 && have2) {
val next1 = i1.next()
val next2 = i2.next()
if (takeFirst(next1, next2)) {
nextA = next2
nextDefined = true
next1
} else {
nextA = next1
nextDefined = true
next2
}
} else if (have1) {
i1.next()
} else if (have2) {
i2.next()
} else {
Iterator.empty.next()
}
}
}
}
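// Usage sketch: merging two sorted iterators. The takeFirst predicate decides which of
// the two current heads is emitted first; the other is buffered and emitted next.
//
//   val merged = new ZippingIterator(Iterator(1, 3, 5), Iterator(2, 4, 6))(_ <= _)
//   merged.toList  // List(1, 2, 3, 4, 5, 6)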
|
detunized/iteratorz
|
src/main/scala/ZippingIterator.scala
|
Scala
|
mit
| 1,059
|
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Author: Spiros Tzavellas
*/
package com.tzavellas.coeus.validation.bean
import org.junit.Test
import org.junit.Assert._
import javax.validation.{ Validation => JValidation }
import javax.validation.constraints.NotNull
import com.tzavellas.coeus.bind.{ BindingResult, Error }
import com.tzavellas.coeus.test.Assertions._
class BeanValidatorTest {
import BeanValidatorTest._
@Test
def validate_returns_a_list_of_errors() {
val errors = validator.validate(new Post).iterator
assertTrue(errors.hasNext)
assertNotNull(errors.next.code)
}
@Test
def validate_using_binding_result() {
val result = new BindingResult(null, new Post)
validator.validate(result)
assertTrue(result.hasErrors)
assertNone(result.error("content"))
assertSome(result.error("title"))
assertEquals(validator.errorFormatter, result.errorFormatter)
}
@Test
def validate_field() {
assertNone(validator.validateField[Post]("title", "The title"))
assertSome(validator.validateField[Post]("title", null))
}
}
object BeanValidatorTest {
val validator = new BeanValidator(
JValidation.buildDefaultValidatorFactory.getValidator)
class Post {
@NotNull
var title: String = _
var content: String = _
}
}
|
sptz45/coeus
|
src/test/scala/com/tzavellas/coeus/validation/bean/BeanValidatorTest.scala
|
Scala
|
apache-2.0
| 1,360
|
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert.avro
import java.nio.ByteBuffer
import org.apache.avro.generic.{GenericArray, GenericEnumSymbol, GenericFixed, GenericRecord}
import org.apache.avro.util.Utf8
import org.locationtech.geomesa.utils.text.BasicParser
import org.parboiled.errors.ParsingException
import org.parboiled.scala.parserunners.{BasicParseRunner, ReportingParseRunner}
sealed trait AvroPath {
def eval(record: Any): Option[Any]
}
object AvroPath extends BasicParser {
type AvroPredicate = GenericRecord => Boolean
private val Parser = new AvroPathParser()
def apply(path: String): AvroPath = parse(path)
@throws(classOf[ParsingException])
def parse(path: String, report: Boolean = true): AvroPath = {
if (path == null) {
throw new IllegalArgumentException("Invalid path string: null")
}
val runner = if (report) { ReportingParseRunner(Parser.path) } else { BasicParseRunner(Parser.path) }
val parsing = runner.run(path)
parsing.result.getOrElse(throw new ParsingException(s"Error parsing avro path: $path"))
}
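  // Illustrative path strings accepted by the parser below (field and schema names here
  // are hypothetical, not taken from a real Avro schema):
  //
  //   AvroPath("/user/address/city")          // nested record fields -> CompositeExpr of PathExprs
  //   AvroPath("/payload$type=WebPage/url")   // select the branch of a union whose schema name is "WebPage"
  //   AvroPath("/items[$name=foo]/value")     // pick the array element whose "name" field equals "foo"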
private def convert(record: Any): Any = {
record match {
case x: Utf8 => x.toString
case x: ByteBuffer => convertBytes(x)
case x: GenericFixed => x.bytes()
case x: GenericEnumSymbol => x.toString
case x: GenericArray[Any] => convertList(x)
case x => x
}
}
private def convertBytes(x: ByteBuffer): Array[Byte] = {
val start = x.position
val length = x.limit - start
val bytes = Array.ofDim[Byte](length)
x.get(bytes, 0, length)
x.position(start)
bytes
}
private def convertList(list: java.util.List[Any]): java.util.List[Any] = {
val result = new java.util.ArrayList[Any](list.size())
val iter = list.iterator()
while (iter.hasNext) {
result.add(convert(iter.next()))
}
result
}
case class PathExpr(field: String, predicate: AvroPredicate) extends AvroPath {
override def eval(record: Any): Option[Any] = {
record match {
case gr: GenericRecord =>
gr.get(field) match {
case x: GenericRecord => Some(x).filter(predicate)
case x => Option(convert(x))
}
case _ => None
}
}
}
case class ArrayRecordExpr(field: String, matched: String) extends AvroPath {
import scala.collection.JavaConverters._
override def eval(r: Any): Option[Any] = r match {
case a: java.util.List[GenericRecord] => a.asScala.find(predicate)
case _ => None
}
private def predicate(record: GenericRecord): Boolean = {
record.get(field) match {
case x: Utf8 => x.toString == matched
case x => x == matched
}
}
}
case class CompositeExpr(se: Seq[AvroPath]) extends AvroPath {
override def eval(r: Any): Option[Any] = r match {
case gr: GenericRecord => se.foldLeft[Option[Any]](Some(gr))((result, current) => result.flatMap(current.eval))
case _ => None
}
}
case class UnionTypeFilter(n: String) extends AvroPredicate {
override def apply(v1: GenericRecord): Boolean = v1.getSchema.getName == n
}
class AvroPathParser extends BasicParser {
import org.parboiled.scala._
// full avro path expression
def path: Rule1[AvroPath] = rule("Path") {
oneOrMore(pathExpression | arrayRecord) ~ EOI ~~> {
paths => if (paths.lengthCompare(1) == 0) { paths.head } else { CompositeExpr(paths) }
}
}
private def pathExpression: Rule1[PathExpr] = rule("PathExpression") {
"/" ~ identifier ~ optional("$type=" ~ identifier) ~~> {
(field, typed) => PathExpr(field, typed.map(UnionTypeFilter.apply).getOrElse(_ => true))
}
}
private def arrayRecord: Rule1[ArrayRecordExpr] = rule("ArrayRecord") {
("[$" ~ identifier ~ "=" ~ identifier ~ "]") ~~> {
(field, matched) => ArrayRecordExpr(field, matched)
}
}
private def identifier: Rule1[String] = rule("Identifier") {
oneOrMore(char | anyOf(".-")) ~> { s => s }
}
}
}
|
aheyne/geomesa
|
geomesa-convert/geomesa-convert-avro/src/main/scala/org/locationtech/geomesa/convert/avro/AvroPath.scala
|
Scala
|
apache-2.0
| 4,571
|
import java.awt.datatransfer._
import scala.collection.JavaConversions.asScalaBuffer
import scala.collection.mutable.Buffer
val flavors = SystemFlavorMap.getDefaultFlavorMap().asInstanceOf[SystemFlavorMap]
val natives:Buffer[String] = flavors.getNativesForFlavor(DataFlavor.imageFlavor)
println(natives.mkString(", "))
|
Gerhut/scala-for-the-impatient
|
Chapter3/10.scala
|
Scala
|
unlicense
| 321
|
import eu.inn.binders.value._
import org.scalatest.{FlatSpec, Matchers}
case class Mixed(a: Int, b: String, extra: Value)
class TestMixJsonSerializer extends FlatSpec with Matchers {
import eu.inn.binders.json._
"Json " should " serialize Mixed" in {
val t = Mixed(1, "ha", ObjV(
"f" -> 555
))
val str = t.toJson
assert (str === """{"a":1,"b":"ha","extra":{"f":555}}""")
}
"Json " should " deserialize Mixed" in {
val o = """{"a":1,"b":"ha","extra":{"f":555}}""".parseJson[Mixed]
val t = Mixed(1, "ha", ObjV(
"f" -> 555
))
assert (o === t)
}
"Json " should " serialize Mixed (Null)" in {
val t = Mixed(1, "ha", Null)
val str = t.toJson
assert (str === """{"a":1,"b":"ha","extra":null}""")
}
"Json " should " deserialize Mixed (Null)" in {
val o = """{"a":1,"b":"ha"}""".parseJson[Mixed]
val t = Mixed(1, "ha", Null)
assert (o === t)
}
}
|
InnovaCo/binders-json
|
src/test/scala/TestMixJsonSerializer.scala
|
Scala
|
bsd-3-clause
| 932
|
package rexstream
import java.lang.annotation.Annotation
import scala.annotation.StaticAnnotation
import scala.collection.parallel._
import scala.reflect.runtime.universe._
import rexstream.util._
trait MetadataProvider extends Iterable[MetadataObject] {
def get(name : String) : Option[Any]
def apply(name : String) = {
get(name).get
}
def drop(name : String) : Boolean
def update(name : String, value : Any) : Unit
var name : String
var description : String
}
object MetadataProvider {
/**
* Created by GregRos on 04/06/2016.
*/
class DefaultMetadataProvider extends MetadataProvider {
private var inner = Map.empty[String, Any]
def name = this("name").asInstanceOf[String]
def name_=(x : String) = this("name") = x
def description = this("description").asInstanceOf[String]
def description_=(x : String) = this("description") = x
override def drop(name: String): Boolean ={
val result = inner.contains(name)
inner = inner - name
result
}
override def update(name: String, value: Any): Unit = inner = inner.updated(name, value)
override def get(name: String): Option[Any] = inner.get(name)
override def iterator: Iterator[MetadataObject] = inner.map {case (k,v) => new MetadataObject{
val name: String = k
val value = v
} }.iterator
}
def apply() : MetadataProvider = new DefaultMetadataProvider()
}
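// Hypothetical usage sketch:
//
//   val meta = MetadataProvider()
//   meta("author") = "someone"     // update via the apply-style setter
//   meta.get("author")             // Some("someone")
//   meta.drop("author")            // true, and the key is removed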
|
GregRos/Rexstream
|
ScalaFBL/src/rexstream/MetadataProvider.scala
|
Scala
|
mit
| 1,535
|
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.calculations.LowEmissionCarsCalculator
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class CPAux2(value: Int) extends CtBoxIdentifier("MainRatePoolSum") with CtInteger
object CPAux2 extends Calculated[CPAux2, ComputationsBoxRetriever] with LowEmissionCarsCalculator {
override def calculate(fieldValueRetriever: ComputationsBoxRetriever): CPAux2 =
CPAux2(getMainRatePoolSum(fieldValueRetriever.retrieveLEC01()))
}
|
ahudspith-equalexperts/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/computations/CPAux2.scala
|
Scala
|
apache-2.0
| 1,159
|
///*
// * (C) 2014 Jacob Lorensen, jacoblorensen@gmail.com
// */
//
//package org.zapto.jablo.myml
//
//import org.junit._
//import Assert._
//import TestHelper.calc
//import TestHelper.e
//import TestHelper.check
//import TestHelper.reparse
//
//class IfExprTest {
// @Before
// def setUp: Unit = {
// }
//
// @After
// def tearDown: Unit = {
// }
//
// @Test
// def ifInt1: Unit = {
// try {
// val p = check(calc.parseAll(calc.expr, "if 1 then 2 else 0"))
// p.get eval e
// fail("Shuold produce TypeErrorException")
// } catch {
// case e: TypeErrorException => Unit
// }
// }
//
// @Test
// def ifLte1 = {
// val p = check(calc.parseAll(calc.expr, "if x <= 4 then 6 else 7"))
// assertEquals(Ife(Lte(Var("x"), Z(4)), Z(6), Z(7)), p.get);
// assertEquals(Z(7), p.get.eval(Map("x" -> Z(12))))
// assertEquals(p.get, reparse(p))
// assertEquals(Z(7), ByteCodeMachine.interp(p.get, Map("x" -> Z(12))))
// }
//
// @Test
// def ifLteComposed = {
// val p = check(calc.parseAll(calc.expr, "if x+10 <= 4*x then 1 else 2"))
// assertEquals(Ife(Lte(Add(Var("x"), Z(10)), Mul(Z(4), Var("x"))), Z(1), Z(2)), p.get)
// assertEquals(Z(1), p.get.eval(Map("x" -> Z(4))))
// assertEquals(p.get, reparse(p))
// assertEquals(Z(1), ByteCodeMachine.interp(p.get, Map("x"->Z(4))))
// }
//
// @Test
// def ifBool1 = {
// val p = check(calc.parseAll(calc.expr, "if true then 2 else 0"))
// assertEquals(Ife(True, Z(2), Z(0)), p.get)
// assertEquals(Z(2), p.get eval e)
// assertEquals(p.get, reparse(p))
// assertEquals(Z(2), ByteCodeMachine.interp(p.get))
// }
//
// @Test
// def ifNested1 = {
// val p = check(calc.parseAll(calc.expr, "if false then if true then 3 else 2 else 1"))
// assertEquals(Ife(False, Ife(True, Z(3), Z(2)), Z(1)), p.get)
// assertEquals(Z(1), p.get eval e)
// assertEquals(p.get, reparse(p))
// assertEquals(Z(1), ByteCodeMachine.interp(p.get))
// }
//
// @Test
// def ifHigher1 = {
// val p = check(calc.parseAll(calc.expr, "(if true then fun(x)=>x+1 else fun(x)=>2+x)(5)"))
// assertEquals(App(Ife(True, Fun(List("x"), Add(Var("x"), Z(1))), Fun(List("x"), Add(Z(2), Var("x")))), List(Z(5))), p.get)
// assertEquals(Z(6), p.get eval e)
// println("ifHighr1 - pars : " + p.get)
// println(" - infix: " + p.get.infix)
// val rp = reparse(p)
// println(" - repar: " + rp)
// assertEquals(p.get, reparse(p))
// assertEquals(Z(6), ByteCodeMachine.interp(p.get))
// }
//}
|
jablo/myml
|
src/test/scala/org/zapto/jablo/myml_later/IfExprTest.scala
|
Scala
|
artistic-2.0
| 2,531
|
package com.lateralthoughts.points.controllers
import com.lateralthoughts.mocked.MockedRepositoryConfig
import com.lateralthoughts.points.PointsServlet
import org.scalatra.test.scalatest.ScalatraFlatSpec
trait InitServlet extends ScalatraFlatSpec {
addServlet(new PointsServlet(MockedRepositoryConfig), "/*")
}
|
vincentdoba/points
|
points-server/src/test/scala/com/lateralthoughts/points/controllers/InitServlet.scala
|
Scala
|
mit
| 318
|
/*
*
* * Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*
*/
package play.api.libs.ws.ssl
import play.api.libs.ws.WSClientConfig
import java.security.{ Security, PrivilegedExceptionAction }
/**
* Configures global system properties on the JSSE implementation, if defined.
*
* WARNING: This class sets system properties to configure JSSE code which typically uses static initialization on
* load. Because of this, if classes are loaded in BEFORE this code has a chance to operate, you may find that this
* code works inconsistently. The solution is to set the system properties on the command line explicitly (or in the
* case of "ocsp.enable", in the security property file).
*/
class SystemConfiguration {
val logger = org.slf4j.LoggerFactory.getLogger(getClass)
def configure(config: WSClientConfig) {
config.ssl.map {
ssl =>
ssl.loose.map {
loose =>
loose.allowUnsafeRenegotiation.map(configureUnsafeRenegotiation)
loose.allowLegacyHelloMessages.map(configureAllowLegacyHelloMessages)
}
ssl.checkRevocation.map(configureCheckRevocation)
}
}
def configureUnsafeRenegotiation(allowUnsafeRenegotiation: Boolean) {
System.setProperty("sun.security.ssl.allowUnsafeRenegotiation", allowUnsafeRenegotiation.toString)
logger.debug("configureUnsafeRenegotiation: sun.security.ssl.allowUnsafeRenegotiation = {}", allowUnsafeRenegotiation.toString)
}
def configureAllowLegacyHelloMessages(allowLegacyHelloMessages: Boolean) {
System.setProperty("sun.security.ssl.allowLegacyHelloMessages", allowLegacyHelloMessages.toString)
logger.debug("configureAllowLegacyHelloMessages: sun.security.ssl.allowLegacyHelloMessages = {}", allowLegacyHelloMessages.toString)
}
def configureCheckRevocation(checkRevocation: Boolean) {
// http://docs.oracle.com/javase/6/docs/technotes/guides/security/certpath/CertPathProgGuide.html#AppC
// https://blogs.oracle.com/xuelei/entry/enable_ocsp_checking
    // 1.7: PKIXCertPathValidator.populateVariables, it is dynamic so no override needed.
Security.setProperty("ocsp.enable", checkRevocation.toString)
logger.debug("configureCheckRevocation: ocsp.enable = {}", checkRevocation.toString)
System.setProperty("com.sun.security.enableCRLDP", checkRevocation.toString)
logger.debug("configureCheckRevocation: com.sun.security.enableCRLDP = {}", checkRevocation.toString)
System.setProperty("com.sun.net.ssl.checkRevocation", checkRevocation.toString)
}
/**
* For use in testing.
*/
def clearProperties() {
Security.setProperty("ocsp.enable", "false")
System.clearProperty("com.sun.security.enableCRLDP")
System.clearProperty("com.sun.net.ssl.checkRevocation")
System.clearProperty("sun.security.ssl.allowLegacyHelloMessages")
System.clearProperty("sun.security.ssl.allowUnsafeRenegotiation")
}
}
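// Illustrative sketch (not part of the original file): the JSSE/OCSP switches set above can be
// inspected with the standard Java APIs, which helps when debugging why revocation checking does
// not seem to take effect. The property names are exactly those used in the configure* methods.
//
//   import java.security.Security
//
//   def dumpRevocationProperties(): Unit = {
//     println("ocsp.enable = " + Security.getProperty("ocsp.enable"))
//     println("com.sun.security.enableCRLDP = " + System.getProperty("com.sun.security.enableCRLDP"))
//     println("com.sun.net.ssl.checkRevocation = " + System.getProperty("com.sun.net.ssl.checkRevocation"))
//     println("sun.security.ssl.allowUnsafeRenegotiation = " +
//       System.getProperty("sun.security.ssl.allowUnsafeRenegotiation"))
//   }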
|
jyotikamboj/container
|
pf-framework/src/play-ws/src/main/scala/play/api/libs/ws/ssl/SystemConfiguration.scala
|
Scala
|
mit
| 2,924
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.source.image
import java.net.URI
import java.nio.file.Paths
import java.sql.Date
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.image.ImageSchema._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions.{col, substring_index}
class ImageFileFormatSuite extends SparkFunSuite with MLlibTestSparkContext {
// Single column of images named "image"
private lazy val imagePath = "../data/mllib/images/partitioned"
private lazy val recursiveImagePath = "../data/mllib/images"
test("Smoke test: create basic ImageSchema dataframe") {
val origin = "path"
val width = 1
val height = 1
val nChannels = 3
val data = Array[Byte](0, 0, 0)
val mode = ocvTypes("CV_8UC3")
// Internal Row corresponds to image StructType
val rows = Seq(Row(Row(origin, height, width, nChannels, mode, data)),
Row(Row(null, height, width, nChannels, mode, data)))
val rdd = sc.makeRDD(rows)
val df = spark.createDataFrame(rdd, imageSchema)
assert(df.count === 2, "incorrect image count")
assert(df.schema("image").dataType == columnSchema, "data do not fit ImageSchema")
}
test("image datasource count test") {
val df1 = spark.read.format("image").load(imagePath)
assert(df1.count === 9)
val df2 = spark.read.format("image").option("dropInvalid", true).load(imagePath)
assert(df2.count === 8)
}
test("image datasource test: read jpg image") {
val df = spark.read.format("image").load(imagePath + "/cls=kittens/date=2018-02/DP153539.jpg")
assert(df.count() === 1)
}
test("image datasource test: read png image") {
val df = spark.read.format("image").load(imagePath + "/cls=multichannel/date=2018-01/BGRA.png")
assert(df.count() === 1)
}
test("image datasource test: read non image") {
val filePath = imagePath + "/cls=kittens/date=2018-01/not-image.txt"
val df = spark.read.format("image").option("dropInvalid", true)
.load(filePath)
assert(df.count() === 0)
val df2 = spark.read.format("image").option("dropInvalid", false)
.load(filePath)
assert(df2.count() === 1)
val result = df2.head()
val resultOrigin = result.getStruct(0).getString(0)
    // convert `origin` to a `java.net.URI` object and then compare,
    // because `file:/path` and `file:///path` are both valid URI representations of the same file
assert(new URI(resultOrigin) === Paths.get(filePath).toAbsolutePath().normalize().toUri())
// Compare other columns in the row to be the same with the `invalidImageRow`
assert(result === invalidImageRow(resultOrigin))
}
test("image datasource partition test") {
val result = spark.read.format("image")
.option("dropInvalid", true).load(imagePath)
.select(substring_index(col("image.origin"), "/", -1).as("origin"), col("cls"), col("date"))
.collect()
assert(Set(result: _*) === Set(
Row("29.5.a_b_EGDP022204.jpg", "kittens", Date.valueOf("2018-01-01")),
Row("54893.jpg", "kittens", Date.valueOf("2018-02-01")),
Row("DP153539.jpg", "kittens", Date.valueOf("2018-02-01")),
Row("DP802813.jpg", "kittens", Date.valueOf("2018-02-01")),
Row("BGRA.png", "multichannel", Date.valueOf("2018-01-01")),
Row("BGRA_alpha_60.png", "multichannel", Date.valueOf("2018-01-01")),
Row("chr30.4.184.jpg", "multichannel", Date.valueOf("2018-02-01")),
Row("grayscale.jpg", "multichannel", Date.valueOf("2018-02-01"))
))
}
// Images with the different number of channels
test("readImages pixel values test") {
val images = spark.read.format("image").option("dropInvalid", true)
.load(imagePath + "/cls=multichannel/").collect()
val firstBytes20Set = images.map { rrow =>
val row = rrow.getAs[Row]("image")
val filename = Paths.get(getOrigin(row)).getFileName().toString()
val mode = getMode(row)
val bytes20 = getData(row).slice(0, 20).toList
filename -> Tuple2(mode, bytes20) // Cannot remove `Tuple2`, otherwise `->` operator
// will match 2 arguments
}.toSet
assert(firstBytes20Set === expectedFirstBytes20Set)
}
// number of channels and first 20 bytes of OpenCV representation
// - default representation for 3-channel RGB images is BGR row-wise:
// (B00, G00, R00, B10, G10, R10, ...)
// - default representation for 4-channel RGB images is BGRA row-wise:
// (B00, G00, R00, A00, B10, G10, R10, A10, ...)
private val expectedFirstBytes20Set = Set(
"grayscale.jpg" ->
((0, List[Byte](-2, -33, -61, -60, -59, -59, -64, -59, -66, -67, -73, -73, -62,
-57, -60, -63, -53, -49, -55, -69))),
"chr30.4.184.jpg" -> ((16,
List[Byte](-9, -3, -1, -43, -32, -28, -75, -60, -57, -78, -59, -56, -74, -59, -57,
-71, -58, -56, -73, -64))),
"BGRA.png" -> ((24,
List[Byte](-128, -128, -8, -1, -128, -128, -8, -1, -128,
-128, -8, -1, 127, 127, -9, -1, 127, 127, -9, -1))),
"BGRA_alpha_60.png" -> ((24,
List[Byte](-128, -128, -8, 60, -128, -128, -8, 60, -128,
-128, -8, 60, 127, 127, -9, 60, 127, 127, -9, 60)))
)
}
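// Illustrative sketch (not part of the original suite): application code reads the image data
// source the same way the partition test above does, e.g. assuming a SparkSession named `spark`:
//
//   val images = spark.read.format("image")
//     .option("dropInvalid", true)
//     .load("/path/to/images")
//   images.select("image.origin", "image.width", "image.height", "image.mode")
//     .show(truncate = false)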
|
chuckchen/spark
|
mllib/src/test/scala/org/apache/spark/ml/source/image/ImageFileFormatSuite.scala
|
Scala
|
apache-2.0
| 6,014
|
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers
import org.scalatest._
import org.scalatest.prop.Checkers
import org.scalacheck._
import Arbitrary._
import Prop._
import scala.reflect.BeanProperty
class ShouldBeMatcherSpec extends Spec with ShouldMatchers with Checkers with ReturnsNormallyThrowsAssertion with BookPropertyMatchers {
class OddMatcher extends BeMatcher[Int] {
def apply(left: Int): MatchResult = {
MatchResult(
left % 2 == 1,
left.toString + " was even",
left.toString + " was odd"
)
}
}
val odd = new OddMatcher
val even = not (odd)
describe("The BeMatcher syntax") {
it("should do nothing if a BeMatcher matches") {
1 should be (odd)
2 should be (even)
}
it("should throw TestFailedException if a BeMatcher does not match") {
val caught1 = intercept[TestFailedException] {
4 should be (odd)
}
assert(caught1.getMessage === "4 was even")
val caught2 = intercept[TestFailedException] {
5 should be (even)
}
assert(caught2.getMessage === "5 was odd")
}
it("should do nothing if a BeMatcher does not match, when used with not") {
2 should not be (odd)
1 should not be (even)
22 should not (not (be (even)))
1 should not (not (be (odd)))
}
it("should throw TestFailedException if a BeMatcher matches, when used with not") {
val caught1 = intercept[TestFailedException] {
3 should not be (odd)
}
assert(caught1.getMessage === "3 was odd")
val caught2 = intercept[TestFailedException] {
6 should not be (even)
}
assert(caught2.getMessage === "6 was even")
val caught3 = intercept[TestFailedException] {
6 should not (not (be (odd)))
}
assert(caught3.getMessage === "6 was even")
}
it("should do nothing if a BeMatcher matches, when used in a logical-and expression") {
1 should (be (odd) and be (odd))
1 should (be (odd) and (be (odd)))
2 should (be (even) and be (even))
2 should (be (even) and (be (even)))
}
it("should throw TestFailedException if at least one BeMatcher does not match, when used in a logical-or expression") {
// both false
val caught1 = intercept[TestFailedException] {
2 should (be (odd) and be (odd))
}
assert(caught1.getMessage === "2 was even")
val caught2 = intercept[TestFailedException] {
2 should (be (odd) and (be (odd)))
}
assert(caught2.getMessage === "2 was even")
val caught3 = intercept[TestFailedException] {
1 should (be (even) and be (even))
}
assert(caught3.getMessage === "1 was odd")
val caught4 = intercept[TestFailedException] {
1 should (be (even) and (be (even)))
}
assert(caught4.getMessage === "1 was odd")
// first false
val caught5 = intercept[TestFailedException] {
1 should (be (even) and be (odd))
}
assert(caught5.getMessage === "1 was odd")
val caught6 = intercept[TestFailedException] {
1 should (be (even) and (be (odd)))
}
assert(caught6.getMessage === "1 was odd")
val caught7 = intercept[TestFailedException] {
2 should (be (odd) and be (even))
}
assert(caught7.getMessage === "2 was even")
val caught8 = intercept[TestFailedException] {
2 should (be (odd) and (be (even)))
}
assert(caught8.getMessage === "2 was even")
// TODO: Remember to try a BeMatcher[Any] one, to make sure it works on an Int
// second false
val caught9 = intercept[TestFailedException] {
1 should (be (odd) and be (even))
}
assert(caught9.getMessage === "1 was odd, but 1 was odd")
val caught10 = intercept[TestFailedException] {
1 should (be (odd) and (be (even)))
}
assert(caught10.getMessage === "1 was odd, but 1 was odd")
val caught11 = intercept[TestFailedException] {
2 should (be (even) and be (odd))
}
assert(caught11.getMessage === "2 was even, but 2 was even")
val caught12 = intercept[TestFailedException] {
2 should (be (even) and (be (odd)))
}
assert(caught12.getMessage === "2 was even, but 2 was even")
}
it("should do nothing if at least one BeMatcher matches, when used in a logical-or expression") {
// both true
1 should (be (odd) or be (odd))
1 should (be (odd) or (be (odd)))
2 should (be (even) or be (even))
2 should (be (even) or (be (even)))
// first false
1 should (be (even) or be (odd))
1 should (be (even) or (be (odd)))
2 should (be (odd) or be (even))
2 should (be (odd) or (be (even)))
// second false
1 should (be (odd) or be (even))
1 should (be (odd) or (be (even)))
2 should (be (even) or be (odd))
2 should (be (even) or (be (odd)))
}
it("should throw TestFailedException if a BeMatcher does not match, when used in a logical-or expression") {
val caught1 = intercept[TestFailedException] {
2 should (be (odd) or be (odd))
}
assert(caught1.getMessage === "2 was even, and 2 was even")
val caught2 = intercept[TestFailedException] {
2 should (be (odd) or (be (odd)))
}
assert(caught2.getMessage === "2 was even, and 2 was even")
val caught3 = intercept[TestFailedException] {
1 should (be (even) or be (even))
}
assert(caught3.getMessage === "1 was odd, and 1 was odd")
val caught4 = intercept[TestFailedException] {
1 should (be (even) or (be (even)))
}
assert(caught4.getMessage === "1 was odd, and 1 was odd")
}
it("should do nothing if a BeMatcher does not match, when used in a logical-and expression with not") {
2 should (not be (odd) and not be (odd))
2 should (not be (odd) and not (be (odd)))
2 should (not be (odd) and (not (be (odd))))
1 should (not be (even) and not be (even))
1 should (not be (even) and not (be (even)))
1 should (not be (even) and (not (be (even))))
}
it("should throw TestFailedException if at least one BeMatcher matches, when used in a logical-and expression with not") {
// both true
val caught1 = intercept[TestFailedException] {
1 should (not be (odd) and not be (odd))
}
assert(caught1.getMessage === "1 was odd")
val caught2 = intercept[TestFailedException] {
1 should (not be (odd) and not (be (odd)))
}
assert(caught2.getMessage === "1 was odd")
val caught3 = intercept[TestFailedException] {
1 should (not be (odd) and (not (be (odd))))
}
assert(caught3.getMessage === "1 was odd")
val caught4 = intercept[TestFailedException] {
2 should (not be (even) and not be (even))
}
assert(caught4.getMessage === "2 was even")
val caught5 = intercept[TestFailedException] {
2 should (not be (even) and not (be (even)))
}
assert(caught5.getMessage === "2 was even")
val caught6 = intercept[TestFailedException] {
2 should (not be (even) and (not (be (even))))
}
assert(caught6.getMessage === "2 was even")
// first false
val caught7 = intercept[TestFailedException] {
1 should (not be (even) and not be (odd))
}
assert(caught7.getMessage === "1 was odd, but 1 was odd")
val caught8 = intercept[TestFailedException] {
1 should (not be (even) and not (be (odd)))
}
assert(caught8.getMessage === "1 was odd, but 1 was odd")
val caught9 = intercept[TestFailedException] {
1 should (not be (even) and (not (be (odd))))
}
assert(caught9.getMessage === "1 was odd, but 1 was odd")
val caught10 = intercept[TestFailedException] {
2 should (not be (odd) and not be (even))
}
assert(caught10.getMessage === "2 was even, but 2 was even")
val caught11 = intercept[TestFailedException] {
2 should (not be (odd) and not (be (even)))
}
assert(caught11.getMessage === "2 was even, but 2 was even")
val caught12 = intercept[TestFailedException] {
2 should (not be (odd) and (not (be (even))))
}
assert(caught12.getMessage === "2 was even, but 2 was even")
// second false
val caught13 = intercept[TestFailedException] {
1 should (not be (odd) and not be (even))
}
assert(caught13.getMessage === "1 was odd")
val caught14 = intercept[TestFailedException] {
1 should (not be (odd) and not (be (even)))
}
assert(caught14.getMessage === "1 was odd")
val caught15 = intercept[TestFailedException] {
1 should (not be (odd) and (not (be (even))))
}
assert(caught15.getMessage === "1 was odd")
val caught16 = intercept[TestFailedException] {
2 should (not be (even) and not be (odd))
}
assert(caught16.getMessage === "2 was even")
val caught17 = intercept[TestFailedException] {
2 should (not be (even) and not (be (odd)))
}
assert(caught17.getMessage === "2 was even")
val caught18 = intercept[TestFailedException] {
2 should (not be (even) and (not (be (odd))))
}
assert(caught18.getMessage === "2 was even")
}
it("should do nothing if at least one BeMatcher doesn't match, when used in a logical-or expression when used with not") {
// both false
2 should (not be (odd) or not be (odd))
2 should (not be (odd) or not (be (odd)))
2 should (not be (odd) or (not (be (odd))))
1 should (not be (even) or not be (even))
1 should (not be (even) or not (be (even)))
1 should (not be (even) or (not (be (even))))
// first false
1 should (not be (even) or not be (odd))
1 should (not be (even) or not (be (odd)))
1 should (not be (even) or (not (be (odd))))
2 should (not be (odd) or not be (even))
2 should (not be (odd) or not (be (even)))
2 should (not be (odd) or (not (be (even))))
// second false
1 should (not be (odd) or not be (even))
1 should (not be (odd) or not (be (even)))
1 should (not be (odd) or (not (be (even))))
2 should (not be (even) or not be (odd))
2 should (not be (even) or not (be (odd)))
2 should (not be (even) or (not (be (odd))))
}
it("should throw TestFailedException if both BeMatcher match, when used in a logical-or expression with not") {
val caught1 = intercept[TestFailedException] {
1 should (not be (odd) or not be (odd))
}
assert(caught1.getMessage === "1 was odd, and 1 was odd")
val caught2 = intercept[TestFailedException] {
1 should (not be (odd) or not (be (odd)))
}
assert(caught2.getMessage === "1 was odd, and 1 was odd")
val caught3 = intercept[TestFailedException] {
1 should (not be (odd) or (not (be (odd))))
}
assert(caught3.getMessage === "1 was odd, and 1 was odd")
val caught4 = intercept[TestFailedException] {
2 should (not be (even) or not be (even))
}
assert(caught4.getMessage === "2 was even, and 2 was even")
val caught5 = intercept[TestFailedException] {
2 should (not be (even) or not (be (even)))
}
assert(caught5.getMessage === "2 was even, and 2 was even")
val caught6 = intercept[TestFailedException] {
2 should (not be (even) or (not (be (even))))
}
assert(caught6.getMessage === "2 was even, and 2 was even")
}
it("should work when the types aren't exactly the same") {
class UnlikableMatcher extends BeMatcher[Any] {
def apply(left: Any): MatchResult = {
MatchResult(
false,
left.toString + " was not to my liking",
left.toString + " was to my liking"
)
}
}
val unlikable = new UnlikableMatcher
val likable = not (unlikable)
1 should be (likable)
2 should not be (unlikable)
val caught1 = intercept[TestFailedException] {
1 should be (unlikable)
}
assert(caught1.getMessage === "1 was not to my liking")
val caught2 = intercept[TestFailedException] {
"The dish" should not be (likable)
}
assert(caught2.getMessage === "The dish was not to my liking")
}
}
describe("the compose method on BeMatcher") {
it("should return another BeMatcher") {
val oddAsInt = odd compose { (s: String) => s.toInt }
"3" should be (oddAsInt)
"4" should not be (oddAsInt)
}
}
describe("A factory method on BeMatcher's companion object") {
it("should produce a be-matcher that executes the passed function when its apply is called") {
val f = { (s: String) => MatchResult(s.length < 3, "s was not less than 3", "s was less than 3") }
val lessThanThreeInLength = BeMatcher(f)
"" should be (lessThanThreeInLength)
"x" should be (lessThanThreeInLength)
"xx" should be (lessThanThreeInLength)
"xxx" should not be (lessThanThreeInLength)
"xxxx" should not be (lessThanThreeInLength)
}
}
}
|
JimCallahan/Graphics
|
external/scalatest/src/test/scala/org/scalatest/matchers/ShouldBeMatcherSpec.scala
|
Scala
|
apache-2.0
| 13,901
|
package com.Alteryx.sparkGLM
import org.scalatest.FunSuite
import org.apache.spark.sql.test.TestSQLContext
import com.Alteryx.testUtils.data.testData._
class utils$Test extends FunSuite {
val sqlCtx = TestSQLContext
test("matchCols") {
val df = modelMatrix(dummyDF)
val dfWithMissingCategory = modelMatrix(oneLessCategoryDF)
val testDF = utils.matchCols(df, dfWithMissingCategory)
assert(testDF.getClass.getName == "org.apache.spark.sql.DataFrame")
assert(testDF.columns.length == 4)
assert(testDF.dtypes.forall(_._2 == "DoubleType"))
val expectedCols = Array("intField", "strField_b", "strField_c", "numField")
assert(expectedCols.forall { elem =>
testDF.columns.contains(elem)
})
assert(testDF.select("strField_c").distinct.count == 1)
assert(testDF.select("strField_c").distinct.collect().apply(0).get(0) === 0)
}
}
|
dputler/sparkGLM
|
src/test/scala/com/Alteryx/sparkGLM/utils$Test.scala
|
Scala
|
apache-2.0
| 879
|
// Copyright (c) 2011-2015 ScalaMock Contributors (https://github.com/paulbutcher/ScalaMock/graphs/contributors)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.example.function
import org.scalamock.scalatest.MockFactory
import org.scalatest.FreeSpec
class HigherOrderFunctionsTest extends FreeSpec with MockFactory {
import language.postfixOps
"HigherOrderFunctionsTest" - {
"testMap" in {
val f = mockFunction[Int, String]
inSequence {
f expects (1) returning "one" once;
f expects (2) returning "two" once;
f expects (3) returning "three" once;
}
assertResult(Seq("one", "two", "three")) {
Seq(1, 2, 3) map f
}
}
"testRepeat" in {
def repeat(n: Int)(what: => Unit) {
for (i <- 0 until n)
what
}
val f = mockFunction[Unit]
f expects() repeated 4 times
repeat(4) {
f()
}
}
"testFoldLeft" in {
val f = mockFunction[String, Int, String]
inSequence {
f expects("initial", 0) returning "intermediate one" once;
f expects("intermediate one", 1) returning "intermediate two" once;
f expects("intermediate two", 2) returning "intermediate three" once;
f expects("intermediate three", 3) returning "final" once;
}
assertResult("final") {
Seq(0, 1, 2, 3).foldLeft("initial")(f)
}
}
}
}
|
hypertino/ScalaMock
|
examples/shared/src/test/scala/com/example/function/HigherOrderFunctionsTest.scala
|
Scala
|
mit
| 2,458
|
package scalaz.stream
import Cause._
import scala.annotation.tailrec
import scala.concurrent.duration.Duration
import scalaz.\\/._
import scalaz.concurrent.{Actor, Strategy, Task}
import scalaz.stream.Process._
import scalaz.stream.ReceiveY._
import scalaz.stream.Util._
import scalaz.stream.process1.Await1
import scalaz.{-\\/, Either3, Left3, Middle3, Right3, \\/, \\/-}
object wye {
/**
* A `Wye` which emits values from its right branch, but allows up to `n`
* elements from the left branch to enqueue unanswered before blocking
* on the right branch.
*/
def boundedQueue[I](n: Int): Wye[Any,I,I] =
yipWithL[Any,I,I](n)((i,i2) => i2) ++ tee.passR
/**
* After each input, dynamically determine whether to read from the left, right, or both,
* for the subsequent input, using the provided functions `f` and `g`. The returned
* `Wye` begins by reading from the left side and is left-biased--if a read of both branches
* returns a `These(x,y)`, it uses the signal generated by `f` for its next step.
*/
def dynamic[I,I2](f: I => wye.Request, g: I2 => wye.Request): Wye[I,I2,ReceiveY[I,I2]] = {
import scalaz.stream.wye.Request._
def go(signal: wye.Request): Wye[I,I2,ReceiveY[I,I2]] = signal match {
case L => receiveL { i => emit(ReceiveL(i)) ++ go(f(i)) }
case R => receiveR { i2 => emit(ReceiveR(i2)) ++ go(g(i2)) }
case Both => receiveBoth {
case t@ReceiveL(i) => emit(t) ++ go(f(i))
case t@ReceiveR(i2) => emit(t) ++ go(g(i2))
case HaltOne(rsn) => Halt(rsn)
}
}
go(L)
}
/**
* Invokes `dynamic` with `I == I2`, and produces a single `I` output. Output is
* left-biased: if a `These(i1,i2)` is emitted, this is translated to an
* `emitSeq(List(i1,i2))`.
*/
def dynamic1[I](f: I => wye.Request): Wye[I,I,I] =
dynamic(f, f).flatMap {
case ReceiveL(i) => emit(i)
case ReceiveR(i) => emit(i)
case HaltOne(rsn) => Halt(rsn)
}
/**
   * Nondeterministic interleave of both inputs. Emits values whenever either
* of the inputs is available.
*/
def either[I,I2]: Wye[I,I2,I \\/ I2] =
receiveBoth {
case ReceiveL(i) => emit(left(i)) ++ either
case ReceiveR(i) => emit(right(i)) ++ either
case HaltL(End) => awaitR[I2].map(right).repeat
case HaltR(End) => awaitL[I].map(left).repeat
case h@HaltOne(rsn) => Halt(rsn)
}
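  // Usage sketch (illustrative, not part of the original source): tagging two Task-based sources
  // with the side they came from, assuming an implicit scalaz.concurrent.Strategy is in scope
  // (e.g. Strategy.DefaultStrategy):
  //
  //   val lefts  = Process.range(0, 3).toSource          // emits 0, 1, 2 on the left
  //   val rights = Process("a", "b").toSource            // emits "a", "b" on the right
  //   val tagged: Process[Task, Int \\/ String] = lefts.wye(rights)(wye.either)
  //   // tagged.runLog.run is some nondeterministic interleaving, with left values
  //   // wrapped in -\\/ and right values wrapped in \\/-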
/**
   * Continuous wye that first reads from the left to get an `A`,
   * then, when no left value is available, reads from the right, echoing the last `A` received from the left.
   * Will halt once either side halts.
*/
def echoLeft[A]: Wye[A, Any, A] = {
def go(a: A): Wye[A, Any, A] =
receiveBoth {
case ReceiveL(l) => emit(l) ++ go(l)
case ReceiveR(_) => emit(a) ++ go(a)
case HaltOne(rsn) => Halt(rsn)
}
receiveL(s => emit(s) ++ go(s))
}
/**
* Let through the right branch as long as the left branch is `false`,
* listening asynchronously for the left branch to become `true`.
* This halts as soon as the right or left branch halts.
*/
def interrupt[I]: Wye[Boolean, I, I] =
receiveBoth {
case ReceiveR(i) => emit(i) ++ interrupt
case ReceiveL(kill) => if (kill) halt else interrupt
case HaltOne(e) => Halt(e)
}
/**
* Non-deterministic interleave of both inputs. Emits values whenever either
* of the inputs is available.
*
* Will terminate once both sides terminate.
*/
def merge[I]: Wye[I,I,I] =
receiveBoth {
case ReceiveL(i) => emit(i) ++ merge
case ReceiveR(i) => emit(i) ++ merge
case HaltL(End) => awaitR.repeat
case HaltR(End) => awaitL.repeat
case HaltOne(rsn) => Halt(rsn)
}
/**
* Like `merge`, but terminates whenever one side terminate.
*/
def mergeHaltBoth[I]: Wye[I,I,I] =
receiveBoth {
case ReceiveL(i) => emit(i) ++ mergeHaltBoth
case ReceiveR(i) => emit(i) ++ mergeHaltBoth
case HaltOne(rsn) => Halt(rsn)
}
/**
   * Like `merge`, but terminates whenever the left side terminates.
   * Use `flip` to reverse this for the right side.
*/
def mergeHaltL[I]: Wye[I,I,I] =
receiveBoth {
case ReceiveL(i) => emit(i) ++ mergeHaltL
case ReceiveR(i) => emit(i) ++ mergeHaltL
case HaltR(End) => awaitL.repeat
case HaltOne(rsn) => Halt(rsn)
}
/**
* Like `merge`, but terminates whenever right side terminates
*/
def mergeHaltR[I]: Wye[I,I,I] =
wye.flip(mergeHaltL)
/**
* A `Wye` which blocks on the right side when either
   * a) the age of the oldest unanswered element from the left side exceeds the given duration, or
* b) the number of unanswered elements from the left exceeds `maxSize`.
*/
def timedQueue[I](d: Duration, maxSize: Int = Int.MaxValue): Wye[Duration,I,I] = {
def go(q: Vector[Duration]): Wye[Duration,I,I] =
receiveBoth {
case ReceiveL(d2) =>
if (q.size >= maxSize || (d2 - q.headOption.getOrElse(d2) > d))
receiveR(i => emit(i) ++ go(q.drop(1)))
else
go(q :+ d2)
case ReceiveR(i) => emit(i) ++ (go(q.drop(1)))
case HaltOne(rsn) => Halt(rsn)
}
go(Vector())
}
/**
* `Wye` which repeatedly awaits both branches, emitting any values
* received from the right. Useful in conjunction with `connect`,
* for instance `src.connect(snk)(unboundedQueue)`
*/
def unboundedQueue[I]: Wye[Any,I,I] =
receiveBoth {
case ReceiveL(_) => halt
case ReceiveR(i) => emit(i) ++ unboundedQueue
case HaltOne(rsn) => Halt(rsn)
}
/** Nondeterministic version of `zip` which requests both sides in parallel. */
def yip[I,I2]: Wye[I,I2,(I,I2)] = yipWith((_,_))
/**
* Left-biased, buffered version of `yip`. Allows up to `n` elements to enqueue on the
* left unanswered before requiring a response from the right. If buffer is empty,
* always reads from the left.
*/
def yipL[I,I2](n: Int): Wye[I,I2,(I,I2)] =
yipWithL(n)((_,_))
/** Nondeterministic version of `zipWith` which requests both sides in parallel. */
def yipWith[I,I2,O](f: (I,I2) => O): Wye[I,I2,O] =
receiveBoth {
case ReceiveL(i) => receiveR(i2 => emit(f(i,i2)) ++ yipWith(f))
case ReceiveR(i2) => receiveL(i => emit(f(i,i2)) ++ yipWith(f))
case HaltOne(rsn) => Halt(rsn)
}
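  // Usage sketch (illustrative): zipping two sources while requesting both sides in parallel,
  // again assuming an implicit Strategy as above:
  //
  //   val nums  = Process.range(0, 3).toSource
  //   val chars = Process("a", "b", "c").toSource
  //   val pairs: Process[Task, (Int, String)] = nums.yip(chars)
  //   // pairs.runLog.run == Vector((0, "a"), (1, "b"), (2, "c"))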
/**
* Left-biased, buffered version of `yipWith`. Allows up to `n` elements to enqueue on the
* left unanswered before requiring a response from the right. If buffer is empty,
* always reads from the left.
*/
def yipWithL[I,O,O2](n: Int)(f: (I,O) => O2): Wye[I,O,O2] = {
def go(buf: Vector[I]): Wye[I,O,O2] =
if (buf.size > n) receiveR { o =>
emit(f(buf.head,o)) ++ go(buf.tail)
}
else if (buf.isEmpty) receiveL { i => go(buf :+ i) }
else receiveBoth {
case ReceiveL(i) => go(buf :+ i)
case ReceiveR(o) => emit(f(buf.head,o)) ++ go(buf.tail)
case HaltOne(rsn) => Halt(rsn)
}
go(Vector())
}
//////////////////////////////////////////////////////////////////////
// Helper combinator functions, useful when working with wye directly
//////////////////////////////////////////////////////////////////////
/**
* Transform the left input of the given `Wye` using a `Process1`.
*/
def attachL[I0,I,I2,O](p1: Process1[I0,I])(y: Wye[I,I2,O]): Wye[I0,I2,O] = {
y.step match {
case Step(emt@Emit(os), cont) =>
emt onHalt (rsn => attachL(p1)(Halt(rsn) +: cont))
case Step(AwaitL(rcv), cont) => p1.step match {
case Step(Emit(is), cont1) => suspend(attachL(cont1.continue)(feedL(is)(y)))
case Step(Await1(rcv1), cont1) =>
wye.receiveLOr(cause => attachL(rcv1(left(cause)) +: cont1)(y))(
i0 => attachL(p1.feed1(i0))(y)
)
case hlt@Halt(cause) =>
suspend(cause.fold(attachL(hlt)(disconnectL(Kill)(y).swallowKill))(
early => attachL(hlt)(disconnectL(early)(y))
))
}
case Step(AwaitR(rcv), cont) =>
wye.receiveROr(early => attachL(p1)(rcv(left(early)) +: cont))(
i2 => attachL(p1)(feed1R(i2)(y))
)
case Step(AwaitBoth(rcv), cont) => p1.step match {
case Step(Emit(is), cont1) => suspend(attachL(cont1.continue)(feedL(is)(y)))
case Step(Await1(rcv1), _) =>
wye.receiveBoth[I0,I2,O] {
case ReceiveL(i0) => attachL(p1.feed1(i0))(y)
case ReceiveR(i2) => attachL(p1)(feed1R(i2)(y))
case HaltL(cause) =>
cause.fold(attachL(p1)(disconnectL(Kill)(y).swallowKill))(
early => attachL(p1)(disconnectL(early)(y))
)
case HaltR(cause) =>
cause.fold( attachL(p1)(disconnectR(Kill)(y).swallowKill))(
early => attachL(p1)(disconnectR(early)(y))
)
}
case hlt@Halt(cause) =>
val ny = rcv(HaltL(cause)) +: cont
suspend(cause.fold(attachL(hlt)(disconnectL(Kill)(ny).swallowKill))(
early => attachL(hlt)(disconnectL(early)(ny))
))
}
case hlt@Halt(_) => hlt
}
}
/**
* Transform the right input of the given `Wye` using a `Process1`.
*/
def attachR[I,I1,I2,O](p: Process1[I1,I2])(w: Wye[I,I2,O]): Wye[I,I1,O] =
flip(attachL(p)(flip(w)))
/**
   * Transforms the wye so it stops listening on the left side.
   * Instead, all requests on the left side are converted to termination with `Kill`,
   * and the wye terminates once the right side terminates as well.
   * Transforms `AwaitBoth` to `AwaitR`.
   * Transforms `AwaitL` to termination with `End`.
*/
def detach1L[I,I2,O](y: Wye[I,I2,O]): Wye[I,I2,O] =
disconnectL(Kill)(y).swallowKill
/** right alternative of detach1L */
def detach1R[I,I2,O](y: Wye[I,I2,O]): Wye[I,I2,O] =
disconnectR(Kill)(y).swallowKill
/**
* Feed a single `ReceiveY` value to a `Wye`.
*/
def feed1[I,I2,O](r: ReceiveY[I,I2])(w: Wye[I,I2,O]): Wye[I,I2,O] =
r match {
case ReceiveL(i) => feed1L(i)(w)
case ReceiveR(i2) => feed1R(i2)(w)
case HaltL(cause) => cause.fold(detach1L(w))(e => disconnectL(e)(w))
case HaltR(cause) => cause.fold(detach1R(w))(e => disconnectR(e)(w))
}
/** Feed a single value to the left branch of a `Wye`. */
def feed1L[I,I2,O](i: I)(w: Wye[I,I2,O]): Wye[I,I2,O] =
feedL(Vector(i))(w)
/** Feed a single value to the right branch of a `Wye`. */
def feed1R[I,I2,O](i2: I2)(w: Wye[I,I2,O]): Wye[I,I2,O] =
feedR(Vector(i2))(w)
/** Feed a sequence of inputs to the left side of a `Wye`. */
def feedL[I,I2,O](is: Seq[I])(y: Wye[I,I2,O]): Wye[I,I2,O] = {
@tailrec
def go(in: Seq[I], out: Vector[Seq[O]], cur: Wye[I,I2,O]): Wye[I,I2,O] = {
if (in.nonEmpty) cur.step match {
case Step(Emit(os), cont) =>
go(in, out :+ os, cont.continue)
case Step(AwaitL(rcv), cont) =>
go(in.tail, out, rcv(right(in.head)) +: cont)
case Step(awt@AwaitR(rcv), cont) =>
emitAll(out.flatten) onHalt {
case End => awt.extend(p => feedL(in)(p +: cont))
case early: EarlyCause => feedL(in)(rcv(left(early)) +: cont)
}
case Step(AwaitBoth(rcv), cont) =>
go(in.tail, out, Try(rcv(ReceiveY.ReceiveL(in.head))) +: cont)
case Halt(rsn) =>
emitAll(out.flatten).causedBy(rsn)
} else cur.prepend(out.flatten)
}
go(is, Vector(), y)
}
/** Feed a sequence of inputs to the right side of a `Wye`. */
def feedR[I,I2,O](i2s: Seq[I2])(y: Wye[I,I2,O]): Wye[I,I2,O] = {
@tailrec
def go(in: Seq[I2], out: Vector[Seq[O]], cur: Wye[I,I2,O]): Wye[I,I2,O] = {
if (in.nonEmpty) cur.step match {
case Step(Emit(os), cont) =>
go(in, out :+ os, cont.continue)
case Step(awt@AwaitL(rcv), cont) =>
emitAll(out.flatten) onHalt {
case End => awt.extend(p => feedR(in)(p +: cont))
case early: EarlyCause => feedR(in)(rcv(left(early)) +: cont)
}
case Step(AwaitR(rcv), cont) =>
go(in.tail, out, rcv(right(in.head)) +: cont)
case Step(AwaitBoth(rcv), cont) =>
go(in.tail, out, rcv(ReceiveY.ReceiveR(in.head)) +: cont)
case Halt(rsn) =>
emitAll(out.flatten).causedBy(rsn)
} else cur.prepend(out.flatten)
}
go(i2s, Vector(), y)
}
/**
* Convert right requests to left requests and vice versa.
*/
def flip[I,I2,O](y: Wye[I,I2,O]): Wye[I2,I,O] = {
y.step match {
case Step(Emit(os), cont) => emitAll(os) onHalt (rsn => flip(Halt(rsn) +: cont))
case Step(awt@AwaitL(rcv), cont) =>
wye.receiveROr[I2, I, O](e=>flip(rcv(left(e)) +: cont))(
i => flip(rcv(right(i)) +: cont)
)
case Step(AwaitR(rcv), cont) =>
wye.receiveLOr[I2, I, O](e =>flip(rcv(left(e)) +: cont))(
i2 => flip(rcv(right(i2)) +: cont)
)
case Step(AwaitBoth(rcv), cont) =>
wye.receiveBoth[I2, I, O](ry => flip(rcv(ry.flip) +: cont))
case hlt@Halt(rsn) => hlt
}
}
/**
   * Signals to the wye that the left side terminated.
   * The reason for termination is `cause`; any `Left` requests will be terminated with `cause`.
   * The wye is switched to listen only on the right side, i.e. Await(Both) is converted to Await(R).
*/
def disconnectL[I, I2, O](cause: EarlyCause)(y: Wye[I, I2, O]): Wye[I, I2, O] = {
val ys = y.step
ys match {
case Step(emt@Emit(os), cont) =>
emt onHalt (rsn => disconnectL(cause)(Halt(rsn) +: cont))
case Step(AwaitL(rcv), cont) =>
suspend(disconnectL(cause)(rcv(left(cause)) +: cont))
case Step(AwaitR(rcv), cont) =>
wye.receiveROr[I,I2,O](e => disconnectL(cause)(rcv(left(e)) +: cont))(
i => disconnectL(cause)(rcv(right(i)) +: cont)
)
case Step(AwaitBoth(rcv), cont) =>
wye.receiveROr(e => disconnectL(cause)(rcv(HaltR(e)) +: cont))(
i2 => disconnectL(cause)(rcv(ReceiveR(i2)) +: cont)
)
case hlt@Halt(rsn) => Halt(rsn)
}
}
/**
* Right side alternative for `disconnectL`
*/
def disconnectR[I, I2, O](cause: EarlyCause)(y: Wye[I, I2, O]): Wye[I, I2, O] = {
val ys = y.step
ys match {
case Step(emt@Emit(os), cont) =>
emt onHalt (rsn => disconnectR(cause)(Halt(rsn) +: cont))
case Step(AwaitR(rcv), cont) =>
suspend(disconnectR(cause)(rcv(left(cause)) +: cont))
case Step(AwaitL(rcv), cont) =>
        wye.receiveLOr[I,I2,O](e => disconnectR(cause)(rcv(left(e)) +: cont))(
i => disconnectR(cause)(rcv(right(i)) +: cont)
)
case Step(AwaitBoth(rcv), cont) =>
wye.receiveLOr(e => disconnectR(cause)(rcv(HaltL(e)) +: cont))(
i => disconnectR(cause)(rcv(ReceiveL(i)) +: cont)
)
case hlt@Halt(rsn) => Halt(rsn)
}
}
/**
* Signals to wye that left side halted with `cause`. Wye will be fed with `HaltL(cause)`
* and will disconnect from Left side.
*/
def haltL[I, I2, O](cause: Cause)(y: Wye[I, I2, O]): Wye[I, I2, O] = {
val ys = y.step
ys match {
case Step(emt@Emit(os), cont) =>
emt onHalt (rsn => haltL(cause)(Halt(rsn) +: cont))
case Step(AwaitR(rcv), cont) =>
wye.receiveROr[I,I2,O](e => haltL(cause)(rcv(left(e)) +: cont))(
i => haltL(cause)(rcv(right(i)) +: cont)
)
case Step(AwaitL(rcv), cont) =>
cause.fold(haltL(Kill)(y).swallowKill)(e => disconnectL(e)(rcv(left(e)) +: cont))
case Step(AwaitBoth(rcv), cont) =>
val ny = rcv(HaltL(cause)) +: cont
cause.fold(detach1L(ny))(e => disconnectL(e)(ny))
case Halt(rsn) => Halt(rsn)
}
}
/**
* Right alternative for `haltL`
*/
def haltR[I, I2, O](cause: Cause)(y: Wye[I, I2, O]): Wye[I, I2, O] = {
val ys = y.step
ys match {
case Step(emt@Emit(os), cont) =>
emt onHalt (rsn => haltR(cause)(Halt(rsn) +: cont))
case Step(AwaitL(rcv), cont) =>
wye.receiveLOr[I,I2,O](e => haltR(cause)(rcv(left(e)) +: cont))(
i => haltR(cause)(rcv(right(i)) +: cont)
)
case Step(AwaitR(rcv), cont) =>
cause.fold(haltR(Kill)(y).swallowKill)(e => disconnectR(e)(rcv(left(e)) +: cont))
case Step(AwaitBoth(rcv), cont) =>
val ny = rcv(HaltR(cause)) +: cont
cause.fold(detach1R(ny))(e => disconnectR(e)(ny))
case Halt(rsn) => Halt(rsn)
}
}
////////////////////////////////////////////////////////////////////////
// Request Algebra
////////////////////////////////////////////////////////////////////////
/** Indicates required request side */
trait Request
object Request {
/** Left side */
case object L extends Request
/** Right side */
case object R extends Request
/** Both, or Any side */
case object Both extends Request
}
//////////////////////////////////////////////////////////////////////
// De-constructors and type helpers
//////////////////////////////////////////////////////////////////////
type WyeAwaitL[I,I2,O] = Await[Env[I,I2]#Y,Env[I,Any]#Is[I],O]
type WyeAwaitR[I,I2,O] = Await[Env[I,I2]#Y,Env[Any,I2]#T[I2],O]
type WyeAwaitBoth[I,I2,O] = Await[Env[I,I2]#Y,Env[I,I2]#Y[ReceiveY[I,I2]],O]
//correctly typed wye constructors
def receiveL[I,I2,O](rcv:I => Wye[I,I2,O]) : Wye[I,I2,O] =
await(L[I]: Env[I,I2]#Y[I])(rcv)
def receiveLOr[I,I2,O](fb: EarlyCause => Wye[I,I2,O])(rcv:I => Wye[I,I2,O]) : Wye[I,I2,O] =
awaitOr(L[I]: Env[I,I2]#Y[I])(fb)(rcv)
def receiveR[I,I2,O](rcv:I2 => Wye[I,I2,O]) : Wye[I,I2,O] =
await(R[I2]: Env[I,I2]#Y[I2])(rcv)
def receiveROr[I,I2,O](fb: EarlyCause => Wye[I,I2,O])(rcv:I2 => Wye[I,I2,O]) : Wye[I,I2,O] =
awaitOr(R[I2]: Env[I,I2]#Y[I2])(fb)(rcv)
def receiveBoth[I,I2,O](rcv:ReceiveY[I,I2] => Wye[I,I2,O]): Wye[I,I2,O] =
await(Both[I,I2]: Env[I,I2]#Y[ReceiveY[I,I2]])(rcv)
def receiveBothOr[I,I2,O](fb:EarlyCause => Wye[I,I2,O] )(rcv:ReceiveY[I,I2] => Wye[I,I2,O]): Wye[I,I2,O] =
awaitOr(Both[I,I2]: Env[I,I2]#Y[ReceiveY[I,I2]])(fb)(rcv)
object AwaitL {
def unapply[I,I2,O](self: WyeAwaitL[I,I2,O]):
Option[(EarlyCause \\/ I => Wye[I,I2,O])] = self match {
case Await(req,rcv,_)
if req.tag == 0 =>
Some((r : EarlyCause \\/ I) =>
Try(rcv.asInstanceOf[(EarlyCause \\/ I) => Trampoline[Wye[I,I2,O]]](r).run)
)
case _ => None
}
/** Like `AwaitL.unapply` only allows fast test that wye is awaiting on left side */
object is {
def unapply[I,I2,O](self: WyeAwaitL[I,I2,O]):Boolean = self match {
case Await(req,rcv,_) if req.tag == 0 => true
case _ => false
}
}
}
object AwaitR {
def unapply[I,I2,O](self: WyeAwaitR[I,I2,O]):
Option[(EarlyCause \\/ I2 => Wye[I,I2,O])] = self match {
case Await(req,rcv,_)
if req.tag == 1 => Some((r : EarlyCause \\/ I2) =>
Try(rcv.asInstanceOf[(EarlyCause \\/ I2) => Trampoline[Wye[I,I2,O]]](r).run)
)
case _ => None
}
/** Like `AwaitR.unapply` only allows fast test that wye is awaiting on right side */
object is {
def unapply[I,I2,O](self: WyeAwaitR[I,I2,O]):Boolean = self match {
case Await(req,rcv,_) if req.tag == 1 => true
case _ => false
}
}
}
object AwaitBoth {
def unapply[I,I2,O](self: WyeAwaitBoth[I,I2,O]):
Option[(ReceiveY[I,I2] => Wye[I,I2,O])] = self match {
case Await(req,rcv,_)
if req.tag == 2 => Some((r : ReceiveY[I,I2]) =>
Try(rcv.asInstanceOf[(EarlyCause \\/ ReceiveY[I,I2]) => Trampoline[Wye[I,I2,O]]](right(r)).run)
)
case _ => None
}
/** Like `AwaitBoth.unapply` only allows fast test that wye is awaiting on both sides */
object is {
def unapply[I,I2,O](self: WyeAwaitBoth[I,I2,O]):Boolean = self match {
case Await(req,rcv,_) if req.tag == 2 => true
case _ => false
}
}
}
//////////////////////////////////////////////////////////////////
// Implementation
//////////////////////////////////////////////////////////////////
/**
* Implementation of wye.
*
* @param pl left process
* @param pr right process
* @param y0 wye to control queueing and merging
* @param S strategy, preferably executor service based
* @tparam L Type of left process element
* @tparam R Type of right process elements
* @tparam O Output type of resulting process
* @return Process with merged elements.
*/
def apply[L, R, O](pl: Process[Task, L], pr: Process[Task, R])(y0: Wye[L, R, O])(implicit S: Strategy): Process[Task, O] =
suspend {
val Left = new Env[L, R].Left
val Right = new Env[L, R].Right
sealed trait M
case class Ready[A](side: Env[L, R]#Y[A], result: Cause \\/ (Seq[A], Cont[Task,A])) extends M
case class Get(cb: (Terminated \\/ Seq[O]) => Unit) extends M
case class DownDone(cb: (Throwable \\/ Unit) => Unit) extends M
type SideState[A] = Either3[Cause, EarlyCause => Unit, Cont[Task,A]]
//current state of the wye
var yy: Wye[L, R, O] = y0
//cb to be completed for `out` side
var out: Option[(Cause \\/ Seq[O]) => Unit] = None
var downDone= false
//forward referenced actor
var a: Actor[M] = null
//Bias for reading from either left or right.
var leftBias: Boolean = true
// states of both sides
// todo: resolve when we will get initially "kill"
def initial[A](p:Process[Task,A]) : Cont[Task,A] = {
val next: Cause => Trampoline[Process[Task, A]] = (c:Cause) => c match {
case End => Trampoline.done(p)
case e: EarlyCause => Trampoline.done(p.kill)
}
Cont(Vector(next))
}
var left: SideState[L] = Either3.right3(initial(pl))
var right: SideState[R] = Either3.right3(initial(pr))
// runs evaluation of next Seq[A] from either L/R
      // this signals to the actor the next step of either the left or right side
      // whenever that side is ready (has emitted a Seq[O] or is done).
def runSide[A](side: Env[L, R]#Y[A])(state: SideState[A]): SideState[A] = state match {
case Left3(rsn) => a ! Ready[A](side, -\\/(rsn)); state //just safety callback
        case Middle3(interrupt) => state //no-op, already awaiting the result //todo: don't we need a callback there as well?
case Right3(cont) => Either3.middle3(cont.continue stepAsync { res => a ! Ready[A](side, res) })
}
val runSideLeft = runSide(Left) _
val runSideRight = runSide(Right) _
      // kills the given side: either interrupts the execution
      // or creates the next step for the process and then runs the killed step.
      // note that, apart from returning the next state, this function performs the side effects
def kill[A](side: Env[L, R]#Y[A])(state: SideState[A]): SideState[A] = {
state match {
case Middle3(interrupt) =>
interrupt(Kill)
            Either3.middle3((_: Cause) => ()) //reset the interrupt so it won't get interrupted again
case Right3(cont) =>
(Halt(Kill) +: cont) stepAsync { res => a ! Ready[A](side, res) }
Either3.middle3((_: Cause) => ()) // no-op cleanup can't be interrupted
case left@Left3(_) =>
left
}
}
def killLeft = kill(Left) _
def killRight = kill(Right) _
//checks if given state is done
def isDone[A](state: SideState[A]) = state.leftOr(false)(_ => true)
// halts the open request if wye and L/R are done, and returns None
// otherwise returns cb
def haltIfDone(
y: Wye[L, R, O]
, l: SideState[L]
, r: SideState[R]
, cb: Option[(Cause \\/ Seq[O]) => Unit]
): Option[(Cause \\/ Seq[O]) => Unit] = {
cb match {
case Some(cb0) =>
if (isDone(l) && isDone(r)) {
y.unemit._2 match {
case Halt(rsn) =>
yy = Halt(rsn)
S(cb0(-\\/(rsn))); None
case other => cb
}
} else cb
case None => None
}
}
      // Consumes any output from either side and updates wye with it.
// note it signals if the other side has to be killed
def sideReady[A](
side: Env[L, R]#Y[A])(
result: Cause \\/ (Seq[A], Cont[Task,A])
): (SideState[A], (Cause \\/ Seq[A])) = {
result match {
case -\\/(rsn) => (Either3.left3(rsn), -\\/(rsn))
case \\/-((as, next)) => (Either3.right3(next), \\/-(as))
}
}
def sideReadyLeft(
result: Cause \\/ (Seq[L], Cont[Task,L])
, y: Wye[L, R, O]): Wye[L, R, O] = {
val (state, input) = sideReady(Left)(result)
left = state
input.fold(
rsn => wye.haltL(rsn)(y)
, ls => wye.feedL(ls)(y)
)
}
def sideReadyRight(
result: Cause \\/ (Seq[R], Cont[Task,R])
, y: Wye[L, R, O]): Wye[L, R, O] = {
val (state, input) = sideReady(Right)(result)
right = state
input.fold(
rsn => wye.haltR(rsn)(y)
, rs => wye.feedR(rs)(y)
)
}
// interprets a single step of wye.
// if wye is at emit, it tries to complete cb, if cb is nonEmpty
// if wye is at await runs either side
// if wye is halt kills either side
// returns next state of wye and callback
def runY(y: Wye[L, R, O], cb: Option[(Cause \\/ Seq[O]) => Unit])
: (Wye[L, R, O], Option[(Cause \\/ Seq[O]) => Unit]) = {
@tailrec
def go(cur: Wye[L, R, O]): (Wye[L, R, O], Option[(Cause \\/ Seq[O]) => Unit]) = {
cur.step match {
case Step(Emit(Seq()),cont) =>
go(cont.continue)
case Step(Emit(os), cont) =>
cb match {
case Some(cb0) => S(cb0(\\/-(os))); (cont.continue, None)
case None => (cur, None)
}
case Step(AwaitL.is(), _) =>
left = runSideLeft(left)
leftBias = false
(cur, cb)
case Step(AwaitR.is(), _) =>
right = runSideRight(right)
leftBias = true
(cur, cb)
case Step(AwaitBoth.is(), _) =>
if (leftBias) {left = runSideLeft(left); right = runSideRight(right) }
else {right = runSideRight(right); left = runSideLeft(left) }
leftBias = !leftBias
(cur, cb)
case Halt(_) =>
if (!isDone(left)) left = killLeft(left)
if (!isDone(right)) right = killRight(right)
(cur, cb)
}
}
go(y)
}
a = Actor[M]({ m =>
m match {
case Ready(side, result) =>
val (y, cb) =
if (side == Left) {
val resultL = result.asInstanceOf[(Cause \\/ (Seq[L], Cont[Task,L]))]
runY(sideReadyLeft(resultL, yy), out)
} else {
val resultR = result.asInstanceOf[(Cause \\/ (Seq[R], Cont[Task,R]))]
runY(sideReadyRight(resultR, yy), out)
}
yy = y
out = haltIfDone(y, left, right, cb)
case Get(cb0) =>
val (y, cb) = runY(yy, Some((r:Cause \\/ Seq[O]) => cb0(r.bimap(c=>Terminated(c),identity))))
yy = y
out = haltIfDone(y, left, right, cb)
case DownDone(cb0) =>
if (!yy.isHalt) {
val cb1 = (r: Cause \\/ Seq[O]) => cb0(\\/-(()))
if (!downDone) {
// complete old callback (from `Get` if defined)
out.foreach(cb => S(cb(-\\/(Kill))))
val (y,cb) = runY(disconnectL(Kill)(disconnectR(Kill)(yy)).kill, Some(cb1))
yy = y
out = cb
downDone = true
}
else {
// important that existing callback is NOT erased. doing so can cause process to hang on terminate
// first terminate is on interrupt, second when an awaited task completes
out = out match {
case Some(cb) => Some{(r: Cause \\/ Seq[O]) => cb(r); cb0(\\/-(()))}
case None => Some(cb1) // should never happen - if no cb, yy will be halt
}
}
out = haltIfDone(yy, left, right, out)
}
else S(cb0(\\/-(())))
}
})(S)
repeatEval(Task.async[Seq[O]] { cb => a ! Get(cb) }) onHalt { _.asHalt } flatMap emitAll onComplete eval_(Task.async[Unit](cb => a ! DownDone(cb)))
}
}
protected[stream] trait WyeOps[+O] {
val self: Process[Task, O]
/**
* Like `tee`, but we allow the `Wye` to read non-deterministically
* from both sides at once.
*
* If `y` is in the state of awaiting `Both`, this implementation
* will continue feeding `y` from either left or right side,
* until either it halts or _both_ sides halt.
*
* If `y` is in the state of awaiting `L`, and the left
* input has halted, we halt. Likewise for the right side.
*
* For as long as `y` permits it, this implementation will _always_
* feed it any leading `Emit` elements from either side before issuing
* new `F` requests. More sophisticated chunking and fairness
* policies do not belong here, but should be built into the `Wye`
* and/or its inputs.
*
* The strategy passed in must be stack-safe, otherwise this implementation
   * will throw a StackOverflowError. Preferably use one of the `Strategy.Executor(es)` based strategies.
*/
final def wye[O2, O3](p2: Process[Task, O2])(y: Wye[O, O2, O3])(implicit S: Strategy): Process[Task, O3] =
scalaz.stream.wye[O, O2, O3](self, p2)(y)(S)
/** Non-deterministic version of `zipWith`. Note this terminates whenever one of streams terminate */
def yipWith[O2,O3](p2: Process[Task,O2])(f: (O,O2) => O3)(implicit S:Strategy): Process[Task,O3] =
self.wye(p2)(scalaz.stream.wye.yipWith(f))
/** Non-deterministic version of `zip`. Note this terminates whenever one of streams terminate */
def yip[O2](p2: Process[Task,O2])(implicit S:Strategy): Process[Task,(O,O2)] =
self.wye(p2)(scalaz.stream.wye.yip)
/** Non-deterministic interleave of both streams.
* Emits values whenever either is defined. Note this terminates after BOTH sides terminate */
def merge[O2>:O](p2: Process[Task,O2])(implicit S:Strategy): Process[Task,O2] =
self.wye(p2)(scalaz.stream.wye.merge)
/** Non-deterministic interleave of both streams. Emits values whenever either is defined.
* Note this terminates after BOTH sides terminate */
def either[O2>:O,O3](p2: Process[Task,O3])(implicit S:Strategy): Process[Task,O2 \\/ O3] =
self.wye(p2)(scalaz.stream.wye.either)
}
/**
* This class provides infix syntax specific to `Wye`. We put these here
* rather than trying to cram them into `Process` itself using implicit
* equality witnesses. This doesn't work out so well due to variance
* issues.
*/
final class WyeSyntax[I, I2, O](val self: Wye[I, I2, O]) extends AnyVal {
/**
* Apply a `Wye` to two `Iterable` inputs.
*/
def apply(input: Iterable[I], input2: Iterable[I2]): IndexedSeq[O] = {
// this is probably rather slow
val src1 = Process.emitAll(input.toSeq).toSource
val src2 = Process.emitAll(input2.toSeq).toSource
src1.wye(src2)(self).runLog.run
}
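  // Illustrative example: because `apply` drives the wye over plain collections, a pure
  // combinator such as wye.merge can be exercised without any Task machinery, e.g.
  //   wye.merge[Int].apply(List(1, 2, 3), List(10, 20))
  // returns an IndexedSeq[Int] containing all five elements in some interleaved order
  // (assuming the default implicit Strategy is resolved from scalaz.concurrent.Strategy).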
/**
* Transform the left input of the given `Wye` using a `Process1`.
*/
def attachL[I0](f: Process1[I0, I]): Wye[I0, I2, O] =
scalaz.stream.wye.attachL(f)(self)
/**
* Transform the right input of the given `Wye` using a `Process1`.
*/
def attachR[I1](f: Process1[I1, I2]): Wye[I, I1, O] =
scalaz.stream.wye.attachR(f)(self)
/** Transform the left input to a `Wye`. */
def contramapL[I0](f: I0 => I): Wye[I0, I2, O] =
contramapL_(f)
/** Transform the right input to a `Wye`. */
def contramapR[I3](f: I3 => I2): Wye[I, I3, O] =
contramapR_(f)
private[stream] def contramapL_[I0](f: I0 => I): Wye[I0, I2, O] =
self.attachL(process1.lift(f))
private[stream] def contramapR_[I3](f: I3 => I2): Wye[I, I3, O] =
self.attachR(process1.lift(f))
/**
* Converting requests for the left input into normal termination.
* Note that `Both` requests are rewritten to fetch from the only input.
*/
def detach1L: Wye[I, I2, O] = scalaz.stream.wye.detach1L(self)
/**
* Converting requests for the right input into normal termination.
* Note that `Both` requests are rewritten to fetch from the only input.
*/
def detach1R: Wye[I, I2, O] = scalaz.stream.wye.detach1R(self)
}
|
djspiewak/scalaz-stream
|
src/main/scala/scalaz/stream/wye.scala
|
Scala
|
mit
| 33,124
|
package services.aws
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.services.sqs.AmazonSQSClient
import com.amazonaws.services.sqs.model.GetQueueUrlRequest
import play.api.{Logger, Play}
import collection.JavaConversions._
trait MigrationDependencyMonitor{
def isOverloaded : Boolean
def hangback
val resourceName : String
}
object DoNotOverload{
def pauseIfAnyOverloaded(monitors : List[MigrationDependencyMonitor]) =
monitors.foreach{ monitor =>
if(monitor.isOverloaded){
Logger.info(s"Hanging back for ${monitor.resourceName}")
monitor.hangback
}
}
def errorIfAnyOverloaded(monitors : List[MigrationDependencyMonitor]) =
monitors.foreach{ monitor =>
if(monitor.isOverloaded){
throw new IllegalStateException(s"${monitor.resourceName} is overloaded")
}
}
def apply[R](monitors : List[MigrationDependencyMonitor])(fn : () => R) : R = {
Logger.info(s"Checking monitors ${monitors} to ensure we are not overloading the sub systems...")
pauseIfAnyOverloaded(monitors)
errorIfAnyOverloaded(monitors)
Logger.info(s"...monitor check complete")
fn()
}
}
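// Illustrative sketch (hypothetical monitor, not part of the original file): DoNotOverload wraps a
// unit of migration work so it only runs when no monitored dependency is overloaded, e.g.:
//
//   val fakeMonitor = new MigrationDependencyMonitor {
//     override val resourceName = "fake queue"
//     override def isOverloaded = false
//     override def hangback: Unit = ()
//   }
//
//   val migrated: Int = DoNotOverload(List(fakeMonitor)) { () =>
//     42 // the real migration step would go here
//   }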
object Monitors {
private lazy val SqsEndpoint = Play.current.configuration.getString("aws.sqs.endpoint")
private lazy val SqsAwsKeyId = Play.current.configuration.getString("aws.sqs.awskeyid")
private lazy val SqsAwsSecretAccessKey = Play.current.configuration.getString("aws.sqs.secret")
private lazy val LifecycleQueueName = Play.current.configuration.getString("aws.sqs.lifecycle.queuename")
private lazy val MetadataQueueName = Play.current.configuration.getString("aws.sqs.metadata.queuename")
private lazy val monitors : List[MigrationDependencyMonitor] = {
for(sqsEndpoint <- SqsEndpoint;
sqsAwsKeyId <- SqsAwsKeyId;
sqsAwsSecretAccessKey <- SqsAwsSecretAccessKey)
yield {
new SqsQueueMonitor(sqsEndpoint, LifecycleQueueName.get, sqsAwsKeyId, sqsAwsSecretAccessKey) ::
new SqsQueueMonitor(sqsEndpoint, MetadataQueueName.get, sqsAwsKeyId, sqsAwsSecretAccessKey) ::
Nil
}
}.getOrElse(Nil)
def doNotOverloadSubsystems[R] = DoNotOverload[R](monitors) _
}
class SqsQueueMonitor( sqsEndpoint : String,
queueName: String,
awsAccessKey : String,
awsSecretKey : String) extends MigrationDependencyMonitor {
val MaxDepth = 1500
val HangBackTime = 10000;
private lazy val sqsClient = {
val awsCredentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey)
val client = new AmazonSQSClient(awsCredentials)
client.setEndpoint(sqsEndpoint)
client
}
private lazy val queueUrl = {
val queueNameLookupResponse = sqsClient.getQueueUrl(new GetQueueUrlRequest(queueName))
queueNameLookupResponse.getQueueUrl
}
def getQueueDepth : Int = {
val attributeResult = sqsClient.getQueueAttributes(queueUrl, List("ApproximateNumberOfMessages"))
Logger.debug(s"${resourceName} ${attributeResult}")
val value = attributeResult.getAttributes.get("ApproximateNumberOfMessages")
Logger.info(s"${resourceName} queue depth is ${value}")
value.toInt
}
def isOverloaded = getQueueDepth > MaxDepth
override def hangback {
Thread.sleep(HangBackTime);
}
override val resourceName: String = s"AWS SQS queue ${queueName}"
}
|
guardian/flex-content-migrator
|
app/services/aws/monitors.scala
|
Scala
|
mit
| 3,392
|
package org.webant.worker.console
import java.io.File
import java.util
import javax.management.remote.{JMXConnector, JMXConnectorFactory, JMXServiceURL}
import javax.management.{JMX, MBeanServerConnection, ObjectName}
import org.apache.commons.io.FileUtils
object WorkerJmxClient {
private var connector: JMXConnector = _
private var mbeanProxy: ConsoleOperationMBean = _
private var mbeanServer: MBeanServerConnection = _
def connect(): String = {
connect("localhost", "1099", "webant", "webant")
}
def connect(host: String, port: String): String = {
connect(host, port, "webant", "webant")
}
def connect(host: String, port: String, username: String, password: String): String = {
try {
val prop = new util.HashMap[String, AnyRef]
prop.put(JMXConnector.CREDENTIALS, Array[String](username, password))
val jmxServerName = "WebantWorkerConsole"
val jmxServiceUrl = s"service:jmx:rmi:///jndi/rmi://$host:$port/$jmxServerName"
val jmxWorkerObjectName = s"$jmxServerName:name=WorkerJmxConsole"
val url = new JMXServiceURL(jmxServiceUrl)
connector = JMXConnectorFactory.connect(url, prop)
mbeanServer = connector.getMBeanServerConnection
val mbeanName = new ObjectName(jmxWorkerObjectName)
mbeanServer.addNotificationListener(mbeanName, new WorkerNotificationListener, null, null)
mbeanProxy = JMX.newMBeanProxy(mbeanServer, mbeanName, classOf[ConsoleOperationMBean], true)
} catch {
case e: Exception =>
return s"connect to server $host:$port failed! error: ${e.getMessage}"
}
s"connect to server $host:$port success!"
}
def isConnected: Boolean = {
mbeanProxy != null
}
def list(): Array[Array[String]] = {
if (mbeanProxy == null)
return Array.empty
mbeanProxy.list().map(_.toArray).toArray
}
def list(taskId: String): Array[String] = {
if (mbeanProxy == null)
return Array.empty
mbeanProxy.list(taskId).toArray
}
def list(taskId: String, siteId: String): String = {
if (mbeanProxy == null)
return "lost connection! please connect to server first."
mbeanProxy.list(taskId, siteId)
}
def start(): Array[Array[String]] = {
if (mbeanProxy == null)
return Array.empty
mbeanProxy.start().map(_.toArray).toArray
}
def start(taskId: String): Array[String] = {
if (mbeanProxy == null)
return Array.empty
mbeanProxy.start(taskId).toArray
}
def start(taskId: String, siteId: String): String = {
if (mbeanProxy == null)
return "lost connection! please connect to server first."
mbeanProxy.start(taskId, siteId)
}
def stop(): Array[Array[String]] = {
if (mbeanProxy == null)
return Array.empty
mbeanProxy.stop().map(_.toArray).toArray
}
def stop(taskId: String): Array[String] = {
if (mbeanProxy == null)
return Array.empty
mbeanProxy.stop(taskId).toArray
}
def stop(taskId: String, siteId: String): String = {
if (mbeanProxy == null)
return "lost connection! please connect to server first."
mbeanProxy.stop(taskId, siteId)
}
def pause(): Array[Array[String]] = {
if (mbeanProxy == null)
return Array.empty
mbeanProxy.pause().map(_.toArray).toArray
}
def pause(taskId: String): Array[String] = {
if (mbeanProxy == null)
return Array.empty
mbeanProxy.pause(taskId).toArray
}
def pause(taskId: String, siteId: String): String = {
if (mbeanProxy == null)
return "lost connection! please connect to server first."
mbeanProxy.pause(taskId, siteId)
}
def reset(): Array[Array[String]] = {
if (mbeanProxy == null)
return Array.empty
mbeanProxy.reset().map(_.toArray).toArray
}
def reset(taskId: String): Array[String] = {
if (mbeanProxy == null)
return Array.empty
mbeanProxy.reset(taskId).toArray
}
def reset(taskId: String, siteId: String): String = {
if (mbeanProxy == null)
return "lost connection! please connect to server first."
mbeanProxy.reset(taskId, siteId)
}
def exit(): Array[Array[String]] = {
if (mbeanProxy == null)
return Array.empty
mbeanProxy.exit().map(_.toArray).toArray
}
def progress(): String = {
if (mbeanProxy == null)
return "lost connection! please connect to server first."
val progress = mbeanProxy.progress()
if (progress.total == 0) {
return "total progress 0%."
}
val gauge = f"${({progress.success} + {progress.fail}).toFloat * 100 / {progress.total}.toFloat}%2.2f"
s"total progress $gauge%. total: ${progress.total}. init: ${progress.init}. " +
s"pending: ${progress.pending}. success: ${progress.success}. fail: ${progress.fail}."
}
def progress(taskId: String): String = {
if (mbeanProxy == null)
return "lost connection! please connect to server first."
val progress = mbeanProxy.progress(taskId)
if (progress.total == 0) {
return s"task($taskId) progress 0%."
}
val gauge = f"${({progress.success} + {progress.fail}).toFloat * 100 / {progress.total}.toFloat}%2.2f"
s"task($taskId) progress $gauge%. total: ${progress.total}. init: ${progress.init}. " +
s"pending: ${progress.pending}. success: ${progress.success}. fail: ${progress.fail}."
}
def progress(taskId: String, siteId: String): String = {
if (mbeanProxy == null)
return "lost connection! please connect to server first."
val progress = mbeanProxy.progress(taskId, siteId)
if (progress.total == 0) {
return s"site($taskId, $siteId) progress 0%."
}
val gauge = f"${({progress.success} + {progress.fail}).toFloat * 100 / {progress.total}.toFloat}%2.2f"
s"site($taskId, $siteId) progress $gauge%. total: ${progress.total}. init: ${progress.init}. " +
s"pending: ${progress.pending}. success: ${progress.success}. fail: ${progress.fail}."
}
def submitTask(configPath: String): Boolean = {
if (mbeanProxy == null)
return false
val file = new File(configPath)
if (!file.exists() || !file.isFile) return false
val content = FileUtils.readFileToString(file, "UTF-8")
mbeanProxy.submitTask(content)
true
}
def submitSite(configPath: String): Boolean = {
if (mbeanProxy == null)
return false
val file = new File(configPath)
if (!file.exists() || !file.isFile) return false
val content = FileUtils.readFileToString(file, "UTF-8")
mbeanProxy.submitSite(content)
true
}
def shutdown(): String = {
if (mbeanProxy == null || connector == null)
return "lost connection! please connect to server first."
try {
mbeanProxy.exit()
connector.close()
} catch {
case e: Exception =>
return s"shutdown the webant worker server failed! error: ${e.getMessage}"
}
"shutdown the webant worker server success!"
}
}
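// Editor's usage sketch (not part of the original source). It assumes a worker is reachable on the
// default endpoint (localhost:1099) with the default "webant"/"webant" credentials wired into
// connect() above; the object name below is hypothetical.
object WorkerJmxClientExample {
  def main(args: Array[String]): Unit = {
    println(WorkerJmxClient.connect())                                   // "connect to server localhost:1099 success!" or an error text
    if (WorkerJmxClient.isConnected) {
      WorkerJmxClient.list().foreach(row => println(row.mkString(", "))) // task/site status rows
      println(WorkerJmxClient.progress())                                // e.g. "total progress 42.00%. total: ..."
      println(WorkerJmxClient.shutdown())                                // asks the worker to exit and closes the JMX connector
    }
  }
}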
|
sutine/webant
|
webant-worker/src/main/scala/org/webant/worker/console/WorkerJmxClient.scala
|
Scala
|
apache-2.0
| 6,907
|
package org.jetbrains.plugins.scala
package project.template
import java.io.File
import com.intellij.openapi.roots.libraries.NewLibraryConfiguration
import com.intellij.openapi.roots.ui.configuration.libraryEditor.LibraryEditor
import com.intellij.openapi.roots.{JavadocOrderRootType, OrderRootType}
import org.jetbrains.plugins.scala.project.ScalaLanguageLevel.Scala_2_10
import org.jetbrains.plugins.scala.project.template.Artifact.ScalaLibrary
import org.jetbrains.plugins.scala.project._
/**
* @author Pavel Fatin
*/
case class ScalaSdkDescriptor(version: Option[Version],
compilerFiles: Seq[File],
libraryFiles: Seq[File],
sourceFiles: Seq[File],
docFiles: Seq[File]) {
def createNewLibraryConfiguration() = {
val properties = new ScalaLibraryProperties()
properties.languageLevel = version.flatMap(ScalaLanguageLevel.from).getOrElse(ScalaLanguageLevel.Default)
properties.compilerClasspath = compilerFiles
val name = "scala-sdk-" + version.map(_.number).getOrElse("Unknown")
new NewLibraryConfiguration(name, ScalaLibraryType.instance, properties) {
override def addRoots(editor: LibraryEditor): Unit = {
libraryFiles.map(_.toLibraryRootURL).foreach(editor.addRoot(_, OrderRootType.CLASSES))
sourceFiles.map(_.toLibraryRootURL).foreach(editor.addRoot(_, OrderRootType.SOURCES))
docFiles.map(_.toLibraryRootURL).foreach(editor.addRoot(_, JavadocOrderRootType.getInstance))
if (sourceFiles.isEmpty && docFiles.isEmpty) {
editor.addRoot(ScalaSdk.documentationUrlFor(version), JavadocOrderRootType.getInstance)
}
}
}
}
}
object ScalaSdkDescriptor {
def from(components: Seq[Component]): Either[String, ScalaSdkDescriptor] = {
val (binaryComponents, sourceComponents, docComponents) = {
val componentsByKind = components.groupBy(_.kind)
(componentsByKind.getOrElse(Kind.Binaries, Seq.empty),
componentsByKind.getOrElse(Kind.Sources, Seq.empty),
componentsByKind.getOrElse(Kind.Docs, Seq.empty))
}
val reflectRequired = binaryComponents.exists { component =>
component.version.exists { version =>
ScalaLanguageLevel.from(version).exists(_ >= Scala_2_10)
}
}
val requiredBinaryArtifacts: Set[Artifact] =
if (reflectRequired) Set(Artifact.ScalaLibrary, Artifact.ScalaCompiler, Artifact.ScalaReflect)
else Set(Artifact.ScalaLibrary, Artifact.ScalaCompiler)
val existingBinaryArtifacts = binaryComponents.map(_.artifact).toSet
val missingBinaryArtifacts = requiredBinaryArtifacts -- existingBinaryArtifacts
if (missingBinaryArtifacts.isEmpty) {
val compilerBinaries = binaryComponents.filter(it => requiredBinaryArtifacts.contains(it.artifact))
val libraryArtifacts = Artifact.values - Artifact.ScalaCompiler
val libraryBinaries = binaryComponents.filter(it => libraryArtifacts.contains(it.artifact))
val librarySources = sourceComponents.filter(it => libraryArtifacts.contains(it.artifact))
val libraryDocs = docComponents.filter(it => libraryArtifacts.contains(it.artifact))
val libraryVersion = binaryComponents.find(_.artifact == ScalaLibrary).flatMap(_.version)
val descriptor = ScalaSdkDescriptor(
libraryVersion,
compilerBinaries.map(_.file),
libraryBinaries.map(_.file),
librarySources.map(_.file),
libraryDocs.map(_.file))
Right(descriptor)
} else {
Left("Not found: " + missingBinaryArtifacts.map(_.title).mkString(", "))
}
}
}
|
triggerNZ/intellij-scala
|
src/org/jetbrains/plugins/scala/project/template/ScalaSdkDescriptor.scala
|
Scala
|
apache-2.0
| 3,676
|
package com.twitter.finagle.memcached.unit.protocol.text.client
import com.twitter.finagle.memcached.protocol.text.client.AbstractCommandToBuf
import com.twitter.io.BufByteWriter
import java.nio.charset.StandardCharsets
import org.scalacheck.Gen
import org.scalatest.FunSuite
import org.scalatest.prop.GeneratorDrivenPropertyChecks
class CommandToBufTest extends FunSuite with GeneratorDrivenPropertyChecks {
test("`commandToBuf.lengthAsString` returns same value as `Integer.toString.length`") {
forAll(Gen.posNum[Int]) { i: Int =>
assert(AbstractCommandToBuf.lengthAsString(i) == i.toString.length)
}
}
test("`commandToBuf.writeDigits` produces same buffer as `BufByteWriter.writeString`") {
forAll(Gen.posNum[Int]) { i: Int =>
val bw1 = BufByteWriter.fixed(16)
val bw2 = BufByteWriter.fixed(16)
AbstractCommandToBuf.writeDigits(i, bw1)
bw2.writeString(i.toString, StandardCharsets.US_ASCII)
val buf1 = bw1.owned()
val buf2 = bw2.owned()
assert(buf1.equals(buf2))
}
}
}
|
mkhq/finagle
|
finagle-memcached/src/test/scala/com/twitter/finagle/memcached/unit/protocol/text/client/CommandToBufTest.scala
|
Scala
|
apache-2.0
| 1,048
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.alvsanand.sgc.ftp
/**
* This class represents the user credentials used to log into an FTP server.
* @param user The username.
* @param password The password.
*/
case class FTPCredentials(user: String, password: Option[String] = None) {
override def toString: String = s"FTPCredentials($user, ***)"
}
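// Editor's usage sketch (not part of the original source); the user names and password below are made up.
object FTPCredentialsExample extends App {
  val anonymous = FTPCredentials("anonymous")              // password omitted, defaults to None
  val secured = FTPCredentials("deploy", Some("s3cr3t"))   // password supplied
  println(secured)                                         // prints "FTPCredentials(deploy, ***)" - the password stays masked
}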
|
alvsanand/spark-generic-connector
|
sgc-ftp/src/main/scala/es/alvsanand/sgc/ftp/FTPCredentials.scala
|
Scala
|
apache-2.0
| 1,127
|
package amailp.intellij.robot.psi
import com.intellij.psi._
import com.intellij.lang.ASTNode
import com.intellij.extapi.psi.ASTWrapperPsiElement
import amailp.intellij.robot.findUsage.UsageFindable
import amailp.intellij.robot.psi.reference.KeywordToDefinitionReference
import amailp.intellij.robot.psi.utils.RobotPsiUtils
/**
* An instance of a keyword at the point where it is used
*/
class Keyword(node: ASTNode) extends ASTWrapperPsiElement(node) with RobotPsiUtils with UsageFindable {
override def getReference = new KeywordToDefinitionReference(this)
def getTextStrippedFromIgnored = {
for {
prefix <- Keyword.ignoredPrefixes
loweredPrefix = prefix.toLowerCase
if getText.toLowerCase.startsWith(loweredPrefix)
stripped = getText.toLowerCase.replaceFirst(loweredPrefix, "").trim
} yield stripped
}.headOption
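// Editor's note - illustrative behaviour of getTextStrippedFromIgnored (hypothetical keyword texts):
//   "Given user logs in" -> Some("user logs in")   (ignored prefix stripped, remainder lower-cased and trimmed)
//   "Click Element"      -> None                   (no ignored prefix, so the for-comprehension yields nothing)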
override val element: PsiElement = this
def setNewName(name: String): PsiElement = {
val dummyKeyword = createKeyword(name)
this.getNode.getTreeParent.replaceChild(this.getNode, dummyKeyword.getNode)
this
}
def getType: String = "Keyword"
def getDescriptiveName: String = getNode.getText
}
object Keyword {
val ignoredPrefixes = List("Given", "When", "Then", "And")
}
|
puhnastik/robot-plugin
|
src/main/scala/amailp/intellij/robot/psi/Keyword.scala
|
Scala
|
gpl-3.0
| 1,240
|
package com.thoughtworks.binding.regression
import com.thoughtworks.binding.Binding._
import com.thoughtworks.binding._
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
import scala.collection.mutable.ArrayBuffer
/** @author
* 杨博 (Yang Bo) <pop.atry@gmail.com>
*/
final class FlatMapRemove extends AnyFreeSpec with Matchers {
"removed source of a flatMap" in {
val data = Vars.empty[Either[String, String]]
val left = for {
s <- data
if s.isLeft
} yield s
val events = ArrayBuffer.empty[String]
val autoPrint = Binding {
if (left.length.bind > 0) {
events += "has left"
} else {
events += "does not has left"
}
}
assert(events.forall(_ == "does not has left"))
autoPrint.watch()
assert(events.forall(_ == "does not has left"))
data.value += Right("1")
assert(events.forall(_ == "does not has left"))
data.value += Right("2")
assert(events.forall(_ == "does not has left"))
data.value += Right("3")
assert(events.forall(_ == "does not has left"))
data.value(1) = Left("left 2")
assert(events.last == "has left")
data.value --= Seq(Left("left 2"))
assert(events.last == "does not has left")
}
}
|
ThoughtWorksInc/Binding.scala
|
Binding/src/test/scala/com/thoughtworks/binding/regression/FlatMapRemove.scala
|
Scala
|
mit
| 1,280
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.physical.batch
import org.apache.flink.table.api.TableException
import org.apache.flink.table.connector.sink.abilities.SupportsPartitioning
import org.apache.flink.table.filesystem.FileSystemOptions
import org.apache.flink.table.planner.plan.`trait`.FlinkRelDistribution
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalSink
import org.apache.flink.table.planner.plan.nodes.physical.batch.BatchPhysicalSink
import org.apache.flink.table.planner.plan.utils.FlinkRelOptUtil
import org.apache.flink.table.types.logical.RowType
import org.apache.calcite.plan.RelOptRule
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.{RelCollationTraitDef, RelCollations, RelNode}
import scala.collection.JavaConversions._
class BatchPhysicalSinkRule extends ConverterRule(
classOf[FlinkLogicalSink],
FlinkConventions.LOGICAL,
FlinkConventions.BATCH_PHYSICAL,
"BatchPhysicalSinkRule") {
def convert(rel: RelNode): RelNode = {
val sinkNode = rel.asInstanceOf[FlinkLogicalSink]
val newTrait = rel.getTraitSet.replace(FlinkConventions.BATCH_PHYSICAL)
var requiredTraitSet = sinkNode.getInput.getTraitSet.replace(FlinkConventions.BATCH_PHYSICAL)
if (sinkNode.catalogTable != null && sinkNode.catalogTable.isPartitioned) {
sinkNode.tableSink match {
case partitionSink: SupportsPartitioning =>
partitionSink.applyStaticPartition(sinkNode.staticPartitions)
val dynamicPartFields = sinkNode.catalogTable.getPartitionKeys
.filter(!sinkNode.staticPartitions.contains(_))
val fieldNames = sinkNode.catalogTable
.getSchema
.toPhysicalRowDataType
.getLogicalType.asInstanceOf[RowType]
.getFieldNames
if (dynamicPartFields.nonEmpty) {
val dynamicPartIndices =
dynamicPartFields.map(fieldNames.indexOf(_))
val shuffleEnable = sinkNode
.catalogTable
.getOptions
.get(FileSystemOptions.SINK_SHUFFLE_BY_PARTITION.key())
if (shuffleEnable != null && shuffleEnable.toBoolean) {
requiredTraitSet = requiredTraitSet.plus(
FlinkRelDistribution.hash(dynamicPartIndices
.map(Integer.valueOf), requireStrict = false))
}
if (partitionSink.requiresPartitionGrouping(true)) {
// we shouldn't do partition grouping if the input already defines collation
val relCollation = requiredTraitSet.getTrait(RelCollationTraitDef.INSTANCE)
if (relCollation == null || relCollation.getFieldCollations.isEmpty) {
// default to asc.
val fieldCollations = dynamicPartIndices.map(FlinkRelOptUtil.ofRelFieldCollation)
requiredTraitSet = requiredTraitSet.plus(RelCollations.of(fieldCollations: _*))
} else {
// tell sink not to expect grouping
partitionSink.requiresPartitionGrouping(false)
}
}
}
case _ => throw new TableException(
s"'${sinkNode.tableIdentifier.asSummaryString()}' is a partitioned table, " +
s"but the underlying [${sinkNode.tableSink.asSummaryString()}] DynamicTableSink " +
s"doesn't implement SupportsPartitioning interface.")
}
}
val newInput = RelOptRule.convert(sinkNode.getInput, requiredTraitSet)
new BatchPhysicalSink(
rel.getCluster,
newTrait,
newInput,
sinkNode.tableIdentifier,
sinkNode.catalogTable,
sinkNode.tableSink)
}
}
object BatchPhysicalSinkRule {
val INSTANCE = new BatchPhysicalSinkRule
}
|
kl0u/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/BatchPhysicalSinkRule.scala
|
Scala
|
apache-2.0
| 4,647
|
package org.zalando.jsonapi.json
import org.scalatest.{ MustMatchers, WordSpec }
import org.zalando.jsonapi.json.sprayjson.SprayJsonJsonapiProtocol
import org.zalando.jsonapi.model.JsonApiObject.StringValue
import org.zalando.jsonapi.model.RootObject.ResourceObject
import org.zalando.jsonapi.model.{ Attribute, Links, RootObject }
import org.zalando.jsonapi.{ JsonapiRootObjectWriter, _ }
import spray.json._
class ExampleSpec extends WordSpec with MustMatchers with SprayJsonJsonapiProtocol {
"JsonapiRootObject" when {
"using root object serializer" must {
"serialize accordingly" in {
val json =
"""
{
"data": {
"id": "42",
"type": "person",
"attributes": {
"name": "foobar"
}
}
}
""".stripMargin.parseJson
Person(42, "foobar").rootObject.toJson mustEqual json
}
}
"serialize accordingly with links object in data object" in {
implicit val personJsonapiRootObjectWriter: JsonapiRootObjectWriter[Person] = new JsonapiRootObjectWriter[Person] {
override def toJsonapi(person: Person) = {
RootObject(data = Some(ResourceObject(
`type` = "person",
id = Some(person.id.toString),
attributes = Some(List(
Attribute("name", StringValue(person.name))
)), links = Some(List(Links.Self("http://test.link/person/42"))))))
}
}
val json =
"""
{
"data": {
"id": "42",
"type": "person",
"attributes": {
"name": "foobar"
},
"links": {
"self": "http://test.link/person/42"
}
}
}
""".stripMargin.parseJson
Person(42, "foobar").rootObject.toJson mustEqual json
}
"serialize accordingly with links object in root object" in {
implicit val personJsonapiRootObjectWriter: JsonapiRootObjectWriter[Person] = new JsonapiRootObjectWriter[Person] {
override def toJsonapi(person: Person) = {
RootObject(data = Some(ResourceObject(
`type` = "person",
id = Some(person.id.toString),
attributes = Some(List(
Attribute("name", StringValue(person.name))
)))), links = Some(List(Links.Next("http://test.link/person/43"))))
}
}
val json =
"""
{
"data": {
"id": "42",
"type": "person",
"attributes": {
"name": "foobar"
}
},
"links": {
"next": "http://test.link/person/43"
}
}
""".stripMargin.parseJson
Person(42, "foobar").rootObject.toJson mustEqual json
}
}
}
|
RavelLaw/scala-jsonapi
|
src/test/scala/org/zalando/jsonapi/json/ExampleSpec.scala
|
Scala
|
mit
| 2,903
|
package com.twitter.finagle.filter
import com.twitter.concurrent.AsyncSemaphore
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.finagle._
import com.twitter.util.{Await, Future}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class RequestSemaphoreFilterTest extends FunSuite {
test("default config drops the queue tail") {
val neverFactory = ServiceFactory.const(new Service[Int, Int] {
def apply(req: Int) = Future.never
})
val stk: StackBuilder[ServiceFactory[Int, Int]] = new StackBuilder(
Stack.Leaf(Stack.Role("never"), neverFactory)
)
stk.push(RequestSemaphoreFilter.module[Int, Int])
val sr = new InMemoryStatsReceiver
val max = 10
val params = Stack.Params.empty +
RequestSemaphoreFilter.Param(Some(new AsyncSemaphore(max, 0))) +
param.Stats(sr)
val factory = stk.make(params)
for (_ <- 0 to max)
factory().flatMap(_(1))
assert(sr.gauges(Seq("request_concurrency"))() == max)
assert(sr.gauges(Seq("request_queue_size"))() == 0.0)
for (_ <- 0 to max)
factory().flatMap(_(1))
assert(sr.gauges(Seq("request_concurrency"))() == max)
assert(sr.gauges(Seq("request_queue_size"))() == 0.0)
}
test("mark dropped requests as rejected") {
val neverSvc = new Service[Int, Int] {
def apply(req: Int) = Future.never
}
val q = new AsyncSemaphore(1, 0)
val svc = new RequestSemaphoreFilter(q) andThen neverSvc
svc(1)
val f = intercept[Failure] { Await.result(svc(1)) }
assert(f.isFlagged(Failure.Restartable))
}
test("service failures are not wrapped as rejected") {
val exc = new Exception("app exc")
val neverSvc = new Service[Int, Int] {
def apply(req: Int) = Future.exception(exc)
}
val q = new AsyncSemaphore(1, 0)
val svc = new RequestSemaphoreFilter(q) andThen neverSvc
svc(1)
val e = intercept[Exception] { Await.result(svc(1)) }
assert(e == exc)
}
}
|
suls/finagle
|
finagle-core/src/test/scala/com/twitter/finagle/filter/RequestSemaphoreFilterTest.scala
|
Scala
|
apache-2.0
| 2,056
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.text.SimpleDateFormat
import java.time.{Duration, Period}
import java.util.Locale
import scala.collection.JavaConverters._
import org.apache.spark.SparkException
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.sql.types.DayTimeIntervalType.{DAY, HOUR, MINUTE, SECOND}
import org.apache.spark.sql.types.YearMonthIntervalType.{MONTH, YEAR}
class CsvFunctionsSuite extends QueryTest with SharedSparkSession {
import testImplicits._
test("from_csv with empty options") {
val df = Seq("1").toDS()
val schema = "a int"
checkAnswer(
df.select(from_csv($"value", lit(schema), Map[String, String]().asJava)),
Row(Row(1)) :: Nil)
}
test("from_csv with option") {
val df = Seq("26/08/2015 18:00").toDS()
val schema = new StructType().add("time", TimestampType)
val options = Map("timestampFormat" -> "dd/MM/yyyy HH:mm")
checkAnswer(
df.select(from_csv($"value", schema, options)),
Row(Row(java.sql.Timestamp.valueOf("2015-08-26 18:00:00.0"))))
}
test("checking the columnNameOfCorruptRecord option") {
val columnNameOfCorruptRecord = "_unparsed"
val df = Seq("0,2013-111-11 12:13:14", "1,1983-08-04").toDS()
val schema = new StructType().add("a", IntegerType).add("b", DateType)
val schemaWithCorrField1 = schema.add(columnNameOfCorruptRecord, StringType)
val df2 = df
.select(from_csv($"value", schemaWithCorrField1, Map(
"mode" -> "Permissive", "columnNameOfCorruptRecord" -> columnNameOfCorruptRecord)))
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "corrected") {
checkAnswer(df2, Seq(
Row(Row(0, null, "0,2013-111-11 12:13:14")),
Row(Row(1, java.sql.Date.valueOf("1983-08-04"), null))))
}
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "legacy") {
checkAnswer(df2, Seq(
Row(Row(0, java.sql.Date.valueOf("2022-03-11"), null)),
Row(Row(1, java.sql.Date.valueOf("1983-08-04"), null))))
}
withSQLConf(SQLConf.LEGACY_TIME_PARSER_POLICY.key -> "exception") {
val msg = intercept[SparkException] {
df2.collect()
}.getCause.getMessage
assert(msg.contains("Fail to parse"))
}
}
test("schema_of_csv - infers schemas") {
checkAnswer(
spark.range(1).select(schema_of_csv(lit("0.1,1"))),
Seq(Row("STRUCT<`_c0`: DOUBLE, `_c1`: INT>")))
checkAnswer(
spark.range(1).select(schema_of_csv("0.1,1")),
Seq(Row("STRUCT<`_c0`: DOUBLE, `_c1`: INT>")))
}
test("schema_of_csv - infers schemas using options") {
val df = spark.range(1)
.select(schema_of_csv(lit("0.1 1"), Map("sep" -> " ").asJava))
checkAnswer(df, Seq(Row("STRUCT<`_c0`: DOUBLE, `_c1`: INT>")))
}
test("to_csv - struct") {
val df = Seq(Tuple1(Tuple1(1))).toDF("a")
checkAnswer(df.select(to_csv($"a")), Row("1") :: Nil)
}
test("to_csv with option") {
val df = Seq(Tuple1(Tuple1(java.sql.Timestamp.valueOf("2015-08-26 18:00:00.0")))).toDF("a")
val options = Map("timestampFormat" -> "dd/MM/yyyy HH:mm").asJava
checkAnswer(df.select(to_csv($"a", options)), Row("26/08/2015 18:00") :: Nil)
}
test("from_csv invalid csv - check modes") {
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
val schema = new StructType()
.add("a", IntegerType)
.add("b", IntegerType)
.add("_unparsed", StringType)
val badRec = "\\""
val df = Seq(badRec, "2,12").toDS()
checkAnswer(
df.select(from_csv($"value", schema, Map("mode" -> "PERMISSIVE"))),
Row(Row(null, null, badRec)) :: Row(Row(2, 12, null)) :: Nil)
val exception1 = intercept[SparkException] {
df.select(from_csv($"value", schema, Map("mode" -> "FAILFAST"))).collect()
}.getMessage
assert(exception1.contains(
"Malformed records are detected in record parsing. Parse Mode: FAILFAST."))
val exception2 = intercept[SparkException] {
df.select(from_csv($"value", schema, Map("mode" -> "DROPMALFORMED")))
.collect()
}.getMessage
assert(exception2.contains(
"from_csv() doesn't support the DROPMALFORMED mode. " +
"Acceptable modes are PERMISSIVE and FAILFAST."))
}
}
test("from_csv uses DDL strings for defining a schema - java") {
val df = Seq("""1,"haa"""").toDS()
checkAnswer(
df.select(
from_csv($"value", lit("a INT, b STRING"), new java.util.HashMap[String, String]())),
Row(Row(1, "haa")) :: Nil)
}
test("roundtrip to_csv -> from_csv") {
val df = Seq(Tuple1(Tuple1(1)), Tuple1(null)).toDF("struct")
val schema = df.schema(0).dataType.asInstanceOf[StructType]
val options = Map.empty[String, String]
val readback = df.select(to_csv($"struct").as("csv"))
.select(from_csv($"csv", schema, options).as("struct"))
checkAnswer(df, readback)
}
test("roundtrip from_csv -> to_csv") {
val df = Seq(Some("1"), None).toDF("csv")
val schema = new StructType().add("a", IntegerType)
val options = Map.empty[String, String]
val readback = df.select(from_csv($"csv", schema, options).as("struct"))
.select(to_csv($"struct").as("csv"))
checkAnswer(df, readback)
}
test("infers schemas of a CSV string and pass to to from_csv") {
val in = Seq("""0.123456789,987654321,"San Francisco"""").toDS()
val options = Map.empty[String, String].asJava
val out = in.select(from_csv($"value", schema_of_csv("0.1,1,a"), options) as "parsed")
val expected = StructType(Seq(StructField(
"parsed",
StructType(Seq(
StructField("_c0", DoubleType, true),
StructField("_c1", IntegerType, true),
StructField("_c2", StringType, true))))))
assert(out.schema == expected)
}
test("Support to_csv in SQL") {
val df1 = Seq(Tuple1(Tuple1(1))).toDF("a")
checkAnswer(df1.selectExpr("to_csv(a)"), Row("1") :: Nil)
}
test("parse timestamps with locale") {
Seq("en-US", "ko-KR", "zh-CN", "ru-RU").foreach { langTag =>
val locale = Locale.forLanguageTag(langTag)
val ts = new SimpleDateFormat("dd/MM/yyyy HH:mm").parse("06/11/2018 18:00")
val timestampFormat = "dd MMM yyyy HH:mm"
val sdf = new SimpleDateFormat(timestampFormat, locale)
val input = Seq(s"""${sdf.format(ts)}""").toDS()
val options = Map("timestampFormat" -> timestampFormat, "locale" -> langTag)
val df = input.select(from_csv($"value", lit("time timestamp"), options.asJava))
checkAnswer(df, Row(Row(java.sql.Timestamp.valueOf("2018-11-06 18:00:00.0"))))
}
}
test("support foldable schema by from_csv") {
val options = Map[String, String]().asJava
val schema = concat_ws(",", lit("i int"), lit("s string"))
checkAnswer(
Seq("""1,"a"""").toDS().select(from_csv($"value", schema, options)),
Row(Row(1, "a")))
val errMsg = intercept[AnalysisException] {
Seq(("1", "i int")).toDF("csv", "schema")
.select(from_csv($"csv", $"schema", options)).collect()
}.getMessage
assert(errMsg.contains("Schema should be specified in DDL format as a string literal"))
val errMsg2 = intercept[AnalysisException] {
Seq("1").toDF("csv").select(from_csv($"csv", lit(1), options)).collect()
}.getMessage
assert(errMsg2.contains("The expression '1' is not a valid schema string"))
}
test("schema_of_csv - infers the schema of foldable CSV string") {
val input = concat_ws(",", lit(0.1), lit(1))
checkAnswer(
spark.range(1).select(schema_of_csv(input)),
Seq(Row("STRUCT<`_c0`: DOUBLE, `_c1`: INT>")))
}
test("optional datetime parser does not affect csv time formatting") {
val s = "2015-08-26 12:34:46"
def toDF(p: String): DataFrame = sql(
s"""
|SELECT
| to_csv(
| named_struct('time', timestamp'$s'), map('timestampFormat', "$p")
| )
| """.stripMargin)
checkAnswer(toDF("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"), toDF("yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]"))
}
test("SPARK-32968: Pruning csv field should not change result") {
Seq("true", "false").foreach { enabled =>
withSQLConf(SQLConf.CSV_EXPRESSION_OPTIMIZATION.key -> enabled) {
val df1 = sparkContext.parallelize(Seq("a,b")).toDF("csv")
.selectExpr("from_csv(csv, 'a string, b string', map('mode', 'failfast')) as parsed")
checkAnswer(df1.selectExpr("parsed.a"), Seq(Row("a")))
checkAnswer(df1.selectExpr("parsed.b"), Seq(Row("b")))
val df2 = sparkContext.parallelize(Seq("a,b")).toDF("csv")
.selectExpr("from_csv(csv, 'a string, b string') as parsed")
checkAnswer(df2.selectExpr("parsed.a"), Seq(Row("a")))
checkAnswer(df2.selectExpr("parsed.b"), Seq(Row("b")))
}
}
}
test("SPARK-32968: bad csv input with csv pruning optimization") {
Seq("true", "false").foreach { enabled =>
withSQLConf(SQLConf.CSV_EXPRESSION_OPTIMIZATION.key -> enabled) {
val df = sparkContext.parallelize(Seq("1,\\u0001\\u0000\\u0001234")).toDF("csv")
.selectExpr("from_csv(csv, 'a int, b int', map('mode', 'failfast')) as parsed")
val err1 = intercept[SparkException] {
df.selectExpr("parsed.a").collect
}
val err2 = intercept[SparkException] {
df.selectExpr("parsed.b").collect
}
assert(err1.getMessage.contains("Malformed records are detected in record parsing"))
assert(err2.getMessage.contains("Malformed records are detected in record parsing"))
}
}
}
test("SPARK-32968: csv pruning optimization with corrupt record field") {
Seq("true", "false").foreach { enabled =>
withSQLConf(SQLConf.CSV_EXPRESSION_OPTIMIZATION.key -> enabled) {
val df = sparkContext.parallelize(Seq("a,b,c,d")).toDF("csv")
.selectExpr("from_csv(csv, 'a string, b string, _corrupt_record string') as parsed")
.selectExpr("parsed._corrupt_record")
checkAnswer(df, Seq(Row("a,b,c,d")))
}
}
}
test("SPARK-35998: Make from_csv/to_csv to handle year-month intervals properly") {
val ymDF = Seq(Period.of(1, 2, 0)).toDF
Seq(
(YearMonthIntervalType(), "INTERVAL '1-2' YEAR TO MONTH", Period.of(1, 2, 0)),
(YearMonthIntervalType(YEAR), "INTERVAL '1' YEAR", Period.of(1, 0, 0)),
(YearMonthIntervalType(MONTH), "INTERVAL '14' MONTH", Period.of(1, 2, 0))
).foreach { case (toCsvDtype, toCsvExpected, fromCsvExpected) =>
val toCsvDF = ymDF.select(to_csv(struct($"value" cast toCsvDtype)) as "csv")
checkAnswer(toCsvDF, Row(toCsvExpected))
DataTypeTestUtils.yearMonthIntervalTypes.foreach { fromCsvDtype =>
val fromCsvDF = toCsvDF
.select(
from_csv(
$"csv",
StructType(StructField("a", fromCsvDtype) :: Nil),
Map.empty[String, String]) as "value")
.selectExpr("value.a")
if (toCsvDtype == fromCsvDtype) {
checkAnswer(fromCsvDF, Row(fromCsvExpected))
} else {
checkAnswer(fromCsvDF, Row(null))
}
}
}
}
test("SPARK-35999: Make from_csv/to_csv to handle day-time intervals properly") {
val dtDF = Seq(Duration.ofDays(1).plusHours(2).plusMinutes(3).plusSeconds(4)).toDF
Seq(
(DayTimeIntervalType(), "INTERVAL '1 02:03:04' DAY TO SECOND",
Duration.ofDays(1).plusHours(2).plusMinutes(3).plusSeconds(4)),
(DayTimeIntervalType(DAY, MINUTE), "INTERVAL '1 02:03' DAY TO MINUTE",
Duration.ofDays(1).plusHours(2).plusMinutes(3)),
(DayTimeIntervalType(DAY, HOUR), "INTERVAL '1 02' DAY TO HOUR",
Duration.ofDays(1).plusHours(2)),
(DayTimeIntervalType(DAY), "INTERVAL '1' DAY",
Duration.ofDays(1)),
(DayTimeIntervalType(HOUR, SECOND), "INTERVAL '26:03:04' HOUR TO SECOND",
Duration.ofHours(26).plusMinutes(3).plusSeconds(4)),
(DayTimeIntervalType(HOUR, MINUTE), "INTERVAL '26:03' HOUR TO MINUTE",
Duration.ofHours(26).plusMinutes(3)),
(DayTimeIntervalType(HOUR), "INTERVAL '26' HOUR",
Duration.ofHours(26)),
(DayTimeIntervalType(MINUTE, SECOND), "INTERVAL '1563:04' MINUTE TO SECOND",
Duration.ofMinutes(1563).plusSeconds(4)),
(DayTimeIntervalType(MINUTE), "INTERVAL '1563' MINUTE",
Duration.ofMinutes(1563)),
(DayTimeIntervalType(SECOND), "INTERVAL '93784' SECOND",
Duration.ofSeconds(93784))
).foreach { case (toCsvDtype, toCsvExpected, fromCsvExpected) =>
val toCsvDF = dtDF.select(to_csv(struct($"value" cast toCsvDtype)) as "csv")
checkAnswer(toCsvDF, Row(toCsvExpected))
DataTypeTestUtils.dayTimeIntervalTypes.foreach { fromCsvDtype =>
val fromCsvDF = toCsvDF
.select(
from_csv(
$"csv",
StructType(StructField("a", fromCsvDtype) :: Nil),
Map.empty[String, String]) as "value")
.selectExpr("value.a")
if (toCsvDtype == fromCsvDtype) {
checkAnswer(fromCsvDF, Row(fromCsvExpected))
} else {
checkAnswer(fromCsvDF, Row(null))
}
}
}
}
}
|
jiangxb1987/spark
|
sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala
|
Scala
|
apache-2.0
| 14,133
|
package org.apache.spark.mllib
import org.specs._
import org.specs.runner.{ConsoleRunner, JUnit4}
class MySpecTest extends JUnit4(MySpec)
//class MySpecSuite extends ScalaTestSuite(MySpec)
object MySpecRunner extends ConsoleRunner(MySpec)
object MySpec extends Specification {
"This wonderful system" should {
"save the world" in {
val list = Nil
list must beEmpty
}
}
}
|
yinmingyang/spark-ml
|
src/test/scala/org/apache/spark/mllib/MySpec.scala
|
Scala
|
apache-2.0
| 398
|
package org.jetbrains.plugins.scala.components
import java.io.{File, IOException}
import java.lang.reflect.Field
import java.net.URLEncoder
import java.util.concurrent.TimeUnit
import javax.swing.event.HyperlinkEvent
import com.intellij.ide.plugins._
import com.intellij.ide.util.PropertiesComponent
import com.intellij.notification._
import com.intellij.openapi.application.ex.ApplicationInfoEx
import com.intellij.openapi.application.impl.ApplicationInfoImpl
import com.intellij.openapi.application.{ApplicationInfo, ApplicationManager}
import com.intellij.openapi.diagnostic.Logger
import com.intellij.openapi.editor.EditorFactory
import com.intellij.openapi.editor.event.{DocumentAdapter, DocumentEvent}
import com.intellij.openapi.extensions.PluginId
import com.intellij.openapi.fileEditor.FileDocumentManager
import com.intellij.openapi.updateSettings.impl._
import com.intellij.openapi.util.{BuildNumber, JDOMUtil, SystemInfo}
import com.intellij.openapi.vfs.CharsetToolkit
import com.intellij.util.io.HttpRequests
import com.intellij.util.io.HttpRequests.Request
import org.jdom.JDOMException
import org.jetbrains.plugins.scala.ScalaFileType
import org.jetbrains.plugins.scala.settings.ScalaApplicationSettings
import org.jetbrains.plugins.scala.settings.ScalaApplicationSettings.pluginBranch._
import scala.xml.transform.{RewriteRule, RuleTransformer}
class InvalidRepoException(what: String) extends Exception(what)
object ScalaPluginUpdater {
private val LOG = Logger.getInstance(getClass)
def pluginDescriptor = ScalaPluginVersionVerifier.getPluginDescriptor
private val scalaPluginId = "1347"
val baseUrl = "https://plugins.jetbrains.com/plugins/%s/" + scalaPluginId
private var doneUpdating = false
// *_OLD versions are for the legacy repository format.
// We need to keep track of them to upgrade to the new repository format automatically.
// Remove eventually, once no significant number of users still have the older ones.
val CASSIOPEIA_OLD = "cassiopeia_old"
val FOURTEEN_ONE_OLD = "14.1_old"
val FOURTEEN_ONE = "14.1"
val knownVersions = Map(
CASSIOPEIA_OLD -> Map(
Release -> "DUMMY",
EAP -> "http://www.jetbrains.com/idea/plugins/scala-eap-cassiopeia.xml",
Nightly -> "http://www.jetbrains.com/idea/plugins/scala-nightly-cassiopeia.xml"
),
FOURTEEN_ONE_OLD -> Map(
Release -> "DUMMY",
EAP -> "http://www.jetbrains.com/idea/plugins/scala-eap-14.1.xml",
Nightly -> ""
),
FOURTEEN_ONE -> Map(
Release -> "DUMMY",
EAP -> baseUrl.format("eap"),
Nightly -> baseUrl.format("nightly")
)
)
val currentVersion = FOURTEEN_ONE
def currentRepo = knownVersions(currentVersion)
val updGroupId = "ScalaPluginUpdate"
val GROUP = new NotificationGroup(updGroupId, NotificationDisplayType.STICKY_BALLOON, true)
// save plugin version before patching to restore it when switching back
var savedPluginVersion = ""
private val updateListener = new DocumentAdapter() {
override def documentChanged(e: DocumentEvent) = {
val file = FileDocumentManager.getInstance().getFile(e.getDocument)
if (file != null && file.getFileType == ScalaFileType.SCALA_FILE_TYPE)
scheduleUpdate()
}
}
@throws(classOf[InvalidRepoException])
def doUpdatePluginHosts(branch: ScalaApplicationSettings.pluginBranch) = {
if(currentRepo(branch).isEmpty)
throw new InvalidRepoException(s"Branch $branch is unavailable for IDEA version $currentVersion")
// update hack - set plugin version to 0 when downgrading
// also unpatch it back if user changed mind about downgrading
if (getScalaPluginBranch.compareTo(branch) > 0) {
savedPluginVersion = pluginDescriptor.getVersion
patchPluginVersion("0.0.0")
} else if (savedPluginVersion.nonEmpty) {
patchPluginVersion(savedPluginVersion)
savedPluginVersion = ""
}
val updateSettings = UpdateSettings.getInstance()
updateSettings.getStoredPluginHosts.remove(currentRepo(EAP))
updateSettings.getStoredPluginHosts.remove(currentRepo(Nightly))
branch match {
case Release => // leave default plugin repository
case EAP => updateSettings.getStoredPluginHosts.add(currentRepo(EAP))
case Nightly => updateSettings.getStoredPluginHosts.add(currentRepo(Nightly))
}
}
@throws(classOf[InvalidRepoException])
def doUpdatePluginHostsAndCheck(branch: ScalaApplicationSettings.pluginBranch) = {
doUpdatePluginHosts(branch)
if(UpdateSettings.getInstance().isCheckNeeded) {
UpdateChecker.updateAndShowResult()
.doWhenDone(toRunnable(postCheckIdeaCompatibility(branch)))
}
}
def getScalaPluginBranch: ScalaApplicationSettings.pluginBranch = {
if (ScalaPluginUpdater.pluginIsEap) EAP
else if (ScalaPluginUpdater.pluginIsNightly) Nightly
else Release
}
def pluginIsEap = {
val updateSettings = UpdateSettings.getInstance()
updateSettings.getStoredPluginHosts.contains(currentRepo(EAP))
}
def pluginIsNightly = {
val updateSettings = UpdateSettings.getInstance()
updateSettings.getStoredPluginHosts.contains(currentRepo(Nightly))
}
def pluginIsRelease = !pluginIsEap && !pluginIsNightly
def uninstallPlugin() = {
val pluginId: PluginId = pluginDescriptor.getPluginId
pluginDescriptor.setDeleted(true)
try {
PluginInstaller.prepareToUninstall(pluginId)
val installedPlugins = InstalledPluginsState.getInstance().getInstalledPlugins
val pluginIdString: String = pluginId.getIdString
import scala.collection.JavaConversions._
while (installedPlugins.exists(_.getPluginId.getIdString == pluginIdString)) {
installedPlugins.remove(pluginIdString)
}
}
catch {
case e1: IOException => PluginManagerMain.LOG.error(e1)
}
}
def upgradeRepo() = {
val updateSettings = UpdateSettings.getInstance()
for {
(version, repo) <- knownVersions
if version != currentVersion
} {
if (updateSettings.getStoredPluginHosts.contains(repo(EAP))) {
updateSettings.getStoredPluginHosts.remove(repo(EAP))
if (!currentRepo(EAP).isEmpty)
updateSettings.getStoredPluginHosts.add(currentRepo(EAP))
}
if (updateSettings.getStoredPluginHosts.contains(repo(Nightly))) {
updateSettings.getStoredPluginHosts.remove(repo(Nightly))
if (!currentRepo(Nightly).isEmpty)
updateSettings.getStoredPluginHosts.add(currentRepo(Nightly))
else if (!currentRepo(EAP).isEmpty)
updateSettings.getStoredPluginHosts.add(currentRepo(EAP))
}
}
}
def postCheckIdeaCompatibility(branch: ScalaApplicationSettings.pluginBranch) = {
import scala.xml._
val infoImpl = ApplicationInfo.getInstance().asInstanceOf[ApplicationInfoImpl]
val localBuildNumber = infoImpl.getBuild
val url = branch match {
case Release => None
case EAP => Some(currentRepo(EAP))
case Nightly => Some(currentRepo(Nightly))
}
url.foreach(u => invokeLater {
try {
val resp = XML.load(u)
val text = ((resp \\\\ "idea-plugin").head \\ "idea-version" \\ "@since-build").text
val remoteBuildNumber = BuildNumber.fromString(text)
if (localBuildNumber.compareTo(remoteBuildNumber) < 0)
suggestIdeaUpdate(branch.toString, text)
}
catch {
case e: Throwable => LOG.info("Failed to check plugin compatibility", e)
}
})
}
def postCheckIdeaCompatibility(): Unit = postCheckIdeaCompatibility(getScalaPluginBranch)
private def suggestIdeaUpdate(branch: String, suggestedVersion: String) = {
val infoImpl = ApplicationInfo.getInstance().asInstanceOf[ApplicationInfoImpl]
val appSettings = ScalaApplicationSettings.getInstance()
def getPlatformUpdateResult = {
val a = ApplicationInfoEx.getInstanceEx.getUpdateUrls.getCheckingUrl
val info = HttpRequests.request(a).connect(new HttpRequests.RequestProcessor[Option[UpdatesInfo]] {
def process(request: HttpRequests.Request) = {
try { Some(new UpdatesInfo(JDOMUtil.loadDocument(request.getInputStream).detachRootElement)) }
catch { case e: JDOMException => LOG.info(e); None }
}
})
if(info.isDefined) {
val strategy = new UpdateStrategy(infoImpl.getMajorVersion.toInt, infoImpl.getBuild, info.get, UpdateSettings.getInstance())
Some(strategy.checkForUpdates())
} else None
}
def isUpToDatePlatform(result: CheckForUpdateResult) = result.getUpdatedChannel.getLatestBuild.getNumber.compareTo(infoImpl.getBuild) <= 0
def isBetaOrEAPPlatform = infoImpl.isEAP || infoImpl.isBetaOrRC
val notification = getPlatformUpdateResult match {
case Some(result) if isUpToDatePlatform(result) && !isBetaOrEAPPlatform && appSettings.ASK_PLATFORM_UPDATE => // platform is up to date - suggest eap
val message = s"Your IDEA is outdated to use with $branch branch.<br/>Would you like to switch IDEA channel to EAP?" +
s"""<p/><a href="Yes">Yes</a>\\n""" +
s"""<p/><a href="No">Not now</a>""" +
s"""<p/><a href="Ignore">Ignore this update</a>"""
Some(GROUP.createNotification(
"Scala Plugin Update Failed",
message,
NotificationType.WARNING,
new NotificationListener {
override def hyperlinkUpdate(notification: Notification, event: HyperlinkEvent): Unit = {
notification.expire()
event.getDescription match {
case "No" => // do nothing, will ask next time
case "Yes" => UpdateSettings.getInstance().setUpdateChannelType("eap")
case "Ignore" => appSettings.ASK_PLATFORM_UPDATE = false
}
}
}
))
case Some(result) => Some(GROUP.createNotification(
s"Your IDEA is outdated to use with Scala plugin $branch branch.<br/>" +
s"Please update IDEA to at least $suggestedVersion to use latest Scala plugin.",
NotificationType.WARNING)
)
case None => None
}
notification.foreach(Notifications.Bus.notify)
}
private def scheduleUpdate(): Unit = {
val key = "scala.last.updated"
val lastUpdateTime = PropertiesComponent.getInstance().getOrInitLong(key , 0)
EditorFactory.getInstance().getEventMulticaster.removeDocumentListener(updateListener)
if (lastUpdateTime == 0L || System.currentTimeMillis() - lastUpdateTime > TimeUnit.DAYS.toMillis(1)) {
ApplicationManager.getApplication.executeOnPooledThread(new Runnable {
override def run() = {
val buildNumber = ApplicationInfo.getInstance().getBuild.asString()
val pluginVersion = pluginDescriptor.getVersion
val os = URLEncoder.encode(SystemInfo.OS_NAME + " " + SystemInfo.OS_VERSION, CharsetToolkit.UTF8)
val uid = UpdateChecker.getInstallationUID(PropertiesComponent.getInstance())
val url = s"https://plugins.jetbrains.com/plugins/list?pluginId=$scalaPluginId&build=$buildNumber&pluginVersion=$pluginVersion&os=$os&uuid=$uid"
PropertiesComponent.getInstance().setValue(key, System.currentTimeMillis().toString)
doneUpdating = true
try {
HttpRequests.request(url).connect(new HttpRequests.RequestProcessor[Unit] {
override def process(request: Request) = JDOMUtil.load(request.getReader())
})
} catch {
case e: Throwable => LOG.warn(e)
}
}
})
}
}
def setupReporter(): Unit = {
if (ApplicationManager.getApplication.isUnitTestMode) return
import com.intellij.openapi.editor.EditorFactory
EditorFactory.getInstance().getEventMulticaster.addDocumentListener(updateListener)
}
// this hack uses fake plugin.xml deserialization to downgrade plugin version
// at least it's not using reflection
def patchPluginVersion(newVersion: String) = {
import scala.xml._
val versionPatcher = new RewriteRule {
override def transform(n: Node): NodeSeq = n match {
case <version>{_}</version> => <version>{newVersion}</version>
case <include/> => NodeSeq.Empty // relative path includes break temp file parsing
case other => other
}
}
val stream = getClass.getClassLoader.getResource("META-INF/plugin.xml").openStream()
val document = new RuleTransformer(versionPatcher).transform(XML.load(stream))
val tempFile = File.createTempFile("plugin", "xml")
XML.save(tempFile.getAbsolutePath, document.head)
pluginDescriptor.readExternal(tempFile.toURI.toURL)
tempFile.delete()
}
@deprecated
def patchPluginVersionReflection() = {
// crime of reflection goes below - workaround until force updating is available
try {
val hack: Field = classOf[IdeaPluginDescriptorImpl].getDeclaredField("myVersion")
hack.setAccessible(true)
hack.set(pluginDescriptor, "0.0.0")
}
catch {
case _: NoSuchFieldException | _: IllegalAccessException =>
Notifications.Bus.notify(new Notification(updGroupId, "Scala Plugin Update", "Please remove and reinstall Scala plugin to finish downgrading", NotificationType.INFORMATION))
}
}
def askUpdatePluginBranch(): Unit = {
val infoImpl = ApplicationInfo.getInstance().asInstanceOf[ApplicationInfoImpl]
val applicationSettings = ScalaApplicationSettings.getInstance()
if ((infoImpl.isEAP || infoImpl.isBetaOrRC)
&& applicationSettings.ASK_USE_LATEST_PLUGIN_BUILDS
&& ScalaPluginUpdater.pluginIsRelease) {
val message = "Please select Scala plugin update channel:" +
s"""<p/><a href="EAP">EAP</a>\\n""" +
s"""<p/><a href="Release">Release</a>"""
val notification = new Notification(updGroupId, "Scala Plugin Update", message, NotificationType.INFORMATION, new NotificationListener {
def hyperlinkUpdate(notification: Notification, event: HyperlinkEvent) {
notification.expire()
applicationSettings.ASK_USE_LATEST_PLUGIN_BUILDS = false
event.getDescription match {
case "EAP" => doUpdatePluginHostsAndCheck(EAP)
case "Nightly" => doUpdatePluginHostsAndCheck(Nightly)
case "Release" => doUpdatePluginHostsAndCheck(Release)
case _ => applicationSettings.ASK_USE_LATEST_PLUGIN_BUILDS = true
}
}
})
Notifications.Bus.notify(notification)
}
}
def invokeLater(f: => Unit) = ApplicationManager.getApplication.executeOnPooledThread(toRunnable(f))
def toRunnable(f: => Unit) = new Runnable { override def run(): Unit = f }
}
|
advancedxy/intellij-scala
|
src/org/jetbrains/plugins/scala/components/ScalaPluginUpdater.scala
|
Scala
|
apache-2.0
| 14,713
|
import java.io.{File, FileOutputStream}
import scala.tools.partest.DirectTest
import scala.tools.partest.nest.StreamCapture
import scala.tools.asm
import asm.{ClassWriter, Handle, Opcodes}
import Opcodes._
// This test ensures that we can read JDK 7 (classfile format 51) files, including those
// with invokeDynamic instructions and associated constant pool entries.
// To do that it first uses ASM to generate a class called DynamicInvoker. Then
// it runs a normal compile on the source in the 'code' field that refers to
// DynamicInvoker. Any failure will be dumped to stdout.
//
// By its nature the test can only work on JDK 7+ because under JDK 6 some of the
// classes referred to by DynamicInvoker won't be available and DynamicInvoker won't
// verify. So the test includes a version check that short-circuits the whole test
// on JDK 6
object Test extends DirectTest {
override def extraSettings: String = s"-opt:l:inline -opt-inline-from:** -usejavacp -cp ${testOutput.path}"
def generateClass(): Unit = {
val invokerClassName = "DynamicInvoker"
val bootstrapMethodName = "bootstrap"
val bootStrapMethodType = "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/CallSite;"
val targetMethodName = "target"
val targetMethodType = "()Ljava/lang/String;"
val cw = new ClassWriter(0)
cw.visit(V1_7, ACC_PUBLIC + ACC_SUPER, invokerClassName, null, "java/lang/Object", null)
val constructor = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null)
constructor.visitCode()
constructor.visitVarInsn(ALOAD, 0)
constructor.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false)
constructor.visitInsn(RETURN)
constructor.visitMaxs(1, 1)
constructor.visitEnd()
val target = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, targetMethodName, targetMethodType, null, null)
target.visitCode()
target.visitLdcInsn("hello")
target.visitInsn(ARETURN)
target.visitMaxs(1, 1)
target.visitEnd()
val bootstrap = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, bootstrapMethodName, bootStrapMethodType, null, null)
bootstrap.visitCode()
// val lookup = MethodHandles.lookup();
bootstrap.visitMethodInsn(INVOKESTATIC, "java/lang/invoke/MethodHandles", "lookup", "()Ljava/lang/invoke/MethodHandles$Lookup;", false)
bootstrap.visitVarInsn(ASTORE, 3) // lookup
// val clazz = lookup.lookupClass();
bootstrap.visitVarInsn(ALOAD, 3) // lookup
bootstrap.visitMethodInsn(INVOKEVIRTUAL, "java/lang/invoke/MethodHandles$Lookup", "lookupClass", "()Ljava/lang/Class;", false)
bootstrap.visitVarInsn(ASTORE, 4) // clazz
// val methodType = MethodType.fromMethodDescriptorString("()Ljava/lang/String;", clazz.getClassLoader())
bootstrap.visitLdcInsn("()Ljava/lang/String;")
bootstrap.visitVarInsn(ALOAD, 4) // clazz
bootstrap.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Class", "getClassLoader", "()Ljava/lang/ClassLoader;", false)
bootstrap.visitMethodInsn(INVOKESTATIC, "java/lang/invoke/MethodType", "fromMethodDescriptorString", "(Ljava/lang/String;Ljava/lang/ClassLoader;)Ljava/lang/invoke/MethodType;", false)
bootstrap.visitVarInsn(ASTORE, 5) // methodType
// val methodHandle = lookup.findStatic(thisClass, "target", methodType)
bootstrap.visitVarInsn(ALOAD, 3) // lookup
bootstrap.visitVarInsn(ALOAD, 4) // clazz
bootstrap.visitLdcInsn("target")
bootstrap.visitVarInsn(ALOAD, 5) // methodType
bootstrap.visitMethodInsn(INVOKEVIRTUAL, "java/lang/invoke/MethodHandles$Lookup", "findStatic", "(Ljava/lang/Class;Ljava/lang/String;Ljava/lang/invoke/MethodType;)Ljava/lang/invoke/MethodHandle;", false)
bootstrap.visitVarInsn(ASTORE, 6) // methodHandle
// new ConstantCallSite(methodHandle)
bootstrap.visitTypeInsn(NEW, "java/lang/invoke/ConstantCallSite")
bootstrap.visitInsn(DUP)
bootstrap.visitVarInsn(ALOAD, 6) // methodHandle
bootstrap.visitMethodInsn(INVOKESPECIAL, "java/lang/invoke/ConstantCallSite", "<init>", "(Ljava/lang/invoke/MethodHandle;)V", false)
bootstrap.visitInsn(ARETURN)
bootstrap.visitMaxs(4,7)
bootstrap.visitEnd()
val test = cw.visitMethod(ACC_PUBLIC + ACC_FINAL, "test", s"()Ljava/lang/String;", null, null)
test.visitCode()
val bootstrapHandle = new Handle(H_INVOKESTATIC, invokerClassName, bootstrapMethodName, bootStrapMethodType, /* itf = */ false)
test.visitInvokeDynamicInsn("invoke", targetMethodType, bootstrapHandle)
test.visitInsn(ARETURN)
test.visitMaxs(1, 1)
test.visitEnd()
cw.visitEnd()
val bytes = cw.toByteArray()
val fos = new FileOutputStream(new File(s"${testOutput.path}/$invokerClassName.class"))
try
fos write bytes
finally
fos.close()
}
def code =
"""
object Driver {
val invoker = new DynamicInvoker()
println(invoker.test())
}
"""
override def show(): Unit = StreamCapture.redirErr {
generateClass()
compile()
}
}
|
lrytz/scala
|
test/files/run/classfile-format-51.scala
|
Scala
|
apache-2.0
| 4,983
|
object Test1 {
trait T[A]
def foo[S[_], A](using ev: T[A] ?=> T[S[A]]): Unit = ()
implicit def bar[A](using ev: T[A]): T[List[A]] = ???
foo[List, Int]
}
object Test2 {
trait T
trait S
def foo(using ev: T ?=> S): Unit = ()
implicit def bar(using ev: T): S = ???
foo
}
|
som-snytt/dotty
|
tests/pos/i4725.scala
|
Scala
|
apache-2.0
| 290
|
package com.yetu.play.authenticator.utils
import java.util.UUID
import com.google.inject.{AbstractModule, Guice}
import com.google.inject.util.Modules
import com.mohiva.play.silhouette.api.{LoginInfo, Environment}
import com.mohiva.play.silhouette.impl.authenticators.SessionAuthenticator
import com.mohiva.play.silhouette.impl.providers.OAuth2Info
import com.mohiva.play.silhouette.test.FakeEnvironment
import com.yetu.play.authenticator.AuthenticatorGlobal
import com.yetu.play.authenticator.controllers.{ApplicationController, SocialAuthController}
import com.yetu.play.authenticator.models.User
import com.yetu.play.authenticator.models.daos.{UserDAOImpl, OAuth2InfoDAO}
import net.codingwell.scalaguice.ScalaModule
import com.yetu.play.authenticator.utils.di.SilhouetteModule
import FakeGlobal._
import play.api.Application
import play.api.mvc.RequestHeader
import play.api.mvc.Handler
/**
* Provides a fake global to override the Guice injector.
*/
class FakeGlobal extends AuthenticatorGlobal {
/**
* Overrides the Guice injector.
*/
override val injector = Guice.createInjector(Modules.`override`(new SilhouetteModule).`with`(new FakeModule))
val oauth2InfoDao = injector.getInstance[OAuth2InfoDAO](classOf[OAuth2InfoDAO])
val userDao = injector.getInstance[UserDAOImpl](classOf[UserDAOImpl])
oauth2InfoDao.save(FakeGlobal.identity.loginInfo, FakeGlobal.oauth2Info)
userDao.save(identity)
/**
* A fake Guice module.
*/
class FakeModule extends AbstractModule with ScalaModule {
def configure() = {
bind[Environment[User, SessionAuthenticator]].toInstance(env)
}
}
override def onStart(app: Application): Unit = {}
val socialAuthController = injector.getInstance[SocialAuthController](classOf[SocialAuthController])
val applicationController = injector.getInstance[ApplicationController](classOf[ApplicationController])
override def onRouteRequest(req: RequestHeader): Option[Handler] = {
(req.method, req.path) match {
case ("GET", FakeGlobal.routeHelloTest) => Some(applicationController.hello)
case ("GET", FakeGlobal.routeSignOut) => Some(applicationController.signOut)
case ("GET", FakeGlobal.routeAuthenticate) => Some(socialAuthController.authenticate("yetu"))
case ("POST", FakeGlobal.routeApiLogout) => Some(applicationController.apiLogout)
case _ => None
}
}
}
object FakeGlobal {
/**
* An identity.
*/
val identity = User(
userUUID = UUID.randomUUID(),
loginInfo = LoginInfo("provider", "user@user.com"),
firstName = None,
lastName = None,
fullName = None,
email = None,
avatarURL = None
)
val oauth2Info = OAuth2Info("random_access_token")
val routeHelloTest = "/helloTestRoute"
val routeSignOut = "/signOut"
val routeAuthenticate = "/authenticate/yetu"
val routeApiLogout = "/api/logout"
/**
* A Silhouette fake environment.
*/
implicit val env = FakeEnvironment[User, SessionAuthenticator](Seq(identity.loginInfo -> identity))
}
|
yetu/yetu-play-authenticator
|
test/com/yetu/play/authenticator/utils/FakeGlobal.scala
|
Scala
|
apache-2.0
| 3,039
|
/**
* Ephedra Food Alerts
* Copyright (C) 2013-2014 Philippe Sam-Long aka pulsation
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package eu.pulsation.ephedra
import android.content.{BroadcastReceiver, Intent, Context}
class EphedraBootReceiver extends BroadcastReceiver {
override def onReceive(context: Context, intent: Intent) {
val ephedraAlarmHelper = new AlarmHelper(context)
ephedraAlarmHelper.startAlarm()
}
}
|
pulsation/ephedra-android
|
src/eu/pulsation/ephedra/AlertNotifications/EphedraBootReceiver.scala
|
Scala
|
gpl-3.0
| 1,054
|
package chandu0101.scalajs.react.components.demo.components.materialui.svgicons
import chandu0101.scalajs.react.components.materialui.MuiSvgIcon
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.all.svg._
import japgolly.scalajs.react.vdom.prefix_<^._
object ActionHome {
val component = ReactComponentB[Unit]("ActionHome")
.render(P => {
MuiSvgIcon()(
path(^.key := "acg", d := "M10 20v-6h4v6h5v-8h3L12 3 2 12h3v8z")
)
}).buildU
def apply() = component()
}
|
coreyauger/scalajs-react-components
|
demo/src/main/scala/chandu0101/scalajs/react/components/demo/components/materialui/svgicons/ActionHome.scala
|
Scala
|
apache-2.0
| 512
|
package models
import play.api.libs.json._
case class Person(name: String, surname: String, datatype: String = "person")
object Persons {
implicit val fmt = Json.format[Person]
}
|
ReactiveCouchbase/ReactiveCouchbase-play
|
samples/scala/n1ql/app/models/Person.scala
|
Scala
|
apache-2.0
| 184
|
/*
* ____ ____ _____ ____ ___ ____
* | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog
package accounts
import com.precog.common.Path
import com.precog.common.accounts._
import com.precog.common.security._
import com.precog.util.PrecogUnit
import blueeyes.json._
import org.joda.time.DateTime
import scalaz._
import scalaz.syntax.monad._
import scalaz.syntax.traverse._
import scalaz.std.stream._
import scalaz.syntax.std.option._
trait AccountManager[M[+_]] extends AccountFinder[M] {
import Account._
implicit def M: Monad[M]
def findAccountById(accountId: AccountId): M[Option[Account]]
def findAccountDetailsById(accountId: AccountId): M[Option[AccountDetails]] =
findAccountById(accountId).map(_.map(AccountDetails.from(_)))
def updateAccount(account: Account): M[Boolean]
def updateAccountPassword(account: Account, newPassword: String): M[Boolean] = {
val salt = randomSalt()
updateAccount(account.copy(passwordHash = saltAndHashSHA256(newPassword, salt), passwordSalt = salt, lastPasswordChangeTime = Some(new DateTime)))
}
def resetAccountPassword(accountId: AccountId, tokenId: ResetTokenId, newPassword: String): M[String \/ Boolean] = {
findAccountByResetToken(accountId, tokenId).flatMap {
case errD @ -\/(error) => M.point(errD)
case \/-(account) =>
for {
updated <- updateAccountPassword(account, newPassword)
_ <- markResetTokenUsed(tokenId)
} yield \/-(updated)
}
}
def generateResetToken(accountId: Account): M[ResetTokenId]
def markResetTokenUsed(tokenId: ResetTokenId): M[PrecogUnit]
def findResetToken(accountId: AccountId, tokenId: ResetTokenId): M[Option[ResetToken]]
// The accountId is used here as a sanity/security check only, not for lookup
def findAccountByResetToken(accountId: AccountId, tokenId: ResetTokenId): M[String \/ Account] = {
logger.debug("Locating account for token id %s, account id %s".format(tokenId, accountId))
findResetToken(accountId, tokenId).flatMap {
case Some(token) =>
if (token.expiresAt.isBefore(new DateTime)) {
logger.warn("Located expired reset token: " + token)
M.point(-\\/("Reset token %s has expired".format(tokenId)))
} else if (token.usedAt.nonEmpty) {
logger.warn("Reset attempted with previously used reset token: " + token)
M.point(-\\/("Reset token %s has already been used".format(tokenId)))
} else if (token.accountId != accountId) {
logger.debug("Located reset token, but with the wrong account (expected %s): %s".format(accountId, token))
M.point(-\\/("Reset token %s does not match provided account %s".format(tokenId, accountId)))
} else {
logger.debug("Located reset token " + token)
findAccountById(token.accountId).map(_.\/>("Could not find account by id " + token.accountId))
}
case None =>
logger.warn("Could not locate reset token for id " + tokenId)
M.point(-\\/("No reset token found for id " + tokenId))
}
}
def createAccount(email: String, password: String, creationDate: DateTime, plan: AccountPlan, parentId: Option[AccountId] = None, profile: Option[JValue] = None)(f: AccountId => M[APIKey]): M[Account]
def findAccountByEmail(email: String) : M[Option[Account]]
def hasAncestor(child: Account, ancestor: Account)(implicit M: Monad[M]): M[Boolean] = {
if (child == ancestor) {
true.point[M]
} else {
child.parentId map { id =>
findAccountById(id) flatMap {
case None => false.point[M]
case Some(`child`) => false.point[M] // avoid infinite loops
case Some(parent) => hasAncestor(parent, ancestor)
}
} getOrElse {
false.point[M]
}
}
}
def authAccount(email: String, password: String)(implicit M: Monad[M]): M[Validation[String, Account]] = {
findAccountByEmail(email) map {
case Some(account) if account.passwordHash == saltAndHashSHA1(password, account.passwordSalt) ||
account.passwordHash == saltAndHashSHA256(password, account.passwordSalt) ||
account.passwordHash == saltAndHashLegacy(password, account.passwordSalt) => Success(account)
case Some(account) => Failure("password mismatch")
case None => Failure("account not found")
}
}
def deleteAccount(accountId: AccountId): M[Option[Account]]
}
|
precog/platform
|
accounts/src/main/scala/com/precog/accounts/AccountManager.scala
|
Scala
|
agpl-3.0
| 5,436
|
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.norm
import com.netflix.atlas.core.util.Assert._
import org.scalatest.FunSuite
class NormalizeValueFunctionSuite extends FunSuite {
private def newFunction(step: Long, heartbeat: Long) = {
val listVF = new ListValueFunction
val normalizeVF = new NormalizeValueFunction(step, heartbeat, listVF)
listVF.f = normalizeVF
listVF
}
test("basic") {
val n = newFunction(10, 20)
assert(n.update(5, 1.0) === List(0 -> 0.5))
assert(n.update(15, 2.0) === List(10 -> 1.5))
assert(n.update(25, 2.0) === List(20 -> 2.0))
assert(n.update(35, 1.0) === List(30 -> 1.5))
assert(n.update(85, 1.0) === List(80 -> 0.5))
assert(n.update(95, 2.0) === List(90 -> 1.5))
assert(n.update(105, 2.0) === List(100 -> 2.0))
}
test("already normalized updates") {
val n = newFunction(10, 20)
assert(n.update(0, 1.0) === List(0 -> 1.0))
assert(n.update(10, 2.0) === List(10 -> 2.0))
assert(n.update(20, 3.0) === List(20 -> 3.0))
assert(n.update(30, 1.0) === List(30 -> 1.0))
}
test("already normalized updates, skip 1") {
val n = newFunction(10, 20)
assert(n.update(0, 1.0) === List(0 -> 1.0))
assert(n.update(10, 1.0) === List(10 -> 1.0))
assert(n.update(30, 1.0) === List(20 -> 1.0, 30 -> 1.0))
}
test("already normalized updates, miss heartbeat") {
val n = newFunction(10, 20)
assert(n.update(0, 1.0) === List(0 -> 1.0))
assert(n.update(10, 2.0) === List(10 -> 2.0))
assert(n.update(30, 1.0) === List(20 -> 1.0, 30 -> 1.0))
assert(n.update(60, 4.0) === List(60 -> 4.0))
assert(n.update(70, 2.0) === List(70 -> 2.0))
}
test("random offset") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000, 120000)
assert(n.update(t(1, 13), 1.0) === List(t(1, 0) -> 47.0 / 60.0))
assert(n.update(t(2, 13), 1.0) === List(t(2, 0) -> 1.0))
assert(n.update(t(3, 13), 1.0) === List(t(3, 0) -> 1.0))
}
test("random offset, skip 1") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000, 120000)
assert(n.update(t(1, 13), 1.0) === List(t(1, 0) -> 47.0 / 60.0))
assert(n.update(t(2, 13), 1.0) === List(t(2, 0) -> 1.0))
assert(n.update(t(3, 13), 1.0) === List(t(3, 0) -> 1.0))
assert(n.update(t(5, 13), 1.0) === List(t(4, 0) -> 1.0, t(5, 0) -> 1.0))
}
test("random offset, skip 2") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000, 120000)
assert(n.update(t(1, 13), 1.0) === List(t(1, 0) -> 47.0 / 60.0))
assert(n.update(t(2, 13), 1.0) === List(t(2, 0) -> 1.0))
assert(n.update(t(3, 13), 1.0) === List(t(3, 0) -> 1.0))
assert(n.update(t(6, 13), 1.0) === List(t(6, 0) -> 47.0 / 60.0))
}
test("random offset, skip almost 2") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000, 120000)
assert(n.update(t(1, 13), 1.0) === List(t(1, 0) -> 47.0 / 60.0))
assert(n.update(t(2, 13), 1.0) === List(t(2, 0) -> 1.0))
assert(n.update(t(3, 13), 1.0) === List(t(3, 0) -> 1.0))
assert(n.update(t(6, 5), 1.0) === List(t(6, 0) -> 55.0 / 60.0))
}
test("random offset, out of order") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000, 120000)
assert(n.update(t(1, 13), 1.0) === List(t(1, 0) -> 47.0 / 60.0))
assert(n.update(t(1, 12), 1.0) === Nil)
assert(n.update(t(2, 13), 1.0) === List(t(2, 0) -> 1.0))
assert(n.update(t(2, 10), 1.0) === Nil)
assert(n.update(t(3, 13), 1.0) === List(t(3, 0) -> 1.0))
assert(n.update(t(3, 11), 1.0) === Nil)
}
test("random offset, dual reporting") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000, 120000)
assert(n.update(t(1, 13), 1.0) === List(t(1, 0) -> 47.0 / 60.0))
assert(n.update(t(1, 13), 1.0) === Nil)
assert(n.update(t(2, 13), 1.0) === List(t(2, 0) -> 1.0))
assert(n.update(t(2, 13), 1.0) === Nil)
assert(n.update(t(3, 13), 1.0) === List(t(3, 0) -> 1.0))
assert(n.update(t(3, 13), 1.0) === Nil)
}
test("init, 17") {
def t(m: Int, s: Int) = (m * 60 + s) * 1000L
val n = newFunction(60000, 120000)
val v = 1.0 / 60.0
val wv1 = v * (43.0 / 60.0)
val wv2 = v * (17.0 / 60.0)
assert(n.update(t(8, 17), 1.0 / 60.0) === List(t(8, 0) -> wv1))
assert(n.update(t(9, 17), 0.0) === List(t(9, 0) -> wv2))
assert(n.update(t(10, 17), 0.0) === List(t(10, 0) -> 0.0))
}
test("frequent updates") {
val n = newFunction(10, 50)
assert(n.update(0, 1.0) === List(0 -> 1.0))
assert(n.update(2, 2.0) === Nil)
assert(n.update(4, 4.0) === Nil)
assert(n.update(8, 8.0) === Nil)
var res = n.update(12, 2.0).head
assert(res._1 === 10)
assertEquals(res._2, 4.8, 1e-6)
val vs = n.update(40, 3.0)
res = vs.head
assert(res._1 === 20)
assertEquals(res._2, 2.8, 1e-6)
assert(vs.tail === List(30 -> 3.0, 40 -> 3.0))
}
}
|
rspieldenner/atlas
|
atlas-core/src/test/scala/com/netflix/atlas/core/norm/NormalizeValueFunctionSuite.scala
|
Scala
|
apache-2.0
| 5,535
|
//: ----------------------------------------------------------------------------
//: Copyright (C) 2016 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package ark
import java.util
import org.apache.mesos.Protos
import scala.collection.JavaConverters._
// State for reconciliation algorithm (see http://mesos.apache.org/documentation/latest/reconciliation/)
// Tasks pending reconciliation; no offers will be accepted until the set is empty.
case class ReconcileState(reconciledAt: Long, reconcilingTasks: Set[ReconcileTaskStatus],
minTaskReconciliationWait: Long = 5000, maxTaskReconciliationWait: Long = 30000) {
val size = reconcilingTasks.size
def minTimeElapsed: Boolean = System.currentTimeMillis() - reconciledAt > minTaskReconciliationWait
def maxTimeElapsed: Boolean = System.currentTimeMillis() - reconciledAt > maxTaskReconciliationWait
def reconciling: Boolean = reconcilingTasks.nonEmpty || !minTimeElapsed
def expired: Boolean = reconcilingTasks.nonEmpty && maxTimeElapsed
def getJavaCollection: util.Collection[Protos.TaskStatus] = reconcilingTasks.map(_.toTaskStatus).asJavaCollection
}
object ReconcileState {
val empty = ReconcileState(0L, Set.empty)
def apply(state: SchedulerState[_]): ReconcileState = ReconcileState(System.currentTimeMillis, state.reconcileTasks)
}
// Min info required to create TaskStatus for reconciliation
case class ReconcileTaskStatus(taskId: String, slaveId: String) {
def toTaskStatus: Protos.TaskStatus = Protos.TaskStatus.newBuilder()
.setState(Protos.TaskState.TASK_RUNNING)
.setTaskId(Protos.TaskID.newBuilder.setValue(taskId).build())
.setSlaveId(Protos.SlaveID.newBuilder.setValue(slaveId).build())
.build()
}
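// Illustrative usage sketch (not part of the original file): shows how the
// reconciliation window flags combine right after reconciliation is kicked off.
// The task and slave ids are made up.
object ReconcileStateExample {
  def example(): Unit = {
    val state = ReconcileState(
      reconciledAt = System.currentTimeMillis(),
      reconcilingTasks = Set(ReconcileTaskStatus("task-1", "slave-1")))

    // The task set is non-empty and the minimum wait has not elapsed yet,
    // so the scheduler keeps declining offers.
    assert(state.reconciling)
    // The maximum wait (30s by default) has not elapsed either, so this
    // reconciliation round is not considered expired.
    assert(!state.expired)
  }
}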
|
oncue/mesos-scheduler
|
core/src/main/scala/ReconcileState.scala
|
Scala
|
apache-2.0
| 2,361
|
package zzb.domain.plane
import zzb.rest._
import akka.actor.Props
import zzb.rest.util.StatableActor
import spray.http.StatusCodes._
/**
* Created by Simon on 2014/6/13
*/
class PlaneSvcActor extends RestServiceActor {
override def receive: Receive = runRoute(route)
implicit def childByName(name: String) = {
name match {
case "planes" =>
Right(context.actorOf(Props(new PlaneSetActor with StatableActor), "planes"))
case _ => Left((NotFound, "error"))
}
}
def route: Route =
pathEndOrSingleSlash {
post {
complete("ok")
}
} ~
forwardChild
}
|
stepover/zzb
|
zzb-domain/src/test/scala/zzb/domain/plane/PlaneSvcActor.scala
|
Scala
|
mit
| 622
|
import scala.scalajs.js
import scala.scalajs.js.annotation.*
@js.native
trait NativeJSTrait extends js.Any
trait NonNativeJSTrait extends js.Any
object Test {
def test(x: Any): Unit = {
x.isInstanceOf[NativeJSTrait] // error
x.isInstanceOf[NonNativeJSTrait] // error
}
}
|
dotty-staging/dotty
|
tests/neg-scalajs/isinstanceof-js-type.scala
|
Scala
|
apache-2.0
| 286
|
package org.openmole.tool.cache
import collection.JavaConverters._
object WithInstance {
/**
* Get an instance of an object given a constructor, either pooled or new.
* @param f
* @param pooled
* @param close
* @tparam T
* @return
*/
def apply[T](f: () ⇒ T)(pooled: Boolean, close: T ⇒ Unit = (_: T) ⇒ {}): WithInstance[T] =
if (pooled) Pool(f, close) else WithNewInstance(f, close)
}
trait WithInstance[T] {
def apply[A](f: T ⇒ A): A
}
object Pool {
def apply[T](f: () ⇒ T, close: T ⇒ Unit = (_: T) ⇒ {}): Pool[T] = new Pool(f, close)
}
/**
 * A Pool of objects, given a constructor and a closing operator. A [[java.util.Stack]] of instances is
 * maintained; access to it is synchronized, so the pool can safely be used from multiple threads.
*
* @param f
* @param closeOp
*/
class Pool[T](f: () ⇒ T, closeOp: T ⇒ Unit) extends WithInstance[T] {
val instances: java.util.Stack[T] = new java.util.Stack()
def borrow: T = synchronized {
instances.isEmpty match {
case false ⇒ instances.pop()
case true ⇒ f()
}
}
def release(t: T) = synchronized { instances.push(t) }
def apply[A](f: T ⇒ A): A = {
val o = borrow
try f(o)
finally release(o)
}
def close() = synchronized { instances.asScala.foreach(closeOp) }
}
case class WithNewInstance[T](o: () ⇒ T, close: T ⇒ Unit = (_: T) ⇒ {}) extends WithInstance[T] {
def apply[A](f: T ⇒ A): A = {
val instance = o()
try f(instance)
finally close(instance)
}
}
|
openmole/openmole
|
openmole/third-parties/org.openmole.tool.cache/src/main/scala/org/openmole/tool/cache/Pool.scala
|
Scala
|
agpl-3.0
| 1,505
|
package mesosphere.util
import akka.actor.{ Props, Status }
import akka.testkit.{ TestActorRef, TestProbe }
import mesosphere.marathon.MarathonSpec
import mesosphere.marathon.test.MarathonActorSupport
import org.scalatest.{ BeforeAndAfterAll, Matchers }
import scala.concurrent.duration._
import scala.concurrent.{ Await, Future, Promise }
class PromiseActorTest
extends MarathonActorSupport
with MarathonSpec
with BeforeAndAfterAll
with Matchers {
test("Success") {
val promise = Promise[Any]()
val ref = TestActorRef(Props(classOf[PromiseActor], promise))
ref ! 'Test
Await.result(promise.future, 2.seconds) should equal('Test)
}
test("Success with askWithoutTimeout") {
val probe = TestProbe()
val future: Future[Symbol] = PromiseActor.askWithoutTimeout(system, probe.ref, 'Question)
probe.expectMsg('Question)
probe.reply('Answer)
Await.result(future, 2.seconds) should equal('Answer)
}
test("Status.Success") {
val promise = Promise[Any]()
val ref = TestActorRef(Props(classOf[PromiseActor], promise))
ref ! Status.Success('Test)
Await.result(promise.future, 2.seconds) should equal('Test)
}
test("State.Success with askWithoutTimeout") {
val probe = TestProbe()
val future: Future[Symbol] = PromiseActor.askWithoutTimeout(system, probe.ref, 'Question)
probe.expectMsg('Question)
probe.reply(Status.Success('Answer))
Await.result(future, 2.seconds) should equal('Answer)
}
test("Status.Failure") {
val promise = Promise[Any]()
val ref = TestActorRef(Props(classOf[PromiseActor], promise))
val ex = new Exception("test")
ref ! Status.Failure(ex)
intercept[Exception] {
Await.result(promise.future, 2.seconds)
}.getMessage should be("test")
}
test("State.Failure with askWithoutTimeout") {
val probe = TestProbe()
val future: Future[Symbol] = PromiseActor.askWithoutTimeout(system, probe.ref, 'Question)
probe.expectMsg('Question)
probe.reply(Status.Failure(new IllegalStateException("error")))
intercept[IllegalStateException] {
Await.result(future, 2.seconds)
}.getMessage should be("error")
}
}
|
yp-engineering/marathon
|
src/test/scala/mesosphere/util/PromiseActorTest.scala
|
Scala
|
apache-2.0
| 2,192
|
import org.scalatest._
import hyperion._
import akka.actor._
import akka.testkit._
import akka.pattern.{ask, pipe}
import akka.util.Timeout
import java.util.concurrent.TimeUnit
import scala.concurrent.duration._
import scala.util._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
class TestPipeCreatorCase extends TestKit(ActorSystem("HyperionTest1", createTestconfig())) with ImplicitSender
with WordSpecLike with MustMatchers with BeforeAndAfterAll {
override def afterAll {
Registry.reset
shutdown()
}
"PipeCreator" must {
implicit val timeout = Timeout(10000 millis)
"be able to create and query Tail objects" in {
val pipeManager = system.actorOf(Props(new PipeCreator(system, new PipeFactory("HyperionTest1"))), "creator")
val options = NodeProperty("almafa", 10, 10, PipeOptions("tail", "tail", Map("backlog" -> "1")))
pipeManager ! Create(options)
Thread.sleep(100)
system.actorSelection("akka://HyperionTest1/user/pipe_almafa") ! Message.empty
Thread.sleep(100)
val result = Await.result(pipeManager ? TailQuery("almafa"), timeout.duration).asInstanceOf[List[Message]]
assert(result == List[Message](Message.empty))
}
"be able to accept UploadConfig messages" in {
val pipeManager = system.actorOf(Props(new PipeCreator(system, new PipeFactory("HyperionTest1"))), "creator2")
val config = UploadConfig(
Config(
List[NodeProperty](
NodeProperty("almaid", 0, 0,
PipeOptions("alma", "source",
Map[String, String](("port", "10000"))
)
),
NodeProperty("korteid", 0, 0,
PipeOptions("korte", "tail",
Map[String, String](("backlog", "10"))
)
)
),
List[Connection](Connection("almaid", "korteid"))
)
)
val res = pipeManager ? config
val result = Await.result(res, timeout.duration)
result match {
case Success(_) => ;
case Failure(e) => fail("UploadConfig did not succeeded! error:" + e.getMessage())
}
}
"be able to accept UploadConfig message and fail on lingering connection" in {
val pipeManager = system.actorOf(Props(new PipeCreator(system, new PipeFactory("HyperionTest1"))), "creator3")
val config = UploadConfig(
Config(
List[NodeProperty](
NodeProperty("almaid2", 0, 0,
PipeOptions("alma", "source",
Map[String, String](("port", "10000"))
)
),
NodeProperty("korteid2", 0, 0,
PipeOptions("korte", "tail",
Map[String, String](("backlog", "10"))
)
)
),
List[Connection](Connection("almaid2", "korteid3"))
)
)
val res = pipeManager ? config
val result = Await.result(res, timeout.duration)
result match {
case Success(_) => fail("UploadConfig did not fail on lingering connection!");
case Failure(e) => ;
}
}
"be able to set up TcpSource and TcpDestination properly" in {
val pipeManager = system.actorOf(Props(new PipeCreator(system, new PipeFactory("HyperionTest1"))), "creator5")
val probe1 = TestProbe()
val config = UploadConfig(
Config(
List[NodeProperty](
NodeProperty("almaid4", 0, 0,
PipeOptions("alma", "source",
Map[String, String](("port", "11115"),("parser","raw"))
)
),
NodeProperty("korteid4", 0, 0,
PipeOptions("korte", "destination",
Map[String, String](("host","localhost"),("port", "11115"),("template","$MESSAGE\\n"))
)
)
),
List[Connection]()
)
)
pipeManager ? config
Thread.sleep(100)
system.actorSelection("akka://HyperionTest1/user/pipe_almaid4") ! PipeConnectionUpdate(Map(("id", system.actorSelection(probe1.ref.path.toString))),List())
Thread.sleep(500)
val expected = Message.withMessage("testMessage")
system.actorSelection("akka://HyperionTest1/user/pipe_korteid4") ! expected
probe1.expectMsg(1000 millis, expected)
}
}
implicit val timeout = Timeout(10000 millis)
"be able to accept UploadConfig message and set up pipe system" in {
Registry.reset
val pipeManager = system.actorOf(Props(new PipeCreator(system, new PipeFactory("HyperionTest1"))), "creator4")
val config = UploadConfig(
Config(
List[NodeProperty](
NodeProperty("almaid3", 0, 0,
PipeOptions("alma", "filter",
Map[String, String](("fieldname", "MESSAGE"), ("matchexpr","alma"))
)
),
NodeProperty("korteid3", 0, 0,
PipeOptions("korte", "tail",
Map[String, String](("backlog", "10"))
)
)
),
List[Connection](Connection("almaid3", "korteid3"))
)
)
val res = pipeManager ? config
Thread.sleep(100)
system.actorSelection("akka://HyperionTest1/user/pipe_almaid3") ! Message.withMessage("alma")
Thread.sleep(100)
val result = Await.result(pipeManager ? TailQuery("korteid3"), timeout.duration).asInstanceOf[List[Message]]
assert(result == List[Message](Message.withMessage("alma")))
}
"be able to query all stats in pipe system" in {
Registry.reset
val pipeManager = system.actorOf(Props(new PipeCreator(system, new PipeFactory("HyperionTest1"))), "creator6")
val config = UploadConfig(
Config(
List[NodeProperty](
NodeProperty("almaid5", 0, 0,
PipeOptions("alma", "filter",
Map[String, String](("fieldname", "MESSAGE"), ("matchexpr","alma"))
)
),
NodeProperty("korteid5", 0, 0,
PipeOptions("korte", "counter",
Map[String, String]()
)
)
),
List[Connection](Connection("almaid5", "korteid5"))
)
)
val res = pipeManager ? config
Thread.sleep(100)
system.actorSelection("akka://HyperionTest1/user/pipe_almaid5") ! Message.withMessage("alma")
Thread.sleep(100)
val result = Await.result(pipeManager ? AllStatsQuery, timeout.duration).asInstanceOf[Map[String, Map[String, Int]]]
val expectedResult = Map[String, Map[String, Int]](
"almaid5" -> Map[String, Int]( "processed" -> 1, "matched" -> 1),
"korteid5" -> Map[String, Int] ("counter" -> 1))
assert(result == expectedResult)
}
}
class TestConfigUploadAndDownloadCase extends TestKit(ActorSystem("HyperionTest2", createTestconfig())) with ImplicitSender
with WordSpecLike with MustMatchers with BeforeAndAfterAll {
override def afterAll {
Registry.reset
shutdown()
}
"ConfigUploadDownload" must {
implicit val timeout = Timeout(1000 millis)
"be able to accept UploadConfig messages and then download config" in {
val pipeManager = system.actorOf(Props(new PipeCreator(system, new PipeFactory("HyperionTest2"))), "creator4")
val config = Config(
List[NodeProperty](
NodeProperty("almaid", 0, 0,
PipeOptions("alma", "source",
Map[String, String](("port", "10000"))
)
),
NodeProperty("korteid", 0, 0,
PipeOptions("korte", "tail",
Map[String, String](("backlog", "10"))
)
)
),
List[Connection](Connection("almaid", "korteid"))
)
val uploadConfig = UploadConfig(config)
val res = pipeManager ? uploadConfig
val downloadedConfig = Await.result(pipeManager ? QueryConfig, timeout.duration).asInstanceOf[Config]
assert(config == downloadedConfig)
}
}
}
class TestRemoveConnection extends TestKit(ActorSystem("HyperionTest3", createTestconfig())) with ImplicitSender
with WordSpecLike with MustMatchers with BeforeAndAfterAll {
override def afterAll {
Registry.reset
shutdown()
}
"RemoveConnection" must {
implicit val timeout = Timeout(1000 millis)
"be able to remove connection if it does not present in second config" in {
val pipeManager = system.actorOf(Props(new PipeCreator(system, new PipeFactory("HyperionTest3"))), "creator1")
val config = Config(
List[NodeProperty](
NodeProperty("almaid", 0, 0,
PipeOptions("alma", "filter",
Map[String, String](("fieldname", "MESSAGE"), ("matchexpr","alma"))
)
),
NodeProperty("korteid", 0, 0,
PipeOptions("korte", "tail",
Map[String, String](("backlog", "10"))
)
)
),
List[Connection](Connection("almaid", "korteid"))
)
val uploadConfig = UploadConfig(config)
val res = pipeManager ? uploadConfig
Thread.sleep(100)
system.actorSelection("akka://HyperionTest3/user/pipe_almaid") ! Message.withMessage("alma")
Thread.sleep(100)
val result = Await.result(pipeManager ? TailQuery("korteid"), timeout.duration).asInstanceOf[List[Message]]
assert(result == List[Message](Message.withMessage("alma")))
val configWithoutConnection = Config(
List[NodeProperty](
NodeProperty("almaid", 0, 0,
PipeOptions("alma", "filter",
Map[String, String](("fieldname", "MESSAGE"), ("matchexpr","alma"))
)
),
NodeProperty("korteid", 0, 0,
PipeOptions("korte", "tail",
Map[String, String](("backlog", "10"))
)
)
),
List[Connection]()
)
val uploadConfig2 = UploadConfig(configWithoutConnection)
pipeManager ? uploadConfig2
Thread.sleep(100)
system.actorSelection("akka://HyperionTest3/user/pipe_almaid") ! Message.withMessage("alma")
Thread.sleep(100)
val result2 = Await.result(pipeManager ? TailQuery("korteid"), timeout.duration).asInstanceOf[List[Message]]
assert(result2 == List[Message](Message.withMessage("alma")))
}
}
}
class TestRemovePipe extends TestKit(ActorSystem("HyperionTest4", createTestconfig())) with ImplicitSender
with WordSpecLike with MustMatchers with BeforeAndAfterAll {
override def afterAll {
Registry.reset
shutdown()
}
"RemovePipe" must {
implicit val timeout = Timeout(1000 millis)
"be able to remove pipe if it does not present in second config" in {
val pipeManager = system.actorOf(Props(new PipeCreator(system, new PipeFactory("HyperionTest4"))), "creator1")
val config = Config(
List[NodeProperty](
NodeProperty("almaid", 0, 0,
PipeOptions("alma", "filter",
Map[String, String](("fieldname", "MESSAGE"), ("matchexpr","alma"))
)
),
NodeProperty("korteid", 0, 0,
PipeOptions("korte", "tail",
Map[String, String](("backlog", "10"))
)
)
),
List[Connection](Connection("almaid", "korteid"))
)
val uploadConfig = UploadConfig(config)
val res = pipeManager ? uploadConfig
Thread.sleep(100)
system.actorSelection("akka://HyperionTest4/user/pipe_almaid") ! Message.withMessage("alma")
Thread.sleep(100)
val result = Await.result(pipeManager ? TailQuery("korteid"), timeout.duration).asInstanceOf[List[Message]]
assert(result == List[Message](Message.withMessage("alma")))
val configWithoutConnection = Config(
List[NodeProperty](
NodeProperty("almaid", 0, 0,
PipeOptions("alma", "filter",
Map[String, String](("fieldname", "MESSAGE"), ("matchexpr","alma"))
)
)
),
List[Connection]()
)
val uploadConfig2 = UploadConfig(configWithoutConnection)
pipeManager ? uploadConfig2
Thread.sleep(100)
system.actorSelection("akka://HyperionTest4/user/pipe_almaid") ! Message.withMessage("alma")
Thread.sleep(100)
try{
Await.result(pipeManager ? TailQuery("korteid"), timeout.duration).asInstanceOf[List[Message]]
fail()
}
catch {
case _ : akka.actor.ActorNotFound =>
}
val downloadedConfig = Await.result(pipeManager ? QueryConfig, timeout.duration).asInstanceOf[Config]
assert(configWithoutConnection == downloadedConfig)
}
}
}
|
talien/hyperion
|
test/testcreator.scala
|
Scala
|
lgpl-3.0
| 12,561
|
package org.jetbrains.plugins.scala
package codeInspection.collections
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
/**
* Nikolay.Tropin
* 2014-05-07
*/
class NotIsEmptyTest extends OperationsOnCollectionInspectionTest {
val hint = InspectionBundle.message("not.isEmpty.hint")
def test_1() {
val selected = s"!Array().${START}isEmpty$END"
check(selected)
val text = "!Array().isEmpty"
val result = "Array().nonEmpty"
testFix(text, result, hint)
}
def test_2() {
val selected = s"!Option(1).${START}isEmpty$END"
check(selected)
val text = "!Option(1).isEmpty"
val result = "Option(1).nonEmpty"
testFix(text, result, hint)
}
override val inspectionClass = classOf[NotIsEmptyInspection]
}
|
consulo/consulo-scala
|
test/org/jetbrains/plugins/scala/codeInspection/collections/NotIsEmptyTest.scala
|
Scala
|
apache-2.0
| 767
|
package rxgpio.examples
import rxgpio._
import rxgpio.pigpio.PigpioLibrary
import scala.concurrent.duration.DurationInt
import scala.io.StdIn
import scala.util.Success
/**
*
*/
object ButtonPushed extends App {
import rxgpio.Gpio.Implicits._
DefaultInitializer.gpioInitialise() match {
case Success(InitOK(ver)) =>
println(s"initialized pigpio:$ver")
case _ =>
println("failed")
System.exit(1)
}
implicit val pigpio = PigpioLibrary.Instance
DefaultDigitalIO.gpioSetMode(1, InputPin)
RxGpio(1).debounce(100 milliseconds).map(_.tick).subscribe(tick => println(s"alert @ tick($tick)"))
println("Press Enter to exit")
StdIn.readLine()
}
|
jw3/pigpio4s
|
examples/src/main/scala/rxgpio/examples/ButtonPushed.scala
|
Scala
|
apache-2.0
| 725
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import org.scalatest.FunSuite
import org.scalatest.BeforeAndAfter
import org.scalatest.matchers.ShouldMatchers
import org.apache.spark.{Logging, SharedSparkContext}
import org.apache.spark.SparkContext._
class SortingSuite extends FunSuite with SharedSparkContext with ShouldMatchers with Logging {
test("sortByKey") {
val pairs = sc.parallelize(Array((1, 0), (2, 0), (0, 0), (3, 0)), 2)
assert(pairs.sortByKey().collect() === Array((0,0), (1,0), (2,0), (3,0)))
}
test("large array") {
val rand = new scala.util.Random()
val pairArr = Array.fill(1000) { (rand.nextInt(), rand.nextInt()) }
val pairs = sc.parallelize(pairArr, 2)
val sorted = pairs.sortByKey()
assert(sorted.partitions.size === 2)
assert(sorted.collect() === pairArr.sortBy(_._1))
}
test("large array with one split") {
val rand = new scala.util.Random()
val pairArr = Array.fill(1000) { (rand.nextInt(), rand.nextInt()) }
val pairs = sc.parallelize(pairArr, 2)
val sorted = pairs.sortByKey(true, 1)
assert(sorted.partitions.size === 1)
assert(sorted.collect() === pairArr.sortBy(_._1))
}
test("large array with many partitions") {
val rand = new scala.util.Random()
val pairArr = Array.fill(1000) { (rand.nextInt(), rand.nextInt()) }
val pairs = sc.parallelize(pairArr, 2)
val sorted = pairs.sortByKey(true, 20)
assert(sorted.partitions.size === 20)
assert(sorted.collect() === pairArr.sortBy(_._1))
}
test("sort descending") {
val rand = new scala.util.Random()
val pairArr = Array.fill(1000) { (rand.nextInt(), rand.nextInt()) }
val pairs = sc.parallelize(pairArr, 2)
assert(pairs.sortByKey(false).collect() === pairArr.sortWith((x, y) => x._1 > y._1))
}
test("sort descending with one split") {
val rand = new scala.util.Random()
val pairArr = Array.fill(1000) { (rand.nextInt(), rand.nextInt()) }
val pairs = sc.parallelize(pairArr, 1)
assert(pairs.sortByKey(false, 1).collect() === pairArr.sortWith((x, y) => x._1 > y._1))
}
test("sort descending with many partitions") {
val rand = new scala.util.Random()
val pairArr = Array.fill(1000) { (rand.nextInt(), rand.nextInt()) }
val pairs = sc.parallelize(pairArr, 2)
assert(pairs.sortByKey(false, 20).collect() === pairArr.sortWith((x, y) => x._1 > y._1))
}
test("more partitions than elements") {
val rand = new scala.util.Random()
val pairArr = Array.fill(10) { (rand.nextInt(), rand.nextInt()) }
val pairs = sc.parallelize(pairArr, 30)
assert(pairs.sortByKey().collect() === pairArr.sortBy(_._1))
}
test("empty RDD") {
val pairArr = new Array[(Int, Int)](0)
val pairs = sc.parallelize(pairArr, 2)
assert(pairs.sortByKey().collect() === pairArr.sortBy(_._1))
}
test("partition balancing") {
val pairArr = (1 to 1000).map(x => (x, x)).toArray
val sorted = sc.parallelize(pairArr, 4).sortByKey()
assert(sorted.collect() === pairArr.sortBy(_._1))
val partitions = sorted.collectPartitions()
logInfo("Partition lengths: " + partitions.map(_.length).mkString(", "))
partitions(0).length should be > 180
partitions(1).length should be > 180
partitions(2).length should be > 180
partitions(3).length should be > 180
partitions(0).last should be < partitions(1).head
partitions(1).last should be < partitions(2).head
partitions(2).last should be < partitions(3).head
}
test("partition balancing for descending sort") {
val pairArr = (1 to 1000).map(x => (x, x)).toArray
val sorted = sc.parallelize(pairArr, 4).sortByKey(false)
assert(sorted.collect() === pairArr.sortBy(_._1).reverse)
val partitions = sorted.collectPartitions()
logInfo("partition lengths: " + partitions.map(_.length).mkString(", "))
partitions(0).length should be > 180
partitions(1).length should be > 180
partitions(2).length should be > 180
partitions(3).length should be > 180
partitions(0).last should be > partitions(1).head
partitions(1).last should be > partitions(2).head
partitions(2).last should be > partitions(3).head
}
}
|
mkolod/incubator-spark
|
core/src/test/scala/org/apache/spark/rdd/SortingSuite.scala
|
Scala
|
apache-2.0
| 4,958
|
package org.scalarules.engine
import org.scalarules.facts.SingularFact
import org.scalatest.{FlatSpec, Matchers}
class FactTest extends FlatSpec with Matchers {
val context: Context = Map(SingularFact("naamAanwezig") -> 42, SingularFact("onverwachtObject") -> "String")
it should "return a Some(Bedrag) when given a String that is present in the context" in {
val optie = SingularFact("naamAanwezig").toEval
optie(context) should be (Some(42))
}
it should "return a None when given a String that is not present in the context" in {
val optie = SingularFact("naamNietAanwezig").toEval
optie(context) should be (None)
}
it should "not throw an exception when given a context-String that results in a different type than expected" in {
val optie = SingularFact[Int]("onverwachtObject").toEval
optie(context) should be (Some("String"))
}
}
|
scala-rules/rule-engine
|
engine-core/src/test/scala/org/scalarules/engine/FactTest.scala
|
Scala
|
mit
| 879
|
package de.leanovate.swaggercheck.schema.gen
import de.leanovate.swaggercheck.schema.model.{IntegerDefinition, ArrayDefinition}
import org.scalacheck.Properties
object GeneratableArraySpecification extends Properties("GeneratableArray") with DefinitionChecks {
property("any generates are valid") = {
val definition = ArrayDefinition(None, None, None)
checkDefinition(definition)
}
property("generates with item definition are valid") = {
val definition = ArrayDefinition(None, None, Some(IntegerDefinition(None, None, None)))
checkDefinition(definition)
}
property("generate with minLength are valid") = {
val definition = ArrayDefinition(Some(10), None, None)
checkDefinition(definition)
}
property("generate with maxLength are valid") = {
val definition = ArrayDefinition(None, Some(20), None)
checkDefinition(definition)
}
}
|
leanovate/swagger-check
|
json-schema-gen/src/test/scala/de/leanovate/swaggercheck/schema/gen/GeneratableArraySpecification.scala
|
Scala
|
mit
| 886
|
/*
* Copyright 2012 Eike Kettner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eknet.publet.web
import javax.servlet.http.HttpServletRequest
import org.eknet.publet.vfs.Path
import util.{PubletWeb, Request, Key}
import grizzled.slf4j.Logging
import java.net.URI
/**
* @author Eike Kettner eike.kettner@gmail.com
* @since 09.05.12 21:03
*/
trait RequestUrl extends Logging {
this: RequestAttr =>
protected def req: HttpServletRequest
/** The complete request uri, from the hostname up to the query string. Decoded but untouched otherwise */
def requestUri = URI.create(req.getRequestURI).getPath
private val urlBaseKey = Key("urlBase", {
case Request => Config("publet.urlBase").getOrElse {
val uri = req.getScheme +"://"+ req.getServerName
val base = if (Set(80, 443) contains req.getServerPort)
uri
else
uri +":" +req.getServerPort
if (req.getServletContext.getContextPath.isEmpty) base
else base + req.getServletContext.getContextPath
}
})
/**Url prefix of this application. This is read from the config file or constructed
* using the information provided by the request.
*
* This base should be used when constructing urls. The path does not end
* with a `/` character
* @return
*/
def urlBase = attr(urlBaseKey).get
private val applicationUriKey: Key[String] = Key("applicationUri", {
case Request => {
val cp = Config("publet.contextPath").getOrElse(req.getContextPath)
val p = Path(requestUri.substring(cp.length))
if (p.directory) (p/"index.html").asString else p.asString
}
})
/** The part of the uri after the context path. If it is a directory,
* the standard `index.html` is appended. */
def applicationUri = attr(applicationUriKey).get
/** The part of the uri after the context path. */
def applicationPath = Path(applicationUri)
private val fullUrlKey = Key("fullUrl", {
case Request => {
val url = URI.create(req.getRequestURI).getPath //.substring((req.getContextPath+req.getServletPath).length)
val params = Option(req.getQueryString).map("?"+_).getOrElse("")
(if (url.startsWith("/")) url.substring(1) else url) + params
}
})
/** The full uri to this request. With parameters, without contextPath */
def fullUrl = attr(fullUrlKey).get
private val resourceUri = Key("applicationSourceUri", {
case Request => {
PubletWeb.publet.findSources(applicationPath).toList match {
case c::cs => Some(applicationPath.sibling(c.name.fullName))
case _ => None
}
}
})
/**
* Creates an absolute url by prefixing the host and
* context path to the given path
*
* @param path
* @return
*/
def urlOf(path: String): String = urlOf(Path(path))
/**
* Creates an absolute url by prefixing the host and
* context path to the given path
*
* @param path
* @return
*/
def urlOf(path: Path): String = urlBase + path.toAbsolute.asUrlString
/**
* Returns the path to the source file that this request
* is pointing to.
* @return
*/
def resourcePath = attr(resourceUri).get
/**
* Returns the path to the source file that this request
* is pointing to.
*
* @return
*/
def getResourceUri = resourcePath.map(_.asString).getOrElse("")
/**
* Returns the query string that is contained in the request URL after the path. This method
* returns [[scala.None]] if the URL does not have a query string. Same as the value of the CGI
* variable QUERY_STRING.
* @return
*/
def getQueryString = Option(req.getQueryString)
}
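// Standalone sketch (hypothetical, not part of publet): reproduces the url-base
// rule documented above — scheme://host, plus the port unless it is 80 or 443,
// plus the servlet context path when it is non-empty.
object UrlBaseSketch extends App {
  def urlBase(scheme: String, host: String, port: Int, contextPath: String): String = {
    val authority =
      if (Set(80, 443) contains port) scheme + "://" + host
      else scheme + "://" + host + ":" + port
    if (contextPath.isEmpty) authority else authority + contextPath
  }

  println(urlBase("https", "example.org", 443, ""))        // https://example.org
  println(urlBase("http", "example.org", 8080, "/publet")) // http://example.org:8080/publet
}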
|
eikek/publet
|
web/src/main/scala/org/eknet/publet/web/RequestUrl.scala
|
Scala
|
apache-2.0
| 4,153
|
package Arithmetic.P36
import scala.language.implicitConversions
class P36(val num: Int) {
def primeFactorMultiplicity: List[(Int, Int)] = {
import Arithmetic.P35.P35._
import WorkingWithLists.P09.P09._
pack(num.primeFactors) map {l => (l.head, l.length)}
}
def primeFactorMultiplicity_Direct: List[(Int, Int)] = {
def helper(n: Int, f: Int, cnt: Int): (Int, Int) = {
require(f > 0 && n > 0)
if (n % f == 0) helper(n / f, f, cnt + 1)
else (n, cnt)
}
import Arithmetic.P31.P31._
def _primeFactorMultiplicity(n: Int, s: Stream[Int], prev: List[(Int, Int)]): List[(Int, Int)] = {
if (n < 2) Nil
else if (n.isPrime) ((n, 1) :: prev).reverse
else if (n % s.head == 0) {
val (rem, cnt) = helper(n, s.head, 0)
_primeFactorMultiplicity(rem, s.tail, (s.head, cnt) :: prev)
}
else _primeFactorMultiplicity(n, s.tail, prev)
}
_primeFactorMultiplicity(num, primeStream, Nil)
}
}
object P36 {
implicit def toP36Int(num: Int): P36 = new P36(num)
}
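// Illustrative usage sketch (assumes the P31/P35/P09 helpers referenced above are
// on the classpath, as in the original repository):
object P36Example extends App {
  import P36._

  println(315.primeFactorMultiplicity)        // List((3,2), (5,1), (7,1))
  println(315.primeFactorMultiplicity_Direct) // List((3,2), (5,1), (7,1))
}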
|
ihac/Ninety-Nine-Scala-Problems
|
src/main/scala/Arithmetic/P36/P36.scala
|
Scala
|
gpl-3.0
| 1,049
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.metadata
import org.apache.flink.table.plan.metadata.FlinkMetadata.ColumnOriginNullCount
import org.apache.flink.table.plan.schema.FlinkRelOptTable
import org.apache.flink.table.plan.util.JoinUtil
import org.apache.flink.table.{JArrayList, JBoolean, JDouble}
import org.apache.flink.util.Preconditions
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.metadata._
import org.apache.calcite.rex.{RexInputRef, RexLiteral, RexNode}
import java.util
import scala.collection.JavaConversions._
/**
* FlinkRelMdColumnOriginNullCount supplies a default implementation of
* [[FlinkRelMetadataQuery.getColumnOriginNullCount]] for the standard logical algebra.
 * If nulls may be present, the original stats are returned; if there are no nulls, 0 is returned.
 * If the null count is unknown, null is returned.
*/
class FlinkRelMdColumnOriginNullCount private extends MetadataHandler[ColumnOriginNullCount] {
override def getDef: MetadataDef[ColumnOriginNullCount] = FlinkMetadata.ColumnOriginNullCount.DEF
def getColumnOriginNullCount(rel: TableScan, mq: RelMetadataQuery, index: Int): JDouble = {
Preconditions.checkArgument(mq.isInstanceOf[FlinkRelMetadataQuery])
val relOptTable = rel.getTable.asInstanceOf[FlinkRelOptTable]
val fieldNames = relOptTable.getRowType.getFieldNames
Preconditions.checkArgument(index >= 0 && index < fieldNames.size())
val fieldName = fieldNames.get(index)
val statistic = relOptTable.getFlinkStatistic
val colStats = statistic.getColumnStats(fieldName)
if (colStats != null && colStats.getNullCount != null) {
colStats.getNullCount.toDouble
} else {
null
}
}
def getColumnOriginNullCount(snapshot: Snapshot, mq: RelMetadataQuery, index: Int): JDouble = null
def getColumnOriginNullCount(rel: Project, mq: RelMetadataQuery, index: Int): JDouble = {
getColumnOriginNullOnProjects(rel.getInput, rel.getProjects, mq, index)
}
def getColumnOriginNullCount(rel: Calc, mq: RelMetadataQuery, index: Int): JDouble = {
val program = rel.getProgram
if (program.getCondition == null) {
val projects = program.getProjectList.map(program.expandLocalRef)
getColumnOriginNullOnProjects(rel.getInput, projects, mq, index)
} else {
null
}
}
private def getColumnOriginNullOnProjects(
input: RelNode,
projects: util.List[RexNode],
mq: RelMetadataQuery,
index: Int): JDouble = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
projects.get(index) match {
case inputRef: RexInputRef => fmq.getColumnNullCount(input, inputRef.getIndex)
case literal: RexLiteral => if (literal.isNull) 1D else 0D
case _ => null
}
}
def getColumnOriginNullCount(rel: Join, mq: RelMetadataQuery, index: Int): JDouble = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
if (rel.getJoinType == JoinRelType.INNER) {
val left = rel.getLeft
val right = rel.getRight
val leftFieldCnt = left.getRowType.getFieldCount
val filterNulls = new JArrayList[JBoolean]()
val joinInfo = JoinUtil.createJoinInfo(left, right, rel.getCondition, filterNulls)
val keys = joinInfo.leftKeys ++ joinInfo.rightKeys.map(_ + leftFieldCnt)
def filterNull: Boolean = {
var i = keys.indexOf(index)
if (i >= joinInfo.leftKeys.length) {
i = i - joinInfo.leftKeys.length
}
filterNulls(i)
}
if (keys.contains(index) && filterNull) {
0D
} else {
        // As with its children, there may be better ways to estimate this.
// With JoinNullFilterPushdownRule, we can generate more NotNullFilters.
if (index < leftFieldCnt) {
fmq.getColumnOriginNullCount(rel.getLeft, index)
} else {
fmq.getColumnOriginNullCount(rel.getRight, index - leftFieldCnt)
}
}
} else {
null
}
}
def getColumnOriginNullCount(rel: RelNode, mq: RelMetadataQuery, index: Int): JDouble = null
}
object FlinkRelMdColumnOriginNullCount {
private val INSTANCE = new FlinkRelMdColumnOriginNullCount
val SOURCE: RelMetadataProvider = ReflectiveRelMetadataProvider.reflectiveSource(
FlinkMetadata.ColumnOriginNullCount.METHOD, INSTANCE)
}
|
shaoxuan-wang/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/metadata/FlinkRelMdColumnOriginNullCount.scala
|
Scala
|
apache-2.0
| 5,120
|
/*
* Copyright (c) 2015.
* All rights reserved by you.meng@chinacache
*
*/
package com.robin.grouping.computation
import java.io.PrintWriter
import com.robin.grouping.model.{Batch, BoltSlot, DispatchByW, TopicSlot}
import kafka.utils.Logging
import scala.collection.mutable.{ArrayBuffer, HashMap}
import scala.io.Source
/**
*
* c: Channel
* s/b: Slot/Blot
* num: boundary based batch
*
*
* c1 c2 c3 c4 c5 c6 c7
* | | | |
* +-----+ +--------+
* s1 s2
* | |
* +---------------+
* b1
*
* ================================
*
* s1 s2 s3 s4 s5 s6 s7 s8
* | | | |
* +--------+ +------------+
* v1 num1 v2 v1 num2 v2
*
* So:
* v1,v2,v3,v4: Boundaries
* Num1,Num2 : slotNums in Batch
* c1,c2,c3->s1 :Slot
* s1,s2->b1 :Bolt
* How to spread s1~s3 into Num1: SlotSpreadingAlgorithm
* How to spread s1~s8 into Num1,Num2,Num3 : AssignmentAlgorithm
*
* Created by robinmac on 15-7-30.
*/
class SizeComputation(newAddedHDFSSizeURL: String, topicNum: Int, boltNum: Int) extends Logging {
val readHDFSSize =
addAddtionalHDFSFile(_: HashMap[String, Long], newAddedHDFSSizeURL)
// batchNames: "id:size(G):"
val batchNames = Array("0:0:0", "1:200:1", "2:90:10", "3:18:360")
def startAssignment(infomap: HashMap[String, Long],
topicAction: (Iterable[(String, Long)]) => Unit,
boltAction: (Iterable[(String, Long)]) => Unit
): Unit = {
val topicSlotList: ArrayBuffer[TopicSlot] = startAssignTopicSlot(infomap)
val topicmappinglist = new ArrayBuffer[(String, Long)]
topicSlotList.foreach(slot => topicmappinglist.appendAll(slot.transformToMap()))
topicAction(topicmappinglist)
val boltSlotList: ArrayBuffer[BoltSlot] = startAssignBoltSlot(topicSlotList)
val boltmappinglist = new ArrayBuffer[(String, Long)]
boltSlotList.foreach(bslot => {
logger.info(bslot)
bslot.tsList.foreach(tslot => {
val values=tslot.transformToMap(bslot.slotid)
// values.foreach(println)
boltmappinglist.appendAll(values)
})
})
boltAction(boltmappinglist)
}
/**
* Put the topic Slots into BoltSlots using W-Dispatch
* @param topicSlotList
* @return
*/
def startAssignBoltSlot(topicSlotList: ArrayBuffer[TopicSlot]): ArrayBuffer[BoltSlot] = {
var datasum = 0l;
var channelNum = 0l;
topicSlotList.foreach(v => {
datasum += v.getDataSize
channelNum += v.getChannelNum
})
val avgDataSize = datasum / boltNum
println("avg Datasize: " + avgDataSize)
val res = new ArrayBuffer[BoltSlot](boltNum)
for (i <- 0 to boltNum)
res += new BoltSlot(i)
var currentBoltSize = 0l
var cp = new DispatchByW(0, boltNum)
var firsttime=true
topicSlotList.sortWith(_.getDataSize > _.getDataSize).foreach(slot => {
// println(slot)
logger.debug(slot + " --- cp=" + cp.value + " size:" + res(cp.value).getDataSize)
var dataSize = slot.getDataSize
do {
val originalSize = res(cp.value).getDataSize
res(cp.value).tsList += slot
val filledSize = avgDataSize - originalSize
dataSize += -filledSize
cp.change()
if(slot.batchType==3&&firsttime){
firsttime=false
          // Note: this line was syntactically incomplete in the original
          // ("assignSlotNums(assignBoundaries(),)"); assuming the slot count of the
          // last batch was the intended value to subtract.
          val magicValue = (boltNum - assignSlotNums(assignBoundaries()).last).toInt
cp=new DispatchByW(magicValue, boltNum)
}
} while (dataSize > 0 && res(cp.value).getDataSize < avgDataSize)
})
res
}
def startAssignTopicSlot(infomap: HashMap[String, Long]): ArrayBuffer[TopicSlot] = {
//boundaries of each batch
val bounds = assignBoundaries(infomap)
// nums of each batch
val slotnums = assignSlotNums(bounds, infomap)
// All batches
val batchlist = new ArrayBuffer[Batch]
// sorted channels
var starter = 0l
for (i <- 1 to bounds.length - 1) {
val batchinfo: List[(String, Long)]
= infomap.toList.filter(v => {
v._2 > bounds(i) && v._2 <= bounds(i - 1)
})
batchlist += new Batch(batchinfo, slotnums(i - 1), starter, batchNames(i))
starter += slotnums(i - 1)
}
val topicSlotList = new ArrayBuffer[TopicSlot]
batchlist.foreach(v => {
topicSlotList.appendAll(v.assignSlotsInBatchAndGet())
logger.info(v.toString)
})
topicSlotList
}
/**
   * Determines how many slots are assigned to each batch delimited by the given boundaries.
*
* @param boundaries
* @param infomap
* @return
*/
def assignSlotNums(boundaries: Array[Long], infomap: HashMap[String, Long]=readHDFSSize(new HashMap[String, Long])): Array[Long] = {
Array(topicNum / 6, topicNum * 2 / 3, topicNum / 6)
}
def writeToFile(map: Iterable[(String, Long)], fileurl: String): Unit = {
val out = new PrintWriter(fileurl)
map.foreach(v => out.println(v._1 + "=" + v._2))
out.close()
logger.info("Finish writing configureations into " + fileurl)
}
def satasticSizeInfo: Array[Long] = {
logger.debug("Starting Staging-based satastic")
val volums = Array(0l, 0l, 0l, 0l)
val sizes = Array(0, 0, 0, 0)
val dataSizeMap = readHDFSSize(new HashMap[String, Long])
val bounds = assignBoundaries(dataSizeMap)
for (i <- 1 to bounds.length - 1)
sizes(i) = dataSizeMap.toList.filter(
v => v._2 match {
case v2 if v2 > bounds(i) && v2 <= bounds(i - 1) => true
case _ => false
}
).map(v => {
volums(i) += v._2
}
).size
logger.info("All Channel Size: " + dataSizeMap.size)
for (i <- 1 to bounds.length - 1)
logger.info("[" + bounds(i - 1) + "~~" + bounds(i) + "]Channel Num:" + sizes(i) + " ChannelSize:" + volums(i))
bounds
}
/**
   * Determines the boundaries of the batches; the precise value of each boundary is
   * simply hard-coded here.
   * @param infomap the data size of each channel
   * @return the boundaries of each batch
*/
def assignBoundaries(infomap: HashMap[String, Long]=readHDFSSize(new HashMap[String, Long])): Array[Long] =
Array(Integer.MAX_VALUE, 200000, 1000, -1)
/**
   * Reads data-size info from an HDFS output file.
   * More than one file is also supported; their sizes are summed together by key.
   *
   * @param dataSizeMap size mapping like channel1->size1, channel2->size2, ...
   * @param additionalURL URL of an additional file in the same form as dataSizeMap
   * @return the updated mapping
*/
def addAddtionalHDFSFile(dataSizeMap: HashMap[String, Long], additionalURL: String): HashMap[String, Long] = {
val lines = Source.fromFile(additionalURL).getLines().toList
for (line <- lines) {
val ss = line.split(" ")
val key = getKey(ss(2).split("[/]"))
if (!dataSizeMap.contains(key)) {
dataSizeMap(key) = 0
}
val value = ss(0).toLong >> 20
dataSizeMap(key) += value
}
logger.info("Summed size: " + dataSizeMap.values.sum + "\\n")
dataSizeMap
}
def getKey(sArray: Array[String]): String =
sArray(5) + "-" + sArray(6)
}
object SizeComputation extends App {
val url = "/Application/nla/log_pick/conf/test/size.out";
val sc = new SizeComputation(url, 300, 100)
val sizeMap = sc.readHDFSSize(new HashMap[String, Long])
val mappinglist = sc.startAssignment(sizeMap, sc.writeToFile(_, url + ".topicmapping"), sc.writeToFile(_, url + ".boltmapping"))
val boundaries = sc.satasticSizeInfo;
// if(!topicInfos.topicMapping.isEmpty)
// sc.rebalance(topicInfos.topicMapping)
}
|
mengyou0304/scala_simple_work
|
src/main/scala/com/robin/grouping/computation/SizeComputation.scala
|
Scala
|
apache-2.0
| 7,564
|
package scife
package enumeration
package lazytraversal
import scife.enumeration.dependent._
import memoization._
import scife.{ enumeration => e }
import scife.util._
import scife.util.logging._
import scife.util.structures._
import LazyBSTrees._
import scalaz.LazyTuple2
import benchmarks._
import org.scalatest._
import org.scalameter.api._
import scala.language.existentials
import scala.language.postfixOps
class BinarySearchTree
// extends StructuresBenchmark[Depend[((Int, Range), LazyEnum[Tree]), Tree] {
extends DependentMemoizedBenchmark[(Int, Int), Depend[((Int, Range), LazyEnum[Tree]), Tree] {
type EnumSort[A] = Finite[A] with Touchable[A] with Resetable[A] with Skippable[A] }]
// extends PerformanceTest.OfflineReport with ProfileLogger
{
type Ugly = LazyEnum[Tree]
type EnumType[A] = Finite[A] with Touchable[A] with Resetable[A] with Skippable[A]
type DepEnumType[I, O] = Depend[I, O] { type EnumSort[A] = BinarySearchTree.this.EnumType[A] }
type DepEnumTypeFinite[I, O] = DependFinite[I, O] { type EnumSort[A] = BinarySearchTree.this.EnumType[A] }
type EType = DepEnumType[((Int, Range), LazyEnum[Tree]), Tree]
implicit val treeTag = implicitly[reflect.ClassTag[scife.util.structures.LazyBSTrees.Tree]]
override def generator(maxSize: Int): Gen[(Int, Int)] =
for (size <- Gen.range("size")(1, maxSize, 1);
missingEl <- Gen.range("missingElement")(0, size, 1)) yield
(size, missingEl)
//
def measureCode(tdEnum: EType) = {
(in: (Int, Int)) =>
// (size: Int) =>
val (size, el) = in
// var enum = tdEnum.getEnum((size - 1, 1 to size - 1), null)
var enum = tdEnum.getEnum((size, 1 to size), null)
// for (el <- 1 to size) {
// enum = tdEnum.getEnum((size - 1, 1 to size - 1), null)
// enum = tdEnum.getEnum((size, 1 to size), null)
var nextInd = 0
while (nextInd < enum.size) {
enum.reset
val t = enum(nextInd)
val index = t insert el
index.lazyInvariant
nextInd = enum.next(nextInd)
}
// }
}
def warmUp(inEnum: EType, maxSize: Int) {
for (size <- 1 to maxSize) {
val enum = inEnum.getEnum((size, 1 to size), null)
for (i <- 0 until enum.size) enum(i)
}
}
def constructEnumerator(implicit ms: e.memoization.MemoizationScope): DepEnumType[((Int, Range), LazyEnum[Tree]), Tree] = {
val res =
new WrapFunctionTest2[((Int, Range), Ugly), Tree, EnumType](
((self: DepEnumType[((Int, Range), Ugly), Tree], pair: ((Int, Range), Ugly)) => {
val ((size, range), ug) = pair
val reuse: split.ChainFiniteSingleCombine[(Int, Int), LazyTuple2[Tree, Tree], Tree] =
if (ug.isInstanceOf[split.ChainFiniteSingleCombine[(Int, Int), LazyTuple2[Tree, Tree], Tree]]) {
ug.asInstanceOf[split.ChainFiniteSingleCombine[(Int, Int), LazyTuple2[Tree, Tree], Tree]]
} else null
if (size < 0) throw new RuntimeException
if (size <= 0) new e.Singleton((Leaf: Tree)) with Touchable[Tree] with Resetable[Tree] with NoSkip[Tree] {
override def toString = s"Singleton[$hashCode]"
}
else if (range.isEmpty) Empty
else if (size == 1)
new e.WrapArray(range map { v => (Node(Leaf, v, Leaf): Tree) } toArray) with Touchable[Tree] with Resetable[Tree] with NoSkip[Tree] {
// override def toString = s"Array[$hashCode](${toList})"
override def toString = s"Array[$hashCode]()"
}
else {
val leftSizes = e.Enum(0 until size)
val roots = e.Enum(range)
val rootLeftSizePairs = e.Product(leftSizes, roots)
val leftTrees: DepEnumTypeFinite[(Int, Int), Tree] = new InMap(self, { (par: (Int, Int)) =>
val (leftSize, median) = par
((leftSize, range.start to (median - 1)), ug)
}) with DependFinite[(Int, Int), Tree] {
override type EnumSort[A] = BinarySearchTree.this.EnumType[A]
}
val rightTrees: DepEnumTypeFinite[(Int, Int), Tree] =
new InMap(self, { (par: (Int, Int)) =>
val (leftSize, median) = par
((size - leftSize - 1, (median + 1) to range.end), ug)
}) with DependFinite[(Int, Int), Tree] {
override type EnumSort[A] = BinarySearchTree.this.EnumType[A]
}
val leftRightPairs: DepEnumTypeFinite[(Int, Int), LazyTuple2[Tree, Tree]] =
lazytraversal.split.dependent.ProductFinite(leftTrees, rightTrees)
val fConstructTree: ((Int, Int), => LazyTuple2[Tree, Tree]) => Tree =
(p1, p2) => {
Node(p2._1, p1._2, p2._2)
}
val allNodes =
if (reuse == null)
new {
val classTagT = treeTag
} with lazytraversal.split.ChainFiniteSingleCombine[(Int, Int), LazyTuple2[Tree, Tree], Tree](
rootLeftSizePairs, leftRightPairs,
fConstructTree)(null) with e.memoization.MemoizedSize with e.memoization.MemoizedStatic[Tree] with Touchable[Tree] {
// override def toString = s"ChainFiniteSingleCombine[$hashCode](${leftRightPairs.hashCode})"
}
else
new {
val classTagT = treeTag
} with lazytraversal.split.ChainFiniteSingleCombine[(Int, Int), LazyTuple2[Tree, Tree], Tree](
rootLeftSizePairs, leftRightPairs,
fConstructTree)(reuse.inner) with e.memoization.MemoizedSize with e.memoization.MemoizedStatic[Tree] with Touchable[Tree] {
// override def toString = s"ChainFiniteSingleCombine[$hashCode](${leftRightPairs.hashCode})"
}
allNodes: BinarySearchTree.this.EnumType[Tree]
}
}): (DepEnumType[((Int, Range), Ugly), Tree], ((Int, Range), Ugly)) => EnumType[Tree]) with split.Memoized[(Int, Range), Tree, Ugly]
ms add res
res
}
}
|
kaptoxic/SciFe
|
src/bench/test/scala/scife/enumeration/lazytraversal/BinarySearchTree.scala
|
Scala
|
gpl-2.0
| 6,038
|
/*
* This file is part of EasyForger which is released under GPLv3 License.
* See file LICENSE.txt or go to http://www.gnu.org/licenses/gpl-3.0.en.html for full license details.
*/
package com.easyforger.samples.misc
import com.easyforger.base.EasyForger
import net.minecraftforge.fml.common.Mod
import net.minecraftforge.fml.common.Mod.EventHandler
import net.minecraftforge.fml.common.event.FMLPreInitializationEvent
@Mod(modid = DungeonsMod.modId, name = "EasyForger Dungeons Mod", version = "0.6", modLanguage = "scala")
object DungeonsMod extends EasyForger {
final val modId = "easyforger_dungeons"
@EventHandler
def preInit(event: FMLPreInitializationEvent): Unit = {
dungeonMobs(
EntityName.Creeper -> 100,
EntityName.Zombie -> 400,
EntityName.Enderman -> 50
)
chestDrops(
Chests.spawnBonus -> s"$modId:chests/ef_spawn_bonus_chest"
)
}
}
|
easyforger/easyforger-samples
|
src/main/scala/com/easyforger/samples/misc/DungeonsMod.scala
|
Scala
|
gpl-3.0
| 901
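The dungeonMobs entries above are relative weights, not percentages. Assuming the usual weighted-random selection (each weight divided by the total), a quick plain-Scala check of what those numbers mean:

// Hypothetical helper, independent of Forge/EasyForger: convert the spawn
// weights used above into approximate spawn chances.
val weights = Map("Creeper" -> 100, "Zombie" -> 400, "Enderman" -> 50)
val total   = weights.values.sum // 550
weights.foreach { case (mob, w) =>
  println(f"$mob%-9s ${w * 100.0 / total}%.1f%%")
}
// Creeper ~18.2%, Zombie ~72.7%, Enderman ~9.1%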
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import java.util.concurrent.Semaphore
import java.util.concurrent.atomic.AtomicBoolean
import scala.concurrent.Future
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.deploy.{ApplicationDescription, Command}
import org.apache.spark.deploy.client.{StandaloneAppClient, StandaloneAppClientListener}
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.internal.config.Tests.IS_TESTING
import org.apache.spark.launcher.{LauncherBackend, SparkAppHandle}
import org.apache.spark.resource.{ResourceProfile, ResourceUtils}
import org.apache.spark.rpc.RpcEndpointAddress
import org.apache.spark.scheduler._
import org.apache.spark.util.Utils
/**
* A [[SchedulerBackend]] implementation for Spark's standalone cluster manager.
*/
private[spark] class StandaloneSchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext,
masters: Array[String])
extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv)
with StandaloneAppClientListener
with Logging {
private[spark] var client: StandaloneAppClient = null
private val stopping = new AtomicBoolean(false)
private val launcherBackend = new LauncherBackend() {
override protected def conf: SparkConf = sc.conf
override protected def onStopRequest(): Unit = stop(SparkAppHandle.State.KILLED)
}
@volatile var shutdownCallback: StandaloneSchedulerBackend => Unit = _
@volatile private var appId: String = _
private val registrationBarrier = new Semaphore(0)
private val maxCores = conf.get(config.CORES_MAX)
private val totalExpectedCores = maxCores.getOrElse(0)
private val defaultProf = sc.resourceProfileManager.defaultResourceProfile
override def start(): Unit = {
super.start()
// SPARK-21159. The scheduler backend should only try to connect to the launcher when in client
// mode. In cluster mode, the code that submits the application to the Master needs to connect
// to the launcher instead.
if (sc.deployMode == "client") {
launcherBackend.connect()
}
// The endpoint for executors to talk to us
val driverUrl = RpcEndpointAddress(
sc.conf.get(config.DRIVER_HOST_ADDRESS),
sc.conf.get(config.DRIVER_PORT),
CoarseGrainedSchedulerBackend.ENDPOINT_NAME).toString
val args = Seq(
"--driver-url", driverUrl,
"--executor-id", "{{EXECUTOR_ID}}",
"--hostname", "{{HOSTNAME}}",
"--cores", "{{CORES}}",
"--app-id", "{{APP_ID}}",
"--worker-url", "{{WORKER_URL}}")
val extraJavaOpts = sc.conf.get(config.EXECUTOR_JAVA_OPTIONS)
.map(Utils.splitCommandString).getOrElse(Seq.empty)
val classPathEntries = sc.conf.get(config.EXECUTOR_CLASS_PATH)
.map(_.split(java.io.File.pathSeparator).toSeq).getOrElse(Nil)
val libraryPathEntries = sc.conf.get(config.EXECUTOR_LIBRARY_PATH)
.map(_.split(java.io.File.pathSeparator).toSeq).getOrElse(Nil)
// When testing, expose the parent class path to the child. This is processed by
// compute-classpath.{cmd,sh} and makes all needed jars available to child processes
// when the assembly is built with the "*-provided" profiles enabled.
val testingClassPath =
if (sys.props.contains(IS_TESTING.key)) {
sys.props("java.class.path").split(java.io.File.pathSeparator).toSeq
} else {
Nil
}
// Start executors with a few necessary configs for registering with the scheduler
val sparkJavaOpts = Utils.sparkJavaOpts(conf, SparkConf.isExecutorStartupConf)
val javaOpts = sparkJavaOpts ++ extraJavaOpts
val command = Command("org.apache.spark.executor.CoarseGrainedExecutorBackend",
args, sc.executorEnvs, classPathEntries ++ testingClassPath, libraryPathEntries, javaOpts)
val webUrl = sc.ui.map(_.webUrl).getOrElse("")
val coresPerExecutor = conf.getOption(config.EXECUTOR_CORES.key).map(_.toInt)
// If we're using dynamic allocation, set our initial executor limit to 0 for now.
// ExecutorAllocationManager will send the real initial limit to the Master later.
val initialExecutorLimit =
if (Utils.isDynamicAllocationEnabled(conf)) {
Some(0)
} else {
None
}
val executorResourceReqs = ResourceUtils.parseResourceRequirements(conf,
config.SPARK_EXECUTOR_PREFIX)
val appDesc = ApplicationDescription(sc.appName, maxCores, sc.executorMemory, command,
webUrl, sc.eventLogDir, sc.eventLogCodec, coresPerExecutor, initialExecutorLimit,
resourceReqsPerExecutor = executorResourceReqs)
client = new StandaloneAppClient(sc.env.rpcEnv, masters, appDesc, this, conf)
client.start()
launcherBackend.setState(SparkAppHandle.State.SUBMITTED)
waitForRegistration()
launcherBackend.setState(SparkAppHandle.State.RUNNING)
}
override def stop(): Unit = {
stop(SparkAppHandle.State.FINISHED)
}
override def connected(appId: String): Unit = {
logInfo("Connected to Spark cluster with app ID " + appId)
this.appId = appId
notifyContext()
launcherBackend.setAppId(appId)
}
override def disconnected(): Unit = {
notifyContext()
if (!stopping.get) {
logWarning("Disconnected from Spark cluster! Waiting for reconnection...")
}
}
override def dead(reason: String): Unit = {
notifyContext()
if (!stopping.get) {
launcherBackend.setState(SparkAppHandle.State.KILLED)
logError("Application has been killed. Reason: " + reason)
try {
scheduler.error(reason)
} finally {
// Ensure the application terminates, as we can no longer run jobs.
sc.stopInNewThread()
}
}
}
override def executorAdded(fullId: String, workerId: String, hostPort: String, cores: Int,
memory: Int): Unit = {
logInfo("Granted executor ID %s on hostPort %s with %d core(s), %s RAM".format(
fullId, hostPort, cores, Utils.megabytesToString(memory)))
}
override def executorRemoved(
fullId: String,
message: String,
exitStatus: Option[Int],
workerHost: Option[String]): Unit = {
val reason: ExecutorLossReason = exitStatus match {
case Some(code) => ExecutorExited(code, exitCausedByApp = true, message)
case None => ExecutorProcessLost(message, workerHost)
}
logInfo("Executor %s removed: %s".format(fullId, message))
removeExecutor(fullId.split("/")(1), reason)
}
override def executorDecommissioned(fullId: String,
decommissionInfo: ExecutorDecommissionInfo): Unit = {
logInfo(s"Asked to decommission executor $fullId")
val execId = fullId.split("/")(1)
decommissionExecutors(
Array((execId, decommissionInfo)),
adjustTargetNumExecutors = false,
triggeredByExecutor = false)
logInfo("Executor %s decommissioned: %s".format(fullId, decommissionInfo))
}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {
logInfo("Worker %s removed: %s".format(workerId, message))
removeWorker(workerId, host, message)
}
override def sufficientResourcesRegistered(): Boolean = {
totalCoreCount.get() >= totalExpectedCores * minRegisteredRatio
}
override def applicationId(): String =
Option(appId).getOrElse {
logWarning("Application ID is not initialized yet.")
super.applicationId
}
/**
* Request executors from the Master by specifying the total number desired,
* including existing pending and running executors.
*
* @return whether the request is acknowledged.
*/
protected override def doRequestTotalExecutors(
resourceProfileToTotalExecs: Map[ResourceProfile, Int]): Future[Boolean] = {
// resources profiles not supported
Option(client) match {
case Some(c) =>
val numExecs = resourceProfileToTotalExecs.getOrElse(defaultProf, 0)
c.requestTotalExecutors(numExecs)
case None =>
logWarning("Attempted to request executors before driver fully initialized.")
Future.successful(false)
}
}
/**
* Kill the given list of executors through the Master.
* @return whether the kill request is acknowledged.
*/
protected override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = {
Option(client) match {
case Some(c) => c.killExecutors(executorIds)
case None =>
logWarning("Attempted to kill executors before driver fully initialized.")
Future.successful(false)
}
}
private def waitForRegistration() = {
registrationBarrier.acquire()
}
private def notifyContext() = {
registrationBarrier.release()
}
private def stop(finalState: SparkAppHandle.State): Unit = {
if (stopping.compareAndSet(false, true)) {
try {
super.stop()
if (client != null) {
client.stop()
}
val callback = shutdownCallback
if (callback != null) {
callback(this)
}
} finally {
launcherBackend.setState(finalState)
launcherBackend.close()
}
}
}
}
|
shuangshuangwang/spark
|
core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala
|
Scala
|
apache-2.0
| 9,872
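StandaloneSchedulerBackend.start() above blocks in waitForRegistration() on a Semaphore(0) until one of the StandaloneAppClientListener callbacks (connected, disconnected or dead) calls notifyContext(). A self-contained sketch of that handshake pattern, with illustrative names rather than Spark's API (Scala 2.12+ for the Runnable lambda):

import java.util.concurrent.Semaphore

// Sketch of the registration gate: the starting thread parks on acquire()
// and a callback thread releases it once the cluster has answered.
class RegistrationGate {
  private val barrier = new Semaphore(0)
  def waitForRegistration(): Unit = barrier.acquire()
  def notifyContext(): Unit = barrier.release()
}

object GateDemo extends App {
  val gate = new RegistrationGate
  val starter = new Thread(() => {
    println("start(): waiting for registration")
    gate.waitForRegistration()
    println("start(): registered, moving on")
  })
  starter.start()
  Thread.sleep(100)     // stand-in for the master taking a moment to respond
  gate.notifyContext()  // what connected(appId) does in the backend above
  starter.join()
}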
|
/* scala-stm - (c) 2009-2010, Stanford University, PPL */
package scala.concurrent.stm
import java.util.concurrent.CyclicBarrier
import org.scalatest.FunSuite
/** This test uses the transactional retry mechanism to pass a token around a
* ring of threads. When there are two threads this is a ping-pong test. A
* separate `Ref` is used for each handoff.
*
* @author Nathan Bronson
*/
class TokenRingSuite extends FunSuite {
test("small non-txn threesome") { tokenRing(3, 10000, false, false) }
test("small txn threesome") { tokenRing(3, 1000, true, false) }
test("small txn threesome reading via write") { tokenRing(3, 1000, true, true) }
test("non-txn ping-pong", Slow) { tokenRing(2, 1000000, false, false) }
test("non-txn threesome", Slow) { tokenRing(3, 1000000, false, false) }
test("non-txn large ring", Slow) { tokenRing(32, 10000, false, false) }
test("txn ping-pong", Slow) { tokenRing(2, 100000, true, false) }
test("txn threesome", Slow) { tokenRing(3, 100000, true, false) }
test("txn large ring", Slow) { tokenRing(32, 10000, true, false) }
test("txn ping-pong reading via write", Slow) { tokenRing(2, 100000, true, true) }
test("txn threesome reading via write", Slow) { tokenRing(3, 100000, true, true) }
test("txn large ring reading via write", Slow) { tokenRing(32, 10000, true, true) }
def tokenRing(ringSize: Int, handoffsPerThread: Int, useTxns: Boolean, useSwap: Boolean): Unit = {
val ready = Array.tabulate(ringSize)(i => Ref(i == 0))
val threads = new Array[Thread](ringSize - 1)
val barrier = new CyclicBarrier(ringSize, new Runnable {
var start = 0L
def run(): Unit = {
val now = System.currentTimeMillis
if (start == 0) {
start = now
} else {
val elapsed = now - start
val handoffs = handoffsPerThread * ringSize
println("tokenRing(" + ringSize + "," + handoffsPerThread + "," + useTxns +
") total_elapsed=" + elapsed + " msec, throughput=" +
(handoffs * 1000L) / elapsed + " handoffs/sec, latency=" +
(elapsed * 1000000L) / handoffs + " nanos/handoff")
}
}
})
for (index <- 0 until ringSize) {
val work = new Runnable {
def run(): Unit = {
val next = (index + 1) % ringSize
barrier.await
for (h <- 0 until handoffsPerThread) {
if (!useTxns) {
ready(index).single await { _ == true }
ready(index).single() = false
ready(next).single() = true
} else {
atomic { implicit t =>
if (!useSwap) {
if (ready(index).get == false) retry
ready(index)() = false
} else {
if (ready(index).swap(false) == false) retry
}
ready(next)() = true
}
}
}
barrier.await
}
}
if (index < ringSize - 1) {
threads(index) = new Thread(work, "worker " + index)
threads(index).start
} else {
work.run
}
}
for (t <- threads) t.join
}
}
|
djspiewak/scala-stm
|
src/test/scala/scala/concurrent/stm/TokenRingSuite.scala
|
Scala
|
bsd-3-clause
| 3,151
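The suite above passes a token around a ring by blocking with retry until a thread's own Ref becomes true, then flipping its neighbour's Ref inside the same transaction. A stripped-down two-thread version of that handoff, using the same scala-stm calls shown above (Scala 2.12+ for the Runnable lambdas):

import scala.concurrent.stm._

object PingPongSketch extends App {
  val ready = Array(Ref(true), Ref(false)) // thread 0 holds the token first
  def player(me: Int, handoffs: Int) = new Thread(() => {
    val next = (me + 1) % 2
    for (_ <- 1 to handoffs) atomic { implicit txn =>
      if (!ready(me)()) retry      // block until the token arrives
      ready(me)() = false
      ready(next)() = true         // hand it to the other player
    }
  })
  val players = Seq(player(0, 5), player(1, 5))
  players.foreach(_.start())
  players.foreach(_.join())
}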
|
package sml.instructions
/**
* Extends Instruction, adds fields required for a typical
* math operation on the sml machine
*/
trait MathInstruction extends Instruction {
/**
* Register address where the result of the math operation
* will be stored
*/
val result: Int
/**
* First operand of the math operation
*/
val op1: Int
/**
* Second operand of the math operation
*/
val op2: Int
}
|
BBK-PiJ-2015-67/sdp-portfolio
|
coursework/cw-one/src/main/scala/sml/instructions/MathInstruction.scala
|
Scala
|
unlicense
| 436
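MathInstruction above only names the three register addresses involved; the concrete math instructions supply the operation itself. A self-contained sketch of one such implementation, using stand-in Instruction and register types rather than the coursework's actual sml.Machine API:

// Hypothetical stand-ins for illustration only.
trait Instruction { def execute(regs: Array[Int]): Unit }
trait MathInstruction extends Instruction {
  val result: Int // register receiving the result
  val op1: Int    // register holding the first operand
  val op2: Int    // register holding the second operand
}
case class AddInstruction(result: Int, op1: Int, op2: Int) extends MathInstruction {
  def execute(regs: Array[Int]): Unit = regs(result) = regs(op1) + regs(op2)
}
object SmlSketch extends App {
  val regs = Array(0, 3, 4)
  AddInstruction(result = 0, op1 = 1, op2 = 2).execute(regs)
  println(regs(0)) // 7
}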
|
package com.twitter.concurrent
import com.twitter.util.{Await, Future, Promise, Return, Throw}
import java.io.EOFException
import org.specs.SpecificationWithJUnit
import scala.collection.mutable.ArrayBuffer
import Spool.{*::, **::, seqToSpool}
class SpoolSpec extends SpecificationWithJUnit {
"Empty Spool" should {
val s = Spool.empty[Int]
"iterate over all elements" in {
val xs = new ArrayBuffer[Int]
s foreach { xs += _ }
xs.size must be_==(0)
}
"map" in {
(s map { _ * 2 } ) must be_==(Spool.empty[Int])
}
"deconstruct" in {
s must beLike {
case x **:: rest => false
case _ => true
}
}
"append via ++" in {
(s ++ Spool.empty[Int]) must be_==(Spool.empty[Int])
(Spool.empty[Int] ++ s) must be_==(Spool.empty[Int])
val s2 = s ++ (3 **:: 4 **:: Spool.empty[Int])
Await.result(s2.toSeq) must be_==(Seq(3, 4))
}
"append via ++ with Future rhs" in {
Await.result(s ++ Future(Spool.empty[Int])) must be_==(Spool.empty[Int])
Await.result(Spool.empty[Int] ++ Future(s)) must be_==(Spool.empty[Int])
val s2 = s ++ Future(3 **:: 4 **:: Spool.empty[Int])
Await.result(s2 flatMap (_.toSeq)) must be_==(Seq(3, 4))
}
"flatMap" in {
val f = (x: Int) => Future(x.toString **:: (x * 2).toString **:: Spool.empty)
Await.result(s flatMap f) must be_==(Spool.empty[Int])
}
"fold left" in {
val fold = s.foldLeft(0){(x, y) => x + y}
Await.result(fold) must be_==(0)
}
"reduce left" in {
val fold = s.reduceLeft{(x, y) => x + y}
Await.result(fold) must throwAn[UnsupportedOperationException]
}
}
"Simple resolved Spool" should {
val s = 1 **:: 2 **:: Spool.empty
"iterate over all elements" in {
val xs = new ArrayBuffer[Int]
s foreach { xs += _ }
xs.toSeq must be_==(Seq(1,2))
}
"buffer to a sequence" in {
Await.result(s.toSeq) must be_==(Seq(1, 2))
}
"map" in {
Await.result(s map { _ * 2 } toSeq) must be_==(Seq(2, 4))
}
"deconstruct" in {
s must beLike {
case x **:: rest =>
x must be_==(1)
rest must beLike {
case y **:: rest if y == 2 && rest.isEmpty => true
}
}
}
"append via ++" in {
Await.result((s ++ Spool.empty[Int]).toSeq) must be_==(Seq(1, 2))
Await.result((Spool.empty[Int] ++ s).toSeq) must be_==(Seq(1, 2))
val s2 = s ++ (3 **:: 4 **:: Spool.empty)
Await.result(s2.toSeq) must be_==(Seq(1, 2, 3, 4))
}
"append via ++ with Future rhs" in {
Await.result(s ++ Future(Spool.empty[Int]) flatMap (_.toSeq)) must be_==(Seq(1, 2))
Await.result(Spool.empty[Int] ++ Future(s) flatMap (_.toSeq)) must be_==(Seq(1, 2))
val s2 = s ++ Future(3 **:: 4 **:: Spool.empty)
Await.result(s2 flatMap (_.toSeq)) must be_==(Seq(1, 2, 3, 4))
}
"flatMap" in {
val f = (x: Int) => Future(x.toString **:: (x * 2).toString **:: Spool.empty)
val s2 = s flatMap f
Await.result(s2 flatMap (_.toSeq)) must be_==(Seq("1", "2", "2", "4"))
}
"fold left" in {
val fold = s.foldLeft(0){(x, y) => x + y}
Await.result(fold) must be_==(3)
}
"reduce left" in {
val fold = s.reduceLeft{(x, y) => x + y}
Await.result(fold) must be_==(3)
}
"be roundtrippable through toSeq/toSpool" in {
val seq = (0 to 10).toSeq
Await.result(seq.toSpool.toSeq) must be_==(seq)
}
"flatten via flatMap of toSpool" in {
val spool = Seq(1, 2) **:: Seq(3, 4) **:: Spool.empty
val seq = Await.result(spool.toSeq)
val flatSpool =
spool.flatMap { inner =>
Future.value(inner.toSpool)
}
Await.result(flatSpool.flatMap(_.toSeq)) must be_==(seq.flatten)
}
}
"Simple resolved spool with EOFException" should {
val p = new Promise[Spool[Int]](Throw(new EOFException("sad panda")))
val s = 1 **:: 2 *:: p
"EOF iteration on EOFException" in {
val xs = new ArrayBuffer[Option[Int]]
s foreachElem { xs += _ }
xs.toSeq must be_==(Seq(Some(1), Some(2), None))
}
}
"Simple resolved spool with error" should {
val p = new Promise[Spool[Int]](Throw(new Exception("sad panda")))
val s = 1 **:: 2 *:: p
"return with exception on error" in {
val xs = new ArrayBuffer[Option[Int]]
s foreachElem { xs += _ }
Await.result(s.toSeq) must throwA[Exception]
}
"return with exception on error in callback" in {
val xs = new ArrayBuffer[Option[Int]]
val f = s foreach { _ => throw new Exception("sad panda") }
Await.result(f) must throwA[Exception]
}
"return with exception on EOFException in callback" in {
val xs = new ArrayBuffer[Option[Int]]
val f = s foreach { _ => throw new EOFException("sad panda") }
Await.result(f) must throwA[EOFException]
}
}
"Simple delayed Spool" should {
val p = new Promise[Spool[Int]]
val p1 = new Promise[Spool[Int]]
val p2 = new Promise[Spool[Int]]
val s = 1 *:: p
"iterate as results become available" in {
val xs = new ArrayBuffer[Int]
s foreach { xs += _ }
xs.toSeq must be_==(Seq(1))
p() = Return(2 *:: p1)
xs.toSeq must be_==(Seq(1, 2))
p1() = Return(Spool.empty)
xs.toSeq must be_==(Seq(1, 2))
}
"EOF iteration on EOFException" in {
val xs = new ArrayBuffer[Option[Int]]
s foreachElem { xs += _ }
xs.toSeq must be_==(Seq(Some(1)))
p() = Throw(new EOFException("sad panda"))
xs.toSeq must be_==(Seq(Some(1), None))
}
"return with exception on error" in {
val xs = new ArrayBuffer[Option[Int]]
s foreachElem { xs += _ }
xs.toSeq must be_==(Seq(Some(1)))
p() = Throw(new Exception("sad panda"))
Await.result(s.toSeq) must throwA[Exception]
}
"return with exception on error in callback" in {
val xs = new ArrayBuffer[Option[Int]]
val f = s foreach { _ => throw new Exception("sad panda") }
p() = Return(2 *:: p1)
Await.result(f) must throwA[Exception]
}
"return with exception on EOFException in callback" in {
val xs = new ArrayBuffer[Option[Int]]
val f = s foreach { _ => throw new EOFException("sad panda") }
p() = Return(2 *:: p1)
Await.result(f) must throwA[EOFException]
}
"return a buffered seq when complete" in {
val f = s.toSeq
f.isDefined must beFalse
p() = Return(2 *:: p1)
f.isDefined must beFalse
p1() = Return(Spool.empty)
f.isDefined must beTrue
Await.result(f) must be_==(Seq(1,2))
}
"deconstruct" in {
s must beLike {
case fst *:: rest if fst == 1 && !rest.isDefined => true
}
}
"collect" in {
val f = s collect {
case x if x % 2 == 0 => x * 2
}
f.isDefined must beFalse // 1 % 2 != 0, so the head of the collected spool is not resolved yet
p() = Return(2 *:: p1)
f.isDefined must beTrue
val s1 = Await.result(f)
s1 must beLike {
case x *:: rest if x == 4 && !rest.isDefined => true
}
p1() = Return(3 *:: p2)
s1 must beLike {
case x *:: rest if x == 4 && !rest.isDefined => true
}
p2() = Return(4 **:: Spool.empty)
val s1s = s1.toSeq
s1s.isDefined must beTrue
Await.result(s1s) must be_==(Seq(4, 8))
}
"fold left" in {
val f = s.foldLeft(0){(x, y) => x + y}
f.isDefined must beFalse
p() = Return(2 *:: p1)
f.isDefined must beFalse
p1() = Return(Spool.empty)
f.isDefined must beTrue
Await.result(f) must be_==(3)
}
"be lazy" in {
def mkSpool(i: Int = 0): Future[Spool[Int]] =
Future.value {
if (i < 3)
i *:: mkSpool(i + 1)
else
throw new AssertionError("Should not have produced " + i)
}
mkSpool() must not(throwA[AssertionError])
}
}
}
|
mosesn/util
|
util-core/src/test/scala/com/twitter/concurrent/SpoolSpec.scala
|
Scala
|
apache-2.0
| 8,020
|
/*
* Copyright 2006-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package record
import net.liftweb.http.S
import net.liftweb.http.S._
import net.liftweb.http.js._
import JsCmds._
import scala.xml.{NodeSeq, Node, Text, Elem}
import scala.xml.transform._
import net.liftweb.sitemap._
import net.liftweb.sitemap.Loc._
import net.liftweb.util.Helpers._
import net.liftweb.util._
import net.liftweb.common._
import net.liftweb.util.Mailer._
import net.liftweb.record.field._
import net.liftweb.proto.{ProtoUser => GenProtoUser}
/**
* ProtoUser provides a "User" with a first name, last name, email, etc.
*/
trait ProtoUser[T <: ProtoUser[T]] extends Record[T] {
self: T =>
/**
* The primary key field for the User. You can override the behavior
* of this field:
* <pre name="code" class="scala">
* override lazy val id = new MyMappedLongClass(this) {
* println("I am doing something different")
* }
* </pre>
*/
lazy val id: LongField[T] = new MyMappedLongClass(this)
protected class MyMappedLongClass(obj: T) extends LongField(obj)
/**
* Convert the id to a String
*/
def userIdAsString: String = id.get.toString
/**
* The first name field for the User. You can override the behavior
* of this field:
* <pre name="code" class="scala">
* override lazy val firstName = new MyFirstName(this, 32) {
* println("I am doing something different")
* }
* </pre>
*/
lazy val firstName: StringField[T] = new MyFirstName(this, 32)
protected class MyFirstName(obj: T, size: Int) extends StringField(obj, size) {
override def displayName = owner.firstNameDisplayName
override val fieldId = Some(Text("txtFirstName"))
}
/**
* The display name for the first name field
*/
def firstNameDisplayName = S.?("first.name")
/**
* The last name field for the User. You can override the behavior
* of this field:
* <pre name="code" class="scala">
* override lazy val lastName = new MyLastName(this, 32) {
* println("I am doing something different")
* }
* </pre>
*/
lazy val lastName: StringField[T] = new MyLastName(this, 32)
protected class MyLastName(obj: T, size: Int) extends StringField(obj, size) {
override def displayName = owner.lastNameDisplayName
override val fieldId = Some(Text("txtLastName"))
}
/**
* The display name for the last name field
*/
def lastNameDisplayName = S.?("last.name")
/**
* The email field for the User. You can override the behavior
* of this field:
* <pre name="code" class="scala">
* override lazy val email = new MyEmail(this, 48) {
* println("I am doing something different")
* }
* </pre>
*/
lazy val email: EmailField[T] = new MyEmail(this, 48)
protected class MyEmail(obj: T, size: Int) extends EmailField(obj, size) {
override def validations = valUnique(S.?("unique.email.address")) _ :: super.validations
override def displayName = owner.emailDisplayName
override val fieldId = Some(Text("txtEmail"))
}
protected def valUnique(errorMsg: => String)(email: String): List[FieldError]
/**
* The display name for the email field
*/
def emailDisplayName = S.?("email.address")
/**
* The password field for the User. You can override the behavior
* of this field:
* <pre name="code" class="scala">
* override lazy val password = new MyPassword(this) {
* println("I am doing something different")
* }
* </pre>
*/
lazy val password: PasswordField[T] = new MyPassword(this)
protected class MyPassword(obj: T) extends PasswordField(obj) {
override def displayName = owner.passwordDisplayName
}
/**
* The display name for the password field
*/
def passwordDisplayName = S.?("password")
/**
* The superuser field for the User. You can override the behavior
* of this field:
* <pre name="code" class="scala">
* override lazy val superUser = new MySuperUser(this) {
* println("I am doing something different")
* }
* </pre>
*/
lazy val superUser: BooleanField[T] = new MySuperUser(this)
protected class MySuperUser(obj: T) extends BooleanField(obj) {
override def defaultValue = false
}
def niceName: String = (firstName.get, lastName.get, email.get) match {
case (f, l, e) if f.length > 1 && l.length > 1 => f+" "+l+" ("+e+")"
case (f, _, e) if f.length > 1 => f+" ("+e+")"
case (_, l, e) if l.length > 1 => l+" ("+e+")"
case (_, _, e) => e
}
def shortName: String = (firstName.get, lastName.get) match {
case (f, l) if f.length > 1 && l.length > 1 => f+" "+l
case (f, _) if f.length > 1 => f
case (_, l) if l.length > 1 => l
case _ => email.get
}
def niceNameWEmailLink = <a href={"mailto:"+email.get}>{niceName}</a>
}
/**
* Mix this trait into the MetaRecord singleton for User and you
* get a bunch of user functionality including password reset, etc.
*/
trait MetaMegaProtoUser[ModelType <: MegaProtoUser[ModelType]] extends MetaRecord[ModelType] with GenProtoUser {
self: ModelType =>
type TheUserType = ModelType
/**
* What's a field pointer for the underlying CRUDify
*/
type FieldPointerType = Field[_, TheUserType]
/**
* Based on a FieldPointer, build a FieldPointerBridge
*/
protected implicit def buildFieldBridge(from: FieldPointerType): FieldPointerBridge = new MyPointer(from)
protected class MyPointer(from: FieldPointerType) extends FieldPointerBridge {
/**
* What is the display name of this field?
*/
def displayHtml: NodeSeq = from.displayHtml
/**
* Does this represent a pointer to a Password field?
*/
def isPasswordField_? : Boolean = from match {
case a: PasswordField[_] => true
case _ => false
}
}
/**
* Convert an instance of TheUserType to the Bridge trait
*/
protected implicit def typeToBridge(in: TheUserType): UserBridge =
new MyUserBridge(in)
/**
* Bridges from TheUserType to methods used in this class
*/
protected class MyUserBridge(in: TheUserType) extends UserBridge {
/**
* Convert the user's primary key to a String
*/
def userIdAsString: String = in.id.toString
/**
* Return the user's first name
*/
def getFirstName: String = in.firstName.get
/**
* Return the user's last name
*/
def getLastName: String = in.lastName.get
/**
* Get the user's email
*/
def getEmail: String = in.email.get
/**
* Is the user a superuser
*/
def superUser_? : Boolean = in.superUser.get
/**
* Has the user been validated?
*/
def validated_? : Boolean = in.validated.get
/**
* Does the supplied password match the actual password?
*/
def testPassword(toTest: Box[String]): Boolean =
toTest.map(in.password.match_?) openOr false
/**
* Set the validation flag on the user and return the user
*/
def setValidated(validation: Boolean): TheUserType =
in.validated(validation)
/**
* Set the unique ID for this user to a new value
*/
def resetUniqueId(): TheUserType = {
in.uniqueId.reset()
}
/**
* Return the unique ID for the user
*/
def getUniqueId(): String = in.uniqueId.get
/**
* Validate the user
*/
def validate: List[FieldError] = in.validate
/**
* Given a list of strings, set the password
*/
def setPasswordFromListString(pwd: List[String]): TheUserType = {
in.password.setFromAny(pwd)
in
}
/**
* Save the user to backing store
*/
def save(): Boolean = in.saveTheRecord().isDefined
}
/**
* Given a field pointer and an instance, get the field on that instance
*/
protected def computeFieldFromPointer(instance: TheUserType, pointer: FieldPointerType): Box[BaseField] = fieldByName(pointer.name, instance)
/**
* Given a username (probably an email address), find the user
*/
protected def findUserByUserName(email: String): Box[TheUserType]
/**
* Given a unique id, find the user
*/
protected def findUserByUniqueId(id: String): Box[TheUserType]
/**
* Create a new instance of the User
*/
protected def createNewUserInstance(): TheUserType = self.createRecord
/**
* Given a String representing the User ID, find the user
*/
protected def userFromStringId(id: String): Box[TheUserType]
/**
* The list of fields presented to the user at sign-up
*/
def signupFields: List[FieldPointerType] = List(firstName,
lastName,
email,
locale,
timezone,
password)
/**
* The list of fields presented to the user for editing
*/
def editFields: List[FieldPointerType] = List(firstName,
lastName,
email,
locale,
timezone)
}
/**
* ProtoUser is bare bones. MegaProtoUser contains a bunch
* more fields including a validated flag, locale, timezone, etc.
*/
trait MegaProtoUser[T <: MegaProtoUser[T]] extends ProtoUser[T] {
self: T =>
/**
* The unique id field for the User. This field
* is used for validation, lost passwords, etc.
* You can override the behavior
* of this field:
* <pre name="code" class="scala">
* override lazy val uniqueId = new MyUniqueId(this, 32) {
* println("I am doing something different")
* }
* </pre>
*/
lazy val uniqueId: UniqueIdField[T] = new MyUniqueId(this, 32)
protected class MyUniqueId(obj: T, size: Int) extends UniqueIdField(obj, size) {
}
/**
* Whether the user has been validated.
* You can override the behavior
* of this field:
* <pre name="code" class="scala">
* override lazy val validated = new MyValidated(this) {
* println("I am doing something different")
* }
* </pre>
*/
lazy val validated: BooleanField[T] = new MyValidated(this)
protected class MyValidated(obj: T) extends BooleanField[T](obj) {
override def defaultValue = false
override val fieldId = Some(Text("txtValidated"))
}
/**
* The locale field for the User.
* You can override the behavior
* of this field:
* <pre name="code" class="scala">
* override lazy val locale = new MyLocale(this) {
* println("I am doing something different")
* }
* </pre>
*/
lazy val locale: LocaleField[T] = new MyLocale(this)
protected class MyLocale(obj: T) extends LocaleField[T](obj) {
override def displayName = owner.localeDisplayName
override val fieldId = Some(Text("txtLocale"))
}
/**
* The time zone field for the User.
* You can override the behavior
* of this field:
* <pre name="code" class="scala">
* override lazy val timezone = new MyTimeZone(this) {
* println("I am doing something different")
* }
* </pre>
*/
lazy val timezone: TimeZoneField[T] = new MyTimeZone(this)
protected class MyTimeZone(obj: T) extends TimeZoneField[T](obj) {
override def displayName = owner.timezoneDisplayName
override val fieldId = Some(Text("txtTimeZone"))
}
/**
* The string for the timezone field
*/
def timezoneDisplayName = S.?("time.zone")
/**
* The string for the locale field
*/
def localeDisplayName = S.?("locale")
}
|
lzpfmh/framework-2
|
persistence/record/src/main/scala/net/liftweb/record/ProtoUser.scala
|
Scala
|
apache-2.0
| 12,165
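Every field in the ProtoUser traits above follows the same customization hook: the field is a lazy val built from a protected class, so an application can override the lazy val with an anonymous subclass, exactly as the <pre> examples show. A Lift-free sketch of that pattern (Field/User/CustomUser are stand-ins, not Record types):

class Field(val name: String) {
  def displayName: String = name
}
class User {
  protected class FirstNameField extends Field("firstName")
  lazy val firstName: Field = new FirstNameField
}
class CustomUser extends User {
  // same shape as `override lazy val firstName = new MyFirstName(this, 32) { ... }`
  override lazy val firstName: Field = new FirstNameField {
    override def displayName = "Given name"
  }
}
// new CustomUser().firstName.displayName == "Given name"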
|
package frameless
import scala.annotation.implicitNotFound
/** Types that can be added, subtracted and multiplied by Catalyst. */
@implicitNotFound("Cannot do numeric operations on columns of type ${A}.")
trait CatalystNumeric[A]
object CatalystNumeric {
private[this] val theInstance = new CatalystNumeric[Any] {}
private[this] def of[A]: CatalystNumeric[A] = theInstance.asInstanceOf[CatalystNumeric[A]]
implicit val framelessbigDecimalNumeric: CatalystNumeric[BigDecimal] = of[BigDecimal]
implicit val framelessbyteNumeric : CatalystNumeric[Byte] = of[Byte]
implicit val framelessdoubleNumeric : CatalystNumeric[Double] = of[Double]
implicit val framelessintNumeric : CatalystNumeric[Int] = of[Int]
implicit val framelesslongNumeric : CatalystNumeric[Long] = of[Long]
implicit val framelessshortNumeric : CatalystNumeric[Short] = of[Short]
}
|
adelbertc/frameless
|
core/src/main/scala/frameless/CatalystNumeric.scala
|
Scala
|
apache-2.0
| 916
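CatalystNumeric above is a marker type class with no members, so the companion object allocates a single instance typed at Any and casts it for every supported type; because the evidence is never inspected at runtime, the cast is harmless. A standalone sketch of the same trick with made-up names:

import scala.annotation.implicitNotFound

@implicitNotFound("No Foo support for ${A}.")
trait SupportsFoo[A]
object SupportsFoo {
  private[this] val theInstance = new SupportsFoo[Any] {}
  private[this] def of[A]: SupportsFoo[A] = theInstance.asInstanceOf[SupportsFoo[A]]
  implicit val intSupportsFoo:    SupportsFoo[Int]    = of[Int]
  implicit val doubleSupportsFoo: SupportsFoo[Double] = of[Double]
}
// The constraint is purely compile-time evidence:
def requiresFoo[A: SupportsFoo](a: A): A = a
// requiresFoo(1) compiles; requiresFoo("x") fails with the custom message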
|