code (stringlengths 5–1M) | repo_name (stringlengths 5–109) | path (stringlengths 6–208) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64 5–1M)
|---|---|---|---|---|---|
package com.gilt.thehand
import org.scalatest.FlatSpec
import com.gilt.thehand.test.RuleTester
/**
* This is a base class that can be used to test custom rules in a comprehensive manner. All classes that inherit from
* this class should call the 'runTests' method with a Map of Rules to a tuple containing two lists: the first a list of
* contexts that should match the rule, the second a list of contexts that should not match the rule. See @LongInSpec
* for an example.
*/
class AbstractRuleSpec extends FlatSpec with RuleTester {
val parser: AbstractRuleParser = DefaultParser
def runTestsInFramework(tests: Map[String, Map[String, () => Unit]]) {
tests foreach { case (groupName, tests) =>
val (firstTestDescription, firstTest) = tests.head
groupName should firstTestDescription in {
firstTest()
}
tests.tail.foreach { case ((description, test)) =>
it should description in {
test()
}
}
}
}
}
| gilt/the-hand | src/test/scala/com/gilt/thehand/AbstractRuleSpec.scala | Scala | apache-2.0 | 979 |
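A hedged usage sketch of the pattern described in the AbstractRuleSpec comment above: a subclass passes 'runTests' a Map from each Rule to a tuple of (contexts that should match, contexts that should not match). MyRule, the integer contexts, and the exact runTests signature are assumptions for illustration, not part of the library.

package com.gilt.thehand

// Hypothetical sketch only: MyRule and the integer contexts below are invented placeholders.
class MyRuleSpec extends AbstractRuleSpec {
  runTests(Map(
    MyRule(10) -> (Seq(11, 100), Seq(9, 10)) // (should match, should not match)
  ))
}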
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.reactive.Observable
import scala.concurrent.duration._
import scala.concurrent.duration.Duration.Zero
object DistinctUntilChangedByKeySuite extends BaseOperatorSuite {
case class Val(x: Long)
def createObservable(sourceCount: Int) = Some {
val o = Observable.range(0, sourceCount)
.flatMap(i => Observable.fromIterable(Seq(Val(i), Val(i), Val(i))))
.distinctUntilChangedByKey(_.x)
.map(_.x)
Sample(o, count(sourceCount), sum(sourceCount), Zero, Zero)
}
def observableInError(sourceCount: Int, ex: Throwable) = Some {
if (sourceCount == 1) {
val o = Observable.now(1L).endWithError(ex).distinctUntilChangedByKey(x => x)
Sample(o, 1, 1, Zero, Zero)
} else {
val source = Observable.range(0, sourceCount)
.flatMap(i => Observable.fromIterable(Seq(i, i, i)))
val o = createObservableEndingInError(source, ex)
.map(Val.apply)
.distinctUntilChangedByKey(_.x)
.map(_.x)
Sample(o, count(sourceCount), sum(sourceCount), Zero, Zero)
}
}
def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = Some {
val o = Observable.range(0, sourceCount)
.flatMap(i => Observable.fromIterable(Seq(Val(i), Val(i), Val(i))))
.distinctUntilChangedByKey(i => if (i.x == sourceCount-1) throw ex else i.x)
.map(_.x)
Sample(o, count(sourceCount-1), sum(sourceCount-1), Zero, Zero)
}
def count(sourceCount: Int) = sourceCount
def sum(sourceCount: Int) = sourceCount * (sourceCount - 1) / 2
override def cancelableObservables() = {
val o = Observable.now(1L).delayOnNext(1.second).distinctUntilChangedByKey(x => x)
Seq(Sample(o, 0, 0, 0.seconds, 0.seconds))
}
}
| Wogan/monix | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DistinctUntilChangedByKeySuite.scala | Scala | apache-2.0 | 2,429 |
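A minimal sketch of the behavior the suite above exercises, using only operators that appear in it (fromIterable, distinctUntilChangedByKey, map); the wrapping object is hypothetical. distinctUntilChangedByKey drops an element when its key equals the key of the immediately preceding element, which is why the tripled values in the suite collapse back to one element per key.

import monix.reactive.Observable

object DistinctUntilChangedByKeySketch {
  case class Val(x: Long)

  // Emits 0, 1, 2: consecutive duplicates are dropped because their key (_.x) does not change.
  val keys: Observable[Long] =
    Observable.fromIterable(Seq(Val(0), Val(0), Val(1), Val(1), Val(2)))
      .distinctUntilChangedByKey(_.x)
      .map(_.x)
}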
package com.arcusys.learn.scorm.manifest.sequencing.storage.impl
import com.arcusys.learn.storage.impl.{ EntityStorageExt, KeyedEntityStorageExt }
import com.arcusys.valamis.lesson.scorm.model.manifest._
import com.arcusys.valamis.lesson.scorm.storage.sequencing._
/**
* User: Yulia.Glushonkova
* Date: 09.04.13
*/
trait SequencingEntityStorage extends SequencingStorage with KeyedEntityStorageExt[Sequencing] with EntityStorageExt[Sequencing] {
val sequencingPermissionsStorage: SequencingPermissionsStorage
val rollupContributionStorage: RollupContributionStorage
val objectiveStorageStorage: ObjectiveStorage
val childrenSelectionStorage: ChildrenSelectionStorage
val sequencingTrackingStorage: SequencingTrackingStorage
val rollupRuleStorage: RollupRuleStorage
val exitConditionRuleStorageImpl: ConditionRuleStorage[ExitConditionRule]
val preConditionRuleStorageImpl: ConditionRuleStorage[PreConditionRule]
val postConditionRuleStorageImpl: ConditionRuleStorage[PostConditionRule]
def objectiveMapStorage: ObjectiveMapStorage
def ruleConditionStorage: RuleConditionStorage
override def renew() {
super.renew()
sequencingPermissionsStorage.renew()
rollupContributionStorage.renew()
objectiveStorageStorage.renew()
objectiveMapStorage.renew()
childrenSelectionStorage.renew()
sequencingTrackingStorage.renew()
exitConditionRuleStorageImpl.renew()
preConditionRuleStorageImpl.renew()
postConditionRuleStorageImpl.renew()
rollupRuleStorage.renew()
ruleConditionStorage.renew()
}
def create(packageID: Int, activityID: String, sequencing: Sequencing) {
val id = createAndGetID(sequencing, "packageID" -> packageID, "activityID" -> activityID)
sequencingPermissionsStorage.create(id, sequencing.permissions)
rollupContributionStorage.create(id, sequencing.rollupContribution)
if (sequencing.primaryObjective.isDefined) objectiveStorageStorage.create(id, sequencing.primaryObjective.get, isPrimary = true)
sequencing.nonPrimaryObjectives.foreach(objectiveStorageStorage.create(id, _, isPrimary = false))
childrenSelectionStorage.create(id, sequencing.childrenSelection)
if (sequencing.tracking.isDefined) sequencingTrackingStorage.create(id, sequencing.tracking.get)
sequencing.rollupRules.foreach(rollupRuleStorage.create(id, _))
// exit/pre/post rules
sequencing.exitConditionRules.foreach(exitConditionRuleStorageImpl.create(id, _))
sequencing.preConditionRules.foreach(preConditionRuleStorageImpl.create(id, _))
sequencing.postConditionRules.foreach(postConditionRuleStorageImpl.create(id, _))
}
def get(packageID: Int, activityID: String): Option[Sequencing] = {
getOne("packageID" -> packageID, "activityID" -> activityID)
}
def delete(packageID: Int, activityID: String) { delete("packageID" -> packageID, "activityID" -> activityID) }
/*
override def renew() {
super.renew()
sequencingPermissionsStorage.renew()
rollupContributionStorage.renew()
objectiveStorageStorage.renew()
(new ObjectiveMapStorageImpl).renew()
childrenSelectionStorage.renew()
sequencingTrackingStorage.renew()
exitConditionRuleStorageImpl.renew()
preConditionRuleStorageImpl.renew()
postConditionRuleStorageImpl.renew()
rollupRuleStorage.renew()
(new RuleConditionStorageImpl).renew()
}
*/
}
trait SequencingFieldsMapper {
def sharedId: Option[String]
def sharedSequencingIdReference: Option[String]
def id: Int
def onlyCurrentAttemptObjectiveProgressForChildren: Boolean
def onlyCurrentAttemptAttemptProgressForChildren: Boolean
def attemptLimit: Option[Int]
def durationLimitInMilliseconds: Option[Long]
def preventChildrenActivation: Boolean
def constrainChoice: Boolean
}
trait SequencingCreator {
val sequencingPermissionsStorage: SequencingPermissionsStorage
val rollupContributionStorage: RollupContributionStorage
val objectiveStorageStorage: ObjectiveStorage
val childrenSelectionStorage: ChildrenSelectionStorage
val sequencingTrackingStorage: SequencingTrackingStorage
val rollupRuleStorage: RollupRuleStorage
val exitConditionRuleStorageImpl: ConditionRuleStorage[ExitConditionRule]
val preConditionRuleStorageImpl: ConditionRuleStorage[PreConditionRule]
val postConditionRuleStorageImpl: ConditionRuleStorage[PostConditionRule]
def createSequencing(mapper: SequencingFieldsMapper) = {
import mapper._
new Sequencing(
sharedId,
sharedSequencingIdReference,
sequencingPermissionsStorage.get(id).get,
onlyCurrentAttemptObjectiveProgressForChildren,
onlyCurrentAttemptAttemptProgressForChildren,
attemptLimit,
durationLimitInMilliseconds,
rollupContributionStorage.get(id).get,
objectiveStorageStorage.getPrimary(id),
objectiveStorageStorage.getNonPrimary(id),
childrenSelectionStorage.get(id).get,
sequencingTrackingStorage.get(id),
preventChildrenActivation,
constrainChoice,
preConditionRuleStorageImpl.getRules(id),
postConditionRuleStorageImpl.getRules(id),
exitConditionRuleStorageImpl.getRules(id),
rollupRuleStorage.get(id)
)
}
}
| ViLPy/Valamis | learn-persistence-api/src/main/scala/com/arcusys/learn/scorm/manifest/sequencing/storage/impl/SequencingEntityStorage.scala | Scala | lgpl-3.0 | 5,191 |
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package relite
import r._
import r.data._
import r.data.internal._
import r.builtins.{ CallFactory, Primitives }
import r.nodes._
import r.nodes.truffle.{ BaseR, RNode }
import com.oracle.truffle.api.frame._;
import org.antlr.runtime._
import java.io._
import scala.collection.JavaConversions._
object Test2 {
def eval(e: ASTNode, frame: Frame) = e match {
case e: FunctionCall =>
println("unknown f: " + e.getName + " / " + e);
println("unknown f: " + e.getArgs.first.getValue) //foreach(_.getValue));
new RLanguage(e)
case _ => println("unknown: " + e); new RLanguage(e) //RInt.RIntFactory.getScalar(42)
}
def main(args: Array[String]): Unit = {
val cf = new CallFactory("foobar", Array("e"), Array("e")) {
def create(call: ASTNode, names: Array[RSymbol], exprs: Array[RNode]): RNode = {
check(call, names, exprs)
val expr = exprs(0)
val ast = expr.getAST()
val ast1: AnyRef = ast // apparently ASTNode member fields are reassigned -- don't make it look like one!
new BaseR(call) {
def execute(frame: Frame): AnyRef = {
val ast = ast1.asInstanceOf[ASTNode]
println("dyn " + ast1 + "/" + System.identityHashCode(ast1))
eval(ast, null)
}
}
}
}
Primitives.add(cf)
val res = RContext.eval(RContext.parseFile(
new ANTLRInputStream(new ByteArrayInputStream("5+5; foobar(Vector.rand(100))".getBytes))))
println(res.pretty)
}
}
| relite/Relite | test-src/test2.scala | Scala | agpl-3.0 | 2,502 |
/*
* Copyright (c) 2017 Magomed Abdurakhmanov, Hypertino
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*/
package com.hypertino.facade.filter.parser
import com.hypertino.binders.value.{Bool, False, Text, Value}
import com.hypertino.parser.HParser
import com.hypertino.parser.ast.Identifier
import org.parboiled2._
import scala.util.{Failure, Success}
class IpParser(val input: ParserInput) extends Parser {
import CharPredicate.Digit
private def WhiteSpace = rule { zeroOrMore(HParser.WhiteSpaceChar) }
private def IpOctet = rule { (1 to 3).times(Digit) }
private def IpRule = rule { 4.times(IpOctet).separatedBy('.') }
private def IpAddress = rule { WhiteSpace ~ capture(IpRule) ~ WhiteSpace ~> (i ⇒ i) }
private def IpRangeByFromTo = rule { capture(IpRule) ~ WhiteSpace ~ '-' ~ WhiteSpace ~ capture(IpRule) ~> ((from, to) ⇒ (from, to)) }
private def IpRangeBySubnet = rule { (capture(IpRule) ~ WhiteSpace ~ '/' ~ WhiteSpace ~ capture((1 to 2).times(Digit))) ~> ((subnet, mask) ⇒ (subnet, mask)) }
private def IpInputLine = rule { IpAddress ~ EOI }
private def IpRangeInputLine = rule { (IpRangeByFromTo | IpRangeBySubnet) ~ EOI }
}
object IpParser {
val IP_MATCHES = "ip matches"
def binaryOperation: PartialFunction[(Value, Identifier, Value), Value] = {
case (Text(ip), Identifier(Seq(IP_MATCHES)), Text(range)) ⇒
rangeContainsIp(range, ip) match {
case Some(answer) ⇒
Bool(answer)
case None ⇒
False
}
}
private def rangeContainsIp(rangeExpr: String, ipExpr: String): Option[Boolean] = {
(parseIpRange(rangeExpr), parseIp(ipExpr)) match {
case (Some(range), Some(ip)) ⇒
Some(rangeContainsIp(range._1, range._2, ip))
case _ ⇒
None
}
}
private def parseIp(expr: String): Option[String] = {
IpParser(expr).IpInputLine.run() match {
case Success(ip) ⇒ Some(ip)
case Failure(_) ⇒ None
}
}
private def parseIpRange(expr: String): Option[(String, String)] = {
new IpParser(expr).IpRangeInputLine.run() match {
case Success(range) ⇒
Some(range)
case Failure(_) ⇒
None
}
}
private def apply(expr: String): IpParser = {
new IpParser(expr)
}
private def rangeContainsIp(rangeStart: String, rangeEnd: String, ip: String) = {
if (rangeEnd.length <= 2)
rangeBySubnetContainsIp(rangeStart, rangeEnd, ip)
else
rangeByAddrContainsIp(rangeStart, rangeEnd, ip)
}
private def rangeByAddrContainsIp(rangeStart: String, rangeEnd: String, ip: String) = {
val from = ipToLong(rangeStart)
val to = ipToLong(rangeEnd)
val addr = ipToLong(ip)
addr >= from && addr <= to
}
private def rangeBySubnetContainsIp(subnet: String, mask: String, ip: String) = {
val lowerBits = (1l << (32 - mask.toInt)) - 1
val from = ipToLong(subnet)
val to = from + lowerBits
val addr = ipToLong(ip)
addr >= from && addr <= to
}
private def ipToLong(ip: String): Long = {
var ipAddress: Long = 0
val segments = ip.split('.').reverse
for (i ← 3 to 0 by -1) {
ipAddress += segments(i).toLong << (i * 8)
}
ipAddress
}
}
| hypertino/hyperfacade | src/main/scala/com/hypertino/facade/filter/parser/IpParser.scala | Scala | mpl-2.0 | 3,364 |
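A small sketch of the "ip matches" operation defined in IpParser above; the wrapping object and the concrete addresses are invented for illustration. A /24 mask covers the 256 addresses sharing the first three octets, and a from-to range is an inclusive comparison after ipToLong, so the first call yields Bool(true) and the second Bool(false).

import com.hypertino.binders.value.Text
import com.hypertino.facade.filter.parser.IpParser
import com.hypertino.parser.ast.Identifier

// Sketch only: addresses and the object name are placeholders.
object IpMatchesSketch extends App {
  // 10.0.1.15 lies inside 10.0.1.0/24 (10.0.1.0 .. 10.0.1.255) => Bool(true)
  println(IpParser.binaryOperation((Text("10.0.1.15"), Identifier(Seq("ip matches")), Text("10.0.1.0/24"))))
  // 10.0.2.1 lies outside the inclusive range 10.0.1.1 - 10.0.1.200 => Bool(false)
  println(IpParser.binaryOperation((Text("10.0.2.1"), Identifier(Seq("ip matches")), Text("10.0.1.1 - 10.0.1.200"))))
}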
package controllers
import play.api.libs.json._
import play.api.mvc._
import lila.api.Context
import lila.app._
import lila.common.HTTPRequest
import views._
object Timeline extends LilaController {
def home = Auth { implicit ctx =>
import lila.timeline.Entry.entryWrites
val nb = getInt("nb").fold(100)(_ min 100)
me =>
negotiate(
html = {
if (HTTPRequest.isXhr(ctx.req))
Env.timeline.entryRepo.userEntries(me.id) map { html.timeline.entries(_) }
else {
val entries = Env.timeline.entryRepo.moreUserEntries(me.id, nb)
entries map { html.timeline.more(_) }
}
},
_ => {
val entries = Env.timeline.entryRepo.moreUserEntries(me.id, nb)
entries map { es => Ok(Json.obj("entries" -> es)) }
}
)
}
def unsub(channel: String) = Auth { implicit ctx =>
me =>
Env.timeline.unsubApi.set(channel, me.id, ~get("unsub") == "on")
}
}
| clarkerubber/lila | app/controllers/Timeline.scala | Scala | agpl-3.0 | 984 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.crud.rest.instance.destroyer
import com.bwsw.common.marathon._
import com.bwsw.sj.common.dal.repository.ConnectionRepository
import com.bwsw.sj.common.si.model.instance.Instance
import com.bwsw.sj.crud.rest.common.InstanceRepositoryMock
import com.bwsw.sj.crud.rest.instance._
import org.apache.http.client.methods.CloseableHttpResponse
import org.apache.http.{HttpStatus, StatusLine}
import org.mockito.Mockito._
import org.scalatest.mockito.MockitoSugar
import scaldi.{Injector, Module}
trait InstanceDestroyerMocks extends MockitoSugar {
private val instanceRepositoryMock = new InstanceRepositoryMock()
private val connectionRepository = mock[ConnectionRepository]
when(connectionRepository.getInstanceRepository).thenReturn(instanceRepositoryMock.repository)
private val module = new Module {
bind[ConnectionRepository] to connectionRepository
}
protected val injector: Injector = module.injector
val instanceName = "instance-name"
val frameworkId = "framework-id"
val instanceMock: Instance = mock[Instance]
when(instanceMock.name).thenReturn(instanceName)
when(instanceMock.frameworkId).thenReturn(frameworkId)
private def getClosableHttpResponseMock(status: Int): CloseableHttpResponse = {
val statusLineMock = mock[StatusLine]
when(statusLineMock.getStatusCode).thenReturn(status)
val responseMock = mock[CloseableHttpResponse]
when(responseMock.getStatusLine).thenReturn(statusLineMock)
responseMock
}
def instanceDestroyerMock(marathonManager: MarathonApi = mock[MarathonApi],
instanceManager: InstanceDomainRenewer = mock[InstanceDomainRenewer],
instanceMock: Instance = instanceMock): InstanceDestroyerMock = {
new InstanceDestroyerMock(marathonManager, instanceManager, instanceMock)(injector)
}
val frameworkName: String = InstanceAdditionalFieldCreator.getFrameworkName(instanceMock)
val okStatus = HttpStatus.SC_OK
val errorStatus = HttpStatus.SC_INTERNAL_SERVER_ERROR
val okFrameworkResponse: CloseableHttpResponse = getClosableHttpResponseMock(okStatus)
val notFoundFrameworkResponce: CloseableHttpResponse = getClosableHttpResponseMock(HttpStatus.SC_NOT_FOUND)
}
| bwsw/sj-platform | core/sj-crud-rest/src/test/scala-2.12/com/bwsw/sj/crud/rest/instance/destroyer/InstanceDestroyerMocks.scala | Scala | apache-2.0 | 3,046 |
package com.felixmilea.vorbit.main
import akka.actor.Props
import com.felixmilea.vorbit.actors.ActorManager._
import com.felixmilea.vorbit.reddit.mining.RedditMiningManager
import com.felixmilea.vorbit.reddit.mining.RedditMiner
import com.felixmilea.vorbit.utils.AppUtils
import com.felixmilea.vorbit.utils.Loggable
object MinerTest extends App with Loggable {
val manager = AppUtils.actorSystem.actorOf(Props(new RedditMiningManager(AppUtils.config.miners.size)), "MiningManager")
// start miners based on config
// for (minerConfig <- AppUtils.config.miners) {
// try {
// val miner = new RedditMiner(minerConfig, manager)
// miner.start()
// } catch {
// case t: Throwable => {
// Error(s"Unexpected error occured while creating or running miner #$minerConfig: ${t.getMessage}")
// }
// }
// }
while (true) {
readLine()
manager ! PingChildren
manager ! PingStatus
}
}
| felixmc/Felix-Milea-Ciobanu-Vorbit | code/com/felixmilea/vorbit/main/MinerTest.scala | Scala | mit | 954 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark
import java.util.Random
import scala.collection.Map
import scala.collection.JavaConversions.mapAsScalaMap
import scala.collection.mutable.ArrayBuffer
import org.apache.hadoop.io.BytesWritable
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapred.TextOutputFormat
import it.unimi.dsi.fastutil.objects.{Object2LongOpenHashMap => OLMap}
import spark.broadcast.Broadcast
import spark.Partitioner._
import spark.partial.BoundedDouble
import spark.partial.CountEvaluator
import spark.partial.GroupedCountEvaluator
import spark.partial.PartialResult
import spark.rdd.CoalescedRDD
import spark.rdd.CartesianRDD
import spark.rdd.FilteredRDD
import spark.rdd.FlatMappedRDD
import spark.rdd.GlommedRDD
import spark.rdd.MappedRDD
import spark.rdd.MapPartitionsRDD
import spark.rdd.MapPartitionsWithIndexRDD
import spark.rdd.PipedRDD
import spark.rdd.SampledRDD
import spark.rdd.ShuffledRDD
import spark.rdd.UnionRDD
import spark.rdd.ZippedRDD
import spark.rdd.ZippedPartitionsRDD2
import spark.rdd.ZippedPartitionsRDD3
import spark.rdd.ZippedPartitionsRDD4
import spark.storage.StorageLevel
import spark.util.BoundedPriorityQueue
import SparkContext._
/**
* A Resilient Distributed Dataset (RDD), the basic abstraction in Spark. Represents an immutable,
* partitioned collection of elements that can be operated on in parallel. This class contains the
* basic operations available on all RDDs, such as `map`, `filter`, and `persist`. In addition,
* [[spark.PairRDDFunctions]] contains operations available only on RDDs of key-value pairs, such
* as `groupByKey` and `join`; [[spark.DoubleRDDFunctions]] contains operations available only on
* RDDs of Doubles; and [[spark.SequenceFileRDDFunctions]] contains operations available on RDDs
* that can be saved as SequenceFiles. These operations are automatically available on any RDD of
* the right type (e.g. RDD[(Int, Int)]) through implicit conversions when you
* `import spark.SparkContext._`.
*
* Internally, each RDD is characterized by five main properties:
*
* - A list of partitions
* - A function for computing each split
* - A list of dependencies on other RDDs
* - Optionally, a Partitioner for key-value RDDs (e.g. to say that the RDD is hash-partitioned)
* - Optionally, a list of preferred locations to compute each split on (e.g. block locations for
* an HDFS file)
*
* All of the scheduling and execution in Spark is done based on these methods, allowing each RDD
* to implement its own way of computing itself. Indeed, users can implement custom RDDs (e.g. for
* reading data from a new storage system) by overriding these functions. Please refer to the
* [[http://www.cs.berkeley.edu/~matei/papers/2012/nsdi_spark.pdf Spark paper]] for more details
* on RDD internals.
*/
abstract class RDD[T: ClassManifest](
@transient private var sc: SparkContext,
@transient private var deps: Seq[Dependency[_]]
) extends Serializable with Logging {
/** Construct an RDD with just a one-to-one dependency on one parent */
def this(@transient oneParent: RDD[_]) =
this(oneParent.context , List(new OneToOneDependency(oneParent)))
// =======================================================================
// Methods that should be implemented by subclasses of RDD
// =======================================================================
/** Implemented by subclasses to compute a given partition. */
def compute(split: Partition, context: TaskContext): Iterator[T]
/**
* Implemented by subclasses to return the set of partitions in this RDD. This method will only
* be called once, so it is safe to implement a time-consuming computation in it.
*/
protected def getPartitions: Array[Partition]
/**
* Implemented by subclasses to return how this RDD depends on parent RDDs. This method will only
* be called once, so it is safe to implement a time-consuming computation in it.
*/
protected def getDependencies: Seq[Dependency[_]] = deps
/** Optionally overridden by subclasses to specify placement preferences. */
protected def getPreferredLocations(split: Partition): Seq[String] = Nil
/** Optionally overridden by subclasses to specify how they are partitioned. */
val partitioner: Option[Partitioner] = None
// =======================================================================
// Methods and fields available on all RDDs
// =======================================================================
/** The SparkContext that created this RDD. */
def sparkContext: SparkContext = sc
/** A unique ID for this RDD (within its SparkContext). */
val id: Int = sc.newRddId()
/** A friendly name for this RDD */
var name: String = null
/** Assign a name to this RDD */
def setName(_name: String) = {
name = _name
this
}
/** User-defined generator of this RDD*/
var generator = Utils.getCallSiteInfo.firstUserClass
/** Reset generator*/
def setGenerator(_generator: String) = {
generator = _generator
}
/**
* Set this RDD's storage level to persist its values across operations after the first time
* it is computed. This can only be used to assign a new storage level if the RDD does not
* have a storage level set yet.
*/
def persist(newLevel: StorageLevel): RDD[T] = {
// TODO: Handle changes of StorageLevel
if (storageLevel != StorageLevel.NONE && newLevel != storageLevel) {
throw new UnsupportedOperationException(
"Cannot change storage level of an RDD after it was already assigned a level")
}
storageLevel = newLevel
// Register the RDD with the SparkContext
sc.persistentRdds(id) = this
this
}
/** Persist this RDD with the default storage level (`MEMORY_ONLY`). */
def persist(): RDD[T] = persist(StorageLevel.MEMORY_ONLY)
/** Persist this RDD with the default storage level (`MEMORY_ONLY`). */
def cache(): RDD[T] = persist()
/**
* Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
*
* @param blocking Whether to block until all blocks are deleted.
* @return This RDD.
*/
def unpersist(blocking: Boolean = true): RDD[T] = {
logInfo("Removing RDD " + id + " from persistence list")
sc.env.blockManager.master.removeRdd(id, blocking)
sc.persistentRdds.remove(id)
storageLevel = StorageLevel.NONE
this
}
/** Get the RDD's current storage level, or StorageLevel.NONE if none is set. */
def getStorageLevel = storageLevel
// Our dependencies and partitions will be obtained by calling the subclass's methods below, and will
// be overwritten when we're checkpointed
private var dependencies_ : Seq[Dependency[_]] = null
@transient private var partitions_ : Array[Partition] = null
/** An Option holding our checkpoint RDD, if we are checkpointed */
private def checkpointRDD: Option[RDD[T]] = checkpointData.flatMap(_.checkpointRDD)
/**
* Get the list of dependencies of this RDD, taking into account whether the
* RDD is checkpointed or not.
*/
final def dependencies: Seq[Dependency[_]] = {
checkpointRDD.map(r => List(new OneToOneDependency(r))).getOrElse {
if (dependencies_ == null) {
dependencies_ = getDependencies
}
dependencies_
}
}
/**
* Get the array of partitions of this RDD, taking into account whether the
* RDD is checkpointed or not.
*/
final def partitions: Array[Partition] = {
checkpointRDD.map(_.partitions).getOrElse {
if (partitions_ == null) {
partitions_ = getPartitions
}
partitions_
}
}
/**
* Get the preferred location of a split, taking into account whether the
* RDD is checkpointed or not.
*/
final def preferredLocations(split: Partition): Seq[String] = {
checkpointRDD.map(_.getPreferredLocations(split)).getOrElse {
getPreferredLocations(split)
}
}
/**
* Internal method to this RDD; will read from cache if applicable, or otherwise compute it.
* This should ''not'' be called by users directly, but is available for implementors of custom
* subclasses of RDD.
*/
final def iterator(split: Partition, context: TaskContext): Iterator[T] = {
if (storageLevel != StorageLevel.NONE) {
SparkEnv.get.cacheManager.getOrCompute(this, split, context, storageLevel)
} else {
computeOrReadCheckpoint(split, context)
}
}
/**
* Compute an RDD partition or read it from a checkpoint if the RDD is checkpointing.
*/
private[spark] def computeOrReadCheckpoint(split: Partition, context: TaskContext): Iterator[T] = {
if (isCheckpointed) {
firstParent[T].iterator(split, context)
} else {
compute(split, context)
}
}
// Transformations (return a new RDD)
/**
* Return a new RDD by applying a function to all elements of this RDD.
*/
def map[U: ClassManifest](f: T => U): RDD[U] = new MappedRDD(this, sc.clean(f))
/**
* Return a new RDD by first applying a function to all elements of this
* RDD, and then flattening the results.
*/
def flatMap[U: ClassManifest](f: T => TraversableOnce[U]): RDD[U] =
new FlatMappedRDD(this, sc.clean(f))
/**
* Return a new RDD containing only the elements that satisfy a predicate.
*/
def filter(f: T => Boolean): RDD[T] = new FilteredRDD(this, sc.clean(f))
/**
* Return a new RDD containing the distinct elements in this RDD.
*/
def distinct(numPartitions: Int): RDD[T] =
map(x => (x, null)).reduceByKey((x, y) => x, numPartitions).map(_._1)
def distinct(): RDD[T] = distinct(partitions.size)
/**
* Return a new RDD that is reduced into `numPartitions` partitions.
*/
def coalesce(numPartitions: Int, shuffle: Boolean = false): RDD[T] = {
if (shuffle) {
// include a shuffle step so that our upstream tasks are still distributed
new CoalescedRDD(new ShuffledRDD(map(x => (x, null)), new HashPartitioner(numPartitions)), numPartitions).keys
} else {
new CoalescedRDD(this, numPartitions)
}
}
/**
* Return a sampled subset of this RDD.
*/
def sample(withReplacement: Boolean, fraction: Double, seed: Int): RDD[T] =
new SampledRDD(this, withReplacement, fraction, seed)
def takeSample(withReplacement: Boolean, num: Int, seed: Int): Array[T] = {
var fraction = 0.0
var total = 0
var multiplier = 3.0
var initialCount = this.count()
var maxSelected = 0
if (num < 0) {
throw new IllegalArgumentException("Negative number of elements requested")
}
if (initialCount > Integer.MAX_VALUE - 1) {
maxSelected = Integer.MAX_VALUE - 1
} else {
maxSelected = initialCount.toInt
}
if (num > initialCount && !withReplacement) {
total = maxSelected
fraction = multiplier * (maxSelected + 1) / initialCount
} else {
fraction = multiplier * (num + 1) / initialCount
total = num
}
val rand = new Random(seed)
var samples = this.sample(withReplacement, fraction, rand.nextInt()).collect()
// If the first sample didn't turn out large enough, keep trying to take samples;
// this shouldn't happen often because we use a big multiplier for the initial size
while (samples.length < total) {
samples = this.sample(withReplacement, fraction, rand.nextInt()).collect()
}
Utils.randomizeInPlace(samples, rand).take(total)
}
/**
* Return the union of this RDD and another one. Any identical elements will appear multiple
* times (use `.distinct()` to eliminate them).
*/
def union(other: RDD[T]): RDD[T] = new UnionRDD(sc, Array(this, other))
/**
* Return the union of this RDD and another one. Any identical elements will appear multiple
* times (use `.distinct()` to eliminate them).
*/
def ++(other: RDD[T]): RDD[T] = this.union(other)
/**
* Return an RDD created by coalescing all elements within each partition into an array.
*/
def glom(): RDD[Array[T]] = new GlommedRDD(this)
/**
* Return the Cartesian product of this RDD and another one, that is, the RDD of all pairs of
* elements (a, b) where a is in `this` and b is in `other`.
*/
def cartesian[U: ClassManifest](other: RDD[U]): RDD[(T, U)] = new CartesianRDD(sc, this, other)
/**
* Return an RDD of grouped items.
*/
def groupBy[K: ClassManifest](f: T => K): RDD[(K, Seq[T])] =
groupBy[K](f, defaultPartitioner(this))
/**
* Return an RDD of grouped elements. Each group consists of a key and a sequence of elements
* mapping to that key.
*/
def groupBy[K: ClassManifest](f: T => K, numPartitions: Int): RDD[(K, Seq[T])] =
groupBy(f, new HashPartitioner(numPartitions))
/**
* Return an RDD of grouped items.
*/
def groupBy[K: ClassManifest](f: T => K, p: Partitioner): RDD[(K, Seq[T])] = {
val cleanF = sc.clean(f)
this.map(t => (cleanF(t), t)).groupByKey(p)
}
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: String): RDD[String] = new PipedRDD(this, command)
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: String, env: Map[String, String]): RDD[String] =
new PipedRDD(this, command, env)
/**
* Return an RDD created by piping elements to a forked external process.
* The print behavior can be customized by providing two functions.
*
* @param command command to run in forked process.
* @param env environment variables to set.
* @param printPipeContext Before piping elements, this function is called as an opportunity
* to pipe context data. Print line function (like out.println) will be
* passed as printPipeContext's parameter.
* @param printRDDElement Use this function to customize how to pipe elements. This function
* will be called with each RDD element as the 1st parameter, and the
* print line function (like out.println()) as the 2nd parameter.
* An example of piping the RDD data of groupBy() in a streaming way,
* instead of constructing a huge String to concatenate all the elements:
* def printRDDElement(record:(String, Seq[String]), f:String=>Unit) =
* for (e <- record._2){f(e)}
* @return the result RDD
*/
def pipe(
command: Seq[String],
env: Map[String, String] = Map(),
printPipeContext: (String => Unit) => Unit = null,
printRDDElement: (T, String => Unit) => Unit = null): RDD[String] =
new PipedRDD(this, command, env,
if (printPipeContext ne null) sc.clean(printPipeContext) else null,
if (printRDDElement ne null) sc.clean(printRDDElement) else null)
/**
* Return a new RDD by applying a function to each partition of this RDD.
*/
def mapPartitions[U: ClassManifest](f: Iterator[T] => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] =
new MapPartitionsRDD(this, sc.clean(f), preservesPartitioning)
/**
* Return a new RDD by applying a function to each partition of this RDD, while tracking the index
* of the original partition.
*/
def mapPartitionsWithIndex[U: ClassManifest](
f: (Int, Iterator[T]) => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] =
new MapPartitionsWithIndexRDD(this, sc.clean(f), preservesPartitioning)
/**
* Return a new RDD by applying a function to each partition of this RDD, while tracking the index
* of the original partition.
*/
@deprecated("use mapPartitionsWithIndex", "0.7.0")
def mapPartitionsWithSplit[U: ClassManifest](
f: (Int, Iterator[T]) => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] =
new MapPartitionsWithIndexRDD(this, sc.clean(f), preservesPartitioning)
/**
* Maps f over this RDD, where f takes an additional parameter of type A. This
* additional parameter is produced by constructA, which is called in each
* partition with the index of that partition.
*/
def mapWith[A: ClassManifest, U: ClassManifest](constructA: Int => A, preservesPartitioning: Boolean = false)
(f:(T, A) => U): RDD[U] = {
def iterF(index: Int, iter: Iterator[T]): Iterator[U] = {
val a = constructA(index)
iter.map(t => f(t, a))
}
new MapPartitionsWithIndexRDD(this, sc.clean(iterF _), preservesPartitioning)
}
/**
* FlatMaps f over this RDD, where f takes an additional parameter of type A. This
* additional parameter is produced by constructA, which is called in each
* partition with the index of that partition.
*/
def flatMapWith[A: ClassManifest, U: ClassManifest](constructA: Int => A, preservesPartitioning: Boolean = false)
(f:(T, A) => Seq[U]): RDD[U] = {
def iterF(index: Int, iter: Iterator[T]): Iterator[U] = {
val a = constructA(index)
iter.flatMap(t => f(t, a))
}
new MapPartitionsWithIndexRDD(this, sc.clean(iterF _), preservesPartitioning)
}
/**
* Applies f to each element of this RDD, where f takes an additional parameter of type A.
* This additional parameter is produced by constructA, which is called in each
* partition with the index of that partition.
*/
def foreachWith[A: ClassManifest](constructA: Int => A)
(f:(T, A) => Unit) {
def iterF(index: Int, iter: Iterator[T]): Iterator[T] = {
val a = constructA(index)
iter.map(t => {f(t, a); t})
}
(new MapPartitionsWithIndexRDD(this, sc.clean(iterF _), true)).foreach(_ => {})
}
/**
* Filters this RDD with p, where p takes an additional parameter of type A. This
* additional parameter is produced by constructA, which is called in each
* partition with the index of that partition.
*/
def filterWith[A: ClassManifest](constructA: Int => A)
(p:(T, A) => Boolean): RDD[T] = {
def iterF(index: Int, iter: Iterator[T]): Iterator[T] = {
val a = constructA(index)
iter.filter(t => p(t, a))
}
new MapPartitionsWithIndexRDD(this, sc.clean(iterF _), true)
}
/**
* Zips this RDD with another one, returning key-value pairs with the first element in each RDD,
* second element in each RDD, etc. Assumes that the two RDDs have the *same number of
* partitions* and the *same number of elements in each partition* (e.g. one was made through
* a map on the other).
*/
def zip[U: ClassManifest](other: RDD[U]): RDD[(T, U)] = new ZippedRDD(sc, this, other)
/**
* Zip this RDD's partitions with one (or more) RDD(s) and return a new RDD by
* applying a function to the zipped partitions. Assumes that all the RDDs have the
* *same number of partitions*, but does *not* require them to have the same number
* of elements in each partition.
*/
def zipPartitions[B: ClassManifest, V: ClassManifest](
f: (Iterator[T], Iterator[B]) => Iterator[V],
rdd2: RDD[B]): RDD[V] =
new ZippedPartitionsRDD2(sc, sc.clean(f), this, rdd2)
def zipPartitions[B: ClassManifest, C: ClassManifest, V: ClassManifest](
f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V],
rdd2: RDD[B],
rdd3: RDD[C]): RDD[V] =
new ZippedPartitionsRDD3(sc, sc.clean(f), this, rdd2, rdd3)
def zipPartitions[B: ClassManifest, C: ClassManifest, D: ClassManifest, V: ClassManifest](
f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V],
rdd2: RDD[B],
rdd3: RDD[C],
rdd4: RDD[D]): RDD[V] =
new ZippedPartitionsRDD4(sc, sc.clean(f), this, rdd2, rdd3, rdd4)
// Actions (launch a job to return a value to the user program)
/**
* Applies a function f to all elements of this RDD.
*/
def foreach(f: T => Unit) {
val cleanF = sc.clean(f)
sc.runJob(this, (iter: Iterator[T]) => iter.foreach(cleanF))
}
/**
* Applies a function f to each partition of this RDD.
*/
def foreachPartition(f: Iterator[T] => Unit) {
val cleanF = sc.clean(f)
sc.runJob(this, (iter: Iterator[T]) => cleanF(iter))
}
/**
* Return an array that contains all of the elements in this RDD.
*/
def collect(): Array[T] = {
val results = sc.runJob(this, (iter: Iterator[T]) => iter.toArray)
Array.concat(results: _*)
}
/**
* Return an array that contains all of the elements in this RDD.
*/
def toArray(): Array[T] = collect()
/**
* Return an RDD that contains all matching values by applying `f`.
*/
def collect[U: ClassManifest](f: PartialFunction[T, U]): RDD[U] = {
filter(f.isDefinedAt).map(f)
}
/**
* Return an RDD with the elements from `this` that are not in `other`.
*
* Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
* RDD will be <= us.
*/
def subtract(other: RDD[T]): RDD[T] =
subtract(other, partitioner.getOrElse(new HashPartitioner(partitions.size)))
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(other: RDD[T], numPartitions: Int): RDD[T] =
subtract(other, new HashPartitioner(numPartitions))
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(other: RDD[T], p: Partitioner): RDD[T] = {
if (partitioner == Some(p)) {
// Our partitioner knows how to handle T (which, since we have a partitioner, is
// really (K, V)) so make a new Partitioner that will de-tuple our fake tuples
val p2 = new Partitioner() {
override def numPartitions = p.numPartitions
override def getPartition(k: Any) = p.getPartition(k.asInstanceOf[(Any, _)]._1)
}
// Unfortunately, since we're making a new p2, we'll get ShuffleDependencies
// anyway, and when calling .keys, will not have a partitioner set, even though
// the SubtractedRDD will, thanks to p2's de-tupled partitioning, already be
// partitioned by the right/real keys (e.g. p).
this.map(x => (x, null)).subtractByKey(other.map((_, null)), p2).keys
} else {
this.map(x => (x, null)).subtractByKey(other.map((_, null)), p).keys
}
}
/**
* Reduces the elements of this RDD using the specified commutative and associative binary operator.
*/
def reduce(f: (T, T) => T): T = {
val cleanF = sc.clean(f)
val reducePartition: Iterator[T] => Option[T] = iter => {
if (iter.hasNext) {
Some(iter.reduceLeft(cleanF))
} else {
None
}
}
var jobResult: Option[T] = None
val mergeResult = (index: Int, taskResult: Option[T]) => {
if (taskResult != None) {
jobResult = jobResult match {
case Some(value) => Some(f(value, taskResult.get))
case None => taskResult
}
}
}
sc.runJob(this, reducePartition, mergeResult)
// Get the final result out of our Option, or throw an exception if the RDD was empty
jobResult.getOrElse(throw new UnsupportedOperationException("empty collection"))
}
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using a
* given associative function and a neutral "zero value". The function op(t1, t2) is allowed to
* modify t1 and return it as its result value to avoid object allocation; however, it should not
* modify t2.
*/
def fold(zeroValue: T)(op: (T, T) => T): T = {
// Clone the zero value since we will also be serializing it as part of tasks
var jobResult = Utils.clone(zeroValue, sc.env.closureSerializer.newInstance())
val cleanOp = sc.clean(op)
val foldPartition = (iter: Iterator[T]) => iter.fold(zeroValue)(cleanOp)
val mergeResult = (index: Int, taskResult: T) => jobResult = op(jobResult, taskResult)
sc.runJob(this, foldPartition, mergeResult)
jobResult
}
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using
* given combine functions and a neutral "zero value". This function can return a different result
* type, U, than the type of this RDD, T. Thus, we need one operation for merging a T into an U
* and one operation for merging two U's, as in scala.TraversableOnce. Both of these functions are
* allowed to modify and return their first argument instead of creating a new U to avoid memory
* allocation.
*/
def aggregate[U: ClassManifest](zeroValue: U)(seqOp: (U, T) => U, combOp: (U, U) => U): U = {
// Clone the zero value since we will also be serializing it as part of tasks
var jobResult = Utils.clone(zeroValue, sc.env.closureSerializer.newInstance())
val cleanSeqOp = sc.clean(seqOp)
val cleanCombOp = sc.clean(combOp)
val aggregatePartition = (it: Iterator[T]) => it.aggregate(zeroValue)(cleanSeqOp, cleanCombOp)
val mergeResult = (index: Int, taskResult: U) => jobResult = combOp(jobResult, taskResult)
sc.runJob(this, aggregatePartition, mergeResult)
jobResult
}
/**
* Return the number of elements in the RDD.
*/
def count(): Long = {
sc.runJob(this, (iter: Iterator[T]) => {
var result = 0L
while (iter.hasNext) {
result += 1L
iter.next()
}
result
}).sum
}
/**
* (Experimental) Approximate version of count() that returns a potentially incomplete result
* within a timeout, even if not all tasks have finished.
*/
def countApprox(timeout: Long, confidence: Double = 0.95): PartialResult[BoundedDouble] = {
val countElements: (TaskContext, Iterator[T]) => Long = { (ctx, iter) =>
var result = 0L
while (iter.hasNext) {
result += 1L
iter.next()
}
result
}
val evaluator = new CountEvaluator(partitions.size, confidence)
sc.runApproximateJob(this, countElements, evaluator, timeout)
}
/**
* Return the count of each unique value in this RDD as a map of (value, count) pairs. The final
* combine step happens locally on the master, equivalent to running a single reduce task.
*/
def countByValue(): Map[T, Long] = {
if (elementClassManifest.erasure.isArray) {
throw new SparkException("countByValue() does not support arrays")
}
// TODO: This should perhaps be distributed by default.
def countPartition(iter: Iterator[T]): Iterator[OLMap[T]] = {
val map = new OLMap[T]
while (iter.hasNext) {
val v = iter.next()
map.put(v, map.getLong(v) + 1L)
}
Iterator(map)
}
def mergeMaps(m1: OLMap[T], m2: OLMap[T]): OLMap[T] = {
val iter = m2.object2LongEntrySet.fastIterator()
while (iter.hasNext) {
val entry = iter.next()
m1.put(entry.getKey, m1.getLong(entry.getKey) + entry.getLongValue)
}
return m1
}
val myResult = mapPartitions(countPartition).reduce(mergeMaps)
myResult.asInstanceOf[java.util.Map[T, Long]] // Will be wrapped as a Scala mutable Map
}
/**
* (Experimental) Approximate version of countByValue().
*/
def countByValueApprox(
timeout: Long,
confidence: Double = 0.95
): PartialResult[Map[T, BoundedDouble]] = {
if (elementClassManifest.erasure.isArray) {
throw new SparkException("countByValueApprox() does not support arrays")
}
val countPartition: (TaskContext, Iterator[T]) => OLMap[T] = { (ctx, iter) =>
val map = new OLMap[T]
while (iter.hasNext) {
val v = iter.next()
map.put(v, map.getLong(v) + 1L)
}
map
}
val evaluator = new GroupedCountEvaluator[T](partitions.size, confidence)
sc.runApproximateJob(this, countPartition, evaluator, timeout)
}
/**
* Take the first num elements of the RDD. This currently scans the partitions *one by one*, so
* it will be slow if a lot of partitions are required. In that case, use collect() to get the
* whole RDD instead.
*/
def take(num: Int): Array[T] = {
if (num == 0) {
return new Array[T](0)
}
val buf = new ArrayBuffer[T]
var p = 0
while (buf.size < num && p < partitions.size) {
val left = num - buf.size
val res = sc.runJob(this, (it: Iterator[T]) => it.take(left).toArray, Array(p), true)
buf ++= res(0)
if (buf.size == num)
return buf.toArray
p += 1
}
return buf.toArray
}
/**
* Return the first element in this RDD.
*/
def first(): T = take(1) match {
case Array(t) => t
case _ => throw new UnsupportedOperationException("empty collection")
}
/**
* Returns the top K elements from this RDD as defined by
* the specified implicit Ordering[T].
* @param num the number of top elements to return
* @param ord the implicit ordering for T
* @return an array of top elements
*/
def top(num: Int)(implicit ord: Ordering[T]): Array[T] = {
mapPartitions { items =>
val queue = new BoundedPriorityQueue[T](num)
queue ++= items
Iterator.single(queue)
}.reduce { (queue1, queue2) =>
queue1 ++= queue2
queue1
}.toArray.sorted(ord.reverse)
}
/**
* Returns the first K elements from this RDD as defined by
* the specified implicit Ordering[T] and maintains the
* ordering.
* @param num the number of top elements to return
* @param ord the implicit ordering for T
* @return an array of top elements
*/
def takeOrdered(num: Int)(implicit ord: Ordering[T]): Array[T] = top(num)(ord.reverse)
/**
* Save this RDD as a text file, using string representations of elements.
*/
def saveAsTextFile(path: String) {
this.map(x => (NullWritable.get(), new Text(x.toString)))
.saveAsHadoopFile[TextOutputFormat[NullWritable, Text]](path)
}
/**
* Save this RDD as a compressed text file, using string representations of elements.
*/
def saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]) {
this.map(x => (NullWritable.get(), new Text(x.toString)))
.saveAsHadoopFile[TextOutputFormat[NullWritable, Text]](path, codec)
}
/**
* Save this RDD as a SequenceFile of serialized objects.
*/
def saveAsObjectFile(path: String) {
this.mapPartitions(iter => iter.grouped(10).map(_.toArray))
.map(x => (NullWritable.get(), new BytesWritable(Utils.serialize(x))))
.saveAsSequenceFile(path)
}
/**
* Creates tuples of the elements in this RDD by applying `f`.
*/
def keyBy[K](f: T => K): RDD[(K, T)] = {
map(x => (f(x), x))
}
/** A private method for tests, to look at the contents of each partition */
private[spark] def collectPartitions(): Array[Array[T]] = {
sc.runJob(this, (iter: Iterator[T]) => iter.toArray)
}
/**
* Mark this RDD for checkpointing. It will be saved to a file inside the checkpoint
* directory set with SparkContext.setCheckpointDir() and all references to its parent
* RDDs will be removed. This function must be called before any job has been
* executed on this RDD. It is strongly recommended that this RDD is persisted in
* memory, otherwise saving it on a file will require recomputation.
*/
def checkpoint() {
if (context.checkpointDir.isEmpty) {
throw new Exception("Checkpoint directory has not been set in the SparkContext")
} else if (checkpointData.isEmpty) {
checkpointData = Some(new RDDCheckpointData(this))
checkpointData.get.markForCheckpoint()
}
}
/**
* Return whether this RDD has been checkpointed or not
*/
def isCheckpointed: Boolean = {
checkpointData.map(_.isCheckpointed).getOrElse(false)
}
/**
* Gets the name of the file to which this RDD was checkpointed
*/
def getCheckpointFile: Option[String] = {
checkpointData.flatMap(_.getCheckpointFile)
}
// =======================================================================
// Other internal methods and fields
// =======================================================================
private var storageLevel: StorageLevel = StorageLevel.NONE
/** Record user function generating this RDD. */
private[spark] val origin = Utils.formatSparkCallSite
private[spark] def elementClassManifest: ClassManifest[T] = classManifest[T]
private[spark] var checkpointData: Option[RDDCheckpointData[T]] = None
/** Returns the first parent RDD */
protected[spark] def firstParent[U: ClassManifest] = {
dependencies.head.rdd.asInstanceOf[RDD[U]]
}
/** The [[spark.SparkContext]] that this RDD was created on. */
def context = sc
// Avoid handling doCheckpoint multiple times to prevent excessive recursion
private var doCheckpointCalled = false
/**
* Performs the checkpointing of this RDD by saving this. It is called by the DAGScheduler
* after a job using this RDD has completed (therefore the RDD has been materialized and
* potentially stored in memory). doCheckpoint() is called recursively on the parent RDDs.
*/
private[spark] def doCheckpoint() {
if (!doCheckpointCalled) {
doCheckpointCalled = true
if (checkpointData.isDefined) {
checkpointData.get.doCheckpoint()
} else {
dependencies.foreach(_.rdd.doCheckpoint())
}
}
}
/**
* Changes the dependencies of this RDD from its original parents to a new RDD (`newRDD`)
* created from the checkpoint file, and forget its old dependencies and partitions.
*/
private[spark] def markCheckpointed(checkpointRDD: RDD[_]) {
clearDependencies()
partitions_ = null
deps = null // Forget the constructor argument for dependencies too
}
/**
* Clears the dependencies of this RDD. This method must ensure that all references
* to the original parent RDDs are removed to enable the parent RDDs to be garbage
* collected. Subclasses of RDD may override this method for implementing their own cleaning
* logic. See [[spark.rdd.UnionRDD]] for an example.
*/
protected def clearDependencies() {
dependencies_ = null
}
/** A description of this RDD and its recursive dependencies for debugging. */
def toDebugString: String = {
def debugString(rdd: RDD[_], prefix: String = ""): Seq[String] = {
Seq(prefix + rdd + " (" + rdd.partitions.size + " partitions)") ++
rdd.dependencies.flatMap(d => debugString(d.rdd, prefix + " "))
}
debugString(this).mkString("\n")
}
override def toString: String = "%s%s[%d] at %s".format(
Option(name).map(_ + " ").getOrElse(""),
getClass.getSimpleName,
id,
origin)
}
| wgpshashank/spark | core/src/main/scala/spark/RDD.scala | Scala | apache-2.0 | 35,378 |
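As the RDD scaladoc in the file above notes, a custom RDD only has to implement compute and getPartitions (placement preferences and the partitioner are optional). The sketch below is not from the Spark sources; it is a minimal, hypothetical subclass written against the spark package API shown above.

package spark

// Hypothetical sketch: an RDD that yields one constant value per partition, implementing
// only the two abstract members called out in the RDD scaladoc.
class ConstantRDD(sc: SparkContext, value: Int, numSplits: Int) extends RDD[Int](sc, Nil) {

  private class ConstantPartition(override val index: Int) extends Partition

  // One element per partition; there is no parent data to read.
  override def compute(split: Partition, context: TaskContext): Iterator[Int] =
    Iterator.single(value)

  override protected def getPartitions: Array[Partition] =
    Array.tabulate[Partition](numSplits)(i => new ConstantPartition(i))
}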
package Foos
object Outer {
class X
object x
}
| lampepfl/dotty | tests/pos-special/fatal-warnings/i2673.scala | Scala | apache-2.0 | 52 |
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.zeromq
import org.zeromq.ZMQ.{ Socket, Poller }
import org.zeromq.{ ZMQ ⇒ JZMQ }
import akka.actor._
import scala.collection.immutable
import scala.annotation.tailrec
import scala.concurrent.{ Promise, Future }
import scala.concurrent.duration.Duration
import scala.collection.mutable.ListBuffer
import scala.util.control.NonFatal
import akka.event.Logging
import java.util.concurrent.TimeUnit
import akka.util.ByteString
private[zeromq] object ConcurrentSocketActor {
private sealed trait PollMsg
private case object Poll extends PollMsg
private case object PollCareful extends PollMsg
private case object Flush
private class NoSocketHandleException() extends Exception("Couldn't create a zeromq socket.")
private val DefaultContext = Context()
}
private[zeromq] class ConcurrentSocketActor(params: immutable.Seq[SocketOption]) extends Actor {
import ConcurrentSocketActor._
private val zmqContext = params collectFirst { case c: Context ⇒ c } getOrElse DefaultContext
private var deserializer = params collectFirst { case d: Deserializer ⇒ d } getOrElse new ZMQMessageDeserializer
private val socketType = {
import SocketType.{ ZMQSocketType ⇒ ST }
params.collectFirst { case t: ST ⇒ t }.getOrElse(throw new IllegalArgumentException("A socket type is required"))
}
private val socket: Socket = zmqContext.socket(socketType)
private val poller: Poller = zmqContext.poller
private val pendingSends = new ListBuffer[immutable.Seq[ByteString]]
def receive = {
case m: PollMsg ⇒ doPoll(m)
case ZMQMessage(frames) ⇒ handleRequest(Send(frames))
case r: Request ⇒ handleRequest(r)
case Flush ⇒ flush()
case Terminated(_) ⇒ context stop self
}
private def handleRequest(msg: Request): Unit = msg match {
case Send(frames) ⇒
if (frames.nonEmpty) {
val flushNow = pendingSends.isEmpty
pendingSends.append(frames)
if (flushNow) flush()
}
case opt: SocketOption ⇒ handleSocketOption(opt)
case q: SocketOptionQuery ⇒ handleSocketOptionQuery(q)
}
private def handleConnectOption(msg: SocketConnectOption): Unit = msg match {
case Connect(endpoint) ⇒ { socket.connect(endpoint); notifyListener(Connecting) }
case Bind(endpoint) ⇒ socket.bind(endpoint)
}
private def handlePubSubOption(msg: PubSubOption): Unit = msg match {
case Subscribe(topic) ⇒ socket.subscribe(topic.toArray)
case Unsubscribe(topic) ⇒ socket.unsubscribe(topic.toArray)
}
private def handleSocketOption(msg: SocketOption): Unit = msg match {
case x: SocketMeta ⇒ throw new IllegalStateException("SocketMeta " + x + " only allowed for setting up a socket")
case c: SocketConnectOption ⇒ handleConnectOption(c)
case ps: PubSubOption ⇒ handlePubSubOption(ps)
case Linger(value) ⇒ socket.setLinger(value)
case ReconnectIVL(value) ⇒ socket.setReconnectIVL(value)
case Backlog(value) ⇒ socket.setBacklog(value)
case ReconnectIVLMax(value) ⇒ socket.setReconnectIVLMax(value)
case MaxMsgSize(value) ⇒ socket.setMaxMsgSize(value)
case SendHighWatermark(value) ⇒ socket.setSndHWM(value)
case ReceiveHighWatermark(value) ⇒ socket.setRcvHWM(value)
case HighWatermark(value) ⇒ socket.setHWM(value)
case Swap(value) ⇒ socket.setSwap(value)
case Affinity(value) ⇒ socket.setAffinity(value)
case Identity(value) ⇒ socket.setIdentity(value)
case Rate(value) ⇒ socket.setRate(value)
case RecoveryInterval(value) ⇒ socket.setRecoveryInterval(value)
case MulticastLoop(value) ⇒ socket.setMulticastLoop(value)
case MulticastHops(value) ⇒ socket.setMulticastHops(value)
case SendBufferSize(value) ⇒ socket.setSendBufferSize(value)
case ReceiveBufferSize(value) ⇒ socket.setReceiveBufferSize(value)
case d: Deserializer ⇒ deserializer = d
}
private def handleSocketOptionQuery(msg: SocketOptionQuery): Unit =
sender() ! (msg match {
case Linger ⇒ socket.getLinger
case ReconnectIVL ⇒ socket.getReconnectIVL
case Backlog ⇒ socket.getBacklog
case ReconnectIVLMax ⇒ socket.getReconnectIVLMax
case MaxMsgSize ⇒ socket.getMaxMsgSize
case SendHighWatermark ⇒ socket.getSndHWM
case ReceiveHighWatermark ⇒ socket.getRcvHWM
case Swap ⇒ socket.getSwap
case Affinity ⇒ socket.getAffinity
case Identity ⇒ socket.getIdentity
case Rate ⇒ socket.getRate
case RecoveryInterval ⇒ socket.getRecoveryInterval
case MulticastLoop ⇒ socket.hasMulticastLoop
case MulticastHops ⇒ socket.getMulticastHops
case SendBufferSize ⇒ socket.getSendBufferSize
case ReceiveBufferSize ⇒ socket.getReceiveBufferSize
case FileDescriptor ⇒ socket.getFD
})
override def preStart {
watchListener()
setupSocket()
poller.register(socket, Poller.POLLIN)
setupConnection()
import SocketType._
socketType match {
case Pub | Push ⇒ // don’t poll
case Sub | Pull | Pair | Dealer | Router ⇒ self ! Poll
case Req | Rep ⇒ self ! PollCareful
}
}
private def setupConnection(): Unit = {
params filter (_.isInstanceOf[SocketConnectOption]) foreach { self ! _ }
params filter (_.isInstanceOf[PubSubOption]) foreach { self ! _ }
}
private def setupSocket() = params foreach {
case _: SocketConnectOption | _: PubSubOption | _: SocketMeta ⇒ // ignore, handled differently
case m ⇒ self ! m
}
override def preRestart(reason: Throwable, message: Option[Any]): Unit = context.children foreach context.stop //Do not call postStop
override def postRestart(reason: Throwable): Unit = () // Do nothing
override def postStop: Unit = try {
if (socket != null) {
poller.unregister(socket)
socket.close
}
} finally notifyListener(Closed)
@tailrec private def flushMessage(i: immutable.Seq[ByteString]): Boolean =
if (i.isEmpty)
true
else {
val head = i.head
val tail = i.tail
if (socket.send(head.toArray, if (tail.nonEmpty) JZMQ.SNDMORE else 0)) flushMessage(tail)
else {
pendingSends.prepend(i) // Reenqueue the rest of the message so the next flush takes care of it
self ! Flush
false
}
}
@tailrec private def flush(): Unit =
if (pendingSends.nonEmpty && flushMessage(pendingSends.remove(0))) flush() // Flush while things are going well
// this is a “PollMsg=>Unit” which either polls or schedules Poll, depending on the sign of the timeout
private val doPollTimeout = {
val ext = ZeroMQExtension(context.system)
val fromConfig = params collectFirst { case PollTimeoutDuration(duration) ⇒ duration }
val duration = (fromConfig getOrElse ext.DefaultPollTimeout)
if (duration > Duration.Zero) {
// for positive timeout values, do poll (i.e. block this thread)
val pollLength = duration.toUnit(ext.pollTimeUnit).toLong
(msg: PollMsg) ⇒
poller.poll(pollLength)
self ! msg
} else {
val d = -duration
{ (msg: PollMsg) ⇒
// for negative timeout values, schedule Poll token -duration into the future
import context.dispatcher
context.system.scheduler.scheduleOnce(d, self, msg)
()
}
}
}
@tailrec private def doPoll(mode: PollMsg, togo: Int = 10): Unit =
if (togo <= 0) self ! mode
else receiveMessage(mode) match {
case Seq() ⇒ doPollTimeout(mode)
case frames ⇒ notifyListener(deserializer(frames)); doPoll(mode, togo - 1)
}
@tailrec private def receiveMessage(mode: PollMsg, currentFrames: Vector[ByteString] = Vector.empty): immutable.Seq[ByteString] =
if (mode == PollCareful && (poller.poll(0) <= 0)) {
if (currentFrames.isEmpty) currentFrames else throw new IllegalStateException("Received partial transmission!")
} else {
socket.recv(if (mode == Poll) JZMQ.NOBLOCK else 0) match {
case null ⇒ /*EAGAIN*/
if (currentFrames.isEmpty) currentFrames else receiveMessage(mode, currentFrames)
case bytes ⇒
val frames = currentFrames :+ ByteString(bytes)
if (socket.hasReceiveMore) receiveMessage(mode, frames) else frames
}
}
private val listenerOpt = params collectFirst { case Listener(l) ⇒ l }
private def watchListener(): Unit = listenerOpt foreach context.watch
private def notifyListener(message: Any): Unit = listenerOpt foreach { _ ! message }
}
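// Hedged usage sketch, not part of the original file: it shows how the Connect,
// Subscribe and Send messages handled above are typically fed to this actor. The
// `newSocket(SocketOption*)` factory on the extension is assumed here, and the
// endpoint, topic and payload strings are placeholders.
private[zeromq] object ConcurrentSocketActorUsageSketch {
  import akka.actor.{ ActorRef, ActorSystem }
  import akka.util.ByteString
  def subscriber(system: ActorSystem, listener: ActorRef): ActorRef = {
    val socket = ZeroMQExtension(system).newSocket(
      SocketType.Sub,                       // polled via the Poll message, see preStart
      Listener(listener),                   // receives Connecting/Closed and deserialized frames
      Connect("tcp://127.0.0.1:5555"),      // dispatched to handleConnectOption
      Subscribe(ByteString("events")))      // dispatched to handlePubSubOption
    socket ! Send(Seq(ByteString("ping")))  // queued and flushed by the Send handler above
    socket
  }
}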
|
Fincore/org.spark-project.akka
|
zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala
|
Scala
|
mit
| 9,037
|
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.event.events
import org.orbeon.oxf.xforms.event.XFormsEvent
import org.orbeon.oxf.xforms.event.XFormsEvent._
import org.orbeon.oxf.xforms.event.XFormsEventTarget
import org.orbeon.oxf.xforms.event.XFormsEvents._
class XXFormsDndEvent(target: XFormsEventTarget, properties: PropertyGetter)
extends XFormsEvent(XXFORMS_DND, target, properties, bubbles = false, cancelable = false) {
def getDndStart = property[String]("dnd-start").get
def getDndEnd = property[String]("dnd-end").get
}
object XXFormsDndEvent {
val StandardProperties = Map(XXFORMS_DND -> List("dnd-start", "dnd-end"))
}
|
orbeon/orbeon-forms
|
xforms-runtime/shared/src/main/scala/org/orbeon/oxf/xforms/event/events/XXFormsDndEvent.scala
|
Scala
|
lgpl-2.1
| 1,274
|
/*
* Contributions:
* Jean-Francois GUENA: implement "suffixed collection name" feature (issue #39 partially fulfilled)
* ...
*/
package akka.contrib.persistence.mongodb
import akka.actor.ActorSystem
import akka.persistence.PersistentRepr
import akka.serialization.{SerializationExtension, Serialization}
import com.mongodb.util.JSON
import com.mongodb._
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.BeforeAndAfterAll
import collection.JavaConverters._
import scala.util.Try
abstract class JournalUpgradeSpec[D <: MongoPersistenceDriver, X <: MongoPersistenceExtension](extensionClass: Class[X], database: String, toDriver: (ActorSystem,Config) => D, extendedConfig: String = "|") extends BaseUnitTest with ContainerMongo with BeforeAndAfterAll {
import ConfigLoanFixture._
override def embedDB = s"upgrade-test-$database"
override def afterAll() = cleanup()
def config(extensionClass: Class[_]) = ConfigFactory.parseString(s"""
|akka.contrib.persistence.mongodb.mongo.driver = "${extensionClass.getName}"
|akka.contrib.persistence.mongodb.mongo.journal-automatic-upgrade = true
|akka.persistence.journal.plugin = "akka-contrib-mongodb-persistence-journal"
|akka-contrib-mongodb-persistence-journal {
| # Class name of the plugin.
| class = "akka.contrib.persistence.mongodb.MongoJournal"
| overrides {
| mongouri = "mongodb://$host:$noAuthPort/$embedDB"
| }
|}
|akka.persistence.snapshot-store.plugin = "akka-contrib-mongodb-persistence-snapshot"
|akka-contrib-mongodb-persistence-snapshot {
| # Class name of the plugin.
| class = "akka.contrib.persistence.mongodb.MongoSnapshots"
|}
$extendedConfig
|""".stripMargin)
def configured[A](testCode: D => A) = withConfig(config(extensionClass), "akka-contrib-mongodb-persistence-journal", "upgrade-test")(toDriver.tupled andThen testCode)
"A mongo persistence driver" should "do nothing on a new installation" in configured { as =>
mongoClient.getDB(embedDB).getCollectionNames shouldNot contain ("akka_persistence_journal")
}
import JournallingFieldNames._
def buildLegacyObject[A](pid: String, sn: Long, payload: A)(implicit serEv: Serialization): DBObject = {
val builder = new BasicDBObjectBuilder()
builder
.add(PROCESSOR_ID, pid)
.add(SEQUENCE_NUMBER, sn)
.add(SERIALIZED,
serEv.serialize(PersistentRepr(payload, sn, pid)).get
).get()
}
def buildLegacyDocument[A](pid: String, sn: Long)(implicit serEv: Serialization): DBObject = {
val builder = new BasicDBObjectBuilder()
val serBuilder = new BasicDBObjectBuilder()
val plBuilder = new BasicDBObjectBuilder()
val subdoc = serBuilder.add(PayloadKey, plBuilder.add("abc",1).add("def",2.0).add("ghi",true).get()).get()
builder.add(PROCESSOR_ID, pid).add(SEQUENCE_NUMBER, sn).add(SERIALIZED, subdoc).get()
}
def queryByProcessorId(pid: String): DBObject = {
new BasicDBObjectBuilder().add(PROCESSOR_ID,pid).get()
}
def sortByTo: DBObject = {
new BasicDBObjectBuilder().add(TO,1).get()
}
def createLegacyIndex(coll: DBCollection): Unit = {
val idxSpec =
new BasicDBObjectBuilder()
.add(PROCESSOR_ID, 1)
.add(SEQUENCE_NUMBER, 1)
.add(DELETED, 1)
.get()
Try(coll.createIndex(idxSpec)).getOrElse(())
}
it should "upgrade an existing journal" in configured { as =>
implicit val serialization = SerializationExtension.get(as.actorSystem)
val coll = mongoClient.getDB(embedDB).getCollection("akka_persistence_journal")
createLegacyIndex(coll)
coll.insert(buildLegacyObject("foo",1,"bar"))
coll.insert(buildLegacyObject("foo",2,"bar"))
coll.insert(buildLegacyDocument("foo",3))
as.journal // executes upgrade
val records = coll.find(queryByProcessorId("foo")).sort(sortByTo).toArray.asScala.toList
records should have size 3
records.zipWithIndex.foreach { case (dbo,idx) =>
dbo.get(PROCESSOR_ID) should be ("foo")
dbo.get(TO) should be (idx + 1)
dbo.get(FROM) should be (dbo.get(TO))
val event = dbo.get(EVENTS).asInstanceOf[BasicDBList].get(0).asInstanceOf[DBObject]
event.get(SEQUENCE_NUMBER) should be (idx + 1)
if (idx < 2) {
event.get(TYPE) should be ("s")
event.get(PayloadKey) should be ("bar")
} else {
event.get(TYPE) should be ("bson")
val bson = event.get(PayloadKey).asInstanceOf[DBObject]
bson.get("abc") should be (1)
bson.get("def") should be (2.0)
bson.get("ghi") shouldBe true
}
}
}
it should "upgrade a more complicated journal" in configured { as =>
implicit val serialization = SerializationExtension.get(as.actorSystem)
val coll = mongoClient.getDB(embedDB).getCollection("akka_persistence_journal")
coll.remove(new BasicDBObject())
createLegacyIndex(coll)
val doc =
"""
|{
| "_id" : { "$oid" : "55deeae33de20e69f33b748b" },
| "pid" : "foo",
| "sn" : { "$numberLong" : "1" },
| "dl" : false,
| "cs" : [ ],
| "pr" : {
| "p" : {
| "order-created" : {
| "id" : "alsonotarealguid",
| "seqNr" : 232,
| "userId" : "notarealguid",
| "cartId" : "notarealcartid",
| "phoneNumber" : "+15555005555",
| "from" : {
| "country" : "US"
| },
| "to" : {
| "country" : "RU",
| "region" : "MOW",
| "city" : "Moscow"
| },
| "dateCreated" : { "$date": "2015-08-27T10:48:03.101Z" },
| "timestamp" : { "$date": "2015-08-27T10:48:03.101Z" },
| "addressId" : "not-a-real-addressid"
| },
| "_timestamp" : { "$date": "2015-08-27T10:48:03.102Z" }
| }
| }
}""".stripMargin
coll.insert(JSON.parse(doc).asInstanceOf[DBObject])
as.journal // executes upgrade
val records = coll.find(queryByProcessorId("foo")).toArray.asScala.toList
records should have size 1
records.zipWithIndex.foreach { case (dbo,idx) =>
dbo.get(PROCESSOR_ID) should be ("foo")
dbo.get(TO) should be (idx + 1)
dbo.get(FROM) should be (dbo.get(TO))
val event = dbo.get(EVENTS).asInstanceOf[BasicDBList].get(0).asInstanceOf[DBObject]
event.get(SEQUENCE_NUMBER) should be (idx + 1)
event.get(TYPE) should be ("bson")
val bson = event.get(PayloadKey).asInstanceOf[DBObject]
val payload = bson.get("order-created").asInstanceOf[DBObject]
payload.get("cartId") should be ("notarealcartid")
payload.get("seqNr") should be (232)
}
}
}
|
alari/akka-persistence-mongo
|
common/src/test/scala/akka/contrib/persistence/mongodb/JournalUpgradeSpec.scala
|
Scala
|
apache-2.0
| 6,850
|
/*
* Copyright (c) 2013-2014 Plausible Labs Cooperative, Inc.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
package coop.plausible.sbt.keychain.git
/**
* Git configuration option.
*
* @param value The option's value.
* @param isShellCommand If true, the option is designated as a shell command.
*/
private[keychain] case class GitConfigOption (value: String, isShellCommand: Boolean)
|
plausiblelabs/sbt-keychain
|
src/main/scala/coop/plausible/sbt/keychain/git/GitConfigOption.scala
|
Scala
|
mit
| 1,457
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.ops
import com.intel.analytics.bigdl.dllib.nn.Graph
import com.intel.analytics.bigdl.dllib.nn.tf._
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.T
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
class TensorArraySplitSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
"TensorArray serializer Split/Concat" should "work properly" in {
val tensorArray = new TensorArrayCreator[Float, Float]().inputs()
val data = Const[Float, Float](Tensor[Float](3, 4).rand()).inputs()
val lengths = Const[Float, Int](Tensor[Int](T(1, 2))).inputs()
val splitter = new TensorArraySplit[Float, Float]().inputs((tensorArray, 1), (data, 1),
(lengths, 1))
val ctr = new com.intel.analytics.bigdl.dllib.nn.tf.ControlDependency[Float]().
inputs(splitter)
val concat = new TensorArrayConcat[Float, Float]().inputs(tensorArray, ctr)
val size = new TensorArraySize[Float]().inputs(tensorArray, ctr)
val ctr2 = new com.intel.analytics.bigdl.dllib.nn.tf.ControlDependency[Float]().
inputs(concat, size)
val close = new TensorArrayClose[Float]().inputs((tensorArray, 1), (ctr2, 1))
val model = Graph.dynamic[Float](Array(tensorArray), Array(concat, close, size))
runSerializationTestWithMultiClass(model, Tensor.scalar[Int](2), Array(
tensorArray.element.getClass.asInstanceOf[Class[_]],
splitter.element.getClass.asInstanceOf[Class[_]],
concat.element.getClass.asInstanceOf[Class[_]],
close.element.getClass.asInstanceOf[Class[_]],
size.element.getClass.asInstanceOf[Class[_]]
))
}
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/ops/TensorArraySplitSpec.scala
|
Scala
|
apache-2.0
| 2,361
|
package com.fang.page
import com.thoughtworks.binding.{Binding, dom}
import org.scalajs.dom.raw.{Event, Node}
import com.fang.ImplicitConvert._
import com.fang.UserSession
import com.fang.ajax.UserAPI
import com.fang.data.AjaxResult.{Error, Ok}
import com.fang.data.GlobalValue
import com.fang.page.DomUtil.{bindInputValue, hideClassIf}
import com.thoughtworks.binding.Binding.Var
import org.scalajs.dom.window
class LoginPage extends Page {
override def title(): String = "login"
val username = Var("")
val password = Var("")
@dom val allValid: Binding[Boolean] = username.bind.length > 0 && password.bind.length > 0
val errorMessage:Var[Option[String]] = Var(None)
@dom override def onLoad(): Binding[Node] = {
<div class="container">
<div class="jumbotron">
<div class="container">
<h1 class="">Online Go</h1>
<p>An online platform to play gomoku game</p>
</div>
</div>
<form name="loginForm" noValidate={true}>
<div class="form-group">
<label class="control-label">Username</label>
<input type="text" name="username" class="form-control"
placeholder="Your name" oninput={bindInputValue(_: Event, username)}/>
</div>
<div class="form-group">
<label class="control-label">Password</label>
<input type="password" name="password" class="form-control"
placeholder="Password" oninput={bindInputValue(_: Event, password)}/>
</div>
<div class={hideClassIf("alert alert-danger", "hide", errorMessage.bind.isDefined)}>
{errorMessage.bind.getOrElse("")}
</div>
</form>
<div class="padding20"></div>
<div></div>
<button class="btn btn-default btn-block"
disabled={!allValid.bind}
onclick={_:Event => onLogin()}>Ok</button>
<button class="btn btn-danger btn-block">Login With Google</button>
<a href="#register" class="btn btn-primary btn-block">Register</a>
<a class="btn btn-info btn-block" href="#playing">Watch Others Play</a>
</div>
}
def onLogin(): Unit = {
UserAPI.userLogin(username.value, password.value).foreach {
case Ok(value) =>
// window.alert(value.toString)
GlobalValue.updateUserSession()
if(value.role == UserSession.USER){
window.location.hash = "user/" + value.id
}
case Error(message, _) =>
errorMessage.value = Some(message)
}
}
}
|
TianhaoFang/online-go
|
js/src/main/scala/com/fang/page/LoginPage.scala
|
Scala
|
mit
| 2,501
|
package spark.examples
import scala.math.random
import spark._
import SparkContext._
/** Computes an approximation to pi */
object SparkPi {
def main(args: Array[String]) {
/*if (args.length == 0) {
System.err.println("Usage: SparkPi <master> [<slices>]")
System.exit(1)
}*/
val spark = new SparkContext("local", "SparkPi",
System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR")))
val slices = if (args.length > 1) args(1).toInt else 2
val n = 100000 * slices
val count = spark.parallelize(1 to n, slices).map {
i =>
val x = random * 2 - 1
val y = random * 2 - 1
if (x * x + y * y < 1) 1 else 0
}.reduce(_ + _)
println("Pi is roughly " + 4.0 * count / n)
System.exit(0)
}
}
|
prabeesh/Spark-Kestrel
|
examples/src/main/scala/spark/examples/SparkPi.scala
|
Scala
|
bsd-3-clause
| 778
|
package cn.changhong.web.util
import java.util.UUID
import cn.changhong.web.init.GlobalConfigFactory
import cn.changhong.web.persistent.RedisPoolManager
import com.twitter.finagle.http.Request
import org.slf4j.LoggerFactory
/**
* Created by yangguo on 14-12-11.
*/
abstract class TokenManager{
protected def generateToken:String
def validateToken(clientId:String,uid:String,tType:String,token:String):Boolean
def createToken(clientId:String,uid:String,tType:String,expired:Int):Map[String,String]
def validateIsHackAction(requestClientKey:String):Boolean
}
object TokenUtil extends TokenManager{
override def validateIsHackAction(requestClientKey:String): Boolean = {
RedisPoolManager.redisCommand{implicit client=>
val key=requestClientKey
val count=client.incr(key)
println(requestClientKey+":"+count)
if(count>GlobalConfigFactory.max_valid_request_frequency) {
if(count==GlobalConfigFactory.exceed_spider_threshold_frequency) client.expire(key,GlobalConfigFactory.exceed_spider_threshold_seconds)
true
}else {
if(count == 1) client.expire(key,GlobalConfigFactory.max_valid_request_expire_seconds)
false
}
}
}
override protected def generateToken: String = {
val uuid=UUID.randomUUID().toString
new sun.misc.BASE64Encoder().encode(uuid.getBytes())
}
override def validateToken(clientId: String, uid: String, tType: String, token: String): Boolean = {
RedisPoolManager.redisCommand{implicit client=>
val key="t_"+tType+"_"+uid+"_"+clientId
println("token_key:"+key)
val uToken=client.get(key)
if(uToken==null) throw new RestException(RestResponseInlineCode.expired_token,"Token Timeout")
if(token.equals(uToken)) true
else throw new RestException(RestResponseInlineCode.invalid_token,"Invalid Token")
}
}
/**
*
* @param cid
* @param uid
* @param tType
   * @param expired token expiry time, defaults to one day (86400), unit: seconds
* @return
*/
override def createToken(cid: String, uid: String, tType: String, expired: Int=86400): Map[String,String] = {
RedisPoolManager.redisCommand { implicit client =>
val key = "t_" + tType + "_" + uid + "_" + cid
println("created token_key:"+key)
val token = generateToken
val status = client.set(key, token)
if (status.equals("OK")) {
client.expire(key, expired)
Map("access_token" -> token, "expired" -> expired.toString, "tk_type" -> tType)
} else throw new RestException(RestResponseInlineCode.service_inline_cause, "Create Access Token Failed!")
}
}
}
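/**
 * Hedged usage sketch, not part of the original file: a round trip through the
 * token API defined above. It assumes a reachable Redis behind RedisPoolManager;
 * the client id, uid and token type below are made-up placeholders.
 */
object TokenUtilUsageSketch {
  def demo(): Boolean = {
    val clientId = "demo-client"
    val uid = "demo-uid"
    val tType = "web"
    // createToken stores "t_web_demo-uid_demo-client" -> token in Redis with a one-hour TTL
    val created = TokenUtil.createToken(clientId, uid, tType, expired = 3600)
    val token = created("access_token")
    // validateToken reads the same key back and compares it with the presented token
    TokenUtil.validateToken(clientId, uid, tType, token)
  }
}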
|
guoyang2011/myfinagle
|
WebTemplate/src/main/scala/cn/changhong/web/util/TokenUtil.scala
|
Scala
|
apache-2.0
| 2,623
|
/*
* Copyright 2019 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.config
import scala.util.matching.Regex
case class ControllerParams(needsLogging: Boolean = true, needsAuditing: Boolean = true, needsAuth: Boolean = true)
trait ControllerConfig {
import com.typesafe.config.Config
import net.ceedubs.ficus.Ficus._
import net.ceedubs.ficus.readers.ValueReader
import net.ceedubs.ficus.readers.StringReader
def controllerConfigs: Config
private implicit val regexValueReader: ValueReader[Regex] = StringReader.stringValueReader.map(_.r)
private implicit val controllerParamsReader = ValueReader.relative[ControllerParams] { config =>
ControllerParams(
needsLogging = config.getAs[Boolean]("needsLogging").getOrElse(true),
needsAuditing = config.getAs[Boolean]("needsAuditing").getOrElse(true),
needsAuth = config.getAs[Boolean]("needsAuth").getOrElse(true)
)
}
def paramsForController(controllerName: String): ControllerParams =
controllerConfigs.as[Option[ControllerParams]](controllerName).getOrElse(ControllerParams())
}
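/**
 * Hedged usage sketch, not part of the original file: wires the trait above to a
 * hard-coded Config and reads one controller's params. The controller name and
 * settings are made-up examples.
 */
object ExampleControllerConfig extends ControllerConfig {
  import com.typesafe.config.{Config, ConfigFactory}
  override lazy val controllerConfigs: Config = ConfigFactory.parseString(
    """
      |HelloWorldController {
      |  needsLogging = false
      |  needsAuth = false
      |}
    """.stripMargin)
}
// Unset flags fall back to the ControllerParams defaults, so
//   ExampleControllerConfig.paramsForController("HelloWorldController")
// yields ControllerParams(needsLogging = false, needsAuditing = true, needsAuth = false),
// and an unknown controller name yields ControllerParams() with all three flags true.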
|
hmrc/play-config
|
src/main/scala/uk/gov/hmrc/play/config/ControllerConfig.scala
|
Scala
|
apache-2.0
| 1,647
|
package inloopio.math.indicator
/**
 * Define it as a RuntimeException, because this issue should be resolved during development
*
* @author Caoyuan Deng
*/
final class BaseSerNotSetException(message: String) extends RuntimeException(message)
|
dcaoyuan/inloopio-libs
|
inloopio-math/src/main/scala/inloopio/math/indicator/BaseSerNotSetException.scala
|
Scala
|
bsd-3-clause
| 242
|
/*
* Copyright © 2017 University of Texas at Arlington
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.uta.diql.core
object Normalizer {
import AST._
/** rename the variables in the lambda abstraction to prevent name capture */
def renameVars ( f: Lambda ): Lambda =
f match {
case Lambda(p,b)
=> val m = patvars(p).map((_,newvar))
Lambda(m.foldLeft(p){ case (r,(from,to)) => subst(from,to,r) },
m.foldLeft(b){ case (r,(from,to)) => subst(from,Var(to),r) })
}
/** normalize ASTs */
def normalize ( e: Expr ): Expr =
e match {
case flatMap(f,flatMap(g,x))
=> renameVars(g) match {
case Lambda(p,b)
=> normalize(flatMap(Lambda(p,flatMap(f,b)),x))
}
case flatMap(Lambda(_,_),Empty())
=> Empty()
case flatMap(Lambda(p@VarPat(v),b),Elem(x))
=> normalize(if (occurrences(v,b) <= 1)
subst(Var(v),x,b)
else MatchE(x,List(Case(p,BoolConst(true),b))))
case flatMap(Lambda(p,b),Elem(x))
=> normalize(MatchE(x,List(Case(p,BoolConst(true),b))))
case flatMap(f,IfE(c,e1,e2))
=> normalize(IfE(c,flatMap(f,e1),flatMap(f,e2)))
case groupBy(Empty())
=> Empty()
case groupBy(groupBy(x))
=> val nv = newvar
val kv = newvar
normalize(flatMap(Lambda(TuplePat(List(VarPat(kv),VarPat(nv))),
Elem(Tuple(List(Var(kv),Elem(Var(nv)))))),
groupBy(x)))
case coGroup(x,Empty())
=> val nv = newvar
val kv = newvar
normalize(flatMap(Lambda(TuplePat(List(VarPat(kv),VarPat(nv))),
Elem(Tuple(List(Var(kv),Tuple(List(Var(nv),Empty())))))),
groupBy(x)))
case coGroup(Empty(),x)
=> val nv = newvar
val kv = newvar
normalize(flatMap(Lambda(TuplePat(List(VarPat(kv),VarPat(nv))),
Elem(Tuple(List(Var(kv),Tuple(List(Empty(),Var(nv))))))),
groupBy(x)))
case IfE(BoolConst(true),e1,_)
=> normalize(e1)
case IfE(BoolConst(false),_,e2)
=> normalize(e2)
case MatchE(_,List(Case(StarPat(),BoolConst(true),y)))
=> normalize(y)
case MethodCall(Tuple(s),a,null)
=> val pat = """_(\\d+)""".r
a match {
case pat(x) if x.toInt <= s.length
=> normalize(s(x.toInt-1))
case _ => MethodCall(Tuple(s.map(normalize(_))),a,null)
}
case MethodCall(MethodCall(x,"||",List(y)),"!",null)
=> normalize(MethodCall(MethodCall(x,"!",null),"&&",
List(MethodCall(y,"!",null))))
case MethodCall(MethodCall(x,"&&",List(y)),"!",null)
=> normalize(MethodCall(MethodCall(x,"!",null),"||",
List(MethodCall(y,"!",null))))
case MethodCall(MethodCall(x,"!",null),"!",null)
=> normalize(x)
case MethodCall(MethodCall(x,"!=",List(y)),"!",null)
=> normalize(MethodCall(x,"==",List(y)))
case MethodCall(BoolConst(b),"&&",List(x))
=> if (b) normalize(x) else BoolConst(false)
case MethodCall(x,"&&",List(BoolConst(b)))
=> if (b) normalize(x) else BoolConst(false)
case MethodCall(BoolConst(b),"||",List(x))
=> if (b) BoolConst(true) else normalize(x)
case MethodCall(x,"||",List(BoolConst(b)))
=> if (b) BoolConst(true) else normalize(x)
case _ => apply(e,normalize(_))
}
def normalizeAll ( e: Expr ): Expr = {
var olde = e
var ne = olde
do { olde = ne
ne = normalize(ne)
} while (olde != ne)
ne
}
}
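/**
 * Hedged illustration, not part of the original file: a tiny normalization trace
 * over the AST constructors used above. The variable names are arbitrary.
 */
object NormalizerSketch {
  import AST._
  // flatMap(\x -> Elem(x), Elem(Var("y"))) rewrites through the
  // flatMap(Lambda(VarPat(v),b),Elem(x)) rule: v occurs once in b, so it is
  // substituted directly, leaving Elem(Var("y")).
  def example: Expr =
    Normalizer.normalizeAll(
      flatMap(Lambda(VarPat("x"), Elem(Var("x"))), Elem(Var("y"))))
}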
|
fegaras/DIQL
|
src/main/scala/edu/uta/diql/Normalizer.scala
|
Scala
|
apache-2.0
| 4,285
|
package mesosphere.marathon.core.matcher.reconcile
import akka.event.EventStream
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.core.flow.ReviveOffersConfig
import mesosphere.marathon.core.leadership.LeadershipModule
import mesosphere.marathon.core.matcher.base.OfferMatcher
import mesosphere.marathon.core.matcher.reconcile.impl.{ OfferMatcherReconciler, OffersWantedForReconciliationActor }
import mesosphere.marathon.core.task.tracker.TaskTracker
import mesosphere.marathon.storage.repository.GroupRepository
import rx.lang.scala.subjects.BehaviorSubject
import rx.lang.scala.{ Observable, Observer, Subject }
class OfferMatcherReconciliationModule(
reviveOffersConfig: ReviveOffersConfig,
clock: Clock,
marathonEventStream: EventStream,
taskTracker: TaskTracker,
groupRepository: GroupRepository,
leadershipModule: LeadershipModule) {
/** An offer matcher that performs reconciliation on the expected reservations. */
lazy val offerMatcherReconciler: OfferMatcher = new OfferMatcherReconciler(taskTracker, groupRepository)
/** Emits true when offers are wanted for reconciliation. */
def offersWantedObservable: Observable[Boolean] = offersWantedSubject
/** Starts underlying actors etc. */
def start(): Unit = offersWantedForReconciliationActor
private[this] lazy val offersWantedSubject: Subject[Boolean] = BehaviorSubject(false)
private[this] def offersWantedObserver: Observer[Boolean] = offersWantedSubject
private[this] lazy val offersWantedForReconciliationActor = leadershipModule.startWhenLeader(
OffersWantedForReconciliationActor.props(
reviveOffersConfig,
clock,
marathonEventStream,
offersWantedObserver
),
"offersWantedForReconciliation"
)
}
|
timcharper/marathon
|
src/main/scala/mesosphere/marathon/core/matcher/reconcile/OfferMatcherReconciliationModule.scala
|
Scala
|
apache-2.0
| 1,769
|
package com.twitter.finagle.memcached
import scala.collection.{immutable, mutable}
import _root_.java.lang.{Boolean => JBoolean, Long => JLong}
import _root_.java.net.{SocketAddress, InetSocketAddress}
import _root_.java.util.{Map => JMap}
import com.twitter.concurrent.{Broker, Offer}
import com.twitter.conversions.time._
import com.twitter.finagle._
import com.twitter.finagle.builder.{Cluster, ClientBuilder, ClientConfig, StaticCluster}
import com.twitter.finagle.memcached.protocol.text.Memcached
import com.twitter.finagle.memcached.protocol._
import com.twitter.finagle.memcached.util.ChannelBufferUtils._
import com.twitter.finagle.service.{FailureAccrualFactory, FailedService}
import com.twitter.finagle.stats.{StatsReceiver, NullStatsReceiver}
import com.twitter.finagle.{Service, ShardNotAvailableException}
import com.twitter.hashing._
import com.twitter.util.{Time, Future, Bijection, Duration, Timer}
import org.jboss.netty.buffer.ChannelBuffer
import org.jboss.netty.buffer.ChannelBuffers
import org.jboss.netty.util.CharsetUtil.UTF_8
object Client {
/**
* Construct a client from a single host.
*
* @param host a String of host:port combination.
*/
def apply(host: String): Client = Client(
ClientBuilder()
.hosts(host)
.hostConnectionLimit(1)
.codec(Memcached())
.build())
/**
* Construct a client from a Cluster
*/
@deprecated("Use group instead", "7.0.0")
def apply(cluster: Cluster[SocketAddress]) : Client = Client(
ClientBuilder()
.cluster(cluster)
.hostConnectionLimit(1)
.codec(new Memcached)
.build())
/**
* Construct a client from a Group
*/
def apply(group: Group[SocketAddress]) : Client = Client(
ClientBuilder()
.group(group)
.hostConnectionLimit(1)
.codec(new Memcached)
.build())
/**
* Construct a client from a single Service.
*/
def apply(raw: Service[Command, Response]): Client = {
new ConnectedClient(raw)
}
}
case class GetResult private[memcached](
hits: Map[String, Value] = Map.empty,
misses: immutable.Set[String] = immutable.Set.empty,
failures: Map[String, Throwable] = Map.empty
) {
lazy val values = hits mapValues { _.value }
def ++(o: GetResult) = GetResult(hits ++ o.hits, misses ++ o.misses, failures ++ o.failures)
}
case class GetsResult(getResult: GetResult) {
def hits = getResult.hits
def misses = getResult.misses
def failures = getResult.failures
def values = getResult.values
lazy val valuesWithTokens = hits mapValues { v => (v.value, v.casUnique.get) }
def ++(o: GetsResult) = GetsResult(getResult ++ o.getResult)
}
object GetResult {
/**
   * Equivalent to results.reduceLeft { _ ++ _ }, but written to be more efficient.
*/
private[memcached] def merged(results: Seq[GetResult]): GetResult = {
results match {
case Nil => GetResult()
case Seq(single) => single
case Seq(a, b) => a ++ b
case _ =>
val hits = new mutable.HashMap[String, Value]
val misses = new mutable.HashSet[String]
val failures = new mutable.HashMap[String, Throwable]
for (result <- results) {
hits ++= result.hits
misses ++= result.misses
failures ++= result.failures
}
GetResult(hits.toMap, misses.toSet, failures.toMap)
}
}
private[memcached] def merged(results: Seq[GetsResult]): GetsResult = {
val unwrapped = results map { _.getResult }
GetsResult(merged(unwrapped))
}
}
/**
* A friendly client to talk to a Memcached server.
*/
trait BaseClient[T] {
def channelBufferToType(a: ChannelBuffer): T
/**
* Store a key. Override an existing value.
* @return true
*/
def set(key: String, flags: Int, expiry: Time, value: T): Future[Unit]
/**
* Store a key but only if it doesn't already exist on the server.
* @return true if stored, false if not stored
*/
def add(key: String, flags: Int, expiry: Time, value: T): Future[JBoolean]
/**
* Append bytes to the end of an existing key. If the key doesn't exist, the
* operation has no effect.
* @return true if stored, false if not stored
*/
def append(key: String, flags: Int, expiry: Time, value: T): Future[JBoolean]
/**
* Prepend bytes to the beginning of an existing key. If the key doesn't
* exist, the operation has no effect.
* @return true if stored, false if not stored
*/
def prepend(key: String, flags: Int, expiry: Time, value: T): Future[JBoolean]
/**
* Replace bytes on an existing key. If the key doesn't exist, the
* operation has no effect.
* @return true if stored, false if not stored
*/
def replace(key: String, flags: Int, expiry: Time, value: T): Future[JBoolean]
/**
* Perform a CAS operation on the key, only if the value has not
* changed since the value was last retrieved, and `casUnique`
* extracted from a `gets` command. We treat the "cas unique" token
* opaquely, but in reality it is a string-encoded u64.
*
* @return true if replaced, false if not
*/
def cas(
key: String, flags: Int, expiry: Time, value: T, casUnique: ChannelBuffer
): Future[JBoolean]
/**
* Get a key from the server.
*/
def get(key: String): Future[Option[T]] = get(Seq(key)) map { _.values.headOption }
/**
* Get a key from the server, with a "cas unique" token. The token
* is treated opaquely by the memcache client but is in reality a
* string-encoded u64.
*/
def gets(key: String): Future[Option[(T, ChannelBuffer)]] =
gets(Seq(key)) map { _.values.headOption }
/**
* Get a set of keys from the server.
* @return a Map[String, T] of all of the keys that the server had.
*/
def get(keys: Iterable[String]): Future[Map[String, T]] = {
getResult(keys) flatMap { result =>
if (result.failures.nonEmpty) {
Future.exception(result.failures.values.head)
} else {
Future.value(result.values mapValues { channelBufferToType(_) })
}
}
}
/**
* Get a set of keys from the server, together with a "cas unique"
* token. The token is treated opaquely by the memcache client but
* is in reality a string-encoded u64.
*
* @return a Map[String, (T, ChannelBuffer)] of all the
* keys the server had, together with their "cas unique" token
*/
def gets(keys: Iterable[String]): Future[Map[String, (T, ChannelBuffer)]] = {
getsResult(keys) flatMap { result =>
if (result.failures.nonEmpty) {
Future.exception(result.failures.values.head)
} else {
Future.value(result.valuesWithTokens mapValues {
case (v, u) => (channelBufferToType(v), u)
})
}
}
}
/**
* Get a set of keys from the server. Returns a Future[GetResult] that
* encapsulates hits, misses and failures.
*/
def getResult(keys: Iterable[String]): Future[GetResult]
/**
* Get a set of keys from the server. Returns a Future[GetsResult] that
* encapsulates hits, misses and failures. This variant includes the casToken
* from memcached.
*/
def getsResult(keys: Iterable[String]): Future[GetsResult]
/**
* Remove a key.
* @return true if deleted, false if not found
*/
def delete(key: String): Future[JBoolean]
/**
   * Increment a key. Interpret the value as a Long if it is parsable.
* This operation has no effect if there is no value there already.
*/
def incr(key: String, delta: Long): Future[Option[JLong]]
def incr(key: String): Future[Option[JLong]] = incr(key, 1L)
/**
   * Decrement a key. Interpret the value as a JLong if it is parsable.
* This operation has no effect if there is no value there already.
*/
def decr(key: String, delta: Long): Future[Option[JLong]]
def decr(key: String): Future[Option[JLong]] = decr(key, 1L)
/**
   * Store a key. Override an existing value.
* @return true
*/
def set(key: String, value: T): Future[Unit] =
set(key, 0, Time.epoch, value)
/**
* Store a key but only if it doesn't already exist on the server.
* @return true if stored, false if not stored
*/
def add(key: String, value: T): Future[JBoolean] =
add(key, 0, Time.epoch, value)
/**
* Append a set of bytes to the end of an existing key. If the key doesn't
* exist, the operation has no effect.
* @return true if stored, false if not stored
*/
def append(key: String, value: T): Future[JBoolean] =
append(key, 0, Time.epoch, value)
/**
* Prepend a set of bytes to the beginning of an existing key. If the key
* doesn't exist, the operation has no effect.
* @return true if stored, false if not stored
*/
def prepend(key: String, value: T): Future[JBoolean] =
prepend(key, 0, Time.epoch, value)
/**
* Replace an item if it exists. If it doesn't exist, the operation has no
* effect.
* @return true if stored, false if not stored
*/
def replace(key: String, value: T): Future[JBoolean] = replace(key, 0, Time.epoch, value)
/**
* Perform a CAS operation on the key, only if the value has not
* changed since the value was last retrieved, and `casUnique`
* extracted from a `gets` command. We treat the "cas unique" token
* opaquely, but in reality it is a string-encoded u64.
*
* @return true if replaced, false if not
*/
def cas(key: String, value: T, casUnique: ChannelBuffer): Future[JBoolean] =
cas(key, 0, Time.epoch, value, casUnique)
/**
* Send a quit command to the server. Alternative to release, for
   * protocol compatibility.
* @return none
*/
def quit(): Future[Unit] = Future(release())
/**
* Send a stats command with optional arguments to the server
* @return a sequence of strings, each of which is a line of output
*/
def stats(args: Option[String]): Future[Seq[String]]
def stats(args: String): Future[Seq[String]] = stats(Some(args))
def stats(): Future[Seq[String]] = stats(None)
/**
* release the underlying service(s)
*/
def release(): Unit
}
trait Client extends BaseClient[ChannelBuffer] {
def channelBufferToType(v: ChannelBuffer) = v
def adapt[T](bijection: Bijection[ChannelBuffer, T]): BaseClient[T] =
new ClientAdaptor[T](this, bijection)
/** Adaptor to use String as values */
def withStrings: BaseClient[String] = adapt(
new Bijection[ChannelBuffer, String] {
def apply(a: ChannelBuffer): String = channelBufferToString(a)
def invert(b: String): ChannelBuffer = stringToChannelBuffer(b)
}
)
/** Adaptor to use Array[Byte] as values */
def withBytes: BaseClient[Array[Byte]] = adapt(
new Bijection[ChannelBuffer, Array[Byte]] {
def apply(a: ChannelBuffer): Array[Byte] = channelBufferToBytes(a)
def invert(b: Array[Byte]): ChannelBuffer = bytesToChannelBuffer(b)
}
)
}
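/**
 * Hedged usage sketch, not part of the original file: exercises the BaseClient
 * operations documented above through the String adaptor. The host:port below is
 * a placeholder and the blocking Await calls are for illustration only.
 */
object ClientUsageSketch {
  import com.twitter.util.Await
  def demo(): Option[String] = {
    val client = Client("localhost:11211").withStrings // BaseClient[String]
    Await.result(client.set("greeting", "hello"))      // 2-arg set uses flags = 0 and expiry = Time.epoch
    val value = Await.result(client.get("greeting"))   // Some("hello") when the key is present
    client.release()
    value
  }
}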
trait ProxyClient extends Client {
protected def proxyClient: Client
def getResult(keys: Iterable[String]) = proxyClient.getResult(keys)
def getsResult(keys: Iterable[String]) = proxyClient.getsResult(keys)
def set(key: String, flags: Int, expiry: Time, value: ChannelBuffer) = proxyClient.set(key, flags, expiry, value)
def add(key: String, flags: Int, expiry: Time, value: ChannelBuffer) = proxyClient.add(key, flags, expiry, value)
def replace(key: String, flags: Int, expiry: Time, value: ChannelBuffer) = proxyClient.replace(key, flags, expiry, value)
def append(key: String, flags: Int, expiry: Time, value: ChannelBuffer) = proxyClient.append(key, flags, expiry, value)
def prepend(key: String, flags: Int, expiry: Time, value: ChannelBuffer) = proxyClient.prepend(key, flags, expiry, value)
def incr(key: String, delta: Long) = proxyClient.incr(key, delta)
def decr(key: String, delta: Long) = proxyClient.decr(key, delta)
def cas(key: String, flags: Int, expiry: Time, value: ChannelBuffer, casUnique: ChannelBuffer) = proxyClient.cas(key, flags, expiry, value, casUnique)
def delete(key: String) = proxyClient.delete(key)
def stats(args: Option[String]) = proxyClient.stats(args)
def release() { proxyClient.release() }
}
/**
* A Client connected to an individual Memcached server.
*
* @param service the underlying Memcached Service.
*/
protected class ConnectedClient(protected val service: Service[Command, Response]) extends Client {
protected def rawGet(command: RetrievalCommand) = {
val keys = immutable.Set(command.keys map { _.toString(UTF_8) }: _*)
service(command) map {
case Values(values) =>
val tuples = values.map {
case value => (value.key.toString(UTF_8), value)
}
val hits = tuples.toMap
val misses = keys -- hits.keySet
GetResult(hits, misses)
case Error(e) => throw e
case _ => throw new IllegalStateException
} handle {
case t: RequestException => GetResult(failures = (keys map { (_, t) }).toMap)
case t: ChannelException => GetResult(failures = (keys map { (_, t) }).toMap)
case t: ServiceException => GetResult(failures = (keys map { (_, t) }).toMap)
}
}
def getResult(keys: Iterable[String]) = {
try {
if (keys==null) throw new IllegalArgumentException("Invalid keys: keys cannot be null")
rawGet(Get(keys.toSeq))
} catch {
case t:IllegalArgumentException => Future.exception(new ClientError(t.getMessage))
}
}
def getsResult(keys: Iterable[String]) = {
try {
if (keys==null) throw new IllegalArgumentException("Invalid keys: keys cannot be null")
rawGet(Gets(keys.toSeq)) map { GetsResult(_) }
} catch {
case t:IllegalArgumentException => Future.exception(new ClientError(t.getMessage))
}
}
def set(key: String, flags: Int, expiry: Time, value: ChannelBuffer) = {
try {
service(Set(key, flags, expiry, value)) map {
case Stored() => ()
case Error(e) => throw e
case _ => throw new IllegalStateException
}
} catch {
case t:IllegalArgumentException => Future.exception(new ClientError(t.getMessage))
}
}
def cas(key: String, flags: Int, expiry: Time, value: ChannelBuffer, casUnique: ChannelBuffer) = {
try {
service(Cas(key, flags, expiry, value, casUnique)) map {
case Stored() => true
case Exists() => false
case Error(e) => throw e
case _ => throw new IllegalStateException
}
} catch {
case t:IllegalArgumentException => Future.exception(new ClientError(t.getMessage))
}
}
def add(key: String, flags: Int, expiry: Time, value: ChannelBuffer) = {
try {
service(Add(key, flags, expiry, value)) map {
case Stored() => true
case NotStored() => false
case Error(e) => throw e
case _ => throw new IllegalStateException
}
} catch {
case t:IllegalArgumentException => Future.exception(new ClientError(t.getMessage))
}
}
def append(key: String, flags: Int, expiry: Time, value: ChannelBuffer) = {
try {
service(Append(key, flags, expiry, value)) map {
case Stored() => true
case NotStored() => false
case Error(e) => throw e
case _ => throw new IllegalStateException
}
} catch {
case t:IllegalArgumentException => Future.exception(new ClientError(t.getMessage))
}
}
def prepend(key: String, flags: Int, expiry: Time, value: ChannelBuffer) = {
try {
service(Prepend(key, flags, expiry, value)) map {
case Stored() => true
case NotStored() => false
case Error(e) => throw e
case _ => throw new IllegalStateException
}
} catch {
case t:IllegalArgumentException => Future.exception(new ClientError(t.getMessage))
}
}
def replace(key: String, flags: Int, expiry: Time, value: ChannelBuffer) = {
try {
service(Replace(key, flags, expiry, value)) map {
case Stored() => true
case NotStored() => false
case Error(e) => throw e
case _ => throw new IllegalStateException
}
} catch {
case t:IllegalArgumentException => Future.exception(new ClientError(t.getMessage))
}
}
def delete(key: String) = {
try {
service(Delete(key)) map {
case Deleted() => true
case NotFound() => false
case Error(e) => throw e
case _ => throw new IllegalStateException
}
} catch {
case t:IllegalArgumentException => Future.exception(new ClientError(t.getMessage))
}
}
def incr(key: String, delta: Long): Future[Option[JLong]] = {
try {
service(Incr(key, delta)) map {
case Number(value) => Some(value)
case NotFound() => None
case Error(e) => throw e
case _ => throw new IllegalStateException
}
} catch {
case t:IllegalArgumentException => Future.exception(new ClientError(t.getMessage))
}
}
def decr(key: String, delta: Long): Future[Option[JLong]] = {
try {
service(Decr(key, delta)) map {
case Number(value) => Some(value)
case NotFound() => None
case Error(e) => throw e
case _ => throw new IllegalStateException
}
} catch {
case t:IllegalArgumentException => Future.exception(new ClientError(t.getMessage))
}
}
def stats(args: Option[String]): Future[Seq[String]] = {
val statArgs: Seq[ChannelBuffer] = args match {
case None => Seq(ChannelBuffers.EMPTY_BUFFER)
case Some(args) => args.split(" ").toSeq
}
service(Stats(statArgs)) map {
case InfoLines(lines) => lines.map { line =>
val key = line.key
val values = line.values
key.toString(UTF_8) + " " + values.map { value => value.toString(UTF_8) }.mkString(" ")
}
case Error(e) => throw e
case Values(list) => Nil
case _ => throw new IllegalStateException
}
}
def release() {
service.close()
}
}
/**
* A partitioned client is a client that delegates to an actual client based on
* the key value. Subclasses implement clientOf to choose the Client.
*/
trait PartitionedClient extends Client {
protected[memcached] def clientOf(key: String): Client
private[this] def withKeysGroupedByClient[A](
keys: Iterable[String])(f: (Client, Iterable[String]) => Future[A]
): Future[Seq[A]] = {
Future.collect(
keys groupBy(clientOf(_)) map Function.tupled(f) toSeq
)
}
def getResult(keys: Iterable[String]) = {
if (keys.nonEmpty) {
withKeysGroupedByClient(keys) {
_.getResult(_)
} map { GetResult.merged(_) }
} else {
Future.value(GetResult())
}
}
def getsResult(keys: Iterable[String]) = {
if (keys.nonEmpty) {
withKeysGroupedByClient(keys) {
_.getsResult(_)
} map { GetResult.merged(_) }
} else {
Future.value(GetsResult(GetResult()))
}
}
def set(key: String, flags: Int, expiry: Time, value: ChannelBuffer) =
clientOf(key).set(key, flags, expiry, value)
def add(key: String, flags: Int, expiry: Time, value: ChannelBuffer) =
clientOf(key).add(key, flags, expiry, value)
def append(key: String, flags: Int, expiry: Time, value: ChannelBuffer) =
clientOf(key).append(key, flags, expiry, value)
def prepend(key: String, flags: Int, expiry: Time, value: ChannelBuffer) =
clientOf(key).prepend(key, flags, expiry, value)
def replace(key: String, flags: Int, expiry: Time, value: ChannelBuffer) =
clientOf(key).replace(key, flags, expiry, value)
def cas(key: String, flags: Int, expiry: Time, value: ChannelBuffer, casUnique: ChannelBuffer) =
clientOf(key).cas(key, flags, expiry, value, casUnique)
def delete(key: String) = clientOf(key).delete(key)
def incr(key: String, delta: Long) = clientOf(key).incr(key, delta)
def decr(key: String, delta: Long) = clientOf(key).decr(key, delta)
def stats(args: Option[String]): Future[Seq[String]] =
throw new UnsupportedOperationException("No logical way to perform stats without a key")
}
object PartitionedClient {
@deprecated("Use CacheNodeGroup.apply(hostPartWeights) instead", "7.0.0")
def parseHostPortWeights(hostPortWeights: String): Seq[(String, Int, Int)] =
hostPortWeights
.split(Array(' ', ','))
.filter((_ != ""))
.map(_.split(":"))
.map {
case Array(host) => (host, 11211, 1)
case Array(host, port) => (host, port.toInt, 1)
case Array(host, port, weight) => (host, port.toInt, weight.toInt)
}
}
abstract class KetamaClientKey {
def identifier: String
}
object KetamaClientKey {
private[memcached] case class HostPortBasedKey(host: String, port: Int, weight: Int) extends KetamaClientKey {
val identifier = if (port == 11211) host else host + ":" + port
}
private[memcached] case class CustomKey(identifier: String) extends KetamaClientKey
def apply(host: String, port: Int, weight: Int): KetamaClientKey =
HostPortBasedKey(host, port, weight)
def apply(id: String) = CustomKey(id)
}
private[finagle] sealed trait NodeEvent
private[finagle] sealed trait NodeHealth extends NodeEvent
private[finagle] case class NodeMarkedDead(key: KetamaClientKey) extends NodeHealth
private[finagle] case class NodeRevived(key: KetamaClientKey) extends NodeHealth
class KetamaFailureAccrualFactory[Req, Rep](
underlying: ServiceFactory[Req, Rep],
numFailures: Int,
markDeadFor: Duration,
timer: Timer,
key: KetamaClientKey,
healthBroker: Broker[NodeHealth]
) extends FailureAccrualFactory[Req, Rep](underlying, numFailures, markDeadFor, timer) {
override def markDead() = {
super.markDead()
healthBroker ! NodeMarkedDead(key)
}
override def revive() = {
super.revive()
healthBroker ! NodeRevived(key)
}
}
object KetamaClient {
val DefaultNumReps = 160
private val shardNotAvailableDistributor = {
val failedService = new FailedService(new ShardNotAvailableException)
new SingletonDistributor(TwemcacheClient(failedService): Client)
}
}
class KetamaClient private[finagle](
initialServices: Group[CacheNode],
keyHasher: KeyHasher,
numReps: Int,
failureAccrualParams: (Int, Duration) = (5, 30.seconds),
legacyFAClientBuilder: Option[(CacheNode, KetamaClientKey, Broker[NodeHealth], (Int, Duration)) => Service[Command, Response]],
statsReceiver: StatsReceiver = NullStatsReceiver,
oldLibMemcachedVersionComplianceMode: Boolean = false
) extends PartitionedClient {
private object NodeState extends Enumeration {
type t = this.Value
val Live, Ejected = Value
}
private case class Node(node: KetamaNode[Client], var state: NodeState.Value)
// ketama nodes group maps each cache node to a ketama key/node pair
  // with memcached rich client as underlying handler
val nodeHealthBroker = new Broker[NodeHealth]
val ketamaNodeGrp = initialServices.map({
node: CacheNode =>
val key = node.key match {
case Some(id) => KetamaClientKey(id)
case None => KetamaClientKey(node.host, node.port, node.weight)
}
val faClient: Client = legacyFAClientBuilder map { builder =>
TwemcacheClient(builder(node, key, nodeHealthBroker, failureAccrualParams))
} getOrElse MemcachedFailureAccrualClient(
key, nodeHealthBroker, failureAccrualParams
).newTwemcacheClient(node.host+":"+node.port)
key -> KetamaNode(key.identifier, node.weight, faClient)
})
@volatile private[this] var ketamaNodeSnap = ketamaNodeGrp()
@volatile private[this] var nodes = mutable.Map[KetamaClientKey, Node]() ++ {
ketamaNodeSnap.toMap mapValues { kn: KetamaNode[Client] => Node(kn, NodeState.Live) }
}
nodeHealthBroker.recv foreach {
case NodeMarkedDead(key) => ejectNode(key)
case NodeRevived(key) => reviveNode(key)
}
private[this] val pristineDistributor = buildDistributor(nodes.values map(_.node) toSeq)
@volatile private[this] var currentDistributor: Distributor[Client] = pristineDistributor
private[this] val liveNodeGauge = statsReceiver.addGauge("live_nodes") {
synchronized { nodes count { case (_, Node(_, state)) => state == NodeState.Live } } }
private[this] val deadNodeGauge = statsReceiver.addGauge("dead_nodes") {
synchronized { nodes count { case (_, Node(_, state)) => state == NodeState.Ejected } } }
private[this] val ejectionCount = statsReceiver.counter("ejections")
private[this] val revivalCount = statsReceiver.counter("revivals")
private[this] val nodeLeaveCount = statsReceiver.counter("leaves")
private[this] val nodeJoinCount = statsReceiver.counter("joins")
private[this] val keyRingRedistributeCount = statsReceiver.counter("redistributes")
private[this] def buildDistributor(nodes: Seq[KetamaNode[Client]]) = synchronized {
if (nodes.isEmpty) KetamaClient.shardNotAvailableDistributor
else new KetamaDistributor(nodes, numReps, oldLibMemcachedVersionComplianceMode)
}
override def clientOf(key: String): Client = {
if (ketamaNodeGrp() ne ketamaNodeSnap)
updateGroup()
val hash = keyHasher.hashKey(key)
currentDistributor.nodeForHash(hash)
}
private[this] def rebuildDistributor(): Unit = synchronized {
val liveNodes = for ((_, Node(node, NodeState.Live)) <- nodes) yield node
currentDistributor = buildDistributor(liveNodes toSeq)
keyRingRedistributeCount.incr()
}
private[this] def updateGroup() = synchronized {
if (ketamaNodeGrp() ne ketamaNodeSnap) {
val old = ketamaNodeSnap
ketamaNodeSnap = ketamaNodeGrp()
// remove old nodes and release clients
nodes --= (old &~ ketamaNodeSnap) collect {
case (key, node) =>
node.handle.release()
nodeLeaveCount.incr()
key
}
// new joined node appears as Live state
nodes ++= (ketamaNodeSnap &~ old) collect {
case (key, node) =>
nodeJoinCount.incr()
key -> Node(node, NodeState.Live)
}
rebuildDistributor()
}
}
private[this] def ejectNode(key: KetamaClientKey) = synchronized {
nodes.get(key) match {
case Some(node) if (node.state == NodeState.Live) =>
node.state = NodeState.Ejected
rebuildDistributor()
ejectionCount.incr()
case _ =>
}
}
private[this] def reviveNode(key: KetamaClientKey) = synchronized {
nodes.get(key) match {
case Some(node) if node.state == NodeState.Ejected =>
node.state = NodeState.Live
rebuildDistributor()
revivalCount.incr()
case _ =>
}
}
def release() = synchronized {
for ((_, Node(node, _)) <- nodes)
node.handle.release()
}
}
case class KetamaClientBuilder private[memcached] (
_group: Group[CacheNode],
_hashName: Option[String],
_clientBuilder: Option[ClientBuilder[_, _, _, _, ClientConfig.Yes]],
_failureAccrualParams: (Int, Duration) = (5, 30.seconds),
oldLibMemcachedVersionComplianceMode: Boolean = false,
numReps: Int = KetamaClient.DefaultNumReps
) {
@deprecated("Use group(Group[CacheNode]) instead", "7.0.0")
def cluster(cluster: Cluster[InetSocketAddress]): KetamaClientBuilder = {
group(CacheNodeGroup(Group.fromCluster(cluster).map{_.asInstanceOf[SocketAddress]}))
}
def group(group: Group[CacheNode]): KetamaClientBuilder = {
copy(_group = group)
}
@deprecated("Use group(Group[CacheNode]) instead", "7.0.0")
def cachePoolCluster(cluster: Cluster[CacheNode]): KetamaClientBuilder = {
copy(_group = Group.fromCluster(cluster))
}
def nodes(nodes: Seq[(String, Int, Int)]): KetamaClientBuilder =
copy(_group = Group(nodes map {
case (host, port, weight) => new CacheNode(host, port, weight)
}:_*))
def nodes(hostPortWeights: String): KetamaClientBuilder =
group(CacheNodeGroup(hostPortWeights))
def hashName(hashName: String): KetamaClientBuilder =
copy(_hashName = Some(hashName))
def numReps(numReps: Int): KetamaClientBuilder =
copy(numReps = numReps)
def clientBuilder(clientBuilder: ClientBuilder[_, _, _, _, ClientConfig.Yes]): KetamaClientBuilder =
copy(_clientBuilder = Some(clientBuilder))
def failureAccrualParams(numFailures: Int, markDeadFor: Duration): KetamaClientBuilder =
copy(_failureAccrualParams = (numFailures, markDeadFor))
def noFailureAccrual: KetamaClientBuilder =
copy(_failureAccrualParams = (Int.MaxValue, Duration.Zero))
def enableOldLibMemcachedVersionComplianceMode(): KetamaClientBuilder =
copy(oldLibMemcachedVersionComplianceMode = true)
def build(): Client = {
val builder =
(_clientBuilder getOrElse ClientBuilder().hostConnectionLimit(1)).codec(Memcached())
def legacyFAClientBuilder(
node: CacheNode, key: KetamaClientKey, broker: Broker[NodeHealth], faParams: (Int, Duration)
) = {
builder.hosts(new InetSocketAddress(node.host, node.port))
.failureAccrualFactory(filter(key, broker, faParams) _)
.build()
}
val keyHasher = KeyHasher.byName(_hashName.getOrElse("ketama"))
val statsReceiver = builder.statsReceiver.scope("memcached_client")
new KetamaClient(
_group,
keyHasher,
numReps,
_failureAccrualParams,
Some(legacyFAClientBuilder(_, _, _, _)),
statsReceiver,
oldLibMemcachedVersionComplianceMode
)
}
private[this] def filter(
key: KetamaClientKey, broker: Broker[NodeHealth], faParams: (Int, Duration)
)(timer: Timer) = {
val (_numFailures, _markDeadFor) = faParams
new ServiceFactoryWrapper {
def andThen[Req, Rep](factory: ServiceFactory[Req, Rep]) = {
new KetamaFailureAccrualFactory(
factory, _numFailures, _markDeadFor, timer, key, broker
)
}
}
}
}
object KetamaClientBuilder {
def apply(): KetamaClientBuilder = KetamaClientBuilder(Group.empty, Some("ketama"), None)
def get() = apply()
}
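/**
 * Hedged usage sketch, not part of the original file: builds a ketama-hashed
 * client from a host:port:weight list using the builder above. The host names
 * are placeholders.
 */
object KetamaClientBuilderUsageSketch {
  def demo(): Client =
    KetamaClientBuilder()
      .nodes("cache1.example.com:11211:1,cache2.example.com:11211:1")
      .failureAccrualParams(numFailures = 5, markDeadFor = 30.seconds)
      .build()
}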
/**
* Ruby memcache-client (MemCache) compatible client.
*/
class RubyMemCacheClient(clients: Seq[Client]) extends PartitionedClient {
protected[memcached] def clientOf(key: String) = {
val hash = (KeyHasher.CRC32_ITU.hashKey(key) >> 16) & 0x7fff
val index = hash % clients.size
clients(index.toInt)
}
def release() {
clients foreach { _.release() }
}
}
/**
 * Builder for Ruby memcache-client (MemCache) compatible client.
*/
case class RubyMemCacheClientBuilder(
_nodes: Seq[(String, Int, Int)],
_clientBuilder: Option[ClientBuilder[_, _, _, _, ClientConfig.Yes]]) {
def this() = this(
Nil, // nodes
None // clientBuilder
)
def nodes(nodes: Seq[(String, Int, Int)]): RubyMemCacheClientBuilder =
copy(_nodes = nodes)
def nodes(hostPortWeights: String): RubyMemCacheClientBuilder =
copy(_nodes = CacheNodeGroup(hostPortWeights).members map {
node: CacheNode => (node.host, node.port, node.weight)
} toSeq)
def clientBuilder(clientBuilder: ClientBuilder[_, _, _, _, ClientConfig.Yes]): RubyMemCacheClientBuilder =
copy(_clientBuilder = Some(clientBuilder))
def build(): PartitionedClient = {
val builder = _clientBuilder getOrElse ClientBuilder().hostConnectionLimit(1)
val clients = _nodes.map { case (hostname, port, weight) =>
require(weight == 1, "Ruby memcache node weight must be 1")
Client(builder.hosts(hostname + ":" + port).codec(Memcached()).build())
}
new RubyMemCacheClient(clients)
}
}
/**
* PHP memcache-client (memcache.so) compatible client.
*/
class PHPMemCacheClient(clients: Array[Client], keyHasher: KeyHasher)
extends PartitionedClient {
protected[memcached] def clientOf(key: String) = {
// See mmc_hash() in memcache_standard_hash.c
val hash = (keyHasher.hashKey(key.getBytes) >> 16) & 0x7fff
val index = hash % clients.size
clients(index.toInt)
}
def release() {
clients foreach { _.release() }
}
}
/**
 * Builder for PHP memcache-client (memcache.so) compatible client.
*/
case class PHPMemCacheClientBuilder(
_nodes: Seq[(String, Int, Int)],
_hashName: Option[String],
_clientBuilder: Option[ClientBuilder[_, _, _, _, ClientConfig.Yes]]) {
def nodes(nodes: Seq[(String, Int, Int)]): PHPMemCacheClientBuilder =
copy(_nodes = nodes)
def nodes(hostPortWeights: String): PHPMemCacheClientBuilder =
copy(_nodes = CacheNodeGroup(hostPortWeights).members map {
node: CacheNode => (node.host, node.port, node.weight)
} toSeq)
def hashName(hashName: String): PHPMemCacheClientBuilder =
copy(_hashName = Some(hashName))
def clientBuilder(clientBuilder: ClientBuilder[_, _, _, _, ClientConfig.Yes]): PHPMemCacheClientBuilder =
copy(_clientBuilder = Some(clientBuilder))
def build(): PartitionedClient = {
val builder = _clientBuilder getOrElse ClientBuilder().hostConnectionLimit(1)
val keyHasher = KeyHasher.byName(_hashName.getOrElse("crc32-itu"))
val clients = _nodes.map { case (hostname, port, weight) =>
val client = Client(builder.hosts(hostname + ":" + port).codec(Memcached()).build())
for (i <- (1 to weight)) yield client
}.flatten.toArray
new PHPMemCacheClient(clients, keyHasher)
}
}
object PHPMemCacheClientBuilder {
def apply(): PHPMemCacheClientBuilder = PHPMemCacheClientBuilder(Nil, Some("crc32-itu"), None)
def get() = apply()
}
|
firebase/finagle
|
finagle-memcached/src/main/scala/com/twitter/finagle/memcached/Client.scala
|
Scala
|
apache-2.0
| 33,422
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.SharedHelpers.{EventRecordingReporter, thisLineNumber}
import scala.concurrent.{Promise, ExecutionContext, Future}
import org.scalatest.concurrent.SleepHelper
import org.scalatest.events.{InfoProvided, MarkupProvided}
import org.scalatest.exceptions.DuplicateTestNameException
import scala.util.Success
class AsyncFunSuiteSpec extends FunSpec {
describe("AsyncFunSuite") {
it("can be used for tests that return Future under parallel async test execution") {
class ExampleSuite extends AsyncFunSuite with ParallelTestExecution {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
val a = 1
test("test 1") {
Future {
assert(a == 1)
}
}
test("test 2") {
Future {
assert(a == 2)
}
}
test("test 3") {
Future {
pending
}
}
test("test 4") {
Future {
cancel
}
}
ignore("test 5") {
Future {
cancel
}
}
override def newInstance = new ExampleSuite
}
val rep = new EventRecordingReporter
val suite = new ExampleSuite
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 4)
assert(rep.testSucceededEventsReceived.length == 1)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testFailedEventsReceived.length == 1)
assert(rep.testFailedEventsReceived(0).testName == "test 2")
assert(rep.testPendingEventsReceived.length == 1)
assert(rep.testPendingEventsReceived(0).testName == "test 3")
assert(rep.testCanceledEventsReceived.length == 1)
assert(rep.testCanceledEventsReceived(0).testName == "test 4")
assert(rep.testIgnoredEventsReceived.length == 1)
assert(rep.testIgnoredEventsReceived(0).testName == "test 5")
}
    it("can be used for tests that do not return Future under parallel async test execution") {
class ExampleSuite extends AsyncFunSuite with ParallelTestExecution {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
val a = 1
test("test 1") {
assert(a == 1)
}
test("test 2") {
assert(a == 2)
}
test("test 3") {
pending
}
test("test 4") {
cancel
}
ignore("test 5") {
cancel
}
override def newInstance = new ExampleSuite
}
val rep = new EventRecordingReporter
val suite = new ExampleSuite
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 4)
assert(rep.testSucceededEventsReceived.length == 1)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testFailedEventsReceived.length == 1)
assert(rep.testFailedEventsReceived(0).testName == "test 2")
assert(rep.testPendingEventsReceived.length == 1)
assert(rep.testPendingEventsReceived(0).testName == "test 3")
assert(rep.testCanceledEventsReceived.length == 1)
assert(rep.testCanceledEventsReceived(0).testName == "test 4")
assert(rep.testIgnoredEventsReceived.length == 1)
assert(rep.testIgnoredEventsReceived(0).testName == "test 5")
}
it("should run tests that return Future in serial by default") {
@volatile var count = 0
class ExampleSuite extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
Future {
SleepHelper.sleep(30)
assert(count == 0)
count = 1
succeed
}
}
test("test 2") {
Future {
assert(count == 1)
SleepHelper.sleep(50)
count = 2
succeed
}
}
test("test 3") {
Future {
assert(count == 2)
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSuite
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived.length == 3)
}
    it("should run tests that do not return Future in serial by default") {
@volatile var count = 0
class ExampleSuite extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
SleepHelper.sleep(30)
assert(count == 0)
count = 1
succeed
}
test("test 2") {
assert(count == 1)
SleepHelper.sleep(50)
count = 2
succeed
}
test("test 3") {
assert(count == 2)
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSuite
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived.length == 3)
}
// SKIP-SCALATESTJS,NATIVE-START
    it("should run tests and their futures in the same main thread when using SerialExecutionContext") {
var mainThread = Thread.currentThread
var test1Thread: Option[Thread] = None
var test2Thread: Option[Thread] = None
var onCompleteThread: Option[Thread] = None
class ExampleSpec extends AsyncFunSuite {
test("test 1") {
Future {
test1Thread = Some(Thread.currentThread)
succeed
}
}
test("test 2") {
Future {
test2Thread = Some(Thread.currentThread)
succeed
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.whenCompleted { s =>
onCompleteThread = Some(Thread.currentThread)
}
status.waitUntilCompleted()
assert(test1Thread.isDefined)
assert(test1Thread.get == mainThread)
assert(test2Thread.isDefined)
assert(test2Thread.get == mainThread)
assert(onCompleteThread.isDefined)
assert(onCompleteThread.get == mainThread)
}
    it("should run tests and their truly async futures in the same thread when using SerialExecutionContext") {
var mainThread = Thread.currentThread
@volatile var test1Thread: Option[Thread] = None
@volatile var test2Thread: Option[Thread] = None
var onCompleteThread: Option[Thread] = None
class ExampleSpec extends AsyncFunSuite {
test("test 1") {
val promise = Promise[Assertion]
val timer = new java.util.Timer
timer.schedule(
new java.util.TimerTask {
def run(): Unit = {
promise.complete(Success(succeed))
}
},
1000
)
promise.future.map { s =>
test1Thread = Some(Thread.currentThread)
s
}
}
test("test 2") {
val promise = Promise[Assertion]
val timer = new java.util.Timer
timer.schedule(
new java.util.TimerTask {
def run(): Unit = {
promise.complete(Success(succeed))
}
},
500
)
promise.future.map { s =>
test2Thread = Some(Thread.currentThread)
s
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.whenCompleted { s =>
onCompleteThread = Some(Thread.currentThread)
}
status.waitUntilCompleted()
assert(test1Thread.isDefined)
assert(test1Thread.get == mainThread)
assert(test2Thread.isDefined)
assert(test2Thread.get == mainThread)
assert(onCompleteThread.isDefined)
assert(onCompleteThread.get == mainThread)
}
it("should not run out of stack space with nested futures when using SerialExecutionContext") {
class ExampleSpec extends AsyncFunSuite {
// Note we get a StackOverflowError with the following execution
// context.
// override implicit def executionContext: ExecutionContext = new ExecutionContext { def execute(runnable: Runnable) = runnable.run; def reportFailure(cause: Throwable) = () }
def sum(xs: List[Int]): Future[Int] =
xs match {
case Nil => Future.successful(0)
case x :: xs => Future(x).flatMap(xx => sum(xs).map(xxx => xx + xxx))
}
test("test 1") {
val fut: Future[Int] = sum((1 to 50000).toList)
fut.map(total => assert(total == 1250025000))
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.waitUntilCompleted()
assert(!rep.testSucceededEventsReceived.isEmpty)
}
// SKIP-SCALATESTJS,NATIVE-END
    it("should run tests that return Future and report their results in serial") {
class ExampleSpec extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
Future {
SleepHelper.sleep(60)
succeed
}
}
test("test 2") {
Future {
SleepHelper.sleep(30)
succeed
}
}
test("test 3") {
Future {
succeed
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testStartingEventsReceived(0).testName == "test 1")
assert(rep.testStartingEventsReceived(1).testName == "test 2")
assert(rep.testStartingEventsReceived(2).testName == "test 3")
assert(rep.testSucceededEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testSucceededEventsReceived(1).testName == "test 2")
assert(rep.testSucceededEventsReceived(2).testName == "test 3")
}
    it("should run tests that do not return Future and report their results in serial") {
class ExampleSpec extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
SleepHelper.sleep(60)
succeed
}
test("test 2") {
SleepHelper.sleep(30)
succeed
}
test("test 3") {
succeed
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testStartingEventsReceived(0).testName == "test 1")
assert(rep.testStartingEventsReceived(1).testName == "test 2")
assert(rep.testStartingEventsReceived(2).testName == "test 3")
assert(rep.testSucceededEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testSucceededEventsReceived(1).testName == "test 2")
assert(rep.testSucceededEventsReceived(2).testName == "test 3")
}
it("should send an InfoProvided event for an info in main spec body") {
class MySuite extends AsyncFunSuite {
info(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 1)
assert(infoList(0).message == "hi there")
}
it("should send an InfoProvided event for an info in test body") {
class MySuite extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
info("hi there")
succeed
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[InfoProvided])
val infoProvided = recordedEvent.asInstanceOf[InfoProvided]
assert(infoProvided.message == "hi there")
}
it("should send an InfoProvided event for an info in Future returned by test body") {
class MySuite extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
Future {
info("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val infoList = reporter.infoProvidedEventsReceived
assert(infoList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[InfoProvided])
val infoProvided = recordedEvent.asInstanceOf[InfoProvided]
assert(infoProvided.message == "hi there")
}
it("should send a NoteProvided event for a note in main spec body") {
class MySuite extends AsyncFunSuite {
note(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
it("should send a NoteProvided event for a note in test body") {
class MySuite extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
note("hi there")
succeed
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
it("should send a NoteProvided event for a note in Future returned by test body") {
class MySuite extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
Future {
note("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val noteList = reporter.noteProvidedEventsReceived
assert(noteList.size == 1)
assert(noteList(0).message == "hi there")
}
it("should send an AlertProvided event for an alert in main spec body") {
class MySuite extends AsyncFunSuite {
alert(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
it("should send an AlertProvided event for an alert in test body") {
class MySuite extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
alert("hi there")
succeed
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
it("should send an AlertProvided event for an alert in Future returned by test body") {
class MySuite extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
Future {
alert("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val alertList = reporter.alertProvidedEventsReceived
assert(alertList.size == 1)
assert(alertList(0).message == "hi there")
}
it("should send a MarkupProvided event for a markup in main spec body") {
class MySuite extends AsyncFunSuite {
markup(
"hi there"
)
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 1)
assert(markupList(0).text == "hi there")
}
it("should send a MarkupProvided event for a markup in test body") {
class MySuite extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
markup("hi there")
succeed
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[MarkupProvided])
val markupProvided = recordedEvent.asInstanceOf[MarkupProvided]
assert(markupProvided.text == "hi there")
}
it("should send a MarkupProvided event for a markup in Future returned by test body") {
class MySuite extends AsyncFunSuite {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
//SCALATESTNATIVE-ONLY implicit override def executionContext = scala.concurrent.ExecutionContext.Implicits.global
test("test 1") {
Future {
markup("hi there")
succeed
}
}
}
val suite = new MySuite
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
val markupList = reporter.markupProvidedEventsReceived
assert(markupList.size == 0)
val testSucceededList = reporter.testSucceededEventsReceived
assert(testSucceededList.size == 1)
assert(testSucceededList(0).recordedEvents.size == 1)
val recordedEvent = testSucceededList(0).recordedEvents(0)
assert(recordedEvent.isInstanceOf[MarkupProvided])
val markupProvided = recordedEvent.asInstanceOf[MarkupProvided]
assert(markupProvided.text == "hi there")
}
it("should generate a DuplicateTestNameException when duplicate test name is detected") {
class TestSpec extends AsyncFunSuite {
test("test 1") { succeed }
test("test 1") { succeed }
}
val e = intercept[DuplicateTestNameException] {
new TestSpec
}
assert("AsyncFunSuiteSpec.scala" == e.failedCodeFileName.get)
assert(e.failedCodeLineNumber.get == thisLineNumber - 6)
assert(!e.cause.isDefined)
}
it("should generate a DuplicateTestNameException when duplicate test name is detected using ignore") {
class TestSpec extends AsyncFunSuite {
test("test 1") { succeed }
ignore("test 1") { succeed }
}
val e = intercept[DuplicateTestNameException] {
new TestSpec
}
assert("AsyncFunSuiteSpec.scala" == e.failedCodeFileName.get)
assert(e.failedCodeLineNumber.get == thisLineNumber - 6)
assert(!e.cause.isDefined)
}
it("should allow other execution context to be used") {
class TestSpec extends AsyncFunSuite {
// SKIP-SCALATESTJS,NATIVE-START
override implicit val executionContext = scala.concurrent.ExecutionContext.Implicits.global
// SKIP-SCALATESTJS,NATIVE-END
// SCALATESTJS-ONLY override implicit val executionContext = scala.scalajs.concurrent.JSExecutionContext.runNow
val a = 1
test("test A") {
Future { assert(a == 1) }
}
test("test B") {
Future { assert(a == 1) }
}
test("test C") {
Future { assert(a == 1) }
}
}
val suite = new TestSpec
val reporter = new EventRecordingReporter
val status = suite.run(None, Args(reporter))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(reporter.testStartingEventsReceived.length == 3)
assert(reporter.testSucceededEventsReceived.length == 3)
}
}
}
|
dotty-staging/scalatest
|
scalatest-test/src/test/scala/org/scalatest/AsyncFunSuiteSpec.scala
|
Scala
|
apache-2.0
| 26,363
|
package dotty.tools.dotc.quoted
import dotty.tools.dotc.core.Contexts._
import dotty.tools.dotc.util.Property
import dotty.tools.dotc.ast.tpd
object QuotesCache {
import tpd._
/** A key to be used in a context property that caches the unpickled trees */
private val QuotesCacheKey = new Property.Key[collection.mutable.Map[String | List[String], Tree]]
/** Get the cached tree of the quote */
def getTree(pickled: String | List[String])(using Context): Option[Tree] =
ctx.property(QuotesCacheKey).get.get(pickled)
/** Update the cached tree of the quote */
def update(pickled: String | List[String], tree: Tree)(using Context): Unit =
ctx.property(QuotesCacheKey).get.update(pickled, tree)
/** Context with a cache for quote trees and tasty bytes */
def init(ctx: FreshContext): ctx.type =
ctx.setProperty(QuotesCacheKey, collection.mutable.Map.empty)
}
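// A rough usage sketch, kept commented out: a fresh context is initialised with
// `init` before the cache is consulted; `pickled` and `unpickle` below are
// placeholders for an actual pickled quote and its unpickling step.
//
//   def cachedUnpickle(pickled: String)(using Context): tpd.Tree =
//     QuotesCache.getTree(pickled).getOrElse {
//       val tree = unpickle(pickled) // hypothetical unpickling step
//       QuotesCache.update(pickled, tree)
//       tree
//     }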
|
dotty-staging/dotty
|
compiler/src/dotty/tools/dotc/quoted/QuotesCache.scala
|
Scala
|
apache-2.0
| 892
|
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package xsbt.api
import xsbti.api._
import scala.collection.mutable
class Visit {
private[this] val visitedStructures = new mutable.HashSet[Structure]
private[this] val visitedClassLike = new mutable.HashSet[ClassLike]
def visit(s: Source): Unit = visitAPI(s.api)
def visitAPI(s: SourceAPI): Unit =
{
s.packages foreach visitPackage
s.definitions foreach visitDefinition
}
def visitPackage(p: Package) {
visitString(p.name)
}
def visitDefinitions(ds: Seq[Definition]) = ds foreach visitDefinition
def visitDefinition(d: Definition) {
visitString(d.name)
visitAnnotations(d.annotations)
visitModifiers(d.modifiers)
visitAccess(d.access)
d match {
case c: ClassLike => visitClass(c)
case f: FieldLike => visitField(f)
case d: Def => visitDef(d)
case t: TypeDeclaration => visitTypeDeclaration(t)
case t: TypeAlias => visitTypeAlias(t)
}
}
final def visitClass(c: ClassLike): Unit = if (visitedClassLike add c) visitClass0(c)
def visitClass0(c: ClassLike) {
visitParameterizedDefinition(c)
visitType(c.selfType)
visitStructure(c.structure)
}
def visitField(f: FieldLike) {
visitType(f.tpe)
f match {
case v: Var => visitVar(v)
case v: Val => visitVal(v)
}
}
def visitVar(v: Var) {}
def visitVal(v: Val) {}
def visitDef(d: Def) {
visitParameterizedDefinition(d)
visitValueParameters(d.valueParameters)
visitType(d.returnType)
}
def visitAccess(a: Access): Unit =
a match {
case pub: Public => visitPublic(pub)
case qual: Qualified => visitQualified(qual)
}
def visitQualified(qual: Qualified): Unit =
qual match {
case p: Protected => visitProtected(p)
case p: Private => visitPrivate(p)
}
def visitQualifier(qual: Qualifier): Unit =
qual match {
case unq: Unqualified => visitUnqualified(unq)
case thisq: ThisQualifier => visitThisQualifier(thisq)
case id: IdQualifier => visitIdQualifier(id)
}
def visitIdQualifier(id: IdQualifier) {
visitString(id.value)
}
def visitUnqualified(unq: Unqualified) {}
def visitThisQualifier(thisq: ThisQualifier) {}
def visitPublic(pub: Public) {}
def visitPrivate(p: Private) { visitQualifier(p.qualifier) }
def visitProtected(p: Protected) { visitQualifier(p.qualifier) }
def visitModifiers(m: Modifiers) {}
def visitValueParameters(valueParameters: Seq[ParameterList]) = valueParameters foreach visitValueParameterList
def visitValueParameterList(list: ParameterList) = list.parameters foreach visitValueParameter
def visitValueParameter(parameter: MethodParameter) =
{
visitString(parameter.name)
visitType(parameter.tpe)
}
def visitParameterizedDefinition[T <: ParameterizedDefinition](d: T) {
visitTypeParameters(d.typeParameters)
}
def visitTypeDeclaration(d: TypeDeclaration) {
visitParameterizedDefinition(d)
visitType(d.lowerBound)
visitType(d.upperBound)
}
def visitTypeAlias(d: TypeAlias) {
visitParameterizedDefinition(d)
visitType(d.tpe)
}
def visitTypeParameters(parameters: Seq[TypeParameter]) = parameters foreach visitTypeParameter
def visitTypeParameter(parameter: TypeParameter) {
visitTypeParameters(parameter.typeParameters)
visitType(parameter.lowerBound)
visitType(parameter.upperBound)
visitAnnotations(parameter.annotations)
}
def visitAnnotations(annotations: Seq[Annotation]) = annotations foreach visitAnnotation
def visitAnnotation(annotation: Annotation) =
{
visitType(annotation.base)
visitAnnotationArguments(annotation.arguments)
}
def visitAnnotationArguments(args: Seq[AnnotationArgument]) = args foreach visitAnnotationArgument
def visitAnnotationArgument(arg: AnnotationArgument) {
visitString(arg.name)
visitString(arg.value)
}
def visitTypes(ts: Seq[Type]) = ts.foreach(visitType)
def visitType(t: Type) {
t match {
case s: Structure => visitStructure(s)
case e: Existential => visitExistential(e)
case c: Constant => visitConstant(c)
case p: Polymorphic => visitPolymorphic(p)
case a: Annotated => visitAnnotated(a)
case p: Parameterized => visitParameterized(p)
case p: Projection => visitProjection(p)
case _: EmptyType => visitEmptyType()
case s: Singleton => visitSingleton(s)
case pr: ParameterRef => visitParameterRef(pr)
}
}
def visitEmptyType() {}
def visitParameterRef(p: ParameterRef) {}
def visitSingleton(s: Singleton) { visitPath(s.path) }
def visitPath(path: Path) = path.components foreach visitPathComponent
def visitPathComponent(pc: PathComponent) = pc match {
case t: This => visitThisPath(t)
case s: Super => visitSuperPath(s)
case id: Id => visitIdPath(id)
}
def visitThisPath(t: This) {}
def visitSuperPath(s: Super) { visitPath(s.qualifier) }
def visitIdPath(id: Id) { visitString(id.id) }
def visitConstant(c: Constant) =
{
visitString(c.value)
visitType(c.baseType)
}
def visitExistential(e: Existential) = visitParameters(e.clause, e.baseType)
def visitPolymorphic(p: Polymorphic) = visitParameters(p.parameters, p.baseType)
def visitProjection(p: Projection) =
{
visitString(p.id)
visitType(p.prefix)
}
def visitParameterized(p: Parameterized) {
visitType(p.baseType)
visitTypes(p.typeArguments)
}
def visitAnnotated(a: Annotated) {
visitType(a.baseType)
visitAnnotations(a.annotations)
}
final def visitStructure(structure: Structure) = if (visitedStructures add structure) visitStructure0(structure)
def visitStructure0(structure: Structure) {
visitTypes(structure.parents)
visitDefinitions(structure.declared)
visitDefinitions(structure.inherited)
}
def visitParameters(parameters: Seq[TypeParameter], base: Type): Unit =
{
visitTypeParameters(parameters)
visitType(base)
}
def visitString(s: String) {}
}
|
xeno-by/old-scalameta-sbt
|
compile/api/src/main/scala/xsbt/api/Visit.scala
|
Scala
|
bsd-3-clause
| 6,149
|
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.core
import java.io.{ File, IOException }
import java.util.jar.JarFile
import java.util.regex.Pattern
import org.apache.commons.lang.StringEscapeUtils
import scala.io.Source
// Scaladoc uses @usecase comment annotations to substitute kid-safe signatures
// in the docs.
// See: http://stackoverflow.com/questions/26132459/why-are-scaladoc-method-signatures-wrong
// Unfortunately, these usecase signatures are also used for the link anchors.
// As a result, it's impossible to generate the links without doing non-trivial
// analysis of the doc comments.
// Also see: https://issues.scala-lang.org/browse/SI-9168
//
// Instead of doing things the 'right' way, which would be hard, we use heuristics
// to determine if the link is likely to be a @usecase, and then go digging
// through the html content to find the anchor.
trait DocUsecaseHandling { self: DocResolver =>
val PrefixRegexp = """^([A-Za-z:_\\+-]+).*""".r
protected def maybeReplaceWithUsecase(jar: File, sig: DocSig): DocSig = {
if (sig.fqn.scalaStdLib) {
sig.member match {
case Some(PrefixRegexp(prefix)) if UseCasePrefixes.contains(prefix) =>
try {
val jarFile = new JarFile(jar)
try {
val is = jarFile.getInputStream(jarFile.getEntry(scalaFqnToPath(sig.fqn)))
val html = Source.fromInputStream(is).mkString
val re = s"""<a id="(${Pattern.quote(prefix)}.+?)"""".r
re.findFirstMatchIn(html).map { m =>
sig.copy(member = Some(StringEscapeUtils.unescapeHtml(m.group(1))))
}.getOrElse(sig)
} finally jarFile.close()
} catch { case e: IOException => sig }
case _ => sig
}
} else sig
}
  private val UseCasePrefixes = Set(
    "+",
"++",
"++:",
"+:",
"-",
":+",
"::",
":::",
"collect",
"copyToArray",
"diff",
"filterMap",
"flatMap",
"flatten",
"foreach",
"getOrElse",
"indexOf",
"intersect",
"lastIndexOf",
"map",
"mapConserve",
"max",
"maxBy",
"min",
"minBy",
"padTo",
"patch",
"product",
"reverseMap",
"reverse_:::",
"sameElements",
"scan",
"sum",
"to",
"toArray",
"toMap",
"union",
"updated",
"zip",
"zipAll",
"zipWithIndex"
)
}
|
espinhogr/ensime-server
|
core/src/main/scala/org/ensime/core/DocUsecaseHandling.scala
|
Scala
|
gpl-3.0
| 2,500
|
package com.twitter.util
/*
* Copyright 2011 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.specs.SpecificationWithJUnit
import java.lang.StringBuilder
class TestBase64Encoder extends Base64StringEncoder {
}
class StringEncoderSpec extends SpecificationWithJUnit {
val longString = "A string that is really really really really really really long and has more than 76 characters"
val result = "QSBzdHJpbmcgdGhhdCBpcyByZWFsbHkgcmVhbGx5IHJlYWxseSByZWFsbHkgcmVhbGx5IHJlYWxseSBsb25nIGFuZCBoYXMgbW9yZSB0aGFuIDc2IGNoYXJhY3RlcnM="
val testEncoder = new TestBase64Encoder()
"strip new lines" in {
testEncoder.encode(longString.getBytes) mustEqual result
}
"decode value with stripped new lines" in {
new String(testEncoder.decode(result)) mustEqual longString
}
}
class GZIPStringEncoderSpec extends SpecificationWithJUnit {
"a gzip string encoder" should {
val gse = new GZIPStringEncoder {}
"properly encode and decode strings" in {
def testCodec(str: String) {
str mustMatch gse.decodeString(gse.encodeString(str))
}
testCodec("a")
      testCodec("\n\t\n\t\n\n\n\n\t\n\nt\n\t\n\t\n\tn\t\nt\nt\nt\nt\nt\nt\tn\nt\nt\n\t\nt\n")
testCodec("aosnetuhsaontehusaonethsoantehusaonethusonethusnaotehu")
// build a huge string
val sb = new StringBuilder
for (_ <- 1 to 10000) {
        sb.append("oasnuthoesntihosnteidosentidosentauhsnoetidosentihsoneitdsnuthsin\n")
}
testCodec(sb.toString)
}
}
}
|
mosesn/util
|
util-codec/src/test/scala/com/twitter/util/StringEncoderSpec.scala
|
Scala
|
apache-2.0
| 2,033
|
package spark.streaming
import collection.mutable.{SynchronizedBuffer, ArrayBuffer}
import java.util.{List => JList}
import spark.streaming.api.java.{JavaPairDStream, JavaDStreamLike, JavaDStream, JavaStreamingContext}
import spark.streaming._
import java.util.ArrayList
import collection.JavaConversions._
/** Exposes streaming test functionality in a Java-friendly way. */
trait JavaTestBase extends TestSuiteBase {
/**
* Create a [[spark.streaming.TestInputStream]] and attach it to the supplied context.
* The stream will be derived from the supplied lists of Java objects.
**/
def attachTestInputStream[T](
ssc: JavaStreamingContext,
data: JList[JList[T]],
numPartitions: Int) = {
val seqData = data.map(Seq(_:_*))
implicit val cm: ClassManifest[T] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]]
val dstream = new TestInputStream[T](ssc.ssc, seqData, numPartitions)
ssc.ssc.registerInputStream(dstream)
new JavaDStream[T](dstream)
}
/**
   * Attach a provided stream to its associated StreamingContext as a
* [[spark.streaming.TestOutputStream]].
**/
def attachTestOutputStream[T, This <: spark.streaming.api.java.JavaDStreamLike[T, This, R],
R <: spark.api.java.JavaRDDLike[T, R]](
dstream: JavaDStreamLike[T, This, R]) = {
implicit val cm: ClassManifest[T] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[T]]
val ostream = new TestOutputStream(dstream.dstream,
new ArrayBuffer[Seq[T]] with SynchronizedBuffer[Seq[T]])
dstream.dstream.ssc.registerOutputStream(ostream)
}
/**
   * Process all registered streams for numBatches batches, failing if
   * numExpectedOutput RDDs are not generated. Generated RDDs are collected
* and returned, represented as a list for each batch interval.
*/
def runStreams[V](
ssc: JavaStreamingContext, numBatches: Int, numExpectedOutput: Int): JList[JList[V]] = {
implicit val cm: ClassManifest[V] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[V]]
val res = runStreams[V](ssc.ssc, numBatches, numExpectedOutput)
val out = new ArrayList[JList[V]]()
res.map(entry => out.append(new ArrayList[V](entry)))
out
}
}
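// A rough usage sketch, kept commented out: the intended flow from a Java-facing
// test is attach input -> transform -> attach output -> runStreams. The data,
// partition count and batch counts below are illustrative only.
//
//   val input: JList[JList[Int]] = ... // e.g. two batches of ints
//   val stream = JavaTestUtils.attachTestInputStream(ssc, input, 2)
//   val transformed = stream.map(...) // some JavaDStream transformation
//   JavaTestUtils.attachTestOutputStream(transformed)
//   val output = JavaTestUtils.runStreams[Int](ssc, 2, 2)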
object JavaTestUtils extends JavaTestBase {
override def maxWaitTimeMillis = 20000
}
object JavaCheckpointTestUtils extends JavaTestBase {
override def actuallyWait = true
}
|
koeninger/spark
|
streaming/src/test/java/spark/streaming/JavaTestUtils.scala
|
Scala
|
bsd-3-clause
| 2,432
|
package chiselutils.utils
import Chisel._
/** This object allows the assignment of a dynamic section of one vector to another,
  * allowing vecOut(5,1) := vecIn(6, 2), where the indices can be specified dynamically
*/
object DynamicVecAssign {
def apply[T <: Data]( vecOut : Vec[T], hiOut : UInt, loOut : UInt, vecIn : Vec[T], hiIn : UInt, loIn : UInt ) : Unit = {
val vecOutLen = vecOut.length
val vecInLen = vecIn.length
val vecOutBW = log2Up(vecOutLen)
val vecInBW = log2Up(vecInLen)
val maxWidth = if ( vecInBW > vecOutBW ) vecInBW else vecOutBW
if ( vecOutLen == 0 || vecInLen == 0 ) {
ChiselError.error("The vectors cannot have a width of 0")
}
for ( i <- 0 until vecOutLen ) {
val inIdx = loIn + UInt(i, maxWidth) - loOut
when ( hiOut >= UInt(i, vecOutBW) && loOut <= UInt(i, vecOutBW) ) {
vecOut(UInt(i, vecOutBW)) := vecIn(inIdx(vecInBW - 1, 0))
}
}
}
}
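// A rough usage sketch, kept commented out, mirroring the vecOut(5,1) := vecIn(6,2)
// example from the doc comment above; widths and indices are illustrative only.
//
//   val vecIn  = Vec.fill( 8 ) { UInt( width = 4 ) }
//   val vecOut = Vec.fill( 8 ) { UInt( width = 4 ) }
//   DynamicVecAssign( vecOut, UInt(5), UInt(1), vecIn, UInt(6), UInt(2) )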
|
da-steve101/chisel-utils
|
src/main/scala/chiselutils/utils/DynamicVecAssign.scala
|
Scala
|
lgpl-3.0
| 936
|
package org.skycastle.entity
/**
* A trigger that triggers at every change.
*/
class HairTrigger extends Trigger {
def init(value: Any, triggerListener : => Unit, timestamp: Long) {}
def valueChanged(newValue: Any, timestamp: Long) : Boolean = true
}
|
weimingtom/skycastle
|
src/main/scala/org/skycastle/entity/HairTrigger.scala
|
Scala
|
gpl-2.0
| 259
|
package com.jreadability.main
import java.lang.StringBuilder
import java.io._
import scala._
import scala.io.Source
/**
* Scala Porter Stemmer Implementation
*
*/
object Stemmer {
def stem (str: String): String = {
    // only run the all-letters check on strings longer than 3 characters
if (str.length() > 3) {
// all characters must be letters
for (ch <- str toList) {
if (!Character.isLetter(ch)) {
return str.toLowerCase()
}
}
}
step_5(step_4(step_3(step_2(step_1(str))))).toLowerCase
}
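  // A few expected results, following the rule tables documented in the steps
  // below (a sketch only, not an exhaustive test):
  //   stem("caresses") // -> "caress" (step 1a: SSES -> SS)
  //   stem("ponies")   // -> "poni"   (step 1a: IES -> I)
  //   stem("motoring") // -> "motor"  (step 1b: (*v*) ING -> )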
def step_1(str: String): String = step_1_c(step_1_b(step_1_a(str)))
/*
* Step 1a
* SSES -> SS caresses -> caress
* IES -> I ponies -> poni
* ties -> ti
* SS -> SS caress -> caress
* S -> cats -> cat
*/
def step_1_a(str: String): String = replacePatterns(str, List( ("sses", "ss"), ("ies", "i"), ("ss", "ss"), ("s", "")), _>=0)
/*
* Step 1b
* (m>0) EED -> EE feed -> feed
* agreed -> agree
* (*v*) ED -> plastered -> plaster
* bled -> bled
* (*v*) ING -> motoring -> motor
* sing -> sing
*/
def step_1_b (str: String): String = {
// (m > 0) EED -> EE
if (str.endsWith("eed")) {
if (stringMeasure(str.substring(0, str.length - 3)) > 0)
return str.substring(0, str.length() - 1)
// (*v*) ED ->
} else if ((str.endsWith("ed")) &&
(containsVowel(str.substring(0, str.length - 2)))) {
return step_1_b_2(str.substring(0, str.length - 2))
// (*v*) ING ->
} else if ((str.endsWith("ing")) &&
(containsVowel(str.substring(0, str.length - 3)))) {
return step_1_b_2(str.substring(0, str.length - 3))
} // end if
str
} // end step1b
/*
* If the second or third of the rules in Step 1b is successful, the following is done:
* AT -> ATE conflat(ed) -> conflate
* BL -> BLE troubl(ed) -> trouble
* IZ -> IZE siz(ed) -> size
*
* (*d and not (*L or *S or *Z)) -> single letter
* hopp(ing) -> hop
* tann(ed) -> tan
* fall(ing) -> fall
* hiss(ing) -> hiss
* fizz(ed) -> fizz
*
* (m=1 and *o) -> E fail(ing) -> fail
* fil(ing) -> file
*/
def step_1_b_2 (str: String): String = {
if (str.endsWith("at") ||
str.endsWith("bl") ||
str.endsWith("iz")) {
return str + "e";
}
else if ((str.length() > 1) && (endsWithDoubleConsonent(str)) &&
(!(str.endsWith("l") || str.endsWith("s") || str.endsWith("z")))) {
return str.substring(0, str.length() - 1);
}
else if ((stringMeasure(str) == 1) &&
(endsWithCVC(str))) {
return str + "e"
}
str
}
/*
* (*v*) Y -> I happy -> happi
* sky -> sky
*/
def step_1_c(str: String): String = {
if (str.endsWith("y") && containsVowel(str.substring(0, str.length() - 1)))
return str.substring(0, str.length() - 1) + "i"
str
} // end step1c
/*
* (m>0) ATIONAL -> ATE relational -> relate
* (m>0) TIONAL -> TION conditional -> condition
* rational -> rational
* (m>0) ENCI -> ENCE valenci -> valence
* (m>0) ANCI -> ANCE hesitanci -> hesitance
* (m>0) IZER -> IZE digitizer -> digitize
* (m>0) ABLI -> ABLE conformabli -> conformable
* (m>0) ALLI -> AL radicalli -> radical
* (m>0) ENTLI -> ENT differentli -> different
* (m>0) ELI -> E vileli - > vile
* (m>0) OUSLI -> OUS analogousli -> analogous
* (m>0) IZATION -> IZE vietnamization -> vietnamize
* (m>0) ATION -> ATE predication -> predicate
* (m>0) ATOR -> ATE operator -> operate
* (m>0) ALISM -> AL feudalism -> feudal
* (m>0) IVENESS -> IVE decisiveness -> decisive
* (m>0) FULNESS -> FUL hopefulness -> hopeful
* (m>0) OUSNESS -> OUS callousness -> callous
* (m>0) ALITI -> AL formaliti -> formal
* (m>0) IVITI -> IVE sensitiviti -> sensitive
* (m>0) BILITI -> BLE sensibiliti -> sensible
*/
def step_2 (str: String): String = replacePatterns(str, List( ("ational", "ate"), ("tional","tion"), ("enci","ence"), ("anci","ance"),
("izer","ize"), ("bli","ble"), ("alli", "al"), ("entli","ent"),("eli","e"),
("ousli","ous"), ("ization","ize"), ("ation","ate"), ("ator","ate"), ("alism","al"),
("iveness","ive"), ("fulness","ful"), ("ousness", "ous"), ("aliti", "al"), ("iviti","ive"),
("biliti", "ble"), ("logi", "log")))
/*
* (m>0) ICATE -> IC triplicate -> triplic
* (m>0) ATIVE -> formative -> form
* (m>0) ALIZE -> AL formalize -> formal
* (m>0) ICITI -> IC electriciti -> electric
* (m>0) ICAL -> IC electrical -> electric
* (m>0) FUL -> hopeful -> hope
* (m>0) NESS -> goodness -> good
*/
def step_3 (str: String): String = replacePatterns(str, List( ("icate", "ic"),("ative",""),("alize","al"),("iciti","ic"),("ical","ic"),("ful",""),("ness","")))
/*
* (m>1) AL -> revival -> reviv
* (m>1) ANCE -> allowance -> allow
* (m>1) ENCE -> inference -> infer
* (m>1) ER -> airliner -> airlin
* (m>1) IC -> gyroscopic -> gyroscop
* (m>1) ABLE -> adjustable -> adjust
* (m>1) IBLE -> defensible -> defens
* (m>1) ANT -> irritant -> irrit
* (m>1) EMENT -> replacement -> replac
* (m>1) MENT -> adjustment -> adjust
* (m>1) ENT -> dependent -> depend
* (m>1 and (*S or *T)) ION -> adoption -> adopt
* (m>1) OU -> homologou -> homolog
* (m>1) ISM -> communism -> commun
* (m>1) ATE -> activate -> activ
* (m>1) ITI -> angulariti -> angular
* (m>1) OUS -> homologous -> homolog
* (m>1) IVE -> effective -> effect
* (m>1) IZE -> bowdlerize -> bowdler
*/
def step_4 (str: String): String = {
val res: String = replacePatterns(str, List( ("al",""),("ance",""),("ence",""),("er",""),("ic",""),("able",""),("ible",""),("ant",""),("ement",""),
("ment",""),("ent",""),("ou", ""),("ism",""),("ate",""),("iti",""),("ous",""),
("ive",""),("ize","")), _>1)
if (str == res) {
if ((str.endsWith("sion") || str.endsWith("tion")) && stringMeasure(str.substring(0, str.length() - 3)) > 1)
return str.substring(0, str.length() - 3)
else
return str
}
else {
return res
}
}
def step_5 (str: String): String = step_5_b(step_5_a(str))
/*
* (m>1) E -> probate -> probat
* rate -> rate
* (m=1 and not *o) E -> cease -> ceas
*/
def step_5_a (str: String): String = {
// (m > 1) E ->
if ((stringMeasure(str.substring(0, str.length() - 1)) > 1) &&
str.endsWith("e"))
return str.substring(0, str.length() -1)
// (m = 1 and not *0) E ->
else if ((stringMeasure(str.substring(0, str.length() - 1)) == 1) &&
(!endsWithCVC(str.substring(0, str.length() - 1))) &&
(str.endsWith("e")))
return str.substring(0, str.length() - 1)
else
return str
} // end step5a
/*
* (m > 1 and *d and *L) -> single letter
* controll -> control
* roll -> roll
*/
def step_5_b (str: String): String = {
// (m > 1 and *d and *L) ->
if (str.endsWith("l") &&
endsWithDoubleConsonent(str) &&
(stringMeasure(str.substring(0, str.length() - 1)) > 1)) {
str.substring(0, str.length() - 1)
} else {
str
}
} // end step5b
// does string contain a vowel?
def containsVowel(str: String): Boolean = {
for (ch <- str toList) {
if (isVowel(ch))
return true
}
// no aeiou but there is y
if (str.indexOf('y') > -1)
return true
else
false
} // end function
// is char a vowel?
def isVowel(c: Char): Boolean = {
for (ch <- "aeiou" toList)
if (c == ch)
return true
false
} // end function
/*
   * Special check for 'y', since it may be both vowel and consonant depending on surrounding letters
*/
def isVowel(str: String, i: Int): Boolean = {
for (ch <- "aeiou" toList)
if (str(i) == ch || (str(i) == 'y' && i > 0 && i+1 < str.length && !isVowel(str(i-1)) && !isVowel(str(i+1)) ))
return true
false
} // end function
// returns a CVC measure for the string
def stringMeasure(str: String): Int = {
var count = 0
var vowelSeen: Boolean = false
for (i <- 0 to str.length - 1) {
if(isVowel(str, i)) {
vowelSeen = true
} else if (vowelSeen) {
count += 1
vowelSeen = false
}
}
count
} // end function
// does stem end with CVC?
def endsWithCVC (str: String): Boolean = {
if (str.length() >= 3) {
val cvc = ( str(str.length - 1), str(str.length - 2), str(str.length - 3) )
val cvc_str = cvc._1.toString + cvc._2 + cvc._3
if ((cvc._1 == 'w') || (cvc._1 == 'x') || (cvc._1 == 'y'))
false
else if (!isVowel(cvc._1) && isVowel(cvc_str, 1) && !isVowel(cvc._3))
true
else
false
}
else
false
} // end function
  // does string end with a double consonant?
def endsWithDoubleConsonent(str: String): Boolean = {
val c: Char = str.charAt(str.length() - 1);
if (c == str.charAt(str.length() - 2))
if (!containsVowel(str.substring(str.length() - 2))) {
return true
}
false
} // end function
def replacePatterns(str: String, patterns: List[(String, String)]): String = replacePatterns(str, patterns, _>0)
def replaceLast(str: String, pattern: String, replacement: String) = new StringBuilder(str).replace(str.lastIndexOf(pattern), str.lastIndexOf(pattern) + pattern.length, replacement).toString
def replacePatterns(str: String, patterns: List[(String, String)], comparer: Int => Boolean): String = {
for (pattern <- patterns)
if (str.endsWith(pattern._1)) {
val res = replaceLast(str, pattern._1, pattern._2)
if (comparer(stringMeasure(replaceLast(str, pattern._1, ""))))
return res
else
return str
}
str
}
}
|
ifesdjeen/jReadability
|
src/scala/main/com/jreadability/main/Stemmer.scala
|
Scala
|
apache-2.0
| 12,259
|
package sri.sangria.web.components
import sri.core._
import sri.sangria.web.model.TodoJS
import sri.sangria.web.styles.{Theme, Colors}
import sri.universal.ReactUniversal
import sri.universal.components._
import sri.web.all._
import sri.web.styles.WebStyleSheet
import scala.scalajs.js
import scala.scalajs.js.Dynamic.{literal => json}
import scala.scalajs.js.annotation.ScalaJSDefined
import scala.scalajs.js.{UndefOr => U, Function1, undefined => undefined}
object TodoList {
case class State(ds: ListViewDataSource[TodoJS, String] = createListViewDataSource((r1: TodoJS, r2: TodoJS) => r1 != r2))
@ScalaJSDefined
class Component extends ReactComponent[Props, State] {
initialState(State())
def render() = {
val ds = state.ds.cloneWithRows(props.items)
ListView(dataSource = ds,
renderRow = renderRow _,
renderSeparator = renderSeparator _)()
}
def renderRow(row: TodoJS, sectionId: String, rowId: String,highlightRow: js.Function2[String,String,_]) = {
View(style = styles.cell, key = rowId)(Text(style = Theme.bigText)(row.text))
}
def renderSeparator(sectionID: String, rowID: String, adjacentRowHighlighted: Boolean) = {
View(style = styles.cellBorder, key = rowID)()
}
}
object styles extends WebStyleSheet {
val cell = style(height := 90,
justifyContent.center,
alignItems.center)
val cellBorder = style(backgroundColor := "rgba(0, 0, 0, 0.1)",
height := 1.0 / ReactUniversal.PixelRatio.get(),
marginLeft := 4)
}
case class Props(items: js.Array[TodoJS])
val ctor = getTypedConstructor(js.constructorOf[Component], classOf[Component])
def apply(items: js.Array[TodoJS], key: U[String] = js.undefined, ref: Function1[Component, _] = null) = createElement(ctor, Props(items), key = key, ref = ref)
}
|
chandu0101/sri-sangria-example
|
web/src/main/scala/sri/sangria/web/components/TodoList.scala
|
Scala
|
apache-2.0
| 1,841
|
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.util
/**
* @author Paul Bernard
*/
trait Observer {
def update()
}
|
quantintel/spectrum
|
financial/src/main/scala/org/quantintel/ql/util/Observer.scala
|
Scala
|
apache-2.0
| 786
|
package spire.example
import Predef.{any2stringadd => _, intWrapper => _, _}
import spire.algebra._
import spire.std.BooleanIsRig
import spire.implicits._
import scala.reflect.ClassTag
import scala.annotation.tailrec
/**
* These examples are taken from http://r6.ca/blog/20110808T035622Z.html.
*
* The goal is to try to do as direct a translation as possible from the
* Haskell, to see how well we can do with Spire.
*
* The original example is in literate Haskell with good comments, so consult
* the link for more information.
*/
object KleeneDemo {
/**
* Show is a type class we'll use to control how types should display.
*/
trait Show[A] {
def show(a: A): String
}
object Show {
def apply[A](implicit ev: Show[A]) = ev
}
implicit class ShowOps[A: Show](a: A) {
def show: String = Show[A].show(a)
}
// Show[A] instances for built-in types
implicit object IntHasShow extends Show[Int] {
def show(a: Int) = a.toString
}
implicit object DoubleHasShow extends Show[Double] {
def show(a: Double) = a.toString
}
implicit object BooleanHasShow extends Show[Boolean] {
def show(a: Boolean) = if (a) "x" else "."
}
implicit def optionHasShow[A](implicit ev: Show[A]) = new Show[Option[A]] {
def show(a: Option[A]) = a.map(ev.show).getOrElse("-")
}
implicit def listHasShow[A](implicit ev: Show[A]) = new Show[List[A]] {
def show(a: List[A]) = a.map(ev.show).mkString("[", ",", "]")
}
implicit def streamHasShow[A](implicit ev: Show[A]) = new Show[Stream[A]] {
def show(s: Stream[A]) =
if (s.isEmpty) "[]" else "[%s,...]" format ev.show(s.head)
}
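  // A small sketch of the Show instances above composing: the Option and List
  // instances combine to render a List[Option[Int]] (expected "[1,-,3]").
  def showExample: String = List(Option(1), None, Option(3)).show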
/**
* StarRig[A] is a Rig[A] that also has an asteration operator: kstar.
*
* Laws:
* 1. a.star = 1 + a * a.star = 1 + a.star * a
*/
trait StarRig[A] extends Rig[A] {
// one of these must be overridden in any type class instance
def kstar(a: A): A = plus(one, kplus(a))
def kplus(a: A): A = times(a, kstar(a))
}
object StarRig {
def apply[A](implicit ev: StarRig[A]) = ev
implicit def starRigHasRig[A](implicit ev: StarRig[A]): Rig[A] = ev
}
implicit class StarRigOps[A: StarRig](a: A) {
def kstar: A = StarRig[A].kstar(a)
def kplus: A = StarRig[A].kplus(a)
}
implicit def matrixHasStarRig[A](implicit dim: Dim, sr: StarRig[A], ct: ClassTag[A]) =
new StarRig[Matrix[A]] {
def zero: Matrix[A] = Matrix.zero
def one: Matrix[A] = Matrix.one
def plus(x: Matrix[A], y: Matrix[A]) = x + y
def times(x: Matrix[A], y: Matrix[A]) = x * y
override def kplus(m: Matrix[A]) = {
def f(k: Int, m: Matrix[A]) = Matrix[A] { (x, y) =>
m(x, y) + m(k, y) * m(k, k).kstar * m(x, k)
}
@tailrec def loop(m: Matrix[A], i: Int): Matrix[A] =
if (i >= 0) loop(f(i, m), i - 1) else m
loop(m, dim.n - 1)
}
}
/**
* A Kleene is a StarRig which obeys some additional laws.
*
* Laws:
* 1. a + a = a
* 2. a * x + x = x ==> a.kstar * x + x = x
* 3. x * a + x = x ==> x * a.kstar + x = x
*/
trait Kleene[A] extends StarRig[A]
object Kleene {
def apply[A](implicit ev: Kleene[A]) = ev
implicit def kleenIsStarRig[A](implicit ev: Kleene[A]): StarRig[A] = ev
}
// Kleene[A] instances for built-in types
implicit object BooleanHasKleene extends Kleene[Boolean] with BooleanIsRig {
override def kstar(x: Boolean) = true
}
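  // A small sketch of StarRig law 1 (a.kstar = 1 + a * a.kstar) specialized to
  // the Boolean instance above, where plus is ||, times is && and one is true;
  // both sides reduce to `true` for any input.
  def booleanStarLawHolds(a: Boolean): Boolean = {
    val sr = StarRig[Boolean]
    sr.kstar(a) == sr.plus(sr.one, sr.times(a, sr.kstar(a)))
  }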
/**
* Dim is a cute little class that let's us have implicit size information.
*
* This is to work around the fact that we don't currently have
* implementations of Bounded[A] or Ix[A] like Haskell does.
*
* Dim is probably not robust enough for real world use.
*/
case class Dim(n: Int)
/**
* Naive matrix trait.
*/
trait Matrix[A] { lhs =>
def dim: Dim
def apply(x: Int, y: Int): A
def map[B: ClassTag](f: A => B): Matrix[B]
def +(rhs: Matrix[A])(implicit rig: Rig[A]): Matrix[A]
def *(rhs: Matrix[A])(implicit rig: Rig[A]): Matrix[A]
}
object Matrix {
/**
* Builds a Matrix[A] given a function (Int, Int) => A and an implicit Dim
* to provide the dimensions over which to run the function.
*/
def apply[A: ClassTag](f: (Int, Int) => A)(implicit dim: Dim): Matrix[A] = {
val n = dim.n
val arr = new Array[A](n * n)
cfor(0)(_ < n, _ + 1) { y =>
cfor(0)(_ < n, _ + 1) { x =>
arr(y * n + x) = f(x, y)
}
}
new ArrayMatrix(arr)
}
/**
* Given an implicit Dim, builds the zero matrix (all zeros).
*/
def zero[A: Rig: ClassTag](implicit dim: Dim): Matrix[A] =
apply((x, y) => Rig[A].zero)
/**
* Given an implicit Dim, builds the identity matrix (diagonal ones).
*/
def one[A: Rig: ClassTag](implicit dim: Dim): Matrix[A] =
apply((x, y) => if (x == y) Rig[A].one else Rig[A].zero)
}
/**
* Mutable ArrayMatrix implementation.
*
* The mutability should only be used to initialize a matrix. Once it's built
* it will be typed as Matrix[A] with no interface for further mutation.
*
* The matrix also has naive implementations of addition and multiplication.
* These are not optimized--do not use this class in the wild!
*/
case class ArrayMatrix[A](arr: Array[A])(implicit val dim: Dim, ct: ClassTag[A]) extends Matrix[A] { lhs =>
def apply(x: Int, y: Int): A = arr(y * dim.n + x)
def update(x: Int, y: Int, a: A): Unit = arr(y * dim.n + x) = a
def map[B: ClassTag](f: A => B): Matrix[B] =
ArrayMatrix(arr.map(f))
def +(rhs: Matrix[A])(implicit rig: Rig[A]): Matrix[A] =
Matrix((x, y) => lhs(x, y) + rhs(x, y))
def *(rhs: Matrix[A])(implicit rig: Rig[A]): Matrix[A] =
Matrix { (x, y) =>
var total = rig.zero
cfor(0)(_ < dim.n, _ + 1)(j => total += lhs(j, y) * rhs(x, j))
total
}
}
// type class instance for Show[Matrix[A]]
implicit def matrixHasShow[A](implicit ev: Show[A]) = new Show[Matrix[A]] {
def show(m: Matrix[A]): String = {
val s = Show[A]
val n = m.dim.n
val lines = Array.fill(n)("")
cfor(0)(_ < n, _ + 1) { x =>
cfor(0)(_ < n, _ + 1)(y => lines(y) += s.show(m(x, y)) + " ")
val len = lines.foldLeft(0)(_ max _.length)
cfor(0)(_ < n, _ + 1)(y => lines(y) += " " * (len - lines(y).length))
}
      lines.mkString("\n") + "\n"
}
}
// type class instance for Kleene[Matrix[A]]
implicit def matrixHasKleene[A](implicit dim: Dim, ka: Kleene[A], ct: ClassTag[A]) =
new Kleene[Matrix[A]] {
def zero: Matrix[A] = Matrix.zero
def one: Matrix[A] = Matrix.one
def plus(x: Matrix[A], y: Matrix[A]) = x + y
def times(x: Matrix[A], y: Matrix[A]) = x * y
override def kplus(m: Matrix[A]) = {
def f(k: Int, m: Matrix[A]) = Matrix[A] { (x, y) =>
m(x, y) + m(k, y) * m(k, k).kstar * m(x, k)
}
@tailrec def loop(m: Matrix[A], i: Int): Matrix[A] =
if (i >= 0) loop(f(i, m), i - 1) else m
loop(m, dim.n - 1)
}
}
/**
* Edge is a simple class used to construct adjacency matrices.
*
* It's important to remember that edges go: y -> x.
*
* Thus from is the y-coordinate and to is the x-coordinate.
*/
case class Edge(from: Int, to: Int)
// type class instance for Show[Edge]
implicit object EdgeHasShow extends Show[Edge] {
def show(e: Edge) = "(%c%c)" format ('A' + e.from, 'A' + e.to)
}
/**
   * Graph provides functions for constructing adjacency matrices.
*/
object Graph {
def apply(edges: Edge*)(implicit dim: Dim): Matrix[Boolean] = {
val m = ArrayMatrix(Array.fill[Boolean](dim.n * dim.n)(false))
edges.foreach { case Edge(from, to) => m(to, from) = true }
m
}
}
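  // A small sketch using the Graph helper above: the 3-node adjacency matrix for
  // A -> B -> C, whose kstar is the reflexive-transitive closure (boolean
  // reachability). The node count is illustrative only.
  def reachabilityExample: Matrix[Boolean] = {
    implicit val dim = Dim(3)
    matrixHasKleene[Boolean].kstar(Graph(Edge(0, 1), Edge(1, 2)))
  }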
object LabeledGraph {
def apply(m: Matrix[Boolean])(implicit dim: Dim) = Matrix[Option[Edge]] { (x, y) =>
if (m(x, y)) Some(Edge(y, x)) else None
}
}
/**
* Expr[A] implements an AST for regular expressions.
*
   * Basic regular expressions consist of the following:
* 1. the empty set (Nul) -- a set with no strings
* 2. the empty string (Empty) -- set containing the empty string
* 3. literal strings (Var(a)) -- set containing a
* 4. concatenation (Then(a, b)) -- set of all xy, for x in a, y in b
* 5. alternation (Or(a, b)) -- union set of a and b
* 6. kleene star (Star(a)) -- set produced by 0+ concatenations from a
*
   * For example, (a|bc)* includes "", "a", "bc", "abcaaaabc" but not "b".
*/
sealed trait Expr[+A]
case class Var[A](a: A) extends Expr[A]
case class Or[A](lhs: Expr[A], rhs: Expr[A]) extends Expr[A]
case class Then[A](lhs: Expr[A], rhs: Expr[A]) extends Expr[A]
case class Star[A](lhs: Expr[A]) extends Expr[A]
case object Empty extends Expr[Nothing]
case object Nul extends Expr[Nothing]
object Expr {
def apply[A](a: A): Expr[A] = Var(a)
}
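  // A small sketch of the AST above: the regular expression (a|bc)* from the
  // comment, written out directly.
  def abcStar: Expr[Char] = Star(Or(Var('a'), Then(Var('b'), Var('c'))))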
// type class instance for Show[Expr[A]]
implicit def exprHasShow[A](implicit ev: Show[A]) = new Show[Expr[A]] {
def show(e: Expr[A]) = e match {
case Var(a) => ev.show(a)
case Empty => "ε"
case Nul => "∅"
case Star(x) => "(" + show(x) + ")*"
case Or(x, y) => "(" + show(x) + "|" + show(y) + ")"
case Then(x, y) => show(x) + show(y)
}
}
// type class instance for Kleene[Expr[A]]
implicit def exprHasKleene[A] = new Kleene[Expr[A]] {
def zero: Expr[A] = Nul
def one: Expr[A] = Empty
def plus(x: Expr[A], y: Expr[A]): Expr[A] = (x, y) match {
case (Nul, e) => e
case (e, Nul) => e
case (Empty, Empty) => Empty
case (Empty, Star(e)) => Star(e)
case (Star(e), Empty) => Star(e)
case (e1, e2) => Or(e1, e2)
}
def times(x: Expr[A], y: Expr[A]): Expr[A] = (x, y) match {
case (Nul, _) => Nul
case (_, Nul) => Nul
case (Empty, e) => e
case (e, Empty) => e
case (e1, e2) => Then(e1, e2)
}
override def kstar(x: Expr[A]): Expr[A] = x match {
case Nul => Empty
case Empty => Empty
case Star(e) => kstar(e)
case _ => Star(x)
}
}
/**
* Tropical represents a finite quantity between zero and infinity.
*/
sealed trait Tropical[+A]
case class Finite[A](a: A) extends Tropical[A]
case object Infinity extends Tropical[Nothing]
object Tropical {
def apply[A](a: A): Tropical[A] = Finite(a)
def inf[A]: Tropical[A] = Infinity
}
implicit def tropicalHasShow[A: Show] = new Show[Tropical[A]] {
def show(t: Tropical[A]) = t match {
case Finite(a) => Show[A].show(a)
case Infinity => "∞"
}
}
implicit def tropicalHasOrder[A](implicit ord: Order[A]) = new Order[Tropical[A]] {
def compare(x: Tropical[A], y: Tropical[A]) = (x, y) match {
case (Infinity, Infinity) => 0
case (Infinity, _) => 1
case (_, Infinity) => -1
case (Finite(a1), Finite(a2)) => ord.compare(a1, a2)
}
}
implicit def TropicalHasKleene[A: Order: Rig] = new Kleene[Tropical[A]] {
def zero: Tropical[A] = Infinity
def one: Tropical[A] = Tropical(Rig[A].zero)
def plus(x: Tropical[A], y: Tropical[A]): Tropical[A] = (x, y) match {
case (Infinity, t) => t
case (t, Infinity) => t
case (Finite(a1), Finite(a2)) => Tropical(a1 min a2)
}
def times(x: Tropical[A], y: Tropical[A]): Tropical[A] = (x, y) match {
case (Infinity, _) => Infinity
case (_, Infinity) => Infinity
case (Finite(a1), Finite(a2)) => Tropical(a1 + a2)
}
override def kstar(x: Tropical[A]): Tropical[A] = one
}
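// A minimal sketch (illustrative only) of the tropical "min-plus" semiring:
// plus keeps the smaller cost, times adds costs along a path, and kstar of
// any cost is zero (staying put is free). This is what makes
// Matrix[Tropical[A]].kstar compute least-cost paths in pathExample.
def tropicalSketch(): Unit = {
  val three = Tropical(3)
  val five = Tropical(5)
  println((three + five).show)              // 3 (min)
  println((three * five).show)              // 8 (sum)
  println((Tropical.inf[Int] + three).show) // 3 (∞ is the additive identity)
  println(three.kstar.show)                 // 0
}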
/**
* ShortestPath is a data structure which will track two things:
* 1. the path's cost, as Tropical[A]
* 2. the path itself, as B
* Any impossible path will have Infinity as its cost.
*/
case class ShortestPath[A, B](a: Tropical[A], b: B) {
def map[C](f: B => C) = ShortestPath[A, C](a, f(b))
}
// type class instance for Show[ShortestPath[A, B]]
implicit def spHasShow[A: Show, B: Show] = new Show[ShortestPath[A, B]] {
def show(p: ShortestPath[A, B]) = "%s[%s]" format (p.b.show, p.a.show)
}
// type class instance for Kleene[ShortestPath[A, B]]
implicit def shortestPathHasKleene[A, B](implicit rig: Rig[Tropical[A]], ord: Order[Tropical[A]], kb: Kleene[B]) =
new Kleene[ShortestPath[A, B]] {
def zero = ShortestPath(rig.zero, kb.zero)
def one = ShortestPath(rig.one, kb.one)
def plus(x: ShortestPath[A, B], y: ShortestPath[A, B]) = (x.a compare y.a) match {
case -1 => x
case 0 => ShortestPath(x.a + y.a, x.b + y.b)
case 1 => y
}
def times(x: ShortestPath[A, B], y: ShortestPath[A, B]) =
ShortestPath(x.a * y.a, x.b * y.b)
override def kstar(x: ShortestPath[A, B]) =
ShortestPath(rig.one, if (x.a === rig.one) x.b.kstar else kb.one)
}
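// A minimal sketch (illustrative only): ShortestPath pairs a Tropical cost
// with a Kleene-valued path description. plus keeps the cheaper alternative,
// times adds the costs and concatenates the descriptions.
def shortestPathSketch(): Unit = {
  val p = ShortestPath(Tropical(2), Expr(7))
  val q = ShortestPath(Tropical(5), Expr(9))
  println((p + q).show)  // 7[2]  -- the cheaper of the two alternatives
  println((p * q).show)  // 79[7] -- costs add, path expressions concatenate
}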
/**
* Language represents the set of every valid string in a regular
* language. Each W is a valid character, each Stream[W] is a (lazy)
* string, and SS[W] (i.e. Stream[Stream[W]]) is the complete set of
* all strings.
*/
case class Language[W](wss: SS[W]) {
def someWord: Option[List[W]] = wss.headOption.map(_.toList)
}
object Language {
def letter[W](w: W): Language[W] = Language(Stream(Stream(w)))
}
// handy type alias
type SS[W] = Stream[Stream[W]]
// type class instance for Show[Language[W]]
implicit def languageHasShow[W: Show] = new Show[Language[W]] {
def show(l: Language[W]) = Show[SS[W]].show(l.wss)
}
// type class instance for Kleene[Language[W]]
implicit def languageHasKleene[W] = new Kleene[Language[W]] {
def zero: Language[W] = Language(Stream.empty[Stream[W]])
def one: Language[W] = Language(Stream(Stream.empty[W]))
def plus(x: Language[W], y: Language[W]): Language[W] = {
def interleave(ws1: SS[W], ws2: SS[W]): SS[W] =
if (ws1.isEmpty) ws2 else ws1.head #:: interleave(ws2, ws1.tail)
Language(interleave(x.wss, y.wss))
}
def times(x: Language[W], y: Language[W]): Language[W] =
Language(x.wss.flatMap(ws1 => y.wss.map(ws2 => ws1 #::: ws2)))
override def kstar(x: Language[W]): Language[W] =
Language(Stream.empty #:: x.wss.flatMap(s => kstar(x).wss.map(s #::: _)))
}
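// A minimal sketch (illustrative only): a Language is a lazily enumerated set
// of words. times concatenates languages and kstar enumerates the infinite
// closure one word at a time, so taking a finite prefix is safe.
def languageSketch(): Unit = {
  val a = Language.letter('a')
  val b = Language.letter('b')
  println((a * b).someWord.map(_.mkString))            // Some(ab)
  println(a.kstar.wss.take(4).map(_.mkString).toList)  // List(, a, aa, aaa)
}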
/**
 * Compact[A] is either a finite value of A (CompactReal) or the point at
 * infinity (CompactInf). Adjoining the point at infinity makes asteration
 * total: 1* has no finite limit, so it maps to CompactInf.
 */
trait Compact[+A] {
def map[B: Field](f: A => B): Compact[B] = this match {
case CompactReal(a) => CompactReal(f(a))
case _ => CompactInf
}
}
case object CompactInf extends Compact[Nothing]
case class CompactReal[A: Field](a: A) extends Compact[A]
object Compact {
def apply[A: Field](a: A): Compact[A] = CompactReal(a)
}
implicit def compactHasShow[A: Show] = new Show[Compact[A]] {
def show(c: Compact[A]) = c match {
case CompactReal(a) => a.show
case _ => "∞"
}
}
implicit def compactIsStarRig[A: Field] = new StarRig[Compact[A]] {
val zero: Compact[A] = Compact(Field[A].zero)
val one: Compact[A] = Compact(Field[A].one)
def plus(x: Compact[A], y: Compact[A]): Compact[A] = (x, y) match {
case (CompactInf, _) => CompactInf
case (_, CompactInf) => CompactInf
case (CompactReal(a), CompactReal(b)) => Compact(a + b)
}
def times(x: Compact[A], y: Compact[A]): Compact[A] = (x, y) match {
case (`zero`, _) => zero
case (_, `zero`) => zero
case (CompactInf, _) => CompactInf
case (_, CompactInf) => CompactInf
case (CompactReal(a), CompactReal(b)) => Compact(a * b)
}
override def kstar(x: Compact[A]): Compact[A] = x match {
case `one` => CompactInf
case CompactInf => CompactInf
case CompactReal(a) => CompactReal((Field[A].one - a).reciprocal)
}
}
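// A minimal sketch (illustrative only): over the compactified reals,
// asteration is the geometric-series limit x* = 1 / (1 - x) for finite
// x != 1, while 1* is the point at infinity. The instance is called
// directly to avoid assuming any extra syntax import for StarRig.
def compactSketch(): Unit = {
  val sr = compactIsStarRig[Double]
  println(sr.kstar(Compact(0.5)).show)  // 2.0, i.e. 1 / (1 - 0.5)
  println(sr.kstar(sr.one).show)        // ∞
}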
/**
 * Walks a small directed graph through the matrix instances above:
 * adjacency, reflexive-transitive and transitive closures, edge labels,
 * and regular-expression path matrices.
 */
def graphExample() {
// our example graph will be 5x5
implicit val dim = Dim(5)
// edges for this example
val edges = List(
Edge(0, 1),
Edge(1, 2),
Edge(2, 3), Edge(2, 4),
Edge(3, 1),
Edge(4, 3)
)
// build the example graph
val example: Matrix[Boolean] = Graph(edges:_*)
// examine the graph
println("adjacency matrix:\\n%s" format example.show)
println("reflexive-transitive closure:\\n%s" format example.kstar.show)
println("transitive closure:\\n%s" format example.kplus.show)
val labeled = LabeledGraph(example)
println("labels:\\n%s" format labeled.show)
val expred = labeled.map(_.map(Expr.apply).getOrElse(Nul))
println("exprs:\\n%s" format expred.show)
println("path exprs:\\n%s" format expred.kstar.show)
}
def pathExample() {
// our example graph will be 6x6
implicit val dim = Dim(6)
val edges = List(
(Edge(0, 1), 7), (Edge(0, 2), 9), (Edge(0, 5), 14),
(Edge(1, 2), 10), (Edge(1, 3), 15),
(Edge(2, 3), 11), (Edge(2, 5), 2),
(Edge(3, 4), 6),
(Edge(4, 5), 9)
)
val weighted: Matrix[Tropical[Int]] = {
val m = ArrayMatrix(Array.fill(dim.n * dim.n)(Tropical.inf[Int]))
edges.foreach { case (Edge(y, x), n) =>
m(x, y) = Tropical(n)
m(y, x) = Tropical(n)
}
m
}
println("weights:\\n%s" format weighted.show)
println("least-cost:\\n%s" format weighted.kstar.show)
val annotated = Matrix[ShortestPath[Int, Expr[Edge]]] { (x, y) =>
weighted(x, y) match {
case Infinity => ShortestPath(Infinity, Kleene[Expr[Edge]].zero)
case Finite(n) => ShortestPath(Finite(n), Var(Edge(y, x)))
}
}
println("annotated-re:\\n" + annotated.show)
println("shortest-path-re:\\n" + annotated.kstar.show)
val langed = Matrix[ShortestPath[Int, Language[Edge]]] { (x, y) =>
weighted(x, y) match {
case Infinity => ShortestPath(Infinity, Kleene[Language[Edge]].zero)
case Finite(n) => ShortestPath(Finite(n), Language.letter(Edge(y, x)))
}
}
println("l-annotated:\\n" + langed.show)
println("l-shortest-path:\\n" + langed.kstar.map(_.b.someWord).show)
def evalExpr[A, B: Kleene](expr: Expr[A])(f: A => B): B = expr match {
case Nul => Kleene[B].zero
case Empty => Kleene[B].one
case Var(a) => f(a)
case Star(x) => evalExpr(x)(f).kstar
case Or(x, y) => evalExpr(x)(f) + evalExpr(y)(f)
case Then(x, y) => evalExpr(x)(f) * evalExpr(y)(f)
}
val costExprs: Matrix[Expr[Int]] = annotated.map {
case ShortestPath(Infinity, _) => Nul
case ShortestPath(Finite(n), _) => Expr(n)
}
val leastCostExprs: Matrix[Tropical[Int]] =
costExprs.kstar.map(a => evalExpr(a)(Tropical.apply))
println("least-cost via evalExpr:\\n" + leastCostExprs.show)
}
def solvingExample() {
// our example matrix is 2x2
implicit val dim = Dim(2)
val m: Matrix[Compact[Double]] = ArrayMatrix(Array(2.0, 1.0, 0.0, 2.0)).map(n => Compact(n))
println("2x2 matrix:\\n" + m.show)
println("2x2 asteration:\\n" + m.kstar.show)
def negate(m: Matrix[Compact[Double]]) = m.map(_.map(-_))
val one = Matrix.one[Compact[Double]]
def inverse(m: Matrix[Compact[Double]]) = (one + negate(m)).kstar
println("2x2 inverse:\\n" + inverse(m).show)
}
def languageExample() {
val bit = Language(Stream(Stream('0'), Stream('1')))
val lang1 = bit.pow(4)
val lang2 = bit.kstar
println(lang1.wss.take(10).map(_.take(10).mkString + "...").toList)
println(lang2.wss.take(10).map(_.take(10).mkString + "...").toList)
}
def main(args: Array[String]) {
graphExample()
pathExample()
solvingExample()
languageExample()
}
}
|
lrytz/spire
|
examples/src/main/scala/spire/example/kleene.scala
|
Scala
|
mit
| 19,596
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs105.boxes
import uk.gov.hmrc.ct.accounts.frs105.retriever.Frs105AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
case class AC416(value: Option[Int]) extends CtBoxIdentifier(name = "Staff costs (previous PoA)")
with CtOptionalInteger
with Input
with ValidatableBox[Frs105AccountsBoxRetriever]
with Debit {
override def validate(boxRetriever: Frs105AccountsBoxRetriever): Set[CtValidation] = {
collectErrors(
validateMoney(value)
)
}
}
|
hmrc/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/accounts/frs105/boxes/AC416.scala
|
Scala
|
apache-2.0
| 1,104
|
/*
* Copyright (C) 2016-2017 TU Delft, The Netherlands
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Authors: Hamid Mushtaq
*
*/
package hmushtaq.sparkga1.utils
import java.io.File
import java.io.InputStream
import java.io.FileInputStream
import java.util._
import htsjdk.samtools.util.BufferedLineReader
import htsjdk.samtools._
/**
*
* @author Hamid Mushtaq
*/
class SamRecsReader(is: InputStream, config: Configuration)
{
private val keyValues = scala.collection.mutable.ArrayBuffer.empty[(Integer, SAMRecord)]
private val mConfig = config
private var mReads = 0
private val validationStringency: ValidationStringency = ValidationStringency.LENIENT;
private val mReader = new BufferedLineReader(is);
private val samRecordFactory = new DefaultSAMRecordFactory();
private var mCurrentLine: String = null
def getKeyValuePairs() : Array[(Integer, SAMRecord)] =
{
return keyValues.toArray
}
def writeSAMRecord(sam: SAMRecord) : Integer =
{
var count = 0
val read1Ref = sam.getReferenceIndex()
if (!sam.getReadUnmappedFlag() && (read1Ref >= 0))
{
val region = sam.getAlignmentStart / mConfig.getChrRegionSize(read1Ref)
keyValues.append((read1Ref, sam))
count = count + 1;
}
return count
}
def writeSAMRecord(sam: SAMRecord,
writerMap: scala.collection.mutable.HashMap[(Integer, Integer), scala.collection.mutable.ArrayBuffer[SAMRecord]]) : Integer =
{
var count = 0
val chr = sam.getReferenceIndex()
if (!sam.getReadUnmappedFlag() && (chr >= 0))
{
val reg = sam.getAlignmentStart / mConfig.getChrRegionSize(chr)
if (!writerMap.contains((chr, reg)))
writerMap += (((chr, reg), scala.collection.mutable.ArrayBuffer.empty[SAMRecord]))
writerMap((chr, reg)).append(sam)
count = count + 1
}
return count
}
def advanceLine() : String =
{
mCurrentLine = mReader.readLine()
return mCurrentLine;
}
def parseSam(writerMap: scala.collection.mutable.HashMap[(Integer, Integer), scala.collection.mutable.ArrayBuffer[SAMRecord]]) : Integer =
{
var mParentReader: SAMFileReader = null
val headerCodec = new SAMTextHeaderCodec();
headerCodec.setValidationStringency(validationStringency)
val mFileHeader = headerCodec.decode(mReader, null)
val parser = new SAMLineParser(samRecordFactory, validationStringency, mFileHeader, null, null)
// now process each read...
var count = 0
var badLines = 0
mCurrentLine = mReader.readLine()
if (mCurrentLine == null)
println("Hamid >> mCurrentLine is null!")
while (mCurrentLine != null)
{
try
{
val samrecord = parser.parseLine(mCurrentLine, mReader.getLineNumber())
//if ((count != 0) && ((count % 500000) == 0))
// println("Hamid >> " + count + " records parsed.")
if (writerMap == null)
count += writeSAMRecord(samrecord)
else
count += writeSAMRecord(samrecord, writerMap)
}
catch
{
case e: Exception => badLines += 1
}
//advance line even if bad line
advanceLine();
}
mReads = count
//println("SAMstream counts " + count + " records");
return badLines
}
def getNumOfReads() : Integer =
{
return mReads
}
def close() =
{
mReader.close()
is.close()
}
}
|
HamidMushtaq/SparkGA1
|
program/src/main/scala/hmushtaq/sparkga1/utils/SamRecsReader.scala
|
Scala
|
gpl-3.0
| 3,850
|
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package util.parsing
package combinator
package token
/** This component provides the standard `Token`s for a simple, Scala-like language.
*/
trait StdTokens extends Tokens {
/** The class of keyword tokens */
case class Keyword(chars: String) extends Token {
override def toString = s"'$chars'"
}
/** The class of numeric literal tokens */
case class NumericLit(chars: String) extends Token {
override def toString = chars
}
/** The class of string literal tokens */
case class StringLit(chars: String) extends Token {
override def toString = s""""$chars""""
}
/** The class of identifier tokens */
case class Identifier(chars: String) extends Token {
override def toString = s"identifier $chars"
}
}
|
scala/scala-parser-combinators
|
shared/src/main/scala/scala/util/parsing/combinator/token/StdTokens.scala
|
Scala
|
apache-2.0
| 1,051
|
// Generated by https://github.com/tanishiking/semanticdb-for-scala3
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package dotty.tools.dotc.semanticdb
import dotty.tools.dotc.semanticdb.internal._
import scala.annotation.internal.sharable
@SerialVersionUID(0L)
final case class SymbolInformation(
symbol: _root_.scala.Predef.String = "",
language: dotty.tools.dotc.semanticdb.Language = dotty.tools.dotc.semanticdb.Language.UNKNOWN_LANGUAGE,
kind: dotty.tools.dotc.semanticdb.SymbolInformation.Kind = dotty.tools.dotc.semanticdb.SymbolInformation.Kind.UNKNOWN_KIND,
properties: _root_.scala.Int = 0,
displayName: _root_.scala.Predef.String = "",
signature: dotty.tools.dotc.semanticdb.Signature = dotty.tools.dotc.semanticdb.SymbolInformation._typemapper_signature.toCustom(dotty.tools.dotc.semanticdb.SignatureMessage.defaultInstance),
annotations: _root_.scala.Seq[dotty.tools.dotc.semanticdb.Annotation] = _root_.scala.Seq.empty,
access: dotty.tools.dotc.semanticdb.Access = dotty.tools.dotc.semanticdb.SymbolInformation._typemapper_access.toCustom(dotty.tools.dotc.semanticdb.AccessMessage.defaultInstance),
overriddenSymbols: _root_.scala.Seq[_root_.scala.Predef.String] = _root_.scala.Seq.empty,
documentation: _root_.scala.Option[dotty.tools.dotc.semanticdb.Documentation] = _root_.scala.None
) extends SemanticdbGeneratedMessage derives CanEqual {
@transient @sharable
private[this] var __serializedSizeMemoized: _root_.scala.Int = 0
private[this] def __computeSerializedSize(): _root_.scala.Int = {
var __size = 0
{
val __value = symbol
if (!__value.isEmpty) {
__size += SemanticdbOutputStream.computeStringSize(1, __value)
}
};
{
val __value = language.value
if (__value != 0) {
__size += SemanticdbOutputStream.computeEnumSize(16, __value)
}
};
{
val __value = kind.value
if (__value != 0) {
__size += SemanticdbOutputStream.computeEnumSize(3, __value)
}
};
{
val __value = properties
if (__value != 0) {
__size += SemanticdbOutputStream.computeInt32Size(4, __value)
}
};
{
val __value = displayName
if (!__value.isEmpty) {
__size += SemanticdbOutputStream.computeStringSize(5, __value)
}
};
{
val __value = dotty.tools.dotc.semanticdb.SymbolInformation._typemapper_signature.toBase(signature)
if (__value.serializedSize != 0) {
__size += 2 + SemanticdbOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
}
};
annotations.foreach { __item =>
val __value = __item
__size += 1 + SemanticdbOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
}
{
val __value = dotty.tools.dotc.semanticdb.SymbolInformation._typemapper_access.toBase(access)
if (__value.serializedSize != 0) {
__size += 2 + SemanticdbOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
}
};
overriddenSymbols.foreach { __item =>
val __value = __item
__size += SemanticdbOutputStream.computeStringSize(19, __value)
}
if (documentation.isDefined) {
val __value = documentation.get
__size += 2 + SemanticdbOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
};
__size
}
override def serializedSize: _root_.scala.Int = {
var __size = __serializedSizeMemoized
if (__size == 0) {
__size = __computeSerializedSize() + 1
__serializedSizeMemoized = __size
}
__size - 1
}
def writeTo(`_output__`: SemanticdbOutputStream): _root_.scala.Unit = {
{
val __v = symbol
if (!__v.isEmpty) {
_output__.writeString(1, __v)
}
};
{
val __v = kind.value
if (__v != 0) {
_output__.writeEnum(3, __v)
}
};
{
val __v = properties
if (__v != 0) {
_output__.writeInt32(4, __v)
}
};
{
val __v = displayName
if (!__v.isEmpty) {
_output__.writeString(5, __v)
}
};
annotations.foreach { __v =>
val __m = __v
_output__.writeTag(13, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
{
val __v = language.value
if (__v != 0) {
_output__.writeEnum(16, __v)
}
};
{
val __v = dotty.tools.dotc.semanticdb.SymbolInformation._typemapper_signature.toBase(signature)
if (__v.serializedSize != 0) {
_output__.writeTag(17, 2)
_output__.writeUInt32NoTag(__v.serializedSize)
__v.writeTo(_output__)
}
};
{
val __v = dotty.tools.dotc.semanticdb.SymbolInformation._typemapper_access.toBase(access)
if (__v.serializedSize != 0) {
_output__.writeTag(18, 2)
_output__.writeUInt32NoTag(__v.serializedSize)
__v.writeTo(_output__)
}
};
overriddenSymbols.foreach { __v =>
val __m = __v
_output__.writeString(19, __m)
};
documentation.foreach { __v =>
val __m = __v
_output__.writeTag(20, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
}
def withSymbol(__v: _root_.scala.Predef.String): SymbolInformation = copy(symbol = __v)
def withLanguage(__v: dotty.tools.dotc.semanticdb.Language): SymbolInformation = copy(language = __v)
def withKind(__v: dotty.tools.dotc.semanticdb.SymbolInformation.Kind): SymbolInformation = copy(kind = __v)
def withProperties(__v: _root_.scala.Int): SymbolInformation = copy(properties = __v)
def withDisplayName(__v: _root_.scala.Predef.String): SymbolInformation = copy(displayName = __v)
def withSignature(__v: dotty.tools.dotc.semanticdb.Signature): SymbolInformation = copy(signature = __v)
def clearAnnotations = copy(annotations = _root_.scala.Seq.empty)
def addAnnotations(__vs: dotty.tools.dotc.semanticdb.Annotation *): SymbolInformation = addAllAnnotations(__vs)
def addAllAnnotations(__vs: Iterable[dotty.tools.dotc.semanticdb.Annotation]): SymbolInformation = copy(annotations = annotations ++ __vs)
def withAnnotations(__v: _root_.scala.Seq[dotty.tools.dotc.semanticdb.Annotation]): SymbolInformation = copy(annotations = __v)
def withAccess(__v: dotty.tools.dotc.semanticdb.Access): SymbolInformation = copy(access = __v)
def clearOverriddenSymbols = copy(overriddenSymbols = _root_.scala.Seq.empty)
def addOverriddenSymbols(__vs: _root_.scala.Predef.String *): SymbolInformation = addAllOverriddenSymbols(__vs)
def addAllOverriddenSymbols(__vs: Iterable[_root_.scala.Predef.String]): SymbolInformation = copy(overriddenSymbols = overriddenSymbols ++ __vs)
def withOverriddenSymbols(__v: _root_.scala.Seq[_root_.scala.Predef.String]): SymbolInformation = copy(overriddenSymbols = __v)
def getDocumentation: dotty.tools.dotc.semanticdb.Documentation = documentation.getOrElse(dotty.tools.dotc.semanticdb.Documentation.defaultInstance)
def clearDocumentation: SymbolInformation = copy(documentation = _root_.scala.None)
def withDocumentation(__v: dotty.tools.dotc.semanticdb.Documentation): SymbolInformation = copy(documentation = Option(__v))
// @@protoc_insertion_point(GeneratedMessage[dotty.tools.dotc.semanticdb.SymbolInformation])
}
object SymbolInformation extends SemanticdbGeneratedMessageCompanion[dotty.tools.dotc.semanticdb.SymbolInformation] {
implicit def messageCompanion: SemanticdbGeneratedMessageCompanion[dotty.tools.dotc.semanticdb.SymbolInformation] = this
def parseFrom(`_input__`: SemanticdbInputStream): dotty.tools.dotc.semanticdb.SymbolInformation = {
var __symbol: _root_.scala.Predef.String = ""
var __language: dotty.tools.dotc.semanticdb.Language = dotty.tools.dotc.semanticdb.Language.UNKNOWN_LANGUAGE
var __kind: dotty.tools.dotc.semanticdb.SymbolInformation.Kind = dotty.tools.dotc.semanticdb.SymbolInformation.Kind.UNKNOWN_KIND
var __properties: _root_.scala.Int = 0
var __displayName: _root_.scala.Predef.String = ""
var __signature: _root_.scala.Option[dotty.tools.dotc.semanticdb.SignatureMessage] = _root_.scala.None
val __annotations: _root_.scala.collection.immutable.VectorBuilder[dotty.tools.dotc.semanticdb.Annotation] = new _root_.scala.collection.immutable.VectorBuilder[dotty.tools.dotc.semanticdb.Annotation]
var __access: _root_.scala.Option[dotty.tools.dotc.semanticdb.AccessMessage] = _root_.scala.None
val __overriddenSymbols: _root_.scala.collection.immutable.VectorBuilder[_root_.scala.Predef.String] = new _root_.scala.collection.immutable.VectorBuilder[_root_.scala.Predef.String]
var __documentation: _root_.scala.Option[dotty.tools.dotc.semanticdb.Documentation] = _root_.scala.None
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 10 =>
__symbol = _input__.readStringRequireUtf8()
case 128 =>
__language = dotty.tools.dotc.semanticdb.Language.fromValue(_input__.readEnum())
case 24 =>
__kind = dotty.tools.dotc.semanticdb.SymbolInformation.Kind.fromValue(_input__.readEnum())
case 32 =>
__properties = _input__.readInt32()
case 42 =>
__displayName = _input__.readStringRequireUtf8()
case 138 =>
__signature = _root_.scala.Some(__signature.fold(LiteParser.readMessage[dotty.tools.dotc.semanticdb.SignatureMessage](_input__))(LiteParser.readMessage(_input__, _)))
case 106 =>
__annotations += LiteParser.readMessage[dotty.tools.dotc.semanticdb.Annotation](_input__)
case 146 =>
__access = _root_.scala.Some(__access.fold(LiteParser.readMessage[dotty.tools.dotc.semanticdb.AccessMessage](_input__))(LiteParser.readMessage(_input__, _)))
case 154 =>
__overriddenSymbols += _input__.readStringRequireUtf8()
case 162 =>
__documentation = Option(__documentation.fold(LiteParser.readMessage[dotty.tools.dotc.semanticdb.Documentation](_input__))(LiteParser.readMessage(_input__, _)))
case tag => _input__.skipField(tag)
}
}
dotty.tools.dotc.semanticdb.SymbolInformation(
symbol = __symbol,
language = __language,
kind = __kind,
properties = __properties,
displayName = __displayName,
signature = dotty.tools.dotc.semanticdb.SymbolInformation._typemapper_signature.toCustom(__signature.getOrElse(dotty.tools.dotc.semanticdb.SignatureMessage.defaultInstance)),
annotations = __annotations.result(),
access = dotty.tools.dotc.semanticdb.SymbolInformation._typemapper_access.toCustom(__access.getOrElse(dotty.tools.dotc.semanticdb.AccessMessage.defaultInstance)),
overriddenSymbols = __overriddenSymbols.result(),
documentation = __documentation
)
}
lazy val defaultInstance = dotty.tools.dotc.semanticdb.SymbolInformation(
symbol = "",
language = dotty.tools.dotc.semanticdb.Language.UNKNOWN_LANGUAGE,
kind = dotty.tools.dotc.semanticdb.SymbolInformation.Kind.UNKNOWN_KIND,
properties = 0,
displayName = "",
signature = dotty.tools.dotc.semanticdb.SymbolInformation._typemapper_signature.toCustom(dotty.tools.dotc.semanticdb.SignatureMessage.defaultInstance),
annotations = _root_.scala.Seq.empty,
access = dotty.tools.dotc.semanticdb.SymbolInformation._typemapper_access.toCustom(dotty.tools.dotc.semanticdb.AccessMessage.defaultInstance),
overriddenSymbols = _root_.scala.Seq.empty,
documentation = _root_.scala.None
)
sealed abstract class Kind(val value: _root_.scala.Int) extends SemanticdbGeneratedEnum derives CanEqual {
type EnumType = Kind
def isUnknownKind: _root_.scala.Boolean = false
def isLocal: _root_.scala.Boolean = false
def isField: _root_.scala.Boolean = false
def isMethod: _root_.scala.Boolean = false
def isConstructor: _root_.scala.Boolean = false
def isMacro: _root_.scala.Boolean = false
def isType: _root_.scala.Boolean = false
def isParameter: _root_.scala.Boolean = false
def isSelfParameter: _root_.scala.Boolean = false
def isTypeParameter: _root_.scala.Boolean = false
def isObject: _root_.scala.Boolean = false
def isPackage: _root_.scala.Boolean = false
def isPackageObject: _root_.scala.Boolean = false
def isClass: _root_.scala.Boolean = false
def isTrait: _root_.scala.Boolean = false
def isInterface: _root_.scala.Boolean = false
final def asRecognized: _root_.scala.Option[dotty.tools.dotc.semanticdb.SymbolInformation.Kind.Recognized] = if (isUnrecognized) _root_.scala.None else _root_.scala.Some(this.asInstanceOf[dotty.tools.dotc.semanticdb.SymbolInformation.Kind.Recognized])
}
object Kind {
sealed trait Recognized extends Kind
@SerialVersionUID(0L)
case object UNKNOWN_KIND extends Kind(0) with Kind.Recognized {
val index = 0
val name = "UNKNOWN_KIND"
override def isUnknownKind: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object LOCAL extends Kind(19) with Kind.Recognized {
val index = 1
val name = "LOCAL"
override def isLocal: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object FIELD extends Kind(20) with Kind.Recognized {
val index = 2
val name = "FIELD"
override def isField: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object METHOD extends Kind(3) with Kind.Recognized {
val index = 3
val name = "METHOD"
override def isMethod: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object CONSTRUCTOR extends Kind(21) with Kind.Recognized {
val index = 4
val name = "CONSTRUCTOR"
override def isConstructor: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object MACRO extends Kind(6) with Kind.Recognized {
val index = 5
val name = "MACRO"
override def isMacro: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object TYPE extends Kind(7) with Kind.Recognized {
val index = 6
val name = "TYPE"
override def isType: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object PARAMETER extends Kind(8) with Kind.Recognized {
val index = 7
val name = "PARAMETER"
override def isParameter: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object SELF_PARAMETER extends Kind(17) with Kind.Recognized {
val index = 8
val name = "SELF_PARAMETER"
override def isSelfParameter: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object TYPE_PARAMETER extends Kind(9) with Kind.Recognized {
val index = 9
val name = "TYPE_PARAMETER"
override def isTypeParameter: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object OBJECT extends Kind(10) with Kind.Recognized {
val index = 10
val name = "OBJECT"
override def isObject: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object PACKAGE extends Kind(11) with Kind.Recognized {
val index = 11
val name = "PACKAGE"
override def isPackage: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object PACKAGE_OBJECT extends Kind(12) with Kind.Recognized {
val index = 12
val name = "PACKAGE_OBJECT"
override def isPackageObject: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object CLASS extends Kind(13) with Kind.Recognized {
val index = 13
val name = "CLASS"
override def isClass: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object TRAIT extends Kind(14) with Kind.Recognized {
val index = 14
val name = "TRAIT"
override def isTrait: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object INTERFACE extends Kind(18) with Kind.Recognized {
val index = 15
val name = "INTERFACE"
override def isInterface: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
final case class Unrecognized(unrecognizedValue: _root_.scala.Int) extends Kind(unrecognizedValue) with SemanticdbUnrecognizedEnum
lazy val values = scala.collection.immutable.Seq(UNKNOWN_KIND, LOCAL, FIELD, METHOD, CONSTRUCTOR, MACRO, TYPE, PARAMETER, SELF_PARAMETER, TYPE_PARAMETER, OBJECT, PACKAGE, PACKAGE_OBJECT, CLASS, TRAIT, INTERFACE)
def fromValue(__value: _root_.scala.Int): Kind = __value match {
case 0 => UNKNOWN_KIND
case 3 => METHOD
case 6 => MACRO
case 7 => TYPE
case 8 => PARAMETER
case 9 => TYPE_PARAMETER
case 10 => OBJECT
case 11 => PACKAGE
case 12 => PACKAGE_OBJECT
case 13 => CLASS
case 14 => TRAIT
case 17 => SELF_PARAMETER
case 18 => INTERFACE
case 19 => LOCAL
case 20 => FIELD
case 21 => CONSTRUCTOR
case __other => Unrecognized(__other)
}
}
sealed abstract class Property(val value: _root_.scala.Int) extends SemanticdbGeneratedEnum derives CanEqual {
type EnumType = Property
def isUnknownProperty: _root_.scala.Boolean = false
def isAbstract: _root_.scala.Boolean = false
def isFinal: _root_.scala.Boolean = false
def isSealed: _root_.scala.Boolean = false
def isImplicit: _root_.scala.Boolean = false
def isLazy: _root_.scala.Boolean = false
def isCase: _root_.scala.Boolean = false
def isCovariant: _root_.scala.Boolean = false
def isContravariant: _root_.scala.Boolean = false
def isVal: _root_.scala.Boolean = false
def isVar: _root_.scala.Boolean = false
def isStatic: _root_.scala.Boolean = false
def isPrimary: _root_.scala.Boolean = false
def isEnum: _root_.scala.Boolean = false
def isDefault: _root_.scala.Boolean = false
def isGiven: _root_.scala.Boolean = false
def isInline: _root_.scala.Boolean = false
def isOpen: _root_.scala.Boolean = false
def isTransparent: _root_.scala.Boolean = false
def isInfix: _root_.scala.Boolean = false
def isOpaque: _root_.scala.Boolean = false
final def asRecognized: _root_.scala.Option[dotty.tools.dotc.semanticdb.SymbolInformation.Property.Recognized] = if (isUnrecognized) _root_.scala.None else _root_.scala.Some(this.asInstanceOf[dotty.tools.dotc.semanticdb.SymbolInformation.Property.Recognized])
}
object Property {
sealed trait Recognized extends Property
@SerialVersionUID(0L)
case object UNKNOWN_PROPERTY extends Property(0) with Property.Recognized {
val index = 0
val name = "UNKNOWN_PROPERTY"
override def isUnknownProperty: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object ABSTRACT extends Property(4) with Property.Recognized {
val index = 1
val name = "ABSTRACT"
override def isAbstract: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object FINAL extends Property(8) with Property.Recognized {
val index = 2
val name = "FINAL"
override def isFinal: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object SEALED extends Property(16) with Property.Recognized {
val index = 3
val name = "SEALED"
override def isSealed: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object IMPLICIT extends Property(32) with Property.Recognized {
val index = 4
val name = "IMPLICIT"
override def isImplicit: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object LAZY extends Property(64) with Property.Recognized {
val index = 5
val name = "LAZY"
override def isLazy: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object CASE extends Property(128) with Property.Recognized {
val index = 6
val name = "CASE"
override def isCase: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object COVARIANT extends Property(256) with Property.Recognized {
val index = 7
val name = "COVARIANT"
override def isCovariant: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object CONTRAVARIANT extends Property(512) with Property.Recognized {
val index = 8
val name = "CONTRAVARIANT"
override def isContravariant: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object VAL extends Property(1024) with Property.Recognized {
val index = 9
val name = "VAL"
override def isVal: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object VAR extends Property(2048) with Property.Recognized {
val index = 10
val name = "VAR"
override def isVar: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object STATIC extends Property(4096) with Property.Recognized {
val index = 11
val name = "STATIC"
override def isStatic: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object PRIMARY extends Property(8192) with Property.Recognized {
val index = 12
val name = "PRIMARY"
override def isPrimary: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object ENUM extends Property(16384) with Property.Recognized {
val index = 13
val name = "ENUM"
override def isEnum: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object DEFAULT extends Property(32768) with Property.Recognized {
val index = 14
val name = "DEFAULT"
override def isDefault: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object GIVEN extends Property(65536) with Property.Recognized {
val index = 15
val name = "GIVEN"
override def isGiven: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object INLINE extends Property(131072) with Property.Recognized {
val index = 16
val name = "INLINE"
override def isInline: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object OPEN extends Property(262144) with Property.Recognized {
val index = 17
val name = "OPEN"
override def isOpen: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object TRANSPARENT extends Property(524288) with Property.Recognized {
val index = 18
val name = "TRANSPARENT"
override def isTransparent: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object INFIX extends Property(1048576) with Property.Recognized {
val index = 19
val name = "INFIX"
override def isInfix: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object OPAQUE extends Property(2097152) with Property.Recognized {
val index = 20
val name = "OPAQUE"
override def isOpaque: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
final case class Unrecognized(unrecognizedValue: _root_.scala.Int) extends Property(unrecognizedValue) with SemanticdbUnrecognizedEnum
lazy val values = scala.collection.immutable.Seq(UNKNOWN_PROPERTY, ABSTRACT, FINAL, SEALED, IMPLICIT, LAZY, CASE, COVARIANT, CONTRAVARIANT, VAL, VAR, STATIC, PRIMARY, ENUM, DEFAULT, GIVEN, INLINE, OPEN, TRANSPARENT, INFIX, OPAQUE)
def fromValue(__value: _root_.scala.Int): Property = __value match {
case 0 => UNKNOWN_PROPERTY
case 4 => ABSTRACT
case 8 => FINAL
case 16 => SEALED
case 32 => IMPLICIT
case 64 => LAZY
case 128 => CASE
case 256 => COVARIANT
case 512 => CONTRAVARIANT
case 1024 => VAL
case 2048 => VAR
case 4096 => STATIC
case 8192 => PRIMARY
case 16384 => ENUM
case 32768 => DEFAULT
case 65536 => GIVEN
case 131072 => INLINE
case 262144 => OPEN
case 524288 => TRANSPARENT
case 1048576 => INFIX
case 2097152 => OPAQUE
case __other => Unrecognized(__other)
}
}
final val SYMBOL_FIELD_NUMBER = 1
final val LANGUAGE_FIELD_NUMBER = 16
final val KIND_FIELD_NUMBER = 3
final val PROPERTIES_FIELD_NUMBER = 4
final val DISPLAY_NAME_FIELD_NUMBER = 5
final val SIGNATURE_FIELD_NUMBER = 17
final val ANNOTATIONS_FIELD_NUMBER = 13
final val ACCESS_FIELD_NUMBER = 18
final val OVERRIDDEN_SYMBOLS_FIELD_NUMBER = 19
final val DOCUMENTATION_FIELD_NUMBER = 20
@transient @sharable
private[semanticdb] val _typemapper_signature: SemanticdbTypeMapper[dotty.tools.dotc.semanticdb.SignatureMessage, dotty.tools.dotc.semanticdb.Signature] = implicitly[SemanticdbTypeMapper[dotty.tools.dotc.semanticdb.SignatureMessage, dotty.tools.dotc.semanticdb.Signature]]
@transient @sharable
private[semanticdb] val _typemapper_access: SemanticdbTypeMapper[dotty.tools.dotc.semanticdb.AccessMessage, dotty.tools.dotc.semanticdb.Access] = implicitly[SemanticdbTypeMapper[dotty.tools.dotc.semanticdb.AccessMessage, dotty.tools.dotc.semanticdb.Access]]
def of(
symbol: _root_.scala.Predef.String,
language: dotty.tools.dotc.semanticdb.Language,
kind: dotty.tools.dotc.semanticdb.SymbolInformation.Kind,
properties: _root_.scala.Int,
displayName: _root_.scala.Predef.String,
signature: dotty.tools.dotc.semanticdb.Signature,
annotations: _root_.scala.Seq[dotty.tools.dotc.semanticdb.Annotation],
access: dotty.tools.dotc.semanticdb.Access,
overriddenSymbols: _root_.scala.Seq[_root_.scala.Predef.String],
documentation: _root_.scala.Option[dotty.tools.dotc.semanticdb.Documentation]
): _root_.dotty.tools.dotc.semanticdb.SymbolInformation = _root_.dotty.tools.dotc.semanticdb.SymbolInformation(
symbol,
language,
kind,
properties,
displayName,
signature,
annotations,
access,
overriddenSymbols,
documentation
)
// @@protoc_insertion_point(GeneratedMessageCompanion[dotty.tools.dotc.semanticdb.SymbolInformation])
}
|
dotty-staging/dotty
|
compiler/src/dotty/tools/dotc/semanticdb/generated/SymbolInformation.scala
|
Scala
|
apache-2.0
| 26,712
|
/*
* Part of NDLA learningpath-api.
* Copyright (C) 2016 NDLA
*
* See LICENSE
*
*/
package no.ndla.learningpathapi
import com.typesafe.scalalogging.LazyLogging
import no.ndla.learningpathapi.model.domain.Language
import no.ndla.network.{AuthUser, Domains}
import no.ndla.network.secrets.PropertyKeys
import scala.util.Properties._
import scala.util.{Failure, Success}
object LearningpathApiProperties extends LazyLogging {
val IsKubernetes: Boolean = propOrNone("NDLA_IS_KUBERNETES").isDefined
val Environment: String = propOrElse("NDLA_ENVIRONMENT", "local")
val ApplicationName = "learningpath-api"
val Auth0LoginEndpoint = s"https://${AuthUser.getAuth0HostForEnv(Environment)}/authorize"
val ApplicationPort: Int = propOrElse("APPLICATION_PORT", "80").toInt
val DefaultLanguage: String = propOrElse("DEFAULT_LANGUAGE", "nb")
val ContactName: String = propOrElse("CONTACT_NAME", "NDLA")
val ContactUrl: String = propOrElse("CONTACT_URL", "ndla.no")
val ContactEmail: String = propOrElse("CONTACT_EMAIL", "support+api@ndla.no")
val TermsUrl: String = propOrElse("TERMS_URL", "https://om.ndla.no/tos")
lazy val Domain: String = propOrElse("BACKEND_API_DOMAIN", Domains.get(Environment))
val MetaMaxConnections = 10
val SearchIndex: String = propOrElse("SEARCH_INDEX_NAME", "learningpaths")
val SearchDocument = "learningpath"
val DefaultPageSize = 10
val MaxPageSize = 10000
val IndexBulkSize = 1000
val ApiGatewayHost: String = propOrElse("API_GATEWAY_HOST", "api-gateway.ndla-local")
val ImageApiHost: String = propOrElse("IMAGE_API_HOST", "image-api.ndla-local")
val InternalImageApiUrl = s"$ImageApiHost/image-api/v2/images"
val SearchApiHost: String = propOrElse("SEARCH_API_HOST", "search-api.ndla-local")
val NdlaFrontendHost: String = propOrElse("NDLA_FRONTEND_HOST", Environment match {
case "prod" => "ndla.no"
case "local" => "localhost:30017"
case _ => s"$Environment.ndla.no"
})
val NdlaFrontendProtocol: String = propOrElse("NDLA_FRONTEND_PROTOCOL", Environment match {
case "local" => "http"
case _ => "https"
})
def EnvironmentUrls(env: String): Set[String] = {
Set(
s"$env.ndla.no",
s"www.$env.ndla.no",
s"ndla-frontend.$env.api.ndla.no"
)
}
val NdlaFrontendHostNames: Set[String] = Set(
"ndla.no",
"www.ndla.no",
s"ndla-frontend.api.ndla.no",
"localhost",
) ++
EnvironmentUrls(Environment) ++
EnvironmentUrls("test") ++
EnvironmentUrls("staging")
val UsernameHeader = "X-Consumer-Username"
val ElasticSearchIndexMaxResultWindow = 10000
val ElasticSearchScrollKeepAlive = "1m"
val InitialScrollContextKeywords = List("0", "initial", "start", "first")
val BasicHtmlTags = List("b",
"blockquote",
"br",
"cite",
"code",
"dd",
"dl",
"dt",
"em",
"i",
"li",
"ol",
"p",
"pre",
"q",
"small",
"strike",
"strong",
"sub",
"sup",
"u",
"ul")
val CorrelationIdKey = "correlationID"
val CorrelationIdHeader = "X-Correlation-ID"
def MetaUserName: String = prop(PropertyKeys.MetaUserNameKey)
def MetaPassword: String = prop(PropertyKeys.MetaPasswordKey)
def MetaResource: String = prop(PropertyKeys.MetaResourceKey)
def MetaServer: String = prop(PropertyKeys.MetaServerKey)
def MetaPort: Int = prop(PropertyKeys.MetaPortKey).toInt
def MetaSchema: String = prop(PropertyKeys.MetaSchemaKey)
val SearchServer: String =
propOrElse("SEARCH_SERVER", "http://search-learningpath-api.ndla-local")
val RunWithSignedSearchRequests: Boolean =
propOrElse("RUN_WITH_SIGNED_SEARCH_REQUESTS", "true").toBoolean
def prop(key: String): String = {
propOrElse(key, throw new RuntimeException(s"Unable to load property $key"))
}
def propOrElse(key: String, default: => String): String = {
propOrNone(key) match {
case Some(prop) => prop
case _ => default
}
}
}
|
NDLANO/learningpath-api
|
src/main/scala/no/ndla/learningpathapi/LearningpathApiProperties.scala
|
Scala
|
gpl-3.0
| 4,462
|
package frameless
package ml
package classification
import shapeless.test.illTyped
import org.apache.spark.ml.linalg._
import frameless.ml.params.trees.FeatureSubsetStrategy
import org.scalacheck.{Arbitrary, Gen}
import org.scalacheck.Prop._
import org.scalatest.matchers.must.Matchers
class TypedRandomForestClassifierTests extends FramelessMlSuite with Matchers {
implicit val arbDouble: Arbitrary[Double] =
Arbitrary(Gen.choose(1, 99).map(_.toDouble)) // num classes must be between 0 and 100 for the test
implicit val arbVectorNonEmpty: Arbitrary[Vector] =
Arbitrary(Generators.arbVector.arbitrary suchThat (_.size > 0)) // vector must not be empty for RandomForestClassifier
import Generators.arbTreesFeaturesSubsetStrategy
test("fit() returns a correct TypedTransformer") {
val prop = forAll { x2: X2[Double, Vector] =>
val rf = TypedRandomForestClassifier[X2[Double, Vector]]
val ds = TypedDataset.create(Seq(x2))
val model = rf.fit(ds).run()
val pDs = model.transform(ds).as[X5[Double, Vector, Vector, Vector, Double]]
pDs.select(pDs.col('a), pDs.col('b)).collect.run() == Seq(x2.a -> x2.b)
}
val prop2 = forAll { x2: X2[Vector, Double] =>
val rf = TypedRandomForestClassifier[X2[Vector, Double]]
val ds = TypedDataset.create(Seq(x2))
val model = rf.fit(ds).run()
val pDs = model.transform(ds).as[X5[Vector, Double, Vector, Vector, Double]]
pDs.select(pDs.col('a), pDs.col('b)).collect.run() == Seq(x2.a -> x2.b)
}
def prop3[A: TypedEncoder: Arbitrary] = forAll { x3: X3[Vector, Double, A] =>
val rf = TypedRandomForestClassifier[X2[Vector, Double]]
val ds = TypedDataset.create(Seq(x3))
val model = rf.fit(ds).run()
val pDs = model.transform(ds).as[X6[Vector, Double, A, Vector, Vector, Double]]
pDs.select(pDs.col('a), pDs.col('b), pDs.col('c)).collect.run() == Seq((x3.a, x3.b, x3.c))
}
check(prop)
check(prop2)
check(prop3[String])
check(prop3[Double])
}
test("param setting is retained") {
val prop = forAll { featureSubsetStrategy: FeatureSubsetStrategy =>
val rf = TypedRandomForestClassifier[X2[Double, Vector]]
.setNumTrees(10)
.setMaxBins(100)
.setFeatureSubsetStrategy(featureSubsetStrategy)
.setMaxDepth(10)
.setMaxMemoryInMB(100)
.setMinInfoGain(0.1D)
.setMinInstancesPerNode(2)
.setSubsamplingRate(0.9D)
val ds = TypedDataset.create(Seq(X2(0D, Vectors.dense(0D))))
val model = rf.fit(ds).run()
model.transformer.getNumTrees == 10 &&
model.transformer.getMaxBins == 100 &&
model.transformer.getFeatureSubsetStrategy == featureSubsetStrategy.sparkValue &&
model.transformer.getMaxDepth == 10 &&
model.transformer.getMaxMemoryInMB == 100 &&
model.transformer.getMinInfoGain == 0.1D &&
model.transformer.getMinInstancesPerNode == 2 &&
model.transformer.getSubsamplingRate == 0.9D
}
check(prop)
}
test("create() compiles only with correct inputs") {
illTyped("TypedRandomForestClassifier.create[Double]()")
illTyped("TypedRandomForestClassifier.create[X1[Double]]()")
illTyped("TypedRandomForestClassifier.create[X2[Double, Double]]()")
illTyped("TypedRandomForestClassifier.create[X3[Vector, Double, Int]]()")
illTyped("TypedRandomForestClassifier.create[X2[Vector, String]]()")
}
}
|
imarios/frameless
|
ml/src/test/scala/frameless/ml/classification/TypedRandomForestClassifierTests.scala
|
Scala
|
apache-2.0
| 3,447
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.io.{File, InterruptedIOException, IOException, UncheckedIOException}
import java.nio.channels.ClosedByInterruptException
import java.util.concurrent.{CountDownLatch, ExecutionException, TimeUnit}
import scala.concurrent.TimeoutException
import scala.reflect.ClassTag
import scala.util.control.ControlThrowable
import com.google.common.util.concurrent.UncheckedExecutionException
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.scalatest.time.SpanSugar._
import org.apache.spark.{SparkConf, SparkContext, TaskContext, TestUtils}
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.plans.logical.Range
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes
import org.apache.spark.sql.catalyst.util.DateTimeConstants.MICROS_PER_MILLIS
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.{LocalLimitExec, SimpleMode, SparkPlan}
import org.apache.spark.sql.execution.command.ExplainCommand
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.sources.{ContinuousMemoryStream, MemorySink}
import org.apache.spark.sql.execution.streaming.state.{StateStore, StateStoreConf, StateStoreId, StateStoreProvider}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.StreamSourceProvider
import org.apache.spark.sql.streaming.util.{BlockOnStopSourceProvider, StreamManualClock}
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}
import org.apache.spark.util.Utils
class StreamSuite extends StreamTest {
import testImplicits._
test("map with recovery") {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map(_ + 1)
testStream(mapped)(
AddData(inputData, 1, 2, 3),
StartStream(),
CheckAnswer(2, 3, 4),
StopStream,
AddData(inputData, 4, 5, 6),
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7))
}
test("join") {
// Make a table and ensure it will be broadcast.
val smallTable = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word")
// Join the input stream with a table.
val inputData = MemoryStream[Int]
val joined = inputData.toDS().toDF().join(smallTable, $"value" === $"number")
testStream(joined)(
AddData(inputData, 1, 2, 3),
CheckAnswer(Row(1, 1, "one"), Row(2, 2, "two")),
AddData(inputData, 4),
CheckAnswer(Row(1, 1, "one"), Row(2, 2, "two"), Row(4, 4, "four")))
}
test("StreamingRelation.computeStats") {
withTempDir { dir =>
val df = spark.readStream.format("csv").schema(StructType(Seq())).load(dir.getCanonicalPath)
val streamingRelation = df.logicalPlan collect {
case s: StreamingRelation => s
}
assert(streamingRelation.nonEmpty, "cannot find StreamingRelation")
assert(
streamingRelation.head.computeStats.sizeInBytes ==
spark.sessionState.conf.defaultSizeInBytes)
}
}
test("StreamingRelationV2.computeStats") {
val streamingRelation = spark.readStream.format("rate").load().logicalPlan collect {
case s: StreamingRelationV2 => s
}
assert(streamingRelation.nonEmpty, "cannot find StreamingRelationV2")
assert(
streamingRelation.head.computeStats.sizeInBytes == spark.sessionState.conf.defaultSizeInBytes)
}
test("StreamingExecutionRelation.computeStats") {
val memoryStream = MemoryStream[Int]
val executionRelation = StreamingExecutionRelation(
memoryStream, memoryStream.encoder.schema.toAttributes)(memoryStream.sqlContext.sparkSession)
assert(executionRelation.computeStats.sizeInBytes == spark.sessionState.conf.defaultSizeInBytes)
}
test("explain join with a normal source") {
// This test triggers CostBasedJoinReorder to call `computeStats`
withSQLConf(SQLConf.CBO_ENABLED.key -> "true", SQLConf.JOIN_REORDER_ENABLED.key -> "true") {
val smallTable = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word")
val smallTable2 = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word")
val smallTable3 = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word")
// Join the input stream with a table.
val df = spark.readStream.format("rate").load()
val joined = df.join(smallTable, smallTable("number") === $"value")
.join(smallTable2, smallTable2("number") === $"value")
.join(smallTable3, smallTable3("number") === $"value")
val outputStream = new java.io.ByteArrayOutputStream()
Console.withOut(outputStream) {
joined.explain(true)
}
assert(outputStream.toString.contains("StreamingRelation"))
}
}
test("explain join with MemoryStream") {
// This test triggers CostBasedJoinReorder to call `computeStats`
// Because MemoryStream doesn't use DataSource code path, we need a separate test.
withSQLConf(SQLConf.CBO_ENABLED.key -> "true", SQLConf.JOIN_REORDER_ENABLED.key -> "true") {
val smallTable = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word")
val smallTable2 = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word")
val smallTable3 = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word")
// Join the input stream with a table.
val df = MemoryStream[Int].toDF
val joined = df.join(smallTable, smallTable("number") === $"value")
.join(smallTable2, smallTable2("number") === $"value")
.join(smallTable3, smallTable3("number") === $"value")
val outputStream = new java.io.ByteArrayOutputStream()
Console.withOut(outputStream) {
joined.explain(true)
}
assert(outputStream.toString.contains("StreamingRelation"))
}
}
test("SPARK-20432: union one stream with itself") {
val df = spark.readStream.format(classOf[FakeDefaultSource].getName).load().select("a")
val unioned = df.union(df)
withTempDir { outputDir =>
withTempDir { checkpointDir =>
val query =
unioned
.writeStream.format("parquet")
.option("checkpointLocation", checkpointDir.getAbsolutePath)
.start(outputDir.getAbsolutePath)
try {
query.processAllAvailable()
val outputDf = spark.read.parquet(outputDir.getAbsolutePath).as[Long]
checkDatasetUnorderly[Long](outputDf, (0L to 10L).union((0L to 10L)).toArray: _*)
} finally {
query.stop()
}
}
}
}
test("union two streams") {
val inputData1 = MemoryStream[Int]
val inputData2 = MemoryStream[Int]
val unioned = inputData1.toDS().union(inputData2.toDS())
testStream(unioned)(
AddData(inputData1, 1, 3, 5),
CheckAnswer(1, 3, 5),
AddData(inputData2, 2, 4, 6),
CheckAnswer(1, 2, 3, 4, 5, 6),
StopStream,
AddData(inputData1, 7),
StartStream(),
AddData(inputData2, 8),
CheckAnswer(1, 2, 3, 4, 5, 6, 7, 8))
}
test("sql queries") {
withTempView("stream") {
val inputData = MemoryStream[Int]
inputData.toDF().createOrReplaceTempView("stream")
val evens = sql("SELECT * FROM stream WHERE value % 2 = 0")
testStream(evens)(
AddData(inputData, 1, 2, 3, 4),
CheckAnswer(2, 4))
}
}
test("DataFrame reuse") {
def assertDF(df: DataFrame): Unit = {
withTempDir { outputDir =>
withTempDir { checkpointDir =>
val query = df.writeStream.format("parquet")
.option("checkpointLocation", checkpointDir.getAbsolutePath)
.start(outputDir.getAbsolutePath)
try {
query.processAllAvailable()
val outputDf = spark.read.parquet(outputDir.getAbsolutePath).as[Long]
checkDataset[Long](outputDf, (0L to 10L).toArray: _*)
} finally {
query.stop()
}
}
}
}
val df = spark.readStream.format(classOf[FakeDefaultSource].getName).load()
Seq("", "parquet").foreach { useV1Source =>
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> useV1Source) {
assertDF(df)
assertDF(df)
}
}
}
test("Within the same streaming query, one StreamingRelation should only be transformed to one " +
"StreamingExecutionRelation") {
val df = spark.readStream.format(classOf[FakeDefaultSource].getName).load()
var query: StreamExecution = null
try {
query =
df.union(df)
.writeStream
.format("memory")
.queryName("memory")
.start()
.asInstanceOf[StreamingQueryWrapper]
.streamingQuery
query.awaitInitialization(streamingTimeout.toMillis)
val executionRelations =
query
.logicalPlan
.collect { case ser: StreamingExecutionRelation => ser }
assert(executionRelations.size === 2)
assert(executionRelations.distinct.size === 1)
} finally {
if (query != null) {
query.stop()
}
}
}
test("unsupported queries") {
val streamInput = MemoryStream[Int]
val batchInput = Seq(1, 2, 3).toDS()
def assertError(expectedMsgs: Seq[String])(body: => Unit): Unit = {
val e = intercept[AnalysisException] {
body
}
expectedMsgs.foreach { s => assert(e.getMessage.contains(s)) }
}
// Running streaming plan as a batch query
assertError("start" :: Nil) {
streamInput.toDS.map { i => i }.count()
}
// Running a non-streaming plan as a streaming query
assertError("without streaming sources" :: "start" :: Nil) {
val ds = batchInput.map { i => i }
testStream(ds)()
}
// Running streaming plan that cannot be incrementalized
assertError("not supported" :: "streaming" :: Nil) {
val ds = streamInput.toDS.map { i => i }.sort()
testStream(ds)()
}
}
test("minimize delay between batch construction and execution") {
// For each batch, we would retrieve new data's offsets and log them before we run the execution
// This checks whether the key of the offset log is the expected batch id
def CheckOffsetLogLatestBatchId(expectedId: Int): AssertOnQuery =
AssertOnQuery(_.offsetLog.getLatest().get._1 == expectedId,
s"offsetLog's latest should be $expectedId")
// Check the latest batchid in the commit log
def CheckCommitLogLatestBatchId(expectedId: Int): AssertOnQuery =
AssertOnQuery(_.commitLog.getLatest().get._1 == expectedId,
s"commitLog's latest should be $expectedId")
// Ensure that there has not been an incremental execution after restart
def CheckNoIncrementalExecutionCurrentBatchId(): AssertOnQuery =
AssertOnQuery(_.lastExecution == null, s"lastExecution not expected to run")
// For each batch, we would log the state change during the execution
// This checks whether the key of the state change log is the expected batch id
def CheckIncrementalExecutionCurrentBatchId(expectedId: Int): AssertOnQuery =
AssertOnQuery(_.lastExecution.asInstanceOf[IncrementalExecution].currentBatchId == expectedId,
s"lastExecution's currentBatchId should be $expectedId")
// For each batch, we would log the sink change after the execution
// This checks whether the key of the sink change log is the expected batch id
def CheckSinkLatestBatchId(expectedId: Int): AssertOnQuery =
AssertOnQuery(_.sink.asInstanceOf[MemorySink].latestBatchId.get == expectedId,
s"sink's lastBatchId should be $expectedId")
val inputData = MemoryStream[Int]
testStream(inputData.toDS())(
StartStream(Trigger.ProcessingTime("10 seconds"), new StreamManualClock),
/* -- batch 0 ----------------------- */
// Add some data in batch 0
AddData(inputData, 1, 2, 3),
AdvanceManualClock(10 * 1000), // 10 seconds
/* -- batch 1 ----------------------- */
// Check the results of batch 0
CheckAnswer(1, 2, 3),
CheckIncrementalExecutionCurrentBatchId(0),
CheckCommitLogLatestBatchId(0),
CheckOffsetLogLatestBatchId(0),
CheckSinkLatestBatchId(0),
// Add some data in batch 1
AddData(inputData, 4, 5, 6),
AdvanceManualClock(10 * 1000),
/* -- batch _ ----------------------- */
// Check the results of batch 1
CheckAnswer(1, 2, 3, 4, 5, 6),
CheckIncrementalExecutionCurrentBatchId(1),
CheckCommitLogLatestBatchId(1),
CheckOffsetLogLatestBatchId(1),
CheckSinkLatestBatchId(1),
AdvanceManualClock(10 * 1000),
AdvanceManualClock(10 * 1000),
AdvanceManualClock(10 * 1000),
/* -- batch __ ---------------------- */
// Check the results of batch 1 again; this is to make sure that, when there's no new data,
// the currentId does not get logged (e.g. as 2) even if the clock has advanced many times
CheckAnswer(1, 2, 3, 4, 5, 6),
CheckIncrementalExecutionCurrentBatchId(1),
CheckCommitLogLatestBatchId(1),
CheckOffsetLogLatestBatchId(1),
CheckSinkLatestBatchId(1),
/* Stop then restart the Stream */
StopStream,
StartStream(Trigger.ProcessingTime("10 seconds"), new StreamManualClock(60 * 1000)),
/* -- batch 1 no rerun ----------------- */
// batch 1 would not re-run because the latest batch id logged in commit log is 1
AdvanceManualClock(10 * 1000),
CheckNoIncrementalExecutionCurrentBatchId(),
/* -- batch 2 ----------------------- */
// Check the results of batch 1
CheckAnswer(1, 2, 3, 4, 5, 6),
CheckCommitLogLatestBatchId(1),
CheckOffsetLogLatestBatchId(1),
CheckSinkLatestBatchId(1),
// Add some data in batch 2
AddData(inputData, 7, 8, 9),
AdvanceManualClock(10 * 1000),
/* -- batch 3 ----------------------- */
// Check the results of batch 2
CheckAnswer(1, 2, 3, 4, 5, 6, 7, 8, 9),
CheckIncrementalExecutionCurrentBatchId(2),
CheckCommitLogLatestBatchId(2),
CheckOffsetLogLatestBatchId(2),
CheckSinkLatestBatchId(2))
}
test("insert an extraStrategy") {
try {
spark.experimental.extraStrategies = TestStrategy :: Nil
val inputData = MemoryStream[(String, Int)]
val df = inputData.toDS().map(_._1).toDF("a")
testStream(df)(
AddData(inputData, ("so slow", 1)),
CheckAnswer("so fast"))
} finally {
spark.experimental.extraStrategies = Nil
}
}
testQuietly("handle fatal errors thrown from the stream thread") {
for (e <- Seq(
new VirtualMachineError {},
new ThreadDeath,
new LinkageError,
new ControlThrowable {}
)) {
val source = new Source {
override def getOffset: Option[Offset] = {
throw e
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
throw e
}
override def schema: StructType = StructType(Array(StructField("value", IntegerType)))
override def stop(): Unit = {}
}
val df = Dataset[Int](
sqlContext.sparkSession,
StreamingExecutionRelation(source, sqlContext.sparkSession))
testStream(df)(
// `ExpectFailure(isFatalError = true)` verifies two things:
// - Fatal errors can be propagated to `StreamingQuery.exception` and
// `StreamingQuery.awaitTermination` like non fatal errors.
// - Fatal errors can be caught by UncaughtExceptionHandler.
ExpectFailure(isFatalError = true)(ClassTag(e.getClass))
)
}
}
test("output mode API in Scala") {
assert(OutputMode.Append === InternalOutputModes.Append)
assert(OutputMode.Complete === InternalOutputModes.Complete)
assert(OutputMode.Update === InternalOutputModes.Update)
}
override protected def sparkConf: SparkConf = super.sparkConf
    .set("spark.redaction.string.regex", "file:/[\\w_]+")
test("explain - redaction") {
val replacement = "*********"
val inputData = MemoryStream[String]
val df = inputData.toDS().map(_ + "foo").groupBy("value").agg(count("*"))
// Test StreamingQuery.display
val q = df.writeStream.queryName("memory_explain").outputMode("complete").format("memory")
.start()
.asInstanceOf[StreamingQueryWrapper]
.streamingQuery
try {
inputData.addData("abc")
q.processAllAvailable()
val explainWithoutExtended = q.explainInternal(false)
assert(explainWithoutExtended.contains(replacement))
assert(explainWithoutExtended.contains("StateStoreRestore"))
assert(!explainWithoutExtended.contains("file:/"))
val explainWithExtended = q.explainInternal(true)
assert(explainWithExtended.contains(replacement))
assert(explainWithExtended.contains("StateStoreRestore"))
assert(!explainWithoutExtended.contains("file:/"))
} finally {
q.stop()
}
}
test("explain") {
val inputData = MemoryStream[String]
val df = inputData.toDS().map(_ + "foo").groupBy("value").agg(count("*"))
// Test `df.explain`
val explain = ExplainCommand(df.queryExecution.logical, SimpleMode)
val explainString =
spark.sessionState
.executePlan(explain)
.executedPlan
.executeCollect()
.map(_.getString(0))
        .mkString("\n")
assert(explainString.contains("StateStoreRestore"))
assert(explainString.contains("StreamingRelation"))
assert(!explainString.contains("LocalTableScan"))
// Test StreamingQuery.display
val q = df.writeStream.queryName("memory_explain").outputMode("complete").format("memory")
.start()
.asInstanceOf[StreamingQueryWrapper]
.streamingQuery
try {
assert("No physical plan. Waiting for data." === q.explainInternal(false))
assert("No physical plan. Waiting for data." === q.explainInternal(true))
inputData.addData("abc")
q.processAllAvailable()
val explainWithoutExtended = q.explainInternal(false)
// `extended = false` only displays the physical plan.
assert("StreamingDataSourceV2Relation".r
.findAllMatchIn(explainWithoutExtended).size === 0)
assert("BatchScan".r
.findAllMatchIn(explainWithoutExtended).size === 1)
// Use "StateStoreRestore" to verify that it does output a streaming physical plan
assert(explainWithoutExtended.contains("StateStoreRestore"))
val explainWithExtended = q.explainInternal(true)
      // `extended = true` displays 3 logical plans (Parsed/Analyzed/Optimized) and 1 physical
// plan.
assert("StreamingDataSourceV2Relation".r
.findAllMatchIn(explainWithExtended).size === 3)
assert("BatchScan".r
.findAllMatchIn(explainWithExtended).size === 1)
// Use "StateStoreRestore" to verify that it does output a streaming physical plan
assert(explainWithExtended.contains("StateStoreRestore"))
} finally {
q.stop()
}
}
test("explain-continuous") {
val inputData = ContinuousMemoryStream[Int]
val df = inputData.toDS().map(_ * 2).filter(_ > 5)
// Test `df.explain`
val explain = ExplainCommand(df.queryExecution.logical, SimpleMode)
val explainString =
spark.sessionState
.executePlan(explain)
.executedPlan
.executeCollect()
.map(_.getString(0))
        .mkString("\n")
assert(explainString.contains("Filter"))
assert(explainString.contains("MapElements"))
assert(!explainString.contains("LocalTableScan"))
// Test StreamingQuery.display
val q = df.writeStream.queryName("memory_continuous_explain")
.outputMode(OutputMode.Update()).format("memory")
.trigger(Trigger.Continuous("1 seconds"))
.start()
.asInstanceOf[StreamingQueryWrapper]
.streamingQuery
try {
      // in continuous mode, the query will be run even if there's no data
// sleep a bit to ensure initialization
eventually(timeout(2.seconds), interval(100.milliseconds)) {
assert(q.lastExecution != null)
}
val explainWithoutExtended = q.explainInternal(false)
// `extended = false` only displays the physical plan.
assert("StreamingDataSourceV2Relation".r
.findAllMatchIn(explainWithoutExtended).size === 0)
assert("ContinuousScan".r
.findAllMatchIn(explainWithoutExtended).size === 1)
val explainWithExtended = q.explainInternal(true)
      // `extended = true` displays 3 logical plans (Parsed/Analyzed/Optimized) and 1 physical
// plan.
assert("StreamingDataSourceV2Relation".r
.findAllMatchIn(explainWithExtended).size === 3)
assert("ContinuousScan".r
.findAllMatchIn(explainWithExtended).size === 1)
} finally {
q.stop()
}
}
test("codegen-microbatch") {
val inputData = MemoryStream[Int]
val df = inputData.toDS().map(_ * 2).filter(_ > 5)
// Test StreamingQuery.codegen
val q = df.writeStream.queryName("memory_microbatch_codegen")
.outputMode(OutputMode.Update)
.format("memory")
.trigger(Trigger.ProcessingTime("1 seconds"))
.start()
try {
import org.apache.spark.sql.execution.debug._
assert("No physical plan. Waiting for data." === codegenString(q))
assert(codegenStringSeq(q).isEmpty)
inputData.addData(1, 2, 3, 4, 5)
q.processAllAvailable()
assertDebugCodegenResult(q)
} finally {
q.stop()
}
}
test("codegen-continuous") {
val inputData = ContinuousMemoryStream[Int]
val df = inputData.toDS().map(_ * 2).filter(_ > 5)
// Test StreamingQuery.codegen
val q = df.writeStream.queryName("memory_continuous_codegen")
.outputMode(OutputMode.Update)
.format("memory")
.trigger(Trigger.Continuous("1 seconds"))
.start()
try {
      // in continuous mode, the query will be run even if there's no data
// sleep a bit to ensure initialization
eventually(timeout(2.seconds), interval(100.milliseconds)) {
assert(q.asInstanceOf[StreamingQueryWrapper].streamingQuery.lastExecution != null)
}
assertDebugCodegenResult(q)
} finally {
q.stop()
}
}
private def assertDebugCodegenResult(query: StreamingQuery): Unit = {
import org.apache.spark.sql.execution.debug._
val codegenStr = codegenString(query)
assert(codegenStr.contains("Found 1 WholeStageCodegen subtrees."))
// assuming that code is generated for the test query
assert(codegenStr.contains("Generated code:"))
val codegenStrSeq = codegenStringSeq(query)
assert(codegenStrSeq.nonEmpty)
assert(codegenStrSeq.head._1.contains("*(1)"))
assert(codegenStrSeq.head._2.contains("codegenStageId=1"))
}
test("SPARK-19065: dropDuplicates should not create expressions using the same id") {
withTempPath { testPath =>
val data = Seq((1, 2), (2, 3), (3, 4))
data.toDS.write.mode("overwrite").json(testPath.getCanonicalPath)
val schema = spark.read.json(testPath.getCanonicalPath).schema
val query = spark
.readStream
.schema(schema)
.json(testPath.getCanonicalPath)
.dropDuplicates("_1")
.writeStream
.format("memory")
.queryName("testquery")
.outputMode("append")
.start()
try {
query.processAllAvailable()
if (query.exception.isDefined) {
throw query.exception.get
}
} finally {
query.stop()
}
}
}
test("handle IOException when the streaming thread is interrupted (pre Hadoop 2.8)") {
// This test uses a fake source to throw the same IOException as pre Hadoop 2.8 when the
// streaming thread is interrupted. We should handle it properly by not failing the query.
ThrowingIOExceptionLikeHadoop12074.createSourceLatch = new CountDownLatch(1)
val query = spark
.readStream
.format(classOf[ThrowingIOExceptionLikeHadoop12074].getName)
.load()
.writeStream
.format("console")
.start()
assert(ThrowingIOExceptionLikeHadoop12074.createSourceLatch
.await(streamingTimeout.toMillis, TimeUnit.MILLISECONDS),
"ThrowingIOExceptionLikeHadoop12074.createSource wasn't called before timeout")
query.stop()
assert(query.exception.isEmpty)
}
test("handle InterruptedIOException when the streaming thread is interrupted (Hadoop 2.8+)") {
// This test uses a fake source to throw the same InterruptedIOException as Hadoop 2.8+ when the
// streaming thread is interrupted. We should handle it properly by not failing the query.
ThrowingInterruptedIOException.createSourceLatch = new CountDownLatch(1)
val query = spark
.readStream
.format(classOf[ThrowingInterruptedIOException].getName)
.load()
.writeStream
.format("console")
.start()
assert(ThrowingInterruptedIOException.createSourceLatch
.await(streamingTimeout.toMillis, TimeUnit.MILLISECONDS),
"ThrowingInterruptedIOException.createSource wasn't called before timeout")
query.stop()
assert(query.exception.isEmpty)
}
test("SPARK-19873: streaming aggregation with change in number of partitions") {
val inputData = MemoryStream[(Int, Int)]
val agg = inputData.toDS().groupBy("_1").count()
testStream(agg, OutputMode.Complete())(
AddData(inputData, (1, 0), (2, 0)),
StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "2")),
CheckAnswer((1, 1), (2, 1)),
StopStream,
AddData(inputData, (3, 0), (2, 0)),
StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "5")),
CheckAnswer((1, 1), (2, 2), (3, 1)),
StopStream,
AddData(inputData, (3, 0), (1, 0)),
StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "1")),
CheckAnswer((1, 2), (2, 2), (3, 2)))
}
testQuietly("recover from a Spark v2.1 checkpoint") {
var inputData: MemoryStream[Int] = null
var query: DataStreamWriter[Row] = null
def prepareMemoryStream(): Unit = {
inputData = MemoryStream[Int]
inputData.addData(1, 2, 3, 4)
inputData.addData(3, 4, 5, 6)
inputData.addData(5, 6, 7, 8)
query = inputData
.toDF()
.groupBy($"value")
.agg(count("*"))
.writeStream
.outputMode("complete")
.format("memory")
}
// Get an existing checkpoint generated by Spark v2.1.
// v2.1 does not record # shuffle partitions in the offset metadata.
val resourceUri =
this.getClass.getResource("/structured-streaming/checkpoint-version-2.1.0").toURI
val checkpointDir = new File(resourceUri)
// 1 - Test if recovery from the checkpoint is successful.
prepareMemoryStream()
    val dir1 = Utils.createTempDir().getCanonicalFile // not using withTempDir {} because it makes the test flaky
// Copy the checkpoint to a temp dir to prevent changes to the original.
    // Not doing this will make the test pass on the first run but fail on subsequent runs.
FileUtils.copyDirectory(checkpointDir, dir1)
// Checkpoint data was generated by a query with 10 shuffle partitions.
// In order to test reading from the checkpoint, the checkpoint must have two or more batches,
// since the last batch may be rerun.
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
var streamingQuery: StreamingQuery = null
try {
streamingQuery =
query.queryName("counts").option("checkpointLocation", dir1.getCanonicalPath).start()
streamingQuery.processAllAvailable()
inputData.addData(9)
streamingQuery.processAllAvailable()
checkAnswer(spark.table("counts").toDF(),
Row(1, 1L) :: Row(2, 1L) :: Row(3, 2L) :: Row(4, 2L) ::
Row(5, 2L) :: Row(6, 2L) :: Row(7, 1L) :: Row(8, 1L) :: Row(9, 1L) :: Nil)
} finally {
if (streamingQuery ne null) {
streamingQuery.stop()
}
}
}
// 2 - Check recovery with wrong num shuffle partitions
prepareMemoryStream()
val dir2 = Utils.createTempDir().getCanonicalFile
FileUtils.copyDirectory(checkpointDir, dir2)
    // Since the number of partitions is greater than 10, it should throw an exception.
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "15") {
var streamingQuery: StreamingQuery = null
try {
intercept[StreamingQueryException] {
streamingQuery =
query.queryName("badQuery").option("checkpointLocation", dir2.getCanonicalPath).start()
streamingQuery.processAllAvailable()
}
} finally {
if (streamingQuery ne null) {
streamingQuery.stop()
}
}
}
}
test("calling stop() on a query cancels related jobs") {
val input = MemoryStream[Int]
val query = input
.toDS()
.map { i =>
while (!TaskContext.get().isInterrupted()) {
// keep looping till interrupted by query.stop()
Thread.sleep(100)
}
i
}
.writeStream
.format("console")
.start()
input.addData(1)
// wait for jobs to start
eventually(timeout(streamingTimeout)) {
assert(sparkContext.statusTracker.getActiveJobIds().nonEmpty)
}
query.stop()
// make sure jobs are stopped
eventually(timeout(streamingTimeout)) {
assert(sparkContext.statusTracker.getActiveJobIds().isEmpty)
}
}
test("batch id is updated correctly in the job description") {
val queryName = "memStream"
@volatile var jobDescription: String = null
def assertDescContainsQueryNameAnd(batch: Integer): Unit = {
// wait for listener event to be processed
spark.sparkContext.listenerBus.waitUntilEmpty(streamingTimeout.toMillis)
assert(jobDescription.contains(queryName) && jobDescription.contains(s"batch = $batch"))
}
spark.sparkContext.addSparkListener(new SparkListener {
override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
jobDescription = jobStart.properties.getProperty(SparkContext.SPARK_JOB_DESCRIPTION)
}
})
val input = MemoryStream[Int]
val query = input
.toDS()
.map(_ + 1)
.writeStream
.format("memory")
.queryName(queryName)
.start()
input.addData(1)
query.processAllAvailable()
assertDescContainsQueryNameAnd(batch = 0)
input.addData(2, 3)
query.processAllAvailable()
assertDescContainsQueryNameAnd(batch = 1)
input.addData(4)
query.processAllAvailable()
assertDescContainsQueryNameAnd(batch = 2)
query.stop()
}
test("should resolve the checkpoint path") {
withTempDir { dir =>
val checkpointLocation = dir.getCanonicalPath
assert(!checkpointLocation.startsWith("file:/"))
val query = MemoryStream[Int].toDF
.writeStream
.option("checkpointLocation", checkpointLocation)
.format("console")
.start()
try {
val resolvedCheckpointDir =
query.asInstanceOf[StreamingQueryWrapper].streamingQuery.resolvedCheckpointRoot
assert(resolvedCheckpointDir.startsWith("file:/"))
} finally {
query.stop()
}
}
}
testQuietly("specify custom state store provider") {
val providerClassName = classOf[TestStateStoreProvider].getCanonicalName
withSQLConf(SQLConf.STATE_STORE_PROVIDER_CLASS.key -> providerClassName) {
val input = MemoryStream[Int]
val df = input.toDS().groupBy().count()
val query = df.writeStream.outputMode("complete").format("memory").queryName("name").start()
input.addData(1, 2, 3)
val e = intercept[Exception] {
query.awaitTermination()
}
TestUtils.assertExceptionMsg(e, providerClassName)
TestUtils.assertExceptionMsg(e, "instantiated")
}
}
testQuietly("custom state store provider read from offset log") {
val input = MemoryStream[Int]
val df = input.toDS().groupBy().count()
val providerConf1 = SQLConf.STATE_STORE_PROVIDER_CLASS.key ->
"org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider"
val providerConf2 = SQLConf.STATE_STORE_PROVIDER_CLASS.key ->
classOf[TestStateStoreProvider].getCanonicalName
def runQuery(queryName: String, checkpointLoc: String): Unit = {
val query = df.writeStream
.outputMode("complete")
.format("memory")
.queryName(queryName)
.option("checkpointLocation", checkpointLoc)
.start()
input.addData(1, 2, 3)
query.processAllAvailable()
query.stop()
}
withTempDir { dir =>
val checkpointLoc1 = new File(dir, "1").getCanonicalPath
withSQLConf(providerConf1) {
runQuery("query1", checkpointLoc1) // generate checkpoints
}
val checkpointLoc2 = new File(dir, "2").getCanonicalPath
withSQLConf(providerConf2) {
        // Verify new query will use new provider that throws an error on loading
intercept[Exception] {
runQuery("query2", checkpointLoc2)
}
// Verify old query from checkpoint will still use old provider
runQuery("query1", checkpointLoc1)
}
}
}
test("streaming limit without state") {
val inputData1 = MemoryStream[Int]
testStream(inputData1.toDF().limit(0))(
AddData(inputData1, 1 to 8: _*),
CheckAnswer())
val inputData2 = MemoryStream[Int]
testStream(inputData2.toDF().limit(4))(
AddData(inputData2, 1 to 8: _*),
CheckAnswer(1 to 4: _*))
}
test("streaming limit with state") {
val inputData = MemoryStream[Int]
testStream(inputData.toDF().limit(4))(
AddData(inputData, 1 to 2: _*),
CheckAnswer(1 to 2: _*),
AddData(inputData, 3 to 6: _*),
CheckAnswer(1 to 4: _*),
AddData(inputData, 7 to 9: _*),
CheckAnswer(1 to 4: _*))
}
test("streaming limit with other operators") {
val inputData = MemoryStream[Int]
testStream(inputData.toDF().where("value % 2 = 1").limit(4))(
AddData(inputData, 1 to 5: _*),
CheckAnswer(1, 3, 5),
AddData(inputData, 6 to 9: _*),
CheckAnswer(1, 3, 5, 7),
AddData(inputData, 10 to 12: _*),
CheckAnswer(1, 3, 5, 7))
}
test("streaming limit with multiple limits") {
val inputData1 = MemoryStream[Int]
testStream(inputData1.toDF().limit(4).limit(2))(
AddData(inputData1, 1),
CheckAnswer(1),
AddData(inputData1, 2 to 8: _*),
CheckAnswer(1, 2))
val inputData2 = MemoryStream[Int]
testStream(inputData2.toDF().limit(4).limit(100).limit(3))(
AddData(inputData2, 1, 2),
CheckAnswer(1, 2),
AddData(inputData2, 3 to 8: _*),
CheckAnswer(1 to 3: _*))
}
test("SPARK-30658: streaming limit before agg in complete mode") {
val inputData = MemoryStream[Int]
val limited = inputData.toDF().limit(5).groupBy("value").count()
testStream(limited, OutputMode.Complete())(
AddData(inputData, 1 to 3: _*),
CheckAnswer(Row(1, 1), Row(2, 1), Row(3, 1)),
AddData(inputData, 1 to 9: _*),
CheckAnswer(Row(1, 2), Row(2, 2), Row(3, 1)))
}
test("SPARK-30658: streaming limits before and after agg in complete mode " +
"(after limit < before limit)") {
val inputData = MemoryStream[Int]
val limited = inputData.toDF().limit(4).groupBy("value").count().orderBy("value").limit(3)
testStream(limited, OutputMode.Complete())(
StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "1")),
AddData(inputData, 1 to 9: _*),
// only 1 to 4 should be allowed to aggregate, and counts for only 1 to 3 should be output
CheckAnswer(Row(1, 1), Row(2, 1), Row(3, 1)),
AddData(inputData, 2 to 6: _*),
      // None of the new values should be allowed to aggregate; the same 3 counts should be output
CheckAnswer(Row(1, 1), Row(2, 1), Row(3, 1)))
}
test("SPARK-30658: streaming limits before and after agg in complete mode " +
"(before limit < after limit)") {
val inputData = MemoryStream[Int]
val limited = inputData.toDF().limit(2).groupBy("value").count().orderBy("value").limit(3)
testStream(limited, OutputMode.Complete())(
StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "1")),
AddData(inputData, 1 to 9: _*),
CheckAnswer(Row(1, 1), Row(2, 1)),
AddData(inputData, 2 to 6: _*),
CheckAnswer(Row(1, 1), Row(2, 1)))
}
test("SPARK-30657: streaming limit after streaming dedup in append mode") {
val inputData = MemoryStream[Int]
val limited = inputData.toDF().dropDuplicates().limit(1)
testStream(limited)(
AddData(inputData, 1, 2),
CheckAnswer(Row(1)),
AddData(inputData, 3, 4),
CheckAnswer(Row(1)))
}
test("streaming limit in update mode") {
val inputData = MemoryStream[Int]
val e = intercept[AnalysisException] {
testStream(inputData.toDF().limit(5), OutputMode.Update())(
AddData(inputData, 1 to 3: _*)
)
}
assert(e.getMessage.contains(
"Limits are not supported on streaming DataFrames/Datasets in Update output mode"))
}
test("streaming limit in multiple partitions") {
val inputData = MemoryStream[Int]
testStream(inputData.toDF().repartition(2).limit(7))(
AddData(inputData, 1 to 10: _*),
CheckAnswerRowsByFunc(
rows => assert(rows.size == 7 && rows.forall(r => r.getInt(0) <= 10)),
false),
AddData(inputData, 11 to 20: _*),
CheckAnswerRowsByFunc(
rows => assert(rows.size == 7 && rows.forall(r => r.getInt(0) <= 10)),
false))
}
test("streaming limit in multiple partitions by column") {
val inputData = MemoryStream[(Int, Int)]
val df = inputData.toDF().repartition(2, $"_2").limit(7)
testStream(df)(
AddData(inputData, (1, 0), (2, 0), (3, 1), (4, 1)),
CheckAnswerRowsByFunc(
rows => assert(rows.size == 4 && rows.forall(r => r.getInt(0) <= 4)),
false),
AddData(inputData, (5, 0), (6, 0), (7, 1), (8, 1)),
CheckAnswerRowsByFunc(
rows => assert(rows.size == 7 && rows.forall(r => r.getInt(0) <= 8)),
false))
}
test("SPARK-30657: streaming limit should not apply on limits on state subplans") {
    val streamData = MemoryStream[Int]
    val streamingDF = streamData.toDF().toDF("value")
    val staticDF = spark.createDataset(Seq(1)).toDF("value").orderBy("value")
    testStream(streamingDF.join(staticDF.limit(1), "value"))(
      AddData(streamData, 1, 2, 3),
      CheckAnswer(Row(1)),
      AddData(streamData, 1, 3, 5),
      CheckAnswer(Row(1), Row(1)))
}
test("SPARK-30657: streaming limit optimization from StreamingLocalLimitExec to LocalLimitExec") {
val inputData = MemoryStream[Int]
val inputDF = inputData.toDF()
    /** Verify whether the local limit in the plan is a streaming limit or a simple local limit. */
def verifyLocalLimit(
df: DataFrame,
expectStreamingLimit: Boolean,
outputMode: OutputMode = OutputMode.Append): Unit = {
var execPlan: SparkPlan = null
testStream(df, outputMode)(
AddData(inputData, 1),
AssertOnQuery { q =>
q.processAllAvailable()
execPlan = q.lastExecution.executedPlan
true
}
)
require(execPlan != null)
val localLimits = execPlan.collect {
case l: LocalLimitExec => l
case l: StreamingLocalLimitExec => l
}
require(
        localLimits.size == 1,
        s"Can't verify local limit optimization with this plan:\n$execPlan")
if (expectStreamingLimit) {
assert(
          localLimits.head.isInstanceOf[StreamingLocalLimitExec],
          s"Local limit was not StreamingLocalLimitExec:\n$execPlan")
} else {
assert(
          localLimits.head.isInstanceOf[LocalLimitExec],
          s"Local limit was not LocalLimitExec:\n$execPlan")
}
}
// Should not be optimized, so StreamingLocalLimitExec should be present
verifyLocalLimit(inputDF.dropDuplicates().limit(1), expectStreamingLimit = true)
// Should be optimized from StreamingLocalLimitExec to LocalLimitExec
verifyLocalLimit(inputDF.limit(1), expectStreamingLimit = false)
verifyLocalLimit(
inputDF.limit(1).groupBy().count(),
expectStreamingLimit = false,
outputMode = OutputMode.Complete())
    // Should be optimized, as repartitioning is sufficient to ensure that the iterators of
    // StreamingDeduplicationExec are consumed completely by the repartition exchange.
verifyLocalLimit(inputDF.dropDuplicates().repartition(1).limit(1), expectStreamingLimit = false)
// Should be LocalLimitExec in the first place, not from optimization of StreamingLocalLimitExec
val staticDF = spark.range(1).toDF("value").limit(1)
verifyLocalLimit(inputDF.toDF("value").join(staticDF, "value"), expectStreamingLimit = false)
verifyLocalLimit(
inputDF.groupBy().count().limit(1),
expectStreamingLimit = false,
outputMode = OutputMode.Complete())
}
test("is_continuous_processing property should be false for microbatch processing") {
val input = MemoryStream[Int]
val df = input.toDS()
.map(i => TaskContext.get().getLocalProperty(StreamExecution.IS_CONTINUOUS_PROCESSING))
testStream(df) (
AddData(input, 1),
CheckAnswer("false")
)
}
test("is_continuous_processing property should be true for continuous processing") {
val input = ContinuousMemoryStream[Int]
val stream = input.toDS()
.map(i => TaskContext.get().getLocalProperty(StreamExecution.IS_CONTINUOUS_PROCESSING))
.writeStream.format("memory")
.queryName("output")
.trigger(Trigger.Continuous("1 seconds"))
.start()
try {
input.addData(1)
stream.processAllAvailable()
} finally {
stream.stop()
}
checkAnswer(spark.sql("select * from output"), Row("true"))
}
for (e <- Seq(
new InterruptedException,
new InterruptedIOException,
new ClosedByInterruptException,
new UncheckedIOException("test", new ClosedByInterruptException),
new ExecutionException("test", new InterruptedException),
new UncheckedExecutionException("test", new InterruptedException))) {
test(s"view ${e.getClass.getSimpleName} as a normal query stop") {
ThrowingExceptionInCreateSource.createSourceLatch = new CountDownLatch(1)
ThrowingExceptionInCreateSource.exception = e
val query = spark
.readStream
.format(classOf[ThrowingExceptionInCreateSource].getName)
.load()
.writeStream
.format("console")
.start()
assert(ThrowingExceptionInCreateSource.createSourceLatch
.await(streamingTimeout.toMillis, TimeUnit.MILLISECONDS),
"ThrowingExceptionInCreateSource.createSource wasn't called before timeout")
query.stop()
assert(query.exception.isEmpty)
}
}
test("SPARK-26379 Structured Streaming - Exception on adding current_timestamp " +
" to Dataset - use v2 sink") {
testCurrentTimestampOnStreamingQuery()
}
test("SPARK-26379 Structured Streaming - Exception on adding current_timestamp " +
" to Dataset - use v1 sink") {
testCurrentTimestampOnStreamingQuery()
}
private def testCurrentTimestampOnStreamingQuery(): Unit = {
val input = MemoryStream[Int]
val df = input.toDS().withColumn("cur_timestamp", lit(current_timestamp()))
def assertBatchOutputAndUpdateLastTimestamp(
rows: Seq[Row],
curTimestamp: Long,
curDate: Int,
expectedValue: Int): Long = {
assert(rows.size === 1)
val row = rows.head
assert(row.getInt(0) === expectedValue)
assert(row.getTimestamp(1).getTime >= curTimestamp)
row.getTimestamp(1).getTime
}
var lastTimestamp = System.currentTimeMillis()
val currentDate = DateTimeUtils.microsToDays(DateTimeUtils.millisToMicros(lastTimestamp))
testStream(df) (
AddData(input, 1),
CheckLastBatch { rows: Seq[Row] =>
lastTimestamp = assertBatchOutputAndUpdateLastTimestamp(rows, lastTimestamp, currentDate, 1)
},
Execute { _ => Thread.sleep(1000) },
AddData(input, 2),
CheckLastBatch { rows: Seq[Row] =>
lastTimestamp = assertBatchOutputAndUpdateLastTimestamp(rows, lastTimestamp, currentDate, 2)
}
)
}
// ProcessingTime trigger generates MicroBatchExecution, and ContinuousTrigger starts a
// ContinuousExecution
Seq(Trigger.ProcessingTime("1 second"), Trigger.Continuous("1 second")).foreach { trigger =>
test(s"SPARK-30143: stop waits until timeout if blocked - trigger: $trigger") {
BlockOnStopSourceProvider.enableBlocking()
val sq = spark.readStream.format(classOf[BlockOnStopSourceProvider].getName)
.load()
.writeStream
.format("console")
.trigger(trigger)
.start()
failAfter(60.seconds) {
val startTime = System.nanoTime()
withSQLConf(SQLConf.STREAMING_STOP_TIMEOUT.key -> "2000") {
val ex = intercept[TimeoutException] {
sq.stop()
}
assert(ex.getMessage.contains(sq.id.toString))
}
val duration = (System.nanoTime() - startTime) / 1e6
assert(duration >= 2000,
s"Should have waited more than 2000 millis, but waited $duration millis")
BlockOnStopSourceProvider.disableBlocking()
withSQLConf(SQLConf.STREAMING_STOP_TIMEOUT.key -> "0") {
sq.stop()
}
}
}
}
}
abstract class FakeSource extends StreamSourceProvider {
private val fakeSchema = StructType(StructField("a", IntegerType) :: Nil)
override def sourceSchema(
spark: SQLContext,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): (String, StructType) = ("fakeSource", fakeSchema)
}
/** A fake StreamSourceProvider that creates a fake Source that cannot be reused. */
class FakeDefaultSource extends FakeSource {
override def createSource(
spark: SQLContext,
metadataPath: String,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): Source = {
// Create a fake Source that emits 0 to 10.
new Source {
private var offset = -1L
override def schema: StructType = StructType(StructField("a", IntegerType) :: Nil)
override def getOffset: Option[Offset] = {
if (offset >= 10) {
None
} else {
offset += 1
Some(LongOffset(offset))
}
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
val startOffset = start.map(_.asInstanceOf[LongOffset].offset).getOrElse(-1L) + 1
val ds = new Dataset[java.lang.Long](
spark.sparkSession,
Range(
startOffset,
end.asInstanceOf[LongOffset].offset + 1,
1,
Some(spark.sparkSession.sparkContext.defaultParallelism),
isStreaming = true),
Encoders.LONG)
ds.toDF("a")
}
override def stop(): Unit = {}
}
}
}
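// Usage note (a sketch, mirroring the pattern used for the other fake sources in this suite):
// a StreamSourceProvider such as FakeDefaultSource is wired up by class name through the
// DataStreamReader API, e.g.
// {{{
//   val df = spark.readStream.format(classOf[FakeDefaultSource].getName).load()
//   val query = df.writeStream.format("console").start()
//   query.processAllAvailable()
//   query.stop()
// }}}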
/** A fake source that throws the same IOException that pre Hadoop 2.8 throws when it's interrupted. */
class ThrowingIOExceptionLikeHadoop12074 extends FakeSource {
import ThrowingIOExceptionLikeHadoop12074._
override def createSource(
spark: SQLContext,
metadataPath: String,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): Source = {
createSourceLatch.countDown()
try {
Thread.sleep(30000)
throw new TimeoutException("sleep was not interrupted in 30 seconds")
} catch {
case ie: InterruptedException =>
throw new IOException(ie.toString)
}
}
}
object ThrowingIOExceptionLikeHadoop12074 {
/**
* A latch to allow the user to wait until `ThrowingIOExceptionLikeHadoop12074.createSource` is
* called.
*/
@volatile var createSourceLatch: CountDownLatch = null
}
/** A fake source that throws InterruptedIOException, as Hadoop 2.8+ does, when it's interrupted. */
class ThrowingInterruptedIOException extends FakeSource {
import ThrowingInterruptedIOException._
override def createSource(
spark: SQLContext,
metadataPath: String,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): Source = {
createSourceLatch.countDown()
try {
Thread.sleep(30000)
throw new TimeoutException("sleep was not interrupted in 30 seconds")
} catch {
case ie: InterruptedException =>
val iie = new InterruptedIOException(ie.toString)
iie.initCause(ie)
throw iie
}
}
}
object ThrowingInterruptedIOException {
/**
* A latch to allow the user to wait until `ThrowingInterruptedIOException.createSource` is
* called.
*/
@volatile var createSourceLatch: CountDownLatch = null
}
class TestStateStoreProvider extends StateStoreProvider {
override def init(
stateStoreId: StateStoreId,
keySchema: StructType,
valueSchema: StructType,
indexOrdinal: Option[Int],
storeConfs: StateStoreConf,
hadoopConf: Configuration): Unit = {
throw new Exception("Successfully instantiated")
}
override def stateStoreId: StateStoreId = null
override def close(): Unit = { }
override def getStore(version: Long): StateStore = null
}
/** A fake source that throws `ThrowingExceptionInCreateSource.exception` in `createSource` */
class ThrowingExceptionInCreateSource extends FakeSource {
override def createSource(
spark: SQLContext,
metadataPath: String,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): Source = {
ThrowingExceptionInCreateSource.createSourceLatch.countDown()
try {
Thread.sleep(30000)
throw new TimeoutException("sleep was not interrupted in 30 seconds")
} catch {
case _: InterruptedException =>
throw ThrowingExceptionInCreateSource.exception
}
}
}
object ThrowingExceptionInCreateSource {
/**
* A latch to allow the user to wait until `ThrowingExceptionInCreateSource.createSource` is
* called.
*/
@volatile var createSourceLatch: CountDownLatch = null
@volatile var exception: Exception = null
}
|
zuotingbing/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
|
Scala
|
apache-2.0
| 51,336
|
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frsse2008.micro
import uk.gov.hmrc.ct.accounts.retriever.AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
case class AC411(value: Option[Int]) extends CtBoxIdentifier(name = "Previous Cost of raw materials and consumables")
with CtOptionalInteger with Input
with SelfValidatableBox[AccountsBoxRetriever, Option[Int]] {
override def validate(boxRetriever: AccountsBoxRetriever): Set[CtValidation] = {
validateMoney(value)
}
}
|
pncampbell/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/accounts/frsse2008/micro/AC411.scala
|
Scala
|
apache-2.0
| 1,147
|
/*
* ******************************************************************************
* * Copyright (C) 2013 Christopher Harris (Itszuvalex)
* * Itszuvalex@gmail.com
* *
* * This program is free software; you can redistribute it and/or
* * modify it under the terms of the GNU General Public License
* * as published by the Free Software Foundation; either version 2
* * of the License, or (at your option) any later version.
* *
* * This program is distributed in the hope that it will be useful,
* * but WITHOUT ANY WARRANTY; without even the implied warranty of
* * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* * GNU General Public License for more details.
* *
* * You should have received a copy of the GNU General Public License
* * along with this program; if not, write to the Free Software
* * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* *****************************************************************************
*/
package com.itszuvalex.femtocraft.industry.blocks
import java.util.Random
import com.itszuvalex.femtocraft.Femtocraft
import com.itszuvalex.femtocraft.core.blocks.TileContainer
import com.itszuvalex.femtocraft.core.traits.block.{DroppableInventory, RotateOnPlace}
import com.itszuvalex.femtocraft.industry.tiles.TileEntityFemtoRepurposer
import com.itszuvalex.femtocraft.render.RenderSimpleMachine
import com.itszuvalex.femtocraft.utils.FemtocraftUtils
import cpw.mods.fml.relauncher.{Side, SideOnly}
import net.minecraft.block.Block
import net.minecraft.block.material.Material
import net.minecraft.client.renderer.texture.IIconRegister
import net.minecraft.inventory.{Container, IInventory}
import net.minecraft.util.IIcon
import net.minecraft.world.World
class BlockFemtoRepurposer extends TileContainer(Material.iron) with DroppableInventory with RotateOnPlace {
/**
   * The random generator used to drop the inventory contents in
   * random directions when the block is broken.
*/
private val rand = new Random
private var frontIcon: IIcon = null
setBlockName("BlockFemtoRepurposer")
setHardness(3.5f)
setStepSound(Block.soundTypeMetal)
setCreativeTab(Femtocraft.femtocraftTab)
override def renderAsNormalBlock = false
override def getRenderType = RenderSimpleMachine.renderID
@SideOnly(Side.CLIENT) override def getIcon(par1: Int, par2: Int) = if (par1 == par2) frontIcon else blockIcon
/**
* If this returns true, then comparators facing away from this block will
* use the value from getComparatorInputOverride instead of the actual
* redstone signal strength.
*/
override def hasComparatorInputOverride = true
/**
* If hasComparatorInputOverride returns true, the return value from this is
* used instead of the redstone signal strength when this block inputs to a
* comparator.
*/
override def getComparatorInputOverride(par1World: World, par2: Int, par3: Int, par4: Int, par5: Int) = Container.calcRedstoneFromInventory(par1World.getTileEntity(par2, par3, par4).asInstanceOf[IInventory])
@SideOnly(Side.CLIENT) override def registerBlockIcons(par1IconRegister: IIconRegister) {
blockIcon = par1IconRegister.registerIcon(Femtocraft.ID.toLowerCase + ":" + "FemtoMachineBlock_side")
frontIcon = par1IconRegister.registerIcon(Femtocraft.ID.toLowerCase + ":" + "FemtoRepurposer_front")
}
/**
* Returns a new instance of a block's tile entity class. Called on placing
* the block.
*/
override def createNewTileEntity(par1World: World, metadata: Int) = new TileEntityFemtoRepurposer
/**
* ejects contained items into the world, and notifies neighbours of an
* update, as appropriate
*/
override def breakBlock(world: World, x: Int, y: Int, z: Int, block: Block, metadata: Int) {
world.getTileEntity(x, y, z) match {
case te: TileEntityFemtoRepurposer if te.isWorking => FemtocraftUtils.dropItem(te.deconstructingStack, world, x, y, z, rand)
case _ =>
}
world.func_147453_f(x, y, z, block)
super.breakBlock(world, x, y, z, block, metadata)
}
}
|
Itszuvalex/Femtocraft-alpha-1
|
src/main/java/com/itszuvalex/femtocraft/industry/blocks/BlockFemtoRepurposer.scala
|
Scala
|
gpl-2.0
| 4,148
|
package worker
import scala.concurrent.duration._
import java.io.Serializable
import java.io.ByteArrayInputStream
import java.io.ByteArrayOutputStream
import java.io.ObjectInputStream
import java.io.ObjectOutputStream
import com.sandinh.paho.akka._
import com.sandinh.paho.akka.MqttPubSub._
object MqttConfig {
val topic = "akka-iot-mqtt-topic"
// Pub-Sub config
val psConfig = PSConfig(
brokerUrl = "tcp://test.mosquitto.org:1883",
userName = null,
password = null,
stashTimeToLive = 1.minute,
stashCapacity = 8000,
reconnectDelayMin = 10.millis,
reconnectDelayMax = 30.seconds,
cleanSession = false
)
// Serialize object to byte array
def writeToByteArray(obj: Any): Array[Byte] = {
val baos = new ByteArrayOutputStream
val oos = new ObjectOutputStream(baos)
try {
oos.writeObject(obj)
baos.toByteArray
} finally {
try {
oos.close
} catch {
case _: Throwable => // Do nothing
}
}
}
// Deserialize object from byte array
def readFromByteArray[A](bytes: Array[Byte]): A = {
val bais = new ByteArrayInputStream(bytes)
val ois = new ObjectInputStream(bais)
try {
val obj = ois.readObject
obj.asInstanceOf[A]
} finally {
try {
ois.close
} catch {
case _: Throwable => // Do nothing
}
}
}
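  // A minimal round-trip sketch of the two helpers above; `WorkResult` is a hypothetical
  // serializable case class, not part of this module:
  // {{{
  //   case class WorkResult(deviceId: String, value: Double)
  //   val bytes  = MqttConfig.writeToByteArray(WorkResult("device-1", 42.0))
  //   val result = MqttConfig.readFromByteArray[WorkResult](bytes)  // == WorkResult("device-1", 42.0)
  // }}}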
}
|
oel/akka-iot-mqtt
|
src/main/scala/worker/MqttConfig.scala
|
Scala
|
lgpl-3.0
| 1,378
|
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.bigquery
import java.math.MathContext
import java.nio.ByteBuffer
import com.google.api.services.bigquery.model.{
TableReference => GTableReference,
TableRow => GTableRow,
TimePartitioning => GTimePartitioning
}
import com.spotify.scio.ScioContext
import com.spotify.scio.bigquery.client.BigQuery
import com.spotify.scio.values.SCollection
import org.apache.avro.Conversions.DecimalConversion
import org.apache.avro.LogicalTypes
import org.apache.beam.sdk.io.gcp.bigquery.{BigQueryHelpers, BigQueryInsertError, WriteResult}
import org.joda.time._
import org.joda.time.format.{DateTimeFormat, DateTimeFormatterBuilder}
sealed trait Source
/** A wrapper type [[Query]] which wraps a SQL String. */
final case class Query(underlying: String) extends Source {
/**
   * A helper method to replace the "$LATEST" placeholder in the query with the latest
   * common partition.
* For example:
* {{{
* @BigQueryType.fromQuery("SELECT ... FROM `project.data.foo_%s`", "$LATEST")
* class Foo
*
* val bq: BigQuery = BigQuery.defaultInstance()
* scioContext.bigQuerySelect(Foo.queryAsSource("$LATEST").latest(bq))
* }}}
*
* Or, if your query string is a dynamic value which uses a "$LATEST" placeholder,
* {{{
* val dynamicSQLStr = "SELECT ... FROM some_table_$LATEST"
* scioContext.bigQuerySelect(Query(dynamicSQLStr).latest(bq))
* }}}
*
* @param bq [[BigQuery]] client
* @return [[Query]] with "$LATEST" replaced
*/
def latest(bq: BigQuery): Query =
Query(BigQueryPartitionUtil.latestQuery(bq, underlying))
def latest(): Query = latest(BigQuery.defaultInstance())
}
/**
* [[Table]] abstracts the multiple ways of referencing Bigquery tables.
* Tables can be referenced by a table spec `String` or by a table reference [[GTableReference]].
*
* Example:
* {{{
* val table = Table.Spec("bigquery-public-data:samples.shakespeare")
* sc.bigQueryTable(table)
* .filter(r => "hamlet".equals(r.getString("corpus")) && "Polonius".equals(r.getString("word")))
* .saveAsTextFile("./output.txt")
* sc.run()
* }}}
*
* Or create a [[Table]] from a [[GTableReference]]:
* {{{
* val tableReference = new TableReference
* tableReference.setProjectId("bigquery-public-data")
* tableReference.setDatasetId("samples")
* tableReference.setTableId("shakespeare")
* val table = Table.Ref(tableReference)
* }}}
*
 * A helper method is provided to replace the "$LATEST" placeholder in the table name
 * with the latest common partition.
* {{{
* val table = Table.Spec("some_project:some_data.some_table_$LATEST").latest()
* }}}
*/
sealed trait Table extends Source {
def spec: String
def ref: GTableReference
  def latest(bq: BigQuery): Table
def latest(): Table
}
object Table {
final case class Ref(ref: GTableReference) extends Table {
override lazy val spec: String = BigQueryHelpers.toTableSpec(ref)
def latest(bq: BigQuery): Ref =
Ref(Spec(spec).latest(bq).ref)
def latest(): Ref = latest(BigQuery.defaultInstance())
}
final case class Spec(spec: String) extends Table {
override val ref: GTableReference = BigQueryHelpers.parseTableSpec(spec)
def latest(bq: BigQuery): Spec =
Spec(BigQueryPartitionUtil.latestTable(bq, spec))
def latest(): Spec = latest(BigQuery.defaultInstance())
}
}
sealed trait ExtendedErrorInfo {
type Info
private[scio] def coll(sc: ScioContext, wr: WriteResult): SCollection[Info]
}
object ExtendedErrorInfo {
final case object Enabled extends ExtendedErrorInfo {
override type Info = BigQueryInsertError
override private[scio] def coll(sc: ScioContext, wr: WriteResult): SCollection[Info] =
sc.wrap(wr.getFailedInsertsWithErr())
}
final case object Disabled extends ExtendedErrorInfo {
override type Info = TableRow
override private[scio] def coll(sc: ScioContext, wr: WriteResult): SCollection[Info] =
sc.wrap(wr.getFailedInserts())
}
}
/**
* Create a [[TableRow]] with `Map`-like syntax. For example:
*
* {{{
* val r = TableRow("name" -> "Alice", "score" -> 100)
* }}}
*/
object TableRow {
@inline def apply(fields: (String, _)*): TableRow =
fields.foldLeft(new GTableRow())((r, kv) => r.set(kv._1, kv._2))
}
/** Utility for BigQuery `TIMESTAMP` type. */
object Timestamp {
// YYYY-[M]M-[D]D[( |T)[H]H:[M]M:[S]S[.DDDDDD]][time zone]
private[this] val Formatter =
DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSS ZZZ")
private[this] val Parser = new DateTimeFormatterBuilder()
.append(DateTimeFormat.forPattern("yyyy-MM-dd"))
.appendOptional(
new DateTimeFormatterBuilder()
.append(DateTimeFormat.forPattern(" HH:mm:ss").getParser)
.appendOptional(DateTimeFormat.forPattern(".SSSSSS").getParser)
.toParser
)
.appendOptional(
new DateTimeFormatterBuilder()
.append(DateTimeFormat.forPattern("'T'HH:mm:ss").getParser)
.appendOptional(DateTimeFormat.forPattern(".SSSSSS").getParser)
.toParser
)
.appendOptional(
new DateTimeFormatterBuilder()
.append(null, Array(" ZZZ", "ZZ").map(p => DateTimeFormat.forPattern(p).getParser))
.toParser
)
.toFormatter
.withZoneUTC()
/** Convert `Instant` to BigQuery `TIMESTAMP` string. */
def apply(instant: Instant): String = Formatter.print(instant)
/** Convert millisecond instant to BigQuery `TIMESTAMP` string. */
def apply(instant: Long): String = Formatter.print(instant)
/** Convert BigQuery `TIMESTAMP` string to `Instant`. */
def parse(timestamp: String): Instant =
Parser.parseDateTime(timestamp).toInstant
// For BigQueryType macros only, do not use directly
def parse(timestamp: Any): Instant = timestamp match {
case t: Long => new Instant(t / 1000)
case _ => parse(timestamp.toString)
}
}
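/*
 * Usage sketch for the TIMESTAMP utilities above (the literal below is illustrative only):
 * {{{
 *   val instant: Instant = Timestamp.parse("2019-01-01 12:34:56")
 *   val asString: String = Timestamp(instant)
 * }}}
 */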
/** Utility for BigQuery `DATE` type. */
object Date {
// YYYY-[M]M-[D]D
private[this] val Formatter =
DateTimeFormat.forPattern("yyyy-MM-dd").withZoneUTC()
/** Convert `LocalDate` to BigQuery `DATE` string. */
def apply(date: LocalDate): String = Formatter.print(date)
/** Convert BigQuery `DATE` string to `LocalDate`. */
def parse(date: String): LocalDate = LocalDate.parse(date, Formatter)
// For BigQueryType macros only, do not use directly
def parse(date: Any): LocalDate = date match {
case d: Int => new LocalDate(0, DateTimeZone.UTC).plusDays(d)
case _ => parse(date.toString)
}
}
/** Utility for BigQuery `TIME` type. */
object Time {
// [H]H:[M]M:[S]S[.DDDDDD]
private[this] val Formatter =
DateTimeFormat.forPattern("HH:mm:ss.SSSSSS").withZoneUTC()
private[this] val Parser = new DateTimeFormatterBuilder()
.append(DateTimeFormat.forPattern("HH:mm:ss").getParser)
.appendOptional(DateTimeFormat.forPattern(".SSSSSS").getParser)
.toFormatter
.withZoneUTC()
/** Convert `LocalTime` to BigQuery `TIME` string. */
def apply(time: LocalTime): String = Formatter.print(time)
/** Convert BigQuery `TIME` string to `LocalTime`. */
def parse(time: String): LocalTime = Parser.parseLocalTime(time)
// For BigQueryType macros only, do not use directly
def parse(time: Any): LocalTime = time match {
case t: Long => new LocalTime(t / 1000, DateTimeZone.UTC)
case _ => parse(time.toString)
}
}
/** Utility for BigQuery `DATETIME` type. */
object DateTime {
// YYYY-[M]M-[D]D[( |T)[H]H:[M]M:[S]S[.DDDDDD]]
private[this] val Formatter =
DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSS")
private[this] val Parser = new DateTimeFormatterBuilder()
.append(DateTimeFormat.forPattern("yyyy-MM-dd"))
.appendOptional(
new DateTimeFormatterBuilder()
.append(DateTimeFormat.forPattern(" HH:mm:ss").getParser)
.appendOptional(DateTimeFormat.forPattern(".SSSSSS").getParser)
.toParser
)
.appendOptional(
new DateTimeFormatterBuilder()
.append(DateTimeFormat.forPattern("'T'HH:mm:ss").getParser)
.appendOptional(DateTimeFormat.forPattern(".SSSSSS").getParser)
.toParser
)
.toFormatter
.withZoneUTC()
/** Convert `LocalDateTime` to BigQuery `DATETIME` string. */
def apply(datetime: LocalDateTime): String = Formatter.print(datetime)
/** Convert BigQuery `DATETIME` string to `LocalDateTime`. */
def parse(datetime: String): LocalDateTime =
Parser.parseLocalDateTime(datetime)
}
/** Scala wrapper for [[com.google.api.services.bigquery.model.TimePartitioning]]. */
case class TimePartitioning(
`type`: String,
field: String = null,
expirationMs: Long = 0,
requirePartitionFilter: Boolean = false
) {
def asJava: GTimePartitioning = {
var p = new GTimePartitioning()
.setType(`type`)
.setRequirePartitionFilter(requirePartitionFilter)
if (field != null) p = p.setField(field)
if (expirationMs > 0) p = p.setExpirationMs(expirationMs)
p
}
}
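/*
 * Usage sketch (the `event_time` field name is a placeholder, not defined by this library):
 * build a day-partitioned configuration and convert it to the underlying API model.
 * {{{
 *   val partitioning: GTimePartitioning =
 *     TimePartitioning("DAY", field = "event_time", expirationMs = 7L * 24 * 60 * 60 * 1000).asJava
 * }}}
 */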
object Numeric {
val MaxNumericPrecision = 38
val MaxNumericScale = 9
private[this] val DecimalConverter = new DecimalConversion
private[this] val DecimalLogicalType = LogicalTypes.decimal(MaxNumericPrecision, MaxNumericScale)
def apply(value: String): BigDecimal = apply(BigDecimal(value))
def apply(value: BigDecimal): BigDecimal = {
// NUMERIC's max scale is 9, precision is 38
val scaled = if (value.scale > MaxNumericScale) {
value.setScale(MaxNumericScale, scala.math.BigDecimal.RoundingMode.HALF_UP)
} else {
value
}
require(
scaled.precision <= MaxNumericPrecision,
s"max allowed precision is $MaxNumericPrecision"
)
BigDecimal(scaled.toString, new MathContext(MaxNumericPrecision))
}
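  // Illustration of the scale handling above: a value with more than 9 decimal places is
  // rounded half-up to scale 9 before the precision check, e.g.
  // Numeric("1.23456789012") == BigDecimal("1.234567890")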
// For BigQueryType macros only, do not use directly
def parse(value: Any): BigDecimal = value match {
case b: ByteBuffer => DecimalConverter.fromBytes(b, null, DecimalLogicalType)
case _ => apply(value.toString)
}
}
|
regadas/scio
|
scio-google-cloud-platform/src/main/scala/com/spotify/scio/bigquery/BigQueryTypes.scala
|
Scala
|
apache-2.0
| 10,568
|
package com.twitter.finagle.httpx.filter
import com.twitter.finagle.{Filter, Service}
import com.twitter.finagle.httpx.{Request, Response, Status, Method}
import com.twitter.util.{Duration, Future}
/** Implements http://www.w3.org/TR/cors/ */
object Cors {
/**
* A Cross-Origin Resource Sharing policy.
*
* A Policy determines how CORS response headers are set in response to a request with
* CORS headers:
*
* allowsOrigin is a function that takes the value specified in the Origin request header
* and optionally returns the value of Access-Control-Allow-Origin.
*
* allowsMethods is a function that takes the value of the Access-Control-Request-Method
* preflight request header and optionally returns a list of methods to be set in the
* Access-Control-Allow-Methods response header.
*
* allowsHeaders is a function that takes the values set in the Access-Control-Request-Headers
* preflight request header and returns the header names to be set in the Access-Control-Allow-
* Headers response header.
*
* exposedHeaders is the list of header names to be set in the Access-Control-Expose-Headers
* response header (in response to non-preflight requests).
*
* If supportsCredentials is true and allowsOrigin does not return '*', the Access-Control-
   * Allow-Credentials response header will be set to 'true'.
*
* If maxAge is defined, its value (in seconds) will be set in the Access-Control-Max-Age
* response header.
*/
case class Policy(
allowsOrigin: String => Option[String],
allowsMethods: String => Option[Seq[String]],
allowsHeaders: Seq[String] => Option[Seq[String]],
exposedHeaders: Seq[String] = Seq.empty,
supportsCredentials: Boolean = false,
maxAge: Option[Duration] = None)
/** A CORS policy that lets you do whatever you want. Don't use this in production. */
val UnsafePermissivePolicy: Policy = Policy(
allowsOrigin = { origin => Some(origin) },
allowsMethods = { method => Some(method :: Nil) },
allowsHeaders = { headers => Some(headers) },
supportsCredentials = true)
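  /*
   * A hedged example of a locked-down policy (the origin, methods and header names below are
   * placeholders, not values defined by this library):
   * {{{
   *   val restrictivePolicy = Policy(
   *     allowsOrigin  = { origin => Some(origin).filter(_ == "https://example.com") },
   *     allowsMethods = { method => Some(Seq("GET", "POST")).filter(_.contains(method)) },
   *     allowsHeaders = { headers => Some(headers.filter(_.equalsIgnoreCase("Content-Type"))) },
   *     exposedHeaders = Seq("X-Request-Id"),
   *     supportsCredentials = true,
   *     maxAge = Some(Duration.fromSeconds(600)))
   * }}}
   */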
/**
* An HTTP filter that handles preflight (OPTIONS) requests and sets CORS response headers
* as described in the W3C CORS spec.
*/
class HttpFilter(policy: Policy)
extends Filter[Request, Response, Request, Response] {
/*
* Simple Cross-Origin Request, Actual Request, and Redirects
*/
protected[this] def getOrigin(request: Request): Option[String] = {
/*
* If the Origin header is not present terminate this set of steps. The request is outside
* the scope of this specification.
*/
Option(request.headers.get("Origin")) flatMap { origin =>
/*
* If the value of the Origin header is not a case-sensitive match for any of the values
* in list of origins, do not set any additional headers and terminate this set of steps.
*/
policy.allowsOrigin(origin)
}
}
/**
* If the resource supports credentials add a single Access-Control-Allow-Origin
* header, with the value of the Origin header as value, and add a single
* Access-Control-Allow-Credentials header with the case-sensitive string "true" as
* value.
*
* Otherwise, add a single Access-Control-Allow-Origin header, with either the value
* of the Origin header or the string "*" as value.
*
* n.b. The string "*" cannot be used for a resource that supports credentials.
*/
protected[this] def setOriginAndCredentials(response: Response, origin: String): Response = {
response.headers.add("Access-Control-Allow-Origin", origin)
if (policy.supportsCredentials && origin != "*") {
response.headers.add("Access-Control-Allow-Credentials", "true")
}
response
}
/**
* Resources that wish to enable themselves to be shared with multiple Origins but do not
* respond uniformly with "*" must in practice generate the Access-Control-Allow-Origin header
* dynamically in response to every request they wish to allow. As a consequence, authors of
* such resources should send a Vary: Origin HTTP header or provide other appropriate control
* directives to prevent caching of such responses, which may be inaccurate if re-used across-
* origins.
*/
def setVary(response: Response): Response = {
response.headers.set("Vary", "Origin")
response
}
/**
* If the list of exposed headers is not empty add one or more Access-Control-Expose-
* Headers headers, with as values the header field names given in the list of exposed
* headers.
*
     * By not adding the appropriate headers, a resource can also clear the preflight result
* cache of all entries where origin is a case-sensitive match for the value of the
* Origin header and url is a case-sensitive match for the URL of the resource.
*/
protected[this] def addExposedHeaders(response: Response): Response = {
if (policy.exposedHeaders.nonEmpty) {
response.headers.add(
"Access-Control-Expose-Headers", policy.exposedHeaders.mkString(", "))
}
response
}
/** http://www.w3.org/TR/cors/#resource-requests */
protected[this] def handleSimple(request: Request, response: Response): Response =
getOrigin(request) map {
setOriginAndCredentials(response, _)
} map {
addExposedHeaders(_)
} getOrElse response
/*
* Preflight (OPTIONS) requests
*/
protected[this] object Preflight {
def unapply(request: Request): Boolean =
request.method == Method.Options
}
/** Let method be the value as result of parsing the Access-Control-Request-Method header. */
protected[this] def getMethod(request: Request): Option[String] =
Option(request.headers.get("Access-Control-Request-Method"))
/**
* If method is a simple method this step may be skipped.
*
* Add one or more Access-Control-Allow-Methods headers consisting of (a subset of) the list of
* methods.
*/
protected[this] def setMethod(response: Response, methods: Seq[String]): Response = {
response.headers.set("Access-Control-Allow-Methods", methods.mkString(", "))
response
}
/**
* Optionally add a single Access-Control-Max-Age header with as value the amount of seconds
* the user agent is allowed to cache the result of the request.
*/
protected[this] def setMaxAge(response: Response): Response = {
policy.maxAge foreach { maxAge =>
response.headers.add("Access-Control-Max-Age", maxAge.inSeconds.toString)
}
response
}
private[this] val commaSpace = ", *".r
/**
* Let header field-names be the values as result of parsing the
* Access-Control-Request-Headers headers. If there are no Access-Control-Request-Headers
* headers let header field-names be the empty list.
*/
protected[this] def getHeaders(request: Request): Seq[String] =
Option(request.headers.get("Access-Control-Request-Headers")) map {
commaSpace.split(_).toSeq
} getOrElse List.empty[String]
/**
     * If each of the header field-names is a simple header and none is Content-Type, then this step
* may be skipped.
*
* Add one or more Access-Control-Allow-Headers headers consisting of (a subset of) the list of
* headers.
*/
protected[this] def setHeaders(response: Response, headers: Seq[String]): Response = {
if (headers.nonEmpty) {
response.headers.set("Access-Control-Allow-Headers", headers.mkString(", "))
}
response
}
/** http://www.w3.org/TR/cors/#resource-preflight-requests */
protected[this] def handlePreflight(request: Request): Option[Response] =
getOrigin(request) flatMap { origin =>
getMethod(request) flatMap { method =>
val headers = getHeaders(request)
policy.allowsMethods(method) flatMap { allowedMethods =>
policy.allowsHeaders(headers) map { allowedHeaders =>
setHeaders(
setMethod(
setMaxAge(
setOriginAndCredentials(request.response, origin)),
allowedMethods),
allowedHeaders)
}
}
}
}
/**
* Fully handle preflight requests. If a preflight request is deemed to be unacceptable,
* a 200 OK response is served without CORS headers.
*
* Adds CORS response headers onto all non-preflight requests that have the 'Origin' header
* set to a value that is allowed by the Policy.
*/
def apply(request: Request, service: Service[Request, Response]): Future[Response] = {
val response = request match {
case Preflight() => Future {
// If preflight is not acceptable, just return a 200 without CORS headers
handlePreflight(request) getOrElse request.response
}
case _ => service(request) map { handleSimple(request, _) }
}
response map { setVary(_) }
}
}
}
/**
* Adds headers to support Cross-origin resource sharing.
*
* This is here for backwards compatibility. You should probably use Cors.HttpFilter directly.
*/
object CorsFilter {
private[this] val sep = ", *".r
def apply(origin: String = "*",
methods: String = "GET",
headers: String = "x-requested-with",
exposes: String = ""): Filter[Request, Response, Request, Response] = {
val methodList = Some(sep.split(methods).toSeq)
val headerList = Some(sep.split(headers).toSeq)
val exposeList = sep.split(exposes).toSeq
new Cors.HttpFilter(Cors.Policy(
{ _ => Some(origin) },
{ _ => methodList },
{ _ => headerList },
exposeList))
}
}
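/*
 * A minimal usage sketch (not part of the original source). It mirrors the positional
 * Cors.Policy arguments used by CorsFilter above; `underlying` is a hypothetical service and the
 * allowed methods/headers are made up:
 *
 *   val underlying: Service[Request, Response] = ...
 *   // Legacy helper, configured with comma-separated strings:
 *   val legacy = CorsFilter(origin = "*", methods = "GET, POST") andThen underlying
 *   // Preferred: build a Cors.Policy and wrap it in Cors.HttpFilter:
 *   val policy = Cors.Policy(
 *     { _ => Some("*") },                      // allowsOrigin
 *     { _ => Some(Seq("GET", "POST")) },       // allowsMethods
 *     { _ => Some(Seq("x-requested-with")) },  // allowsHeaders
 *     Seq.empty)                               // exposedHeaders
 *   val corsService = new Cors.HttpFilter(policy) andThen underlying
 */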
|
kristofa/finagle
|
finagle-httpx/src/main/scala/com/twitter/finagle/httpx/filter/Cors.scala
|
Scala
|
apache-2.0
| 9,907
|
/*
* Copyright 2012-2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda.basic
import com.netflix.edda.aws.AwsBeanMapper
import com.netflix.edda.aws.AwsCollectionBuilder
import com.netflix.edda.aws.AwsClient
import com.netflix.edda.CollectionManager
import com.netflix.edda.Datastore
import com.netflix.edda.Elector
import com.netflix.edda.Utils
import com.netflix.edda.RequestId
import javax.servlet.http.HttpServlet
import org.slf4j.LoggerFactory
/** A simple servlet that specifies the datastores being used, creates the accessors that
  * initialize the AWS client credentials, and starts the collections.
  * It is recommended to create a separate servlet if behavior changes are required
  * for special collections or datastores.
*/
class BasicServer extends HttpServlet {
private[this] val logger = LoggerFactory.getLogger(getClass)
implicit val req = RequestId("basicServer")
override def init() {
Utils.initConfiguration(System.getProperty("edda.properties","edda.properties"))
logger.info(s"$req Staring Server")
val electorClassName = Utils.getProperty("edda", "elector.class", "", "com.netflix.edda.mongo.MongoElector").get
val electorClass = this.getClass.getClassLoader.loadClass(electorClassName)
val elector = electorClass.newInstance.asInstanceOf[Elector]
val bm = new BasicBeanMapper with AwsBeanMapper
val awsClientFactory = (account: String) => new AwsClient(account)
AwsCollectionBuilder.buildAll(BasicContext, awsClientFactory, bm, elector)
if (logger.isInfoEnabled) logger.info(s"$req Starting Collections")
CollectionManager.start()
super.init()
}
override def destroy() {
CollectionManager.stop()
super.destroy()
}
}
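/*
 * A hypothetical sketch (not part of the original source) of the "separate servlet" the class
 * comment recommends when special collections or datastores are needed: keep the same
 * init/destroy lifecycle but swap in a different Elector or collection builder.
 *
 *   class CustomServer extends HttpServlet {
 *     implicit val req = RequestId("customServer")
 *     override def init() {
 *       Utils.initConfiguration(System.getProperty("edda.properties", "edda.properties"))
 *       val elector = new com.netflix.edda.mongo.MongoElector   // or any other Elector
 *       val bm = new BasicBeanMapper with AwsBeanMapper
 *       AwsCollectionBuilder.buildAll(BasicContext, (account: String) => new AwsClient(account), bm, elector)
 *       CollectionManager.start()
 *       super.init()
 *     }
 *     override def destroy() { CollectionManager.stop(); super.destroy() }
 *   }
 */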
|
wstrucke/edda
|
src/main/scala/com/netflix/edda/basic/BasicServer.scala
|
Scala
|
apache-2.0
| 2,270
|
package sbt.jvmcommand
import sbt.{Plugin, Command, Keys}
import Keys._
import sbt.complete.DefaultParsers._
import java.util.Date
object JvmCommandPlugin extends Plugin {
override lazy val settings = Seq(commands += myCommand)
val SetProperty = "setProperty"
val GetProperty = "getProperty"
val GetProperties = "getProperties"
val Gc = "gc"
val CurrentTime = "currentTime"
val GetEnv = "getenv"
val MemoryStats = "memoryStats"
val AvailableProcessors = "availableProcessors"
val parser = (Space ~> SetProperty ~ ((Space ~> NotSpace ) ~ (Space ~> NotSpace))) |
(Space ~> GetProperty ~ (Space ~> NotSpace)) |
(Space ~> GetProperties) |
(Space ~> GetEnv) |
(Space ~> Gc) |
(Space ~> CurrentTime) |
(Space ~> MemoryStats) |
(Space ~> AvailableProcessors)
lazy val myCommand =
Command("system")( _ => parser) { (st, args) =>
val runtime = Runtime.getRuntime
args match {
case (`SetProperty`,(prop : String, value : String)) =>
st.log.info( "Setting " + prop + " = " + value )
System.setProperty(prop, value)
case (`GetProperty`,key : String) =>
st.log.info( key + " = " + System.getProperty(key))
case `GetProperties` =>
st.log.info( System.getProperties.toString )
case `Gc` =>
st.log.info("Running garbage collection")
System.gc()
case `CurrentTime` =>
st.log.info(new Date().toString + " (currentTimeMillis = " + System.currentTimeMillis() + ")")
case `GetEnv` =>
st.log.info(System.getenv().toString)
case `MemoryStats` =>
st.log.info("Memory")
st.log.info("\ttotal = " + FileSize(runtime.totalMemory()))
st.log.info("\tfree = " + FileSize(runtime.freeMemory()))
st.log.info("\tused = " + FileSize(runtime.totalMemory - runtime.freeMemory()))
st.log.info("\tmax = " + FileSize(runtime.maxMemory()))
case `AvailableProcessors` =>
st.log.info("Available processors = " + runtime.availableProcessors())
}
st
}
case class FileSize(bytes : Long) {
override def toString = {
val symbol = FileSize.symbols.find { s =>
bytes < (1L << (s._1 + 10)) &&
bytes >= (1L << (s._1) )
}.getOrElse( (60, "EB") )
val size = bytes.toDouble / ( 1L << symbol._1 )
"%.2f %s".format(size, symbol._2)
}
}
object FileSize {
val symbols = Map( 0 -> "B", 10 -> "KB", 20 -> "MB",
30 -> "GB", 40 -> "TB", 50 -> "PB" )
}
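  // Worked examples (not part of the original source; they assume the default English locale for
  // the decimal separator). FileSize picks the binary-prefix bucket [1 << k, 1 << (k + 10)) that
  // contains the byte count, then formats with two decimals:
  //   FileSize(512).toString              // "512.00 B"   (512 < 1 << 10)
  //   FileSize(1536).toString             // "1.50 KB"    (1024 <= 1536 < 1 << 20)
  //   FileSize(3L * 1024 * 1024).toString // "3.00 MB"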
}
|
backuitist/jvm-command
|
src/main/scala/sbt/jvmcommand/JvmCommandPlugin.scala
|
Scala
|
apache-2.0
| 2,674
|
import scala.collection.mutable.Queue
object Solution extends App{
val in = io.Source.stdin.getLines()
val nodes = in.next.toInt
val tc = new TreeConstructor(new Tree(1, None, None))
(1 to nodes).foreach(_ => {
val node = in.next.split(" ").map(_.toInt)
tc.addChildren(node(0), node(1))
})
val t = tc.root
val swaps = in.next.toInt
(1 to swaps).foreach(_ => {
val level = in.next.toInt
t.swapNLevelsDown(level, 1)
println(t.inorder().mkString(" "))
})
}
class Tree(val root: Int, var left: Option[Tree], var right: Option[Tree]){
def swap(): Unit = {
val temp = this.left
this.left = this.right
this.right = temp
}
def swapNLevelsDown(n: Int, currLevel: Int): Unit = {
    if(currLevel % n == 0) this.swap()
for{toSwap <- this.left} toSwap.swapNLevelsDown(n, currLevel + 1)
for{toSwap <- this.right} toSwap.swapNLevelsDown(n, currLevel + 1)
}
def inorder(): List[Int] = {
    val leftRec: List[Int] = this.left.map(_.inorder()).getOrElse(Nil)
    val rightRec: List[Int] = this.right.map(_.inorder()).getOrElse(Nil)
(leftRec :+ this.root) ++ rightRec
}
}
class TreeConstructor(val root: Tree){
val insertionQueue = Queue(root)
def addChildren(l: Int, r: Int) = {
val nextParent = insertionQueue.dequeue()
nextParent.left = if(l == -1) None else Some(new Tree(l, None, None))
nextParent.right = if(r == -1) None else Some(new Tree(r, None, None))
for {toAdd <- nextParent.left} insertionQueue.enqueue(toAdd)
for {toAdd <- nextParent.right} insertionQueue.enqueue(toAdd)
}
}
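// Worked example (not part of the original source): building a three-node tree with
//   val tc = new TreeConstructor(new Tree(1, None, None)); tc.addChildren(2, 3)
// gives the in-order traversal 2 1 3. After tc.root.swapNLevelsDown(1, 1) every depth is a
// multiple of 1, so the children of every node are swapped and the traversal becomes 3 1 2.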
|
clemus90/competitive-programming
|
hackerRank/FunctionalProgramming/SwapNodes.scala
|
Scala
|
mit
| 1,659
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.util.Locale
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable
import scala.util.control.NonFatal
import org.apache.spark.broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.physical.Partitioning
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.ColumnarBatch
import org.apache.spark.util.Utils
/**
* An interface for those physical operators that support codegen.
*/
trait CodegenSupport extends SparkPlan {
/** Prefix used in the current operator's variable names. */
private def variablePrefix: String = this match {
case _: HashAggregateExec => "agg"
case _: BroadcastHashJoinExec => "bhj"
case _: SortMergeJoinExec => "smj"
case _: RDDScanExec => "rdd"
case _: DataSourceScanExec => "scan"
case _: InMemoryTableScanExec => "memoryScan"
case _ => nodeName.toLowerCase(Locale.ROOT)
}
/**
* Creates a metric using the specified name.
*
* @return name of the variable representing the metric
*/
def metricTerm(ctx: CodegenContext, name: String): String = {
ctx.addReferenceObj(name, longMetric(name))
}
/**
* Whether this SparkPlan supports whole stage codegen or not.
*/
def supportCodegen: Boolean = true
/**
* Which SparkPlan is calling produce() of this one. It's itself for the first SparkPlan.
*/
protected var parent: CodegenSupport = null
/**
* Returns all the RDDs of InternalRow which generates the input rows.
*
* @note Right now we support up to two RDDs
*/
def inputRDDs(): Seq[RDD[InternalRow]]
/**
* Returns Java source code to process the rows from input RDD.
*/
final def produce(ctx: CodegenContext, parent: CodegenSupport): String = executeQuery {
this.parent = parent
ctx.freshNamePrefix = variablePrefix
s"""
|${ctx.registerComment(s"PRODUCE: ${this.simpleString(SQLConf.get.maxToStringFields)}")}
|${doProduce(ctx)}
""".stripMargin
}
/**
   * Generate the Java source code to process rows; this should be overridden by subclasses to
   * support codegen.
   *
   * doProduce() usually generates the framework; for example, aggregation could generate this:
*
* if (!initialized) {
* # create a hash map, then build the aggregation hash map
* # call child.produce()
* initialized = true;
* }
* while (hashmap.hasNext()) {
* row = hashmap.next();
* # build the aggregation results
* # create variables for results
* # call consume(), which will call parent.doConsume()
* if (shouldStop()) return;
* }
*/
protected def doProduce(ctx: CodegenContext): String
private def prepareRowVar(ctx: CodegenContext, row: String, colVars: Seq[ExprCode]): ExprCode = {
if (row != null) {
ExprCode.forNonNullValue(JavaCode.variable(row, classOf[UnsafeRow]))
} else {
if (colVars.nonEmpty) {
val colExprs = output.zipWithIndex.map { case (attr, i) =>
BoundReference(i, attr.dataType, attr.nullable)
}
val evaluateInputs = evaluateVariables(colVars)
// generate the code to create a UnsafeRow
ctx.INPUT_ROW = row
ctx.currentVars = colVars
val ev = GenerateUnsafeProjection.createCode(ctx, colExprs, false)
val code = code"""
|$evaluateInputs
|${ev.code}
""".stripMargin
ExprCode(code, FalseLiteral, ev.value)
} else {
// There are no columns
ExprCode.forNonNullValue(JavaCode.variable("unsafeRow", classOf[UnsafeRow]))
}
}
}
/**
   * Consume the generated columns or row from the current SparkPlan and call its parent's `doConsume()`.
*
* Note that `outputVars` and `row` can't both be null.
*/
final def consume(ctx: CodegenContext, outputVars: Seq[ExprCode], row: String = null): String = {
val inputVarsCandidate =
if (outputVars != null) {
assert(outputVars.length == output.length)
// outputVars will be used to generate the code for UnsafeRow, so we should copy them
outputVars.map(_.copy())
} else {
assert(row != null, "outputVars and row cannot both be null.")
ctx.currentVars = null
ctx.INPUT_ROW = row
output.zipWithIndex.map { case (attr, i) =>
BoundReference(i, attr.dataType, attr.nullable).genCode(ctx)
}
}
val inputVars = inputVarsCandidate match {
case stream: Stream[ExprCode] => stream.force
case other => other
}
val rowVar = prepareRowVar(ctx, row, outputVars)
// Set up the `currentVars` in the codegen context, as we generate the code of `inputVars`
// before calling `parent.doConsume`. We can't set up `INPUT_ROW`, because parent needs to
// generate code of `rowVar` manually.
ctx.currentVars = inputVars
ctx.INPUT_ROW = null
ctx.freshNamePrefix = parent.variablePrefix
val evaluated = evaluateRequiredVariables(output, inputVars, parent.usedInputs)
// Under certain conditions, we can put the logic to consume the rows of this operator into
    // another function. This prevents the generated function from growing too long to be optimized by the JIT.
// The conditions:
// 1. The config "spark.sql.codegen.splitConsumeFuncByOperator" is enabled.
// 2. `inputVars` are all materialized. That is guaranteed to be true if the parent plan uses
// all variables in output (see `requireAllOutput`).
    // 3. The number of output variables must be less than the maximum number of parameters in a
    //    Java method declaration.
val confEnabled = SQLConf.get.wholeStageSplitConsumeFuncByOperator
val requireAllOutput = output.forall(parent.usedInputs.contains(_))
val paramLength = CodeGenerator.calculateParamLength(output) + (if (row != null) 1 else 0)
val consumeFunc = if (confEnabled && requireAllOutput
&& CodeGenerator.isValidParamLength(paramLength)) {
constructDoConsumeFunction(ctx, inputVars, row)
} else {
parent.doConsume(ctx, inputVars, rowVar)
}
s"""
|${ctx.registerComment(s"CONSUME: ${parent.simpleString(SQLConf.get.maxToStringFields)}")}
|$evaluated
|$consumeFunc
""".stripMargin
}
/**
   * To prevent the concatenated function from growing too long to be optimized by the JIT, we
   * separate the parent's `doConsume` code of a `CodegenSupport` operator into a function to call.
*/
private def constructDoConsumeFunction(
ctx: CodegenContext,
inputVars: Seq[ExprCode],
row: String): String = {
val (args, params, inputVarsInFunc) = constructConsumeParameters(ctx, output, inputVars, row)
val rowVar = prepareRowVar(ctx, row, inputVarsInFunc)
val doConsume = ctx.freshName("doConsume")
ctx.currentVars = inputVarsInFunc
ctx.INPUT_ROW = null
val doConsumeFuncName = ctx.addNewFunction(doConsume,
s"""
| private void $doConsume(${params.mkString(", ")}) throws java.io.IOException {
| ${parent.doConsume(ctx, inputVarsInFunc, rowVar)}
| }
""".stripMargin)
s"""
| $doConsumeFuncName(${args.mkString(", ")});
""".stripMargin
}
/**
* Returns arguments for calling method and method definition parameters of the consume function.
* And also returns the list of `ExprCode` for the parameters.
*/
private def constructConsumeParameters(
ctx: CodegenContext,
attributes: Seq[Attribute],
variables: Seq[ExprCode],
row: String): (Seq[String], Seq[String], Seq[ExprCode]) = {
val arguments = mutable.ArrayBuffer[String]()
val parameters = mutable.ArrayBuffer[String]()
val paramVars = mutable.ArrayBuffer[ExprCode]()
if (row != null) {
arguments += row
parameters += s"InternalRow $row"
}
variables.zipWithIndex.foreach { case (ev, i) =>
val paramName = ctx.freshName(s"expr_$i")
val paramType = CodeGenerator.javaType(attributes(i).dataType)
arguments += ev.value
parameters += s"$paramType $paramName"
val paramIsNull = if (!attributes(i).nullable) {
// Use constant `false` without passing `isNull` for non-nullable variable.
FalseLiteral
} else {
val isNull = ctx.freshName(s"exprIsNull_$i")
arguments += ev.isNull
parameters += s"boolean $isNull"
JavaCode.isNullVariable(isNull)
}
paramVars += ExprCode(paramIsNull, JavaCode.variable(paramName, attributes(i).dataType))
}
(arguments, parameters, paramVars)
}
/**
   * Returns source code to evaluate all the variables, and clears their code to prevent
   * them from being evaluated twice.
*/
protected def evaluateVariables(variables: Seq[ExprCode]): String = {
val evaluate = variables.filter(_.code.nonEmpty).map(_.code.toString).mkString("\\n")
variables.foreach(_.code = EmptyBlock)
evaluate
}
/**
   * Returns source code to evaluate the variables for required attributes, and clears the code
   * of the evaluated variables to prevent them from being evaluated twice.
*/
protected def evaluateRequiredVariables(
attributes: Seq[Attribute],
variables: Seq[ExprCode],
required: AttributeSet): String = {
val evaluateVars = new StringBuilder
variables.zipWithIndex.foreach { case (ev, i) =>
if (ev.code.nonEmpty && required.contains(attributes(i))) {
evaluateVars.append(ev.code.toString + "\\n")
ev.code = EmptyBlock
}
}
evaluateVars.toString()
}
/**
   * Returns source code to evaluate the variables for non-deterministic expressions, and clears
   * the code of the evaluated variables to prevent them from being evaluated twice.
*/
protected def evaluateNondeterministicVariables(
attributes: Seq[Attribute],
variables: Seq[ExprCode],
expressions: Seq[NamedExpression]): String = {
val nondeterministicAttrs = expressions.filterNot(_.deterministic).map(_.toAttribute)
evaluateRequiredVariables(attributes, variables, AttributeSet(nondeterministicAttrs))
}
/**
   * The subset of inputSet that should be evaluated before this plan.
   *
   * We will use this to insert some code to access those columns that are actually used by the
   * current plan before calling doConsume().
*/
def usedInputs: AttributeSet = references
/**
   * Generate the Java source code to process the rows from the child SparkPlan. This should only
   * be called from `consume`.
   *
   * This should be overridden by subclasses to support codegen.
*
* Note: The operator should not assume the existence of an outer processing loop,
* which it can jump from with "continue;"!
*
* For example, filter could generate this:
* # code to evaluate the predicate expression, result is isNull1 and value2
* if (!isNull1 && value2) {
* # call consume(), which will call parent.doConsume()
* }
*
* Note: A plan can either consume the rows as UnsafeRow (row), or a list of variables (input).
   * When consuming as a list of variables, the code to produce the input is already
* generated and `CodegenContext.currentVars` is already set. When consuming as UnsafeRow,
* implementations need to put `row.code` in the generated code and set
* `CodegenContext.INPUT_ROW` manually. Some plans may need more tweaks as they have
* different inputs(join build side, aggregate buffer, etc.), or other special cases.
*/
def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
throw new UnsupportedOperationException
}
/**
* Whether or not the result rows of this operator should be copied before putting into a buffer.
*
   * If any operator inside WholeStageCodegen generates multiple rows from a single row (for
* example, Join), this should be true.
*
* If an operator starts a new pipeline, this should be false.
*/
def needCopyResult: Boolean = {
if (children.isEmpty) {
false
} else if (children.length == 1) {
children.head.asInstanceOf[CodegenSupport].needCopyResult
} else {
throw new UnsupportedOperationException
}
}
/**
* Whether or not the children of this operator should generate a stop check when consuming input
* rows. This is used to suppress shouldStop() in a loop of WholeStageCodegen.
*
* This should be false if an operator starts a new pipeline, which means it consumes all rows
   * produced by its children but doesn't output rows to the buffer by calling append(), so the children
* don't require shouldStop() in the loop of producing rows.
*/
def needStopCheck: Boolean = parent.needStopCheck
/**
* Helper default should stop check code.
*/
def shouldStopCheckCode: String = if (needStopCheck) {
"if (shouldStop()) return;"
} else {
"// shouldStop check is eliminated"
}
/**
   * A sequence of checks which evaluate to true if the downstream Limit operators have not yet
   * received enough records to reach the limit. If the current node is a data producing node, it can leverage
* this information to stop producing data and complete the data flow earlier. Common data
* producing nodes are leaf nodes like Range and Scan, and blocking nodes like Sort and Aggregate.
* These checks should be put into the loop condition of the data producing loop.
*/
def limitNotReachedChecks: Seq[String] = parent.limitNotReachedChecks
/**
* Check if the node is supposed to produce limit not reached checks.
*/
protected def canCheckLimitNotReached: Boolean = children.isEmpty
/**
* A helper method to generate the data producing loop condition according to the
* limit-not-reached checks.
*/
final def limitNotReachedCond: String = {
if (!canCheckLimitNotReached) {
val errMsg = "Only leaf nodes and blocking nodes need to call 'limitNotReachedCond' " +
"in its data producing loop."
if (Utils.isTesting) {
throw new IllegalStateException(errMsg)
} else {
logWarning(s"[BUG] $errMsg Please open a JIRA ticket to report it.")
}
}
if (parent.limitNotReachedChecks.isEmpty) {
""
} else {
parent.limitNotReachedChecks.mkString("", " && ", " &&")
}
}
}
/**
* A special kind of operators which support whole stage codegen. Blocking means these operators
* will consume all the inputs first, before producing output. Typical blocking operators are
* sort and aggregate.
*/
trait BlockingOperatorWithCodegen extends CodegenSupport {
  // Blocking operators usually have some kind of buffer to keep the data before producing it, so
  // they don't need to copy their result even if their child does.
override def needCopyResult: Boolean = false
// Blocking operators always consume all the input first, so its upstream operators don't need a
// stop check.
override def needStopCheck: Boolean = false
// Blocking operators need to consume all the inputs before producing any output. This means,
// Limit operator after this blocking operator will never reach its limit during the execution of
// this blocking operator's upstream operators. Here we override this method to return Nil, so
// that upstream operators will not generate useless conditions (which are always evaluated to
// false) for the Limit operators after this blocking operator.
override def limitNotReachedChecks: Seq[String] = Nil
// This is a blocking node so the node can produce these checks
override protected def canCheckLimitNotReached: Boolean = true
}
/**
* Leaf codegen node reading from a single RDD.
*/
trait InputRDDCodegen extends CodegenSupport {
def inputRDD: RDD[InternalRow]
// If the input can be InternalRows, an UnsafeProjection needs to be created.
protected val createUnsafeProjection: Boolean
override def inputRDDs(): Seq[RDD[InternalRow]] = {
inputRDD :: Nil
}
override def doProduce(ctx: CodegenContext): String = {
// Inline mutable state since an InputRDDCodegen is used once in a task for WholeStageCodegen
val input = ctx.addMutableState("scala.collection.Iterator", "input", v => s"$v = inputs[0];",
forceInline = true)
val row = ctx.freshName("row")
val outputVars = if (createUnsafeProjection) {
      // Creating the vars will make the parent's consume() add an unsafe projection.
ctx.INPUT_ROW = row
ctx.currentVars = null
output.zipWithIndex.map { case (a, i) =>
BoundReference(i, a.dataType, a.nullable).genCode(ctx)
}
} else {
null
}
val updateNumOutputRowsMetrics = if (metrics.contains("numOutputRows")) {
val numOutputRows = metricTerm(ctx, "numOutputRows")
s"$numOutputRows.add(1);"
} else {
""
}
s"""
| while ($limitNotReachedCond $input.hasNext()) {
| InternalRow $row = (InternalRow) $input.next();
| ${updateNumOutputRowsMetrics}
| ${consume(ctx, outputVars, if (createUnsafeProjection) null else row).trim}
| ${shouldStopCheckCode}
| }
""".stripMargin
}
}
/**
* InputAdapter is used to hide a SparkPlan from a subtree that supports codegen.
*
* This is the leaf node of a tree with WholeStageCodegen that is used to generate code
* that consumes an RDD iterator of InternalRow.
*/
case class InputAdapter(child: SparkPlan) extends UnaryExecNode with InputRDDCodegen {
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
override def vectorTypes: Option[Seq[String]] = child.vectorTypes
// This is not strictly needed because the codegen transformation happens after the columnar
// transformation but just for consistency
override def supportsColumnar: Boolean = child.supportsColumnar
override def doExecute(): RDD[InternalRow] = {
child.execute()
}
override def doExecuteBroadcast[T](): broadcast.Broadcast[T] = {
child.doExecuteBroadcast()
}
override def doExecuteColumnar(): RDD[ColumnarBatch] = {
child.executeColumnar()
}
// `InputAdapter` can only generate code to process the rows from its child. If the child produces
// columnar batches, there must be a `ColumnarToRowExec` above `InputAdapter` to handle it by
// overriding `inputRDDs` and calling `InputAdapter#executeColumnar` directly.
override def inputRDD: RDD[InternalRow] = child.execute()
// This is a leaf node so the node can produce limit not reached checks.
override protected def canCheckLimitNotReached: Boolean = true
// InputAdapter does not need UnsafeProjection.
protected val createUnsafeProjection: Boolean = false
override def generateTreeString(
depth: Int,
lastChildren: Seq[Boolean],
append: String => Unit,
verbose: Boolean,
prefix: String = "",
addSuffix: Boolean = false,
maxFields: Int,
printNodeId: Boolean): Unit = {
child.generateTreeString(
depth,
lastChildren,
append,
verbose,
prefix = "",
addSuffix = false,
maxFields,
printNodeId)
}
override def needCopyResult: Boolean = false
}
object WholeStageCodegenExec {
val PIPELINE_DURATION_METRIC = "duration"
private def numOfNestedFields(dataType: DataType): Int = dataType match {
case dt: StructType => dt.fields.map(f => numOfNestedFields(f.dataType)).sum
case m: MapType => numOfNestedFields(m.keyType) + numOfNestedFields(m.valueType)
case a: ArrayType => numOfNestedFields(a.elementType)
case u: UserDefinedType[_] => numOfNestedFields(u.sqlType)
case _ => 1
}
def isTooManyFields(conf: SQLConf, dataType: DataType): Boolean = {
numOfNestedFields(dataType) > conf.wholeStageMaxNumFields
}
}
/**
* WholeStageCodegen compiles a subtree of plans that support codegen together into single Java
* function.
*
 * Here is the call graph for generating Java source (plan A supports codegen, but plan B does not):
*
* WholeStageCodegen Plan A FakeInput Plan B
* =========================================================================
*
* -> execute()
* |
* doExecute() ---------> inputRDDs() -------> inputRDDs() ------> execute()
* |
* +-----------------> produce()
* |
* doProduce() -------> produce()
* |
* doProduce()
* |
* doConsume() <--------- consume()
* |
* doConsume() <-------- consume()
*
* SparkPlan A should override `doProduce()` and `doConsume()`.
*
* `doCodeGen()` will create a `CodeGenContext`, which will hold a list of variables for input,
 * used to generate code for [[BoundReference]].
*/
case class WholeStageCodegenExec(child: SparkPlan)(val codegenStageId: Int)
extends UnaryExecNode with CodegenSupport {
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
// This is not strictly needed because the codegen transformation happens after the columnar
// transformation but just for consistency
override def supportsColumnar: Boolean = child.supportsColumnar
override lazy val metrics = Map(
"pipelineTime" -> SQLMetrics.createTimingMetric(sparkContext,
WholeStageCodegenExec.PIPELINE_DURATION_METRIC))
def generatedClassName(): String = if (conf.wholeStageUseIdInClassName) {
s"GeneratedIteratorForCodegenStage$codegenStageId"
} else {
"GeneratedIterator"
}
/**
* Generates code for this subtree.
*
* @return the tuple of the codegen context and the actual generated source.
*/
def doCodeGen(): (CodegenContext, CodeAndComment) = {
val ctx = new CodegenContext
val code = child.asInstanceOf[CodegenSupport].produce(ctx, this)
// main next function.
ctx.addNewFunction("processNext",
s"""
protected void processNext() throws java.io.IOException {
${code.trim}
}
""", inlineToOuterClass = true)
val className = generatedClassName()
val source = s"""
public Object generate(Object[] references) {
return new $className(references);
}
${ctx.registerComment(
s"""Codegend pipeline for stage (id=$codegenStageId)
|${this.treeString.trim}""".stripMargin,
"wsc_codegenPipeline")}
${ctx.registerComment(s"codegenStageId=$codegenStageId", "wsc_codegenStageId", true)}
final class $className extends ${classOf[BufferedRowIterator].getName} {
private Object[] references;
private scala.collection.Iterator[] inputs;
${ctx.declareMutableStates()}
public $className(Object[] references) {
this.references = references;
}
public void init(int index, scala.collection.Iterator[] inputs) {
partitionIndex = index;
this.inputs = inputs;
${ctx.initMutableStates()}
${ctx.initPartition()}
}
${ctx.emitExtraCode()}
${ctx.declareAddedFunctions()}
}
""".trim
// try to compile, helpful for debug
val cleanedSource = CodeFormatter.stripOverlappingComments(
new CodeAndComment(CodeFormatter.stripExtraNewLines(source), ctx.getPlaceHolderToComments()))
logDebug(s"\\n${CodeFormatter.format(cleanedSource)}")
(ctx, cleanedSource)
}
override def doExecuteColumnar(): RDD[ColumnarBatch] = {
// Code generation is not currently supported for columnar output, so just fall back to
// the interpreted path
child.executeColumnar()
}
override def doExecute(): RDD[InternalRow] = {
val (ctx, cleanedSource) = doCodeGen()
// try to compile and fallback if it failed
val (_, maxCodeSize) = try {
CodeGenerator.compile(cleanedSource)
} catch {
case NonFatal(_) if !Utils.isTesting && sqlContext.conf.codegenFallback =>
        // We should have already seen the error message
logWarning(s"Whole-stage codegen disabled for plan (id=$codegenStageId):\\n $treeString")
return child.execute()
}
// Check if compiled code has a too large function
if (maxCodeSize > sqlContext.conf.hugeMethodLimit) {
logInfo(s"Found too long generated codes and JIT optimization might not work: " +
s"the bytecode size ($maxCodeSize) is above the limit " +
s"${sqlContext.conf.hugeMethodLimit}, and the whole-stage codegen was disabled " +
s"for this plan (id=$codegenStageId). To avoid this, you can raise the limit " +
s"`${SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.key}`:\\n$treeString")
return child.execute()
}
val references = ctx.references.toArray
val durationMs = longMetric("pipelineTime")
// Even though rdds is an RDD[InternalRow] it may actually be an RDD[ColumnarBatch] with
// type erasure hiding that. This allows for the input to a code gen stage to be columnar,
// but the output must be rows.
val rdds = child.asInstanceOf[CodegenSupport].inputRDDs()
assert(rdds.size <= 2, "Up to two input RDDs can be supported")
if (rdds.length == 1) {
rdds.head.mapPartitionsWithIndex { (index, iter) =>
val (clazz, _) = CodeGenerator.compile(cleanedSource)
val buffer = clazz.generate(references).asInstanceOf[BufferedRowIterator]
buffer.init(index, Array(iter))
new Iterator[InternalRow] {
override def hasNext: Boolean = {
val v = buffer.hasNext
if (!v) durationMs += buffer.durationMs()
v
}
override def next: InternalRow = buffer.next()
}
}
} else {
// Right now, we support up to two input RDDs.
rdds.head.zipPartitions(rdds(1)) { (leftIter, rightIter) =>
Iterator((leftIter, rightIter))
// a small hack to obtain the correct partition index
}.mapPartitionsWithIndex { (index, zippedIter) =>
val (leftIter, rightIter) = zippedIter.next()
val (clazz, _) = CodeGenerator.compile(cleanedSource)
val buffer = clazz.generate(references).asInstanceOf[BufferedRowIterator]
buffer.init(index, Array(leftIter, rightIter))
new Iterator[InternalRow] {
override def hasNext: Boolean = {
val v = buffer.hasNext
if (!v) durationMs += buffer.durationMs()
v
}
override def next: InternalRow = buffer.next()
}
}
}
}
override def inputRDDs(): Seq[RDD[InternalRow]] = {
throw new UnsupportedOperationException
}
override def doProduce(ctx: CodegenContext): String = {
throw new UnsupportedOperationException
}
override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
val doCopy = if (needCopyResult) {
".copy()"
} else {
""
}
s"""
|${row.code}
|append(${row.value}$doCopy);
""".stripMargin.trim
}
override def generateTreeString(
depth: Int,
lastChildren: Seq[Boolean],
append: String => Unit,
verbose: Boolean,
prefix: String = "",
addSuffix: Boolean = false,
maxFields: Int,
printNodeId: Boolean): Unit = {
child.generateTreeString(
depth,
lastChildren,
append,
verbose,
if (printNodeId) "* " else s"*($codegenStageId) ",
false,
maxFields,
printNodeId)
}
override def needStopCheck: Boolean = true
override def limitNotReachedChecks: Seq[String] = Nil
override protected def otherCopyArgs: Seq[AnyRef] = Seq(codegenStageId.asInstanceOf[Integer])
}
/**
* Find the chained plans that support codegen, collapse them together as WholeStageCodegen.
*
* The `codegenStageCounter` generates ID for codegen stages within a query plan.
* It does not affect equality, nor does it participate in destructuring pattern matching
* of WholeStageCodegenExec.
*
* This ID is used to help differentiate between codegen stages. It is included as a part
* of the explain output for physical plans, e.g.
*
* == Physical Plan ==
* *(5) SortMergeJoin [x#3L], [y#9L], Inner
* :- *(2) Sort [x#3L ASC NULLS FIRST], false, 0
* : +- Exchange hashpartitioning(x#3L, 200)
* : +- *(1) Project [(id#0L % 2) AS x#3L]
* : +- *(1) Filter isnotnull((id#0L % 2))
* : +- *(1) Range (0, 5, step=1, splits=8)
* +- *(4) Sort [y#9L ASC NULLS FIRST], false, 0
* +- Exchange hashpartitioning(y#9L, 200)
* +- *(3) Project [(id#6L % 2) AS y#9L]
* +- *(3) Filter isnotnull((id#6L % 2))
* +- *(3) Range (0, 5, step=1, splits=8)
*
* where the ID makes it obvious that not all adjacent codegen'd plan operators are of the
* same codegen stage.
*
* The codegen stage ID is also optionally included in the name of the generated classes as
* a suffix, so that it's easier to associate a generated class back to the physical operator.
* This is controlled by SQLConf: spark.sql.codegen.useIdInClassName
*
* The ID is also included in various log messages.
*
* Within a query, a codegen stage in a plan starts counting from 1, in "insertion order".
* WholeStageCodegenExec operators are inserted into a plan in depth-first post-order.
* See CollapseCodegenStages.insertWholeStageCodegen for the definition of insertion order.
*
* 0 is reserved as a special ID value to indicate a temporary WholeStageCodegenExec object
* is created, e.g. for special fallback handling when an existing WholeStageCodegenExec
* failed to generate/compile code.
*/
case class CollapseCodegenStages(
conf: SQLConf,
codegenStageCounter: AtomicInteger = new AtomicInteger(0))
extends Rule[SparkPlan] {
private def supportCodegen(e: Expression): Boolean = e match {
case e: LeafExpression => true
// CodegenFallback requires the input to be an InternalRow
case e: CodegenFallback => false
case _ => true
}
private def supportCodegen(plan: SparkPlan): Boolean = plan match {
case plan: CodegenSupport if plan.supportCodegen =>
val willFallback = plan.expressions.exists(_.find(e => !supportCodegen(e)).isDefined)
// the generated code will be huge if there are too many columns
val hasTooManyOutputFields =
WholeStageCodegenExec.isTooManyFields(conf, plan.schema)
val hasTooManyInputFields =
plan.children.exists(p => WholeStageCodegenExec.isTooManyFields(conf, p.schema))
!willFallback && !hasTooManyOutputFields && !hasTooManyInputFields
case _ => false
}
/**
* Inserts an InputAdapter on top of those that do not support codegen.
*/
private def insertInputAdapter(plan: SparkPlan): SparkPlan = {
plan match {
case p if !supportCodegen(p) =>
// collapse them recursively
InputAdapter(insertWholeStageCodegen(p))
case j: SortMergeJoinExec =>
// The children of SortMergeJoin should do codegen separately.
j.withNewChildren(j.children.map(
child => InputAdapter(insertWholeStageCodegen(child))))
case p => p.withNewChildren(p.children.map(insertInputAdapter))
}
}
/**
* Inserts a WholeStageCodegen on top of those that support codegen.
*/
private def insertWholeStageCodegen(plan: SparkPlan): SparkPlan = {
plan match {
      // For operators that output domain objects, do not insert WholeStageCodegen, as a domain
      // object cannot be written into an unsafe row.
case plan if plan.output.length == 1 && plan.output.head.dataType.isInstanceOf[ObjectType] =>
plan.withNewChildren(plan.children.map(insertWholeStageCodegen))
case plan: LocalTableScanExec =>
        // Do not make LocalTableScanExec the root of WholeStageCodegen
// to support the fast driver-local collect/take paths.
plan
case plan: CodegenSupport if supportCodegen(plan) =>
// The whole-stage-codegen framework is row-based. If a plan supports columnar execution,
// it can't support whole-stage-codegen at the same time.
assert(!plan.supportsColumnar)
WholeStageCodegenExec(insertInputAdapter(plan))(codegenStageCounter.incrementAndGet())
case other =>
other.withNewChildren(other.children.map(insertWholeStageCodegen))
}
}
def apply(plan: SparkPlan): SparkPlan = {
if (conf.wholeStageEnabled) {
insertWholeStageCodegen(plan)
} else {
plan
}
}
}
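/*
 * A hedged usage sketch (not part of the original source). `conf.wholeStageEnabled` is backed by
 * the SQL conf key "spark.sql.codegen.wholeStage", so toggling it and re-running explain() shows
 * the WholeStageCodegenExec wrappers (the "*(<stage id>)" prefixes) appear and disappear:
 *
 *   spark.conf.set("spark.sql.codegen.wholeStage", "true")
 *   spark.range(0, 100).filter("id % 2 = 0").explain()  // operators prefixed with *(1), *(2), ...
 *   spark.conf.set("spark.sql.codegen.wholeStage", "false")
 *   spark.range(0, 100).filter("id % 2 = 0").explain()  // same operators, no codegen stages
 */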
|
pgandhi999/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala
|
Scala
|
apache-2.0
| 34,231
|
package specification
import _root_.java.util.Date
class Traveler(
val firstName: String,
val middleName: String,
val lastName: String,
val gender: String,
val birthday: Date, // should be in YYYY-MM-DD format when exported to XML
val travelDocType: String, // passport, driver license, EU citizen card, etc
val travelDocNumber: String,
val phone: String,
val email: String){
def toXML =
<traveler>
<firstName>{ firstName }</firstName>
<middleName>{ middleName }</middleName>
<lastName>{ lastName }</lastName>
<gender>{ gender } </gender>
<birthday>{ birthday }</birthday>
<travelDocType>{ travelDocType }</travelDocType>
<travelDocNumber>{ travelDocNumber }</travelDocNumber>
<phone>{ phone }</phone>
<email>{ email }</email>
</traveler>
}
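// Illustrative output (not part of the original source; the traveler values are made up). Note
// that `birthday` is interpolated via Date.toString, so producing the YYYY-MM-DD form mentioned
// above requires formatting the date before it reaches this class:
//   new Traveler("Ada", "", "Lovelace", "F", new Date(), "passport", "X123",
//     "+1-555-0100", "ada@example.com").toXML
// yields, roughly, <traveler><firstName>Ada</firstName>...<travelDocType>passport</travelDocType>
// ...<email>ada@example.com</email></traveler>, with the Date rendered in its default format.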
|
jwachter/travel-service
|
src/main/scala/specification/Traveler.scala
|
Scala
|
apache-2.0
| 837
|
package gh.test.gh2011c.payload
import gh2011c.models.IssueCommentEventPayload
import net.liftweb.json._
import org.scalatest.{Matchers, FlatSpec}
class IssueCommentEventPayloadTest extends FlatSpec with Matchers
{
"A valid IssueCommentEvent payload" must "be correctly parsed" in {
val json = parse(
"""
| {
|
| "comment":{
| "created_at":"2011-10-07T23:59:31Z",
| "body":"Ok, it sounds like rvmsudo is damaging your link environment somehow. I am going to release this build and close for now. Thanks both of you for all your help.",
| "updated_at":"2011-10-07T23:59:31Z",
| "url":"https://api.github.com/repos/fauna/memcached/issues/comments/2329706",
| "id":2329706,
| "user":{
| "avatar_url":"https://secure.gravatar.com/avatar/f8634aca904bc63cb047cb1bd93bdc74?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-140.png",
| "url":"https://api.github.com/users/evan",
| "id":210,
| "login":"evan"
| }
| },
| "action":"created",
| "issue":{
| "number":52,
| "created_at":"2011-07-05T22:17:32Z",
| "pull_request":{
| "diff_url":null,
| "patch_url":null,
| "html_url":null
| },
| "body":"Version 1.2.7 fails to install on RHEL6.1. Version 1.2.6 installs fine. Output of build failure and gem_make.out at https://gist.github.com/868793135b0d8db77c70\\r\\n\\r\\nLibraries and devel headers are installed for sasl, libmemcache, and memcache:\\r\\n\\r\\n[vstsbx01:~] root# rpm -qa|egrep 'sasl|memcac'|sort\\r\\ncyrus-sasl-2.1.23-8.el6.x86_64\\r\\ncyrus-sasl-devel-2.1.23-8.el6.x86_64\\r\\ncyrus-sasl-gssapi-2.1.23-8.el6.x86_64\\r\\ncyrus-sasl-lib-2.1.23-8.el6.x86_64\\r\\ncyrus-sasl-plain-2.1.23-8.el6.x86_64\\r\\nlibmemcached-0.31-1.1.el6.x86_64\\r\\nlibmemcached-devel-0.31-1.1.el6.x86_64\\r\\nmemcached-1.4.4-3.el6.x86_64\\r\\nmemcached-devel-1.4.4-3.el6.x86_64",
| "comments":46,
| "title":"Install fails on RHEL6, v1.2.7",
| "updated_at":"2011-10-07T23:59:31Z",
| "url":"https://api.github.com/repos/fauna/memcached/issues/52",
| "id":1170572,
| "assignee":{
| "avatar_url":"https://secure.gravatar.com/avatar/e35b8aac0c907bd2167bb3a7b9d3ca61?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-140.png",
| "url":"https://api.github.com/users/bitbckt",
| "id":335,
| "login":"bitbckt"
| },
| "milestone":null,
| "closed_at":null,
| "user":{
| "avatar_url":"https://secure.gravatar.com/avatar/b97a5e7cb0d66ca79a7d81de371e8791?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-140.png",
| "url":"https://api.github.com/users/ryanschwartz",
| "id":4556,
| "login":"ryanschwartz"
| },
| "html_url":"https://github.com/fauna/memcached/issues/52",
| "labels":[
| ],
| "state":"open"
| },
| "legacy":{
| "issue_id":1170572,
| "comment_id":2329706
| }
|
|}
""".stripMargin)
IssueCommentEventPayload(json) shouldBe 'defined
}
}
|
mgoeminne/github_etl
|
src/test/scala/gh/test/gh2011c/payload/IssuecommentEventPayloadTest.scala
|
Scala
|
mit
| 3,809
|
package Controller
import JsonHandling.ManufacturingData
import MongoConnectivity.{MongoConsumer, MongoProducer}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
/**
* Created by fabian on 18.11.16.
*/
class MongoController {
private val mongoConsumer: MongoConsumer = new MongoConsumer()
private val mongoProducer: MongoProducer = new MongoProducer()
private val mongoDatabaseName:String = "oip_taktstrasse"
private val mongoCollectionName: String = "manufacturingData"
def getManufacturingDataFromMongo(sc: SparkContext): RDD[ManufacturingData] ={
mongoConsumer.getMongoData(mongoConsumer.getMongoCollection(mongoDatabaseName, mongoCollectionName),sc)
}
def runAnalysisWithMongoData(analysisController:AnalysisController, sc: SparkContext): Unit ={
val coll = mongoConsumer.getMongoCollection(mongoDatabaseName, mongoCollectionName)
val rdd = mongoConsumer.getMongoData(coll,sc)
analysisController.runAllAnalysis(rdd)
}
def writeAnalysisToMongo(json: String, kafkaTopic: String): Unit ={
val mongoClient = mongoProducer.getMongoProducer
mongoProducer.writeJsonToMongo(mongoClient, json, kafkaTopic)
mongoProducer.closeMongoProducer(mongoClient)
}
}
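/*
 * A hedged usage sketch (not part of the original source); `sc` and `analysisController` are
 * assumed to be created elsewhere in the application:
 *
 *   val controller = new MongoController()
 *   // Pull manufacturing data from oip_taktstrasse.manufacturingData as an RDD:
 *   val data: RDD[ManufacturingData] = controller.getManufacturingDataFromMongo(sc)
 *   // Or run all configured analyses directly against the Mongo-backed RDD:
 *   controller.runAnalysisWithMongoData(analysisController, sc)
 */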
|
4lexBaum/projekt-5s-dhbw
|
Spark/src/main/scala/Controller/MongoController.scala
|
Scala
|
mit
| 1,231
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.linalg
import scala.reflect.ClassTag
import scala.util.Random
import breeze.linalg.{squaredDistance => breezeSquaredDistance, DenseMatrix => BDM}
import org.json4s.jackson.JsonMethods.{parse => parseJson}
import org.apache.spark.{SparkConf, SparkException, SparkFunSuite}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.Kryo._
import org.apache.spark.ml.{linalg => newlinalg}
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.serializer.KryoSerializer
class VectorsSuite extends SparkFunSuite with Logging {
val arr = Array(0.1, 0.0, 0.3, 0.4)
val n = 4
val indices = Array(0, 2, 3)
val values = Array(0.1, 0.3, 0.4)
test("kryo class register") {
val conf = new SparkConf(false)
conf.set(KRYO_REGISTRATION_REQUIRED, true)
val ser = new KryoSerializer(conf).newInstance()
def check[T: ClassTag](t: T) {
assert(ser.deserialize[T](ser.serialize(t)) === t)
}
val desVec = Vectors.dense(arr).asInstanceOf[DenseVector]
val sparVec = Vectors.sparse(n, indices, values).asInstanceOf[SparseVector]
check(desVec)
check(sparVec)
}
test("dense vector construction with varargs") {
val vec = Vectors.dense(arr).asInstanceOf[DenseVector]
assert(vec.size === arr.length)
assert(vec.values.eq(arr))
}
test("dense vector construction from a double array") {
val vec = Vectors.dense(arr).asInstanceOf[DenseVector]
assert(vec.size === arr.length)
assert(vec.values.eq(arr))
}
test("sparse vector construction") {
val vec = Vectors.sparse(n, indices, values).asInstanceOf[SparseVector]
assert(vec.size === n)
assert(vec.indices.eq(indices))
assert(vec.values.eq(values))
}
test("sparse vector construction with unordered elements") {
val vec = Vectors.sparse(n, indices.zip(values).reverse).asInstanceOf[SparseVector]
assert(vec.size === n)
assert(vec.indices === indices)
assert(vec.values === values)
}
test("sparse vector construction with mismatched indices/values array") {
intercept[IllegalArgumentException] {
Vectors.sparse(4, Array(1, 2, 3), Array(3.0, 5.0, 7.0, 9.0))
}
intercept[IllegalArgumentException] {
Vectors.sparse(4, Array(1, 2, 3), Array(3.0, 5.0))
}
}
test("sparse vector construction with too many indices vs size") {
intercept[IllegalArgumentException] {
Vectors.sparse(3, Array(1, 2, 3, 4), Array(3.0, 5.0, 7.0, 9.0))
}
}
test("dense to array") {
val vec = Vectors.dense(arr).asInstanceOf[DenseVector]
assert(vec.toArray.eq(arr))
}
test("dense argmax") {
val vec = Vectors.dense(Array.empty[Double]).asInstanceOf[DenseVector]
assert(vec.argmax === -1)
val vec2 = Vectors.dense(arr).asInstanceOf[DenseVector]
assert(vec2.argmax === 3)
val vec3 = Vectors.dense(Array(-1.0, 0.0, -2.0, 1.0)).asInstanceOf[DenseVector]
assert(vec3.argmax === 3)
}
test("sparse to array") {
val vec = Vectors.sparse(n, indices, values).asInstanceOf[SparseVector]
assert(vec.toArray === arr)
}
test("sparse argmax") {
val vec = Vectors.sparse(0, Array.empty[Int], Array.empty[Double]).asInstanceOf[SparseVector]
assert(vec.argmax === -1)
val vec2 = Vectors.sparse(n, indices, values).asInstanceOf[SparseVector]
assert(vec2.argmax === 3)
val vec3 = Vectors.sparse(5, Array(2, 3, 4), Array(1.0, 0.0, -.7))
assert(vec3.argmax === 2)
// check for case that sparse vector is created with
// only negative values {0.0, 0.0,-1.0, -0.7, 0.0}
val vec4 = Vectors.sparse(5, Array(2, 3), Array(-1.0, -.7))
assert(vec4.argmax === 0)
val vec5 = Vectors.sparse(11, Array(0, 3, 10), Array(-1.0, -.7, 0.0))
assert(vec5.argmax === 1)
val vec6 = Vectors.sparse(11, Array(0, 1, 2), Array(-1.0, -.7, 0.0))
assert(vec6.argmax === 2)
val vec7 = Vectors.sparse(5, Array(0, 1, 3), Array(-1.0, 0.0, -.7))
assert(vec7.argmax === 1)
val vec8 = Vectors.sparse(5, Array(1, 2), Array(0.0, -1.0))
assert(vec8.argmax === 0)
// Check for case when sparse vector is non-empty but the values are empty
val vec9 = Vectors.sparse(100, Array.empty[Int], Array.empty[Double]).asInstanceOf[SparseVector]
assert(vec9.argmax === 0)
val vec10 = Vectors.sparse(1, Array.empty[Int], Array.empty[Double]).asInstanceOf[SparseVector]
assert(vec10.argmax === 0)
}
test("vector equals") {
val dv1 = Vectors.dense(arr.clone())
val dv2 = Vectors.dense(arr.clone())
val sv1 = Vectors.sparse(n, indices.clone(), values.clone())
val sv2 = Vectors.sparse(n, indices.clone(), values.clone())
val vectors = Seq(dv1, dv2, sv1, sv2)
for (v <- vectors; u <- vectors) {
assert(v === u)
assert(v.## === u.##)
}
val another = Vectors.dense(0.1, 0.2, 0.3, 0.4)
for (v <- vectors) {
assert(v != another)
assert(v.## != another.##)
}
}
test("vectors equals with explicit 0") {
val dv1 = Vectors.dense(Array(0, 0.9, 0, 0.8, 0))
val sv1 = Vectors.sparse(5, Array(1, 3), Array(0.9, 0.8))
val sv2 = Vectors.sparse(5, Array(0, 1, 2, 3, 4), Array(0, 0.9, 0, 0.8, 0))
val vectors = Seq(dv1, sv1, sv2)
for (v <- vectors; u <- vectors) {
assert(v === u)
assert(v.## === u.##)
}
val another = Vectors.sparse(5, Array(0, 1, 3), Array(0, 0.9, 0.2))
for (v <- vectors) {
assert(v != another)
assert(v.## != another.##)
}
}
test("indexing dense vectors") {
val vec = Vectors.dense(1.0, 2.0, 3.0, 4.0)
assert(vec(0) === 1.0)
assert(vec(3) === 4.0)
}
test("indexing sparse vectors") {
val vec = Vectors.sparse(7, Array(0, 2, 4, 6), Array(1.0, 2.0, 3.0, 4.0))
assert(vec(0) === 1.0)
assert(vec(1) === 0.0)
assert(vec(2) === 2.0)
assert(vec(3) === 0.0)
assert(vec(6) === 4.0)
val vec2 = Vectors.sparse(8, Array(0, 2, 4, 6), Array(1.0, 2.0, 3.0, 4.0))
assert(vec2(6) === 4.0)
assert(vec2(7) === 0.0)
}
test("parse vectors") {
val vectors = Seq(
Vectors.dense(Array.empty[Double]),
Vectors.dense(1.0),
Vectors.dense(1.0E6, 0.0, -2.0e-7),
Vectors.sparse(0, Array.empty[Int], Array.empty[Double]),
Vectors.sparse(1, Array(0), Array(1.0)),
Vectors.sparse(3, Array(0, 2), Array(1.0, -2.0)))
vectors.foreach { v =>
val v1 = Vectors.parse(v.toString)
assert(v.getClass === v1.getClass)
assert(v === v1)
}
val malformatted = Seq("1", "[1,,]", "[1,2b]", "(1,[1,2])", "([1],[2.0,1.0])")
malformatted.foreach { s =>
intercept[SparkException] {
Vectors.parse(s)
logInfo(s"Didn't detect malformatted string $s.")
}
}
}
test("zeros") {
assert(Vectors.zeros(3) === Vectors.dense(0.0, 0.0, 0.0))
}
test("Vector.copy") {
val sv = Vectors.sparse(4, Array(0, 2), Array(1.0, 2.0))
val svCopy = sv.copy
(sv, svCopy) match {
case (sv: SparseVector, svCopy: SparseVector) =>
assert(sv.size === svCopy.size)
assert(sv.indices === svCopy.indices)
assert(sv.values === svCopy.values)
assert(!sv.indices.eq(svCopy.indices))
assert(!sv.values.eq(svCopy.values))
case _ =>
throw new RuntimeException(s"copy returned ${svCopy.getClass} on ${sv.getClass}.")
}
val dv = Vectors.dense(1.0, 0.0, 2.0)
val dvCopy = dv.copy
(dv, dvCopy) match {
case (dv: DenseVector, dvCopy: DenseVector) =>
assert(dv.size === dvCopy.size)
assert(dv.values === dvCopy.values)
assert(!dv.values.eq(dvCopy.values))
case _ =>
throw new RuntimeException(s"copy returned ${dvCopy.getClass} on ${dv.getClass}.")
}
}
test("VectorUDT") {
val dv0 = Vectors.dense(Array.empty[Double])
val dv1 = Vectors.dense(1.0, 2.0)
val sv0 = Vectors.sparse(2, Array.empty, Array.empty)
val sv1 = Vectors.sparse(2, Array(1), Array(2.0))
val udt = new VectorUDT()
for (v <- Seq(dv0, dv1, sv0, sv1)) {
assert(v === udt.deserialize(udt.serialize(v)))
}
assert(udt.typeName == "vector")
assert(udt.simpleString == "vector")
}
test("fromBreeze") {
val x = BDM.zeros[Double](10, 10)
val v = Vectors.fromBreeze(x(::, 0))
assert(v.size === x.rows)
}
test("sqdist") {
val random = new Random()
for (m <- 1 until 1000 by 100) {
val nnz = random.nextInt(m)
val indices1 = random.shuffle(0 to m - 1).slice(0, nnz).sorted.toArray
val values1 = Array.fill(nnz)(random.nextDouble)
val sparseVector1 = Vectors.sparse(m, indices1, values1)
val indices2 = random.shuffle(0 to m - 1).slice(0, nnz).sorted.toArray
val values2 = Array.fill(nnz)(random.nextDouble)
val sparseVector2 = Vectors.sparse(m, indices2, values2)
val denseVector1 = Vectors.dense(sparseVector1.toArray)
val denseVector2 = Vectors.dense(sparseVector2.toArray)
val squaredDist = breezeSquaredDistance(sparseVector1.asBreeze, sparseVector2.asBreeze)
// SparseVector vs. SparseVector
assert(Vectors.sqdist(sparseVector1, sparseVector2) ~== squaredDist relTol 1E-8)
// DenseVector vs. SparseVector
assert(Vectors.sqdist(denseVector1, sparseVector2) ~== squaredDist relTol 1E-8)
// DenseVector vs. DenseVector
assert(Vectors.sqdist(denseVector1, denseVector2) ~== squaredDist relTol 1E-8)
}
}
test("foreachActive") {
val dv = Vectors.dense(0.0, 1.2, 3.1, 0.0)
val sv = Vectors.sparse(4, Seq((1, 1.2), (2, 3.1), (3, 0.0)))
val dvMap = scala.collection.mutable.Map[Int, Double]()
dv.foreachActive { (index, value) =>
dvMap.put(index, value)
}
assert(dvMap.size === 4)
assert(dvMap.get(0) === Some(0.0))
assert(dvMap.get(1) === Some(1.2))
assert(dvMap.get(2) === Some(3.1))
assert(dvMap.get(3) === Some(0.0))
val svMap = scala.collection.mutable.Map[Int, Double]()
sv.foreachActive { (index, value) =>
svMap.put(index, value)
}
assert(svMap.size === 3)
assert(svMap.get(1) === Some(1.2))
assert(svMap.get(2) === Some(3.1))
assert(svMap.get(3) === Some(0.0))
}
test("vector p-norm") {
val dv = Vectors.dense(0.0, -1.2, 3.1, 0.0, -4.5, 1.9)
val sv = Vectors.sparse(6, Seq((1, -1.2), (2, 3.1), (3, 0.0), (4, -4.5), (5, 1.9)))
assert(Vectors.norm(dv, 1.0) ~== dv.toArray.foldLeft(0.0)((a, v) =>
a + math.abs(v)) relTol 1E-8)
assert(Vectors.norm(sv, 1.0) ~== sv.toArray.foldLeft(0.0)((a, v) =>
a + math.abs(v)) relTol 1E-8)
assert(Vectors.norm(dv, 2.0) ~== math.sqrt(dv.toArray.foldLeft(0.0)((a, v) =>
a + v * v)) relTol 1E-8)
assert(Vectors.norm(sv, 2.0) ~== math.sqrt(sv.toArray.foldLeft(0.0)((a, v) =>
a + v * v)) relTol 1E-8)
assert(Vectors.norm(dv, Double.PositiveInfinity) ~== dv.toArray.map(math.abs).max relTol 1E-8)
assert(Vectors.norm(sv, Double.PositiveInfinity) ~== sv.toArray.map(math.abs).max relTol 1E-8)
assert(Vectors.norm(dv, 3.7) ~== math.pow(dv.toArray.foldLeft(0.0)((a, v) =>
a + math.pow(math.abs(v), 3.7)), 1.0 / 3.7) relTol 1E-8)
assert(Vectors.norm(sv, 3.7) ~== math.pow(sv.toArray.foldLeft(0.0)((a, v) =>
a + math.pow(math.abs(v), 3.7)), 1.0 / 3.7) relTol 1E-8)
}
test("Vector numActive and numNonzeros") {
val dv = Vectors.dense(0.0, 2.0, 3.0, 0.0)
assert(dv.numActives === 4)
assert(dv.numNonzeros === 2)
val sv = Vectors.sparse(4, Array(0, 1, 2), Array(0.0, 2.0, 3.0))
assert(sv.numActives === 3)
assert(sv.numNonzeros === 2)
}
test("Vector toSparse and toDense") {
val dv0 = Vectors.dense(0.0, 2.0, 3.0, 0.0)
assert(dv0.toDense === dv0)
val dv0s = dv0.toSparse
assert(dv0s.numActives === 2)
assert(dv0s === dv0)
assert(dv0.toSparseWithSize(dv0.numNonzeros) === dv0)
val dv0s2 = dv0.toSparseWithSize(dv0.numNonzeros)
assert(dv0s2.numActives === 2)
assert(dv0s2 === dv0s)
val sv0 = Vectors.sparse(4, Array(0, 1, 2), Array(0.0, 2.0, 3.0))
assert(sv0.toDense === sv0)
val sv0s = sv0.toSparse
assert(sv0s.numActives === 2)
assert(sv0s === sv0)
assert(sv0.toSparseWithSize(sv0.numNonzeros) === sv0)
val sv0s2 = sv0.toSparseWithSize(sv0.numNonzeros)
assert(sv0s2.numActives === 2)
assert(sv0s2 === sv0s)
}
test("Vector.compressed") {
val dv0 = Vectors.dense(1.0, 2.0, 3.0, 0.0)
val dv0c = dv0.compressed.asInstanceOf[DenseVector]
assert(dv0c === dv0)
val dv1 = Vectors.dense(0.0, 2.0, 0.0, 0.0)
val dv1c = dv1.compressed.asInstanceOf[SparseVector]
assert(dv1 === dv1c)
assert(dv1c.numActives === 1)
val sv0 = Vectors.sparse(4, Array(1, 2), Array(2.0, 0.0))
val sv0c = sv0.compressed.asInstanceOf[SparseVector]
assert(sv0 === sv0c)
assert(sv0c.numActives === 1)
val sv1 = Vectors.sparse(4, Array(0, 1, 2), Array(1.0, 2.0, 3.0))
val sv1c = sv1.compressed.asInstanceOf[DenseVector]
assert(sv1 === sv1c)
}
test("SparseVector.slice") {
val v = new SparseVector(5, Array(1, 2, 4), Array(1.1, 2.2, 4.4))
assert(v.slice(Array(0, 2)) === new SparseVector(2, Array(1), Array(2.2)))
assert(v.slice(Array(2, 0)) === new SparseVector(2, Array(0), Array(2.2)))
assert(v.slice(Array(2, 0, 3, 4)) === new SparseVector(4, Array(0, 3), Array(2.2, 4.4)))
}
test("toJson/fromJson") {
val sv0 = Vectors.sparse(0, Array.empty, Array.empty)
val sv1 = Vectors.sparse(1, Array.empty, Array.empty)
val sv2 = Vectors.sparse(2, Array(1), Array(2.0))
val dv0 = Vectors.dense(Array.empty[Double])
val dv1 = Vectors.dense(1.0)
val dv2 = Vectors.dense(0.0, 2.0)
for (v <- Seq(sv0, sv1, sv2, dv0, dv1, dv2)) {
val json = v.toJson
parseJson(json) // `json` should be a valid JSON string
val u = Vectors.fromJson(json)
assert(u.getClass === v.getClass, "toJson/fromJson should preserve vector types.")
assert(u === v, "toJson/fromJson should preserve vector values.")
}
}
test("conversions between new local linalg and mllib linalg") {
val dv: DenseVector = new DenseVector(Array(1.0, 2.0, 3.5))
val sv: SparseVector = new SparseVector(5, Array(1, 2, 4), Array(1.1, 2.2, 4.4))
val sv0: Vector = sv.asInstanceOf[Vector]
val dv0: Vector = dv.asInstanceOf[Vector]
val newSV: newlinalg.SparseVector = sv.asML
val newDV: newlinalg.DenseVector = dv.asML
val newSV0: newlinalg.Vector = sv0.asML
val newDV0: newlinalg.Vector = dv0.asML
assert(newSV0.isInstanceOf[newlinalg.SparseVector])
assert(newDV0.isInstanceOf[newlinalg.DenseVector])
assert(sv.toArray === newSV.toArray)
assert(dv.toArray === newDV.toArray)
assert(sv0.toArray === newSV0.toArray)
assert(dv0.toArray === newDV0.toArray)
val oldSV: SparseVector = SparseVector.fromML(newSV)
val oldDV: DenseVector = DenseVector.fromML(newDV)
val oldSV0: Vector = Vectors.fromML(newSV0)
val oldDV0: Vector = Vectors.fromML(newDV0)
assert(oldSV0.isInstanceOf[SparseVector])
assert(oldDV0.isInstanceOf[DenseVector])
assert(oldSV.toArray === newSV.toArray)
assert(oldDV.toArray === newDV.toArray)
assert(oldSV0.toArray === newSV0.toArray)
assert(oldDV0.toArray === newDV0.toArray)
}
test("implicit conversions between new local linalg and mllib linalg") {
def mllibVectorToArray(v: Vector): Array[Double] = v.toArray
def mllibDenseVectorToArray(v: DenseVector): Array[Double] = v.toArray
def mllibSparseVectorToArray(v: SparseVector): Array[Double] = v.toArray
def mlVectorToArray(v: newlinalg.Vector): Array[Double] = v.toArray
def mlDenseVectorToArray(v: newlinalg.DenseVector): Array[Double] = v.toArray
def mlSparseVectorToArray(v: newlinalg.SparseVector): Array[Double] = v.toArray
val dv: DenseVector = new DenseVector(Array(1.0, 2.0, 3.5))
val sv: SparseVector = new SparseVector(5, Array(1, 2, 4), Array(1.1, 2.2, 4.4))
val sv0: Vector = sv.asInstanceOf[Vector]
val dv0: Vector = dv.asInstanceOf[Vector]
val newSV: newlinalg.SparseVector = sv.asML
val newDV: newlinalg.DenseVector = dv.asML
val newSV0: newlinalg.Vector = sv0.asML
val newDV0: newlinalg.Vector = dv0.asML
import org.apache.spark.mllib.linalg.VectorImplicits._
assert(mllibVectorToArray(dv0) === mllibVectorToArray(newDV0))
assert(mllibVectorToArray(sv0) === mllibVectorToArray(newSV0))
assert(mllibDenseVectorToArray(dv) === mllibDenseVectorToArray(newDV))
assert(mllibSparseVectorToArray(sv) === mllibSparseVectorToArray(newSV))
assert(mlVectorToArray(dv0) === mlVectorToArray(newDV0))
assert(mlVectorToArray(sv0) === mlVectorToArray(newSV0))
assert(mlDenseVectorToArray(dv) === mlDenseVectorToArray(newDV))
assert(mlSparseVectorToArray(sv) === mlSparseVectorToArray(newSV))
}
test("sparse vector only support non-negative length") {
val v1 = Vectors.sparse(0, Array.emptyIntArray, Array.emptyDoubleArray)
val v2 = Vectors.sparse(0, Array.empty[(Int, Double)])
assert(v1.size === 0)
assert(v2.size === 0)
intercept[IllegalArgumentException] {
Vectors.sparse(-1, Array(1), Array(2.0))
}
intercept[IllegalArgumentException] {
Vectors.sparse(-1, Array((1, 2.0)))
}
}
}
|
pgandhi999/spark
|
mllib/src/test/scala/org/apache/spark/mllib/linalg/VectorsSuite.scala
|
Scala
|
apache-2.0
| 18,110
|
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule
package net
import java.nio.channels.{ SelectableChannel => JSelectableChannel }
import channel.{ IChan, OChan }
/**
* Base class for Molecule sockets.
*
 * Molecule Sockets consist of an input stream and an output
* stream of byte buffers. The socket is automatically closed when
* both streams are poisoned.
*
*/
abstract class Socket[T] extends Channel {
protected[net] val input: InputChannel[T]
protected[net] val output: OutputChannel[T]
/**
* Socket handle.
*
* @return a SocketHandle object providing information about the socket.
*/
def handle: SocketHandle
/**
* The socket input stream.
*/
val ichan: IChan[T]
/**
* The socket output stream.
*/
val ochan: OChan[T]
/**
   * Close the socket by poisoning its input and output streams.
*
* @param signal the signal with which the socket is poisoned.
*/
override def shutdown(signal: Signal): Unit = {
ichan.poison(signal)
ochan.close(signal)
super.shutdown()
}
private final var _iClosed: Boolean = false
private final var _oClosed: Boolean = false
private[net] def oClosed(): Unit = {
if (!_oClosed) {
_oClosed = true
if (_iClosed) {
niochan.close()
}
}
}
private[net] def iClosed(): Unit = {
if (!_iClosed) {
_iClosed = true
if (_oClosed) {
niochan.close()
}
}
}
}
/**
* Companion object
*
* Provide utility methods to assemble Molecule Sockets.
*/
object Socket {
  // We already have a Message instance for Channel (should suffice)
//
// private[this] val socketErasureMessage:Message[Socket[Any]] = new Message[Socket[Any]] {
// def poison(s:Socket[Any], signal:Signal):Unit = s.shutdown(signal)
// }
//
// implicit def socketIsMessage[A]:Message[Socket[A]] = socketErasureMessage.asInstanceOf[Message[Socket[A]]]
private[net] def apply[T](niochan: JSelectableChannel, selector: IOSelector, handle: SocketHandle, input: InputChannel[T], output: OutputChannel[T]): Socket[T] = {
new Impl(niochan, selector, handle, input, output)
}
private class Impl[T](
final val niochan: JSelectableChannel,
final val selector: IOSelector,
final val handle: SocketHandle,
final val input: InputChannel[T],
final val output: OutputChannel[T]) extends Socket[T] {
final val ichan = input.init()
final val ochan = output.init()
}
}
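// Editor's note (hedged sketch, not part of the original file): the `iClosed`/`oClosed`
// bookkeeping in Socket closes the underlying NIO channel only once both directions have
// been released. A minimal standalone illustration of that pattern, with a hypothetical
// `closeChannel` callback standing in for `niochan.close()`:
//
//   final class DualClose(closeChannel: () => Unit) {
//     private var in, out = false
//     def inputDone(): Unit = if (!in) { in = true; if (out) closeChannel() }
//     def outputDone(): Unit = if (!out) { out = true; if (in) closeChannel() }
//   }
//
// As in Socket, the callback fires exactly once, on whichever side is released last
// (assuming, like the selector-driven original, single-threaded access).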
|
molecule-labs/molecule
|
molecule-net/src/main/scala/molecule/net/Socket.scala
|
Scala
|
apache-2.0
| 3,146
|
package at.forsyte.apalache.tla.bmcmt
import at.forsyte.apalache.tla.lir.{TlaEx, TlaModule}
/**
* Input to ModelChecker. We assume that this input is prepared by TransitionPass.
*
* @param rootModule a TLA+ module
* @param initTransitions a list of transitions that compute the initial states.
* A list [A_1, ..., A_n] is treated as A_1 \\/ ... \\/ A_n.
* In contrast to Init in TLA+, we require the disjuncts {A_i} to contain only primed variables.
* Each disjunct should assign a value to every primed variable at least once (see assignmentSolver).
* @param nextTransitions a list of transitions that compute the next states.
* A list [A_1, ..., A_n] is treated as A_1 \\/ ... \\/ A_n.
* Each disjunct should assign a value to every primed variable at least once (see assignmentSolver).
* @param constInitPrimed An optional initializer of CONSTANTS (over their primed versions).
* @param invariantsAndNegations pairs of invariants and their negations (we decouple them for optimization).
* @author Igor Konnov
*/
class CheckerInput(val rootModule: TlaModule,
val initTransitions: List[TlaEx],
val nextTransitions: List[TlaEx],
val constInitPrimed: Option[TlaEx],
val invariantsAndNegations: List[(TlaEx, TlaEx)]) {
}
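// Editor's note (hedged sketch, not part of the original file): a CheckerInput is built
// from already-prepared TLA+ expressions; the values below (`module`, `init1`, `next1`,
// `inv`, `notInv`) are hypothetical placeholders of the constructor's parameter types:
//
//   // val module: TlaModule = ...                 // produced by earlier passes
//   // val init1, next1, inv, notInv: TlaEx = ...  // transitions and invariant (+ negation)
//   // val input = new CheckerInput(module, List(init1), List(next1), None, List((inv, notInv)))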
|
konnov/dach
|
tla-bmcmt/src/main/scala/at/forsyte/apalache/tla/bmcmt/CheckerInput.scala
|
Scala
|
apache-2.0
| 1,442
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.table.planner.expressions.utils.Func1
import org.apache.flink.table.planner.plan.optimize.program.{FlinkBatchProgram, FlinkHepRuleSetProgramBuilder, HEP_RULES_EXECUTION_TYPE}
import org.apache.flink.table.planner.utils.{TableConfigUtils, TableTestBase, TestPartitionableTableSource}
import org.apache.calcite.plan.hep.HepMatchOrder
import org.apache.calcite.tools.RuleSets
import org.junit.{Before, Test}
/**
* Test for [[PushPartitionIntoTableSourceScanRule]].
*/
class PushPartitionIntoTableSourceScanRuleTest extends TableTestBase {
private val util = batchTestUtil()
@Before
def setup(): Unit = {
util.buildBatchProgram(FlinkBatchProgram.DEFAULT_REWRITE)
val calciteConfig = TableConfigUtils.getCalciteConfig(util.tableEnv.getConfig)
calciteConfig.getBatchProgram.get.addLast(
"rules",
FlinkHepRuleSetProgramBuilder.newBuilder
.setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_COLLECTION)
.setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
.add(RuleSets.ofList(PushPartitionIntoTableSourceScanRule.INSTANCE))
.build()
)
util.tableEnv.registerTableSource("MyTable", new TestPartitionableTableSource(true))
}
@Test
def testNoPartitionFieldPredicate(): Unit = {
util.verifyPlan("SELECT * FROM MyTable WHERE id > 2")
}
@Test
def testOnlyPartitionFieldPredicate1(): Unit = {
util.verifyPlan("SELECT * FROM MyTable WHERE part1 = 'A'")
}
@Test
def testOnlyPartitionFieldPredicate2(): Unit = {
util.verifyPlan("SELECT * FROM MyTable WHERE part2 > 1")
}
@Test
def testOnlyPartitionFieldPredicate3(): Unit = {
util.verifyPlan("SELECT * FROM MyTable WHERE part1 = 'A' AND part2 > 1")
}
@Test
def testOnlyPartitionFieldPredicate4(): Unit = {
util.verifyPlan("SELECT * FROM MyTable WHERE part1 = 'A' OR part2 > 1")
}
@Test
def testPartitionFieldPredicateAndOtherPredicate(): Unit = {
util.verifyPlan("SELECT * FROM MyTable WHERE id > 2 AND part1 = 'A'")
}
@Test
def testPartitionFieldPredicateOrOtherPredicate(): Unit = {
util.verifyPlan("SELECT * FROM MyTable WHERE id > 2 OR part1 = 'A'")
}
@Test
def testPartialPartitionFieldPredicatePushDown(): Unit = {
util.verifyPlan("SELECT * FROM MyTable WHERE (id > 2 OR part1 = 'A') AND part2 > 1")
}
@Test
def testWithUdf(): Unit = {
util.addFunction("MyUdf", Func1)
util.verifyPlan("SELECT * FROM MyTable WHERE id > 2 AND MyUdf(part2) < 3")
}
}
|
fhueske/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/rules/logical/PushPartitionIntoTableSourceScanRuleTest.scala
|
Scala
|
apache-2.0
| 3,353
|
package org.elasticmq.actor.reply
trait Replyable[T]
|
adamw/elasticmq
|
core/src/main/scala/org/elasticmq/actor/reply/Replyable.scala
|
Scala
|
apache-2.0
| 54
|
package ohnosequences.jellyfish.api.jellyfish
import ohnosequences.jellyfish.api._, opt._
import ohnosequences.cosas._, types._, records._, fns._, klists._
import better.files._
case object dump extends AnyJellyfishCommand {
type Arguments = arguments.type
case object arguments extends RecordType(
input :×:
output :×:
|[AnyJellyfishOption]
)
type ArgumentsVals =
(input.type := input.Raw) ::
(output.type := output.Raw) ::
*[AnyDenotation]
type Options = options.type
case object options extends RecordType(
column :×:
tab :×:
lower_count :×:
upper_count :×:
|[AnyJellyfishOption]
)
type OptionsVals =
(column.type := column.Raw) ::
(tab.type := tab.Raw) ::
(lower_count.type := lower_count.Raw) ::
(upper_count.type := upper_count.Raw) ::
*[AnyDenotation]
lazy val defaults = options(
column(false) ::
tab(false) ::
lower_count(uint64(1)) ::
upper_count(uint64.MaxValue) ::
*[AnyDenotation]
)
}
|
ohnosequences/jellyfish-api
|
src/main/scala/api/commands/dump.scala
|
Scala
|
agpl-3.0
| 1,097
|
/*
* Copyright 2001-2017 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.prop
import org.scalatest.exceptions.TestFailedException
import scala.concurrent.Future
import org.scalatest.funspec.AsyncFunSpec
class AsyncGeneratorDrivenPropertyChecksSpec extends AsyncFunSpec with GeneratorDrivenPropertyChecks {
describe("GeneratorDrivenPropertyChecks") {
it("should do nothing when non-blocking 1 argument block assertion passed") {
forAll { (i: Int) =>
Future { assert(i == i) }
}
}
it("should fail with TestFailedException when non-blocking 1 argument block assertion failed") {
recoverToSucceededIf[TestFailedException] {
forAll { (i: Int) =>
Future {
assert(i != i)
}
}
}
}
it("should do nothing when non-blocking 2 arguments block assertion passed") {
forAll { (i1: Int, i2: Long) =>
Future { assert(i1 == i1 && i2 == i2) }
}
}
it("should fail with TestFailedException when non-blocking 2 arguments block assertion failed") {
recoverToSucceededIf[TestFailedException] {
forAll { (i1: Int, i2: Long) =>
Future {
assert(i1 == i1 && i2 != i2)
}
}
}
}
it("should do nothing when non-blocking 3 arguments block assertion passed") {
forAll { (i1: Int, i2: Long, i3: String) =>
Future { assert(i1 == i1 && i2 == i2 && i3 == i3) }
}
}
it("should fail with TestFailedException when non-blocking 3 arguments block assertion failed") {
recoverToSucceededIf[TestFailedException] {
forAll { (i1: Int, i2: Long, i3: String) =>
Future {
assert(i1 == i1 && i2 == i2 && i3 != i3)
}
}
}
}
it("should do nothing when non-blocking 4 arguments block assertion passed") {
forAll { (i1: Int, i2: Long, i3: String, i4: Short) =>
Future { assert(i1 == i1 && i2 == i2 && i3 == i3 && i4 == i4) }
}
}
it("should fail with TestFailedException when non-blocking 4 arguments block assertion failed") {
recoverToSucceededIf[TestFailedException] {
forAll { (i1: Int, i2: Long, i3: String, i4: Short) =>
Future {
assert(i1 == i1 && i2 == i2 && i3 == i3 && i4 != i4)
}
}
}
}
it("should do nothing when non-blocking 5 arguments block assertion passed") {
forAll { (i1: Int, i2: Long, i3: String, i4: Short, i5: Float) =>
Future { assert(i1 == i1 && i2 == i2 && i3 == i3 && i4 == i4 && i5 == i5) }
}
}
it("should fail with TestFailedException when non-blocking 5 arguments block assertion failed") {
recoverToSucceededIf[TestFailedException] {
forAll { (i1: Int, i2: Long, i3: String, i4: Short, i5: Float) =>
Future {
assert(i1 == i1 && i2 == i2 && i3 == i3 && i4 == i4 && i5 != i5)
}
}
}
}
it("should do nothing when non-blocking 6 arguments block assertion passed") {
forAll { (i1: Int, i2: Long, i3: String, i4: Short, i5: Float, i6: Double) =>
Future { assert(i1 == i1 && i2 == i2 && i3 == i3 && i4 == i4 && i5 == i5 && i6 == i6) }
}
}
it("should fail with TestFailedException when non-blocking 6 arguments block assertion failed") {
recoverToSucceededIf[TestFailedException] {
forAll { (i1: Int, i2: Long, i3: String, i4: Short, i5: Float, i6: Double) =>
Future {
assert(i1 == i1 && i2 == i2 && i3 == i3 && i4 == i4 && i5 == i5 && i6 != i6)
}
}
}
}
}
}
|
scalatest/scalatest
|
jvm/scalatest-test/src/test/scala/org/scalatest/prop/AsyncGeneratorDrivenPropertyChecksSpec.scala
|
Scala
|
apache-2.0
| 4,169
|
/*
* Copyright (c) 2012-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package loaders
// Java
import java.net.URI
// Apache URLEncodedUtils
import org.apache.http.client.utils.URLEncodedUtils
// Scala
import scala.collection.JavaConversions._
// Scalaz
import scalaz._
import Scalaz._
/**
* Companion object to the CollectorLoader.
* Contains factory methods.
*/
object Loader {
/**
* Factory to return a CollectorLoader
* based on the supplied collector
* identifier (e.g. "cloudfront" or
* "clj-tomcat").
*
   * @param collectorOrProtocol Identifier for the
   *        event collector (or raw data protocol)
* @return a CollectorLoader object, or
   *         an error message, boxed
* in a Scalaz Validation
*/
def getLoader(collectorOrProtocol: String): Validation[String, Loader[_]] = collectorOrProtocol match {
case "cloudfront" => CloudfrontLoader.success
case "clj-tomcat" => CljTomcatLoader.success
case "thrift-raw" => ThriftLoader.success // Finally - a data protocol rather than a piece of software
case c => "[%s] is not a recognised Snowplow event collector".format(c).fail
}
}
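// Editor's note (hedged usage sketch, not part of the original file): `getLoader` returns
// a Scalaz Validation, so callers typically pattern match on it, e.g.:
//
//   // Loader.getLoader("cloudfront") match {
//   //   case Success(loader) => // feed raw lines to loader.toCollectorPayload(...)
//   //   case Failure(msg)    => // report the unrecognised collector identifier
//   // }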
/**
* All loaders must implement this
* abstract base class.
*/
abstract class Loader[T] {
import CollectorPayload._
/**
* Converts the source string into a
* CanonicalInput.
*
* TODO: need to change this to
* handling some sort of validation
* object.
*
* @param line A line of data to convert
* @return a CanonicalInput object, Option-
* boxed, or None if no input was
* extractable.
*/
def toCollectorPayload(line: T): ValidatedMaybeCollectorPayload
/**
* Checks whether a request to
* a collector is a tracker
* hitting the ice pixel.
*
* @param path The request path
* @return true if this is a request
* for the ice pixel
*/
protected[loaders] def isIceRequest(path: String): Boolean =
path.startsWith("/ice.png") || // Legacy name for /i
path.equals("/i") ||
path.startsWith("/i?")
/**
* Converts a querystring String
* into a non-empty list of NameValuePairs.
*
* Returns a non-empty list of
* NameValuePairs on Success, or a Failure
* String.
*
* @param qs Option-boxed querystring
* String to extract name-value
* pairs from, or None
* @param encoding The encoding used
* by this querystring
* @return either a NonEmptyList of
* NameValuePairs or an error
* message, boxed in a Scalaz
* Validation
*/
protected[loaders] def parseQuerystring(qs: Option[String], enc: String): ValidatedNameValuePairs = qs match {
case Some(q) => {
try {
URLEncodedUtils.parse(URI.create("http://localhost/?" + q), enc).toList.success
} catch {
        case e: Exception => "Exception extracting name-value pairs from querystring [%s] with encoding [%s]: [%s]".format(q, enc, e.getMessage).fail
}
}
case None => Nil.success
}
}
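// Editor's note (hedged illustration, not part of the original file): `parseQuerystring`
// is protected to the loaders package; inside a Loader subclass it behaves roughly like
// (values hypothetical):
//
//   // parseQuerystring(Some("e=pv&page=home"), "UTF-8")
//   //   // => Success(List(e=pv, page=home)) as NameValuePairs
//   // parseQuerystring(None, "UTF-8")
//   //   // => Success(Nil)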
|
guardian/snowplow
|
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/loaders/Loader.scala
|
Scala
|
apache-2.0
| 3,691
|
package com.nulabinc.backlog.r2b.exporter.convert
import javax.inject.Inject
import com.nulabinc.backlog.migration.common.convert.{Convert, Writes}
import com.nulabinc.backlog.migration.common.domain.{BacklogTextFormattingRule, BacklogWiki}
import com.nulabinc.backlog.migration.common.utils.{DateUtil, Logging}
import com.nulabinc.backlog.r2b.utils.TextileUtil
import com.osinka.i18n.Messages
import com.taskadapter.redmineapi.bean.News
/**
* @author
* uchida
*/
private[exporter] class NewsWrites @Inject() (implicit
val userWrites: UserWrites,
backlogTextFormattingRule: BacklogTextFormattingRule
) extends Writes[News, BacklogWiki]
with Logging {
override def writes(news: News): BacklogWiki = {
BacklogWiki(
optId = None,
name = s"${Messages("common.news")}/${Option(news.getTitle).getOrElse("")}",
optContent = Some(content(news)),
attachments = Seq(),
sharedFiles = Seq(),
tags = Seq(),
optCreatedUser = Option(news.getUser).map(Convert.toBacklog(_)),
optCreated = Option(news.getCreatedOn).map(DateUtil.isoFormat),
optUpdatedUser = Option(news.getUser).map(Convert.toBacklog(_)),
optUpdated = Option(news.getCreatedOn).map(DateUtil.isoFormat)
)
}
private[this] def content(news: News): String = {
val sb = new StringBuilder
sb.append(news.getDescription)
for { link <- Option(news.getLink) } yield {
sb.append("\\n\\n\\n")
sb.append(Messages("common.link")).append(":").append(link)
}
TextileUtil.convert(sb.toString(), backlogTextFormattingRule)
}
}
|
nulab/BacklogMigration-Redmine
|
src/main/scala/com/nulabinc/backlog/r2b/exporter/convert/NewsWrites.scala
|
Scala
|
mit
| 1,586
|
package scalax.collection
import collection.mutable.ListBuffer
import GraphPredef.{EdgeLikeIn, GraphParamOut, NodeOut, EdgeOut}
/**
* Defines traversal-related algorithmic interfaces.
*
* Graph traversal means to navigate from node to node based on incident edges.
* Another kind of navigation is to iterate over nodes/edges based on the node-/edge-set.
*
* @define INTOACC taking optional filters and visitors into account.
* @define VISITORS Node/edge visitor functions allow arbitrary user-defined computation
* during the traversal.
* @define DIRECTION Determines which connected nodes the traversal has to follow.
* Defaults to `Successors`.
* @define NODEFILTER Predicate to filter the nodes to be visited during traversal.
* Defaults to `anyNode`, that is no filtering.
* A return of `true` signals that the traversal is to be canceled.
* @define EDGEFILTER Predicate to filter the edges to be visited during traversal.
* Defaults to `anyEdge` that is no filtering.
* @define NODEVISITOR Function called on visiting a node.
* It can mutate the node or carry out any other side effect.
* Defaults to the empty function noNodeAction.
* @define EDGEVISITOR Function called on visiting an edge.
* It can mutate the node or carry out any other side effect.
* Defaults to the empty function noEdgeAction.
 * @define BREADTHFIRST If `true` the traversal is based on a breadth first
* (BFS, layer-for-layer) search, otherwise on a depth first search (DFS).
* Default is BFS.
* @define MAXDEPTH `0` - the default - indicates that the traversal should have
* an unlimited depth meaning that it will be continued either until
* it is cancelled by `nodeVisitor` or, in absence of a cancelation,
 *         until all nodes have been visited. A positive value limits the number
 *         of layers for BFS respectively the number of consecutive child visits
 *         before siblings are visited for DFS.
* @define RESULT Tuple of the node found and the set of all visited nodes.
* @define ROOT the node to start the traversal from.
* @define PRED the predicate which must hold true to stop traversing
* and return the node found. Defaults to `noNode` causing a full traversal.
*/
trait GraphTraversal[N, E[X] <: EdgeLikeIn[X]] extends GraphBase[N,E]
{
import GraphTraversal.VisitorReturn._
import GraphTraversal._
/** Whether `this` graph is connected if it is undirected or
* weakly connected if it is directed.
*/
def isConnected = nodes.headOption map { head =>
var cnt = 0
head.traverseNodes(direction = AnyConnected, breadthFirst = false) { n =>
cnt += 1
Continue
}
cnt == nodes.size
} getOrElse true
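  // Editor's note (hedged usage sketch, not part of the original file): with the usual
  // scala-graph factories in scope (`scalax.collection.Graph`, `GraphPredef._`) one would
  // expect, for instance:
  //
  //   // Graph(1 ~ 2, 2 ~ 3).isConnected   // true
  //   // Graph(1 ~ 2, 3 ~ 4).isConnected   // false (two components)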
/**
* List of sets of nodes with each set containing connected nodes $INTOACC.
* If `this` graph is connected, the list has one element containing all nodes
* otherwise more than one elements.
*
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
*//*
def components (nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): List[Set[NodeT]]*/
/** Same as `components(...)` with default arguments. *//*
@inline final def components: List[Set[NodeT]] = components()*/
/**
* Whether `this` graph has at least one cycle.
*/
@inline final def isCyclic: Boolean = findCycle isDefined
/**
* Whether `this` graph has no cycle.
*/
@inline final def isAcyclic: Boolean = ! isCyclic
/**
* Finds a cycle in `this` graph $INTOACC, if any.
*
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param maxDepth $MAXDEPTH
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
* @return A cycle or None if either
* a) there exists no cycle in `this` graph or
* b) there exists a cycle in `this` graph but due to the given
* filtering conditions or a `Cancel` return by a visitor this cycle
* had to be disregarded.
*/
def findCycle(nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
maxDepth : Int = 0,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Option[Cycle]
/** Same as `findCycle(...)` with default arguments. */
@inline final def findCycle: Option[Cycle] = findCycle()
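  // Editor's note (hedged usage sketch, not part of the original file): a typical
  // acyclicity check with the methods above (graph construction omitted):
  //
  //   // graph.findCycle match {
  //   //   case Some(cycle) => println("cycle found: " + cycle)
  //   //   case None        => assert(graph.isAcyclic)
  //   // }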
/**
* Represents a path in this graph listing the nodes and connecting edges on it
* with the following syntax:
*
* `path ::= ''node'' { ''edge'' ''node'' }`
*
* All nodes and edges on the path are distinct. A path contains at least
* one node followed by any number of consecutive pairs of an edge and a node.
* The first element is the start node, the second is an edge with its tail
* being the start node and its head being the third element etc.
*/
trait Path extends Iterable[GraphParamOut[N,E]]
{
override def stringPrefix = "Path"
/**
     * Iterator over the nodes of this path. The result is cached
     * on the first call, so consecutive calls of this method are cheap.
*
* @return Iterator over all nodes of this path in proper order.
*/
def nodeIterator = (iterator filter (_.isNode)).asInstanceOf[Iterator[NodeT]]
/**
     * Iterator over the edges of this path. The result is cached
     * on the first call, so consecutive calls of this method are cheap.
*
* @return Iterator over all edges of this path in proper order.
*/
def edgeIterator = (iterator filter (_.isEdge)).asInstanceOf[Iterator[EdgeT]]
/** List containing all nodes of this path in proper order. */
def nodes = nodeIterator.toList
/** List containing all edges of this path in proper order. */
def edges = edgeIterator.toList
/** The cumulated weight of all edges on this path. */
def weight = { var sum = 0L; edgeIterator foreach {sum += _.weight}; sum }
/** The number of edges on this path. */
def length = edgeIterator.size
def startNode: NodeT
def endNode: NodeT
/**
* Returns whether the contents of this path are valid with respect
* to path semantics. This check is appropriate whenever there may be
* any doubt about the correctness of the result of an algorithm.
*/
def isValid: Boolean
}
/**
* Represents a cycle in this graph listing the nodes and connecting edges on it
* with the following syntax:
*
* `cycle ::= ''start-end-node'' { ''edge'' ''node'' } ''edge'' ''start-end-node''`
*
* All nodes and edges on the path are distinct except the start and end nodes that
* are equal. A cycle contains at least a start node followed by any number of
* consecutive pairs of an edge and a node and the end node equaling to the start node.
* The first element is the start node, the second is an edge with its tail
* being the start node and its head being the third element etc.
*/
trait Cycle extends Path {
override def stringPrefix = "Cycle"
/**
* Semantically compares `this` cycle with `that` cycle. While `==` returns `true`
* only if the cycles contain the same elements in the same order, this comparison
* returns also `true` if the elements of `that` cycle can be shifted and optionally
* reversed such that their elements have the same order.
*
* For instance, given
*
* `c1 = Cycle(1-2-3-1)`, `c2 = Cycle(2-3-1-2)` and `c3 = Cycle(2-1-3-2)`
*
* the following expressions hold:
*
* `c1 != c2`, `c1 != c3` but `c1 sameAs c2` and `c1 sameAs c3`.
*/
def sameAs(that: GraphTraversal[N,E]#Cycle): Boolean
}
  /**
   * Whether all nodes are pairwise adjacent.
   *
   * @return `true` if this graph is complete, `false` if this graph contains any
   * independent nodes.
   */
  def isComplete = {
val orderLessOne = order - 1
nodes forall (_.diSuccessors.size == orderLessOne)
}
/** Default node filter letting through all nodes. */
@inline final def anyNode = (n: NodeT) => true
/** Node predicate always returning `false`. */
@inline final def noNode = (n: NodeT) => false
/** Default edge filter letting through all edges. */
@inline final def anyEdge = (e: EdgeT) => true
//@inline final def noNodeSort = (n1: NodeT, n2: NodeT) => true
//@inline final def noEdgeSort = (e1: EdgeT, e2: EdgeT) => true
/** Default node visitor doing nothing. */
@inline final def noNodeAction = (n: NodeT) => Continue
/** Default edge visitor doing nothing. */
@inline final def noEdgeAction = (e: EdgeT) => {}
/** Returns true if `filter` is not equivalent to `anyNode`. */
@inline final def isCustomNodeFilter(filter: (NodeT) => Boolean) = filter ne anyNode
/** Returns true if `filter` is not equivalent to `anyEdge`. */
@inline final def isCustomEdgeFilter(filter: (EdgeT) => Boolean) = filter ne anyEdge
// @inline final def isCustomNodeSort = nodeSort ne NoNodeSort
// @inline final def isCustomEdgeSort = nodeSort ne NoEdgeSort
/** Returns true if `visitor` is not equivalent to `noNodeAction`. */
@inline final def isCustomNodeVisitor(visitor: (NodeT) => VisitorReturn) = visitor ne noNodeAction
/** Returns true if `visitor` is not equivalent to `noEdgeAction`. */
@inline final def isCustomEdgeVisitor(visitor: (EdgeT) => Unit ) = visitor ne noEdgeAction
// /**
// * Contains methods that return more context information on the current
// * traversal and may be called by a traversal visitor.
// */
// trait NavContext {
// def depth: Int
// def path: Path
// def nrOfVisitedNodes: Int
// def visitedNodes: Set[NodeT]
// }
type NodeT <: InnerNodeLike
trait InnerNodeLike extends super.InnerNodeLike
{
/**
* Finds a successor of this node for which the predicate `pred` holds $INTOACC.
* $VISITORS
*
* This node itself does not count as a match. This is also true if it has a hook.
     * If several successors exist the algorithm selects the first of them it finds.
*
* @param pred The predicate which must hold true for the resulting node.
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
* @return A node with the predicate `pred` or None if either
* a) there is no node with `pred` or
* b) there exists no path to such a node at all
* c) there exists a path to such a node but due to
* user filtering or canceling the traversal this path had to be disregarded.
*/
def findSuccessor(pred: (NodeT) => Boolean,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Option[NodeT]
/**
* Checks whether `potentialSuccessor` is a successor of this node $INTOACC.
* $VISITORS
* Same as `isPredecessorOf`.
*
* @param potentialSuccessor The node which is potentially a successor of this node.
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
* @return `true` if a path exists from this node to `potentialSuccessor` and
* it had not to be excluded due to user filtering or canceling the traversal.
*/
@inline final
def hasSuccessor(potentialSuccessor: NodeT,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Boolean =
findSuccessor(_ eq potentialSuccessor,
nodeFilter, edgeFilter, nodeVisitor, edgeVisitor).isDefined
/** Same as `hasSuccessor`. */
@inline final
def isPredecessorOf(potentialSuccessor: NodeT,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Boolean =
hasSuccessor(potentialSuccessor,
nodeFilter, edgeFilter, nodeVisitor, edgeVisitor)
/**
* Finds a predecessor of this node for which the predicate `pred` holds $INTOACC.
* $VISITORS
*
* This node itself does not count as a match. This is also true if it has a hook.
* If several predecessors exist the algorithm selects the first of them found.
*
* @param pred The predicate which must hold true for the resulting node.
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
* @return A node with the predicate `pred` or None if either
* a) there is no node with `pred` or
* b) there exists no path from such a node to this node at all or
* c) there exists a path from such a node to this node but due to
* user filtering or canceling the traversal this path had to be disregarded.
*/
def findPredecessor(pred: (NodeT) => Boolean,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Option[NodeT]
/**
* Checks whether `potentialPredecessor` is a predecessor of this node $INTOACC.
* $VISITORS
* Same as `isSuccessorOf`.
*
* @param potentialPredecessor The node which is potentially a predecessor of this node.
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
* @return `true` if a path exists from `potentialPredecessor` to this node and
* it had not to be excluded due to user filtering or canceling the traversal.
*/
@inline final
def hasPredecessor(potentialPredecessor: NodeT,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Boolean =
findPredecessor(_ eq potentialPredecessor,
nodeFilter, edgeFilter, nodeVisitor, edgeVisitor).isDefined
/** Same as `hasPredecessor`. */
@inline final
def isSuccessorOf(potentialPredecessor: NodeT,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Boolean =
hasPredecessor(potentialPredecessor,
nodeFilter, edgeFilter, nodeVisitor, edgeVisitor)
/**
* Finds a node (not necessarily directly) connected with this node
* for which the predicate `pred` holds $INTOACC.
     * For directed or mixed graphs the node to be found is weakly connected with this node.
* $VISITORS
*
* This node itself does not count as a match. This is also true if it has a hook.
* If several connected nodes exist with `pred` the algorithm selects the first
     * of them it finds.
*
* @param pred The predicate which must hold true for the resulting node.
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
* @return A node with the predicate `pred` or None if either
* a) there is no node with `pred` or
* b) there exists no connection to such a node at all
* c) there exists a connection to such a node but due to
* user filtering or canceling the traversal this connection had to be disregarded.
*/
def findConnected(pred: (NodeT) => Boolean,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Option[NodeT]
/**
* Checks whether `potentialConnected` is a node (not necessarily directly)
* connected with this node $INTOACC.
     * For directed or mixed graphs it is sufficient that `potentialConnected` is
     * weakly connected with this node.
* $VISITORS
*
* @param potentialConnected The node which is potentially connected with this node.
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
* @return `true` if a path exists from this node to `potentialConnected` and
* it had not to be excluded due to user filtering or canceling the traversal.
*/
@inline final
def isConnectedWith(potentialConnected: NodeT,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Boolean =
findConnected(_ eq potentialConnected,
nodeFilter, edgeFilter, nodeVisitor, edgeVisitor).isDefined
/**
* Finds a path from this node to a successor of this node for which the predicate
* `pred` holds $INTOACC.
*
* This node itself does not count as a match. This is also true if it has a hook.
* If several successors exist the algorithm selects any first matching node.
*
* @param pred The predicate which must hold true for the successor.
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
* @return A path to a node with the predicate `pred` or None if either
* a) there is no node with `pred` or
* b) there exists no path to such a node at all
* c) there exists a path to such a node but due to the given filter
* conditions this path had to be disregarded.
*/
def pathUntil(pred: (NodeT) => Boolean,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Option[Path]
/**
* Finds a path from this node to `potentialSuccessor`.
*
* @param potentialSuccessor The node a path is to be found to.
* @return A path to `potentialSuccessor` or None if either
* a) there is no node with `pred` or
* b) there exists no path to such a node at all
*/
@inline final
def pathTo(potentialSuccessor: NodeT): Option[Path] =
pathUntil(_ eq potentialSuccessor,
anyNode, anyEdge, noNodeAction, noEdgeAction)
/**
* Finds a path from this node to `potentialSuccessor` $INTOACC.
*
* @param potentialSuccessor The node a path is to be found to.
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
* @return A path to `potentialSuccessor` or None if either
* a) there is no node with `pred` or
* b) there exists no path to such a node at all
* c) there exists a path to such a node but due to the given filter
* conditions this path had to be disregarded.
*/
@inline final
def pathTo(potentialSuccessor: NodeT,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Option[Path] =
pathUntil(_ eq potentialSuccessor,
nodeFilter, edgeFilter, nodeVisitor, edgeVisitor)
/**
* Finds the shortest path from this node to `potentialSuccessor`.
*
* The calculation is based on the weight of the edges on the path. As a default,
     * edges have a weight of 1, which can be overridden by custom edges.
*
* @param potentialSuccessor The node the shortest path is to be found to.
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
* @return The shortest path to `potentialSuccessor` or None if either
* a) there exists no path to `potentialSuccessor` or
* b) there exists a path to `potentialSuccessor` but due to the given
* filtering conditions this path had to be disregarded.
*/
def shortestPathTo(potentialSuccessor: NodeT,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Option[Path]
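    // Editor's note (hedged usage sketch, not part of the original file): a typical call
    // computes the weight-minimal path between two inner nodes and reads off its contents:
    //
    //   // n1 shortestPathTo n2 map (p => (p.nodes, p.weight))
    //   //   // => Some((nodes on the path, summed edge weights)) or None if unreachable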
/**
* Finds a cycle starting the search at this node $INTOACC, if any.
* The resulting cycle may start at any node connected with `this` node.
*
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param maxDepth $MAXDEPTH
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
* @return A cycle or None if either
* a) there exists no cycle in the component `this` node belongs to or
* b) there exists a cycle in the component but due to the given
* filtering conditions or a `Cancel` return by a visitor this cycle
* had to be disregarded.
*/
def findCycle(nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
maxDepth : Int = 0,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Option[Cycle]
/** Same as `findCycle(...)` with default arguments. */
@inline final def findCycle: Option[Cycle] = findCycle()
/**
* Traverses this graph from this (root) node for side-effects allowing
*
* a) to filter nodes and/or edges,
* b) to carry out any side effect at visited nodes and/or edges and
* c) to cancel the traversal at any node.
*
* @param direction $DIRECTION
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param breadthFirst $BREADTHFIRST
* @param maxDepth $MAXDEPTH
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
*/
def traverse (direction : Direction = Successors,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
breadthFirst:Boolean = true,
maxDepth : Int = 0)
(nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction)
/**
* Shortcut for calling 'traverse' with a non-default `nodeVisitor`
* but the default `edgeVisitor` allowing a `foreach`-like call syntax:
* {{{
* rootNode traverseNodes() {
* print("d" + _.degree)
* Continue
* }
* }}}
*/
@inline final
def traverseNodes(direction : Direction = Successors,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
breadthFirst:Boolean = true,
maxDepth : Int = 0)
(nodeVisitor: (NodeT) => VisitorReturn) =
traverse(direction, nodeFilter, edgeFilter, breadthFirst, maxDepth)(nodeVisitor = nodeVisitor)
/**
* Shortcut for calling 'traverse' with a non-default `edgeVisitor`
* but the default `nodeVisitor` allowing a `foreach`-like call syntax:
* {{{
* rootNode traverseEdges() {
* print( if(_.directed) "d" else "u" )
* Continue
* }
* }}}
*/
@inline final
def traverseEdges(direction : Direction = Successors,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
breadthFirst:Boolean = true,
maxDepth : Int = 0)
(edgeVisitor: (EdgeT) => Unit ) =
traverse(direction, nodeFilter, edgeFilter, breadthFirst, maxDepth)(edgeVisitor = edgeVisitor)
}
/** Abstract class for functional traversals.
*
* In addition to the `traverse` methods defined for nodes, this concept supports
* repeated traversals with constant direction, filters and visitors.
* Call `newTraversal` to create an instance and call any subsequent traversals
* on that instance.
*
* @author Peter Empen
*/
abstract class Traversal(direction : Direction,
nodeFilter : (NodeT) => Boolean,
edgeFilter : (EdgeT) => Boolean,
nodeVisitor: (NodeT) => VisitorReturn,
edgeVisitor: (EdgeT) => Unit)
{
final val noAction = (n: NodeT) => {}
final val notVisited = (n: NodeT) => false
/** Computes the filtered direct successors of `node`.
* It also calls `edgeVisitor` but does '''not''' call `nodeVisitor`.
*
* @param node the node the direct successors are to be calculated of
* @param isVisited function returning whether a node has already been
* visited and must therefore be excluded
*/
protected[collection]
def filteredDiSuccessors(node : NodeT,
isVisited: (NodeT) => Boolean): Iterable[NodeT]
/** Computes the filtered direct predecessors of `node`.
* It also calls `edgeVisitor` but does '''not''' call `nodeVisitor`.
*
* @param node the node the direct predecessors are to be calculated of
* @param isVisited function returning whether a node has already been
* visited and must therefore be excluded
*/
protected[collection]
def filteredDiPredecessors(node : NodeT,
isVisited: (NodeT) => Boolean): Iterable[NodeT]
/** Computes the filtered neighbors of `node`.
* It also calls `edgeVisitor` but does '''not''' call `nodeVisitor`.
*
* @param node the node the adjacent are to be calculated of
* @param isVisited function returning whether a node has already been
* visited and must therefore be excluded
*/
protected[collection]
def filteredNeighbors(node : NodeT,
isVisited: (NodeT) => Boolean): Iterable[NodeT]
/**
* Traversal return value.
*
* @param found the node found if a search was required
* @param visited the set of all traversed nodes (As an exception, third-party
* implementations may return an empty set instead to minimize computational
* overhead in which case this must be indicated in the documentation.)
*/
protected[collection]
case class Result(val found: Option[NodeT],
val visited: Iterable[NodeT])
/**
* Traverses this graph from `root` for side-effects allowing
*
* a) to filter nodes and/or edges,
* b) to carry out any side effect at visited nodes and/or edges and
* c) to cancel the traversal at any node.
*
* @param root $ROOT
* @param pred $PRED
* @param breadthFirst $BREADTHFIRST
* @param maxDepth $MAXDEPTH
* @return $RESULT
*/
def apply(root : NodeT,
pred : (NodeT) => Boolean = noNode,
breadthFirst: Boolean = true,
maxDepth : Int = 0): Result
/**
* Starting at `root`, functionally traverses this graph up to `maxDepth` layers
* using the depth first search algorithm and all filters, visitors etc.
* passed to the encapsulating `Traversal` instance.
*
* @param root $ROOT
* @param pred $PRED
* @return $RESULT
*/
def depthFirstSearch (root : NodeT,
pred : (NodeT) => Boolean = noNode,
onPopFound: (NodeT) => Unit = noAction): Result
/** Synonym for `depthFirstSearch` */
@inline final def dfs(root : NodeT,
pred : (NodeT) => Boolean = noNode,
onPopFound: (NodeT) => Unit = noAction) =
depthFirstSearch(root, pred, onPopFound)
/**
* Starting at `root`, functionally traverses this graph up to `maxDepth` layers
* using the breadth first search algorithm and all filters, visitors etc.
* passed to the encapsulating `Traversal` instance.
*
* @param root $ROOT
* @param pred $PRED
* @return $RESULT
*/
def breadthFirstSearch(root : NodeT,
pred : (NodeT) => Boolean = noNode,
maxDepth: Int = 0): Result
/** Synonym for `breadthFirstSearch` */
@inline final def bfs(root : NodeT,
pred : (NodeT) => Boolean = noNode,
maxDepth: Int = 0) =
breadthFirstSearch(root, pred, maxDepth)
}
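  // Editor's note (hedged usage sketch, not part of the original file): a Traversal is
  // created once via `newTraversal` (below) and then reused for repeated traversals:
  //
  //   // val t = newTraversal(nodeVisitor = n => { println(n); Continue })
  //   // t(rootNode, breadthFirst = true)   // side-effecting traversal from rootNode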
/**
* Creates a `Traversal` instance allowing subsequent traversals with
* constant filters and visitors.
*
* @param direction $DIRECTION
* @param nodeFilter $NODEFILTER
* @param edgeFilter $EDGEFILTER
* @param nodeVisitor $NODEVISITOR
* @param edgeVisitor $EDGEVISITOR
*/
def newTraversal(direction : Direction = Successors,
nodeFilter : (NodeT) => Boolean = anyNode,
edgeFilter : (EdgeT) => Boolean = anyEdge,
nodeVisitor: (NodeT) => VisitorReturn = noNodeAction,
edgeVisitor: (EdgeT) => Unit = noEdgeAction): Traversal
}
object GraphTraversal {
object VisitorReturn extends Enumeration {
type VisitorReturn = Value
val Continue, Cancel = Value
}
sealed trait Direction
object Successors extends Direction
object Predecessors extends Direction
object AnyConnected extends Direction
}
|
opyate/scala-graph
|
core/src/main/scala/scalax/collection/GraphTraversal.scala
|
Scala
|
bsd-3-clause
| 32,811
|
package com.toscaruntime.compiler.tosca
import com.toscaruntime.compiler.Tokens
import com.toscaruntime.tosca._
import scala.util.parsing.input.Positional
case class Csar(path: String, definitions: Map[String, Definition]) {
def csarName = definitions.values.head.name.get.value
def csarVersion = definitions.values.head.version.get.value
def csarId = csarName + ":" + csarVersion
}
case class ParsedValue[T](value: T) extends Positional
case class Definition(definitionVersion: Option[ParsedValue[String]],
name: Option[ParsedValue[String]],
version: Option[ParsedValue[String]],
imports: Option[List[ParsedValue[String]]],
author: Option[ParsedValue[String]],
description: Option[ParsedValue[String]],
nodeTypes: Option[Map[ParsedValue[String], NodeType]],
dataTypes: Option[Map[ParsedValue[String], DataType]],
capabilityTypes: Option[Map[ParsedValue[String], CapabilityType]],
relationshipTypes: Option[Map[ParsedValue[String], RelationshipType]],
artifactTypes: Option[Map[ParsedValue[String], ArtifactType]],
groupTypes: Option[Map[ParsedValue[String], GroupType]],
policyTypes: Option[Map[ParsedValue[String], PolicyType]],
topologyTemplate: Option[TopologyTemplate]) extends Positional
case class ArtifactType(name: ParsedValue[String],
derivedFrom: Option[ParsedValue[String]],
description: Option[ParsedValue[String]],
fileExtension: Option[List[ParsedValue[String]]]) extends Positional
trait Type {
val name: ParsedValue[String]
val description: Option[ParsedValue[String]]
val isAbstract: ParsedValue[Boolean]
val derivedFrom: Option[ParsedValue[String]]
val properties: Option[Map[ParsedValue[String], FieldValue]]
}
trait RuntimeType extends Type {
val attributes: Option[Map[ParsedValue[String], FieldValue]]
val artifacts: Option[Map[ParsedValue[String], DeploymentArtifact]]
val interfaces: Option[Map[ParsedValue[String], Interface]]
}
case class GroupType(name: ParsedValue[String],
derivedFrom: Option[ParsedValue[String]],
description: Option[ParsedValue[String]],
interfaces: Option[Map[ParsedValue[String], Interface]]) extends Positional
case class PolicyType(name: ParsedValue[String],
derivedFrom: Option[ParsedValue[String]],
description: Option[ParsedValue[String]]) extends Positional
case class DataType(name: ParsedValue[String],
isAbstract: ParsedValue[Boolean],
derivedFrom: Option[ParsedValue[String]],
description: Option[ParsedValue[String]],
properties: Option[Map[ParsedValue[String], FieldValue]]) extends Positional with Type
case class Requirement(name: ParsedValue[String],
properties: Option[Map[ParsedValue[String], EvaluableFieldValue]],
targetNode: Option[ParsedValue[String]],
targetCapability: Option[ParsedValue[String]],
relationshipType: Option[ParsedValue[String]]) extends Positional
case class Capability(name: ParsedValue[String],
properties: Map[ParsedValue[String], EvaluableFieldValue]) extends Positional
case class NodeTemplate(name: ParsedValue[String],
typeName: Option[ParsedValue[String]],
properties: Option[Map[ParsedValue[String], EvaluableFieldValue]],
requirements: Option[List[Requirement]],
capabilities: Option[Map[ParsedValue[String], Capability]]) extends Positional
case class Output(name: ParsedValue[String],
description: Option[ParsedValue[String]],
value: Option[EvaluableFieldValue]) extends Positional
case class TopologyTemplate(description: Option[ParsedValue[String]],
inputs: Option[Map[ParsedValue[String], PropertyDefinition]],
outputs: Option[Map[ParsedValue[String], Output]],
nodeTemplates: Option[Map[ParsedValue[String], NodeTemplate]]) extends Positional
case class DeploymentArtifact(ref: ParsedValue[String],
typeName: ParsedValue[String]) extends Positional
case class NodeType(name: ParsedValue[String],
isAbstract: ParsedValue[Boolean],
derivedFrom: Option[ParsedValue[String]],
description: Option[ParsedValue[String]],
tags: Option[Map[ParsedValue[String], ParsedValue[String]]],
properties: Option[Map[ParsedValue[String], FieldValue]],
attributes: Option[Map[ParsedValue[String], FieldValue]],
requirements: Option[Map[ParsedValue[String], RequirementDefinition]],
capabilities: Option[Map[ParsedValue[String], CapabilityDefinition]],
artifacts: Option[Map[ParsedValue[String], DeploymentArtifact]],
interfaces: Option[Map[ParsedValue[String], Interface]]) extends Positional with RuntimeType
case class RelationshipType(name: ParsedValue[String],
isAbstract: ParsedValue[Boolean],
derivedFrom: Option[ParsedValue[String]],
description: Option[ParsedValue[String]],
properties: Option[Map[ParsedValue[String], FieldValue]],
attributes: Option[Map[ParsedValue[String], FieldValue]],
validSources: Option[List[ParsedValue[String]]],
validTargets: Option[List[ParsedValue[String]]],
artifacts: Option[Map[ParsedValue[String], DeploymentArtifact]],
interfaces: Option[Map[ParsedValue[String], Interface]]) extends Positional with RuntimeType
case class CapabilityType(name: ParsedValue[String],
isAbstract: ParsedValue[Boolean],
derivedFrom: Option[ParsedValue[String]],
description: Option[ParsedValue[String]],
properties: Option[Map[ParsedValue[String], PropertyDefinition]]) extends Positional with Type
case class ScalarValue(value: String) extends PropertyValue[String]
object ScalarValue {
def apply(parsedValue: ParsedValue[String]): ScalarValue = {
val scalarValue = ScalarValue(parsedValue.value)
scalarValue.setPos(parsedValue.pos)
scalarValue
}
}
case class ListValue(value: List[FieldValue]) extends PropertyValue[List[FieldValue]]
case class ComplexValue(value: Map[ParsedValue[String], FieldValue]) extends PropertyValue[Map[ParsedValue[String], FieldValue]]
trait FieldValue extends Positional
trait EvaluableFieldValue extends FieldValue
trait PropertyValue[T] extends EvaluableFieldValue {
val value: T
}
trait FieldDefinition extends FieldValue {
val valueType: ParsedValue[String]
val required: ParsedValue[Boolean]
val default: Option[EvaluableFieldValue]
val constraints: Option[List[PropertyConstraint]]
val description: Option[ParsedValue[String]]
val entrySchema: Option[PropertyDefinition]
}
case class PropertyDefinition(valueType: ParsedValue[String],
required: ParsedValue[Boolean],
default: Option[EvaluableFieldValue],
constraints: Option[List[PropertyConstraint]],
description: Option[ParsedValue[String]],
entrySchema: Option[PropertyDefinition]) extends FieldDefinition
case class AttributeDefinition(valueType: ParsedValue[String],
required: ParsedValue[Boolean],
default: Option[EvaluableFieldValue],
constraints: Option[List[PropertyConstraint]],
description: Option[ParsedValue[String]],
entrySchema: Option[PropertyDefinition]) extends FieldDefinition
trait FilterDefinition extends Positional {
val properties: Map[ParsedValue[String], List[PropertyConstraint]]
}
case class PropertiesFilter(properties: Map[ParsedValue[String], List[PropertyConstraint]]) extends FilterDefinition
case class NodeFilter(properties: Map[ParsedValue[String], List[PropertyConstraint]],
capabilities: Map[ParsedValue[String], FilterDefinition]) extends FilterDefinition
case class RequirementDefinition(name: ParsedValue[String],
capabilityType: Option[ParsedValue[String]],
relationshipType: Option[ParsedValue[String]],
lowerBound: ParsedValue[Int],
upperBound: ParsedValue[Int],
description: Option[ParsedValue[String]],
nodeFilter: Option[NodeFilter]) extends Positional
case class CapabilityDefinition(capabilityType: Option[ParsedValue[String]],
upperBound: ParsedValue[Int],
description: Option[ParsedValue[String]]) extends Positional
case class Interface(description: Option[ParsedValue[String]],
operations: Map[ParsedValue[String], Operation]) extends Positional
case class Operation(description: Option[ParsedValue[String]],
inputs: Option[Map[ParsedValue[String], FieldValue]],
implementation: Option[ParsedValue[String]]) extends Positional
case class Function(function: ParsedValue[String], paths: Seq[ParsedValue[String]]) extends EvaluableFieldValue
case class CompositeFunction(function: ParsedValue[String], members: Seq[FieldValue]) extends EvaluableFieldValue
// reference can be a ParsedValue[String] or a List[ParsedValue[String]]
case class PropertyConstraint(operator: ParsedValue[String], reference: Any) extends Positional
object PropertyConstraint {
def isValueValid(value: String, valueType: String, constraint: PropertyConstraint): Boolean = {
val parsedValue = FieldDefinition.toToscaPrimitiveType(value, valueType)
if (!parsedValue.isValid) {
return false
}
constraint.operator.value match {
case Tokens.equal_token =>
if (!constraint.reference.isInstanceOf[ParsedValue[String]]) {
false
} else {
val reference = FieldDefinition.toToscaPrimitiveType(constraint.reference.asInstanceOf[ParsedValue[String]].value, valueType)
if (!reference.isValid) {
false
} else {
parsedValue.value.get == reference.value.get
}
}
case Tokens.valid_values_token =>
if (!constraint.reference.isInstanceOf[List[ParsedValue[String]]]) {
false
} else {
val listReference = constraint.reference.asInstanceOf[List[ParsedValue[String]]].map {
case ParsedValue(referenceValue) => FieldDefinition.toToscaPrimitiveType(referenceValue, valueType)
}
if (listReference.exists(!_.isValid)) {
false
} else {
listReference.exists(parsedValue.value.get == _.value.get)
}
}
case Tokens.greater_than_token | Tokens.greater_or_equal_token | Tokens.less_than_token | Tokens.less_or_equal_token =>
if (!FieldDefinition.isComparableType(valueType) || !constraint.reference.isInstanceOf[ParsedValue[String]]) {
false
} else {
val toBeCompared = parsedValue.asInstanceOf[ToscaComparableType[Any]]
val reference = FieldDefinition.toToscaPrimitiveType(constraint.reference.asInstanceOf[ParsedValue[String]].value, valueType).asInstanceOf[ToscaComparableType[Any]]
if (!reference.isValid) {
false
} else {
constraint.operator.value match {
case Tokens.greater_than_token => toBeCompared > reference
case Tokens.greater_or_equal_token => toBeCompared >= reference
case Tokens.less_than_token => toBeCompared < reference
case Tokens.less_or_equal_token => toBeCompared <= reference
}
}
}
case Tokens.in_range_token =>
if (!FieldDefinition.isComparableType(valueType) || !constraint.reference.isInstanceOf[List[ParsedValue[String]]]) {
false
} else {
val toBeCompared = parsedValue.asInstanceOf[ToscaComparableType[Any]]
val listReference = constraint.reference.asInstanceOf[List[ParsedValue[String]]].map {
case ParsedValue(referenceValue) => FieldDefinition.toToscaPrimitiveType(referenceValue, valueType).asInstanceOf[ToscaComparableType[Any]]
}
if (listReference.exists(!_.isValid)) {
false
} else {
val minReference = listReference.head
val maxReference = listReference.last
minReference < toBeCompared && toBeCompared < maxReference
}
}
case Tokens.length_token | Tokens.min_length_token | Tokens.max_length_token =>
if (valueType != FieldDefinition.STRING) {
false
} else {
val toBeCompared = parsedValue.asInstanceOf[ToscaString].value.get
val referenceOpt = ToscaInteger(constraint.reference.asInstanceOf[ParsedValue[String]].value)
if (!referenceOpt.isValid) {
false
} else {
val reference = referenceOpt.value.get
constraint.operator.value match {
case Tokens.length_token => toBeCompared.length == reference
case Tokens.min_length_token => toBeCompared.length >= reference
case Tokens.max_length_token => toBeCompared.length <= reference
}
}
}
case Tokens.pattern_token =>
if (valueType != FieldDefinition.STRING) {
false
} else {
val toBeCompared = parsedValue.asInstanceOf[ToscaString].value.get
val referenceOpt = ToscaString(constraint.reference.asInstanceOf[ParsedValue[String]].value)
if (!referenceOpt.isValid) {
false
} else {
val reference = referenceOpt.value.get
toBeCompared.matches(reference)
}
}
}
}
}
object FieldDefinition {
val STRING = "string"
val INTEGER = "integer"
val FLOAT = "float"
val BOOLEAN = "boolean"
val TIMESTAMP = "timestamp"
val VERSION = "version"
val SIZE = "scalar-unit.size"
val TIME = "scalar-unit.time"
val FREQUENCY = "scalar-unit.frequency"
val LIST = "list"
val MAP = "map"
/**
 * Check if the given type is native; native types are the primitives plus list and map
 *
 * @param valueType type to check
 * @return true if it's a tosca native type (not a complex data type)
*/
def isToscaNativeType(valueType: String) = {
isToscaPrimitiveType(valueType) || valueType == LIST || valueType == MAP
}
/**
 * Check if the given type is a tosca primitive type
 *
 * @param valueType type to check
 * @return true if it's a tosca primitive type
*/
def isToscaPrimitiveType(valueType: String) = {
valueType match {
case STRING | INTEGER | FLOAT | BOOLEAN | TIMESTAMP | VERSION | SIZE | TIME | FREQUENCY => true
case _ => false
}
}
/**
 * Check if values of the given type can be compared
 *
 * @param valueType type to check
 * @return true if values of this type can be compared
*/
def isComparableType(valueType: String) = {
valueType match {
case INTEGER | FLOAT | BOOLEAN | TIMESTAMP | VERSION | SIZE | FREQUENCY | TIME => true
case _ => false
}
}
def isValidPrimitiveValue(valueAsText: String, valueType: String) = {
toToscaPrimitiveType(valueAsText, valueType).isValid
}
def toToscaPrimitiveType(valueAsText: String, valueType: String) = {
valueType match {
case STRING => ToscaString(valueAsText)
case INTEGER => ToscaInteger(valueAsText)
case BOOLEAN => ToscaBoolean(valueAsText)
case FLOAT => ToscaFloat(valueAsText)
case TIMESTAMP => ToscaTimestamp(valueAsText)
case TIME => ToscaTime(valueAsText)
case SIZE => ToscaSize(valueAsText)
case FREQUENCY => ToscaFrequency(valueAsText)
case VERSION => ToscaVersion(valueAsText)
}
}
}
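// Hedged usage sketch (not part of the original file): how the helpers above are meant to be
// combined. Constructing ParsedValue directly and the exact Tokens.* operator strings are
// assumptions; both are defined elsewhere in this compiler.
//
//   val constraint = PropertyConstraint(ParsedValue(Tokens.greater_or_equal_token), ParsedValue("8"))
//   FieldDefinition.isToscaPrimitiveType(FieldDefinition.INTEGER)              // true
//   PropertyConstraint.isValueValid("16", FieldDefinition.INTEGER, constraint) // true: 16 >= 8
//   PropertyConstraint.isValueValid("4", FieldDefinition.INTEGER, constraint)  // false: 4 < 8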
|
vuminhkh/tosca-runtime
|
compiler/src/main/scala/com/toscaruntime/compiler/tosca/ToscaModels.scala
|
Scala
|
mit
| 16,793
|
// code-examples/BasicOOP/scoping/scope-inheritance-wont-compile.scala
// WON'T COMPILE
package scopeA {
class Class1 {
private[scopeA] val scopeA_privateField = 1
protected[scopeA] val scopeA_protectedField = 2
private[Class1] val class1_privateField = 3
protected[Class1] val class1_protectedField = 4
private[this] val this_privateField = 5
protected[this] val this_protectedField = 6
}
class Class2 extends Class1 {
val field1 = scopeA_privateField
val field2 = scopeA_protectedField
val field3 = class1_privateField // ERROR
val field4 = class1_protectedField
val field5 = this_privateField // ERROR
val field6 = this_protectedField
}
}
package scopeB {
class Class2B extends scopeA.Class1 {
val field1 = scopeA_privateField // ERROR
val field2 = scopeA_protectedField
val field3 = class1_privateField // ERROR
val field4 = class1_protectedField
val field5 = this_privateField // ERROR
val field6 = this_protectedField
}
}
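// Why the lines marked ERROR are rejected (explanatory note, not part of the original example):
// - class1_privateField (private[Class1]) and this_privateField (private[this]) are visible only
//   inside Class1 itself (and, for private[this], only on the defining instance), so no subclass
//   can read them, whether it lives in scopeA or scopeB.
// - scopeA_privateField (private[scopeA]) is visible anywhere inside package scopeA, which is why
//   Class2 can read it but Class2B in the unrelated package scopeB cannot.
// - The protected variants stay accessible from subclasses, so field2, field4 and field6 compile
//   in both packages.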
|
XClouded/t4f-core
|
scala/src/tmp/BasicOOP/scoping/scope-inheritance-wont-compile.scala
|
Scala
|
apache-2.0
| 1,065
|
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.client.aws
import com.amazonaws.AmazonClientException
import com.amazonaws.auth.profile.ProfileCredentialsProvider
import com.amazonaws.auth.{AWSCredentials, BasicAWSCredentials, DefaultAWSCredentialsProviderChain}
import com.johnsnowlabs.client.CredentialParams
import com.johnsnowlabs.nlp.util.io.ResourceHelper
class AWSCredentialsProvider extends Credentials {
override val next: Option[Credentials] = Some(new AWSAnonymousCredentials)
override def buildCredentials(credentialParams: CredentialParams): Option[AWSCredentials] = {
if (credentialParams.accessKeyId != "anonymous" && credentialParams.region != "") {
try {
// Check whether the "spark_nlp" profile provides credentials; if not, fall back to the chain below
logger.info("Connecting to AWS with AWS Credentials Provider...")
return Some(new ProfileCredentialsProvider("spark_nlp").getCredentials)
} catch {
case _: Exception =>
try {
return Some(new DefaultAWSCredentialsProviderChain().getCredentials)
} catch {
case _: AmazonClientException =>
if (ResourceHelper.spark.sparkContext.hadoopConfiguration.get("fs.s3a.access.key") != null) {
val key = ResourceHelper.spark.sparkContext.hadoopConfiguration.get("fs.s3a.access.key")
val secret = ResourceHelper.spark.sparkContext.hadoopConfiguration.get("fs.s3a.secret.key")
return Some(new BasicAWSCredentials(key, secret))
} else {
next.get.buildCredentials(credentialParams)
}
case e: Exception => throw e
}
}
}
next.get.buildCredentials(credentialParams)
}
}
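// Summary of the resolution order implemented above (added note, not in the original source):
// 1. the "spark_nlp" shared-credentials profile,
// 2. the AWS DefaultAWSCredentialsProviderChain (env vars, system properties, instance roles, ...),
// 3. the Hadoop configuration keys fs.s3a.access.key / fs.s3a.secret.key,
// 4. the next provider in the chain, i.e. anonymous credentials.
// The chain is only attempted when accessKeyId is not "anonymous" and a region is configured.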
|
JohnSnowLabs/spark-nlp
|
src/main/scala/com/johnsnowlabs/client/aws/AWSCredentialsProvider.scala
|
Scala
|
apache-2.0
| 2,296
|
package org.skrushingiv.repository
package mongo
import reactivemongo.bson._
import reactivemongo.bson.Producer._
object DSL {
def $and(objs: BSONDocument*) =
nameOptionValue2Producer("$and", objs.headOption.map(_ ⇒ BSONArray(objs.map(valueProducer(_)): _*)))
def $or(objs: BSONDocument*) =
nameOptionValue2Producer("$or", objs.headOption.map(_ ⇒ BSONArray(objs.map(valueProducer(_)): _*)))
def $gte[T](value: T)(implicit writer: BSONWriter[T, _ <: BSONValue]) = element2Producer("$gte", value)
def $gt[T](value: T)(implicit writer: BSONWriter[T, _ <: BSONValue]) = element2Producer("$gt", value)
def $lt[T](value: T)(implicit writer: BSONWriter[T, _ <: BSONValue]) = element2Producer("$lt", value)
def $lte[T](value: T)(implicit writer: BSONWriter[T, _ <: BSONValue]) = element2Producer("$lte", value)
def $in[T](values: Seq[T])(implicit writer: BSONWriter[T, _ <: BSONValue]) = element2Producer("$in", values)
def $in[T](values: Option[Seq[T]])(implicit writer: BSONWriter[T, _ <: BSONValue]) =
values.map(objs ⇒ element2Producer("$in", BSONArray(objs.map(valueProducer(_)): _*)))
implicit class StringHelper(val s: String) extends AnyVal {
def $gte[T](value: T)(implicit writer: BSONWriter[T, _ <: BSONValue]) = element2Producer(s, document(DSL.$gte(value)))
def $gt[T](value: T)(implicit writer: BSONWriter[T, _ <: BSONValue]) = element2Producer(s, document(DSL.$gt(value)))
def $lt[T](value: T)(implicit writer: BSONWriter[T, _ <: BSONValue]) = element2Producer(s, document(DSL.$lt(value)))
def $lte[T](value: T)(implicit writer: BSONWriter[T, _ <: BSONValue]) = element2Producer(s, document(DSL.$lte(value)))
def $in[T](values: Seq[T])(implicit writer: BSONWriter[T, _ <: BSONValue]) = element2Producer(s, document(DSL.$in(values)))
def $in[T](values: Option[Seq[T]])(implicit writer: BSONWriter[T, _ <: BSONValue]) = nameOptionValue2Producer(s, DSL.$in(values).map(document(_)))
}
}
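// Hedged usage sketch (not part of the original file): the producers above compose into a
// selector document; the field names here are illustrative only.
//
//   import DSL._
//   val selector = document(
//     $and(
//       document("age" $gte 18),
//       document("status" $in Seq("active", "pending"))
//     )
//   )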
|
srushingiv/org.skrushingiv
|
src/main/scala/org/skrushingiv/repository/mongo/DSL.scala
|
Scala
|
mit
| 1,960
|
package lila.perfStat
import akka.actor.ActorRef
import play.api.libs.iteratee._
import lila.db.api._
import lila.db.Implicits._
import lila.game.{ Game, Pov, Query }
import lila.hub.Sequencer
import lila.rating.PerfType
import lila.user.User
final class PerfStatIndexer(storage: PerfStatStorage, sequencer: ActorRef) {
private implicit val timeout = makeTimeout minutes 2
def userPerf(user: User, perfType: PerfType): Funit = {
val p = scala.concurrent.Promise[Unit]()
sequencer ! Sequencer.work(compute(user, perfType), p.some)
p.future
}
private def compute(user: User, perfType: PerfType): Funit = {
import lila.game.tube.gameTube
import lila.game.BSONHandlers.gameBSONHandler
pimpQB($query {
Query.user(user.id) ++
Query.finished ++
Query.turnsMoreThan(2) ++
Query.variant(PerfType variantOf perfType)
}).sort(Query.sortChronological)
.cursor[Game]()
.enumerate(Int.MaxValue, stopOnError = true) |>>>
Iteratee.fold[Game, PerfStat](PerfStat.init(user.id, perfType)) {
case (perfStat, game) if game.perfType.contains(perfType) =>
Pov.ofUserId(game, user.id).fold(perfStat)(perfStat.agg)
case (perfStat, _) => perfStat
}
} flatMap storage.insert
def addGame(game: Game): Funit = game.players.flatMap { player =>
player.userId.map { userId =>
addPov(Pov(game, player), userId)
}
}.sequenceFu.void
private def addPov(pov: Pov, userId: String): Funit = pov.game.perfType ?? { perfType =>
storage.find(userId, perfType) flatMap {
_ ?? { perfStat =>
storage.update(perfStat agg pov)
}
}
}
}
|
JimmyMow/lila
|
modules/perfStat/src/main/PerfStatIndexer.scala
|
Scala
|
mit
| 1,665
|
package scala.meta.tests
package parsers
import scala.meta.dialects.Scala211
class TokenSuite extends ParseSuite {
test("class C") {
templStat("@foo\\n//bar bar\\ndef baz = qux")
}
}
|
beni55/scalameta
|
scalameta/parsers/src/test/scala/scala/meta/tests/parsers/TokenSuite.scala
|
Scala
|
bsd-3-clause
| 191
|
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js Test Suite        **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013-2015, LAMP/EPFL   **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */
package org.scalajs.testsuite.compiler
import org.junit.Test
import org.junit.Assert._
import java.{util => ju}
class DefaultMethodsTest {
@Test def canOverrideDefaultMethod(): Unit = {
var counter = 0
class SpecialIntComparator extends ju.Comparator[Int] {
def compare(o1: Int, o2: Int): Int =
o1.compareTo(o2)
override def reversed(): ju.Comparator[Int] = {
counter += 1
super.reversed()
}
}
val c = new SpecialIntComparator
assertTrue(c.compare(5, 7) < 0)
assertEquals(0, counter)
val reversed = c.reversed()
assertEquals(1, counter)
assertTrue(reversed.compare(5, 7) > 0)
}
}
|
mdedetrich/scala-js
|
test-suite/shared/src/test/require-jdk8/org/scalajs/testsuite/compiler/DefaultMethodsTest.scala
|
Scala
|
bsd-3-clause
| 1,195
|
package sss.db
import java.util.Date
import org.scalatest.DoNotDiscover
import sss.db.WhereOps.toWhere
import sss.db.ops.DbOps.DbRunOps
import scala.util.control.NonFatal
@DoNotDiscover
class DbV1Spec extends DbSpecSetup {
"A Db" should " allow insert into existing table " in {
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val numInserted = fixture.table.insert(0, "strId", new Date().getTime, 42).runSync.get
assert(numInserted == 1, s"Should be 1 row created not ${numInserted}!")
}
it should " be able to read all rows from a table " in {
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val time = new Date()
val rowsT = for {
r <- fixture.table.insert(0, "strId", time, 42)
rows <- fixture.table.map(r => r)
} yield rows
val rows = rowsT.runSync.get
assert(rows.size === 1, "Should only be one row!")
val row = rows(0)
assert(row[String]("strId") == "strId")
assert(row("createTime") === time.getTime)
assert(row("intVal") === 42)
}
it should " be able to read all rows from a table (in order!) " in {
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val time = new Date()
val insertedT: FutureTx[QueryResults[Int]] = FutureTx.sequence(
(0 to 10).map (fixture.table.insert(0, "strId", time, _))
) flatMap { seqInts: Seq[Int] =>
fixture.table.map ( r => r[Int]("intVal")
, OrderDesc ("intVal"))
}
assert(insertedT.runSync.get === (0 to 10).reverse)
val got = fixture.table.map (r => {
r
}).runSync.get
assert(fixture.table.map (r => {
r[Int]("intVal")
}, OrderAsc("intVal")).runSync.get == (0 to 10))
}
it should " be able to find the row inserted " in {
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val time = new Date()
fixture.table.insert(0, "strId", time, 45).runSync
val rows = fixture.table.filter(where(ps"createTime = ${time.getTime}")).runSync.get
assert(rows.size === 1, "Should only be one row found !")
val row = rows(0)
assert(row("strId") === "strId")
assert(row("createTime") === time.getTime)
assert(row("intVal") === 45)
}
it should " support shorthand filter" in {
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val time = new Date()
fixture.table.insert(3456, "strId", time, 45).runSync
val rows = fixture.table.filter("createTime" -> time).runSync.get
assert(rows.size === 1, "Should only be one row found !")
val row = rows(0)
assert(row("strId") === "strId")
assert(row("createTime") === time.getTime)
assert(row("intVal") === 45)
}
it should " be able to find the row inserted by id " in {
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val time = new Date()
(fixture.table.insert(99, "strId", time, 45) flatMap { i =>
fixture.table.get(99) map {
case None => fail("oh oh, failed to find row by id")
case Some(r) => assert(r("id") === 99)
}
}).runSync.get
}
it should " be able to find the row searching by field name " in {
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val time = new Date()
val p = for {
_ <- fixture.table.insert(99, "strId", time, 45)
r <- fixture.table.find(where(s"id = ?", 99))
} yield r match {
case None => fail("oh oh, failed to find row by id")
case Some(r1) => assert(r1("id") === 99)
}
p.runSync.get
}
it should " support shorthand find " in {
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val time = new Date()
val p = for {
_ <- fixture.table.insert(100, "strId", time, 45)
rOpt <- fixture.table.find("id" -> 100)
} yield rOpt match {
case None => fail("oh oh, failed to find row by id")
case Some(r) => assert(r("id") === 100)
}
p.runSync.get
}
it should " not be able to find a single row when 2 are present " in {
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val time = new Date()
(for {
_ <- fixture.table.insert(99, "strId", time, 45)
_ <- fixture.table.insert(100, "strId", time, 45)
} yield ()).runSync.get
try {
fixture.table.find(where(s"strId = ?", "strId")).runSync.get
fail("there are 2 rows with strId, should throw ...")
} catch {
case NonFatal(e) =>
}
}
it should "support a transaction" in {
val time = new Date()
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val p = for {
_ <- fixture.table.insert(999999, "strId", time, 45)
_ = throw new RuntimeException("Ah HA!")
r <- fixture.table.insert(199999, "strId", time, 45)
} yield r
assert(p.runSync.isFailure)
assert(fixture.table.find(idCol -> 199999).runSync.get.isEmpty)
assert(fixture.table.find(idCol -> 999999).runSync.get.isEmpty)
}
it should "correctly find using 'in' syntax" in {
val time = new Date()
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val plan = for {
_ <- fixture.table.insert(999999, "strId", time, 45)
_ <- fixture.table.insert(4, "strId", time, 45)
rows <- fixture.table.filter(where("id") in Set(999999,3,4))
rows2 <- fixture.table.filter(where("id") in Set(999999,3))
} yield (rows, rows2)
val (rows, rows2) = plan.runSync.get
assert(rows.size == 2)
assert(rows2.head.id === 999999)
}
it should "correctly find using 'not in' syntax" in {
val time = new Date()
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val plan = for {
_ <- fixture.table.insert(999999, "strId", time, 45)
_ <- fixture.table.insert(4, "strId", time, 45)
rows <- fixture.table.filter(where("id") notIn Set(999999, 3, 4))
_ = assert (rows.isEmpty)
rows2 <- fixture.table.filter(where("id") notIn Set(3))
_ = assert (rows2.size === 2)
} yield ()
plan.runSync.get
}
it should "correctly find using 'in' syntax with orderby and limit " in {
val time = new Date()
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val plan = for {
_ <- fixture.table.insert(999999, "strId", time, 45)
_ <- fixture.table.insert(4, "strId", time, 45)
rows <- fixture.table.filter(where("id") in Set(999999, 3, 4) orderBy OrderAsc("id") limit 1)
_ = assert(rows.head.id === 4)
} yield ()
plan.runSync.get
}
it should "correctly find using existing where plus 'in' syntax with orderby and limit " in {
val time = new Date()
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val plan = for {
_ <- fixture.table.insert(999999, "strId", time, 45)
_ <- fixture.table.insert(4, "strId", time, 45)
rows <- fixture.table.filter(
where("id > ?", 4) and
where("id") in Set(999999, 3, 4)
orderBy OrderAsc("id")
limit 1
)
} yield rows
val rows = plan.runSync.get
assert(rows.head.id === 999999)
}
it should "correctly filter using None syntax " in {
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val table = fixture.dbUnderTest.table("testBinary")
val plan = for {
_ <- table.insert(Map("byteVal" -> None))
//val all = table.map(identity)
rows <- table.filter(where("byteVal" -> None))
rowsWorks <- table.filter(where("byteVal IS NULL"))
_ = assert(rowsWorks.nonEmpty)
_ = assert(rows.nonEmpty)
_ = assert(rowsWorks == rows)
} yield ()
plan.runSync.get
}
it should "correctly filter using is Not Null syntax " in {
val db = fixture.dbUnderTest
import db.syncRunContext
import db.syncRunContext.executor
val table = fixture.dbUnderTest.table("testBinary")
import IsNull._
val plan = for {
_ <- table.insert(Map("byteVal" -> Some(3.toByte)))
//val all = table.map(identity)
rows <- table.filter(where("byteVal") is NotNull)
rowsWorks <- table.filter(where("byteVal IS NOT NULL"))
_ = assert(rowsWorks.nonEmpty)
_ = assert(rows.nonEmpty)
_ = assert(rowsWorks == rows)
} yield ()
plan.runSync.get
}
it should "correctly filter using is Null syntax " in {
val db = fixture.dbUnderTest
import db.syncRunContext
import IsNull._
import db.syncRunContext.executor
val table = fixture.dbUnderTest.table("testBinary")
val plan = for {
_ <- table.insert(Map("byteVal" -> None))
rows <- table.filter(where("byteVal") is Null)
rowsWorks <- table.filter(where("byteVal IS NULL"))
_ = assert(rowsWorks.nonEmpty)
_ = assert(rows.nonEmpty)
_ = assert(rowsWorks == rows)
} yield ()
plan.runSync.get
}
it should "be able to insert into a table with no identity column" in {
implicit val db = fixture.dbUnderTest
val table = db.table("testNoIdentity")
val colName = "boolval"
val inRows = Seq(
Map(
"id" -> 1,
colName -> true
),
Map(
"id" -> 2,
colName -> false
),
Map(
"id" -> 3,
colName -> true
)
)
table.insertNoIdentity(inRows(0)).dbRunSyncGet
table.insertNoIdentity(inRows(1)).dbRunSyncGet
table.insertNoIdentity(inRows(2)).dbRunSyncGet
val seqRows = table.map(identity).dbRunSyncGet.map(_.asMap)
seqRows shouldBe inRows
// should insert a second row with id 2 (the table has no primary key)
table.insertNoIdentity(Map(
"id" -> 2,
colName -> true
)).dbRunSyncGet
table.map(identity, where("id" -> 2))
.dbRunSyncGet
.map(_.boolean(colName)) shouldBe Seq(false, true)
}
}
|
mcsherrylabs/sss.db
|
src/test/scala/sss/db/DbV1Spec.scala
|
Scala
|
gpl-3.0
| 10,266
|
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
import com.netflix.atlas.core.util.BoundedPriorityBuffer
import com.netflix.atlas.core.util.Math
import java.util.Comparator
trait FilterExpr extends TimeSeriesExpr
object FilterExpr {
case class Stat(expr: TimeSeriesExpr, stat: String) extends FilterExpr {
override def toString: String = s"$expr,$stat,:stat"
def dataExprs: List[DataExpr] = expr.dataExprs
def isGrouped: Boolean = expr.isGrouped
def groupByKey(tags: Map[String, String]): Option[String] = expr.groupByKey(tags)
def finalGrouping: List[String] = expr.finalGrouping
def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
val rs = expr.eval(context, data)
val newData = rs.data.map { t =>
val v = SummaryStats(t, context.start, context.end).get(stat)
val seq = new FunctionTimeSeq(DsType.Gauge, context.step, _ => v)
TimeSeries(t.tags, s"stat-$stat(${t.label})", seq)
}
ResultSet(this, newData, rs.state)
}
}
trait StatExpr extends FilterExpr {
def name: String
override def toString: String = s":stat-$name"
def dataExprs: List[DataExpr] = Nil
def isGrouped: Boolean = false
def groupByKey(tags: Map[String, String]): Option[String] = None
def finalGrouping: List[String] = Nil
def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
ResultSet(this, Nil, context.state)
}
}
case object StatAvg extends StatExpr {
def name: String = "avg"
}
case object StatMax extends StatExpr {
def name: String = "max"
}
case object StatMin extends StatExpr {
def name: String = "min"
}
case object StatLast extends StatExpr {
def name: String = "last"
}
case object StatCount extends StatExpr {
def name: String = "count"
}
case object StatTotal extends StatExpr {
def name: String = "total"
}
case class Filter(expr1: TimeSeriesExpr, expr2: TimeSeriesExpr) extends FilterExpr {
if (!expr1.isGrouped) require(!expr2.isGrouped, "filter grouping must match expr grouping")
override def toString: String = s"$expr1,$expr2,:filter"
def dataExprs: List[DataExpr] = expr1.dataExprs ::: expr2.dataExprs
def isGrouped: Boolean = expr1.isGrouped
def groupByKey(tags: Map[String, String]): Option[String] = expr1.groupByKey(tags)
def finalGrouping: List[String] = expr1.finalGrouping
def matches(context: EvalContext, ts: TimeSeries): Boolean = {
var m = false
ts.data.foreach(context.start, context.end) { (_, v) =>
m = m || Math.toBoolean(v)
}
m
}
def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
val rs1 = expr1.eval(context, data)
val rs2 = expr2.eval(context, data)
val result = (expr1.isGrouped, expr2.isGrouped) match {
case (_, false) =>
require(rs2.data.lengthCompare(1) == 0, "empty result for filter expression")
if (matches(context, rs2.data.head)) rs1.data else Nil
case (false, _) =>
// Shouldn't be able to get here
Nil
case (true, true) =>
val g2 = rs2.data.groupBy(t => groupByKey(t.tags))
rs1.data.filter { t1 =>
val k = groupByKey(t1.tags)
g2.get(k).fold(false) {
case t2 :: Nil => matches(context, t2)
case _ => false
}
}
}
val msg = s"${result.size} of ${rs1.data.size} lines matched filter"
ResultSet(this, result, rs1.state ++ rs2.state, List(msg))
}
}
/**
* Base type for Top/Bottom K operators.
*/
trait PriorityFilterExpr extends FilterExpr {
/** Operation name to use for encoding expression. */
def opName: String
/** Comparator that determines the priority order. */
def comparator: Comparator[TimeSeriesSummary]
/** Aggregation to use for time series that are not one of the K highest priority. */
def othersAggregator(start: Long, end: Long): TimeSeries.Aggregator = {
TimeSeries.NoopAggregator
}
/** Grouped expression to select the input from. */
def expr: TimeSeriesExpr
/** Summary statistic that should be used for the priority. */
def stat: String
/** Max number of entries to return. */
def k: Int
require(k > 0, s"k must be positive ($k <= 0)")
override def toString: String = s"$expr,$stat,$k,:$opName"
def dataExprs: List[DataExpr] = expr.dataExprs
def isGrouped: Boolean = expr.isGrouped
def groupByKey(tags: Map[String, String]): Option[String] = expr.groupByKey(tags)
def finalGrouping: List[String] = expr.finalGrouping
def eval(context: EvalContext, data: Map[DataExpr, List[TimeSeries]]): ResultSet = {
val buffer = new BoundedPriorityBuffer[TimeSeriesSummary](k, comparator)
val aggregator = othersAggregator(context.start, context.end)
val rs = expr.eval(context, data)
rs.data.foreach { t =>
val v = SummaryStats(t, context.start, context.end).get(stat)
val other = buffer.add(TimeSeriesSummary(t, v))
if (other != null) {
aggregator.update(other.timeSeries)
}
}
val newData = aggregator match {
case aggr if aggr.isEmpty =>
buffer.toList.map(_.timeSeries)
case _ =>
val others = aggregator.result()
val otherTags = others.tags ++ finalGrouping.map(k => k -> "--others--").toMap
others.withTags(otherTags) :: buffer.toList.map(_.timeSeries)
}
ResultSet(this, newData, rs.state)
}
}
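// Example reading of the concrete operators below (added note, derived from the code above):
// TopK(expr, "max", 2) keeps the two series with the highest "max" summary statistic and renders
// as "$expr,max,2,:topk"; BottomK keeps the lowest. The *Others* variants additionally fold the
// remaining series into a single line tagged "--others--" using min, max, sum or avg.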
case class TopK(expr: TimeSeriesExpr, stat: String, k: Int) extends PriorityFilterExpr {
override def opName: String = "topk"
override def comparator: Comparator[TimeSeriesSummary] = StatComparator.reversed()
}
case class TopKOthersMin(expr: TimeSeriesExpr, stat: String, k: Int) extends PriorityFilterExpr {
override def opName: String = "topk-others-min"
override def comparator: Comparator[TimeSeriesSummary] = StatComparator.reversed()
override def othersAggregator(start: Long, end: Long): TimeSeries.Aggregator = {
new TimeSeries.SimpleAggregator(start, end, Math.minNaN)
}
}
case class TopKOthersMax(expr: TimeSeriesExpr, stat: String, k: Int) extends PriorityFilterExpr {
override def opName: String = "topk-others-max"
override def comparator: Comparator[TimeSeriesSummary] = StatComparator.reversed()
override def othersAggregator(start: Long, end: Long): TimeSeries.Aggregator = {
new TimeSeries.SimpleAggregator(start, end, Math.maxNaN)
}
}
case class TopKOthersSum(expr: TimeSeriesExpr, stat: String, k: Int) extends PriorityFilterExpr {
override def opName: String = "topk-others-sum"
override def comparator: Comparator[TimeSeriesSummary] = StatComparator.reversed()
override def othersAggregator(start: Long, end: Long): TimeSeries.Aggregator = {
new TimeSeries.SimpleAggregator(start, end, Math.addNaN)
}
}
case class TopKOthersAvg(expr: TimeSeriesExpr, stat: String, k: Int) extends PriorityFilterExpr {
override def opName: String = "topk-others-avg"
override def comparator: Comparator[TimeSeriesSummary] = StatComparator.reversed()
override def othersAggregator(start: Long, end: Long): TimeSeries.Aggregator = {
new TimeSeries.AvgAggregator(start, end)
}
}
case class BottomK(expr: TimeSeriesExpr, stat: String, k: Int) extends PriorityFilterExpr {
override def opName: String = "bottomk"
override def comparator: Comparator[TimeSeriesSummary] = StatComparator
}
case class BottomKOthersMin(expr: TimeSeriesExpr, stat: String, k: Int)
extends PriorityFilterExpr {
override def opName: String = "bottomk-others-min"
override def comparator: Comparator[TimeSeriesSummary] = StatComparator
override def othersAggregator(start: Long, end: Long): TimeSeries.Aggregator = {
new TimeSeries.SimpleAggregator(start, end, Math.minNaN)
}
}
case class BottomKOthersMax(expr: TimeSeriesExpr, stat: String, k: Int)
extends PriorityFilterExpr {
override def opName: String = "bottomk-others-max"
override def comparator: Comparator[TimeSeriesSummary] = StatComparator
override def othersAggregator(start: Long, end: Long): TimeSeries.Aggregator = {
new TimeSeries.SimpleAggregator(start, end, Math.maxNaN)
}
}
case class BottomKOthersSum(expr: TimeSeriesExpr, stat: String, k: Int)
extends PriorityFilterExpr {
override def opName: String = "bottomk-others-sum"
override def comparator: Comparator[TimeSeriesSummary] = StatComparator
override def othersAggregator(start: Long, end: Long): TimeSeries.Aggregator = {
new TimeSeries.SimpleAggregator(start, end, Math.addNaN)
}
}
case class BottomKOthersAvg(expr: TimeSeriesExpr, stat: String, k: Int)
extends PriorityFilterExpr {
override def opName: String = "bottomk-others-avg"
override def comparator: Comparator[TimeSeriesSummary] = StatComparator
override def othersAggregator(start: Long, end: Long): TimeSeries.Aggregator = {
new TimeSeries.AvgAggregator(start, end)
}
}
/**
* Caches the statistic value associated with a time series. Used for priority filters to
* avoid recomputing the summary statistics on each comparison operation.
*/
case class TimeSeriesSummary(timeSeries: TimeSeries, stat: Double)
/** Default natural order comparator used for priority filters. */
private object StatComparator extends Comparator[TimeSeriesSummary] {
override def compare(t1: TimeSeriesSummary, t2: TimeSeriesSummary): Int = {
java.lang.Double.compare(t1.stat, t2.stat)
}
}
}
|
Netflix/atlas
|
atlas-core/src/main/scala/com/netflix/atlas/core/model/FilterExpr.scala
|
Scala
|
apache-2.0
| 10,415
|
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.cluster.test
import java.io.File
import java.net.{InetAddress, ServerSocket}
import akka.actor.{ActorSelection, ActorSystem, PoisonPill, Terminated}
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.LazyLogging
import org.apache.commons.io.FileUtils
import org.apache.curator.test.TestingServer
import org.squbs.cluster.ZkCluster
import ZkClusterMultiActorSystemTestKit._
import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.language.{implicitConversions, postfixOps}
import scala.util.{Failure, Success, Try, Random}
abstract class ZkClusterMultiActorSystemTestKit(systemName: String)
extends TestKit(ActorSystem(systemName, akkaRemoteConfig)) with LazyLogging {
val timeout: FiniteDuration
val clusterSize: Int
private var actorSystems = Map.empty[String, ActorSystem]
def zkClusterExts: Map[String, ZkCluster] = actorSystems map { sys => sys._1 -> ZkCluster(sys._2)}
def startCluster(): Unit = {
Random.setSeed(System.nanoTime)
actorSystems = (0 until clusterSize) map {num =>
val sysName: String = num
sysName -> ActorSystem(sysName, akkaRemoteConfig withFallback zkConfig)
} toMap
// start the lazy actor
zkClusterExts foreach { ext =>
watch(ext._2.zkClusterActor)
}
Thread.sleep(timeout.toMillis / 10)
}
protected lazy val zkConfig = ConfigFactory.parseString(
s"""
|zkCluster {
| connectionString = "127.0.0.1:$ZOOKEEPER_DEFAULT_PORT"
| namespace = "zkclustersystest-${System.currentTimeMillis()}"
| segments = 1
|}
""".stripMargin)
def shutdownCluster(): Unit = {
zkClusterExts.foreach(ext => killSystem(ext._1))
system.terminate()
Thread.sleep(timeout.toMillis / 10)
}
implicit protected def int2SystemName(num: Int): String = s"member-$num"
implicit protected def zkCluster2Selection(zkCluster: ZkCluster): ActorSelection =
system.actorSelection(zkCluster.zkClusterActor.path.toStringWithAddress(zkCluster.zkAddress))
def killSystem(sysName: String): Unit = {
zkClusterExts(sysName).addShutdownListener((_) => actorSystems(sysName).terminate())
zkClusterExts(sysName).zkClusterActor ! PoisonPill
expectMsgType[Terminated](timeout)
actorSystems -= sysName
logger.info("system {} got killed", sysName)
}
def bringUpSystem(sysName: String): Unit = {
actorSystems += sysName -> ActorSystem(sysName, akkaRemoteConfig withFallback zkConfig)
watch(zkClusterExts(sysName).zkClusterActor)
logger.info("system {} is up", sysName)
Thread.sleep(timeout.toMillis / 5)
}
@tailrec final def pickASystemRandomly(exclude: Option[String] = None): String = {
val candidate: String = Math.abs(Random.nextInt()) % clusterSize
(actorSystems get candidate, exclude) match {
case (Some(sys), Some(ex)) if candidate != ex =>
candidate
case (Some(sys), None) =>
candidate
case _ => pickASystemRandomly(exclude)
}
}
}
object ZkClusterMultiActorSystemTestKit {
val ZOOKEEPER_STARTUP_TIME = 5000
val ZOOKEEPER_DEFAULT_PORT = 8085
val zookeeperDir = new File("zookeeper")
FileUtils.deleteQuietly(zookeeperDir)
new TestingServer(ZOOKEEPER_DEFAULT_PORT, zookeeperDir, true)
Thread.sleep(ZOOKEEPER_STARTUP_TIME)
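// Added note: binding a ServerSocket on port 0 lets the OS pick a free ephemeral port, which is
// recorded and released so the Akka remote transport below can bind it. A small race window
// remains in which another process could claim the port first.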
private def nextPort = {
val s = new ServerSocket(0)
val p = Try(s.getLocalPort) match {
case Success(port) => port
case Failure(e) => throw e
}
s.close()
p
}
def akkaRemoteConfig: Config = ConfigFactory.parseString(
s"""
|akka {
| actor {
| provider = "akka.remote.RemoteActorRefProvider"
| }
| remote {
| enabled-transports = ["akka.remote.netty.tcp"]
| netty.tcp {
| port = $nextPort
| hostname = ${InetAddress.getLocalHost.getHostAddress}
| server-socket-worker-pool {
| pool-size-min = 2
| pool-size-max = 4
| }
| client-socket-worker-pool {
| pool-size-min = 2
| pool-size-max = 4
| }
| connection-timeout = 1 s
| }
| log-received-messages = on
| log-sent-messages = on
| command-ack-timeout = 3 s
| retry-window = 1s
| gate-invalid-addresses-for = 1s
| transport-failure-detector {
| heartbeat-interval = 2s
| acceptable-heartbeat-pause = 5s
| }
| watch-failure-detector {
| heartbeat-interval = 2s
| acceptable-heartbeat-pause = 5s
| threshold = 10.0
| }
| }
|}
""".stripMargin)
}
|
az-qbradley/squbs
|
squbs-zkcluster/src/test/scala/org/squbs/cluster/test/ZkClusterMultiActorSystemTestKit.scala
|
Scala
|
apache-2.0
| 5,377
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.optimization
import scala.util.Random
import scala.collection.JavaConversions._
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import org.apache.spark.mllib.regression._
import org.apache.spark.mllib.util.LocalSparkContext
import org.apache.spark.mllib.linalg.Vectors
object GradientDescentSuite {
def generateLogisticInputAsList(
offset: Double,
scale: Double,
nPoints: Int,
seed: Int): java.util.List[LabeledPoint] = {
seqAsJavaList(generateGDInput(offset, scale, nPoints, seed))
}
// Generate input of the form Y = logistic(offset + scale * X)
def generateGDInput(
offset: Double,
scale: Double,
nPoints: Int,
seed: Int): Seq[LabeledPoint] = {
val rnd = new Random(seed)
val x1 = Array.fill[Double](nPoints)(rnd.nextGaussian())
val unifRand = new scala.util.Random(45)
val rLogis = (0 until nPoints).map { i =>
val u = unifRand.nextDouble()
math.log(u) - math.log(1.0-u)
}
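// log(u) - log(1 - u) is the quantile (inverse CDF) of the standard logistic distribution,
// so rLogis is i.i.d. logistic noise and y below follows a logistic regression model.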
val y: Seq[Int] = (0 until nPoints).map { i =>
val yVal = offset + scale * x1(i) + rLogis(i)
if (yVal > 0) 1 else 0
}
(0 until nPoints).map(i => LabeledPoint(y(i), Vectors.dense(x1(i))))
}
}
class GradientDescentSuite extends FunSuite with LocalSparkContext with ShouldMatchers {
test("Assert the loss is decreasing.") {
val nPoints = 10000
val A = 2.0
val B = -1.5
val initialB = -1.0
val initialWeights = Array(initialB)
val gradient = new LogisticGradient()
val updater = new SimpleUpdater()
val stepSize = 1.0
val numIterations = 10
val regParam = 0
val miniBatchFrac = 1.0
// Add an extra variable consisting of all 1.0's for the intercept.
val testData = GradientDescentSuite.generateGDInput(A, B, nPoints, 42)
val data = testData.map { case LabeledPoint(label, features) =>
label -> Vectors.dense(1.0, features.toArray: _*)
}
val dataRDD = sc.parallelize(data, 2).cache()
val initialWeightsWithIntercept = Vectors.dense(1.0, initialWeights: _*)
val (_, loss) = GradientDescent.runMiniBatchSGD(
dataRDD,
gradient,
updater,
stepSize,
numIterations,
regParam,
miniBatchFrac,
initialWeightsWithIntercept)
assert(loss.last - loss.head < 0, "loss isn't decreasing.")
val lossDiff = loss.init.zip(loss.tail).map { case (lhs, rhs) => lhs - rhs }
assert(lossDiff.count(_ > 0).toDouble / lossDiff.size > 0.8)
}
test("Test the loss and gradient of first iteration with regularization.") {
val gradient = new LogisticGradient()
val updater = new SquaredL2Updater()
// Add an extra variable consisting of all 1.0's for the intercept.
val testData = GradientDescentSuite.generateGDInput(2.0, -1.5, 10000, 42)
val data = testData.map { case LabeledPoint(label, features) =>
label -> Vectors.dense(1.0, features.toArray: _*)
}
val dataRDD = sc.parallelize(data, 2).cache()
// Prepare non-zero weights
val initialWeightsWithIntercept = Vectors.dense(1.0, 0.5)
val regParam0 = 0
val (newWeights0, loss0) = GradientDescent.runMiniBatchSGD(
dataRDD, gradient, updater, 1, 1, regParam0, 1.0, initialWeightsWithIntercept)
val regParam1 = 1
val (newWeights1, loss1) = GradientDescent.runMiniBatchSGD(
dataRDD, gradient, updater, 1, 1, regParam1, 1.0, initialWeightsWithIntercept)
def compareDouble(x: Double, y: Double, tol: Double = 1E-3): Boolean = {
math.abs(x - y) / (math.abs(y) + 1e-15) < tol
}
assert(compareDouble(
loss1(0),
loss0(0) + (math.pow(initialWeightsWithIntercept(0), 2) +
math.pow(initialWeightsWithIntercept(1), 2)) / 2),
"""For non-zero weights, the regVal should be \frac{1}{2}\sum_i w_i^2.""")
assert(
compareDouble(newWeights1(0) , newWeights0(0) - initialWeightsWithIntercept(0)) &&
compareDouble(newWeights1(1) , newWeights0(1) - initialWeightsWithIntercept(1)),
"The different between newWeights with/without regularization " +
"should be initialWeightsWithIntercept.")
}
}
|
zhangjunfang/eclipse-dir
|
spark/mllib/src/test/scala/org/apache/spark/mllib/optimization/GradientDescentSuite.scala
|
Scala
|
bsd-2-clause
| 4,957
|
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.unicomplex.pipeline
import akka.actor.{Actor, ActorSystem}
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.server.{RejectionHandler, Route}
import akka.pattern._
import akka.stream.scaladsl.{BidiFlow, Flow, GraphDSL}
import akka.stream.{ActorMaterializer, BidiShape}
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.pipeline.{Context, PipelineFlow, PipelineFlowFactory, RequestContext}
import org.squbs.unicomplex.Timeouts._
import org.squbs.unicomplex._
import scala.concurrent.Await
object PipelineSpec {
val classPaths = Array(getClass.getClassLoader.getResource("classpaths/pipeline/PipelineSpec").getPath)
val config = ConfigFactory.parseString(
s"""
|default-listener.bind-port = 0
|squbs {
| actorsystem-name = PipelineSpec
| ${JMX.prefixConfig} = true
|}
|
|dummyFlow {
| type = squbs.pipelineflow
| factory = org.squbs.unicomplex.pipeline.DummyFlow
|}
|
|preFlow {
| type = squbs.pipelineflow
| factory = org.squbs.unicomplex.pipeline.PreFlow
|}
|
|postFlow {
| type = squbs.pipelineflow
| factory = org.squbs.unicomplex.pipeline.PostFlow
|}
|
|squbs.pipeline.server.default {
| pre-flow = preFlow
| post-flow = postFlow
|}
""".stripMargin
)
val boot = UnicomplexBoot(config)
.createUsing {(name, config) => ActorSystem(name, config)}
.scanComponents(classPaths)
.initExtensions.start()
}
class PipelineSpec extends TestKit(
PipelineSpec.boot.actorSystem) with FlatSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {
implicit val am = ActorMaterializer()
val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)
val port = portBindings("default-listener")
override def afterAll() {
Unicomplex(system).uniActor ! GracefulStop
}
it should "build the flow with defaults" in {
val (actualEntity, actualHeaders) = Await.result(entityAsStringWithHeaders(s"http://127.0.0.1:$port/1/dummy"), awaitMax)
val expectedHeaders = Seq(
RawHeader("keyD", "valD"),
RawHeader("keyPreOutbound", "valPreOutbound"),
RawHeader("keyPostOutbound", "valPostOutbound")).sortBy(_.name)
actualHeaders.filter(_.name.startsWith("key")).sortBy(_.name) should equal(expectedHeaders)
actualEntity should equal(Seq(
RawHeader("keyA", "valA"),
RawHeader("keyB", "valB"),
RawHeader("keyC", "valC"),
RawHeader("keyPreInbound", "valPreInbound"),
RawHeader("keyPostInbound", "valPostInbound")).sortBy(_.name).mkString(","))
}
it should "build the flow only with defaults" in {
val (actualEntity, actualHeaders) = Await.result(entityAsStringWithHeaders(s"http://127.0.0.1:$port/2/dummy"), awaitMax)
val expectedHeaders = Seq(
RawHeader("keyPreOutbound", "valPreOutbound"),
RawHeader("keyPostOutbound", "valPostOutbound")).sortBy(_.name)
actualHeaders.filter(_.name.startsWith("key")).sortBy(_.name) should equal(expectedHeaders)
actualEntity should equal(Seq(
RawHeader("keyPreInbound", "valPreInbound"),
RawHeader("keyPostInbound", "valPostInbound")).sortBy(_.name).mkString(","))
}
it should "build the flow without defaults" in {
val (actualEntity, actualHeaders) = Await.result(entityAsStringWithHeaders(s"http://127.0.0.1:$port/3/dummy"), awaitMax)
val expectedHeaders = Seq(RawHeader("keyD", "valD")).sortBy(_.name)
actualHeaders.filter(_.name.startsWith("key")).sortBy(_.name) should equal(expectedHeaders)
actualEntity should equal(Seq(
RawHeader("keyA", "valA"),
RawHeader("keyB", "valB"),
RawHeader("keyC", "valC")).sortBy(_.name).mkString(","))
}
it should "not build a pipeline" in {
val (actualEntity, actualHeaders) = Await.result(entityAsStringWithHeaders(s"http://127.0.0.1:$port/4/dummy"), awaitMax)
actualHeaders.filter(_.name.startsWith("key")).sortBy(_.name) should equal(Seq.empty[HttpHeader])
actualEntity should equal("")
}
it should "build the flow with defaults for non-route actor" in {
val (actualEntity, actualHeaders) = Await.result(entityAsStringWithHeaders(s"http://127.0.0.1:$port/5/dummy"), awaitMax)
val expectedHeaders = Seq(
RawHeader("keyD", "valD"),
RawHeader("keyPreOutbound", "valPreOutbound"),
RawHeader("keyPostOutbound", "valPostOutbound")).sortBy(_.name)
actualHeaders.filter(_.name.startsWith("key")).sortBy(_.name) should equal(expectedHeaders)
actualEntity should equal(Seq(
RawHeader("keyA", "valA"),
RawHeader("keyB", "valB"),
RawHeader("keyC", "valC"),
RawHeader("keyPreInbound", "valPreInbound"),
RawHeader("keyPostInbound", "valPostInbound")).sortBy(_.name).mkString(","))
}
it should "bypass the response flow when request times out" in {
val (_, actualHeaders) = Await.result(entityAsStringWithHeaders(s"http://127.0.0.1:$port/5/timeout"), awaitMax)
actualHeaders.filter(_.name.startsWith("key")).sortBy(_.name) should equal(Seq.empty[HttpHeader])
}
it should "run the pipeline when resource could not be found in route level" in {
val (actualEntity, actualHeaders) = Await.result(entityAsStringWithHeaders(s"http://127.0.0.1:$port/1/notexists"), awaitMax)
val expectedHeaders = Seq(
RawHeader("keyD", "valD"),
RawHeader("keyPreOutbound", "valPreOutbound"),
RawHeader("keyPostOutbound", "valPostOutbound")).sortBy(_.name)
actualHeaders.filter(_.name.startsWith("key")).sortBy(_.name) should equal(expectedHeaders)
actualEntity should equal("Custom route level not found message.")
}
it should "not build a flow for the resource not found in webcontext level scenario" in {
val (actualEntity, actualHeaders) = Await.result(entityAsStringWithHeaders(s"http://127.0.0.1:$port/notexists"), awaitMax)
actualHeaders.filter(_.name.startsWith("key")).sortBy(_.name) should equal(Seq.empty[HttpHeader])
actualEntity should equal(StatusCodes.NotFound.defaultMessage)
}
}
class DummyRoute extends RouteDefinition {
override def route: Route =
path("dummy") {
extract(_.request.headers) { headers =>
// Filter any non-test headers
complete(headers.filter(_.name.startsWith("key")).sortBy(_.name).mkString(","))
}
}
override def rejectionHandler: Option[RejectionHandler] = Some(RejectionHandler.newBuilder().handleNotFound {
complete(StatusCodes.NotFound, "Custom route level not found message.")
}.result())
}
class DummyActor extends Actor {
override def receive: Receive = {
case req @ HttpRequest(_, Uri(_, _, Path("/5/timeout"), _, _), _, _, _) => // Do nothing
case req: HttpRequest =>
sender() ! HttpResponse(entity = req.headers.filter(_.name.startsWith("key")).sortBy(_.name).mkString(","))
}
}
class DummyFlow extends PipelineFlowFactory {
override def create(context: Context)(implicit system: ActorSystem): PipelineFlow = {
BidiFlow.fromGraph(GraphDSL.create() { implicit b =>
import GraphDSL.Implicits._
val stageA = b.add(Flow[RequestContext].map { rc => rc.addRequestHeaders(RawHeader("keyA", "valA")) })
val stageB = b.add(Flow[RequestContext].map { rc => rc.addRequestHeaders(RawHeader("keyB", "valB")) })
val stageC = b.add(dummyBidi)
val stageD = b.add(Flow[RequestContext].map { rc => rc.addResponseHeaders(RawHeader("keyD", "valD")) })
stageA ~> stageB ~> stageC.in1
stageD <~ stageC.out2
BidiShape(stageA.in, stageC.out1, stageC.in2, stageD.out)
})
}
val dummyBidi = BidiFlow.fromGraph(GraphDSL.create() { implicit b =>
val requestFlow = b.add(Flow[RequestContext].map { rc => rc.addRequestHeaders(RawHeader("keyC", "valC")) } )
val responseFlow = b.add(Flow[RequestContext])
BidiShape.fromFlows(requestFlow, responseFlow)
})
}
class PreFlow extends PipelineFlowFactory {
override def create(context: Context)(implicit system: ActorSystem): PipelineFlow = {
BidiFlow.fromGraph(GraphDSL.create() { implicit b =>
val inbound = b.add(Flow[RequestContext].map { rc => rc.addRequestHeaders(RawHeader("keyPreInbound", "valPreInbound")) })
val outbound = b.add(Flow[RequestContext].map { rc => rc.addResponseHeaders(RawHeader("keyPreOutbound", "valPreOutbound")) })
BidiShape.fromFlows(inbound, outbound)
})
}
}
class PostFlow extends PipelineFlowFactory {
override def create(context: Context)(implicit system: ActorSystem): PipelineFlow = {
BidiFlow.fromGraph(GraphDSL.create() { implicit b =>
val inbound = b.add(Flow[RequestContext].map { rc => rc.addRequestHeaders(RawHeader("keyPostInbound", "valPostInbound")) })
val outbound = b.add(Flow[RequestContext].map { rc => rc.addResponseHeaders(RawHeader("keyPostOutbound", "valPostOutbound")) })
BidiShape.fromFlows(inbound, outbound)
})
}
}
|
SarathChandran/squbs
|
squbs-unicomplex/src/test/scala/org/squbs/unicomplex/pipeline/PipelineSpec.scala
|
Scala
|
apache-2.0
| 9,894
|
package models
import play.api.libs.json.JsValue
case class LGTMResponse(json: JsValue) {
  // Extract the image URL from the LGTM API response and use it as the Slack message text.
  def getSlackMessage(): String = (json \ "imageUrl").as[String]
}
|
uqtimes/SlackBotScala
|
app/models/LGTMResponse.scala
|
Scala
|
mit
| 181
|
package ignition.core.jobs.utils
import ignition.core.testsupport.spark.SharedSparkContext
import ignition.core.jobs.utils.RDDUtils._
import org.scalatest._
import scala.util.Random
class RDDUtilsSpec extends FlatSpec with Matchers with SharedSparkContext {
"RDDUtils" should "provide groupByKeyAndTake" in {
(10 to 60 by 10).foreach { take =>
val rdd = sc.parallelize((1 to 400).map(x => "a" -> Random.nextInt()) ++ (1 to 400).map(x => "b" -> Random.nextInt()), 60)
val result = rdd.groupByKeyAndTake(take).collect().toMap
result("a").length shouldBe take
result("b").length shouldBe take
}
}
it should "provide groupByKeyAndTakeOrdered" in {
val take = 50
val aList = (1 to Random.nextInt(400) + 100).map(x => "a" -> Random.nextInt()).toList
val bList = (1 to Random.nextInt(400) + 100).map(x => "b" -> Random.nextInt()).toList
val rdd = sc.parallelize(aList ++ bList)
val result = rdd.groupByKeyAndTakeOrdered(take).collect().toMap
result("a") shouldBe aList.map(_._2).sorted.take(take)
result("b") shouldBe bList.map(_._2).sorted.take(take)
}
}
|
chaordic/ignition-core
|
src/test/scala/ignition/core/jobs/utils/RDDUtilsSpec.scala
|
Scala
|
mit
| 1,123
|
package com.twitter.finatra.http.internal.exceptions
import com.twitter.finagle.http.{Request, Response, Status}
import com.twitter.finatra.http.exceptions.{DefaultExceptionMapper, ExceptionMapper}
import com.twitter.finatra.http.response.SimpleResponse
import com.twitter.inject.Test
import com.twitter.inject.app.TestInjector
import org.jboss.netty.handler.codec.http.HttpResponseStatus
import org.specs2.mock.Mockito
class ExceptionManagerTest extends Test with Mockito {
def newExceptionManager =
new ExceptionManager(TestInjector(), TestDefaultExceptionMapper)
val exceptionManager = newExceptionManager
exceptionManager.add[ForbiddenExceptionMapper]
exceptionManager.add(new UnauthorizedExceptionMapper)
exceptionManager.add[UnauthorizedException1Mapper]
def testException(e: Throwable, status: HttpResponseStatus) {
val request = mock[Request]
val response = exceptionManager.toResponse(request, e)
response.status should equal(status)
}
"map exceptions to mappers installed with Guice" in {
testException(new ForbiddenException, Status.Forbidden)
}
"map exceptions to mappers installed manually" in {
testException(new UnauthorizedException, Status.Unauthorized)
}
"map subclass exceptions to parent class mappers" in {
testException(new ForbiddenException1, Status.Forbidden)
testException(new ForbiddenException2, Status.Forbidden)
}
"map exceptions to mappers of most specific class" in {
testException(new UnauthorizedException1, Status.NotFound)
}
"fall back to default mapper" in {
testException(new UnregisteredException, Status.InternalServerError)
}
"throw an IllegalStateException if exception mapped twice" in {
val exceptionManager = newExceptionManager
exceptionManager.add[ForbiddenExceptionMapper]
intercept[IllegalStateException] {
exceptionManager.add[ForbiddenExceptionMapper]
}
}
"replace an exception mapper" in {
val exceptionManager = newExceptionManager
exceptionManager.add[ForbiddenExceptionMapper]
exceptionManager.replace[ForbiddenIsOkExceptionMapper]
val request = mock[Request]
val response = exceptionManager.toResponse(request, new ForbiddenException)
response.status should equal(Status.Ok)
}
"replace the default exception mapper" in {
val exceptionManager = newExceptionManager
exceptionManager.replace[EverythingIsFineMapper]
val request = mock[Request]
val response = exceptionManager.toResponse(request, new Exception)
response.status should equal(Status.Ok)
}
}
class UnregisteredException extends Exception
class ForbiddenException extends Exception
class ForbiddenException1 extends ForbiddenException
class ForbiddenException2 extends ForbiddenException1
class UnauthorizedException extends Exception
class UnauthorizedException1 extends UnauthorizedException
object TestDefaultExceptionMapper extends DefaultExceptionMapper {
def toResponse(request: Request, throwable: Throwable): Response = {
new SimpleResponse(Status.InternalServerError)
}
}
class ForbiddenExceptionMapper extends ExceptionMapper[ForbiddenException] {
def toResponse(request: Request, throwable: ForbiddenException): Response =
new SimpleResponse(Status.Forbidden)
}
class ForbiddenIsOkExceptionMapper extends ExceptionMapper[ForbiddenException] {
def toResponse(request: Request, throwable: ForbiddenException): Response =
new SimpleResponse(Status.Ok)
}
class UnauthorizedExceptionMapper extends ExceptionMapper[UnauthorizedException] {
def toResponse(request: Request, throwable: UnauthorizedException): Response =
new SimpleResponse(Status.Unauthorized)
}
class UnauthorizedException1Mapper extends ExceptionMapper[UnauthorizedException1] {
def toResponse(request: Request, throwable: UnauthorizedException1): Response =
new SimpleResponse(Status.NotFound)
}
class EverythingIsFineMapper extends ExceptionMapper[Throwable] {
def toResponse(request: Request, throwable: Throwable): Response =
new SimpleResponse(Status.Ok)
}
|
tempbottle/finatra
|
http/src/test/scala/com/twitter/finatra/http/internal/exceptions/ExceptionManagerTest.scala
|
Scala
|
apache-2.0
| 4,059
|
package edu.gemini.osgi.tools
package app
import scala.xml._
case class BundleSpec(
name: String,
version: edu.gemini.osgi.tools.Version)
object BundleSpec {
// TODO: get rid of
def apply(n: Int, s: String, v: Version): BundleSpec =
apply(s, v)
def apply(mf: java.util.jar.Manifest): BundleSpec = {
val bmf = new BundleManifest(mf)
BundleSpec(bmf.symbolicName, bmf.version)
}
}
|
arturog8m/ocs
|
project/src/main/scala/edu/gemini/osgi/tools/app/BundleSpec.scala
|
Scala
|
bsd-3-clause
| 409
|
package com.kozlowst.oms.rest.routes
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import com.kozlowst.oms.common.{Order, Side}
import com.kozlowst.oms.rest.models.OrderRequest
import spray.json.DefaultJsonProtocol
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
//import com.kozlowst.oms.rest.Publisher
/**
* Created by tomek on 4/9/17.
*/
trait OrderRoute extends DefaultJsonProtocol with BaseService {
implicit val orderRequestFormat = jsonFormat7(OrderRequest)
val orderRoute =
(path("order") & post) {
entity(as[OrderRequest]) { order =>
println("Order Request: " + order)
val o = Order.createMarketOrd(order.accountName, order.comment, order.createdBy,
order.instrumentId, order.size, Side(order.side), order.autoStop)
publisher ! o
complete((StatusCodes.OK, "order created"))
}
} ~
(path("order") & put) {
complete((StatusCodes.OK, "order updated"))
}
}
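// Hedged example request (added note): field names mirror the OrderRequest usage above; the JSON
// types of instrumentId, size, side and autoStop are assumptions.
//
//   POST /order
//   {
//     "accountName": "acc-1",
//     "comment": "demo order",
//     "createdBy": "trader-1",
//     "instrumentId": "EURUSD",
//     "size": 100,
//     "side": "BUY",
//     "autoStop": false
//   }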
|
rysiekblah/oms-akka-poc
|
order-rest-service/src/main/scala/com/kozlowst/oms/rest/routes/OrderRoute.scala
|
Scala
|
mit
| 1,018
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression}
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext}
import org.apache.spark.sql.types._
/**
* This is a helper class to generate an append-only row-based hash map that can act as a 'cache'
* for extremely fast key-value lookups while evaluating aggregates (and fall back to the
* `BytesToBytesMap` if a given key isn't found). This is 'codegened' in HashAggregate to speed
* up aggregates w/ key.
*
 * We also have VectorizedHashMapGenerator, which generates an append-only vectorized hash map.
* We choose one of the two as the 1st level, fast hash map during aggregation.
*
* NOTE: This row-based hash map currently doesn't support nullable keys and falls back to the
* `BytesToBytesMap` to store them.
*/
class RowBasedHashMapGenerator(
ctx: CodegenContext,
aggregateExpressions: Seq[AggregateExpression],
generatedClassName: String,
groupingKeySchema: StructType,
bufferSchema: StructType)
extends HashMapGenerator (ctx, aggregateExpressions, generatedClassName,
groupingKeySchema, bufferSchema) {
override protected def initializeAggregateHashMap(): String = {
val generatedKeySchema: String =
s"new org.apache.spark.sql.types.StructType()" +
groupingKeySchema.map { key =>
val keyName = ctx.addReferenceMinorObj(key.name)
key.dataType match {
case d: DecimalType =>
s""".add($keyName, org.apache.spark.sql.types.DataTypes.createDecimalType(
|${d.precision}, ${d.scale}))""".stripMargin
case _ =>
s""".add($keyName, org.apache.spark.sql.types.DataTypes.${key.dataType})"""
}
      }.mkString("\n").concat(";")
val generatedValueSchema: String =
s"new org.apache.spark.sql.types.StructType()" +
bufferSchema.map { key =>
val keyName = ctx.addReferenceMinorObj(key.name)
key.dataType match {
case d: DecimalType =>
s""".add($keyName, org.apache.spark.sql.types.DataTypes.createDecimalType(
|${d.precision}, ${d.scale}))""".stripMargin
case _ =>
s""".add($keyName, org.apache.spark.sql.types.DataTypes.${key.dataType})"""
}
      }.mkString("\n").concat(";")
s"""
| private org.apache.spark.sql.catalyst.expressions.RowBasedKeyValueBatch batch;
| private int[] buckets;
| private int capacity = 1 << 16;
| private double loadFactor = 0.5;
| private int numBuckets = (int) (capacity / loadFactor);
| private int maxSteps = 2;
| private int numRows = 0;
| private org.apache.spark.sql.types.StructType keySchema = $generatedKeySchema
| private org.apache.spark.sql.types.StructType valueSchema = $generatedValueSchema
| private Object emptyVBase;
| private long emptyVOff;
| private int emptyVLen;
| private boolean isBatchFull = false;
|
|
| public $generatedClassName(
| org.apache.spark.memory.TaskMemoryManager taskMemoryManager,
| InternalRow emptyAggregationBuffer) {
| batch = org.apache.spark.sql.catalyst.expressions.RowBasedKeyValueBatch
| .allocate(keySchema, valueSchema, taskMemoryManager, capacity);
|
| final UnsafeProjection valueProjection = UnsafeProjection.create(valueSchema);
| final byte[] emptyBuffer = valueProjection.apply(emptyAggregationBuffer).getBytes();
|
| emptyVBase = emptyBuffer;
| emptyVOff = Platform.BYTE_ARRAY_OFFSET;
| emptyVLen = emptyBuffer.length;
|
| buckets = new int[numBuckets];
| java.util.Arrays.fill(buckets, -1);
| }
""".stripMargin
}
/**
* Generates a method that returns true if the group-by keys exist at a given index in the
* associated [[org.apache.spark.sql.catalyst.expressions.RowBasedKeyValueBatch]].
*
*/
protected def generateEquals(): String = {
def genEqualsForKeys(groupingKeys: Seq[Buffer]): String = {
groupingKeys.zipWithIndex.map { case (key: Buffer, ordinal: Int) =>
s"""(${ctx.genEqual(key.dataType, ctx.getValue("row",
key.dataType, ordinal.toString()), key.name)})"""
}.mkString(" && ")
}
s"""
|private boolean equals(int idx, $groupingKeySignature) {
| UnsafeRow row = batch.getKeyRow(buckets[idx]);
| return ${genEqualsForKeys(groupingKeys)};
|}
""".stripMargin
}
/**
* Generates a method that returns a
* [[org.apache.spark.sql.catalyst.expressions.UnsafeRow]] which keeps track of the
* aggregate value(s) for a given set of keys. If the corresponding row doesn't exist, the
* generated method adds the corresponding row in the associated
* [[org.apache.spark.sql.catalyst.expressions.RowBasedKeyValueBatch]].
*
*/
protected def generateFindOrInsert(): String = {
val numVarLenFields = groupingKeys.map(_.dataType).count {
case dt if UnsafeRow.isFixedLength(dt) => false
// TODO: consider large decimal and interval type
case _ => true
}
val createUnsafeRowForKey = groupingKeys.zipWithIndex.map { case (key: Buffer, ordinal: Int) =>
key.dataType match {
case t: DecimalType =>
s"agg_rowWriter.write(${ordinal}, ${key.name}, ${t.precision}, ${t.scale})"
case t: DataType =>
if (!t.isInstanceOf[StringType] && !ctx.isPrimitiveType(t)) {
throw new IllegalArgumentException(s"cannot generate code for unsupported type: $t")
}
s"agg_rowWriter.write(${ordinal}, ${key.name})"
}
    }.mkString(";\n")
s"""
|public org.apache.spark.sql.catalyst.expressions.UnsafeRow findOrInsert(${
groupingKeySignature}) {
| long h = hash(${groupingKeys.map(_.name).mkString(", ")});
| int step = 0;
| int idx = (int) h & (numBuckets - 1);
| while (step < maxSteps) {
| // Return bucket index if it's either an empty slot or already contains the key
| if (buckets[idx] == -1) {
| if (numRows < capacity && !isBatchFull) {
| // creating the unsafe for new entry
| UnsafeRow agg_result = new UnsafeRow(${groupingKeySchema.length});
| org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder
| = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result,
| ${numVarLenFields * 32});
| org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter
| = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(
| agg_holder,
| ${groupingKeySchema.length});
| agg_holder.reset(); //TODO: investigate if reset or zeroout are actually needed
| agg_rowWriter.zeroOutNullBytes();
| ${createUnsafeRowForKey};
| agg_result.setTotalSize(agg_holder.totalSize());
| Object kbase = agg_result.getBaseObject();
| long koff = agg_result.getBaseOffset();
| int klen = agg_result.getSizeInBytes();
|
| UnsafeRow vRow
| = batch.appendRow(kbase, koff, klen, emptyVBase, emptyVOff, emptyVLen);
| if (vRow == null) {
| isBatchFull = true;
| } else {
| buckets[idx] = numRows++;
| }
| return vRow;
| } else {
| // No more space
| return null;
| }
| } else if (equals(idx, ${groupingKeys.map(_.name).mkString(", ")})) {
| return batch.getValueRow(buckets[idx]);
| }
| idx = (idx + 1) & (numBuckets - 1);
| step++;
| }
| // Didn't find it
| return null;
|}
""".stripMargin
}
protected def generateRowIterator(): String = {
s"""
|public org.apache.spark.unsafe.KVIterator<UnsafeRow, UnsafeRow> rowIterator() {
| return batch.rowIterator();
|}
""".stripMargin
}
}
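// Editorial sketch (not part of the original file): findOrInsert in the generated class
// signals "no room in the fast map" by returning null, so the generated HashAggregate code
// is expected to fall back roughly like this (helper names below are hypothetical):
//   UnsafeRow buffer = fastHashMap.findOrInsert(key0, key1);
//   if (buffer == null) {
//     buffer = lookupInBytesToBytesMap(key0, key1);   // slower second-level map
//   }
//   updateAggregationBuffer(buffer);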
|
akopich/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/RowBasedHashMapGenerator.scala
|
Scala
|
apache-2.0
| 9,204
|
/*
* Licensed to SequoiaDB (C) under one or more contributor license agreements.
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. The SequoiaDB (C) licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sequoiadb.spark.util
/**
* Source File Name = ConnectionUtil.scala
* Description = Connection utilities
* Restrictions = N/A
* Change Activity:
* Date Who Description
* ======== ================== ================================================
* 20150307 Tao Wang Initial Draft
*/
import com.sequoiadb.datasource.DatasourceOptions
import com.sequoiadb.net.ConfigOptions
import com.sequoiadb.exception.BaseException
import org.bson.util.JSON
import org.bson.BSONObject
object ConnectionUtil {
def initConfigOptions : ConfigOptions = {
val nwOpt = new ConfigOptions()
// 5 seconds timeout
nwOpt.setConnectTimeout(5000)
// do not retry
nwOpt.setMaxAutoConnectRetryTime(0)
nwOpt
}
def initSequoiadbOptions : DatasourceOptions = {
val dsOpt = new DatasourceOptions()
// connection pool max to 3
dsOpt.setMaxCount (3)
dsOpt.setDeltaIncCount (1)
dsOpt.setMaxIdleCount (1)
dsOpt
}
def getPreferenceObj ( preference: String ) : BSONObject = {
try {
JSON.parse ( preference ).asInstanceOf[BSONObject]
} catch {
case ex: Exception => throw new BaseException ( "SDB_INVALIDARG" )
}
}
  /*
   * Gets the sdb connection preference as an option object string.
   * If PreferedInstance is "r", it is changed to "A".
   * @return object string
   */
def getPreferenceStr (preference: String) : String = {
val preferenceObj = getPreferenceObj ( preference )
if ( (preferenceObj.get("PreferedInstance").asInstanceOf[String])
.equalsIgnoreCase("r")
) {
preferenceObj.put ("PreferedInstance", "A");
}
return preferenceObj.toString
}
}
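// Editorial usage sketch (not part of the original file), illustrating getPreferenceStr:
//   ConnectionUtil.getPreferenceStr("""{"PreferedInstance": "r"}""")
//   // yields a BSON object string whose PreferedInstance field has been rewritten to "A";
//   // any other PreferedInstance value is passed through unchanged.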
|
SequoiaDB/spark-sequoiadb
|
src/main/scala/com/sequoiadb/spark/util/ConnectionUtil.scala
|
Scala
|
apache-2.0
| 2,505
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.executor
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.metrics.ExecutorMetricType
/**
* :: DeveloperApi ::
* Metrics tracked for executors and the driver.
*
* Executor-level metrics are sent from each executor to the driver as part of the Heartbeat.
*/
@DeveloperApi
class ExecutorMetrics private[spark] extends Serializable {
// Metrics are indexed by ExecutorMetricType.values
private val metrics = new Array[Long](ExecutorMetricType.values.length)
// the first element is initialized to -1, indicating that the values for the array
// haven't been set yet.
metrics(0) = -1
/** Returns the value for the specified metricType. */
def getMetricValue(metricType: ExecutorMetricType): Long = {
metrics(ExecutorMetricType.metricIdxMap(metricType))
}
/** Returns true if the values for the metrics have been set, false otherwise. */
def isSet(): Boolean = metrics(0) > -1
private[spark] def this(metrics: Array[Long]) {
this()
Array.copy(metrics, 0, this.metrics, 0, Math.min(metrics.size, this.metrics.size))
}
/**
* Constructor: create the ExecutorMetrics with the values specified.
*
* @param executorMetrics map of executor metric name to value
*/
private[spark] def this(executorMetrics: Map[String, Long]) {
this()
(0 until ExecutorMetricType.values.length).foreach { idx =>
metrics(idx) = executorMetrics.getOrElse(ExecutorMetricType.values(idx).name, 0L)
}
}
/**
* Compare the specified executor metrics values with the current executor metric values,
* and update the value for any metrics where the new value for the metric is larger.
*
* @param executorMetrics the executor metrics to compare
* @return if there is a new peak value for any metric
*/
private[spark] def compareAndUpdatePeakValues(executorMetrics: ExecutorMetrics): Boolean = {
var updated = false
(0 until ExecutorMetricType.values.length).foreach { idx =>
if (executorMetrics.metrics(idx) > metrics(idx)) {
updated = true
metrics(idx) = executorMetrics.metrics(idx)
}
}
updated
}
}
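// Editorial usage sketch (not part of the original file): the driver can keep one peak
// ExecutorMetrics per executor and fold every heartbeat into it, roughly:
//   val peaks = new ExecutorMetrics(firstHeartbeatValues)                  // Array[Long]
//   val changed = peaks.compareAndUpdatePeakValues(new ExecutorMetrics(nextHeartbeatValues))
//   // `changed` is true only if some metric reached a new peak; the value names are illustrative.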
|
ahnqirage/spark
|
core/src/main/scala/org/apache/spark/executor/ExecutorMetrics.scala
|
Scala
|
apache-2.0
| 2,964
|
package com.twitter.finagle.loadbalancer
import com.twitter.finagle._
import com.twitter.finagle.service.FailingFactory
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.util.{OnReady, Rng, Updater}
import com.twitter.util.{Activity, Future, Promise, Time, Closable, Return, Throw}
import java.util.concurrent.atomic.AtomicInteger
import scala.annotation.tailrec
/**
* Basic functionality for a load balancer. Balancer takes care of
* maintaining and updating a distributor, which is responsible for
* distributing load across a number of nodes.
*
* This arrangement allows separate functionality to be mixed in. For
* example, we can specify and mix in a load metric (via a Node) and
* a balancer (a Distributor) separately.
*/
private trait Balancer[Req, Rep] extends ServiceFactory[Req, Rep] { self =>
/**
* The maximum number of balancing tries (yielding unavailable
* factories) until we give up.
*/
protected def maxEffort: Int
/**
* The throwable to use when the load balancer is empty.
*/
protected def emptyException: Throwable
protected lazy val empty = Future.exception(emptyException)
/**
* Balancer reports stats here.
*/
protected def statsReceiver: StatsReceiver
/**
* The base type of nodes over which load is balanced.
* Nodes define the load metric that is used; distributors
* like P2C will use these to decide where to balance
* the next connection request.
*/
protected trait NodeT extends ServiceFactory[Req, Rep] {
type This
/**
* The current load, in units of the active metric.
*/
def load: Double
/**
* The number of pending requests to this node.
*/
def pending: Int
/**
* A token is a random integer identifying the node.
* It persists through node updates.
*/
def token: Int
/**
* The underlying service factory.
*/
def factory: ServiceFactory[Req, Rep]
}
/**
* The type of Node. Mixed in.
*/
protected type Node <: AnyRef with NodeT { type This = Node }
/**
* Create a new node representing the given factory, with the given
* weight. Report node-related stats to the given StatsReceiver.
*/
protected def newNode(
factory: ServiceFactory[Req, Rep],
statsReceiver: StatsReceiver
): Node
/**
* Create a node whose sole purpose it is to endlessly fail
* with the given cause.
*/
protected def failingNode(cause: Throwable): Node
/**
* The base type of the load balancer distributor. Distributors are
* updated nondestructively, but, as with nodes, may share some
* data across updates.
*/
protected trait DistributorT {
type This
/**
* The vector of nodes over which we are currently balancing.
*/
def vector: Vector[Node]
/**
* Pick the next node. This is the main load balancer.
*/
def pick(): Node
/**
* True if this distributor needs to be rebuilt. (For example, it
* may need to be updated with current availabilities.)
*/
def needsRebuild: Boolean
/**
* Rebuild this distributor.
*/
def rebuild(): This
/**
* Rebuild this distributor with a new vector.
*/
def rebuild(vector: Vector[Node]): This
}
/**
* The type of Distributor. Mixed in.
*/
protected type Distributor <: DistributorT { type This = Distributor }
/**
* Create an initial distributor.
*/
protected def initDistributor(): Distributor
/**
* Balancer status is the best of its constituent nodes.
*/
override def status: Status = Status.bestOf(dist.vector, nodeStatus)
private[this] val nodeStatus: Node => Status = _.factory.status
@volatile protected var dist: Distributor = initDistributor()
protected def rebuild(): Unit = {
updater(Rebuild(dist))
}
private[this] val gauges = Seq(
statsReceiver.addGauge("available") {
dist.vector.count(n => n.status == Status.Open)
},
statsReceiver.addGauge("busy") {
dist.vector.count(n => n.status == Status.Busy)
},
statsReceiver.addGauge("closed") {
dist.vector.count(n => n.status == Status.Closed)
},
statsReceiver.addGauge("load") {
dist.vector.map(_.pending).sum
},
statsReceiver.addGauge("size") { dist.vector.size })
private[this] val adds = statsReceiver.counter("adds")
private[this] val removes = statsReceiver.counter("removes")
protected sealed trait Update
protected case class NewList(
svcFactories: Traversable[ServiceFactory[Req, Rep]]) extends Update
protected case class Rebuild(cur: Distributor) extends Update
protected case class Invoke(fn: Distributor => Unit) extends Update
private[this] val updater = new Updater[Update] {
protected def preprocess(updates: Seq[Update]): Seq[Update] = {
if (updates.size == 1)
return updates
val types = updates.reverse.groupBy(_.getClass)
val update: Seq[Update] = types.get(classOf[NewList]) match {
case Some(Seq(last, _*)) => Seq(last)
case None => types.getOrElse(classOf[Rebuild], Nil).take(1)
}
update ++ types.getOrElse(classOf[Invoke], Nil).reverse
}
def handle(u: Update): Unit = u match {
case NewList(svcFactories) =>
val newFactories = svcFactories.toSet
val (transfer, closed) = dist.vector.partition { node =>
newFactories.contains(node.factory)
}
for (node <- closed)
node.close()
removes.incr(closed.size)
// we could demand that 'n' proxies hashCode, equals (i.e. is a Proxy)
val transferNodes = transfer.map(n => n.factory -> n).toMap
var numNew = 0
val newNodes = svcFactories.map {
case f if transferNodes.contains(f) => transferNodes(f)
case f =>
numNew += 1
newNode(f, statsReceiver.scope(f.toString))
}
dist = dist.rebuild(newNodes.toVector)
adds.incr(numNew)
case Rebuild(_dist) if _dist == dist =>
dist = dist.rebuild()
case Rebuild(_stale) =>
case Invoke(fn) =>
fn(dist)
}
}
/**
* Update the load balancer's service list. After the update, which
* may run asynchronously, is completed, the load balancer balances
* across these factories and no others.
*/
def update(factories: Traversable[ServiceFactory[Req, Rep]]): Unit =
updater(NewList(factories))
/**
* Invoke `fn` on the current distributor. This is done through the updater
* and is serialized with distributor updates and other invocations.
*/
protected def invoke(fn: Distributor => Unit): Unit = {
updater(Invoke(fn))
}
@tailrec
private[this] def pick(nodes: Distributor, count: Int): Node = {
if (count == 0)
return null.asInstanceOf[Node]
val n = dist.pick()
if (n.factory.status == Status.Open) n
else pick(nodes, count-1)
}
def apply(conn: ClientConnection): Future[Service[Req, Rep]] = {
val d = dist
var n = pick(d, maxEffort)
if (n == null) {
rebuild()
n = dist.pick()
}
val f = n(conn)
if (d.needsRebuild && d == dist)
rebuild()
f
}
def close(deadline: Time): Future[Unit] = {
for (gauge <- gauges) gauge.remove()
removes.incr(dist.vector.size)
Closable.all(dist.vector:_*).close(deadline)
}
}
/**
* A Balancer mix-in to provide automatic updating via Activities.
*/
private trait Updating[Req, Rep] extends Balancer[Req, Rep] with OnReady {
private[this] val ready = new Promise[Unit]
def onReady: Future[Unit] = ready
/**
* An activity representing the active set of ServiceFactories.
*/
protected def activity: Activity[Traversable[ServiceFactory[Req, Rep]]]
/*
* Subscribe to the Activity and dynamically update the load
   * balancer as it (successfully) changes.
*
* The observation is terminated when the Balancer is closed.
*/
private[this] val observation = activity.states.respond {
case Activity.Pending =>
case Activity.Ok(newList) =>
update(newList)
ready.setDone()
case Activity.Failed(_) =>
// On resolution failure, consider the
// load balancer ready (to serve errors).
ready.setDone()
}
override def close(deadline: Time): Future[Unit] = {
observation.close(deadline) transform { _ => super.close(deadline) } ensure {
ready.setDone()
}
}
}
/**
* Provide Nodes whose 'load' is the current number of pending
* requests and thus will result in least-loaded load balancer.
*/
private trait LeastLoaded[Req, Rep] { self: Balancer[Req, Rep] =>
protected def rng: Rng
protected case class Node(factory: ServiceFactory[Req, Rep], counter: AtomicInteger, token: Int)
extends ServiceFactoryProxy[Req, Rep](factory)
with NodeT {
type This = Node
def load = counter.get
def pending = counter.get
override def apply(conn: ClientConnection) = {
counter.incrementAndGet()
super.apply(conn) transform {
case Return(svc) =>
Future.value(new ServiceProxy(svc) {
override def close(deadline: Time) =
super.close(deadline) ensure {
counter.decrementAndGet()
}
})
case t@Throw(_) =>
counter.decrementAndGet()
Future.const(t)
}
}
}
protected def newNode(factory: ServiceFactory[Req, Rep], statsReceiver: StatsReceiver) =
Node(factory, new AtomicInteger(0), rng.nextInt())
private[this] val failingLoad = new AtomicInteger(0)
protected def failingNode(cause: Throwable) = Node(new FailingFactory(cause), failingLoad, 0)
}
/**
* An O(1), concurrent, weighted fair load balancer. This uses the
* ideas behind "power of 2 choices" [1] combined with O(1) biased
* coin flipping through the aliasing method, described in
* [[com.twitter.finagle.util.Drv Drv]].
*
* [1] Michael Mitzenmacher. 2001. The Power of Two Choices in
* Randomized Load Balancing. IEEE Trans. Parallel Distrib. Syst. 12,
* 10 (October 2001), 1094-1104.
*/
private trait P2C[Req, Rep] { self: Balancer[Req, Rep] =>
/**
* Our sturdy coin flipper.
*/
protected def rng: Rng
protected class Distributor(val vector: Vector[Node]) extends DistributorT {
type This = Distributor
private[this] val nodeUp: Node => Boolean = { node =>
node.status == Status.Open
}
private[this] val (up, down) = vector.partition(nodeUp)
def needsRebuild: Boolean = down.nonEmpty && down.exists(nodeUp)
def rebuild(): This = new Distributor(vector)
def rebuild(vec: Vector[Node]): This = new Distributor(vec)
def pick(): Node = {
if (vector.isEmpty)
return failingNode(emptyException)
// if all nodes are down, we might as well try to send requests somewhere
// as our view of the world may be out of date.
val vec = if (up.isEmpty) down else up
val size = vec.size
if (size == 1) vec.head else {
val a = rng.nextInt(size)
var b = rng.nextInt(size)
// Try to pick b, b != a, up to 10 times.
var i = 10
while (a == b && i > 0) {
b = rng.nextInt(size)
i -= 1
}
val nodeA = vec(a)
val nodeB = vec(b)
if (nodeA.load < nodeB.load) nodeA else nodeB
}
}
}
protected def initDistributor() = new Distributor(Vector.empty)
}
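// Editorial sketch (not part of the original file): as the top-of-file comment says, a
// concrete balancer is assembled by mixing a load metric and a distributor into Balancer,
// along these lines (the values, and `sr`/`endpoints`, are illustrative placeholders):
//   new Balancer[Req, Rep] with LeastLoaded[Req, Rep] with P2C[Req, Rep] with Updating[Req, Rep] {
//     protected def maxEffort = 5
//     protected def rng = Rng.threadLocal
//     protected def emptyException = new NoBrokersAvailableException
//     protected def statsReceiver = sr
//     protected def activity = endpoints
//   }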
|
lucaslanger/finagle
|
finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/Balancer.scala
|
Scala
|
apache-2.0
| 11,452
|
package com.landoop.streamreactor.connect.hive.sink.evolution
import com.landoop.streamreactor.connect.hive.{DatabaseName, TableName}
import org.apache.hadoop.hive.metastore.IMetaStoreClient
import org.apache.kafka.connect.data.Schema
import scala.util.Try
/**
  * An implementation of [[EvolutionPolicy]] that performs no checks.
*
* This means that invalid data may be written and/or exceptions may be thrown.
*
* This policy can be useful in tests but should be avoided in production code.
*/
object NoopEvolutionPolicy extends EvolutionPolicy {
override def evolve(dbName: DatabaseName,
tableName: TableName,
metastoreSchema: Schema,
inputSchema: Schema)
(implicit client: IMetaStoreClient): Try[Schema] = Try(metastoreSchema)
}
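// Editorial note (not part of the original file): with an implicit IMetaStoreClient in
// scope, evolve ignores the input schema entirely and always succeeds, e.g.
//   NoopEvolutionPolicy.evolve(db, table, metastoreSchema, inputSchema)  // Success(metastoreSchema)
// which is why the scaladoc above recommends it for tests only.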
|
datamountaineer/stream-reactor
|
kafka-connect-hive/src/main/scala/com/landoop/streamreactor/connect/hive/sink/evolution/NoopEvolutionPolicy.scala
|
Scala
|
apache-2.0
| 827
|
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
package sbt.protocol
final class SettingQuery private (
val setting: String) extends sbt.protocol.CommandMessage() with Serializable {
override def equals(o: Any): Boolean = this.eq(o.asInstanceOf[AnyRef]) || (o match {
case x: SettingQuery => (this.setting == x.setting)
case _ => false
})
override def hashCode: Int = {
37 * (37 * (17 + "sbt.protocol.SettingQuery".##) + setting.##)
}
override def toString: String = {
"SettingQuery(" + setting + ")"
}
private[this] def copy(setting: String = setting): SettingQuery = {
new SettingQuery(setting)
}
def withSetting(setting: String): SettingQuery = {
copy(setting = setting)
}
}
object SettingQuery {
def apply(setting: String): SettingQuery = new SettingQuery(setting)
}
|
xuwei-k/xsbt
|
protocol/src/main/contraband-scala/sbt/protocol/SettingQuery.scala
|
Scala
|
apache-2.0
| 906
|
package com.arcusys.valamis.web.servlet.lrsProxy
import java.io.ByteArrayInputStream
import java.net.{URLDecoder, URLEncoder}
import java.util
import javax.servlet._
import javax.servlet.http.{HttpServletRequest, HttpServletRequestWrapper, HttpServletResponse}
import org.apache.commons.codec.CharEncoding
import org.apache.http.HttpHeaders._
import scala.collection.JavaConverters._
trait MethodOverrideFilter {
private val Method = "method"
private val Content = "content"
private val TincanHeaders = Seq("authorization", "content-type", "x-experience-api-version", Content)
def doFilter(req: HttpServletRequest,
res: HttpServletResponse) : HttpServletRequest = {
req.getMethod match {
case "POST" =>
req.getParameter(Method) match {
case null => req
case method => getOverridedRequest(req, method)
}
case _ =>
req
}
}
// this request impl should hide http method overriding from client code
def getOverridedRequest(req: HttpServletRequest, method:String): HttpServletRequestWrapper = new HttpServletRequestWrapper(req) {
private val encoding = req.getCharacterEncoding
private val enc = if (encoding == null || encoding.trim.length == 0) "UTF-8" else encoding
private final val bodyContent = URLDecoder.decode(scala.io.Source.fromInputStream(req.getInputStream).mkString, enc)
private val newParameters = bodyContent.split("&")
.map(_.split("=", 2))
.map(p => (p(0), p(1))).toMap
private def getNewParameter(name: String): Option[String] = {
newParameters.find(_._1.equalsIgnoreCase(name)).map(_._2)
}
override def getMethod = method.toUpperCase
override def getHeader(name: String): String = {
name.toLowerCase match {
case "content-length" => getContentLength.toString
case _ => getNewParameter(name).getOrElse(super.getHeader(name))
}
}
override def getHeaderNames: util.Enumeration[Any] = {
(super.getHeaderNames.asScala ++ newParameters.keys).toSeq.distinct.iterator.asJavaEnumeration
}
override def getParameterMap: util.Map[String, Array[String]] = {
newParameters.map(p => (p._1, Array(p._2))).asJava
}
override def getParameter(name: String): String =
newParameters.find(_._1.equalsIgnoreCase(name)).map(_._2).orNull
override def getContentType: String = {
getHeader(CONTENT_TYPE)
}
override def getContentLength: Int = {
getNewParameter(Content).map(_.length).getOrElse(0)
}
override def getInputStream = {
val content = getNewParameter(Content).getOrElse("")
val byteArrayInputStream = new ByteArrayInputStream(content.getBytes(CharEncoding.UTF_8))
new ServletInputStream {
def read() = byteArrayInputStream.read()
override def close() = {
byteArrayInputStream.close()
super.close()
}
}
}
override def getQueryString: String = {
val originalParametersPairs = super.getQueryString.split("&")
val newParametersPairs = newParameters
.filterNot(p => TincanHeaders.contains(p._1.toLowerCase))
.filterNot(_._1 == "registration") // fix for articulate packages
.map(p => p._1 + "=" + URLEncoder.encode(p._2, enc))
(originalParametersPairs ++ newParametersPairs).mkString("&")
}
}
}
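// Editorial sketch (not part of the original file): an xAPI client restricted to POST can
// tunnel another verb through this filter, e.g.
//   POST /lrs/statements?method=PUT
//   body: content=<url-encoded statement>&Authorization=...&X-Experience-API-Version=1.0.3
// getOverridedRequest then reports getMethod == "PUT", exposes the tunnelled headers via
// getHeader, and serves the decoded `content` parameter as the request body.
// (The path and header values above are illustrative.)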
|
igor-borisov/valamis
|
valamis-portlets/src/main/scala/com/arcusys/valamis/web/servlet/lrsProxy/MethodOverrideFilter.scala
|
Scala
|
gpl-3.0
| 3,393
|
/*
* Copyright (C) 2014 GRNET S.A.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package gr.grnet.cdmi
import java.io.{PrintWriter, StringWriter}
import scala.io.Source
/**
*
*/
package object client {
final implicit class RichThrowable(val t: Throwable) extends AnyVal {
def stringStackTrace(indent: String = " "): String = {
val sw = new StringWriter()
val pw = new PrintWriter(sw)
t.printStackTrace(pw)
val string0 = sw.toString
val lines0 = Source.fromString(string0).getLines()
if(lines0.hasNext) {
val lines = lines0.map(indent + _).toStream
        val string = (lines.head /: lines.tail)(_ + "\n" + _)
string
}
else {
indent
}
}
}
}
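// Editorial usage sketch (not part of the original file):
//   try riskyCall() catch {
//     case t: Throwable => log.error("failed:\n" + t.stringStackTrace(indent = "  "))
//   }
// Every line of the printed stack trace is prefixed with the given indent; `riskyCall`
// and `log` are placeholders.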
|
grnet/cdmi-testsuite
|
src/main/scala/gr/grnet/cdmi/client/package.scala
|
Scala
|
gpl-3.0
| 1,349
|
package com.sksamuel.avro4s.github
import java.io.ByteArrayOutputStream
import com.sksamuel.avro4s.{AvroFixed, AvroInputStream, AvroOutputStream}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
case class Data(uuid: Option[UUID])
case class UUID(@AvroFixed(8) bytes: Array[Byte])
class GithubIssue193 extends AnyFunSuite with Matchers {
test("Converting data with an optional fixed type field to GenericRecord fails #193") {
val baos = new ByteArrayOutputStream()
val output = AvroOutputStream.data[Data].to(baos).build()
output.write(Data(Some(UUID(Array[Byte](0, 1, 2, 3, 4, 5, 6, 7)))))
output.write(Data(None))
output.write(Data(Some(UUID(Array[Byte](7, 6, 5, 4, 3, 2, 1, 0)))))
output.close()
val input = AvroInputStream.data[Data].from(baos.toByteArray).build
val datas = input.iterator.toList
datas.head.uuid.get.bytes should equal(Array[Byte](0, 1, 2, 3, 4, 5, 6, 7))
datas(1).uuid shouldBe None
datas.last.uuid.get.bytes should equal(Array[Byte](7, 6, 5, 4, 3, 2, 1, 0))
input.close()
}
}
|
sksamuel/avro4s
|
avro4s-core/src/test/scala/com/sksamuel/avro4s/github/GithubIssue193.scala
|
Scala
|
apache-2.0
| 1,098
|
package com.jorgefigueiredo.operators
import com.jorgefigueiredo.SparkContextFactory
object FileRDDApplication {
def main(args: Array[String]) {
val sparkContext = SparkContextFactory.getContext
val countries = sparkContext.textFile("input/airports/countries.csv")
//val airports = sparkContext.textFile("input/airports/airports.csv")
//val countriesKeyValue = countries.map(country => (country.split(",")(1), country))
//val airportsKeyValue = airports.map(airport => (airport.split(",")(8), airport))
countries.take(10).foreach(println)
//airports.take(10).foreach(println)
//countriesKeyValue.take(10).foreach(println)
//airportsKeyValue.take(10).foreach(println)
//val airportsWithCountries = airportsKeyValue.join(countriesKeyValue)
//airportsWithCountries.take(10).foreach(println)
Thread.sleep(60*1000)
}
}
|
jorgeacf/apache-spark-demos
|
src/scala/src/main/scala/com/jorgefigueiredo/operators/FileRDDApplication.scala
|
Scala
|
apache-2.0
| 876
|
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp.hcoref
import cc.factorie._
import cc.factorie.model.TupleTemplateWithStatistics2
import cc.factorie.optimize.{MIRA, ParameterAveraging, SampleRankTrainer}
import cc.factorie.variable.{BagOfWordsVariable, BooleanValue}
import scala.reflect.ClassTag
/**
* @author John Sullivan
*/
trait TrainingObjective[Vars <: NodeVariables[Vars] with GroundTruth] {
this:CorefSampler[Vars] with PairGenerator[Vars] =>
override def objective = new CorefTrainerModel[Vars]
val averager = new MIRA with ParameterAveraging
val trainer = new SampleRankTrainer(this, averager)
def train(numSteps:Int) {
println("Starting %d training iterations".format(numSteps))
(0 until numSteps).foreach { idx =>
trainer.processContext(nextContext)
}
println("training complete")
averager.setWeightsToAverage(model.asInstanceOf[Parameters].parameters)
}
}
trait GroundTruth {
this: NodeVariables[_] =>
def truth:BagOfWordsVariable
}
class PairwiseTrainerFeature[Vars <: NodeVariables[Vars] with GroundTruth](val precisionDominated:Double = 0.95)(implicit ct:ClassTag[Vars]) extends TupleTemplateWithStatistics2[Vars, Node[Vars]#Exists] {
def unroll1(vars:Vars) = if(vars.node.isRoot) Factor(vars, vars.node.existsVar) else Nil
def unroll2(isEntity:Node[Vars]#Exists) = if(isEntity.node.isRoot) Factor(isEntity.node.variables, isEntity) else Nil
override def score(vars:Vars, isEntity:BooleanValue):Double ={
var result = 0.0
//val bag = s._1
val bagSeq = vars.truth.iterator.toSeq
var i=0;var j=0
var tp = 0.0
var fp = 0.0
while(i<bagSeq.size){
//val e:AuthorEntity=null
val (labeli,weighti) = bagSeq(i)
j = i
while(j<bagSeq.size){
val (labelj,weightj) = bagSeq(j)
if(labeli==labelj)
tp += (weighti*(weighti-1))/2.0
else
fp += weighti*weightj
j += 1
}
i += 1
}
val normalizer = tp+fp
result = tp - fp
result
}
}
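// Editorial worked example (not part of the original file): for a truth bag containing
// ("authorA" -> 3.0, "authorB" -> 1.0) the loop above accumulates
//   tp = 3*(3-1)/2 = 3.0   (same-label pairs)
//   fp = 3*1       = 3.0   (cross-label pairs)
// and score returns tp - fp = 0.0, so pure entities are rewarded and mixed ones penalised.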
class CorefTrainerModel[Vars <: NodeVariables[Vars] with GroundTruth](implicit ct:ClassTag[Vars]) extends CombinedModel {
this += new PairwiseTrainerFeature[Vars]
}
|
patverga/factorie
|
src/main/scala/cc/factorie/app/nlp/hcoref/TrainingObjective.scala
|
Scala
|
apache-2.0
| 2,914
|
/*
*
* o o o o o
* | o | |\\ /| | /
* | o-o o--o o-o oo | | O | oo o-o OO o-o o o
* | | | | | | | | | | | | | | | | \\ | | \\ /
* O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
* |
* o--o
* o--o o o--o o o
* | | | | o | |
* O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
* | \\ | | | | | | | | | | | | | |-' | | | \\
* o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
*
* Logical Markov Random Fields (LoMRF).
*
*
*/
package lomrf.util.opt
trait MasterOptionParser {
type Description = String
type OptionName = String
private var optToParserExecutable: Map[OptionName, (Description, Array[String] => Unit)] = Map.empty
protected def addOpt(
opt: OptionName,
description: Description,
executable: Array[String] => Unit): Unit = {
optToParserExecutable += opt -> (description, executable)
}
def parse(argz: Array[String]): Unit = {
if (argz.isEmpty) {
println(usage)
sys.exit(1)
}
val firstArgument = argz.head.trim
optToParserExecutable.get(firstArgument) match {
case Some((_, executable)) =>
val restOptions = if (argz.length == 1) Array[String]() else argz.slice(1, argz.length)
executable(restOptions)
case None =>
Console.err.print(s"Unknown parameter '$firstArgument'")
sys.exit(1)
}
}
def usage: String = {
    val maxSizeOptName = optToParserExecutable.keys.map(_.length).max + 4
    "\n\nUsage:\n" + optToParserExecutable
.map {
case (optionName, (description, _)) =>
val max_length = 76 - maxSizeOptName
if (max_length < 0)
s" $optionName$NLTB${wrapText(description, 72)}"
else {
val numberOfwhiteSpaces = maxSizeOptName - optionName.size
val gap = " " * numberOfwhiteSpaces
s" $optionName$gap${wrapText(description, max_length)}"
}
}
.mkString(NL)
}
}
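// Editorial usage sketch (not part of the original file): a command-line front-end would
// register one executable per sub-command and then delegate the raw arguments, e.g.
//   object Main extends MasterOptionParser {
//     addOpt("infer", "Run inference", rest => println(s"inference: ${rest.mkString(" ")}"))
//     addOpt("wlearn", "Run weight learning", rest => println(s"learning: ${rest.mkString(" ")}"))
//     def main(args: Array[String]): Unit = parse(args)
//   }
// The sub-command names and bodies above are illustrative, not LoMRF's actual entry points.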
|
anskarl/LoMRF
|
src/main/scala/lomrf/util/opt/MasterOptionParser.scala
|
Scala
|
apache-2.0
| 2,176
|
package de.leanovate.swaggercheck.generators
import java.net.{URI, URL}
import de.leanovate.swaggercheck.schema.model.JsonPath
import de.leanovate.swaggercheck.schema.model.formats.StringFormats
import org.scalacheck.Prop.forAll
import org.scalacheck.Properties
import scala.util.Try
object GeneratorsSpecification extends Properties("Generators") {
property("generate valid urls") = forAll(Generators.url) {
url =>
Try(new URL(url)).isSuccess
}
property("generate valid uris") = forAll(Generators.uri) {
url =>
Try(new URI(url)).isSuccess
}
property("generate valid emails") = forAll(Generators.email) {
email =>
StringFormats.EmailString.validate(JsonPath(), email).isSuccess
}
}
|
leanovate/swagger-check
|
json-schema-gen/src/test/scala/de/leanovate/swaggercheck/generators/GeneratorsSpecification.scala
|
Scala
|
mit
| 730
|
/*
* Copyright 2015 Databricks Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.sql.perf.tpch
import scala.sys.process._
import com.databricks.spark.sql.perf.{Benchmark, DataGenerator, Table, Tables}
import com.databricks.spark.sql.perf.ExecutionMode.CollectResults
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
class DBGEN(dbgenDir: String, params: Seq[String]) extends DataGenerator {
val dbgen = s"$dbgenDir/dbgen"
  def generate(sparkContext: SparkContext, name: String, partitions: Int, scaleFactor: String) = {
val smallTables = Seq("nation", "region")
val numPartitions = if (partitions > 1 && !smallTables.contains(name)) partitions else 1
val generatedData = {
sparkContext.parallelize(1 to numPartitions, numPartitions).flatMap { i =>
val localToolsDir = if (new java.io.File(dbgen).exists) {
dbgenDir
} else if (new java.io.File(s"/$dbgenDir").exists) {
s"/$dbgenDir"
} else {
sys.error(s"Could not find dbgen at $dbgen or /$dbgenDir. Run install")
}
val parallel = if (numPartitions > 1) s"-C $partitions -S $i" else ""
val shortTableNames = Map(
"customer" -> "c",
"lineitem" -> "L",
"nation" -> "n",
"orders" -> "O",
"part" -> "P",
"region" -> "r",
"supplier" -> "s",
"partsupp" -> "S"
)
val paramsString = params.mkString(" ")
val commands = Seq(
"bash", "-c",
s"cd $localToolsDir && ./dbgen -q $paramsString -T ${shortTableNames(name)} -s $scaleFactor $parallel")
println(commands)
commands.lines
}.repartition(numPartitions)
}
generatedData.setName(s"$name, sf=$scaleFactor, strings")
generatedData
}
}
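// Editorial usage sketch (not part of the original file), assuming a dbgen binary was
// already built under the given directory:
//   val gen = new DBGEN(dbgenDir = "/opt/tpch/tools", params = Nil)          // illustrative path
//   val rows = gen.generate(spark.sparkContext, "lineitem", partitions = 10, scaleFactor = "1")
//   // `rows` is an RDD[String] of '|'-separated records, parsed later via Tables/convertTypes.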
class TPCHTables(
sqlContext: SQLContext,
dbgenDir: String,
scaleFactor: String,
useDoubleForDecimal: Boolean = false,
useStringForDate: Boolean = false,
generatorParams: Seq[String] = Nil)
extends Tables(sqlContext, scaleFactor, useDoubleForDecimal, useStringForDate) {
import sqlContext.implicits._
val dataGenerator = new DBGEN(dbgenDir, generatorParams)
val tables = Seq(
Table("part",
partitionColumns = Nil,
'p_partkey.long,
'p_name.string,
'p_mfgr.string,
'p_brand.string,
'p_type.string,
'p_size.int,
'p_container.string,
'p_retailprice.decimal(12, 2),
'p_comment.string
),
Table("supplier",
partitionColumns = Nil,
's_suppkey.long,
's_name.string,
's_address.string,
's_nationkey.long,
's_phone.string,
's_acctbal.decimal(12, 2),
's_comment.string
),
Table("partsupp",
partitionColumns = Nil,
'ps_partkey.long,
'ps_suppkey.long,
'ps_availqty.int,
'ps_supplycost.decimal(12, 2),
'ps_comment.string
),
Table("customer",
partitionColumns = Nil,
'c_custkey.long,
'c_name.string,
'c_address.string,
'c_nationkey.string,
'c_phone.string,
'c_acctbal.decimal(12, 2),
'c_mktsegment.string,
'c_comment.string
),
Table("orders",
partitionColumns = Nil,
'o_orderkey.long,
'o_custkey.long,
'o_orderstatus.string,
'o_totalprice.decimal(12, 2),
'o_orderdate.date,
'o_orderpriority.string,
'o_clerk.string,
'o_shippriority.int,
'o_comment.string
),
Table("lineitem",
partitionColumns = Nil,
'l_orderkey.long,
'l_partkey.long,
'l_suppkey.long,
'l_linenumber.int,
'l_quantity.decimal(12, 2),
'l_extendedprice.decimal(12, 2),
'l_discount.decimal(12, 2),
'l_tax.decimal(12, 2),
'l_returnflag.string,
'l_linestatus.string,
'l_shipdate.date,
'l_commitdate.date,
'l_receiptdate.date,
'l_shipinstruct.string,
'l_shipmode.string,
'l_comment.string
),
Table("nation",
partitionColumns = Nil,
'n_nationkey.long,
'n_name.string,
'n_regionkey.long,
'n_comment.string
),
Table("region",
partitionColumns = Nil,
'r_regionkey.long,
'r_name.string,
'r_comment.string
)
).map(_.convertTypes())
}
class TPCH(@transient sqlContext: SQLContext)
extends Benchmark(sqlContext) {
val queries = (1 to 22).map { q =>
val queryContent: String = IOUtils.toString(
getClass().getClassLoader().getResourceAsStream(s"tpch/queries/$q.sql"))
Query(s"Q$q", queryContent, description = "TPCH Query",
executionMode = CollectResults)
}
val queriesMap = queries.map(q => q.name.split("-").get(0) -> q).toMap
}
|
wayblink/Naive
|
spark/spark-sql-perf/src/main/scala/com/databricks/spark/sql/perf/tpch/TPCH.scala
|
Scala
|
mit
| 5,308
|
// shapeless's Lazy implemented in terms of byname implicits
trait Lazy[T] {
val value: T
}
object Lazy {
implicit def apply[T](implicit t: => T): Lazy[T] =
new Lazy[T] {
lazy val value = t
}
def unapply[T](lt: Lazy[T]): Option[T] = Some(lt.value)
}
trait Foo {
type Out
def out: Out
}
object Foo {
type Aux[Out0] = Foo { type Out = Out0 }
implicit val fooInt: Aux[Int] = new Foo { type Out = Int ; def out = 23 }
}
object Test {
def bar[T](t: T)(implicit foo: Lazy[Foo.Aux[T]]): foo.value.Out = foo.value.out
val i = bar(13)
i: Int
}
|
scala/scala
|
test/files/pos/byname-implicits-8.scala
|
Scala
|
apache-2.0
| 576
|
/*
Originally Adapted from shapeless-contrib scalaz
*/
package cats.sequence
import cats.data._
import cats.instances.option._
import cats.instances.either._
import cats.instances.function._
import org.scalacheck.Arbitrary
import shapeless._, shapeless.syntax.singleton._
import cats.derived._
import org.scalacheck.Prop.forAll
import shapeless.record.Record
import cats.laws.discipline.arbitrary._
class SequenceSuite extends KittensSuite {
test("sequencing Option")(check {
forAll { (x: Option[Int], y: Option[String], z: Option[Float]) =>
val expected = (x, y, z) mapN (_ :: _ :: _ :: HNil)
(x :: y :: z :: HNil).sequence == expected
}
})
test("sequencing HNil with Option")(
// We can't simply use HNil.sequence, because F would be ambiguous.
// However, we can explicitly grab the Sequencer for Option and use it.
check {
implicitly[Sequencer.Aux[HNil, Option, HNil]].apply(HNil) == Some(HNil)
})
test("sequencing Either")(check {
forAll { (x: Either[String, Int], y: Either[String, String], z: Either[String, Float]) =>
val expected = (x, y, z) mapN (_ :: _ :: _ :: HNil)
(x :: y :: z :: HNil).sequence == expected
}
})
// note: using the ValidationNel type alias here breaks the implicit search
test("sequencing ValidatedNel")(check {
forAll { (x: Validated[NonEmptyList[String], Int], y: Validated[NonEmptyList[String], String], z: Validated[NonEmptyList[String], Float]) =>
val expected = (x, y, z) mapN (_ :: _ :: _ :: HNil)
sequence(x, y, z) == expected
}
})
test("sequencing Function"){
val f1 = (_: String).length
val f2 = (_: String).reverse
val f3 = (_: String).toDouble
val f = (f1 :: f2 :: f3 :: HNil).sequence
assert( f("42.0") == 4 :: "0.24" :: 42.0 :: HNil)
}
test("sequencing Function using ProductArgs"){
val f1 = (_: String).length
val f2 = (_: String).reverse
val f3 = (_: String).toDouble
val f = sequence(f1, f2, f3)
assert( f("42.0") == 4 :: "0.24" :: 42.0 :: HNil )
}
  test("sequencing Kleislis through ProductArgs") {
val f1 = ((_: String).length) andThen Option.apply
val f2 = ((_: String).reverse) andThen Option.apply
val f3 = ((_: String).toDouble) andThen Option.apply
val f = sequence(Kleisli(f1), Kleisli(f2), Kleisli(f3))
assert( f.run("42.0") == Some(('a ->> 4) :: ('b ->> "0.24") :: ('c ->> 42.0) :: HNil))
}
test("sequencing record of Option")(check {
forAll { (x: Option[Int], y: Option[String], z: Option[Float]) =>
val expected = for ( a <- x; b <- y; c <- z ) yield ('a ->> a) :: ('b ->> b) :: ( 'c ->> c) :: HNil
(('a ->> x) :: ('b ->> y) :: ('c ->> z) :: HNil).sequence == expected
}
})
test("sequencing record of Option using RecordArgs")(check {
forAll { (x: Option[Int], y: Option[String], z: Option[Float]) =>
val expected = for ( a <- x; b <- y; c <- z ) yield ('a ->> a) :: ('b ->> b) :: ( 'c ->> c) :: HNil
sequenceRecord(a = x, b = y, c = z) == expected
}
})
test("sequencing record of Either")(check {
forAll { (x: Either[String, Int], y: Either[String, String], z: Either[String, Float]) =>
val expected = for ( a <- x; b <- y; c <- z ) yield ('a ->> a) :: ('b ->> b) :: ( 'c ->> c) :: HNil
(('a ->> x) :: ('b ->> y) :: ('c ->> z) :: HNil).sequence == expected
}
})
test("sequencing record of Functions through RecordArgs") {
val f1 = (_: String).length
val f2 = (_: String).reverse
val f3 = (_: String).toDouble
val f = sequenceRecord(a = f1, b = f2, c = f3)
assert( f("42.0") == ('a ->> 4) :: ('b ->> "0.24") :: ('c ->> 42.0) :: HNil )
}
test("sequencing record of Kleisli through RecordArgs") {
val f1 = ((_: String).length) andThen Option.apply
val f2 = ((_: String).reverse) andThen Option.apply
val f3 = ((_: String).toDouble) andThen Option.apply
val f = sequenceRecord(a = Kleisli(f1), b = Kleisli(f2), c = Kleisli(f3))
assert( f.run("42.0") == Some(('a ->> 4) :: ('b ->> "0.24") :: ('c ->> 42.0) :: HNil))
}
case class MyCase(a: Int, b: String, c: Float)
test("sequence gen for Option")(check {
forAll { (x: Option[Int], y: Option[String], z: Option[Float]) =>
val myGen = sequenceGeneric[MyCase]
val expected = (x, y, z) mapN MyCase.apply
myGen(a = x, b = y, c = z) == expected
}
})
test("sequence gen with different sort")(check {
forAll { (x: Option[Int], y: Option[String], z: Option[Float]) =>
val myGen = sequenceGeneric[MyCase]
val expected = (x, y, z) mapN MyCase.apply
myGen(b = y, a = x, c = z) == expected
}
})
test("sequence gen for Either")(check {
forAll { (x: Either[String, Int], y: Either[String, String], z: Either[String, Float]) =>
val myGen = sequenceGeneric[MyCase]
val expected = (x, y, z) mapN MyCase.apply
myGen(a = x, b = y, c = z) == expected
}
})
test("sequence gen for Functions") {
val f1 = (_: String).length
val f2 = (_: String).reverse
val f3 = (_: String).toFloat
val myGen = sequenceGeneric[MyCase]
val f = myGen(a = f1, b = f2, c = f3)
assert( f("42.0") == MyCase(4, "0.24", 42.0f))
}
  test("sequence gen for Kleisli") {
val f1 = ((_: String).length) andThen Option.apply
val f2 = ((_: String).reverse) andThen Option.apply
val f3 = ((_: String).toFloat) andThen Option.apply
val myGen = sequenceGeneric[MyCase]
val f = myGen(a = Kleisli(f1), b = Kleisli(f2), c = Kleisli(f3))
assert( f.run("42.0") == Some(MyCase(4, "0.24", 42.0f)))
}
//wait until cats 0.5.0 release to bring unapply to serializable
test("RecordSequencer is serializable") {
import java.io.{ ObjectOutputStream, ByteArrayOutputStream }
val r = Record.`'a -> Option[Int], 'b -> Option[String]`
type Rec = r.T
val rs = the[RecordSequencer[Rec]]
assert( isSerializable(rs) )
}
}
|
milessabin/kittens
|
core/src/test/scala/cats/sequence/SequenceSuite.scala
|
Scala
|
apache-2.0
| 5,947
|
package scalan.compilation.lms.common
import scala.lms.common.{ScalaGenMiscOps, MiscOpsExp, MiscOps}
import scala.reflect.SourceContext
trait MiscOpsExt extends MiscOps {
def readline(implicit pos: SourceContext): Rep[String]
}
trait MiscOpsExtExp extends MiscOpsExt with MiscOpsExp {
case class Readline() extends Def[String]
def readline(implicit pos: SourceContext) = reflectEffect(Readline()) // TODO: simple effect
override def mirror[A:Manifest](e: Def[A], f: Transformer)(implicit pos: SourceContext): Exp[A] = (e match {
case Reflect(Readline(), u, es) => reflectMirrored(Reflect(Readline(), mapOver(f,u), f(es)))(mtype(manifest[A]), pos)
case _ => super.mirror(e,f)
}).asInstanceOf[Exp[A]]
}
trait ScalaGenMiscOpsExt extends ScalaGenMiscOps {
val IR: MiscOpsExtExp
import IR._
override def emitNode(sym: Sym[Any], rhs: Def[Any]) = rhs match {
case Readline() => emitValDef(sym, src"Predef.readLine()")
case _ => super.emitNode(sym, rhs)
}
}
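// Editorial note (not part of the original file): staging a call to `readline` yields an
// effectful IR node, and ScalaGenMiscOpsExt emits it in the generated source roughly as
//   val x1 = Predef.readLine()
// where `x1` stands for whatever symbol name LMS assigns.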
|
scalan/scalan
|
lms-backend/core/src/main/scala/scalan/compilation/lms/common/MiscOpsExt.scala
|
Scala
|
apache-2.0
| 992
|
package com.datawizards.splot.examples
import com.datawizards.splot.api.implicits._
import com.datawizards.splot.functions.{count, mean}
import com.datawizards.splot.model.ImageFormats
import com.datawizards.splot.theme.PlotTheme
import scala.util.Random
object SaveExampleImagesToFiles extends App {
val exportPath = "images/"
val format = ImageFormats.PNG
val width = 400
val height = 300
val rand = new Random()
rand.setSeed(0L)
val gaussians = for(i <- 1 to 10000) yield rand.nextGaussian()
Seq(1.0, 4.0, 9.0).buildPlot().bar().size(width, height).save(exportPath+"basic_bar.png", format)
people.take(3).buildPlot().bar(_.age).size(width, height).save(exportPath+"bar_people.png", format)
Seq(
(1.0, 1.0),
(2.0, 4.0),
(3.0, 9.0)
).buildPlot().scatter().size(width, height).save(exportPath+"scatter_basic.png", format)
Seq(
AgeIncome(20, 1000.0),
AgeIncome(25, 2000.0),
AgeIncome(30, 2500.0),
AgeIncome(35, 3000.0),
AgeIncome(40, 3500.0),
AgeIncome(45, 3000.0),
AgeIncome(50, 2500.0)
).buildPlot().scatter(_.age, _.income).size(width, height).save(exportPath+"scatter_age_income.png", format)
Seq(
(1.0, 1.0),
(2.0, 4.0),
(3.0, 9.0)
).buildPlot().line().size(width, height).save(exportPath+"line_basic.png", format)
Seq(
AgeIncome(20, 1000.0),
AgeIncome(25, 2000.0),
AgeIncome(30, 2500.0),
AgeIncome(35, 3000.0),
AgeIncome(40, 3500.0),
AgeIncome(45, 3000.0),
AgeIncome(50, 2500.0)
).buildPlot().line(_.age, _.income).size(width, height).save(exportPath+"line_age_income.png", format)
timeseriesData2017
.buildPlot()
.line()
.size(1200, 400)
.save(exportPath+"line_timeseries.png", format)
people
.buildPlot()
.scatter(_.age, _.income)
.size(300, 900)
.rowsBy(_.education)
.save(exportPath+"people_groupby_education.png", format)
people
.buildPlot()
.scatter(_.age, _.income)
.size(800, 200)
.colsBy(_.country)
.save(exportPath+"people_groupby_country.png", format)
people
.buildPlot()
.scatter(_.age, _.income)
.size(1200, 800)
.colsBy(_.country)
.rowsBy(_.education)
.save(exportPath+"people_groupby_country_education.png", format)
gaussians
.buildPlot()
.histogram(100)
.size(400, 300)
.save(exportPath+"histogram_for_gaussians.png", format)
people
.buildPlot()
.histogramForCategories(_.education)
.size(400, 300)
.titles("People by education", "Education", "Count")
.legendVisible(false)
.save(exportPath+"histogram_for_categories.png", format)
val populationByCountry = Seq(
("DE", 81),
("TR", 72),
("FR", 63),
("UK", 62),
("IT", 61),
("ES", 46),
("UA", 45),
("PL", 38),
("RO", 19),
("NL", 17),
("GR", 11),
("PT", 11),
("BE", 10),
("CZ", 10),
("HU", 10)
)
populationByCountry
.buildPlot()
.bar(_._1, _._2)
.titles("Population by country [millions]", "Country", "Population")
.size(1200, 300)
.legendVisible(false)
.save(exportPath+"bar_chart_with_string.png", format)
val groupedPeopleByCountryEducation = people
.groupBy(p => (p.country, p.education))
.mapValues(pv => pv.size)
groupedPeopleByCountryEducation
.buildPlot()
.colsBy(_._1._1)
.bar(x => x._1._2, x => x._2)
.size(1200, 300)
.save(exportPath+"bar_chart_grids_with_string.png", format)
people1000
.buildPlot()
.colsBy(_.education)
.histogram(_.age, 50)
.size(1200, 400)
.save(exportPath+"histogram_multiple_columns.png", format)
Seq(1.0, 4.0, 9.0)
.buildPlot()
.bar()
.seriesName("custom name")
.size(400, 300)
.save(exportPath+"bar_chart_custom_series_name.png", format)
Seq(1.0, 4.0, 9.0)
.buildPlot()
.bar()
.legendVisible(false)
.size(400, 300)
.save(exportPath+"bar_chart_hide_legend.png", format)
people
.buildPlot()
.scatter(_.age, _.income)
.seriesBy(_.education)
.size(500, 400)
.titles("Age and income by education", "age", "income")
.save(exportPath+"scatter_chart_with_multiple_series.png", format)
people
.buildPlot()
.scatter(_.age, _.income)
.size(1200, 300)
.colsBy(_.country)
.seriesBy(_.education)
.save(exportPath+"scatter_chart_with_multiple_columns_and_series.png", format)
people
.buildPlot()
.barWithAggregations(_.education, count())
.size(400, 300)
.save(exportPath+"bar_chart_with_count_aggregation.png", format)
people
.buildPlot()
.barWithAggregations(_.country, mean(_.income))
.size(400, 300)
.save(exportPath+"bar_chart_with_mean_aggregation.png", format)
val data = Seq(
("Python", 3.0),
("Java", 4.0),
("Scala", 5.0)
)
data.buildPlot().bar().size(300,150).theme(PlotTheme.GGPlot2).title("ggplot").save(exportPath+"ggplot_theme.png", format)
data.buildPlot().bar().size(300,150).theme(PlotTheme.Matlab).title("matlab").save(exportPath+"matplot_theme.png", format)
data.buildPlot().bar().size(300,150).theme(PlotTheme.XChart).title("xchart").save(exportPath+"xchart_theme.png", format)
data.buildPlot().bar().size(300,150).theme(PlotTheme.SPlot).title("splot").save(exportPath+"splot_theme.png", format)
Seq(
(1, 1, 9.0),
(1, 2, 20.0),
(3, 2, 30.0),
(2, 2, 40.0),
(1, 3, 10.0),
(2, 3, 15.0)
)
.buildPlot()
.bubble(_._1, _._2, _._3)
.size(400, 300)
.legendVisible(false)
.save(exportPath+"bubble_chart.png", format)
Seq(
("DE", 81),
("TR", 72),
("FR", 63),
("UK", 62),
("IT", 61)
)
.buildPlot()
.pie()
.size(400, 300)
.save(exportPath+"pie_chart.png", format)
Seq(
(1.0, 1.0),
(2.0, 4.0),
(3.0, 9.0)
)
.buildPlot()
.area()
.size(400, 300)
.save(exportPath+"area_chart.png", format)
Seq(1, 4, 9)
.buildPlot()
.bar()
.showAnnotations(true)
.size(400, 300)
.save(exportPath+"annotations.png", format)
}
|
piotr-kalanski/SPlot
|
src/main/scala/com/datawizards/splot/examples/SaveExampleImagesToFiles.scala
|
Scala
|
apache-2.0
| 5,995
|
package me.benetis
import me.benetis.compiler.Compiler
import me.benetis.model.Post
import zio.console.{Console, putStrLn}
import zio.{App, ZIO}
object MyApp extends App {
def run(args: List[String]): ZIO[zio.ZEnv, Nothing, Int] =
myAppLogic.fold(_ => 1, _ => 0)
val publishedPosts: Set[Post] = Set(
posts.NewWorld,
posts.Backtracking,
posts.GameOfLife,
posts.SoftwareEngineerTwoHours
)
val myAppLogic: ZIO[Console, Throwable, Unit] =
for {
_ <- putStrLn("Starting...")
_ <- Compiler.compile(publishedPosts)
} yield ()
}
|
benetis/benetis.me
|
src/main/scala/me/benetis/MyApp.scala
|
Scala
|
agpl-3.0
| 574
|