| code (string, length 5–1M) | repo_name (string, length 5–109) | path (string, length 6–208) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5–1M) |
|---|---|---|---|---|---|
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.spark
import java.util.{Map => JMap}
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom._
import org.apache.spark.sql.SQLTypes
import org.geotools.data.DataStoreFinder
import org.geotools.geometry.jts.JTS
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geohash.BoundingBox
import org.locationtech.geomesa.utils.interop.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class SparkSQLGeometricConstructorsTest extends Specification with LazyLogging {
"sql geometry constructors" should {
sequential
val dsParams: JMap[String, String] = Map("cqengine" -> "true", "geotools" -> "true")
val ds = DataStoreFinder.getDataStore(dsParams)
val spark = SparkSQLTestUtils.createSparkSession()
val sc = spark.sqlContext
SQLTypes.init(sc)
"st_box2DFromGeoHash" >> {
sc.sql("select st_box2DFromGeoHash(null, null)").collect.head(0) must beNull
val r = sc.sql(
s"""
|select st_box2DFromGeoHash('ezs42', 25)
""".stripMargin
)
val boxCoords = r.collect().head.getAs[Geometry](0).getCoordinates
val ll = boxCoords(0)
val ur = boxCoords(2)
boxCoords.length mustEqual 5
ll.x must beCloseTo(-5.625, .022) // lon
ll.y must beCloseTo(42.583, .022) // lat
ur.x must beCloseTo(-5.581, .022) // lon
ur.y must beCloseTo(42.627, .022) // lat
}
"st_geomFromGeoHash" >> {
sc.sql("select st_geomFromGeoHash(null, null)").collect.head(0) must beNull
val r = sc.sql(
s"""
|select st_geomFromGeoHash('ezs42', 25)
""".stripMargin
)
val geomboxCoords = r.collect().head.getAs[Geometry](0).getCoordinates
val ll = geomboxCoords(0)
val ur = geomboxCoords(2)
geomboxCoords.length mustEqual 5
ll.x must beCloseTo(-5.625, .022) // lon
ll.y must beCloseTo(42.583, .022) // lat
ur.x must beCloseTo(-5.581, .022) // lon
ur.y must beCloseTo(42.627, .022) // lat
}
"st_geomFromWKT" >> {
sc.sql("select st_geomFromWKT(null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_geomFromWKT('POINT(0 0)')
""".stripMargin
)
r.collect().head.getAs[Geometry](0) mustEqual WKTUtils.read("POINT(0 0)")
}
"st_geometryFromText" >> {
sc.sql("select st_geometryFromText(null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_geometryFromText('POINT(0 0)')
""".stripMargin
)
r.collect().head.getAs[Geometry](0) mustEqual WKTUtils.read("POINT(0 0)")
}
"st_geomFromWKB" >> {
sc.sql("select st_geomFromWKB(null)").collect.head(0) must beNull
val geomArr = Array[Byte](0,
0, 0, 0, 3,
0, 0, 0, 1,
0, 0, 0, 5,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
64, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
)
val r = sc.sql(
s"""select st_geomFromWKB(st_byteArray('${new String(geomArr)}'))"""
)
r.collect().head.getAs[Geometry](0) mustEqual WKTUtils.read("POLYGON((0 0, 2 0, 2 2, 0 2, 0 0))")
}
"st_lineFromText" >> {
sc.sql("select st_lineFromText(null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_lineFromText('LINESTRING(0 0, 1 1, 2 2)')
""".stripMargin
)
r.collect().head.getAs[LineString](0) mustEqual WKTUtils.read("LINESTRING(0 0, 1 1, 2 2)")
}
"st_makeBBOX" >> {
sc.sql("select st_makeBBOX(null, null, null, null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_makeBBOX(0.0, 0.0, 2.0, 2.0)
""".stripMargin
)
r.collect().head.getAs[Geometry](0) mustEqual JTS.toGeometry(BoundingBox(0, 2, 0, 2))
}
"st_makeBox2D" >> {
sc.sql("select st_makeBox2D(null, null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_makeBox2D(st_castToPoint(st_geomFromWKT('POINT(0 0)')),
| st_castToPoint(st_geomFromWKT('POINT(2 2)')))
""".stripMargin
)
r.collect().head.getAs[Geometry](0) mustEqual WKTUtils.read("POLYGON((0.0 0.0, 2.0 0.0, " +
"2.0 2.0, 0.0 2.0, 0.0 0.0))")
}
"st_makePolygon" >> {
sc.sql("select st_makePolygon(null)").collect.head(0) must beNull
val r = sc.sql(
s"""
|select st_makePolygon(st_castToLineString(
| st_geomFromWKT('LINESTRING(0 0, 2 2, 5 4, 7 2, 5 2, 3 0, 0 0)')))
""".stripMargin
)
r.collect().head.getAs[Polygon](0) mustEqual WKTUtils.read("POLYGON((0 0, 2 2, 5 4, 7 2, 5 2, 3 0, 0 0))")
}
"st_makePoint" >> {
sc.sql("select st_makePoint(null, null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_makePoint(0, 0)
""".stripMargin
)
r.collect().head.getAs[Point](0) mustEqual WKTUtils.read("POINT(0 0)")
}
"st_makePointM" >> {
sc.sql("select st_makePointM(null, null, null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_makePointM(0, 0, 1)
""".stripMargin
)
r.collect().head.getAs[Point](0) mustEqual WKTUtils.read("POINT(0 0 1)")
}
"st_mLineFromText" >> {
sc.sql("select st_mLineFromText(null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_mLineFromText('MULTILINESTRING((0 0, 1 1, 2 2), (0 1, 1 2, 2 3))')
""".stripMargin
)
r.collect().head.getAs[MultiLineString](0) mustEqual WKTUtils.read("MULTILINESTRING((0 0, 1 1, 2 2), " +
"(0 1, 1 2, 2 3))")
}
"st_mPointFromText" >> {
sc.sql("select st_mPointFromText(null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_mPointFromText('MULTIPOINT((0 0), (1 1))')
""".stripMargin
)
r.collect().head.getAs[MultiPoint](0) mustEqual WKTUtils.read("MULTIPOINT((0 0), (1 1))")
}
"st_mPolyFromText" >> {
sc.sql("select st_mPolyFromText(null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_mPolyFromText('MULTIPOLYGON((( -1 -1, 0 1, 1 -1, -1 -1 )),((-4 4, 4 4, 4 -4, -4 -4, -4 4),
| (2 2, -2 2, -2 -2, 2 -2, 2 2)))')
""".stripMargin
)
r.collect().head.getAs[MultiPolygon](0) mustEqual
WKTUtils.read("MULTIPOLYGON((( -1 -1, 0 1, 1 -1, -1 -1 ))," +
"((-4 4, 4 4, 4 -4, -4 -4, -4 4),(2 2, -2 2, -2 -2, 2 -2, 2 2)))")
}
"st_point" >> {
sc.sql("select st_point(null, null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_point(0, 0)
""".stripMargin
)
r.collect().head.getAs[Point](0) mustEqual WKTUtils.read("POINT(0 0)")
}
"st_pointFromGeoHash" >> {
sc.sql("select st_pointFromGeoHash(null, null)").collect.head(0) must beNull
val r = sc.sql(
s"""
|select st_pointFromGeoHash('ezs42', 25)
""".stripMargin
)
val point = r.collect().head.getAs[Point](0)
point.getX must beCloseTo(-5.603, .022)
point.getY must beCloseTo(42.605, .022)
}
"st_pointFromText" >> {
sc.sql("select st_pointFromText(null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_pointFromText('Point(0 0)')
""".stripMargin
)
r.collect().head.getAs[Point](0) mustEqual WKTUtils.read("POINT(0 0)")
}
"st_pointFromWKB" >> {
sc.sql("select st_pointFromWKB(null)").collect.head(0) must beNull
val pointArr = Array[Byte](0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0)
val r = sc.sql(
s"""
|select st_pointFromWKB(st_byteArray('${new String(pointArr)}'))
""".stripMargin
)
r.collect().head.getAs[Point](0) mustEqual WKTUtils.read("POINT(0 0)")
}
"st_polygon" >> {
sc.sql("select st_polygon(null)").collect.head(0) must beNull
val r = sc.sql(
s"""
|select st_polygon(st_castToLineString(
| st_geomFromWKT('LINESTRING(0 0, 2 2, 5 2, 3 0, 0 0)')))
""".stripMargin
)
r.collect().head.getAs[Polygon](0) mustEqual WKTUtils.read("POLYGON((0 0, 2 2, 5 2, 3 0, 0 0))")
}
"st_polygonFromText" >> {
sc.sql("select st_polygonFromText(null)").collect.head(0) must beNull
val r = sc.sql(
"""
|select st_polygonFromText('POLYGON((0 0, 2 0, 2 2, 0 2, 0 0))')
""".stripMargin
)
r.collect().head.getAs[Polygon](0) mustEqual WKTUtils.read("POLYGON((0.0 0.0, 2.0 0.0, " +
"2.0 2.0, 0.0 2.0, 0.0 0.0))")
}
// after
step {
ds.dispose()
spark.stop()
}
}
}
| ronq/geomesa | geomesa-spark/geomesa-spark-sql/src/test/scala/org/locationtech/geomesa/spark/SparkSQLGeometricConstructorsTest.scala | Scala | apache-2.0 | 9,649 |
package org.codeswarm.tnsconfig
/** The string identifier of an entry in a tnsnames.ora file.
*/
sealed case class TnsAlias(name: String)
| chris-martin/tns-config | src/main/scala/TnsAlias.scala | Scala | apache-2.0 | 141 |
/*
* Copyright 2018 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package support.steps
import java.util.UUID
import org.scalatest.concurrent.IntegrationPatience
import com.github.tomakehurst.wiremock.client.WireMock._
import cucumber.api.scala.ScalaDsl
import play.api.http.Status.{ACCEPTED, INTERNAL_SERVER_ERROR}
class PaymentsWorldpayClearanceSteps extends ScalaDsl with BaseSteps with IntegrationPatience {
private val ClearedPaymentsUrl = "/payments-worldpay-clearance/cleared-payments"
Given("""^the Payments Worldpay Clearance service will accept the report$""") {
stubFor(post(urlEqualTo(ClearedPaymentsUrl)).
willReturn(aResponse().withStatus(ACCEPTED).
withHeader("X-ClearanceSetId", UUID.randomUUID().toString)))
}
Given("""^the Payments Worldpay Clearance service is unable to accept the report$""") {
stubFor(post(urlEqualTo(ClearedPaymentsUrl)).
willReturn(aResponse().withStatus(INTERNAL_SERVER_ERROR)))
}
Then("""^the Payments Worldpay Clearance service receives a report containing$""") { (reportContent:String) =>
verify(
postRequestedFor(urlEqualTo(ClearedPaymentsUrl)).
withHeader("Content-Type", equalTo("text/plain")).
withRequestBody(equalTo(reportContent.stripMargin)))
}
Then("""^the Payments Worldpay Clearance service does not receive a report$""") {
val processSteps = new ProcessSteps
processSteps.verifyPostIsNotSent(ClearedPaymentsUrl)
}
}
| hmrc/worldpay-downloader | test/support/steps/PaymentsWorldpayClearanceSteps.scala | Scala | apache-2.0 | 2,000 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.cublaze.modules
import edu.latrobe.blaze.modules._
import edu.latrobe.cublaze._
trait Layer_CUDA[TThis <: LayerBuilder[_]]
extends Layer[TThis] {
// Lock a CUDA device.
final private val claim
: DeviceClaim = LogicalDevice.claim()
final protected val device
: LogicalDevice = claim.device
override protected def doClose()
: Unit = {
claim.close()
super.doClose()
}
}
| bashimao/ltudl | cublaze/src/main/scala/edu/latrobe/cublaze/modules/Layer_CUDA.scala | Scala | apache-2.0 | 1,100 |
package pl.touk.nussknacker.processCounts.influxdb
import org.scalatest.{FunSuite, Matchers}
import pl.touk.nussknacker.engine.api.CirceUtil
import pl.touk.nussknacker.test.PatientScalaFutures
import sttp.client.Identity
import sttp.client.monad.IdMonad
import java.time.Instant
class InfluxGeneratorSpec extends FunSuite with Matchers with PatientScalaFutures {
import InfluxGenerator._
//TODO: test generated query, not just shape of output
test("Point in time query returns correct results") {
val pointInTimeQuery = new PointInTimeQuery[Identity](_ => sampleInfluxOutput, "process1", "test", MetricsConfig())(IdMonad)
pointInTimeQuery.query(Instant.now()) shouldBe Map(
"start" -> (552855221L + 557871409L),
"end" -> (412793677L + 414963365L)
)
}
val sampleInfluxOutputRaw: String = """
| [
| {
| "name": "nodeCount",
| "tags": {
| "nodeId": "end",
| "slot": "0"
| },
| "columns": [
| "time",
| "nodeId",
| "count"
| ],
| "values": [
| [
| "2018-10-15T06:17:35Z",
| "end",
| 412793677
| ]
| ]
| },
| {
| "name": "nodeCount",
| "tags": {
| "nodeId": "end",
| "slot": "1"
| },
| "columns": [
| "time",
| "nodeId",
| "count"
| ],
| "values": [
| [
| "2018-10-15T06:17:35Z",
| "end",
| 414963365
| ]
| ]
| },
| {
| "name": "nodeCount",
| "tags": {
| "nodeId": "start",
| "slot": "0"
| },
| "columns": [
| "time",
| "nodeId",
| "count"
| ],
| "values": [
| [
| "2018-10-15T06:17:35Z",
| "start",
| 552855221
| ]
| ]
| },
| {
| "name": "nodeCount",
| "tags": {
| "nodeId": "start",
| "slot": "1"
| },
| "columns": [
| "time",
| "nodeId",
| "count"
| ],
| "values": [
| [
| "2018-10-15T06:17:35Z",
| "start",
| 557871409
| ]
| ]
| }
| ]
""".stripMargin
val sampleInfluxOutput: List[InfluxSeries] = CirceUtil.decodeJsonUnsafe[List[InfluxSeries]](sampleInfluxOutputRaw, "failed to decode series")
}
| TouK/nussknacker | ui/processReports/src/test/scala/pl/touk/nussknacker/processCounts/influxdb/InfluxGeneratorSpec.scala | Scala | apache-2.0 | 2,934 |
package io.swagger.client.model
import org.joda.time.DateTime
case class Variable (
name: String, // User-defined variable display name.
originalName: String, // Name used when the variable was originally created in the `variables` table.
category: String, // Variable category like Mood, Sleep, Physical Activity, Treatment, Symptom, etc.
unit: String, // Abbreviated name of the default unit for the variable
sources: String, // Comma-separated list of source names to limit variables to those sources
minimumValue: Double, // Minimum reasonable value for this variable (uses default unit)
maximumValue: Double, // Maximum reasonable value for this variable (uses default unit)
combinationOperation: String, // How to aggregate measurements over time.
fillingValue: Double // Value for replacing null measurements
)
| QuantiModo/QuantiModo-SDK-Async-Scala | src/main/scala/io/swagger/client/model/Variable.scala | Scala | apache-2.0 | 851 |
package akka.contrib.persistence.mongodb
import akka.NotUsed
import akka.actor.{Actor, ActorLogging, ActorRef, ExtendedActorSystem, Props}
import akka.persistence.query._
import akka.persistence.query.javadsl.{AllPersistenceIdsQuery => JAPIQ, CurrentEventsByPersistenceIdQuery => JCEBP, CurrentPersistenceIdsQuery => JCP, EventsByPersistenceIdQuery => JEBP}
import akka.persistence.query.scaladsl.{AllPersistenceIdsQuery, CurrentEventsByPersistenceIdQuery, CurrentPersistenceIdsQuery, EventsByPersistenceIdQuery}
import akka.stream.{FlowShape, Inlet, Outlet, OverflowStrategy, Attributes}
import akka.stream.actor.{ActorPublisher, ActorPublisherMessage}
import akka.stream.javadsl.{Source => JSource}
import akka.stream.scaladsl._
import akka.stream.stage._
import com.typesafe.config.Config
import scala.collection.mutable
object MongoReadJournal {
val Identifier = "akka-contrib-mongodb-persistence-readjournal"
}
class MongoReadJournal(system: ExtendedActorSystem, config: Config) extends ReadJournalProvider {
private[this] val impl = MongoPersistenceExtension(system)(config).readJournal
override def scaladslReadJournal(): scaladsl.ReadJournal = new ScalaDslMongoReadJournal(impl)
override def javadslReadJournal(): javadsl.ReadJournal = new JavaDslMongoReadJournal(new ScalaDslMongoReadJournal(impl))
}
object ScalaDslMongoReadJournal {
val eventToEventEnvelope: Flow[Event, EventEnvelope, NotUsed] = {
// TODO Use zipWithIndex in akka 2.4.14
Flow[Event].zip(Source.unfold(0L)(s => Some((s + 1, s)))).map { case (event, offset) => event.toEnvelope(offset) }
}
implicit class RichFlow[Mat](source: Source[Event, Mat]) {
def toEventEnvelopes: Source[EventEnvelope, Mat] =
source.via(eventToEventEnvelope)
}
}
class ScalaDslMongoReadJournal(impl: MongoPersistenceReadJournallingApi)
extends scaladsl.ReadJournal
with CurrentPersistenceIdsQuery
with CurrentEventsByPersistenceIdQuery
with AllPersistenceIdsQuery
with EventsByPersistenceIdQuery {
import ScalaDslMongoReadJournal._
def currentAllEvents(): Source[EventEnvelope, NotUsed] =
Source.actorPublisher[Event](impl.currentAllEvents)
.toEventEnvelopes
.mapMaterializedValue(_ => NotUsed)
override def currentPersistenceIds(): Source[String, NotUsed] =
Source.actorPublisher[String](impl.currentPersistenceIds)
.mapMaterializedValue(_ => NotUsed)
override def currentEventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] = {
require(persistenceId != null, "PersistenceId must not be null")
Source.actorPublisher[Event](impl.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr))
.toEventEnvelopes
.mapMaterializedValue(_ => NotUsed)
}
def allEvents(): Source[EventEnvelope, NotUsed] = {
val pastSource = Source.actorPublisher[Event](impl.currentAllEvents).mapMaterializedValue(_ => ())
val realtimeSource = Source.actorRef[Event](100, OverflowStrategy.dropHead)
.mapMaterializedValue(actor => impl.subscribeJournalEvents(actor))
val removeDuplicatedEventsByPersistenceId = Flow[Event].via(new RemoveDuplicatedEventsByPersistenceId)
(pastSource ++ realtimeSource).mapMaterializedValue(_ => NotUsed).via(removeDuplicatedEventsByPersistenceId).toEventEnvelopes
}
override def eventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] = {
require(persistenceId != null, "PersistenceId must not be null")
val pastSource = Source.actorPublisher[Event](impl.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr))
.mapMaterializedValue(_ => NotUsed)
val realtimeSource = Source.actorRef[Event](100, OverflowStrategy.dropHead)
.mapMaterializedValue { actor => impl.subscribeJournalEvents(actor); NotUsed }
val stages = Flow[Event]
.filter(_.pid == persistenceId)
.filter(_.sn >= fromSequenceNr)
.via(new StopAtSeq(toSequenceNr))
.via(new RemoveDuplicatedEventsByPersistenceId)
(pastSource concat realtimeSource).via(stages).toEventEnvelopes
}
override def allPersistenceIds(): Source[String, NotUsed] = {
val pastSource = Source.actorPublisher[String](impl.currentPersistenceIds)
val realtimeSource = Source.actorRef[Event](100, OverflowStrategy.dropHead)
.map(_.pid).mapMaterializedValue(actor => impl.subscribeJournalEvents(actor))
val removeDuplicatedPersistenceIds = Flow[String].via(new RemoveDuplicates)
(pastSource ++ realtimeSource).mapMaterializedValue(_ => NotUsed).via(removeDuplicatedPersistenceIds)
}
}
class JavaDslMongoReadJournal(rj: ScalaDslMongoReadJournal) extends javadsl.ReadJournal with JCP with JCEBP with JEBP with JAPIQ {
def currentAllEvents(): JSource[EventEnvelope, NotUsed] = rj.currentAllEvents().asJava
def allEvents(): JSource[EventEnvelope, NotUsed] = rj.allEvents().asJava
override def currentPersistenceIds(): JSource[String, NotUsed] = rj.currentPersistenceIds().asJava
override def currentEventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long): JSource[EventEnvelope, NotUsed] = {
require(persistenceId != null, "PersistenceId must not be null")
rj.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava
}
override def eventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long) = {
require(persistenceId != null, "PersistenceId must not be null")
rj.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava
}
override def allPersistenceIds(): JSource[String, NotUsed] = rj.allPersistenceIds().asJava
}
trait JournalStream[Cursor] {
def cursor(): Cursor
def publishEvents(): Unit
}
class StopAtSeq(to: Long) extends GraphStage[FlowShape[Event, Event]] {
val in = Inlet[Event]("flowIn")
val out = Outlet[Event]("flowOut")
override def shape: FlowShape[Event, Event] = FlowShape(in, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) {
setHandler(in, new InHandler {
override def onPush(): Unit = {
val ev = grab(in)
push(out, ev)
if (ev.sn == to) completeStage()
}
})
setHandler(out, new OutHandler {
override def onPull(): Unit = {
pull(in)
}
})
}
}
class RemoveDuplicatedEventsByPersistenceId extends GraphStage[FlowShape[Event, Event]] {
private val in: Inlet[Event] = Inlet("in")
private val out: Outlet[Event] = Outlet("out")
override val shape: FlowShape[Event, Event] = FlowShape(in, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
private val lastSequenceNrByPersistenceId = mutable.HashMap.empty[String, Long]
override def onPush(): Unit = {
val event = grab(in)
lastSequenceNrByPersistenceId.get(event.pid) match {
case Some(sn) if event.sn > sn =>
push(out, event)
lastSequenceNrByPersistenceId.update(event.pid, event.sn)
case None =>
push(out, event)
lastSequenceNrByPersistenceId.update(event.pid, event.sn)
case _ =>
pull(in)
}
}
override def onPull(): Unit = pull(in)
setHandlers(in, out, this)
}
}
class RemoveDuplicates[T] extends GraphStage[FlowShape[T, T]] {
private val in: Inlet[T] = Inlet("in")
private val out: Outlet[T] = Outlet("out")
override val shape: FlowShape[T, T] = FlowShape(in, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
private val processed = mutable.HashSet.empty[T]
override def onPush(): Unit = {
val element = grab(in)
if(processed(element)) {
pull(in)
} else {
processed.add(element)
push(out, element)
}
}
override def onPull(): Unit = pull(in)
setHandlers(in, out, this)
}
}
trait MongoPersistenceReadJournallingApi {
def currentAllEvents: Props
def currentPersistenceIds: Props
def currentEventsByPersistenceId(persistenceId: String, fromSeq: Long, toSeq: Long): Props
def subscribeJournalEvents(subscriber: ActorRef): Unit
}
trait SyncActorPublisher[A, Cursor] extends ActorPublisher[A] with ActorLogging {
import ActorPublisherMessage._
override def preStart() = {
context.become(streaming(initialCursor, 0))
super.preStart()
}
protected def driver: MongoPersistenceDriver
protected def initialCursor: Cursor
protected def next(c: Cursor, atMost: Long): (Vector[A], Cursor)
protected def isCompleted(c: Cursor): Boolean
protected def discard(c: Cursor): Unit
def receive = Actor.emptyBehavior
def streaming(cursor: Cursor, offset: Long): Receive = {
case _: Cancel | SubscriptionTimeoutExceeded =>
discard(cursor)
context.stop(self)
case Request(_) =>
val (filled, remaining) = next(cursor, totalDemand)
filled foreach onNext
if (isCompleted(remaining)) {
onCompleteThenStop()
discard(remaining)
}
else
context.become(streaming(remaining, offset + filled.size))
}
}
| alari/akka-persistence-mongo | common/src/main/scala/akka/contrib/persistence/mongodb/MongoReadJournal.scala | Scala | apache-2.0 | 9,333 |
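A hypothetical wiring sketch for the read journal above, using the standard akka-persistence-query lookup. The plugin id comes from MongoReadJournal.Identifier in the file; the actor-system name and persistence id are illustrative, and the journal plugin must be configured for this to actually run.

import akka.actor.ActorSystem
import akka.persistence.query.PersistenceQuery
import akka.contrib.persistence.mongodb.{MongoReadJournal, ScalaDslMongoReadJournal}

object ReadJournalUsageSketch {
  val system = ActorSystem("example")
  // Look the scaladsl journal up by its plugin id and stream one persistence id's events.
  val readJournal: ScalaDslMongoReadJournal =
    PersistenceQuery(system).readJournalFor[ScalaDslMongoReadJournal](MongoReadJournal.Identifier)
  val events = readJournal.eventsByPersistenceId("sample-id", 0L, Long.MaxValue)
}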
package tests
import so.eval.Router
import akka.actor.{ ActorSystem, Props }
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import org.scalatest.{ BeforeAndAfter, FunSpec, Inside, ParallelTestExecution }
import org.scalatest.matchers.ShouldMatchers
/** A common trait that all of our language tests can extend. */
trait LanguageTest
extends FunSpec
with ShouldMatchers
with Inside
with BeforeAndAfter
with ParallelTestExecution {
// Some really high timeout that we'll never hit unless something is really
// really wrong.
implicit val timeout = Timeout(20.seconds)
val system = ActorSystem("Evaluate")
val router = system.actorOf(Props(new Router))
}
| eval-so/minibcs | src/test/scala/LanguageTest.scala | Scala | apache-2.0 | 716 |
package play
import sbt.{ Project => _, _ }
import sbt.Keys._
import play.console.Colors
import com.typesafe.sbt.SbtNativePackager.packageArchetype
object Project extends Plugin with PlayExceptions with play.Keys with PlayReloader with PlayCommands
with PlayRun with play.Settings with PlayPositionMapper with PlaySourceGenerators {
// ~~ Alerts
if (Option(System.getProperty("play.debug.classpath")).filter(_ == "true").isDefined) {
println()
this.getClass.getClassLoader.asInstanceOf[sbt.PluginManagement.PluginClassLoader].getURLs.foreach { el =>
println(Colors.green(el.toString))
}
println()
}
Option(System.getProperty("play.version")).map {
case badVersion if badVersion != play.core.PlayVersion.current => {
println(
Colors.red("""
|This project uses Play %s!
|Update the Play sbt-plugin version to %s (usually in project/plugins.sbt)
""".stripMargin.format(play.core.PlayVersion.current, badVersion))
)
}
case _ =>
}
private lazy val commonSettings: Seq[Setting[_]] =
packageArchetype.java_application ++
defaultSettings ++
intellijCommandSettings ++
Seq(testListeners += testListener) ++
Seq(
scalacOptions ++= Seq("-deprecation", "-unchecked", "-encoding", "utf8"),
javacOptions in Compile ++= Seq("-encoding", "utf8", "-g")
)
lazy val playJavaSettings: Seq[Setting[_]] =
commonSettings ++
eclipseCommandSettings(JAVA) ++
defaultJavaSettings ++
Seq(libraryDependencies += javaCore)
lazy val playScalaSettings: Seq[Setting[_]] =
commonSettings ++
eclipseCommandSettings(SCALA) ++
defaultScalaSettings
// Provided for backward compatibility because we now prefer sbt settings to be used directly.
// FIXME: Deprecate this method in the future.
def apply(name: String, applicationVersion: String = "1.0", dependencies: Seq[ModuleID] = Nil, path: File = file("."), settings: => Seq[Setting[_]] = Seq()): sbt.Project = {
lazy val playSettings = if (dependencies.contains(javaCore)) playJavaSettings else playScalaSettings
lazy val projectSettings: Seq[Setting[_]] = Seq(
version := applicationVersion,
libraryDependencies ++= dependencies
)
sbt.Project(name, path)
.settings(playSettings: _*)
.settings(projectSettings: _*)
.settings(settings: _*)
}
}
| michaelahlers/team-awesome-wedding | vendor/play-2.2.1/framework/src/sbt-plugin/src/main/scala/play/Project.scala | Scala | mit | 2,414 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.driver.util
import java.io.Serializable
import java.net.URLClassLoader
import akka.event.slf4j.SLF4JLogging
import scala.collection.JavaConversions._
import org.reflections.Reflections
import com.stratio.sparta.driver.exception.DriverException
import com.stratio.sparta.sdk._
import scala.util.Try
class ReflectionUtils extends SLF4JLogging {
def tryToInstantiate[C](classAndPackage: String, block: Class[_] => C): C = {
val clazMap: Map[String, String] = getClasspathMap
val finalClazzToInstance = clazMap.getOrElse(classAndPackage, classAndPackage)
try {
val clazz = Class.forName(finalClazzToInstance)
block(clazz)
} catch {
case cnfe: ClassNotFoundException =>
throw DriverException.create("Class with name " + classAndPackage + " cannot be found in the classpath.", cnfe)
case ie: InstantiationException =>
throw DriverException.create(
"Class with name " + classAndPackage + " cannot be instantiated", ie)
case e: Exception => throw DriverException.create(
"Generic error trying to instantiate " + classAndPackage, e)
}
}
def instantiateParameterizable[C](clazz: Class[_], properties: Map[String, Serializable]): C =
clazz.getDeclaredConstructor(classOf[Map[String, Serializable]]).newInstance(properties).asInstanceOf[C]
def printClassPath(cl: ClassLoader): Unit = {
val urls = cl.asInstanceOf[URLClassLoader].getURLs()
urls.foreach(url => log.info(url.getFile))
}
lazy val getClasspathMap: Map[String, String] = {
val reflections = new Reflections("com.stratio.sparta")
try {
log.info("#######")
log.info("####### SPARK MUTABLE_URL_CLASS_LOADER:")
log.info(getClass.getClassLoader.toString)
printClassPath(getClass.getClassLoader)
log.info("#######")
log.info("####### APP_CLASS_LOADER / SYSTEM CLASSLOADER:")
log.info(ClassLoader.getSystemClassLoader().toString)
printClassPath(ClassLoader.getSystemClassLoader())
log.info("#######")
log.info("####### EXTRA_CLASS_LOADER:")
log.info(getClass.getClassLoader.getParent.getParent.toString)
printClassPath(getClass.getClassLoader.getParent.getParent)
} catch {
case e: Exception => //nothing
}
val inputs = reflections.getSubTypesOf(classOf[Input]).toList
val dimensionTypes = reflections.getSubTypesOf(classOf[DimensionType]).toList
val operators = reflections.getSubTypesOf(classOf[Operator]).toList
val outputs = reflections.getSubTypesOf(classOf[Output]).toList
val parsers = reflections.getSubTypesOf(classOf[Parser]).toList
val plugins = inputs ++ dimensionTypes ++ operators ++ outputs ++ parsers
val result = plugins map (t => t.getSimpleName -> t.getCanonicalName) toMap
log.info("#######")
log.info("####### Plugins to be loaded:")
result.foreach {
case (simpleName: String, canonicalName: String) => log.info(s"${canonicalName}")
}
result
}
}
| danielcsant/sparta | driver/src/main/scala/com/stratio/sparta/driver/util/ReflectionUtils.scala | Scala | apache-2.0 | 3,628 |
package com.madgag.git.bfg.test
import com.madgag.git._
import com.madgag.git.test._
import org.eclipse.jgit.internal.storage.file.{GC, ObjectDirectory}
import org.eclipse.jgit.lib.Constants.OBJ_BLOB
import org.eclipse.jgit.lib.{ObjectId, ObjectReader, Repository}
import org.eclipse.jgit.revwalk.{RevCommit, RevTree}
import org.eclipse.jgit.treewalk.TreeWalk
import org.scalatest.Inspectors
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.matchers.{MatchResult, Matcher}
import scala.jdk.CollectionConverters._
class unpackedRepo(filePath: String) extends AnyFlatSpec with Matchers {
implicit val repo = unpackRepo(filePath)
implicit val objectDirectory = repo.getObjectDatabase.asInstanceOf[ObjectDirectory]
implicit lazy val (revWalk, reader) = repo.singleThreadedReaderTuple
def blobOfSize(sizeInBytes: Int): Matcher[ObjectId] = Matcher { (objectId: ObjectId) =>
val objectLoader = objectId.open
val hasThatSize = objectLoader.getType == OBJ_BLOB && objectLoader.getSize == sizeInBytes
def thing(boo: String) = s"${objectId.shortName} $boo size of $sizeInBytes"
MatchResult(hasThatSize, thing("did not have"), thing("had"))
}
def packedBlobsOfSize(sizeInBytes: Long): Set[ObjectId] = {
implicit val reader = repo.newObjectReader()
repo.getObjectDatabase.asInstanceOf[ObjectDirectory].packedObjects.filter { objectId =>
val objectLoader = objectId.open
objectLoader.getType == OBJ_BLOB && objectLoader.getSize == sizeInBytes
}.toSet
}
def haveFile(name: String): Matcher[ObjectId] = haveTreeEntry(name, !_.isSubtree)
def haveFolder(name: String): Matcher[ObjectId] = haveTreeEntry(name, _.isSubtree)
def haveTreeEntry(name: String, p: TreeWalk => Boolean) = new Matcher[ObjectId] {
def apply(treeish: ObjectId) = {
treeOrBlobPointedToBy(treeish.asRevObject) match {
case Right(tree) =>
def thing(boo: String) = s"tree ${treeish.shortName} $boo a '$name' entry"
MatchResult(
treeEntryNames(tree, p).contains(name),
thing("did not contain"),
thing("contained")
)
case Left(blob) =>
MatchResult(
false,
s"blob ${treeish.shortName} was not a tree containing '$name'",
s"""When does this happen??!""""
)
}
}
}
def treeEntryNames(t: RevTree, p: TreeWalk => Boolean): Seq[String] =
t.walk(postOrderTraversal = true).withFilter(p).map(_.getNameString).toList
def commitHist(specificRefs: String*)(implicit repo: Repository): Seq[RevCommit] = {
val logCommand = repo.git.log
if (specificRefs.isEmpty) logCommand.all else specificRefs.foldLeft(logCommand)((lc, ref) => lc.add(repo.resolve(ref)))
}.call.asScala.toSeq.reverse
def haveCommitWhereObjectIds(boom: Matcher[Iterable[ObjectId]])(implicit reader: ObjectReader): Matcher[RevCommit] = boom compose {
(c: RevCommit) => c.getTree.walk().map(_.getObjectId(0)).toSeq
}
def haveRef(refName: String, objectIdMatcher: Matcher[ObjectId]): Matcher[Repository] = objectIdMatcher compose {
(r: Repository) => r resolve refName // aka s"Ref [$refName]"
}
def commitHistory(histMatcher: Matcher[Seq[RevCommit]]) = histMatcher compose {
r: Repository => commitHist()(r)
}
def commitHistoryFor(refs: String*)(histMatcher: Matcher[Seq[RevCommit]]) = histMatcher compose {
r: Repository => commitHist(refs:_*)(r)
}
def ensureRemovalOfBadEggs[S,T](expr : => Iterable[S], exprResultMatcher: Matcher[Iterable[S]])(block: => T) = {
gc()
expr should exprResultMatcher
block
gc()
expr shouldBe empty
}
def gc() = {
val gc = new GC(repo)
gc.setPackExpireAgeMillis(0)
gc.gc()
}
class CheckRemovalFromCommits(commits: => Seq[RevCommit]) extends Inspectors {
def ofCommitsThat[T](commitM: Matcher[RevCommit])(block: => T): Unit = {
forAtLeast(1, commits) { commit =>
commit should commitM
}
block
forAll(commits) { commit =>
commit shouldNot commitM
}
}
}
def ensureRemovalFrom(commits: => Seq[RevCommit]): CheckRemovalFromCommits = new CheckRemovalFromCommits(commits)
def ensureInvariantValue[T, S](f: => S)(block: => T) = {
val originalValue = f
block
f should equal(originalValue)
}
def ensureInvariantCondition[T, S](cond: Matcher[Repository])(block: => T) = {
repo should cond
block
repo should cond
}
}
| rtyley/bfg-repo-cleaner | bfg-test/src/main/scala/com/madgag/git/bfg/test/unpackedRepo.scala | Scala | gpl-3.0 | 4,524 |
package com.twitter.finagle.service
import com.twitter.finagle.{FailedFastException, ServiceFactory, Service}
import com.twitter.util._
import com.twitter.finagle.MockTimer
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.conversions.time._
import java.util.concurrent.atomic.AtomicInteger
import org.junit.runner.RunWith
import org.mockito.Matchers.any
import org.mockito.Mockito.{never, verify, when, times}
import org.scalatest.concurrent.Conductors
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
import org.scalatest.mock.MockitoSugar
import scala.language.reflectiveCalls
@RunWith(classOf[JUnitRunner])
class FailFastFactoryTest extends FunSuite with MockitoSugar with Conductors {
def newCtx() = new {
val timer = new MockTimer
val backoffs = 1.second #:: 2.seconds #:: Stream.empty[Duration]
val service = mock[Service[Int, Int]]
when(service.close(any[Time])).thenReturn(Future.Done)
val underlying = mock[ServiceFactory[Int, Int]]
when(underlying.isAvailable).thenReturn(true)
when(underlying.close(any[Time])).thenReturn(Future.Done)
val failfast = new FailFastFactory(underlying, NullStatsReceiver, timer, backoffs)
val p, q, r = new Promise[Service[Int, Int]]
when(underlying()).thenReturn(p)
val pp = failfast()
assert(pp.isDefined === false)
assert(failfast.isAvailable === true)
assert(timer.tasks.isEmpty)
}
test("pass through whenever everything is fine") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Return(service)
assert(pp.poll === Some(Return(service)))
}
}
test("failure") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
verify(underlying).apply()
assert(failfast.isAvailable === false)
}
}
test("time out according to backoffs") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
assert(timer.tasks.size === 1)
tc.set(timer.tasks(0).when)
timer.tick()
verify(underlying, times(2)).apply()
assert(failfast.isAvailable === false)
}
}
test("become available again if the next attempt succeeds") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
tc.set(timer.tasks(0).when)
when(underlying()).thenReturn(q)
verify(underlying).apply()
timer.tick()
verify(underlying, times(2)).apply()
assert(timer.tasks.isEmpty)
q() = Return(service)
assert(timer.tasks.isEmpty)
assert(failfast.isAvailable === true)
}
}
test("refuse external attempts") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
assert {
failfast().poll match {
case Some(Throw(_: FailedFastException)) => true
case _ => false
}
}
verify(underlying).apply() // nothing new
}
}
test("admit external attempts when available again") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
tc.set(timer.tasks(0).when)
verify(underlying).apply()
when(underlying()).thenReturn(q)
timer.tick()
verify(underlying, times(2)).apply()
q() = Return(service)
when(underlying()).thenReturn(r)
assert(failfast().poll === None)
r() = Return(service)
assert {
failfast().poll match {
case Some(Return(s)) => s eq service
case _ => false
}
}
}
}
test("cancels timer on close") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
p() = Throw(new Exception)
assert(timer.tasks.size === 1)
assert(failfast.isAvailable === false)
verify(underlying, never()).close()
failfast.close()
verify(underlying).close()
assert(timer.tasks.isEmpty)
assert(failfast.isAvailable === underlying.isAvailable)
val ia = !underlying.isAvailable
when(underlying.isAvailable).thenReturn(ia)
assert(failfast.isAvailable === underlying.isAvailable)
}
}
test("fails simultaneous requests properly") {
Time.withCurrentTimeFrozen { tc =>
val ctx = newCtx()
import ctx._
val pp2 = failfast()
val e = new Exception
p() = Throw(e)
assert(pp.poll === Some(Throw(e)))
assert(pp2.poll === Some(Throw(e)))
val ffe = intercept[FailedFastException] {
failfast().poll.get.get
}
assert(ffe.getMessage().contains("twitter.github.io/finagle/guide/FAQ.html"))
}
}
test("maintains separate exception state in separate threads") {
Time.withCurrentTimeFrozen { tc =>
val conductor = new Conductor
import conductor._
val threadCompletionCount = new AtomicInteger(0)
thread("threadOne") {
val ctx = newCtx()
ctx.p() = Throw(new Exception)
ctx.failfast().poll match {
case Some(Throw(ex: FailedFastException)) => {
ex.serviceName = "threadOne"
assert(beat === 0)
}
case _ => throw new Exception
}
threadCompletionCount.incrementAndGet()
}
thread("threadTwo") {
waitForBeat(1)
val ctx = newCtx()
ctx.p() = Throw(new Exception)
ctx.failfast().poll match {
case Some(Throw(ex: FailedFastException)) => {
assert(ex.serviceName === "unspecified")
}
case _ => throw new Exception
}
threadCompletionCount.incrementAndGet()
}
whenFinished {
assert(threadCompletionCount.get === 2)
}
}
}
}
| yancl/finagle-6.22.0 | finagle-core/src/test/scala/com/twitter/finagle/service/FailFastFactoryTest.scala | Scala | apache-2.0 | 5,861 |
package com.rasterfoundry.batch.stacExport
import geotrellis.server.stac._
import com.rasterfoundry.datamodel._
import geotrellis.proj4.CRS
import geotrellis.vector.reproject.Reproject
import cats.implicits._
import io.circe._
import java.sql.Timestamp
import shapeless._
object StacCatalogBuilder {
sealed trait CatalogRequirements
object CatalogBuilder {
trait EmptyCatalog extends CatalogRequirements
trait CatalogVersion extends CatalogRequirements
trait CatalogParentPath extends CatalogRequirements
trait CatalogId extends CatalogRequirements
trait CatalogTitle extends CatalogRequirements
trait CatalogDescription extends CatalogRequirements
trait CatalogLinks extends CatalogRequirements
trait CatalogContents extends CatalogRequirements
type CompleteCatalog =
EmptyCatalog
with CatalogVersion
with CatalogParentPath
with CatalogId
with CatalogTitle
with CatalogDescription
with CatalogLinks
with CatalogContents
}
}
case class IncompleteStacCatalog(
stacVersion: Option[String] = None,
parentPath: Option[String] = None,
rootPath: Option[String] = None,
isRoot: Boolean = false,
id: Option[String] = None,
title: Option[String] = None,
description: Option[String] = None,
links: List[StacLink] = List(),
contents: Option[ContentBundle] = None
) {
// it is ok to use .get in here because stacVersion, id,
// description are in the requirement above and only when
// they are populated does the compiler agree with
// the .build() call
@SuppressWarnings(Array("OptionGet"))
def toStacCatalog(): StacCatalog = {
StacCatalog(
stacVersion.get,
id.get,
title,
description.get,
links
)
}
}
class StacCatalogBuilder[
CatalogRequirements <: StacCatalogBuilder.CatalogRequirements
](stacCatalog: IncompleteStacCatalog = IncompleteStacCatalog()) {
import StacCatalogBuilder.CatalogBuilder._
def withVersion(
stacVersion: String
): StacCatalogBuilder[CatalogRequirements with CatalogVersion] =
new StacCatalogBuilder(stacCatalog.copy(stacVersion = Some(stacVersion)))
def withParentPath(
parentPath: String,
isRoot: Boolean = false,
rootPath: String = ""
): StacCatalogBuilder[CatalogRequirements with CatalogParentPath] =
new StacCatalogBuilder(
stacCatalog.copy(
parentPath = Some(parentPath),
rootPath = Some(rootPath),
isRoot = isRoot
)
)
def withId(
id: String
): StacCatalogBuilder[CatalogRequirements with CatalogId] =
new StacCatalogBuilder(stacCatalog.copy(id = Some(id)))
def withTitle(
title: String
): StacCatalogBuilder[CatalogRequirements with CatalogTitle] =
new StacCatalogBuilder(stacCatalog.copy(title = Some(title)))
def withDescription(
description: String
): StacCatalogBuilder[CatalogRequirements with CatalogDescription] =
new StacCatalogBuilder(stacCatalog.copy(description = Some(description)))
def withLinks(
links: List[StacLink]
): StacCatalogBuilder[CatalogRequirements with CatalogLinks] =
new StacCatalogBuilder(stacCatalog.copy(links = links))
def withContents(
contents: ContentBundle
): StacCatalogBuilder[CatalogRequirements with CatalogContents] =
new StacCatalogBuilder(stacCatalog.copy(contents = Some(contents)))
// it is ok to use .get in here because parentPath, contents,
// id, and stacVersion are in the requirement above and only
// when they are populated does the compiler agree with the .build() call
@SuppressWarnings(Array("OptionGet"))
def build()(
implicit ev: CatalogRequirements =:= CompleteCatalog
): (
StacCatalog, // catalog
List[
(
StacCollection, // layer collection
(StacCollection, List[StacItem]), // scene collection and scene items
(StacCollection, StacItem, (Option[Json], String)) // label collection, label item, label data, and s3 location
)
]
) = {
// Silence unused warning because scalac warns about phantom types
ev.unused
// s3://rasterfoundry-production-data-us-east-1/stac-exports/<catalogId>
val absPath: String = stacCatalog.parentPath.get
// catalog.json
val rootPath = "catalog.json"
val layerCollectionList: List[
(
StacCollection, // layer collection
(StacCollection, List[StacItem]), // scene collection and scene items
(StacCollection, StacItem, (Option[Json], String)), // label collection, label item, label data, and s3 location
String //layerSelfAbsLink
)
] = stacCatalog.contents.get.layerToSceneTaskAnnotation
.map {
case (layerId, sceneTaskAnnotation) => (layerId, sceneTaskAnnotation)
}
.toList
.map(layerInfo => {
val layerId: String = layerInfo._1.toString
val sceneList: List[Scene] = layerInfo._2._1
val sceneGeomExtent: Option[UnionedGeomExtent] = layerInfo._2._2
// s3://rasterfoundry-production-data-us-east-1/stac-exports/<catalogId>/<layerId>
val layerCollectionAbsPath = s"${absPath}/${layerId}"
// ../../catalog.json
val layerRootPath = s"../${rootPath}"
val layerCollectionBuilder =
new LayerCollectionBuilder[
LayerCollectionBuilder.CollectionBuilder.EmptyCollection
]()
val layerSelfAbsLink = s"${layerCollectionAbsPath}/collection.json"
val layerOwnLinks = List(
StacLink(
// s3://rasterfoundry-production-data-us-east-1/stac-exports/<catalogId>/catalog.json
"../catalog.json",
Parent,
Some(`application/json`),
Some(s"Catalog ${stacCatalog.id.get}"),
List()
),
StacLink(
// s3://rasterfoundry-production-data-us-east-1/stac-exports/<catalogId>/<layerCollectionId>/collection.json
layerSelfAbsLink,
Self,
Some(`application/json`),
Some(s"Layer Collection ${layerId}"),
List()
),
StacLink(
// s3://rasterfoundry-production-data-us-east-1/stac-exports/<catalogId>/<catalogId>.json
layerRootPath,
StacRoot,
Some(`application/json`),
Some("Root"),
List()
)
)
val layerSceneSpatialExtent = sceneGeomExtent match {
case Some(geomExt) =>
Coproduct[Bbox](
TwoDimBbox(geomExt.xMin, geomExt.yMin, geomExt.xMax, geomExt.yMax)
)
case None =>
val extent = sceneList
.map(_.dataFootprint)
.flatten
.map(
geom =>
Reproject(
geom.geom,
CRS.fromEpsgCode(3857),
CRS.fromEpsgCode(4326)
)
)
.map(_.envelope)
.reduce((e1, e2) => {
e1.combine(e2)
})
Coproduct[Bbox](
TwoDimBbox(extent.xmin, extent.ymin, extent.xmax, extent.ymax)
)
}
val layerSceneAqcTime: List[Timestamp] =
sceneList map { scene =>
scene.filterFields.acquisitionDate.getOrElse(scene.createdAt)
}
val layerSceneTemporalExtent: List[Option[String]] = List(
Some(layerSceneAqcTime.minBy(_.getTime).toLocalDateTime.toString),
Some(layerSceneAqcTime.maxBy(_.getTime).toLocalDateTime.toString)
)
val layerExtent = StacExtent(
SpatialExtent(List(layerSceneSpatialExtent)),
TemporalExtent(List(layerSceneTemporalExtent))
)
val (
layerCollection,
(sceneCollection, sceneItems),
(labelCollection, labelItem, (labelDataJson, labelDataS3AbsLink))
): (
StacCollection,
(StacCollection, List[StacItem]),
(StacCollection, StacItem, (Option[Json], String))
) = layerCollectionBuilder
.withVersion(stacCatalog.stacVersion.get)
.withId(layerId)
.withTitle("Layers")
.withDescription("Project layer collection")
.withLinks(layerOwnLinks)
.withParentPath(layerCollectionAbsPath, layerRootPath)
.withExtent(layerExtent)
.withSceneTaskAnnotations(layerInfo._2)
.build()
(
layerCollection,
(sceneCollection, sceneItems),
(labelCollection, labelItem, (labelDataJson, labelDataS3AbsLink)),
layerSelfAbsLink
)
})
val updatedStacCatalog = stacCatalog
.copy(
links = stacCatalog.links ++ layerCollectionList.map {
case (layerCollection, _, _, _) =>
StacLink(
s"${layerCollection.id}/collection.json",
Child,
Some(`application/json`),
Some("Layer Collection"),
List()
)
}
)
.toStacCatalog()
val layerInfoList = layerCollectionList.map(
layerInfo => (layerInfo._1, layerInfo._2, layerInfo._3)
)
(updatedStacCatalog, layerInfoList)
}
}
| aaronxsu/raster-foundry | app-backend/batch/src/main/scala/stacExport/StacCatalogBuilder.scala | Scala | apache-2.0 | 9,285 |
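Aside: StacCatalogBuilder above relies on phantom type parameters so that build() compiles only once every required field has been supplied. Below is a minimal, self-contained sketch of the same technique; all names are illustrative and not taken from the file.

object PhantomBuilderSketch {
  sealed trait Req
  trait Empty extends Req
  trait HasId extends Req
  trait HasDesc extends Req
  type Complete = Empty with HasId with HasDesc

  final case class Draft(id: Option[String] = None, desc: Option[String] = None)

  class Builder[S <: Req](draft: Draft = Draft()) {
    def withId(id: String): Builder[S with HasId] = new Builder(draft.copy(id = Some(id)))
    def withDesc(d: String): Builder[S with HasDesc] = new Builder(draft.copy(desc = Some(d)))
    // The =:= evidence exists only when S has accumulated every required trait,
    // so calling build() on an incomplete builder is a compile-time error.
    def build()(implicit ev: S =:= Complete): (String, String) = (draft.id.get, draft.desc.get)
  }

  val ok = new Builder[Empty]().withId("catalog-1").withDesc("demo").build()
  // new Builder[Empty]().withId("catalog-1").build() // would not compile: description missing
}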
package im.mange.driveby.pool
//TODO: seems like application stuff should be in application package (like browser)
case class Application(name: String, port: Int, host: String = "127.0.0.1") {
def baseUrl = "http://" + host + ":" + port
}
| alltonp/driveby | src/main/scala/im/mange/driveby/pool/Application.scala | Scala | apache-2.0 | 242 |
package sangria.validation.rules
import org.scalatest.WordSpec
import sangria.util.{Pos, ValidationSupport}
class KnownFragmentNamesSpec extends WordSpec with ValidationSupport {
override val defaultRule = Some(new KnownFragmentNames)
"Validate: Known fragment names" should {
"known fragment names are valid" in expectPasses(
"""
{
human(id: 4) {
...HumanFields1
... on Human {
...HumanFields2
}
}
}
fragment HumanFields1 on Human {
name
...HumanFields3
}
fragment HumanFields2 on Human {
name
}
fragment HumanFields3 on Human {
name
}
""")
"unknown fragment names are invalid" in expectFails(
"""
{
human(id: 4) {
...UnknownFragment1
... on Human {
...UnknownFragment2
}
}
}
fragment HumanFields on Human {
name
...UnknownFragment3
}
""",
List(
"Unknown fragment 'UnknownFragment1'." -> Some(Pos(4, 13)),
"Unknown fragment 'UnknownFragment2'." -> Some(Pos(6, 15)),
"Unknown fragment 'UnknownFragment3'." -> Some(Pos(12, 11))
))
}
}
| narahari92/sangria | src/test/scala/sangria/validation/rules/KnownFragmentNamesSpec.scala | Scala | apache-2.0 | 1,309 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
import java.nio.ByteBuffer
import io.gatling.commons.util.Collections._
object ByteBuffers {
val Empty = ByteBuffer.wrap(Array.empty)
def byteBuffer2ByteArray(byteBuffer: ByteBuffer): Array[Byte] = {
val bytes = new Array[Byte](byteBuffer.remaining)
if (byteBuffer.hasArray) {
System.arraycopy(byteBuffer.array, byteBuffer.arrayOffset, bytes, 0, bytes.length)
} else {
byteBuffer.get(bytes)
}
bytes
}
def byteBuffers2ByteArray(byteBuffers: Seq[ByteBuffer]): Array[Byte] = {
val bytes = new Array[Byte](byteBuffers.sumBy(_.remaining))
var pos = 0
byteBuffers.foreach { byteBuffer =>
val remaining = byteBuffer.remaining
if (byteBuffer.hasArray) {
System.arraycopy(byteBuffer.array, byteBuffer.arrayOffset, bytes, pos, remaining)
} else {
byteBuffer.get(bytes, pos, remaining)
}
pos += remaining
}
bytes
}
def sumByteBuffers(buffers: Iterable[ByteBuffer]): ByteBuffer = {
val comb = ByteBuffer.allocate(buffers.sumBy(_.remaining))
copyInto(buffers, comb)
}
def copyInto(sources: Iterable[ByteBuffer], target: ByteBuffer): ByteBuffer = {
sources.foreach(target.put)
target.flip()
target
}
}
| MykolaB/gatling | gatling-commons/src/main/scala/io/gatling/commons/util/ByteBuffers.scala | Scala | apache-2.0 | 1,884 |
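A quick usage sketch for the ByteBuffers helpers above. The import path matches the package declared in that file; the payloads are illustrative.

import java.nio.ByteBuffer
import io.gatling.commons.util.ByteBuffers

object ByteBuffersUsageSketch extends App {
  // sumByteBuffers concatenates the remaining bytes of the inputs into one buffer
  // that is already flipped, i.e. ready to be read from position 0.
  val parts = Seq(ByteBuffer.wrap("foo".getBytes("UTF-8")), ByteBuffer.wrap("bar".getBytes("UTF-8")))
  val combined = ByteBuffers.sumByteBuffers(parts)
  // byteBuffer2ByteArray copies exactly the remaining bytes back out.
  assert(new String(ByteBuffers.byteBuffer2ByteArray(combined), "UTF-8") == "foobar")
}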
package edu.rice.habanero.benchmarks.big
import java.util.Random
import akka.actor.{ActorRef, Props}
import edu.rice.habanero.actors.{AkkaActor, AkkaActorState}
import edu.rice.habanero.benchmarks.big.BigConfig.{ExitMessage, Message, PingMessage, PongMessage}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
/**
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object BigAkkaActorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new BigAkkaActorBenchmark)
}
private final class BigAkkaActorBenchmark extends Benchmark {
def initialize(args: Array[String]) {
BigConfig.parseArgs(args)
}
def printArgInfo() {
BigConfig.printArgs()
}
def runIteration() {
val system = AkkaActorState.newActorSystem("Big")
val sinkActor = system.actorOf(Props(new SinkActor(BigConfig.W)))
AkkaActorState.startActor(sinkActor)
val bigActors = Array.tabulate[ActorRef](BigConfig.W)(i => {
val loopActor = system.actorOf(Props(new BigActor(i, BigConfig.N, sinkActor)))
AkkaActorState.startActor(loopActor)
loopActor
})
val neighborMessage = new NeighborMessage(bigActors)
sinkActor ! neighborMessage
bigActors.foreach(loopActor => {
loopActor ! neighborMessage
})
bigActors.foreach(loopActor => {
loopActor ! new PongMessage(-1)
})
AkkaActorState.awaitTermination(system)
}
def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
}
}
private case class NeighborMessage(neighbors: Array[ActorRef]) extends Message
private class BigActor(id: Int, numMessages: Int, sinkActor: ActorRef) extends AkkaActor[AnyRef] {
private var numPings = 0
private var expPinger = -1
private val random = new Random(id)
private var neighbors: Array[ActorRef] = null
private val myPingMessage = new PingMessage(id)
private val myPongMessage = new PongMessage(id)
override def process(msg: AnyRef) {
msg match {
case pm: PingMessage =>
val sender = neighbors(pm.sender)
sender ! myPongMessage
case pm: PongMessage =>
if (pm.sender != expPinger) {
println("ERROR: Expected: " + expPinger + ", but received ping from " + pm.sender)
}
if (numPings == numMessages) {
sinkActor ! ExitMessage.ONLY
} else {
sendPing()
numPings += 1
}
case em: ExitMessage =>
exit()
case nm: NeighborMessage =>
neighbors = nm.neighbors
}
}
private def sendPing(): Unit = {
val target = random.nextInt(neighbors.size)
val targetActor = neighbors(target)
expPinger = target
targetActor ! myPingMessage
}
}
private class SinkActor(numWorkers: Int) extends AkkaActor[AnyRef] {
private var numMessages = 0
private var neighbors: Array[ActorRef] = null
override def process(msg: AnyRef) {
msg match {
case em: ExitMessage =>
numMessages += 1
if (numMessages == numWorkers) {
neighbors.foreach(loopWorker => loopWorker ! ExitMessage.ONLY)
exit()
}
case nm: NeighborMessage =>
neighbors = nm.neighbors
}
}
}
}
| smarr/savina | src/main/scala/edu/rice/habanero/benchmarks/big/BigAkkaActorBenchmark.scala | Scala | gpl-2.0 | 3,396 |
package reactiverogue.record
package field
import reactivemongo.bson.BSONObjectID
trait ObjectIdPk[OwnerType <: MongoRecord[OwnerType]] { self: OwnerType =>
def defaultIdValue = BSONObjectID.generate
object id extends ObjectIdField(this.asInstanceOf[OwnerType]) {
override def name = "_id"
override def defaultValue = defaultIdValue
}
}
| whisklabs/reactiverogue | reactiverogue-core/src/main/scala/reactiverogue/record/field/ObjectIdPk.scala | Scala | apache-2.0 | 355 |
package mesosphere.marathon
package core.election.impl
import akka.event.EventStream
import mesosphere.AkkaUnitTest
import mesosphere.chaos.http.HttpConf
import mesosphere.marathon.core.base.{ CrashStrategy, LifecycleState }
import mesosphere.marathon.core.election.{ ElectionCandidate, ElectionService, LocalLeadershipEvent }
import org.mockito.Mockito
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{ Seconds, Span }
class PseudoElectionServiceTest extends AkkaUnitTest with Eventually {
override implicit lazy val patienceConfig: PatienceConfig = PatienceConfig(timeout = Span(10, Seconds))
class Fixture {
val hostPort: String = "unresolvable:2181"
val httpConfig: HttpConf = mock[HttpConf]
val electionService: ElectionService = mock[ElectionService]
val events: EventStream = new EventStream(system)
val candidate: ElectionCandidate = mock[ElectionCandidate]
val lifecycle: LifecycleState = LifecycleState.Ignore
val crashStrategy: CrashStrategy = mock[CrashStrategy]
}
"PseudoElectionService" should {
"leader is not set initially" in {
val f = new Fixture
val electionService = new PseudoElectionService(f.hostPort, system, f.events, f.lifecycle, f.crashStrategy)
electionService.currentCandidate.get should be(None)
}
"leader is eventually set after offerLeadership is called" in {
val f = new Fixture
val electionService = new PseudoElectionService(f.hostPort, system, f.events, f.lifecycle, f.crashStrategy)
Given("leadership is offered")
electionService.offerLeadership(f.candidate)
Then("leader is set")
eventually { electionService.currentCandidate.get should equal(Some(f.candidate)) }
Given("leadership is offered again")
electionService.offerLeadership(f.candidate)
Then("leader is set to None and Marathon stops")
eventually { electionService.currentCandidate.get should equal(None) }
eventually { verify(f.crashStrategy).crash() }
}
"Marathon stops after abdicateLeadership while being idle" in {
val f = new Fixture
val electionService = new PseudoElectionService(f.hostPort, system, f.events, f.lifecycle, f.crashStrategy)
Given("leadership is abdicated while not being leader")
electionService.abdicateLeadership()
Then("leader is None and Marathon stops")
eventually { electionService.currentCandidate.get should be(None) }
eventually { verify(f.crashStrategy).crash() }
}
"events are sent" in {
val f = new Fixture
val events = mock[EventStream]
val electionService = new PseudoElectionService(f.hostPort, system, events, f.lifecycle, f.crashStrategy)
Given("this instance is becoming a leader")
electionService.offerLeadership(f.candidate)
eventually { electionService.currentCandidate.get should equal(Some(f.candidate)) }
Then("the candidate is called, then an event is published")
val order = Mockito.inOrder(events, f.candidate)
eventually { order.verify(f.candidate).startLeadership() }
eventually { order.verify(events).publish(LocalLeadershipEvent.ElectedAsLeader) }
Given("this instance is abdicating")
electionService.abdicateLeadership()
Then("the candidate is called, then an event is published")
eventually { order.verify(f.candidate).stopLeadership() }
eventually { order.verify(events).publish(LocalLeadershipEvent.Standby) }
Then("the candidate is set to None")
eventually { electionService.currentCandidate.get should be(None) }
Then("then Marathon stops")
eventually { verify(f.crashStrategy).crash() }
}
"Marathon stops after leadership abdication while being a leader" in {
val f = new Fixture
val electionService = new PseudoElectionService(f.hostPort, system, f.events, f.lifecycle, f.crashStrategy)
Given("this instance becomes leader and then abdicates leadership")
electionService.offerLeadership(f.candidate)
eventually { electionService.currentCandidate.get should equal(Some(f.candidate)) }
electionService.abdicateLeadership()
Then("then state is Stopped and Marathon stops")
eventually { electionService.currentCandidate.get should be(None) }
eventually { verify(f.crashStrategy).crash() }
}
"Marathon stops if a candidate's startLeadership fails" in {
val f = new Fixture
val electionService = new PseudoElectionService(f.hostPort, system, f.events, f.lifecycle, f.crashStrategy)
Mockito.when(f.candidate.startLeadership()).thenAnswer(new Answer[Unit] {
override def answer(invocation: InvocationOnMock): Unit = {
throw new Exception("candidate.startLeadership exception")
}
})
Given("this instance is offering leadership and candidate.startLeadership throws an exception")
electionService.offerLeadership(f.candidate)
Then("the instance is stopped")
eventually { electionService.currentCandidate.get should be(None) }
eventually { verify(f.crashStrategy).crash() }
}
}
}
|
Caerostris/marathon
|
src/test/scala/mesosphere/marathon/core/election/impl/PseudoElectionServiceTest.scala
|
Scala
|
apache-2.0
| 5,219
|
package actors
import akka.actor.{Props, ActorRef, Actor}
import utils.{StockQuote, PretendStockQuote}
import scala.util.Random
import scala.collection.immutable.{HashSet, Queue}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import play.libs.Akka
/**
 * There is one StockActor per stock symbol. Each StockActor maintains the set of users watching its
 * stock and a rolling window of randomly generated stock values, which it refreshes on a fixed tick.
*/
class StockActor(symbol: String) extends Actor {
lazy val stockQuote: StockQuote = new PretendStockQuote
protected[this] var watchers: HashSet[ActorRef] = HashSet.empty[ActorRef]
// A random data set which uses stockQuote.newPrice to get each data point
var stockHistory: Queue[Double] = {
val random = Random
lazy val initialPrices: Stream[Double] = (random.nextDouble * 800) #:: initialPrices.map(previous => stockQuote.newPrice(previous))
initialPrices.take(50).to[Queue]
}
// Fetch the latest stock value every 75ms
val stockTick = context.system.scheduler.schedule(Duration.Zero, 75.millis, self, FetchLatest)
def receive = {
case FetchLatest => {
// add a new stock price to the history and drop the oldest
val newPrice = stockQuote.newPrice(stockHistory.last.doubleValue())
stockHistory = stockHistory.drop(1) :+ newPrice
// notify watchers
watchers.foreach(_ ! StockUpdate(symbol, newPrice))
}
case WatchStock(_) => {
// send the stock history to the user
sender ! StockHistory(symbol, stockHistory.toList)
// add the watcher to the list
watchers = watchers + sender
}
case UnwatchStock(_) =>
watchers = watchers - sender
if (watchers.size == 0) {
stockTick.cancel()
context.stop(self)
}
}
}
class StocksActor extends Actor {
def receive = {
case watchStock @ WatchStock(symbol) =>{
// get or create the StockActor for the symbol and forward this message
context.child(symbol).getOrElse {
context.actorOf(Props(new StockActor(symbol)), symbol)
} forward watchStock
}
case unwatchStock @ UnwatchStock(Some(symbol)) => {
// if there is a StockActor for the symbol forward this message
context.child(symbol).foreach(_.forward(unwatchStock))
}
case unwatchStock @ UnwatchStock(None) => {
// if no symbol is specified, forward to everyone
context.children.foreach(_.forward(unwatchStock))
}
}
}
object StocksActor {
lazy val stocksActor: ActorRef = Akka.system.actorOf(Props(classOf[StocksActor]))
}
case object FetchLatest
case class StockUpdate(symbol: String, price: Number)
case class StockHistory(symbol: String, history: List[Double])
case class WatchStock(symbol: String)
case class UnwatchStock(symbol: Option[String])
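// Editor's note: the class below is an illustrative sketch, not part of the original file.
// It shows how a hypothetical client actor could use the watch/unwatch protocol defined above:
// WatchStock replies with a StockHistory snapshot and then streams StockUpdate messages.
// The actor name and symbol are made up for the example.
class ExampleWatcher extends Actor {
  override def preStart(): Unit =
    StocksActor.stocksActor ! WatchStock("GOOG")          // start watching on startup
  def receive = {
    case StockHistory(symbol, history) =>
      println(s"$symbol: received initial window of ${history.size} prices")
    case StockUpdate(symbol, price) =>
      println(s"$symbol: new price $price")                // pushed roughly every 75ms
  }
  override def postStop(): Unit =
    StocksActor.stocksActor ! UnwatchStock(Some("GOOG"))   // stop watching on shutdown
}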
|
alanktwong/typesafe_activators
|
reactive-stocks/app/actors/StockActor.scala
|
Scala
|
mit
| 2,766
|
package coursera.week1
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class PascalSuite extends FunSuite {
import Main.pascal
test("pascal: col=0,row=2") {
assert(pascal(0,2) === 1)
}
test("pascal: col=1,row=2") {
assert(pascal(1,2) === 2)
}
test("pascal: col=1,row=3") {
assert(pascal(1,3) === 3)
}
}
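// Editor's note: Main.pascal is not shown in this file. The object below is a sketch of one
// implementation that satisfies the assertions above (the binomial coefficient at column c,
// row r of Pascal's triangle); it is an assumption, not the original course solution.
object PascalSketch {
  def pascal(c: Int, r: Int): Int =
    if (c == 0 || c == r) 1                          // the triangle's edges are 1
    else pascal(c - 1, r - 1) + pascal(c, r - 1)     // an interior cell is the sum of its two parents
}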
|
testnaz/ScalaSandbox
|
src/test/scala/coursera/week1/PascalSuite.scala
|
Scala
|
gpl-3.0
| 430
|
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.entitlement
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import Privilege.Privilege
import spray.http.HttpMethod
import spray.http.HttpMethods.DELETE
import spray.http.HttpMethods.GET
import spray.http.HttpMethods.POST
import spray.http.HttpMethods.PUT
import whisk.common.Logging
import whisk.common.TransactionId
import whisk.core.entity.Identity
import whisk.core.entity.WhiskAction
import whisk.core.entity.WhiskActivation
import whisk.core.entity.WhiskEntityStore
import whisk.core.entity.WhiskPackage
import whisk.core.entity.WhiskRule
import whisk.core.entity.WhiskTrigger
import whisk.core.entity.types.EntityStore
/**
 * A collection encapsulates the name of a collection and the implicit rights a subject receives
 * when it lacks explicit rights on a resource in the collection. An activation request (POST)
 * resolves to ACTIVATE or REJECT depending on the allowed entity rights.
 *
 * @param path the name of the collection (the resource path in URI and the view name in the datastore)
 * @param listLimit the default limit on the number of entities returned from a collection on a list operation
 */
protected[core] case class Collection protected (
val path: String,
val listLimit: Int = 30) {
override def toString = path
  /** Determines the privilege required for the given operation on the resource, in the request context. */
protected[core] def determineRight(op: HttpMethod, resource: Option[String])(
implicit transid: TransactionId): Privilege = {
op match {
case GET => Privilege.READ
case PUT => resource map { _ => Privilege.PUT } getOrElse Privilege.REJECT
case POST => resource map { _ => activateAllowed } getOrElse Privilege.REJECT
case DELETE => resource map { _ => Privilege.DELETE } getOrElse Privilege.REJECT
case _ => Privilege.REJECT
}
}
protected val allowedCollectionRights = Set(Privilege.READ)
protected val allowedEntityRights = {
Set(Privilege.READ, Privilege.PUT, Privilege.ACTIVATE, Privilege.DELETE)
}
private lazy val activateAllowed = {
if (allowedEntityRights.contains(Privilege.ACTIVATE)) {
Privilege.ACTIVATE
} else Privilege.REJECT
}
/**
* Infers implicit rights on a resource in the collection before checking explicit
* rights in the entitlement matrix. The subject has CRUD and activate rights
   * to any resource in any of their namespaces as long as the right (the implied operation)
* is permitted on the resource.
*/
protected[core] def implicitRights(user: Identity, namespaces: Set[String], right: Privilege, resource: Resource)(
implicit ep: EntitlementProvider, ec: ExecutionContext, transid: TransactionId): Future[Boolean] = Future.successful {
// if the resource root namespace is in any of the allowed namespaces
// then this is an owner of the resource
val self = namespaces.contains(resource.namespace.root.asString)
resource.entity map {
_ => self && allowedEntityRights.contains(right)
} getOrElse {
self && allowedCollectionRights.contains(right)
}
}
}
/** An enumeration of known collections. */
protected[core] object Collection {
protected[core] def requiredProperties = WhiskEntityStore.requiredProperties
protected[core] val ACTIONS = WhiskAction.collectionName
protected[core] val TRIGGERS = WhiskTrigger.collectionName
protected[core] val RULES = WhiskRule.collectionName
protected[core] val PACKAGES = WhiskPackage.collectionName
protected[core] val ACTIVATIONS = WhiskActivation.collectionName
protected[core] val NAMESPACES = "namespaces"
private val collections = scala.collection.mutable.Map[String, Collection]()
private def register(c: Collection) = collections += c.path -> c
protected[core] def apply(name: String) = collections.get(name).get
protected[core] def initialize(entityStore: EntityStore)(implicit logging: Logging) = {
register(new ActionCollection(entityStore))
register(new Collection(TRIGGERS))
register(new Collection(RULES))
register(new PackageCollection(entityStore))
register(new Collection(ACTIVATIONS) {
protected[core] override def determineRight(op: HttpMethod, resource: Option[String])(
implicit transid: TransactionId) = {
if (op == GET) Privilege.READ else Privilege.REJECT
}
protected override val allowedEntityRights = Set(Privilege.READ)
})
register(new Collection(NAMESPACES) {
protected[core] override def determineRight(op: HttpMethod, resource: Option[String])(
implicit transid: TransactionId) = {
resource map { _ => Privilege.REJECT } getOrElse {
if (op == GET) Privilege.READ else Privilege.REJECT
}
}
protected override val allowedEntityRights = Set(Privilege.READ)
})
}
}
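// Editor's note: the object below is an illustrative sketch, not part of OpenWhisk. It restates,
// on plain strings, the HTTP-method-to-privilege mapping that determineRight encodes for a
// collection with the default entity rights: GET reads; PUT, POST and DELETE require a named
// entity and are otherwise rejected; any other verb is rejected.
object CollectionRightsSketch {
  def rightFor(method: String, entity: Option[String]): String = method match {
    case "GET"    => "READ"
    case "PUT"    => entity.map(_ => "PUT").getOrElse("REJECT")
    case "POST"   => entity.map(_ => "ACTIVATE").getOrElse("REJECT") // ACTIVATE only if the collection allows it
    case "DELETE" => entity.map(_ => "DELETE").getOrElse("REJECT")
    case _        => "REJECT"
  }
}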
|
xin-cai/openwhisk
|
core/controller/src/main/scala/whisk/core/entitlement/Collection.scala
|
Scala
|
apache-2.0
| 5,672
|
import org.scalatestplus.play._
import scala.collection.mutable
class TestSpec extends PlaySpec {
"A Stack" must {
"pop values in last-in-first-out order" in {
val stack = new mutable.Stack[Int]
stack.push(1)
stack.push(2)
stack.pop() mustBe 2
stack.pop() mustBe 1
}
"throw NoSuchElementException if an empty stack is popped" in {
val emptyStack = new mutable.Stack[Int]
a [NoSuchElementException] must be thrownBy {
emptyStack.pop()
}
}
}
}
|
agoetschm/linkmanager
|
server/test/TestSpec.scala
|
Scala
|
gpl-3.0
| 520
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen.agg
import org.apache.flink.table.api.DataTypes
import org.apache.flink.table.dataformat.GenericRow
import org.apache.flink.table.runtime.dataview.PerKeyStateDataViewStore
import org.apache.flink.table.runtime.generated.AggsHandleFunction
import org.apache.flink.table.types.utils.TypeConversions
import org.junit.{Assert, Test}
import java.lang
class AggsHandlerCodeGeneratorTest extends AggTestBase(isBatchMode = false) {
@Test
def testAvg(): Unit = {
val handler = getHandler(needRetract = false, needMerge = false)
handler.resetAccumulators()
handler.accumulate(GenericRow.of("f0", jl(5L), jd(5.3D), jl(2L)))
handler.accumulate(GenericRow.of("f0", jl(6L), jd(6.5D), jl(3L)))
handler.accumulate(GenericRow.of("f0", jl(7L), jd(7.1D), jl(4L)))
val ret = handler.getValue
Assert.assertEquals(6L, ret.getLong(0), 0)
Assert.assertEquals(6.3, ret.getDouble(1), 0)
Assert.assertEquals(3L, ret.getLong(2), 0)
}
@Test
def testAvgWithRetract(): Unit = {
val handler = getHandler(needRetract = true, needMerge = false)
handler.resetAccumulators()
handler.accumulate(GenericRow.of("f0", jl(5L), jd(5.3D), jl(2L)))
handler.accumulate(GenericRow.of("f0", jl(6L), jd(6.3D), jl(3L)))
handler.accumulate(GenericRow.of("f0", jl(7L), jd(7.4D), jl(4L)))
handler.retract(GenericRow.of("f0", jl(9L), jd(5.5D), jl(5L)))
val ret = handler.getValue
Assert.assertEquals(4L, ret.getLong(0), 0)
Assert.assertEquals(6.75, ret.getDouble(1), 0)
Assert.assertEquals(2L, ret.getLong(2), 0)
}
@Test
def testAvgWithMerge(): Unit = {
val handler = getHandler(needRetract = false, needMerge = true)
handler.resetAccumulators()
handler.merge(GenericRow.of("f0", jl(50L), jl(2L), jd(5D), jl(2L), jt(50L, 2L)))
handler.merge(GenericRow.of("f0", jl(40L), jl(2L), jd(4D), jl(2L), jt(40L, 2L)))
handler.merge(GenericRow.of("f0", jl(43L), jl(1L), jd(4D), jl(1L), jt(43L, 1L)))
val ret = handler.getValue
Assert.assertEquals(26L, ret.getLong(0), 0)
Assert.assertEquals(2.6, ret.getDouble(1), 0)
Assert.assertEquals(26L, ret.getLong(2), 0)
}
private def jl(l: Long): lang.Long = {
new lang.Long(l)
}
private def jd(l: Double): lang.Double = {
new lang.Double(l)
}
private def jt(l1: Long, l2: Long): GenericRow = {
GenericRow.of(jl(l1), jl(l2))
}
private def getHandler(needRetract: Boolean, needMerge: Boolean): AggsHandleFunction = {
val generator = new AggsHandlerCodeGenerator(ctx, relBuilder, inputTypes, true)
if (needRetract) {
generator.needRetract()
}
if (needMerge) {
generator.needMerge(1, mergedAccOnHeap = true,
Array(DataTypes.BIGINT, DataTypes.BIGINT, DataTypes.DOUBLE, DataTypes.BIGINT,
TypeConversions.fromLegacyInfoToDataType(imperativeAggFunc.getAccumulatorType)))
}
val handler = generator
.needAccumulate()
.generateAggsHandler("Test", aggInfoList).newInstance(classLoader)
handler.open(new PerKeyStateDataViewStore(context.getRuntimeContext))
handler
}
}
|
fhueske/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/codegen/agg/AggsHandlerCodeGeneratorTest.scala
|
Scala
|
apache-2.0
| 3,931
|
package org.ferrit.core.crawler
import akka.actor.{ActorSystem, Actor, ActorRef, Props}
import akka.testkit.{TestKit, ImplicitSender}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.duration._
import org.scalatest.{BeforeAndAfterAll, FlatSpec}
import org.scalatest.matchers.ShouldMatchers
import org.ferrit.core.crawler.CrawlWorker.Stopped
import org.ferrit.core.filter.FirstMatchUriFilter
import org.ferrit.core.filter.FirstMatchUriFilter.Accept
import org.ferrit.core.http.{HttpClient, Request, Response}
import org.ferrit.core.model.CrawlJob
import org.ferrit.core.robot.{RobotRulesCache, DefaultRobotRulesCache, RobotRulesCacheActor}
import org.ferrit.core.test.{LinkedListHttpClient, FakeHttpClient}
import org.ferrit.core.test.FakeHttpClient.HtmlResponse
import org.ferrit.core.uri.CrawlUri
import org.ferrit.core.util.{Counters, UniqueId}
class TestCrawlerManager extends FlatSpec with ShouldMatchers with BeforeAndAfterAll {
behavior of "CrawlerManager"
import CrawlerManager._
implicit val system = ActorSystem("test")
implicit val execContext = system.dispatcher
val node = "localhost"
val userAgent = "Test Agent"
def logger = system.actorOf(Props[CrawlLog])
override def afterAll():Unit = system.shutdown()
class ManagerTest extends TestKit(system) with ImplicitSender {
/**
* All crawlers use the same robot rules cache
*/
def robotRulesCache(httpClient: HttpClient) = {
system.actorOf(Props(
classOf[RobotRulesCacheActor],
new DefaultRobotRulesCache(httpClient)
))
}
def makeManager(maxCrawlers: Int, httpClient: HttpClient):ActorRef =
system.actorOf(Props(
classOf[CrawlerManager],
node,
userAgent,
maxCrawlers,
httpClient,
robotRulesCache(httpClient)
))
}
def makeConfig(uri: String) = CrawlConfig(
id = UniqueId.next,
userAgent = None,
crawlerName = "Test Crawler " + scala.util.Random.nextInt(10000),
seeds = Seq(CrawlUri(uri)),
uriFilter = new FirstMatchUriFilter(Seq(Accept(uri.r))),
tests = None,
crawlDelayMillis = 30, // crawls complete too quickly if 0
crawlTimeoutMillis = 20000,
maxDepth = Int.MaxValue,
maxFetches = 10000,
maxQueueSize = 10000,
maxRequestFails = 0.5
)
val askTimeout = new Timeout(1.second)
  val NoLogger = Nil // or Seq(logger) to enable crawl logging
it should "not accept new job with duplicate crawler configuration" in new ManagerTest {
val site = "http://site.net"
val manager = makeManager(10, new LinkedListHttpClient(site, 10))
val config = makeConfig(site)
manager ! StartJob(config, NoLogger)
fishForMessage(1.second) {
case c: CrawlJob => true
}
manager ! StartJob(config, NoLogger)
fishForMessage(1.second) {
case JobStartFailed(CrawlRejectException(CrawlerManager.crawlerExists)) => true
}
}
it should "not accept new job when max crawlers exceeded" in new ManagerTest {
val maxCrawlers = 1
val site = "http://site.net"
val manager = makeManager(maxCrawlers, new LinkedListHttpClient(site, 10))
val config1 = makeConfig(site)
val config2 = makeConfig(site).copy(crawlerName = "Another Crawler")
manager ! StartJob(config1, NoLogger)
fishForMessage(1.second) {
case c: CrawlJob => true
}
manager ! StartJob(config2, NoLogger)
fishForMessage(1.second) {
case JobStartFailed(CrawlRejectException(CrawlerManager.tooManyCrawlers)) => true
}
}
it should "not accept new job for a bad crawler configuration" in new ManagerTest {
val manager = makeManager(10, new LinkedListHttpClient("etc", 10))
val config = makeConfig("etc").copy(userAgent = Some(" "))
manager ! StartJob(config, NoLogger)
fishForMessage(1.second) {
case JobStartFailed(CrawlRejectException(CrawlConfig.UserAgentMissing)) => true
}
}
it should "provide information about running crawlers" in new ManagerTest {
val site = "http://site.net"
val manager = makeManager(10, new LinkedListHttpClient(site, 50))
val config = makeConfig(site)
manager ! StartJob(config, NoLogger)
val job = {
var opt: Option[CrawlJob] = None
fishForMessage(1.second) {
case c: CrawlJob => opt = Some(c); true
}
opt match {
case None => fail("new CrawlJob not created or found?")
case Some(job) => job
}
}
manager ! JobsQuery()
fishForMessage(1.second) {
case JobsInfo(Seq(CrawlJob(crawlerId, _, id, _,_,_,_,_,_,_,_,_,_,_,_,_ )))
if (config.id == crawlerId && job.jobId == id) => true
}
}
//
  // The number of pages and the crawl delay need to be such that the crawler
// does not complete by itself before there is a chance to issue a StopCrawl
// and check that the job was stopped and removed.
//
it should "stop a running crawler" in new ManagerTest {
val site = "http://site.net"
val manager = makeManager(10, new LinkedListHttpClient(site, 50))
val config = makeConfig(site)
manager ! StartJob(config, NoLogger)
val job = {
var opt: Option[CrawlJob] = None
fishForMessage(1.second) {
case c: CrawlJob => opt = Some(c); true
}
opt match {
case None => fail("new CrawlJob not created or found?")
case Some(job) => job
}
}
manager ! StopJob(job.jobId)
fishForMessage(1.second) {
case StopAccepted(Seq(id)) if (job.jobId == id) => true
}
manager ! JobsQuery()
fishForMessage(1.second) {
case JobsInfo(jobs) if (jobs.isEmpty) => true
case other =>
manager ! JobsQuery() // keep asking until timeout
false
}
}
it should "stop all running crawlers" in new ManagerTest {
val maxJobs = 10
val site = "http://site.net"
val manager = makeManager(maxJobs, new LinkedListHttpClient(site, 50))
(1 to maxJobs).foreach({i =>
manager ! StartJob(makeConfig(site), NoLogger)
})
val jobsInfo = Await.result(
manager.ask(JobsQuery())(askTimeout).mapTo[JobsInfo], 1.seconds
)
jobsInfo.jobs.size should equal (maxJobs)
manager ! StopAllJobs()
manager ! JobsQuery()
fishForMessage(1.second) {
case JobsInfo(jobs) if (jobs.isEmpty) => true
case other =>
manager ! JobsQuery() // keep asking until timeout
false
}
}
// Creates N crawlers each searching for P pages.
// The HTML template returns a page with P links which
// should result in a crawl of P pages because each page
// is returning the same HTML.
it should "run concurrent crawls" in new ManagerTest {
// !!! CAREFUL with these settings !!!
// -----------------------------------
// Set totalPages/maxCrawlers too high and you can
// max out all cores on a 4 core i7 at 100%
// Takes 8 seconds:
// maxCrawlers = 20
// totalPages = 200
// TOTAL: 4000 pages
// Takes 2 seconds
// maxCrawlers = 200
// totalPages = 20
// TOTAL: 4000 pages
    // Possible reasons why a few crawlers over many pages take longer:
// * Slow queue: fixed by changing to mutable
// * Request delay: fixed by reducing to 0
//
// * Pages with many links are MUCH larger and slower to parse
// (parsing HTML takes longer and more links need extracting)
// * Crawlers are serial, they do not do parallel fetches on same site,
// whereas multiple crawlers can run concurrently.
// The single threaded nature of the crawler can be seen when running
// just one crawl job - the load is bound to one CPU.
// * URL seen queue is larger
val maxCrawlers = 10
val totalPages = 10
// Average page size is more like 20-30kb with less than 100 links
// News sites seem to have larger pages.
// The current HTML generation algorithm does not scale correctly
// #Links HTML Bytes
// ------------------
// 10 438
// 100 3137 2.6 Kb
// 200 6236 6.2 Kb
// 500 15536 15 Kb
// 1000 31037 31 Kb
// Pre-generate HTML before running to avoid cost during test.
// Takes about 500ms for 6000 pages.
val total = maxCrawlers * totalPages
val pages:Array[String] = (0 to total).map({i => makeHtml(totalPages)}).toArray
val httpClient = new FakeHttpClient {
override val _ec = execContext
override def handleRequest(request: Request):Response = {
val idx = scala.util.Random.nextInt(total)
val html = pages(idx) // serve pre-generated page
val pr = HtmlResponse(html)
pr.toResponse(request)
}
}
def runCrawl(site: String):Future[Boolean] = {
val p = Promise[Boolean]()
val config = CrawlConfig(
id = UniqueId.next,
userAgent = None,
crawlerName = "Test Crawler " + scala.util.Random.nextInt(10000),
seeds = Seq(CrawlUri(site)),
uriFilter = new FirstMatchUriFilter(Seq(Accept(site.r))),
tests = None,
crawlDelayMillis = 0, // <--- controls speed of test
crawlTimeoutMillis = 20000,
maxDepth = Int.MaxValue,
maxFetches = 20000,
maxQueueSize = 20000,
maxRequestFails = 0.5
)
val listener = system.actorOf(Props(new Actor {
def receive = {
case Stopped(CompletedOkay, job) =>
val expectedResult =
job.fetchCounters == Map(
"FetchAttempts" -> totalPages,
"FetchSucceeds" -> totalPages
)
p.success(expectedResult)
}
}))
manager ! StartJob(config, Seq(listener))
p.future
}
// Creates single manager for all crawlers
val manager = system.actorOf(Props(
classOf[CrawlerManager],
node,
userAgent,
maxCrawlers,
httpClient,
robotRulesCache(httpClient)
))
// Submits N crawl jobs.
// Things will now start getting hot around here ...
val start = System.currentTimeMillis
val future = Future.sequence(
(1 to maxCrawlers).map(i => runCrawl(s"http://site$i.net"))
)
// A result of Vector(true, true ...) means all crawlers completed okay
val result = Await.result(future, 16.seconds)
result.forall(_ == true) should equal (true)
val info = Await.result(
manager.ask(JobsQuery())(askTimeout).mapTo[JobsInfo],
16.seconds
)
val end = System.currentTimeMillis
info.jobs.size should equal (0) // all finished jobs removed
}
def makeHtml(links: Int) =
("""
|<html>
|<head>
|<title>Test Page</title>
|</head>
|<body>
|<h1>Test Page</h1>
|<p>The section below contains a batch of auto-generated anchors:</p>
""" + {
val range = 0 until (links-1)
scala.util.Random.shuffle(
range.map(i => s"<a href='page$i'>link text</a>")
).mkString
} + """
|</body>
|</html>
|""").stripMargin
}
|
reggoodwin/ferrit
|
src/test/scala/org/ferrit/core/crawler/TestCrawlerManager.scala
|
Scala
|
mit
| 11,233
|
package mg
import domain._
import java.util.concurrent.Executors
import prickle._
import unfiltered.jetty.ContextAdder
import unfiltered.request._
import unfiltered.response._
import scala.collection.mutable.Buffer
import scala.concurrent.{ExecutionContext, Await, Future, Promise}
import scala.util.{Random, Failure, Success}
import scala.concurrent.duration._
object UnfilteredApp {
implicit val ec = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(30))
def main(args: Array[String]) {
val api = unfiltered.filter.Planify {
case req@ControllerApi(method, json) => {
val r = Await.result(invokeController(method, json), 600 seconds)
ResponseString(r)
}
case POST(Path("/reset")) => {
Games.reset()
ResponseString("Server state reset")
}
case req@Path(x) => {
println(s"Unrecognized: method=${req.method} path=$x")
NotFound ~> ResponseString(s"Unrecognized: method=${req.method} path=$x")
}
}
unfiltered.jetty.Server.http(8080).
context("/client"){ ctx: ContextAdder =>
ctx.resources(new java.net.URL(
"""file:../mg-client"""
)).allowAliases(true)
}.filter(api).run()
}
object ControllerApi {
object ControllerMethod {
def unapply(s: String): Option[String] =
if (s.startsWith("/mg/domain/Api/"))
Some(s.stripPrefix("/mg/domain/Api/"))
else
None
}
def unapply[T](req: HttpRequest[T]): Option[(String, String)] = req match {
case POST(Path(ControllerMethod(m))) =>
Some((m, req.reader.readAll()))
case _ => None
}
}
def invokeController(method: String, json: String) = {
val router = MyServer.route[Api](Games)
router.apply {
autowire.Core.Request(List("mg", "domain", "Api", method),
Unpickle[Map[String, String]].fromString(json).get)
}
}
}
object Games extends Api {
def reset(): Unit = {
gamesAndObservers = Map.empty
waitingPlayer = None
}
import UnfilteredApp.ec
import collection.mutable.Buffer
val PairCount = 8
var seq: Int = 1
def nextSeq() = {
val s = seq; seq = seq + 1; s
}
var gamesAndObservers: Map[Int, (Game, Buffer[Promise[Game]])] = Map.empty
def games = gamesAndObservers.values.map(_._1)
var waitingPlayer: Option[(String, Promise[Game])] = None
def join(player1: String): Future[Game] = {
synchronized {
waitingPlayer match {
case Some((player2, promise)) if player1 != player2 => {
println(s"player $player2 waiting, signalling OK, player $player1 joined. Game ready.")
waitingPlayer = None
val g = newGame(player1, player2)
gamesAndObservers = gamesAndObservers.updated(g.id, (g, Buffer.empty[Promise[Game]]))
promise.success(g)
Future(g)
}
case _ => {
val p = Promise[Game]
waitingPlayer = Some(player1, p)
println(s"player $player1 queued up for next game")
p.future
}
}
}
}
def waitOnUpdate(game: Game): Future[Game] = {
println(s"observer watching game ${game.id}")
val p = Promise[Game]
gamesAndObservers.get(game.id).foreach(pr =>
pr._2.append(p)
)
p.future
}
def newGame(player1Name: String, player2Name: String): Game = {
val cards = 1.to(PairCount).flatMap(n =>
Seq(Card(Random.nextInt, n), Card(Random.nextInt, n)))
val shuffled = Random.shuffle(cards)
val player1 = Player(1, player1Name)
val player2 = Player(2, player2Name)
val game = new Game(
nextSeq(),
if (Random.nextBoolean()) player1.id else player2.id,
Map(player1.id -> player1, player2.id -> player2),
shuffled)
game
}
def reveal(game: Game, playerId: Int, card: Card): Game = {
notifyObservers(
if (playerId == game.currentPlayerId) {
if (game.revealed.size == 2)
game.advanceTurn()
else
game.reveal(card)
}
else game
)
}
def advanceTurn(game: Game): Game = {
notifyObservers(game.advanceTurn())
}
private def notifyObservers(game: Game): Game = {
gamesAndObservers.get(game.id).foreach(pr => {
val observers = pr._2
observers.foreach(_.success(game))
})
gamesAndObservers = gamesAndObservers.updated(game.id, (game, Buffer.empty[Promise[Game]]))
game
}
def concede(game: Game, playerId: Int): Game = {
notifyObservers(game.concede(playerId))
}
}
object MyServer extends autowire.Server[String, Unpickler, Pickler]{
def write[Result: Pickler](r: Result) = Pickle.intoString(r)
def read[Result: Unpickler](p: String) = Unpickle[Result].fromString(p).get
}
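// Editor's note: the object below is an illustrative sketch, not part of the original file.
// It shows how the ControllerMethod extractor above maps request paths to Api method names
// before the call is dispatched through autowire in invokeController.
object RoutingSketch {
  import UnfilteredApp.ControllerApi.ControllerMethod
  val matched: Option[String]   = ControllerMethod.unapply("/mg/domain/Api/join") // Some("join")
  val unmatched: Option[String] = ControllerMethod.unapply("/reset")              // None; handled by the /reset route
}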
|
benhutchison/mg-web
|
src/main/scala/mg/UnfilteredApp.scala
|
Scala
|
apache-2.0
| 4,739
|
import sbt._
import Keys._
trait Default {
lazy val defaultSettings = Seq(
organization := "org.hrscala.sbt"
, version := "0.0.1"
, scalaVersion := "2.11.7"
)
}
object MyBuild extends Build with Default {
lazy val api = Project(
"api"
, file("api")
, settings = defaultSettings
)
lazy val core = Project(
"core"
, file("core")
, settings = defaultSettings
) dependsOn api
lazy val root = project in file(".") settings (defaultSettings: _*)
}
|
dstimac/its-so-sbt
|
31-sbt-multi-project-build/project/MyBuild.scala
|
Scala
|
unlicense
| 496
|
package com.landoop.streamreactor.connect.hive.orc.vectors
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector
object BytesVectorWriter extends OrcVectorWriter[BytesColumnVector, Array[Byte]] {
override def write(vector: BytesColumnVector, offset: Int, value: Option[Array[Byte]]): Unit = value match {
case Some(bytes) => vector.setRef(offset, bytes, 0, bytes.length)
case _ =>
vector.isNull(offset) = true
vector.noNulls = false
}
}
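// Editor's note: the object below is an illustrative sketch, not part of the original file.
// It shows how the writer above fills individual rows of a BytesColumnVector (the type is
// imported at the top of this file); the vector size of 1024 is arbitrary.
object BytesVectorWriterSketch {
  val vector = new BytesColumnVector(1024)
  BytesVectorWriter.write(vector, 0, Some("payload".getBytes("UTF-8"))) // stored by reference at row 0
  BytesVectorWriter.write(vector, 1, None)                              // row 1 marked null, noNulls cleared
}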
|
datamountaineer/stream-reactor
|
kafka-connect-hive/src/main/scala/com/landoop/streamreactor/connect/hive/orc/vectors/BytesVectorWriter.scala
|
Scala
|
apache-2.0
| 472
|
package io.mpjsons.impl.deserializer.immutables
import io.mpjsons.impl.deserializer.jsontypes.AbstractJsonArrayDeserializer
import io.mpjsons.impl.util.Context
import io.mpjsons.impl.{DeserializerFactory, StringIterator}
import scala.collection.immutable.ListSet
import scala.reflect.runtime.universe._
/**
* @author Marcin Pieciukiewicz
*/
class ListSetDeserializer[E](deserializerFactory: DeserializerFactory, tpe: Type, context: Context)
extends AbstractJsonArrayDeserializer[E, ListSet[E]](deserializerFactory, tpe, context) {
override def deserialize(jsonIterator: StringIterator): ListSet[E] = {
ListSet(deserializeArray(jsonIterator, tpe): _*)
}
}
|
marpiec/mpjsons
|
src/main/scala/io/mpjsons/impl/deserializer/immutables/ListSetDeserializer.scala
|
Scala
|
apache-2.0
| 672
|
package org.apache.spark.mllib.clustering
import breeze.linalg.{ DenseVector => DBV, DenseMatrix => DBM, diag, max, eigSym, Vector => BV, Matrix => BM }
import org.apache.spark.mllib.util.{ MLUtils, Loader, Saveable }
import org.apache.spark.mllib.stat.distribution.{ MultivariateGaussian, Dirichlet, Wishart, NormalWishart, MultivariateStudentsT }
import org.apache.spark.mllib.linalg.{ Vectors, Vector, Matrices, Matrix }
import org.apache.spark.rdd.RDD
import breeze.linalg._
import breeze.math._
import breeze.numerics._
/*
* This class implements the variational Bayes Gaussian Mixture Model. In this model
* the posterior distribution of the parameters is represented by a product of two
* distributions:
* 1. A Dirichlet distribution for the mixing weights
* 2. A Normal-Wishart distribution for each cluster mean/precision
*
* Furthermore, the predictive density is given by a mixture of multivariate
* Student's T distributions
*
* @param dirichlet The Dirichlet distribution for the mixing weights
 * @param normalWisharts The Array[NormalWishart] of mean/precision pairs for
* each cluster
*/
class VBGaussianMixtureModel(
val dirichlet: Dirichlet,
val normalWisharts: Array[NormalWishart]) extends Serializable { //with Saveable {
def K: Int = dirichlet.length
def D: Int = normalWisharts(0).D
require(K == normalWisharts.length, "Number of mixture components is not consistent between dirichlet and normalWishart distributions")
/*
* Compute posterior probability at points
*/
def predict(points: RDD[Vector]): RDD[Double] = {
val sc = points.sparkContext
val dirBC = sc.broadcast(dirichlet)
val nwsBC = sc.broadcast(normalWisharts)
points.map(point => computePredictiveDensity(point, dirBC.value, nwsBC.value))
}
def predict(point: Vector): Double = {
computePredictiveDensity(point, dirichlet, normalWisharts)
}
/**
* Maps given point to its most likely cluster index.
*/
def predictLabel(points: RDD[Vector]): RDD[Int] = {
points.map(x => predictLabel(x))
}
def predictLabel(point: Vector): Int = {
val p = predictSoft(point)
p.indexOf(p.max)
}
/**
* Given the input vector, return the membership values to all mixture components.
*/
def predictSoft(points: RDD[Vector]): RDD[Array[Double]] = {
val sc = points.sparkContext
val nwBC = sc.broadcast(normalWisharts)
val dirBC = sc.broadcast(dirichlet)
points.map(x => computeResponsibilities(x, nwBC.value, dirBC.value))
}
def predictSoft(point: Vector): Array[Double] = {
computeResponsibilities(point, normalWisharts, dirichlet)
}
/**
* Compute the partial assignments for each vector
*/
private def computeResponsibilities(
pt: Vector,
normalWisharts: Array[NormalWishart],
dirichlet: Dirichlet): Array[Double] = {
val dirExpArray = dirichlet.expectationLog.toArray
val rawResp = dirExpArray.zip(normalWisharts)
.map {
case (dir, nw) => {
          // log-responsibility: E[log pi_k] + 0.5*E[log|Lambda_k|] - 0.5*E[(x-mu)^T Lambda (x-mu)]
          val logRawResp = dir + 0.5 * nw.expectationLogWishart - 0.5 * nw.quadraticForm(pt)
math.exp(logRawResp)
}
}
val normConst = rawResp.sum
rawResp.map(x => x / normConst)
}
/*
* Compute predictive density at a point. Predictive density is a mixture of multivariate
* Student's t distributions
*/
private def computePredictiveDensity(x: Vector, dirichlet: Dirichlet, nws: Array[NormalWishart]): Double = {
val alphaBreeze = dirichlet.alpha.toBreeze
val alphaSum = sum(alphaBreeze)
val weights = alphaBreeze / alphaSum
val mvStudents = nws.map(nw => {
val studentDF = nw.nu + 1.0 - nw.D
val studentPrecision = Matrices.fromBreeze(studentDF / (1 + 1.0 / nw.beta) * inv(nw.L.toBreeze.toDenseMatrix))
new MultivariateStudentsT(nw.mu0, studentPrecision, studentDF)
})
weights.toArray.zip(mvStudents)
.map { case (w, d) => w * d.pdf(x) }
.sum
}
}
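// Editor's note: the object below is an illustrative sketch, not part of the original file.
// It demonstrates, on plain numbers, the normalisation step used in computeResponsibilities:
// per-cluster scores exp(E[log pi_k] + 0.5*E[log|Lambda_k|] - 0.5*quadraticForm) are divided
// by their sum so the returned memberships sum to one. The log scores below are made up.
object ResponsibilitySketch {
  val logRaw = Array(-1.2, -0.3, -2.5)               // hypothetical log-responsibilities for 3 clusters
  val raw = logRaw.map(math.exp)
  val responsibilities = raw.map(_ / raw.sum)        // normalised memberships, sum to 1.0
}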
|
geb5101h/BayesGaussianMixture
|
src/main/scala/bayesGaussianMixture/VBGaussianMixtureModel.scala
|
Scala
|
mit
| 3,984
|
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.features.avro.serde
import java.io.OutputStream
import java.nio._
import java.util.concurrent.TimeUnit
import java.util.{Date, UUID, Collection => JCollection, List => JList}
import com.github.benmanes.caffeine.cache.{CacheLoader, Caffeine, LoadingCache}
import com.google.common.collect.Maps
import com.vividsolutions.jts.geom.Geometry
import org.apache.avro.generic.{GenericData, GenericDatumWriter, GenericRecord}
import org.apache.avro.io.{BinaryEncoder, EncoderFactory}
import org.apache.avro.{Schema, SchemaBuilder}
import org.apache.commons.codec.binary.Hex
import org.geotools.data.DataUtilities
import org.geotools.feature.`type`.{AttributeDescriptorImpl, Types}
import org.geotools.feature.{AttributeImpl, GeometryAttributeImpl}
import org.geotools.geometry.jts.ReferencedEnvelope
import org.geotools.util.Converters
import org.locationtech.geomesa.utils.text.WKBUtils
import org.opengis.feature.`type`.{AttributeDescriptor, Name}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.feature.{GeometryAttribute, Property}
import org.opengis.filter.identity.FeatureId
import org.opengis.geometry.BoundingBox
import scala.collection.JavaConversions._
import scala.util.Try
/*
* TODO: OLD CLASS KEPT AROUND FOR TESTING the WRITE method and speed...
*
* TODO: Should be removed after stable 1.0.0 release and AvroSimpleFeatureWriter is known to work OK
*/
class Version2ASF(id: FeatureId, sft: SimpleFeatureType)
extends SimpleFeature
with Serializable {
import Version2ASF._
val values = Array.ofDim[AnyRef](sft.getAttributeCount)
@transient val userData = collection.mutable.HashMap.empty[AnyRef, AnyRef]
@transient val typeMap = typeMapCache.get(sft)
@transient val names = nameCache.get(sft)
@transient val nameIndex = nameIndexCache.get(sft)
@transient val schema = avroSchemaCache.get(sft)
def write(datumWriter: GenericDatumWriter[GenericRecord], encoder: BinaryEncoder) {
val record = new GenericData.Record(schema)
record.put(Version2ASF.AVRO_SIMPLE_FEATURE_VERSION, Version2ASF.VERSION)
record.put(Version2ASF.FEATURE_ID_AVRO_FIELD_NAME, getID)
// We've tried to optimize this.
for (i <- 0 until sft.getAttributeCount) {
if (values(i) == null) {
record.put(i+2, null)
} else {
record.put(i+2, convertValue(i, values(i)))
}
}
datumWriter.write(record, encoder)
encoder.flush()
}
def convertValue(idx: Int, v: AnyRef) = typeMap(names(idx)).conv.apply(v)
val gdw = new GenericDatumWriter[GenericRecord](schema)
var encoder: BinaryEncoder = null
def write(os: OutputStream) {
encoder = EncoderFactory.get.binaryEncoder(os, null)
write(gdw, encoder)
}
def getFeatureType = sft
def getType = sft
def getIdentifier = id
def getID = id.getID
def getAttribute(name: String) = nameIndex.get(name).map(getAttribute).orNull
def getAttribute(name: Name) = getAttribute(name.getLocalPart)
def getAttribute(index: Int) = values(index)
def setAttribute(name: String, value: Object) = setAttribute(nameIndex(name), value)
def setAttribute(name: Name, value: Object) = setAttribute(name.getLocalPart, value)
def setAttribute(index: Int, value: Object) = setAttributeNoConvert(index, Converters.convert(value, getFeatureType.getDescriptor(index).getType.getBinding).asInstanceOf[AnyRef])
def setAttributes(vals: JList[Object]) = vals.zipWithIndex.foreach { case (v, idx) => setAttribute(idx, v) }
def setAttributes(vals: Array[Object])= vals.zipWithIndex.foreach { case (v, idx) => setAttribute(idx, v) }
def setAttributeNoConvert(index: Int, value: Object) = values(index) = value
def setAttributeNoConvert(name: String, value: Object): Unit = setAttributeNoConvert(nameIndex(name), value)
def setAttributeNoConvert(name: Name, value: Object): Unit = setAttributeNoConvert(name.getLocalPart, value)
def setAttributesNoConvert(vals: JList[Object]) = vals.zipWithIndex.foreach { case (v, idx) => values(idx) = v }
def setAttributesNoConvert(vals: Array[Object])= vals.zipWithIndex.foreach { case (v, idx) => values(idx) = v }
def getAttributeCount = values.length
def getAttributes: JList[Object] = values.toList
def getDefaultGeometry: Object = Try(sft.getGeometryDescriptor.getName).map { getAttribute }.getOrElse(null)
def setDefaultGeometry(geo: Object) = setAttribute(sft.getGeometryDescriptor.getName, geo)
def getBounds: BoundingBox = getDefaultGeometry match {
case g: Geometry =>
new ReferencedEnvelope(g.getEnvelopeInternal, sft.getCoordinateReferenceSystem)
case _ =>
new ReferencedEnvelope(sft.getCoordinateReferenceSystem)
}
def getDefaultGeometryProperty: GeometryAttribute = {
val geoDesc = sft.getGeometryDescriptor
    if (geoDesc != null) new GeometryAttributeImpl(getDefaultGeometry, geoDesc, null) else null
}
  def setDefaultGeometryProperty(geoAttr: GeometryAttribute) =
    if (geoAttr != null) setDefaultGeometry(geoAttr.getValue) else setDefaultGeometry(null)
def getProperties: JCollection[Property] =
getAttributes.zip(sft.getAttributeDescriptors).map {
case(attribute, attributeDescriptor) =>
new AttributeImpl(attribute, attributeDescriptor, id)
}
def getProperties(name: Name): JCollection[Property] = getProperties(name.getLocalPart)
def getProperties(name: String): JCollection[Property] = getProperties.filter(_.getName.toString == name)
def getProperty(name: Name): Property = getProperty(name.getLocalPart)
def getProperty(name: String): Property =
Option(sft.getDescriptor(name)) match {
case Some(descriptor) => new AttributeImpl(getAttribute(name), descriptor, id)
case _ => null
}
def getValue: JCollection[_ <: Property] = getProperties
def setValue(values: JCollection[Property]) = values.zipWithIndex.foreach { case (p, idx) =>
this.values(idx) = p.getValue}
def getDescriptor: AttributeDescriptor = new AttributeDescriptorImpl(sft, sft.getName, 0, Int.MaxValue, true, null)
def getName: Name = sft.getName
def getUserData = userData
def isNillable = true
def setValue(newValue: Object) = setValue (newValue.asInstanceOf[JCollection[Property]])
def validate() = values.zipWithIndex.foreach { case (v, idx) => Types.validate(getType.getDescriptor(idx), v) }
}
object Version2ASF {
def apply(sf: SimpleFeature) = {
val asf = new Version2ASF(sf.getIdentifier, sf.getFeatureType)
for (i <- 0 until sf.getAttributeCount) asf.setAttribute(i, sf.getAttribute(i))
asf
}
import scala.collection.JavaConversions._
val primitiveTypes =
List(
classOf[String],
classOf[java.lang.Integer],
classOf[Int],
classOf[java.lang.Long],
classOf[Long],
classOf[java.lang.Double],
classOf[Double],
classOf[java.lang.Float],
classOf[Float],
classOf[java.lang.Boolean],
classOf[Boolean]
)
def loadingCacheBuilder[V <: AnyRef](f: SimpleFeatureType => V) =
Caffeine
.newBuilder
.maximumSize(100)
.expireAfterWrite(10, TimeUnit.MINUTES)
.build(
new CacheLoader[SimpleFeatureType, V] {
def load(sft: SimpleFeatureType): V = f(sft)
}
)
case class Binding(clazz: Class[_], conv: AnyRef => Any)
val typeMapCache: LoadingCache[SimpleFeatureType, Map[String, Binding]] =
loadingCacheBuilder { sft =>
sft.getAttributeDescriptors.map { ad =>
val conv =
ad.getType.getBinding match {
case t if primitiveTypes.contains(t) => (v: AnyRef) => v
case t if classOf[UUID].isAssignableFrom(t) =>
(v: AnyRef) => {
val uuid = v.asInstanceOf[UUID]
val bb = ByteBuffer.allocate(16)
bb.putLong(uuid.getMostSignificantBits)
bb.putLong(uuid.getLeastSignificantBits)
bb.flip
bb
}
case t if classOf[Date].isAssignableFrom(t) =>
(v: AnyRef) => v.asInstanceOf[Date].getTime
case t if classOf[Geometry].isAssignableFrom(t) =>
(v: AnyRef) => ByteBuffer.wrap(WKBUtils.write(v.asInstanceOf[Geometry]))
case t if classOf[Array[Byte]].isAssignableFrom(t) =>
(v: AnyRef) => ByteBuffer.wrap(v.asInstanceOf[Array[Byte]])
case _ =>
(v: AnyRef) =>
            Option(Converters.convert(v, classOf[String])).getOrElse(v.toString) // fall back to toString rather than returning a function
}
(encodeAttributeName(ad.getLocalName), Binding(ad.getType.getBinding, conv))
}.toMap
}
val avroSchemaCache: LoadingCache[SimpleFeatureType, Schema] =
loadingCacheBuilder { sft => generateSchema(sft) }
val nameCache: LoadingCache[SimpleFeatureType, Array[String]] =
loadingCacheBuilder { sft => DataUtilities.attributeNames(sft).map(encodeAttributeName) }
val nameIndexCache: LoadingCache[SimpleFeatureType, Map[String, Int]] =
loadingCacheBuilder { sft =>
DataUtilities.attributeNames(sft).map { name => (name, sft.indexOf(name))}.toMap
}
val datumWriterCache: LoadingCache[SimpleFeatureType, GenericDatumWriter[GenericRecord]] =
loadingCacheBuilder { sft =>
new GenericDatumWriter[GenericRecord](avroSchemaCache.get(sft))
}
val attributeNameLookUp = Maps.newConcurrentMap[String, String]()
final val FEATURE_ID_AVRO_FIELD_NAME: String = "__fid__"
final val AVRO_SIMPLE_FEATURE_VERSION: String = "__version__"
final val VERSION: Int = 2
final val AVRO_NAMESPACE: String = "org.geomesa"
def encode(s: String): String = "_" + Hex.encodeHexString(s.getBytes("UTF8"))
def decode(s: String): String = new String(Hex.decodeHex(s.substring(1).toCharArray), "UTF8")
def encodeAttributeName(s: String): String = attributeNameLookUp.getOrElseUpdate(s, encode(s))
def decodeAttributeName(s: String): String = attributeNameLookUp.getOrElseUpdate(s, decode(s))
def generateSchema(sft: SimpleFeatureType): Schema = {
val initialAssembler: SchemaBuilder.FieldAssembler[Schema] =
SchemaBuilder.record(encodeAttributeName(sft.getTypeName))
.namespace(AVRO_NAMESPACE)
.fields
.name(AVRO_SIMPLE_FEATURE_VERSION).`type`.intType.noDefault
.name(FEATURE_ID_AVRO_FIELD_NAME).`type`.stringType.noDefault
val result =
sft.getAttributeDescriptors.foldLeft(initialAssembler) { case (assembler, ad) =>
addField(assembler, encodeAttributeName(ad.getLocalName), ad.getType.getBinding, ad.isNillable)
}
result.endRecord
}
def addField(assembler: SchemaBuilder.FieldAssembler[Schema],
name: String,
ct: Class[_],
nillable: Boolean): SchemaBuilder.FieldAssembler[Schema] = {
val baseType = if (nillable) assembler.name(name).`type`.nullable() else assembler.name(name).`type`
ct match {
case c if classOf[String].isAssignableFrom(c) => baseType.stringType.noDefault
case c if classOf[java.lang.Integer].isAssignableFrom(c) => baseType.intType.noDefault
case c if classOf[java.lang.Long].isAssignableFrom(c) => baseType.longType.noDefault
case c if classOf[java.lang.Double].isAssignableFrom(c) => baseType.doubleType.noDefault
case c if classOf[java.lang.Float].isAssignableFrom(c) => baseType.floatType.noDefault
case c if classOf[java.lang.Boolean].isAssignableFrom(c) => baseType.booleanType.noDefault
case c if classOf[UUID].isAssignableFrom(c) => baseType.bytesType.noDefault
case c if classOf[Date].isAssignableFrom(c) => baseType.longType.noDefault
case c if classOf[Geometry].isAssignableFrom(c) => baseType.bytesType.noDefault
case c if classOf[Array[Byte]].isAssignableFrom(c) => baseType.bytesType.noDefault
}
}
}
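// Editor's note: the object below is an illustrative sketch, not part of the original file.
// It shows how generateSchema above maps a simple feature type to an Avro record: the record
// gets __version__ and __fid__ fields plus one hex-encoded field per attribute, with Date
// stored as a long and Geometry as WKB bytes. The type name and attribute spec are made up;
// DataUtilities and Schema are already imported at the top of this file.
object Version2ASFSketch {
  val sft = DataUtilities.createType("example", "name:String,dtg:Date,*geom:Point:srid=4326")
  val schema: Schema = Version2ASF.generateSchema(sft)
}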
|
tkunicki/geomesa
|
geomesa-features/geomesa-feature-avro/src/test/scala/org/locationtech/geomesa/features/avro/serde/Version2ASF.scala
|
Scala
|
apache-2.0
| 12,465
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.aggregate
import java.util.{Collections, ArrayList => JArrayList, List => JList}
import org.apache.flink.api.common.state.{MapState, MapStateDescriptor, ValueState, ValueStateDescriptor}
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.api.java.typeutils.ListTypeInfo
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.streaming.api.operators.TimestampedCollector
import org.apache.flink.table.runtime.types.{CRow, CRowTypeInfo}
import org.apache.flink.types.Row
import org.apache.flink.util.{Collector, Preconditions}
/**
 * ProcessFunction to sort on event-time and possibly additional secondary sort attributes.
*
* @param inputRowType The data type of the input data.
* @param rowtimeIdx The index of the rowtime field.
* @param rowComparator A comparator to sort rows.
*/
class RowTimeSortProcessFunction(
private val inputRowType: CRowTypeInfo,
private val rowtimeIdx: Int,
private val rowComparator: Option[CollectionRowComparator])
extends ProcessFunction[CRow, CRow] {
Preconditions.checkNotNull(rowComparator)
// State to collect rows between watermarks.
private var dataState: MapState[Long, JList[Row]] = _
  // State that keeps the last triggering timestamp. Used to filter late events.
private var lastTriggeringTsState: ValueState[Long] = _
private var outputC: CRow = _
override def open(config: Configuration) {
val keyTypeInformation: TypeInformation[Long] =
BasicTypeInfo.LONG_TYPE_INFO.asInstanceOf[TypeInformation[Long]]
val valueTypeInformation: TypeInformation[JList[Row]] =
new ListTypeInfo[Row](inputRowType.asInstanceOf[CRowTypeInfo].rowType)
val mapStateDescriptor: MapStateDescriptor[Long, JList[Row]] =
new MapStateDescriptor[Long, JList[Row]](
"dataState",
keyTypeInformation,
valueTypeInformation)
dataState = getRuntimeContext.getMapState(mapStateDescriptor)
val lastTriggeringTsDescriptor: ValueStateDescriptor[Long] =
new ValueStateDescriptor[Long]("lastTriggeringTsState", classOf[Long])
lastTriggeringTsState = getRuntimeContext.getState(lastTriggeringTsDescriptor)
outputC = new CRow()
}
override def processElement(
inputC: CRow,
ctx: ProcessFunction[CRow, CRow]#Context,
out: Collector[CRow]): Unit = {
val input = inputC.row
// timestamp of the processed row
val rowtime = input.getField(rowtimeIdx).asInstanceOf[Long]
val lastTriggeringTs = lastTriggeringTsState.value
    // drop the row if it is late, i.e. not newer than the last triggering timestamp
if (rowtime > lastTriggeringTs) {
// get list for timestamp
val rows = dataState.get(rowtime)
if (null != rows) {
rows.add(input)
dataState.put(rowtime, rows)
} else {
val rows = new JArrayList[Row]
rows.add(input)
dataState.put(rowtime, rows)
// register event time timer
ctx.timerService.registerEventTimeTimer(rowtime)
}
}
}
override def onTimer(
timestamp: Long,
ctx: ProcessFunction[CRow, CRow]#OnTimerContext,
out: Collector[CRow]): Unit = {
// remove timestamp set outside of ProcessFunction.
out.asInstanceOf[TimestampedCollector[_]].eraseTimestamp()
// gets all rows for the triggering timestamps
val inputs: JList[Row] = dataState.get(timestamp)
if (null != inputs) {
// sort rows on secondary fields if necessary
if (rowComparator.isDefined) {
Collections.sort(inputs, rowComparator.get)
}
// emit rows in order
var i = 0
while (i < inputs.size) {
outputC.row = inputs.get(i)
out.collect(outputC)
i += 1
}
// remove emitted rows from state
dataState.remove(timestamp)
lastTriggeringTsState.update(timestamp)
}
}
}
|
zimmermatt/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/RowTimeSortProcessFunction.scala
|
Scala
|
apache-2.0
| 4,801
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.protocol.v5.kernel
import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import org.apache.toree.kernel.protocol.v5.{MessageType, SocketType}
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
import test.utils.TestProbeProxyActor
import scala.concurrent.duration._
class ActorLoaderSpec extends TestKit(ActorSystem("ActorLoaderSpecSystem"))
with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {
describe("ActorLoader"){
describe("#load( MessageType )"){
it("should load an ActorSelection that has been loaded into the system"){
val testProbe: TestProbe = TestProbe()
system.actorOf(Props(classOf[TestProbeProxyActor], testProbe),
MessageType.Outgoing.ClearOutput.toString)
val actorLoader: ActorLoader = SimpleActorLoader(system)
actorLoader.load(MessageType.Outgoing.ClearOutput) ! "<Test Message>"
testProbe.expectMsg("<Test Message>")
}
it("should expect no message when there is no actor"){
val testProbe: TestProbe = TestProbe()
val actorLoader: ActorLoader = SimpleActorLoader(system)
actorLoader.load(MessageType.Outgoing.CompleteReply) ! "<Test Message>"
testProbe.expectNoMsg(25.millis)
        // This tests whether the messages go to the actor inbox or to the dead letter inbox
system.actorOf(Props(classOf[TestProbeProxyActor], testProbe),
MessageType.Outgoing.CompleteReply.toString)
testProbe.expectNoMsg(25.millis)
}
}
describe("#load( SocketType )"){
it("should load an ActorSelection that has been loaded into the system"){
val testProbe: TestProbe = TestProbe()
system.actorOf(Props(classOf[TestProbeProxyActor], testProbe), SocketType.Shell.toString)
val actorLoader: ActorLoader = SimpleActorLoader(system)
actorLoader.load(SocketType.Shell) ! "<Test Message>"
testProbe.expectMsg("<Test Message>")
}
it("should expect no message when there is no actor"){
val testProbe: TestProbe = TestProbe()
val actorLoader: ActorLoader = SimpleActorLoader(system)
actorLoader.load(SocketType.IOPub) ! "<Test Message>"
testProbe.expectNoMsg(25.millis)
        // This tests whether the messages go to the actor inbox or to the dead letter inbox
system.actorOf(Props(classOf[TestProbeProxyActor], testProbe), SocketType.IOPub.toString)
testProbe.expectNoMsg(25.millis)
}
}
}
}
|
asorianostratio/incubator-toree
|
kernel/src/test/scala/org/apache/toree/kernel/protocol/v5/kernel/ActorLoaderSpec.scala
|
Scala
|
apache-2.0
| 3,408
|
package controllers
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import actions.Actions
import daos.ProductDao
import be.objectify.deadbolt.scala.{ActionBuilders, AuthenticatedRequest}
import javax.inject.Inject
import play.api.i18n.{MessagesApi, I18nSupport}
import play.api.mvc.Controller
class Application @Inject() (
val actionBuilder : ActionBuilders,
val actions : Actions,
val dao : ProductDao,
val messagesApi : MessagesApi
) extends Controller with I18nSupport {
def index = actions.timedAction { implicit authRequest =>
Future {
val login = authRequest.subject.map(s => s.identifier).getOrElse("unknown")
Ok(views.html.index(login))
}
}
def employeeIndex = actions.roleAction("employee") { implicit authRequest =>
Future {
Ok(views.html.employeeIndex(getLogin(authRequest)))
}
}
def allProducts = actions.timedAction { implicit req =>
dao.all.map( ps => Ok( views.html.product.allProducts( ps ) ) )
}
private[this] def getLogin(implicit req: AuthenticatedRequest[_]): String =
req.subject.map(s => s.identifier).getOrElse("unknown")
}
|
kdoomsday/doomcart
|
app/controllers/Application.scala
|
Scala
|
unlicense
| 1,179
|
/*
* Copyright (c) 2014-2022 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.iglu.schemaddl.jsonschema
sealed trait Keyword {
def name: Symbol
/** Can contain another schema */
def recursive: Boolean
}
object Keyword {
  // increased/decreased
// added/removed (type, enum)
// copy of a Schema
// with prop: (was, became)
case object MultipleOf extends Keyword {
val name = Symbol("multipleOf")
val recursive = false
}
case object Minimum extends Keyword {
val name = Symbol("minimum")
val recursive = false
}
case object Maximum extends Keyword {
val name = Symbol("maximum")
val recursive = false
}
case object MaxLength extends Keyword {
val name = Symbol("maxLength")
val recursive = false
}
case object MinLength extends Keyword {
val name = Symbol("minLength")
val recursive = false
}
case object Pattern extends Keyword {
val name = Symbol("pattern")
val recursive = false
}
case object Format extends Keyword {
val name = Symbol("format")
val recursive = false
}
case object SchemaUri extends Keyword {
val name = Symbol("$schema")
val recursive = false
}
case object Items extends Keyword {
val name = Symbol("items")
val recursive = true
}
case object AdditionalItems extends Keyword {
val name = Symbol("additionalItems")
val recursive = true
}
case object MinItems extends Keyword {
val name = Symbol("minItems")
val recursive = false
}
case object MaxItems extends Keyword {
val name = Symbol("maxItems")
val recursive = false
}
case object Properties extends Keyword {
val name = Symbol("properties")
val recursive = true
}
case object AdditionalProperties extends Keyword {
val name = Symbol("additionalProperties")
val recursive = true
}
case object Required extends Keyword {
val name = Symbol("required")
val recursive = false
}
case object PatternProperties extends Keyword {
val name = Symbol("patternProperties")
val recursive = true
}
case object Type extends Keyword {
val name = Symbol("type")
val recursive = false
}
case object Enum extends Keyword {
val name = Symbol("enum")
val recursive = false
}
case object OneOf extends Keyword {
val name = Symbol("oneOf")
val recursive = true
}
case object Description extends Keyword {
val name = Symbol("description")
val recursive = false
}
}
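// ---------------------------------------------------------------------------
// Added usage sketch (not part of the original file): a hedged illustration of
// how the `recursive` flag defined above might drive processing, separating
// keywords whose values can themselves contain schemas from the scalar ones.
// `KeywordSketch` is a hypothetical name; only members defined above are used.
object KeywordSketch {
  private val sample: List[Keyword] =
    List(Keyword.MultipleOf, Keyword.Items, Keyword.Properties, Keyword.Type, Keyword.OneOf)

  // Keywords that may hold nested schemas vs. plain scalar constraints.
  val (nested, scalar) = sample.partition(_.recursive)
  // nested == List(Items, Properties, OneOf); scalar == List(MultipleOf, Type)
}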
|
snowplow/schema-ddl
|
modules/core/src/main/scala/com.snowplowanalytics/iglu.schemaddl/jsonschema/Keyword.scala
|
Scala
|
apache-2.0
| 3,134
|
/************************************************************************\\
** Project **
** ______ ______ __ ______ ____ **
** / ____/ / __ / / / / __ / / __/ (c) 2011-2014 **
** / /__ / /_/ / / / / /_/ / / /_ **
** /___ / / ____/ / / / __ / / __/ Erik Osheim, Tom Switzer **
** ____/ / / / / / / / | | / /__ **
** /_____/ /_/ /_/ /_/ |_| /____/ All rights reserved. **
** **
** Redistribution and use permitted under the MIT license. **
** **
\\************************************************************************/
package spire
package random
package rng
import spire.syntax.cfor._
import spire.util.Pack
import java.nio.ByteBuffer
import java.util.Arrays
/**
* This is a 32-bit Scala implementation of MersenneTwister based on MT19937.c.
*
* <p>MersenneTwister is a fast, 623-dimensionally equidistributed pseudo random number generator
* with a <tt>2<sup>19937</sup> - 1</tt> long period.
*
* <p><b>Reference: </b>
* Makoto Matsumoto and Takuji Nishimura:
* "Mersenne Twister: A 623-Dimensionally Equidistributed Uniform Pseudo-Random Number Generator",
* <i>ACM Transactions on Modeling and Computer Simulation,</i> Vol. 8, No. 1, January 1998, pp 3--30.
*
* @see <a href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.c">MT19937.c</a>
* @see <a href="http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html">Mersenne Twister Home Page</a>
* @see <a href="http://en.wikipedia.org/wiki/Mersenne_twister">Mersenne Twister @ Wikipedia</a>
* @author <a href="mailto:dusan.kysel@gmail.com">Dušan Kysel</a>
*/
final class MersenneTwister32 protected[random](mt: Array[Int], mti0: Int = 625) extends IntBasedGenerator { // N + 1 == 625
import MersenneTwister32.{UpperMask, LowerMask, N, M, N_M, N_1, M_N, M_1, BYTES, mag01}
private var mti = mti0
def copyInit: MersenneTwister32 = new MersenneTwister32(mt.clone, mti)
def getSeedBytes(): Array[Byte] = {
val bytes = new Array[Byte](BYTES)
val bb = ByteBuffer.wrap(bytes)
cfor(0)(_ < N, _ + 1) { i => bb.putInt(mt(i)) }
bb.putInt(mti)
bytes
}
def setSeedBytes(bytes: Array[Byte]): Unit = {
val bs = if (bytes.length < BYTES) Arrays.copyOf(bytes, BYTES) else bytes
val bb = ByteBuffer.wrap(bs)
cfor(0)(_ < N, _ + 1) { i => mt(i) = bb.getInt() }
mti = bb.getInt
}
// Generates the next random integer in the sequence
def nextInt(): Int = {
var y = 0
if (mti >= N) {
var kk = 0
while (kk < N_M) {
y = (mt(kk) & UpperMask) | (mt(kk + 1) & LowerMask)
mt(kk) = mt(kk + M) ^ (y >>> 1) ^ mag01(y)
kk += 1
}
while (kk < N_1) {
y = (mt(kk) & UpperMask) | (mt(kk + 1) & LowerMask)
mt(kk) = mt(kk + (M_N)) ^ (y >>> 1) ^ mag01(y)
kk += 1
}
y = (mt(N_1) & UpperMask) | (mt(0) & LowerMask)
mt(N_1) = mt(M_1) ^ (y >>> 1) ^ mag01(y)
mti = 0
}
y = mt(mti)
mti += 1
// Tempering
y ^= (y >>> 11)
y ^= (y << 7) & 0x9D2C5680
y ^= (y << 15) & 0xEFC60000
y ^= (y >>> 18)
y
}
}
object MersenneTwister32 extends GeneratorCompanion[MersenneTwister32, (Array[Int], Int)] {
@inline private val UpperMask = 0x80000000 // = Int.MinValue = 0xFFFFFFFF ^ Int.MaxValue
@inline private val LowerMask = 0x7FFFFFFF // = Int.MaxValue = 0xFFFFFFFF ^ Int.MinValue
@inline private val N = 624
@inline private val M = 397
@inline private val N_M = N - M
@inline private val N_1 = N - 1
@inline private val M_N = M - N
@inline private val M_1 = M - 1
@inline private val BYTES = N * 4 + 4
@inline private def mag01(x: Int) = if((x & 1) == 0) 0 else 0x9908B0DF
def randomSeed(): (Array[Int], Int) = (Utils.seedFromInt(N, Utils.intFromTime()), N + 1)
def fromSeed(seed: (Array[Int], Int)): MersenneTwister32 =
seed match {
case (mt, mti) =>
assert(mt.length == N)
new MersenneTwister32(mt, mti)
}
def fromArray(arr: Array[Int]): MersenneTwister32 = fromSeed((Utils.seedFromArray(N, arr), N + 1))
def fromBytes(bytes: Array[Byte]): MersenneTwister32 = fromArray(Pack.intsFromBytes(bytes, bytes.length / 4))
def fromTime(time: Long = System.nanoTime): MersenneTwister32 = fromSeed((Utils.seedFromInt(N, Utils.intFromTime(time)), N + 1))
}
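// ---------------------------------------------------------------------------
// Added usage sketch (not part of the original file): a minimal, hedged example
// of constructing and drawing from the generator defined above. It relies only
// on the companion constructor shown in this file (fromTime) and on nextInt();
// `MersenneTwister32Sketch` is a hypothetical name used for illustration.
object MersenneTwister32Sketch {
  def demo(): Unit = {
    val rng = MersenneTwister32.fromTime()      // seed from the current time
    val draws = Array.fill(5)(rng.nextInt())    // five pseudo-random Ints
    println(draws.mkString(", "))
  }
}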
|
lrytz/spire
|
core/src/main/scala/spire/random/rng/MersenneTwister32.scala
|
Scala
|
mit
| 4,663
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator.transaction
import kafka.utils.MockScheduler
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.TransactionResult
import org.apache.kafka.common.utils.MockTime
import org.easymock.{Capture, EasyMock, IAnswer}
import org.junit.Assert._
import org.junit.Test
import scala.collection.mutable
class TransactionCoordinatorTest {
val time = new MockTime()
var nextPid: Long = 0L
val pidManager: ProducerIdManager = EasyMock.createNiceMock(classOf[ProducerIdManager])
val transactionManager: TransactionStateManager = EasyMock.createNiceMock(classOf[TransactionStateManager])
val transactionMarkerChannelManager: TransactionMarkerChannelManager = EasyMock.createNiceMock(classOf[TransactionMarkerChannelManager])
val capturedTxn: Capture[TransactionMetadata] = EasyMock.newCapture()
val capturedErrorsCallback: Capture[Errors => Unit] = EasyMock.newCapture()
val brokerId = 0
val coordinatorEpoch = 0
private val transactionalId = "known"
private val producerId = 10
private val producerEpoch: Short = 1
private val txnTimeoutMs = 1
private val partitions = mutable.Set[TopicPartition](new TopicPartition("topic1", 0))
private val scheduler = new MockScheduler(time)
val coordinator = new TransactionCoordinator(brokerId,
scheduler,
pidManager,
transactionManager,
transactionMarkerChannelManager,
time)
var result: InitProducerIdResult = _
var error: Errors = Errors.NONE
private def mockPidManager(): Unit = {
EasyMock.expect(pidManager.generateProducerId())
.andAnswer(new IAnswer[Long] {
override def answer(): Long = {
nextPid += 1
nextPid - 1
}
})
.anyTimes()
}
private def initPidGenericMocks(transactionalId: String): Unit = {
mockPidManager()
EasyMock.expect(transactionManager.validateTransactionTimeoutMs(EasyMock.anyInt()))
.andReturn(true)
.anyTimes()
}
@Test
def shouldReturnInvalidRequestWhenTransactionalIdIsEmpty(): Unit = {
mockPidManager()
EasyMock.replay(pidManager)
coordinator.handleInitProducerId("", txnTimeoutMs, initProducerIdMockCallback)
assertEquals(InitProducerIdResult(-1L, -1, Errors.INVALID_REQUEST), result)
coordinator.handleInitProducerId("", txnTimeoutMs, initProducerIdMockCallback)
assertEquals(InitProducerIdResult(-1L, -1, Errors.INVALID_REQUEST), result)
}
@Test
def shouldAcceptInitPidAndReturnNextPidWhenTransactionalIdIsNull(): Unit = {
mockPidManager()
EasyMock.replay(pidManager)
coordinator.handleInitProducerId(null, txnTimeoutMs, initProducerIdMockCallback)
assertEquals(InitProducerIdResult(0L, 0, Errors.NONE), result)
coordinator.handleInitProducerId(null, txnTimeoutMs, initProducerIdMockCallback)
assertEquals(InitProducerIdResult(1L, 0, Errors.NONE), result)
}
@Test
def shouldInitPidWithEpochZeroForNewTransactionalId(): Unit = {
initPidGenericMocks(transactionalId)
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(None))
.once()
EasyMock.expect(transactionManager.putTransactionStateIfNotExists(EasyMock.eq(transactionalId), EasyMock.capture(capturedTxn)))
.andAnswer(new IAnswer[Either[Errors, CoordinatorEpochAndTxnMetadata]] {
override def answer(): Either[Errors, CoordinatorEpochAndTxnMetadata] = {
assertTrue(capturedTxn.hasCaptured)
Right(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, capturedTxn.getValue))
}
})
.once()
EasyMock.expect(transactionManager.appendTransactionToLog(
EasyMock.eq(transactionalId),
EasyMock.eq(coordinatorEpoch),
EasyMock.anyObject().asInstanceOf[TxnTransitMetadata],
EasyMock.capture(capturedErrorsCallback),
EasyMock.anyObject()))
.andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {
capturedErrorsCallback.getValue.apply(Errors.NONE)
}
})
.anyTimes()
EasyMock.replay(pidManager, transactionManager)
coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, initProducerIdMockCallback)
assertEquals(InitProducerIdResult(nextPid - 1, 0, Errors.NONE), result)
}
@Test
def shouldGenerateNewProducerIdIfEpochsExhausted(): Unit = {
initPidGenericMocks(transactionalId)
val txnMetadata = new TransactionMetadata(transactionalId, producerId, (Short.MaxValue - 1).toShort,
txnTimeoutMs, Empty, mutable.Set.empty, time.milliseconds(), time.milliseconds())
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata))))
EasyMock.expect(transactionManager.appendTransactionToLog(
EasyMock.eq(transactionalId),
EasyMock.eq(coordinatorEpoch),
EasyMock.anyObject().asInstanceOf[TxnTransitMetadata],
EasyMock.capture(capturedErrorsCallback),
EasyMock.anyObject()
)).andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {
capturedErrorsCallback.getValue.apply(Errors.NONE)
}
})
EasyMock.replay(pidManager, transactionManager)
coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, initProducerIdMockCallback)
assertNotEquals(producerId, result.producerId)
assertEquals(0, result.producerEpoch)
assertEquals(Errors.NONE, result.error)
}
@Test
def shouldRespondWithNotCoordinatorOnInitPidWhenNotCoordinator(): Unit = {
EasyMock.expect(transactionManager.validateTransactionTimeoutMs(EasyMock.anyInt()))
.andReturn(true)
.anyTimes()
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Left(Errors.NOT_COORDINATOR))
EasyMock.replay(transactionManager)
coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, initProducerIdMockCallback)
assertEquals(InitProducerIdResult(-1, -1, Errors.NOT_COORDINATOR), result)
}
@Test
def shouldRespondWithCoordinatorLoadInProgressOnInitPidWhenCoordinatorLoading(): Unit = {
EasyMock.expect(transactionManager.validateTransactionTimeoutMs(EasyMock.anyInt()))
.andReturn(true)
.anyTimes()
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Left(Errors.COORDINATOR_LOAD_IN_PROGRESS))
EasyMock.replay(transactionManager)
coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, initProducerIdMockCallback)
assertEquals(InitProducerIdResult(-1, -1, Errors.COORDINATOR_LOAD_IN_PROGRESS), result)
}
@Test
def shouldRespondWithInvalidPidMappingOnAddPartitionsToTransactionWhenTransactionalIdNotPresent(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(None))
EasyMock.replay(transactionManager)
coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 1, partitions, errorsCallback)
assertEquals(Errors.INVALID_PRODUCER_ID_MAPPING, error)
}
@Test
def shouldRespondWithInvalidRequestAddPartitionsToTransactionWhenTransactionalIdIsEmpty(): Unit = {
coordinator.handleAddPartitionsToTransaction("", 0L, 1, partitions, errorsCallback)
assertEquals(Errors.INVALID_REQUEST, error)
}
@Test
def shouldRespondWithInvalidRequestAddPartitionsToTransactionWhenTransactionalIdIsNull(): Unit = {
coordinator.handleAddPartitionsToTransaction(null, 0L, 1, partitions, errorsCallback)
assertEquals(Errors.INVALID_REQUEST, error)
}
@Test
def shouldRespondWithNotCoordinatorOnAddPartitionsWhenNotCoordinator(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Left(Errors.NOT_COORDINATOR))
EasyMock.replay(transactionManager)
coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 1, partitions, errorsCallback)
assertEquals(Errors.NOT_COORDINATOR, error)
}
@Test
def shouldRespondWithCoordinatorLoadInProgressOnAddPartitionsWhenCoordinatorLoading(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Left(Errors.COORDINATOR_LOAD_IN_PROGRESS))
EasyMock.replay(transactionManager)
coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 1, partitions, errorsCallback)
assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, error)
}
@Test
def shouldRespondWithConcurrentTransactionsOnAddPartitionsWhenStateIsPrepareCommit(): Unit = {
validateConcurrentTransactions(PrepareCommit)
}
@Test
def shouldRespondWithConcurrentTransactionsOnAddPartitionsWhenStateIsPrepareAbort(): Unit = {
validateConcurrentTransactions(PrepareAbort)
}
def validateConcurrentTransactions(state: TransactionState): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, 0, state, mutable.Set.empty, 0, 0)))))
EasyMock.replay(transactionManager)
coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback)
assertEquals(Errors.CONCURRENT_TRANSACTIONS, error)
}
@Test
def shouldRespondWithInvalidTxnProducerEpochOnAddPartitionsWhenEpochsAreDifferent(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 10, 0, PrepareCommit, mutable.Set.empty, 0, 0)))))
EasyMock.replay(transactionManager)
coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback)
assertEquals(Errors.INVALID_PRODUCER_EPOCH, error)
}
@Test
def shouldAppendNewMetadataToLogOnAddPartitionsWhenPartitionsAdded(): Unit = {
validateSuccessfulAddPartitions(Empty)
}
@Test
def shouldRespondWithSuccessOnAddPartitionsWhenStateIsOngoing(): Unit = {
validateSuccessfulAddPartitions(Ongoing)
}
@Test
def shouldRespondWithSuccessOnAddPartitionsWhenStateIsCompleteCommit(): Unit = {
validateSuccessfulAddPartitions(CompleteCommit)
}
@Test
def shouldRespondWithSuccessOnAddPartitionsWhenStateIsCompleteAbort(): Unit = {
validateSuccessfulAddPartitions(CompleteAbort)
}
def validateSuccessfulAddPartitions(previousState: TransactionState): Unit = {
val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerEpoch, txnTimeoutMs, previousState,
mutable.Set.empty, time.milliseconds(), time.milliseconds())
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata))))
EasyMock.expect(transactionManager.appendTransactionToLog(
EasyMock.eq(transactionalId),
EasyMock.eq(coordinatorEpoch),
EasyMock.anyObject().asInstanceOf[TxnTransitMetadata],
EasyMock.capture(capturedErrorsCallback),
EasyMock.anyObject()
))
EasyMock.replay(transactionManager)
coordinator.handleAddPartitionsToTransaction(transactionalId, producerId, producerEpoch, partitions, errorsCallback)
EasyMock.verify(transactionManager)
}
@Test
def shouldRespondWithErrorsNoneOnAddPartitionWhenNoErrorsAndPartitionsTheSame(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, 0, Empty, partitions, 0, 0)))))
EasyMock.replay(transactionManager)
coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback)
assertEquals(Errors.NONE, error)
EasyMock.verify(transactionManager)
}
@Test
def shouldReplyWithInvalidPidMappingOnEndTxnWhenTxnIdDoesntExist(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(None))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, 0, 0, TransactionResult.COMMIT, errorsCallback)
assertEquals(Errors.INVALID_PRODUCER_ID_MAPPING, error)
EasyMock.verify(transactionManager)
}
@Test
def shouldReplyWithInvalidPidMappingOnEndTxnWhenPidDoesntMatchMapped(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 10, 0, 0, Ongoing, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds())))))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, 0, 0, TransactionResult.COMMIT, errorsCallback)
assertEquals(Errors.INVALID_PRODUCER_ID_MAPPING, error)
EasyMock.verify(transactionManager)
}
@Test
def shouldReplyWithProducerFencedOnEndTxnWhenEpochIsNotSameAsTransaction(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, 1, 1, Ongoing, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds())))))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, producerId, 0, TransactionResult.COMMIT, errorsCallback)
assertEquals(Errors.INVALID_PRODUCER_EPOCH, error)
EasyMock.verify(transactionManager)
}
@Test
def shouldReturnOkOnEndTxnWhenStatusIsCompleteCommitAndResultIsCommit(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, 1, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds())))))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, producerId, 1, TransactionResult.COMMIT, errorsCallback)
assertEquals(Errors.NONE, error)
EasyMock.verify(transactionManager)
}
@Test
def shouldReturnOkOnEndTxnWhenStatusIsCompleteAbortAndResultIsAbort(): Unit = {
val txnMetadata = new TransactionMetadata(transactionalId, producerId, 1, 1, CompleteAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds())
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata))))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, producerId, 1, TransactionResult.ABORT, errorsCallback)
assertEquals(Errors.NONE, error)
EasyMock.verify(transactionManager)
}
@Test
def shouldReturnInvalidTxnRequestOnEndTxnRequestWhenStatusIsCompleteAbortAndResultIsNotAbort(): Unit = {
val txnMetadata = new TransactionMetadata(transactionalId, producerId, 1, 1, CompleteAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds())
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata))))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, producerId, 1, TransactionResult.COMMIT, errorsCallback)
assertEquals(Errors.INVALID_TXN_STATE, error)
EasyMock.verify(transactionManager)
}
@Test
def shouldReturnInvalidTxnRequestOnEndTxnRequestWhenStatusIsCompleteCommitAndResultIsNotCommit(): Unit = {
val txnMetadata = new TransactionMetadata(transactionalId, producerId, 1, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds())
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata))))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, producerId, 1, TransactionResult.ABORT, errorsCallback)
assertEquals(Errors.INVALID_TXN_STATE, error)
EasyMock.verify(transactionManager)
}
@Test
def shouldReturnConcurrentTxnRequestOnEndTxnRequestWhenStatusIsPrepareCommit(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, 1, 1, PrepareCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds())))))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, producerId, 1, TransactionResult.COMMIT, errorsCallback)
assertEquals(Errors.CONCURRENT_TRANSACTIONS, error)
EasyMock.verify(transactionManager)
}
@Test
def shouldReturnInvalidTxnRequestOnEndTxnRequestWhenStatusIsPrepareAbort(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, 1, 1, PrepareAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds())))))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, producerId, 1, TransactionResult.COMMIT, errorsCallback)
assertEquals(Errors.INVALID_TXN_STATE, error)
EasyMock.verify(transactionManager)
}
@Test
def shouldAppendPrepareCommitToLogOnEndTxnWhenStatusIsOngoingAndResultIsCommit(): Unit = {
mockPrepare(PrepareCommit)
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, errorsCallback)
EasyMock.verify(transactionManager)
}
@Test
def shouldAppendPrepareAbortToLogOnEndTxnWhenStatusIsOngoingAndResultIsAbort(): Unit = {
mockPrepare(PrepareAbort)
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.ABORT, errorsCallback)
EasyMock.verify(transactionManager)
}
@Test
def shouldRespondWithInvalidRequestOnEndTxnWhenTransactionalIdIsNull(): Unit = {
coordinator.handleEndTransaction(null, 0, 0, TransactionResult.COMMIT, errorsCallback)
assertEquals(Errors.INVALID_REQUEST, error)
}
@Test
def shouldRespondWithInvalidRequestOnEndTxnWhenTransactionalIdIsEmpty(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Left(Errors.NOT_COORDINATOR))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction("", 0, 0, TransactionResult.COMMIT, errorsCallback)
assertEquals(Errors.INVALID_REQUEST, error)
}
@Test
def shouldRespondWithNotCoordinatorOnEndTxnWhenIsNotCoordinatorForId(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Left(Errors.NOT_COORDINATOR))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, 0, 0, TransactionResult.COMMIT, errorsCallback)
assertEquals(Errors.NOT_COORDINATOR, error)
}
@Test
def shouldRespondWithCoordinatorLoadInProgressOnEndTxnWhenCoordinatorIsLoading(): Unit = {
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Left(Errors.COORDINATOR_LOAD_IN_PROGRESS))
EasyMock.replay(transactionManager)
coordinator.handleEndTransaction(transactionalId, 0, 0, TransactionResult.COMMIT, errorsCallback)
assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, error)
}
@Test
def shouldIncrementEpochAndUpdateMetadataOnHandleInitPidWhenExistingEmptyTransaction(): Unit = {
validateIncrementEpochAndUpdateMetadata(Empty)
}
@Test
def shouldIncrementEpochAndUpdateMetadataOnHandleInitPidWhenExistingCompleteTransaction(): Unit = {
validateIncrementEpochAndUpdateMetadata(CompleteAbort)
}
@Test
def shouldIncrementEpochAndUpdateMetadataOnHandleInitPidWhenExistingCompleteCommitTransaction(): Unit = {
validateIncrementEpochAndUpdateMetadata(CompleteCommit)
}
@Test
def shouldWaitForCommitToCompleteOnHandleInitPidAndExistingTransactionInPrepareCommitState(): Unit = {
validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(PrepareCommit)
}
@Test
def shouldWaitForCommitToCompleteOnHandleInitPidAndExistingTransactionInPrepareAbortState(): Unit = {
validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(PrepareAbort)
}
@Test
def shouldAbortTransactionOnHandleInitPidWhenExistingTransactionInOngoingState(): Unit = {
val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerEpoch, txnTimeoutMs, Ongoing,
partitions, time.milliseconds(), time.milliseconds())
EasyMock.expect(transactionManager.validateTransactionTimeoutMs(EasyMock.anyInt()))
.andReturn(true)
EasyMock.expect(transactionManager.putTransactionStateIfNotExists(EasyMock.eq(transactionalId), EasyMock.anyObject[TransactionMetadata]()))
.andReturn(Right(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))
.anyTimes()
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata))))
.anyTimes()
val originalMetadata = new TransactionMetadata(transactionalId, producerId, (producerEpoch + 1).toShort,
txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds())
EasyMock.expect(transactionManager.appendTransactionToLog(
EasyMock.eq(transactionalId),
EasyMock.eq(coordinatorEpoch),
EasyMock.eq(originalMetadata.prepareAbortOrCommit(PrepareAbort, time.milliseconds())),
EasyMock.capture(capturedErrorsCallback),
EasyMock.anyObject()))
.andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {
capturedErrorsCallback.getValue.apply(Errors.NONE)
}
})
EasyMock.replay(transactionManager)
coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, initProducerIdMockCallback)
assertEquals(InitProducerIdResult(-1, -1, Errors.CONCURRENT_TRANSACTIONS), result)
EasyMock.verify(transactionManager)
}
@Test
def shouldUseLastEpochToFenceWhenEpochsAreExhausted(): Unit = {
val txnMetadata = new TransactionMetadata(transactionalId, producerId, (Short.MaxValue - 1).toShort,
txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds())
assertTrue(txnMetadata.isProducerEpochExhausted)
EasyMock.expect(transactionManager.validateTransactionTimeoutMs(EasyMock.anyInt()))
.andReturn(true)
EasyMock.expect(transactionManager.putTransactionStateIfNotExists(EasyMock.eq(transactionalId), EasyMock.anyObject[TransactionMetadata]()))
.andReturn(Right(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))
.anyTimes()
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata))))
.anyTimes()
EasyMock.expect(transactionManager.appendTransactionToLog(
EasyMock.eq(transactionalId),
EasyMock.eq(coordinatorEpoch),
EasyMock.eq(TxnTransitMetadata(
producerId = producerId,
producerEpoch = Short.MaxValue,
txnTimeoutMs = txnTimeoutMs,
txnState = PrepareAbort,
topicPartitions = partitions.toSet,
txnStartTimestamp = time.milliseconds(),
txnLastUpdateTimestamp = time.milliseconds())),
EasyMock.capture(capturedErrorsCallback),
EasyMock.anyObject()))
.andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {
capturedErrorsCallback.getValue.apply(Errors.NONE)
}
})
EasyMock.replay(transactionManager)
coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, initProducerIdMockCallback)
assertEquals(Short.MaxValue, txnMetadata.producerEpoch)
assertEquals(InitProducerIdResult(-1, -1, Errors.CONCURRENT_TRANSACTIONS), result)
EasyMock.verify(transactionManager)
}
@Test
def shouldRemoveTransactionsForPartitionOnEmigration(): Unit = {
EasyMock.expect(transactionManager.removeTransactionsForTxnTopicPartition(0, coordinatorEpoch))
EasyMock.expect(transactionMarkerChannelManager.removeMarkersForTxnTopicPartition(0))
EasyMock.replay(transactionManager, transactionMarkerChannelManager)
coordinator.handleTxnEmigration(0, coordinatorEpoch)
EasyMock.verify(transactionManager, transactionMarkerChannelManager)
}
@Test
def shouldAbortExpiredTransactionsInOngoingState(): Unit = {
val now = time.milliseconds()
val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerEpoch, txnTimeoutMs, Ongoing,
partitions, now, now)
EasyMock.expect(transactionManager.timedOutTransactions())
.andReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, producerEpoch)))
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata))))
.once()
val expectedTransition = TxnTransitMetadata(producerId, producerEpoch, txnTimeoutMs, PrepareAbort,
partitions.toSet, now, now + TransactionStateManager.DefaultAbortTimedOutTransactionsIntervalMs)
EasyMock.expect(transactionManager.appendTransactionToLog(EasyMock.eq(transactionalId),
EasyMock.eq(coordinatorEpoch),
EasyMock.eq(expectedTransition),
EasyMock.capture(capturedErrorsCallback),
EasyMock.anyObject()))
.andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {}
})
.once()
EasyMock.replay(transactionManager, transactionMarkerChannelManager)
coordinator.startup(false)
time.sleep(TransactionStateManager.DefaultAbortTimedOutTransactionsIntervalMs)
scheduler.tick()
EasyMock.verify(transactionManager)
}
@Test
def shouldNotAbortExpiredTransactionsThatHaveAPendingStateTransition(): Unit = {
val metadata = new TransactionMetadata(transactionalId, producerId, producerEpoch, txnTimeoutMs, Ongoing,
partitions, time.milliseconds(), time.milliseconds())
metadata.prepareAbortOrCommit(PrepareCommit, time.milliseconds())
EasyMock.expect(transactionManager.timedOutTransactions())
.andReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, producerEpoch)))
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, metadata))))
EasyMock.replay(transactionManager, transactionMarkerChannelManager)
coordinator.startup(false)
time.sleep(TransactionStateManager.DefaultAbortTimedOutTransactionsIntervalMs)
scheduler.tick()
EasyMock.verify(transactionManager)
}
private def validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(state: TransactionState) = {
EasyMock.expect(transactionManager.validateTransactionTimeoutMs(EasyMock.anyInt()))
.andReturn(true).anyTimes()
val metadata = new TransactionMetadata(transactionalId, 0, 0, 0, state, mutable.Set[TopicPartition](new TopicPartition("topic", 1)), 0, 0)
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, metadata)))).anyTimes()
EasyMock.replay(transactionManager)
coordinator.handleInitProducerId(transactionalId, 10, initProducerIdMockCallback)
assertEquals(InitProducerIdResult(-1, -1, Errors.CONCURRENT_TRANSACTIONS), result)
}
private def validateIncrementEpochAndUpdateMetadata(state: TransactionState) = {
EasyMock.expect(pidManager.generateProducerId())
.andReturn(producerId)
.anyTimes()
EasyMock.expect(transactionManager.validateTransactionTimeoutMs(EasyMock.anyInt()))
.andReturn(true)
val metadata = new TransactionMetadata(transactionalId, producerId, producerEpoch, txnTimeoutMs, state, mutable.Set.empty[TopicPartition], time.milliseconds(), time.milliseconds())
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, metadata))))
val capturedNewMetadata: Capture[TxnTransitMetadata] = EasyMock.newCapture()
EasyMock.expect(transactionManager.appendTransactionToLog(
EasyMock.eq(transactionalId),
EasyMock.eq(coordinatorEpoch),
EasyMock.capture(capturedNewMetadata),
EasyMock.capture(capturedErrorsCallback),
EasyMock.anyObject()
)).andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {
metadata.completeTransitionTo(capturedNewMetadata.getValue)
capturedErrorsCallback.getValue.apply(Errors.NONE)
}
})
EasyMock.replay(pidManager, transactionManager)
val newTxnTimeoutMs = 10
coordinator.handleInitProducerId(transactionalId, newTxnTimeoutMs, initProducerIdMockCallback)
assertEquals(InitProducerIdResult(producerId, (producerEpoch + 1).toShort, Errors.NONE), result)
assertEquals(newTxnTimeoutMs, metadata.txnTimeoutMs)
assertEquals(time.milliseconds(), metadata.txnLastUpdateTimestamp)
assertEquals((producerEpoch + 1).toShort, metadata.producerEpoch)
assertEquals(producerId, metadata.producerId)
}
private def mockPrepare(transactionState: TransactionState, runCallback: Boolean = false): TransactionMetadata = {
val now = time.milliseconds()
val originalMetadata = new TransactionMetadata(transactionalId, producerId, producerEpoch, txnTimeoutMs,
Ongoing, partitions, now, now)
val transition = TxnTransitMetadata(producerId, producerEpoch, txnTimeoutMs, transactionState,
partitions.toSet, now, now)
EasyMock.expect(transactionManager.getTransactionState(EasyMock.eq(transactionalId)))
.andReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, originalMetadata))))
.once()
EasyMock.expect(transactionManager.appendTransactionToLog(
EasyMock.eq(transactionalId),
EasyMock.eq(coordinatorEpoch),
EasyMock.eq(transition),
EasyMock.capture(capturedErrorsCallback),
EasyMock.anyObject()))
.andAnswer(new IAnswer[Unit] {
override def answer(): Unit = {
if (runCallback)
capturedErrorsCallback.getValue.apply(Errors.NONE)
}
}).once()
new TransactionMetadata(transactionalId, producerId, producerEpoch, txnTimeoutMs, transactionState, partitions,
time.milliseconds(), time.milliseconds())
}
def initProducerIdMockCallback(ret: InitProducerIdResult): Unit = {
result = ret
}
def errorsCallback(ret: Errors): Unit = {
error = ret
}
}
|
wangcy6/storm_app
|
frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala
|
Scala
|
apache-2.0
| 32,232
|
package com.twitter.finagle.netty4.ssl.client
import com.twitter.finagle.Address
import com.twitter.finagle.netty4.param.Allocator
import com.twitter.finagle.netty4.ssl.Netty4SslConfigurations
import com.twitter.finagle.ssl._
import com.twitter.finagle.ssl.client.{SslClientConfiguration, SslClientEngineFactory}
import com.twitter.util.security.{Pkcs8EncodedKeySpecFile, X509CertificateFile}
import com.twitter.util.{Try, Return, Throw}
import io.netty.buffer.ByteBufAllocator
import io.netty.handler.ssl.{OpenSsl, SslContext, SslContextBuilder}
import java.io.File
import java.security.{KeyFactory, PrivateKey}
import java.security.spec.InvalidKeySpecException
import javax.net.ssl.SSLEngine
/**
* This engine factory uses Netty 4's `SslContextBuilder`. It is the
* recommended path for using native SSL/TLS engines with Finagle.
*/
class Netty4ClientEngineFactory(allocator: ByteBufAllocator, forceJdk: Boolean)
extends SslClientEngineFactory {
private[this] def mkSslEngine(
context: SslContext,
address: Address,
config: SslClientConfiguration
): SSLEngine =
address match {
case Address.Inet(isa, _) =>
context.newEngine(allocator, SslClientEngineFactory.getHostname(isa, config), isa.getPort)
case _ =>
context.newEngine(allocator)
}
private[this] def getPrivateKey(keyFile: File): Try[PrivateKey] = {
val encodedKeySpec = new Pkcs8EncodedKeySpecFile(keyFile).readPkcs8EncodedKeySpec()
// keeps identical behavior to netty
// https://github.com/netty/netty/blob/netty-4.1.11.Final/handler/src/main/java/io/netty/handler/ssl/SslContext.java#L1006
encodedKeySpec.flatMap { keySpec =>
Try {
KeyFactory.getInstance("RSA").generatePrivate(keySpec)
}.handle {
case _: InvalidKeySpecException => KeyFactory.getInstance("DSA").generatePrivate(keySpec)
}.handle {
case _: InvalidKeySpecException => KeyFactory.getInstance("EC").generatePrivate(keySpec)
}.handle {
case ex: InvalidKeySpecException =>
throw new InvalidKeySpecException("Neither RSA, DSA nor EC worked", ex)
}
}
}
private[this] def addKey(
builder: SslContextBuilder,
keyCredentials: KeyCredentials
): Try[SslContextBuilder] =
keyCredentials match {
case KeyCredentials.Unspecified =>
Return(builder) // Do Nothing
case KeyCredentials.CertAndKey(certFile, keyFile) => Try {
builder.keyManager(certFile, keyFile)
}
case KeyCredentials.CertKeyAndChain(certFile, keyFile, chainFile) =>
for {
key <- getPrivateKey(keyFile)
cert <- new X509CertificateFile(certFile).readX509Certificate()
chain <- new X509CertificateFile(chainFile).readX509Certificate()
} yield builder.keyManager(key, cert, chain)
}
/**
* Creates a new `Engine` based on an `Address` and an `SslClientConfiguration`.
*
* @param address A physical address which potentially includes metadata.
*
* @param config A collection of parameters which the engine factory should
* consider when creating the TLS client `Engine`.
*
* @note Using `TrustCredentials.Insecure` forces the underlying engine to be
* a JDK engine and not a native engine, based on what Netty supports.
*
* @note `ApplicationProtocols` other than Unspecified are only supported
* by using a native engine via netty-tcnative.
*/
def apply(address: Address, config: SslClientConfiguration): Engine = {
val builder = SslContextBuilder.forClient()
val withKey = addKey(builder, config.keyCredentials) match {
case Return(builderWithKey) => builderWithKey
case Throw(ex) => throw new SslConfigurationException(ex.getMessage, ex)
}
val withProvider = Netty4SslConfigurations.configureProvider(withKey, forceJdk)
val withTrust = Netty4SslConfigurations.configureTrust(withProvider, config.trustCredentials)
val withAppProtocols = Netty4SslConfigurations.configureApplicationProtocols(
withTrust, config.applicationProtocols)
val context = withAppProtocols.build()
val engine = new Engine(mkSslEngine(context, address, config))
SslClientEngineFactory.configureEngine(engine, config)
engine
}
}
object Netty4ClientEngineFactory {
/**
* Creates an instance of the [[Netty4ClientEngineFactory]] using the
* allocator defined for use by default in Finagle-Netty4.
*
* @param forceJdk Indicates whether the underlying `SslProvider` should
* be forced to be the Jdk version instead of the native version if
* available.
*/
def apply(forceJdk: Boolean): Netty4ClientEngineFactory = {
val allocator = Allocator.allocatorParam.default.allocator
new Netty4ClientEngineFactory(allocator, forceJdk)
}
/**
* Creates an instance of the [[Netty4ClientEngineFactory]] using the
* specified allocator.
*
* @param allocator The allocator which should be used as part of
* `Engine` creation. See Netty's `SslContextBuilder` docs for
* more information.
*
* @note Whether this engine factory should be forced to use the
* Jdk version is determined by whether Netty is able to load
* a native engine library via netty-tcnative.
*/
def apply(allocator: ByteBufAllocator): Netty4ClientEngineFactory =
new Netty4ClientEngineFactory(allocator, !OpenSsl.isAvailable)
/**
* Creates an instance of the [[Netty4ClientEngineFactory]] using the
* default allocator.
*
* @note Whether this engine factory should be forced to use the
* Jdk version is determined by whether Netty is able to load
* a native engine library via netty-tcnative.
*/
def apply(): Netty4ClientEngineFactory =
apply(!OpenSsl.isAvailable)
}
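// ---------------------------------------------------------------------------
// Added usage sketch (not part of the original file): a hedged example of
// building a client-side TLS engine with the factory defined above. The
// hostname/port and the use of a default SslClientConfiguration() are
// illustrative assumptions; `Netty4ClientEngineFactorySketch` is hypothetical.
object Netty4ClientEngineFactorySketch {
  def demo(): Engine = {
    val factory = Netty4ClientEngineFactory()      // native engine when available, else JDK
    val address = Address("example.com", 443)      // hypothetical endpoint
    val config = SslClientConfiguration()          // default client TLS configuration
    factory(address, config)                       // yields a configured Engine
  }
}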
|
koshelev/finagle
|
finagle-netty4/src/main/scala/com/twitter/finagle/netty4/ssl/client/Netty4ClientEngineFactory.scala
|
Scala
|
apache-2.0
| 5,744
|
package repositories.analysis.dao
import com.google.inject.{Inject, Singleton}
import models.analysis._
import models.analysis.events.SampleCreated
import no.uio.musit.MusitResults.{MusitDbError, MusitResult, MusitSuccess}
import no.uio.musit.repositories.events.EventActions
import no.uio.musit.models.ObjectTypes.SampleObjectType
import no.uio.musit.models._
import no.uio.musit.security.AuthenticatedUser
import play.api.Logger
import play.api.db.slick.DatabaseConfigProvider
import repositories.shared.dao.SharedTables
import scala.concurrent.{ExecutionContext, Future}
@Singleton
class SampleObjectDao @Inject()(
implicit
val dbConfigProvider: DatabaseConfigProvider,
val ec: ExecutionContext
) extends AnalysisTables
with AnalysisEventTableProvider
with EventActions
with AnalysisEventRowMappers
with SharedTables {
val logger = Logger(classOf[SampleObjectDao])
import profile.api._
def insert(
so: SampleObject
)(implicit currUsr: AuthenticatedUser): Future[MusitResult[ObjectUUID]] = {
val soTuple = asSampleObjectTuple(so)
val action = sampleObjTable += soTuple
db.run(action.transactionally)
.map(_ => MusitSuccess(soTuple._1))
.recover(nonFatal(s"An unexpected error occurred inserting a sample object"))
}
def insert(
mid: MuseumId,
so: SampleObject,
eventObj: SampleCreated
)(implicit currUsr: AuthenticatedUser): Future[MusitResult[ObjectUUID]] = {
val soTuple = asSampleObjectTuple(so)
insertAdditionalWithEvent(mid, eventObj)(asRow)(_ => sampleObjTable += soTuple)
.map(_.map(_ => soTuple._1))
.recover(nonFatal(s"An unexpected error occurred inserting a sample object"))
}
def update(
so: SampleObject
)(implicit currUsr: AuthenticatedUser): Future[MusitResult[Unit]] = {
val a = sampleObjTable.filter(_.id === so.objectId).update(asSampleObjectTuple(so))
db.run(a.transactionally)
.map {
case res: Int if res == 1 => MusitSuccess(())
case res: Int if 1 > res => MusitDbError("Nothing was updated")
case res: Int if 1 < res => MusitDbError(s"Too many rows were updated: $res")
}
.recover(nonFatal(s"An unexpected error occurred updating sample ${so.sampleId}"))
}
def findByUUID(
uuid: ObjectUUID
)(implicit currUsr: AuthenticatedUser): Future[MusitResult[Option[SampleObject]]] = {
val q =
sampleObjTable
.filter(so => so.id === uuid && so.isDeleted === false)
.result
.headOption
db.run(q)
.map(sor => MusitSuccess(sor.map(fromSampleObjectRow)))
.recover(nonFatal(s"An unexpected error occurred fetching sample object $uuid"))
}
def listForParentObject(
parent: ObjectUUID
)(implicit currUsr: AuthenticatedUser): Future[MusitResult[Seq[SampleObject]]] = {
val q = sampleObjTable.filter(_.parentId === parent).result
db.run(q)
.map(_.map(fromSampleObjectRow))
.map(MusitSuccess.apply)
.recover(
nonFatal(s"An unexpected error occurred fetching child samples for $parent")
)
}
def listForOriginatingObject(
originating: ObjectUUID
)(implicit currUsr: AuthenticatedUser): Future[MusitResult[Seq[SampleObject]]] = {
val q = sampleObjTable.filter(_.originatedFrom === originating).result
db.run(q)
.map(_.map(fromSampleObjectRow))
.map(MusitSuccess.apply)
.recover(
nonFatal(
s"An unexpected error occurred fetching samples for object $originating"
)
)
}
def listForMuseum(
mid: MuseumId
)(implicit currUsr: AuthenticatedUser): Future[MusitResult[Seq[SampleObject]]] = {
val q = sampleObjTable.filter(_.museumId === mid).result
db.run(q)
.map(_.map(fromSampleObjectRow))
.map(MusitSuccess.apply)
.recover(
nonFatal(s"An unexpected error occurred fetching samples for Museum $mid")
)
}
private def collectionFilter(
collections: Seq[MuseumCollection]
)(implicit currUsr: AuthenticatedUser) =
if (currUsr.hasGodMode) ""
else {
val in = collections.map(_.collection.id).mkString("(", ",", ")")
s"""AND mt.NEW_COLLECTION_ID in $in"""
}
// scalastyle:off method.length
def listForNode(
mid: MuseumId,
nodeId: StorageNodeId,
collections: Seq[MuseumCollection]
)(
implicit currUsr: AuthenticatedUser
): Future[MusitResult[Seq[EnrichedSampleObject]]] = {
val query =
sql"""
SELECT /*+ FIRST_ROWS DRIVING_SITE(mt) */
mt.MUSEUMNO, mt.SUBNO, mt.TERM,
so.SAMPLE_UUID,
so.ORIGINATED_OBJECT_UUID,
so.PARENT_OBJECT_UUID,
so.PARENT_OBJECT_TYPE,
so.IS_EXTRACTED,
so.MUSEUM_ID,
so.STATUS,
so.RESPONSIBLE_ACTOR,
so.DONE_BY,
so.DONE_DATE,
so.SAMPLE_ID,
so.SAMPLE_NUM,
so.EXTERNAL_ID, so.EXTERNAL_ID_SOURCE,
so.SAMPLE_TYPE_ID,
so.SAMPLE_SIZE, so.SAMPLE_SIZE_UNIT,
so.SAMPLE_CONTAINER,
so.STORAGE_MEDIUM,
so.TREATMENT,
so.LEFTOVER_SAMPLE,
so.DESCRIPTION,
so.NOTE,
so.REGISTERED_BY, so.REGISTERED_DATE,
so.UPDATED_BY, so.UPDATED_DATE,
so.IS_DELETED
FROM
MUSARK_ANALYSIS.SAMPLE_OBJECT so,
MUSARK_STORAGE.NEW_LOCAL_OBJECT lo,
MUSARK_STORAGE.NEW_EVENT ne,
MUSIT_MAPPING.MUSITTHING mt
WHERE so.MUSEUM_ID = ${mid.underlying}
AND so.SAMPLE_UUID = ne.AFFECTED_UUID
AND so.ORIGINATED_OBJECT_UUID = mt.MUSITTHING_UUID
AND so.IS_DELETED = 0
AND lo.CURRENT_LOCATION_ID = ${nodeId.asString}
AND lo.OBJECT_TYPE = ${SampleObjectType.name}
AND lo.LATEST_MOVE_ID = ne.EVENT_ID #${collectionFilter(collections)}
ORDER BY
mt.MUSEUMNOASNUMBER ASC,
LOWER(mt.MUSEUMNO) ASC,
mt.SUBNOASNUMBER ASC,
LOWER(mt.SUBNO) ASC
""".as[EnrichedSampleObject]
db.run(query)
.map(MusitSuccess.apply)
.recover(nonFatal(s"There was an error looking for samples for node $nodeId"))
}
// scalastyle:on method.length
}
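// ---------------------------------------------------------------------------
// Added usage sketch (not part of the original file): a hedged example of how a
// caller might consume findByUUID and fold the MusitResult imported above.
// `SampleObjectDaoSketch` and the parameter names are hypothetical placeholders.
object SampleObjectDaoSketch {
  def describeSample(dao: SampleObjectDao, uuid: ObjectUUID)(
      implicit currUsr: AuthenticatedUser,
      ec: ExecutionContext
  ): Future[String] =
    dao.findByUUID(uuid).map {
      case MusitSuccess(Some(sample)) => s"found sample ${sample.sampleId}" // hit
      case MusitSuccess(None)         => "no such sample"                   // miss
      case err                        => s"lookup failed: $err"             // db or unexpected error
    }
}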
|
MUSIT-Norway/musit
|
service_backend/app/repositories/analysis/dao/SampleObjectDao.scala
|
Scala
|
gpl-2.0
| 6,231
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.onnx
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
import org.scalatest.{FlatSpec, Matchers}
class ShapeSpec extends FlatSpec with Matchers {
"Shape" should "work" in {
val inputTensor = Tensor[Float](20, 1, 9).rand()
val shape = Shape[Float]()
val output = shape.forward(inputTensor)
val ans = Tensor[Float](3)
ans.setValue(1, 20)
ans.setValue(2, 1)
ans.setValue(3, 9)
output.nDimension() should be (1)
output.nDimension() should be (ans.nDimension())
output.size(1) should be (ans.size(1))
(1 to output.size(1)).foreach(i => {
output.valueAt(i) should be (ans.valueAt(i))
})
}
}
class ShapeSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val shape = Shape[Float]()
val input = Tensor[Float](5).rand()
runSerializationTest(shape, input)
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/onnx/ShapeSpec.scala
|
Scala
|
apache-2.0
| 1,585
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package invokerShoot
import java.io.File
import java.nio.charset.StandardCharsets
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import common._
import common.rest.WskRestOperations
import org.apache.openwhisk.core.entity.WhiskAction
import org.apache.commons.io.FileUtils
import spray.json._
import spray.json.DefaultJsonProtocol._
@RunWith(classOf[JUnitRunner])
class ShootInvokerTests extends TestHelpers with WskTestHelpers with JsHelpers with WskActorSystem {
implicit val wskprops = WskProps()
// wsk must have type WskOperations so that tests using CLI (class Wsk)
// instead of REST (WskRestOperations) still work.
val wsk: WskOperations = new WskRestOperations
val testString = "this is a test"
val testResult = JsObject("count" -> testString.split(" ").length.toJson)
val guestNamespace = wskprops.namespace
behavior of "Whisk actions"
it should "create an action with an empty file" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "empty"
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(name, Some(TestUtils.getTestActionFilename("empty.js")))
}
}
it should "invoke an action returning a promise" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "hello promise"
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(name, Some(TestUtils.getTestActionFilename("helloPromise.js")))
}
val run = wsk.action.invoke(name)
withActivation(wsk.activation, run) { activation =>
activation.response.status shouldBe "success"
activation.response.result shouldBe Some(JsObject("done" -> true.toJson))
activation.logs.get.mkString(" ") shouldBe empty
}
}
it should "invoke an action with a space in the name" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "hello Async"
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(name, Some(TestUtils.getTestActionFilename("helloAsync.js")))
}
val run = wsk.action.invoke(name, Map("payload" -> testString.toJson))
withActivation(wsk.activation, run) { activation =>
activation.response.status shouldBe "success"
activation.response.result shouldBe Some(testResult)
activation.logs.get.mkString(" ") should include(testString)
}
}
it should "invoke an action that throws an uncaught exception and returns correct status code" in withAssetCleaner(
wskprops) { (wp, assetHelper) =>
val name = "throwExceptionAction"
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(name, Some(TestUtils.getTestActionFilename("runexception.js")))
}
withActivation(wsk.activation, wsk.action.invoke(name)) { activation =>
val response = activation.response
activation.response.status shouldBe "action developer error"
activation.response.result shouldBe Some(
JsObject("error" -> "An error has occurred: Extraordinary exception".toJson))
}
}
it should "pass parameters bound on creation-time to the action" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "printParams"
val params = Map("param1" -> "test1", "param2" -> "test2")
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(
name,
Some(TestUtils.getTestActionFilename("printParams.js")),
parameters = params.mapValues(_.toJson))
}
val invokeParams = Map("payload" -> testString)
val run = wsk.action.invoke(name, invokeParams.mapValues(_.toJson))
withActivation(wsk.activation, run) { activation =>
val logs = activation.logs.get.mkString(" ")
(params ++ invokeParams).foreach {
case (key, value) =>
logs should include(s"params.$key: $value")
}
}
}
it should "copy an action and invoke it successfully" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "copied"
val packageName = "samples"
val actionName = "wordcount"
val fullQualifiedName = s"/$guestNamespace/$packageName/$actionName"
assetHelper.withCleaner(wsk.pkg, packageName) { (pkg, _) =>
pkg.create(packageName, shared = Some(true))
}
assetHelper.withCleaner(wsk.action, fullQualifiedName) {
val file = Some(TestUtils.getTestActionFilename("wc.js"))
(action, _) =>
action.create(fullQualifiedName, file)
}
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(name, Some(fullQualifiedName), Some("copy"))
}
val run = wsk.action.invoke(name, Map("payload" -> testString.toJson))
withActivation(wsk.activation, run) { activation =>
activation.response.status shouldBe "success"
activation.response.result shouldBe Some(testResult)
activation.logs.get.mkString(" ") should include(testString)
}
}
it should "copy an action and ensure exec, parameters, and annotations copied" in withAssetCleaner(wskprops) {
(wp, assetHelper) =>
val origActionName = "origAction"
val copiedActionName = "copiedAction"
val params = Map("a" -> "A".toJson)
val annots = Map("b" -> "B".toJson)
assetHelper.withCleaner(wsk.action, origActionName) {
val file = Some(TestUtils.getTestActionFilename("wc.js"))
(action, _) =>
action.create(origActionName, file, parameters = params, annotations = annots)
}
assetHelper.withCleaner(wsk.action, copiedActionName) { (action, _) =>
action.create(copiedActionName, Some(origActionName), Some("copy"))
}
val copiedAction = wsk.parseJsonString(wsk.action.get(copiedActionName).stdout)
val origAction = wsk.parseJsonString(wsk.action.get(origActionName).stdout)
copiedAction.fields("annotations") shouldBe origAction.fields("annotations")
copiedAction.fields("parameters") shouldBe origAction.fields("parameters")
copiedAction.fields("exec") shouldBe origAction.fields("exec")
copiedAction.fields("version") shouldBe JsString("0.0.1")
}
it should "add new parameters and annotations while copying an action" in withAssetCleaner(wskprops) {
(wp, assetHelper) =>
val origName = "origAction"
val copiedName = "copiedAction"
val origParams = Map("origParam1" -> "origParamValue1".toJson, "origParam2" -> 999.toJson)
val copiedParams = Map("copiedParam1" -> "copiedParamValue1".toJson, "copiedParam2" -> 123.toJson)
val origAnnots = Map("origAnnot1" -> "origAnnotValue1".toJson, "origAnnot2" -> true.toJson)
val copiedAnnots = Map("copiedAnnot1" -> "copiedAnnotValue1".toJson, "copiedAnnot2" -> false.toJson)
val resParams = Seq(
JsObject("key" -> JsString("copiedParam1"), "value" -> JsString("copiedParamValue1")),
JsObject("key" -> JsString("copiedParam2"), "value" -> JsNumber(123)),
JsObject("key" -> JsString("origParam1"), "value" -> JsString("origParamValue1")),
JsObject("key" -> JsString("origParam2"), "value" -> JsNumber(999)))
val resAnnots = Seq(
JsObject("key" -> JsString("origAnnot1"), "value" -> JsString("origAnnotValue1")),
JsObject("key" -> JsString("copiedAnnot2"), "value" -> JsBoolean(false)),
JsObject("key" -> JsString("copiedAnnot1"), "value" -> JsString("copiedAnnotValue1")),
JsObject("key" -> JsString("origAnnot2"), "value" -> JsBoolean(true)),
JsObject("key" -> JsString("exec"), "value" -> JsString("nodejs:6")),
JsObject("key" -> WhiskAction.provideApiKeyAnnotationName.toJson, "value" -> JsBoolean(false)))
assetHelper.withCleaner(wsk.action, origName) {
val file = Some(TestUtils.getTestActionFilename("echo.js"))
(action, _) =>
action.create(origName, file, parameters = origParams, annotations = origAnnots)
}
assetHelper.withCleaner(wsk.action, copiedName) { (action, _) =>
println("created copied ")
action.create(copiedName, Some(origName), Some("copy"), parameters = copiedParams, annotations = copiedAnnots)
}
val copiedAction = wsk.parseJsonString(wsk.action.get(copiedName).stdout)
// CLI does not guarantee order of annotations and parameters so do a diff to compare the values
copiedAction.fields("parameters").convertTo[Seq[JsObject]] diff resParams shouldBe List.empty
copiedAction.fields("annotations").convertTo[Seq[JsObject]] diff resAnnots shouldBe List.empty
}
it should "recreate and invoke a new action with different code" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "recreatedAction"
assetHelper.withCleaner(wsk.action, name, false) { (action, _) =>
action.create(name, Some(TestUtils.getTestActionFilename("wc.js")))
}
val run1 = wsk.action.invoke(name, Map("payload" -> testString.toJson))
withActivation(wsk.activation, run1) { activation =>
activation.response.status shouldBe "success"
activation.logs.get.mkString(" ") should include(s"The message '$testString' has")
}
wsk.action.delete(name)
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(name, Some(TestUtils.getTestActionFilename("hello.js")))
}
val run2 = wsk.action.invoke(name, Map("payload" -> testString.toJson))
withActivation(wsk.activation, run2) { activation =>
activation.response.status shouldBe "success"
activation.logs.get.mkString(" ") should include(s"hello, $testString")
}
}
it should "fail to invoke an action with an empty file" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "empty"
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(name, Some(TestUtils.getTestActionFilename("empty.js")))
}
val run = wsk.action.invoke(name)
withActivation(wsk.activation, run) { activation =>
activation.response.status shouldBe "action developer error"
activation.response.result shouldBe Some(JsObject("error" -> "Missing main/no code to execute.".toJson))
}
}
it should "blocking invoke of nested blocking actions" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "nestedBlockingAction"
val child = "wc"
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(
name,
Some(TestUtils.getTestActionFilename("wcbin.js")),
annotations = Map(WhiskAction.provideApiKeyAnnotationName -> JsBoolean(true)))
}
assetHelper.withCleaner(wsk.action, child) { (action, _) =>
action.create(child, Some(TestUtils.getTestActionFilename("wc.js")))
}
val run = wsk.action.invoke(name, Map("payload" -> testString.toJson), blocking = true)
val activation = wsk.parseJsonString(run.stdout).convertTo[ActivationResult]
withClue(s"check failed for activation: $activation") {
val wordCount = testString.split(" ").length
activation.response.result.get shouldBe JsObject("binaryCount" -> s"${wordCount.toBinaryString} (base 2)".toJson)
}
}
it should "blocking invoke an asynchronous action" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "helloAsync"
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(name, Some(TestUtils.getTestActionFilename("helloAsync.js")))
}
val run = wsk.action.invoke(name, Map("payload" -> testString.toJson), blocking = true)
val activation = wsk.parseJsonString(run.stdout).convertTo[ActivationResult]
withClue(s"check failed for activation: $activation") {
activation.response.status shouldBe "success"
activation.response.result shouldBe Some(testResult)
activation.logs shouldBe Some(List.empty)
}
}
it should "not be able to use 'ping' in an action" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "ping"
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(name, Some(TestUtils.getTestActionFilename("ping.js")))
}
val run = wsk.action.invoke(name, Map("payload" -> "google.com".toJson))
withActivation(wsk.activation, run) { activation =>
val result = activation.response.result.get
result.getFields("stdout", "code") match {
case Seq(JsString(stdout), JsNumber(code)) =>
stdout should not include "bytes from"
code.intValue() should not be 0
case _ => fail(s"fields 'stdout' or 'code' were not of the expected format, was $result")
}
}
}
it should "support UTF-8 as input and output format" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "utf8Test"
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
action.create(name, Some(TestUtils.getTestActionFilename("hello.js")))
}
val utf8 = "«ταБЬℓσö»: 1<2 & 4+1>³, now 20%€§$ off!"
val run = wsk.action.invoke(name, Map("payload" -> utf8.toJson))
withActivation(wsk.activation, run) { activation =>
activation.response.status shouldBe "success"
activation.logs.get.mkString(" ") should include(s"hello, $utf8")
}
}
it should "invoke action with large code" in withAssetCleaner(wskprops) { (wp, assetHelper) =>
val name = "big-hello"
assetHelper.withCleaner(wsk.action, name) { (action, _) =>
val filePath = TestUtils.getTestActionFilename("hello.js")
val code = FileUtils.readFileToString(new File(filePath), StandardCharsets.UTF_8)
val largeCode = code + " " * (WhiskProperties.getMaxActionSizeMB * FileUtils.ONE_MB).toInt
val tmpFile = File.createTempFile("whisk", ".js")
FileUtils.write(tmpFile, largeCode, StandardCharsets.UTF_8)
val result = action.create(name, Some(tmpFile.getAbsolutePath))
tmpFile.delete()
result
}
val hello = "hello"
val run = wsk.action.invoke(name, Map("payload" -> hello.toJson))
withActivation(wsk.activation, run) { activation =>
activation.response.status shouldBe "success"
activation.logs.get.mkString(" ") should include(s"hello, $hello")
}
}
}
|
csantanapr/incubator-openwhisk
|
tests/src/test/scala/invokerShoot/ShootInvokerTests.scala
|
Scala
|
apache-2.0
| 14,899
|
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.serving.utils
import java.io.File
import java.nio.file.{Files, Paths}
import java.text.SimpleDateFormat
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
object FileUtils {
/**
* Use Hadoop utilities to copy a file from a remote file system to the local file system.
* @param src remote source path, e.g. on HDFS or S3
* @param dst local destination path
*/
def copyToLocal(src: String, dst: String): Unit = {
val conf = new Configuration()
val srcPath = new Path(src)
val fs = srcPath.getFileSystem(conf)
val dstPath = new Path(dst)
fs.copyToLocalFile(srcPath, dstPath)
}
/**
* Check for the stop signal, i.e. the absence of the `running` file in the working directory.
* @return true if the stop signal is detected, false otherwise
*/
def checkStop(): Boolean = {
if (!Files.exists(Paths.get("running"))) {
println("Stop Signal received, will exit soon.")
return true
}
return false
}
def getLastModified(path: String): Long = {
val dir = new File(path)
val files = dir.listFiles()
if (files == null) {
return Long.MinValue
}
var lastModified: Long = Long.MinValue
for (file <- files) {
if (file.lastModified() > lastModified) {
lastModified = file.lastModified()
}
}
return lastModified
}
def checkModified(path: String, lastModified: Long): Boolean = {
val dir = new File(path)
val files = dir.listFiles()
if (files == null) {
return false
}
for (file <- files) {
// println(file.lastModified())
if (file.lastModified() > lastModified) {
val sdf = new SimpleDateFormat("MM/dd/yyyy HH:mm:ss")
println(s"new file detected, time is ${sdf.format(file.lastModified())}")
return true
}
}
return false
}
}
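// Hedged usage sketch (not part of the original file): the paths and the flow below are
// illustrative assumptions, showing how the helpers above fit together.
object FileUtilsExample {
  def main(args: Array[String]): Unit = {
    // Pull a model definition from a remote file system down to the local disk.
    FileUtils.copyToLocal("hdfs://namenode:9000/models/model.bin", "/tmp/model.bin")
    // Remember the newest modification time seen so far ...
    val lastSeen = FileUtils.getLastModified("/tmp/models")
    // ... and later ask whether anything newer has appeared since then.
    if (FileUtils.checkModified("/tmp/models", lastSeen)) {
      println("New model files detected, a reload should be triggered.")
    }
    // Exit the serving loop once the stop signal (missing `running` file) is seen.
    if (FileUtils.checkStop()) sys.exit(0)
  }
}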
|
intel-analytics/analytics-zoo
|
zoo/src/main/scala/com/intel/analytics/zoo/serving/utils/FileUtils.scala
|
Scala
|
apache-2.0
| 2,342
|
package com.rasterfoundry.api.toolrun
import com.rasterfoundry.akkautil.Authentication
import com.rasterfoundry.database.MapTokenDao
import cats.effect.IO
import cats.implicits._
import akka.http.scaladsl.server._
import doobie.Transactor
import doobie.implicits._
import doobie._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import java.util.UUID
trait ToolRunAuthorizationDirective extends Authentication with Directives {
implicit val xa: Transactor[IO]
def toolRunAuthProjectFromMapTokenO(mapTokenO: Option[UUID],
projectId: UUID): Directive0 = {
authorizeAsync {
mapTokenO match {
case Some(mapToken) =>
MapTokenDao
.checkProject(projectId)(mapToken)
.transact(xa)
.map({
case Some(_) => true
case _ => false
})
.unsafeToFuture
case _ => false.pure[Future]
}
}
}
}
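// Hedged usage sketch (not part of the original file): inside an akka-http route, the
// directive above can guard a project-scoped endpoint. `mapTokenO` and `projectId` are
// assumed to have been extracted from the request already.
//
//   toolRunAuthProjectFromMapTokenO(mapTokenO, projectId) {
//     complete("authorized")
//   }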
|
azavea/raster-foundry
|
app-backend/api/src/main/scala/toolrun/ToolRunAuthorizationDirective.scala
|
Scala
|
apache-2.0
| 1,004
|
package calculator
object Polynomial {
def computeDelta(a: Signal[Double], b: Signal[Double],
c: Signal[Double]): Signal[Double] = {
Signal(
b() * b() - 4 * a() * c()
)
}
def computeSolutions(a: Signal[Double], b: Signal[Double],
c: Signal[Double], delta: Signal[Double]): Signal[Set[Double]] = {
Signal {
      val d = delta()
      if (d < 0)
        Set()
      else if (d == 0)
        Set(-b() / 2 / a())
      else
        Set(
          (-b() + Math.sqrt(d)) / 2 / a(),
          (-b() - Math.sqrt(d)) / 2 / a()
)
}
}
}
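// Hedged usage sketch (not part of the original file), assuming the course-provided
// Signal API used above: for x^2 - 3x + 2, delta = 9 - 8 = 1 and the roots are 2 and 1.
object PolynomialExample {
  def main(args: Array[String]): Unit = {
    val (a, b, c) = (Signal(1.0), Signal(-3.0), Signal(2.0))
    val delta = Polynomial.computeDelta(a, b, c)
    println(Polynomial.computeSolutions(a, b, c, delta)()) // Set(2.0, 1.0)
  }
}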
|
shouya/thinking-dumps
|
progfun2/week4-calculator/src/main/scala/calculator/Polynomial.scala
|
Scala
|
mit
| 615
|
/*
* Copyright (c) 2014 koiroha.org.
* All sources and related resources are available under Apache License 2.0.
* http://www.apache.org/licenses/LICENSE-2.0.html
*/
package org.asterisque.msg
import java.lang.reflect.Modifier
import org.specs2.Specification
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// AbortSpec
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/**
* @author Takami Torao
*/
class AbortSpec extends Specification { def is = s2"""
Abort should:
declare as final. ${Modifier.isFinal(classOf[Abort].getModifiers) must beTrue}
have properties that specified in constructor. $e0
throw NullPointerException when message is null. ${new Abort(100, null) must throwA[NullPointerException]}
"""
def e0 = {
val a = new Abort(100, "hoge")
(a.code === 100) and (a.message === "hoge")
}
}
|
torao/asterisque
|
core-scala/src/test/scala/org/asterisque/msg/AbortSpec.scala
|
Scala
|
apache-2.0
| 917
|
/*
* Copyright (c) 2010-2011 Belmont Technology Pty Ltd. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sodatest.runtime.data
package results
import blocks.Block
abstract class BlockResult[T <: Block](
val block: T,
val errorOccurred: Boolean,
val executionErrorOccurred: Boolean,
val succeeded: Boolean,
val blockError: Option[ExecutionError]) {
def this(block: T, executionErrorOccurred: Boolean, succeeded: Boolean, blockError: Option[ExecutionError]) =
this(block, executionErrorOccurred || blockError != None, executionErrorOccurred, succeeded, blockError)
def this(block: T, executionErrorOccurred: Boolean, blockError: Option[ExecutionError]) =
this(block, executionErrorOccurred, !executionErrorOccurred, blockError)
override def toString =
"Result for " + block.toString +
(blockError match {case Some(e) => " [Error: " + e.message + "]"; case _ => ""})
}
|
GrahamLea/SodaTest
|
sodatest-runtime/src/main/scala/org/sodatest/runtime/data/results/BlockResult.scala
|
Scala
|
apache-2.0
| 1,453
|
package controllers
import play.api.mvc.{Result, Controller, Results}
import play.api.test.FakeRequest
import utils.{PlaySpecification, WithApplication}
import scala.concurrent.Future
class ClaimEndingSpec extends PlaySpecification with Results {
class TestController() extends Controller
section("unit")
"Claim ending" should {
"return timeout page with claim start page if timeout()" in new WithApplication {
val result: Future[Result] = ClaimEnding.timeout.apply(FakeRequest())
val bodyText: String = contentAsString(result)
bodyText must contain("You haven't entered any details")
status(result) mustEqual REQUEST_TIMEOUT
}
"return error page with claim start page if error()" in new WithApplication {
val result: Future[Result] = ClaimEnding.error.apply(FakeRequest())
val bodyText: String = contentAsString(result)
bodyText must contain("There's been a problem")
status(result) mustEqual INTERNAL_SERVER_ERROR
}
"return error cookie page if errorCookie()" in new WithApplication {
val result: Future[Result] = ClaimEnding.errorCookie.apply(FakeRequest())
val bodyText: String = contentAsString(result)
bodyText must contain("cookie")
status(result) mustEqual UNAUTHORIZED
}
"return back button page if errorBrowserBackButton()" in new WithApplication {
val result: Future[Result] = ClaimEnding.errorBrowserBackbutton.apply(FakeRequest())
val bodyText: String = contentAsString(result)
bodyText must contain("web browser buttons")
status(result) mustEqual BAD_REQUEST
}
"return ok thank you page if thankYou()" in new WithApplication {
val result: Future[Result] = ClaimEnding.thankyou.apply(FakeRequest())
val bodyText: String = contentAsString(result)
bodyText must contain("been sent")
status(result) mustEqual OK
}
}
section("unit")
}
|
Department-for-Work-and-Pensions/ClaimCapture
|
c3/test/controllers/ClaimEndingSpec.scala
|
Scala
|
mit
| 1,922
|
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js Test Suite        **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013, LAMP/EPFL        **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */
package org.scalajs.testsuite.jsinterop
import org.scalajs.jasminetest.JasmineTest
object `1_TestName` extends JasmineTest { // scalastyle:ignore
describe("a test with name 1_TestName") {
it("should run") {}
}
}
object eval extends JasmineTest { // scalastyle:ignore
describe("a test with name eval") {
it("should run") {}
}
}
object `\u1f4a7` extends JasmineTest { // scalastyle:ignore
  describe("a test with name \u1f4a9") {
it("should run") {}
}
}
|
jmnarloch/scala-js
|
test-suite/src/test/scala/org/scalajs/testsuite/jsinterop/StrangeNamedTests.scala
|
Scala
|
bsd-3-clause
| 1,003
|
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.filters.headers
import javax.inject.{ Inject, Provider, Singleton }
import play.api.Configuration
import play.api.inject._
import play.api.mvc._
/**
* This class sets a number of common security headers on the HTTP request.
*
* NOTE: Because these are security headers, they are "secure by default." If the filter is applied, but these
* fields are NOT defined in Configuration, the defaults on the filter are NOT omitted, but are instead
* set to the strictest possible value.
*
* <ul>
* <li>{{play.filters.headers.frameOptions}} - sets frameOptions. Some("DENY") by default.
* <li>{{play.filters.headers.xssProtection}} - sets xssProtection. Some("1; mode=block") by default.
* <li>{{play.filters.headers.contentTypeOptions}} - sets contentTypeOptions. Some("nosniff") by default.
* <li>{{play.filters.headers.permittedCrossDomainPolicies}} - sets permittedCrossDomainPolicies. Some("master-only") by default.
* <li>{{play.filters.headers.contentSecurityPolicy}} - sets contentSecurityPolicy. Some("default-src 'self'") by default.
* <li>{{play.filters.headers.referrerPolicy}} - sets referrerPolicy. Some("origin-when-cross-origin, strict-origin-when-cross-origin") by default.
* <li>{{play.filters.headers.allowActionSpecificHeaders}} - sets whether .withHeaders may be used to provide page-specific overrides. False by default.
* </ul>
*
* @see <a href="https://developer.mozilla.org/en-US/docs/HTTP/X-Frame-Options">X-Frame-Options</a>
* @see <a href="http://blogs.msdn.com/b/ie/archive/2008/09/02/ie8-security-part-vi-beta-2-update.aspx">X-Content-Type-Options</a>
* @see <a href="http://blogs.msdn.com/b/ie/archive/2008/07/02/ie8-security-part-iv-the-xss-filter.aspx">X-XSS-Protection</a>
* @see <a href="http://www.html5rocks.com/en/tutorials/security/content-security-policy/">Content-Security-Policy</a>
* @see <a href="http://www.adobe.com/devnet/articles/crossdomain_policy_file_spec.html">Cross Domain Policy File Specification</a>
* @see <a href="https://www.w3.org/TR/referrer-policy/">Referrer Policy</a>
*/
object SecurityHeadersFilter {
val X_FRAME_OPTIONS_HEADER = "X-Frame-Options"
val X_XSS_PROTECTION_HEADER = "X-XSS-Protection"
val X_CONTENT_TYPE_OPTIONS_HEADER = "X-Content-Type-Options"
val X_PERMITTED_CROSS_DOMAIN_POLICIES_HEADER = "X-Permitted-Cross-Domain-Policies"
val CONTENT_SECURITY_POLICY_HEADER = "Content-Security-Policy"
val REFERRER_POLICY = "Referrer-Policy"
/**
* Convenience method for creating a SecurityHeadersFilter that reads settings from application.conf. Generally speaking,
* you'll want to use this or the apply(SecurityHeadersConfig) method.
*
* @return a configured SecurityHeadersFilter.
*/
def apply(config: SecurityHeadersConfig = SecurityHeadersConfig()): SecurityHeadersFilter = {
new SecurityHeadersFilter(config)
}
/**
* Convenience method for creating a filter using play.api.Configuration. Good for testing.
*
* @param config a configuration object that may contain string settings.
* @return a configured SecurityHeadersFilter.
*/
def apply(config: Configuration): SecurityHeadersFilter = {
new SecurityHeadersFilter(SecurityHeadersConfig.fromConfiguration(config))
}
}
/**
* A type safe configuration object for setting security headers.
*
* @param frameOptions "X-Frame-Options":
* @param xssProtection "X-XSS-Protection":
* @param contentTypeOptions "X-Content-Type-Options"
* @param permittedCrossDomainPolicies "X-Permitted-Cross-Domain-Policies".
* @param contentSecurityPolicy "Content-Security-Policy"
* @param referrerPolicy "Referrer-Policy"
*/
case class SecurityHeadersConfig(
frameOptions: Option[String] = Some("DENY"),
xssProtection: Option[String] = Some("1; mode=block"),
contentTypeOptions: Option[String] = Some("nosniff"),
permittedCrossDomainPolicies: Option[String] = Some("master-only"),
contentSecurityPolicy: Option[String] = Some("default-src 'self'"),
referrerPolicy: Option[String] = Some("origin-when-cross-origin, strict-origin-when-cross-origin"),
allowActionSpecificHeaders: Boolean = false) {
def this() {
this(frameOptions = Some("DENY"))
}
import java.{ util => ju }
import scala.compat.java8.OptionConverters._
def withFrameOptions(frameOptions: ju.Optional[String]): SecurityHeadersConfig =
copy(frameOptions = frameOptions.asScala)
def withXssProtection(xssProtection: ju.Optional[String]): SecurityHeadersConfig =
copy(xssProtection = xssProtection.asScala)
def withContentTypeOptions(contentTypeOptions: ju.Optional[String]): SecurityHeadersConfig =
copy(contentTypeOptions = contentTypeOptions.asScala)
def withPermittedCrossDomainPolicies(permittedCrossDomainPolicies: ju.Optional[String]): SecurityHeadersConfig =
copy(permittedCrossDomainPolicies = permittedCrossDomainPolicies.asScala)
def withContentSecurityPolicy(contentSecurityPolicy: ju.Optional[String]): SecurityHeadersConfig =
copy(contentSecurityPolicy = contentSecurityPolicy.asScala)
def withReferrerPolicy(referrerPolicy: ju.Optional[String]): SecurityHeadersConfig = copy(referrerPolicy = referrerPolicy.asScala)
}
/**
* Parses out a SecurityHeadersConfig from play.api.Configuration (usually this means application.conf).
*/
object SecurityHeadersConfig {
def fromConfiguration(conf: Configuration): SecurityHeadersConfig = {
val config = conf.get[Configuration]("play.filters.headers")
SecurityHeadersConfig(
frameOptions = config.get[Option[String]]("frameOptions"),
xssProtection = config.get[Option[String]]("xssProtection"),
contentTypeOptions = config.get[Option[String]]("contentTypeOptions"),
permittedCrossDomainPolicies = config.get[Option[String]]("permittedCrossDomainPolicies"),
contentSecurityPolicy = config.get[Option[String]]("contentSecurityPolicy"),
referrerPolicy = config.get[Option[String]]("referrerPolicy"),
allowActionSpecificHeaders = config.get[Option[Boolean]]("allowActionSpecificHeaders").getOrElse(false))
}
}
/**
* The class that implements the filter. This gives you the most control, but you may want to use the apply()
* method on the companion singleton for convenience.
*/
@Singleton
class SecurityHeadersFilter @Inject() (config: SecurityHeadersConfig) extends EssentialFilter {
import SecurityHeadersFilter._
/**
* Returns the security headers for a request.
* All security headers applied to all requests by default.
* Omit any headers explicitly provided in the result object, provided
* play.filters.headers.allowActionSpecificHeaders is true.
* Override this method to alter that behavior.
*/
protected def headers(request: RequestHeader, result: Result): Seq[(String, String)] = {
val headers = Seq(
config.frameOptions.map(X_FRAME_OPTIONS_HEADER -> _),
config.xssProtection.map(X_XSS_PROTECTION_HEADER -> _),
config.contentTypeOptions.map(X_CONTENT_TYPE_OPTIONS_HEADER -> _),
config.permittedCrossDomainPolicies.map(X_PERMITTED_CROSS_DOMAIN_POLICIES_HEADER -> _),
config.contentSecurityPolicy.map(CONTENT_SECURITY_POLICY_HEADER -> _),
config.referrerPolicy.map(REFERRER_POLICY -> _)
).flatten
if (config.allowActionSpecificHeaders) {
headers.filter { case (name, _) => result.header.headers.get(name).isEmpty }
} else {
headers
}
}
/**
* Applies the filter to an action, appending the headers to the result so it shows in the HTTP response.
*/
def apply(next: EssentialAction) = EssentialAction { req =>
import play.core.Execution.Implicits.trampoline
next(req).map(result => result.withHeaders(headers(req, result): _*))
}
}
/**
* Provider for security headers configuration.
*/
@Singleton
class SecurityHeadersConfigProvider @Inject() (configuration: Configuration) extends Provider[SecurityHeadersConfig] {
lazy val get = SecurityHeadersConfig.fromConfiguration(configuration)
}
/**
* The security headers module.
*/
class SecurityHeadersModule extends SimpleModule(
bind[SecurityHeadersConfig].toProvider[SecurityHeadersConfigProvider],
bind[SecurityHeadersFilter].toSelf
)
/**
* The security headers components.
*/
trait SecurityHeadersComponents {
def configuration: Configuration
lazy val securityHeadersConfig: SecurityHeadersConfig = SecurityHeadersConfig.fromConfiguration(configuration)
lazy val securityHeadersFilter: SecurityHeadersFilter = SecurityHeadersFilter(securityHeadersConfig)
}
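/**
 * Hedged usage sketch (not part of the original file): constructing the filter with an
 * explicit, illustrative configuration instead of the strict defaults documented above.
 */
object SecurityHeadersFilterExample {
  val relaxedFilter: SecurityHeadersFilter = SecurityHeadersFilter(
    SecurityHeadersConfig(
      frameOptions = Some("SAMEORIGIN"),
      referrerPolicy = Some("no-referrer")))
}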
|
ktoso/playframework
|
framework/src/play-filters-helpers/src/main/scala/play/filters/headers/SecurityHeadersFilter.scala
|
Scala
|
apache-2.0
| 8,598
|
package jp.que.ti.sv.validator.immutable.pram1
import jp.que.ti.sv.Validator1ParamIF
class Min protected (validators: Traversable[Validator1ParamIF]) extends AndValidator(validators)
object Min {
def apply(min: Int) = new Min(Number() :: MinValue(min) :: Nil)
def apply(min: Long) = new Min(Number() :: MinValue(min) :: Nil)
}
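// Hedged usage sketch (not part of the original file): Min(18) composes a Number check
// with a MinValue(18) check, so the input must parse as a number and be at least 18.
// The threshold is illustrative.
//
//   val adultAge = Min(18)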
|
yangiYA/simple-validator
|
simple-valid/src/main/scala/jp/que/ti/sv/validator/immutable/pram1/Min.scala
|
Scala
|
mit
| 333
|
object cool {
def main(args: Array[String]) {
// Put code here
}
}
|
LoyolaChicagoBooks/introcs-scala-examples
|
cool/cool.scala
|
Scala
|
gpl-3.0
| 75
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import org.apache.spark.TaskContext
class FakeTask(
stageId: Int,
partitionId: Int,
prefLocs: Seq[TaskLocation] = Nil) extends Task[Int](stageId, 0, partitionId) {
override def runTask(context: TaskContext): Int = 0
override def preferredLocations: Seq[TaskLocation] = prefLocs
}
object FakeTask {
/**
* Utility method to create a TaskSet, potentially setting a particular sequence of preferred
* locations for each task (given as varargs) if this sequence is not empty.
*/
def createTaskSet(numTasks: Int, prefLocs: Seq[TaskLocation]*): TaskSet = {
createTaskSet(numTasks, stageAttemptId = 0, prefLocs: _*)
}
def createTaskSet(numTasks: Int, stageAttemptId: Int, prefLocs: Seq[TaskLocation]*): TaskSet = {
createTaskSet(numTasks, stageId = 0, stageAttemptId, prefLocs: _*)
}
def createTaskSet(numTasks: Int, stageId: Int, stageAttemptId: Int, prefLocs: Seq[TaskLocation]*):
TaskSet = {
if (prefLocs.size != 0 && prefLocs.size != numTasks) {
throw new IllegalArgumentException("Wrong number of task locations")
}
val tasks = Array.tabulate[Task[_]](numTasks) { i =>
new FakeTask(stageId, i, if (prefLocs.size != 0) prefLocs(i) else Nil)
}
new TaskSet(tasks, stageId, stageAttemptId, priority = 0, null)
}
}
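// Hedged usage sketch (not part of the original file): a two-task TaskSet whose tasks
// prefer two (illustrative) hosts, one preferred-location sequence per task.
//
//   val taskSet = FakeTask.createTaskSet(
//     2, Seq(TaskLocation("hostA")), Seq(TaskLocation("hostB")))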
|
Panos-Bletsos/spark-cost-model-optimizer
|
core/src/test/scala/org/apache/spark/scheduler/FakeTask.scala
|
Scala
|
apache-2.0
| 2,130
|
package recfun
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class BalanceSuite extends FunSuite {
import Main.balance
test("trivial case, empty list") {
assert(balance("".toList))
}
test("trivial success case") {
assert(balance("()".toList))
}
test("balance: '(if (zero? x) max (/ 1 x))' is balanced") {
assert(balance("(if (zero? x) max (/ 1 x))".toList))
}
test("balance: 'I told him ...' is balanced") {
assert(balance("I told him (that it's not (yet) done).\\n(But he wasn't listening)".toList))
}
test("balance: ':-)' is unbalanced") {
assert(!balance(":-)".toList))
}
test("balance: counting is not enough") {
assert(!balance("())(".toList))
}
}
|
xfornesa/progfun
|
recfun/src/test/scala/recfun/BalanceSuite.scala
|
Scala
|
mit
| 797
|
/* Copyright 2009-2021 EPFL, Lausanne */
object NestedFunState6 {
def simpleSideEffect(n: BigInt): BigInt = {
require(n > 0)
var a = BigInt(0)
def incA(prevA: BigInt): Unit = {
require(prevA == a)
a += 1
} ensuring(_ => a == prevA + 1)
incA(a)
incA(a)
incA(a)
incA(a)
a
} ensuring(_ == 4)
}
|
epfl-lara/stainless
|
frontends/benchmarks/imperative/valid/NestedFunState6.scala
|
Scala
|
apache-2.0
| 349
|
package observatory
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.prop.Checkers
import Interaction._
import scala.collection.concurrent.TrieMap
@RunWith(classOf[JUnitRunner])
class InteractionTest extends FunSuite with Checkers {
test("tileLocation should work"){
val actualLocation = tileLocation(17, 65544, 43582)
val expectedLocation = Location(51.512161249555156,0.02197265625)
// 51.512161249555156
assert(actualLocation === expectedLocation)
}
//Tile(65544,43582,17)
// LatLonPoint(51.51202,0.02435,17)
}
|
kevllino/scala-specialization
|
observatory/src/test/scala/observatory/InteractionTest.scala
|
Scala
|
mit
| 619
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import scala.concurrent.duration._
import scala.language.implicitConversions
import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually._
import org.apache.spark.JobExecutionStatus._
class StatusTrackerSuite extends SparkFunSuite with Matchers with LocalSparkContext {
testRetry("basic status API usage") {
sc = new SparkContext("local", "test", new SparkConf(false))
val jobFuture = sc.parallelize(1 to 10000, 2).map(identity).groupBy(identity).collectAsync()
val jobId: Int = eventually(timeout(10.seconds)) {
val jobIds = jobFuture.jobIds
jobIds.size should be(1)
jobIds.head
}
val jobInfo = eventually(timeout(10.seconds)) {
sc.statusTracker.getJobInfo(jobId).get
}
jobInfo.status() should not be FAILED
val stageIds = jobInfo.stageIds()
stageIds.size should be(2)
val firstStageInfo = eventually(timeout(10.seconds)) {
sc.statusTracker.getStageInfo(stageIds.min).get
}
firstStageInfo.stageId() should be(stageIds.min)
firstStageInfo.currentAttemptId() should be(0)
firstStageInfo.numTasks() should be(2)
eventually(timeout(10.seconds)) {
val updatedFirstStageInfo = sc.statusTracker.getStageInfo(stageIds.min).get
updatedFirstStageInfo.numCompletedTasks() should be(2)
updatedFirstStageInfo.numActiveTasks() should be(0)
updatedFirstStageInfo.numFailedTasks() should be(0)
}
}
test("getJobIdsForGroup()") {
sc = new SparkContext("local", "test", new SparkConf(false))
// Passing `null` should return jobs that were not run in a job group:
val defaultJobGroupFuture = sc.parallelize(1 to 1000).countAsync()
val defaultJobGroupJobId = eventually(timeout(10.seconds)) {
defaultJobGroupFuture.jobIds.head
}
eventually(timeout(10.seconds)) {
sc.statusTracker.getJobIdsForGroup(null).toSet should be (Set(defaultJobGroupJobId))
}
// Test jobs submitted in job groups:
sc.setJobGroup("my-job-group", "description")
sc.statusTracker.getJobIdsForGroup("my-job-group") should be (Seq.empty)
val firstJobFuture = sc.parallelize(1 to 1000).countAsync()
val firstJobId = eventually(timeout(10.seconds)) {
firstJobFuture.jobIds.head
}
eventually(timeout(10.seconds)) {
sc.statusTracker.getJobIdsForGroup("my-job-group") should be (Seq(firstJobId))
}
val secondJobFuture = sc.parallelize(1 to 1000).countAsync()
val secondJobId = eventually(timeout(10.seconds)) {
secondJobFuture.jobIds.head
}
eventually(timeout(10.seconds)) {
sc.statusTracker.getJobIdsForGroup("my-job-group").toSet should be (
Set(firstJobId, secondJobId))
}
}
test("getJobIdsForGroup() with takeAsync()") {
sc = new SparkContext("local", "test", new SparkConf(false))
sc.setJobGroup("my-job-group2", "description")
sc.statusTracker.getJobIdsForGroup("my-job-group2") shouldBe empty
val firstJobFuture = sc.parallelize(1 to 1000, 1).takeAsync(1)
val firstJobId = eventually(timeout(10.seconds)) {
firstJobFuture.jobIds.head
}
eventually(timeout(10.seconds)) {
sc.statusTracker.getJobIdsForGroup("my-job-group2") should be (Seq(firstJobId))
}
}
test("getJobIdsForGroup() with takeAsync() across multiple partitions") {
sc = new SparkContext("local", "test", new SparkConf(false))
sc.setJobGroup("my-job-group2", "description")
sc.statusTracker.getJobIdsForGroup("my-job-group2") shouldBe empty
val firstJobFuture = sc.parallelize(1 to 1000, 2).takeAsync(999)
eventually(timeout(10.seconds)) {
firstJobFuture.jobIds.head
}
eventually(timeout(10.seconds)) {
sc.statusTracker.getJobIdsForGroup("my-job-group2") should have size 2
}
}
}
|
bdrillard/spark
|
core/src/test/scala/org/apache/spark/StatusTrackerSuite.scala
|
Scala
|
apache-2.0
| 4,588
|
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.dispatcher
import javax.management.ObjectName
import akka.actor.ActorSystem
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.Waiters
import org.scalatest.{BeforeAndAfterAll, Inspectors, Matchers, WordSpecLike}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._
import org.squbs.unicomplex.{JMX, PortBindings, Unicomplex, UnicomplexBoot}
import scala.concurrent.Await
object ForkJoinConfiguratorSpec {
val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath
val classPaths = Array(
"DummyCube",
"DummyCubeSvc",
"DummySvc",
"DummySvcActor",
"StashCube",
"DummyExtensions.jar"
) map (dummyJarsDir + "/" + _)
val jmxPrefix = "forkJoinConfiguratorSpec"
val config = ConfigFactory.parseString(
s"""
|squbs {
| actorsystem-name = ForkJoinConfiguratorSpec
| ${JMX.prefixConfig} = true
|}
|
|default-listener.bind-port = 0
|
|akka.actor {
| default-dispatcher {
| default-executor.fallback = org.squbs.dispatcher.ForkJoinConfigurator
| fork-join-executor {
| # Avoid JMX naming conflict in case of multiple tests.
| jmx-name-prefix = $jmxPrefix
| }
| }
|}
""".stripMargin)
val boot = UnicomplexBoot(config)
.createUsing {(name, config) => ActorSystem(name, config)}
.scanComponents(classPaths)
.initExtensions.start()
}
class ForkJoinConfiguratorSpec extends TestKit(ForkJoinConfiguratorSpec.boot.actorSystem) with ImplicitSender
with WordSpecLike with Matchers with Inspectors with BeforeAndAfterAll with Waiters {
import ForkJoinConfiguratorSpec._
import system.dispatcher
implicit val am = ActorMaterializer()
val portBindings = Await.result((Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]], awaitMax)
val port = portBindings("default-listener")
"The ForkJoinConfigurator" must {
"have some actors started" in {
val w = new Waiter
system.actorSelection("/user/DummyCube").resolveOne().onComplete { result =>
w {
assert(result.isSuccess)
}
w.dismiss()
}
w.await()
}
"be able to handle a simple Akka Http request" in {
import org.squbs.unicomplex._
Await.result(entityAsString(s"http://127.0.0.1:$port/dummysvc/msg/hello"), awaitMax) should be("^hello$")
}
"expose proper ForkJoinPool MXBean stats" in {
import org.squbs.unicomplex.JMX._
val fjName =
new ObjectName(jmxPrefix + '.' + forkJoinStatsName + "ForkJoinConfiguratorSpec-akka.actor.default-dispatcher")
get(fjName, "PoolSize").asInstanceOf[Int] should be > 0
get(fjName, "ActiveThreadCount").asInstanceOf[Int] should be >= 0
get(fjName, "Parallelism").asInstanceOf[Int] should be > 0
get(fjName, "StealCount").asInstanceOf[Long] should be > 0L
get(fjName, "Mode").asInstanceOf[String] should be ("Async")
get(fjName, "QueuedSubmissionCount").asInstanceOf[Int] should be >= 0
get(fjName, "QueuedTaskCount").asInstanceOf[Long] should be >= 0L
get(fjName, "RunningThreadCount").asInstanceOf[Int] should be >= 0
get(fjName, "Quiescent") shouldBe a[java.lang.Boolean]
}
}
override def afterAll(): Unit = {
Unicomplex(system).uniActor ! GracefulStop
}
}
|
anilgursel/squbs
|
squbs-unicomplex/src/test/scala/org/squbs/dispatcher/ForkJoinConfiguratorSpec.scala
|
Scala
|
apache-2.0
| 4,091
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.expressions
import org.apache.flink.api.common.typeinfo.Types
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.table.dataformat.Decimal
import org.apache.flink.table.expressions.utils.ExpressionTestBase
import org.apache.flink.table.typeutils.DecimalTypeInfo
import org.apache.flink.types.Row
import org.junit.Test
class DecimalTypeTest extends ExpressionTestBase {
@Test
def testDecimalLiterals(): Unit = {
// implicit double
testSqlApi(
"11.2",
"11.2")
// implicit double
testSqlApi(
"0.7623533651719233",
"0.7623533651719233")
// explicit decimal (with precision of 19)
testSqlApi(
"1234567891234567891",
"1234567891234567891")
}
@Test
def testDecimalBorders(): Unit = {
testSqlApi(
Double.MaxValue.toString,
Double.MaxValue.toString)
testSqlApi(
Double.MinValue.toString,
Double.MinValue.toString)
testSqlApi(
s"CAST(${Double.MinValue} AS FLOAT)",
Float.NegativeInfinity.toString)
testSqlApi(
s"CAST(${Byte.MinValue} AS TINYINT)",
Byte.MinValue.toString)
testSqlApi(
s"CAST(${Byte.MinValue} AS TINYINT) - CAST(1 AS TINYINT)",
Byte.MaxValue.toString)
testSqlApi(
s"CAST(${Short.MinValue} AS SMALLINT)",
Short.MinValue.toString)
testSqlApi(
s"CAST(${Int.MinValue} AS INT) - 1",
Int.MaxValue.toString)
testSqlApi(
s"CAST(${Long.MinValue} AS BIGINT)",
Long.MinValue.toString)
}
@Test
def testDecimalCasting(): Unit = {
// from String
testSqlApi(
"CAST('123456789123456789123456789' AS DECIMAL(27, 0))",
"123456789123456789123456789")
// from double
testSqlApi(
"CAST(f3 AS DECIMAL)",
"4")
testSqlApi(
"CAST(f3 AS DECIMAL(10,2))",
"4.20"
)
// to double
testSqlApi(
"CAST(f0 AS DOUBLE)",
"1.2345678912345679E8")
// to int
testSqlApi(
"CAST(f4 AS INT)",
"123456789")
// to long
testSqlApi(
"CAST(f4 AS BIGINT)",
"123456789")
}
@Test
def testDecimalArithmetic(): Unit = {
// note: calcite type inference:
// Decimal+ExactNumeric => Decimal
// Decimal+Double => Double.
// implicit cast to decimal
testSqlApi(
"f1 + 12",
"123456789123456789123456801")
// implicit cast to decimal
testSqlApi(
"12 + f1",
"123456789123456789123456801")
testSqlApi(
"f1 + 12.3",
"123456789123456789123456801.3"
)
testSqlApi(
"12.3 + f1",
"123456789123456789123456801.3")
testSqlApi(
"f1 + f1",
"246913578246913578246913578")
testSqlApi(
"f1 - f1",
"0")
testSqlApi(
"f1 / f1",
"1.00000000")
testSqlApi(
"MOD(f1, f1)",
"0")
testSqlApi(
"-f0",
"-123456789.123456789123456789")
}
@Test
def testDecimalComparison(): Unit = {
testSqlApi(
"f1 < 12",
"false")
testSqlApi(
"f1 > 12",
"true")
testSqlApi(
"f1 = 12",
"false")
testSqlApi(
"f5 = 0",
"true")
testSqlApi(
"f1 = CAST('123456789123456789123456789' AS DECIMAL(30, 0))",
"true")
testSqlApi(
"f1 <> CAST('123456789123456789123456789' AS DECIMAL(30, 0))",
"false")
testSqlApi(
"f4 < f0",
"true")
// TODO add all tests if FLINK-4070 is fixed
testSqlApi(
"12 < f1",
"true")
}
// ----------------------------------------------------------------------------------------------
override def testData: Row = {
val testData = new Row(6)
testData.setField(0, Decimal.castFrom("123456789.123456789123456789", 30, 18))
testData.setField(1, Decimal.castFrom("123456789123456789123456789", 30, 0))
testData.setField(2, 42)
testData.setField(3, 4.2)
testData.setField(4, Decimal.castFrom("123456789", 10, 0))
testData.setField(5, Decimal.castFrom("0.000", 10, 3))
testData
}
override def typeInfo: RowTypeInfo = {
new RowTypeInfo(
/* 0 */ DecimalTypeInfo.of(30, 18),
/* 1 */ DecimalTypeInfo.of(30, 0),
/* 2 */ Types.INT,
/* 3 */ Types.DOUBLE,
/* 4 */ DecimalTypeInfo.of(10, 0),
/* 5 */ DecimalTypeInfo.of(10, 3))
}
}
|
shaoxuan-wang/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/expressions/DecimalTypeTest.scala
|
Scala
|
apache-2.0
| 5,153
|
package services.alertwatcherct
import javax.inject._
import scala.concurrent.Future
import utils.Awaits
import models.alertwatcherct.Alert
import dao.alertwatcherct.AlertWatcherCTDao
import dao.alertwatcherct.IAlertWatcherCTDao
import org.joda.time.DateTime
import org.joda.time.LocalDate
import java.sql.Timestamp
import com.github.tototoshi.slick.PostgresJodaSupport._
trait IAlertWatcherCTService extends BaseService[Alert] {
def insert(alert: Alert): Future[Unit]
def update(id: Long, alert: Alert): Future[Unit]
def remove(id: Long): Future[Int]
def findById(id: Long): Future[Option[Alert]]
def findBySiteId(siteid: String): Future[Option[Seq[Alert]]]
def findAll(): Future[Option[Seq[Alert]]]
def findAllAlerts(): Seq[(Long, String)]
}
@Singleton
class AlertWatcherCTService @Inject() (dao:IAlertWatcherCTDao) extends IAlertWatcherCTService{
import play.api.libs.concurrent.Execution.Implicits.defaultContext
def insert(alert: Alert): Future[Unit] = {
dao.insert(alert);
}
def update(id: Long, alert: Alert): Future[Unit] = {
// alert.id = Option(id.toInt)
// alert.id = id
dao.update(alert)
}
def remove(id: Long): Future[Int] = {
dao.remove(id)
}
def findById(id: Long): Future[Option[Alert]] = {
dao.findById(id)
}
def findBySiteId(siteid: String): Future[Option[Seq[Alert]]] = {
dao.findBySiteId(siteid).map { x => Option(x) }
}
def findAll(): Future[Option[Seq[Alert]]] = {
dao.findAll().map { x => Option(x) }
}
private def validateId(id: Long): Unit = {
val future = findById(id)
val entry = Awaits.get(5, future)
if (entry==null || entry.equals(None)) throw new RuntimeException("Could not find Alert: " + id)
}
def findAllAlerts(): Seq[(Long, String)] = {
val future = this.findAll()
val result = Awaits.get(5, future)
val alerts: Seq[(Long, String)] = result
.getOrElse(Seq(Alert(0, "", "", 0, new LocalDate(), new DateTime(), "", "", "",
Some(""), Some(""), Some(""),
0, 0, 0, "", "", "", "")))
.toSeq
// .map { alert => (alert.id.get.toString,alert.name) }
.map { alert => (alert.id, alert.sitename) }
return alerts
}
}
|
tnddn/iv-web
|
portal/rest-portal/app/services/alertwatcherct/AlertWatcherCTService.scala
|
Scala
|
apache-2.0
| 2,273
|
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.dc.stream
import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpEntity, HttpResponse}
import akka.stream.Supervision._
import akka.stream._
import akka.stream.contrib.{BufferLimiter, Retry}
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Merge, Partition, Sink}
import akka.util.ByteString
import cmwell.dc.{LazyLogging, Settings}
import cmwell.dc.stream.MessagesTypesAndExceptions._
import cmwell.dc.stream.SingleMachineInfotonIngester.{IngestInput, IngestOutput, IngestState, IngestStateStatus}
import scala.concurrent.Future
import scala.util.{Failure, Success, Try}
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Created by eli on 27/06/16.
*
* Distributes each infoton to its matching machine for ingestion
*/
object InfotonAllMachinesDistributerAndIngester extends LazyLogging {
val initialBulkStatus =
IngestStateStatus(Settings.initialBulkIngestRetryCount, 0, None)
def apply(dckey: DcInfoKey,
hosts: Vector[(String, Option[Int])],
decider: Decider)(implicit sys: ActorSystem, mat: Materializer) = {
val size = hosts.length
Flow.fromGraph(GraphDSL.create() { implicit b =>
import GraphDSL.Implicits._
val part = b.add(Partition[BaseInfotonData](size, {
case BaseInfotonData(path, _) =>
(cmwell.util.string.Hash.adler32long(path) % size).toInt
}))
val mergeIngests = b.add(Merge[(Try[IngestOutput], IngestState)](size))
part.outlets.zipWithIndex.foreach {
case (o: Outlet[BaseInfotonData @unchecked], i) => {
val (host, portOpt) = hosts(i)
val location = s"$host:${portOpt.getOrElse(80)}"
val ingestFlow =
SingleMachineInfotonIngester(dckey, location, decider)
val infotonAggregator = b.add(
InfotonAggregator[BaseInfotonData](
Settings.maxIngestInfotonCount,
Settings.maxIngestByteSize,
Settings.maxTotalInfotonCountAggregatedForIngest,
identity
)
)
val initialStateAdder = b.add(
Flow[scala.collection.immutable.Seq[BaseInfotonData]].map(
ingestData =>
Future
.successful(ingestData) -> (ingestData -> initialBulkStatus)
)
)
val ingestRetrier =
Retry.concat(Settings.ingestRetryQueueSize, 1, BufferLimiter(1, ingestFlow))(
retryDecider(dckey, location)
)
o ~> infotonAggregator ~> initialStateAdder ~> ingestRetrier ~> mergeIngests
.in(i)
}
}
FlowShape(part.in, mergeIngests.out)
})
}
def retryDecider(dcKey: DcInfoKey, location: String)(implicit sys: ActorSystem, mat: Materializer) =
(state: IngestState) =>
state match {
case (
ingestSeq,
IngestStateStatus(
retriesLeft,
singleRetryCount,
Some(ex: IngestServiceUnavailableException)
)
) => {
logger.warn(s"Sync $dcKey: Ingest to machine $location failed (Service Unavailable). " +
s"Will keep trying again until the service will be available. The exception is: ${ex.getMessage} ${ex.getCause.getMessage}")
Util.warnPrintFuturedBodyException(ex)
val ingestState =
(
ingestSeq,
IngestStateStatus(
retriesLeft,
singleRetryCount + (if (ingestSeq.size == 1) 1 else 0),
Some(ex)
)
)
Some(
List(
akka.pattern.after(
Settings.ingestServiceUnavailableDelay,
sys.scheduler
)(Future.successful(ingestSeq)) -> ingestState
)
)
}
case (
ingestSeq,
IngestStateStatus(retriesLeft, singleRetryCount, ex)
) =>
if (ingestSeq.size == 1 && retriesLeft == 0) {
val originalRequest =
ingestSeq.foldLeft(empty)(_ ++ _.data).utf8String
ex.get match {
case e: FuturedBodyException =>
logger.error(
s"${e.getMessage} ${e.getCause.getMessage} No more retries will be done. Please use the red log to see the list of all the failed ingests."
)
Util.errorPrintFuturedBodyException(e)
case e =>
val u = Util.extractUuid(ingestSeq.head)
logger.error(s"Sync $dcKey: Ingest of uuid $u to machine $location failed. No more reties will be done. " +
"Please use the red log to see the list of all the failed ingests. The exception is: ", e)
}
logger.trace(s"Original Ingest request for uuid ${Util.extractUuid(ingestSeq.head)} was: $originalRequest")
redlog.info(s"Sync $dcKey: Ingest of uuid ${Util.extractUuid(ingestSeq.head)} to machine $location failed")
Some(Nil)
} else if (ingestSeq.size == 1) {
logger.trace(s"Sync $dcKey: Ingest of uuid ${Util.extractUuid(ingestSeq.head)} to " +
s"machine $location failed. Retries left $retriesLeft. Will try again. The exception is: ", ex.get)
Util.tracePrintFuturedBodyException(ex.get)
val ingestState =
(
ingestSeq,
IngestStateStatus(retriesLeft - 1, singleRetryCount + 1, ex)
)
Some(
List(
akka.pattern.after(Settings.ingestRetryDelay, sys.scheduler)(
Future.successful(ingestSeq)
) -> ingestState
)
)
} else if (retriesLeft == 0) {
logger.trace(s"Sync $dcKey: Ingest of bulk uuids to machine $location failed. No more bulk retries left. " +
s"Will split to request for each uuid and try again. The exception is: ", ex.get)
Util.tracePrintFuturedBodyException(ex.get)
Some(ingestSeq.view.map { infotonMetaAndData =>
val ingestData = Seq(infotonMetaAndData)
val ingestState = ingestData -> IngestStateStatus(
Settings.initialSingleIngestRetryCount,
singleRetryCount,
ex
)
akka.pattern.after(Settings.ingestRetryDelay, sys.scheduler)(
Future.successful(ingestData)
) -> ingestState
}.to(List))
} else {
logger.trace(
s"Sync $dcKey: Ingest of bulk uuids to machine $location failed. Retries left $retriesLeft. Will try again. The exception is: ",
ex.get
)
Util.tracePrintFuturedBodyException(ex.get)
val ingestState =
(
ingestSeq,
IngestStateStatus(retriesLeft - 1, singleRetryCount, ex)
)
Some(
List(
akka.pattern.after(Settings.ingestRetryDelay, sys.scheduler)(
Future.successful(ingestSeq)
) -> ingestState
)
)
}
}
}
|
e-orz/CM-Well
|
server/cmwell-dc/src/main/scala/cmwell/dc/stream/InfotonAllMachinesDistributerAndIngester.scala
|
Scala
|
apache-2.0
| 7,947
|
package test
import org.scalatest.Tag
import org.scalatest.time.{Milliseconds, Seconds, Span}
/**
* ToolConstants for our tests.
*/
object ToolConstants {
val DefaultMaxInputQueueSize = 50000
val DefaultMaxOutputQueueSize = 50000
val AccumulationTimeout = Span(500, Milliseconds)
val EventuallyTimeout = Span(10, Seconds)
val EventuallyInterval = Span(5, Milliseconds)
val NewInputLineTimeout = Span(10, Seconds)
val NextOutputLineTimeout = Span(5, Seconds)
val NoWindows = Tag("NoWindows")
}
|
ensime/scala-debugger
|
scala-debugger-tool/src/it/scala/test/ToolConstants.scala
|
Scala
|
apache-2.0
| 513
|
/*
* Copyright 2014 - 2015 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pathy
import pathy.scalacheck._
import org.specs2.mutable._
import org.specs2.ScalaCheck
import scalaz.syntax.foldable._
class PathSpecs extends Specification with ScalaCheck {
import PathyArbitrary._
import Path._
import posixCodec._
"two directories" in {
unsafePrintPath(dir("foo") </> file("bar")) must_== "./foo/bar"
}
"file with two parents" in {
unsafePrintPath(dir("foo") </> dir("bar") </> file("image.png")) must_== "./foo/bar/image.png"
}
"file without extension" in {
unsafePrintPath(file("image") <:> "png") must_== "./image.png"
}
"file with extension" in {
unsafePrintPath(file("image.jpg") <:> "png") must_== "./image.png"
}
"printPath - ./../" in {
unsafePrintPath(parentDir1(currentDir)) must_== "./../"
}
"print and parse again should produce same Path" ! prop { path: AbsFile[Sandboxed] =>
parseAbsFile(printPath(path)).get must_== path
}
"</> - ./../foo/" in {
unsafePrintPath(parentDir1(currentDir) </> unsandbox(dir("foo"))) must_== "./../foo/"
}
"parentDir1 - ./../foo/../" in {
unsafePrintPath((parentDir1(currentDir) </> unsandbox(dir("foo"))) </> parentDir1(currentDir)) must_== "./../foo/../"
}
"<::> - ./../" in {
unsafePrintPath(currentDir <::> currentDir) must_== "./../"
}
"<::> - ./../foo/" in {
unsafePrintPath(currentDir <::> dir("foo")) must_== "./../foo/"
}
"<::> - ./../foo/../" in {
unsafePrintPath((currentDir <::> dir("foo")) <::> currentDir) must_== "./../foo/../"
}
"canonicalize - 1 down, 1 up" in {
unsafePrintPath(canonicalize(parentDir1(dir("foo")))) must_== "./"
}
"canonicalize - 2 down, 2 up" in {
unsafePrintPath(canonicalize(parentDir1(parentDir1(dir("foo") </> dir("bar"))))) must_== "./"
}
"renameFile - single level deep" in {
unsafePrintPath(renameFile(file("image.png"), _.dropExtension)) must_== "./image"
}
"sandbox - sandbox absolute dir to one level higher" in {
sandbox(rootDir </> dir("foo"), rootDir </> dir("foo") </> dir("bar")) must beSome.which {
unsafePrintPath(_) must_== "./bar/"
}
}
"depth - negative" in {
depth(parentDir1(parentDir1(parentDir1(currentDir)))) must_== -3
}
"flatten - returns NEL of result of folding each layer of path" in {
flatten(
"r", "c", "p", identity, identity,
currentDir </> dir("foo") </> dir("bar") </> file("flat.md")
).toList must_== List("c", "foo", "bar", "flat.md")
}
"parseRelFile - image.png" in {
parseRelFile("image.png") must beSome(file("image.png"))
}
"parseRelFile - ./image.png" in {
parseRelFile("./image.png") must beSome(file("image.png"))
}
"parseRelFile - foo/image.png" in {
parseRelFile("foo/image.png") must beSome(dir("foo") </> file("image.png"))
}
"parseRelFile - ../foo/image.png" in {
parseRelFile("../foo/image.png") must beSome(currentDir <::> dir("foo") </> file("image.png"))
}
"parseRelFile - /foo/image.png" in {
parseRelFile("/foo/image.png") must beNone
}
"parseRelFile - foo/" in {
parseRelFile("foo/") must beNone
}
"parseAbsFile - /image.png" in {
parseAbsFile("/image.png") must beSome(rootDir </> file("image.png"))
}
"parseAbsFile - /foo/image.png" in {
parseAbsFile("/foo/image.png") must beSome(rootDir </> dir("foo") </> file("image.png"))
}
"parseAbsFile - /foo/" in {
parseAbsFile("/foo/") must beNone
}
"parseAbsFile - foo/image.png" in {
parseAbsFile("foo/image.png") must beNone
}
"parseRelDir - empty string" in {
parseRelDir("") must beSome(currentDir[Unsandboxed])
}
"parseRelDir - ./../" in {
parseRelDir("./../") must beSome(currentDir <::> currentDir)
}
"parseRelDir - foo/" in {
parseRelDir("foo/") must beSome(dir("foo"))
}
"parseRelDir - foo/bar/" in {
parseRelDir("foo/bar/") must beSome(dir("foo") </> dir("bar"))
}
"parseRelDir - /foo/" in {
parseRelDir("/foo/") must beNone
}
"parseRelDir - foo" in {
parseRelDir("foo") must beNone
}
"parseRelDir - ./foo/bar/" in {
parseRelDir("./foo/bar/") must beSome(dir("foo") </> dir("bar"))
}
"parseRelAsDir - ./foo/bar" in {
parseRelAsDir("./foo/bar") must beSome(dir("foo") </> dir("bar"))
}
"parseRelAsDir - ./foo/bar/" in {
parseRelAsDir("./foo/bar/") must beSome(dir("foo") </> dir("bar"))
}
"parseAbsDir - /" in {
parseAbsDir("/") must beSome(rootDir[Unsandboxed])
}
"parseAbsDir - /foo/" in {
parseAbsDir("/foo/") must beSome(rootDir </> dir("foo"))
}
"parseAbsDir - /foo/bar/" in {
parseAbsDir("/foo/bar/") must beSome(rootDir </> dir("foo") </> dir("bar"))
}
"parseAbsDir - /foo" in {
parseAbsDir("/foo") must beNone
}
"parseAbsDir - foo" in {
parseAbsDir("foo") must beNone
}
"parseAbsAsDir - /foo/bar/" in {
parseAbsAsDir("/foo/bar/") must beSome(rootDir </> dir("foo") </> dir("bar"))
}
"parseAbsAsDir - /foo/bar" in {
parseAbsAsDir("/foo/bar") must beSome(rootDir </> dir("foo") </> dir("bar"))
}
"placeholder codec" in {
"printPath - replaces separator in segments with placeholder" in {
unsafePrintPath(dir("foo/bar") </> dir("baz") </> file("qu/ux.txt")) must_== "./foo$sep$bar/baz/qu$sep$ux.txt"
}
"printPath - replaces single dot dir name with placeholder" in {
unsafePrintPath(dir(".") </> file("config")) must_== "./$dot$/config"
}
"printPath - replaces double dot dir name with placeholder" in {
unsafePrintPath(dir("foo") </> dir("..") </> file("config")) must_== "./foo/$dotdot$/config"
}
"parsePath - reads separator ph in segments" in {
parseRelDir("foo/$sep$/bar/") must beSome(dir("foo") </> dir("/") </> dir("bar"))
}
"parsePath - reads single dot ph in segments" in {
parseRelFile("foo/$dot$/bar") must beSome(dir("foo") </> dir(".") </> file("bar"))
}
"parsePath - reads double dot separator in segments" in {
parseRelFile("foo/bar/$dotdot$") must beSome(dir("foo") </> dir("bar") </> file(".."))
}
}
}
|
mossprescott/scala-pathy
|
tests/src/test/scala/pathy/path.scala
|
Scala
|
apache-2.0
| 6,666
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.common.TopicAndPartition
import org.apache.kafka.common.TopicPartition
/**
* Keys used for delayed operation metrics recording
*/
trait DelayedOperationKey {
def keyLabel: String
}
object DelayedOperationKey {
val globalLabel = "All"
}
/* used by delayed-produce and delayed-fetch operations */
case class TopicPartitionOperationKey(topic: String, partition: Int) extends DelayedOperationKey {
def this(topicPartition: TopicPartition) = this(topicPartition.topic, topicPartition.partition)
def this(topicAndPartition: TopicAndPartition) = this(topicAndPartition.topic, topicAndPartition.partition)
override def keyLabel = "%s-%d".format(topic, partition)
}
/* used by delayed-join-group operations */
case class MemberKey(groupId: String, consumerId: String) extends DelayedOperationKey {
override def keyLabel = "%s-%s".format(groupId, consumerId)
}
/* used by delayed-rebalance operations */
case class GroupKey(groupId: String) extends DelayedOperationKey {
override def keyLabel = groupId
}
/* used by delayed-topic operations */
case class TopicKey(topic: String) extends DelayedOperationKey {
override def keyLabel = topic
}
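/**
 * Hedged usage sketch (not part of the original file): the keyLabel values produced by
 * the case classes above; topic and group names are illustrative.
 */
object DelayedOperationKeyExample {
  val produceKey = TopicPartitionOperationKey("payments", 0) // keyLabel == "payments-0"
  val rebalanceKey = GroupKey("payments-consumers")          // keyLabel == "payments-consumers"
}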
|
flange/drift-dev
|
kafka/00-kafka_2.11-0.10.1.0/libs/tmp/kafka/server/DelayedOperationKey.scala
|
Scala
|
apache-2.0
| 1,999
|
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.tf
trait InternalWithoutInput extends WithoutInput
|
intel-analytics/analytics-zoo
|
zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/autograd/InternalWithoutInput.scala
|
Scala
|
apache-2.0
| 696
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import java.io.File
import scala.util.Random
import org.apache.hadoop.fs.Path
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.sql._
import org.apache.spark.sql.execution.DataSourceScanExec
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types._
abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with TestHiveSingleton {
import spark.implicits._
val dataSourceName: String
protected def supportsDataType(dataType: DataType): Boolean = true
val dataSchema =
StructType(
Seq(
StructField("a", IntegerType, nullable = false),
StructField("b", StringType, nullable = false)))
lazy val testDF = (1 to 3).map(i => (i, s"val_$i")).toDF("a", "b")
lazy val partitionedTestDF1 = (for {
i <- 1 to 3
p2 <- Seq("foo", "bar")
} yield (i, s"val_$i", 1, p2)).toDF("a", "b", "p1", "p2")
lazy val partitionedTestDF2 = (for {
i <- 1 to 3
p2 <- Seq("foo", "bar")
} yield (i, s"val_$i", 2, p2)).toDF("a", "b", "p1", "p2")
lazy val partitionedTestDF = partitionedTestDF1.union(partitionedTestDF2)
def checkQueries(df: DataFrame): Unit = {
// Selects everything
checkAnswer(
df,
for (i <- 1 to 3; p1 <- 1 to 2; p2 <- Seq("foo", "bar")) yield Row(i, s"val_$i", p1, p2))
// Simple filtering and partition pruning
checkAnswer(
df.filter('a > 1 && 'p1 === 2),
for (i <- 2 to 3; p2 <- Seq("foo", "bar")) yield Row(i, s"val_$i", 2, p2))
// Simple projection and filtering
checkAnswer(
df.filter('a > 1).select('b, 'a + 1),
for (i <- 2 to 3; _ <- 1 to 2; _ <- Seq("foo", "bar")) yield Row(s"val_$i", i + 1))
// Simple projection and partition pruning
checkAnswer(
df.filter('a > 1 && 'p1 < 2).select('b, 'p1),
for (i <- 2 to 3; _ <- Seq("foo", "bar")) yield Row(s"val_$i", 1))
// Project many copies of columns with different types (reproduction for SPARK-7858)
checkAnswer(
df.filter('a > 1 && 'p1 < 2).select('b, 'b, 'b, 'b, 'p1, 'p1, 'p1, 'p1),
for (i <- 2 to 3; _ <- Seq("foo", "bar"))
yield Row(s"val_$i", s"val_$i", s"val_$i", s"val_$i", 1, 1, 1, 1))
// Self-join
df.createOrReplaceTempView("t")
withTempView("t") {
checkAnswer(
sql(
"""SELECT l.a, r.b, l.p1, r.p2
|FROM t l JOIN t r
|ON l.a = r.a AND l.p1 = r.p1 AND l.p2 = r.p2
""".stripMargin),
for (i <- 1 to 3; p1 <- 1 to 2; p2 <- Seq("foo", "bar")) yield Row(i, s"val_$i", p1, p2))
}
}
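  // Descriptive note: supportsDataType defaults to true above; concrete suites are expected to
  // override it so this list is narrowed to the types their data source can actually round-trip.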
private val supportedDataTypes = Seq(
StringType, BinaryType,
NullType, BooleanType,
ByteType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType(25, 5), DecimalType(6, 5),
DateType, TimestampType,
ArrayType(IntegerType),
MapType(StringType, LongType),
new StructType()
.add("f1", FloatType, nullable = true)
.add("f2", ArrayType(BooleanType, containsNull = true), nullable = true),
new UDT.MyDenseVectorUDT()
).filter(supportsDataType)
for (dataType <- supportedDataTypes) {
for (parquetDictionaryEncodingEnabled <- Seq(true, false)) {
test(s"test all data types - $dataType with parquet.enable.dictionary = " +
s"$parquetDictionaryEncodingEnabled") {
val extraOptions = Map[String, String](
"parquet.enable.dictionary" -> parquetDictionaryEncodingEnabled.toString
)
withTempPath { file =>
val path = file.getCanonicalPath
val dataGenerator = RandomDataGenerator.forType(
dataType = dataType,
nullable = true,
new Random(System.nanoTime())
).getOrElse {
fail(s"Failed to create data generator for schema $dataType")
}
// Create a DF for the schema with random data. The index field is used to sort the
// DataFrame. This is a workaround for SPARK-10591.
val schema = new StructType()
.add("index", IntegerType, nullable = false)
.add("col", dataType, nullable = true)
val rdd =
spark.sparkContext.parallelize((1 to 10).map(i => Row(i, dataGenerator())))
val df = spark.createDataFrame(rdd, schema).orderBy("index").coalesce(1)
df.write
.mode("overwrite")
.format(dataSourceName)
.option("dataSchema", df.schema.json)
.options(extraOptions)
.save(path)
val loadedDF = spark
.read
.format(dataSourceName)
.option("dataSchema", df.schema.json)
.schema(df.schema)
.options(extraOptions)
.load(path)
.orderBy("index")
checkAnswer(loadedDF, df)
}
}
}
}
test("save()/load() - non-partitioned table - Overwrite") {
withTempPath { file =>
testDF.write.mode(SaveMode.Overwrite).format(dataSourceName).save(file.getCanonicalPath)
testDF.write.mode(SaveMode.Overwrite).format(dataSourceName).save(file.getCanonicalPath)
checkAnswer(
spark.read.format(dataSourceName)
.option("path", file.getCanonicalPath)
.option("dataSchema", dataSchema.json)
.load(),
testDF.collect())
}
}
test("save()/load() - non-partitioned table - Append") {
withTempPath { file =>
testDF.write.mode(SaveMode.Overwrite).format(dataSourceName).save(file.getCanonicalPath)
testDF.write.mode(SaveMode.Append).format(dataSourceName).save(file.getCanonicalPath)
checkAnswer(
spark.read.format(dataSourceName)
.option("dataSchema", dataSchema.json)
.load(file.getCanonicalPath).orderBy("a"),
testDF.union(testDF).orderBy("a").collect())
}
}
test("save()/load() - non-partitioned table - ErrorIfExists") {
withTempDir { file =>
intercept[AnalysisException] {
testDF.write.format(dataSourceName).mode(SaveMode.ErrorIfExists).save(file.getCanonicalPath)
}
}
}
test("save()/load() - non-partitioned table - Ignore") {
withTempDir { file =>
testDF.write.mode(SaveMode.Ignore).format(dataSourceName).save(file.getCanonicalPath)
val path = new Path(file.getCanonicalPath)
val fs = path.getFileSystem(spark.sessionState.newHadoopConf())
assert(fs.listStatus(path).isEmpty)
}
}
test("save()/load() - partitioned table - simple queries") {
withTempPath { file =>
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.ErrorIfExists)
.partitionBy("p1", "p2")
.save(file.getCanonicalPath)
checkQueries(
spark.read.format(dataSourceName)
.option("dataSchema", dataSchema.json)
.load(file.getCanonicalPath))
}
}
test("save()/load() - partitioned table - Overwrite") {
withTempPath { file =>
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.partitionBy("p1", "p2")
.save(file.getCanonicalPath)
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.partitionBy("p1", "p2")
.save(file.getCanonicalPath)
checkAnswer(
spark.read.format(dataSourceName)
.option("dataSchema", dataSchema.json)
.load(file.getCanonicalPath),
partitionedTestDF.collect())
}
}
test("save()/load() - partitioned table - Append") {
withTempPath { file =>
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.partitionBy("p1", "p2")
.save(file.getCanonicalPath)
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.Append)
.partitionBy("p1", "p2")
.save(file.getCanonicalPath)
checkAnswer(
spark.read.format(dataSourceName)
.option("dataSchema", dataSchema.json)
.load(file.getCanonicalPath),
partitionedTestDF.union(partitionedTestDF).collect())
}
}
test("save()/load() - partitioned table - Append - new partition values") {
withTempPath { file =>
partitionedTestDF1.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.partitionBy("p1", "p2")
.save(file.getCanonicalPath)
partitionedTestDF2.write
.format(dataSourceName)
.mode(SaveMode.Append)
.partitionBy("p1", "p2")
.save(file.getCanonicalPath)
checkAnswer(
spark.read.format(dataSourceName)
.option("dataSchema", dataSchema.json)
.load(file.getCanonicalPath),
partitionedTestDF.collect())
}
}
test("save()/load() - partitioned table - ErrorIfExists") {
withTempDir { file =>
intercept[AnalysisException] {
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.ErrorIfExists)
.partitionBy("p1", "p2")
.save(file.getCanonicalPath)
}
}
}
test("save()/load() - partitioned table - Ignore") {
withTempDir { file =>
partitionedTestDF.write
.format(dataSourceName).mode(SaveMode.Ignore).save(file.getCanonicalPath)
val path = new Path(file.getCanonicalPath)
val fs = path.getFileSystem(SparkHadoopUtil.get.conf)
assert(fs.listStatus(path).isEmpty)
}
}
test("saveAsTable()/load() - non-partitioned table - Overwrite") {
testDF.write.format(dataSourceName).mode(SaveMode.Overwrite)
.option("dataSchema", dataSchema.json)
.saveAsTable("t")
withTable("t") {
checkAnswer(spark.table("t"), testDF.collect())
}
}
test("saveAsTable()/load() - non-partitioned table - Append") {
testDF.write.format(dataSourceName).mode(SaveMode.Overwrite).saveAsTable("t")
testDF.write.format(dataSourceName).mode(SaveMode.Append).saveAsTable("t")
withTable("t") {
checkAnswer(spark.table("t"), testDF.union(testDF).orderBy("a").collect())
}
}
test("saveAsTable()/load() - non-partitioned table - ErrorIfExists") {
withTable("t") {
sql("CREATE TABLE t(i INT) USING parquet")
intercept[AnalysisException] {
testDF.write.format(dataSourceName).mode(SaveMode.ErrorIfExists).saveAsTable("t")
}
}
}
test("saveAsTable()/load() - non-partitioned table - Ignore") {
withTable("t") {
sql("CREATE TABLE t(i INT) USING parquet")
testDF.write.format(dataSourceName).mode(SaveMode.Ignore).saveAsTable("t")
assert(spark.table("t").collect().isEmpty)
}
}
test("saveAsTable()/load() - partitioned table - simple queries") {
partitionedTestDF.write.format(dataSourceName)
.mode(SaveMode.Overwrite)
.option("dataSchema", dataSchema.json)
.saveAsTable("t")
withTable("t") {
checkQueries(spark.table("t"))
}
}
test("saveAsTable()/load() - partitioned table - boolean type") {
spark.range(2)
.select('id, ('id % 2 === 0).as("b"))
.write.partitionBy("b").saveAsTable("t")
withTable("t") {
checkAnswer(
spark.table("t").sort('id),
Row(0, true) :: Row(1, false) :: Nil
)
}
}
test("saveAsTable()/load() - partitioned table - Overwrite") {
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.option("dataSchema", dataSchema.json)
.partitionBy("p1", "p2")
.saveAsTable("t")
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.option("dataSchema", dataSchema.json)
.partitionBy("p1", "p2")
.saveAsTable("t")
withTable("t") {
checkAnswer(spark.table("t"), partitionedTestDF.collect())
}
}
test("saveAsTable()/load() - partitioned table - Append") {
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.option("dataSchema", dataSchema.json)
.partitionBy("p1", "p2")
.saveAsTable("t")
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.Append)
.option("dataSchema", dataSchema.json)
.partitionBy("p1", "p2")
.saveAsTable("t")
withTable("t") {
checkAnswer(spark.table("t"), partitionedTestDF.union(partitionedTestDF).collect())
}
}
test("saveAsTable()/load() - partitioned table - Append - new partition values") {
partitionedTestDF1.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.option("dataSchema", dataSchema.json)
.partitionBy("p1", "p2")
.saveAsTable("t")
partitionedTestDF2.write
.format(dataSourceName)
.mode(SaveMode.Append)
.option("dataSchema", dataSchema.json)
.partitionBy("p1", "p2")
.saveAsTable("t")
withTable("t") {
checkAnswer(spark.table("t"), partitionedTestDF.collect())
}
}
test("saveAsTable()/load() - partitioned table - Append - mismatched partition columns") {
partitionedTestDF1.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.option("dataSchema", dataSchema.json)
.partitionBy("p1", "p2")
.saveAsTable("t")
// Using only a subset of all partition columns
intercept[AnalysisException] {
partitionedTestDF2.write
.format(dataSourceName)
.mode(SaveMode.Append)
.option("dataSchema", dataSchema.json)
.partitionBy("p1")
.saveAsTable("t")
}
}
test("saveAsTable()/load() - partitioned table - ErrorIfExists") {
Seq.empty[(Int, String)].toDF().createOrReplaceTempView("t")
withTempView("t") {
intercept[AnalysisException] {
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.ErrorIfExists)
.option("dataSchema", dataSchema.json)
.partitionBy("p1", "p2")
.saveAsTable("t")
}
}
}
test("saveAsTable()/load() - partitioned table - Ignore") {
Seq.empty[(Int, String)].toDF().createOrReplaceTempView("t")
withTempView("t") {
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.Ignore)
.option("dataSchema", dataSchema.json)
.partitionBy("p1", "p2")
.saveAsTable("t")
assert(spark.table("t").collect().isEmpty)
}
}
test("load() - with directory of unpartitioned data in nested subdirs") {
withTempPath { dir =>
val subdir = new File(dir, "subdir")
val dataInDir = Seq(1, 2, 3).toDF("value")
val dataInSubdir = Seq(4, 5, 6).toDF("value")
/*
Directory structure to be generated
dir
|
|___ [ files of dataInDir ]
|
              |___ subdir
|
|___ [ files of dataInSubdir ]
*/
      // Generate dataInSubdir first (no data is written to dir yet)
dataInSubdir.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.save(subdir.getCanonicalPath)
// Inferring schema should throw error as it should not find any file to infer
val e = intercept[Exception] {
spark.read.format(dataSourceName).load(dir.getCanonicalPath)
}
e match {
case _: AnalysisException =>
assert(e.getMessage.contains("infer"))
case _: java.util.NoSuchElementException if e.getMessage.contains("dataSchema") =>
// Ignore error, the source format requires schema to be provided by user
// This is needed for SimpleTextHadoopFsRelationSuite as SimpleTextSource needs schema
case _ =>
fail("Unexpected error trying to infer schema from empty dir", e)
}
/** Test whether data is read with the given path matches the expected answer */
def testWithPath(path: File, expectedAnswer: Seq[Row]): Unit = {
val df = spark.read
.format(dataSourceName)
.schema(dataInDir.schema) // avoid schema inference for any format
.load(path.getCanonicalPath)
checkAnswer(df, expectedAnswer)
}
      // Verify that reading by path 'dir/' gives empty results as there are no files directly
      // in 'dir', and it should not pick up files in 'dir/subdir'
require(subdir.exists)
require(subdir.listFiles().exists(!_.isDirectory))
testWithPath(dir, Seq.empty)
// Verify that if there is data in dir, then reading by path 'dir/' reads only dataInDir
dataInDir.write
.format(dataSourceName)
.mode(SaveMode.Append) // append to prevent subdir from being deleted
.save(dir.getCanonicalPath)
require(dir.listFiles().exists(!_.isDirectory))
require(subdir.exists())
require(subdir.listFiles().exists(!_.isDirectory))
testWithPath(dir, dataInDir.collect())
}
}
test("Hadoop style globbing - unpartitioned data") {
withTempPath { file =>
val dir = file.getCanonicalPath
val subdir = new File(dir, "subdir")
val subsubdir = new File(subdir, "subsubdir")
val anotherSubsubdir =
new File(new File(dir, "another-subdir"), "another-subsubdir")
val dataInSubdir = Seq(1, 2, 3).toDF("value")
val dataInSubsubdir = Seq(4, 5, 6).toDF("value")
val dataInAnotherSubsubdir = Seq(7, 8, 9).toDF("value")
      dataInSubdir.write
        .format(dataSourceName)
        .mode(SaveMode.Overwrite)
        .save(subdir.getCanonicalPath)
      dataInSubsubdir.write
        .format(dataSourceName)
        .mode(SaveMode.Overwrite)
        .save(subsubdir.getCanonicalPath)
      dataInAnotherSubsubdir.write
        .format(dataSourceName)
        .mode(SaveMode.Overwrite)
        .save(anotherSubsubdir.getCanonicalPath)
require(subdir.exists)
require(subdir.listFiles().exists(!_.isDirectory))
require(subsubdir.exists)
require(subsubdir.listFiles().exists(!_.isDirectory))
require(anotherSubsubdir.exists)
require(anotherSubsubdir.listFiles().exists(!_.isDirectory))
/*
Directory structure generated
dir
|
|___ subdir
| |
| |___ [ files of dataInSubdir ]
| |
| |___ subsubdir
| |
| |___ [ files of dataInSubsubdir ]
|
|
|___ anotherSubdir
|
|___ anotherSubsubdir
|
|___ [ files of dataInAnotherSubsubdir ]
*/
val schema = dataInSubdir.schema
/** Check whether data is read with the given path matches the expected answer */
def check(path: String, expectedDf: DataFrame): Unit = {
val df = spark.read
.format(dataSourceName)
.schema(schema) // avoid schema inference for any format, expected to be same format
.load(path)
checkAnswer(df, expectedDf)
}
check(s"$dir/*/", dataInSubdir)
check(s"$dir/sub*/*", dataInSubdir.union(dataInSubsubdir))
check(s"$dir/another*/*", dataInAnotherSubsubdir)
check(s"$dir/*/another*", dataInAnotherSubsubdir)
check(s"$dir/*/*", dataInSubdir.union(dataInSubsubdir).union(dataInAnotherSubsubdir))
}
}
test("Hadoop style globbing - partitioned data with schema inference") {
// Tests the following on partition data
// - partitions are not discovered with globbing and without base path set.
// - partitions are discovered with globbing and base path set, though more detailed
// tests for this is in ParquetPartitionDiscoverySuite
withTempPath { path =>
val dir = path.getCanonicalPath
partitionedTestDF.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.partitionBy("p1", "p2")
.save(dir)
def check(
path: String,
expectedResult: Either[DataFrame, String],
basePath: Option[String] = None
): Unit = {
try {
val reader = spark.read
basePath.foreach(reader.option("basePath", _))
val testDf = reader
.format(dataSourceName)
.load(path)
assert(expectedResult.isLeft, s"Error was expected with $path but result found")
checkAnswer(testDf, expectedResult.left.get)
} catch {
case e: java.util.NoSuchElementException if e.getMessage.contains("dataSchema") =>
// Ignore error, the source format requires schema to be provided by user
// This is needed for SimpleTextHadoopFsRelationSuite as SimpleTextSource needs schema
case e: Throwable =>
assert(expectedResult.isRight, s"Was not expecting error with $path: " + e)
assert(
e.getMessage.contains(expectedResult.right.get),
s"Did not find expected error message wiht $path")
}
}
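      // Small helpers so each check below reads declaratively: Result(df) means the glob should
      // load successfully and match df; Error(msg) means the read should fail with message msg.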
object Error {
def apply(msg: String): Either[DataFrame, String] = Right(msg)
}
object Result {
def apply(df: DataFrame): Either[DataFrame, String] = Left(df)
}
// ---- Without base path set ----
// Should find all the data with partitioning columns
check(s"$dir", Result(partitionedTestDF))
// Should fail as globbing finds dirs without files, only subdirs in them.
check(s"$dir/*/", Error("please set \\"basePath\\""))
check(s"$dir/p1=*/", Error("please set \\"basePath\\""))
// Should not find partition columns as the globs resolve to p2 dirs
// with files in them
check(s"$dir/*/*", Result(partitionedTestDF.drop("p1", "p2")))
check(s"$dir/p1=*/p2=foo", Result(partitionedTestDF.filter("p2 = 'foo'").drop("p1", "p2")))
check(s"$dir/p1=1/p2=???", Result(partitionedTestDF.filter("p1 = 1").drop("p1", "p2")))
// Should find all data without the partitioning columns as the globs resolve to the files
check(s"$dir/*/*/*", Result(partitionedTestDF.drop("p1", "p2")))
// ---- With base path set ----
val resultDf = partitionedTestDF.select("a", "b", "p1", "p2")
check(path = s"$dir/*", Result(resultDf), basePath = Some(dir))
check(path = s"$dir/*/*", Result(resultDf), basePath = Some(dir))
check(path = s"$dir/*/*/*", Result(resultDf), basePath = Some(dir))
}
}
test("SPARK-9735 Partition column type casting") {
withTempPath { file =>
val df = (for {
i <- 1 to 3
p2 <- Seq("foo", "bar")
} yield (i, s"val_$i", 1.0d, p2, 123, 123.123f)).toDF("a", "b", "p1", "p2", "p3", "f")
val input = df.select(
'a,
'b,
'p1.cast(StringType).as('ps1),
'p2,
'p3.cast(FloatType).as('pf1),
'f)
withTempView("t") {
input
.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.partitionBy("ps1", "p2", "pf1", "f")
.saveAsTable("t")
input
.write
.format(dataSourceName)
.mode(SaveMode.Append)
.partitionBy("ps1", "p2", "pf1", "f")
.saveAsTable("t")
val realData = input.collect()
checkAnswer(spark.table("t"), realData ++ realData)
}
}
}
test("SPARK-7616: adjust column name order accordingly when saving partitioned table") {
val df = (1 to 3).map(i => (i, s"val_$i", i * 2)).toDF("a", "b", "c")
df.write
.format(dataSourceName)
.mode(SaveMode.Overwrite)
.partitionBy("c", "a")
.saveAsTable("t")
withTable("t") {
checkAnswer(spark.table("t").select('b, 'c, 'a), df.select('b, 'c, 'a).collect())
}
}
// NOTE: This test suite is not super deterministic. On nodes with only relatively few cores
// (4 or even 1), it's hard to reproduce the data loss issue. But on nodes with for example 8 or
// more cores, the issue can be reproduced steadily. Fortunately our Jenkins builder meets this
// requirement. We probably want to move this test case to spark-integration-tests or spark-perf
// later.
test("SPARK-8406: Avoids name collision while writing files") {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark
.range(10000)
.repartition(250)
.write
.mode(SaveMode.Overwrite)
.format(dataSourceName)
.save(path)
assertResult(10000) {
spark
.read
.format(dataSourceName)
.option("dataSchema", StructType(StructField("id", LongType) :: Nil).json)
.load(path)
.count()
}
}
}
test("SPARK-8887: Explicitly define which data types can be used as dynamic partition columns") {
val df = Seq(
(1, "v1", Array(1, 2, 3), Map("k1" -> "v1"), Tuple2(1, "4")),
(2, "v2", Array(4, 5, 6), Map("k2" -> "v2"), Tuple2(2, "5")),
(3, "v3", Array(7, 8, 9), Map("k3" -> "v3"), Tuple2(3, "6"))).toDF("a", "b", "c", "d", "e")
withTempDir { file =>
intercept[AnalysisException] {
df.write.format(dataSourceName).partitionBy("c", "d", "e").save(file.getCanonicalPath)
}
}
intercept[AnalysisException] {
df.write.format(dataSourceName).partitionBy("c", "d", "e").saveAsTable("t")
}
}
test("Locality support for FileScanRDD") {
val options = Map[String, String](
"fs.file.impl" -> classOf[LocalityTestFileSystem].getName,
"fs.file.impl.disable.cache" -> "true"
)
withTempPath { dir =>
val path = dir.toURI.toString
val df1 = spark.range(4)
df1.coalesce(1).write.mode("overwrite").options(options).format(dataSourceName).save(path)
df1.coalesce(1).write.mode("append").options(options).format(dataSourceName).save(path)
def checkLocality(): Unit = {
val df2 = spark.read
.format(dataSourceName)
.option("dataSchema", df1.schema.json)
.options(options)
.load(path)
val Some(fileScanRDD) = df2.queryExecution.executedPlan.collectFirst {
case scan: DataSourceScanExec if scan.inputRDDs().head.isInstanceOf[FileScanRDD] =>
scan.inputRDDs().head.asInstanceOf[FileScanRDD]
}
val partitions = fileScanRDD.partitions
val preferredLocations = partitions.flatMap(fileScanRDD.preferredLocations)
assert(preferredLocations.distinct.length == 2)
}
checkLocality()
withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "0") {
checkLocality()
}
}
}
test("SPARK-16975: Partitioned table with the column having '_' should be read correctly") {
withTempDir { dir =>
val childDir = new File(dir, dataSourceName).getCanonicalPath
val dataDf = spark.range(10).toDF()
val df = dataDf.withColumn("_col", $"id")
df.write.format(dataSourceName).partitionBy("_col").save(childDir)
val reader = spark.read.format(dataSourceName)
// This is needed for SimpleTextHadoopFsRelationSuite as SimpleTextSource needs schema.
if (dataSourceName == classOf[SimpleTextSource].getCanonicalName) {
reader.option("dataSchema", dataDf.schema.json)
}
val readBack = reader.load(childDir)
checkAnswer(df, readBack)
}
}
}
|
mike0sv/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
|
Scala
|
apache-2.0
| 28,171
|
/*
* Terron Ishihara
*
* A script to determine which users in the database are most relevant.
* Relevance here is defined by
* - users who most frequently tweet (who have the most tweets in the database)
* - have a picture (TODO: consider using only pics that contain the user's face)
* - being human (not an organization or blog)
* - being likely overweight
*/
import java.io.PrintWriter
import scala.io.Source
import scala.collection.mutable
object RelevantUsers {
def main(args: Array[String]) {
// Note the location of the .txt file (not necessarily included on github)
val databasePath = "/Users/Terron/Documents/twitter4food/food_sample_2Oct2013_7Jan2016.txt"
var userTweetCounts = new mutable.HashMap[String, Int]().withDefaultValue(0)
// Iterate over text file. Tweets are represented by three lines
// 1. tab-separated info about user
// handle, name, id, location, followers count, utc offset, time zone, creation timestamp, language
// 2. tab-separated info about tweet
// creation timestamp if available, location coordinates if available, place name if available
// 3. the tweet itself
println("=== Iterating over database of tweets...")
// The maximum heap size may have to be increased to process the entire file
for ((line, index) <- Source.fromFile(databasePath).getLines.zipWithIndex) {
if (index % 3 == 0) {
// First term on first line is the twitter handle
                val twitterHandle = line.splitAt(line.indexOf('\t'))._1
userTweetCounts(twitterHandle) += 1
// Debug print to view progress
if (index > 10000 && index % 10000 == 0) {
val count = userTweetCounts(twitterHandle)
println(s"$index\\t$twitterHandle\\t$count")
}
}
}
// Write out sorted users to file
// NOTE: Local file path used since I ran this with the scala command, not by building the project
var writer = new PrintWriter("/Users/Terron/Documents/Git/twitter4food/src/main/resources/org.clulab.twitter4food.t2dm/usersSortedByTweetCounts.txt")
println("\\n=== Users sorted by highest food-related tweet counts:")
// Print first few just for viewing purposes
var numUsersToPrint = 200
// Sort by highest food-related tweet count first
for ((handle, count) <- userTweetCounts.toSeq.sortBy(_._2).reverse) {
if (numUsersToPrint > 0) {
println(s"$handle\\t$count")
numUsersToPrint -= 1
}
            writer.write(handle + "\t" + count + "\n")
}
writer.close
writer = new PrintWriter("/Users/Terron/Documents/Git/twitter4food/src/main/resources/org.clulab.twitter4food.t2dm/usersMidrange.txt")
val minimum = 10
val maximum = 7000
System.out.println("Total number of users: " + userTweetCounts.size)
// Filter in users whose counts are in the middle range of tweet counts
val usersMidrange = userTweetCounts.filter(tup => tup._2 >= minimum && tup._2 <= maximum)
System.out.println("Number of users in range [" + minimum + ", " + maximum + "]: " + usersMidrange.size)
        // Reset the preview counter so a sample of midrange users is also printed to the console
        numUsersToPrint = 200
        // Sort by highest food-related tweet count first
for ((handle, count) <- usersMidrange.toSeq.sortBy(_._2).reverse) {
if (numUsersToPrint > 0) {
println(s"$handle\\t$count")
numUsersToPrint -= 1
}
            writer.write(handle + "\t" + count + "\n")
}
writer.close
}
}
|
clulab/twitter4food
|
src/main/scala/org/clulab/twitter4food/t2dm/RelevantUsers.scala
|
Scala
|
apache-2.0
| 3,681
|
package org.openapitools.server.model
/**
* @param `class` for example: ''null''
*/
final case class Label1 (
`class`: Option[String]
)
|
cliffano/swaggy-jenkins
|
clients/scala-akka-http-server/generated/src/main/scala/org/openapitools/server/model/Label1.scala
|
Scala
|
mit
| 143
|
package org.machine.engine.exceptions
class InternalErrorException(message: String = null, cause: Throwable = null) extends Exception(message, cause)
|
sholloway/graph-engine
|
src/main/scala/org/machine/engine/exceptions/InternalErrorException.scala
|
Scala
|
mit
| 151
|
package org.scaladebugger.api.profiles.java.info
import org.scaladebugger.api.lowlevel.events.misc.NoResume
import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.profiles.traits.info.{IndexedVariableInfo, ThreadInfo}
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import test.{ApiTestUtilities, VirtualMachineFixtures}
class JavaThreadInfoIntegrationSpec extends ParallelMockFunSpec
with VirtualMachineFixtures
with ApiTestUtilities
{
describe("JavaThreadInfo") {
it("should be able to get a list of frames for the suspended thread") {
val testClass = "org.scaladebugger.test.info.Frames"
val testFile = JDITools.scalaClassStringToFileString(testClass)
@volatile var t: Option[ThreadInfo] = None
val s = DummyScalaVirtualMachine.newInstance()
// NOTE: Do not resume so we can check the stack frames
s.withProfile(JavaDebugProfile.Name)
.getOrCreateBreakpointRequest(testFile, 25, NoResume)
.foreach(e => t = Some(e.thread))
withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
logTimeTaken(eventually {
val thread = t.get
// Should be 5
val totalFrames = thread.totalFrames
val index = 1
val length = 2
// Valid list of frames
thread.frames.slice(index, length + 1).map(_.toPrettyString) should
be (thread.frames(index, length).map(_.toPrettyString))
// Length too long when retrieving should revert to all frames
thread.frames.slice(index, totalFrames).map(_.toPrettyString) should
be (thread.frames(index, totalFrames + 1).map(_.toPrettyString))
// Length of -1 should return all remaining frames
thread.frames.slice(index, totalFrames).map(_.toPrettyString) should
be (thread.frames(index, -1).map(_.toPrettyString))
})
}
}
it("should be able to find a variable by its name") {
val testClass = "org.scaladebugger.test.info.Variables"
val testFile = JDITools.scalaClassStringToFileString(testClass)
@volatile var t: Option[ThreadInfo] = None
val s = DummyScalaVirtualMachine.newInstance()
// NOTE: Do not resume so we can check the variables at the stack frame
s.withProfile(JavaDebugProfile.Name)
.getOrCreateBreakpointRequest(testFile, 32, NoResume)
.foreach(e => t = Some(e.thread))
withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
logTimeTaken(eventually {
val thread = t.get
// Should support retrieving local variables
val localVariable = thread.findVariableByName("a").get
localVariable.isLocal should be (true)
localVariable.name should be ("a")
// Should support retrieving fields
val field = thread.findVariableByName("z1").get
field.isField should be (true)
field.name should be ("z1")
})
}
}
it("should be able to find a variable by its index") {
val testClass = "org.scaladebugger.test.info.Variables"
val testFile = JDITools.scalaClassStringToFileString(testClass)
@volatile var t: Option[ThreadInfo] = None
val s = DummyScalaVirtualMachine.newInstance()
// NOTE: Do not resume so we can check the variables at the stack frame
s.withProfile(JavaDebugProfile.Name)
.getOrCreateBreakpointRequest(testFile, 32, NoResume)
.foreach(e => t = Some(e.thread))
withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
logTimeTaken(eventually {
val thread = t.get
// Local variables are always indexed
val localVariable = thread.findVariableByName("a").get
.asInstanceOf[IndexedVariableInfo]
val indexedVariable = thread.findVariableByIndex(
localVariable.frameIndex,
localVariable.offsetIndex
).get
// Loaded variable should be the same
indexedVariable.name should be (localVariable.name)
})
}
}
}
}
|
ensime/scala-debugger
|
scala-debugger-api/src/it/scala/org/scaladebugger/api/profiles/java/info/JavaThreadInfoIntegrationSpec.scala
|
Scala
|
apache-2.0
| 4,276
|
package ru.maizy.ambient7.core.util
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2016-2017
* See LICENSE.txt for details.
*/
import java.time.ZonedDateTime
import java.time.temporal.ChronoUnit
import scala.concurrent.duration.Duration
// TODO: generalize for any date order past->future, future->past
class DateTimeIterator private (from: ZonedDateTime, to: ZonedDateTime, step: Long, stepUnit: ChronoUnit)
extends Iterator[ZonedDateTime]
{
var current = from
override def hasNext: Boolean = current.compareTo(to) < 0
override def next(): ZonedDateTime = {
val res = current
current = current.plus(step, stepUnit)
res
}
}
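// Example usage (illustrative): step hour by hour; `to` is exclusive because hasNext
// uses a strict comparison:
//   DateTimeIterator(from, to, 1, ChronoUnit.HOURS).foreach(println)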
object DateTimeIterator {
def apply(from: ZonedDateTime, to: ZonedDateTime, step: Long, stepUnit: ChronoUnit): DateTimeIterator = {
new DateTimeIterator(from, to, step, stepUnit)
}
def apply(from: ZonedDateTime, to: ZonedDateTime, stepDuration: Duration): DateTimeIterator = {
require(stepDuration.isFinite)
new DateTimeIterator(from, to, stepDuration.toMicros, ChronoUnit.MICROS)
}
}
|
maizy/ambient7
|
core/src/main/scala/ru/maizy/ambient7/core/util/DateTimeIterator.scala
|
Scala
|
apache-2.0
| 1,060
|
package controllers
import io.apibuilder.api.v0.models.{Publication, SubscriptionForm}
import java.util.UUID
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import org.scalatestplus.play.PlaySpec
class SubscriptionsSpec extends PlaySpec with MockClient with GuiceOneServerPerSuite {
import scala.concurrent.ExecutionContext.Implicits.global
private[this] lazy val org = createOrganization()
"POST /subscriptions" in {
val user = createUser()
val subscription = await(client.subscriptions.post(
SubscriptionForm(
organizationKey = org.key,
userGuid = user.guid,
publication = Publication.MembershipRequestsCreate
)
))
subscription.organization.key must equal(org.key)
subscription.user.guid must equal(user.guid)
subscription.publication must equal(Publication.MembershipRequestsCreate)
}
"POST /subscriptions handles user already subscribed" in {
val user = createUser()
val form = createSubscriptionForm(org, user)
val subscription = await(client.subscriptions.post(form))
expectErrors {
client.subscriptions.post(form)
}
}
"POST /subscriptions allows user to subscribe to a different organization" in {
val user = createUser()
val form = createSubscriptionForm(org, user)
val subscription1 = await(client.subscriptions.post(form))
subscription1.organization.key must equal(org.key)
subscription1.user.guid must equal(user.guid)
subscription1.publication must equal(Publication.MembershipRequestsCreate)
val org2 = createOrganization()
val subscription2 = await(client.subscriptions.post(form.copy(organizationKey = org2.key)))
subscription2.organization.key must equal(org2.key)
subscription2.user.guid must equal(user.guid)
subscription2.publication must equal(Publication.MembershipRequestsCreate)
}
"POST /subscriptions validates org key" in {
val user = createUser()
expectErrors {
client.subscriptions.post(
SubscriptionForm(
organizationKey = UUID.randomUUID.toString,
userGuid = user.guid,
publication = Publication.MembershipRequestsCreate
)
)
}.errors.map(_.message) must equal(Seq("Organization not found"))
}
"POST /subscriptions validates user guid" in {
expectErrors {
client.subscriptions.post(
SubscriptionForm(
organizationKey = org.key,
userGuid = UUID.randomUUID,
publication = Publication.MembershipRequestsCreate
)
)
}.errors.map(_.message) must equal(Seq("User not found"))
}
"POST /subscriptions validates publication" in {
val user = createUser()
expectErrors {
client.subscriptions.post(
SubscriptionForm(
organizationKey = org.key,
userGuid = user.guid,
publication = Publication(UUID.randomUUID.toString)
)
)
}.errors.map(_.message) must equal(Seq("Publication not found"))
}
"DELETE /subscriptions/:guid" in {
val subscription = await(client.subscriptions.post(createSubscriptionForm(org)))
expectStatus(204) {
client.subscriptions.deleteByGuid(subscription.guid)
}
expectNotFound {
client.subscriptions.deleteByGuid(subscription.guid)
}
// now recreate
val subscription2 = await(client.subscriptions.post(createSubscriptionForm(org)))
await(client.subscriptions.getByGuid(subscription2.guid)) must equal(subscription2)
}
"GET /subscriptions/:guid" in {
val subscription = await(client.subscriptions.post(createSubscriptionForm(org)))
await(client.subscriptions.getByGuid(subscription.guid)) must equal(subscription)
expectNotFound {
client.subscriptions.getByGuid(UUID.randomUUID)
}
}
"GET /subscriptions filters" in {
val user1 = createUser()
val user2 = createUser()
val org1 = createOrganization()
val org2 = createOrganization()
val subscription1 = await(client.subscriptions.post(
SubscriptionForm(
organizationKey = org1.key,
userGuid = user1.guid,
publication = Publication.MembershipRequestsCreate
)
))
val subscription2 = await(client.subscriptions.post(
SubscriptionForm(
organizationKey = org2.key,
userGuid = user2.guid,
publication = Publication.ApplicationsCreate
)
))
await(client.subscriptions.get(organizationKey = Some(UUID.randomUUID.toString))) must equal(Nil)
await(client.subscriptions.get(organizationKey = Some(org1.key))).map(_.guid) must equal(Seq(subscription1.guid))
await(client.subscriptions.get(organizationKey = Some(org2.key))).map(_.guid) must equal(Seq(subscription2.guid))
await(client.subscriptions.get(userGuid = Some(UUID.randomUUID))) must equal(Nil)
await(client.subscriptions.get(userGuid = Some(user1.guid))).map(_.guid) must equal(Seq(subscription1.guid))
await(client.subscriptions.get(userGuid = Some(user2.guid))).map(_.guid) must equal(Seq(subscription2.guid))
await(client.subscriptions.get(userGuid = Some(user1.guid), publication = Some(Publication.MembershipRequestsCreate))).map(_.guid) must equal(Seq(subscription1.guid))
await(client.subscriptions.get(userGuid = Some(user2.guid), publication = Some(Publication.ApplicationsCreate))).map(_.guid) must equal(Seq(subscription2.guid))
expectStatus(400) {
client.subscriptions.get(publication = Some(Publication(UUID.randomUUID.toString)))
}
}
"GET /subscriptions authorizes user" in {
val subscription = await(client.subscriptions.post(createSubscriptionForm(org)))
val randomUser = createUser()
await(client.subscriptions.get(guid = Some(subscription.guid))).map(_.guid) must equal(Seq(subscription.guid))
await(newClient(randomUser).subscriptions.get(guid = Some(subscription.guid))).map(_.guid) must equal(Nil)
}
}
|
gheine/apidoc
|
api/test/controllers/SubscriptionsSpec.scala
|
Scala
|
mit
| 5,904
|
package org.broadinstitute.clio.server.webservice
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import org.broadinstitute.clio.server.service.IndexService
import org.broadinstitute.clio.transfer.model.GvcfIndex
import org.broadinstitute.clio.transfer.model.gvcf.GvcfKey
import org.broadinstitute.clio.util.model.{DataType, Location}
class GvcfWebService(gvcfService: IndexService[GvcfIndex.type])
extends IndexWebService(gvcfService) {
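  // Extracts a GvcfKey from the request URL, consuming segments in order:
  //   /{location}/{project}/{dataType}/{sampleAlias}/{version}
  // sampleAlias may contain URL-encoded slashes, hence the decodeSlashes call below.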
private[webservice] val pathPrefixKey: Directive1[GvcfKey] = {
for {
location <- pathPrefix(Location.namesToValuesMap)
project <- pathPrefix(Segment)
dataType <- pathPrefix(DataType.namesToValuesMap)
sampleAlias <- pathPrefix(Segment)
version <- path(IntNumber)
} yield
GvcfKey(
location,
project,
dataType,
IndexWebService.decodeSlashes(sampleAlias),
version
)
}
}
|
broadinstitute/clio
|
clio-server/src/main/scala/org/broadinstitute/clio/server/webservice/GvcfWebService.scala
|
Scala
|
bsd-3-clause
| 938
|
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.events.Event
import org.scalatest.prop.Tables
// SKIP-SCALATESTJS,NATIVE-START
import org.scalatest.refspec.RefSpec
// SKIP-SCALATESTJS,NATIVE-END
import org.scalatest.{ featurespec, flatspec, freespec, funspec, funsuite, propspec, wordspec }
import org.scalatest.featurespec.AnyFeatureSpec
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.propspec.AnyPropSpec
import org.scalatest.wordspec.AnyWordSpec
trait DeprecatedOrderExpectedResults extends EventHelpers {
def assertOrderTest(events: List[Event]): Unit
}
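// Descriptive note: each example suite below mixes in ParallelTestExecution and implements
// assertOrderTest to verify the exact sequence of reported events (scopes opened/closed,
// tests started/succeeded) for its style trait.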
object DeprecatedParallelTestExecutionOrderExamples extends Tables {
// SKIP-SCALATESTJS,NATIVE-START
def orderSpec = new DeprecatedExampleParallelTestExecutionOrderSpec
// SKIP-SCALATESTJS,NATIVE-END
def orderFunSuite = new DeprecatedExampleParallelTestExecutionOrderFunSuite
def orderFixtureFunSuite = new DeprecatedExampleParallelTestExecutionOrderFixtureFunSuite
def orderFunSpec = new DeprecatedExampleParallelTestExecutionOrderFunSpec
def orderFixtureFunSpec = new DeprecatedExampleParallelTestExecutionOrderFixtureFunSpec
def orderFeatureSpec = new DeprecatedExampleParallelTestExecutionOrderFeatureSpec
def orderFixtureFeatureSpec = new DeprecatedExampleParallelTestExecutionOrderFixtureFeatureSpec
def orderFlatSpec = new DeprecatedExampleParallelTestExecutionOrderFlatSpec
def orderFixtureFlatSpec = new DeprecatedExampleParallelTestExecutionOrderFixtureFlatSpec
def orderFreeSpec = new DeprecatedExampleParallelTestExecutionOrderFreeSpec
def orderFixtureFreeSpec = new DeprecatedExampleParallelTestExecutionOrderFixtureFreeSpec
def orderPropSpec = new DeprecatedExampleParallelTestExecutionOrderPropSpec
def orderFixturePropSpec = new DeprecatedExampleParallelTestExecutionOrderFixturePropSpec
def orderWordSpec = new DeprecatedExampleParallelTestExecutionOrderWordSpec
def orderFixtureWordSpec = new DeprecatedExampleParallelTestExecutionOrderFixtureWordSpec
def orderExamples =
Table(
"suite1",
// SKIP-SCALATESTJS,NATIVE-START
orderSpec,
// SKIP-SCALATESTJS,NATIVE-END
orderFunSuite,
orderFixtureFunSuite,
orderFunSpec,
orderFixtureFunSpec,
orderFeatureSpec,
orderFixtureFeatureSpec,
orderFlatSpec,
orderFixtureFlatSpec,
orderFreeSpec,
orderFixtureFreeSpec,
orderPropSpec,
orderFixturePropSpec,
orderWordSpec,
orderFixtureWordSpec
)
}
// SKIP-SCALATESTJS,NATIVE-START
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderSpec extends RefSpec with DeprecatedOrderExpectedResults with ParallelTestExecution {
def `test 1`: Unit = {}
def `test 2`: Unit = {}
def `test 3`: Unit = {}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 6)
checkTestStarting(events(0), "test 1")
checkTestSucceeded(events(1), "test 1")
checkTestStarting(events(2), "test 2")
checkTestSucceeded(events(3), "test 2")
checkTestStarting(events(4), "test 3")
checkTestSucceeded(events(5), "test 3")
}
}
// SKIP-SCALATESTJS,NATIVE-END
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFunSuite extends AnyFunSuite with DeprecatedOrderExpectedResults with ParallelTestExecution {
test("Test 1") {}
test("Test 2") {}
test("Test 3") {}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 6)
checkTestStarting(events(0), "Test 1")
checkTestSucceeded(events(1), "Test 1")
checkTestStarting(events(2), "Test 2")
checkTestSucceeded(events(3), "Test 2")
checkTestStarting(events(4), "Test 3")
checkTestSucceeded(events(5), "Test 3")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFunSuite
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFixtureFunSuite extends funsuite.FixtureAnyFunSuite with DeprecatedOrderExpectedResults with ParallelTestExecution with StringFixture {
test("Fixture Test 1") { fixture => }
test("Fixture Test 2") { fixture => }
test("Fixture Test 3") { fixture => }
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 6)
checkTestStarting(events(0), "Fixture Test 1")
checkTestSucceeded(events(1), "Fixture Test 1")
checkTestStarting(events(2), "Fixture Test 2")
checkTestSucceeded(events(3), "Fixture Test 2")
checkTestStarting(events(4), "Fixture Test 3")
checkTestSucceeded(events(5), "Fixture Test 3")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFixtureFunSuite
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFunSpec extends AnyFunSpec with DeprecatedOrderExpectedResults with ParallelTestExecution {
describe("Scope 1") {
it("Test 1") {}
it("Test 2") {}
}
describe("Scope 2") {
it("Test 3") {}
it("Test 4") {}
}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 Test 1")
checkTestSucceeded(events(2), "Scope 1 Test 1")
checkTestStarting(events(3), "Scope 1 Test 2")
checkTestSucceeded(events(4), "Scope 1 Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 Test 3")
checkTestSucceeded(events(8), "Scope 2 Test 3")
checkTestStarting(events(9), "Scope 2 Test 4")
checkTestSucceeded(events(10), "Scope 2 Test 4")
checkScopeClosed(events(11), "Scope 2")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFunSpec
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFixtureFunSpec extends funspec.FixtureAnyFunSpec with DeprecatedOrderExpectedResults with ParallelTestExecution with StringFixture {
describe("Fixture Scope 1") {
it("Fixture Test 1") { fixture => }
it("Fixture Test 2") { fixture =>}
}
describe("Fixture Scope 2") {
it("Fixture Test 3") { fixture => }
it("Fixture Test 4") { fixture =>}
}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 12)
checkScopeOpened(events(0), "Fixture Scope 1")
checkTestStarting(events(1), "Fixture Scope 1 Fixture Test 1")
checkTestSucceeded(events(2), "Fixture Scope 1 Fixture Test 1")
checkTestStarting(events(3), "Fixture Scope 1 Fixture Test 2")
checkTestSucceeded(events(4), "Fixture Scope 1 Fixture Test 2")
checkScopeClosed(events(5), "Fixture Scope 1")
checkScopeOpened(events(6), "Fixture Scope 2")
checkTestStarting(events(7), "Fixture Scope 2 Fixture Test 3")
checkTestSucceeded(events(8), "Fixture Scope 2 Fixture Test 3")
checkTestStarting(events(9), "Fixture Scope 2 Fixture Test 4")
checkTestSucceeded(events(10), "Fixture Scope 2 Fixture Test 4")
checkScopeClosed(events(11), "Fixture Scope 2")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFixtureFunSpec
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFeatureSpec extends AnyFeatureSpec with DeprecatedOrderExpectedResults with ParallelTestExecution {
Feature("Scope 1") {
Scenario("Test 1") {}
Scenario("Test 2") {}
}
Feature("Scope 2") {
Scenario("Test 3") {}
Scenario("Test 4") {}
}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 12)
checkScopeOpened(events(0), "Feature: Scope 1")
checkTestStarting(events(1), "Feature: Scope 1 Scenario: Test 1")
checkTestSucceeded(events(2), "Feature: Scope 1 Scenario: Test 1")
checkTestStarting(events(3), "Feature: Scope 1 Scenario: Test 2")
checkTestSucceeded(events(4), "Feature: Scope 1 Scenario: Test 2")
checkScopeClosed(events(5), "Feature: Scope 1")
checkScopeOpened(events(6), "Feature: Scope 2")
checkTestStarting(events(7), "Feature: Scope 2 Scenario: Test 3")
checkTestSucceeded(events(8), "Feature: Scope 2 Scenario: Test 3")
checkTestStarting(events(9), "Feature: Scope 2 Scenario: Test 4")
checkTestSucceeded(events(10), "Feature: Scope 2 Scenario: Test 4")
checkScopeClosed(events(11), "Feature: Scope 2")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFeatureSpec
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFixtureFeatureSpec extends featurespec.FixtureAnyFeatureSpec with DeprecatedOrderExpectedResults with ParallelTestExecution with StringFixture {
Feature("Fixture Scope 1") {
Scenario("Fixture Test 1") { fixture => }
Scenario("Fixture Test 2") { fixture =>}
}
Feature("Fixture Scope 2") {
Scenario("Fixture Test 3") { fixture => }
Scenario("Fixture Test 4") { fixture =>}
}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 12)
checkScopeOpened(events(0), "Feature: Fixture Scope 1")
checkTestStarting(events(1), "Feature: Fixture Scope 1 Scenario: Fixture Test 1")
checkTestSucceeded(events(2), "Feature: Fixture Scope 1 Scenario: Fixture Test 1")
checkTestStarting(events(3), "Feature: Fixture Scope 1 Scenario: Fixture Test 2")
checkTestSucceeded(events(4), "Feature: Fixture Scope 1 Scenario: Fixture Test 2")
checkScopeClosed(events(5), "Feature: Fixture Scope 1")
checkScopeOpened(events(6), "Feature: Fixture Scope 2")
checkTestStarting(events(7), "Feature: Fixture Scope 2 Scenario: Fixture Test 3")
checkTestSucceeded(events(8), "Feature: Fixture Scope 2 Scenario: Fixture Test 3")
checkTestStarting(events(9), "Feature: Fixture Scope 2 Scenario: Fixture Test 4")
checkTestSucceeded(events(10), "Feature: Fixture Scope 2 Scenario: Fixture Test 4")
checkScopeClosed(events(11), "Feature: Fixture Scope 2")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFixtureFeatureSpec
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFlatSpec extends AnyFlatSpec with DeprecatedOrderExpectedResults with ParallelTestExecution {
behavior of "Scope 1"
it should "Test 1" in {}
it should "Test 2" in {}
behavior of "Scope 2"
it should "Test 3" in {}
it should "Test 4" in {}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 should Test 1")
checkTestSucceeded(events(2), "Scope 1 should Test 1")
checkTestStarting(events(3), "Scope 1 should Test 2")
checkTestSucceeded(events(4), "Scope 1 should Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 should Test 3")
checkTestSucceeded(events(8), "Scope 2 should Test 3")
checkTestStarting(events(9), "Scope 2 should Test 4")
checkTestSucceeded(events(10), "Scope 2 should Test 4")
checkScopeClosed(events(11), "Scope 2")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFlatSpec
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFixtureFlatSpec extends flatspec.FixtureAnyFlatSpec with DeprecatedOrderExpectedResults with ParallelTestExecution with StringFixture {
behavior of "Fixture Scope 1"
it should "Fixture Test 1" in { fixture => }
it should "Fixture Test 2" in { fixture => }
behavior of "Fixture Scope 2"
it should "Fixture Test 3" in { fixture => }
it should "Fixture Test 4" in { fixture => }
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 12)
checkScopeOpened(events(0), "Fixture Scope 1")
checkTestStarting(events(1), "Fixture Scope 1 should Fixture Test 1")
checkTestSucceeded(events(2), "Fixture Scope 1 should Fixture Test 1")
checkTestStarting(events(3), "Fixture Scope 1 should Fixture Test 2")
checkTestSucceeded(events(4), "Fixture Scope 1 should Fixture Test 2")
checkScopeClosed(events(5), "Fixture Scope 1")
checkScopeOpened(events(6), "Fixture Scope 2")
checkTestStarting(events(7), "Fixture Scope 2 should Fixture Test 3")
checkTestSucceeded(events(8), "Fixture Scope 2 should Fixture Test 3")
checkTestStarting(events(9), "Fixture Scope 2 should Fixture Test 4")
checkTestSucceeded(events(10), "Fixture Scope 2 should Fixture Test 4")
checkScopeClosed(events(11), "Fixture Scope 2")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFixtureFlatSpec
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFreeSpec extends AnyFreeSpec with DeprecatedOrderExpectedResults with ParallelTestExecution {
"Scope 1" - {
"Test 1" in {}
"Test 2" in {}
}
"Scope 2" - {
"Test 3" in {}
"Test 4" in {}
}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 Test 1")
checkTestSucceeded(events(2), "Scope 1 Test 1")
checkTestStarting(events(3), "Scope 1 Test 2")
checkTestSucceeded(events(4), "Scope 1 Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 Test 3")
checkTestSucceeded(events(8), "Scope 2 Test 3")
checkTestStarting(events(9), "Scope 2 Test 4")
checkTestSucceeded(events(10), "Scope 2 Test 4")
checkScopeClosed(events(11), "Scope 2")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFreeSpec
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFixtureFreeSpec extends freespec.FixtureAnyFreeSpec with DeprecatedOrderExpectedResults with ParallelTestExecution with StringFixture {
"Fixture Scope 1" - {
"Fixture Test 1" in { fixture => }
"Fixture Test 2" in { fixture => }
}
"Fixture Scope 2" - {
"Fixture Test 3" in { fixture => }
"Fixture Test 4" in { fixture => }
}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 12)
checkScopeOpened(events(0), "Fixture Scope 1")
checkTestStarting(events(1), "Fixture Scope 1 Fixture Test 1")
checkTestSucceeded(events(2), "Fixture Scope 1 Fixture Test 1")
checkTestStarting(events(3), "Fixture Scope 1 Fixture Test 2")
checkTestSucceeded(events(4), "Fixture Scope 1 Fixture Test 2")
checkScopeClosed(events(5), "Fixture Scope 1")
checkScopeOpened(events(6), "Fixture Scope 2")
checkTestStarting(events(7), "Fixture Scope 2 Fixture Test 3")
checkTestSucceeded(events(8), "Fixture Scope 2 Fixture Test 3")
checkTestStarting(events(9), "Fixture Scope 2 Fixture Test 4")
checkTestSucceeded(events(10), "Fixture Scope 2 Fixture Test 4")
checkScopeClosed(events(11), "Fixture Scope 2")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFixtureFreeSpec
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderPropSpec extends AnyPropSpec with DeprecatedOrderExpectedResults with ParallelTestExecution {
property("Test 1") {}
property("Test 2") {}
property("Test 3") {}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 6)
checkTestStarting(events(0), "Test 1")
checkTestSucceeded(events(1), "Test 1")
checkTestStarting(events(2), "Test 2")
checkTestSucceeded(events(3), "Test 2")
checkTestStarting(events(4), "Test 3")
checkTestSucceeded(events(5), "Test 3")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderPropSpec
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFixturePropSpec extends propspec.FixtureAnyPropSpec with DeprecatedOrderExpectedResults with ParallelTestExecution with StringFixture {
property("Fixture Test 1") { fixture => }
property("Fixture Test 2") { fixture => }
property("Fixture Test 3") { fixture => }
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 6)
checkTestStarting(events(0), "Fixture Test 1")
checkTestSucceeded(events(1), "Fixture Test 1")
checkTestStarting(events(2), "Fixture Test 2")
checkTestSucceeded(events(3), "Fixture Test 2")
checkTestStarting(events(4), "Fixture Test 3")
checkTestSucceeded(events(5), "Fixture Test 3")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFixturePropSpec
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderWordSpec extends AnyWordSpec with DeprecatedOrderExpectedResults with ParallelTestExecution {
"Scope 1" should {
"Test 1" in {}
"Test 2" in {}
}
"Scope 2" should {
"Test 3" in {}
"Test 4" in {}
}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 12)
checkScopeOpened(events(0), "Scope 1")
checkTestStarting(events(1), "Scope 1 should Test 1")
checkTestSucceeded(events(2), "Scope 1 should Test 1")
checkTestStarting(events(3), "Scope 1 should Test 2")
checkTestSucceeded(events(4), "Scope 1 should Test 2")
checkScopeClosed(events(5), "Scope 1")
checkScopeOpened(events(6), "Scope 2")
checkTestStarting(events(7), "Scope 2 should Test 3")
checkTestSucceeded(events(8), "Scope 2 should Test 3")
checkTestStarting(events(9), "Scope 2 should Test 4")
checkTestSucceeded(events(10), "Scope 2 should Test 4")
checkScopeClosed(events(11), "Scope 2")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderWordSpec
}
@DoNotDiscover
protected[scalatest] class DeprecatedExampleParallelTestExecutionOrderFixtureWordSpec extends wordspec.FixtureAnyWordSpec with DeprecatedOrderExpectedResults with ParallelTestExecution with StringFixture {
"Fixture Scope 1" should {
"Fixture Test 1" in { fixture => }
"Fixture Test 2" in { fixture => }
}
"Fixture Scope 2" should {
"Fixture Test 3" in { fixture => }
"Fixture Test 4" in { fixture => }
}
def assertOrderTest(events: List[Event]): Unit = {
assert(events.size === 12)
checkScopeOpened(events(0), "Fixture Scope 1")
checkTestStarting(events(1), "Fixture Scope 1 should Fixture Test 1")
checkTestSucceeded(events(2), "Fixture Scope 1 should Fixture Test 1")
checkTestStarting(events(3), "Fixture Scope 1 should Fixture Test 2")
checkTestSucceeded(events(4), "Fixture Scope 1 should Fixture Test 2")
checkScopeClosed(events(5), "Fixture Scope 1")
checkScopeOpened(events(6), "Fixture Scope 2")
checkTestStarting(events(7), "Fixture Scope 2 should Fixture Test 3")
checkTestSucceeded(events(8), "Fixture Scope 2 should Fixture Test 3")
checkTestStarting(events(9), "Fixture Scope 2 should Fixture Test 4")
checkTestSucceeded(events(10), "Fixture Scope 2 should Fixture Test 4")
checkScopeClosed(events(11), "Fixture Scope 2")
}
//SCALATESTJS,NATIVE-ONLY override def newInstance: Suite with ParallelTestExecution = new ExampleParallelTestExecutionOrderFixtureWordSpec
}
|
scalatest/scalatest
|
jvm/scalatest-test/src/test/scala/org/scalatest/DeprecatedParallelTestExecutionOrderExamples.scala
|
Scala
|
apache-2.0
| 20,592
|
package controller
import skinny._
import skinny.filter.TxPerRequestFilter
class SampleTxApiController extends SkinnyApiController with TxPerRequestFilter {
def index = throw new RuntimeException("test")
}
|
Kuchitama/skinny-framework
|
example/src/main/scala/controller/SampleTxApiController.scala
|
Scala
|
mit
| 211
|
package org.jetbrains.plugins.scala.lang.completion.keyword
package generated
class KeywordCompletionToplevelTest extends KeywordCompletionTestBase {
//This class was generated by build script, please don't change this
override def folderPath: String = super.folderPath + "toplevel/"
def testcaseClass() = doTest()
def testcaseClause() = doTest()
def testclass() = doTest()
def testimport() = doTest()
def testpackage1() = doTest()
}
|
katejim/intellij-scala
|
test/org/jetbrains/plugins/scala/lang/completion/keyword/generated/KeywordCompletionToplevelTest.scala
|
Scala
|
apache-2.0
| 453
|
package coursier.cli.install
import java.io.File
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import cats.data.{NonEmptyList, Validated, ValidatedNel}
import cats.implicits._
import coursier.cli.jvm.SharedJavaParams
import coursier.cli.params.{CacheParams, EnvParams, OutputParams, RepositoryParams}
import coursier.install.Channel
final case class InstallParams(
cache: CacheParams,
output: OutputParams,
shared: SharedInstallParams,
sharedChannel: SharedChannelParams,
sharedJava: SharedJavaParams,
repository: RepositoryParams,
env: EnvParams,
addChannels: Seq[Channel],
installChannels: Seq[String],
force: Boolean
) {
lazy val channels: Seq[Channel] =
(sharedChannel.channels ++ addChannels).distinct
}
object InstallParams {
def apply(options: InstallOptions, anyArg: Boolean): ValidatedNel[String, InstallParams] = {
val cacheParamsV = options.cacheOptions.params(None)
val outputV = OutputParams(options.outputOptions)
val shared = SharedInstallParams(options.sharedInstallOptions)
val sharedChannelV = SharedChannelParams(options.sharedChannelOptions)
val sharedJavaV = SharedJavaParams(options.sharedJavaOptions)
val envV = EnvParams(options.envOptions)
val repoV = RepositoryParams(options.repositoryOptions)
val addChannelsV = options.addChannel.traverse { s =>
val e = Channel.parse(s)
.left.map(NonEmptyList.one)
.map(c => (s, c))
Validated.fromEither(e)
}
val force = options.force
val checkNeedsChannelsV = {
val missingChannels = anyArg &&
sharedChannelV.toOption.exists(_.channels.isEmpty) &&
addChannelsV.toOption.exists(_.isEmpty)
if (missingChannels)
Validated.invalidNel(s"Error: no channels specified")
else
Validated.validNel(())
}
val flags = Seq(
options.addChannel.nonEmpty,
envV.toOption.fold(false)(_.anyFlag)
)
val flagsV =
if (flags.count(identity) > 1)
Validated.invalidNel("Error: can only specify one of --add-channel, --env, --setup.")
else
Validated.validNel(())
val checkArgsV =
if (anyArg && flags.exists(identity))
Validated.invalidNel(
s"Error: unexpected arguments passed along --add-channel, --env, or --setup."
)
else
Validated.validNel(())
(
cacheParamsV,
outputV,
sharedChannelV,
sharedJavaV,
envV,
repoV,
addChannelsV,
checkNeedsChannelsV,
flagsV,
checkArgsV
).mapN {
(cacheParams, output, sharedChannel, sharedJava, env, repo, addChannels, _, _, _) =>
InstallParams(
cacheParams,
output,
shared,
sharedChannel,
sharedJava,
repo,
env,
addChannels.map(_._2),
addChannels.map(_._1),
force
)
}
}
}
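// A minimal, self-contained sketch (hypothetical, not part of coursier) of the
// ValidatedNel accumulation used above: combining validations with mapN keeps
// *all* errors instead of failing fast on the first one.
object ValidatedNelSketch {
  import cats.data.{Validated, ValidatedNel}
  import cats.implicits._
  def main(args: Array[String]): Unit = {
    val ok: ValidatedNel[String, Int] = Validated.validNel(1)
    val e1: ValidatedNel[String, Int] = Validated.invalidNel("Error: no channels specified")
    val e2: ValidatedNel[String, Int] = Validated.invalidNel("Error: unexpected arguments")
    assert((ok, ok).mapN(_ + _) == Validated.validNel(2))  // both valid: values combined
    assert((e1, e2).mapN(_ + _).fold(_.size, _ => 0) == 2) // both invalid: both errors kept
  }
}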
|
alexarchambault/coursier
|
modules/cli/src/main/scala/coursier/cli/install/InstallParams.scala
|
Scala
|
apache-2.0
| 2,940
|
/* Copyright 2014-2015 - Big Data Technologies S.R.L. All Rights Reserved. */
package supler
import app.utils.IDGenerator
import japgolly.scalajs.react.ReactElement
import scala.collection.mutable.ListBuffer
import scala.scalajs.js
import app.utils.ExtJSON._
import japgolly.scalajs.react.vdom.prefix_<^._
case class Form(id: String, fields: List[Field] = Nil, fieldOrder:List[List[String]]=Nil,
renderHint: Option[RenderHint] = None, meta:js.Dictionary[Any]=js.Dictionary[Any](),
isSuplerForm:Boolean=false) extends GridSystem{
def render(implicit idGenerator: IDGenerator): ReactElement = {
<.form(id.nonEmpty ?= (^.id := id), //^.cls := "form-horizontal",
<.div(^.cls := "div-container-fluid",
fieldOrder.map(fo =>
<.div(^.cls := "row",
fo.map(field =>
<.div(^.cls := getClassGridSystem(12, fo.length),
fields.find(_.name == field).map(_.render(renderHint))
)
)
)
// fields.map(_.render(renderHint))
)
)
)
}
def extractValue: js.Dictionary[js.Any] = {
var values = fields.flatMap(_.extractValue)
values ::= ("id" -> id)
js.Dictionary(values: _*)
}
}
object Form {
def fromJson(json: js.Dictionary[Any], onChange: (Field => Unit), globalOptions: GlobalOptions=GlobalOptions()): Form = {
var gl=globalOptions.updateErrors(json.asListDict("errors"))
val mainFormJSON=json.asObj("main_form")
val fieldsJson = mainFormJSON.as[js.Array[js.Dictionary[Any]]]("fields", js.Array[js.Dictionary[Any]]())
val fields = fieldsJson.map(f=> Field.fromJson(f, onChange = onChange, globalOptions=gl))
val fieldOrderJson = mainFormJSON.as[js.Array[js.Array[String]]]("fieldOrder", js.Array[js.Array[String]]())
val fieldOrder = fieldOrderJson.map(f=> f.toList).toList
new Form(
id = mainFormJSON.as[String]("id", ""),
fields = fields.toList,
fieldOrder=fieldOrder,
isSuplerForm=json.as[Boolean]("is_supler_form", false),
meta=json.asObj("supler_meta")
)
}
}
trait Field {
def name: String
def `type`: String
def label: String
def path: String
def enabled: Boolean
def onChange: (Field => Unit)
def render(parentRenderHint: Option[RenderHint])(implicit idGenerator: IDGenerator): TagMod
def extractValue: Option[(String, js.Any)]
def globalOptions:GlobalOptions
  // renders the input body, optionally preceded by the label and followed by any error messages
def renderInputBody(id:String, parentRenderHint: Option[RenderHint], body:TagMod, skipLabel:Boolean=false):List[TagMod]={
val items=new ListBuffer[TagMod]()
if(!skipLabel && shouldRenderLabel(parentRenderHint))
items += renderLabel(id, parentRenderHint)
items += body
if(globalOptions.hasError(this.path))
items += getErrorsDiv
items.toList
}
def renderLabel(id:String, renderHint: Option[RenderHint]):ReactElement={
<.label(^.`for` := id, label)
}
  // returns whether the field should render the label
def shouldRenderLabel(renderHint: Option[RenderHint]):Boolean={
renderHint match {
case Some(rh) =>
rh.name match {
case "table"=>false
case _ => true
}
case None =>
true
}
}
def getErrorClass=if(globalOptions.hasError(this.path)) " has-error" else ""
def getErrorsDiv:ReactElement = {
val errors=globalOptions.errors.filter(_.fieldPath==this.path)
<.div(^.cls := "text-danger", errors.map(_.errorKey))
}
// <div class="text-danger" id="id4">Value is required</div>
}
object Field {
def fromJson(json: js.Dictionary[Any], onChange: (Field => Unit), globalOptions: GlobalOptions): Field = {
// println(name)
val kind = json.apply("type").asInstanceOf[String]
import supler.fields._
kind match {
case StringField.NAME => StringField.fromJson(json, onChange, globalOptions=globalOptions)
case BooleanField.NAME => BooleanField.fromJson(json, onChange, globalOptions=globalOptions)
case FloatField.NAME => FloatField.fromJson(json, onChange, globalOptions=globalOptions)
case IntegerField.NAME => IntegerField.fromJson(json, onChange, globalOptions=globalOptions)
case SelectField.NAME => SelectField.fromJson(json, onChange, globalOptions=globalOptions)
case MultiEditField.NAME => MultiEditField.fromJson(json, onChange, globalOptions=globalOptions)
case SubFormField.NAME => SubFormField.fromJson(json, onChange, globalOptions=globalOptions)
case ActionField.NAME => ActionField.fromJson(json, onChange, globalOptions=globalOptions)
case StaticField.NAME => StaticField.fromJson(json, onChange, globalOptions=globalOptions)
}
}
}
trait GridSystem{
def getClassGridSystem(numberColumns: Int, numberFieldsRow: Int): String = {
val baseClass = "col-md-"
numberFieldsRow match {
case 0 => baseClass + numberColumns
case _ => {
val value = Math floor numberColumns / numberFieldsRow
value match {
case 0 => baseClass + 1
case _ => baseClass + value
}
}
}
}
}
|
aparo/scalajs-supler
|
supler/js/src/main/supler/Form.scala
|
Scala
|
apache-2.0
| 5,196
|
package scorex.app.api.http
import akka.util.Timeout
import play.api.libs.json.{JsObject, JsValue}
import scorex.account.Account
import scorex.app.Controller
import scorex.block.Block
import scorex.crypto.{Base58, SigningFunctionsImpl}
import scala.concurrent.duration._
trait CommonApiFunctions {
implicit val timeout = Timeout(5.seconds)
protected[api] def walletExists(): Option[JsObject] =
if (Controller.wallet.exists()) {
Some(ApiError.json(ApiError.WalletAlreadyExists))
} else None
protected[api] def withBlock(encodedSignature: String)(action: Block => JsValue): JsValue =
Base58.decode(encodedSignature).toOption.map { signature =>
Controller.blockchainStorage.blockByHeader(signature) match {
case Some(block) => action(block)
case None => ApiError.json(ApiError.BlockNotExists)
}
}.getOrElse(ApiError.json(ApiError.InvalidSignature))
protected[api] def withAccount(address: String)(action: Account => JsValue): JsValue =
walletNotExists().getOrElse {
if (!Account.isValidAddress(address)) {
ApiError.json(ApiError.InvalidAddress)
} else {
Controller.wallet.privateKeyAccount(address) match {
case None => ApiError.json(ApiError.WalletAddressNotExists)
case Some(account) => action(account)
}
}
}
protected[api] def walletNotExists(): Option[JsObject] =
if (!Controller.wallet.exists()) {
Some(ApiError.json(ApiError.WalletNotExist))
} else None
}
|
pozharko/Scorex-Lagonaki
|
src/main/scala/scorex/app/api/http/CommonApiFunctions.scala
|
Scala
|
cc0-1.0
| 1,510
|
package org.crockeo.genericplatformer.game.world
import org.crockeo.genericplatformer.assets.TextureManager
import org.crockeo.genericplatformer.game.World
import org.crockeo.genericplatformer.geom._
import org.crockeo.genericplatformer.Graphics
class Player(sp: Vector) extends WorldObject(sp, new Vector(32, 64)) {
private val maxspeed : Vector = new Vector(300f , 1200f)
private val minspeed : Vector = new Vector(50f , 50f )
private val accelrate : Vector = new Vector(0.25f, 1.75f)
private val airaccelrate: Vector = new Vector(1f , 1.75f)
private val decelrate : Vector = new Vector(0.2f , 1f )
  private val airdecelrate: Vector = new Vector(2f , 1f )
private val jumpspeed : Float = 400
private var speed: Vector = new Vector(0, 0)
private var onground: Boolean = true
private var justjumped: Boolean = false
private var jumpsleft: Int = 2
// Renderable
def render = {
Graphics.render("player", TextureManager.rLoad("res/player.png"), pos.x, pos.y, size.x, size.y)
}
// Updateable
def update(w: World, rd: Map[String, Boolean], dt: Float) {
// Handling the input
def handleInput {
val as =
if (onground) accelrate
else airaccelrate
val ds =
if (onground) decelrate
else airdecelrate
// Moving leftward
if (rd("left") )
if (speed.x > minspeed.x) speed.x = Lerps.lerp(speed.x, -maxspeed.x, Math.abs(as.x - ds.x), dt)
else speed.x = Lerps.lerp(speed.x, -maxspeed.x, as.x , dt)
// Moving rightward
if (rd("right"))
if (speed.x < -minspeed.x) speed.x = Lerps.lerp(speed.x, maxspeed.x, math.abs(as.x - ds.x), dt)
else speed.x = Lerps.lerp(speed.x, maxspeed.x, as.x , dt)
// Decelerating
if (!rd("left") && !rd("right")) {
if (speed.x >= -minspeed.x &&
speed.x <= minspeed.x &&
onground ) speed.x = 0
else speed.x = Lerps.lerp(speed.x, 0, ds.x, dt)
}
// Jumping
if (rd("jump") && jumpsleft > 0) {
if (!justjumped) {
if (rd("left") && speed.x > minspeed.x) speed.x = -maxspeed.x / 3
if (rd("right") && speed.x < minspeed.x) speed.x = maxspeed.x / 3
speed.y = -jumpspeed
onground = false
jumpsleft -= 1
justjumped = true
}
} else justjumped = false
// Respawning
if (rd("reset")) w.respawn
}
// Applying gravity
def gravity {
if (onground) jumpsleft = 2
else {
jumpsleft =
if (jumpsleft >= 1) 1
else 0
speed.y = Lerps.lerp(speed.y, maxspeed.y, accelrate.y, dt)
}
}
// Making sure the player doesn't exceed its max speed
def fixSpeeds {
if (speed.x > maxspeed.x) speed.x = maxspeed.x
if (speed.x < -maxspeed.x) speed.x = -maxspeed.x
if (speed.y > maxspeed.y) speed.y = maxspeed.y
if (speed.y < -maxspeed.y) speed.y = -maxspeed.y
if (onground) speed.y = 0
}
// Moving
def move { translate(speed * dt) }
// Handling collision logic
def bound {
var anycollision = false
for (b <- w.blocks) {
collision(b) match {
          case None => ()
case Some(ct) => {
anycollision = true
ct match {
case TopCollision => {
pos.y = b.bottom
if (speed.y < 0) speed.y = (-speed.y / 12)
}
case BottomCollision => {
if (speed.y >= 0) {
pos.y = b.top - size.y
onground = true
jumpsleft = 2
}
}
case LeftCollision => {
pos.x = b.right
if (speed.x < 0) speed.x = (-speed.x / 6)
}
case RightCollision => {
pos.x = b.left - size.x
if (speed.x > 0) speed.x = (-speed.x / 6)
}
}
}
}
}
for (cp <- w.checkpoints) {
if (collides(cp)) {
w.activeCheckpoint.activated = false
cp.activated = true
w.activeCheckpoint = cp
}
}
if (!anycollision) onground = false
}
handleInput
gravity
fixSpeeds
move
bound
}
}
|
crockeo/generic-platformer
|
src/org/crockeo/genericplatformer/game/world/Player.scala
|
Scala
|
gpl-3.0
| 4,682
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.tez.io
import org.apache.spark.shuffle.ShuffleReader
import org.apache.tez.runtime.api.LogicalInput
import org.apache.tez.runtime.api.Reader
import org.apache.tez.runtime.library.api.KeyValuesReader
import org.apache.tez.runtime.library.api.KeyValueReader
import org.apache.spark.shuffle.BaseShuffleHandle
import org.apache.spark.TaskContext
import org.apache.hadoop.io.Writable
import java.util.Map
import org.apache.hadoop.io.Text
import org.apache.hadoop.io.IntWritable
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.conf.Configuration
import org.apache.tez.dag.api.TezConfiguration
import org.apache.spark.InterruptibleIterator
import org.apache.spark.util.NextIterator
import java.io.IOException
import scala.collection.JavaConverters._
/**
 * Implementation of Spark's ShuffleReader which delegates its read functionality to Tez.
 * This implementation is tailored for before-shuffle reads (e.g. reading the initial source).
 */
//TODO: combine common functionality in TezShuffleReader into an abstract class
class TezSourceReader[K, C](input: Map[Integer, LogicalInput])
extends ShuffleReader[K, C] {
private val inputIndex = input.keySet().iterator().next()
private val reader = input.remove(this.inputIndex).getReader().asInstanceOf[KeyValueReader]
  /**
   * Reads every key/value pair exposed by the underlying Tez [[KeyValueReader]]
   * as an iterator of key/value tuples.
   */
override def read(): Iterator[Product2[K, C]] = {
new SourceIterator(this.reader).asInstanceOf[Iterator[Product2[K, C]]].map(pair => (pair._1, pair._2))
}
  /**
   * No-op: this reader holds no resources of its own to release.
   */
def stop = ()
}
/**
 * Iterator adapter over a Tez [[KeyValueReader]] which tracks the hasNext/next
 * handshake explicitly so the underlying reader is only advanced when needed.
 */
private class SourceIterator[K, C](reader: KeyValueReader) extends Iterator[Product2[Any, Any]] {
  var hasNextNeverCalled = true
  var containsNext = false
  var shouldCheckHasNext = false
  private var currentValues: Iterator[Object] = _
  /**
   * Delegates to the reader only when a new answer is required, caching the
   * result between calls.
   */
  override def hasNext(): Boolean = {
    if (this.hasNextNeverCalled || shouldCheckHasNext) {
this.hasNextNeverCalled = false
this.containsNext = this.doHasNext
}
this.containsNext
}
  /**
   * Returns the current key/value pair; callers are expected to invoke
   * hasNext() first (see the exception below).
   */
override def next(): Product2[Any, Any] = {
if (hasNextNeverCalled) {
this.hasNext
}
if (this.containsNext) {
val result = (reader.getCurrentKey, reader.getCurrentValue)
      this.shouldCheckHasNext = true
result
} else {
throw new IllegalStateException("Reached the end of the iterator. " +
"Calling hasNext() prior to next() would avoid this exception")
}
}
  /**
   * Advances the underlying Tez reader and reports whether another pair is available.
   */
  private def doHasNext(): Boolean = {
    this.shouldCheckHasNext = false
this.reader.next
}
}
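// A minimal, hypothetical sketch (not part of this file's API) of the protocol
// documented by the exception in SourceIterator.next(): always consult hasNext()
// before next(), so the underlying reader is advanced exactly once per pair.
object SourceIteratorProtocolSketch {
  def drain[K, V](it: Iterator[Product2[K, V]]): Vector[(K, V)] = {
    val out = Vector.newBuilder[(K, V)]
    while (it.hasNext) {  // hasNext() may advance the underlying reader
      val kv = it.next()  // safe: hasNext() was called first
      out += ((kv._1, kv._2))
    }
    out.result()
  }
  def main(args: Array[String]): Unit =
    assert(drain(Iterator(("a", 1), ("b", 2))) == Vector(("a", 1), ("b", 2)))
}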
|
sequenceiq/spark-native-yarn
|
src/main/scala/org/apache/spark/tez/io/TezSourceReader.scala
|
Scala
|
apache-2.0
| 3,358
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.s2graph.core.benchmark
import org.apache.s2graph.core.OrderingUtil._
import org.apache.s2graph.core.{OrderingUtil, SeqMultiOrdering}
import play.api.libs.json.{JsNumber, JsValue}
import scala.util.Random
class OrderingUtilBenchmarkSpec extends BenchmarkCommon {
"OrderingUtilBenchmarkSpec" should {
"performance MultiOrdering any" >> {
val tupLs = (0 until 10) map { i =>
Random.nextDouble() -> Random.nextLong()
}
val seqLs = tupLs.map { tup =>
Seq(tup._1, tup._2)
}
val sorted1 = duration("TupleOrdering double,long") {
(0 until 1000) foreach { _ =>
tupLs.sortBy { case (x, y) =>
-x -> -y
}
}
tupLs.sortBy { case (x, y) =>
-x -> -y
}
}.map { x => x._1 }
val sorted2 = duration("MultiOrdering double,long") {
(0 until 1000) foreach { _ =>
seqLs.sorted(new SeqMultiOrdering[Any](Seq(false, false)))
}
seqLs.sorted(new SeqMultiOrdering[Any](Seq(false, false)))
}.map { x => x.head }
sorted1.toString() must_== sorted2.toString()
}
"performance MultiOrdering double" >> {
val tupLs = (0 until 50) map { i =>
Random.nextDouble() -> Random.nextDouble()
}
val seqLs = tupLs.map { tup =>
Seq(tup._1, tup._2)
}
duration("MultiOrdering double") {
(0 until 1000) foreach { _ =>
seqLs.sorted(new SeqMultiOrdering[Double](Seq(false, false)))
}
}
duration("TupleOrdering double") {
(0 until 1000) foreach { _ =>
tupLs.sortBy { case (x, y) =>
-x -> -y
}
}
}
1 must_== 1
}
"performance MultiOrdering jsvalue" >> {
val tupLs = (0 until 50) map { i =>
Random.nextDouble() -> Random.nextLong()
}
val seqLs = tupLs.map { tup =>
Seq(JsNumber(tup._1), JsNumber(tup._2))
}
val sorted1 = duration("TupleOrdering double,long") {
(0 until 1000) foreach { _ =>
tupLs.sortBy { case (x, y) =>
-x -> -y
}
}
tupLs.sortBy { case (x, y) =>
-x -> -y
}
}
val sorted2 = duration("MultiOrdering jsvalue") {
(0 until 1000) foreach { _ =>
seqLs.sorted(new SeqMultiOrdering[JsValue](Seq(false, false)))
}
seqLs.sorted(new SeqMultiOrdering[JsValue](Seq(false, false)))
}
1 must_== 1
}
}
}
|
daewon/incubator-s2graph
|
s2core/src/test/scala/org/apache/s2graph/core/benchmark/OrderingUtilBenchmarkSpec.scala
|
Scala
|
apache-2.0
| 3,326
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.stream
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.PartialFinalType
import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecLocalGroupAggregate
import org.apache.flink.table.planner.plan.nodes.exec.{InputProperty, ExecNode}
import org.apache.flink.table.planner.plan.utils._
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rel.{RelNode, RelWriter}
import java.util
/**
* Stream physical RelNode for unbounded local group aggregate.
*
* @see [[StreamPhysicalGroupAggregateBase]] for more info.
*/
class StreamPhysicalLocalGroupAggregate(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
val grouping: Array[Int],
val aggCalls: Seq[AggregateCall],
aggCallNeedRetractions: Array[Boolean],
needRetraction: Boolean,
val partialFinalType: PartialFinalType)
extends StreamPhysicalGroupAggregateBase(cluster, traitSet, inputRel) {
private lazy val aggInfoList = AggregateUtil.transformToStreamAggregateInfoList(
FlinkTypeFactory.toLogicalRowType(inputRel.getRowType),
aggCalls,
aggCallNeedRetractions,
needRetraction,
isStateBackendDataViews = false)
override def requireWatermark: Boolean = false
override def deriveRowType(): RelDataType = {
AggregateUtil.inferLocalAggRowType(
aggInfoList,
inputRel.getRowType,
grouping,
getCluster.getTypeFactory.asInstanceOf[FlinkTypeFactory])
}
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new StreamPhysicalLocalGroupAggregate(
cluster,
traitSet,
inputs.get(0),
grouping,
aggCalls,
aggCallNeedRetractions,
needRetraction,
partialFinalType)
}
override def explainTerms(pw: RelWriter): RelWriter = {
val inputRowType = getInput.getRowType
super.explainTerms(pw)
.itemIf("groupBy", RelExplainUtil.fieldToString(grouping, inputRowType),
grouping.nonEmpty)
.itemIf("partialFinalType", partialFinalType, partialFinalType != PartialFinalType.NONE)
.item("select", RelExplainUtil.streamGroupAggregationToString(
inputRowType,
getRowType,
aggInfoList,
grouping,
isLocal = true))
}
override def translateToExecNode(): ExecNode[_] = {
new StreamExecLocalGroupAggregate(
grouping,
aggCalls.toArray,
aggCallNeedRetractions,
needRetraction,
InputProperty.DEFAULT,
FlinkTypeFactory.toLogicalRowType(getRowType),
getRelDetailedDescription
)
}
}
|
tillrohrmann/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/stream/StreamPhysicalLocalGroupAggregate.scala
|
Scala
|
apache-2.0
| 3,585
|
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compatibility
import org.neo4j.cypher.CypherVersion
import org.neo4j.cypher.internal._
import org.neo4j.cypher.internal.compiler.v1_9.executionplan.{ExecutionPlan => ExecutionPlan_v1_9}
import org.neo4j.cypher.internal.compiler.v1_9.{CypherCompiler => CypherCompiler1_9}
import org.neo4j.cypher.internal.compiler.v2_3
import org.neo4j.cypher.internal.compiler.v2_3.CompilationPhaseTracer
import org.neo4j.cypher.internal.spi.v1_9.{GDSBackedQueryContext => QueryContext_v1_9}
import org.neo4j.graphdb.GraphDatabaseService
import org.neo4j.kernel.GraphDatabaseAPI
import org.neo4j.kernel.api.Statement
import org.neo4j.kernel.impl.query.{QueryExecutionMonitor, QuerySession}
import org.neo4j.kernel.monitoring.Monitors
case class CompatibilityFor1_9(graph: GraphDatabaseService, queryCacheSize: Int, kernelMonitors: Monitors) {
private val queryCache1_9 = new v2_3.LRUCache[String, Object](queryCacheSize)
private val compiler1_9 = new CypherCompiler1_9(graph, (q, f) => queryCache1_9.getOrElseUpdate(q, f))
implicit val executionMonitor = kernelMonitors.newMonitor(classOf[QueryExecutionMonitor])
def parseQuery(statementAsText: String) = new ParsedQuery {
def plan(statement: Statement, tracer: CompilationPhaseTracer): (ExecutionPlan, Map[String, Any]) = {
val planImpl = compiler1_9.prepare(statementAsText)
(new ExecutionPlanWrapper(planImpl), Map.empty)
}
def isPeriodicCommit = false
    // we lack the knowledge of whether or not this query is correct
def hasErrors = false
}
class ExecutionPlanWrapper(inner: ExecutionPlan_v1_9) extends ExecutionPlan {
private def queryContext(graph: GraphDatabaseAPI) =
new QueryContext_v1_9(graph)
def run(graph: GraphDatabaseAPI, txInfo: TransactionInfo, executionMode: CypherExecutionMode, params: Map[String, Any], session: QuerySession) = executionMode match {
case CypherExecutionMode.normal => execute(graph, txInfo, params, session)
case CypherExecutionMode.profile => profile(graph, txInfo, params, session)
case _ => throw new UnsupportedOperationException(s"${CypherVersion.v1_9.name}: $executionMode is unsupported")
}
private def execute(graph: GraphDatabaseAPI, txInfo: TransactionInfo, params: Map[String, Any], session: QuerySession) = {
implicit val qs = session
LegacyExecutionResultWrapper(inner.execute(queryContext(graph), txInfo.tx, params), planDescriptionRequested = false, CypherVersion.v1_9)
}
private def profile(graph: GraphDatabaseAPI, txInfo: TransactionInfo, params: Map[String, Any], session: QuerySession) = {
implicit val qs = session
LegacyExecutionResultWrapper(inner.profile(queryContext(graph), txInfo.tx, params), planDescriptionRequested = true, CypherVersion.v1_9)
}
def isPeriodicCommit = false
def isStale(lastCommittedTxId: LastCommittedTxIdProvider, statement: Statement): Boolean = false
def notifications = Iterable.empty
}
}
|
HuangLS/neo4j
|
community/cypher/cypher/src/main/scala/org/neo4j/cypher/internal/compatibility/CompatibilityFor1_9.scala
|
Scala
|
apache-2.0
| 3,791
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.table
import java.time.{LocalDateTime, ZoneOffset}
import java.util.TimeZone
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.functions.sink.SinkFunction
import org.apache.flink.streaming.api.scala.DataStream
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.planner.factories.TestValuesTableFactory
import org.apache.flink.table.planner.factories.TestValuesTableFactory.TestSinkContextTableSink
import org.apache.flink.table.planner.runtime.utils.{AbstractExactlyOnceSink, StreamingTestBase, TestSinkUtil, TestingRetractSink}
import org.apache.flink.types.Row
import org.junit.Assert.assertEquals
import org.junit.Test
import scala.collection.JavaConversions._
/**
 * Tests that the conversion between [[Table]] and [[DataStream]] does
 * not lose the rowtime attribute.
*/
final class TableToDataStreamITCase extends StreamingTestBase {
@Test
def testHasRowtimeFromTableToAppendStream(): Unit = {
val data = List(
rowOf(localDateTime(1L), "A"),
rowOf(localDateTime(2L), "B"),
rowOf(localDateTime(3L), "C"),
rowOf(localDateTime(4L), "D"),
rowOf(localDateTime(7L), "E"))
val dataId: String = TestValuesTableFactory.registerData(data)
val sourceDDL =
s"""
|CREATE TABLE src (
| ts TIMESTAMP(3),
| a STRING,
| WATERMARK FOR ts AS ts - INTERVAL '0.005' SECOND
|) WITH (
| 'connector' = 'values',
| 'data-id' = '$dataId'
|)
""".stripMargin
tEnv.executeSql(sourceDDL)
val dataStream = tEnv.sqlQuery("SELECT a, ts FROM src").toAppendStream[Row]
val expected = List(
"+I[A, 1970-01-01T00:00:01], 1000",
"+I[B, 1970-01-01T00:00:02], 2000",
"+I[C, 1970-01-01T00:00:03], 3000",
"+I[D, 1970-01-01T00:00:04], 4000",
"+I[E, 1970-01-01T00:00:07], 7000")
val sink = new StringWithTimestampSink[Row]
dataStream.addSink(sink)
env.execute("TableToAppendStream")
assertEquals(expected, sink.getResults.sorted)
}
@Test
def testHasRowtimeFromTableToRetractStream(): Unit = {
val data = List(
rowOf(localDateTime(1L), "A"),
rowOf(localDateTime(2L), "A"),
rowOf(localDateTime(3L), "C"),
rowOf(localDateTime(4L), "D"),
rowOf(localDateTime(7L), "E"))
val dataId: String = TestValuesTableFactory.registerData(data)
val sourceDDL =
s"""
|CREATE TABLE src (
| ts TIMESTAMP(3),
| a STRING,
| WATERMARK FOR ts AS ts - INTERVAL '0.005' SECOND
|) WITH (
| 'connector' = 'values',
| 'data-id' = '$dataId'
|)
""".stripMargin
tEnv.executeSql(sourceDDL)
val dataStream = tEnv.sqlQuery(
"""
|SELECT a, ts
|FROM (
| SELECT *,
| ROW_NUMBER() OVER (PARTITION BY a ORDER BY ts DESC) as rowNum
| FROM src
|)
|WHERE rowNum = 1
""".stripMargin
).toRetractStream[Row]
val sink = new StringWithTimestampRetractSink[Row]
dataStream.addSink(sink)
env.execute("TableToRetractStream")
val expected = List(
"A,1970-01-01T00:00:02,2000",
"C,1970-01-01T00:00:03,3000",
"D,1970-01-01T00:00:04,4000",
"E,1970-01-01T00:00:07,7000")
assertEquals(expected, sink.getRetractResults.sorted)
val expectedRetract = List(
"(true,A,1970-01-01T00:00:01,1000)",
"(false,A,1970-01-01T00:00:01,1000)",
"(true,A,1970-01-01T00:00:02,2000)",
"(true,C,1970-01-01T00:00:03,3000)",
"(true,D,1970-01-01T00:00:04,4000)",
"(true,E,1970-01-01T00:00:07,7000)")
assertEquals(expectedRetract.sorted, sink.getRawResults.sorted)
}
@Test
def testHasRowtimeFromDataStreamToTableBackDataStream(): Unit = {
val data = Seq(
(1L, "A"),
(2L, "B"),
(3L, "C"),
(4L, "D"),
(7L, "E"))
val ds1 = env.fromCollection(data)
      // seconds to milliseconds
.assignAscendingTimestamps(_._1 * 1000L)
val table = ds1.toTable(tEnv, 'ts, 'a, 'rowtime.rowtime)
tEnv.registerTable("t1", table)
val ds2 = tEnv.sqlQuery(
"""
| SELECT CONCAT(a, '_'), ts, rowtime
| FROM t1
""".stripMargin
).toAppendStream[Row]
val expected = List(
"+I[A_, 1, 1970-01-01T00:00:01], 1000",
"+I[B_, 2, 1970-01-01T00:00:02], 2000",
"+I[C_, 3, 1970-01-01T00:00:03], 3000",
"+I[D_, 4, 1970-01-01T00:00:04], 4000",
"+I[E_, 7, 1970-01-01T00:00:07], 7000")
val sink = new StringWithTimestampSink[Row]
ds2.addSink(sink)
env.execute("DataStreamToTableBackDataStream")
assertEquals(expected, sink.getResults.sorted)
}
@Test
def testHasRowtimeFromTableToExternalSystem(): Unit = {
val data = List(
rowOf("1970-01-01 00:00:00.001", localDateTime(1L), 1, 1d),
rowOf("1970-01-01 00:00:00.002", localDateTime(2L), 1, 2d),
rowOf("1970-01-01 00:00:00.003", localDateTime(3L), 1, 2d),
rowOf("1970-01-01 00:00:00.004", localDateTime(4L), 1, 5d),
rowOf("1970-01-01 00:00:00.007", localDateTime(7L), 1, 3d),
rowOf("1970-01-01 00:00:00.008", localDateTime(8L), 1, 3d),
rowOf("1970-01-01 00:00:00.016", localDateTime(16L), 1, 4d))
val dataId: String = TestValuesTableFactory.registerData(data)
val sourceDDL =
s"""
|CREATE TABLE src (
| log_ts STRING,
| ts TIMESTAMP(3),
| a INT,
| b DOUBLE,
| WATERMARK FOR ts AS ts - INTERVAL '0.001' SECOND
|) WITH (
| 'connector' = 'values',
| 'data-id' = '$dataId'
|)
""".stripMargin
val sinkDDL =
s"""
|CREATE TABLE sink (
| log_ts STRING,
| ts TIMESTAMP(3),
| a INT,
| b DOUBLE
|) WITH (
| 'connector' = 'values',
| 'table-sink-class' = '${classOf[TestSinkContextTableSink].getName}'
|)
""".stripMargin
tEnv.executeSql(sourceDDL)
tEnv.executeSql(sinkDDL)
//---------------------------------------------------------------------------------------
// Verify writing out a source directly with the rowtime attribute
//---------------------------------------------------------------------------------------
tEnv.executeSql("INSERT INTO sink SELECT * FROM src").await()
val expected = List(1000, 2000, 3000, 4000, 7000, 8000, 16000)
assertEquals(expected.sorted, TestSinkContextTableSink.ROWTIMES.sorted)
val sinkDDL2 =
s"""
|CREATE TABLE sink2 (
| window_rowtime TIMESTAMP(3),
| b DOUBLE
|) WITH (
| 'connector' = 'values',
| 'table-sink-class' = '${classOf[TestSinkContextTableSink].getName}'
|)
""".stripMargin
tEnv.executeSql(sinkDDL2)
//---------------------------------------------------------------------------------------
// Verify writing out with additional operator to generate a new rowtime attribute
//---------------------------------------------------------------------------------------
tEnv.executeSql(
"""
|INSERT INTO sink2
|SELECT
| TUMBLE_ROWTIME(ts, INTERVAL '5' SECOND),
| SUM(b)
|FROM src
|GROUP BY TUMBLE(ts, INTERVAL '5' SECOND)
|""".stripMargin
).await()
val expected2 = List(4999, 9999, 19999)
assertEquals(expected2.sorted, TestSinkContextTableSink.ROWTIMES.sorted)
}
private def localDateTime(epochSecond: Long): LocalDateTime = {
LocalDateTime.ofEpochSecond(epochSecond, 0, ZoneOffset.UTC)
}
}
/**
 * Append test sink that outputs each record together with its timestamp.
*/
final class StringWithTimestampSink[T] extends AbstractExactlyOnceSink[T]() {
override def invoke(value: T, context: SinkFunction.Context): Unit = {
localResults += s"${value.toString}, ${context.timestamp()}"
}
override def getResults: List[String] = super.getResults
}
/**
 * Retract test sink that outputs each record together with its timestamp.
*/
final class StringWithTimestampRetractSink[T](tz: TimeZone) extends
TestingRetractSink(tz) {
def this() {
this(TimeZone.getTimeZone("UTC"))
}
override def invoke(v: (Boolean, Row), context: SinkFunction.Context): Unit = {
this.synchronized {
val rowString = s"${TestSinkUtil.rowToString(v._2, tz)},${context.timestamp()}"
val tupleString = "(" + v._1.toString + "," + rowString + ")"
localResults += tupleString
if (v._1) {
localRetractResults += rowString
} else {
val index = localRetractResults.indexOf(rowString)
if (index >= 0) {
localRetractResults.remove(index)
} else {
throw new RuntimeException("Tried to retract a value that wasn't added first. " +
"This is probably an incorrectly implemented test. " +
"Try to set the parallelism of the sink to 1.")
}
}
}
}
override def getResults: List[String] = super.getResults
}
|
kl0u/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/stream/table/TableToDataStreamITCase.scala
|
Scala
|
apache-2.0
| 9,920
|
package org.nexbook.performance.app
import com.typesafe.config.ConfigFactory
import org.nexbook.tags.Performance
import org.slf4j.LoggerFactory
/**
* Created by milczu on 1/2/16.
*/
class OrderBookAppScenarioA03PerformanceTest extends OrderBookAppPerformanceTest {
val logger = LoggerFactory.getLogger(classOf[OrderBookAppScenarioA03PerformanceTest])
val scenarioName = "scenario_A_03"
System.setProperty("config.name", s"scenarios/$scenarioName")
val config = ConfigFactory.load(s"config/scenarios/$scenarioName").withFallback(ConfigFactory.load("config/general"))
override val benchmarkConfig = config.getConfig("benchmark")
override val testDataPath = s"src/test/resources/data/${benchmarkConfig.getString("testDataFile")}"
override val resultLog = s"$appRoot/logs/test/$scenarioName.log"
override val expectedTotalOrdersCount = benchmarkConfig.getInt("expectedOrderCount")
import org.scalatest.time.SpanSugar._
s"OrderBook: $scenarioName" should {
"work fast!" taggedAs Performance in {
failAfter(600 seconds) {
executeTest()
}
}
}
}
|
milczarekIT/nexbook
|
src/test/scala/org/nexbook/performance/app/OrderBookAppScenarioA03PerformanceTest.scala
|
Scala
|
apache-2.0
| 1,081
|
package org.apache.spark.hbase.helpers
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.hbase.Serde
/**
* Created by mharis on 07/08/15.
*/
trait SerdeInt extends Serde[Int] {
override def toBytes = (value: Int) => Bytes.toBytes(value)
override def fromBytes = (bytes: Array[Byte], o: Int, l: Int) => Bytes.toInt(bytes, o)
}
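// A minimal, hypothetical round-trip check (not part of the library) of the
// HBase Bytes conversions that SerdeInt delegates to:
object SerdeIntRoundTripSketch {
  def main(args: Array[String]): Unit = {
    val bytes = Bytes.toBytes(42)       // 4-byte big-endian encoding, as used by toBytes above
    assert(Bytes.toInt(bytes, 0) == 42) // decoded back, mirroring fromBytes
  }
}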
|
michal-harish/spark-on-hbase
|
src/main/scala/org/apache/spark/hbase/helpers/SerdeInt.scala
|
Scala
|
apache-2.0
| 353
|
package xml.circumstances
import models.domain.{CircumstancesPaymentChange, Claim}
import scala.xml.NodeSeq
import xml.XMLHelper._
object PaymentChange {
def xml(circs :Claim): NodeSeq = {
val circsPaymentChangeOption: Option[CircumstancesPaymentChange] = circs.questionGroup[CircumstancesPaymentChange]
circsPaymentChangeOption match {
case Some(circsPaymentChange) => {
<PaymentChange>
<PaidIntoAccountDetails>
{question(<PaidIntoAccount/>,"currentlyPaidIntoBankLabel", circsPaymentChange.currentlyPaidIntoBankAnswer)}
{question(<BankName/>,"currentlyPaidIntoBankText1", circsPaymentChange.currentlyPaidIntoBankText1)}
{question(<MethodOfPayment/>,"currentlyPaidIntoBankText2", circsPaymentChange.currentlyPaidIntoBankText2)}
</PaidIntoAccountDetails>
{account(circs)}
{question(<PaymentFrequency/>,"paymentFrequency", circsPaymentChange.paymentFrequency)}
{question(<OtherChanges/>, "moreAboutChanges", circsPaymentChange.moreAboutChanges)}
</PaymentChange>
}
case _ => NodeSeq.Empty
}
}
def account(circs:Claim) = {
val bankBuildingSocietyDetails = circs.questionGroup[CircumstancesPaymentChange].getOrElse(CircumstancesPaymentChange())
<AccountDetails>
{question(<HolderName/>, "accountHolderName", encrypt(bankBuildingSocietyDetails.accountHolderName))}
<BuildingSocietyDetails>
{question(<AccountNumber/>, "accountNumber", encrypt(bankBuildingSocietyDetails.accountNumber.replaceAll(" ","")))}
{question(<RollNumber/>,"rollOrReferenceNumber", bankBuildingSocietyDetails.rollOrReferenceNumber)}
{question(<SortCode/>,"sortCode", encrypt(bankBuildingSocietyDetails.sortCode))}
{question(<Name/>, "bankFullName", bankBuildingSocietyDetails.bankFullName)}
</BuildingSocietyDetails>
</AccountDetails>
}
}
|
Department-for-Work-and-Pensions/ClaimCapture
|
c3/app/xml/circumstances/PaymentChange.scala
|
Scala
|
mit
| 1,913
|
import sbt._
import sbt.Keys._
object BuildSettings {
val Name = "activator-scalding"
val Version = "1.1.0"
val ScalaVersion = "2.10.4"
import Scalding._
val basicSettings = Defaults.defaultSettings ++ scaldingSettings ++ Seq (
name := Name,
version := Version,
scalaVersion := ScalaVersion,
organization := "com.typesafe",
description := "Activator Scalding Template",
scalacOptions := Seq("-deprecation", "-unchecked", "-encoding", "utf8")
)
// sbt-assembly settings for building a fat jar that includes all dependencies.
// This is useful for running Hadoop jobs, but not needed for local script testing.
// Adapted from https://github.com/snowplow/scalding-example-project
import sbtassembly.Plugin._
import AssemblyKeys._
lazy val sbtAssemblySettings = assemblySettings ++ Seq(
// Slightly cleaner jar name
jarName in assembly := s"${name.value}-${version.value}.jar" ,
// Drop these jars, most of which are dependencies of dependencies and already exist
// in Hadoop deployments or aren't needed for local mode execution. Some are older
// versions of jars that collide with newer versions in the dependency graph!!
excludedJars in assembly <<= (fullClasspath in assembly) map { cp =>
      val excludes = Set(
        "scala-compiler.jar",
        "jsp-api-2.1-6.1.14.jar",
        "jsp-2.1-6.1.14.jar",
        "jasper-compiler-5.5.12.jar",
        "minlog-1.2.jar", // Otherwise causes conflicts with Kryo (which Scalding pulls in)
"janino-2.5.16.jar", // Janino includes a broken signature, and is not needed anyway
"commons-beanutils-core-1.8.0.jar", // Clash with each other and with commons-collections
"commons-beanutils-1.7.0.jar",
"stax-api-1.0.1.jar",
"asm-3.1.jar",
"scalatest-2.0.jar"
)
cp filter { jar => excludes(jar.data.getName) }
},
mergeStrategy in assembly <<= (mergeStrategy in assembly) {
(old) => {
case "project.clj" => MergeStrategy.discard // Leiningen build files
case x => old(x)
}
}
)
lazy val buildSettings = basicSettings ++ sbtAssemblySettings
}
object Resolvers {
val typesafe = "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"
val sonatype = "Sonatype Release" at "https://oss.sonatype.org/content/repositories/releases"
val mvnrepository = "MVN Repo" at "http://mvnrepository.com/artifact"
val conjars = "Concurrent Maven Repo" at "http://conjars.org/repo"
val clojars = "Clojars Repo" at "http://clojars.org/repo"
val twitterMaven = "Twitter Maven" at "http://maven.twttr.com"
val allResolvers = Seq(typesafe, sonatype, mvnrepository, conjars, clojars, twitterMaven)
}
object Dependency {
object Version {
val Scalding = "0.10.0"
val Algebird = "0.2.0"
val Hadoop = "1.1.2" // Fairly old, but reliable version. Substitute your "favorite"
val ScalaTest = "2.0"
}
// ---- Application dependencies ----
// Include the Scala compiler itself for reification and evaluation of expressions.
val scalaCompiler = "org.scala-lang" % "scala-compiler" % BuildSettings.ScalaVersion
val scalding_args = "com.twitter" %% "scalding-args" % Version.Scalding
val scalding_core = "com.twitter" %% "scalding-core" % Version.Scalding
val scalding_date = "com.twitter" %% "scalding-date" % Version.Scalding
val algebird_core = "com.twitter" %% "algebird-core" % Version.Algebird
val algebird_util = "com.twitter" %% "algebird-util" % Version.Algebird
val hadoop_core = "org.apache.hadoop" % "hadoop-core" % Version.Hadoop
val scalaTest = "org.scalatest" % "scalatest_2.10" % Version.ScalaTest % "test"
}
object Dependencies {
import Dependency._
val activatorscalding = Seq(
scalaCompiler, scalding_args, scalding_core, scalding_date,
algebird_core, algebird_util, hadoop_core, scalaTest)
}
object ActivatorScaldingBuild extends Build {
import Resolvers._
import Dependencies._
import BuildSettings._
lazy val activatorscalding = Project(
id = "Activator-Scalding",
base = file("."),
settings = buildSettings ++ Seq(
// runScriptSetting,
resolvers := allResolvers,
libraryDependencies ++= Dependencies.activatorscalding,
mainClass := Some("RunAll")))
}
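// A small, hypothetical sanity check (not part of the build) of the fat-jar name
// that the sbt-assembly setting above, s"${name.value}-${version.value}.jar",
// yields for this project:
object AssemblyJarNameSketch {
  def jarName(name: String, version: String): String = s"$name-$version.jar"
  def main(args: Array[String]): Unit =
    assert(jarName(BuildSettings.Name, BuildSettings.Version) == "activator-scalding-1.1.0.jar")
}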
|
softberries/ugproject
|
activator-scalding/project/Build.scala
|
Scala
|
unlicense
| 4,376
|
package com.teamisotope.techexpansion.item.special
import java.util
import cofh.api.energy.ItemEnergyContainer
import com.teamisotope.techexpansion.TechExpansion
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.item.ItemStack
import net.minecraft.util.{ActionResult, EnumActionResult, EnumHand}
import net.minecraft.world.World
class CoFHTest extends ItemEnergyContainer(1000) {
this.setRegistryName("cofh_test").setUnlocalizedName("cofh_test").setCreativeTab(TechExpansion.tab_misc).setMaxStackSize(1)
override def addInformation(stack: ItemStack, playerIn: EntityPlayer, tooltip: util.List[String], advanced: Boolean): Unit = {
tooltip.add("Energy:" + this.getEnergyStored(stack) + "/" + this.getMaxEnergyStored(stack))
}
override def onItemRightClick(stack: ItemStack, world: World, player: EntityPlayer, hand: EnumHand): ActionResult[ItemStack] = {
this.receiveEnergy(stack, 10, false)
ActionResult.newResult(EnumActionResult.SUCCESS, stack)
}
}
|
collaborationmods/TechExpansion
|
src/main/scala/com/teamisotope/techexpansion/item/special/CoFHTest.scala
|
Scala
|
gpl-3.0
| 999
|
package special
import scalan.RType
import scalan.util.CollectionUtil
import scala.reflect.ClassTag
import scalan.RType.SomeType
import special.collection.Coll
object Types {
type StructData = Coll[Any]
def structRType(names: Array[String], types: Array[SomeType]): RType[StructData] = StructType(names, types)
case class StructType(fieldNames: Array[String], fieldTypes: Array[SomeType]) extends RType[StructData] {
val classTag: ClassTag[StructData] = scala.reflect.classTag[StructData]
override def isConstantSize: Boolean = fieldTypes.forall(_.isConstantSize)
override def hashCode(): Int = {
var h = CollectionUtil.deepHashCode(fieldNames)
h += h * 31 + CollectionUtil.deepHashCode(fieldTypes)
h
}
override def equals(obj: Any): Boolean = (this eq obj.asInstanceOf[AnyRef]) || (obj match {
case that: StructType =>
java.util.Arrays.equals(fieldNames.asInstanceOf[Array[AnyRef]], that.fieldNames.asInstanceOf[Array[AnyRef]]) &&
java.util.Arrays.equals(fieldTypes.asInstanceOf[Array[AnyRef]], that.fieldTypes.asInstanceOf[Array[AnyRef]])
case _ => false
})
}
type TupleData = Coll[Any]
def tupleRType(types: Array[SomeType]): RType[TupleData] = TupleType(types)
case class TupleType(items: Array[SomeType]) extends RType[StructData] {
val classTag: ClassTag[TupleData] = scala.reflect.classTag[TupleData]
override def name: String = items.map(_.name).mkString("(", ", ", ")")
override def isConstantSize: Boolean = items.forall(_.isConstantSize)
override def hashCode(): Int = CollectionUtil.deepHashCode(items)
override def equals(obj: Any): Boolean = (this eq obj.asInstanceOf[AnyRef]) || (obj match {
case that: TupleType => java.util.Arrays.equals(items.asInstanceOf[Array[AnyRef]], that.items.asInstanceOf[Array[AnyRef]])
case _ => false
})
}
}
|
ScorexFoundation/sigmastate-interpreter
|
library-api/src/main/scala/special/Types.scala
|
Scala
|
mit
| 1,890
|
//: ----------------------------------------------------------------------------
//: Copyright (C) 2014 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package remotely.codecs
import scodec.Err
class EncodingFailure(err: Err) extends Exception(err.messageWithContext)
|
ShengC/remotely
|
core/src/main/scala/codecs/EncodingFailure.scala
|
Scala
|
apache-2.0
| 927
|
/*
* Copyright (C) 2012 The Regents of The University California.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shark
import org.apache.hadoop.hive.ql.plan.JoinCondDesc
import org.scalatest.{BeforeAndAfter, FunSuite}
import shark.execution.ReduceKey
import spark.{RDD, SparkContext}
import spark.SparkContext._
class SortSuite extends FunSuite {
TestUtils.init()
test("order by limit") {
val sc = new SparkContext("local", "test")
val data = Array((new ReduceKey(Array[Byte](4)), "val_4"),
(new ReduceKey(Array[Byte](1)), "val_1"),
(new ReduceKey(Array[Byte](7)), "val_7"),
(new ReduceKey(Array[Byte](0)), "val_0"))
val expected = data.sortWith(_._1 < _._1).toSeq
val rdd = sc.parallelize(data, 50)
for (k <- 0 to 5) {
val output = RDDUtils.sortLeastKByKey(rdd, k).collect().toSeq
assert(output.size == math.min(k, 4))
assert(output == expected.take(math.min(k, 4)))
}
sc.stop()
System.clearProperty("spark.driver.port")
}
}
|
sameeragarwal/blinkdb_dev
|
src/test/scala/shark/SortSuite.scala
|
Scala
|
apache-2.0
| 1,600
|
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.observers
import minitest.TestSuite
import monix.execution.Ack
import monix.execution.Ack.{Continue, Stop}
import monix.execution.schedulers.TestScheduler
import monix.reactive.Observer
import monix.reactive.OverflowStrategy.DropNew
import monix.execution.exceptions.DummyException
import scala.concurrent.{Future, Promise}
object OverflowStrategyDropNewSuite extends TestSuite[TestScheduler] {
def setup() = TestScheduler()
def tearDown(s: TestScheduler) = {
assert(s.state.tasks.isEmpty, "TestScheduler should have no pending tasks")
}
test("should not lose events, synchronous test 1") { implicit s =>
var number = 0
var wasCompleted = false
val underlying = new Observer[Int] {
def onNext(elem: Int): Future[Ack] = {
number += 1
Continue
}
def onError(ex: Throwable): Unit = {
s.reportFailure(ex)
}
def onComplete(): Unit = {
wasCompleted = true
}
}
val buffer = BufferedSubscriber[Int](Subscriber(underlying, s), DropNew(1000))
for (i <- 0 until 1000) buffer.onNext(i)
buffer.onComplete()
assert(!wasCompleted)
s.tick()
assert(number == 1000)
assert(wasCompleted)
}
test("should not lose events, synchronous test 2") { implicit s =>
var number = 0
var completed = false
val underlying = new Observer[Int] {
def onNext(elem: Int): Future[Ack] = {
number += 1
Continue
}
def onError(ex: Throwable): Unit = {
s.reportFailure(ex)
}
def onComplete(): Unit = {
completed = true
}
}
val buffer = BufferedSubscriber[Int](Subscriber(underlying, s), DropNew(1000))
def loop(n: Int): Unit =
if (n > 0)
s.execute { () =>
buffer.onNext(n); loop(n - 1)
} else
buffer.onComplete()
loop(10000)
assert(!completed)
assertEquals(number, 0)
s.tick()
assert(completed)
assertEquals(number, 10000)
}
test("should not lose events, async test 1") { implicit s =>
var number = 0
var wasCompleted = false
val underlying = new Observer[Int] {
def onNext(elem: Int) = Future {
number += 1
Continue
}
def onError(ex: Throwable): Unit = {
s.reportFailure(ex)
}
def onComplete(): Unit = {
wasCompleted = true
}
}
val buffer = BufferedSubscriber[Int](Subscriber(underlying, s), DropNew(1000))
for (i <- 0 until 1000) buffer.onNext(i)
buffer.onComplete()
assert(!wasCompleted)
s.tick()
assert(number == 1000)
assert(wasCompleted)
}
test("should not lose events, async test 2") { implicit s =>
var number = 0
var completed = false
val underlying = new Observer[Int] {
def onNext(elem: Int) = Future {
number += 1
Continue
}
def onError(ex: Throwable): Unit =
s.reportFailure(ex)
def onComplete(): Unit =
completed = true
}
val buffer = BufferedSubscriber[Int](Subscriber(underlying, s), DropNew(10000))
def loop(n: Int): Unit =
if (n > 0)
s.execute { () =>
buffer.onNext(n); loop(n - 1)
} else
buffer.onComplete()
loop(10000)
assert(!completed)
assertEquals(number, 0)
s.tick()
assert(completed)
assertEquals(number, 10000)
}
test("should drop incoming when over capacity") { implicit s =>
var received = 0
var wasCompleted = false
val promise = Promise[Ack]()
val underlying = new Observer[Int] {
def onNext(elem: Int) = {
received += elem
promise.future
}
def onError(ex: Throwable) = ()
def onComplete() = {
wasCompleted = true
}
}
val buffer = BufferedSubscriber[Int](Subscriber(underlying, s), DropNew(5))
for (i <- 1 to 9)
assertEquals(buffer.onNext(i), Continue)
for (i <- 0 until 5)
assertEquals(buffer.onNext(10 + i), Continue)
s.tick()
assertEquals(received, 1)
promise.success(Continue); s.tick()
assertEquals(received, (1 to 8).sum)
for (i <- 1 to 8) assertEquals(buffer.onNext(i), Continue)
s.tick()
assertEquals(received, (1 to 8).sum * 2)
buffer.onComplete(); s.tick()
assert(wasCompleted, "wasCompleted should be true")
}
test("should send onError when empty") { implicit s =>
var errorThrown: Throwable = null
val buffer = BufferedSubscriber[Int](
new Subscriber[Int] {
def onError(ex: Throwable) = {
errorThrown = ex
}
def onNext(elem: Int) = throw new IllegalStateException()
def onComplete() = throw new IllegalStateException()
val scheduler = s
},
DropNew(5)
)
buffer.onError(DummyException("dummy"))
s.tickOne()
assertEquals(errorThrown, DummyException("dummy"))
val r = buffer.onNext(1)
assertEquals(r, Stop)
}
test("should send onError when in flight") { implicit s =>
var errorThrown: Throwable = null
val buffer = BufferedSubscriber[Int](
new Subscriber[Int] {
def onError(ex: Throwable) = {
errorThrown = ex
}
def onNext(elem: Int) = Continue
def onComplete() = throw new IllegalStateException()
val scheduler = s
},
DropNew(5)
)
buffer.onNext(1)
buffer.onError(DummyException("dummy"))
s.tickOne()
assertEquals(errorThrown, DummyException("dummy"))
}
test("should send onError when at capacity") { implicit s =>
var errorThrown: Throwable = null
val promise = Promise[Ack]()
val buffer = BufferedSubscriber[Int](
new Subscriber[Int] {
def onError(ex: Throwable) = {
errorThrown = ex
}
def onNext(elem: Int) = promise.future
def onComplete() = throw new IllegalStateException()
val scheduler = s
},
DropNew(5)
)
buffer.onNext(1)
buffer.onNext(2)
buffer.onNext(3)
buffer.onNext(4)
buffer.onNext(5)
buffer.onError(DummyException("dummy"))
promise.success(Continue)
s.tick()
assertEquals(errorThrown, DummyException("dummy"))
}
test("should do onComplete only after all the queue was drained") { implicit s =>
var sum = 0L
var wasCompleted = false
val startConsuming = Promise[Continue.type]()
val buffer = BufferedSubscriber[Long](
new Subscriber[Long] {
def onNext(elem: Long) = {
sum += elem
startConsuming.future
}
def onError(ex: Throwable) = throw ex
def onComplete() = wasCompleted = true
val scheduler = s
},
DropNew(10000)
)
(0 until 9999).foreach { x => buffer.onNext(x.toLong); () }
buffer.onComplete()
startConsuming.success(Continue)
s.tick()
assert(wasCompleted)
assert(sum == (0 until 9999).sum)
}
test("should do onComplete only after all the queue was drained, test2") { implicit s =>
var sum = 0L
var wasCompleted = false
val buffer = BufferedSubscriber[Long](
new Subscriber[Long] {
def onNext(elem: Long) = {
sum += elem
Continue
}
def onError(ex: Throwable) = throw ex
def onComplete() = wasCompleted = true
val scheduler = s
},
DropNew(10000)
)
(0 until 9999).foreach { x => buffer.onNext(x.toLong); () }
buffer.onComplete()
s.tick()
assert(wasCompleted)
assert(sum == (0 until 9999).sum)
}
test("should do onError only after the queue was drained") { implicit s =>
var sum = 0L
var errorThrown: Throwable = null
val startConsuming = Promise[Continue.type]()
val buffer = BufferedSubscriber[Long](
new Subscriber[Long] {
def onNext(elem: Long) = {
sum += elem
startConsuming.future
}
def onError(ex: Throwable) = errorThrown = ex
def onComplete() = throw new IllegalStateException()
val scheduler = s
},
DropNew(10000)
)
(0 until 9999).foreach { x => buffer.onNext(x.toLong); () }
buffer.onError(DummyException("dummy"))
startConsuming.success(Continue)
s.tick()
assertEquals(errorThrown, DummyException("dummy"))
assertEquals(sum, (0 until 9999).sum.toLong)
}
test("should do onError only after all the queue was drained, test2") { implicit s =>
var sum = 0L
var errorThrown: Throwable = null
val buffer = BufferedSubscriber[Long](
new Subscriber[Long] {
def onNext(elem: Long) = {
sum += elem
Continue
}
def onError(ex: Throwable) = errorThrown = ex
def onComplete() = throw new IllegalStateException()
val scheduler = s
},
DropNew(10000)
)
(0 until 9999).foreach { x => buffer.onNext(x.toLong); () }
buffer.onError(DummyException("dummy"))
s.tick()
assertEquals(errorThrown, DummyException("dummy"))
assertEquals(sum, (0 until 9999).sum.toLong)
}
test("subscriber STOP after a synchronous onNext") { implicit s =>
var received = 0
var wasCompleted = false
val underlying = new Subscriber[Int] {
val scheduler = s
def onNext(elem: Int): Future[Ack] = {
received += elem
Stop
}
def onError(ex: Throwable): Unit =
throw ex
def onComplete(): Unit =
wasCompleted = true
}
val buffer = BufferedSubscriber[Int](underlying, DropNew(16))
assertEquals(buffer.onNext(1), Continue)
s.tick()
assertEquals(buffer.onNext(2), Stop)
buffer.onComplete(); s.tick()
assert(!wasCompleted, "!wasCompleted")
assertEquals(received, 1)
}
test("subscriber STOP after an asynchronous onNext") { implicit s =>
var received = 0
var wasCompleted = false
val underlying = new Subscriber[Int] {
val scheduler = s
def onNext(elem: Int): Future[Ack] = Future {
received += elem
Stop
}
def onError(ex: Throwable): Unit =
throw ex
def onComplete(): Unit =
wasCompleted = true
}
val buffer = BufferedSubscriber[Int](underlying, DropNew(16))
assertEquals(buffer.onNext(1), Continue)
s.tick()
assertEquals(received, 1)
buffer.onNext(2); s.tick() // ack is uncertain at this point
assertEquals(buffer.onNext(3), Stop)
buffer.onComplete(); s.tick()
assert(!wasCompleted, "!wasCompleted")
assertEquals(received, 1)
}
test("stop after a synchronous Failure(ex)") { implicit s =>
var received = 0
var wasCompleted = false
var errorThrown: Throwable = null
val dummy = new RuntimeException("dummy")
val underlying = new Subscriber[Int] {
val scheduler = s
def onNext(elem: Int): Future[Ack] = {
received += elem
Future.failed(dummy)
}
def onError(ex: Throwable): Unit =
errorThrown = ex
def onComplete(): Unit =
wasCompleted = true
}
val buffer = BufferedSubscriber[Int](underlying, DropNew(16))
assertEquals(buffer.onNext(1), Continue)
s.tick()
assertEquals(buffer.onNext(2), Stop)
buffer.onComplete(); s.tick()
assert(!wasCompleted, "!wasCompleted")
assertEquals(received, 1)
assertEquals(errorThrown, dummy)
}
test("stop after an asynchronous Failure(ex)") { implicit s =>
var received = 0
var wasCompleted = false
var errorThrown: Throwable = null
val dummy = new RuntimeException("dummy")
val underlying = new Subscriber[Int] {
val scheduler = s
def onNext(elem: Int): Future[Ack] = Future {
received += elem
throw dummy
}
def onError(ex: Throwable): Unit =
errorThrown = ex
def onComplete(): Unit =
wasCompleted = true
}
val buffer = BufferedSubscriber[Int](underlying, DropNew(16))
assertEquals(buffer.onNext(1), Continue)
s.tick(); buffer.onNext(2) // ack is uncertain at this point
s.tick()
assertEquals(buffer.onNext(3), Stop)
buffer.onComplete(); s.tick()
assert(!wasCompleted, "!wasCompleted")
assertEquals(received, 1)
assertEquals(errorThrown, dummy)
}
test("should protect against user-code in onNext") { implicit s =>
var received = 0
var wasCompleted = false
var errorThrown: Throwable = null
val dummy = new RuntimeException("dummy")
val underlying = new Subscriber[Int] {
val scheduler = s
def onNext(elem: Int): Future[Ack] = {
received += elem
throw dummy
}
def onError(ex: Throwable): Unit =
errorThrown = ex
def onComplete(): Unit =
wasCompleted = true
}
val buffer = BufferedSubscriber[Int](underlying, DropNew(16))
assertEquals(buffer.onNext(1), Continue)
s.tick()
assertEquals(buffer.onNext(2), Stop)
buffer.onComplete(); s.tick()
assert(!wasCompleted, "!wasCompleted")
assertEquals(received, 1)
assertEquals(errorThrown, dummy)
}
test("should protect against user-code in onComplete") { implicit s =>
var received = 0
var errorThrown: Throwable = null
val dummy = new RuntimeException("dummy")
val underlying = new Subscriber[Int] {
val scheduler = s
def onNext(elem: Int): Future[Ack] = {
received += elem
Continue
}
def onError(ex: Throwable): Unit =
errorThrown = ex
def onComplete(): Unit =
throw dummy
}
val buffer = BufferedSubscriber[Int](underlying, DropNew(16))
buffer.onNext(1)
buffer.onComplete()
s.tick()
assertEquals(received, 1)
assertEquals(errorThrown, null)
assertEquals(s.state.lastReportedError, dummy)
}
test("should protect against user-code in onError") { implicit s =>
var received = 0
var errorThrown: Throwable = null
val dummy1 = new RuntimeException("dummy1")
val dummy2 = new RuntimeException("dummy2")
val underlying = new Subscriber[Int] {
val scheduler = s
def onNext(elem: Int): Future[Ack] = {
received += elem
Future.failed(dummy1)
}
def onError(ex: Throwable): Unit = {
errorThrown = ex
throw dummy2
}
def onComplete(): Unit =
throw new IllegalStateException("onComplete")
}
val buffer = BufferedSubscriber[Int](underlying, DropNew(16))
buffer.onNext(1)
s.tick()
assertEquals(received, 1)
assertEquals(errorThrown, dummy1)
assertEquals(s.state.lastReportedError, dummy2)
}
test("streaming null is not allowed") { implicit s =>
var errorThrown: Throwable = null
val underlying = new Subscriber[String] {
val scheduler = s
def onNext(elem: String) =
Continue
def onError(ex: Throwable): Unit =
errorThrown = ex
def onComplete(): Unit =
throw new IllegalStateException("onComplete")
}
val buffer = BufferedSubscriber[String](underlying, DropNew(16))
buffer.onNext(null)
s.tick()
assert(errorThrown != null, "errorThrown != null")
assert(errorThrown.isInstanceOf[NullPointerException], "errorThrown.isInstanceOf[NullPointerException]")
}
test("buffer size is required to be greater than 1") { implicit s =>
intercept[IllegalArgumentException] {
BufferedSubscriber[Int](Subscriber.empty[Int], DropNew(1))
()
}
()
}
}
|
monifu/monifu
|
monix-reactive/shared/src/test/scala/monix/reactive/observers/OverflowStrategyDropNewSuite.scala
|
Scala
|
apache-2.0
| 16,081
|
package com.twitter.finagle.netty3.channel
import com.twitter.finagle.{ClientConnection, Service, ServiceFactory}
import com.twitter.util.TimeConversions._
import com.twitter.util.{Await, Future, Promise, Time}
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
import org.scalatest.mock.MockitoSugar
import org.mockito.Mockito.{times, verify, when}
import org.mockito.Mockito
import org.mockito.Matchers._
import org.mockito.stubbing.Answer
import org.mockito.invocation.InvocationOnMock
@RunWith(classOf[JUnitRunner])
class IdleConnectionFilterTest extends FunSuite with MockitoSugar {
class ChannelHelper {
val service = mock[Service[String, String]]
when(service.close(any)) thenReturn Future.Done
val underlying = ServiceFactory.const(service)
val threshold = OpenConnectionsThresholds(2, 4, 1.second)
val filter = new IdleConnectionFilter(underlying, threshold)
def open(filter: IdleConnectionFilter[_, _]) = {
val c = mock[ClientConnection]
val closeFuture = new Promise[Unit]
when(c.onClose) thenReturn closeFuture
when(c.close()) thenAnswer {
new Answer[Future[Unit]] {
override def answer(invocation: InvocationOnMock): Future[Unit] = {
closeFuture.setDone()
closeFuture
}
}
}
filter(c)
(c, closeFuture)
}
}
test("IdleConnectionFilter should count connections") {
val h = new ChannelHelper
import h._
assert(filter.openConnections === 0)
val (_, closeFuture) = open(filter)
assert(filter.openConnections === 1)
closeFuture.setDone()
assert(filter.openConnections === 0)
}
test("IdleConnectionFilter should refuse connection if above highWaterMark") {
val h = new ChannelHelper
import h._
assert(filter.openConnections === 0)
val closeFutures = (1 to threshold.highWaterMark) map { _ =>
val (_, closeFuture) = open(filter)
closeFuture
}
assert(filter.openConnections === threshold.highWaterMark)
open(filter)
assert(filter.openConnections === threshold.highWaterMark)
closeFutures foreach {
_.setDone()
}
assert(filter.openConnections === 0)
}
test("IdleConnectionFilter should try to close an idle connection if above lowerWaterMark") {
val h = new ChannelHelper
import h._
val spyFilter = Mockito.spy(new IdleConnectionFilter(underlying, threshold))
assert(spyFilter.openConnections === 0)
(1 to threshold.lowWaterMark) map { _ =>
open(spyFilter)
}
assert(spyFilter.openConnections === threshold.lowWaterMark)
// open must try to close an idle connection
open(spyFilter)
verify(spyFilter, times(1)).closeIdleConnections()
}
test("IdleConnectionFilter should don't close connections not yet answered by the server (long processing requests)") {
val h = new ChannelHelper
import h._
var t = Time.now
Time.withTimeFunction(t) { _ =>
val service = new Service[String, String] {
def apply(req: String): Future[String] = new Promise[String]
}
val underlying = ServiceFactory.const(service)
val spyFilter = Mockito.spy(new IdleConnectionFilter(underlying, threshold))
assert(spyFilter.openConnections === 0)
(1 to threshold.highWaterMark) map { _ =>
val (c, _) = open(spyFilter)
spyFilter.filterFactory(c)("titi", service)
}
assert(spyFilter.openConnections === threshold.highWaterMark)
// wait a long time
t += threshold.idleTimeout * 3
val c = mock[ClientConnection]
val closeFuture = new Promise[Unit]
when(c.onClose) thenReturn closeFuture
/* same problem as before */
when(c.close()) thenAnswer {
new Answer[Future[Unit]] {
override def answer(invocation: InvocationOnMock): Future[Unit] = {
closeFuture.setDone()
closeFuture
}
}
}
spyFilter(c)
verify(c, times(1)).close()
assert(spyFilter.openConnections === threshold.highWaterMark)
}
}
test("IdleConnectionFilter should close an idle connection to accept a new one") {
val h = new ChannelHelper
import h._
var t = Time.now
Time.withTimeFunction(t) { _ =>
val responses = collection.mutable.HashSet.empty[Promise[String]]
val service = new Service[String, String] {
def apply(req: String): Future[String] = {
val p = new Promise[String]
responses += p
p
}
}
val underlying = ServiceFactory.const(service)
val spyFilter = Mockito.spy(new IdleConnectionFilter(underlying, threshold))
// Open all connections
(1 to threshold.highWaterMark) map { _ =>
val (c, _) = open(spyFilter)
spyFilter.filterFactory(c)("titi", Await.result(underlying(c)))
}
// Simulate response from the server
responses foreach {
f => f.setValue("toto")
}
// wait a long time
t += threshold.idleTimeout * 3
val c = mock[ClientConnection]
val closeFuture = new Promise[Unit]
when(c.onClose) thenReturn closeFuture
spyFilter(c)
verify(c, times(0)).close()
}
}
}
|
jpederzolli/finagle
|
finagle-core/src/test/scala/com/twitter/finagle/netty3/channel/IdleConnectionFilterTest.scala
|
Scala
|
apache-2.0
| 5,269
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import org.apache.parquet.bytes.{BytesInput, HeapByteBufferAllocator}
import org.apache.parquet.column.values.plain.{BooleanPlainValuesWriter, PlainValuesWriter}
import org.apache.parquet.column.values.rle.RunLengthBitPackingHybridValuesWriter
import org.apache.parquet.io.api.Binary
import org.apache.spark.SparkFunSuite
import org.apache.spark.internal.Logging
import org.apache.spark.memory.MemoryMode
import org.apache.spark.sql.oap.adapter.ColumnVectorAdapter
import org.apache.spark.sql.types.BinaryType
class SkipAndReadValueWithPackedDefinitionLevelsSuite extends SparkFunSuite with Logging {
test("read and skip Integers") {
// prepare data: [null, null, 1, null, null, null, 2, null, 3, 4]
// value: [1, 2, 3, 4]
val valueWriter = new PlainValuesWriter(
64 * 1024, 64 * 1024, HeapByteBufferAllocator.getInstance())
(1 until 5).foreach(valueWriter.writeInteger)
// init value reader
val valueReader = new SkippableVectorizedPlainValuesReader()
val valueData = valueWriter.getBytes.toByteArray
valueReader.initFromPage(4, BytesInput.from(valueData).toInputStream)
// skip data assisted by defReader
val reader = defReader
reader.skipIntegers(5, 1, valueReader)
reader.skipIntegers(3, 1, valueReader)
reader.skipIntegers(0, 1, valueReader)
// assert read value
assert(valueReader.readInteger() == 3)
}
test("read and skip Booleans") {
// prepare data: [null, null, true, null, null, null, false, null, false, true]
// value: [true, false, false, true]
val valueWriter = new BooleanPlainValuesWriter()
valueWriter.writeBoolean(true)
valueWriter.writeBoolean(false)
valueWriter.writeBoolean(false)
valueWriter.writeBoolean(true)
// init value reader
val valueReader = new SkippableVectorizedPlainValuesReader()
val valueData = valueWriter.getBytes.toByteArray
valueReader.initFromPage(4, BytesInput.from(valueData).toInputStream)
// skip data assisted by defReader
val reader = defReader
reader.skipBooleans(5, 1, valueReader)
reader.skipBooleans(3, 1, valueReader)
reader.skipBooleans(0, 1, valueReader)
// assert read value
assert(!valueReader.readBoolean())
}
test("read and skip skipBytes") {
// prepare data: [null, null, A, null, null, null, B, null, C, D]
// value: [A, B, C, D]
val valueWriter = new PlainValuesWriter(
64 * 1024, 64 * 1024, HeapByteBufferAllocator.getInstance())
"ABCD".getBytes.foreach { v =>
valueWriter.writeByte(v)
valueWriter.writeByte(0)
valueWriter.writeByte(0)
valueWriter.writeByte(0)
}
// init value reader
val valueReader = new SkippableVectorizedPlainValuesReader()
val valueData = valueWriter.getBytes.toByteArray
valueReader.initFromPage(4, BytesInput.from(valueData).toInputStream)
// skip data assisted by defReader
val reader = defReader
reader.skipBytes(5, 1, valueReader)
reader.skipBytes(3, 1, valueReader)
reader.skipBytes(0, 1, valueReader)
// assert read value
assert(valueReader.readByte() == 'C'.toInt)
}
test("read and skip Shorts") {
// shorts are actually stored as ints
// prepare data: [null, null, 1, null, null, null, 2, null, 3, 4]
// value: [1, 2, 3, 4]
val valueWriter = new PlainValuesWriter(
64 * 1024, 64 * 1024, HeapByteBufferAllocator.getInstance())
(1 until 5).foreach(valueWriter.writeInteger)
// init value reader
val valueReader = new SkippableVectorizedPlainValuesReader()
val valueData = valueWriter.getBytes.toByteArray
valueReader.initFromPage(4, BytesInput.from(valueData).toInputStream)
// skip data assisted by defReader
val reader = defReader
reader.skipShorts(5, 1, valueReader)
reader.skipShorts(3, 1, valueReader)
reader.skipShorts(0, 1, valueReader)
// assert read value
assert(valueReader.readInteger().toShort == 3.toShort)
}
test("read and skip Longs") {
// prepare data: [null, null, 1L, null, null, null, 2L, null, 3L, 4L]
// value: [1L, 2L, 3L, 4L]
val valueWriter = new PlainValuesWriter(
64 * 1024, 64 * 1024, HeapByteBufferAllocator.getInstance())
(1 until 5).foreach(v => valueWriter.writeLong(v.toLong))
// init value reader
val valueReader = new SkippableVectorizedPlainValuesReader()
val valueData = valueWriter.getBytes.toByteArray
valueReader.initFromPage(4, BytesInput.from(valueData).toInputStream)
// skip data assisted by defReader
val reader = defReader
reader.skipLongs(5, 1, valueReader)
reader.skipLongs(3, 1, valueReader)
reader.skipLongs(0, 1, valueReader)
// assert read value
assert(valueReader.readLong() == 3L)
}
test("read and skip Floats") {
// prepare data: [null, null, 1.0F, null, null, null, 2.0F, null, 3.0F, 4.0F]
// value: [1.0F, 2.0F, 3.0F, 4.0F]
val valueWriter = new PlainValuesWriter(
64 * 1024, 64 * 1024, HeapByteBufferAllocator.getInstance())
(1 until 5).foreach(v => valueWriter.writeFloat(v.toFloat))
// init value reader
val valueReader = new SkippableVectorizedPlainValuesReader()
val valueData = valueWriter.getBytes.toByteArray
valueReader.initFromPage(4, BytesInput.from(valueData).toInputStream)
// skip data assisted by defReader
val reader = defReader
reader.skipFloats(5, 1, valueReader)
reader.skipFloats(3, 1, valueReader)
reader.skipFloats(0, 1, valueReader)
// assert read value
assert(valueReader.readFloat() == 3.0F)
}
test("read and skip Doubles") {
// prepare data: [null, null, 1.0D, null, null, null, 2.0D, null, 3.0D, 4.0D]
// value: [1.0D, 2.0D, 3.0D, 4.0D]
val valueWriter = new PlainValuesWriter(
64 * 1024, 64 * 1024, HeapByteBufferAllocator.getInstance())
(1 until 5).foreach(v => valueWriter.writeDouble(v.toDouble))
// init value reader
val valueReader = new SkippableVectorizedPlainValuesReader()
val valueData = valueWriter.getBytes.toByteArray
valueReader.initFromPage(4, BytesInput.from(valueData).toInputStream)
// skip data assisted by defReader
val reader = defReader
reader.skipDoubles(5, 1, valueReader)
reader.skipDoubles(3, 1, valueReader)
reader.skipDoubles(0, 1, valueReader)
// assert read value
assert(valueReader.readDouble() == 3.0D)
}
test("read and skip Binarys") {
// prepare data: [null, null, AB, null, null, null, CDE, null, F, GHI]
// value: [AB, CDE, F, GHI]
val valueWriter = new PlainValuesWriter(
64 * 1024, 64 * 1024, HeapByteBufferAllocator.getInstance())
valueWriter.writeBytes(Binary.fromString("AB"))
valueWriter.writeBytes(Binary.fromString("CDE"))
valueWriter.writeBytes(Binary.fromString("F"))
valueWriter.writeBytes(Binary.fromString("GHI"))
// init value reader
val valueReader = new SkippableVectorizedPlainValuesReader()
val valueData = valueWriter.getBytes.toByteArray
valueReader.initFromPage(4, BytesInput.from(valueData).toInputStream)
// skip data assisted by defReader
val reader = defReader
reader.skipBinarys(5, 1, valueReader)
reader.skipBinarys(3, 1, valueReader)
reader.skipBinarys(0, 1, valueReader)
// read binary to a vector and assert read value
val vector = ColumnVectorAdapter.allocate(10, BinaryType, MemoryMode.ON_HEAP)
valueReader.readBinary(1, vector, 0)
assert(vector.getBinary(0).sameElements("F".getBytes))
}
/**
* Builds a unified SkippableVectorizedRleValuesReader for these unit tests with definition-level
* data [0, 0, 1, 0, 0, 0, 1, 0, 1, 1]; the reader will use packed mode.
* @return SkippableVectorizedRleValuesReader representing the definition level values
*/
private def defReader: SkippableVectorizedRleValuesReader = {
val defWriter = new RunLengthBitPackingHybridValuesWriter(
3, 5, 10, HeapByteBufferAllocator.getInstance())
Array(0, 0, 1, 0, 0, 0, 1, 0, 1, 1).foreach(defWriter.writeInteger)
val defData = defWriter.getBytes.toByteArray
val defReader = new SkippableVectorizedRleValuesReader(3)
defReader.initFromPage(10, BytesInput.from(defData).toInputStream)
defReader
}
}
|
Intel-bigdata/OAP
|
oap-cache/oap/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/SkipAndReadValueWithPackedDefinitionLevelsSuite.scala
|
Scala
|
apache-2.0
| 9,043
|
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.rdbms.planner
import slamdata.Predef._
import quasar.fp.ski._
import quasar.Planner._
import quasar.contrib.pathy.{ADir, AFile}
import quasar.qscript._
import quasar.NameGenerator
import quasar.physical.rdbms.planner.sql.SqlExpr
import matryoshka._
import scalaz._
trait Planner[T[_[_]], F[_], QS[_]] extends Serializable {
def plan: AlgebraM[F, QS, T[SqlExpr]]
}
object Planner {
def apply[T[_[_]], F[_], QS[_]](
implicit P: Planner[T, F, QS]): Planner[T, F, QS] = P
implicit def constDeadEndPlanner[T[_[_]], F[_]: PlannerErrorME]
: Planner[T, F, Const[DeadEnd, ?]] =
unreachable("deadEnd")
implicit def constReadPlanner[T[_[_]], F[_]: PlannerErrorME, A]
: Planner[T, F, Const[Read[A], ?]] =
unreachable("read")
implicit def constShiftedReadDirPlanner[T[_[_]], F[_]: PlannerErrorME]
: Planner[T, F, Const[ShiftedRead[ADir], ?]] =
unreachable("shifted read of a dir")
implicit def constShiftedReadFilePlanner[
T[_[_]]: CorecursiveT,
F[_]: Applicative: NameGenerator]
: Planner[T, F, Const[ShiftedRead[AFile], ?]] =
new ShiftedReadPlanner[T, F]
implicit def projectBucketPlanner[T[_[_]]: RecursiveT: ShowT, F[_]: PlannerErrorME]
: Planner[T, F, ProjectBucket[T, ?]] =
unreachable("projectBucket")
implicit def thetaJoinPlanner[T[_[_]]: RecursiveT: ShowT, F[_]: PlannerErrorME]
: Planner[T, F, ThetaJoin[T, ?]] = unreachable("thetajoin")
def mapFuncPlanner[T[_[_]]: BirecursiveT: ShowT, F[_]: Applicative: Monad: NameGenerator: PlannerErrorME]
: Planner[T, F, MapFunc[T, ?]] = {
val core = new MapFuncCorePlanner[T, F]
val derived = new MapFuncDerivedPlanner(core)
coproduct(core, derived)
}
def reduceFuncPlanner[T[_[_]] : BirecursiveT, F[_] : Applicative]
: Planner[T, F, ReduceFunc] =
new ReduceFuncPlanner[T, F]
implicit def qScriptCorePlanner[
T[_[_]]: BirecursiveT: ShowT: EqualT,
F[_]: Monad: NameGenerator: PlannerErrorME]
: Planner[T, F, QScriptCore[T, ?]] = new QScriptCorePlanner[T, F](mapFuncPlanner)
implicit def equiJoinPlanner[
T[_[_]]: BirecursiveT: ShowT: EqualT,
F[_]: Monad: NameGenerator: PlannerErrorME]
: Planner[T, F, EquiJoin[T, ?]] = new EquiJoinPlanner[T, F](mapFuncPlanner)
implicit def coproduct[T[_[_]], N[_], F[_], G[_]](
implicit F: Planner[T, N, F], G: Planner[T, N, G]
): Planner[T, N, Coproduct[F, G, ?]] =
new Planner[T, N, Coproduct[F, G, ?]] {
val plan: AlgebraM[N, Coproduct[F, G, ?], T[SqlExpr]] =
_.run.fold(F.plan, G.plan)
}
private def unreachable[T[_[_]], F[_]: PlannerErrorME, QS[_]](
what: String): Planner[T, F, QS] =
new Planner[T, F, QS] {
override def plan: AlgebraM[F, QS, T[SqlExpr]] =
κ(
PlannerErrorME[F].raiseError(
InternalError.fromMsg(s"unreachable $what")))
}
}
|
jedesah/Quasar
|
rdbms/src/main/scala/quasar/physical/rdbms/planner/Planner.scala
|
Scala
|
apache-2.0
| 3,556
|
/*
* Copyright 2015 Matt Massie
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// GENERATED SOURCE: DO NOT EDIT.
package com.github.massie.avrotuples
import java.io._
import java.util
import com.esotericsoftware.kryo.{Kryo, KryoSerializable}
import com.esotericsoftware.kryo.io.{Input, Output}
import org.apache.avro.Schema
import org.apache.avro.generic.GenericData
import org.apache.avro.io.{DecoderFactory, EncoderFactory}
import org.apache.avro.specific.{SpecificDatumReader, SpecificDatumWriter, SpecificRecord}
import org.apache.avro.util.Utf8
object AvroTuple22 {
val SCHEMA$ = AvroTupleSchemas.recursiveSchemas(21)
val reader = new SpecificDatumReader[AvroTuple22[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _]](SCHEMA$)
val writer = new SpecificDatumWriter[AvroTuple22[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _]](SCHEMA$)
def readFromInputStream(tuple: AvroTuple22[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], in: InputStream) = {
AvroTuple22.reader.read(tuple, DecoderFactory.get.directBinaryDecoder(in, null))
}
def writeToOutputStream(tuple: AvroTuple22[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], out: OutputStream) = {
AvroTuple22.writer.write(tuple, EncoderFactory.get.directBinaryEncoder(out, null))
}
def fromInputStream(in: InputStream) : AvroTuple22[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _] = {
readFromInputStream(null.asInstanceOf[AvroTuple22[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _]], in)
}
def fromBytes(bytes: Array[Byte]): AvroTuple22[_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _] = {
val in = new ByteArrayInputStream(bytes)
val tuple = fromInputStream(in)
in.close()
tuple
}
}
final case class AvroTuple22[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](
@transient var _1: T1,
@transient var _2: T2,
@transient var _3: T3,
@transient var _4: T4,
@transient var _5: T5,
@transient var _6: T6,
@transient var _7: T7,
@transient var _8: T8,
@transient var _9: T9,
@transient var _10: T10,
@transient var _11: T11,
@transient var _12: T12,
@transient var _13: T13,
@transient var _14: T14,
@transient var _15: T15,
@transient var _16: T16,
@transient var _17: T17,
@transient var _18: T18,
@transient var _19: T19,
@transient var _20: T20,
@transient var _21: T21,
@transient var _22: T22)
extends Product22[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22] with SpecificRecord with KryoSerializable with Externalizable {
def this() = this(null.asInstanceOf[T1],
null.asInstanceOf[T2],
null.asInstanceOf[T3],
null.asInstanceOf[T4],
null.asInstanceOf[T5],
null.asInstanceOf[T6],
null.asInstanceOf[T7],
null.asInstanceOf[T8],
null.asInstanceOf[T9],
null.asInstanceOf[T10],
null.asInstanceOf[T11],
null.asInstanceOf[T12],
null.asInstanceOf[T13],
null.asInstanceOf[T14],
null.asInstanceOf[T15],
null.asInstanceOf[T16],
null.asInstanceOf[T17],
null.asInstanceOf[T18],
null.asInstanceOf[T19],
null.asInstanceOf[T20],
null.asInstanceOf[T21],
null.asInstanceOf[T22])
def update(n1: T1, n2: T2, n3: T3, n4: T4, n5: T5, n6: T6, n7: T7, n8: T8, n9: T9, n10: T10, n11: T11, n12: T12, n13: T13, n14: T14, n15: T15, n16: T16, n17: T17, n18: T18, n19: T19, n20: T20, n21: T21, n22: T22): AvroTuple22[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22] = {
_1 = n1
_2 = n2
_3 = n3
_4 = n4
_5 = n5
_6 = n6
_7 = n7
_8 = n8
_9 = n9
_10 = n10
_11 = n11
_12 = n12
_13 = n13
_14 = n14
_15 = n15
_16 = n16
_17 = n17
_18 = n18
_19 = n19
_20 = n20
_21 = n21
_22 = n22
this
}
@throws(classOf[IndexOutOfBoundsException])
override def get(i: Int): AnyRef = i match {
case 0 => val values = new util.ArrayList[AnyRef](productArity)
values.add(0, _1.asInstanceOf[AnyRef])
values.add(1, _2.asInstanceOf[AnyRef])
values.add(2, _3.asInstanceOf[AnyRef])
values.add(3, _4.asInstanceOf[AnyRef])
values.add(4, _5.asInstanceOf[AnyRef])
values.add(5, _6.asInstanceOf[AnyRef])
values.add(6, _7.asInstanceOf[AnyRef])
values.add(7, _8.asInstanceOf[AnyRef])
values.add(8, _9.asInstanceOf[AnyRef])
values.add(9, _10.asInstanceOf[AnyRef])
values.add(10, _11.asInstanceOf[AnyRef])
values.add(11, _12.asInstanceOf[AnyRef])
values.add(12, _13.asInstanceOf[AnyRef])
values.add(13, _14.asInstanceOf[AnyRef])
values.add(14, _15.asInstanceOf[AnyRef])
values.add(15, _16.asInstanceOf[AnyRef])
values.add(16, _17.asInstanceOf[AnyRef])
values.add(17, _18.asInstanceOf[AnyRef])
values.add(18, _19.asInstanceOf[AnyRef])
values.add(19, _20.asInstanceOf[AnyRef])
values.add(20, _21.asInstanceOf[AnyRef])
values.add(21, _22.asInstanceOf[AnyRef])
values.asInstanceOf[AnyRef]
case _ => throw new IndexOutOfBoundsException(i.toString)
}
private def utf8string(obj: Any) = obj match {
case u: Utf8 => u.toString
case _ => obj
}
@throws(classOf[IndexOutOfBoundsException])
override def put(i: Int, v: scala.Any): Unit = i match {
case 0 =>
val array = v match {
case avroArray: GenericData.Array[_]=> avroArray
case javaArray: util.ArrayList[_]=> javaArray
}
assert(array.size == productArity,
s"Tried to put ${array.size} values into AvroTuple with productArity of $productArity")
_1 = utf8string(array.get(0)).asInstanceOf[T1]
_2 = utf8string(array.get(1)).asInstanceOf[T2]
_3 = utf8string(array.get(2)).asInstanceOf[T3]
_4 = utf8string(array.get(3)).asInstanceOf[T4]
_5 = utf8string(array.get(4)).asInstanceOf[T5]
_6 = utf8string(array.get(5)).asInstanceOf[T6]
_7 = utf8string(array.get(6)).asInstanceOf[T7]
_8 = utf8string(array.get(7)).asInstanceOf[T8]
_9 = utf8string(array.get(8)).asInstanceOf[T9]
_10 = utf8string(array.get(9)).asInstanceOf[T10]
_11 = utf8string(array.get(10)).asInstanceOf[T11]
_12 = utf8string(array.get(11)).asInstanceOf[T12]
_13 = utf8string(array.get(12)).asInstanceOf[T13]
_14 = utf8string(array.get(13)).asInstanceOf[T14]
_15 = utf8string(array.get(14)).asInstanceOf[T15]
_16 = utf8string(array.get(15)).asInstanceOf[T16]
_17 = utf8string(array.get(16)).asInstanceOf[T17]
_18 = utf8string(array.get(17)).asInstanceOf[T18]
_19 = utf8string(array.get(18)).asInstanceOf[T19]
_20 = utf8string(array.get(19)).asInstanceOf[T20]
_21 = utf8string(array.get(20)).asInstanceOf[T21]
_22 = utf8string(array.get(21)).asInstanceOf[T22]
case _ => throw new IndexOutOfBoundsException(i.toString)
}
override def getSchema: Schema = AvroTuple22.SCHEMA$
override def toString: String = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + "," + _9 + "," + _10 + "," + _11 + "," + _12 + "," + _13 + "," + _14 + "," + _15 + "," + _16 + "," + _17 + "," + _18 + "," + _19 + "," + _20 + "," + _21 + "," + _22 + ")"
def toBytes: Array[Byte] = {
val byteStream = new ByteArrayOutputStream()
AvroTuple22.writeToOutputStream(this, byteStream)
byteStream.flush()
val bytes = byteStream.toByteArray
byteStream.close()
bytes
}
override def readExternal(in: ObjectInput): Unit = {
AvroTuple22.readFromInputStream(this, ExternalizableInput(in))
}
override def writeExternal(out: ObjectOutput): Unit = {
AvroTuple22.writeToOutputStream(this, ExternalizableOutput(out))
}
override def write(kryo: Kryo, output: Output): Unit = {
AvroTuple22.writeToOutputStream(this, output.getOutputStream)
}
override def read(kryo: Kryo, input: Input): Unit = {
AvroTuple22.readFromInputStream(this, input.getInputStream)
}
}
|
massie/avrotuples
|
src/main/scala/com/github/massie/avrotuples/AvroTuple22.scala
|
Scala
|
apache-2.0
| 9,046
|
package liang.don.dzviewer.log
import liang.don.dzviewer.config.ViewerProperties
import liang.don.dzviewer.config.ViewerProperties.{LogType, BuildTarget}
/**
* Logs information.
*
* @author Don Liang
* @version 0.1, 15/09/2011
*/
object Logger {
protected val baseFolder = "log"
private val _logger: LoggerInterface = {
val buildTarget = ViewerProperties.buildTarget
val logType = ViewerProperties.logType
if (LogType.Console == logType) {
if (BuildTarget.Java == buildTarget) {
new LoggerInterface with java.ConsoleLogger
} else if (BuildTarget.Net == buildTarget) {
// TODO .Net impl
new LoggerInterface with net.ConsoleLogger
} else {
sys.error("[" + getClass.getName + "] Invalid buildTarget.")
}
} else if (LogType.File == logType) {
if (BuildTarget.Java == buildTarget) {
new LoggerInterface with java.FileLogger
} else if (BuildTarget.Net == buildTarget) {
// TODO .Net impl
new LoggerInterface with net.FileLogger
} else {
sys.error("[" + getClass.getName + "] Invalid buildTarget.")
}
} else {
// no logging.
new LoggerInterface with DummyLogger
}
}
def instance: LoggerInterface = _logger
}
|
dl2k84/DeepZoomViewer
|
src/liang/don/dzviewer/log/Logger.scala
|
Scala
|
mit
| 1,267
|
/* __ *\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2002-2013, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\___/_/ |_/____/_/ | | **
** |/ **
\* */
package scala
/** A class for Product0 which was missing from the scala distribution. */
object Product0 {
def unapply(x: Product0): Option[Product0] =
Some(x)
}
trait Product0 extends Any with Product {
override def productArity = 0
@throws(classOf[IndexOutOfBoundsException])
override def productElement(n: Int) =
throw new IndexOutOfBoundsException(n.toString())
}
|
yusuke2255/dotty
|
src/scala/Product0.scala
|
Scala
|
bsd-3-clause
| 913
|
/**
* Created by cnavarro on 15/10/15.
*/
import org.apache.spark._
object PipeExample {
def main(args: Array[String]) {
val master = args.length match {
case x: Int if x > 0 => args(0)
case _ => "local"
}
val sc = new SparkContext(master, "PipeExample", System.getenv("SPARK_HOME"))
val rdd = sc.parallelize(Array(
"37.75889318222431,-122.42683635321838,37.7614213,-122.4240097",
"37.7519528,-122.4208689,37.8709087,-122.2688365"))
// adds our script to a list of files for each node to download with this job
val distScript = "./src/resources/finddistance.R"
val distScriptName = "finddistance.R"
sc.addFile(distScript)
val piped = rdd.pipe(SparkFiles.get(distScriptName))
val result = piped.collect
println("Result+++++++++++++:"+result.mkString(" ##### "))
}
}
|
canademar/me_extractors
|
spark_test/src/main/scala/PipeExample.scala
|
Scala
|
gpl-2.0
| 843