code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.themillhousegroup.witchhunt.checks
import com.themillhousegroup.scoup.ScoupImplicits
import com.themillhousegroup.witchhunt.{ RuleEnumerator, UnusedSelectorViolation, Violation, ViolationType }
import org.jsoup.nodes.Document
import scala._
import scala.Some
import com.themillhousegroup.witchhunt.Violation
import com.helger.css.decl.CSSDeclaration
object UnusedSelectorCheck extends WitchhuntViolationCheck with ScoupImplicits {

  /**
   * Returns a violation when no element on ANY of the supplied pages matches the selector.
   *
   * @param selector        the CSS selector being checked
   * @param applicablePages the rendered pages the selector could apply to
   * @return Some(violation) if the selector is unused everywhere, None otherwise
   */
  def checkSelector(implicit ruleSet: RuleEnumerator, selector: String, lineNumber: Int, declarationsWithin: Seq[CSSDeclaration], applicablePages: Set[Document]): Option[Violation] = {
    // `exists` short-circuits on the first page that contains a matching element
    val selectorIsUsed = applicablePages.exists(_.select(selector).nonEmpty)
    if (selectorIsUsed) None else buildViolation(UnusedSelectorViolation)
  }
}
| themillhousegroup/witchhunt | src/main/scala/com/themillhousegroup/witchhunt/checks/UnusedSelectorCheck.scala | Scala | mit | 969 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.util.UUID
import scala.collection.mutable
import scala.language.reflectiveCalls
import org.scalactic.TolerantNumerics
import org.scalatest.BeforeAndAfter
import org.scalatest.PrivateMethodTester._
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.concurrent.Waiters.Waiter
import org.apache.spark.SparkException
import org.apache.spark.scheduler._
import org.apache.spark.sql.{Encoder, SparkSession}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.v2.reader.streaming.{Offset => OffsetV2}
import org.apache.spark.sql.streaming.StreamingQueryListener._
import org.apache.spark.sql.streaming.util.StreamManualClock
import org.apache.spark.util.JsonProtocol
/**
 * Verifies that [[StreamingQueryListener]] callbacks (query started / progress / terminated)
 * fire at the right times, carry the right query metadata, serialize round-trip through
 * [[JsonProtocol]], and are only delivered to listeners registered on the owning session.
 */
class StreamingQueryListenerSuite extends StreamTest with BeforeAndAfter {

  import testImplicits._

  // To make === between double tolerate inexact values
  implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(0.01)

  after {
    // Stop any query a test left running, and confirm tests cleaned up their listeners
    spark.streams.active.foreach(_.stop())
    assert(spark.streams.active.isEmpty)
    assert(addedListeners().isEmpty)
    // Make sure we don't leak any events to the next test
    spark.sparkContext.listenerBus.waitUntilEmpty(10000)
  }

  testQuietly("single listener, check trigger events are generated correctly") {
    val clock = new StreamManualClock
    val inputData = new MemoryStream[Int](0, sqlContext)
    // 10 / _ deliberately divides by zero when 0 is added later, to force a query failure
    val df = inputData.toDS().as[Long].map { 10 / _ }
    val listener = new EventCollector

    // Blocks until the stream execution thread is parked waiting on the manual clock,
    // re-throwing any query failure so the test fails fast
    case class AssertStreamExecThreadToWaitForClock()
      extends AssertOnQuery(q => {
        eventually(Timeout(streamingTimeout)) {
          if (q.exception.isEmpty) {
            assert(clock.asInstanceOf[StreamManualClock].isStreamWaitingAt(clock.getTimeMillis))
          }
        }
        if (q.exception.isDefined) {
          throw q.exception.get
        }
        true
      }, "")

    try {
      // No events until started
      spark.streams.addListener(listener)
      assert(listener.startEvent === null)
      assert(listener.progressEvents.isEmpty)
      assert(listener.terminationEvent === null)
      testStream(df, OutputMode.Append)(
        // Start event generated when query started
        StartStream(Trigger.ProcessingTime(100), triggerClock = clock),
        AssertOnQuery { query =>
          assert(listener.startEvent !== null)
          assert(listener.startEvent.id === query.id)
          assert(listener.startEvent.runId === query.runId)
          assert(listener.startEvent.name === query.name)
          assert(listener.progressEvents.isEmpty)
          assert(listener.terminationEvent === null)
          true
        },
        // Progress event generated when data processed
        AddData(inputData, 1, 2),
        AdvanceManualClock(100),
        AssertStreamExecThreadToWaitForClock(),
        CheckAnswer(10, 5),
        AssertOnQuery { query =>
          assert(listener.progressEvents.nonEmpty)
          // SPARK-18868: We can't use query.lastProgress, because in progressEvents, we filter
          // out non-zero input rows, but the lastProgress may be a zero input row trigger
          val lastNonZeroProgress = query.recentProgress.filter(_.numInputRows > 0).lastOption
            .getOrElse(fail("No progress updates received in StreamingQuery!"))
          assert(listener.progressEvents.last.json === lastNonZeroProgress.json)
          assert(listener.terminationEvent === null)
          true
        },
        // Termination event generated when stopped cleanly
        StopStream,
        AssertOnQuery { query =>
          eventually(Timeout(streamingTimeout)) {
            assert(listener.terminationEvent !== null)
            assert(listener.terminationEvent.id === query.id)
            assert(listener.terminationEvent.runId === query.runId)
            assert(listener.terminationEvent.exception === None)
          }
          listener.checkAsyncErrors()
          listener.reset()
          true
        },
        // Termination event generated with exception message when stopped with error
        StartStream(Trigger.ProcessingTime(100), triggerClock = clock),
        AssertStreamExecThreadToWaitForClock(),
        AddData(inputData, 0),
        AdvanceManualClock(100), // process bad data
        ExpectFailure[SparkException](),
        AssertOnQuery { query =>
          eventually(Timeout(streamingTimeout)) {
            assert(listener.terminationEvent !== null)
            assert(listener.terminationEvent.id === query.id)
            assert(listener.terminationEvent.exception.nonEmpty)
            // Make sure that the exception message reported through listener
            // contains the actual exception and relevant stack trace
            assert(!listener.terminationEvent.exception.get.contains("StreamingQueryException"))
            assert(
              listener.terminationEvent.exception.get.contains("java.lang.ArithmeticException"))
            assert(listener.terminationEvent.exception.get.contains("StreamingQueryListenerSuite"))
          }
          listener.checkAsyncErrors()
          true
        }
      )
    } finally {
      spark.streams.removeListener(listener)
    }
  }

  test("SPARK-19594: all of listeners should receive QueryTerminatedEvent") {
    val df = MemoryStream[Int].toDS().as[Long]
    // Register several listeners at once; every one must observe the termination event
    val listeners = (1 to 5).map(_ => new EventCollector)
    try {
      listeners.foreach(listener => spark.streams.addListener(listener))
      testStream(df, OutputMode.Append)(
        StartStream(),
        StopStream,
        AssertOnQuery { query =>
          eventually(Timeout(streamingTimeout)) {
            listeners.foreach(listener => assert(listener.terminationEvent !== null))
            listeners.foreach(listener => assert(listener.terminationEvent.id === query.id))
            listeners.foreach(listener => assert(listener.terminationEvent.runId === query.runId))
            listeners.foreach(listener => assert(listener.terminationEvent.exception === None))
          }
          listeners.foreach(listener => listener.checkAsyncErrors())
          listeners.foreach(listener => listener.reset())
          true
        }
      )
    } finally {
      listeners.foreach(spark.streams.removeListener)
    }
  }

  test("continuous processing listeners should receive QueryTerminatedEvent") {
    // Same as the test above, but for a continuous-trigger query over a v2 sink
    val df = spark.readStream.format("rate").load()
    val listeners = (1 to 5).map(_ => new EventCollector)
    try {
      listeners.foreach(listener => spark.streams.addListener(listener))
      testStream(df, OutputMode.Append, useV2Sink = true)(
        StartStream(Trigger.Continuous(1000)),
        StopStream,
        AssertOnQuery { query =>
          eventually(Timeout(streamingTimeout)) {
            listeners.foreach(listener => assert(listener.terminationEvent !== null))
            listeners.foreach(listener => assert(listener.terminationEvent.id === query.id))
            listeners.foreach(listener => assert(listener.terminationEvent.runId === query.runId))
            listeners.foreach(listener => assert(listener.terminationEvent.exception === None))
          }
          listeners.foreach(listener => listener.checkAsyncErrors())
          listeners.foreach(listener => listener.reset())
          true
        }
      )
    } finally {
      listeners.foreach(spark.streams.removeListener)
    }
  }

  test("adding and removing listener") {
    // A listener is "active" iff running a query causes it to record a start event
    def isListenerActive(listener: EventCollector): Boolean = {
      listener.reset()
      testStream(MemoryStream[Int].toDS)(
        StartStream(),
        StopStream
      )
      listener.startEvent != null
    }

    try {
      val listener1 = new EventCollector
      val listener2 = new EventCollector
      spark.streams.addListener(listener1)
      assert(isListenerActive(listener1))
      assert(isListenerActive(listener2) === false)
      spark.streams.addListener(listener2)
      assert(isListenerActive(listener1))
      assert(isListenerActive(listener2))
      spark.streams.removeListener(listener1)
      assert(isListenerActive(listener1) === false)
      assert(isListenerActive(listener2))
    } finally {
      addedListeners().foreach(spark.streams.removeListener)
    }
  }

  test("event ordering") {
    // Repeat many times to catch races between event delivery and query start/stop
    val listener = new EventCollector
    withListenerAdded(listener) {
      for (i <- 1 to 50) {
        listener.reset()
        require(listener.startEvent === null)
        testStream(MemoryStream[Int].toDS)(
          StartStream(),
          Assert(listener.startEvent !== null, "onQueryStarted not called before query returned"),
          StopStream,
          Assert { listener.checkAsyncErrors() }
        )
      }
    }
  }

  test("QueryStartedEvent serialization") {
    // Round-trip through JsonProtocol and check the identifying fields survive
    def testSerialization(event: QueryStartedEvent): Unit = {
      val json = JsonProtocol.sparkEventToJson(event)
      val newEvent = JsonProtocol.sparkEventFromJson(json).asInstanceOf[QueryStartedEvent]
      assert(newEvent.id === event.id)
      assert(newEvent.runId === event.runId)
      assert(newEvent.name === event.name)
    }
    testSerialization(new QueryStartedEvent(UUID.randomUUID, UUID.randomUUID, "name"))
    // name may legitimately be null for unnamed queries
    testSerialization(new QueryStartedEvent(UUID.randomUUID, UUID.randomUUID, null))
  }

  test("QueryProgressEvent serialization") {
    def testSerialization(event: QueryProgressEvent): Unit = {
      import scala.collection.JavaConverters._
      val json = JsonProtocol.sparkEventToJson(event)
      val newEvent = JsonProtocol.sparkEventFromJson(json).asInstanceOf[QueryProgressEvent]
      assert(newEvent.progress.json === event.progress.json) // json as a proxy for equality
      assert(newEvent.progress.durationMs.asScala === event.progress.durationMs.asScala)
      assert(newEvent.progress.eventTime.asScala === event.progress.eventTime.asScala)
    }
    testSerialization(new QueryProgressEvent(StreamingQueryStatusAndProgressSuite.testProgress1))
    testSerialization(new QueryProgressEvent(StreamingQueryStatusAndProgressSuite.testProgress2))
  }

  test("QueryTerminatedEvent serialization") {
    def testSerialization(event: QueryTerminatedEvent): Unit = {
      val json = JsonProtocol.sparkEventToJson(event)
      val newEvent = JsonProtocol.sparkEventFromJson(json).asInstanceOf[QueryTerminatedEvent]
      assert(newEvent.id === event.id)
      assert(newEvent.runId === event.runId)
      assert(newEvent.exception === event.exception)
    }

    val exception = new RuntimeException("exception")
    testSerialization(
      new QueryTerminatedEvent(UUID.randomUUID, UUID.randomUUID, Some(exception.getMessage)))
  }

  test("only one progress event per interval when no data") {
    // This test will start a query but not push any data, and then check if we push too many events
    withSQLConf(SQLConf.STREAMING_NO_DATA_PROGRESS_EVENT_INTERVAL.key -> "100ms") {
      @volatile var numProgressEvent = 0
      // Minimal listener that only counts progress events
      val listener = new StreamingQueryListener {
        override def onQueryStarted(event: QueryStartedEvent): Unit = {}
        override def onQueryProgress(event: QueryProgressEvent): Unit = {
          numProgressEvent += 1
        }
        override def onQueryTerminated(event: QueryTerminatedEvent): Unit = {}
      }
      spark.streams.addListener(listener)
      try {
        // Instrument the source so we can count how many triggers actually ran
        val input = new MemoryStream[Int](0, sqlContext) {
          @volatile var numTriggers = 0
          override def latestOffset(): OffsetV2 = {
            numTriggers += 1
            super.latestOffset()
          }
        }
        val clock = new StreamManualClock()
        val actions = mutable.ArrayBuffer[StreamAction]()
        actions += StartStream(trigger = Trigger.ProcessingTime(10), triggerClock = clock)
        for (_ <- 1 to 100) {
          actions += AdvanceManualClock(10)
        }
        actions += AssertOnQuery { _ =>
          eventually(timeout(streamingTimeout)) {
            assert(input.numTriggers > 100) // at least 100 triggers have occurred
          }
          true
        }
        // `recentProgress` should not receive too many no data events
        actions += AssertOnQuery { q =>
          q.recentProgress.size > 1 && q.recentProgress.size <= 11
        }
        testStream(input.toDS)(actions: _*)
        spark.sparkContext.listenerBus.waitUntilEmpty(10000)
        // 11 is the max value of the possible numbers of events.
        assert(numProgressEvent > 1 && numProgressEvent <= 11)
      } finally {
        spark.streams.removeListener(listener)
      }
    }
  }

  test("listener only posts events from queries started in the related sessions") {
    val session1 = spark.newSession()
    val session2 = spark.newSession()
    val collector1 = new EventCollector
    val collector2 = new EventCollector

    // Runs a trivial query on the given session and waits for its events to be delivered
    def runQuery(session: SparkSession): Unit = {
      collector1.reset()
      collector2.reset()
      val mem = MemoryStream[Int](implicitly[Encoder[Int]], session.sqlContext)
      testStream(mem.toDS)(
        AddData(mem, 1, 2, 3),
        CheckAnswer(1, 2, 3)
      )
      session.sparkContext.listenerBus.waitUntilEmpty(5000)
    }

    def assertEventsCollected(collector: EventCollector): Unit = {
      assert(collector.startEvent !== null)
      assert(collector.progressEvents.nonEmpty)
      assert(collector.terminationEvent !== null)
    }

    def assertEventsNotCollected(collector: EventCollector): Unit = {
      assert(collector.startEvent === null)
      assert(collector.progressEvents.isEmpty)
      assert(collector.terminationEvent === null)
    }

    // The two sessions (and their stream managers) must be distinct objects
    assert(session1.ne(session2))
    assert(session1.streams.ne(session2.streams))

    withListenerAdded(collector1, session1) {
      assert(addedListeners(session1).nonEmpty)

      withListenerAdded(collector2, session2) {
        assert(addedListeners(session2).nonEmpty)

        // query on session1 should send events only to collector1
        runQuery(session1)
        assertEventsCollected(collector1)
        assertEventsNotCollected(collector2)

        // query on session2 should send events only to collector2
        runQuery(session2)
        assertEventsCollected(collector2)
        assertEventsNotCollected(collector1)
      }
    }
  }

  testQuietly("ReplayListenerBus should ignore broken event jsons generated in 2.0.0") {
    // query-event-logs-version-2.0.0.txt has all types of events generated by
    // Structured Streaming in Spark 2.0.0.
    // SparkListenerApplicationEnd is the only valid event and it's the last event. We use it
    // to verify that we can skip broken jsons generated by Structured Streaming.
    testReplayListenerBusWithBorkenEventJsons("query-event-logs-version-2.0.0.txt")
  }

  testQuietly("ReplayListenerBus should ignore broken event jsons generated in 2.0.1") {
    // query-event-logs-version-2.0.1.txt has all types of events generated by
    // Structured Streaming in Spark 2.0.1.
    // SparkListenerApplicationEnd is the only valid event and it's the last event. We use it
    // to verify that we can skip broken jsons generated by Structured Streaming.
    testReplayListenerBusWithBorkenEventJsons("query-event-logs-version-2.0.1.txt")
  }

  testQuietly("ReplayListenerBus should ignore broken event jsons generated in 2.0.2") {
    // query-event-logs-version-2.0.2.txt has all types of events generated by
    // Structured Streaming in Spark 2.0.2.
    // SparkListenerApplicationEnd is the only valid event and it's the last event. We use it
    // to verify that we can skip broken jsons generated by Structured Streaming.
    testReplayListenerBusWithBorkenEventJsons("query-event-logs-version-2.0.2.txt")
  }

  // Replays the given event-log resource file and asserts that only the final
  // SparkListenerApplicationEnd event survives parsing (all broken jsons are skipped)
  private def testReplayListenerBusWithBorkenEventJsons(fileName: String): Unit = {
    val input = getClass.getResourceAsStream(s"/structured-streaming/$fileName")
    val events = mutable.ArrayBuffer[SparkListenerEvent]()
    try {
      val replayer = new ReplayListenerBus() {
        // Redirect all parsed events to `events`
        override def doPostEvent(
            listener: SparkListenerInterface,
            event: SparkListenerEvent): Unit = {
          events += event
        }
      }
      // Add a dummy listener so that "doPostEvent" will be called.
      replayer.addListener(new SparkListener {})
      replayer.replay(input, fileName)
      // SparkListenerApplicationEnd is the only valid event
      assert(events.size === 1)
      assert(events(0).isInstanceOf[SparkListenerApplicationEnd])
    } finally {
      input.close()
    }
  }

  // Registers the listener on the given session for the duration of `body`,
  // always removing it afterwards
  private def withListenerAdded(
      listener: StreamingQueryListener,
      session: SparkSession = spark)(body: => Unit): Unit = {
    try {
      failAfter(streamingTimeout) {
        session.streams.addListener(listener)
        body
      }
    } finally {
      session.streams.removeListener(listener)
    }
  }

  // Reads the session's registered streaming listeners via the private listenerBus field
  private def addedListeners(session: SparkSession = spark): Array[StreamingQueryListener] = {
    val listenerBusMethod =
      PrivateMethod[StreamingQueryListenerBus]('listenerBus)
    val listenerBus = session.streams invokePrivate listenerBusMethod()
    listenerBus.listeners.toArray.map(_.asInstanceOf[StreamingQueryListener])
  }

  /** Collects events from the StreamingQueryListener for testing */
  class EventCollector extends StreamingQueryListener {
    // to catch errors in the async listener events
    @volatile private var asyncTestWaiter = new Waiter

    @volatile var startEvent: QueryStartedEvent = null
    @volatile var terminationEvent: QueryTerminatedEvent = null

    private val _progressEvents = new mutable.Queue[StreamingQueryProgress]

    // Only progress events that actually processed input rows are reported
    def progressEvents: Seq[StreamingQueryProgress] = _progressEvents.synchronized {
      _progressEvents.filter(_.numInputRows > 0)
    }

    // Clears all recorded events so the collector can be reused across queries
    def reset(): Unit = {
      startEvent = null
      terminationEvent = null
      _progressEvents.clear()
      asyncTestWaiter = new Waiter
    }

    // Re-throws any assertion failure raised inside the async callbacks
    def checkAsyncErrors(): Unit = {
      asyncTestWaiter.await(timeout(streamingTimeout))
    }

    override def onQueryStarted(queryStarted: QueryStartedEvent): Unit = {
      asyncTestWaiter {
        startEvent = queryStarted
      }
    }

    override def onQueryProgress(queryProgress: QueryProgressEvent): Unit = {
      asyncTestWaiter {
        assert(startEvent != null, "onQueryProgress called before onQueryStarted")
        _progressEvents.synchronized { _progressEvents += queryProgress.progress }
      }
    }

    override def onQueryTerminated(queryTerminated: QueryTerminatedEvent): Unit = {
      asyncTestWaiter {
        assert(startEvent != null, "onQueryTerminated called before onQueryStarted")
        terminationEvent = queryTerminated
      }
      asyncTestWaiter.dismiss()
    }
  }
}
| yanboliang/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala | Scala | apache-2.0 | 19,568 |
/* Copyright 2014 Nest Labs
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package nest.sparkle.loader.kafka
import nest.sparkle.util.RandomUtil.randomAlphaNum
import scala.concurrent.ExecutionContext
import nest.sparkle.loader.kafka.KafkaEncoders.Implicits._
import nest.sparkle.loader.kafka.KafkaDecoders.Implicits._
import com.typesafe.config.ConfigFactory
import com.typesafe.config.Config
import nest.sparkle.util.Log
/** Create a kafka reader/writer pair for a kafka test.
* Note topics are not cleaned up: use withKafkaTestTopic instead if possible. */
/** Create a kafka reader/writer pair for a kafka test.
  * Note topics are not cleaned up: use withKafkaTestTopic instead if possible.
  *
  * @param rootConfig configuration used to construct the reader and writer
  * @param id         random suffix so concurrent tests get distinct topics/clients
  */
private class KafkaTestTopic(rootConfig: Config, id: String = randomAlphaNum(3)) extends Log {
  val topic = s"testTopic-$id"
  val clientGroup = s"testClient-$id"

  val writer = KafkaWriter[String](topic, rootConfig)
  val reader = KafkaReader[String](topic, rootConfig, clientGroup = Some(clientGroup))

  log.debug("created test topic {}", topic)

  /** Close both ends of the test topic (the topic itself is not deleted). */
  def close(): Unit = {
    writer.close()
    reader.close()
    // plain string (not s-interpolated): "{}" is the SLF4J placeholder for `topic`
    log.debug("close test topic {}", topic)
  }
}
/** run a test with a kafka reader/writer pair, cleaning up afterwards */
object KafkaTestTopic {
/** run a test with a kafka reader/writer pair, cleaning up afterwards */
def withKafkaTestTopic[T](rootConfig:Config, id:String = randomAlphaNum(3))(fn:KafkaTestTopic => T):T = {
var topic:KafkaTestTopic = null
try {
topic = new KafkaTestTopic(rootConfig, id)
fn(topic)
} finally {
topic.close()
}
}
}
| mighdoll/sparkle | kafka/src/it/scala/nest/sparkle/loader/kafka/KafkaTestTopic.scala | Scala | apache-2.0 | 1,984 |
package com.zobot
import akka.util.ByteString
import com.zobot.client.ZobotClient
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}
/** Entry point: connects to the bot server, logs in, and prints the async outcome. */
object Zobot extends App {
  val client = new ZobotClient("192.168.99.100", 32770)

  client.login("foo", "pass").andThen {
    case Success(a: ByteString) => println("completed once!", a.decodeString(ByteString.UTF_8))
    case Failure(e)             => println("error", e)
  }
}
| BecauseNoReason/zobot | src/main/scala/com/zobot/Zobot.scala | Scala | mit | 468 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils.stats
import java.lang.{Double => jDouble, Long => jLong}
import java.util.Date
import com.vividsolutions.jts.geom.Geometry
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.{GeoToolsDateFormat, SimpleFeatureTypes}
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
/**
 * Tests the TopK stat over each supported attribute type: observation counts, top-k
 * ordering, (de)serialization, immutability, merging (+=) and clearing.
 */
class TopKTest extends Specification {

  val sft = SimpleFeatureTypes.createType("topk", "name:String,score:Long,height:Double,dtg:Date,*geom:Point:srid=4326")
  val builder = new SimpleFeatureBuilder(sft)

  // 100 features with a fixed skew: 10x "*10", 5x "*15", 15x "*30", 20x "*50", 50x "*100",
  // so the expected top-k ordering is deterministic
  val features1 = (0 until 100).map { i =>
    if (i < 10) {
      builder.addAll(Array[AnyRef]("name10", "10", "10.0", "2010-01-01T00:00:00.000Z", "POINT(10 0)"))
    } else if (i < 15) {
      builder.addAll(Array[AnyRef]("name15", "15", "15.0", "2015-01-01T00:00:00.000Z", "POINT(15 0)"))
    } else if (i < 30) {
      builder.addAll(Array[AnyRef]("name30", "30", "30.0", "2030-01-01T00:00:00.000Z", "POINT(30 0)"))
    } else if (i < 50) {
      builder.addAll(Array[AnyRef]("name50", "50", "50.0", "2050-01-01T00:00:00.000Z", "POINT(50 0)"))
    } else {
      builder.addAll(Array[AnyRef]("name100", "100", "100.0", "2100-01-01T00:00:00.000Z", "POINT(100 0)"))
    }
    builder.buildFeature(i.toString)
  }

  // Same skew as features1 but with disjoint values ("-2" suffix), used for merge tests
  val features2 = (0 until 100).map { i =>
    if (i < 10) {
      builder.addAll(Array[AnyRef]("name10-2", "210", "10.2", "2010-01-01T02:00:00.000Z", "POINT(10 2)"))
    } else if (i < 15) {
      builder.addAll(Array[AnyRef]("name15-2", "215", "15.2", "2015-01-01T02:00:00.000Z", "POINT(15 2)"))
    } else if (i < 30) {
      builder.addAll(Array[AnyRef]("name30-2", "230", "30.2", "2030-01-01T02:00:00.000Z", "POINT(30 2)"))
    } else if (i < 50) {
      builder.addAll(Array[AnyRef]("name50-2", "250", "50.2", "2050-01-01T02:00:00.000Z", "POINT(50 2)"))
    } else {
      builder.addAll(Array[AnyRef]("name100-2", "2100", "100.2", "2100-01-01T02:00:00.000Z", "POINT(100 2)"))
    }
    builder.buildFeature(i.toString)
  }

  // Parses a fresh TopK stat for the given attribute from the stat spec string
  def createStat[T](attribute: String): TopK[T] = Stat(sft, s"TopK($attribute)").asInstanceOf[TopK[T]]

  // `def`s (not `val`s) so each test gets a fresh, empty stat
  def stringStat = createStat[String]("name")
  def longStat = createStat[jLong]("score")
  def doubleStat = createStat[jDouble]("height")
  def dateStat = createStat[Date]("dtg")
  def geomStat = createStat[Geometry]("geom")

  "TopK stat" should {
    "work with strings" >> {
      "be empty initially" >> {
        val stat = stringStat
        stat.isEmpty must beTrue
        stat.topK(10) must beEmpty
      }
      "correctly calculate values" >> {
        val stat = stringStat
        features1.foreach(stat.observe)
        stat.isEmpty must beFalse
        stat.size mustEqual 5
        stat.topK(10) mustEqual Seq(("name100", 50), ("name50", 20), ("name30", 15), ("name10", 10), ("name15", 5))
      }
      "serialize and deserialize" >> {
        val stat = stringStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[TopK[String]]
        unpacked.asInstanceOf[TopK[String]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[String]].attribute mustEqual stat.attribute
        unpacked.asInstanceOf[TopK[String]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
      "serialize and deserialize empty stats" >> {
        val stat = stringStat
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[TopK[String]]
        unpacked.asInstanceOf[TopK[String]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[String]].attribute mustEqual stat.attribute
        unpacked.asInstanceOf[TopK[String]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
      "deserialize as immutable value" >> {
        // immutable = true must yield a stat that rejects all mutation
        val stat = stringStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed, immutable = true)
        unpacked must beAnInstanceOf[TopK[String]]
        unpacked.asInstanceOf[TopK[String]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[String]].attribute mustEqual stat.attribute
        unpacked.asInstanceOf[TopK[String]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
        unpacked.clear must throwAn[Exception]
        unpacked.+=(stat) must throwAn[Exception]
        unpacked.observe(features1.head) must throwAn[Exception]
        unpacked.unobserve(features1.head) must throwAn[Exception]
      }
      "combine two TopKs" >> {
        // += must merge into the left-hand stat and leave the right-hand stat unchanged
        val stat = stringStat
        val stat2 = stringStat
        features1.foreach(stat.observe)
        features2.foreach(stat2.observe)
        stat2.size mustEqual 5
        stat2.topK(10) mustEqual Seq(("name100-2", 50), ("name50-2", 20), ("name30-2", 15), ("name10-2", 10), ("name15-2", 5))
        stat += stat2
        stat.size mustEqual 10
        stat.topK(10) mustEqual Seq(("name100", 50), ("name100-2", 50), ("name50", 20), ("name50-2", 20),
          ("name30", 15), ("name30-2", 15), ("name10", 10), ("name10-2", 10), ("name15", 5), ("name15-2", 5))
        stat2.size mustEqual 5
        stat2.topK(10) mustEqual Seq(("name100-2", 50), ("name50-2", 20), ("name30-2", 15), ("name10-2", 10), ("name15-2", 5))
      }
      "clear" >> {
        val stat = stringStat
        features1.foreach(stat.observe)
        stat.clear()
        stat.isEmpty must beTrue
        stat.size mustEqual 0
        stat.topK(10) must beEmpty
      }
    }
    "work with longs" >> {
      "correctly calculate values" >> {
        val stat = longStat
        features1.foreach(stat.observe)
        stat.isEmpty must beFalse
        stat.size mustEqual 5
        stat.topK(10) mustEqual Seq((100L, 50), (50L, 20), (30L, 15), (10L, 10), (15L, 5))
      }
      "serialize and deserialize" >> {
        val stat = longStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[TopK[String]]
        unpacked.asInstanceOf[TopK[String]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[String]].attribute mustEqual stat.attribute
        unpacked.asInstanceOf[TopK[String]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
    }
    "work with doubles" >> {
      "correctly calculate values" >> {
        val stat = doubleStat
        features1.foreach(stat.observe)
        stat.isEmpty must beFalse
        stat.size mustEqual 5
        stat.topK(10) mustEqual Seq((100.0, 50), (50.0, 20), (30.0, 15), (10.0, 10), (15.0, 5))
      }
      "serialize and deserialize" >> {
        val stat = doubleStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[TopK[String]]
        unpacked.asInstanceOf[TopK[String]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[String]].attribute mustEqual stat.attribute
        unpacked.asInstanceOf[TopK[String]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
    }
    "work with dates" >> {
      // Builds the Date corresponding to year 2_0YY used in the fixtures (e.g. 10 -> 2010)
      def toDate(year: Int) = java.util.Date.from(java.time.LocalDateTime.parse(f"2$year%03d-01-01T00:00:00.000Z", GeoToolsDateFormat).toInstant(java.time.ZoneOffset.UTC))
      "correctly calculate values" >> {
        val stat = dateStat
        features1.foreach(stat.observe)
        stat.isEmpty must beFalse
        stat.size mustEqual 5
        stat.topK(10) mustEqual Seq((toDate(100), 50), (toDate(50), 20), (toDate(30), 15), (toDate(10), 10), (toDate(15), 5))
      }
      "serialize and deserialize" >> {
        val stat = dateStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[TopK[String]]
        unpacked.asInstanceOf[TopK[String]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[String]].attribute mustEqual stat.attribute
        unpacked.asInstanceOf[TopK[String]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
    }
    "work with geometries" >> {
      // Builds the POINT(lon 0) geometry used in the features1 fixtures
      def toGeom(lon: Int) = WKTUtils.read(s"POINT($lon 0)")
      "correctly calculate values" >> {
        val stat = geomStat
        features1.foreach(stat.observe)
        stat.isEmpty must beFalse
        stat.size mustEqual 5
        stat.topK(10) mustEqual Seq((toGeom(100), 50), (toGeom(50), 20), (toGeom(30), 15), (toGeom(10), 10), (toGeom(15), 5))
      }
      "serialize and deserialize" >> {
        val stat = geomStat
        features1.foreach(stat.observe)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[TopK[String]]
        unpacked.asInstanceOf[TopK[String]].size mustEqual stat.size
        unpacked.asInstanceOf[TopK[String]].attribute mustEqual stat.attribute
        unpacked.asInstanceOf[TopK[String]].toJson mustEqual stat.toJson
        unpacked.isEquivalent(stat) must beTrue
      }
    }
  }
}
| MutahirKazmi/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/TopKTest.scala | Scala | apache-2.0 | 10,141 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.http.cache.client
import play.api.libs.json.{JsPath, JsValue, Json, JsonValidationError}
/**
 * Raised when a KeyStore entry exists but its JSON cannot be read as the requested type.
 *
 * @param key         the KeyStore key whose entry failed validation
 * @param invalidJson the raw JSON that was stored under the key
 * @param readingAs   the target class the JSON was being converted to
 * @param errors      the play-json validation errors, per path
 */
class KeyStoreEntryValidationException(
  val key: String,
  val invalidJson: JsValue,
  val readingAs: Class[_],
  val errors: Seq[(JsPath, Seq[JsonValidationError])])
    extends Exception {

  override def getMessage: String = {
    val storedJson = Json.stringify(invalidJson)
    s"KeyStore entry for key '$key' was '$storedJson'. Attempt to convert to ${readingAs.getName} gave errors: $errors"
  }
}
package org.jetbrains.sbt.settings
import com.intellij.openapi.externalSystem.service.settings.AbstractExternalSystemConfigurable
import com.intellij.openapi.project.Project
import org.jetbrains.sbt.project.SbtProjectSystem
import org.jetbrains.sbt.project.settings.Context.Configuration
import org.jetbrains.sbt.project.settings._
/**
* User: Dmitry Naydanov
* Date: 11/25/13
*/
/**
 * Settings UI for the sbt external system: wires the per-project and application-wide
 * sbt settings panels into IntelliJ's external-system configurable framework.
 */
class SbtExternalSystemConfigurable(project: Project)
  extends AbstractExternalSystemConfigurable[SbtProjectSettings, SbtProjectSettingsListener, SbtSettings](project, SbtProjectSystem.Id) {

  // Per-project settings panel, built in the standard Configuration context
  override def createProjectSettingsControl(settings: SbtProjectSettings): SbtProjectSettingsControl = new SbtProjectSettingsControl(Configuration, settings)

  // Application-wide sbt settings panel
  override def createSystemSettingsControl(settings: SbtSettings): SbtSettingsControl = new SbtSettingsControl(settings)

  // Fresh settings instance used when linking a new sbt project
  override def newProjectSettings(): SbtProjectSettings = new SbtProjectSettings()

  override def getId: String = "sbt.project.settings.configurable"
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/sbt/settings/SbtExternalSystemConfigurable.scala | Scala | apache-2.0 | 1,013 |
package com.sksamuel.elastic4s
package admin
import org.elasticsearch.action.admin.cluster.repositories.put.{PutRepositoryRequest, PutRepositoryResponse}
import org.elasticsearch.action.admin.cluster.snapshots.create.{CreateSnapshotRequestBuilder, CreateSnapshotResponse}
import org.elasticsearch.action.admin.cluster.snapshots.delete.{DeleteSnapshotRequestBuilder, DeleteSnapshotResponse}
import org.elasticsearch.action.admin.cluster.snapshots.get.{GetSnapshotsRequestBuilder, GetSnapshotsResponse}
import org.elasticsearch.action.admin.cluster.snapshots.restore.{RestoreSnapshotRequestBuilder, RestoreSnapshotResponse}
import org.elasticsearch.action.support.IndicesOptions
import org.elasticsearch.client.Client
import scala.collection.JavaConverters._
import scala.concurrent.Future
/** @author Stephen Samuel
*
* DSL Syntax:
*
* repository create <repo> settings <settings>
* snapshot create <name> in <repo>
* snapshot delete <name> in <repo>
* snapshot restore <name> from <repo>
*
*/
trait SnapshotDsl {

  // ---- DSL "expects" steps -------------------------------------------------
  // Each class below is an intermediate step of the fluent syntax shown in the
  // scaladoc above; it captures the first keyword's argument and waits for the
  // connective (`type`/`in`/`from`) to produce the concrete definition.

  /** Step after `repository create <name>`; awaits the repository type. */
  class CreateRepositoryExpectsType(name: String) {
    def `type`(`type`: String) = new CreateRepositoryDefinition(name, `type`)
  }

  /** Step after `snapshot create <name>`; awaits the target repository. */
  class CreateSnapshotExpectsIn(name: String) {
    def in(repo: String) = new CreateSnapshotDefinition(name, repo)
  }

  /** Step after `get snapshots <names>`; awaits the source repository. */
  class GetSnapshotsExpectsFrom(snapshotNames: Seq[String]) {
    def from(repo: String) = new GetSnapshotsDefinition(snapshotNames.toArray, repo)
  }

  /** Step after `snapshot restore <name>`; awaits the source repository. */
  class RestoreSnapshotExpectsFrom(name: String) {
    def from(repo: String) = new RestoreSnapshotDefinition(name, repo)
  }

  /** Step after `snapshot delete <name>`; awaits the owning repository. */
  class DeleteSnapshotExpectsIn(name: String) {
    def in(repo: String) = new DeleteSnapshotDefinition(name, repo)
  }

  // ---- Executable type-class instances ------------------------------------
  // One implicit Executable per definition type: each builds the low-level
  // request and bridges Elasticsearch's async callback API to a Scala Future
  // via injectFuture.

  implicit object DeleteSnapshotDefinitionExecutable
      extends Executable[DeleteSnapshotDefinition, DeleteSnapshotResponse, DeleteSnapshotResponse] {
    override def apply(c: Client, t: DeleteSnapshotDefinition): Future[DeleteSnapshotResponse] = {
      injectFuture(c.admin.cluster.deleteSnapshot(t.build, _))
    }
  }

  implicit object RestoreSnapshotDefinitionExecutable
      extends Executable[RestoreSnapshotDefinition, RestoreSnapshotResponse, RestoreSnapshotResponse] {
    override def apply(c: Client, t: RestoreSnapshotDefinition): Future[RestoreSnapshotResponse] = {
      injectFuture(c.admin.cluster.restoreSnapshot(t.build, _))
    }
  }

  implicit object CreateSnapshotDefinitionExecutable
      extends Executable[CreateSnapshotDefinition, CreateSnapshotResponse, CreateSnapshotResponse] {
    override def apply(c: Client, t: CreateSnapshotDefinition): Future[CreateSnapshotResponse] = {
      injectFuture(c.admin.cluster.createSnapshot(t.build, _))
    }
  }

  implicit object GetSnapshotsDefinitionExecutable
      extends Executable[GetSnapshotsDefinition, GetSnapshotsResponse, GetSnapshotsResponse] {
    override def apply(c: Client, t: GetSnapshotsDefinition): Future[GetSnapshotsResponse] = {
      injectFuture(c.admin.cluster.getSnapshots(t.build, _))
    }
  }

  implicit object CreateRepositoryDefinitionExecutable
      extends Executable[CreateRepositoryDefinition, PutRepositoryResponse, PutRepositoryResponse] {
    override def apply(c: Client, t: CreateRepositoryDefinition): Future[PutRepositoryResponse] = {
      injectFuture(c.admin.cluster.putRepository(t.build, _))
    }
  }
}
/**
 * Builder for a put-repository request registering snapshot repository `name`
 * of the given repository `type` (e.g. "fs", "url").
 */
class CreateRepositoryDefinition(name: String, `type`: String) {
  protected val request = new PutRepositoryRequest(name).`type`(`type`)
  // Returns the underlying Elasticsearch request for execution.
  def build = request
  /** Repository-type-specific settings (e.g. "location" for an fs repo). */
  def settings(map: Map[String, AnyRef]): this.type = {
    request.settings(map.asJava)
    this
  }
}
/** Builder for a delete-snapshot request removing snapshot `name` from repository `repo`. */
class DeleteSnapshotDefinition(name: String, repo: String) {
  val request = new DeleteSnapshotRequestBuilder(ProxyClients.cluster, repo, name)
  // Returns the underlying Elasticsearch request for execution.
  def build = request.request()
}
/** Builder for a get-snapshots request fetching the named snapshots from repository `repo`. */
class GetSnapshotsDefinition(snapshotNames: Array[String], repo: String) {
  val request = new GetSnapshotsRequestBuilder(ProxyClients.cluster, repo).setSnapshots(snapshotNames: _*)
  // Returns the underlying Elasticsearch request for execution.
  def build = request.request()
}
/**
 * Builder for a create-snapshot request taking snapshot `name` in repository
 * `repo`. All setters mutate the underlying request builder and return `this`
 * for fluent chaining.
 */
class CreateSnapshotDefinition(name: String, repo: String) {
  val request = new CreateSnapshotRequestBuilder(ProxyClients.cluster, repo, name)
  // Returns the underlying Elasticsearch request for execution.
  def build = request.request()

  /** Allow a partial snapshot when some primary shards are unavailable. */
  def partial(p: Boolean): this.type = {
    request.setPartial(p)
    this
  }

  /** How wildcard index expressions and missing indices are resolved. */
  def setIndicesOptions(indicesOptions: IndicesOptions): this.type = {
    request.setIndicesOptions(indicesOptions)
    this
  }

  /** Whether cluster global state is included in the snapshot. */
  def includeGlobalState(global: Boolean): this.type = {
    request.setIncludeGlobalState(global)
    this
  }

  /** Whether the call should block until the snapshot completes. */
  def waitForCompletion(waitForCompletion: Boolean): this.type = {
    request.setWaitForCompletion(waitForCompletion)
    this
  }

  /** Snapshot a single index. */
  def index(index: String): this.type = {
    request.setIndices(index)
    this
  }

  /** Snapshot several indexes. */
  def indexes(indexes: String*): this.type = {
    request.setIndices(indexes: _*)
    this
  }

  /** Repository/snapshot settings passed through to Elasticsearch. */
  def settings(map: Map[String, AnyRef]): this.type = {
    request.setSettings(map.asJava)
    this
  }
}
/**
 * Builder for a restore-snapshot request restoring snapshot `name` from
 * repository `repo`. All setters mutate the underlying request builder and
 * return `this` for fluent chaining.
 */
case class RestoreSnapshotDefinition(name: String, repo: String) {
  val request = new RestoreSnapshotRequestBuilder(ProxyClients.cluster, repo, name)
  // Returns the underlying Elasticsearch request for execution.
  def build = request.request()

  /** Whether cluster global state stored in the snapshot is restored. */
  def restoreGlobalState(global: Boolean): this.type = {
    request.setRestoreGlobalState(global)
    this
  }

  /** Regex applied to restored index names (used with [[renameReplacement]]). */
  def renamePattern(renamePattern: String): this.type = {
    request.setRenamePattern(renamePattern)
    this
  }

  /** Replacement applied to index names matched by [[renamePattern]]. */
  def renameReplacement(renameReplacement: String): this.type = {
    request.setRenameReplacement(renameReplacement)
    this
  }

  /** Allow restoring indices whose snapshot is incomplete. */
  def partial(partial: Boolean): this.type = {
    request.setPartial(partial)
    this
  }

  /** Whether index aliases are restored along with the indices. */
  def includeAliases(includeAliases: Boolean): this.type = {
    request.setIncludeAliases(includeAliases)
    this
  }

  /** Index settings that should not be restored from the snapshot. */
  def ignoreIndexSettings(ignoreIndexSettings: String*): this.type = {
    request.setIgnoreIndexSettings(ignoreIndexSettings: _*)
    this
  }

  /** Whether the call should block until the restore completes. */
  def waitForCompletion(waitForCompletion: Boolean): this.type = {
    request.setWaitForCompletion(waitForCompletion)
    this
  }

  /** Restore a single index. */
  def index(index: String): this.type = {
    request.setIndices(index)
    this
  }

  /** Restore several indexes. */
  def indexes(indexes: String*): this.type = {
    request.setIndices(indexes: _*)
    this
  }

  /** Restore-time settings passed through to Elasticsearch. */
  def settings(map: Map[String, AnyRef]): this.type = {
    // Uses the file-level scala.collection.JavaConverters import (the original
    // carried a redundant function-local duplicate of it).
    request.setSettings(map.asJava)
    this
  }
}
| alexander-svendsen/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/admin/SnapshotDsl.scala | Scala | apache-2.0 | 6,312 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.javalib
import scala.scalajs.js
import org.scalajs.jasminetest.JasmineTest
import java.lang.Math
/**
 * Jasmine-based test suite for the Scala.js implementation of java.lang.Math,
 * covering the methods not provided natively by JavaScript's Math object
 * (cbrt, rint, log1p, signum, nextUp/nextAfter, ulp, hypot, expm1, hyperbolics).
 */
object MathTest extends JasmineTest {

  describe("java.lang.Math") {

    it("should respond to `cbrt`") {
      // 1 / -0.0 is -Infinity, so this checks cbrt preserves the sign of -0.0.
      expect(1 / Math.cbrt(-0.0) < 0).toBeTruthy
      expect(Math.cbrt(27.0)).toEqual(3.0)
      expect(Math.cbrt(1000000.0)).toEqual(100.0)
      expect(Math.cbrt(1000000000.0)).toEqual(1000.0)
      expect(Math.cbrt(-1.0E24)).toEqual(-100000000.0)
      expect(Math.cbrt(-65890311319.0E24)).toEqual(-4039.0E8)
    }

    unless("rhino"). // js.Math.round() is buggy on Rhino
    it("rint(Double)") {
      import Math.rint

      // Sign-sensitive zero checks: 1/±0.0 distinguishes +0.0 from -0.0.
      def isPosZero(x: Double): Boolean =
        x == 0.0 && (1.0 / x) == Double.PositiveInfinity

      def isNegZero(x: Double): Boolean =
        x == 0.0 && (1.0 / x) == Double.NegativeInfinity

      // Specials
      expect(isPosZero(rint(+0.0))).toBeTruthy
      expect(isNegZero(rint(-0.0))).toBeTruthy
      expect(rint(Double.PositiveInfinity)).toBe(Double.PositiveInfinity)
      expect(rint(Double.NegativeInfinity)).toBe(Double.NegativeInfinity)
      expect(rint(Double.NaN).isNaN).toBeTruthy

      // Positive values — rint rounds half to even (0.5 -> 0, 1.5 -> 2, 2.5 -> 2).
      expect(isPosZero(rint(0.1))).toBeTruthy
      expect(isPosZero(rint(0.5))).toBeTruthy
      expect(rint(0.5000000000000001)).toBe(1.0)
      expect(rint(0.999)).toBe(1.0)
      expect(rint(1.4999999999999998)).toBe(1.0)
      expect(rint(1.5)).toBe(2.0)
      expect(rint(2.0)).toBe(2.0)
      expect(rint(2.1)).toBe(2.0)
      expect(rint(2.5)).toBe(2.0)
      expect(rint(Double.MaxValue)).toBe(Double.MaxValue)
      expect(rint(4503599627370495.5)).toBe(4503599627370496.0) // MaxSafeInt / 2

      // Negative values
      expect(isNegZero(rint(-0.1))).toBeTruthy
      expect(isNegZero(rint(-0.5))).toBeTruthy
      expect(rint(-0.5000000000000001)).toBe(-1.0)
      expect(rint(-0.999)).toBe(-1.0)
      expect(rint(-1.4999999999999998)).toBe(-1.0)
      expect(rint(-1.5)).toBe(-2.0)
      expect(rint(-2.0)).toBe(-2.0)
      expect(rint(-2.1)).toBe(-2.0)
      expect(rint(-2.5)).toBe(-2.0)
      expect(rint(Double.MinValue)).toBe(Double.MinValue)
      expect(rint(-4503599627370495.5)).toBe(-4503599627370496.0) // -MaxSafeInt / 2
    }

    it("should respond to `log1p`") {
      expect(Math.log1p(-2.0).isNaN).toBeTruthy
      expect(Math.log1p(Double.NaN).isNaN).toBeTruthy
      expect(Math.log1p(0.0)).toEqual(0.0)
    }

    it("should respond to `log10`") {
      expect(Math.log10(-230.0).isNaN).toBeTruthy
      expect(Math.log10(Double.NaN).isNaN).toBeTruthy
    }

    it("should respond to `signum` for Double") {
      expect(Math.signum(234394.2198273)).toEqual(1.0)
      expect(Math.signum(-124937498.58)).toEqual(-1.0)

      // signum preserves the sign of zero; 1/±0.0 checks the sign bit.
      expect(Math.signum(+0.0)).toEqual(0.0)
      expect(1 / Math.signum(+0.0) > 0).toBeTruthy

      expect(Math.signum(-0.0)).toEqual(-0.0)
      expect(1 / Math.signum(-0.0) < 0).toBeTruthy

      expect(Math.signum(Double.NaN).isNaN).toBeTruthy
    }

    it("should respond to `signum` for Float") {
      expect(Math.signum(234394.2198273f)).toEqual(1.0f)
      expect(Math.signum(-124937498.58f)).toEqual(-1.0f)

      expect(Math.signum(+0.0f)).toEqual(0.0f)
      expect(1 / Math.signum(+0.0f) > 0).toBeTruthy

      expect(Math.signum(-0.0f)).toEqual(-0.0f)
      expect(1 / Math.signum(-0.0f) < 0).toBeTruthy

      expect(Math.signum(Float.NaN).isNaN).toBeTruthy
    }

    it("should respond to `nextUp` for Double") {
      // NOTE(review): scala.Double.MinValue is -Double.MaxValue, not the
      // smallest positive double. The nextUp(0.0)/nextUp(-0.0) expectations
      // below look like they intend Double.MinPositiveValue (4.9e-324) —
      // confirm against the Scala.js javalib implementation of nextUp.
      expect(Math.nextUp(Double.PositiveInfinity)).toEqual(Double.PositiveInfinity)
      expect(Math.nextUp(Double.NegativeInfinity)).toEqual(-Double.MaxValue)
      expect(Math.nextUp(Double.MaxValue)).toEqual(Double.PositiveInfinity)
      expect(Math.nextUp(-Double.MaxValue)).toEqual(-1.7976931348623155e+308)
      expect(Math.nextUp(-Double.MinValue)).toEqual(Double.PositiveInfinity)
      expect(Math.nextUp(0.0)).toEqual(Double.MinValue)
      expect(Math.nextUp(-0.0)).toEqual(Double.MinValue)
      expect(Math.nextUp(9007199254740991.0)).toEqual(9007199254740992.0)
      expect(Math.nextUp(9007199254740992.0)).toEqual(9007199254740994.0)
      expect(Math.nextUp(1.0)).toEqual(1 + 2.2204460492503130808472633361816E-16)
    }

    it("should respond to `nextAfter` for Double") {
      expect(Math.nextAfter(1.0, Double.NaN).isNaN).toBeTruthy
      expect(Math.nextAfter(Double.NaN, 1.0).isNaN).toBeTruthy
      expect(Math.nextAfter(0.0, 0.0)).toEqual(0.0)
      expect(Math.nextAfter(0.0, -0.0)).toEqual(-0.0)
      expect(Math.nextAfter(-0.0, 0.0)).toEqual(0.0)
      expect(Math.nextAfter(-0.0, -0.0)).toEqual(-0.0)
      expect(Math.nextAfter(Double.MinValue, Double.NegativeInfinity)).toEqual(Double.NegativeInfinity)
      expect(Math.nextAfter(-Double.MinValue, Double.PositiveInfinity)).toEqual(Double.PositiveInfinity)
      expect(Math.nextAfter(Double.PositiveInfinity, Double.NegativeInfinity)).toEqual(Double.MaxValue)
      expect(Math.nextAfter(Double.NegativeInfinity, Double.PositiveInfinity)).toEqual(-Double.MaxValue)
      expect(Math.nextAfter(Double.MaxValue, Double.PositiveInfinity)).toEqual(Double.PositiveInfinity)
      expect(Math.nextAfter(-Double.MaxValue, Double.NegativeInfinity)).toEqual(Double.NegativeInfinity)
      expect(Math.nextAfter(1.0, 1.0)).toEqual(1.0)
    }

    it("should respond to `ulp` for Double") {
      // NOTE(review): as above, ulp(0.0) is the smallest positive double;
      // Double.MinValue here is presumably meant to be Double.MinPositiveValue.
      expect(Math.ulp(3.4)).toEqual(4.440892098500626E-16)
      expect(Math.ulp(3.423E109)).toEqual(4.1718496795330275E93)
      expect(Math.ulp(0.0)).toEqual(Double.MinValue)
    }

    it("should respond to `hypot`") {
      expect(Math.hypot(0.0, 0.0)).toBeCloseTo(0.0)
      expect(Math.hypot(3.0, 4.0)).toBeCloseTo(5.0)
      expect(Math.hypot(3.0, Double.NaN).isNaN).toBeTruthy
      expect(Math.hypot(Double.NegativeInfinity, 4.0)).toEqual(Double.PositiveInfinity)
    }

    it("should respond to `expm1`") {
      // expm1 must preserve the sign of -0.0.
      expect(1 / Math.expm1(-0.0) < 0).toBeTruthy
      expect(Math.expm1(-0.0)).toBeCloseTo(0.0)
      expect(Math.expm1(3.0)).toBeCloseTo(19.085536923187668)
      expect(Math.expm1(15.0)).toBeCloseTo(3269016.3724721107)
      expect(Math.expm1(1.8E10)).toEqual(Double.PositiveInfinity)
      expect(Math.expm1(Double.PositiveInfinity)).toEqual(Double.PositiveInfinity)
      expect(Math.expm1(Double.NegativeInfinity)).toBeCloseTo(-1.0)
      expect(Math.expm1(4.9E-324)).toBeCloseTo(4.9E-324)
    }

    it("should respond to `sinh`") {
      expect(Math.sinh(-1234.56)).toEqual(Double.NegativeInfinity)
      expect(Math.sinh(1234.56)).toEqual(Double.PositiveInfinity)
      expect(Math.sinh(0.0)).toBeCloseTo(0.0)
      expect(Math.sinh(Double.PositiveInfinity)).toEqual(Double.PositiveInfinity)
    }

    it("should respond to `cosh`") {
      expect(Math.cosh(-1234.56)).toEqual(Double.PositiveInfinity)
      expect(Math.cosh(1234.56)).toEqual(Double.PositiveInfinity)
      expect(Math.cosh(-0.0)).toBeCloseTo(1.0)
      expect(Math.cosh(Double.PositiveInfinity)).toEqual(Double.PositiveInfinity)
    }

    it("should respond to `tanh`") {
      expect(Math.tanh(-1234.56)).toBeCloseTo(-1.0)
      expect(Math.tanh(-120.56)).toBeCloseTo(-1.0)
      expect(Math.tanh(1234.56)).toBeCloseTo(1.0)
      expect(Math.tanh(0.0)).toBeCloseTo(0.0)
      expect(Math.tanh(Double.PositiveInfinity)).toBeCloseTo(1.0)
      expect(Math.tanh(Double.NegativeInfinity)).toBeCloseTo(-1.0)
    }

  }
}
| jmnarloch/scala-js | test-suite/src/test/scala/org/scalajs/testsuite/javalib/MathTest.scala | Scala | bsd-3-clause | 7,896 |
/*
* Copyright 2009-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package util
/** A read-only holder of a single value of abstract type `ValueType`. */
trait ValueHolder {
  type ValueType

  /**
   * Get the value. Use get.
   *
   * @deprecated
   */
  @deprecated("Use get", "2.4")
  def is: ValueType

  /**
   * get the value
   */
  def get: ValueType
}
/**
 * A value that can be set
 */
trait Settable extends ValueHolder {
  /** Set the value and return it. */
  def set(in: ValueType): ValueType

  /**
   * Perform an atomic update of this Settable.
   * The current value is passed to the function and the ValueHolder
   * is set to the result of the function. This is enclosed in the
   * performAtomicOperation method which will, by default, synchronize
   * this instance
   */
  def atomicUpdate(f: ValueType => ValueType): ValueType =
    performAtomicOperation(set(f(get)))

  /**
   * Perform an atomic operation on the Settable. By default
   * synchronizes the instance, but it could use other mechanisms
   */
  def performAtomicOperation[T](f: => T): T = synchronized {
    f
  }
}
/** Marker alias: a holder whose value can be mutated via [[Settable]]. */
trait SettableValueHolder extends Settable
/** A [[ValueHolder]] whose value type is fixed by the type parameter `T`. */
trait PValueHolder[T] extends ValueHolder {
  type ValueType = T
}
object PValueHolder {
  /** Implicitly wraps any value in an immutable [[PValueHolder]]. */
  implicit def tToVHT[T](in: T): PValueHolder[T] =
    new PValueHolder[T] {
      def get = in
      def is = get
    }

  /** Explicit factory; delegates to the implicit conversion. */
  def apply[T](in: T) = tToVHT(in)
}
object ValueHolder {
  /** Implicitly wraps any value in an immutable [[ValueHolder]]. */
  implicit def tToVHT[T](in: T): ValueHolder =
    new PValueHolder[T] {
      def get = in
      def is = get
    }

  /** Explicit factory; delegates to the implicit conversion. */
  def apply[T](in: T) = tToVHT(in)
}
/** A typed holder whose value can both be read and set. */
trait PSettableValueHolder[T] extends PValueHolder[T] with SettableValueHolder
/**
 * Kinda like a JavaBean property. It's something that can
 * be set and retrieved
 */
trait LiftValue[T] extends PSettableValueHolder[T] {
  // Implements the deprecated accessor in terms of the preferred `get`.
  def is: T = get
}
| pbrant/framework | core/util/src/main/scala/net/liftweb/util/ValueHolder.scala | Scala | apache-2.0 | 2,242 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
import sbt._
import Keys._
import buildinfo.BuildInfo
/**
 * Central catalogue of dependency versions and dependency groups for the Play
 * framework build. Version pins live at the top; the remaining members group
 * dependencies by subproject (runtime, JDBC, Java support, sbt plugin, docs,
 * streams, WS, cache, test).
 */
object Dependencies {

  // Core upstream version pins shared across the groups below.
  val akkaVersion = "2.5.0"
  val akkaHttpVersion = "10.0.5"
  val playJsonVersion = "2.6.0-M6"

  val logback = "ch.qos.logback" % "logback-classic" % "1.2.3"

  val specsVersion = "3.8.9"
  val specsBuild = Seq(
    "specs2-core",
    "specs2-junit",
    "specs2-mock"
  ).map("org.specs2" %% _ % specsVersion) ++ Seq(logback)
  val specsMatcherExtra = "org.specs2" %% "specs2-matcher-extra" % specsVersion

  val specsSbt = specsBuild

  val jacksons = Seq(
    "com.fasterxml.jackson.core" % "jackson-core",
    "com.fasterxml.jackson.core" % "jackson-annotations",
    "com.fasterxml.jackson.core" % "jackson-databind",
    "com.fasterxml.jackson.datatype" % "jackson-datatype-jdk8",
    "com.fasterxml.jackson.datatype" % "jackson-datatype-jsr310"
  ).map(_ % "2.8.8")

  val playJson = "com.typesafe.play" %% "play-json" % playJsonVersion

  val slf4j = Seq("slf4j-api", "jul-to-slf4j", "jcl-over-slf4j").map("org.slf4j" % _ % "1.7.25")

  val guava = "com.google.guava" % "guava" % "21.0"
  val findBugs = "com.google.code.findbugs" % "jsr305" % "3.0.1" // Needed by guava
  val mockitoAll = "org.mockito" % "mockito-all" % "1.10.19"

  val h2database = "com.h2database" % "h2" % "1.4.194"
  val derbyDatabase = "org.apache.derby" % "derby" % "10.13.1.1"

  val acolyteVersion = "1.0.43-j7p"
  val acolyte = "org.eu.acolyte" % "jdbc-driver" % acolyteVersion

  val jjwt = "io.jsonwebtoken" % "jjwt" % "0.7.0"

  val jdbcDeps = Seq(
    "com.jolbox" % "bonecp" % "0.8.0.RELEASE",
    "com.zaxxer" % "HikariCP" % "2.6.1",
    "com.googlecode.usc" % "jdbcdslog" % "1.0.6.2",
    h2database % Test,
    acolyte % Test,
    "tyrex" % "tyrex" % "1.0.1") ++ specsBuild.map(_ % Test)

  val jpaDeps = Seq(
    "org.hibernate.javax.persistence" % "hibernate-jpa-2.1-api" % "1.0.0.Final",
    "org.hibernate" % "hibernate-entitymanager" % "5.2.9.Final" % "test"
  )

  val scalaJava8Compat = "org.scala-lang.modules" %% "scala-java8-compat" % "0.8.0"

  // scala-parser-combinators moved out of the stdlib in 2.11; nothing is
  // needed for earlier Scala versions.
  def scalaParserCombinators(scalaVersion: String) = CrossVersion.partialVersion(scalaVersion) match {
    case Some((2, major)) if major >= 11 => Seq("org.scala-lang.modules" %% "scala-parser-combinators" % "1.0.5")
    case _ => Nil
  }

  val springFrameworkVersion = "4.3.7.RELEASE"

  val javaDeps = Seq(
    scalaJava8Compat,

    ("org.reflections" % "reflections" % "0.9.11")
      .exclude("com.google.code.findbugs", "annotations")
      .classifier(""),

    // Used by the Java routing DSL
    "net.jodah" % "typetools" % "0.4.9"
  ) ++ specsBuild.map(_ % Test)

  val javaFormsDeps = Seq(

    "org.hibernate" % "hibernate-validator" % "5.4.1.Final",

    ("org.springframework" % "spring-context" % springFrameworkVersion)
      .exclude("org.springframework", "spring-aop")
      .exclude("org.springframework", "spring-beans")
      .exclude("org.springframework", "spring-core")
      .exclude("org.springframework", "spring-expression")
      .exclude("org.springframework", "spring-asm"),

    ("org.springframework" % "spring-core" % springFrameworkVersion)
      .exclude("org.springframework", "spring-asm")
      .exclude("commons-logging", "commons-logging"),

    ("org.springframework" % "spring-beans" % springFrameworkVersion)
      .exclude("org.springframework", "spring-core")

  ) ++ specsBuild.map(_ % Test)

  val junitInterface = "com.novocode" % "junit-interface" % "0.11"
  val junit = "junit" % "junit" % "4.12"

  val javaTestDeps = Seq(
    junit,
    junitInterface,
    "org.easytesting" % "fest-assert" % "1.4",
    mockitoAll,
    logback
  ).map(_ % Test)

  val guiceVersion = "4.1.0"
  val guiceDeps = Seq(
    "com.google.inject" % "guice" % guiceVersion,
    "com.google.inject.extensions" % "guice-assistedinject" % guiceVersion
  )

  def runtime(scalaVersion: String) =
    slf4j ++
    Seq("akka-actor", "akka-slf4j").map("com.typesafe.akka" %% _ % akkaVersion) ++
    Seq("akka-testkit").map("com.typesafe.akka" %% _ % akkaVersion % Test) ++
    jacksons ++
    Seq(
      "commons-codec" % "commons-codec" % "1.10",

      playJson,

      guava,
      jjwt,

      "org.apache.commons" % "commons-lang3" % "3.5",

      "javax.transaction" % "jta" % "1.1",
      "javax.inject" % "javax.inject" % "1",

      "org.scala-lang" % "scala-reflect" % scalaVersion,
      scalaJava8Compat
    ) ++ scalaParserCombinators(scalaVersion) ++
    specsBuild.map(_ % Test) ++
    javaTestDeps

  val nettyVersion = "4.1.8.Final"

  val netty = Seq(
    "com.typesafe.netty" % "netty-reactive-streams-http" % "2.0.0-M1",
    "io.netty" % "netty-transport-native-epoll" % nettyVersion classifier "linux-x86_64"
  ) ++ specsBuild.map(_ % Test)

  val nettyUtilsDependencies = slf4j

  def routesCompilerDependencies(scalaVersion: String) = Seq(
    "commons-io" % "commons-io" % "2.5",
    specsMatcherExtra % Test
  ) ++ specsBuild.map(_ % Test) ++ scalaParserCombinators(scalaVersion)

  // Tags an sbt-plugin dependency with the sbt/Scala binary versions so the
  // correct cross-built artifact is resolved.
  private def sbtPluginDep(sbtVersion: String, scalaVersion: String, moduleId: ModuleID) = {
    moduleId.extra(
      "sbtVersion" -> CrossVersion.binarySbtVersion(sbtVersion),
      "scalaVersion" -> CrossVersion.binaryScalaVersion(scalaVersion)
    )
  }

  val runSupportDependencies = Seq(
    "com.lightbend.play" %% "play-file-watch" % "1.0.0"
  ) ++ specsBuild.map(_ % Test)

  // use partial version so that non-standard scala binary versions from dbuild also work
  def sbtIO(sbtVersion: String, scalaVersion: String): ModuleID = CrossVersion.partialVersion(scalaVersion) match {
    case Some((2, major)) if major >= 11 => "org.scala-sbt" %% "io" % "0.13.15" % "provided"
    case _ => "org.scala-sbt" % "io" % sbtVersion % "provided"
  }

  val jnotify = "net.contentobjects.jnotify" % "jnotify" % "0.94-play-1"

  val typesafeConfig = "com.typesafe" % "config" % "1.3.1"

  def sbtDependencies(sbtVersion: String, scalaVersion: String) = {
    def sbtDep(moduleId: ModuleID) = sbtPluginDep(sbtVersion, scalaVersion, moduleId)

    Seq(
      "org.scala-lang" % "scala-reflect" % scalaVersion % "provided",
      typesafeConfig,
      jnotify,
      sbtDep("com.typesafe.sbt" % "sbt-twirl" % BuildInfo.sbtTwirlVersion),
      sbtDep("com.typesafe.sbt" % "sbt-native-packager" % BuildInfo.sbtNativePackagerVersion),
      sbtDep("com.typesafe.sbt" % "sbt-web" % "1.3.0"),
      sbtDep("com.typesafe.sbt" % "sbt-js-engine" % "1.1.3")
    ) ++ specsBuild.map(_ % Test)
  }

  val playdocWebjarDependencies = Seq(
    "org.webjars" % "jquery" % "2.2.4" % "webjars",
    "org.webjars" % "prettify" % "4-Mar-2013" % "webjars"
  )

  val playDocVersion = "1.8.0"
  val playDocsDependencies = Seq(
    "com.typesafe.play" %% "play-doc" % playDocVersion
  ) ++ playdocWebjarDependencies

  val streamsDependencies = Seq(
    "org.reactivestreams" % "reactive-streams" % "1.0.0",
    "com.typesafe.akka" %% "akka-stream" % akkaVersion,
    scalaJava8Compat
  ) ++ specsBuild.map(_ % Test) ++ javaTestDeps

  val scalacheckDependencies = Seq(
    "org.specs2" %% "specs2-scalacheck" % specsVersion % Test,
    "org.scalacheck" %% "scalacheck" % "1.13.4" % Test
  )

  val playServerDependencies = Seq(
    guava % Test
  ) ++ specsBuild.map(_ % Test)

  val seleniumVersion = "3.3.1"

  val testDependencies = Seq(junit) ++ specsBuild.map(_ % Test) ++ Seq(
    junitInterface,
    guava,
    findBugs,
    "org.fluentlenium" % "fluentlenium-core" % "3.1.1" exclude("org.jboss.netty", "netty"),
    // htmlunit-driver uses an open range to selenium dependencies. This is slightly
    // slowing down the build. So the open range deps were removed and we can re-add
    // them using a specific version. Using an open range is also not good for the
    // local cache.
    "org.seleniumhq.selenium" % "htmlunit-driver" % "2.26" excludeAll(
      ExclusionRule("org.seleniumhq.selenium", "selenium-api"),
      ExclusionRule("org.seleniumhq.selenium", "selenium-support")
    ),
    "org.seleniumhq.selenium" % "selenium-api" % seleniumVersion,
    "org.seleniumhq.selenium" % "selenium-support" % seleniumVersion,
    "org.seleniumhq.selenium" % "selenium-firefox-driver" % seleniumVersion
  ) ++ guiceDeps

  val playCacheDeps = specsBuild.map(_ % Test)

  val ehcacheVersion = "2.6.11"
  val playEhcacheDeps = Seq("net.sf.ehcache" % "ehcache-core" % ehcacheVersion)

  val playWsStandaloneVersion = "1.0.0-M6"
  val playWsDeps = Seq(
    "com.typesafe.play" %% "play-ws-standalone" % playWsStandaloneVersion
  ) ++
    (specsBuild :+ specsMatcherExtra).map(_ % Test) :+
    mockitoAll % Test

  val playAhcWsDeps = Seq(
    "com.typesafe.play" %% "play-ahc-ws-standalone" % playWsStandaloneVersion
  )

  val playDocsSbtPluginDependencies = Seq(
    "com.typesafe.play" %% "play-doc" % playDocVersion
  )

}
/*
* How to use this:
* $ sbt -J-XX:+UnlockCommercialFeatures -J-XX:+FlightRecorder -Dakka-http.sources=$HOME/code/akka-http '; project Play-Akka-Http-Server; test:run'
*
* Make sure Akka-HTTP has 2.12 as the FIRST version (or that scalaVersion := "2.12.1", otherwise it won't find the artifact
* crossScalaVersions := Seq("2.12.1", "2.11.8"),
*/
/**
 * Lets the build depend on Akka-HTTP either as a binary artifact (default)
 * or directly from a source checkout, selected with -Dakka-http.sources=<uri>
 * (see the usage comment above).
 */
object AkkaDependency {
  // Needs to be a URI like git://github.com/akka/akka.git#master or file:///xyz/akka
  val akkaSourceDependencyUri = sys.props.getOrElse("akka-http.sources", "")
  val shouldUseSourceDependency = akkaSourceDependencyUri != ""
  val akkaRepository = uri(akkaSourceDependencyUri)

  implicit class RichProject(project: Project) {
    /** Adds either a source or a binary dependency, depending on whether the above settings are set */
    def addAkkaModuleDependency(module: String, config: String = ""): Project =
      if (shouldUseSourceDependency) {
        // Source mode: depend on the sub-project inside the Akka checkout.
        val moduleRef = ProjectRef(akkaRepository, module)
        val withConfig: ClasspathDependency =
          if (config == "") {
            println("  Using Akka-HTTP directly from sources, from: " + akkaSourceDependencyUri)
            moduleRef
          } else moduleRef % config

        project.dependsOn(withConfig)
      } else {
        // Binary mode: depend on the published artifact at akkaHttpVersion.
        val dep = "com.typesafe.akka" %% module % Dependencies.akkaHttpVersion
        val withConfig =
          if (config == "") dep
          else dep % config
        project.settings(libraryDependencies += withConfig)
      }
  }
}
| hagl/playframework | framework/project/Dependencies.scala | Scala | apache-2.0 | 10,392 |
/**
* Copyright (c) 2013-2015 Patrick Nicolas - Scala for Machine Learning - All rights reserved
*
* The source code in this file is provided by the author for the sole purpose of illustrating the
* concepts and algorithms presented in "Scala for Machine Learning". It should not be used to
* build commercial applications.
* ISBN: 978-1-783355-874-2 Packt Publishing.
* Unless required by applicable law or agreed to in writing, software is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* Version 0.98
*/
package org.scalaml.supervised.regression.linear
// Scala classes
import scala.util.{Try, Success, Failure}
// Third party libraries classes
import org.apache.log4j.Logger
import org.apache.commons.math3.stat.regression.AbstractMultipleLinearRegression
import org.apache.commons.math3.linear.{RealMatrix, RealVector, QRDecomposition, LUDecomposition}
import org.apache.commons.math3.stat.StatUtils
import org.apache.commons.math3.stat.descriptive.moment.SecondMoment
// ScalaML classes
import org.scalaml.core.Types.ScalaMl
import org.scalaml.core.XTSeries
import org.scalaml.core.Design.PipeOperator
import org.scalaml.supervised.regression.RegressionModel
import org.scalaml.util.DisplayUtils
import ScalaMl._
/**
* <p>Definition of the Ridge regression (linear least squares regression
* with a L2 penalty form). The training is executed during the instantiation
* of the class. The minimization of the loss function including the L2 regularization
* method uses a simple QR decomposition, although Cholesky factorization could be also used.
* <pre><span style="font-size:9pt;color: #351c75;font-family: "Helvetica Neue"
* ,Arial,Helvetica,sans-serif;">
* Ridge regression estimate w' = argmin Sum [squares {y(i) - f(x(i)|w)} + lambda.w.w]<br>
* with regression model f(x|w) = w(0) + w(1).x(1) + ... + w(n).x(n)
* Residuals are defined as r(i) = Y(i) - X(i)*A
* Residuals sum of squared error as rss = sqrt(SUM r(i)*r(i))</span></pre></p>
* @constructor Instantiates a Ridge regression model.
* @throws IllegalArgumentException if the class parameters are undefined
* @param xt Time series of features observations
* @param y Target or labeled output values
* @param lambda L2 penalty factor.
* @see org.apache.commons.math3.stat.regression.AbstractMultipleLinearRegression
* @see org.apache.commons.math3.linear._
* @see org.scalaml.core.Design.PipeOperator
*
* @author Patrick Nicolas
* @since April 14, 2014
* @note Scala for Machine Learning Chapter 6 Regression and regularization / Ridge regression
*/
final class RidgeRegression[T <% Double](xt: XTSeries[Array[T]], y: DblVector, lambda: Double)
    extends AbstractMultipleLinearRegression with PipeOperator[Array[T], Double] {
  import RidgeRegression._

  check(xt, y)

  /**
   * Standard type for a feature
   */
  type Feature = Array[T]

  private val logger = Logger.getLogger("RidgeRegression")

  // The linear optimization method (QR, Cholesky...) is selected at run time.
  private[this] var qr: QRDecomposition = _

  /**
   * Model created during training/instantiation of the class. The model is
   * instantiated only if the regression weights can be computed.
   */
  private[this] val model: Option[RegressionModel] = train match {
    case Success(m) => Some(m)
    case Failure(e) => DisplayUtils.none("RidgeRegression model undefined", logger, e)
  }

  /**
   * <p>Retrieve the weights of this Ridge regression model. The vector of the
   * weights is returned if the model has been successfully created (trained).
   * @return weight vector option if the model was successfully trained, None otherwise
   */
  final def weights: Option[DblVector] = model.map(_.weights)

  /**
   * <p>Retrieve the residuals sum of squares RSS of this Ridge regression model. The RSS
   * value is returned if the model has been successfully created (trained).
   * @return rss option if the model was successfully trained, None otherwise
   */
  final def rss: Option[Double] = model.map( _.rss)

  /**
   * <p>Test if the model has been trained and is defined.</p>
   * @return true is the model has been trained, false otherwise
   */
  final def isModel = model != None

  /**
   * <p>Data transformation that predicts the value of a vector input using the Ridge regression.
   * </p>
   * @throws MatchError if the model is undefined or has an incorrect size
   * @return PartialFunction of feature of type Array[T] as input and the predicted value of
   * type Double as output
   */
  override def |> : PartialFunction[Feature, Double] = {
    case x: Feature if( !x.isEmpty && model != None && x.size == model.get.size-1) => {
      // All model weights: w0 (intercept) followed by the slopes w1..wn.
      val w = model.get.weights
      // Y = w0 + w1.x1 + ... + wn.xn: seed the fold with the intercept w0 and
      // pair each feature x(i) with its slope w(i).
      // (Bug fix: the original seeded the fold with weights(0) AFTER drop(1),
      // i.e. with w1 instead of the intercept w0.)
      x.zip(w.drop(1)).foldLeft(w(0))((s, z) => s + z._1*z._2)
    }
  }

  /**
   * <p>Override the newXSampleData method of the Common Math class
   * <b>AbstractMultipleLinearRegression</b>.
   * The purpose is to add a lambda components to the loss function</p>
   * @param x Vector of features to be converted
   */
  override protected def newXSampleData(x: DblMatrix): Unit =  {
    super.newXSampleData(x)
    val xtx: RealMatrix = getX

    // Add the lambda (L2 regularization) component to the loss function
    // by adding lambda to the diagonal of X^t.X
    Range(0, xt(0).size).foreach(i
      => xtx.setEntry(i, i, xtx.getEntry(i, i) + lambda) )

    // Uses a simple QR decomposition
    qr = new QRDecomposition(xtx)
  }

  /**
   * Override the computation of the beta value
   * @return A vector with beta values of type RealVector
   */
  override protected def calculateBeta: RealVector = qr.getSolver().solve(getY())

  /**
   * <p>Override the calculateBetaVariance method of the Common Math class
   * <b>AbstractMultipleLinearRegression</b>.
   * using the QR decomposition</p>
   * @return the matrix of variance of model
   */
  override protected def calculateBetaVariance: RealMatrix = {
    val colDim = getX().getColumnDimension

    // Extracts the matrix of residuals
    val R = qr.getR().getSubMatrix(0, colDim - 1 , 0, colDim - 1)

    // Solve the linear system and compute the inverse matrix
    // using the LU decomposition.
    val Rinv = new LUDecomposition(R).getSolver.getInverse
    Rinv.multiply(Rinv.transpose);
  }

  private def train: Try[RegressionModel] = {
    Try {
      // Invoke Apache Commons Math generation of the X and Y values.
      this.newXSampleData(xt.toDblMatrix)
      newYSampleData(y)

      // Retrieve the residuals from AbstractMultipleLinearRegression class
      // then compute sum of squared errors using a map and sum.
      val _rss = calculateResiduals.toArray.map(x => x*x).sum

      // Extract the tuple (regression weights, residual sum of squared errors)
      val wRss = (calculateBeta.toArray, _rss)
      RegressionModel(wRss._1, wRss._2)
    }
  }

  /**
   * Compute the total sum of squared error. The computation uses
   * the simple sum of squares value from Apache Commons Math, StatsUtils.sumSq
   * in the case of No intercept values, or use the statistical second moment
   * NOTE(review): this private method has no caller in this file — presumably
   * kept for parity with OLS regression; confirm before removing.
   */
  private def calculateTotalSumOfSquares: Double =
    if (isNoIntercept)
      StatUtils.sumSq(getY.toArray)
    else
      (new SecondMoment).evaluate(getY.toArray)
}
/**
* Companion object for the Ridge regression. This singleton is used
* to validate the class parameters.
*
* @author Patrick Nicolas
* @since April 14, 2014
* @note Scala for Machine Learning Chapter 6 Regression and regularization / Ridge regression
*/
object RidgeRegression {

  /**
   * Validates the training inputs: both the feature series and the observed
   * values must be non-empty and of matching length.
   */
  private def check[T <% Double](xt: XTSeries[Array[T]], y: DblVector): Unit = {
    require( !xt.isEmpty,
      "Cannot create Ridge regression model with undefined features")
    require( !y.isEmpty,
      "Cannot create Ridge regression model with undefined observed data")
    require(xt.size == y.size,
      s"Size of the features set ${xt.size} differs for the size of observed data ${y.size}")
  }

  /**
   * Default constructor for the Ridge regression
   * @param xt Time series of features observations
   * @param y Target or labeled output values
   * @param lambda L2 penalty factor.
   */
  def apply[T <% Double](xt: XTSeries[Array[T]], y: DblVector, lambda: Double): RidgeRegression[T] = new RidgeRegression(xt, y, lambda)
}
// -------------------------- EOF ------------------------------- | batermj/algorithm-challenger | books/cs/machine-learning/scala-for-machine-learning/1rst-edition/original-src-from-the-book/src/main/scala/org/scalaml/supervised/regression/linear/RidgeRegression.scala | Scala | apache-2.0 | 8,506 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.iterators
import java.time.{ZoneOffset, ZonedDateTime}
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.jts.geom.Polygon
import org.geotools.data.Query
import org.geotools.data.simple.SimpleFeatureCollection
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo._
import org.locationtech.geomesa.accumulo.iterators.TestData._
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.text.WKTUtils
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.util.{Failure, Success, Try}
/**
 * Runs the same query twice — once through the GeoMesa Accumulo iterators and
 * once by evaluating the filter in memory against the raw features — and
 * asserts both paths return the same number of results.
 */
@RunWith(classOf[JUnitRunner])
class MultiIteratorTest extends Specification with TestWithMultipleSfts with LazyLogging {
  // Examples share data-store state (schemas and features), so keep them ordered.
  sequential
  val spec = SimpleFeatureTypes.encodeType(TestData.featureType, includeUserData = true)
  // Extreme endpoints used to build an effectively unbounded time range.
  val MinDateTime = ZonedDateTime.of(0, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC)
  val MaxDateTime = ZonedDateTime.of(9999, 12, 31, 23, 59, 59, 999000000, ZoneOffset.UTC)
  // noinspection LanguageFeature
  // note: size returns an estimated amount, instead we need to actually count the features
  implicit def collectionToIter(c: SimpleFeatureCollection): SelfClosingIterator[SimpleFeature] = SelfClosingIterator(c)
  /**
   * Builds a query whose filter is the AND of: a geometry intersection (the
   * standard test polygon, or the whole world when overrideGeometry is set),
   * an optional `dtg between` clause, and an optional extra ECQL filter.
   */
  def getQuery(sft: SimpleFeatureType,
               ecqlFilter: Option[String],
               dtFilter: (ZonedDateTime, ZonedDateTime) = null,
               overrideGeometry: Boolean = false): Query = {
    val polygon: Polygon = if (overrideGeometry) {
      org.locationtech.geomesa.utils.geotools.WholeWorldPolygon
    } else {
      WKTUtils.read(TestData.wktQuery).asInstanceOf[Polygon]
    }
    val gf = s"INTERSECTS(geom, ${polygon.toText})"
    // Option(dtFilter) maps a null tuple to None.
    val dt: Option[String] = Option(dtFilter).map { case (start, end) =>
      s"(dtg between '$start' AND '$end')"
    }
    // ANDs an optional clause onto the filter string, when present.
    def red(f: String, og: Option[String]) = og match {
      case Some(g) => s"$f AND $g"
      case None => f
    }
    val tfString = red(red(gf, dt), ecqlFilter)
    val tf = ECQL.toFilter(tfString)
    new Query(sft.getTypeName, tf)
  }
  // Logs expected (in-memory) vs actual (queried) counts; mismatches are
  // logged at error level so failures stand out in the test output.
  def output(f: Filter, filterCount: Int, queryCount: Int): Unit = {
    if (filterCount != queryCount) {
      logger.error(s"Filter: $f expected: $filterCount query: $queryCount")
    } else {
      logger.debug(s"Filter: $f expected: $filterCount query: $queryCount")
    }
  }
  "Mock Accumulo with fullData" should {
    val sft = createNewSchema(spec)
    val features = TestData.fullData.map(createSF(_, sft))
    addFeatures(sft, features)
    val fs = ds.getFeatureSource(sft.getTypeName)
    "return the same result for our iterators" in {
      val q = getQuery(sft, None)
      val filteredCount = features.count(q.getFilter.evaluate)
      val stQueriedCount = fs.getFeatures(q).length
      output(q.getFilter, filteredCount, stQueriedCount)
      stQueriedCount mustEqual filteredCount
    }
    "return a full results-set" in {
      val filterString = "true = true"
      val q = getQuery(sft, Some(filterString))
      val filteredCount = features.count(q.getFilter.evaluate)
      val stQueriedCount = fs.getFeatures(q).length
      output(q.getFilter, filteredCount, stQueriedCount)
      // validate the total number of query-hits
      stQueriedCount mustEqual filteredCount
    }
    "return a partial results-set" in {
      val filterString = """(attr2 like '2nd___')"""
      val q = getQuery(sft, Some(filterString))
      val filteredCount = features.count(q.getFilter.evaluate)
      val stQueriedCount = fs.getFeatures(q).length
      output(q.getFilter, filteredCount, stQueriedCount)
      // validate the total number of query-hits
      stQueriedCount mustEqual filteredCount
    }
  }
  "Mock Accumulo with a small table" should {
    val sft = createNewSchema(spec)
    val features = TestData.shortListOfPoints.map(createSF(_, sft))
    addFeatures(sft, features)
    val fs = ds.getFeatureSource(sft.getTypeName)
    "cover corner cases" in {
      val q = getQuery(sft, None)
      val filteredCount = features.count(q.getFilter.evaluate)
      val stQueriedCount = fs.getFeatures(q).length
      output(q.getFilter, filteredCount, stQueriedCount)
      // validate the total number of query-hits
      // Since we are playing with points, we can count **exactly** how many results we should
      // get back. This is important to check corner cases.
      stQueriedCount mustEqual filteredCount
    }
  }
  "Realistic Mock Accumulo" should {
    val sft = createNewSchema(spec)
    val features = (TestData.shortListOfPoints ++ TestData.geohashHitActualNotHit).map(createSF(_, sft))
    addFeatures(sft, features)
    val fs = ds.getFeatureSource(sft.getTypeName)
    "handle edge intersection false positives" in {
      val q = getQuery(sft, None)
      val filteredCount = features.count(q.getFilter.evaluate)
      val stQueriedCount = fs.getFeatures(q).length
      output(q.getFilter, filteredCount, stQueriedCount)
      // validate the total number of query-hits
      stQueriedCount mustEqual filteredCount
    }
  }
  "Large Mock Accumulo" should {
    val sft = createNewSchema(spec)
    val features = TestData.hugeData.map(createSF(_, sft))
    addFeatures(sft, features)
    val fs = ds.getFeatureSource(sft.getTypeName)
    "return a partial results-set with a meaningful attribute-filter" in {
      val filterString = "(not dtg after 2010-08-08T23:59:59Z) and (not dtg_end_time before 2010-08-08T00:00:00Z)"
      val q = getQuery(sft, Some(filterString))
      val filteredCount = features.count(q.getFilter.evaluate)
      val stQueriedCount = fs.getFeatures(q).length
      output(q.getFilter, filteredCount, stQueriedCount)
      // validate the total number of query-hits
      stQueriedCount mustEqual filteredCount
    }
    "return a filtered results-set with a meaningful time-range" in {
      val filterString = "true = true"
      val dtFilter = (
        ZonedDateTime.of(2010, 8, 8, 0, 0, 0, 0, ZoneOffset.UTC),
        ZonedDateTime.of(2010, 8, 8, 23, 59, 59, 999000000, ZoneOffset.UTC)
      )
      val q = getQuery(sft, Some(filterString), dtFilter)
      val filteredCount = features.count(q.getFilter.evaluate)
      val stQueriedCount = fs.getFeatures(q).length
      output(q.getFilter, filteredCount, stQueriedCount)
      // validate the total number of query-hits
      stQueriedCount mustEqual filteredCount
    }
    "return a filtered results-set with a degenerate time-range" in {
      val filterString = "true = true"
      val dtFilter = (MinDateTime, MaxDateTime)
      val q = getQuery(sft, Some(filterString), dtFilter)
      val filteredCount = features.count(q.getFilter.evaluate)
      val stQueriedCount = fs.getFeatures(q).length
      output(q.getFilter, filteredCount, stQueriedCount)
      // validate the total number of query-hits
      stQueriedCount mustEqual filteredCount
    }
    "return an unfiltered results-set with a global request" in {
      val dtFilter = (MinDateTime, MaxDateTime)
      val q = getQuery(sft, None, dtFilter, overrideGeometry = true)
      val filteredCount = features.count(q.getFilter.evaluate)
      val stQueriedCount = fs.getFeatures(q).length
      output(q.getFilter, filteredCount, stQueriedCount)
      // validate the total number of query-hits
      stQueriedCount mustEqual filteredCount
    }
  }
  "non-point geometries" should {
    val sft = createNewSchema(spec)
    // Four quadrant polygons plus one covering all quadrants, centered on the origin.
    val wkts = Seq[String](
      "POLYGON((-10 -10, -10 10, 10 10, 10 -10, -10 -10))",
      "POLYGON((-10 -10, -10 0, 0 0, 0 -10, -10 -10))",
      "POLYGON((0 0, 0 10, 10 10, 10 0, 0 0))",
      "POLYGON((-10 0, -10 10, 0 10, 0 0, -10 0))",
      "POLYGON((0 0, 10 0, 10 -10, 0 -10, 0 0))"
    )
    val features: Seq[SimpleFeature] = wkts.zipWithIndex.map {
      case (wkt, i) => createSF(Entry(wkt, s"fid_$i"), sft)
    }
    addFeatures(sft, features)
    val fs = ds.getFeatureSource(sft.getTypeName)
    // Runs the query and compares counts; returns false (instead of throwing)
    // on any failure so the caller can assert on the boolean.
    def doesQueryRun(filterString: String, optExpectedCount: Option[Int] = None): Boolean = {
      logger.debug(s"Odd-point query filter: $filterString")
      val outcome = Try {
        val q = getQuery(sft, Some(filterString), overrideGeometry = true)
        val filteredCount = features.count(q.getFilter.evaluate)
        val stQueriedCount = fs.getFeatures(q).length
        output(q.getFilter, filteredCount, stQueriedCount)
        val expectedCount = optExpectedCount.getOrElse(filteredCount)
        logger.debug(s"Query:\\n $filterString\\n Expected count: $optExpectedCount -> $expectedCount" +
                     s"\\n Filtered count: $filteredCount\\n ST-queried count: $stQueriedCount")
        // validate the total number of query-hits
        filteredCount == expectedCount && stQueriedCount == expectedCount
      }
      outcome match {
        case Success(result) => result
        case Failure(ex) =>
          logger.error(ex.getStackTrace.mkString("\\n"))
          false
      }
    }
    "perform query variants that include correctly" in {
      doesQueryRun("CONTAINS(geom, POINT(0.0 0.0))", Option(1)) must beTrue
      doesQueryRun("INTERSECTS(geom, POINT(0.0 0.0))") must beTrue
      doesQueryRun("INTERSECTS(POINT(0.0 0.0), geom)") must beTrue
    }
  }
}
| elahrvivaz/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/iterators/MultiIteratorTest.scala | Scala | apache-2.0 | 9,960 |
// scalac: -Yimports:hello.world.minidef
// Fixture for a negative compiler test of custom root imports: with the
// `-Yimports` flag above, `Numb` and `Magic` are expected to resolve (or fail
// to resolve) via the `hello.world.minidef` import scheme. Do not "fix" the
// unresolved names — the compile outcome is the test.
class C {
  val v: Numb = Magic
  def greet() = println("hello, world!")
}
| lrytz/scala | test/files/neg/yimports-custom/C_2.scala | Scala | apache-2.0 | 117 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.data.store
import io.prediction.data.storage.Storage
import io.prediction.data.storage.Event
import org.joda.time.DateTime
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
/** Provides direct, blocking access to the Event Store without going through
  * Spark's parallelization. Intended for fast event lookups from an
  * algorithm's predict() or from serving logic.
  */
object LEventStore {

  /** Maximum time to wait for the storage backend to answer, by default. */
  private val defaultTimeout = Duration(60, "seconds")

  /** Lazily-initialized handle to the event storage backend. */
  @transient private lazy val eventsDb = Storage.getLEvents()

  /** Reads events of the specified entity, blocking until the result is
    * available or the timeout expires. May be used in an Algorithm's predict()
    * or Serving logic for fast event store access.
    *
    * @param appName return events of this app
    * @param entityType return events of this entityType
    * @param entityId return events of this entityId
    * @param channelName return events of this channel (default channel if it's None)
    * @param eventNames return events with any of these event names.
    * @param targetEntityType return events of this targetEntityType:
    *        - None means no restriction on targetEntityType
    *        - Some(None) means no targetEntityType for this event
    *        - Some(Some(x)) means targetEntityType should match x.
    * @param targetEntityId return events of this targetEntityId
    *        - None means no restriction on targetEntityId
    *        - Some(None) means no targetEntityId for this event
    *        - Some(Some(x)) means targetEntityId should match x.
    * @param startTime return events with eventTime >= startTime
    * @param untilTime return events with eventTime < untilTime
    * @param limit Limit number of events. Get all events if None or Some(-1)
    * @param latest Return latest event first (default true)
    * @param timeout maximum time to block waiting for the result
    * @return Iterator[Event]
    */
  def findByEntity(
    appName: String,
    entityType: String,
    entityId: String,
    channelName: Option[String] = None,
    eventNames: Option[Seq[String]] = None,
    targetEntityType: Option[Option[String]] = None,
    targetEntityId: Option[Option[String]] = None,
    startTime: Option[DateTime] = None,
    untilTime: Option[DateTime] = None,
    limit: Option[Int] = None,
    latest: Boolean = true,
    timeout: Duration = defaultTimeout): Iterator[Event] = {
    // Resolve the (app name, channel name) pair into storage ids.
    val (appId, channelId) = Common.appNameToId(appName, channelName)
    // Entity type and id are mandatory here, so the query is always keyed.
    val futureResult = eventsDb.futureFind(
      appId = appId,
      channelId = channelId,
      startTime = startTime,
      untilTime = untilTime,
      entityType = Some(entityType),
      entityId = Some(entityId),
      eventNames = eventNames,
      targetEntityType = targetEntityType,
      targetEntityId = targetEntityId,
      limit = limit,
      reversed = Some(latest))
    Await.result(futureResult, timeout)
  }

  /** Reads events generically, blocking until the result is available or the
    * timeout expires. If entityType or entityId is not specified, this results
    * in a table scan.
    *
    * @param appName return events of this app
    * @param entityType return events of this entityType
    *        - None means no restriction on entityType
    *        - Some(x) means entityType should match x.
    * @param entityId return events of this entityId
    *        - None means no restriction on entityId
    *        - Some(x) means entityId should match x.
    * @param channelName return events of this channel (default channel if it's None)
    * @param eventNames return events with any of these event names.
    * @param targetEntityType return events of this targetEntityType:
    *        - None means no restriction on targetEntityType
    *        - Some(None) means no targetEntityType for this event
    *        - Some(Some(x)) means targetEntityType should match x.
    * @param targetEntityId return events of this targetEntityId
    *        - None means no restriction on targetEntityId
    *        - Some(None) means no targetEntityId for this event
    *        - Some(Some(x)) means targetEntityId should match x.
    * @param startTime return events with eventTime >= startTime
    * @param untilTime return events with eventTime < untilTime
    * @param limit Limit number of events. Get all events if None or Some(-1)
    * @param timeout maximum time to block waiting for the result
    * @return Iterator[Event]
    */
  def find(
    appName: String,
    entityType: Option[String] = None,
    entityId: Option[String] = None,
    channelName: Option[String] = None,
    eventNames: Option[Seq[String]] = None,
    targetEntityType: Option[Option[String]] = None,
    targetEntityId: Option[Option[String]] = None,
    startTime: Option[DateTime] = None,
    untilTime: Option[DateTime] = None,
    limit: Option[Int] = None,
    timeout: Duration = defaultTimeout): Iterator[Event] = {
    val (appId, channelId) = Common.appNameToId(appName, channelName)
    val futureResult = eventsDb.futureFind(
      appId = appId,
      channelId = channelId,
      startTime = startTime,
      untilTime = untilTime,
      entityType = entityType,
      entityId = entityId,
      eventNames = eventNames,
      targetEntityType = targetEntityType,
      targetEntityId = targetEntityId,
      limit = limit)
    Await.result(futureResult, timeout)
  }
}
| beni55/PredictionIO | data/src/main/scala/io/prediction/data/store/LEventStore.scala | Scala | apache-2.0 | 5,612 |
package com.socrata.geospace.lib.shapefile
import java.io.{IOException, File}
import com.socrata.geospace.lib.Utils._
import com.socrata.geospace.lib.errors.InvalidShapefileSet
import org.apache.commons.io.FilenameUtils
import org.geoscript.projection._
import org.geoscript.feature._
import org.geoscript.projection.Projection
import scala.util.{Success, Failure, Try}
/**
* Shape file reader but for the case where we have multiple layers.
* Notable is that we can also pass a different projection if we so desire.
* If not, then default is take to be "EPSG:4326"
*
* <p> Note: As currently used each layer (distinguished by their namespace) will
* result in a features and schema tuple that will be used and ingested as desired.
* For socrata internally that means each layer will be assigned to a 4x4 that in turn
* will be referenced in a parent's 4x4 view's metadata. The metadata will have a map
* named layers that will map a namepsace to a 4x4. The choice of mapping will help in
* updating consistently and adding new layers.</p>
* @param projectionString
* @param forceLatLon
*/
case class MultiLayerShapefileReader(projectionString: String = ShapeFileConstants.StandardProjection,
                                     forceLatLon: Boolean) extends ShapeReader {

  // Resolve the target projection eagerly; an unresolvable projection string
  // is a configuration error, so fail at construction time.
  val projection = getTargetProjection(projectionString, forceLatLon).fold(throw _, x => x )

  // Layer name -> (features, schema) for every layer ingested from the set.
  type IngestResultMap = Map[String, (Traversable[Feature], Schema)]

  /**
   * Validates a shapefile and extracts its contents.
   * @param directory Directory containing the set of files that make up the shapefile
   * @return The features and schema per layer, or the validation/parse error
   */
  def read(directory: File): Either[InvalidShapefileSet, IngestResultMap] = {
    // Validate first; only a well-formed layout is handed to content extraction.
    validate(directory).right.flatMap(getContents(_))
  }

  /**
   * Validates that the shapefile directory contains the expected set of files and nothing else.
   * @param directory Directory containing the set of files that make up the shapefile
   * @return the files grouped by base name (one group per layer), or the error
   */
  def validate(directory: File): Either[InvalidShapefileSet, Map[String, Array[File]]] = {
    // TODO : Should we just let the Geotools shapefile parser throw an (albeit slightly more ambiguous) error?
    logMemoryUsage("Before validating shapefile zip contents")
    logger.info("Validating shapefile zip contents")
    // Some file systems add hidden files; ignore them.
    val files = directory.listFiles.filter(!_.isHidden)
    // 1. All files in the set must have the same prefix (eg. foo.shp, foo.shx,...).
    // 2. All required file types should be in the zip
    val namedGroups = files.groupBy { f => FilenameUtils.getBaseName(f.getName) }
    if (namedGroups.isEmpty) {
      Left(InvalidShapefileSet("Expected at least a single set of consistently named shapefiles"))
    } else {
      // Collect one message per required extension missing from any group.
      val errors = for {
        (name, array) <- namedGroups
        rf <- ShapeFileConstants.RequiredFiles
        error <- getFileFromArray(array, rf).left.toSeq
      } yield "FileName: " + name + " - error: " + error
      if (errors.isEmpty) {
        Right(namedGroups)
      } else {
        Left(InvalidShapefileSet(errors.mkString("; ")))
      }
    }
  }

  /**
   * From an array of files, looks for a file with a given extension and returns success if found.
   * @param directory Array to look for files with the given extension
   * @param extension The desired file extension
   * @return The first file in the array that matches the specified extension, or an error if none match.
   */
  def getFileFromArray(directory: Array[File], extension: String): Either[InvalidShapefileSet, File] = {
    directory.find { f =>
      FilenameUtils.getExtension(f.getName).equals(extension)
    }.toRight(InvalidShapefileSet(s".$extension file is missing"))
  }

  /**
   * Extracts the contents of a shapefile.
   * Assumes that validate() has already been called on the shapefile contents.
   * @param map Layer name -> files belonging to that layer
   * @return The shapefile features and schema per layer, reprojected to WGS84.
   */
  def getContents(map: Map[String, Array[File]]): Either[InvalidShapefileSet, IngestResultMap] = {
    logMemoryUsage("Before reading Shapefile...")
    // Parse every layer independently so all failures can be reported at once.
    val result = map.transform { (name, array) => parseShape(name, array) }
    // Collect the error message of every layer that failed to parse.
    val errors = result.collect { case (name, Left(failure)) => name -> failure.message }
    if (errors.isEmpty) {
      // Safe: no Lefts remain at this point, so every value is a Right.
      Right(result.transform((name, shapeResult) => shapeResult.right.get))
    } else {
      Left(InvalidShapefileSet(errors.mkString("; \n")))
    }
  }

  /**
   * Parses a single layer: locates the .shp file in the group and reprojects
   * its contents into the target projection.
   */
  def parseShape(name: String, array: Array[File]): Either[InvalidShapefileSet, (Traversable[Feature], Schema)] = {
    val contents = for {
      shp <- getFileFromArray(array, ShapeFileConstants.ShapeFormat).fold(Failure(_), Success(_))
      proj <- Try(doProjections(projection, shp))
    } yield proj
    contents match {
      case Success(c) =>
        Right(c)
      case Failure(e: Exception) =>
        // Fix: the log pattern previously began with a stray escaped quote.
        logger.warn("Reader failed to parse shape layer {}. -> {}", name, e.getMessage)
        Left(InvalidShapefileSet("Reader failed to parse shape layer '%s'. -> %s".format(name, e.getMessage)))
      case Failure(e) =>
        // Non-Exception Throwables (fatal errors) are deliberately rethrown.
        throw e
    }
  }
}
object MultiLayerShapefileReader {
  /** Convenience factory: accepts a resolved [[Projection]] and delegates to
    * the primary constructor using the projection's identifier string. */
  def apply(projection: Projection, forceLatLon: Boolean): MultiLayerShapefileReader = {
    new MultiLayerShapefileReader(projection.id, forceLatLon)
  }
}
| socrata-platform/geospace | src/main/scala/com/socrata/geospace/lib/shapefile/MultiLayerShapefileReader.scala | Scala | apache-2.0 | 5,993 |
package kvstore
import akka.actor.{ OneForOneStrategy, Props, ActorRef, Actor }
import kvstore.Arbiter._
import scala.collection.immutable.Queue
import akka.actor.SupervisorStrategy.Restart
import scala.annotation.tailrec
import akka.pattern.{ ask, pipe }
import akka.actor.Terminated
import scala.concurrent.duration._
import akka.actor.PoisonPill
import akka.actor.OneForOneStrategy
import akka.actor.SupervisorStrategy
import akka.util.Timeout
object Replica {
  // Client -> replica requests; `id` correlates the eventual reply.
  sealed trait Operation {
    def key: String
    def id: Long
  }
  case class Insert(key: String, value: String, id: Long) extends Operation
  case class Remove(key: String, id: Long) extends Operation
  case class Get(key: String, id: Long) extends Operation
  // Replica -> client responses, correlated to a request by its id.
  sealed trait OperationReply
  case class OperationAck(id: Long) extends OperationReply
  case class OperationFailed(id: Long) extends OperationReply
  case class GetResult(key: String, valueOption: Option[String], id: Long) extends OperationReply
  // Props factory so callers never construct the actor class directly.
  def props(arbiter: ActorRef, persistenceProps: Props): Props = Props(new Replica(arbiter, persistenceProps))
}
// Skeleton replica actor: after being assigned a role by the arbiter it
// becomes either the primary (leader) or a secondary (replica). The
// role-specific behaviors below are unimplemented placeholders.
class Replica(val arbiter: ActorRef, persistenceProps: Props) extends Actor {
  import Replica._
  import Replicator._
  import Persistence._
  import context.dispatcher
  /*
   * The contents of this actor is just a suggestion, you can implement it in any way you like.
   */
  // In-memory key-value state.
  var kv = Map.empty[String, String]
  // a map from secondary replicas to replicators
  var secondaries = Map.empty[ActorRef, ActorRef]
  // the current set of replicators
  var replicators = Set.empty[ActorRef]
  // Initial behavior: switch to the role assigned by the arbiter.
  def receive = {
    case JoinedPrimary   => context.become(leader)
    case JoinedSecondary => context.become(replica)
  }
  /* TODO Behavior for the leader role. */
  val leader: Receive = {
    case _ =>
  }
  /* TODO Behavior for the replica role. */
  val replica: Receive = {
    case _ =>
  }
}
| mitochon/hexercise | src/mooc/reactive/week6.kvstore.orig/src/main/scala/kvstore/Replica.scala | Scala | mit | 1,896 |
/*
* Copyright (c) 2012-2013 SnowPlow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.spark
// Spark
import org.apache.spark.SparkContext
object WordCountJob {
  /** Entry point: runs the word-count job with the jar that contains this
    * object, then exits the JVM with status 0. */
  def main(args: Array[String]): Unit = {
    // The job ships the jar this object was loaded from.
    val jobJars = List(SparkContext.jarOfObject(this).get)
    // Run the word count
    WordCount.execute(master = None, args = args.toList, jars = jobJars)
    // Exit with success
    System.exit(0)
  }
}
| snowplow/spark-example-project | src/main/scala/com/snowplowanalytics/spark/WordCountJob.scala | Scala | apache-2.0 | 1,074 |
// Copyright (C) Dialectics 2016
package org.scamas.smp
import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import org.scamas.core.{Halt, Start}
import scala.concurrent.Await
import scala.concurrent.duration._
/**
* Main app to test centralized Gale-Shapley solver
* */
object Main {
  // When true, print the computed matching to stdout.
  val debug= true
  val system = ActorSystem("SMPDemonstration")//The Actor system
  // NOTE(review): `system` is created but never terminated and is not visibly
  // passed to SMPSolver — confirm the solver actually uses it, and whether the
  // JVM exits cleanly without an explicit shutdown/terminate.
  val TIMEOUTVALUE=50 seconds// default timeout of a run
  implicit val timeout = Timeout(TIMEOUTVALUE)// TODO make default Duration.Inf
  /**
   * Run the Actor system
   */
  def main(args: Array[String]): Unit ={
    // Preference lists — presumably most-preferred first; TODO confirm the
    // ordering convention expected by Individual.
    val x1= new Individual("x1",Array("y2","y1","y3"))
    val x2= new Individual("x2",Array("y3","y2","y1"))
    val x3= new Individual("x3",Array("y1","y3","y2"))
    val men= List(x1,x2,x3)
    val y1= new Individual("y1",Array("x2","x1","x3"))
    val y2= new Individual("y2",Array("x3","x2","x1"))
    val y3= new Individual("y3",Array("x1","x3","x2"))
    val women= List(y1,y2,y3)
    // Launch a new system
    val solver= new SMPSolver(men, women)
    solver.run()
    if (debug) println(solver.solution)
    println("That's all folk ! ")
  }
}
| DavidSummer/Scamas | src/main/scala/org/scamas/smp/Main.scala | Scala | gpl-3.0 | 1,184 |
package at.logic.gapt.expr.fol
import at.logic.gapt.expr._
object FOLMatchingAlgorithm {
  /**
   * Computes a FOLSubstitution that turns term from into term to, if one exists.
   *
   * @param from A LambdaExpression.
   * @param to A LambdaExpression.
   * @param forbiddenVars A set of variables that cannot be in the domain of the FOLSubstitution. Defaults to the empty set.
   * @return If there is a variable FOLSubstitution that turns from into to and doesn't contain any elements of forbiddenVars, it is returned. Otherwise None.
   */
  def matchTerms( from: FOLExpression, to: FOLExpression, forbiddenVars: Set[FOLVar] = Set() ): Option[FOLSubstitution] = {
    // Pin each forbidden variable to itself so matching cannot rebind it.
    val pinned = forbiddenVars.map( v => v -> v ).toMap
    syntacticMatching( List( from -> to ), pinned )
  }

  /** Matches each pair's first component onto its second, simultaneously. */
  def matchTerms( pairs: List[( FOLExpression, FOLExpression )] ): Option[FOLSubstitution] =
    syntacticMatching( pairs )
}
| loewenheim/gapt | src/main/scala/at/logic/gapt/expr/fol/FOLMatchingAlgorithm.scala | Scala | gpl-3.0 | 880 |
package com.codeseq.mcdc
import akka.actor.{ActorSystem, Actor, ActorRef, ExtensionId, Props}
import akka.io.IO.Extension
import akka.io.Tcp
import akka.testkit.{TestKit, TestProbe, ImplicitSender}
import java.net.InetSocketAddress
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
/**
 * Exercises MemcachedClientActor's TCP connection lifecycle (connect,
 * register, reconnect) using a TestProbe standing in for the akka-io TCP
 * extension. The `//{n` / `//}n` markers are editor fold markers.
 */
class ioSpec extends TestKit(ActorSystem("ioSpec")) with ImplicitSender
                                                    with WordSpecLike
                                                    with BeforeAndAfterAll
                                                    with Matchers {
  import MemcachedClientActor._
  // Remote host/port the client is configured to connect to.
  val gHost = "host"
  val gPort = 11211
  val remoteAddr = new InetSocketAddress(gHost, gPort)
  val localAddr = new InetSocketAddress("localhost", 10000)
  // Overrides the production IO lookup so the actor under test talks to a
  // TestProbe instead of the real akka-io TCP extension.
  trait TestIOProvider extends IOPRovider {
    val ioRef: ActorRef
    def io[T <: Extension](key: ExtensionId[T])(implicit system: ActorSystem): ActorRef = ioRef
  }
  // Fresh system-under-test per example: the client actor wired to probes.
  class SUT {
    val probe = TestProbe()
    val ioActor = TestProbe()
    val memcache = system.actorOf(Props(new MemcachedClientActor with TestIOProvider {
      lazy val host = gHost
      lazy val port = gPort
      lazy val ioRef: ActorRef = ioActor.ref
    }))
  }
  override def afterAll() {
    system.shutdown()
  }
  // Plays the expected connect handshake: Connect -> Connected -> Register.
  def handleConnect(probe: TestProbe, memcache: ActorRef): Unit = {
    probe.expectMsg(Tcp.Connect(remoteAddr))
    probe.reply(Tcp.Connected(remoteAddr, localAddr))
    probe.expectMsg(Tcp.Register(memcache))
  }
  "MemcachedClientActor" should { //{1
    "ask to connect on startup" in new SUT { //{2
      ioActor.expectMsg(Tcp.Connect(remoteAddr))
    } //}2
    "send a notification after connect" in new SUT { //{2
      memcache ! EventPublisher.Register(probe.ref)
      handleConnect(ioActor, memcache)
      probe.expectMsg(ConnectionUp(gHost, gPort))
    } //}2
    "send a notification even when connected" in new SUT { //{2
      handleConnect(ioActor, memcache)
      memcache ! EventPublisher.Register(probe.ref)
      probe.expectMsg(ConnectionUp(gHost, gPort))
    } //}2
    "send a notification even when connection closes" in new SUT { //{2
      handleConnect(ioActor, memcache)
      memcache ! EventPublisher.Register(probe.ref)
      probe.expectMsg(ConnectionUp(gHost, gPort))
      memcache ! Tcp.Closed
      probe.expectMsg(ConnectionDown(gHost, gPort))
    } //}2
    "try to reconnect on close" in new SUT { //{2
      handleConnect(ioActor, memcache)
      memcache ! Tcp.Closed
      handleConnect(ioActor, memcache)
    } //}2
  } //}1
}
| derekwyatt/akka-memcached | src/test/scala/com/codeseq/mcdc/ioSpec.scala | Scala | apache-2.0 | 2,544 |
package de.kalass.batchmonads.base.impl
private[base] class BaseOperationBatchProcessor extends BatchProcessor {

    /**
     * Adds a base operation to the batch that contains all base operations that were created by the same batch operation
     * and returns the modified map.
     */
    def addOperationToBatchMap(map: Map[BatchOperation[_, _], BaseOperationBatch[_, _]], opWithIdx: (BaseOperation[_,_], Int)) = {
        val (op, idx) = opWithIdx
        // Append to the existing batch for this operation's creator, or start a new one.
        val batch = map.get(op.creator) match {
            case Some(oldBatcher) => oldBatcher.add(op, idx)
            case None => new BaseOperationBatch(op, idx)
        }
        // Fix: use the immutable '+' instead of Map.update, which is not
        // available on immutable maps in current Scala versions.
        map + (op.creator -> batch)
    }

    /**
     * Partitions the incoming operations into base operations (handled here)
     * and the rest, groups the base operations by their creating batch
     * operation, executes each batch, and reports the remaining operations
     * back to the caller.
     */
    protected[base] def execute(operationsWithIndices: List[Tuple2[Operation[_], Int]]): BatchProcessorResult = {
        val (selectedOperations, remaining) = Util.partition[Operation[_], BaseOperation[_,_]](operationsWithIndices, {case op: BaseOperation[_,_] => op})
        // Group the selected base operations by the batch operation that created them.
        val batcherList = selectedOperations.foldLeft(Map[BatchOperation[_, _], BaseOperationBatch[_, _]]()) (addOperationToBatchMap _).values.toList
        // execute the batchers: each batch operation is called with all base operations that were previously created by it
        val results = batcherList.flatMap(_.execute())
        BatchProcessorResult(new BaseOperationBatchProcessor(), remaining, results)
    }
}
| kkalass/BatchMonads | src/de/kalass/batchmonads/base/impl/BaseOperationBatchProcessor.scala | Scala | lgpl-3.0 | 1,369 |
package org.eknet.publet.webdav.pvfs
import org.eknet.publet.vfs.{Writeable, Modifyable, ContentResource}
import org.eknet.publet.webdav.{DavContainerResource, WebdavResource}
import io.milton.resource.{CollectionResource, CopyableResource}
import io.milton.http.exceptions.{BadRequestException, ConflictException}
/**
* @author Eike Kettner eike.kettner@gmail.com
* @since 27.06.12 23:23
*/
trait ContentCopy extends CopyableResource {
  this: DelegateResource[ContentResource] =>

  /**
   * Copies this resource's content into a new resource named `name` inside
   * the target collection.
   *
   * @throws ConflictException if a resource with that name already exists
   * @throws BadRequestException if the target cannot be created/written, or
   *                             the target collection is not a publet container
   */
  def copyTo(toCollection: CollectionResource, name: String) {
    toCollection match {
      case wd: DavContainerResource => {
        val r = wd.resource.content(name)
        if (r.exists) throw new ConflictException(WebdavResource(r))
        else r match {
          // Compound type pattern replaces the former isInstanceOf guard +
          // asInstanceOf cast; matches exactly when the target is both
          // modifyable and writeable.
          case mr: Modifyable with Writeable => {
            mr.create()
            mr.writeFrom(resource.inputStream)
          }
          case _ => throw new BadRequestException("Resource not modifyable: "+r)
        }
      }
      case _ => throw new BadRequestException("Unable to copy resource to unknown collection: "+ toCollection)
    }
  }
}
| eikek/publet | webdav/src/main/scala/org/eknet/publet/webdav/pvfs/ContentCopy.scala | Scala | apache-2.0 | 1,149 |
package satisfaction
package hadoop
package hive.ms
import org.apache.hadoop.hive.ql.metadata.Partition
import hive.ms._
import org.joda.time._
/**
* Data instance for a set of Hive Partitions
* which were probably created by a Hive query
* with dynamic partitioning
*/
/// XXX Better name ??? HivePartitionGroup vs HivePartitinSet ????
case class HivePartitionSet(
    val partitionSet: Set[HiveTablePartition])
     extends DataInstance with Markable {
  // Combined size: sum of the sizes of all member partitions.
  def size: Long = {
    partitionSet.map(_.size).sum
  }
  // NOTE(review): the accessors below read `partitionSet.toSeq.head`. Set
  // ordering is unspecified, so the chosen partition is arbitrary, and an
  // empty set throws NoSuchElementException — confirm both are acceptable.
  def created: DateTime = {
    partitionSet.toSeq.head.created
  }
  /// SIC ... is that OK ???
  def lastAccessedTime: DateTime = lastModifiedTime
  def lastModifiedTime: DateTime = {
    partitionSet.toSeq.head.lastModifiedTime
  }
  // NOTE(review): also returns the modification time (not an access time) of
  // an arbitrary partition — mirrors lastAccessedTime above; confirm intended.
  def lastAccessed: DateTime = {
    partitionSet.toSeq.head.lastModifiedTime
  }
  def lastModifiedBy: String = {
    partitionSet.toSeq.head.lastModifiedBy
  }
  /**
   * Mark that the producer of this
   * DataInstance fully completed .
   * Marks every member partition.
   */
  def markCompleted : Unit = {
    partitionSet.foreach( _.markCompleted)
  }
  // Clears the completion marker on every member partition.
  def markIncomplete : Unit = {
    partitionSet.foreach( _.markIncomplete )
  }
  /**
   * Check that the Data instance has been Marked completed,
   * according to the test of the markable.
   * True only if every member partition is marked completed.
   */
  def isMarkedCompleted : Boolean = {
    partitionSet.forall( _.isMarkedCompleted )
  }
} | ifwe/satisfaction | modules/hive-ms/src/main/scala/satisfaction/hive/ms/HivePartitionSet.scala | Scala | apache-2.0 | 1,486 |
package mesosphere.marathon.core.task.tracker.impl.steps
import javax.inject.Named
import akka.event.EventStream
import com.google.inject.Inject
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.marathon.core.task.tracker.TaskStatusUpdateStep
import mesosphere.marathon.event.{ EventModule, MesosStatusUpdateEvent }
import mesosphere.marathon.state.{ PathId, Timestamp }
import org.apache.mesos.Protos.TaskState.{
TASK_ERROR,
TASK_FAILED,
TASK_FINISHED,
TASK_KILLED,
TASK_LOST,
TASK_RUNNING
}
import org.apache.mesos.Protos.{ TaskState, TaskStatus }
import org.slf4j.LoggerFactory
import scala.concurrent.Future
/**
  * Post this update to the internal event stream.
  *
  * Events are published for terminal task states and for the first RUNNING
  * update of a task that has not started yet (staging); every other update
  * is only logged at debug level.
  */
class PostToEventStreamStepImpl @Inject() (
    @Named(EventModule.busName) eventBus: EventStream) extends TaskStatusUpdateStep {
  private[this] val log = LoggerFactory.getLogger(getClass)
  override def name: String = "postTaskStatusEvent"
  override def processUpdate(
    timestamp: Timestamp, appId: PathId, task: MarathonTask, status: TaskStatus): Future[_] = {
    status.getState match {
      // Terminal states are always published.
      case TASK_ERROR | TASK_FAILED | TASK_FINISHED | TASK_KILLED | TASK_LOST =>
        postEvent(timestamp, appId, status, task)
      case TASK_RUNNING if !task.hasStartedAt => // staged, not running
        postEvent(timestamp, appId, status, task)
      case state: TaskState =>
        val taskId = status.getTaskId.getValue
        log.debug(s"Do not post event $state for [$taskId] of app [$appId].")
    }
    // Publishing on the event bus is fire-and-forget; the step completes immediately.
    Future.successful(())
  }
  private[this] def postEvent(timestamp: Timestamp, appId: PathId, status: TaskStatus, task: MarathonTask): Unit = {
    log.info(
      "Sending event notification for task [{}] of app [{}]: {}",
      // slf4j varargs must be expanded from an Array[Object]
      Array[Object](task.getId, appId, status.getState): _*
    )
    import scala.collection.JavaConverters._
    eventBus.publish(
      MesosStatusUpdateEvent(
        status.getSlaveId.getValue,
        status.getTaskId.getValue,
        status.getState.name,
        if (status.hasMessage) status.getMessage else "",
        appId,
        task.getHost,
        task.getPortsList.asScala,
        task.getVersion,
        timestamp = timestamp.toString
      )
    )
  }
}
| Kosta-Github/marathon | src/main/scala/mesosphere/marathon/core/task/tracker/impl/steps/PostToEventStreamStepImpl.scala | Scala | apache-2.0 | 2,226 |
package mimir.util
import org.specs2.specification._
import org.specs2.specification.core.Fragments
import org.specs2.mutable._
import mimir.algebra._
/**
 * Tests for string-to-primitive parsing in TextUtils.
 */
object TextUtilsSpec
  extends Specification
{
  "TextUtils" should {
    // Each (type, input, expected) triple becomes its own named fragment.
    "Parse Primitive Values" >> {
      Fragments.foreach(Seq(
        (TInt(), "1", IntPrimitive(1)),
        (TFloat(), "1.0", FloatPrimitive(1.0)),
        (TFloat(), "1", FloatPrimitive(1.0)),
        (TFloat(), "1e-2", FloatPrimitive(0.01)),
        (TBool(), "YES", BoolPrimitive(true)),
        (TBool(), "yes", BoolPrimitive(true)),
        (TBool(), "True", BoolPrimitive(true)),
        (TBool(), "NO", BoolPrimitive(false)),
        (TBool(), "0", BoolPrimitive(false)),
        (TType(), "int", TypePrimitive(TInt())),
        (TType(), "zipcode", TypePrimitive(TUser("zipcode")))
      )) { case (t, str, v) =>
        s"CAST('$str' AS $t) == $v" >> {
          TextUtils.parsePrimitive(t, str) must be equalTo(v)
        }
      }
    }
    "Parse Dates" >> {
      TextUtils.parseDate("2017-02-12") must be equalTo(DatePrimitive(2017, 2, 12))
    }
    // Fractional seconds normalize to milliseconds (".1" -> 100, ".1201" -> 120).
    "Parse Timestamps" >> {
      TextUtils.parseTimestamp("2017-02-12 02:12:16") must be equalTo(TimestampPrimitive(2017, 2, 12, 2, 12, 16, 0))
      TextUtils.parseTimestamp("2013-10-07 08:23:19.120") must be equalTo(TimestampPrimitive(2013, 10, 7, 8, 23, 19, 120))
      TextUtils.parseTimestamp("2013-10-07 08:23:19.12") must be equalTo(TimestampPrimitive(2013, 10, 7, 8, 23, 19, 120))
      TextUtils.parseTimestamp("2013-10-07 08:23:19.1") must be equalTo(TimestampPrimitive(2013, 10, 7, 8, 23, 19, 100))
      TextUtils.parseTimestamp("2013-10-07 08:23:19.1201") must be equalTo(TimestampPrimitive(2013, 10, 7, 8, 23, 19, 120))
    }
  }
}
/**
* Copyright (c) 2002-2012 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.pipes.aggregation
import org.neo4j.cypher.internal.commands.expressions.Expression
import collection.Map
import org.neo4j.cypher.internal.pipes.ExecutionContext
/**
 * Aggregation function computing the arithmetic mean of the numeric values
 * produced by `value`. Arithmetic is delegated to the `Plus` mixin so that
 * mixed numeric types are handled uniformly.
 */
class AvgFunction(val value: Expression)
  extends AggregationFunction
  with Plus
  with NumericExpressionOnly {

  def name = "AVG"

  // How many values have been folded in, and their running sum.
  private var seenCount: Int = 0
  private var runningSum: Any = 0

  /** The mean of all values seen so far, or null when nothing was aggregated. */
  def result =
    if (seenCount == 0)
      null
    else
      divide(runningSum, seenCount)

  /** Folds the next row's value into the running sum (non-numbers are rejected). */
  def apply(data: ExecutionContext) {
    actOnNumber(value(data), number => {
      seenCount += 1
      runningSum = plus(runningSum, number)
    })
  }
}
} | dksaputra/community | cypher/src/main/scala/org/neo4j/cypher/internal/pipes/aggregation/AvgFunction.scala | Scala | gpl-3.0 | 1,414 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.view
import org.geotools.data._
import org.geotools.data.simple.{SimpleFeatureReader, SimpleFeatureSource}
import org.locationtech.geomesa.curve.TimePeriod.TimePeriod
import org.locationtech.geomesa.index.geotools.GeoMesaFeatureReader
import org.locationtech.geomesa.index.stats.GeoMesaStats.{GeoMesaStatWriter, StatUpdater}
import org.locationtech.geomesa.index.stats.RunnableStats.UnoptimizedRunnableStats
import org.locationtech.geomesa.index.stats.{GeoMesaStats, HasGeoMesaStats}
import org.locationtech.geomesa.index.view.MergedDataStoreView.MergedStats
import org.locationtech.geomesa.index.view.MergedQueryRunner.DataStoreQueryable
import org.locationtech.geomesa.utils.io.CloseWithLogging
import org.locationtech.geomesa.utils.stats._
import org.opengis.feature.`type`.Name
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
/**
 * Merged querying against multiple data stores.
 *
 * @param stores delegate stores, each paired with an optional filter that
 *               scopes the view of that store
 * @param namespace namespace
 */
class MergedDataStoreView(val stores: Seq[(DataStore, Option[Filter])], namespace: Option[String] = None)
    extends MergedDataStoreSchemas(stores.map(_._1), namespace) with HasGeoMesaStats {
  require(stores.nonEmpty, "No delegate stores configured")
  // Executes queries against every delegate store and merges the results.
  private [view] val runner = new MergedQueryRunner(this, stores.map { case (ds, f) => DataStoreQueryable(ds) -> f })
  // Stats are likewise merged across all delegates (see companion object).
  override val stats: GeoMesaStats = new MergedStats(stores)
  override def getFeatureSource(name: Name): SimpleFeatureSource = getFeatureSource(name.getLocalPart)
  override def getFeatureSource(typeName: String): SimpleFeatureSource = {
    val sources = stores.map { case (store, filter) => (store.getFeatureSource(typeName), filter) }
    new MergedFeatureSourceView(this, sources, getSchema(typeName))
  }
  override def getFeatureReader(query: Query, transaction: Transaction): SimpleFeatureReader =
    GeoMesaFeatureReader(getSchema(query.getTypeName), query, runner, None, None)
}
object MergedDataStoreView {

  /**
   * Stats implementation combining the stats of each delegate store.
   *
   * Most queries return a merged result only when every delegate can answer
   * (see `merge`); getCount and getMinMax return partial results.
   */
  class MergedStats(stores: Seq[(DataStore, Option[Filter])]) extends GeoMesaStats {

    // Use native stats where available, else fall back to query-based stats.
    private val stats = stores.map {
      case (s: HasGeoMesaStats, f) => (s.stats, f)
      case (s, f) => (new UnoptimizedRunnableStats(s), f)
    }

    override val writer: GeoMesaStatWriter = new MergedStatWriter(stats.map(_._1.writer))

    override def getCount(sft: SimpleFeatureType, filter: Filter, exact: Boolean): Option[Long] = {
      // note: unlike most methods in this class, this will return if any of the merged stores provide a response
      val counts = stats.flatMap { case (stat, f) => stat.getCount(sft, mergeFilter(filter, f), exact) }
      counts.reduceLeftOption(_ + _)
    }

    override def getMinMax[T](
        sft: SimpleFeatureType,
        attribute: String,
        filter: Filter,
        exact: Boolean): Option[MinMax[T]] = {
      // note: unlike most methods in this class, this will return if any of the merged stores provide a response
      val bounds = stats.flatMap { case (stat, f) =>
        stat.getMinMax[T](sft, attribute, mergeFilter(filter, f), exact)
      }
      bounds.reduceLeftOption(_ + _)
    }

    override def getEnumeration[T](
        sft: SimpleFeatureType,
        attribute: String,
        filter: Filter,
        exact: Boolean): Option[EnumerationStat[T]] = {
      merge((stat, f) => stat.getEnumeration[T](sft, attribute, mergeFilter(filter, f), exact))
    }

    override def getFrequency[T](
        sft: SimpleFeatureType,
        attribute: String,
        precision: Int,
        filter: Filter,
        exact: Boolean): Option[Frequency[T]] = {
      merge((stat, f) => stat.getFrequency[T](sft, attribute, precision, mergeFilter(filter, f), exact))
    }

    override def getTopK[T](
        sft: SimpleFeatureType,
        attribute: String,
        filter: Filter,
        exact: Boolean): Option[TopK[T]] = {
      merge((stat, f) => stat.getTopK[T](sft, attribute, mergeFilter(filter, f), exact))
    }

    override def getHistogram[T](
        sft: SimpleFeatureType,
        attribute: String,
        bins: Int,
        min: T,
        max: T,
        filter: Filter,
        exact: Boolean): Option[Histogram[T]] = {
      merge((stat, f) => stat.getHistogram[T](sft, attribute, bins, min, max, mergeFilter(filter, f), exact))
    }

    override def getZ3Histogram(
        sft: SimpleFeatureType,
        geom: String,
        dtg: String,
        period: TimePeriod,
        bins: Int,
        filter: Filter,
        exact: Boolean): Option[Z3Histogram] = {
      merge((stat, f) => stat.getZ3Histogram(sft, geom, dtg, period, bins, mergeFilter(filter, f), exact))
    }

    override def getStat[T <: Stat](
        sft: SimpleFeatureType,
        query: String,
        filter: Filter,
        exact: Boolean): Option[T] = {
      merge((stat, f) => stat.getStat(sft, query, mergeFilter(filter, f), exact))
    }

    override def close(): Unit = CloseWithLogging(stats.map(_._1))

    // Queries each delegate in turn; result is Some only if ALL delegates answer.
    private def merge[T <: Stat](query: (GeoMesaStats, Option[Filter]) => Option[T]): Option[T] = {
      // lazily evaluate each stat as we only return Some if all the child stores do
      val head = query(stats.head._1, stats.head._2)
      stats.tail.foldLeft(head) { case (result, (stat, filter)) =>
        for { r <- result; n <- query(stat, filter) } yield { (r + n).asInstanceOf[T] }
      }
    }
  }

  /** Fans every write/maintenance operation out to each delegate writer. */
  class MergedStatWriter(writers: Seq[GeoMesaStatWriter]) extends GeoMesaStatWriter {

    override def analyze(sft: SimpleFeatureType): Seq[Stat] = {
      // pairwise-sum the per-store stat sequences into one merged sequence
      writers.map(_.analyze(sft)).reduceLeft[Seq[Stat]] { case (left, right) =>
        left.zip(right).map { case (l, r) => l + r }
      }
    }

    override def updater(sft: SimpleFeatureType): StatUpdater = new MergedStatUpdater(writers.map(_.updater(sft)))

    override def rename(sft: SimpleFeatureType, previous: SimpleFeatureType): Unit =
      writers.foreach(_.rename(sft, previous))

    override def clear(sft: SimpleFeatureType): Unit = writers.foreach(_.clear(sft))
  }

  /** Fans each feature add/remove out to every delegate updater. */
  class MergedStatUpdater(updaters: Seq[StatUpdater]) extends StatUpdater {
    override def add(sf: SimpleFeature): Unit = updaters.foreach(_.add(sf))
    override def remove(sf: SimpleFeature): Unit = updaters.foreach(_.remove(sf))
    override def flush(): Unit = updaters.foreach(_.flush())
    override def close(): Unit = CloseWithLogging(updaters)
  }
}
| aheyne/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/view/MergedDataStoreView.scala | Scala | apache-2.0 | 6,894 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.fpm
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import org.apache.spark.annotation.Since
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
import org.apache.spark.internal.Logging
import org.apache.spark.mllib.fpm.AssociationRules.Rule
import org.apache.spark.mllib.fpm.FPGrowth.FreqItemset
import org.apache.spark.rdd.RDD
/**
 * Generates association rules from a `RDD[FreqItemset[Item]]`. This method only generates
 * association rules which have a single item as the consequent.
 *
 */
@Since("1.5.0")
class AssociationRules private[fpm] (
    private var minConfidence: Double) extends Logging with Serializable {

  /**
   * Constructs a default instance with default parameters {minConfidence = 0.8}.
   */
  @Since("1.5.0")
  def this() = this(0.8)

  /**
   * Sets the minimal confidence (default: `0.8`).
   */
  @Since("1.5.0")
  def setMinConfidence(minConfidence: Double): this.type = {
    require(minConfidence >= 0.0 && minConfidence <= 1.0,
      s"Minimal confidence must be in range [0, 1] but got ${minConfidence}")
    this.minConfidence = minConfidence
    this
  }

  /**
   * Computes the association rules with confidence above `minConfidence`.
   * @param freqItemsets frequent itemset model obtained from [[FPGrowth]]
   * @return a `RDD[Rule[Item]]` containing the association rules.
   *
   */
  @Since("1.5.0")
  def run[Item: ClassTag](freqItemsets: RDD[FreqItemset[Item]]): RDD[Rule[Item]] = {
    run(freqItemsets, Map.empty[Item, Double])
  }

  /**
   * Computes the association rules with confidence above `minConfidence`.
   * @param freqItemsets frequent itemset model obtained from [[FPGrowth]]
   * @param itemSupport map containing an item and its support
   * @return a `RDD[Rule[Item]]` containing the association rules. The rules will be able to
   *         compute also the lift metric.
   */
  @Since("2.4.0")
  def run[Item: ClassTag](freqItemsets: RDD[FreqItemset[Item]],
      itemSupport: scala.collection.Map[Item, Double]): RDD[Rule[Item]] = {
    // For candidate rule X => Y, generate (X, (Y, freq(X union Y)))
    val candidates = freqItemsets.flatMap { itemset =>
      val items = itemset.items
      items.flatMap { item =>
        // Split the itemset into the single-item consequent and the rest;
        // singleton itemsets (empty antecedent) produce no candidate.
        items.partition(_ == item) match {
          case (consequent, antecedent) if antecedent.nonEmpty =>
            Some((antecedent.toSeq, (consequent.toSeq, itemset.freq)))
          case _ => None
        }
      }
    }

    // Join to get (X, ((Y, freq(X union Y)), freq(X))), generate rules, and filter by confidence
    candidates.join(freqItemsets.map(x => (x.items.toSeq, x.freq)))
      .map { case (antecedent, ((consequent, freqUnion), freqAntecedent)) =>
        new Rule(antecedent.toArray,
          consequent.toArray,
          freqUnion,
          freqAntecedent,
          // the consequent contains always only one element
          itemSupport.get(consequent.head))
      }.filter(_.confidence >= minConfidence)
  }

  /**
   * Java-friendly version of `run`.
   */
  @Since("1.5.0")
  def run[Item](freqItemsets: JavaRDD[FreqItemset[Item]]): JavaRDD[Rule[Item]] = {
    val tag = fakeClassTag[Item]
    run(freqItemsets.rdd)(tag)
  }
}
@Since("1.5.0")
object AssociationRules {

  /**
   * An association rule between sets of items.
   * @param antecedent hypotheses of the rule. Java users should call [[Rule#javaAntecedent]]
   *                   instead.
   * @param consequent conclusion of the rule. Java users should call [[Rule#javaConsequent]]
   *                   instead.
   * @tparam Item item type
   *
   */
  @Since("1.5.0")
  class Rule[Item] private[fpm] (
      @Since("1.5.0") val antecedent: Array[Item],
      @Since("1.5.0") val consequent: Array[Item],
      private[spark] val freqUnion: Double,
      freqAntecedent: Double,
      freqConsequent: Option[Double]) extends Serializable {

    /**
     * Returns the confidence of the rule.
     *
     */
    @Since("1.5.0")
    def confidence: Double = freqUnion / freqAntecedent

    /**
     * Returns the lift of the rule. None when the consequent's support was
     * not supplied at construction time.
     */
    @Since("2.4.0")
    def lift: Option[Double] = freqConsequent.map(fCons => confidence / fCons)

    // Runs at construction time: a valid rule's antecedent and consequent
    // must be disjoint.
    require(antecedent.toSet.intersect(consequent.toSet).isEmpty, {
      val sharedItems = antecedent.toSet.intersect(consequent.toSet)
      s"A valid association rule must have disjoint antecedent and " +
        s"consequent but ${sharedItems} is present in both."
    })

    /**
     * Returns antecedent in a Java List.
     *
     */
    @Since("1.5.0")
    def javaAntecedent: java.util.List[Item] = {
      antecedent.toList.asJava
    }

    /**
     * Returns consequent in a Java List.
     *
     */
    @Since("1.5.0")
    def javaConsequent: java.util.List[Item] = {
      consequent.toList.asJava
    }

    override def toString: String = {
      s"${antecedent.mkString("{", ",", "}")} => " +
        s"${consequent.mkString("{", ",", "}")}: (confidence: $confidence; lift: $lift)"
    }
  }
}
| maropu/spark | mllib/src/main/scala/org/apache/spark/mllib/fpm/AssociationRules.scala | Scala | apache-2.0 | 5,850 |
package com.lambtors.poker_api.module.poker.application.player_cards.find
import com.lambtors.poker_api.infrastructure.query_bus.Query
/**
 * Query asking for the cards dealt to the player identified by `playerId`.
 *
 * @param playerId raw string representation of the player's identifier
 */
final case class FindPlayerCardsQuery(playerId: String) extends Query
| lambtors/poker-api | src/main/scala/com/lambtors/poker_api/module/poker/application/player_cards/find/FindPlayerCardsQuery.scala | Scala | mit | 207 |
/*
Copyright (c) 2009, 2010 Hanno Braun <mail@hannobraun.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.hannobraun.sd.core
import scala.reflect.Manifest
/**
 * A single phase of the iteration step that is executed by World.
 *
 * @tparam B the body type this phase operates on; only bodies of this type
 *           are passed to execute
 * @tparam C the constraint type this phase operates on
 */
trait StepPhase[ B <: Body, C <: AnyRef ] {
	/**
	 * Executes this phase for one simulation step. A concrete iteration step
	 * phase must implement this method; it is called during each step.
	 *
	 * @param dt the time that has passed since the last step
	 * @param bodies all bodies, filtered according to the type parameter of the step phase.
	 *               For example, a collision-related phase extending StepPhase[ Shape ]
	 *               only receives bodies with Shape mixed in.
	 * @param constraints all constraints, filtered according to the type parameter
	 * @return the updated body and constraint iterables
	 */
	def execute( dt: Double, bodies: Iterable[ B ], constraints: Iterable[ C ] ): ( Iterable[ B ], Iterable[ C ] )
}
| hannobraun/ScalableDynamics | src/main/scala/com/hannobraun/sd/core/StepPhase.scala | Scala | apache-2.0 | 1,587 |
package io.findify.s3mock.route
import java.io.StringWriter
import java.net.URLDecoder
import java.util.Date
import akka.http.scaladsl.model.HttpEntity.Strict
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{RawHeader, `Last-Modified`}
import akka.http.scaladsl.server.Directives._
import com.amazonaws.services.s3.Headers
import com.amazonaws.services.s3.model.ObjectMetadata
import com.amazonaws.util.DateUtils
import com.typesafe.scalalogging.LazyLogging
import io.findify.s3mock.error.{InternalErrorException, NoSuchBucketException, NoSuchKeyException}
import io.findify.s3mock.provider.{GetObjectData, Provider}
import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}
/**
  * Handles S3 GET Object requests, including the `?tagging` sub-resource.
  *
  * Created by shutty on 8/19/16.
  */
case class GetObject()(implicit provider: Provider) extends LazyLogging {

  /**
    * Route serving `GET /bucket/path`. Supports HTTP Range requests and maps
    * provider failures onto the matching S3 XML error responses.
    */
  def route(bucket: String, path: String, params: Map[String, String]) = get {
    withRangeSupport {
      respondWithDefaultHeader(`Last-Modified`(DateTime(1970, 1, 1))) {
        complete {
          logger.debug(s"get object: bucket=$bucket, path=$path")
          Try(provider.getObject(bucket, path)) match {
            case Success(GetObjectData(data, metaOption)) =>
              metaOption match {
                case Some(meta) =>
                  // Fall back to an untyped entity when the stored content type is unparseable.
                  val entity: Strict = ContentType.parse(meta.getContentType) match {
                    case Right(value) => HttpEntity(value, data)
                    case Left(error) => HttpEntity(data)
                  }
                  if (params.contains("tagging")) {
                    handleTaggingRequest(meta)
                  } else {
                    HttpResponse(
                      status = StatusCodes.OK,
                      entity = entity,
                      headers = metadataToHeaderList(meta)
                    )
                  }
                case None =>
                  HttpResponse(
                    status = StatusCodes.OK,
                    entity = HttpEntity(data),
                    headers = List()
                  )
              }
            case Failure(e: NoSuchKeyException) =>
              HttpResponse(
                StatusCodes.NotFound,
                entity = e.toXML.toString()
              )
            case Failure(e: NoSuchBucketException) =>
              HttpResponse(
                StatusCodes.NotFound,
                entity = e.toXML.toString()
              )
            case Failure(t) =>
              logger.error("Oops: ", t)
              HttpResponse(
                StatusCodes.InternalServerError,
                entity = InternalErrorException(t).toXML.toString()
              )
          }
        }
      }
    }
  }

  /**
    * Renders the object's tag set (stored URL-encoded in the "x-amz-tagging"
    * raw metadata entry) as an S3 <Tagging> XML document.
    *
    * Fix: removed the unused `root`/`tagset` locals, made the remaining
    * locals vals, and factored the duplicated XML.write call out of the
    * if/else. The XML literals are kept at their original columns so the
    * serialized whitespace is unchanged.
    */
  protected def handleTaggingRequest(meta: ObjectMetadata): HttpResponse = {
    val w = new StringWriter()
    val doc = if (meta.getRawMetadata.containsKey("x-amz-tagging")) {
        <Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
          <TagSet>
            {
            meta.getRawMetadata.get("x-amz-tagging").asInstanceOf[String].split("&").map(
              (rawTag: String) => {
                rawTag.split("=", 2).map(
                  (part: String) => URLDecoder.decode(part, "UTF-8")
                )
              }).map(
              (kv: Array[String]) =>
                <Tag>
                  <Key>{kv(0)}</Key>
                  <Value>{kv(1)}</Value>
                </Tag>)
            }
          </TagSet>
        </Tagging>
    } else {
      <Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><TagSet></TagSet></Tagging>
    }
    xml.XML.write(w, doc, "UTF-8", true, null)
    meta.setContentType("application/xml; charset=utf-8")
    HttpResponse(
      status = StatusCodes.OK,
      entity = w.toString,
      headers = `Last-Modified`(DateTime(1970, 1, 1)) :: metadataToHeaderList(meta)
    )
  }

  // Headers that must never be copied verbatim from stored metadata.
  val headerBlacklist = Set("content-type", "connection")

  /** Converts stored object metadata back into HTTP response headers. */
  protected def metadataToHeaderList(metadata: ObjectMetadata): List[HttpHeader] = {
    // Raw metadata -> headers; java.util.Date values become Last-Modified.
    val headers = Option(metadata.getRawMetadata)
      .map(_.asScala.toMap)
      .map(_.map {
        case (_, date: Date) =>
          `Last-Modified`(DateTime(new org.joda.time.DateTime(date).getMillis))
        case (key, value) =>
          RawHeader(key, value.toString)
      }.toList)
      .toList.flatten
      .filterNot(header => headerBlacklist.contains(header.lowercaseName))

    val httpExpires = Option(metadata.getHttpExpiresDate).map(date => RawHeader(Headers.EXPIRES, DateUtils.formatRFC822Date(date)))

    // User metadata is re-exposed with the x-amz-meta- prefix.
    val userHeaders = Option(metadata.getUserMetadata)
      .map(_.asScala.toMap)
      .map(_.map { case (key, value) => {
        val name = Option(key).map(_.trim).getOrElse("")
        val hvalue = Option(value).map(_.trim).getOrElse("")
        RawHeader(Headers.S3_USER_METADATA_PREFIX + name, hvalue)
      }}.toList)
      .toList
      .flatten

    headers ++ httpExpires.toList ++ userHeaders ++ Option(metadata.getContentMD5).map(md5 => RawHeader(Headers.ETAG, md5))
  }
}
| findify/s3mock | src/main/scala/io/findify/s3mock/route/GetObject.scala | Scala | mit | 5,226 |
package org.jetbrains.plugins.scala.codeInspection.unused
import com.intellij.codeInspection.LocalInspectionTool
import com.intellij.testFramework.EditorTestUtil
import org.jetbrains.plugins.scala.codeInspection.ScalaQuickFixTestBase
import org.jetbrains.plugins.scala.codeInspection.unusedInspections.{DeleteUnusedElementFix, ScalaUnusedSymbolInspection}
/**
  * Tests for the "unused symbol" inspection: each test builds a snippet,
  * asserts the inspection highlights the selection between the START/END
  * markers, and (usually) verifies the delete-unused-element quick fix.
  *
  * Created by Svyatoslav Ilinskiy on 11.07.16.
  */
class ScalaUnusedSymbolInspectionTest extends ScalaQuickFixTestBase {
  // START/END mark the range expected to be highlighted in interpolated snippets.
  import EditorTestUtil.{SELECTION_END_TAG => END, SELECTION_START_TAG => START}
  override protected val classOfInspection: Class[_ <: LocalInspectionTool] =
    classOf[ScalaUnusedSymbolInspection]
  override protected val description: String =
    ScalaUnusedSymbolInspection.Annotation
  // Name of the quick fix applied by testQuickFix.
  val hint = DeleteUnusedElementFix.Hint
  def testPrivateField(): Unit = {
    val code =
      s"""
        |class Foo {
        |  private val ${START}s$END = 0
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      """
        |class Foo {
        |  private val s = 0
        |}
      """.stripMargin
    val after =
      """
        |class Foo {
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  def testLocalUnusedSymbol(): Unit = {
    val code =
      s"""
        |object Foo {
        |  def foo(): Unit = {
        |    val ${START}s$END = 0
        |  }
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      """
        |object Foo {
        |  def foo(): Unit = {
        |    val s = 0
        |  }
        |}
      """.stripMargin
    val after =
      """
        |object Foo {
        |  def foo(): Unit = {
        |  }
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  // Public/protected members are part of the API and must not be flagged.
  def testNonPrivateField(): Unit = {
    val code =
      """
        |class Foo {
        |  val s: String = ""
        |  protected val z: Int = 2
        |}
      """.stripMargin
    checkTextHasNoErrors(code)
  }
  // Multi-declarations are fixed by replacing the unused name with a wildcard.
  def testRemoveMultiDeclaration(): Unit = {
    val code =
      s"""
        |class Foo {
        |  private val (${START}a$END, b): String = ???
        |  println(b)
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      """
        |class Foo {
        |  private val (a, b): String = ???
        |  println(b)
        |}
      """.stripMargin
    val after =
      """
        |class Foo {
        |  private val (_, b): String = ???
        |  println(b)
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  def testSupressed(): Unit = {
    val code =
      """
        |class Bar {
        |  //noinspection ScalaUnusedSymbol
        |  private val f = 2
        |
        |  def aa(): Unit = {
        |    //noinspection ScalaUnusedSymbol
        |    val d = 2
        |  }
        |}
      """.stripMargin
    checkTextHasNoErrors(code)
  }
  // NOTE(review): `before` adds println(a) that `code` lacks — presumably
  // intentional so that only `d` is unused in the fix scenario; confirm.
  def testLocalVar(): Unit = {
    val code =
      s"""
        |class Bar {
        |  def aa(): Unit = {
        |    var (${START}d$END, a) = 10
        |  }
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      s"""
        |class Bar {
        |  def aa(): Unit = {
        |    var (d, a) = 10
        |    println(a)
        |  }
        |}
      """.stripMargin
    val after =
      s"""
        |class Bar {
        |  def aa(): Unit = {
        |    var (_, a) = 10
        |    println(a)
        |  }
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  def testMatchCaseWithType(): Unit = {
    val code =
      s"""
        |class Moo {
        |  Option(null) match {
        |    case Some(${START}s$END: String) =>
        |      println("AA")
        |  }
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      """
        |class Moo {
        |  Option(null) match {
        |    case Some(s: String) =>
        |      println("AA")
        |  }
        |}
      """.stripMargin
    val after =
      """
        |class Moo {
        |  Option(null) match {
        |    case Some(_: String) =>
        |      println("AA")
        |  }
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  def testMatchCaseNoType(): Unit = {
    val code =
      s"""
        |class Moo {
        |  Option(null) match {
        |    case Some(${START}s$END) =>
        |      println("AA")
        |  }
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      """
        |class Moo {
        |  Option(null) match {
        |    case Some(s) =>
        |      println("AA")
        |  }
        |}
      """.stripMargin
    val after =
      """
        |class Moo {
        |  Option(null) match {
        |    case Some(_) =>
        |      println("AA")
        |  }
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  def testAnonymousFunctionDestructor(): Unit = {
    val code =
      s"""
        |class Moo {
        |  Option("").map {
        |    case ${START}a$END: String =>
        |  }
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      """
        |class Moo {
        |  Option("").map {
        |    case a: String =>
        |  }
        |}
      """.stripMargin
    val after =
      """
        |class Moo {
        |  Option("").map {
        |    case _: String =>
        |  }
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  // An unused binder (s@...) is removed entirely, keeping the inner pattern.
  def testBindingPattern(): Unit = {
    val code =
      s"""
        |class Moo {
        |  Option(null) match {
        |    case ${START}s$END@Some(a) => println(a)
        |  }
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      """
        |class Moo {
        |  Option(null) match {
        |    case s@Some(a) => println(a)
        |  }
        |}
      """.stripMargin
    val after =
      """
        |class Moo {
        |  Option(null) match {
        |    case Some(a) => println(a)
        |  }
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  def testBindingPattern2(): Unit = {
    val code =
      s"""
        |class Moo {
        |  Option(null) match {
        |    case s@Some(${START}a$END) => println(s)
        |  }
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      """
        |class Moo {
        |  Option(null) match {
        |    case s@Some(a) => println(s)
        |  }
        |}
      """.stripMargin
    val after =
      """
        |class Moo {
        |  Option(null) match {
        |    case s@Some(_) => println(s)
        |  }
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  def testAnonymousFunctionWithCaseClause(): Unit = {
    val code =
      s"""
        |class Moo {
        |  def foo(s: Seq[(Int, Int)]): Seq[Int] = {
        |    s.map {
        |      case (a, ${START}b$END) => a
        |    }
        |  }
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      """
        |class Moo {
        |  def foo(s: Seq[(Int, Int)]): Seq[Int] = {
        |    s.map {
        |      case (a, b) => a
        |    }
        |  }
        |}
      """.stripMargin
    val after =
      """
        |class Moo {
        |  def foo(s: Seq[(Int, Int)]): Seq[Int] = {
        |    s.map {
        |      case (a, _) => a
        |    }
        |  }
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  def testUnusedRegularAnonymousFunction(): Unit = {
    val code =
      s"""
        |class Moo {
        |  def foo(s: Seq[(Int, Int)]): Seq[Int] = {
        |    s.map (${START}a$END => 1)
        |  }
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      """
        |class Moo {
        |  def foo(s: Seq[(Int, Int)]): Seq[Int] = {
        |    s.map (a => 1)
        |  }
        |}
      """.stripMargin
    val after =
      """
        |class Moo {
        |  def foo(s: Seq[(Int, Int)]): Seq[Int] = {
        |    s.map (_ => 1)
        |  }
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  def testFor(): Unit = {
    val code =
      s"""
        |class Moo {
        |  val s = Seq("")
        |  for (${START}j$END <- s) {
        |    println(s)
        |  }
        |}
      """.stripMargin
    checkTextHasError(code)
    val before =
      """
        |class Moo {
        |  val s = Seq("")
        |  for (j <- s) {
        |    println(s)
        |  }
        |}
      """.stripMargin
    val after =
      """
        |class Moo {
        |  val s = Seq("")
        |  for (_ <- s) {
        |    println(s)
        |  }
        |}
      """.stripMargin
    testQuickFix(before, after, hint)
  }
  // Explicit wildcards are never "unused symbols".
  def testNoHighlightWildCards(): Unit = {
    val code =
      """
        |class Moo {
        |  def foo(i: Any): Unit = i match {
        |    case _: String => println()
        |    case b: Seq[String] => b.foreach(_ => println())
        |    case t: (String, Int) =>
        |      val (s, _) = t
        |      println(s)
        |    case _ =>
        |      for (_ <- 1 to 2) {
        |        println()
        |      }
        |  }
        |}
      """.stripMargin
    checkTextHasNoErrors(code)
  }
  // Implicit parameters may be used by implicit resolution; never flagged.
  def testImplicitParamter(): Unit = {
    val code =
      """
        |class Bar
        |class Baz
        |class Moo {
        |  def foo(x: Bar => Baz) = ???
        |  foo { implicit bar => new Baz }
        |}
      """.stripMargin
    checkTextHasNoErrors(code)
  }
  def testLocalClass(): Unit = {
    val code =
      s"""
        |class Person() {
        |  def func() = {
        |    object ${START}A$END
        |  }
        |}
      """.stripMargin
    val before =
      """
        |class Person() {
        |  def func() = {
        |    object A
        |  }
        |}
      """.stripMargin
    val after =
      """
        |class Person() {
        |  def func() = {
        |
        |  }
        |}
      """.stripMargin
    checkTextHasError(code)
    testQuickFix(before, after, hint)
  }
  def testInnerClass(): Unit = {
    val code =
      s"""
        |class Person() {
        |  private object ${START}A$END
        |}
      """.stripMargin
    val before =
      """
        |class Person() {
        |  private object A
        |}
      """.stripMargin
    val after =
      """
        |class Person() {
        |
        |}
      """.stripMargin
    checkTextHasError(code)
    testQuickFix(before, after, hint)
  }
}
// NOTE(review): this top-level class appears to be accidental leftover —
// it duplicates the fixture string used by testInnerClass above and is
// referenced nowhere; consider deleting it.
class Person() {
  private object A
}
package pl.newicom.dddd.cluster
import akka.actor.{ActorRef, ActorSystem, PoisonPill, Props}
import akka.cluster.singleton.{ClusterSingletonManager, ClusterSingletonManagerSettings, ClusterSingletonProxy, ClusterSingletonProxySettings}
import pl.newicom.dddd.actor.ActorFactory
import pl.newicom.dddd.office.LocalOfficeId
class SingletonManagerFactory[A : LocalOfficeId](implicit system: ActorSystem) extends ActorFactory[A] {

  /** Child lookup is not supported by this factory. */
  override def getChild(name: String): Option[ActorRef] = throw new UnsupportedOperationException

  /**
   * Spawns a ClusterSingletonManager hosting `props` on the office department's
   * role, then returns a ClusterSingletonProxy through which callers address the
   * singleton regardless of the node it currently runs on.
   */
  override def createChild(props: Props, name: String): ActorRef = {
    val role = implicitly[LocalOfficeId[A]].department
    val managerName = s"singletonOf$name"

    // The manager supervises the singleton instance cluster-wide.
    system.actorOf(
      ClusterSingletonManager.props(
        singletonProps = props,
        terminationMessage = PoisonPill,
        ClusterSingletonManagerSettings(system).withSingletonName(name).withRole(role)
      ),
      name = managerName)

    // The proxy (the returned ref) tracks wherever the singleton currently lives.
    system.actorOf(
      ClusterSingletonProxy.props(
        singletonManagerPath = s"/user/$managerName",
        ClusterSingletonProxySettings(system).withSingletonName(name).withRole(role)
      ),
      name = s"${name}Proxy")
  }
} | AndreyLadniy/akka-ddd | akka-ddd-core/src/main/scala/pl/newicom/dddd/cluster/SingletonManagerFactory.scala | Scala | mit | 1,343 |
/**
* @author Daniel Perez
*/
package net.javachallenge.util.internationalization
import scala.collection.mutable
/**
* A locale used in the application
*
* @constructor creates a locale with a language and a country
* @param language the name of the locale's language (eg. 'ja')
* @param country the name of the locale's country (eg. 'JP')
*/
class Locale(val language: String, val country: String) {

  // Every locale announces itself to the global registry as soon as it is built.
  Locale.registerLocale(this)

  /** Backing store of key -> translated-text pairs for this locale. */
  val map: mutable.Map[String, String] = mutable.Map()

  /**
   * Readable locale name.
   *
   * @return the locale name (eg. 'ja_JP')
   */
  override def toString: String = s"${language}_$country"

  /**
   * Records `word` as the translation of `key` in this locale.
   *
   * @param key  the translation key (eg. 'hello_world')
   * @param word the translated text for the key (eg. 'こんにちは世界。')
   */
  def addWord(key: String, word: String) = (map += (key -> word))

  /**
   * Resolves `key` in this locale, consulting the global fallback locale when the
   * key is missing here; yields the key itself when no translation can be found
   * (or when the fallback is this very locale).
   *
   * @param key the key to translate
   * @return the translated text, or `key` when untranslated
   */
  def translate(key: String): String =
    map.getOrElse(key, Locale.fallback match {
      case Some(other) if other != this => other.translate(key)
      case _ => key
    })
}

/**
 * Registry and global settings for [[Locale]] instances.
 */
object Locale {

  /** The locale currently active in the application, if any. */
  var current: Option[Locale] = None

  /** Locale consulted when a key is missing from the locale being queried. */
  var fallback: Option[Locale] = None

  /** All known locales, keyed by language code (eg. 'ja'). */
  var locales: mutable.Map[String, Locale] = mutable.Map()

  /**
   * Fetches (or builds) the locale for `language`, defaulting the country code to
   * the upper-cased language code.
   *
   * @param language the locale's language (eg. 'ja')
   * @return the existing or newly created locale
   */
  def apply(language: String): Locale = getOrCreate(language, language.toUpperCase)

  /**
   * Fetches (or builds) the locale with the given language and country.
   *
   * @param name    the locale's language (eg. 'ja')
   * @param country the locale's country (eg. 'JP')
   * @return the existing or newly created locale
   */
  def apply(name: String, country: String): Locale = getOrCreate(name, country)

  /**
   * Yields the already-registered locale for `language`, or constructs a fresh one
   * (which registers itself on construction) when none exists yet.
   */
  protected def getOrCreate(language: String, country: String): Locale =
    locales.getOrElse(language, new Locale(language, country))

  /** Forgets every registered locale and resets the current and fallback settings. */
  def clear: Unit = {
    locales.clear()
    current = None
    fallback = None
  }

  /** Adds `locale` to the registry under its language code. */
  def registerLocale(locale: Locale): Unit = locales += (locale.language -> locale)

  /** Makes the named locale current; clears `current` when the name is unknown. */
  def set(locale: String): Unit = current = locales.get(locale)

  /** Makes the named locale the fallback; clears it when the name is unknown. */
  def setFallback(locale: String): Unit = fallback = locales.get(locale)

  /** True when a locale is registered for the given language code. */
  def has(locale: String): Boolean = locales.contains(locale)
} | AI-comp/JavaChallenge2012 | src/main/scala/net/javachallenge/util/internationalization/Locale.scala | Scala | apache-2.0 | 3,971 |
package zzz.akka.avionics
import akka.actor.Actor
import akka.actor.Actor.Receive
/**
* Created by justin on 14/08/2014.
*/
// Placeholder autopilot actor: `receive` is unimplemented (`???`) and will throw
// scala.NotImplementedError if this actor is sent any message.
class AutoPilot extends Actor {
  override def receive: Receive = ???
}
| justindav1s/learning-scala | src/main/scala/zzz/akka/avionics/AutoPilot.scala | Scala | apache-2.0 | 201 |
import scalaxb.compiler.wsdl11.Driver
import java.io.File
import scalaxb.compiler.Config
import scalaxb.compiler.ConfigEntry._
import scalaxb.stockquote.server._
import scala.concurrent._, duration.Duration
// Round-trip integration test: publish a JAX-WS document/wrapped stock-quote
// service, generate scalaxb bindings from its WSDL, then compile and run a client
// snippet against the live endpoint.
class Wsdl11DocumentWrappedTest extends TestBase with JaxwsTestBase {
  override val module = new Driver // with Verbose
  // specs2 has its own Duration
  // Service implementation answers with no artificial delay.
  def serviceImpl:DocumentWrappedService = new DocumentWrappedService(Duration(0, "seconds"))
  def serviceAddress: String = "document-wrapped"
  step {
    startServer
  }
  val packageName = "stockquote"
  val wsdlFile = new File(s"integration/target/$serviceAddress.wsdl")
  val config = Config.default.update(PackageNames(Map(None -> Some(packageName)))).
    update(Outdir(tmp)).
    update(GeneratePackageDir)
  // Fetches the WSDL from the running server and generates the binding sources.
  lazy val generated = {
    writeStringToFile(retrieveWsdl, wsdlFile)
    module.process(wsdlFile, config)
  }
  // The snippet below is compiled together with the generated sources and must
  // evaluate to true; it performs a real SOAP call against the local server.
  "document-wrapped service works" in {
    (List("""import stockquote._
            import scala.concurrent._, duration._, ExecutionContext.Implicits._""",
        """val service = (new DocumentWrappedServiceSoapBindings with scalaxb.Soap11ClientsAsync with scalaxb.DispatchHttpClientsAsync {}).service""",
        """val fresponse = service.price(Some("GOOG"))""",
        """val response = Await.result(fresponse, 5.seconds)""",
        """if (response != 42.0) sys.error(response.toString) else ()""",
        """true"""), generated) must evaluateTo(true,
      outdir = "./tmp", usecurrentcp = true)
  }
  step {
    stopServer
  }
}
| eed3si9n/scalaxb | integration/src/test/scala/Wsdl11DocumentWrappedTest.scala | Scala | mit | 1,529 |
package models
import uk.gov.dvla.vehicles.presentation.common.model.VehicleAndKeeperDetailsModel
import uk.gov.dvla.vehicles.presentation.common.views.constraints.RegistrationNumber.formatVrm
/**
 * View model backing the vehicle-lookup failure page.
 *
 * @param registrationNumber formatted VRM as entered on the lookup form
 * @param v5ref              V5C document reference number from the form
 * @param postcode           keeper postcode from the form
 * @param failureCode        lookup failure code (blanked when blacklisted by config)
 * @param vehicleDetails     looked-up details, or a placeholder when the lookup found nothing
 */
final case class VehicleLookupFailureViewModel(registrationNumber: String,
                                               v5ref: String,
                                               postcode: String,
                                               failureCode: String,
                                               vehicleDetails: VehicleAndKeeperDetailsModel)
object VehicleLookupFailureViewModel {

  /**
   * Builds the failure view model from the submitted form, substituting an
   * all-None placeholder when the lookup returned no vehicle details.
   */
  def apply(vehicleAndKeeperLookupForm: VehicleAndKeeperLookupFormModel,
            vehicleAndKeeperDetails: Option[VehicleAndKeeperDetailsModel],
            failureCode: String)(implicit config: utils.helpers.Config): VehicleLookupFailureViewModel = {
    val details = vehicleAndKeeperDetails.getOrElse(
      VehicleAndKeeperDetailsModel(
        registrationNumber = formatVrm(vehicleAndKeeperLookupForm.registrationNumber),
        make = None,
        model = None,
        title = None,
        firstName = None,
        lastName = None,
        address = None,
        disposeFlag = None,
        keeperEndDate = None,
        keeperChangeDate = None,
        suppressedV5Flag = None
      )
    )
    VehicleLookupFailureViewModel(
      registrationNumber = formatVrm(vehicleAndKeeperLookupForm.registrationNumber),
      v5ref = vehicleAndKeeperLookupForm.referenceNumber,
      postcode = vehicleAndKeeperLookupForm.postcode,
      failureCode = filteredFailureCode(failureCode),
      details
    )
  }

  // A failure code on the configured blacklist is blanked rather than shown.
  private def filteredFailureCode(code: String)(implicit config: utils.helpers.Config): String =
    if (config.failureCodeBlacklist.exists(_.contains(code))) "" else code
}
| dvla/vrm-retention-online | app/models/VehicleLookupFailureViewModel.scala | Scala | mit | 1,973 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.couchbase.planner
import slamdata.Predef._
import quasar.NameGenerator
import quasar.fp.ski._
import quasar.physical.couchbase._
import quasar.Planner.PlannerErrorME
import quasar.qscript._
import matryoshka._
import scalaz._
/** Planner for derived map functions (`MapFuncDerived`).
  *
  * Delegates to `ExpandMapFunc.expand`, which rewrites each derived function in
  * terms of core map functions planned by the supplied `core` planner. The
  * second argument (`κ(None)`) appears to provide no direct translations, so
  * every derived function goes through expansion — confirm against
  * `ExpandMapFunc`'s contract.
  */
final class MapFuncDerivedPlanner[T[_[_]]: BirecursiveT: ShowT, F[_]: Applicative: Monad: NameGenerator: PlannerErrorME]
  (core: Planner[T, F, MapFuncCore[T, ?]])
  extends Planner[T, F, MapFuncDerived[T, ?]] {

  def plan: AlgebraM[F, MapFuncDerived[T, ?], T[N1QL]] = ExpandMapFunc.expand(core.plan, κ(None))
}
| jedesah/Quasar | couchbase/src/main/scala/quasar/physical/couchbase/planner/MapFuncDerivedPlanner.scala | Scala | apache-2.0 | 1,174 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import scala.util.Random
import org.apache.spark.AccumulatorSuite
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.sql.{RandomDataGenerator, Row}
/**
* Test sorting. Many of the test cases generate random data and compares the sorted result with one
* sorted by a reference implementation ([[ReferenceSort]]).
*/
class SortSuite extends SparkPlanTest with SharedSQLContext {
  import testImplicits.localSeqToDataFrameHolder
  // sortAnswers = false throughout: ordering itself is under test, so result rows
  // must match the expected output positionally.
  test("basic sorting using ExternalSort") {
    val input = Seq(
      ("Hello", 4, 2.0),
      ("Hello", 1, 1.0),
      ("World", 8, 3.0)
    )
    // Sort by (a, b) and by (b, a); expected rows come from sorting the input in Scala.
    checkAnswer(
      input.toDF("a", "b", "c"),
      (child: SparkPlan) => Sort('a.asc :: 'b.asc :: Nil, global = true, child = child),
      input.sortBy(t => (t._1, t._2)).map(Row.fromTuple),
      sortAnswers = false)
    checkAnswer(
      input.toDF("a", "b", "c"),
      (child: SparkPlan) => Sort('b.asc :: 'a.asc :: Nil, global = true, child = child),
      input.sortBy(t => (t._2, t._1)).map(Row.fromTuple),
      sortAnswers = false)
  }
  // The physical Sort plan must agree with the reference implementation when
  // followed by a limit.
  test("sort followed by limit") {
    checkThatPlansAgree(
      (1 to 100).map(v => Tuple1(v)).toDF("a"),
      (child: SparkPlan) => Limit(10, Sort('a.asc :: Nil, global = true, child = child)),
      (child: SparkPlan) => Limit(10, ReferenceSort('a.asc :: Nil, global = true, child)),
      sortAnswers = false
    )
  }
  test("sorting does not crash for large inputs") {
    val sortOrder = 'a.asc :: Nil
    // Two ~2MB strings in a single partition; testSpillFrequency = 1 presumably
    // forces spilling on every record — confirm against Sort's implementation.
    val stringLength = 1024 * 1024 * 2
    checkThatPlansAgree(
      Seq(Tuple1("a" * stringLength), Tuple1("b" * stringLength)).toDF("a").repartition(1),
      Sort(sortOrder, global = true, _: SparkPlan, testSpillFrequency = 1),
      ReferenceSort(sortOrder, global = true, _: SparkPlan),
      sortAnswers = false
    )
  }
  // Sorting must report its memory usage through the peak-execution-memory metric.
  test("sorting updates peak execution memory") {
    AccumulatorSuite.verifyPeakExecutionMemorySet(sparkContext, "unsafe external sort") {
      checkThatPlansAgree(
        (1 to 100).map(v => Tuple1(v)).toDF("a"),
        (child: SparkPlan) => Sort('a.asc :: Nil, global = true, child = child),
        (child: SparkPlan) => ReferenceSort('a.asc :: Nil, global = true, child),
        sortAnswers = false)
    }
  }
  // Test sorting on different data types
  // One generated test per (data type, nullability, sort order) combination;
  // input rows come from RandomDataGenerator and are shuffled before sorting.
  for (
    dataType <- DataTypeTestUtils.atomicTypes ++ Set(NullType);
    nullable <- Seq(true, false);
    sortOrder <- Seq('a.asc :: Nil, 'a.desc :: Nil);
    randomDataGenerator <- RandomDataGenerator.forType(dataType, nullable)
  ) {
    test(s"sorting on $dataType with nullable=$nullable, sortOrder=$sortOrder") {
      val inputData = Seq.fill(1000)(randomDataGenerator())
      val inputDf = sqlContext.createDataFrame(
        sparkContext.parallelize(Random.shuffle(inputData).map(v => Row(v))),
        StructType(StructField("a", dataType, nullable = true) :: Nil)
      )
      checkThatPlansAgree(
        inputDf,
        p => ConvertToSafe(Sort(sortOrder, global = true, p: SparkPlan, testSpillFrequency = 23)),
        ReferenceSort(sortOrder, global = true, _: SparkPlan),
        sortAnswers = false
      )
    }
  }
}
| chenc10/Spark-PAF | sql/core/src/test/scala/org/apache/spark/sql/execution/SortSuite.scala | Scala | apache-2.0 | 4,036 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.aggregate
import java.lang.{Long => JLong}
import org.apache.flink.api.common.state.{ValueState, ValueStateDescriptor}
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.table.api.{StreamQueryConfig, Types}
import org.apache.flink.table.codegen.{Compiler, GeneratedAggregationsFunction}
import org.apache.flink.table.runtime.types.CRow
import org.apache.flink.table.util.Logging
import org.apache.flink.types.Row
import org.apache.flink.util.Collector
/**
* Aggregate Function used for the groupby (without window) aggregate
*
* @param genAggregations Generated aggregate helper function
* @param aggregationStateType The row type info of aggregation
*/
class GroupAggProcessFunction(
    private val genAggregations: GeneratedAggregationsFunction,
    private val aggregationStateType: RowTypeInfo,
    private val generateRetraction: Boolean,
    private val queryConfig: StreamQueryConfig)
  extends ProcessFunctionWithCleanupState[CRow, CRow](queryConfig)
    with Compiler[GeneratedAggregations]
    with Logging {

  // Compiled instance of the generated aggregation logic; created in open().
  private var function: GeneratedAggregations = _

  // Reused output wrappers: newRow is emitted as an accumulate message
  // (change = true), prevRow as a retraction of the previous result (change = false).
  private var newRow: CRow = _
  private var prevRow: CRow = _
  // True while processing the very first record seen for the current key.
  private var firstRow: Boolean = _
  // stores the accumulators
  private var state: ValueState[Row] = _
  // counts the number of added and retracted input records
  private var cntState: ValueState[JLong] = _

  override def open(config: Configuration) {
    // Bug fix: interpolate the fields with ${...}; the original "$genAggregations.name"
    // rendered the object's toString followed by the literal ".name"/".code".
    LOG.debug(s"Compiling AggregateHelper: ${genAggregations.name} \\n\\n " +
      s"Code:\\n${genAggregations.code}")
    val clazz = compile(
      getRuntimeContext.getUserCodeClassLoader,
      genAggregations.name,
      genAggregations.code)
    LOG.debug("Instantiating AggregateHelper.")
    function = clazz.newInstance()
    function.open(getRuntimeContext)

    newRow = new CRow(function.createOutputRow(), true)
    prevRow = new CRow(function.createOutputRow(), false)

    val stateDescriptor: ValueStateDescriptor[Row] =
      new ValueStateDescriptor[Row]("GroupAggregateState", aggregationStateType)
    state = getRuntimeContext.getState(stateDescriptor)

    val inputCntDescriptor: ValueStateDescriptor[JLong] =
      new ValueStateDescriptor[JLong]("GroupAggregateInputCounter", Types.LONG)
    cntState = getRuntimeContext.getState(inputCntDescriptor)

    initCleanupTimeState("GroupAggregateCleanupTime")
  }

  /**
   * Accumulates (change = true) or retracts (change = false) the input row into
   * the keyed accumulator state and emits the updated aggregate, preceded by a
   * retraction of the previous result when `generateRetraction` is enabled.
   */
  override def processElement(
      inputC: CRow,
      ctx: ProcessFunction[CRow, CRow]#Context,
      out: Collector[CRow]): Unit = {

    val currentTime = ctx.timerService().currentProcessingTime()
    // register state-cleanup timer
    registerProcessingCleanupTimer(ctx, currentTime)

    val input = inputC.row

    // get accumulators and input counter
    var accumulators = state.value()
    var inputCnt = cntState.value()

    if (null == accumulators) {
      firstRow = true
      accumulators = function.createAccumulators()
    } else {
      firstRow = false
    }

    if (null == inputCnt) {
      inputCnt = 0L
    }

    // Set group keys value to the final output
    function.setForwardedFields(input, newRow.row)
    function.setForwardedFields(input, prevRow.row)

    // Set previous aggregate result to the prevRow
    function.setAggregationResults(accumulators, prevRow.row)

    // update aggregate result and set to the newRow
    if (inputC.change) {
      inputCnt += 1
      // accumulate input
      function.accumulate(accumulators, input)
      function.setAggregationResults(accumulators, newRow.row)
    } else {
      inputCnt -= 1
      // retract input
      function.retract(accumulators, input)
      function.setAggregationResults(accumulators, newRow.row)
    }

    if (inputCnt != 0) {
      // we aggregated at least one record for this key

      // update the state
      state.update(accumulators)
      cntState.update(inputCnt)

      // if this was not the first row
      if (!firstRow) {
        if (prevRow.row.equals(newRow.row) && !stateCleaningEnabled) {
          // newRow is the same as before and state cleaning is not enabled.
          // We emit nothing
          // If state cleaning is enabled, we have to emit messages to prevent too early
          // state eviction of downstream operators.
          return
        } else {
          // retract previous result
          if (generateRetraction) {
            out.collect(prevRow)
          }
        }
      }
      // emit the new result
      out.collect(newRow)
    } else {
      // we retracted the last record for this key
      // sent out a delete message
      out.collect(prevRow)
      // and clear all state
      state.clear()
      cntState.clear()
    }
  }

  // Fires when the state-retention timer expires: drop the key's accumulators
  // and counters and let the generated function clean up.
  override def onTimer(
      timestamp: Long,
      ctx: ProcessFunction[CRow, CRow]#OnTimerContext,
      out: Collector[CRow]): Unit = {

    if (needToCleanupState(timestamp)) {
      cleanupState(state, cntState)
      function.cleanup()
    }
  }

  // Releases resources held by the generated aggregation code.
  override def close(): Unit = {
    function.close()
  }
}
| zimmermatt/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/GroupAggProcessFunction.scala | Scala | apache-2.0 | 5,943 |
package cookbook.core
// fintrospect-core
// Demonstrates typed request parameters in fintrospect: an optional "operator"
// header mapped to a binary Int function, plus a mandatory repeated "value" query
// parameter, composed into a small calculator service on port 9999.
object Simple_Request_Parameters_Example extends App {

  import com.twitter.finagle.http.Method.Get
  import com.twitter.finagle.http.path.Root
  import com.twitter.finagle.http.{Request, Response}
  import com.twitter.finagle.{Http, Service}
  import com.twitter.util.Await.ready
  import io.fintrospect.formats.PlainText.ResponseBuilder._
  import io.fintrospect.parameters.Header.Optional
  import io.fintrospect.parameters.Query.MandatorySeq
  import io.fintrospect.parameters.{Header, ParameterSpec, Query}
  import io.fintrospect.{Module, RouteModule, RouteSpec, ServerRoute}

  // Maps the raw header text to a function. NOTE(review): only "-" and "+" are
  // matched; confirm the intended behaviour for any other operator value.
  val operatorSpec: ParameterSpec[(Int, Int) => Int] = ParameterSpec.string().map {
    case "-" => (i: Int, j: Int) => i - j
    case "+" => (i: Int, j: Int) => i + j
  }

  val operator: Optional[(Int, Int) => Int] = Header.optional(operatorSpec, "operator", "+ or -")
  val values: MandatorySeq[Int] = Query.required.*.int("value")

  // Folds the "value" parameters with the supplied operator, defaulting to
  // addition when the header is absent.
  val calculate: Service[Request, Response] = Service.mk[Request, Response] {
    req => {
      val components: Seq[Int] = values <-- req
      val op = operator <-- req
      Ok(s"the answer is ${components.fold(0)(op.getOrElse(_ + _))}" + " !")
    }
  }

  val route: ServerRoute[Request, Response] = RouteSpec().taking(operator).taking(values).at(Get) bindTo calculate

  val module: Module = RouteModule(Root).withRoute(route)

  // Serve until the process is terminated.
  ready(Http.serve(":9999", module.toService))
}
//curl -v -H"operator: +" http://localhost:9999?value=10&value=20&value=70 | daviddenton/fintrospect | src/test/scala/cookbook/core/Simple_Request_Parameters_Example.scala | Scala | apache-2.0 | 1,525 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import scala.collection.mutable
import scala.collection.Set
import scala.collection.Map
import kafka.utils.Logging
import kafka.cluster.BrokerEndPoint
import kafka.metrics.KafkaMetricsGroup
import com.yammer.metrics.core.Gauge
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.utils.Utils
// Manages the pool of fetcher threads that replicate partition data from source brokers.
abstract class AbstractFetcherManager(protected val name: String, clientId: String, numFetchers: Int = 1)
  extends Logging with KafkaMetricsGroup {
  // map of (source broker_id, fetcher_id per source broker) => fetcher
  private val fetcherThreadMap = new mutable.HashMap[BrokerIdAndFetcherId, AbstractFetcherThread]
  // Guards all access to fetcherThreadMap.
  private val mapLock = new Object
  this.logIdent = "[" + name + "] "

  newGauge(
    "MaxLag",
    new Gauge[Long] {
      // current max lag across all fetchers/topics/partitions
      def value = fetcherThreadMap.foldLeft(0L)((curMaxAll, fetcherThreadMapEntry) => {
        fetcherThreadMapEntry._2.fetcherLagStats.stats.foldLeft(0L)((curMaxThread, fetcherLagStatsEntry) => {
          curMaxThread.max(fetcherLagStatsEntry._2.lag)
        }).max(curMaxAll)
      })
    },
    Map("clientId" -> clientId)
  )

  newGauge(
  "MinFetchRate", {
    new Gauge[Double] {
      // current min fetch rate across all fetchers/topics/partitions
      def value = {
        val headRate: Double =
          fetcherThreadMap.headOption.map(_._2.fetcherStats.requestRate.oneMinuteRate).getOrElse(0)

        fetcherThreadMap.foldLeft(headRate)((curMinAll, fetcherThreadMapEntry) => {
          fetcherThreadMapEntry._2.fetcherStats.requestRate.oneMinuteRate.min(curMinAll)
        })
      }
    }
  },
  Map("clientId" -> clientId)
  )

  // Deterministically assigns a topic-partition to one of numFetchers fetcher ids.
  private def getFetcherId(topic: String, partitionId: Int) : Int = {
    Utils.abs(31 * topic.hashCode() + partitionId) % numFetchers
  }

  // to be defined in subclass to create a specific fetcher
  def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): AbstractFetcherThread

  // Starts syncing follower replicas with their leaders from the given initial offsets.
  def addFetcherForPartitions(partitionAndOffsets: Map[TopicPartition, BrokerAndInitialOffset]) {
    mapLock synchronized {
      // Hash each partition's topic and partition id to a fetcher id, then group the
      // partitions by (broker endpoint, fetcher id); each group shares one fetcher thread.
      val partitionsPerFetcher = partitionAndOffsets.groupBy { case(topicPartition, brokerAndInitialOffset) =>
        BrokerAndFetcherId(brokerAndInitialOffset.broker, getFetcherId(topicPartition.topic, topicPartition.partition))}

      def addAndStartFetcherThread(brokerAndFetcherId: BrokerAndFetcherId, brokerIdAndFetcherId: BrokerIdAndFetcherId) {
        val fetcherThread = createFetcherThread(brokerAndFetcherId.fetcherId, brokerAndFetcherId.broker)
        fetcherThreadMap.put(brokerIdAndFetcherId, fetcherThread)
        fetcherThread.start
      }

      // Look up the fetcher thread for each key; create and start one when absent,
      // or when the existing thread points at a stale host/port for the same broker id.
      for ((brokerAndFetcherId, partitionAndOffsets) <- partitionsPerFetcher) {
        val brokerIdAndFetcherId = BrokerIdAndFetcherId(brokerAndFetcherId.broker.id, brokerAndFetcherId.fetcherId)
        fetcherThreadMap.get(brokerIdAndFetcherId) match {
          case Some(f) if f.sourceBroker.host == brokerAndFetcherId.broker.host && f.sourceBroker.port == brokerAndFetcherId.broker.port =>
            // reuse the fetcher thread
          case Some(f) =>
            f.shutdown()
            addAndStartFetcherThread(brokerAndFetcherId, brokerIdAndFetcherId)
          case None =>
            addAndStartFetcherThread(brokerAndFetcherId, brokerIdAndFetcherId)
        }

        // Hand the partitions and their starting offsets to the fetcher thread,
        // which wakes up and begins syncing.
        fetcherThreadMap(brokerIdAndFetcherId).addPartitions(partitionAndOffsets.map { case (tp, brokerAndInitOffset) =>
          tp -> brokerAndInitOffset.initOffset
        })
      }
    }

    info("Added fetcher for partitions %s".format(partitionAndOffsets.map { case (topicPartition, brokerAndInitialOffset) =>
      "[" + topicPartition + ", initOffset " + brokerAndInitialOffset.initOffset + " to broker " + brokerAndInitialOffset.broker + "] "}))
  }

  // Stops fetching the given partitions by removing them from every fetcher thread.
  def removeFetcherForPartitions(partitions: Set[TopicPartition]) {
    mapLock synchronized {
      for (fetcher <- fetcherThreadMap.values)
        fetcher.removePartitions(partitions)
    }
    info("Removed fetcher for partitions %s".format(partitions.mkString(",")))
  }

  // Shuts down fetcher threads that no longer serve any partition.
  def shutdownIdleFetcherThreads() {
    mapLock synchronized {
      val keysToBeRemoved = new mutable.HashSet[BrokerIdAndFetcherId]
      for ((key, fetcher) <- fetcherThreadMap) {
        if (fetcher.partitionCount <= 0) {
          fetcher.shutdown()
          keysToBeRemoved += key
        }
      }
      fetcherThreadMap --= keysToBeRemoved
    }
  }

  // Two-phase shutdown: signal every thread first, then await each termination.
  def closeAllFetchers() {
    mapLock synchronized {
      for ( (_, fetcher) <- fetcherThreadMap) {
        fetcher.initiateShutdown()
      }

      for ( (_, fetcher) <- fetcherThreadMap) {
        fetcher.shutdown()
      }
      fetcherThreadMap.clear()
    }
  }
}
// Grouping key for assigning partitions: destination broker endpoint plus fetcher id.
case class BrokerAndFetcherId(broker: BrokerEndPoint, fetcherId: Int)

// Pairs a source broker with the offset at which a partition should start fetching.
case class BrokerAndInitialOffset(broker: BrokerEndPoint, initOffset: Long)

// Map key for fetcherThreadMap: source broker id plus fetcher id.
case class BrokerIdAndFetcherId(brokerId: Int, fetcherId: Int)
| YMCoding/kafka-0.11.0.0-src-with-comment | core/src/main/scala/kafka/server/AbstractFetcherManager.scala | Scala | apache-2.0 | 6,291 |
package com.webtrends.harness.utils
import java.util.Locale
import org.specs2.mutable.SpecificationWithJUnit
// Exercises LocalizedString lookups against English and Russian messages: the
// default bundle, a bundle under a custom package path, and a custom bundle name.
// Missing Russian keys are expected to fall back to the default (English) text.
// NOTE(review): the matcher expressions sit directly inside `should` blocks with
// no `in` examples; confirm that specs2 registers each expectation as intended.
class LocalizedStringSpec extends SpecificationWithJUnit {

  "localized message" should {
    LocalizedString("hello")(Locale.ENGLISH) must be equalTo "Hello"
    LocalizedString("hello")(Locale.forLanguageTag("ru")) must be equalTo "Привет"
  }
  "fallback to default" should {
    LocalizedString("world")(Locale.ENGLISH) must be equalTo "World"
    LocalizedString("world")(Locale.forLanguageTag("ru")) must be equalTo "World"
  }
  "format" should {
    LocalizedString("greet", "world")(Locale.ENGLISH) must be equalTo "Hello, world"
    LocalizedString("greet", "world")(Locale.forLanguageTag("ru")) must be equalTo "Привет, world"
  }

  "localized message in custom path" should {
    LocalizedString("custom_path.hello")(Locale.ENGLISH, "com.custom.path.messages") must be equalTo "Hello"
    LocalizedString("custom_path.hello")(Locale.forLanguageTag("ru"), "com.custom.path.messages") must be equalTo "Привет"
  }
  "fallback to default in custom path" should {
    LocalizedString("custom_path.world")(Locale.ENGLISH, "com.custom.path.messages") must be equalTo "World"
    LocalizedString("custom_path.world")(Locale.forLanguageTag("ru"), "com.custom.path.messages") must be equalTo "World"
  }
  "format in custom path" should {
    LocalizedString("custom_path.greet", "world")(Locale.ENGLISH, "com.custom.path.messages") must be equalTo "Hello, world"
    LocalizedString("custom_path.greet", "world")(Locale.forLanguageTag("ru"), "com.custom.path.messages") must be equalTo "Привет, world"
  }

  "custom localized message" should {
    LocalizedString("custom.hello")(Locale.ENGLISH, "custom") must be equalTo "Hello"
    LocalizedString("custom.hello")(Locale.forLanguageTag("ru"), "custom") must be equalTo "Привет"
  }
  "custom fallback to default" should {
    LocalizedString("custom.world")(Locale.ENGLISH, "custom") must be equalTo "World"
    LocalizedString("custom.world")(Locale.forLanguageTag("ru"), "custom") must be equalTo "World"
  }
  "custom format" should {
    LocalizedString("custom.greet", "custom world")(Locale.ENGLISH, "custom") must be equalTo "Hello, custom world"
    LocalizedString("custom.greet", "custom world")(Locale.forLanguageTag("ru"), "custom") must be equalTo "Привет, custom world"
  }
} | Kraagen/wookiee | wookiee-core/src/test/scala/com/webtrends/harness/utils/LocalizedStringSpec.scala | Scala | apache-2.0 | 2,424 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive
package issues
import minitest.TestSuite
import monix.eval.Task
import monix.execution.{Scheduler, UncaughtExceptionReporter}
import monix.execution.schedulers.SchedulerService
import monix.reactive.subjects.{AsyncSubject, Subject}
import scala.concurrent.{Await, TimeoutException}
import scala.concurrent.duration._
import scala.util.{Failure, Success}
object Issue908Suite extends TestSuite[SchedulerService] {
val CONCURRENT_TASKS = 1000
val CYCLES = 100
def setup(): SchedulerService = {
Scheduler.computation(
parallelism = math.max(Runtime.getRuntime.availableProcessors(), 2),
name = "issue908-suite",
daemonic = true,
reporter = UncaughtExceptionReporter(_ => ()))
}
def tearDown(env: SchedulerService): Unit = {
env.shutdown()
assert(env.awaitTermination(1.minute), "scheduler.awaitTermination")
}
test("broken tasks test (1)") { implicit sc =>
for (_ <- 0 until CYCLES) {
val task = Task.async[String] { cb =>
sc.executeAsync(() => cb.onSuccess("1"))
sc.executeAsync(() => cb.onSuccess("2"))
}
val f = Task.race(task, task).runToFuture
val r = Await.result(f, 30.seconds)
assert(r != null, "r != null")
assert(r.isInstanceOf[Either[_, _]], "r.isInstanceOf[Either[_, _]]")
val i = r.fold(x => x, x => x)
assert(i == "1" || i == "2", s"$i == 1 || $i == 2")
}
}
test("broken tasks test (2)") { implicit sc =>
for (_ <- 0 until CYCLES) {
val task = Task.async[String] { cb =>
sc.executeAsync(() => cb.onSuccess("1"))
sc.executeAsync(() => cb.onSuccess("2"))
}
val f = Task.raceMany((0 until CONCURRENT_TASKS).map(_ => task)).runToFuture
val r = Await.result(f, 30.seconds)
assert(r == "1" || r == "2", s"$r == 1 || $r == 2")
}
}
test("broken tasks test (3)") { implicit sc =>
for (_ <- 0 until CYCLES) {
val task = Task.async[String] { cb =>
sc.executeAsync(() => cb.onSuccess("1"))
sc.executeAsync(() => cb.onSuccess("2"))
}
val f = task.timeout(1.millis).materialize.runToFuture
Await.result(f, 30.seconds) match {
case Success("1" | "2") =>
case Failure(_: TimeoutException) =>
case other =>
fail(s"Invalid value: $other")
}
}
}
test("concurrent test (1)") { implicit sc =>
for (_ <- 0 until CYCLES) {
val subject = AsyncSubject.apply[Int]()
val tasks = (0 until CONCURRENT_TASKS).map { _ =>
subject.firstL.timeoutTo(1.millis, Task(1))
}
val await = Task.parSequenceUnordered(tasks).map(_.sum)
val f = Await.result(await.runToFuture, 30.seconds)
assertEquals(f, CONCURRENT_TASKS)
}
}
test("concurrent test (2)") { implicit sc =>
for (_ <- 0 until CYCLES) {
val subject = AsyncSubject.apply[Int]()
val tasks = (0 until CONCURRENT_TASKS).map { _ =>
subject.firstL.timeoutTo(30.seconds, Task(1))
}
val await = for {
fiber <- Task.parSequenceUnordered(tasks).map(_.sum).start
_ <- awaitSubscribers(subject, CONCURRENT_TASKS)
_ <- Task {
subject.onNext(2)
subject.onComplete()
}
result <- fiber.join
} yield {
result
}
val f = Await.result(await.runToFuture, 30.seconds)
assertEquals(f, CONCURRENT_TASKS * 2)
}
}
  /**
   * Polls `subject.size` once per millisecond until at least `nr` subscribers
   * are attached. The recursion happens inside `Task.suspend`/`flatMap`, so
   * nothing runs until the returned task is executed.
   */
  def awaitSubscribers(subject: Subject[_, _], nr: Int): Task[Unit] =
    Task.suspend {
      if (subject.size < nr)
        Task.sleep(1.millis).flatMap(_ => awaitSubscribers(subject, nr))
      else
        Task.unit
    }
}
| alexandru/monifu | monix-reactive/jvm/src/test/scala/monix/reactive/issues/Issue908Suite.scala | Scala | apache-2.0 | 4,335 |
package im.mange.driveby.conditions
import im.mange.driveby.browser.UnSafeBrowser
import im.mange.driveby.{By, Condition}
/**
 * Condition that succeeds when the element located by `by` has exactly the
 * given set of CSS class names (no more, no fewer).
 *
 * The `class` attribute is split on runs of whitespace (HTML allows any
 * whitespace as a separator, not just a single space) and empty tokens are
 * dropped, so `class="a  b"` compares equal to `Set("a", "b")` and an empty
 * attribute compares equal to the empty set.
 */
case class ElementClassesEquals(by: By, classNames: Set[String]) extends Condition {
  import im.mange.driveby.Describer._

  def expectation = expect("ElementClassesEqual", List(by.toString, classNames.mkString(" ")))

  // split("\\s+") instead of split(" "): a plain single-space split yields
  // spurious "" entries for doubled spaces/tabs and for an empty attribute,
  // which made the set comparison fail incorrectly.
  def isSatisfied(browser: UnSafeBrowser) = {
    browser.attribute(by, "class").split("\\s+").filter(_.nonEmpty).toSet == classNames
  }

  def describeFailure(browser: UnSafeBrowser) = { expectation + butWas(() => browser.attribute(by, "class")) }
}
package com.sksamuel.elastic4s.http.termvectors
import com.fasterxml.jackson.annotation.JsonProperty
import com.sksamuel.elastic4s.DocumentRef
import com.sksamuel.elastic4s.http.{HttpExecutable, ResponseHandler}
import com.sksamuel.elastic4s.termvectors.TermVectorsDefinition
import org.apache.http.entity.{ContentType, StringEntity}
import org.elasticsearch.client.RestClient
import org.elasticsearch.common.xcontent.XContentFactory
import scala.concurrent.Future
/**
 * HTTP executable for Elasticsearch `_termvectors` requests: serialises a
 * [[TermVectorsDefinition]] into the JSON body and query parameters expected
 * by the REST API and decodes the reply into a [[TermVectorsResponse]].
 */
trait TermVectorsExecutables {

  implicit object TermVectorHttpExecutable extends HttpExecutable[TermVectorsDefinition, TermVectorsResponse] {

    override def execute(client: RestClient, request: TermVectorsDefinition): Future[TermVectorsResponse] = {

      // GET /{index}/{type}/{id}/_termvectors
      val endpoint = s"/${request.indexAndType.index}/${request.indexAndType.`type`}/${request.id}/_termvectors"

      val builder = XContentFactory.jsonBuilder().startObject()

      // Top-level options: each field is only emitted when set on the request.
      if (request.fields.nonEmpty)
        builder.array("fields", request.fields: _*)
      request.termStatistics.foreach(builder.field("term_statistics", _))
      request.fieldStatistics.foreach(builder.field("field_statistics", _))
      request.payloads.foreach(builder.field("payloads", _))
      request.positions.foreach(builder.field("positions", _))
      request.offsets.foreach(builder.field("offsets", _))

      // Nested "filter" object restricting which terms are returned.
      builder.startObject("filter")
      request.maxNumTerms.foreach(builder.field("max_num_terms", _))
      request.minTermFreq.foreach(builder.field("min_term_freq", _))
      request.maxTermFreq.foreach(builder.field("max_term_freq", _))
      request.minDocFreq.foreach(builder.field("min_doc_freq", _))
      request.maxDocFreq.foreach(builder.field("max_doc_freq", _))
      request.minWordLength.foreach(builder.field("min_word_length", _))
      request.maxWordLength.foreach(builder.field("max_word_length", _))
      builder.endObject()
      builder.endObject()

      // "realtime" travels as a query-string parameter, not in the body.
      val params = scala.collection.mutable.Map.empty[String, Any]
      request.realtime.foreach(params.put("realtime", _))

      client.async("GET", endpoint, params.toMap, new StringEntity(builder.string(), ContentType.APPLICATION_JSON), ResponseHandler.default)
    }
  }
}
/** Response of a `_termvectors` call; fields map 1:1 onto the JSON payload via Jackson. */
case class TermVectorsResponse(@JsonProperty("_index") index: String,
                               @JsonProperty("_type") `type`: String,
                               @JsonProperty("_id") id: String,
                               @JsonProperty("_version") version: Long,
                               found: Boolean,
                               took: Int,
                               @JsonProperty("term_vectors") termVectors: Map[String, TermVectors]) {
  /** Convenience reference to the document this response describes. */
  def ref = DocumentRef(index, `type`, id)
}
/** Per-field statistics block of a term-vectors response (Jackson-mapped). */
case class FieldStatistics(@JsonProperty("sum_doc_freq") sumDocFreq: Int,
                           @JsonProperty("doc_count") docCount: Int,
                           @JsonProperty("sum_ttf") sumTtf: Int)
/** Statistics and token occurrences for a single term (Jackson-mapped). */
case class Terms(@JsonProperty("doc_freq") docFreq: Int,
                 @JsonProperty("ttf") ttf: Int,
                 @JsonProperty("score") score: Double,
                 @JsonProperty("term_freq") termFreq: Int,
                 tokens: Seq[Token])
/** One occurrence of a term: position plus character offsets (Jackson-mapped). */
case class Token(@JsonProperty("position") position: Int,
                 @JsonProperty("start_offset") startOffset: Int,
                 @JsonProperty("end_offset") endOffset: Int)
/** Term vectors of a single field: field statistics plus a map of term -> Terms. */
case class TermVectors(@JsonProperty("field_statistics") fieldStatistics: FieldStatistics,
                       terms: Map[String, Terms])
| aroundus-inc/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/termvectors/TermVectorsExecutables.scala | Scala | apache-2.0 | 3,486 |
package korolev.effect.io
import java.net.SocketAddress
import java.nio.ByteBuffer
import java.nio.channels.{AsynchronousChannelGroup, AsynchronousCloseException, AsynchronousServerSocketChannel, AsynchronousSocketChannel, CompletionHandler}
import korolev.data.BytesLike
import korolev.effect.{Close, Effect, Queue, Stream}
import korolev.effect.syntax._
import scala.concurrent.ExecutionContext
/**
 * Stream API for AsynchronousServerSocketChannel.
 * Use `ServerSocket.bind` to start listening.
 * @see [[AsynchronousServerSocketChannel]]
 */
class ServerSocket[F[_]: Effect, B: BytesLike](channel: AsynchronousServerSocketChannel,
                                               bufferSize: Int) extends Stream[F, RawDataSocket[F, B]] {

  // Set by cancel() and read by pull(); volatile so a concurrent accept
  // observes cancellation promptly.
  @volatile private var canceled = false

  /**
   * Accepts the next incoming connection, wrapping it in a [[RawDataSocket]]
   * with freshly allocated read/write buffers of `bufferSize` bytes each.
   * Yields `None` once the stream has been cancelled (including when the
   * pending accept is aborted by closing the channel).
   */
  def pull(): F[Option[RawDataSocket[F, B]]] = Effect[F].promise { cb =>
    if (canceled) cb(Right(None)) else {
      channel.accept((), new CompletionHandler[AsynchronousSocketChannel, Unit] {
        def completed(socket: AsynchronousSocketChannel, notUsed: Unit): Unit =
          cb(Right(Some(new RawDataSocket[F, B](socket, ByteBuffer.allocate(bufferSize), ByteBuffer.allocate(bufferSize)))))
        def failed(throwable: Throwable, notUsed: Unit): Unit = throwable match {
          case _: AsynchronousCloseException if canceled =>
            // It's okay: accepting new connections was
            // stopped by Stream cancel.
            cb(Right(None))
          case _ => cb(Left(throwable))
        }
      })
    }
  }

  /** Stops accepting connections: marks the stream cancelled and closes the channel. */
  def cancel(): F[Unit] = Effect[F].delay {
    canceled = true
    channel.close()
  }
}
object ServerSocket {

  /**
   * Bind server socket to `address` and accept connections with `f`.
   *
   * Each accepted connection is handled on its own fiber; the fibers' join
   * handles are queued so that a graceful shutdown can wait for them.
   *
   * @see [[bind]]
   */
  def accept[F[_]: Effect, B: BytesLike](address: SocketAddress,
                                         backlog: Int = 0,
                                         bufferSize: Int = 8096,
                                         group: AsynchronousChannelGroup = null,
                                         gracefulShutdown: Boolean = false)
                                        (f: RawDataSocket[F, B] => F[Unit])
                                        (implicit ec: ExecutionContext): F[ServerSocketHandler[F]] =
    bind(address, backlog, bufferSize, group = group).flatMap { server =>
      // Join handles of all connection-handling fibers, consumed on shutdown.
      val connectionsQueue = Queue[F, F[Unit]]()
      server
        .foreach { connection =>
          f(connection)
            .start
            .flatMap(f => connectionsQueue.enqueue(f.join()))
        }
        .start
        .map { serverFiber =>
          new ServerSocketHandler[F] {
            def awaitShutdown(): F[Unit] = {
              if (gracefulShutdown) {
                // Wait for the accept loop, then stop the queue and drain it,
                // i.e. wait for every in-flight connection handler.
                serverFiber.join() *>
                  connectionsQueue.stop() *>
                  connectionsQueue.stream.foreach(identity)
              } else {
                serverFiber.join()
              }
            }
            def stopServingRequests(): F[Unit] =
              server.cancel()
          }
        }
    }

  /**
   * Open an AsynchronousServerSocketChannel and bind it to `socketAddress`.
   * @see [[AsynchronousServerSocketChannel]]
   */
  def bind[F[_]: Effect, B: BytesLike](socketAddress: SocketAddress,
                                       backlog: Int = 0,
                                       bufferSize: Int = 8096,
                                       group: AsynchronousChannelGroup = null): F[ServerSocket[F, B]] =
    Effect[F].delay {
      val channel = AsynchronousServerSocketChannel
        .open(group)
        .bind(socketAddress, backlog)
      new ServerSocket[F, B](channel, bufferSize)
    }

  sealed trait ServerSocketHandler[F[_]] {

    /**
     * Awaits server socket close.
     * If server configured with graceful shutdown, waits until all client connections are closed.
     */
    def awaitShutdown(): F[Unit]

    /**
     * Stop accepting new connections and serving requests.
     *
     * If you are using server with HTTP note that WebSockets
     * and other request without content length will be open
     * until connection closed by a client.
     */
    def stopServingRequests(): F[Unit]
  }

  object ServerSocketHandler {
    /** Close instance: `close` stops serving, `onClose` waits for shutdown. */
    implicit def serverSocketHandlerCloseInstance[F[_]: Effect]: Close[F, ServerSocketHandler[F]] =
      new Close[F, ServerSocketHandler[F]] {
        def onClose(that: ServerSocketHandler[F]): F[Unit] =
          that.awaitShutdown()
        def close(that: ServerSocketHandler[F]): F[Unit] =
          that.stopServingRequests()
      }
  }
} | fomkin/korolev | modules/http/src/main/scala/korolev/effect/io/ServerSocket.scala | Scala | apache-2.0 | 4,594 |
package org.scalaide.debug.internal.command
import org.scalaide.debug.internal.model.JdiRequestFactory
import org.scalaide.debug.internal.model.ScalaDebugTarget
import org.scalaide.debug.internal.model.ScalaStackFrame
import org.scalaide.debug.internal.model.ScalaThread
import org.eclipse.debug.core.DebugEvent
import com.sun.jdi.event.StepEvent
import com.sun.jdi.request.EventRequest
import com.sun.jdi.request.StepRequest
import com.sun.jdi.event.Event
import org.scalaide.debug.internal.BaseDebuggerActor
object ScalaStepInto {

  /*
   * Initialize a Scala step into
   */
  def apply(scalaStackFrame: ScalaStackFrame): ScalaStep = {
    // we noticed that STEP_LINE would miss events for stepping into BoxesRunTime. Might be because the
    // file has no source file information, need to check
    val stepIntoRequest = JdiRequestFactory.createStepRequest(StepRequest.STEP_MIN, StepRequest.STEP_INTO, scalaStackFrame.thread)
    stepIntoRequest.setSuspendPolicy(EventRequest.SUSPEND_EVENT_THREAD)

    // A companion STEP_OUT request used to escape 'opaque' locations entered
    // by the step-into (see ScalaStepIntoActor).
    val stepOutRequest = JdiRequestFactory.createStepRequest(StepRequest.STEP_LINE, StepRequest.STEP_OUT, scalaStackFrame.thread)
    stepOutRequest.setSuspendPolicy(EventRequest.SUSPEND_EVENT_THREAD)

    // Depth of the selected frame counted from the bottom of the stack; used
    // by the actor to recognise when we are back in the starting method.
    val stackFrames = scalaStackFrame.thread.getStackFrames
    val depth = stackFrames.length - stackFrames.indexOf(scalaStackFrame)

    val companionActor = new ScalaStepIntoActor(scalaStackFrame.getDebugTarget, scalaStackFrame.thread, stepIntoRequest, stepOutRequest, depth, scalaStackFrame.stackFrame.location.lineNumber) {
      // TODO: when implementing support without company objects, need to workaround problem discussed in: https://groups.google.com/forum/#!topic/scala-internals/sbBISSuu9kg
      override val scalaStep: ScalaStep = new ScalaStepImpl(this)
    }
    companionActor.start()

    companionActor.scalaStep
  }
}
/**
 * Actor used to manage a Scala step into. It keeps track of the request needed to perform this step.
 * This class is thread safe. Instances are not to be created outside of the ScalaStepInto object.
 *
 * The step alternates between two JDI requests: a STEP_INTO request that
 * drives the stepping, and a STEP_OUT request used to climb back out of
 * 'opaque' locations (code the user should not land in).
 */
private[command] abstract class ScalaStepIntoActor(debugTarget: ScalaDebugTarget, thread: ScalaThread, stepIntoRequest: StepRequest, stepOutRequest: StepRequest, stackDepth: Int, stackLine: Int) extends BaseDebuggerActor {
  /**
   * Needed to perform a correct step out (see Eclipse bug report #38744)
   */
  private var stepOutStackDepth = 0

  // Whether the JDI requests are currently registered and enabled.
  private var enabled = false

  protected[command] def scalaStep: ScalaStep

  override protected def postStart(): Unit = link(thread.companionActor)

  override protected def behavior = {
    // JDI event triggered when a step has been performed
    case stepEvent: StepEvent =>
      // The Boolean reply tells the dispatcher whether the thread stays suspended.
      reply(stepEvent.request.asInstanceOf[StepRequest].depth match {
        case StepRequest.STEP_INTO =>
          if (debugTarget.cache.isOpaqueLocation(stepEvent.location)) {
            // don't step deeper into constructor from 'hidden' entities:
            // remember the depth, switch from step-into to step-out.
            stepOutStackDepth = stepEvent.thread.frameCount
            stepIntoRequest.disable()
            stepOutRequest.enable()
            false
          } else {
            // Stop only on a non-transparent location on a different line;
            // otherwise keep stepping.
            if (!debugTarget.cache.isTransparentLocation(stepEvent.location) && stepEvent.location.lineNumber != stackLine) {
              terminate()
              thread.suspendedFromScala(DebugEvent.STEP_INTO)
              true
            }
            else false
          }
        case StepRequest.STEP_OUT =>
          if (stepEvent.thread.frameCount == stackDepth && stepEvent.location.lineNumber != stackLine) {
            // we are back on the method, but on a different line, stopping the stepping
            terminate()
            thread.suspendedFromScala(DebugEvent.STEP_INTO)
            true
          } else {
            // switch back to step into only if the step return has been effectively done.
            if (stepEvent.thread.frameCount < stepOutStackDepth) {
              // launch a new step into
              stepOutRequest.disable()
              stepIntoRequest.enable()
            }
            false
          }
      })
    // user step request
    case ScalaStep.Step =>
      step()
    case ScalaStep.Stop =>
      terminate()
  }

  // Enables the requests and resumes the thread to perform the step.
  private def step(): Unit = {
    enable()
    thread.resumeFromScala(scalaStep, DebugEvent.STEP_INTO)
  }

  // Disables the requests and shuts this actor down.
  private def terminate(): Unit = {
    disable()
    poison()
  }

  // Registers this actor for both requests and enables the step-into one.
  private def enable(): Unit = {
    if (!enabled) {
      val eventDispatcher = debugTarget.eventDispatcher
      eventDispatcher.setActorFor(this, stepIntoRequest)
      eventDispatcher.setActorFor(this, stepOutRequest)
      stepIntoRequest.enable()
      enabled = true
    }
  }

  // Unregisters, disables and deletes both JDI requests.
  private def disable(): Unit = {
    if (enabled) {
      val eventDispatcher = debugTarget.eventDispatcher
      val eventRequestManager = debugTarget.virtualMachine.eventRequestManager
      // make sure that actors are gracefully shut down
      eventDispatcher.unsetActorFor(stepIntoRequest)
      eventDispatcher.unsetActorFor(stepOutRequest)
      stepIntoRequest.disable()
      stepOutRequest.disable()
      eventRequestManager.deleteEventRequest(stepIntoRequest)
      eventRequestManager.deleteEventRequest(stepOutRequest)
      enabled = false
    }
  }

  override protected def preExit(): Unit = {
    unlink(thread.companionActor)
    disable()
  }
}
| Kwestor/scala-ide | org.scala-ide.sdt.debug/src/org/scalaide/debug/internal/command/ScalaStepInto.scala | Scala | bsd-3-clause | 5,146 |
package Tutorial
import Chisel._
import Node._
import Literal._
import scala.collection.mutable.HashMap
import scala.collection.mutable.ArrayBuffer
/*This source implements a module that add a constat value to each input data element,
 * and send the result out as the output data element. The constant value is retrived
 * through an offload interface.
 */
// NOTE(review): MTEngine/Engine/Replicate/Offload semantics come from the
// GorillaUtil DSL and are not visible here — descriptions below should be
// confirmed against that library.
class Top extends Component with GorillaUtil {
  // 32-bit input/output bundle for the streamed data elements.
  val io = new gInOutBundle (() => UFix(width = 32), () => UFix(width = 32))
  // Main engine built from incThroughOffload.c (second argument presumably a
  // thread/instance count — TODO confirm).
  val main = MTEngine("incThroughOffload.c", 1)
  // Engine that serves the increment constant.
  val incFactor = Engine("sendConst.c")
  // Wire the replicated main engine to the constant provider via the
  // "incrementFactor" offload interface.
  val result = Offload(Replicate(main, 1), incFactor, "incrementFactor")
}
| seyedmaysamlavasani/GorillaPP | apps/incThroughOffloadUseDef/src/backup/Top.scala | Scala | bsd-3-clause | 653 |
package ch.ethz.inf.da.tipstersearch.metrics
import ch.ethz.inf.da.tipstersearch.Query
/**
 * Computes basic IR retrieval metrics about a given query.
 * The query contains the result set and the known binary relevance ground
 * truth, from which this class derives precision/recall style metrics.
 *
 * @constructor creates a precision recall metric based on given query data
 * @param query the query, carrying both the ranked results and the ground truth
 */
class PrecisionRecall(query:Query) {

    /** Retrieved document ids in rank order (best first). */
    val rankings:List[String] = query.results.ordered.map(r => r.id)
    /** The set of all retrieved document ids. */
    val retrieved:Set[String] = rankings.toSet
    /** The set of document ids judged relevant (label 1) in the ground truth. */
    val relevant:Set[String] = query.truth.filter(_._2 == 1).map{case (id,c) => id}.toSet

    // True positives, computed once instead of re-intersecting in every metric.
    private val truePositives: Set[String] = retrieved & relevant

    /**
     * Computes the precision over the results.
     * Note: yields NaN when nothing was retrieved (0.0 / 0.0), matching the
     * original formulation.
     *
     * @return the precision
     */
    def precision : Double =
        truePositives.size.toDouble / retrieved.size.toDouble

    /**
     * Computes the precision over the first k results.
     *
     * @param k the number of results to consider (1-based cutoff)
     * @return the precision
     */
    def precision(k:Int) : Double =
        (rankings.take(k).toSet & relevant).size.toDouble / k

    /**
     * Computes the recall over the results (NaN when the ground truth
     * contains no relevant documents).
     *
     * @return the recall
     */
    def recall : Double =
        truePositives.size.toDouble / relevant.size.toDouble

    /**
     * Indicator function to determine if the document at rank k is relevant.
     *
     * @param k the index to check (1-based; must satisfy 1 <= k <= rankings.length)
     * @return 1 if the document at k is relevant, 0 if it is not
     */
    def isRelevant(k:Int) : Int =
        if (relevant.contains(rankings(k-1))) 1 else 0

    /**
     * Computes the average precision over the results: the mean of
     * precision@k taken at every relevant rank, normalised by the number of
     * relevant documents retrieved. Returns 0.0 when no retrieved document
     * is relevant.
     *
     * @return the average precision
     */
    def averagePrecision : Double =
        if (truePositives.isEmpty) {
            0.0
        } else {
            val sumOfAvgPrec: Double =
                (1 to retrieved.size).map(k => precision(k) * isRelevant(k).toDouble).sum
            sumOfAvgPrec / truePositives.size.toDouble
        }
}
| rjagerman/TipsterSearch | src/main/scala/metrics/PrecisionRecall.scala | Scala | mit | 2,222 |
package controllers
import fly.play.aws.{Aws4Signer, AwsCredentials}
import fly.play.s3.{BucketItem, S3, S3Client, S3Configuration}
import models.ImageId
import play.api.Logger
import play.api.Play.current
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.json.JsArray
import play.api.libs.ws.{WS, WSProxyServer, WSRequestHolder}
import play.api.mvc.{Action, Controller, Flash}
import system.index.S3Index
import system.{Configuration, Production}
import scala.concurrent.Future
import scala.xml.Elem
/** One in-progress S3 multipart upload, identified by object key and upload id. */
case class MultipartUpload(key: String, uploadId: String)
/**
 * AWS V4 signer for S3 that always includes the payload hash header
 * (`x-amz-content-sha256`) and, when configured, routes requests through
 * an HTTP proxy.
 */
class HitherS3Signer(credentials: AwsCredentials, region: String, proxy: Option[WSProxyServer])
  extends Aws4Signer(credentials, "s3", region) {

  // Attach the configured proxy to the request, or pass it through untouched.
  private def addProxy(request: WSRequestHolder): WSRequestHolder = {
    proxy.map { p =>
      Logger.debug("Adding proxy to request")
      request.withProxyServer(p)
    }.getOrElse(request)
  }

  // Always include the content payload hash header before signing.
  override def sign(request: WSRequestHolder, method: String, body: Array[Byte]): WSRequestHolder =
    super.sign(addProxy(request.withHeaders(amzContentSha256(body))), method, body)
}
/**
 * Admin endpoints for maintaining the S3 bucket that backs the registry:
 * listing and aborting stale multipart uploads, and deleting orphaned or
 * incomplete image layers.
 */
object S3Controller extends Controller with ContentFeeding {
  lazy val s3Config = S3Configuration.fromConfig
  lazy val s3Client = new S3Client(WS.client, new HitherS3Signer(s3Config.credentials, s3Config.region, Configuration.aws.proxy), s3Config)
  lazy val s3 = new S3(s3Client)
  lazy val bucket = s3.getBucket(Configuration.s3.bucketName)

  /** Renders the bucket's in-progress multipart uploads, sorted by upload id. */
  def multipartUploads = Action.async { implicit request =>
    val awsRequest = listMultipartUploads
    awsRequest.get().map { response =>
      response.status match {
        case 200 => Ok(views.html.listMultipartUploads(Configuration.s3.bucketName,
          extract(scala.xml.XML.loadString(response.body)).sortBy(_.uploadId)))
        case _ => InternalServerError
      }
    }
  }

  // Signed "list multipart uploads" request; the bare `uploads` query
  // parameter selects that S3 API operation.
  private def listMultipartUploads =
    s3Client.resourceRequest(Configuration.s3.bucketName, "")
      .withHeaders("Content-Type" -> "application/json")
      .withQueryString("uploads" -> "")

  /** Aborts a single multipart upload, reporting the outcome as a message. */
  private def removeMultipartUpload(upload: MultipartUpload): Future[String] = {
    // (A previously computed but never used request URL local was removed.)
    val awsRequest = s3Client.resourceRequest(Configuration.s3.bucketName, upload.key)
      .withQueryString("uploadId" -> upload.uploadId)
    awsRequest.delete().map { response =>
      response.status match {
        case 204 => s"${upload.key} cleared"
        case s => s"${upload.key} not cleared with status $s, ${response.body}"
      }
    }
  }

  /** Aborts every in-progress multipart upload, reporting each result via flash. */
  def clearMultipartUploads = Action.async { implicit request =>
    listMultipartUploads.get().flatMap { response =>
      response.status match {
        case 200 =>
          val fs = extract(scala.xml.XML.loadString(response.body)).map(removeMultipartUpload)
          Future.sequence(fs).map { results =>
            val flash = Flash(Map(results.map("success" -> _): _*))
            Redirect(routes.RepositoryController.repositories).flashing(flash)
          }
        case _ => Future(InternalServerError)
      }
    }
  }

  // Parses the ListMultipartUploads XML payload into MultipartUpload values.
  private def extract(xml: Elem): List[MultipartUpload] = {
    (xml \\ "Upload").iterator.toList.map { node =>
      (node \\ "Key", node \\ "UploadId") match {
        case (key, uploadId) => MultipartUpload(key.text, uploadId.text)
      }
    }
  }

  /** Recursively strips leading/trailing double or single quotes from `s`. */
  def trimQuotes(s: String): String = s match {
    case _ if s.startsWith("\"") => trimQuotes(s.substring(1))
    case _ if s.startsWith("'") => trimQuotes(s.substring(1))
    case _ if s.endsWith("\"") => trimQuotes(s.substring(0, s.length - 1))
    case _ if s.endsWith("'") => trimQuotes(s.substring(0, s.length - 1))
    case _ => s
  }

  // Resolves the ancestry (chain of parent image ids) of an image from the
  // registry; empty when the image or its ancestry JSON is missing/malformed.
  private def ancestryFor(imageId: ImageId): Future[Set[ImageId]] = {
    val registry = Production.s3Registry
    registry.ancestry(imageId).flatMap {
      case Some(ce) => ce.asJson.map {
        case JsArray(ids) => ids.map(id => ImageId(trimQuotes(id.toString()))).toSet
        case _ => Logger.debug(s"Ancestry for $imageId was not a JsArray!"); Set()
      }
      case None => Future(Set())
    }
  }

  // Every layer reachable from some tagged (head) image in the index:
  // first collect all tagged versions across repositories, then union their
  // ancestries.
  private def listIndexedLayers: Future[Set[ImageId]] = {
    val index: S3Index = Production.s3Index
    val headImages = index.repositories.flatMap { repos =>
      repos.foldLeft(Future(Set[ImageId]())) { case (f, repo) =>
        f.flatMap { imageIds =>
          index.tagSet(repo).map { tags =>
            tags.map(_.version) ++ imageIds
          }
        }
      }
    }
    headImages.flatMap { imageIds =>
      imageIds.foldLeft(Future(Set[ImageId]())) { case (f, i) =>
        f.flatMap(acc => ancestryFor(i).map(_ ++ acc))
      }
    }
  }

  /** Deletes layers present in the bucket but unreachable from any indexed image. */
  def removeOrphanedLayers() = Action.async { implicit request =>
    for {
      indexed <- listIndexedLayers
      all <- listAllLayers
      toRemove = all -- indexed
      _ <- removeLayers(toRemove)
    } yield {
      Redirect(routes.RepositoryController.repositories).flashing("success" -> s"Found ${indexed.size} layers in the index. ${all.size} layers in total. Removed ${toRemove.size}")
    }
  }

  // All layer ids physically present under the registry root in the bucket.
  private def listAllLayers: Future[Set[ImageId]] = {
    bucket.list(Configuration.s3.registryRoot + "/").map(_.map(item => ImageId(item.name.split("/").last)).toSet)
  }

  /**
   * Removes layer directories in the S3 bucket that do not have a 'layer' entry in them
   */
  def removeIncomplete() = Action.async { implicit request =>
    findIncompletes.map { incompletes =>
      // NOTE(review): removals are fired and forgotten here — the redirect
      // (and its success message) does not wait for them to complete.
      incompletes.map(i => ImageId(i.name.split("/").last)).foreach(removeLayer)
      Redirect(routes.RepositoryController.repositories).flashing("success" -> s"Removed ${incompletes.length} incomplete layers from ${bucket.name}")
    }
  }

  // Deletes the given layers one after another, chaining the futures.
  private def removeLayers(layerIds: Set[ImageId]): Future[Unit] = {
    layerIds.foldLeft(Future[Unit](Unit)) { case (f, layerId) =>
      f.flatMap(_ => removeLayer(layerId))
    }
  }

  /**
   * Deletes every object under a layer's directory, then the directory entry
   * itself. Item deletions are sequenced via flatMap: the previous version's
   * foldLeft discarded its accumulator, so removals ran unordered and the
   * directory could be removed before its contents were gone.
   */
  private def removeLayer(layerId: ImageId): Future[Unit] = {
    Logger.debug(s"Removing layer $layerId")
    val layerPath: String = Configuration.s3.registryRoot + "/" + layerId.id + "/"
    bucket.list(layerPath).flatMap { items =>
      items.foldLeft(Future.successful(())) { case (acc, item) =>
        acc.flatMap(_ => bucket.remove(item.name))
      }
    }.flatMap(_ => bucket.remove(layerPath))
  }

  // Layer directories whose listing lacks a 'layer' object.
  private def findIncompletes: Future[List[BucketItem]] = {
    bucket.list(Configuration.s3.registryRoot + "/").flatMap { layerIds =>
      layerIds.foldLeft(Future[List[BucketItem]](List())) { case (f, layerItem) =>
        f.flatMap { acc =>
          bucket.list(layerItem.name).map { items =>
            val entries = items.map(_.name.split("/").last).toList
            if (!entries.contains("layer")) layerItem +: acc else acc
          }
        }
      }
    }
  }
}
| WiredThing/hither | app/controllers/S3Controller.scala | Scala | mit | 6,795 |
package controllers.jenkins
import com.thetestpeople.trt.utils.FormUtils._
import play.api.data._
import play.api.data.Forms._
import viewModel._
import controllers.jenkins.JenkinsFormConstraints._
import com.thetestpeople.trt.model.Configuration
/** Play form for creating/editing a CI import specification. */
object CiImportSpecForm {

  // Binds: jobUrl (validated as a CI job URL), a polling interval, whether to
  // import the console log, and an optional configuration.
  lazy val form: Form[EditableImportSpec] =
    Form(Forms.mapping(
      "jobUrl" -> url.verifying(isCiJob),
      "pollingInterval" -> duration,
      "importConsoleLog" -> boolean,
      "configuration" -> optional(configuration))(EditableImportSpec.apply)(EditableImportSpec.unapply))

  // The form pre-populated with defaults (5-minute polling, default
  // configuration); errors from the deliberately partial bind are discarded.
  lazy val initial: Form[EditableImportSpec] =
    form.bind(Map(
      "pollingInterval" -> "5 minutes",
      "configuration" -> Configuration.Default.configuration)).discardingErrors
} | thetestpeople/trt | app/controllers/jenkins/JenkinsImportSpecForm.scala | Scala | mit | 754 |
package com.twitter.finatra.kafkastreams.integration.delay
import com.twitter.conversions.DurationOps._
import com.twitter.finatra.kafkastreams.KafkaStreamsTwitterServer
import com.twitter.finatra.kafkastreams.dsl.FinatraDslDelay
import com.twitter.finatra.kafkastreams.integration.delay.DelayStoreServer.{
Delay,
DelayStoreKey,
IncomingTopic,
OutgoingTopic
}
import org.apache.kafka.streams.StreamsBuilder
import org.apache.kafka.streams.scala.Serdes
import org.apache.kafka.streams.scala.kstream.{Consumed, Produced}
/** Constants shared by [[DelayStoreServer]] and its tests. */
object DelayStoreServer {
  val IncomingTopic = "incoming-topic"
  val OutgoingTopic = "outgoing-topic"
  // How long each record is held back before being forwarded.
  val Delay = 10.seconds
  // Name under which the delay's backing store is registered.
  val DelayStoreKey = "storekey"
}
/**
 * Kafka Streams server that reads Long/Long records from the incoming topic,
 * delays each record by [[DelayStoreServer.Delay]] using a state store named
 * [[DelayStoreServer.DelayStoreKey]], and forwards them to the outgoing topic.
 */
class DelayStoreServer extends KafkaStreamsTwitterServer with FinatraDslDelay {

  override protected def configureKafkaStreams(builder: StreamsBuilder): Unit = {
    // Serdes for writing to the outgoing topic (keys and values are Longs).
    implicit val produced: Produced[Long, Long] =
      Produced.`with`(Serdes.Long, Serdes.Long)

    builder.asScala
      .stream(IncomingTopic)(Consumed.`with`(Serdes.Long, Serdes.Long))
      .delayWithStore(Delay, DelayStoreKey)
      .to(OutgoingTopic)
  }
}
| twitter/finatra | kafka-streams/kafka-streams/src/test/scala/com/twitter/finatra/kafkastreams/integration/delay/DelayStoreServer.scala | Scala | apache-2.0 | 1,123 |
package spire.math
import spire.algebra.Sign
import org.scalatest.FunSuite
import java.math.MathContext
/**
 * Tests for exact real arithmetic on [[Algebraic]]: sign computation and
 * absolute (`+/-`) / relative (`approximateTo`) approximations.
 */
class AlgebraicTest extends FunSuite {
  // sqrt(18) - sqrt(8) - sqrt(2) = 3*sqrt(2) - 2*sqrt(2) - sqrt(2) = 0 exactly,
  // but naive floating point cannot prove it; exercises exact sign evaluation.
  def trickyZero: Algebraic = Algebraic(18).sqrt - Algebraic(8).sqrt - Algebraic(2).sqrt
  test("Sign of tricky zero is Zero") {
    assert(trickyZero.sign === Sign.Zero)
  }
  test("Relative approximation of zero is zero") {
    assert((Algebraic(0) approximateTo MathContext.DECIMAL128) === BigDecimal(0))
    assert(trickyZero.toDouble === 0.0)
  }
  test("Absolute approximation of addition is correct") {
    // 100 * sqrt(2), built by repeated addition.
    val sqrt2x100 = Iterator.fill(100)(Algebraic(2).sqrt) reduce (_ + _)
    val dblSqrt2x100 = math.sqrt(2) * 100
    val err = BigDecimal(0.0001)
    val approx = sqrt2x100 +/- err
    assert(approx - err <= dblSqrt2x100 && dblSqrt2x100 <= approx + err)
  }
  test("Relative approximation of addition is correct") {
    // 29 * (1/29) must come out exactly 1.
    val sum = Iterator.fill(29)(Algebraic(1) / 29) reduce (_ + _)
    assert(sum.toDouble === 1.0)
    assert(sum.toBigDecimal === BigDecimal(1))
  }
  test("Absolute approximation of subtraction is correct") {
    // First term positive, 99 subtractions: sqrt(2) - 99*sqrt(2) = -98*sqrt(2).
    val negSqrt2x98 = Iterator.fill(100)(Algebraic(2).sqrt) reduce (_ - _)
    val dblNegSqrt2x98 = -math.sqrt(2) * 98
    val err = BigDecimal(0.0001)
    val approx = negSqrt2x98 +/- err
    assert(approx - err <= dblNegSqrt2x98 && dblNegSqrt2x98 <= approx + err)
  }
  test("Absolute approximation of multiplication is correct") {
    // sqrt(2)^32 = 2^16 = 65536.
    val prod = Iterator.fill(32)(Algebraic(2).sqrt) reduce (_ * _)
    val err = BigDecimal(0.0001)
    val approx = prod +/- err
    val actual = BigDecimal(1 << 16)
    assert(actual - err <= approx && approx <= actual + err)
  }
  test("Relative approximation of multiplication is correct") {
    val prod = Iterator.fill(32)(Algebraic(2).sqrt) reduce (_ * _)
    val approx = prod approximateTo MathContext.DECIMAL64
    val actual = BigDecimal(1 << 16)
    assert(approx === actual)
  }
  test("Absolute approximation of division is correct") {
    val quot = Algebraic(2).sqrt / 2
    val actual = 0.7071067811865476
    val err = BigDecimal(0.0001)
    val approx = quot +/- err
    assert(actual - err <= approx && approx <= actual + err)
  }
  test("Relative approximation of division is correct") {
    // The bubble-up div transform will actually just transform this to
    // 65536 / 65536... so not sure how good of a test it is.
    val quot = Iterator.fill(16)(Algebraic(2)).foldLeft(Algebraic(1 << 16))(_ / _)
    assert(quot.toDouble === 1.0)
    // Division must match BigDecimal regardless of which operand is negative.
    val aThird = Algebraic(-1) / 3
    val actual = BigDecimal(-1, MathContext.DECIMAL128) / 3
    assert(aThird.toBigDecimal(MathContext.DECIMAL128) === actual)
    val aThird2 = Algebraic(1) / -3
    assert(aThird2.toBigDecimal(MathContext.DECIMAL128) === actual)
  }
  test("Absolute approximation of roots is correct") {
    val a = Algebraic(2).sqrt
    val err = BigDecimal(0.00001)
    val actual = BigDecimal(1.4142135623730951)
    val approx = a +/- err
    assert(actual - err <= approx && approx <= actual + err)
    // Odd roots of negative numbers are defined: cbrt(-4).
    val b = Algebraic(-4) nroot 3
    val bctual = BigDecimal(-1.5874010519681994) // give or take
    val bpprox = b +/- err
    assert(bctual - err <= bpprox && bpprox <= bctual + err)
  }
  test("Associativity with large and small numbers") {
    // 1e308 + (-1e308 + 1) vs (1e308 + -1e308) + 1: exact arithmetic must agree.
    val x = Algebraic(1e308)
    val y = Algebraic(-1e308)
    val z = Algebraic(1)
    assert((x + (y + z)) === (x + y + z))
  }
}
| lrytz/spire | tests/src/test/scala/spire/math/AlgebraicTest.scala | Scala | mit | 3,414 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import java.util.Locale
import com.google.common.collect.Maps
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.analysis.{Resolver, UnresolvedAttribute}
import org.apache.spark.sql.types.{StructField, StructType}
/**
* A set of classes that can be used to represent trees of relational expressions. A key goal of
* the expression library is to hide the details of naming and scoping from developers who want to
* manipulate trees of relational operators. As such, the library defines a special type of
* expression, a [[NamedExpression]] in addition to the standard collection of expressions.
*
* ==Standard Expressions==
* A library of standard expressions (e.g., [[Add]], [[EqualTo]]), aggregates (e.g., SUM, COUNT),
* and other computations (e.g. UDFs). Each expression type is capable of determining its output
* schema as a function of its children's output schema.
*
* ==Named Expressions==
* Some expression are named and thus can be referenced by later operators in the dataflow graph.
* The two types of named expressions are [[AttributeReference]]s and [[Alias]]es.
* [[AttributeReference]]s refer to attributes of the input tuple for a given operator and form
* the leaves of some expression trees. Aliases assign a name to intermediate computations.
* For example, in the SQL statement `SELECT a+b AS c FROM ...`, the expressions `a` and `b` would
* be represented by `AttributeReferences` and `c` would be represented by an `Alias`.
*
* During [[analysis]], all named expressions are assigned a globally unique expression id, which
* can be used for equality comparisons. While the original names are kept around for debugging
* purposes, they should never be used to check if two attributes refer to the same value, as
* plan transformations can result in the introduction of naming ambiguity. For example, consider
* a plan that contains subqueries, both of which are reading from the same table. If an
* optimization removes the subqueries, scoping information would be destroyed, eliminating the
* ability to reason about which subquery produced a given attribute.
*
* ==Evaluation==
* The result of expressions can be evaluated using the `Expression.apply(Row)` method.
*/
package object expressions {
  /**
   * Used as input into expressions whose output does not depend on any input value.
   * Deliberately `null`: callers must never dereference it as a real row.
   */
  val EmptyRow: InternalRow = null
  /**
   * Converts a [[InternalRow]] to another Row given a sequence of expression that define each
   * column of the new row. If the schema of the input row is specified, then the given expression
   * will be bound to that schema.
   */
  abstract class Projection extends (InternalRow => InternalRow) {

    /**
     * Initializes internal states given the current partition index.
     * This is used by nondeterministic expressions to set initial states.
     * The default implementation does nothing.
     */
    def initialize(partitionIndex: Int): Unit = {}
  }
  /**
   * An identity projection. This returns the input row.
   */
  object IdentityProjection extends Projection {
    // Passes the row through untouched; inherits the no-op initialize().
    override def apply(row: InternalRow): InternalRow = row
  }
/**
* Helper functions for working with `Seq[Attribute]`.
*/
implicit class AttributeSeq(val attrs: Seq[Attribute]) extends Serializable {
    /** Creates a StructType with a schema matching this `Seq[Attribute]`. */
    // Name, data type, nullability and metadata are all carried over per field.
    def toStructType: StructType = {
      StructType(attrs.map(a => StructField(a.name, a.dataType, a.nullable, a.metadata)))
    }
    // It's possible that `attrs` is a linked list, which can lead to bad O(n) loops when
    // accessing attributes by their ordinals. To avoid this performance penalty, convert the input
    // to an array. (Lazy and @transient: built on first use, not serialized.)
    @transient private lazy val attrsArray = attrs.toArray
    // Index from expression id to the ordinal of the FIRST attribute carrying
    // that id; built once for O(1) lookups in indexOf.
    @transient private lazy val exprIdToOrdinal = {
      val arr = attrsArray
      val map = Maps.newHashMapWithExpectedSize[ExprId, Int](arr.length)
      // Iterate over the array in reverse order so that the final map value is the first attribute
      // with a given expression id.
      var index = arr.length - 1
      while (index >= 0) {
        map.put(arr(index).exprId, index)
        index -= 1
      }
      map
    }
    /**
     * Returns the attribute at the given index.
     * Constant time thanks to the materialized `attrsArray`.
     */
    def apply(ordinal: Int): Attribute = attrsArray(ordinal)
    /**
     * Returns the index of first attribute with a matching expression id, or -1 if no match exists.
     */
    def indexOf(exprId: ExprId): Int = {
      // The Java map returns null (boxed) when absent; Option(...) maps that to -1.
      Option(exprIdToOrdinal.get(exprId)).getOrElse(-1)
    }
private def unique[T](m: Map[T, Seq[Attribute]]): Map[T, Seq[Attribute]] = {
m.mapValues(_.distinct).toMap
}
    /** Map to use for direct case insensitive attribute lookups. */
    // Keys are attribute names lower-cased with Locale.ROOT; values are the
    // de-duplicated attributes sharing that name.
    @transient private lazy val direct: Map[String, Seq[Attribute]] = {
      unique(attrs.groupBy(_.name.toLowerCase(Locale.ROOT)))
    }
    /** Map to use for qualified case insensitive attribute lookups with 2 part key */
    @transient private lazy val qualified: Map[(String, String), Seq[Attribute]] = {
      // key is 2 part: table/alias and name
      // Only qualified attributes participate; the LAST qualifier segment is
      // taken as the table/alias part. All key parts are lower-cased.
      val grouped = attrs.filter(_.qualifier.nonEmpty).groupBy {
        a => (a.qualifier.last.toLowerCase(Locale.ROOT), a.name.toLowerCase(Locale.ROOT))
      }
      unique(grouped)
    }
/** Map to use for qualified case insensitive attribute lookups with 3 part key */
@transient private lazy val qualified3Part: Map[(String, String, String), Seq[Attribute]] = {
// key is 3 part: database name, table name and name
val grouped = attrs.filter(a => a.qualifier.length >= 2 && a.qualifier.length <= 3)
.groupBy { a =>
val qualifier = if (a.qualifier.length == 2) {
a.qualifier
} else {
a.qualifier.takeRight(2)
}
(qualifier.head.toLowerCase(Locale.ROOT),
qualifier.last.toLowerCase(Locale.ROOT),
a.name.toLowerCase(Locale.ROOT))
}
unique(grouped)
}
/** Map to use for qualified case insensitive attribute lookups with 4 part key */
@transient
private lazy val qualified4Part: Map[(String, String, String, String), Seq[Attribute]] = {
// key is 4 part: catalog name, database name, table name and name
val grouped = attrs.filter(_.qualifier.length == 3).groupBy { a =>
a.qualifier match {
case Seq(catalog, db, tbl) =>
(catalog.toLowerCase(Locale.ROOT),
db.toLowerCase(Locale.ROOT),
tbl.toLowerCase(Locale.ROOT),
a.name.toLowerCase(Locale.ROOT))
}
}
unique(grouped)
}
/** Returns true if all qualifiers in `attrs` have 3 or less parts. */
@transient private val hasThreeOrLessQualifierParts: Boolean =
attrs.forall(_.qualifier.length <= 3)
/** Match attributes for the case where all qualifiers in `attrs` have 3 or less parts. */
private def matchWithThreeOrLessQualifierParts(
nameParts: Seq[String],
resolver: Resolver): (Seq[Attribute], Seq[String]) = {
// Collect matching attributes given a name and a lookup.
def collectMatches(name: String, candidates: Option[Seq[Attribute]]): Seq[Attribute] = {
candidates.getOrElse(Nil).collect {
case a if resolver(a.name, name) => a.withName(name)
}
}
// Find matches for the given name assuming that the 1st three parts are qualifier
// (i.e. catalog name, database name and table name) and the 4th part is the actual
// column name.
//
// For example, consider an example where "cat" is the catalog name, "db1" is the database
// name, "a" is the table name and "b" is the column name and "c" is the struct field name.
// If the name parts is cat.db1.a.b.c, then Attribute will match
// Attribute(b, qualifier("cat", "db1", "a")) and List("c") will be the second element
var matches: (Seq[Attribute], Seq[String]) = nameParts match {
case catalogPart +: dbPart +: tblPart +: name +: nestedFields =>
val key = (catalogPart.toLowerCase(Locale.ROOT), dbPart.toLowerCase(Locale.ROOT),
tblPart.toLowerCase(Locale.ROOT), name.toLowerCase(Locale.ROOT))
val attributes = collectMatches(name, qualified4Part.get(key)).filter { a =>
assert(a.qualifier.length == 3)
resolver(catalogPart, a.qualifier(0)) && resolver(dbPart, a.qualifier(1)) &&
resolver(tblPart, a.qualifier(2))
}
(attributes, nestedFields)
case _ =>
(Seq.empty, Seq.empty)
}
// Find matches for the given name assuming that the 1st two parts are qualifier
// (i.e. database name and table name) and the 3rd part is the actual column name.
//
// For example, consider an example where "db1" is the database name, "a" is the table name
// and "b" is the column name and "c" is the struct field name.
// If the name parts is db1.a.b.c, then it can match both
// Attribute(b, qualifier("cat", "db1, "a")) and Attribute(b, qualifier("db1, "a")),
// and List("c") will be the second element
if (matches._1.isEmpty) {
matches = nameParts match {
case dbPart +: tblPart +: name +: nestedFields =>
val key = (dbPart.toLowerCase(Locale.ROOT),
tblPart.toLowerCase(Locale.ROOT), name.toLowerCase(Locale.ROOT))
val attributes = collectMatches(name, qualified3Part.get(key)).filter { a =>
val qualifier = if (a.qualifier.length == 2) {
a.qualifier
} else {
a.qualifier.takeRight(2)
}
resolver(dbPart, qualifier.head) && resolver(tblPart, qualifier.last)
}
(attributes, nestedFields)
case _ =>
(Seq.empty, Seq.empty)
}
}
// If there are no matches, then find matches for the given name assuming that
// the 1st part is a qualifier (i.e. table name, alias, or subquery alias) and the
// 2nd part is the actual name. This returns a tuple of
// matched attributes and a list of parts that are to be resolved.
//
// For example, consider an example where "a" is the table name, "b" is the column name,
// and "c" is the struct field name, i.e. "a.b.c". In this case, Attribute will be "a.b",
// and the second element will be List("c").
if (matches._1.isEmpty) {
matches = nameParts match {
case qualifier +: name +: nestedFields =>
val key = (qualifier.toLowerCase(Locale.ROOT), name.toLowerCase(Locale.ROOT))
val attributes = collectMatches(name, qualified.get(key)).filter { a =>
resolver(qualifier, a.qualifier.last)
}
(attributes, nestedFields)
case _ =>
(Seq.empty[Attribute], Seq.empty[String])
}
}
// If none of attributes match database.table.column pattern or
// `table.column` pattern, we try to resolve it as a column.
matches match {
case (Seq(), _) =>
val name = nameParts.head
val attributes = collectMatches(name, direct.get(name.toLowerCase(Locale.ROOT)))
(attributes, nameParts.tail)
case _ => matches
}
}
/**
* Match attributes for the case where at least one qualifier in `attrs` has more than 3 parts.
*/
private def matchWithFourOrMoreQualifierParts(
nameParts: Seq[String],
resolver: Resolver): (Seq[Attribute], Seq[String]) = {
// Returns true if the `short` qualifier is a subset of the last elements of
// `long` qualifier. For example, Seq("a", "b") is a subset of Seq("a", "a", "b"),
// but not a subset of Seq("a", "b", "b").
def matchQualifier(short: Seq[String], long: Seq[String]): Boolean = {
(long.length >= short.length) &&
long.takeRight(short.length)
.zip(short)
.forall(x => resolver(x._1, x._2))
}
// Collect attributes that match the given name and qualifier.
// A match occurs if
// 1) the given name matches the attribute's name according to the resolver.
// 2) the given qualifier is a subset of the attribute's qualifier.
def collectMatches(
name: String,
qualifier: Seq[String],
candidates: Option[Seq[Attribute]]): Seq[Attribute] = {
candidates.getOrElse(Nil).collect {
case a if resolver(name, a.name) && matchQualifier(qualifier, a.qualifier) =>
a.withName(name)
}
}
// Iterate each string in `nameParts` in a reverse order and try to match the attributes
// considering the current string as the attribute name. For example, if `nameParts` is
// Seq("a", "b", "c"), the match will be performed in the following order:
// 1) name = "c", qualifier = Seq("a", "b")
// 2) name = "b", qualifier = Seq("a")
// 3) name = "a", qualifier = Seq()
// Note that the match is performed in the reverse order in order to match the longest
// qualifier as possible. If a match is found, the remaining portion of `nameParts`
// is also returned as nested fields.
var candidates: Seq[Attribute] = Nil
var nestedFields: Seq[String] = Nil
var i = nameParts.length - 1
while (i >= 0 && candidates.isEmpty) {
val name = nameParts(i)
candidates = collectMatches(
name,
nameParts.take(i),
direct.get(name.toLowerCase(Locale.ROOT)))
if (candidates.nonEmpty) {
nestedFields = nameParts.takeRight(nameParts.length - i - 1)
}
i -= 1
}
(candidates, nestedFields)
}
/** Perform attribute resolution given a name and a resolver. */
def resolve(nameParts: Seq[String], resolver: Resolver): Option[NamedExpression] = {
val (candidates, nestedFields) = if (hasThreeOrLessQualifierParts) {
matchWithThreeOrLessQualifierParts(nameParts, resolver)
} else {
matchWithFourOrMoreQualifierParts(nameParts, resolver)
}
def name = UnresolvedAttribute(nameParts).name
candidates match {
case Seq(a) if nestedFields.nonEmpty =>
// One match, but we also need to extract the requested nested field.
// The foldLeft adds ExtractValues for every remaining parts of the identifier,
// and aliased it with the last part of the name.
// For example, consider "a.b.c", where "a" is resolved to an existing attribute.
// Then this will add ExtractValue("c", ExtractValue("b", a)), and alias the final
// expression as "c".
val fieldExprs = nestedFields.foldLeft(a: Expression) { (e, name) =>
ExtractValue(e, Literal(name), resolver)
}
Some(Alias(fieldExprs, nestedFields.last)())
case Seq(a) =>
// One match, no nested fields, use it.
Some(a)
case Seq() =>
// No matches.
None
case ambiguousReferences =>
// More than one match.
val referenceNames = ambiguousReferences.map(_.qualifiedName).mkString(", ")
throw new AnalysisException(s"Reference '$name' is ambiguous, could be: $referenceNames.")
}
}
}
/**
 * Marker trait for null-intolerant expressions: if any input is null, the expression's
 * output is null. This information is used when constructing IsNotNull constraints.
 */
trait NullIntolerant extends Expression
}
| ueshin/apache-spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/package.scala | Scala | apache-2.0 | 16,530 |
/*
* Code Pulse: A real-time code coverage testing tool. For more information
* see http://code-pulse.com
*
* Copyright (C) 2014 Applied Visions - http://securedecisions.avi.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.avi.sbt.betterzip
import sbt.ErrorHandling.translate
import sbt.Using._
import scala.annotation.tailrec
import scala.language.implicitConversions
import java.io.File
import org.apache.commons.compress.archivers.zip.{ AsiExtraField, ZipArchiveEntry, ZipArchiveOutputStream }
import org.apache.commons.io.FileUtils
/** A better IO.zip implementation supporting file permissions (using commons-compress).
 *
 * @author robertf
 */
object BetterZip {

  // Octal 755: r/w/x owner, r/x group, r/x all.
  val ExecutableMode = 493 // octal 755: r/w/x owner, r/x group, r/x all

  // asiMode seems to work on mac, whereas unixmode on linux. the two seem to be mutually exclusive, though
  sealed trait FileMode
  case class AsiExtraFieldMode(mode: Int) extends FileMode
  case class UnixMode(mode: Int) extends FileMode

  /** Per-platform way of marking a zip entry executable (None on Windows). */
  sealed trait ExecutableType { def mode: Option[FileMode] }
  object ExecutableType {
    case object Windows extends ExecutableType { val mode = None }
    case object Mac extends ExecutableType { val mode = Some(AsiExtraFieldMode(ExecutableMode)) }
    case object Unix extends ExecutableType { val mode = Some(UnixMode(ExecutableMode)) }
  }

  /** A single zip entry: `source` file, `destination` path in the archive, optional mode. */
  case class Entry(source: File, destination: String, mode: Option[FileMode]) {
    // Destination normalized to use '/' separators, as required inside zip archives.
    private[BetterZip] lazy val normalized = Entry(source, normalizeName(destination), mode)
  }

  // DSL: `file -> "dest"` builds a plain entry, `file ->* ("dest", tpe)` an executable one.
  // Note this deliberately shadows Predef's ArrowAssoc `->` for File receivers.
  implicit class EntryGenerator(source: File) {
    def ->(destination: String) = Entry(source, destination, None)
    def ->*(destination: String, executableType: ExecutableType) = Entry(source, destination, executableType.mode)
  }

  implicit def mappingToEntry(mapping: (File, String)) = Entry(mapping._1, mapping._2, None)
  implicit def mappingSeqToEntry(mappings: Seq[(File, String)]) = mappings map mappingToEntry

  /** Writes all regular-file entries in `sources` to `outputZip`, applying any file modes.
   *  Entries whose source is not an existing regular file are silently skipped.
   */
  def zip(sources: Traversable[Entry], outputZip: File) {
    if (outputZip.isDirectory)
      sys.error("Specified output file " + outputZip + " is a directory.")
    else withZipOutput(outputZip) { zout =>
      for (file <- sources.filter(_.source.isFile).map(_.normalized)) {
        val entry = new ZipArchiveEntry(file.source, file.destination)

        // Attach the requested permission encoding (ASI extra field or unix mode bits).
        for (mode <- file.mode) mode match {
          case AsiExtraFieldMode(mode) =>
            val aef = new AsiExtraField
            aef setMode mode
            entry addExtraField aef

          case UnixMode(mode) =>
            entry setUnixMode mode
        }

        zout putArchiveEntry entry
        FileUtils.copyFile(file.source, zout)
        zout.closeArchiveEntry
      }
    }
  }

  // Opens a ZipArchiveOutputStream for `file`, runs `thunk`, and always closes the stream.
  private def withZipOutput(file: File)(thunk: ZipArchiveOutputStream => Unit) {
    val stream = new ZipArchiveOutputStream(file)
    try { thunk(stream) }
    finally { stream.close }
  }

  // Zip entry names always use '/' regardless of the host platform's separator.
  private def normalizeName(name: String) = File.separatorChar match {
    case '/' => name
    case sep => name.replace(sep, '/')
  }
} | secdec/codepulse | project/sbt-betterzip/src/main/scala/com/avi/sbt/betterzip/BetterZip.scala | Scala | apache-2.0 | 3,442 |
package io.getquill.context.async
import java.time.LocalDate
import java.util.Date
import io.getquill.PostgresAsyncContext
import io.getquill.context.sql.encoding.ArrayEncoding
import org.joda.time.{ DateTime => JodaDateTime, LocalDate => JodaLocalDate, LocalDateTime => JodaLocalDateTime }
// Encoders for PostgreSQL ARRAY columns: each Scala Seq-like collection is converted
// to an IndexedSeq (element-mapped where needed) and bound with SqlTypes.ARRAY.
trait ArrayEncoders extends ArrayEncoding {
  self: PostgresAsyncContext[_] =>

  implicit def arrayStringEncoder[Col <: Seq[String]]: Encoder[Col] = arrayRawEncoder[String, Col]
  implicit def arrayBigDecimalEncoder[Col <: Seq[BigDecimal]]: Encoder[Col] = arrayRawEncoder[BigDecimal, Col]
  implicit def arrayBooleanEncoder[Col <: Seq[Boolean]]: Encoder[Col] = arrayRawEncoder[Boolean, Col]
  implicit def arrayByteEncoder[Col <: Seq[Byte]]: Encoder[Col] = arrayRawEncoder[Byte, Col]
  implicit def arrayShortEncoder[Col <: Seq[Short]]: Encoder[Col] = arrayRawEncoder[Short, Col]
  implicit def arrayIntEncoder[Col <: Seq[Index]]: Encoder[Col] = arrayRawEncoder[Index, Col]
  implicit def arrayLongEncoder[Col <: Seq[Long]]: Encoder[Col] = arrayRawEncoder[Long, Col]
  implicit def arrayFloatEncoder[Col <: Seq[Float]]: Encoder[Col] = arrayRawEncoder[Float, Col]
  implicit def arrayDoubleEncoder[Col <: Seq[Double]]: Encoder[Col] = arrayRawEncoder[Double, Col]
  implicit def arrayDateEncoder[Col <: Seq[Date]]: Encoder[Col] = arrayRawEncoder[Date, Col]
  // Joda DateTime is converted to a LocalDateTime before encoding.
  implicit def arrayJodaDateTimeEncoder[Col <: Seq[JodaDateTime]]: Encoder[Col] = arrayEncoder[JodaDateTime, Col](_.toLocalDateTime)
  implicit def arrayJodaLocalDateTimeEncoder[Col <: Seq[JodaLocalDateTime]]: Encoder[Col] = arrayRawEncoder[JodaLocalDateTime, Col]
  implicit def arrayJodaLocalDateEncoder[Col <: Seq[JodaLocalDate]]: Encoder[Col] = arrayRawEncoder[JodaLocalDate, Col]
  implicit def arrayLocalDateEncoder[Col <: Seq[LocalDate]]: Encoder[Col] = arrayEncoder[LocalDate, Col](encodeLocalDate.f)

  /** Builds an array Encoder that applies `mapper` to each element before binding. */
  def arrayEncoder[T, Col <: Seq[T]](mapper: T => Any): Encoder[Col] =
    encoder[Col]((col: Col) => col.toIndexedSeq.map(mapper), SqlTypes.ARRAY)

  /** Array Encoder for element types the driver can bind directly (no mapping). */
  def arrayRawEncoder[T, Col <: Seq[T]]: Encoder[Col] = arrayEncoder[T, Col](identity)
}
| getquill/quill | quill-async-postgres/src/main/scala/io/getquill/context/async/ArrayEncoders.scala | Scala | apache-2.0 | 2,098 |
// Project: surfice-entity (https://github.com/jokade/surfice-entity)
// Module: sql
// Description: Mixin trait for SqlServiceS that use the scalikejdbc default DB
// Copyright (c) 2016. Distributed under the MIT License (see included LICENSE file).
package surfice.entity.sql
import scalikejdbc.{DB, DBSession}
// Mixin that runs a SqlService's sessions against scalikejdbc's default
// connection pool (the `DB` singleton).
trait DBProvider {
  this: SqlService[_,_] =>

  // Runs `execution` in a read-only session on the default DB.
  final def readOnly[A](execution: (DBSession)=>A): A = DB.readOnly(execution)
  // Runs `execution` in an auto-commit session on the default DB.
  final def autoCommit[A](execution: (DBSession)=>A): A = DB.autoCommit(execution)
}
| jokade/surfice-entity | sql/src/main/scala/surfice/entity/sql/DBProvider.scala | Scala | mit | 537 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.optimize
import cc.factorie.model.{WeightsMap, WeightsSet}
import cc.factorie.util.FastLogging
import scala.collection.mutable.ArrayBuffer
// TODO What kind of regularization would be used with LBFGS other than L2?
// If nothing, then incorporate it directly into LBFGS. -akm
/** A quasi-Newton batch gradient optimizer.
    Limited-memory BFGS, as described in Byrd, Nocedal, and Schnabel, "Representations of Quasi-Newton Matrices and Their Use in Limited Memory Methods"

    This implementation *maximizes* the objective: each accepted direction is handed to a
    BackTrackLineOptimizer, and curvature pairs from the last `rankOfApproximation` steps
    are used to build the implicit inverse-Hessian approximation (two-loop recursion). */
class LBFGS(var numIterations: Double = 1000,
            var maxIterations: Int = 1000,
            var tolerance: Double = 0.0001,
            var gradientTolerance : Double= 0.001,
            val eps : Double = 1.0e-5,
            val rankOfApproximation : Int = 4,
            val initialStepSize : Double = 1.0) extends GradientOptimizer with FastLogging {
  private var _isConverged = false
  def isConverged = _isConverged

  case class StepTooSmallException(msg:String) extends Exception(msg)

  // The currently-active line search; null when a new direction must be computed.
  var lineMaximizer: BackTrackLineOptimizer = null

  // The number of corrections used in BFGS update
  // ideally 3 <= m <= 7. Larger m means more cpu time, memory.

  // State of search
  // g = gradient
  // s = list of m previous "parameters" values
  // y = list of m previous "g" values
  // rho = intermediate calculation
  var g: WeightsMap = null
  var oldg: WeightsMap = null
  var direction: WeightsMap = null
  var params: WeightsSet = null
  var oldParams: WeightsMap = null
  var s: ArrayBuffer[WeightsMap] = null
  var y: ArrayBuffer[WeightsMap] = null
  var rho: ArrayBuffer[Double] = null
  var alpha: Array[Double] = null
  var step = 1.0
  var iterations: Int = 0

  var oldValue: Double = Double.NegativeInfinity

  // override to evaluate on dev set, save the intermediate model, etc.
  def postIteration(iter: Int): Unit = ()

  /** Clears all search state so the optimizer can be reused from scratch. */
  def reset(): Unit = {
    _isConverged = false
    step = 1.0
    iterations = 0
    oldValue = Double.NegativeInfinity
    g = null
    s = null
    y = null
    rho = null
    alpha = null
    params = null
    oldParams = null
    direction = null
    oldg = null
  }
  def initializeWeights(weights: WeightsSet): Unit = { }
  def finalizeWeights(weights: WeightsSet): Unit = { }

  /** Consumes one (value, gradient) evaluation. Either feeds the active line search,
   *  or — when the line search has converged — checks the termination criteria and
   *  computes a new search direction via the L-BFGS two-loop recursion. */
  def step(weights:WeightsSet, gradient:WeightsMap, value:Double): Unit = {
    if (_isConverged) return
    //todo: is the right behavior to set _isConverged = true if exceeded numIters?
    if (iterations > numIterations) { logger.warn("LBFGS: Failed to converge: too many iterations"); _isConverged = true; return }

    //if first time in, initialize
    if (g == null) {
      logger.debug("LBFGS: Initial value = " + value)

      iterations = 0
      s = new ArrayBuffer[WeightsMap]
      y = new ArrayBuffer[WeightsMap]
      rho = new ArrayBuffer[Double]
      alpha = new Array[Double](rankOfApproximation)

      params = weights
      oldParams = params.copy
      //use copy to get the right size
      g = gradient
      oldg = gradient.copy
      direction = gradient.copy

      if (direction.twoNorm == 0) {
        logger.info("LBFGS: Initial initial gradient is zero; saying converged")
        g = null
        _isConverged = true
        //return true;
      }
      // First direction is simply the normalized gradient (steepest ascent).
      direction.*=(1.0 / direction.twoNorm)

      // take a step in the direction
      lineMaximizer = new BackTrackLineOptimizer(gradient, direction, initialStepSize)
      lineMaximizer.step(weights, gradient, value)
      //todo: change this to just check if lineOptimizer has converged
      //      if (step == 0.0) {
      //        // could not step in this direction
      //        // give up and say converged
      //        g = null // reset search
      //        step = 1.0
      //        logger.error("Line search could not step in the current direction. " +
      //                "(This is not necessarily cause for alarm. Sometimes this happens close to the maximum," +
      //                " where the function may be very flat.)")
      //        //throw new StepTooSmallException("Line search could not step in current direction.")
      //        return false
      //      }
      oldValue = value
    }else if(!lineMaximizer.isConverged){
      // Line search still in progress: forward the new evaluation to it.
      lineMaximizer.step(weights, gradient, value)
    }
    //else{
    if (lineMaximizer.isConverged) {
      //first, check for convergence:
      iterations += 1
      logger.debug("LBFGS: At iteration " + iterations + ", value = " + value)
      //params and g are just aliases for the names of the variables passed in
      g = gradient
      params = weights
      // Termination #1: relative change in objective value below `tolerance`.
      if (2.0 * math.abs(value - oldValue) <= tolerance * (math.abs(value) + math.abs(oldValue) + eps)) {
        logger.debug("LBFGS: Exiting on termination #1: value difference below tolerance (oldValue: " + oldValue + " newValue: " + value)
        _isConverged = true
        return
      }
      val gg = g.twoNorm
      if (gg < gradientTolerance) {
        logger.trace("LBFGS: Exiting on termination #2: gradient=" + gg + " < " + gradientTolerance)
        _isConverged = true
        return
      }
      if (gg == 0.0) {
        logger.trace("LBFGS: Exiting on termination #3: gradient==0.0")
        _isConverged = true
        return
      }
      logger.trace("Gradient = " + gg)
      // NOTE(review): `iterations` was already incremented at the top of this branch, so the
      // counter advances by 2 per completed line search — confirm this is intended before
      // relying on `maxIterations` as an exact iteration bound.
      iterations += 1
      if (iterations > maxIterations) {
        logger.warn("Too many iterations in L-BFGS.java. Continuing with current parameters.")
        _isConverged = true
        return
      }

      // get difference between previous 2 gradients and parameters
      var sy = 0.0
      var yy = 0.0
      //todo: these next check are quite inefficient, but is a hack to avoid doing the following line on tensors:
      //params(i).isInfinite && oldParams(i).isInfinite && (params(i) * oldParams(i) > 0)) 0.0
      if(!params.toArray.forall(d => !(d == Double.PositiveInfinity || d == Double.NegativeInfinity))) throw new IllegalStateException("Weight value can't be infinite")
      if(!gradient.toArray.forall(d => !(d == Double.PositiveInfinity || d == Double.NegativeInfinity))) throw new IllegalStateException("gradient value can't be infinite")
      // Curvature pair: s_k = params - oldParams, y_k = g - oldg (stored in-place).
      oldParams = params - oldParams
      oldg = g - oldg

      sy = oldParams dot oldg
      yy = oldg.twoNormSquared
      direction := gradient

      // NOTE(review): the exception messages below are missing a space ("sy=...> 0");
      // also confirm the sign convention (maximization) makes sy > 0 truly invalid here.
      if (sy > 0) throw new IllegalStateException("sy=" + sy + "> 0")
      val gamma = sy / yy // scaling factor
      if (gamma > 0) throw new IllegalStateException("gamma=" + gamma + "> 0")
      pushDbl(rho, 1.0 / sy)
      pushTensor(s, oldParams)
      pushTensor(y, oldg)

      // calculate new direction
      // (standard L-BFGS two-loop recursion over the stored correction pairs)
      assert(s.size == y.size)
      for (i <- s.size -1 to 0 by -1) {
        //  alpha(i) = rho(i) * ArrayOps.dot(direction, s(i))
        alpha(i) = rho(i) * (direction dot s(i))
        //  ArrayOps.incr(direction, y(i), -1.0 * alpha(i))
        direction.+=(y(i),-1.0 * alpha(i))
      }
      direction.*=(gamma)
      for (i <- 0 until s.size) {
        //val beta = rho(i) * ArrayOps.dot(direction, y(i))
        val beta = rho(i) * (direction dot y(i))
        //ArrayOps.incr(direction, s(i), alpha(i) - beta)
        direction.+=(s(i),alpha(i) - beta)
      }

      oldParams := params
      oldValue = value
      oldg := g
      direction.*=(-1)

      lineMaximizer = null
      postIteration(iterations)
      // Start the line search along the freshly-computed direction.
      lineMaximizer = new BackTrackLineOptimizer(gradient, direction, initialStepSize)
      lineMaximizer.step(weights, gradient, value)
    }
  }

  /** Appends `toadd` to the bounded history `l`, evicting the oldest entry once
   *  `rankOfApproximation` pairs are stored. The value is defensively copied. */
  def pushTensor(l: ArrayBuffer[WeightsMap], toadd: WeightsMap): Unit = {
    assert(l.size <= rankOfApproximation)

    if (l.size == rankOfApproximation) {
      l.remove(0)
      l += toadd.copy
      //todo: change back to this circular thing below
      //      val last = l(0)
      //      Array.copy(toadd, 0, last, 0, toadd.length)
      //      forIndex(l.size - 1)(i => {l(i) = l(i + 1)})
      //      l(m - 1) = last
    } else {
      l += toadd.copy
    }
  }

  /** Appends `toadd` to the bounded Double history `l` (same eviction policy). */
  def pushDbl(l: ArrayBuffer[Double], toadd: Double): Unit = {
    assert(l.size <= rankOfApproximation)
    if (l.size == rankOfApproximation) l.remove(0)
    l += toadd
  }
}
//class L2RegularizedLBFGS(var l2: Double = 0.1) extends LBFGS {
// override def step(weightsSet: Tensor, gradient: Tensor, value: Double, margin: Double) {
// gradient += (weightsSet, -l2)
// super.step(weightsSet, gradient, value - l2 * (weightsSet dot weightsSet), margin)
// }
//} | patverga/factorie | src/main/scala/cc/factorie/optimize/LBFGS.scala | Scala | apache-2.0 | 9,162 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze
import edu.latrobe.io._
import edu.latrobe.blaze.sinks._
import scala.collection._
import scala.language.implicitConversions
/**
* Here we define major parts of the DSL for objectives.
*
* Note that this is the operator precedence in Scala!
* http://scala-lang.org/files/archive/spec/2.11/06-expressions.html#infix-operations
*
* (all letters)
* |
* ^
* &
* = !
* < >
* :
* + -
* * / %
* (all other special characters)
*
*/
package object objectives {

  object Implicits {
    // Allows a SinkBuilder to be used directly on the right-hand side of `>>`.
    implicit def sink2OutputRedirection(sink: SinkBuilder)
    : OutputRedirectionBuilder = OutputRedirectionBuilder(sink)
  }

  // DSL operations available on every ObjectiveBuilder.
  final implicit class ObjectiveBuilderFunctions(obj: ObjectiveBuilder) {

    def benchmark()
    : objectives.BenchmarkObjectiveBuilder = BenchmarkObjectiveBuilder(obj)

    // ---------------------------------------------------------------------------
    //    DSL
    // ---------------------------------------------------------------------------
    /**
      * Priority 1:
      *
      * ! can wrap around trigger objectives and inverts their result.
      */
    def unary_!()
    : InvertTriggerBuilder = InvertTriggerBuilder(obj)

    /**
      * Priority 2:
      *
      * && will bundle objectives together to form a complex objective that
      * is only met if all sub-objectives evaluate true. The objectives are
      * evaluated first to last. The first objective that evaluates to None
      * will break the execution.
      */
    def &&(other: ObjectiveBuilder)
    : ComplexObjectiveBuilder = obj match {
      // Flatten: if the left side is already a complex objective, append in place.
      case obj: ComplexObjectiveBuilder =>
        obj.children += other
        obj
      case _ =>
        ComplexObjectiveBuilder(obj, other)
    }

    /**
      * Priority 3:
      *
      * || will group objectives together. They will be evaluated front to back.
      * The first objective that evaluates to anything except None will break
      * execution.
      */
    def ||(other: ObjectiveBuilder)
    : MultiObjectiveBuilder = obj match {
      // Flatten: if the left side is already a multi objective, append in place.
      case obj: MultiObjectiveBuilder =>
        obj.children += other
        obj
      case _ =>
        MultiObjectiveBuilder(obj, other)
    }

    /**
      * Priority 4:
      *
      * >> will redirect the output from the left hand objective into an
      * existing output redirection, appending this objective to its children.
      */
    def >>(other: OutputRedirectionBuilder)
    : OutputRedirectionBuilder = {
      other.children += obj
      other
    }

    /**
      * Priority 4:
      *
      * >> will redirect the output from the left hand objective into a
      * sink, wrapping it in a fresh output redirection.
      */
    def >>(other: SinkBuilder)
    : OutputRedirectionBuilder = OutputRedirectionBuilder(other, obj)

  }

}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/objectives/package.scala | Scala | apache-2.0 | 3,380 |
//
// Bot.scala -- Scala object Bot
// Project OrcScala
//
// Created by dkitchin on Nov 26, 2010.
//
// Copyright (c) 2017 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.types
/** @author dkitchin
*/
case object Bot extends CallableType {

  override def toString: String = "Bot"

  /** Bot is the identity of join: joining with any type yields that type. */
  override def join(that: Type): Type = that

  /** Bot is absorbing under meet: meeting with any type yields Bot. */
  override def meet(that: Type): Type = this

  /** Bot is a subtype of every type. */
  override def <(that: Type): Boolean = true

  /** Any call to a value of type Bot is itself typed Bot. */
  def call(typeArgs: List[Type], argTypes: List[Type]): Type = Bot
}
| orc-lang/orc | OrcScala/src/orc/types/Bot.scala | Scala | bsd-3-clause | 702 |
package scala.generator
import scala.annotation.tailrec
import ScalaPrimitive._
import ScalaDatatype._
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class ScalaDatatypeSpec extends AnyFunSpec with Matchers {

  it("should generate the right variable name when nested") {
    val model = new Model(Namespaces("org.example"), "User")
    val string = ScalaPrimitive.String

    // Apply `nester` to `d` exactly `levels` times (zero applications yields `d` itself).
    @tailrec
    def nest(d: ScalaDatatype, nester: ScalaDatatype => ScalaDatatype, levels: Int): ScalaDatatype =
      if (levels > 0) nest(nester(d), nester, levels - 1) else d

    def nestList(d: ScalaDatatype, levels: Int): ScalaDatatype = nest(d, List(_), levels)

    def nestMap(d: ScalaDatatype, levels: Int): ScalaDatatype = nest(d, Map(_), levels)

    // An unnested datatype keeps its singular name; any depth of List/Map
    // nesting pluralizes it and keeps it plural.
    def check(datatype: ScalaDatatype, singular: String, plural: String): Unit = {
      nestList(datatype, 0).toVariableName should be(singular)
      nestMap(datatype, 0).toVariableName should be(singular)
      for (levels <- 1 to 3) {
        nestList(datatype, levels).toVariableName should be(plural)
        nestMap(datatype, levels).toVariableName should be(plural)
      }
    }

    check(model, "user", "users")
    check(string, "value", "values")
  }

  it("DateIso8601Joda sanity check") {
    val datatype = DateIso8601Joda
    datatype.name should be("_root_.org.joda.time.LocalDate")
    datatype.asString("myVar") should be("_root_.org.joda.time.format.ISODateTimeFormat.date.print(myVar)")
    datatype.default("2020-12-31") should be("""_root_.org.joda.time.format.ISODateTimeFormat.dateTimeParser.parseLocalDate("2020-12-31")""")
  }

  it("DateIso8601Java sanity check") {
    val datatype = DateIso8601Java
    datatype.name should be("_root_.java.time.LocalDate")
    datatype.asString("myVar") should be("myVar.toString")
    datatype.default("2020-12-31") should be("_root_.java.time.LocalDate.parse(\"2020-12-31\")")
  }

  it("DateTimeIso8601Joda sanity check") {
    val datatype = DateTimeIso8601Joda
    datatype.name should be("_root_.org.joda.time.DateTime")
    datatype.asString("myVar") should be("_root_.org.joda.time.format.ISODateTimeFormat.dateTime.print(myVar)")
    datatype.default("2020-12-31") should be("_root_.org.joda.time.format.ISODateTimeFormat.dateTimeParser.parseDateTime(\"2020-12-31\")")
  }

  it("DateTimeIso8601Java sanity check") {
    val datatype = DateTimeIso8601JavaInstant
    datatype.name should be("_root_.java.time.Instant")
    datatype.asString("myVar") should be("myVar.toString")
    datatype.default("2020-12-31") should be("_root_.java.time.OffsetDateTime.parse(\"2020-12-31\").toInstant")
  }
}
| mbryzek/apidoc-generator | scala-generator/src/test/scala/models/generator/ScalaDatatypeSpec.scala | Scala | mit | 3,114 |
package com.karasiq.bootstrap.carousel
import rx.Rx
import com.karasiq.bootstrap.components.BootstrapComponents
import com.karasiq.bootstrap.context.RenderingContext
import com.karasiq.bootstrap.icons.Icons
import com.karasiq.bootstrap.utils.Utils
// Platform-independent Bootstrap carousel implementation built on scalatags + scala.rx.
trait UniversalCarousels { self: RenderingContext with Carousels with Utils with Icons with BootstrapComponents ⇒
  import scalaTags.all._
  import BootstrapAttrs._

  /** A Bootstrap carousel whose slides come from the reactive `content` list.
   *  NOTE(review): `indicators` and `slides` call `.head`/`.tail` on the slide list,
   *  so they assume `content` always holds at least one slide — confirm callers
   *  guarantee this, otherwise rendering throws on an empty list.
   */
  class UniversalCarousel(val carouselId: String, val content: Rx[Seq[Modifier]]) extends AbstractCarousel {
    // DOM id of the carousel element; targeted by indicators and prev/next controls.
    protected val carouselElementId = s"$carouselId-carousel"

    // One clickable dot per slide; the first is pre-marked active.
    def indicators: Rx[Tag] = Rx {
      def mkIndicator(index: Int): Tag = {
        li(`data-target` := s"#$carouselElementId", `data-slide-to` := index)
      }
      val indexes = content().indices
      ol(`class` := "carousel-indicators")(
        mkIndicator(indexes.head)(`class` := "active"),
        for (i <- indexes.tail) yield mkIndicator(i)
      )
    }

    // The slide container; the first slide gets the "active" class.
    def slides: Rx[Tag] = Rx {
      val data = content()
      div(`class` := "carousel-inner", role := "listbox")(
        div(`class` := "item active", data.head),
        for (slide <- data.tail) yield div(`class` := "item", slide)
      )
    }

    // Full carousel markup: indicators, slides, and prev/next chevron controls.
    def carousel: Tag = {
      div(id := carouselElementId, Seq("carousel", "slide").map(_.addClass))(
        indicators,
        slides,
        a(`class` := "left carousel-control", href := s"#$carouselElementId", role := "button", `data-slide` := "prev")(
          Bootstrap.icon("chevron-left"),
          span(`class` := "sr-only", "Previous")
        ),
        a(`class` := "right carousel-control", href := s"#$carouselElementId", role := "button", `data-slide` := "next")(
          Bootstrap.icon("chevron-right"),
          span(`class` := "sr-only", "Next")
        )
      )
    }

    // Renders the carousel with auto-cycling enabled (data-ride="carousel").
    def render(md: Modifier*): Modifier = {
      carousel(`data-ride` := "carousel")(md:_*)
    }
  }

  object UniversalCarousel {
    /** Convenience slide: a full-width image with a caption overlay. */
    def slide(image: String, content: Modifier*): Modifier = {
      Seq(
        img(src := image),
        div(`class` := "carousel-caption")(content:_*)
      )
    }
  }
}
| Karasiq/scalajs-bootstrap | library/shared/src/main/scala/com/karasiq/bootstrap/carousel/UniversalCarousels.scala | Scala | mit | 2,121 |
package com.olegych.scastie.util
import com.olegych.scastie.api._
import akka.actor.ActorRef
// Messages exchanged with the sbt runner actors.

/** A unit of work for the sbt runner: run `inputs` for snippet `snippetId`.
  * `ip` and the optional `login` identify the requester; `progressActor`
  * receives progress events (inferred from name — confirm with callers). */
case class SbtTask(snippetId: SnippetId,
                   inputs: Inputs,
                   ip: String,
                   login: Option[String],
                   progressActor: ActorRef)

/** Like [[SbtTask]] but also carries `snippetActor`, the actor that owns the
  * snippet's state during the run. */
case class SbtRun(snippetId: SnippetId,
                  inputs: Inputs,
                  progressActor: ActorRef,
                  snippetActor: ActorRef)

/** Request to produce an ENSIME configuration for the given inputs. */
case class EnsimeConfigTask(inputs: Inputs)

/** Marker: ENSIME configuration generation finished successfully. */
case object EnsimeConfigReady

/** Marker: ENSIME configuration generation timed out. */
case object EnsimeConfigTimeout

/** Request to replay a previously started run. */
case class Replay(run: SbtRun)

/** Marker: the sbt runner is up and ready (presumably a readiness signal —
  * confirm against the actor that sends it). */
case object SbtUp
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.clustering
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql._
object LDASuite {

  /**
   * Builds a DataFrame of `rows` random word-count vectors of length
   * `vocabSize`, wrapped in `TestRow`s, for exercising LDA.
   * Deterministic: the RNG is seeded with 1.
   *
   * Note: `k` is currently unused by the generator but kept for interface
   * compatibility with existing callers.
   */
  def generateLDAData(
      spark: SparkSession,
      rows: Int,
      k: Int,
      vocabSize: Int): DataFrame = {
    val avgWC = 1 // average instances of each word in a doc
    val sc = spark.sparkContext
    val random = new java.util.Random(1)
    val testRows = sc.parallelize(1 to rows).map { _ =>
      new TestRow(Vectors.dense(Array.fill(vocabSize)(random.nextInt(2 * avgWC).toDouble)))
    }
    spark.createDataFrame(testRows)
  }

  /**
   * Mapping from all Params to valid settings which differ from the defaults.
   * This is useful for tests which need to exercise all Params, such as save/load.
   * This excludes input columns to simplify some tests.
   */
  val allParamSettings: Map[String, Any] = Map(
    "k" -> 3,
    "maxIter" -> 2,
    "checkpointInterval" -> 30,
    "learningOffset" -> 1023.0,
    "learningDecay" -> 0.52,
    "subsamplingRate" -> 0.051,
    "docConcentration" -> Array(2.0)
  )
}
/**
 * Tests for the spark.ml LDA estimator/model: parameter defaults and
 * validation, fitting with both the "online" and "em" optimizers,
 * persistence (read/write) of local and distributed models, and the
 * checkpoint-file lifecycle of the EM optimizer.
 */
class LDASuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {

  import testImplicits._

  // Small synthetic corpus shared by all tests; created once in beforeAll().
  val k: Int = 5
  val vocabSize: Int = 30
  @transient var dataset: Dataset[_] = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    dataset = LDASuite.generateLDAData(spark, 50, k, vocabSize)
  }

  test("default parameters") {
    val lda = new LDA()
    assert(lda.getFeaturesCol === "features")
    assert(lda.getMaxIter === 20)
    assert(lda.isDefined(lda.seed))
    assert(lda.getCheckpointInterval === 10)
    assert(lda.getK === 10)
    assert(!lda.isSet(lda.docConcentration))
    assert(!lda.isSet(lda.topicConcentration))
    assert(lda.getOptimizer === "online")
    assert(lda.getLearningDecay === 0.51)
    assert(lda.getLearningOffset === 1024)
    assert(lda.getSubsamplingRate === 0.05)
    assert(lda.getOptimizeDocConcentration)
    assert(lda.getTopicDistributionCol === "topicDistribution")
  }

  test("set parameters") {
    val lda = new LDA()
      .setFeaturesCol("test_feature")
      .setMaxIter(33)
      .setSeed(123)
      .setCheckpointInterval(7)
      .setK(9)
      .setTopicConcentration(0.56)
      .setTopicDistributionCol("myOutput")
    assert(lda.getFeaturesCol === "test_feature")
    assert(lda.getMaxIter === 33)
    assert(lda.getSeed === 123)
    assert(lda.getCheckpointInterval === 7)
    assert(lda.getK === 9)
    assert(lda.getTopicConcentration === 0.56)
    assert(lda.getTopicDistributionCol === "myOutput")
    // setOptimizer
    lda.setOptimizer("em")
    assert(lda.getOptimizer === "em")
    lda.setOptimizer("online")
    assert(lda.getOptimizer === "online")
    lda.setLearningDecay(0.53)
    assert(lda.getLearningDecay === 0.53)
    lda.setLearningOffset(1027)
    assert(lda.getLearningOffset === 1027)
    lda.setSubsamplingRate(0.06)
    assert(lda.getSubsamplingRate === 0.06)
    lda.setOptimizeDocConcentration(false)
    assert(!lda.getOptimizeDocConcentration)
  }

  // Out-of-range values must be rejected at set-time or at schema validation.
  test("parameters validation") {
    val lda = new LDA()
    // misc Params
    intercept[IllegalArgumentException] {
      new LDA().setK(1)
    }
    intercept[IllegalArgumentException] {
      new LDA().setOptimizer("no_such_optimizer")
    }
    intercept[IllegalArgumentException] {
      new LDA().setDocConcentration(-1.1)
    }
    intercept[IllegalArgumentException] {
      new LDA().setTopicConcentration(-1.1)
    }
    val dummyDF = Seq((1, Vectors.dense(1.0, 2.0))).toDF("id", "features")
    // validate parameters
    lda.transformSchema(dummyDF.schema)
    lda.setDocConcentration(1.1)
    lda.transformSchema(dummyDF.schema)
    // docConcentration arrays of length k (or 1) are accepted ...
    lda.setDocConcentration(Range(0, lda.getK).map(_ + 2.0).toArray)
    lda.transformSchema(dummyDF.schema)
    // ... but any other length must fail validation.
    lda.setDocConcentration(Range(0, lda.getK - 1).map(_ + 2.0).toArray)
    withClue("LDA docConcentration validity check failed for bad array length") {
      intercept[IllegalArgumentException] {
        lda.transformSchema(dummyDF.schema)
      }
    }
    // Online LDA
    intercept[IllegalArgumentException] {
      new LDA().setLearningOffset(0)
    }
    intercept[IllegalArgumentException] {
      new LDA().setLearningDecay(0)
    }
    intercept[IllegalArgumentException] {
      new LDA().setSubsamplingRate(0)
    }
    intercept[IllegalArgumentException] {
      new LDA().setSubsamplingRate(1.1)
    }
  }

  test("fit & transform with Online LDA") {
    val lda = new LDA().setK(k).setSeed(1).setOptimizer("online").setMaxIter(2)
    val model = lda.fit(dataset)
    MLTestingUtils.checkCopy(model)
    assert(model.isInstanceOf[LocalLDAModel])
    assert(model.vocabSize === vocabSize)
    assert(model.estimatedDocConcentration.size === k)
    assert(model.topicsMatrix.numRows === vocabSize)
    assert(model.topicsMatrix.numCols === k)
    assert(!model.isDistributed)
    // transform()
    val transformed = model.transform(dataset)
    val expectedColumns = Array("features", lda.getTopicDistributionCol)
    expectedColumns.foreach { column =>
      assert(transformed.columns.contains(column))
    }
    // Each topic distribution must be a length-k vector of probabilities.
    transformed.select(lda.getTopicDistributionCol).collect().foreach { r =>
      val topicDistribution = r.getAs[Vector](0)
      assert(topicDistribution.size === k)
      assert(topicDistribution.toArray.forall(w => w >= 0.0 && w <= 1.0))
    }
    // logLikelihood, logPerplexity
    val ll = model.logLikelihood(dataset)
    assert(ll <= 0.0 && ll != Double.NegativeInfinity)
    val lp = model.logPerplexity(dataset)
    assert(lp >= 0.0 && lp != Double.PositiveInfinity)
    // describeTopics
    val topics = model.describeTopics(3)
    assert(topics.count() === k)
    assert(topics.select("topic").rdd.map(_.getInt(0)).collect().toSet === Range(0, k).toSet)
    topics.select("termIndices").collect().foreach { case r: Row =>
      val termIndices = r.getAs[Seq[Int]](0)
      assert(termIndices.length === 3 && termIndices.toSet.size === 3)
    }
    topics.select("termWeights").collect().foreach { case r: Row =>
      val termWeights = r.getAs[Seq[Double]](0)
      assert(termWeights.length === 3 && termWeights.forall(w => w >= 0.0 && w <= 1.0))
    }
  }

  test("fit & transform with EM LDA") {
    val lda = new LDA().setK(k).setSeed(1).setOptimizer("em").setMaxIter(2)
    val model_ = lda.fit(dataset)
    MLTestingUtils.checkCopy(model_)
    assert(model_.isInstanceOf[DistributedLDAModel])
    val model = model_.asInstanceOf[DistributedLDAModel]
    assert(model.vocabSize === vocabSize)
    assert(model.estimatedDocConcentration.size === k)
    assert(model.topicsMatrix.numRows === vocabSize)
    assert(model.topicsMatrix.numCols === k)
    assert(model.isDistributed)
    // A distributed model must be convertible to a local one.
    val localModel = model.toLocal
    assert(localModel.isInstanceOf[LocalLDAModel])
    // training logLikelihood, logPrior
    val ll = model.trainingLogLikelihood
    assert(ll <= 0.0 && ll != Double.NegativeInfinity)
    val lp = model.logPrior
    assert(lp <= 0.0 && lp != Double.NegativeInfinity)
  }

  test("read/write LocalLDAModel") {
    def checkModelData(model: LDAModel, model2: LDAModel): Unit = {
      assert(model.vocabSize === model2.vocabSize)
      assert(Vectors.dense(model.topicsMatrix.toArray) ~==
        Vectors.dense(model2.topicsMatrix.toArray) absTol 1e-6)
      assert(Vectors.dense(model.getDocConcentration) ~==
        Vectors.dense(model2.getDocConcentration) absTol 1e-6)
    }
    val lda = new LDA()
    testEstimatorAndModelReadWrite(lda, dataset, LDASuite.allParamSettings, checkModelData)
  }

  test("read/write DistributedLDAModel") {
    def checkModelData(model: LDAModel, model2: LDAModel): Unit = {
      assert(model.vocabSize === model2.vocabSize)
      assert(Vectors.dense(model.topicsMatrix.toArray) ~==
        Vectors.dense(model2.topicsMatrix.toArray) absTol 1e-6)
      assert(Vectors.dense(model.getDocConcentration) ~==
        Vectors.dense(model2.getDocConcentration) absTol 1e-6)
    }
    val lda = new LDA()
    testEstimatorAndModelReadWrite(lda, dataset,
      LDASuite.allParamSettings ++ Map("optimizer" -> "em"), checkModelData)
  }

  test("EM LDA checkpointing: save last checkpoint") {
    // Checkpoint dir is set by MLlibTestSparkContext
    val lda = new LDA().setK(2).setSeed(1).setOptimizer("em").setMaxIter(3).setCheckpointInterval(1)
    val model_ = lda.fit(dataset)
    assert(model_.isInstanceOf[DistributedLDAModel])
    val model = model_.asInstanceOf[DistributedLDAModel]
    // There should be 1 checkpoint remaining.
    assert(model.getCheckpointFiles.length === 1)
    val checkpointFile = new Path(model.getCheckpointFiles.head)
    val fs = checkpointFile.getFileSystem(spark.sparkContext.hadoopConfiguration)
    assert(fs.exists(checkpointFile))
    // Deleting must both remove the file and clear the model's bookkeeping.
    model.deleteCheckpointFiles()
    assert(model.getCheckpointFiles.isEmpty)
  }

  test("EM LDA checkpointing: remove last checkpoint") {
    // Checkpoint dir is set by MLlibTestSparkContext
    val lda = new LDA().setK(2).setSeed(1).setOptimizer("em").setMaxIter(3).setCheckpointInterval(1)
      .setKeepLastCheckpoint(false)
    val model_ = lda.fit(dataset)
    assert(model_.isInstanceOf[DistributedLDAModel])
    val model = model_.asInstanceOf[DistributedLDAModel]
    assert(model.getCheckpointFiles.isEmpty)
  }

  test("EM LDA disable checkpointing") {
    // Checkpoint dir is set by MLlibTestSparkContext
    val lda = new LDA().setK(2).setSeed(1).setOptimizer("em").setMaxIter(3)
      .setCheckpointInterval(-1)
    val model_ = lda.fit(dataset)
    assert(model_.isInstanceOf[DistributedLDAModel])
    val model = model_.asInstanceOf[DistributedLDAModel]
    assert(model.getCheckpointFiles.isEmpty)
  }
}
| Panos-Bletsos/spark-cost-model-optimizer | mllib/src/test/scala/org/apache/spark/ml/clustering/LDASuite.scala | Scala | apache-2.0 | 10,772 |
package com.twitter.finagle.netty3
import com.twitter.finagle.client.{LatencyCompensation, Transporter}
import com.twitter.finagle.httpproxy.HttpConnectHandler
import com.twitter.finagle.netty3.channel.{ChannelRequestStatsHandler, ChannelStatsHandler, IdleChannelHandler}
import com.twitter.finagle.netty3.socks.SocksConnectHandler
import com.twitter.finagle.netty3.ssl.SslConnectHandler
import com.twitter.finagle.netty3.transport.ChannelTransport
import com.twitter.finagle.socks.{SocksProxyFlags, Unauthenticated, UsernamePassAuthenticationSetting}
import com.twitter.finagle.ssl.Engine
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.util.DefaultTimer
import com.twitter.finagle.{Stack, WriteException, CancelledConnectionException}
import com.twitter.util.{Future, Promise, Duration, NonFatal, Stopwatch}
import java.net.{InetSocketAddress, SocketAddress}
import java.nio.channels.UnresolvedAddressException
import java.util.IdentityHashMap
import java.util.concurrent.TimeUnit
import java.util.logging.Level
import org.jboss.netty.channel.ChannelHandler
import org.jboss.netty.channel.socket.ChannelRunnableWrapper
import org.jboss.netty.channel.socket.nio.{NioSocketChannel, NioClientSocketChannelFactory}
import org.jboss.netty.channel.{ChannelFactory => NettyChannelFactory, _}
import org.jboss.netty.handler.timeout.IdleStateHandler
import scala.collection.JavaConverters._
import scala.collection.mutable
/** Bridges a netty3 channel with a transport.
  *
  * Applying a `SocketAddress` creates a fresh channel via `newChannel`, wraps
  * it in a transport via `newTransport`, starts the connect, and returns a
  * future that resolves to the transport once the connect succeeds.
  */
private[netty3] class ChannelConnector[In, Out](
  newChannel: () => Channel,
  newTransport: Channel => Transport[In, Out],
  statsReceiver: StatsReceiver
) extends (SocketAddress => Future[Transport[In, Out]]) {
  // Connection-establishment metrics.
  private[this] val connectLatencyStat = statsReceiver.stat("connect_latency_ms")
  private[this] val failedConnectLatencyStat = statsReceiver.stat("failed_connect_latency_ms")
  private[this] val cancelledConnects = statsReceiver.counter("cancelled_connects")

  def apply(addr: SocketAddress): Future[Transport[In, Out]] = {
    require(addr != null)
    val elapsed = Stopwatch.start()

    // Channel creation itself can fail; surface that as a failed future
    // rather than a thrown exception.
    val ch = try newChannel() catch {
      case NonFatal(exc) => return Future.exception(exc)
    }

    // Transport is now bound to the channel; this is done prior to
    // it being connected so we don't lose any messages.
    val transport = newTransport(ch)
    val connectFuture = ch.connect(addr)

    val promise = new Promise[Transport[In, Out]]
    promise setInterruptHandler { case _cause =>
      // Propagate cancellations onto the netty future.
      connectFuture.cancel()
    }

    // Translate the netty connect result into the Twitter promise:
    // success -> transport; cancellation -> retryable WriteException;
    // failure -> the cause (unresolved addresses pass through unwrapped).
    connectFuture.addListener(new ChannelFutureListener {
      def operationComplete(f: ChannelFuture) {
        val latency = elapsed().inMilliseconds
        if (f.isSuccess) {
          connectLatencyStat.add(latency)
          promise.setValue(transport)
        } else if (f.isCancelled) {
          cancelledConnects.incr()
          promise.setException(WriteException(new CancelledConnectionException))
        } else {
          failedConnectLatencyStat.add(latency)
          promise.setException(f.getCause match {
            case e: UnresolvedAddressException => e
            case e => WriteException(e)
          })
        }
      }
    })

    // Close the channel on any failure (including interrupt-triggered ones).
    // NOTE(review): this expression is also the method's return value — it
    // relies on com.twitter.util.Future#onFailure returning the future
    // itself; confirm against the util-core version in use.
    promise onFailure { _ =>
      Channels.close(ch)
    }
  }
}
object Netty3Transporter {
  import com.twitter.finagle.param._

  // Channel options applied when none are configured explicitly.
  val defaultChannelOptions: Map[String, Object] = Map(
    "tcpNoDelay" -> java.lang.Boolean.TRUE,
    "reuseAddress" -> java.lang.Boolean.TRUE,
    "connectTimeoutMillis" -> (1000L: java.lang.Long)
  )

  // Shared NIO client channel factory. Its external resources are
  // deliberately never released (releaseExternalResources is a no-op)
  // because the factory is shared process-wide.
  val channelFactory: NettyChannelFactory = new NioClientSocketChannelFactory(
    Executor, 1 /*# boss threads*/, WorkerPool, DefaultTimer.netty) {
    override def releaseExternalResources() = () // no-op; unreleasable
  }

  /**
   * A [[com.twitter.finagle.Stack.Param]] used to configure a netty3
   * ChannelFactory.
   */
  case class ChannelFactory(cf: NettyChannelFactory) {
    def mk(): (ChannelFactory, Stack.Param[ChannelFactory]) =
      (this, ChannelFactory.param)
  }
  object ChannelFactory {
    implicit val param = Stack.Param(ChannelFactory(channelFactory))
  }

  /**
   * A [[com.twitter.finagle.Stack.Param]] used to configure a transport
   * factory, a function from a netty3 channel to a finagle Transport.
   */
  case class TransportFactory(newTransport: Channel => Transport[Any, Any]) {
    def mk(): (TransportFactory, Stack.Param[TransportFactory]) =
      (this, TransportFactory.param)
  }
  object TransportFactory {
    implicit val param = Stack.Param(TransportFactory(new ChannelTransport(_)))
  }

  /**
   * Constructs a `Netty3Transporter` given a netty3 `ChannelPipelineFactory`
   * `Stack.Params`.
   */
  private[netty3] def make[In, Out](
    pipelineFactory: ChannelPipelineFactory,
    params: Stack.Params
  ): Netty3Transporter[In, Out] = {
    val Label(label) = params[Label]
    val Logger(logger) = params[Logger]

    // transport and transporter params
    val ChannelFactory(cf) = params[ChannelFactory]
    val TransportFactory(newTransport) = params[TransportFactory]
    val Transporter.ConnectTimeout(connectTimeout) = params[Transporter.ConnectTimeout]
    val LatencyCompensation.Compensation(compensation) = params[LatencyCompensation.Compensation]
    val Transporter.TLSHostname(tlsHostname) = params[Transporter.TLSHostname]
    val Transporter.HttpProxy(httpProxy, httpProxyCredentials) = params[Transporter.HttpProxy]
    val Transporter.SocksProxy(socksProxy, socksCredentials) = params[Transporter.SocksProxy]
    val Transport.BufferSizes(sendBufSize, recvBufSize) = params[Transport.BufferSizes]
    val Transport.TLSClientEngine(tls) = params[Transport.TLSClientEngine]
    val Transport.Liveness(readerTimeout, writerTimeout, keepAlive) = params[Transport.Liveness]
    // Verbose transports get a channel snooper that logs through the
    // configured logger.
    val snooper = params[Transport.Verbose] match {
      case Transport.Verbose(true) => Some(ChannelSnooper(label)(logger.log(Level.INFO, _, _)))
      case _ => None
    }
    val Transport.Options(noDelay, reuseAddr) = params[Transport.Options]

    // Assemble netty channel options; the latency compensation is folded
    // into the connect timeout. Optional params are only set when present.
    val opts = new mutable.HashMap[String, Object]()
    opts += "connectTimeoutMillis" -> ((connectTimeout + compensation).inMilliseconds: java.lang.Long)
    opts += "tcpNoDelay" -> (noDelay: java.lang.Boolean)
    opts += "reuseAddress" -> (reuseAddr: java.lang.Boolean)
    for (v <- keepAlive) opts += "keepAlive" -> (v: java.lang.Boolean)
    for (s <- sendBufSize) opts += "sendBufferSize" -> (s: java.lang.Integer)
    for (s <- recvBufSize) opts += "receiveBufferSize" -> (s: java.lang.Integer)
    for (v <- params[Transporter.TrafficClass].value)
      opts += "trafficClass" -> (v: java.lang.Integer)

    Netty3Transporter[In, Out](
      label,
      pipelineFactory,
      newChannel = cf.newChannel(_),
      newTransport = (ch: Channel) => Transport.cast[In, Out](newTransport(ch)),
      tlsConfig = tls map { case engine => Netty3TransporterTLSConfig(engine, tlsHostname) },
      httpProxy = httpProxy,
      httpProxyCredentials = httpProxyCredentials,
      socksProxy = socksProxy,
      socksUsernameAndPassword = socksCredentials,
      channelReaderTimeout = readerTimeout,
      channelWriterTimeout = writerTimeout,
      channelSnooper = snooper,
      channelOptions = opts.toMap
    )
  }

  /**
   * Constructs a `Transporter[In, Out]` given a netty3 `ChannelPipelineFactory`
   * responsible for framing a `Transport` stream. The `Transporter` is configured
   * via the passed in [[com.twitter.finagle.Stack.Param]]'s.
   *
   * @see [[com.twitter.finagle.client.Transporter]]
   * @see [[com.twitter.finagle.transport.Transport]]
   * @see [[com.twitter.finagle.param]]
   */
  def apply[In, Out](
    pipelineFactory: ChannelPipelineFactory,
    params: Stack.Params
  ): Transporter[In, Out] = {
    val Stats(stats) = params[Stats]
    val transporter = make[In, Out](pipelineFactory, params)
    new Transporter[In, Out] {
      def apply(sa: SocketAddress): Future[Transport[In, Out]] =
        transporter(sa, stats)
    }
  }
}
/**
 * Netty3 TLS configuration.
 *
 * @param newEngine Creates a new SSL [[Engine]] for the given remote address.
 *
 * @param verifyHost If specified, the TLS session's hostname is checked
 * against the given value (see SslConnectHandler usage below).
 */
case class Netty3TransporterTLSConfig(
  newEngine: SocketAddress => Engine, verifyHost: Option[String])
/**
 * A [[ChannelFutureListener]] that fires a "channelClosed" upstream event on
 * the listened channel's pipeline. For NIO socket channels the event is run
 * on the channel's I/O worker thread (async) so that event ordering is
 * preserved; other channel types fall back to `fireChannelClosedLater`.
 */
private[netty3] object FireChannelClosedLater extends ChannelFutureListener {
  override def operationComplete(future: ChannelFuture): Unit =
    future.getChannel match {
      case nio: NioSocketChannel =>
        val fireClosed = new Runnable {
          override def run(): Unit = Channels.fireChannelClosed(nio)
        }
        nio.getWorker.executeInIoThread(
          new ChannelRunnableWrapper(nio, fireClosed), /* alwaysAsync */ true)
      case other =>
        Channels.fireChannelClosedLater(other)
    }
}
/**
 * A transporter for netty3 which, given an endpoint name (socket
 * address), provides a typed transport for communicating with this
 * endpoint.
 *
 * @tparam In the type of requests. The given pipeline must consume
 * `Req`-typed objects
 *
 * @tparam Out the type of replies. The given pipeline must produce
 * objects of this type.
 *
 * @param pipelineFactory the pipeline factory that implements the
 * the ''Codec'': it must input (downstream) ''In'' objects,
 * and output (upstream) ''Out'' objects.
 *
 * @param newChannel A function used to create a new netty3 channel,
 * given a pipeline.
 *
 * @param newTransport Create a new transport, given a channel.
 *
 * @param tlsConfig If defined, use SSL with the given configuration
 *
 * @param channelReaderTimeout The amount of time for which a channel
 * may be read-idle.
 *
 * @param channelWriterTimeout The amount of time for which a channel
 * may be write-idle.
 *
 * @param channelSnooper If defined, install the given snooper on
 * each channel. Used for debugging.
 *
 * @param channelOptions These netty channel options are applied to
 * the channel prior to establishing a new connection.
 */
case class Netty3Transporter[In, Out](
  name: String,
  pipelineFactory: ChannelPipelineFactory,
  newChannel: ChannelPipeline => Channel =
    Netty3Transporter.channelFactory.newChannel,
  newTransport: Channel => Transport[In, Out] =
    (ch: Channel) => Transport.cast[In, Out](new ChannelTransport[Any, Any](ch)),
  tlsConfig: Option[Netty3TransporterTLSConfig] = None,
  httpProxy: Option[SocketAddress] = None,
  socksProxy: Option[SocketAddress] = SocksProxyFlags.socksProxy,
  socksUsernameAndPassword: Option[(String,String)] = SocksProxyFlags.socksUsernameAndPassword,
  channelReaderTimeout: Duration = Duration.Top,
  channelWriterTimeout: Duration = Duration.Top,
  channelSnooper: Option[ChannelSnooper] = None,
  channelOptions: Map[String, Object] = Netty3Transporter.defaultChannelOptions,
  httpProxyCredentials: Option[Transporter.Credentials] = None
) extends ((SocketAddress, StatsReceiver) => Future[Transport[In, Out]]) {
  // Cache of one ChannelStatsHandler per StatsReceiver (identity-keyed),
  // guarded by `synchronized` in channelStatsHandler.
  private[this] val statsHandlers = new IdentityHashMap[StatsReceiver, ChannelHandler]

  // Returns the cached stats handler for `statsReceiver`, creating it on
  // first use.
  def channelStatsHandler(statsReceiver: StatsReceiver): ChannelHandler = synchronized {
    if (!(statsHandlers containsKey statsReceiver)) {
      statsHandlers.put(statsReceiver, new ChannelStatsHandler(statsReceiver))
    }
    statsHandlers.get(statsReceiver)
  }

  // Builds the full client pipeline for a connection to `addr`.
  // Handlers are addFirst'ed, so the LAST addFirst ends up closest to the
  // network; ordering below is therefore significant.
  private[netty3] def newPipeline(
    addr: SocketAddress,
    statsReceiver: StatsReceiver
  ): ChannelPipeline = {
    val pipeline = pipelineFactory.getPipeline()

    pipeline.addFirst("channelStatsHandler", channelStatsHandler(statsReceiver))
    pipeline.addFirst("channelRequestStatsHandler",
      new ChannelRequestStatsHandler(statsReceiver)
    )

    // Idle detection: 0 means "disabled" for the corresponding direction.
    if (channelReaderTimeout.isFinite || channelWriterTimeout.isFinite) {
      val rms =
        if (channelReaderTimeout.isFinite)
          channelReaderTimeout.inMilliseconds
        else
          0L
      val wms =
        if (channelWriterTimeout.isFinite)
          channelWriterTimeout.inMilliseconds
        else
          0L

      pipeline.addFirst("idleReactor", new IdleChannelHandler(statsReceiver))
      pipeline.addFirst("idleDetector",
        new IdleStateHandler(DefaultTimer.netty, rms, wms, 0, TimeUnit.MILLISECONDS))
    }

    for (Netty3TransporterTLSConfig(newEngine, verifyHost) <- tlsConfig) {
      import org.jboss.netty.handler.ssl._

      val engine = newEngine(addr)
      engine.self.setUseClientMode(true)
      engine.self.setEnableSessionCreation(true)
      // With no verifyHost configured, the verifier accepts any session.
      val verifier = verifyHost.map(SslConnectHandler.sessionHostnameVerifier).getOrElse {
        Function.const(None) _
      }

      val sslHandler = new SslHandler(engine.self)
      val sslConnectHandler = new SslConnectHandler(sslHandler, verifier)

      pipeline.addFirst("sslConnect", sslConnectHandler)
      pipeline.addFirst("ssl", sslHandler)

      // We should close the channel if the remote peer closed TLS session [1] (i.e., sent "close_notify").
      // While it's possible to restart [2] the TLS session we decided to close it instead, since this
      // approach is safe and fits well into the Finagle infrastructure. Rather than tolerating the errors
      // on the transport level, we fail (close the channel) instead and propagate the exception to the
      // higher level (load balancing, connection pooling, etc.), so it can react on the failure.
      //
      // In order to close the channel, we simply fire the upstream "channelClosed" event in the pipeline.
      // To maintain events in order, the upstream event should be fired in the I/O thread.
      //
      // [1]: https://github.com/netty/netty/issues/137
      // [2]: https://github.com/netty/netty/blob/3.10/src/main/java/org/jboss/netty/handler/ssl/SslHandler.java#L119
      sslHandler.getSSLEngineInboundCloseFuture.addListener(FireChannelClosedLater)
    }

    // SOCKS proxying is skipped for loopback/link-local destinations and
    // for unresolved addresses.
    (socksProxy, addr) match {
      case (Some(proxyAddr), inetSockAddr: InetSocketAddress) if !inetSockAddr.isUnresolved =>
        val inetAddr = inetSockAddr.getAddress
        if (!inetAddr.isLoopbackAddress && !inetAddr.isLinkLocalAddress) {
          val authentication = socksUsernameAndPassword match {
            case (Some((username, password))) =>
              UsernamePassAuthenticationSetting(username, password)
            case _ => Unauthenticated
          }
          pipeline.addFirst("socksConnect",
            new SocksConnectHandler(proxyAddr, inetSockAddr, Seq(authentication)))
        }
      case _ =>
    }

    (httpProxy, addr) match {
      case (Some(proxyAddr), inetAddr: InetSocketAddress) if !inetAddr.isUnresolved =>
        HttpConnectHandler.addHandler(proxyAddr, inetAddr, pipeline, httpProxyCredentials)
      case _ =>
    }

    // The snooper (if any) goes first so it observes raw traffic.
    for (snooper <- channelSnooper)
      pipeline.addFirst("channelSnooper", snooper)

    pipeline
  }

  // Creates a channel with the pipeline above and applies channelOptions.
  private def newConfiguredChannel(addr: SocketAddress, statsReceiver: StatsReceiver) = {
    val ch = newChannel(newPipeline(addr, statsReceiver))
    ch.getConfig.setOptions(channelOptions.asJava)
    ch
  }

  /** Connects to `addr`, yielding a transport over the framed channel. */
  def apply(addr: SocketAddress, statsReceiver: StatsReceiver): Future[Transport[In, Out]] = {
    val conn = new ChannelConnector[In, Out](
      () => newConfiguredChannel(addr, statsReceiver),
      newTransport, statsReceiver)
    conn(addr)
  }
}
| a-manumohan/finagle | finagle-core/src/main/scala/com/twitter/finagle/netty3/Netty3Transporter.scala | Scala | apache-2.0 | 15,563 |
package chandu0101.scalajs.react.components.demo.components.reactinfinite
import chandu0101.scalajs.react.components.demo.components.CodeExample
import chandu0101.scalajs.react.components.listviews.ReactInfinite
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import scala.scalajs.js.Dynamic.{literal => json}
import scala.scalajs.js.{Array => JArray}
import scalacss.Defaults._
import scalacss.ScalaCssReact._
/**
 * Demo page for the ReactInfinite list-view component: generates 500 fake
 * rows after mount and renders them in a fixed-height infinite-scroll
 * container, alongside the code sample shown to the user.
 */
object ReactInfiniteDemo {

  // Code sample displayed verbatim in the demo (not executed).
  val code =
    """
      |
      |  val data = (1 to 500).toVector.map(i => s"List Item $i")
      |
      |  <.div(
      |     if(S.isLoading) <.div("Loading ..")
      |      else ReactInfinite(elementHeight = 40,
      |        containerHeight = 400)(S.data.map(B.renderRow _))
      |  )
      |
    """.stripMargin

  // ScalaCSS styles for the demo container, the rows and the row separator.
  object styles extends StyleSheet.Inline {

    import dsl._

    val container = style(display.flex,
      justifyContent.center,
      alignItems.center,
      width(65 %%))

    val item = style(
      width(300 px),
      textAlign.center,
      height(70 px),
      padding(20 px)
    )

    val border = style(borderBottom :=! "2px solid rgba(0, 0, 0, 0.1)",
      marginLeft(4 px))
  }

  // isLoading starts true; data is filled in by Backend.loadData after mount.
  case class State(isLoading: Boolean = true, data: Vector[String] = Vector())

  class Backend(t: BackendScope[_, State]) {

    // Renders one list row; the row text doubles as the React key.
    def renderRow(s: String): ReactElement = {
      <.div(styles.item, s, ^.key := s,
        <.div(styles.border)
      )
    }

    // Generates the fake rows and flips isLoading off.
    def loadData() = {
      val data = (1 to 500).toVector.map(i => s"List Item $i")
      t.modState(_.copy(isLoading = false, data = data))
    }
  }

  val component = ReactComponentB[Unit]("ReactSelectDemo")
    .initialState(State())
    .backend(new Backend(_))
    .render((P, S, B) => {
      <.div(
        CodeExample(code, "Demo")(
          <.div(styles.container,
            // Each row is 70px tall (styles.item) in a 400px viewport.
            if (S.isLoading) <.div("Loading ..")
            else ReactInfinite(elementHeight = 70,
              containerHeight = 400)(S.data.map(B.renderRow _))
          )
        )
      )
    })
    .componentDidMount(scope => scope.backend.loadData())
    .buildU

  def apply() = component()
}
| mproch/scalajs-react-components | demo/src/main/scala/chandu0101/scalajs/react/components/demo/components/reactinfinite/ReactInfiniteDemo.scala | Scala | apache-2.0 | 2,133 |
package com.softwaremill.codebrag.dao.user
import com.softwaremill.codebrag.domain._
import org.bson.types.ObjectId
import org.joda.time.DateTime
/**
 * Data-access operations for [[User]] records.
 *
 * Lookup methods return `None` when no matching user exists.
 */
trait UserDAO {
  /** Persists a new user. */
  def add(user: User)

  /** Returns all users. */
  def findAll(): List[User]

  /** Looks a user up by primary id. */
  def findById(userId: ObjectId): Option[User]

  /** Looks a user up by email address. */
  def findByEmail(email: String): Option[User]

  /** Looks a user up by login; the implementation is expected to match
    * case-insensitively (lower-cased), per the method name. */
  def findByLowerCasedLogin(login: String): Option[User]

  /** Convenience: treats the single argument as both login and email. */
  def findByLoginOrEmail(loginOrEmail: String): Option[User] = findByLoginOrEmail(loginOrEmail, loginOrEmail)

  /** Looks a user up matching either the given login or the given email. */
  def findByLoginOrEmail(login: String, email: String): Option[User]

  /** Looks a user up by authentication token. */
  def findByToken(token: String): Option[User]

  /** Updates an existing user record. */
  def modifyUser(user: User)

  /** Replaces the user's authentication credentials. */
  def changeAuthentication(id: ObjectId, authentication: Authentication)

  /** Records when notifications were last dispatched to the user. */
  def rememberNotifications(id: ObjectId, notifications: LastUserNotificationDispatch)

  /** Replaces the user's settings. */
  def changeUserSettings(userID: ObjectId, newSettings: UserSettings)

  /** Sets the date from which commits count as "to review" for the user. */
  def setToReviewStartDate(id: ObjectId, newToReviewDate: DateTime)

  /** Finds the user who authored the given commit, if any. */
  def findCommitAuthor(commit: CommitInfo): Option[User]

  /** Partial (lightweight) user details matched by name or email. */
  def findPartialUserDetails(names: Iterable[String], emails: Iterable[String]): Iterable[PartialUserDetails]

  /** Partial (lightweight) user details for the given ids. */
  def findPartialUserDetails(ids: Iterable[ObjectId]): Iterable[PartialUserDetails]

  /** Total number of users. */
  def countAll(): Long

  /** Number of active users (activity criterion is implementation-defined). */
  def countAllActive(): Long

  /** Removes the user's expired tokens, returning the updated user. */
  def removeExpiredTokens(userId: ObjectId): Option[User]
}
| softwaremill/codebrag | codebrag-dao/src/main/scala/com/softwaremill/codebrag/dao/user/UserDAO.scala | Scala | agpl-3.0 | 1,299 |
/*
* Copyright (c) 2015, PagerDuty
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions
* and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials provided with
* the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
* WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.pagerduty.eris.serializers
import java.nio.ByteBuffer
import com.netflix.astyanax.Serializer
import com.netflix.astyanax.model.Composite
import com.netflix.astyanax.serializers.{AbstractSerializer, ComparatorType}
import scala.collection.JavaConversions._
/**
 * Serializes tuples (any `Product`) as Cassandra composites. Do not extend
 * this class directly; use InferredSerializer instead:
 * {{{
 * import com.pagerduty.eris.serializers.implicits._
 * class MyTupleSerializer extends InferredSerializer[(Int, (String, Time))]
 * }}}
 *
 * @param serializers one serializer per product element, in order
 * @param factory rebuilds the product from its deserialized elements
 */
class ProductSerializer[P <: Product](
    protected val serializers: IndexedSeq[Serializer[Any]],
    protected val factory: IndexedSeq[Any] => Product)
  extends AbstractSerializer[P]
  with ValidatorClass {

  // Astyanax wants the serializers as a java.util.List for position-based
  // injection during deserialization.
  private[this] val serializerInjector: java.util.List[Serializer[_]] = serializers

  /** Cassandra validator-class string, e.g. "CompositeType(a,b)". */
  protected def mkCompositeValidatorClass(validatorClasses: Seq[String]): String =
    validatorClasses.mkString("CompositeType(", ",", ")")

  def toByteBuffer(product: P): ByteBuffer = {
    require(product.productArity == serializers.size, "Product arity does not match serializer.")
    val composite = new Composite()
    serializers.indices.foreach { i =>
      composite.setComponent(i, product.productElement(i), serializers(i))
    }
    composite.serialize()
  }

  def fromByteBuffer(bytes: ByteBuffer): P = {
    val composite = new Composite()
    composite.setSerializersByPosition(serializerInjector)
    composite.deserialize(bytes)
    val components = serializers.indices.map(i => composite.get(i))
    factory(components).asInstanceOf[P]
  }

  override def getComparatorType(): ComparatorType = ComparatorType.COMPOSITETYPE

  val validatorClass: String =
    mkCompositeValidatorClass(serializers.map(ValidatorClass(_)))
}
| PagerDuty/eris-core | main/src/main/scala/com/pagerduty/eris/serializers/ProductSerializer.scala | Scala | bsd-3-clause | 3,402 |
package org.flowpaint.pixelprocessors
/**
 * Pixel processor stub, presumably intended to produce noise that wraps
 * around at tile edges (inferred from the name — the class body is empty,
 * so it is either unfinished or a placeholder; confirm before use).
 *
 * @author Hans Haggstrom
 */
class WrappingNoise
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import sbt._
import play.core.PlayVersion
object AppDependencies {

  // url-builder is published once per supported Play version; only the
  // artifact suffix differs between cross-builds.
  private val urlBuilderVersion = "3.5.0"

  private def urlBuilder(playSuffix: String): ModuleID =
    "uk.gov.hmrc" %% "url-builder" % s"$urlBuilderVersion-play-$playSuffix"

  private def scalatestPlusPlay(version: String): ModuleID =
    "org.scalatestplus.play" %% "scalatestplus-play" % version % Test

  /** Compile-scope dependencies, cross-built for Play 2.6 / 2.7 / 2.8. */
  val compile: Seq[ModuleID] = PlayCrossCompilation.dependencies(
    shared = Seq(
      "com.ibm.icu" % "icu4j" % "69.1",
      "com.typesafe.play" %% "play" % PlayVersion.current
    ),
    play26 = Seq(urlBuilder("26")),
    play27 = Seq(urlBuilder("27")),
    play28 = Seq(urlBuilder("28"))
  )

  /** Test-scope dependencies, cross-built per Play version. */
  val test: Seq[ModuleID] = PlayCrossCompilation.dependencies(
    shared = Seq("org.pegdown" % "pegdown" % "1.6.0" % Test),
    play26 = Seq(scalatestPlusPlay("3.1.2")),
    play27 = Seq(scalatestPlusPlay("4.0.3")),
    play28 = Seq(scalatestPlusPlay("5.0.0"))
  )

  val all: Seq[ModuleID] = compile ++ test
}
| hmrc/play-language | project/AppDependencies.scala | Scala | apache-2.0 | 1,544 |
package uk.org.nbn.nbnv.importer.validation
import uk.org.nbn.nbnv.importer.records.NbnRecord
import uk.org.nbn.nbnv.importer.fidelity.{ResultLevel, Result}
// Validates that the Determiner field does not exceed its maximum length (140).
class Nbnv92Validator {
  def validate(record: NbnRecord) = {
    val lengthValidator = new LengthValidator
    lengthValidator.validate("NBNV-92", record.key, "Determiner", record.determiner.getOrElse(""), 140)
  }
}
| JNCC-dev-team/nbn-importer | importer/src/main/scala/uk/org/nbn/nbnv/importer/validation/Nbnv92Validator.scala | Scala | apache-2.0 | 411 |
/* Copyright 2016-2019 UniCredit S.p.A.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package unicredit.lethe
package data
import java.util.UUID
import oram.{ ORAM, MultiORAM }
import transport.Remote
/**
 * Oblivious data store whose records can be looked up by a single indexed
 * field. Records live in `oram` keyed by UUID; `index` maps a field value to
 * the set of UUIDs carrying it.
 */
class DataStore[Data, Field](
    oram: ORAM[UUID, Data],
    index: ORAM[Field, Set[UUID]],
    field: Data => Field
) {

  /** Stores `data` under a fresh UUID and registers that UUID in the field index. */
  def add(data: Data) = {
    val id = UUID.randomUUID
    val key = field(data)
    oram.write(id, data)
    val indexed = index.read(key)
    index.write(key, indexed + id)
  }

  /** Returns every stored datum whose indexed field equals `f`. */
  def search(f: Field): Set[Data] =
    index.read(f).map(oram.read)
}
/**
 * Oblivious data store searchable by two indexed fields. Mirrors
 * [[DataStore]] but maintains one index ORAM per field.
 */
class DataStore2[Data, Field1, Field2](
    oram: ORAM[UUID, Data],
    index1: ORAM[Field1, Set[UUID]],
    index2: ORAM[Field2, Set[UUID]],
    field1: Data => Field1,
    field2: Data => Field2
) {

  /** Stores `data` under a fresh UUID and registers that UUID in both indexes. */
  def add(data: Data) = {
    val id = UUID.randomUUID
    val key1 = field1(data)
    val key2 = field2(data)
    oram.write(id, data)
    val indexed1 = index1.read(key1)
    index1.write(key1, indexed1 + id)
    val indexed2 = index2.read(key2)
    index2.write(key2, indexed2 + id)
  }

  /** Returns every stored datum whose first indexed field equals `f`. */
  def search1(f: Field1): Set[Data] =
    index1.read(f).map(oram.read)

  /** Returns every stored datum whose second indexed field equals `f`. */
  def search2(f: Field2): Set[Data] =
    index2.read(f).map(oram.read)
}
/** Factory helpers that generate and initialize the ORAMs backing a data store. */
object DataStore {
  import boopickle.Default._

  // Pointed instances for the internal key/index value types — presumably the
  // "default/empty" value each ORAM cell holds (fixed placeholder UUID, empty
  // UUID set); confirm against the Pointed definition in the oram package.
  implicit val puuid = Pointed(UUID.fromString("16b01bbe-484b-49e8-85c5-f424a983205f"))
  implicit val puuidset = Pointed(Set.empty[UUID])

  /**
   * Creates a single-index [[DataStore]]: generates the field-index ORAM and
   * the data ORAM from one remote, initializes both, and wraps them.
   */
  def apply[Data: Pickler: Pointed, Field: Pickler: Pointed](
    f: Data => Field,
    remote: Remote,
    passPhrase: String,
    params: Params
  ) = {
    val (index, oram) = MultiORAM.gen2[Field, Set[UUID], UUID, Data](
      remote, passPhrase, params)
    oram.init
    index.init
    new DataStore(oram, index, f)
  }

  /**
   * Creates a two-index [[DataStore2]]: generates both index ORAMs and the
   * data ORAM from one remote, initializes all three, and wraps them.
   */
  def apply[Data: Pickler: Pointed, Field1: Pickler: Pointed, Field2: Pickler: Pointed](
    f1: Data => Field1,
    f2: Data => Field2,
    remote: Remote,
    passPhrase: String,
    params: Params
  ) = {
    val (index1, index2, oram) = MultiORAM.gen3[
      Field1,
      Set[UUID],
      Field2,
      Set[UUID],
      UUID,
      Data
    ](remote, passPhrase, params)
    oram.init
    index1.init
    index2.init
    new DataStore2(oram, index1, index2, f1, f2)
  }
} | unicredit/lethe | src/main/scala/unicredit/lethe/data/DataStore.scala | Scala | apache-2.0 | 2,674 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics}
import org.apache.spark.sql.connector.catalog.{Table, TableProvider}
import org.apache.spark.sql.connector.read.streaming.SparkDataStream
import org.apache.spark.sql.execution.LeafExecNode
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.util.CaseInsensitiveStringMap
object StreamingRelation {
  /** Builds a [[StreamingRelation]] whose name and output come from the source's metadata. */
  def apply(dataSource: DataSource): StreamingRelation = {
    val sourceInfo = dataSource.sourceInfo
    StreamingRelation(dataSource, sourceInfo.name, sourceInfo.schema.toAttributes)
  }
}
/**
 * Used to link a streaming [[DataSource]] into a
 * [[org.apache.spark.sql.catalyst.plans.logical.LogicalPlan]]. This is only used for creating
 * a streaming [[org.apache.spark.sql.DataFrame]] from [[org.apache.spark.sql.DataFrameReader]].
 * It should be used to create [[Source]] and converted to [[StreamingExecutionRelation]] when
 * passing to [[StreamExecution]] to run a query.
 */
case class StreamingRelation(dataSource: DataSource, sourceName: String, output: Seq[Attribute])
  extends LeafNode with MultiInstanceRelation {
  // This node always represents a streaming plan.
  override def isStreaming: Boolean = true
  override def toString: String = sourceName

  // There's no sensible value here. On the execution path, this relation will be
  // swapped out with microbatches. But some dataframe operations (in particular explain) do lead
  // to this node surviving analysis. So we satisfy the LeafNode contract with the session default
  // value.
  override def computeStats(): Statistics = Statistics(
    sizeInBytes = BigInt(dataSource.sparkSession.sessionState.conf.defaultSizeInBytes)
  )

  // Re-instance the output attributes so multiple references to this relation
  // (e.g. self-joins) get distinct expression ids.
  override def newInstance(): LogicalPlan = this.copy(output = output.map(_.newInstance()))
}
}
/**
 * Used to link a streaming [[Source]] of data into a
 * [[org.apache.spark.sql.catalyst.plans.logical.LogicalPlan]].
 */
case class StreamingExecutionRelation(
    source: SparkDataStream,
    output: Seq[Attribute])(session: SparkSession)
  extends LeafNode with MultiInstanceRelation {

  // `session` is a curried constructor argument; listing it here makes
  // tree-copying (`makeCopy`) carry it along.
  override def otherCopyArgs: Seq[AnyRef] = session :: Nil
  override def isStreaming: Boolean = true
  override def toString: String = source.toString

  // There's no sensible value here. On the execution path, this relation will be
  // swapped out with microbatches. But some dataframe operations (in particular explain) do lead
  // to this node surviving analysis. So we satisfy the LeafNode contract with the session default
  // value.
  override def computeStats(): Statistics = Statistics(
    sizeInBytes = BigInt(session.sessionState.conf.defaultSizeInBytes)
  )

  // Fresh expression ids for the output; the curried session is preserved.
  override def newInstance(): LogicalPlan = this.copy(output = output.map(_.newInstance()))(session)
}
// We have to pack in the V1 data source as a shim, for the case when a source implements
// continuous processing (which is always V2) but only has V1 microbatch support. We don't
// know at read time whether the query is continuous or not, so we need to be able to
// swap a V1 relation back in.
/**
 * Used to link a [[TableProvider]] into a streaming
 * [[org.apache.spark.sql.catalyst.plans.logical.LogicalPlan]]. This is only used for creating
 * a streaming [[org.apache.spark.sql.DataFrame]] from [[org.apache.spark.sql.DataFrameReader]],
 * and should be converted before passing to [[StreamExecution]].
 */
case class StreamingRelationV2(
    source: TableProvider,
    sourceName: String,
    table: Table,
    extraOptions: CaseInsensitiveStringMap,
    output: Seq[Attribute],
    v1Relation: Option[StreamingRelation])(session: SparkSession)
  extends LeafNode with MultiInstanceRelation {

  // Carry the curried `session` argument through tree copies.
  override def otherCopyArgs: Seq[AnyRef] = session :: Nil
  override def isStreaming: Boolean = true
  override def toString: String = sourceName

  // Placeholder statistics: the session default size (same approach as the
  // other streaming leaf nodes in this file).
  override def computeStats(): Statistics = Statistics(
    sizeInBytes = BigInt(session.sessionState.conf.defaultSizeInBytes)
  )

  // Fresh expression ids for the output; the curried session is preserved.
  override def newInstance(): LogicalPlan = this.copy(output = output.map(_.newInstance()))(session)
}
/**
 * A dummy physical plan for [[StreamingRelation]] to support
 * [[org.apache.spark.sql.Dataset.explain]]
 */
case class StreamingRelationExec(sourceName: String, output: Seq[Attribute]) extends LeafExecNode {
  override def toString: String = sourceName
  // This node exists only so plans can be printed; it must never be executed.
  override protected def doExecute(): RDD[InternalRow] = {
    throw new UnsupportedOperationException("StreamingRelationExec cannot be executed")
  }
}
object StreamingExecutionRelation {
  /** Builds a [[StreamingExecutionRelation]] whose output is derived from the source schema. */
  def apply(source: Source, session: SparkSession): StreamingExecutionRelation = {
    val attributes = source.schema.toAttributes
    StreamingExecutionRelation(source, attributes)(session)
  }
}
| ConeyLiu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamingRelation.scala | Scala | apache-2.0 | 5,741 |
/*
* Copyright 2013 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package geomesa.core.iterators
import AbstractIteratorTest._
import collection.JavaConverters._
import java.nio.ByteBuffer
import org.apache.accumulo.core.data.Key
import org.apache.accumulo.core.data.Range
import org.apache.accumulo.core.data.Value
import org.apache.accumulo.core.security.Authorizations
import org.junit.Before
import org.junit.Test
class RowOnlyIteratorTest
  extends AbstractIteratorTest {

  /** Seeds the test table with a 5.0 double for every row/cf/cq/timestamp combination. */
  @Before
  def setup() {
    val rows = Seq("dqb6b46", "dqb6b40", "dqb6b43")
    val cfs = Seq("cf1")
    val cqs = Seq("cqA", "cqb")
    val timestamps = Seq(0, 5, 100)
    val entries =
      for {
        row <- rows
        cf <- cfs
        cq <- cqs
        timestamp <- timestamps
      } yield {
        val bytes = new Array[Byte](8)
        ByteBuffer.wrap(bytes).putDouble(5.0)
        new Key(row, cf, cq, timestamp) -> new Value(bytes)
      }
    setup(entries.toMap)
  }

  /** Scans with the RowOnlyIterator installed and prints each key/value pair. */
  @Test
  def nocfts() {
    val scanner = conn.createScanner(TEST_TABLE_NAME, new Authorizations)
    scanner.setRange(new Range)
    RowOnlyIterator.setupRowOnlyIterator(scanner, 1000)
    for (entry <- scanner.asScala) {
      System.out.println(s"${entry.getKey} ${ByteBuffer.wrap(entry.getValue.get).getDouble}")
    }
  }

  /** Baseline scan without the iterator, for manual comparison of the printed output. */
  @Test
  def comparison() {
    val scanner = conn.createScanner(TEST_TABLE_NAME, new Authorizations)
    scanner.setRange(new Range)
    for (entry <- scanner.asScala) {
      System.out.println(s"${entry.getKey} ${ByteBuffer.wrap(entry.getValue.get).getDouble}")
    }
  }
}
| anthonyccri/geomesa | geomesa-core/src/test/scala/geomesa/core/iterators/RowOnlyIteratorTest.scala | Scala | apache-2.0 | 2,171 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.kernel
import akka.actor.{ActorRefFactory, ActorSelection}
/**
 * This trait defines the interface for loading actors based on some value
 * (enum, attribute, etc...). The thought is to allow external consumers to
 * acquire actors through a common interface, minimizing the spread of the
 * logic about the Actors, ActorSystem, and other similar concepts.
 */
trait ActorLoader {
  /**
   * This method is meant to find an actor associated with an enum value. This
   * enum value can map to an actor associated with handling a specific kernel
   * message, a socket type, or other functionality.
   *
   * @param actorEnum The enum value used to load the actor
   *
   * @return An ActorSelection to pass messages to
   */
  def load(actorEnum: Enumeration#Value): ActorSelection
}
/**
 * [[ActorLoader]] that resolves actors by name under the user guardian
 * ("/user/<enum value>") of the supplied actor-ref factory.
 */
case class SimpleActorLoader(actorRefFactory: ActorRefFactory)
  extends ActorLoader
{
  override def load(actorEnum: Enumeration#Value): ActorSelection =
    actorRefFactory.actorSelection(s"/user/${actorEnum.toString}")
}
| codeaudit/spark-kernel | kernel/src/main/scala/com/ibm/spark/kernel/protocol/v5/kernel/ActorLoader.scala | Scala | apache-2.0 | 1,732 |
package com.rasterfoundry.api.project
import com.rasterfoundry.datamodel.{Annotation, User}
import com.rasterfoundry.common.S3
import com.rasterfoundry.api.utils.Config
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.jts.{geom => jts}
import com.amazonaws.services.s3.AmazonS3URI
import geotrellis.proj4.CRS
import geotrellis.vector._
import geotrellis.vector.reproject.Reproject
import org.geotools.data.DefaultTransaction
import org.geotools.data.shapefile.{
ShapefileDataStore,
ShapefileDataStoreFactory
}
import org.geotools.data.simple.SimpleFeatureStore
import org.geotools.feature.DefaultFeatureCollection
import org.geotools.feature.simple.{
SimpleFeatureBuilder,
SimpleFeatureTypeBuilder
}
import org.geotools.referencing.{CRS => _}
import org.geotools.referencing.crs.DefaultGeographicCRS
import org.opengis.feature.simple.SimpleFeature
import better.files._
import java.util.{HashMap => JHashMap}
import java.util.Calendar
object AnnotationShapefileService extends LazyLogging with Config {

  /**
   * Converts annotations into an ESRI shapefile and returns a temporary zip
   * archive bundling the generated shapefile sidecar files. Annotations with
   * no geometry are dropped (createSimpleFeature returns None for them).
   */
  def annotationsToShapefile(annotations: Seq[Annotation]): File = {
    val annotationFeatures = annotations.flatMap(this.createSimpleFeature)
    val featureCollection = new DefaultFeatureCollection()
    annotationFeatures.foreach(feature => {
      featureCollection.add(feature)
    })
    val zipfile = File.newTemporaryFile("export", ".zip")
    // Shapefile parts are written into a temp directory which is zipped into
    // `zipfile` and then discarded.
    File.usingTemporaryDirectory() { directory =>
      this.createShapefiles(featureCollection, directory)
      directory.zipTo(destination = zipfile)
    }
    zipfile
  }

  /**
   * Builds the shapefile zip for `annotations`, uploads it to S3 under the
   * user's default annotation-shapefile key with a one-day expiration, deletes
   * the local temp file, and returns a signed download URL.
   */
  def getAnnotationShapefileDownloadUrl(annotations: List[Annotation],
                                        user: User): String = {
    val zipfile: File = annotationsToShapefile(annotations)
    val cal: Calendar = Calendar.getInstance()
    val s3Client = S3()
    val s3Uri: AmazonS3URI = new AmazonS3URI(
      user.getDefaultAnnotationShapefileSource(dataBucket))
    // Expire the uploaded object one day from now.
    cal.add(Calendar.DAY_OF_YEAR, 1)
    s3Client
      .putObject(dataBucket, s3Uri.getKey, zipfile.toJava)
      .setExpirationTime(cal.getTime)
    zipfile.delete(true)
    s3Client.getSignedUrl(dataBucket, s3Uri.getKey).toString()
  }

  // TODO: Update this to use GeoTrellis's built-in conversion once the id bug is fixed:
  // https://github.com/locationtech/geotrellis/issues/2575
  /**
   * Converts one annotation into a GeoTools SimpleFeature, reprojecting its
   * geometry from EPSG:3857 to EPSG:4326. Returns None when the annotation
   * carries no geometry.
   */
  def createSimpleFeature(annotation: Annotation): Option[SimpleFeature] = {
    annotation.geometry match {
      case Some(geometry) =>
        // annotations in RF DB are projected to EPSG: 3857, WebMercator
        // when exporting, we reproject them to EPSG:4326, WGS:84
        val geom = Reproject(
          geometry.geom,
          CRS.fromEpsgCode(3857),
          CRS.fromEpsgCode(4326)
        )
        val geometryField = "the_geom"
        val sftb = (new SimpleFeatureTypeBuilder)
          .minOccurs(1)
          .maxOccurs(1)
          .nillable(false)
        // NOTE(review): "Annotaion" is a typo for "Annotation", but fixing it
        // changes the feature type name written into the shapefile — confirm
        // downstream consumers before correcting.
        sftb.setName("Annotaion")
        // Register the geometry attribute under the JTS class matching the
        // annotation's concrete geometry type.
        geom match {
          case _: Point => sftb.add(geometryField, classOf[jts.Point])
          case _: Line => sftb.add(geometryField, classOf[jts.LineString])
          case _: Polygon => sftb.add(geometryField, classOf[jts.Polygon])
          case _: MultiPoint =>
            sftb.add(geometryField, classOf[jts.MultiPoint])
          case _: MultiLine =>
            sftb.add(geometryField, classOf[jts.MultiLineString])
          case _: MultiPolygon =>
            sftb.add(geometryField, classOf[jts.MultiPolygon])
          case g: Geometry =>
            throw new Exception(s"Unhandled Geotrellis Geometry $g")
        }
        sftb.setDefaultGeometry(geometryField)
        // Non-geometry attributes, with defaults substituted for missing values.
        val data = Seq(
          ("id", annotation.id),
          ("label", annotation.label match {
            case "" => "Unlabeled"
            case _ => annotation.label
          }),
          ("desc", annotation.description.getOrElse("")),
          ("machinegen", annotation.machineGenerated.getOrElse(false)),
          ("confidence", annotation.confidence.getOrElse(0)),
          ("quality", annotation.quality.getOrElse("UNSURE").toString)
        )
        data.foreach({
          case (key, value) =>
            sftb
              .minOccurs(1)
              .maxOccurs(1)
              .nillable(false)
              .add(key, value.getClass)
        })
        val sft = sftb.buildFeatureType
        val sfb = new SimpleFeatureBuilder(sft)
        // Values must be added in the same order the schema declared its
        // attributes: geometry first, then the `data` fields.
        geom match {
          case Point(pt) => sfb.add(pt)
          case Line(ln) => sfb.add(ln)
          case Polygon(pg) => sfb.add(pg)
          case MultiPoint(mp) => sfb.add(mp)
          case MultiLine(ml) => sfb.add(ml)
          case MultiPolygon(mp) => sfb.add(mp)
          case g: Geometry =>
            throw new Exception(s"Unhandled Geotrellis Geometry $g")
        }
        data.foreach({ case (_, value) => sfb.add(value) })
        Some(sfb.buildFeature(annotation.id.toString))
      case _ =>
        None
    }
  }

  /**
   * Writes `featureCollection` as "shapefile.shp" (plus sidecar files) into
   * `directory`, committing all features in one transaction and rolling back
   * on failure.
   */
  @SuppressWarnings(Array("AsInstanceOf", "CatchThrowable"))
  def createShapefiles(featureCollection: DefaultFeatureCollection,
                       directory: File): Unit = {
    val shapeFile = directory / "shapefile.shp"
    val dataStoreFactory = new ShapefileDataStoreFactory()
    val params = new JHashMap[String, java.io.Serializable]()
    params.put("url", shapeFile.url)
    params.put("create spatial index", true)
    val newDataStore = dataStoreFactory
      .createNewDataStore(params)
      .asInstanceOf[ShapefileDataStore]
    newDataStore.createSchema(featureCollection.getSchema)
    // we reprojected annotations from WebMercator to WGS84 above
    // so schema should be as follow
    newDataStore.forceSchemaCRS(DefaultGeographicCRS.WGS84)
    val transaction = new DefaultTransaction("create")
    val typeName = newDataStore.getTypeNames.head
    // NOTE(review): this match is non-exhaustive — if the feature source is not
    // a SimpleFeatureStore (e.g. read-only) a MatchError is thrown and the
    // transaction is never closed. Confirm that case cannot occur here.
    newDataStore.getFeatureSource(typeName) match {
      case featureStore: SimpleFeatureStore =>
        featureStore.setTransaction(transaction)
        try {
          featureStore.addFeatures(featureCollection)
          transaction.commit()
        } catch {
          case default: Throwable =>
            transaction.rollback()
            throw default
        } finally {
          transaction.close()
        }
    }
  }
}
| azavea/raster-foundry | app-backend/api/src/main/scala/project/AnnotationShapefileService.scala | Scala | apache-2.0 | 6,275 |
/*
* Copyright 2001-2011 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import java.awt.AWTError
import java.lang.annotation._
import java.io.Serializable
import java.lang.reflect.Constructor
import java.lang.reflect.InvocationTargetException
import java.lang.reflect.Method
import java.lang.reflect.Modifier
import java.nio.charset.CoderMalfunctionError
import javax.xml.parsers.FactoryConfigurationError
import javax.xml.transform.TransformerFactoryConfigurationError
import Suite.simpleNameForTest
import Suite.parseSimpleName
import Suite.stripDollars
import Suite.formatterForSuiteStarting
import Suite.formatterForSuiteCompleted
import Suite.checkForPublicNoArgConstructor
import Suite.formatterForSuiteAborted
import Suite.anErrorThatShouldCauseAnAbort
import Suite.getSimpleNameOfAnObjectsClass
import Suite.takesInformer
import Suite.isTestMethodGoodies
import Suite.testMethodTakesAnInformer
import scala.collection.immutable.TreeSet
import Suite.getIndentedText
import Suite.getDecodedName
import Suite.getLineInFile
import org.scalatest.events._
import org.scalatest.tools.StandardOutReporter
import Suite.checkRunTestParamsForNull
import Suite.getIndentedTextForInfo
import Suite.getMessageForException
import Suite.reportTestStarting
import Suite.reportTestIgnored
import Suite.reportTestSucceeded
import Suite.reportTestPending
import Suite.reportTestCanceled
import Suite.reportInfoProvided
import scala.reflect.NameTransformer
/**
* A suite of tests. A <code>Suite</code> instance encapsulates a conceptual
* suite (<em>i.e.</em>, a collection) of tests.
*
* <p>
* This trait provides an interface that allows suites of tests to be run.
* Its implementation enables a default way of writing and executing tests. Subtraits and subclasses can
* override <code>Suite</code>'s methods to enable other ways of writing and executing tests.
* This trait's default approach allows tests to be defined as methods whose name starts with "<code>test</code>."
* This approach is easy to understand, and a good way for Scala beginners to start writing tests.
* More advanced Scala programmers may prefer to mix together other <code>Suite</code> subtraits defined in ScalaTest,
* or create their own, to write tests in the way they feel makes them most productive. Here's a quick overview
* of some of the options to help you get started:
* </p>
*
* <p>
* <em>For JUnit 3 users</em>
* </p>
*
* <p>
* If you are using JUnit 3 (version 3.8 or earlier releases) and you want to write JUnit 3 tests in Scala, look at
* <a href="junit/AssertionsForJUnit.html"><code>AssertionsForJUnit</code></a>,
* <a href="junit/ShouldMatchersForJUnit.html"><code>ShouldMatchersForJUnit</code></a>, and
* <a href="junit/JUnit3Suite.html"><code>JUnit3Suite</code></a>.
* </p>
*
* <p>
* <em>For JUnit 4 users</em>
* </p>
*
* <p>
* If you are using JUnit 4 and you want to write JUnit 4 tests in Scala, look at
* <a href="junit/JUnitSuite.html"><code>JUnitSuite</code></a>, and
* <a href="junit/JUnitRunner.html"><code>JUnitRunner</code></a>. With <code>JUnitRunner</code>,
* you can use any of the traits described here and still run your tests with JUnit 4.
* </p>
*
* <p>
* <em>For TestNG users</em>
* </p>
*
* <p>
* If you are using TestNG and you want to write TestNG tests in Scala, look at
* <a href="testng/TestNGSuite.html"><code>TestNGSuite</code></a>.
* </p>
*
* <p>
* <em>For high-level testing</em>
* </p>
*
* <p>
* If you want to write tests at a higher level than unit tests, such as integration tests, acceptance tests,
* or functional tests, check out <a href="FeatureSpec.html"><code>FeatureSpec</code></a>.
* </p>
*
* <p>
* <em>For unit testing</em>
* </p>
*
* <p>
* If you prefer a behavior-driven development (BDD) style, in which tests are combined with text that
* specifies the behavior being tested, look at
* <a href="FunSpec.html"><code>FunSpec</code></a>,
* <a href="FlatSpec.html"><code>FlatSpec</code></a>,
* <a href="FreeSpec.html"><code>FreeSpec</code></a>, and
* <a href="WordSpec.html"><code>WordSpec</code></a>. Otherwise, if you just want to write tests
* and don't want to combine testing with specifying, look at
* <a href="FunSuite.html"><code>FunSuite</code></a> or read on to learn how to write
* tests using this base trait, <code>Suite</code>.
* </p>
*
* <p>
* To use this trait's approach to writing tests, simply create classes that
* extend <code>Suite</code> and define test methods. Test methods have names of the form <code>testX</code>,
* where <code>X</code> is some unique, hopefully meaningful, string. A test method must be public and
* can have any result type, but the most common result type is <code>Unit</code>. Here's an example:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
*
* class ExampleSuite extends Suite {
*
* def testAddition {
* val sum = 1 + 1
* assert(sum === 2)
* }
*
* def testSubtraction {
* val diff = 4 - 1
* assert(diff === 3)
* }
* }
* </pre>
*
* <p>
* You can run a <code>Suite</code> by invoking <code>execute</code> on it.
* This method, which prints test results to the standard output, is intended to serve as a
* convenient way to run tests from within the Scala interpreter. For example,
* to run <code>ExampleSuite</code> from within the Scala interpreter, you could write:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).execute()
* </pre>
*
* <p>
* And you would see:
* </p>
*
* <pre class="stREPL">
* <span class="stGreen">ExampleSuite:
* - testAddition
* - testSubtraction</span>
* </pre>
*
* <p>
* Or, to run just the <code>testAddition</code> method, you could write:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).execute("testAddition")
* </pre>
*
* <p>
* And you would see:
* </p>
*
* <pre class="stREPL">
* <span class="stGreen">ExampleSuite:
* - testAddition</span>
* </pre>
*
* <p>
* You can also pass to <code>execute</code> a <a href="#configMapSection"><em>config map</em></a> of key-value
* pairs, which will be passed down into suites and tests, as well as other parameters that configure the run itself.
* For more information on running in the Scala interpreter, see the documentation for <code>execute</code> (below) and the
 * <a href="Shell.html">ScalaTest shell</a>.
* </p>
*
* <p>
* The <code>execute</code> method invokes a <code>run</code> method takes seven
* parameters. This <code>run</code> method, which actually executes the suite, will usually be invoked by a test runner, such
* as <code>org.scalatest.tools.Runner</code> or an IDE. See the <a href="tools/Runner$.html">documentation
* for <code>Runner</code></a> for more details.
* </p>
*
* <h2>Assertions and <code>=</code><code>=</code><code>=</code></h2>
*
* <p>
* Inside test methods in a <code>Suite</code>, you can write assertions by invoking <code>assert</code> and passing in a <code>Boolean</code> expression,
* such as:
* </p>
*
* <pre class="stHighlight">
* val left = 2
* val right = 1
* assert(left == right)
* </pre>
*
* <p>
* If the passed expression is <code>true</code>, <code>assert</code> will return normally. If <code>false</code>,
* <code>assert</code> will complete abruptly with a <code>TestFailedException</code>. This exception is usually not caught
* by the test method, which means the test method itself will complete abruptly by throwing the <code>TestFailedException</code>. Any
* test method that completes abruptly with an exception is considered a failed
* test. A test method that returns normally is considered a successful test.
* </p>
*
* <p>
* If you pass a <code>Boolean</code> expression to <code>assert</code>, a failed assertion will be reported, but without
* reporting the left and right values. You can alternatively encode these values in a <code>String</code> passed as
* a second argument to <code>assert</code>, as in:
* </p>
*
* <pre class="stHighlight">
* val left = 2
* val right = 1
* assert(left == right, left + " did not equal " + right)
* </pre>
*
* <p>
* Using this form of <code>assert</code>, the failure report will include the left and right values,
* helping you debug the problem. However, ScalaTest provides the <code>===</code> operator to make this easier.
* (The <code>===</code> operator is defined in trait <a href="Assertions.html"><code>Assertions</code></a> which trait <code>Suite</code> extends.)
* You use it like this:
* </p>
*
* <pre class="stHighlight">
* val left = 2
* val right = 1
* assert(left === right)
* </pre>
*
* <p>
* Because you use <code>===</code> here instead of <code>==</code>, the failure report will include the left
* and right values. For example, the detail message in the thrown <code>TestFailedException</code> from the <code>assert</code>
* shown previously will include, "2 did not equal 1".
* From this message you will know that the operand on the left had the value 2, and the operand on the right had the value 1.
* </p>
*
* <p>
* If you're familiar with JUnit, you would use <code>===</code>
* in a ScalaTest <code>Suite</code> where you'd use <code>assertEquals</code> in a JUnit <code>TestCase</code>.
* The <code>===</code> operator is made possible by an implicit conversion from <code>Any</code>
* to <code>Equalizer</code>. If you're curious to understand the mechanics, see the <a href="Assertions$Equalizer.html">documentation for
* <code>Equalizer</code></a> and the <code>convertToEqualizer</code> method.
* </p>
*
* <h2>Expected results</h2>
*
 * <p>
 * Although <code>===</code> provides a natural, readable extension to Scala's <code>assert</code> mechanism,
 * as the operands become lengthy, the code becomes less readable. In addition, the <code>===</code> comparison
 * doesn't distinguish between actual and expected values. The operands are just called <code>left</code> and <code>right</code>,
 * because if one were named <code>expected</code> and the other <code>actual</code>, it would be difficult for people to
 * remember which was which. To help with these limitations of assertions, <code>Suite</code> includes a method called <code>expect</code> that
 * can be used as an alternative to <code>assert</code> with <code>===</code>. To use <code>expect</code>, you place
 * the expected value in parentheses after <code>expect</code>, followed by curly braces containing code
 * that should result in the expected value. For example:
 * </p>
*
* <pre class="stHighlight">
* val a = 5
* val b = 2
* expect(2) {
* a - b
* }
* </pre>
*
* <p>
* In this case, the expected value is <code>2</code>, and the code being tested is <code>a - b</code>. This expectation will fail, and
* the detail message in the <code>TestFailedException</code> will read, "Expected 2, but got 3."
* </p>
*
* <h2>Intercepted exceptions</h2>
*
* <p>
* Sometimes you need to test whether a method throws an expected exception under certain circumstances, such
* as when invalid arguments are passed to the method. You can do this in the JUnit style, like this:
* </p>
*
* <pre class="stHighlight">
* val s = "hi"
* try {
* s.charAt(-1)
* fail()
* }
* catch {
* case _: IndexOutOfBoundsException => // Expected, so continue
* }
* </pre>
*
* <p>
* If <code>charAt</code> throws <code>IndexOutOfBoundsException</code> as expected, control will transfer
* to the catch case, which does nothing. If, however, <code>charAt</code> fails to throw an exception,
* the next statement, <code>fail()</code>, will be executed. The <code>fail</code> method always completes abruptly with
* a <code>TestFailedException</code>, thereby signaling a failed test.
* </p>
*
* <p>
* To make this common use case easier to express and read, ScalaTest provides an <code>intercept</code>
* method. You use it like this:
* </p>
*
* <pre class="stHighlight">
* val s = "hi"
* intercept[IndexOutOfBoundsException] {
* s.charAt(-1)
* }
* </pre>
*
* <p>
* This code behaves much like the previous example. If <code>charAt</code> throws an instance of <code>IndexOutOfBoundsException</code>,
* <code>intercept</code> will return that exception. But if <code>charAt</code> completes normally, or throws a different
* exception, <code>intercept</code> will complete abruptly with a <code>TestFailedException</code>. The <code>intercept</code> method returns the
* caught exception so that you can inspect it further if you wish, for example, to ensure that data contained inside
* the exception has the expected values. Here's an example:
* </p>
*
* <pre class="stHighlight">
* val s = "hi"
* val caught =
* intercept[IndexOutOfBoundsException] {
* s.charAt(-1)
* }
* assert(caught.getMessage === "String index out of range: -1")
* </pre>
*
* <h2>Using other assertions</h2>
*
* <p>
* ScalaTest also supports another style of assertions via its matchers DSL. By mixing in
* trait <a href="matchers/ShouldMatchers.html"><code>ShouldMatchers</code></a>, you can
* write suites that look like:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
* import org.scalatest.matchers.ShouldMatchers
*
* class ExampleSuite extends Suite with ShouldMatchers {
*
* def testAddition {
* val sum = 1 + 1
* sum should equal (2)
* }
*
* def testSubtraction {
* val diff = 4 - 1
* diff should equal (3)
* }
* }
* </pre>
*
* <p>If you prefer the word "<code>must</code>" to the word "<code>should</code>," you can alternatively mix in
* trait <a href="matchers/MustMatchers.html"><code>MustMatchers</code></a>.
* </p>
*
* <p>
* If you are comfortable with assertion mechanisms from other test frameworks, chances
* are you can use them with ScalaTest. Any assertion mechanism that indicates a failure with an exception
* can be used as is with ScalaTest. For example, to use the <code>assertEquals</code>
* methods provided by JUnit or TestNG, simply import them and use them. (You will of course need
* to include the relevant JAR file for the framework whose assertions you want to use on either the
* classpath or runpath when you run your tests.) Here's an example in which JUnit's assertions are
* imported, then used within a ScalaTest suite:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
* import org.junit.Assert._
*
* class ExampleSuite extends Suite {
*
* def testAddition {
* val sum = 1 + 1
* assertEquals(2, sum)
* }
*
* def testSubtraction {
* val diff = 4 - 1
* assertEquals(3, diff)
* }
* }
* </pre>
*
* <h2>Nested suites</h2>
*
* <p>
* A <code>Suite</code> can refer to a collection of other <code>Suite</code>s,
* which are called <em>nested</em> <code>Suite</code>s. Those nested <code>Suite</code>s can in turn have
* their own nested <code>Suite</code>s, and so on. Large test suites can be organized, therefore, as a tree of
* nested <code>Suite</code>s.
* This trait's <code>run</code> method, in addition to invoking its
* test methods, invokes <code>run</code> on each of its nested <code>Suite</code>s.
* </p>
*
* <p>
* A <code>List</code> of a <code>Suite</code>'s nested <code>Suite</code>s can be obtained by invoking its
* <code>nestedSuites</code> method. If you wish to create a <code>Suite</code> that serves as a
* container for nested <code>Suite</code>s, whether or not it has test methods of its own, simply override <code>nestedSuites</code>
* to return a <code>List</code> of the nested <code>Suite</code>s. Because this is a common use case, ScalaTest provides
* a convenience <code>Suites</code> class, which takes a variable number of nested <code>Suite</code>s as constructor
* parameters. Here's an example:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
* import org.scalatest.Suites
*
* class ASuite extends Suite {
* def testA {}
* }
* class BSuite extends Suite {
* def testB {}
* }
* class CSuite extends Suite {
* def testC {}
* }
*
* class AlphabetSuite extends Suites(
* new ASuite,
* new BSuite,
* new CSuite
* )
* </pre>
*
* <p>
* If you now run <code>AlphabetSuite</code>:
* </p>
*
* <pre class="stREPL">
* scala> (new AlphabetSuite).execute()
* </pre>
*
* <p>
* You will see reports printed to the standard output that indicate the nested
* suites—<code>ASuite</code>, <code>BSuite</code>, and
* <code>CSuite</code>—were run:
* </p>
*
* <pre class="stREPL">
* <span class="stGreen">AlphabetSuite:
* ASuite:
* - testA
* BSuite:
* - testB
* CSuite:
* - testC</span>
* </pre>
*
* <p>
* Note that <code>Runner</code> can discover <code>Suite</code>s automatically, so you need not
 * necessarily define nested <code>Suites</code> explicitly. See the <a href="tools/Runner$.html#membersOnlyWildcard">documentation
* for <code>Runner</code></a> for more information.
* </p>
*
* <a name="configMapSection"></a><h2>The config map</h2>
*
* <p>
* In some cases you may need to pass information to a suite of tests.
* For example, perhaps a suite of tests needs to grab information from a file, and you want
* to be able to specify a different filename during different runs. You can accomplish this in ScalaTest by passing
* the filename in a <em>config map</em> of key-value pairs, which is passed to <code>run</code> as a <code>Map[String, Any]</code>.
* The values in the config map are called "config objects," because they can be used to <em>configure</em>
* suites, reporters, and tests.
* </p>
*
* <p>
 * You can specify a string config object via the ScalaTest <code>Runner</code>, either via the command line
* or ScalaTest's ant task.
* (See the <a href="tools/Runner$.html#configMapSection">documentation for Runner</a> for information on how to specify
* config objects on the command line.)
* The config map is passed to <code>run</code>, <code>runNestedSuites</code>, <code>runTests</code>, and <code>runTest</code>,
* so one way to access it in your suite is to override one of those methods. If you need to use the config map inside your tests, you
* can access it from the <code>NoArgTest</code> passed to <code>withFixture</code>, or the <code>OneArgTest</code> passed to
* <code>withFixture</code> in the traits in the <code>org.scalatest.fixture</code> package. (See the
* <a href="fixture/Suite.html">documentation for <code>fixture.Suite</code></a>
* for instructions on how to access the config map in tests.)
* </p>
*
* <h2>Ignored tests</h2>
*
* <p>
* Another common use case is that tests must be “temporarily” disabled, with the
* good intention of resurrecting the test at a later time. ScalaTest provides an <code>Ignore</code>
* annotation for this purpose. You use it like this:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
* import org.scalatest.Ignore
*
* class ExampleSuite extends Suite {
*
* def testAddition {
* val sum = 1 + 1
* assert(sum === 2)
* }
*
* @Ignore
* def testSubtraction {
* val diff = 4 - 1
* assert(diff === 3)
* }
* }
* </pre>
*
* <p>
* If you run this version of <code>ExampleSuite</code> with:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).run()
* </pre>
*
* <p>
* It will run only <code>testAddition</code> and report that <code>testSubtraction</code> was ignored. You'll see:
* </p>
*
* <pre class="stREPL">
* <span class="stGreen">ExampleSuite:
* - testAddition</span>
* <span class="stYellow">- testSubtraction !!! IGNORED !!!</span>
* </pre>
*
* <p>
* <code>Ignore</code> is implemented as a tag. The <code>Filter</code> class effectively
 * adds <code>org.scalatest.Ignore</code> to the <code>tagsToExclude</code> <code>Set</code> if it is not already
* in the <code>tagsToExclude</code> set passed to its primary constructor. The only difference between
* <code>org.scalatest.Ignore</code> and the tags you may define and exclude is that ScalaTest reports
* ignored tests to the <code>Reporter</code>. The reason ScalaTest reports ignored tests is
* to encourage ignored tests to be eventually fixed and added back into the active suite of tests.
* </p>
*
* <h2>Pending tests</h2>
*
* <p>
* A <em>pending test</em> is one that has been given a name but is not yet implemented. The purpose of
* pending tests is to facilitate a style of testing in which documentation of behavior is sketched
* out before tests are written to verify that behavior (and often, before the behavior of
* the system being tested is itself implemented). Such sketches form a kind of specification of
* what tests and functionality to implement later.
* </p>
*
* <p>
* To support this style of testing, a test can be given a name that specifies one
* bit of behavior required by the system being tested. The test can also include some code that
* sends more information about the behavior to the reporter when the tests run. At the end of the test,
* it can call method <code>pending</code>, which will cause it to complete abruptly with <code>TestPendingException</code>.
* </p>
*
* <p>
* Because tests in ScalaTest can be designated as pending with <code>TestPendingException</code>, both the test name and any information
* sent to the reporter when running the test can appear in the report of a test run. (In other words,
* the code of a pending test is executed just like any other test.) However, because the test completes abruptly
* with <code>TestPendingException</code>, the test will be reported as pending, to indicate
* the actual test, and possibly the functionality it is intended to test, has not yet been implemented.
* </p>
*
* <p>
* Although pending tests may be used more often in specification-style suites, such as
* <code>org.scalatest.FunSpec</code>, you can also use it in <code>Suite</code>, like this:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
*
* class ExampleSuite extends Suite {
*
* def testAddition {
* val sum = 1 + 1
* assert(sum === 2)
* }
*
* def testSubtraction { pending }
* }
* </pre>
*
* <p>
* If you run this version of <code>ExampleSuite</code> with:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).run()
* </pre>
*
* <p>
* It will run both tests but report that <code>testSubtraction</code> is pending. You'll see:
* </p>
*
* <pre class="stREPL">
* <span class="stGreen">ExampleSuite:
* - testAddition</span>
* <span class="stYellow">- testSubtraction (pending)</span>
* </pre>
*
* <h2>Informers</h2>
*
* <p>
* One of the parameters to <code>run</code> is a <code>Reporter</code>, which
* will collect and report information about the running suite of tests.
* Information about suites and tests that were run, whether tests succeeded or failed,
* and tests that were ignored will be passed to the <code>Reporter</code> as the suite runs.
* Most often the reporting done by default by <code>Suite</code>'s methods will be sufficient, but
* occasionally you may wish to provide custom information to the <code>Reporter</code> from a test method.
* For this purpose, you can optionally include an <code>Informer</code> parameter in a test method, and then
* pass the extra information to the <code>Informer</code> via its <code>apply</code> method. The <code>Informer</code>
* will then pass the information to the <code>Reporter</code> by sending an <code>InfoProvided</code> event.
* Here's an example:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest._
*
* class ExampleSuite extends Suite {
*
* def testAddition(info: Informer) {
* assert(1 + 1 === 2)
* info("Addition seems to work")
* }
* }
* </pre>
*
* If you run this <code>Suite</code> from the interpreter, you will see the message
* included in the printed report:
*
* <pre class="stREPL">
* scala> (new ExampleSuite).run()
* <span class="stGreen">ExampleSuite:
* - testAddition(Informer)
* + Addition seems to work </span>
* </pre>
*
* <h2>Executing suites in parallel</h2>
*
* <p>
* The <code>run</code> method takes as one of its parameters an optional <code>Distributor</code>. If
* a <code>Distributor</code> is passed in, this trait's implementation of <code>run</code> puts its nested
* <code>Suite</code>s into the distributor rather than executing them directly. The caller of <code>run</code>
* is responsible for ensuring that some entity runs the <code>Suite</code>s placed into the
* distributor. The <code>-c</code> command line parameter to <code>Runner</code>, for example, will cause
* <code>Suite</code>s put into the <code>Distributor</code> to be run in parallel via a pool of threads.
* </p>
*
* <a name="TaggingTests"></a><h2>Tagging tests</h2>
*
* <p>
* A <code>Suite</code>'s tests may be classified into groups by <em>tagging</em> them with string names. When executing
* a <code>Suite</code>, groups of tests can optionally be included and/or excluded. In this
* trait's implementation, tags are indicated by annotations attached to the test method. To
* create a new tag type to use in <code>Suite</code>s, simply define a new Java annotation that itself is annotated with the <code>org.scalatest.TagAnnotation</code> annotation.
* (Currently, for annotations to be
* visible in Scala programs via Java reflection, the annotations themselves must be written in Java.) For example,
* to create a tag named <code>SlowAsMolasses</code>, to use to mark slow tests, you would
* write in Java:
* </p>
*
* <p><b>Because of a Scaladoc bug in Scala 2.8, I had to put a space after the at sign in one the target annotation example below. If you
* want to copy and paste from this example, you'll need to remove the space by hand. - Bill Venners</b></p>
*
* <pre>
* import java.lang.annotation.*;
* import org.scalatest.TagAnnotation
*
* @TagAnnotation
* @Retention(RetentionPolicy.RUNTIME)
* @ Target({ElementType.METHOD, ElementType.TYPE})
* public @interface SlowAsMolasses {}
* </pre>
*
* <p>
* Given this new annotation, you could place a <code>Suite</code> test method into the <code>SlowAsMolasses</code> group
* (<em>i.e.</em>, tag it as being <code>SlowAsMolasses</code>) like this:
* </p>
*
* <pre class="stHighlight">
* @SlowAsMolasses
* def testSleeping { sleep(1000000) }
* </pre>
*
* <p>
* The <code>run</code> method takes a <code>Filter</code>, whose constructor takes an optional
* <code>Set[String]</code> called <code>tagsToInclude</code> and a <code>Set[String]</code> called
* <code>tagsToExclude</code>. If <code>tagsToInclude</code> is <code>None</code>, all tests will be run
 * except those belonging to tags listed in the
* <code>tagsToExclude</code> <code>Set</code>. If <code>tagsToInclude</code> is defined, only tests
* belonging to tags mentioned in the <code>tagsToInclude</code> set, and not mentioned in <code>tagsToExclude</code>,
* will be run.
* </p>
*
* <a name="sharedFixtures"></a><h2>Shared fixtures</h2>
*
* <p>
* A test <em>fixture</em> is objects or other artifacts (such as files, sockets, database
* connections, <em>etc.</em>) used by tests to do their work.
* If a fixture is used by only one test method, then the definitions of the fixture objects can
* be local to the method, such as the objects assigned to <code>sum</code> and <code>diff</code> in the
* previous <code>ExampleSuite</code> examples. If multiple methods need to share an immutable fixture, one approach
* is to assign them to instance variables.
* </p>
*
* <p>
* In some cases, however, shared <em>mutable</em> fixture objects may be changed by test methods such that
* they need to be recreated or reinitialized before each test. Shared resources such
* as files or database connections may also need to
* be created and initialized before, and cleaned up after, each test. JUnit 3 offered methods <code>setUp</code> and
* <code>tearDown</code> for this purpose. In ScalaTest, you can use the <code>BeforeAndAfterEach</code> trait,
* which will be described later, to implement an approach similar to JUnit's <code>setUp</code>
* and <code>tearDown</code>, however, this approach usually involves reassigning <code>var</code>s or mutating objects
* between tests. Before going that route, you may wish to consider some more functional approaches that
* avoid side effects.
* </p>
*
* <h4>Calling create-fixture methods</h4>
*
* <p>
* One approach is to write one or more <em>create-fixture</em> methods
 * that return a new instance of a needed fixture object (or a holder object containing multiple needed fixture objects) each time it
* is called. You can then call a create-fixture method at the beginning of each
* test method that needs the fixture, storing the returned object or objects in local variables. Here's an example:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
* import collection.mutable.ListBuffer
*
* class ExampleSuite extends Suite {
*
* def fixture =
* new {
* val builder = new StringBuilder("ScalaTest is ")
* val buffer = new ListBuffer[String]
* }
*
* def testEasy {
* val f = fixture
* f.builder.append("easy!")
* assert(f.builder.toString === "ScalaTest is easy!")
* assert(f.buffer.isEmpty)
* f.buffer += "sweet"
* }
*
* def testFun {
* val f = fixture
* f.builder.append("fun!")
* assert(f.builder.toString === "ScalaTest is fun!")
* assert(f.buffer.isEmpty)
* }
* }
* </pre>
*
* <p>
* The “<code>f.</code>” in front of each use of a fixture object provides a visual indication of which objects
 * are part of the fixture, but if you prefer, you can import the members with &#8220;<code>import f._</code>&#8221; and use the names directly.
* </p>
*
* <h4>Instantiating fixture traits</h4>
*
* <p>
* A related technique is to place
* the fixture objects in a <em>fixture trait</em> and run your test code in the context of a new anonymous class instance that mixes in
* the fixture trait, like this:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
* import collection.mutable.ListBuffer
*
* class ExampleSuite extends Suite {
*
* trait Fixture {
* val builder = new StringBuilder("ScalaTest is ")
* val buffer = new ListBuffer[String]
* }
*
* def testEasy {
* new Fixture {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
* }
*
* def testFun {
* new Fixture {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* }
* }
* }
* </pre>
*
* <h4>Mixing in <code>OneInstancePerTest</code></h4>
*
* <p>
* If every test method requires the same set of
 * mutable fixture objects, one other approach you can take is to make them simply <code>val</code>s and mix in trait
* <a href="OneInstancePerTest.html"><code>OneInstancePerTest</code></a>. If you mix in <code>OneInstancePerTest</code>, each test
* will be run in its own instance of the <code>Suite</code>, similar to the way JUnit tests are executed. Here's an example:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
* import org.scalatest.OneInstancePerTest
* import collection.mutable.ListBuffer
*
* class ExampleSuite extends Suite with OneInstancePerTest {
*
* val builder = new StringBuilder("ScalaTest is ")
* val buffer = new ListBuffer[String]
*
* def testEasy {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def testFun {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* }
* }
* </pre>
*
* <p>
* Although the create-fixture, fixture-trait, and <code>OneInstancePerTest</code> approaches take care of setting up a fixture before each
* test, they don't address the problem of cleaning up a fixture after the test completes. In this situation, you'll need to either
* use side effects or the <em>loan pattern</em>.
* </p>
*
* <h4>Mixing in <code>BeforeAndAfter</code></h4>
*
* <p>
* One way to use side effects is to mix in the <a href="BeforeAndAfter.html"><code>BeforeAndAfter</code></a> trait.
 * With this trait you can denote a bit of code to run before each test with <code>before</code> and/or after
 * each test with <code>after</code>, like this:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
* import org.scalatest.BeforeAndAfter
* import collection.mutable.ListBuffer
*
* class ExampleSuite extends Suite with BeforeAndAfter {
*
* val builder = new StringBuilder
* val buffer = new ListBuffer[String]
*
* before {
* builder.append("ScalaTest is ")
* }
*
* after {
* builder.clear()
* buffer.clear()
* }
*
* def testEasy {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def testFun {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* }
* }
* </pre>
*
* <h4>Overriding <code>withFixture(NoArgTest)</code></h4>
*
* <p>
* An alternate way to take care of setup and cleanup via side effects
* is to override <code>withFixture</code>. Trait <code>Suite</code>'s implementation of
* <code>runTest</code> passes a no-arg test function to <code>withFixture</code>. It is <code>withFixture</code>'s
* responsibility to invoke that test function. <code>Suite</code>'s implementation of <code>withFixture</code> simply
* invokes the function, like this:
* </p>
*
* <pre class="stHighlight">
* // Default implementation
* protected def withFixture(test: NoArgTest) {
* test()
* }
* </pre>
*
* <p>
* You can, therefore, override <code>withFixture</code> to perform setup before, and cleanup after, invoking the test function. If
* you have cleanup to perform, you should invoke the test function
* inside a <code>try</code> block and perform the cleanup in a <code>finally</code> clause.
* Here's an example:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
* import collection.mutable.ListBuffer
*
* class ExampleSuite extends Suite {
*
* val builder = new StringBuilder
* val buffer = new ListBuffer[String]
*
* override def withFixture(test: NoArgTest) {
* builder.append("ScalaTest is ") // perform setup
* try {
* test() // invoke the test function
* }
* finally {
* builder.clear() // perform cleanup
* buffer.clear()
* }
* }
*
* def testEasy {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def testFun {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* buffer += "clear"
* }
* }
* </pre>
*
* <p>
* Note that the <a href="Suite$NoArgTest.html"><code>NoArgTest</code></a> passed to <code>withFixture</code>, in addition to
* an <code>apply</code> method that executes the test, also includes the test name as well as the <a href="#configMapSection">config
* map</a> passed to <code>runTest</code>. Thus you can also use the test name and configuration objects in <code>withFixture</code>.
* </p>
*
* <p>
* The reason you should perform cleanup in a <code>finally</code> clause is that <code>withFixture</code> is called by
* <code>runTest</code>, which expects an exception to be thrown to indicate a failed test. Thus when you invoke
* the <code>test</code> function inside <code>withFixture</code>, it may complete abruptly with an exception. The <code>finally</code>
* clause will ensure the fixture cleanup happens as that exception propagates back up the call stack to <code>runTest</code>.
* </p>
*
* <h4>Overriding <code>withFixture(OneArgTest)</code></h4>
*
* <p>
* To use the loan pattern, you can extend <code>fixture.Suite</code> (from the <code>org.scalatest.fixture</code> package) instead of
* <code>Suite</code>. Each test in a <code>fixture.Suite</code> takes a fixture as a parameter, allowing you to pass the fixture into
* the test. You must indicate the type of the fixture parameter by specifying <code>FixtureParam</code>, and implement a
* <code>withFixture</code> method that takes a <code>OneArgTest</code>. This <code>withFixture</code> method is responsible for
* invoking the one-arg test function, so you can perform fixture set up before, and clean up after, invoking and passing
* the fixture into the test function. Here's an example:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.fixture
* import java.io.FileWriter
* import java.io.File
*
* class ExampleSuite extends fixture.Suite {
*
* final val tmpFile = "temp.txt"
*
* type FixtureParam = FileWriter
*
* def withFixture(test: OneArgTest) {
*
* val writer = new FileWriter(tmpFile) // set up the fixture
* try {
* test(writer) // "loan" the fixture to the test
* }
* finally {
* writer.close() // clean up the fixture
* }
* }
*
* def testEasy(writer: FileWriter) {
* writer.write("Hello, test!")
* writer.flush()
* assert(new File(tmpFile).length === 12)
* }
*
* def testFun(writer: FileWriter) {
* writer.write("Hi, test!")
* writer.flush()
* assert(new File(tmpFile).length === 9)
* }
* }
* </pre>
*
* <p>
* For more information, see the <a href="fixture/Suite.html">documentation for <code>fixture.Suite</code></a>.
* </p>
*
* <a name="differentFixtures"></a><h2>Providing different fixtures to different tests</h2>
*
* <p>
* If different tests in the same <code>Suite</code> require different fixtures, you can combine the previous techniques and
* provide each test with just the fixture or fixtures it needs. Here's an example in which a <code>StringBuilder</code> and a
* <code>ListBuffer</code> are provided via fixture traits, and file writer (that requires cleanup) is provided via the loan pattern:
* </p>
*
* <pre class="stHighlight">
* import java.io.FileWriter
* import java.io.File
* import collection.mutable.ListBuffer
* import org.scalatest.Suite
*
* class ExampleSuite extends Suite {
*
* final val tmpFile = "temp.txt"
*
* trait Builder {
* val builder = new StringBuilder("ScalaTest is ")
* }
*
* trait Buffer {
* val buffer = ListBuffer("ScalaTest", "is")
* }
*
* def withWriter(testCode: FileWriter => Any) {
* val writer = new FileWriter(tmpFile) // set up the fixture
* try {
* testCode(writer) // "loan" the fixture to the test
* }
* finally {
* writer.close() // clean up the fixture
* }
* }
*
* def testProductive { // This test needs the StringBuilder fixture
* new Builder {
* builder.append("productive!")
* assert(builder.toString === "ScalaTest is productive!")
* }
* }
*
* def testReadable { // This test needs the ListBuffer[String] fixture
* new Buffer {
* buffer += ("readable!")
* assert(buffer === List("ScalaTest", "is", "readable!"))
* }
* }
*
* def testFriendly { // This test needs the FileWriter fixture
* withWriter { writer =>
* writer.write("Hello, user!")
* writer.flush()
* assert(new File(tmpFile).length === 12)
* }
* }
*
* def testClearAndConcise { // This test needs the StringBuilder and ListBuffer
* new Builder with Buffer {
* builder.append("clear!")
* buffer += ("concise!")
* assert(builder.toString === "ScalaTest is clear!")
* assert(buffer === List("ScalaTest", "is", "concise!"))
* }
* }
*
* def testComposable { // This test needs all three fixtures
* new Builder with Buffer {
* builder.append("clear!")
* buffer += ("concise!")
* assert(builder.toString === "ScalaTest is clear!")
* assert(buffer === List("ScalaTest", "is", "concise!"))
* withWriter { writer =>
* writer.write(builder.toString)
* writer.flush()
* assert(new File(tmpFile).length === 19)
* }
* }
* }
* }
* </pre>
*
* <p>
* In the previous example, <code>testProductive</code> uses only the <code>StringBuilder</code> fixture, so it just instantiates
 * a <code>new Builder</code>, whereas <code>testReadable</code> uses only the <code>ListBuffer</code> fixture, so it just instantiates
* a <code>new Buffer</code>. <code>testFriendly</code> needs just the <code>FileWriter</code> fixture, so it invokes
 * <code>withWriter</code>, which prepares and passes a <code>FileWriter</code> to the test (and takes care of closing it afterwards).
* </p>
*
* <p>
* Two tests need multiple fixtures: <code>testClearAndConcise</code> needs both the <code>StringBuilder</code> and the
* <code>ListBuffer</code>, so it instantiates a class that mixes in both fixture traits with <code>new Builder with Buffer</code>.
* <code>testComposable</code> needs all three fixtures, so in addition to <code>new Builder with Buffer</code> it also invokes
 * <code>withWriter</code>, wrapping just the portion of the test code that needs the fixture.
* </p>
*
* <p>
* Note that in this case, the loan pattern is being implemented via the <code>withWriter</code> method that takes a function, not
* by overriding <code>fixture.Suite</code>'s <code>withFixture(OneArgTest)</code> method. <code>fixture.Suite</code> makes the most sense
* if all (or at least most) tests need the same fixture, whereas in this <code>Suite</code> only two tests need the
* <code>FileWriter</code>.
* </p>
*
* <p>
* Note also that two test methods, <code>testFriendly</code> and <code>testComposable</code>, are declared as parameterless methods even
* though they have a side effect. In production code you would normally declare these as <em>empty-paren</em> methods, and call them with
* empty parentheses, to make it more obvious to readers of the code that they have a side effect. Whether or not a test method has
* a side effect, however, is a less important distinction than it is for methods in production code. Moreover, test methods are not
* normally invoked directly by client code, but rather through reflection by running the <code>Suite</code> that contains them, so a
* lack of parentheses on an invocation of a side-effecting test method would not normally appear in any client code. Given the empty
* parentheses do not add much value in the test methods case, the recommended style is to simply always leave them off.
* </p>
*
* <p>
* In the previous example, the <code>withWriter</code> method passed an object into
 * the tests. Passing fixture objects into tests is generally a good idea when possible, but sometimes a side effect is unavoidable.
* For example, if you need to initialize a database running on a server across a network, your with-fixture
* method will likely have nothing to pass. In such cases, simply create a with-fixture method that takes a by-name parameter and
* performs setup and cleanup via side effects, like this:
* </p>
*
* <pre class="stHighlight">
* def withDataInDatabase(test: => Any) {
* // initialize the database across the network
* try {
* test // "loan" the initialized database to the test
* }
* finally {
* // clean up the database
* }
* }
* </pre>
*
* <p>
* You can then use it like:
* </p>
*
* <pre class="stHighlight">
* def testUserLogsIn {
* withDataInDatabase {
* // test user logging in scenario
* }
* }
* </pre>
*
* <a name="composingFixtures"></a><h2>Composing stackable fixture traits</h2>
*
* <p>
* In larger projects, teams often end up with several different fixtures that test classes need in different combinations,
* and possibly initialized (and cleaned up) in different orders. A good way to accomplish this in ScalaTest is to factor the individual
* fixtures into traits that can be composed using the <em>stackable trait</em> pattern. This can be done, for example, by placing
* <code>withFixture</code> methods in several traits, each of which call <code>super.withFixture</code>. Here's an example in
* which the <code>StringBuilder</code> and <code>ListBuffer[String]</code> fixtures used in the previous examples have been
* factored out into two <em>stackable fixture traits</em> named <code>Builder</code> and <code>Buffer</code>:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
* import org.scalatest.AbstractSuite
* import collection.mutable.ListBuffer
*
* trait Builder extends AbstractSuite { this: Suite =>
*
* val builder = new StringBuilder
*
* abstract override def withFixture(test: NoArgTest) {
* builder.append("ScalaTest is ")
* try {
* super.withFixture(test) // To be stackable, must call super.withFixture
* }
* finally {
* builder.clear()
* }
* }
* }
*
* trait Buffer extends AbstractSuite { this: Suite =>
*
* val buffer = new ListBuffer[String]
*
* abstract override def withFixture(test: NoArgTest) {
* try {
* super.withFixture(test) // To be stackable, must call super.withFixture
* }
* finally {
* buffer.clear()
* }
* }
* }
*
* class ExampleSuite extends Suite with Builder with Buffer {
*
* def testEasy {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def testFun {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* buffer += "clear"
* }
* }
* </pre>
*
* <p>
* By mixing in both the <code>Builder</code> and <code>Buffer</code> traits, <code>ExampleSuite</code> gets both fixtures, which will be
* initialized before each test and cleaned up after. The order the traits are mixed together determines the order of execution.
 * In this case, <code>Builder</code> is "super" to <code>Buffer</code>. If you wanted <code>Buffer</code> to be "super"
* to <code>Builder</code>, you need only switch the order you mix them together, like this:
* </p>
*
* <pre class="stHighlight">
* class Example2Suite extends Suite with Buffer with Builder
* </pre>
*
* <p>
* And if you only need one fixture you mix in only that trait:
* </p>
*
* <pre class="stHighlight">
* class Example3Suite extends Suite with Builder
* </pre>
*
* <p>
* Another way to create stackable fixture traits is by extending the <a href="BeforeAndAfterEach.html"><code>BeforeAndAfterEach</code></a>
* and/or <a href="BeforeAndAfterAll.html"><code>BeforeAndAfterAll</code></a> traits.
* <code>BeforeAndAfterEach</code> has a <code>beforeEach</code> method that will be run before each test (like JUnit's <code>setUp</code>),
* and an <code>afterEach</code> method that will be run after (like JUnit's <code>tearDown</code>).
* Similarly, <code>BeforeAndAfterAll</code> has a <code>beforeAll</code> method that will be run before all tests,
* and an <code>afterAll</code> method that will be run after all tests. Here's what the previously shown example would look like if it
* were rewritten to use the <code>BeforeAndAfterEach</code> methods instead of <code>withFixture</code>:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.Suite
* import org.scalatest.BeforeAndAfterEach
* import collection.mutable.ListBuffer
*
* trait Builder extends BeforeAndAfterEach { this: Suite =>
*
* val builder = new StringBuilder
*
* override def beforeEach() {
* builder.append("ScalaTest is ")
* super.beforeEach() // To be stackable, must call super.beforeEach
* }
*
* override def afterEach() {
* try {
* super.afterEach() // To be stackable, must call super.afterEach
* }
* finally {
* builder.clear()
* }
* }
* }
*
* trait Buffer extends BeforeAndAfterEach { this: Suite =>
*
* val buffer = new ListBuffer[String]
*
* override def afterEach() {
* try {
* super.afterEach() // To be stackable, must call super.afterEach
* }
* finally {
* buffer.clear()
* }
* }
* }
*
* class ExampleSuite extends Suite with Builder with Buffer {
*
* def testEasy {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* def testFun {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* buffer += "clear"
* }
* }
* </pre>
*
* <p>
* To get the same ordering as <code>withFixture</code>, place your <code>super.beforeEach</code> call at the end of each
* <code>beforeEach</code> method, and the <code>super.afterEach</code> call at the beginning of each <code>afterEach</code>
* method, as shown in the previous example. It is a good idea to invoke <code>super.afterEach</code> in a <code>try</code>
* block and perform cleanup in a <code>finally</code> clause, as shown in the previous example, because this ensures the
 * cleanup code is performed even if <code>super.afterEach</code> throws an exception.
* </p>
*
* <p>
* One difference to bear in mind between the before-and-after traits and the <code>withFixture</code> methods, is that if
* a <code>withFixture</code> method completes abruptly with an exception, it is considered a failed test. By contrast, if any of the
* methods on the before-and-after traits (<em>i.e.</em>, <code>before</code> and <code>after</code> of <code>BeforeAndAfter</code>,
* <code>beforeEach</code> and <code>afterEach</code> of <code>BeforeAndAfterEach</code>,
* and <code>beforeAll</code> and <code>afterAll</code> of <code>BeforeAndAfterAll</code>) complete abruptly, it is considered a
* failed suite, which will result in a <a href="events/SuiteAborted.html"><code>SuiteAborted</code></a> event.
* </p>
*
* <a name="errorHandling"></a>
* <h2>Treatment of <code>java.lang.Error</code>s</h2>
*
* <p>
* The Javadoc documentation for <code>java.lang.Error</code> states:
* </p>
*
* <blockquote>
* An <code>Error</code> is a subclass of <code>Throwable</code> that indicates serious problems that a reasonable application should not try to catch. Most
* such errors are abnormal conditions.
* </blockquote>
*
* <p>
* Because <code>Error</code>s are used to denote serious errors, trait <code>Suite</code> and its subtypes in the ScalaTest API do not always treat a test
* that completes abruptly with an <code>Error</code> as a test failure, but sometimes as an indication that serious problems
* have arisen that should cause the run to abort. For example, if a test completes abruptly with an <code>OutOfMemoryError</code>,
* it will not be reported as a test failure, but will instead cause the run to abort. Because not everyone uses <code>Error</code>s only to represent serious
* problems, however, ScalaTest only behaves this way for the following exception types (and their subclasses):
* </p>
*
* <ul>
* <li><code>java.lang.annotation.AnnotationFormatError</code></li>
* <li><code>java.awt.AWTError</code></li>
* <li><code>java.nio.charset.CoderMalfunctionError</code></li>
* <li><code>javax.xml.parsers.FactoryConfigurationError</code></li>
* <li><code>java.lang.LinkageError</code></li>
* <li><code>java.lang.ThreadDeath</code></li>
* <li><code>javax.xml.transform.TransformerFactoryConfigurationError</code></li>
* <li><code>java.lang.VirtualMachineError</code></li>
* </ul>
*
* <p>
* The previous list includes all <code>Error</code>s that exist as part of Java 1.5 API, excluding <code>java.lang.AssertionError</code>. ScalaTest
* does treat a thrown <code>AssertionError</code> as an indication of a test failure. In addition, any other <code>Error</code> that is not an instance of a
* type mentioned in the previous list will be caught by the <code>Suite</code> traits in the ScalaTest API and reported as the cause of a test failure.
* </p>
*
* <p>
* Although trait <code>Suite</code> and all its subtypes in the ScalaTest API consistently behave this way with regard to <code>Error</code>s,
* this behavior is not required by the contract of <code>Suite</code>. Subclasses and subtraits that you define, for example, may treat all
* <code>Error</code>s as test failures, or indicate errors in some other way that has nothing to do with exceptions.
* </p>
*
* <h2>Extensibility</h2>
*
* <p>
* Trait <code>Suite</code> provides default implementations of its methods that should
* be sufficient for most applications, but many methods can be overridden when desired. Here's
* a summary of the methods that are intended to be overridden:
* </p>
*
* <ul>
* <li><code>run</code> - override this method to define custom ways to run suites of
* tests.</li>
* <li><code>runNestedSuites</code> - override this method to define custom ways to run nested suites.</li>
* <li><code>runTests</code> - override this method to define custom ways to run a suite's tests.</li>
* <li><code>runTest</code> - override this method to define custom ways to run a single named test.</li>
* <li><code>testNames</code> - override this method to specify the <code>Suite</code>'s test names in a custom way.</li>
* <li><code>tags</code> - override this method to specify the <code>Suite</code>'s test tags in a custom way.</li>
* <li><code>nestedSuites</code> - override this method to specify the <code>Suite</code>'s nested <code>Suite</code>s in a custom way.</li>
* <li><code>suiteName</code> - override this method to specify the <code>Suite</code>'s name in a custom way.</li>
* <li><code>expectedTestCount</code> - override this method to count this <code>Suite</code>'s expected tests in a custom way.</li>
* </ul>
*
* <p>
* For example, this trait's implementation of <code>testNames</code> performs reflection to discover methods starting with <code>test</code>,
* and places these in a <code>Set</code> whose iterator returns the names in alphabetical order. If you wish to run tests in a different
* order in a particular <code>Suite</code>, perhaps because a test named <code>testAlpha</code> can only succeed after a test named
* <code>testBeta</code> has run, you can override <code>testNames</code> so that it returns a <code>Set</code> whose iterator returns
* <code>testBeta</code> <em>before</em> <code>testAlpha</code>. (This trait's implementation of <code>run</code> will invoke tests
* in the order they come out of the <code>testNames</code> <code>Set</code> iterator.)
* </p>
*
* <p>
* Alternatively, you may not like starting your test methods with <code>test</code>, and prefer using <code>@Test</code> annotations in
* the style of Java's JUnit 4 or TestNG. If so, you can override <code>testNames</code> to discover tests using either of these two APIs
* <code>@Test</code> annotations, or one of your own invention. (This is in fact
* how <code>org.scalatest.junit.JUnitSuite</code> and <code>org.scalatest.testng.TestNGSuite</code> work.)
* </p>
*
* <p>
* Moreover, <em>test</em> in ScalaTest does not necessarily mean <em>test method</em>. A test can be anything that can be given a name,
* that starts and either succeeds or fails, and can be ignored. In <code>org.scalatest.FunSuite</code>, for example, tests are represented
* as function values. This
* approach might look foreign to JUnit users, but may feel more natural to programmers with a functional programming background.
* To facilitate this style of writing tests, <code>FunSuite</code> overrides <code>testNames</code>, <code>runTest</code>, and <code>run</code> such that you can
* define tests as function values.
* </p>
*
* <p>
* You can also model existing JUnit 3, JUnit 4, or TestNG tests as suites of tests, thereby incorporating tests written in Java into a ScalaTest suite.
* The "wrapper" classes in packages <code>org.scalatest.junit</code> and <code>org.scalatest.testng</code> exist to make this easy.
* No matter what legacy tests you may have, it is likely you can create or use an existing <code>Suite</code> subclass that allows you to model those tests
* as ScalaTest suites and tests and incorporate them into a ScalaTest suite. You can then write new tests in Scala and continue supporting
* older tests in Java.
* </p>
*
* @author Bill Venners
*/
@Style("org.scalatest.finders.MethodFinder")
trait Suite extends Assertions with AbstractSuite with Serializable { thisSuite =>
import Suite.TestMethodPrefix, Suite.InformerInParens, Suite.IgnoreAnnotation
/**
 * A test function taking no arguments, which also provides a test name and config map.
 *
 * <p>
 * <code>Suite</code>'s implementation of <code>runTest</code> passes instances of this trait
 * to <code>withFixture</code> for every test method it executes. It invokes <code>withFixture</code>
 * for every test, including test methods that take an <code>Informer</code>. For the latter case,
 * the <code>Informer</code> to pass to the test method is already contained inside the
 * <code>NoArgTest</code> instance passed to <code>withFixture</code>.
 * </p>
 */
protected trait NoArgTest extends (() => Unit) {

  /**
   * The name of this test.
   */
  def name: String

  /**
   * Runs the code of the test.
   *
   * <p>
   * Invoking this function executes the body of the test (inherited from
   * <code>() => Unit</code>; completes abruptly with an exception on test failure).
   * </p>
   */
  def apply()

  /**
   * A <code>Map[String, Any]</code> containing objects that can be used
   * to configure the fixture and test.
   */
  def configMap: Map[String, Any]
}
/**
 * A <code>List</code> of this <code>Suite</code> object's nested <code>Suite</code>s. If this <code>Suite</code>
 * contains no nested <code>Suite</code>s, this method returns an empty <code>List</code>.
 *
 * <p>
 * This trait's implementation always returns an empty <code>List</code>; subclasses that contain
 * nested suites override this method to return them.
 * </p>
 */
def nestedSuites: List[Suite] = List.empty
/**
* Executes this <code>Suite</code>, printing results to the standard output.
*
* <p>
* This method implementation calls <code>run</code> on this <code>Suite</code>, passing in:
* </p>
*
* <ul>
* <li><code>testName</code> - <code>None</code></li>
* <li><code>reporter</code> - a reporter that prints to the standard output</li>
* <li><code>stopper</code> - a <code>Stopper</code> whose <code>apply</code> method always returns <code>false</code></li>
* <li><code>filter</code> - a <code>Filter</code> constructed with <code>None</code> for <code>tagsToInclude</code> and <code>Set()</code>
* for <code>tagsToExclude</code></li>
* <li><code>configMap</code> - an empty <code>Map[String, Any]</code></li>
* <li><code>distributor</code> - <code>None</code></li>
* <li><code>tracker</code> - a new <code>Tracker</code></li>
* </ul>
*
* <p>
* This method serves as a convenient way to execute a <code>Suite</code>, especially from
* within the Scala interpreter.
* </p>
*
* <p>
* Note: In ScalaTest, the terms "execute" and "run" basically mean the same thing and
* can be used interchangably. The reason this convenience method and its three overloaded forms
* aren't named <code>run</code>
* is because <code>junit.framework.TestCase</code> declares a <code>run</code> method
* that takes no arguments but returns a <code>junit.framework.TestResult</code>. That
* <code>run</code> method would not overload with this method if it were named <code>run</code>,
* because it would have the same parameters but a different return type than the one
* defined in <code>TestCase</code>. To facilitate integration with JUnit 3, therefore,
* these convenience "run" methods are named <code>execute</code>. In particular, this allows trait
* <code>org.scalatest.junit.JUnit3Suite</code> to extend both <code>org.scalatest.Suite</code> and
* <code>junit.framework.TestCase</code>, which enables the creating of classes that
* can be run with either ScalaTest or JUnit 3.
* </p>
*
final def execute() {
run(None, new StandardOutReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
}
* Executes this <code>Suite</code> with the specified <code>configMap</code>, printing results to the standard output.
*
* <p>
* This method implementation calls <code>run</code> on this <code>Suite</code>, passing in:
* </p>
*
* <ul>
* <li><code>testName</code> - <code>None</code></li>
* <li><code>reporter</code> - a reporter that prints to the standard output</li>
* <li><code>stopper</code> - a <code>Stopper</code> whose <code>apply</code> method always returns <code>false</code></li>
* <li><code>filter</code> - a <code>Filter</code> constructed with <code>None</code> for <code>tagsToInclude</code> and <code>Set()</code>
* for <code>tagsToExclude</code></li>
* <li><code>configMap</code> - the specified <code>configMap</code> <code>Map[String, Any]</code></li>
* <li><code>distributor</code> - <code>None</code></li>
* <li><code>tracker</code> - a new <code>Tracker</code></li>
* </ul>
*
* <p>
* This method serves as a convenient way to execute a <code>Suite</code>, passing in some objects via the <code>configMap</code>, especially from within the Scala interpreter.
* </p>
*
* <p>
* Note: In ScalaTest, the terms "execute" and "run" basically mean the same thing and
* can be used interchangably. The reason this convenience method and its three overloaded forms
 * aren't named <code>run</code> is described in the documentation of the overloaded form that
* takes no parameters: <a href="#execute%28%29">execute()</a>.
* </p>
*
* @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests.
*
* @throws NullPointerException if the passed <code>configMap</code> parameter is <code>null</code>.
*
final def execute(configMap: Map[String, Any]) {
run(None, new StandardOutReporter, new Stopper {}, Filter(), configMap, None, new Tracker)
}
* Executes the test specified as <code>testName</code> in this <code>Suite</code>, printing results to the standard output.
*
* <p>
* This method implementation calls <code>run</code> on this <code>Suite</code>, passing in:
* </p>
*
* <ul>
* <li><code>testName</code> - <code>Some(testName)</code></li>
* <li><code>reporter</code> - a reporter that prints to the standard output</li>
* <li><code>stopper</code> - a <code>Stopper</code> whose <code>apply</code> method always returns <code>false</code></li>
* <li><code>filter</code> - a <code>Filter</code> constructed with <code>None</code> for <code>tagsToInclude</code> and <code>Set()</code>
* for <code>tagsToExclude</code></li>
* <li><code>configMap</code> - an empty <code>Map[String, Any]</code></li>
* <li><code>distributor</code> - <code>None</code></li>
* <li><code>tracker</code> - a new <code>Tracker</code></li>
* </ul>
*
* <p>
* This method serves as a convenient way to run a single test, especially from within the Scala interpreter.
* </p>
*
* <p>
* Note: In ScalaTest, the terms "execute" and "run" basically mean the same thing and
* can be used interchangably. The reason this convenience method and its three overloaded forms
 * aren't named <code>run</code> is described in the documentation of the overloaded form that
* takes no parameters: <a href="#execute%28%29">execute()</a>.
* </p>
*
* @param testName the name of one test to run.
*
* @throws NullPointerException if the passed <code>testName</code> parameter is <code>null</code>.
* @throws IllegalArgumentException if <code>testName</code> is defined, but no test with the specified test name
* exists in this <code>Suite</code>
*
final def execute(testName: String) {
run(Some(testName), new StandardOutReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
}
*/
/**
* Executes one or more tests in this <code>Suite</code>, printing results to the standard output.
*
* <p>
* This method invokes <code>run</code> on itself, passing in values that can be configured via the parameters to this
* method, all of which have default values. This behavior is convenient when working with ScalaTest in the Scala interpreter.
* Here's a summary of this method's parameters and how you can use them:
* </p>
*
* <p>
* <strong>The <code>testName</code> parameter</strong>
* </p>
*
* <p>
* If you leave <code>testName</code> at its default value (of <code>null</code>), this method will pass <code>None</code> to
* the <code>testName</code> parameter of <code>run</code>, and as a result all the tests in this suite will be executed. If you
* specify a <code>testName</code>, this method will pass <code>Some(testName)</code> to <code>run</code>, and only that test
* will be run. Thus to run all tests in a suite from the Scala interpreter, you can write:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).execute()
* </pre>
*
* <p>
* To run just the test named <code>"my favorite test"</code> in a suite from the Scala interpreter, you would write:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).execute("my favorite test")
* </pre>
*
* <p>
* Or:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).execute(testName = "my favorite test")
* </pre>
*
* <p>
* <strong>The <code>configMap</code> parameter</strong>
* </p>
*
* <p>
* If you provide a value for the <code>configMap</code> parameter, this method will pass it to <code>run</code>. If not, the default value
* of an empty <code>Map</code> will be passed. For more information on how to use a config map to configure your test suites, see
* the <a href="#configMapSection">config map section</a> in the main documentation for this trait. Here's an example in which you configure
* a run with the name of an input file:
* </p>
*
* <pre class="stREPL">
 * scala> (new ExampleSuite).execute(configMap = Map("inputFileName" -> "in.txt"))
* </pre>
*
* <p>
* <strong>The <code>color</code> parameter</strong>
* </p>
*
* <p>
* If you leave the <code>color</code> parameter unspecified, this method will configure the reporter it passes to <code>run</code> to print
* to the standard output in color (via ansi escape characters). If you don't want color output, specify false for <code>color</code>, like this:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).execute(color = false)
* </pre>
*
* <p>
* <strong>The <code>durations</code> parameter</strong>
* </p>
*
* <p>
* If you leave the <code>durations</code> parameter unspecified, this method will configure the reporter it passes to <code>run</code> to
* <em>not</em> print durations for tests and suites to the standard output. If you want durations printed, specify true for <code>durations</code>,
* like this:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).execute(durations = true)
* </pre>
*
* <p>
* <strong>The <code>shortstacks</code> and <code>fullstacks</code> parameters</strong>
* </p>
*
* <p>
* If you leave both the <code>shortstacks</code> and <code>fullstacks</code> parameters unspecified, this method will configure the reporter
* it passes to <code>run</code> to <em>not</em> print stack traces for failed tests if it has a stack depth that identifies the offending
* line of test code. If you prefer a short stack trace (10 to 15 stack frames) to be printed with any test failure, specify true for
* <code>shortstacks</code>:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).execute(shortstacks = true)
* </pre>
*
* <p>
* For full stack traces, set <code>fullstacks</code> to true:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).execute(fullstacks = true)
* </pre>
*
* <p>
* If you specify true for both <code>shortstacks</code> and <code>fullstacks</code>, you'll get full stack traces.
* </p>
*
* <p>
* <strong>The <code>stats</code> parameter</strong>
* </p>
*
* <p>
* If you leave the <code>stats</code> parameter unspecified, this method will <em>not</em> fire <code>RunStarting</code> and either <code>RunCompleted</code>
* or <code>RunAborted</code> events to the reporter it passes to <code>run</code>.
* If you specify true for <code>stats</code>, this method will fire the run events to the reporter, and the reporter will print the
* expected test count before the run, and various statistics after, including the number of suites completed and number of tests that
* succeeded, failed, were ignored or marked pending. Here's how you get the stats:
* </p>
*
* <pre class="stREPL">
* scala> (new ExampleSuite).execute(stats = true)
* </pre>
*
*
* <p>
* To summarize, this method will pass to <code>run</code>:
* </p>
* <ul>
* <li><code>testName</code> - <code>None</code> if this method's <code>testName</code> parameter is left at its default value of <code>null</code>, else <code>Some(testName)</code>.
* <li><code>reporter</code> - a reporter that prints to the standard output</li>
* <li><code>stopper</code> - a <code>Stopper</code> whose <code>apply</code> method always returns <code>false</code></li>
* <li><code>filter</code> - a <code>Filter</code> constructed with <code>None</code> for <code>tagsToInclude</code> and <code>Set()</code>
* for <code>tagsToExclude</code></li>
* <li><code>configMap</code> - the <code>configMap</code> passed to this method</li>
* <li><code>distributor</code> - <code>None</code></li>
* <li><code>tracker</code> - a new <code>Tracker</code></li>
* </ul>
*
* <p>
* Note: In ScalaTest, the terms "execute" and "run" basically mean the same thing and
* can be used interchangably. The reason this method isn't named <code>run</code> is that it takes advantage of
* default arguments, and you can't mix overloaded methods and default arguments in Scala. (If named <code>run</code>,
* this method would have the same name but different arguments than the main <a href="#run"><code>run</code> method</a> that
* takes seven arguments. Thus it would overload and couldn't be used with default argument values.)
* </p>
*
* <p>
* Design note: This method has two "features" that may seem unidiomatic. First, the default value of <code>testName</code> is <code>null</code>.
* Normally in Scala the type of <code>testName</code> would be <code>Option[String]</code> and the default value would
* be <code>None</code>, as it is in this trait's <code>run</code> method. The <code>null</code> value is used here for two reasons. First, in
* ScalaTest 1.5, <code>execute</code> was changed from four overloaded methods to one method with default values, taking advantage of
* the default and named parameters feature introduced in Scala 2.8.
* To not break existing source code, <code>testName</code> needed to have type <code>String</code>, as it did in two of the overloaded
* <code>execute</code> methods prior to 1.5. The other reason is that <code>execute</code> has always been designed to be called primarily
* from an interpeter environment, such as the Scala REPL (Read-Evaluate-Print-Loop). In an interpreter environment, minimizing keystrokes is king.
* A <code>String</code> type with a <code>null</code> default value lets users type <code>suite.execute("my test name")</code> rather than
* <code>suite.execute(Some("my test name"))</code>, saving several keystrokes.
* </p>
*
* <p>
* The second non-idiomatic feature is that <code>shortstacks</code> and <code>fullstacks</code> are all lower case rather than
* camel case. This is done to be consistent with the <a href="Shell.html"><code>Shell</code></a>, which also uses those forms. The reason
* lower case is used in the <code>Shell</code> is to save keystrokes in an interpreter environment. Most Unix commands, for
* example, are all lower case, making them easier and quicker to type. In the ScalaTest
* <code>Shell</code>, methods like <code>shortstacks</code>, <code>fullstacks</code>, and <code>nostats</code>, <em>etc.</em>, are
* designed to be all lower case so they feel more like shell commands than methods.
* </p>
*
* @param testName the name of one test to run.
* @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests.
* @param color a boolean that configures whether output is printed in color
* @param durations a boolean that configures whether test and suite durations are printed to the standard output
* @param shortstacks a boolean that configures whether short stack traces should be printed for test failures
* @param fullstacks a boolean that configures whether full stack traces should be printed for test failures
* @param stats a boolean that configures whether test and suite statistics are printed to the standard output
*
* @throws NullPointerException if the passed <code>configMap</code> parameter is <code>null</code>.
* @throws IllegalArgumentException if <code>testName</code> is defined, but no test with the specified test name
* exists in this <code>Suite</code>
*/
final def execute(
  testName: String = null,
  configMap: Map[String, Any] = Map(),
  color: Boolean = true,
  durations: Boolean = false,
  shortstacks: Boolean = false,
  fullstacks: Boolean = false,
  stats: Boolean = false
) {
  if (configMap == null)
    throw new NullPointerException("configMap was null")
  // Fail fast if a specific test was requested but no test with that name exists in this suite.
  if (testName != null && !testNames.contains(testName))
    throw new IllegalArgumentException(Resources("testNotFound", testName))
  // DispatchReporter wraps the console reporter; it is disposed in the finally block below.
  val dispatch = new DispatchReporter(List(new StandardOutReporter(durations, color, shortstacks, fullstacks)))
  val tracker = new Tracker
  val filter = Filter()
  val runStartTime = System.currentTimeMillis
  // Run-level events (RunStarting / RunCompleted / RunAborted) are fired only when stats is true.
  if (stats)
    dispatch(RunStarting(tracker.nextOrdinal(), expectedTestCount(filter), configMap))
  val suiteStartTime = System.currentTimeMillis
  // Fires a SuiteAborted event for this suite; shared by every catch case below.
  def dispatchSuiteAborted(e: Throwable) {
    val rawString = Resources("runOnSuiteException")
    val formatter = formatterForSuiteAborted(thisSuite, rawString)
    val duration = System.currentTimeMillis - suiteStartTime
    dispatch(SuiteAborted(tracker.nextOrdinal(), rawString, thisSuite.suiteName, thisSuite.suiteId, Some(thisSuite.getClass.getName), thisSuite.decodedSuiteName, Some(e), Some(duration), formatter, Some(SeeStackDepthException)))
  }
  try {
    val formatter = formatterForSuiteStarting(thisSuite)
    dispatch(SuiteStarting(tracker.nextOrdinal(), thisSuite.suiteName, thisSuite.suiteId, thisSuite.decodedSuiteName, Some(thisSuite.getClass.getName), formatter, Some(getTopOfClass)))
    run(
      //if (testName != null) Some(testName) else None,
      // Option(null) is None, so leaving testName at its default runs all tests.
      Option(testName),
      dispatch,
      new Stopper {},
      filter,
      configMap,
      None,
      tracker
    )
    val suiteCompletedFormatter = formatterForSuiteCompleted(thisSuite)
    val duration = System.currentTimeMillis - suiteStartTime
    dispatch(SuiteCompleted(tracker.nextOrdinal(), thisSuite.suiteName, thisSuite.suiteId, Some(thisSuite.getClass.getName), thisSuite.decodedSuiteName, Some(duration), suiteCompletedFormatter, Some(getTopOfClass)))
    if (stats) {
      val duration = System.currentTimeMillis - runStartTime
      dispatch(RunCompleted(tracker.nextOrdinal(), Some(duration)))
    }
  }
  catch {
    // Each abrupt completion is reported as both SuiteAborted and RunAborted, with a
    // message chosen by exception type.
    case e: InstantiationException =>
      dispatchSuiteAborted(e)
      dispatch(RunAborted(tracker.nextOrdinal(), Resources("cannotInstantiateSuite", e.getMessage), Some(e), Some(System.currentTimeMillis - runStartTime)))
    case e: IllegalAccessException =>
      dispatchSuiteAborted(e)
      dispatch(RunAborted(tracker.nextOrdinal(), Resources("cannotInstantiateSuite", e.getMessage), Some(e), Some(System.currentTimeMillis - runStartTime)))
    case e: NoClassDefFoundError =>
      dispatchSuiteAborted(e)
      dispatch(RunAborted(tracker.nextOrdinal(), Resources("cannotLoadClass", e.getMessage), Some(e), Some(System.currentTimeMillis - runStartTime)))
    case e: Throwable =>
      dispatchSuiteAborted(e)
      dispatch(RunAborted(tracker.nextOrdinal(), Resources.bigProblems(e), Some(e), Some(System.currentTimeMillis - runStartTime)))
  }
  finally {
    // Ensure the reporter's dispatch thread is shut down even when the run aborts.
    dispatch.dispatchDisposeAndWaitUntilDone()
  }
}
/**
 * This method has been deprecated and will be removed in a future version of ScalaTest.
 *
 * <p>
 * Please use <code>testTags</code> instead. Subclasses overriding <code>tags</code> in earlier versions
 * of ScalaTest must override <code>testTags</code> instead.
 * </p>
 *
 * <p>
 * This method implementation simply invokes <code>testTags</code> and returns its result.
 * </p>
 */
@deprecated("Please use testTags instead")
final def tags: Map[String, Set[String]] = {
  testTags
}
/**
 * A <code>Map</code> whose keys are <code>String</code> tag names with which tests in this <code>Suite</code> are marked, and
 * whose values are the <code>Set</code> of test names marked with each tag. If this <code>Suite</code> contains no tags, this
 * method returns an empty <code>Map</code>.
 *
 * <p>
 * This trait's implementation of this method uses Java reflection to discover any Java annotations attached to its test methods. The
 * fully qualified name of each unique annotation that extends <code>TagAnnotation</code> is considered a tag. This trait's
 * implementation of this method, therefore, places one key/value pair into to the
 * <code>Map</code> for each unique tag annotation name discovered through reflection. The mapped value for each tag name key will contain
 * the test method name, as provided via the <code>testNames</code> method.
 * </p>
 *
 * <p>
 * Subclasses may override this method to define and/or discover tags in a custom manner, but overriding method implementations
 * should never return an empty <code>Set</code> as a value. If a tag has no tests, its name should not appear as a key in the
 * returned <code>Map</code>.
 * </p>
 */
def testTags: Map[String, Set[String]] = {

  // Fully qualified names of all tag annotations present on the named test method.
  def getTags(testName: String) =
    for {
      a <- getMethodForTestName(testName).getDeclaredAnnotations
      annotationClass = a.annotationType
      if annotationClass.isAnnotationPresent(classOf[TagAnnotation])
    } yield annotationClass.getName

  val elements =
    for {
      testName <- testNames
      // Bind once: previously getTags was invoked twice per test name (once in the
      // guard, once for the value), repeating the reflective annotation scan.
      tags = getTags(testName)
      if !tags.isEmpty
    } yield testName -> (Set() ++ tags)

  Map() ++ elements
}
/**
* A <code>Set</code> of tags with which this <code>Suite</code> is marked. If this <code>Suite</code> is marked with no tags, this
* method returns an empty <code>Set</code>.
*
* <p>
* This trait's implementation of this method uses Java reflection to discover any Java annotations attached to its class. The
* fully qualified name of each unique annotation that extends <code>TagAnnotation</code> is considered a tag. This trait's
* implementation of this method, therefore, places one element into to the
* <code>Set</code> for each unique tag annotation name discovered through reflection.
* </p>
*
* <p>
* Subclasses may override this method to define and/or discover tags in a custom manner.
* </p>
*/
def suiteTags: Set[String] = {
  // Every annotation on this suite's class that is itself annotated with
  // @TagAnnotation contributes its fully qualified name as a suite tag.
  val tagNames =
    getClass.getDeclaredAnnotations.collect {
      case a if a.annotationType.isAnnotationPresent(classOf[TagAnnotation]) =>
        a.annotationType.getName
    }
  tagNames.toSet
}
/**
* A <code>Set</code> of test names. If this <code>Suite</code> contains no tests, this method returns an empty <code>Set</code>.
*
* <p>
* This trait's implementation of this method uses Java reflection to discover all public methods whose name starts with <code>"test"</code>,
* which take either nothing or a single <code>Informer</code> as parameters. For each discovered test method, it assigns a test name
* comprised of just the method name if the method takes no parameters, or the method name plus <code>(Informer)</code> if the
* method takes a <code>Informer</code>. Here are a few method signatures and the names that this trait's implementation assigns them:
* </p>
*
* <pre class="stHighlight">
* def testCat() {} // test name: "testCat"
* def testCat(Informer) {} // test name: "testCat(Informer)"
* def testDog() {} // test name: "testDog"
* def testDog(Informer) {} // test name: "testDog(Informer)"
* def test() {} // test name: "test"
* def test(Informer) {} // test name: "test(Informer)"
* </pre>
*
* <p>
* This trait's implementation of this method returns an immutable <code>Set</code> of all such names, excluding the name
* <code>testNames</code>. The iterator obtained by invoking <code>elements</code> on this
* returned <code>Set</code> will produce the test names in their <em>natural order</em>, as determined by <code>String</code>'s
* <code>compareTo</code> method.
* </p>
*
* <p>
* This trait's implementation of <code>runTests</code> invokes this method
* and calls <code>runTest</code> for each test name in the order they appear in the returned <code>Set</code>'s iterator.
* Although this trait's implementation of this method returns a <code>Set</code> whose iterator produces <code>String</code>
* test names in a well-defined order, the contract of this method does not require a defined order. Subclasses are free to
* override this method and return test names in an undefined order, or in a defined order that's different from <code>String</code>'s
* natural order.
* </p>
*
* <p>
* Subclasses may override this method to produce test names in a custom manner. One potential reason to override <code>testNames</code> is
* to run tests in a different order, for example, to ensure that tests that depend on other tests are run after those other tests.
* Another potential reason to override is allow tests to be defined in a different manner, such as methods annotated <code>@Test</code> annotations
* (as is done in <code>JUnitSuite</code> and <code>TestNGSuite</code>) or test functions registered during construction (as is
* done in <code>FunSuite</code> and <code>FunSpec</code>).
* </p>
*/
def testNames: Set[String] = {
  // A test method is a public instance method whose name starts with "test"
  // and that either takes no parameters (excluding the testNames/testTags
  // accessors themselves) or takes a single Informer.
  def isTestMethod(m: Method) = {
    // Factored out to share code with fixture.Suite.testNames
    val (isInstanceMethod, simpleName, firstFour, paramTypes, hasNoParams, isTestNames, isTestTags) = isTestMethodGoodies(m)
    isInstanceMethod && (firstFour == "test") && ((hasNoParams && !isTestNames && !isTestTags) || takesInformer(m))
  }
  // Methods taking an Informer get "(Informer)" appended to their name so
  // the two overload shapes remain distinguishable.
  val discoveredNames =
    getClass.getMethods.collect {
      case m if isTestMethod(m) =>
        if (takesInformer(m)) m.getName + InformerInParens else m.getName
    }
  // TreeSet yields the names in String's natural (compareTo) order.
  TreeSet[String]() ++ discoveredNames
}
// Reflectively looks up the public method for the given test name. A test
// name ending in "(Informer)" maps to a method taking a single Informer
// parameter; otherwise the method takes no parameters.
private[scalatest] def getMethodForTestName(testName: String) =
  try {
    getClass.getMethod(
      simpleNameForTest(testName),
      (if (testMethodTakesAnInformer(testName)) Array(classOf[Informer]) else new Array[Class[_]](0)): _*
    )
  }
  catch {
    // Translate a reflective lookup failure into the documented contract:
    // IllegalArgumentException naming the missing test. All other throwables
    // propagate unchanged (the previous catch-all that merely rethrew was
    // redundant and has been removed).
    case e: NoSuchMethodException =>
      throw new IllegalArgumentException(Resources("testNotFound", testName))
  }
/**
* Run the passed test function in the context of a fixture established by this method.
*
* <p>
* This method should set up the fixture needed by the tests of the
* current suite, invoke the test function, and if needed, perform any clean
* up needed after the test completes. Because the <code>NoArgTest</code> function
* passed to this method takes no parameters, preparing the fixture will require
* side effects, such as reassigning instance <code>var</code>s in this <code>Suite</code> or initializing
* a globally accessible external database. If you want to avoid reassigning instance <code>var</code>s
* you can use <a href="fixture/Suite.html">fixture.Suite</a>.
* </p>
*
* <p>
* This trait's implementation of <code>runTest</code> invokes this method for each test, passing
* in a <code>NoArgTest</code> whose <code>apply</code> method will execute the code of the test.
* </p>
*
* <p>
* This trait's implementation of this method simply invokes the passed <code>NoArgTest</code> function.
* </p>
*
* @param test the no-arg test function to run with a fixture
*/
protected def withFixture(test: NoArgTest) {
  // Default fixture: no setup or teardown — simply run the test itself.
  // Subclasses override this to wrap tests with fixture management.
  test.apply()
}
// Factored out to share this with fixture.Suite.runTest
// Extends getRunTestGoodies with the reflected Method object for the named
// test; shared with fixture.Suite.runTest.
private[scalatest] def getSuiteRunTestGoodies(stopper: Stopper, reporter: Reporter, testName: String) = {
  val (stopRequested, report, hasPublicNoArgConstructor, rerunnable, testStartTime) =
    getRunTestGoodies(stopper, reporter, testName)
  val testMethod = getMethodForTestName(testName)
  (stopRequested, report, testMethod, hasPublicNoArgConstructor, rerunnable, testStartTime)
}
// Sharing this with FunSuite and fixture.FunSuite as well as Suite and fixture.Suite
// Per-test bookkeeping shared by Suite, FunSuite, and their fixture
// counterparts: the stopper, a wrapped reporter, whether a rerunner can be
// offered, the optional rerunner itself, and the start timestamp.
private[scalatest] def getRunTestGoodies(stopper: Stopper, reporter: Reporter, testName: String) = {
  val stopRequested = stopper
  val report = wrapReporterIfNecessary(reporter)
  // A rerunner is only possible when this suite can be re-instantiated via
  // a public no-arg constructor.
  val hasPublicNoArgConstructor = checkForPublicNoArgConstructor(getClass)
  val rerunnable =
    if (hasPublicNoArgConstructor) Some(new TestRerunner(getClass.getName, testName))
    else None
  val testStartTime = System.currentTimeMillis
  (stopRequested, report, hasPublicNoArgConstructor, rerunnable, testStartTime)
}
/**
* Run a test.
*
* <p>
* This trait's implementation uses Java reflection to invoke on this object the test method identified by the passed <code>testName</code>.
* </p>
*
* <p>
* Implementations of this method are responsible for ensuring a <code>TestStarting</code> event
* is fired to the <code>Reporter</code> before executing any test, and either <code>TestSucceeded</code>,
* <code>TestFailed</code>, or <code>TestPending</code> after executing any nested
* <code>Suite</code>. (If a test is marked with the <code>org.scalatest.Ignore</code> tag, the
* <code>runTests</code> method is responsible for ensuring a <code>TestIgnored</code> event is fired and that
* this <code>runTest</code> method is not invoked for that ignored test.)
* </p>
*
* @param testName the name of one test to run.
* @param reporter the <code>Reporter</code> to which results will be reported
* @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early.
* @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests.
* @param tracker a <code>Tracker</code> tracking <code>Ordinal</code>s being fired by the current thread.
* @throws NullPointerException if any of <code>testName</code>, <code>reporter</code>, <code>stopper</code>, <code>configMap</code>
* or <code>tracker</code> is <code>null</code>.
* @throws IllegalArgumentException if <code>testName</code> is defined, but no test with the specified test name
* exists in this <code>Suite</code>
*/
protected def runTest(testName: String, reporter: Reporter, stopper: Stopper, configMap: Map[String, Any], tracker: Tracker) {
  checkRunTestParamsForNull(testName, reporter, stopper, configMap, tracker)
  // Resolve the reflected test Method plus per-test bookkeeping (wrapped
  // reporter, optional rerunner, start timestamp) in one call.
  val (stopRequested, report, method, hasPublicNoArgConstructor, rerunnable, testStartTime) =
    getSuiteRunTestGoodies(stopper, reporter, testName)
  reportTestStarting(this, report, tracker, testName, testName, getDecodedName(testName), rerunnable, Some(getTopOfMethod(testName)))
  val formatter = getIndentedText(testName, 1, true)
  // Info/markup messages emitted while the test runs are recorded and only
  // fired in the finally clause, once it is known whether the test ended up
  // pending or canceled (which affects how the messages are presented).
  val messageRecorderForThisTest = new MessageRecorder
  val informerForThisTest =
    MessageRecordingInformer(
      messageRecorderForThisTest,
      (message, isConstructingThread, testWasPending, testWasCanceled, location) => reportInfoProvided(thisSuite, report, tracker, Some(testName), message, 2, location, isConstructingThread, true, Some(testWasPending), Some(testWasCanceled))
    )
  val documenterForThisTest =
    MessageRecordingDocumenter(
      messageRecorderForThisTest,
      (message, isConstructingThread, testWasPending, testWasCanceled, location) => reportInfoProvided(thisSuite, report, tracker, Some(testName), message, 2, location, isConstructingThread, true, Some(testWasPending)) // TODO: Need a test that fails because testWasCanceleed isn't being passed
    )
  // A test method either takes no arguments or exactly one Informer.
  val args: Array[Object] =
    if (testMethodTakesAnInformer(testName)) {
      Array(informerForThisTest)
    }
    else Array()
  var testWasPending = false
  var testWasCanceled = false
  try {
    val theConfigMap = configMap
    // Invoke the test method through withFixture so subclasses can wrap the
    // invocation with setup/teardown behavior.
    withFixture(
      new NoArgTest {
        def name = testName
        def apply() { method.invoke(thisSuite, args: _*) }
        def configMap = theConfigMap
      }
    )
    val duration = System.currentTimeMillis - testStartTime
    reportTestSucceeded(this, report, tracker, testName, testName, getDecodedName(testName), duration, formatter, rerunnable, Some(getTopOfMethod(method)))
  }
  catch {
    // Reflection wraps anything thrown by the test method itself in an
    // InvocationTargetException; unwrap it to classify the outcome.
    case ite: InvocationTargetException =>
      val t = ite.getTargetException
      t match {
        case _: TestPendingException =>
          val duration = System.currentTimeMillis - testStartTime
          reportTestPending(this, report, tracker, testName, testName, getDecodedName(testName), duration, formatter, Some(getTopOfMethod(method)))
          testWasPending = true // Set so info's printed out in the finally clause show up yellow
        case e: TestCanceledException =>
          val duration = System.currentTimeMillis - testStartTime
          val message = getMessageForException(e)
          val formatter = getIndentedText(testName, 1, true)
          report(TestCanceled(tracker.nextOrdinal(), message, thisSuite.suiteName, thisSuite.suiteId, Some(thisSuite.getClass.getName), thisSuite.decodedSuiteName,
            testName, testName, getDecodedName(testName), Some(e), Some(duration), Some(formatter), Some(TopOfMethod(thisSuite.getClass.getName, method.toGenericString())), rerunnable))
          testWasCanceled = true // Set so info's printed out in the finally clause show up yellow
        case e if !anErrorThatShouldCauseAnAbort(e) =>
          val duration = System.currentTimeMillis - testStartTime
          handleFailedTest(t, hasPublicNoArgConstructor, testName, rerunnable, report, tracker, duration)
        // Throwables that should abort the whole run are rethrown unchanged.
        case e => throw e
      }
    // A throwable raised outside the reflective invocation (e.g. by a
    // fixture) is treated as a test failure unless it should abort the run.
    case e if !anErrorThatShouldCauseAnAbort(e) =>
      val duration = System.currentTimeMillis - testStartTime
      handleFailedTest(e, hasPublicNoArgConstructor, testName, rerunnable, report, tracker, duration)
    case e => throw e
  }
  finally {
    // Replay recorded info/markup now that the final outcome is known.
    messageRecorderForThisTest.fireRecordedMessages(testWasPending, testWasCanceled)
  }
}
/**
* Run zero to many of this <code>Suite</code>'s tests.
*
* <p>
* This method takes a <code>testName</code> parameter that optionally specifies a test to invoke.
* If <code>testName</code> is defined, this trait's implementation of this method
* invokes <code>runTest</code> on this object, passing in:
* </p>
*
* <ul>
* <li><code>testName</code> - the <code>String</code> value of the <code>testName</code> <code>Option</code> passed
* to this method</li>
* <li><code>reporter</code> - the <code>Reporter</code> passed to this method, or one that wraps and delegates to it</li>
* <li><code>stopper</code> - the <code>Stopper</code> passed to this method, or one that wraps and delegates to it</li>
* <li><code>configMap</code> - the <code>configMap</code> <code>Map</code> passed to this method, or one that wraps and delegates to it</li>
* </ul>
*
* <p>
* This method takes a <code>Filter</code>, which encapsulates an optional <code>Set</code> of tag names that should be included
* (<code>tagsToInclude</code>) and a <code>Set</code> that should be excluded (<code>tagsToExclude</code>), when deciding which
* of this <code>Suite</code>'s tests to run.
* If <code>tagsToInclude</code> is <code>None</code>, all tests will be run
* except those belonging to tags listed in the <code>tagsToExclude</code> <code>Set</code>. If <code>tagsToInclude</code> is defined, only tests
* belonging to tags mentioned in the <code>tagsToInclude</code> <code>Set</code>, and not mentioned in the <code>tagsToExclude</code> <code>Set</code>
* will be run. However, if <code>testName</code> is defined, <code>tagsToInclude</code> and <code>tagsToExclude</code> are essentially ignored.
* Only if <code>testName</code> is <code>None</code> will <code>tagsToInclude</code> and <code>tagsToExclude</code> be consulted to
* determine which of the tests named in the <code>testNames</code> <code>Set</code> should be run. This trait's implementation
* behaves this way, and it is part of the general contract of this method, so all overridden forms of this method should behave
* this way as well. For more information on test tags, see the main documentation for this trait and for class <a href="Filter"><code>Filter</code></a>.
* Note that this means that even if a test is marked as ignored, for example a test method in a <code>Suite</code> annotated with
* <code>org.scalatest.Ignore</code>, if that test name is passed as <code>testName</code> to <code>runTest</code>, it will be invoked
* despite the <code>Ignore</code> annotation.
* </p>
*
* <p>
* If <code>testName</code> is <code>None</code>, this trait's implementation of this method
* invokes <code>testNames</code> on this <code>Suite</code> to get a <code>Set</code> of names of tests to potentially run.
* (A <code>testName</code> value of <code>None</code> essentially acts as a wildcard that means all tests in
* this <code>Suite</code> that are selected by <code>tagsToInclude</code> and <code>tagsToExclude</code> should be run.)
* For each test in the <code>testName</code> <code>Set</code>, in the order
* they appear in the iterator obtained by invoking the <code>elements</code> method on the <code>Set</code>, this trait's implementation
* of this method checks whether the test should be run based on the <code>Filter</code>.
* If so, this implementation invokes <code>runTest</code>, passing in:
* </p>
*
* <ul>
* <li><code>testName</code> - the <code>String</code> name of the test to run (which will be one of the names in the <code>testNames</code> <code>Set</code>)</li>
* <li><code>reporter</code> - the <code>Reporter</code> passed to this method, or one that wraps and delegates to it</li>
* <li><code>stopper</code> - the <code>Stopper</code> passed to this method, or one that wraps and delegates to it</li>
* <li><code>configMap</code> - the <code>configMap</code> passed to this method, or one that wraps and delegates to it</li>
* </ul>
*
* <p>
* If a test is marked with the <code>org.scalatest.Ignore</code> tag, implementations
* of this method are responsible for ensuring a <code>TestIgnored</code> event is fired for that test
* and that <code>runTest</code> is not called for that test.
* </p>
*
* @param testName an optional name of one test to run. If <code>None</code>, all relevant tests should be run.
* I.e., <code>None</code> acts like a wildcard that means run all relevant tests in this <code>Suite</code>.
* @param reporter the <code>Reporter</code> to which results will be reported
* @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early.
* @param filter a <code>Filter</code> with which to filter tests based on their tags
* @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests.
* @param distributor an optional <code>Distributor</code>, into which to put nested <code>Suite</code>s to be run
* by another entity, such as concurrently by a pool of threads. If <code>None</code>, nested <code>Suite</code>s will be run sequentially.
* @param tracker a <code>Tracker</code> tracking <code>Ordinal</code>s being fired by the current thread.
* @throws NullPointerException if any of the passed parameters is <code>null</code>.
* @throws IllegalArgumentException if <code>testName</code> is defined, but no test with the specified test name
* exists in this <code>Suite</code>
*/
protected def runTests(testName: Option[String], reporter: Reporter, stopper: Stopper, filter: Filter,
    configMap: Map[String, Any], distributor: Option[Distributor], tracker: Tracker) {
  // Fail fast on null arguments, naming the offending parameter.
  def rejectIfNull(value: Any, paramName: String) {
    if (value == null)
      throw new NullPointerException(paramName + " was null")
  }
  rejectIfNull(testName, "testName")
  rejectIfNull(reporter, "reporter")
  rejectIfNull(stopper, "stopper")
  rejectIfNull(filter, "filter")
  rejectIfNull(configMap, "configMap")
  rejectIfNull(distributor, "distributor")
  rejectIfNull(tracker, "tracker")
  val stopRequested = stopper
  // Wrap any non-DispatchReporter, non-CatchReporter in a CatchReporter,
  // so that exceptions are caught and transformed
  // into error messages on the standard error stream.
  val report = wrapReporterIfNecessary(reporter)
  // Either fire TestIgnored for a filtered-as-ignored test, or actually
  // run it via runTest.
  def fireIgnoredOrRun(tn: String, ignoreTest: Boolean) {
    if (ignoreTest)
      reportTestIgnored(thisSuite, report, tracker, tn, tn, getDecodedName(tn), 1, Some(getTopOfMethod(tn)))
    else
      runTest(tn, report, stopRequested, configMap, tracker)
  }
  // If a testName is passed, run just that one; otherwise run every test
  // returned by testNames that survives the filter, honoring the stopper
  // between tests.
  testName match {
    case Some(tn) =>
      val (filterTest, ignoreTest) = filter(tn, testTags)
      if (!filterTest)
        fireIgnoredOrRun(tn, ignoreTest)
    case None =>
      for ((tn, ignoreTest) <- filter(testNames, testTags) if !stopRequested())
        fireIgnoredOrRun(tn, ignoreTest)
  }
}
/**
* Runs this suite of tests.
*
* <p>If <code>testName</code> is <code>None</code>, this trait's implementation of this method
* calls these two methods on this object in this order:</p>
*
* <ol>
* <li><code>runNestedSuites(report, stopper, tagsToInclude, tagsToExclude, configMap, distributor)</code></li>
* <li><code>runTests(testName, report, stopper, tagsToInclude, tagsToExclude, configMap)</code></li>
* </ol>
*
* <p>
* If <code>testName</code> is defined, then this trait's implementation of this method
* calls <code>runTests</code>, but does not call <code>runNestedSuites</code>. This behavior
* is part of the contract of this method. Subclasses that override <code>run</code> must take
* care not to call <code>runNestedSuites</code> if <code>testName</code> is defined. (The
* <code>OneInstancePerTest</code> trait depends on this behavior, for example.)
* </p>
*
* <p>
* Subclasses and subtraits that override this <code>run</code> method can implement them without
* invoking either the <code>runTests</code> or <code>runNestedSuites</code> methods, which
* are invoked by this trait's implementation of this method. It is recommended, but not required,
* that subclasses and subtraits that override <code>run</code> in a way that does not
* invoke <code>runNestedSuites</code> also override <code>runNestedSuites</code> and make it
* final. Similarly it is recommended, but not required,
* that subclasses and subtraits that override <code>run</code> in a way that does not
* invoke <code>runTests</code> also override <code>runTests</code> (and <code>runTest</code>,
* which this trait's implementation of <code>runTests</code> calls) and make it
* final. The implementation of these final methods can either invoke the superclass implementation
* of the method, or throw an <code>UnsupportedOperationException</code> if appropriate. The
* reason for this recommendation is that ScalaTest includes several traits that override
* these methods to allow behavior to be mixed into a <code>Suite</code>. For example, trait
* <code>BeforeAndAfterEach</code> overrides <code>runTests</code>. In a <code>Suite</code>
* subclass that no longer invokes <code>runTests</code> from <code>run</code>, the
* <code>BeforeAndAfterEach</code> trait is not applicable. Mixing it in would have no effect.
* By making <code>runTests</code> final in such a <code>Suite</code> subtrait, you make
* the attempt to mix <code>BeforeAndAfterEach</code> into a subclass of your subtrait
* a compiler error. (It would fail to compile with a complaint that <code>BeforeAndAfterEach</code>
* is trying to override <code>runTests</code>, which is a final method in your trait.)
* </p>
*
* @param testName an optional name of one test to run. If <code>None</code>, all relevant tests should be run.
* I.e., <code>None</code> acts like a wildcard that means run all relevant tests in this <code>Suite</code>.
* @param reporter the <code>Reporter</code> to which results will be reported
* @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early.
* @param filter a <code>Filter</code> with which to filter tests based on their tags
* @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests.
* @param distributor an optional <code>Distributor</code>, into which to put nested <code>Suite</code>s to be run
* by another entity, such as concurrently by a pool of threads. If <code>None</code>, nested <code>Suite</code>s will be run sequentially.
* @param tracker a <code>Tracker</code> tracking <code>Ordinal</code>s being fired by the current thread.
*
* @throws NullPointerException if any passed parameter is <code>null</code>.
* @throws IllegalArgumentException if <code>testName</code> is defined, but no test with the specified test name
* exists in this <code>Suite</code>
*/
def run(testName: Option[String], reporter: Reporter, stopper: Stopper, filter: Filter,
    configMap: Map[String, Any], distributor: Option[Distributor], tracker: Tracker) {
  // Fail fast on null arguments, naming the offending parameter.
  def rejectIfNull(value: Any, paramName: String) {
    if (value == null)
      throw new NullPointerException(paramName + " was null")
  }
  rejectIfNull(testName, "testName")
  rejectIfNull(reporter, "reporter")
  rejectIfNull(stopper, "stopper")
  rejectIfNull(filter, "filter")
  rejectIfNull(configMap, "configMap")
  rejectIfNull(distributor, "distributor")
  rejectIfNull(tracker, "tracker")
  val stopRequested = stopper
  val report = wrapReporterIfNecessary(reporter)
  // Nested suites are run only when no single test was requested. This is
  // part of run's contract (traits such as OneInstancePerTest rely on it).
  if (testName.isEmpty)
    runNestedSuites(report, stopRequested, filter, configMap, distributor, tracker)
  runTests(testName, report, stopRequested, filter, configMap, distributor, tracker)
  // If a stop was requested, announce that the run ended early.
  if (stopRequested()) {
    val rawString = Resources("executeStopping")
    report(InfoProvided(tracker.nextOrdinal(), rawString, Some(NameInfo(thisSuite.suiteName, thisSuite.suiteId, Some(thisSuite.getClass.getName), thisSuite.decodedSuiteName,
      testName map (name => TestNameInfo(name, getDecodedName(name)))
    ))))
  }
}
// TODO see if I can take away the [scalatest] from the private
// Fires a TestFailed event carrying the failure's message, the throwable
// itself, and the elapsed time for the test.
// TODO see if I can take away the [scalatest] from the private
private[scalatest] def handleFailedTest(throwable: Throwable, hasPublicNoArgConstructor: Boolean, testName: String,
    rerunnable: Option[Rerunner], report: Reporter, tracker: Tracker, duration: Long) {
  val failureMessage = getMessageForException(throwable)
  val indentedFormatter = getIndentedText(testName, 1, true)
  report(
    TestFailed(tracker.nextOrdinal(), failureMessage, thisSuite.suiteName, thisSuite.suiteId, Some(thisSuite.getClass.getName),
      thisSuite.decodedSuiteName, testName, testName, getDecodedName(testName), Some(throwable), Some(duration),
      Some(indentedFormatter), Some(SeeStackDepthException), rerunnable)
  )
}
/**
*
* Run zero to many of this <code>Suite</code>'s nested <code>Suite</code>s.
*
* <p>
* If the passed <code>distributor</code> is <code>None</code>, this trait's
* implementation of this method invokes <code>run</code> on each
* nested <code>Suite</code> in the <code>List</code> obtained by invoking <code>nestedSuites</code>.
* If a nested <code>Suite</code>'s <code>run</code>
* method completes abruptly with an exception, this trait's implementation of this
* method reports that the <code>Suite</code> aborted and attempts to run the
* next nested <code>Suite</code>.
* If the passed <code>distributor</code> is defined, this trait's implementation
* puts each nested <code>Suite</code>
* into the <code>Distributor</code> contained in the <code>Some</code>, in the order in which the
* <code>Suite</code>s appear in the <code>List</code> returned by <code>nestedSuites</code>, passing
* in a new <code>Tracker</code> obtained by invoking <code>nextTracker</code> on the <code>Tracker</code>
* passed to this method.
* </p>
*
* <p>
* Implementations of this method are responsible for ensuring <code>SuiteStarting</code> events
* are fired to the <code>Reporter</code> before executing any nested <code>Suite</code>, and either <code>SuiteCompleted</code>
* or <code>SuiteAborted</code> after executing any nested <code>Suite</code>.
* </p>
*
* @param reporter the <code>Reporter</code> to which results will be reported
* @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early.
* @param filter a <code>Filter</code> with which to filter tests based on their tags
* @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests.
* @param distributor an optional <code>Distributor</code>, into which to put nested <code>Suite</code>s to be run
* by another entity, such as concurrently by a pool of threads. If <code>None</code>, nested <code>Suite</code>s will be run sequentially.
* @param tracker a <code>Tracker</code> tracking <code>Ordinal</code>s being fired by the current thread.
*
* @throws NullPointerException if any passed parameter is <code>null</code>.
*/
protected def runNestedSuites(reporter: Reporter, stopper: Stopper, filter: Filter,
    configMap: Map[String, Any], distributor: Option[Distributor], tracker: Tracker) {
  if (reporter == null)
    throw new NullPointerException("reporter was null")
  if (stopper == null)
    throw new NullPointerException("stopper was null")
  if (filter == null)
    throw new NullPointerException("filter was null")
  if (configMap == null)
    throw new NullPointerException("configMap was null")
  if (distributor == null)
    throw new NullPointerException("distributor was null")
  if (tracker == null)
    throw new NullPointerException("tracker was null")
  val stopRequested = stopper
  val report = wrapReporterIfNecessary(reporter)
  // Runs one nested suite in the current thread, bracketing it with
  // SuiteStarting and SuiteCompleted/SuiteAborted events.
  def callExecuteOnSuite(nestedSuite: Suite) {
    if (!stopRequested()) {
      // Create a Rerunner if the Suite has a no-arg constructor
      val hasPublicNoArgConstructor = Suite.checkForPublicNoArgConstructor(nestedSuite.getClass)
      val rerunnable =
        if (hasPublicNoArgConstructor)
          Some(new SuiteRerunner(nestedSuite.getClass.getName))
        else
          None
      val rawString = Resources("suiteExecutionStarting")
      val formatter = formatterForSuiteStarting(nestedSuite)
      val suiteStartTime = System.currentTimeMillis
      report(SuiteStarting(tracker.nextOrdinal(), nestedSuite.suiteName, nestedSuite.suiteId, Some(nestedSuite.getClass.getName), nestedSuite.decodedSuiteName, formatter, Some(TopOfClass(nestedSuite.getClass.getName)), rerunnable))
      try {
        // Same thread, so OK to send same tracker
        nestedSuite.run(None, report, stopRequested, filter, configMap, distributor, tracker)
        val rawString = Resources("suiteCompletedNormally")
        val formatter = formatterForSuiteCompleted(nestedSuite)
        val duration = System.currentTimeMillis - suiteStartTime
        report(SuiteCompleted(tracker.nextOrdinal(), nestedSuite.suiteName, nestedSuite.suiteId, Some(nestedSuite.getClass.getName), nestedSuite.decodedSuiteName, Some(duration), formatter, Some(TopOfClass(nestedSuite.getClass.getName)), rerunnable))
      }
      catch {
        // NOTE(review): only RuntimeException is caught here; an Error or a
        // non-runtime Throwable thrown by a nested suite would propagate
        // rather than being reported as SuiteAborted — confirm intended.
        case e: RuntimeException => {
          val rawString = Resources("executeException")
          val formatter = formatterForSuiteAborted(nestedSuite, rawString)
          val duration = System.currentTimeMillis - suiteStartTime
          report(SuiteAborted(tracker.nextOrdinal(), rawString, nestedSuite.suiteName, nestedSuite.suiteId, Some(nestedSuite.getClass.getName), nestedSuite.decodedSuiteName, Some(e), Some(duration), formatter, Some(SeeStackDepthException), rerunnable))
        }
      }
    }
  }
  if (filter.includeNestedSuites) {
    distributor match {
      // No distributor: run nested suites sequentially in this thread,
      // consulting the stopper before each one.
      case None =>
        val nestedSuitesArray = nestedSuites.toArray
        for (i <- 0 until nestedSuitesArray.length) {
          if (!stopRequested()) {
            callExecuteOnSuite(nestedSuitesArray(i))
          }
        }
      // Distributor present: hand each nested suite off for (possibly
      // concurrent) execution, each with its own child tracker.
      case Some(distribute) =>
        for (nestedSuite <- nestedSuites)
          distribute(nestedSuite, tracker.nextTracker())
    }
  }
}
/**
* A user-friendly suite name for this <code>Suite</code>.
*
* <p>
* This trait's
* implementation of this method returns the simple name of this object's class. This
* trait's implementation of <code>runNestedSuites</code> calls this method to obtain a
* name for <code>Report</code>s to pass to the <code>suiteStarting</code>, <code>suiteCompleted</code>,
* and <code>suiteAborted</code> methods of the <code>Reporter</code>.
* </p>
*
* @return this <code>Suite</code> object's suite name.
*/
def suiteName = getSimpleNameOfAnObjectsClass(thisSuite)
// Decoded suite name enclosed using backtick (`), currently for internal use only.
// Presumably None when the simple class name requires no decoding —
// confirm against getDecodedName's implementation.
private[scalatest] val decodedSuiteName:Option[String] = getDecodedName(suiteName)
/**
* A string ID for this <code>Suite</code> that is intended to be unique among all suites reported during a run.
*
* <p>
* This trait's
* implementation of this method returns the fully qualified name of this object's class.
* Each suite reported during a run will commonly be an instance of a different <code>Suite</code> class,
* and in such cases, this default implementation of this method will suffice. However, in special cases
* you may need to override this method to ensure it is unique for each reported suite. For example, if you write
* a <code>Suite</code> subclass that reads in a file whose name is passed to its constructor and dynamically
* creates a suite of tests based on the information in that file, you will likely need to override this method
* in your <code>Suite</code> subclass, perhaps by appending the pathname of the file to the fully qualified class name.
* That way if you run a suite of tests based on a directory full of these files, you'll have unique suite IDs for
* each reported suite.
* </p>
*
* <p>
* The suite ID is <em>intended</em> to be unique, because ScalaTest does not enforce that it is unique. If it is not
* unique, then you may not be able to uniquely identify a particular test of a particular suite. This ability is used,
* for example, to dynamically tag tests as having failed in the previous run when rerunning only failed tests.
* </p>
*
* @return this <code>Suite</code> object's ID.
*/
// Fully qualified class name; not guaranteed unique if the same Suite class is reported twice in one run.
def suiteId = thisSuite.getClass.getName
/**
* Throws <code>TestPendingException</code> to indicate a test is pending.
*
* <p>
* A <em>pending test</em> is one that has been given a name but is not yet implemented. The purpose of
* pending tests is to facilitate a style of testing in which documentation of behavior is sketched
* out before tests are written to verify that behavior (and often, the before the behavior of
* the system being tested is itself implemented). Such sketches form a kind of specification of
* what tests and functionality to implement later.
* </p>
*
* <p>
* To support this style of testing, a test can be given a name that specifies one
* bit of behavior required by the system being tested. The test can also include some code that
* sends more information about the behavior to the reporter when the tests run. At the end of the test,
* it can call method <code>pending</code>, which will cause it to complete abruptly with <code>TestPendingException</code>.
* Because tests in ScalaTest can be designated as pending with <code>TestPendingException</code>, both the test name and any information
* sent to the reporter when running the test can appear in the report of a test run. (In other words,
* the code of a pending test is executed just like any other test.) However, because the test completes abruptly
* with <code>TestPendingException</code>, the test will be reported as pending, to indicate
* the actual test, and possibly the functionality it is intended to test, has not yet been implemented.
* </p>
*
* <p>
* Note: This method always completes abruptly with a <code>TestPendingException</code>. Thus it always has a side
* effect. Methods with side effects are usually invoked with parentheses, as in <code>pending()</code>. This
* method is defined as a parameterless method, in flagrant contradiction to recommended Scala style, because it
* forms a kind of DSL for pending tests. It enables tests in suites such as <code>FunSuite</code> or <code>FunSpec</code>
* to be denoted by placing "<code>(pending)</code>" after the test name, as in:
* </p>
*
* <pre class="stHighlight">
* test("that style rules are not laws") (pending)
* </pre>
*
* <p>
* Readers of the code see "pending" in parentheses, which looks like a little note attached to the test name to indicate
* it is pending. Whereas "<code>(pending())</code> looks more like a method call, "<code>(pending)</code>" lets readers
* stay at a higher level, forgetting how it is implemented and just focusing on the intent of the programmer who wrote the code.
* </p>
*/
// Always completes abruptly; the parameterless form is what lets tests read as `test("name") (pending)`.
def pending: PendingNothing = { throw new TestPendingException }
/**
* Execute the passed block of code, and if it completes abruptly, throw <code>TestPendingException</code>, else
* throw <code>TestFailedException</code>.
*
* <p>
* This method can be used to temporarily change a failing test into a pending test in such a way that it will
* automatically turn back into a failing test once the problem originally causing the test to fail has been fixed.
* At that point, you need only remove the <code>pendingUntilFixed</code> call. In other words, a
* <code>pendingUntilFixed</code> surrounding a block of code that isn't broken is treated as a test failure.
* The motivation for this behavior is to encourage people to remove <code>pendingUntilFixed</code> calls when
* there are no longer needed.
* </p>
*
* <p>
* This method facilitates a style of testing in which tests are written before the code they test. Sometimes you may
* encounter a test failure that requires more functionality than you want to tackle without writing more tests. In this
* case you can mark the bit of test code causing the failure with <code>pendingUntilFixed</code>. You can then write more
* tests and functionality that eventually will get your production code to a point where the original test won't fail anymore.
* At this point the code block marked with <code>pendingUntilFixed</code> will no longer throw an exception (because the
* problem has been fixed). This will in turn cause <code>pendingUntilFixed</code> to throw <code>TestFailedException</code>
* with a detail message explaining you need to go back and remove the <code>pendingUntilFixed</code> call as the problem orginally
* causing your test code to fail has been fixed.
* </p>
*
* @param f a block of code, which if it completes abruptly, should trigger a <code>TestPendingException</code>
* @throws TestPendingException if the passed block of code completes abruptly with an <code>Exception</code> or <code>AssertionError</code>
*/
// Runs the by-name block: a block that still fails (with an Exception or an
// AssertionError) is reported as pending, while a block that now succeeds is
// reported as a test failure, prompting removal of this pendingUntilFixed call.
def pendingUntilFixed(f: => Unit) {
  val stillFailing =
    try {
      f
      false
    }
    catch {
      case _: Exception | _: AssertionError => true
    }
  if (!stillFailing)
    throw new TestFailedException(Resources("pendingUntilFixed"), 2)
  throw new TestPendingException
}
/**
* The total number of tests that are expected to run when this <code>Suite</code>'s <code>run</code> method is invoked.
*
* <p>
* This trait's implementation of this method returns the sum of:
* </p>
*
* <ul>
* <li>the size of the <code>testNames</code> <code>List</code>, minus the number of tests marked as ignored and
* any tests that are exluded by the passed <code>Filter</code></li>
* <li>the sum of the values obtained by invoking
* <code>expectedTestCount</code> on every nested <code>Suite</code> contained in
* <code>nestedSuites</code></li>
* </ul>
*
* @param filter a <code>Filter</code> with which to filter tests to count based on their tags
*/
def expectedTestCount(filter: Filter): Int = {
  // Tests runnable in this suite itself, plus the totals reported by each nested suite.
  val nestedSuiteTestCount = nestedSuites.map(_.expectedTestCount(filter)).sum
  filter.runnableTestCount(testNames, testTags) + nestedSuiteTestCount
}
// Wrap any non-DispatchReporter, non-CatchReporter in a CatchReporter,
// so that exceptions are caught and transformed
// into error messages on the standard error stream.
private[scalatest] def wrapReporterIfNecessary(reporter: Reporter) = reporter match {
  case dr: DispatchReporter => dr // already handles exceptions itself
  case cr: CatchReporter => cr // already wrapped
  case _ => new CatchReporter(reporter)
}

// Location helpers used when firing events: identify this suite's class, or a
// particular test method, as the event's "top of" location.
private[scalatest] def getTopOfClass = TopOfClass(this.getClass.getName)
private[scalatest] def getTopOfMethod(method:Method) = TopOfMethod(this.getClass.getName, method.toGenericString())
// Resolves the test name to its reflective Method via getMethodForTestName (defined elsewhere in this trait).
private[scalatest] def getTopOfMethod(testName:String) = TopOfMethod(this.getClass.getName, getMethodForTestName(testName).toGenericString())
}
private[scalatest] object Suite {
private[scalatest] val TestMethodPrefix = "test" // discovered test methods must start with this prefix
private[scalatest] val InformerInParens = "(Informer)" // suffix marking test names whose method takes an Informer
private[scalatest] val IgnoreAnnotation = "org.scalatest.Ignore"

// Simple (unqualified) name of o's class, with compiler-generated '$' artifacts removed.
private[scalatest] def getSimpleNameOfAnObjectsClass(o: AnyRef) = stripDollars(parseSimpleName(o.getClass.getName))
// [bv: this is a good example of the expression type refactor. I moved this from SuiteClassNameListCellRenderer]
// this will be needed by the GUI classes, etc.
// Strips the package prefix from a fully qualified class name.
private[scalatest] def parseSimpleName(fullyQualifiedName: String) = {
  val lastDot = fullyQualifiedName.lastIndexOf('.')
  // No dot (or a degenerate position) means there is no package prefix to strip.
  if (lastDot == -1 || lastDot == fullyQualifiedName.length)
    fullyQualifiedName
  else
    fullyQualifiedName.substring(lastDot + 1)
}
// True if clazz declares a public no-arg constructor, which discovery needs to
// instantiate a Suite class. (The existential array is just empty Class[_]
// varargs for getConstructor.)
private[scalatest] def checkForPublicNoArgConstructor(clazz: java.lang.Class[_]) = {
  try {
    val constructor = clazz.getConstructor(new Array[java.lang.Class[T] forSome { type T }](0): _*)
    Modifier.isPublic(constructor.getModifiers)
  }
  catch {
    case nsme: NoSuchMethodException => false // no no-arg constructor at all
  }
}

// Strips compiler-generated '$' decorations from a simple class name, e.g. for
// module classes ("Foo$") and REPL "line..." wrapper classes.
private[scalatest] def stripDollars(s: String): String = {
  val lastDollarIndex = s.lastIndexOf('$')
  if (lastDollarIndex < s.length - 1)
    // Last char is not a dollar: only strip when this looks like a REPL "line..."
    // wrapper; otherwise leave the (possibly inner-class) name intact.
    if (lastDollarIndex == -1 || !s.startsWith("line")) s else s.substring(lastDollarIndex + 1)
  else {
    // The last char is a dollar sign
    val lastNonDollarChar = s.reverse.find(_ != '$')
    lastNonDollarChar match {
      case None => s // the name is all dollars; nothing sensible to strip
      case Some(c) => {
        val lastNonDollarIndex = s.lastIndexOf(c)
        if (lastNonDollarIndex == -1) s
        else stripDollars(s.substring(0, lastNonDollarIndex + 1)) // drop trailing dollars, then recurse
      }
    }
  }
}
// Produces a pair of strings highlighting (in square brackets) where s and t
// differ, showing at most MaxContext characters of shared prefix/suffix as
// context. Used to build readable "expected [x] but got [y]" failure messages.
private[scalatest] def diffStrings(s: String, t: String): Tuple2[String, String] = {
  // Number of leading characters s and t have in common.
  def findCommonPrefixLength(s: String, t: String): Int = {
    val max = s.length.min(t.length) // the maximum potential size of the prefix
    var i = 0
    var found = false
    // && rather than the original non-short-circuit &; both operands are pure
    // booleans, so the result is identical but the intent is now idiomatic.
    while (i < max && !found) {
      found = (s.charAt(i) != t.charAt(i))
      if (!found)
        i = i + 1
    }
    i
  }
  // Number of trailing characters s and t have in common.
  def findCommonSuffixLength(s: String, t: String): Int = {
    val max = s.length.min(t.length) // the maximum potential size of the suffix
    var i = 0
    var found = false
    while (i < max && !found) {
      found = (s.charAt(s.length - 1 - i) != t.charAt(t.length - 1 - i))
      if (!found)
        i = i + 1
    }
    i
  }
  val commonPrefixLength = findCommonPrefixLength(s, t)
  // Compute the suffix on the remainders so prefix and suffix never overlap.
  val commonSuffixLength = findCommonSuffixLength(s.substring(commonPrefixLength), t.substring(commonPrefixLength))
  val prefix = s.substring(0, commonPrefixLength)
  val suffix = if (s.length - commonSuffixLength < 0) "" else s.substring(s.length - commonSuffixLength)
  val sMiddleEnd = s.length - commonSuffixLength
  val tMiddleEnd = t.length - commonSuffixLength
  val sMiddle = s.substring(commonPrefixLength, sMiddleEnd)
  val tMiddle = t.substring(commonPrefixLength, tMiddleEnd)
  val MaxContext = 20
  val shortPrefix = if (commonPrefixLength > MaxContext) "..." + prefix.substring(prefix.length - MaxContext) else prefix
  val shortSuffix = if (commonSuffixLength > MaxContext) suffix.substring(0, MaxContext) + "..." else suffix
  (shortPrefix + "[" + sMiddle + "]" + shortSuffix, shortPrefix + "[" + tMiddle + "]" + shortSuffix)
}
// If the objects are two strings, replace them with whatever is returned by diffStrings.
// Otherwise, use the same objects.
private[scalatest] def getObjectsForFailureMessage(a: Any, b: Any) =
  (a, b) match {
    case (aStr: String, bStr: String) => Suite.diffStrings(aStr, bStr)
    case _ => (a, b)
  }
// Formatter for a suite start: the suite name followed by a colon, at indent level 0.
private[scalatest] def formatterForSuiteStarting(suite: Suite): Option[Formatter] =
  Some(IndentedText(suite.suiteName + ":", suite.suiteName, 0))

// Suite completion is normally not printed, so suppress the event.
private[scalatest] def formatterForSuiteCompleted(suite: Suite): Option[Formatter] =
  Some(MotionToSuppress)

// Aborted suites display the abort message itself at indent level 0.
private[scalatest] def formatterForSuiteAborted(suite: Suite, message: String): Option[Formatter] =
  Some(IndentedText(message, message, 0))

// Drops the trailing "(Informer)" marker from a test name, if present.
private def simpleNameForTest(testName: String) =
  if (testName.endsWith(InformerInParens))
    testName.substring(0, testName.length - InformerInParens.length)
  else
    testName
// True for JVM-level errors serious enough that the run should abort rather
// than merely record a test failure (linkage/VM errors and friends).
private[scalatest] def anErrorThatShouldCauseAnAbort(throwable: Throwable) =
  throwable match {
    case _: AnnotationFormatError |
        _: AWTError |
        _: CoderMalfunctionError |
        _: FactoryConfigurationError |
        _: LinkageError |
        _: ThreadDeath |
        _: TransformerFactoryConfigurationError |
        _: VirtualMachineError => true
    case _ => false
  }

// True if the method takes exactly one parameter assignable from Informer.
def takesInformer(m: Method) = {
  val paramTypes = m.getParameterTypes
  paramTypes.length == 1 && classOf[Informer].isAssignableFrom(paramTypes(0))
}

// Collects the reflection facts needed to decide whether m is a discoverable test method.
def isTestMethodGoodies(m: Method) = {
  val isInstanceMethod = !Modifier.isStatic(m.getModifiers())
  // name must have at least 4 chars (minimum is "test")
  val simpleName = m.getName
  val firstFour = if (simpleName.length >= 4) simpleName.substring(0, 4) else ""
  val paramTypes = m.getParameterTypes
  val hasNoParams = paramTypes.length == 0
  // Discover testNames(Informer) because if we didn't it might be confusing when someone
  // actually wrote a testNames(Informer) method and it was silently ignored.
  val isTestNames = simpleName == "testNames"
  val isTestTags = simpleName == "testTags"
  (isInstanceMethod, simpleName, firstFour, paramTypes, hasNoParams, isTestNames, isTestTags)
}
// A test takes an Informer iff its name carries the "(Informer)" suffix.
def testMethodTakesAnInformer(testName: String) = testName.endsWith(InformerInParens)

// Fails fast with a NullPointerException naming the first null argument, so
// runTest implementations surface bad calls immediately.
def checkRunTestParamsForNull(testName: String, reporter: Reporter, stopper: Stopper, configMap: Map[String, Any], tracker: Tracker) {
  // Local helper keeps the message format ("<name> was null") in one place.
  def requireNonNull(arg: AnyRef, name: String) {
    if (arg == null)
      throw new NullPointerException(name + " was null")
  }
  requireNonNull(testName, "testName")
  requireNonNull(reporter, "reporter")
  requireNonNull(stopper, "stopper")
  requireNonNull(configMap, "configMap")
  requireNonNull(tracker, "tracker")
}
/*
For info and test names, the formatted text should have one level shaved off so that the text will
line up correctly, and the icon is over to the left of that even with the enclosing level.
If a test is at the top level (not nested inside a describe, it's level is 0. So no need to subtract 1
to make room for the icon in that case. An info inside such a test will have level 1. And agin, in that
case no need to subtract 1. Such a test is "outermost test" and the info inside is "in outermost test" in:
class ArghSpec extends Spec with GivenWhenThen {
info("in ArghSpec")
it("outermost test") {
info("in outermost test")
}
describe("Apple") {
info("in Apple")
describe("Boat") {
info("in Boat")
describe("Cat") {
info("in Cat")
describe("Dog") {
info("in Dog")
describe("Elephant") {
info("in Elephant")
it("Factory") {
info("in Factory (test)")
given("an empty Stack")
when("push is invoked")
then("it should have size 1")
and("pop should return the pushed value")
}
}
}
}
}
}
}
It should (and at this point does) output this:
[scalatest] ArghSpec:
[scalatest] + in ArghSpec
[scalatest] - outermost test (5 milliseconds)
[scalatest] + in outermost test
[scalatest] Apple
[scalatest] + in Apple
[scalatest] Boat
[scalatest] + in Boat
[scalatest] Cat
[scalatest] + in Cat
[scalatest] Dog
[scalatest] + in Dog
[scalatest] Elephant
[scalatest] + in Elephant
[scalatest] - Factory (1 millisecond)
[scalatest] + in Factory (test)
[scalatest] + Given an empty Stack
[scalatest] + When push is invoked
[scalatest] + Then it should have size 1
[scalatest] + And pop should return the pushed value
FeatureSpec doesn't want any icons printed out. So adding includeIcon here. It
was already in getIndentedTextForInfo because of descriptions being printed out
without icons.
This should really be named getIndentedTextForTest maybe, because I think it is just
used for test events like succeeded/failed, etc.
*/
// Builds the IndentedText formatter for test-level events (succeeded/failed, etc.).
// When the icon is included, the indent is reduced by one level (except at level 0)
// to leave room for the icon character to the left.
def getIndentedText(testText: String, level: Int, includeIcon: Boolean) = {
  val decodedTestText = NameTransformer.decode(testText)
  val formattedText =
    if (includeIcon) {
      val icon = Resources("testSucceededIconChar")
      val indentLevel = if (level == 0) 0 else level - 1
      (" " * indentLevel) + Resources("iconPlusShortName", icon, decodedTestText)
    }
    else {
      (" " * level) + decodedTestText
    }
  IndentedText(formattedText, decodedTestText, level)
}
// The icon is not included for branch description text, but is included for things sent via info(), given(),
// when(), then(), etc. When it is included, reduce the level by 1, unless it is already 1 or 0.
// Returns an IndentedText whose raw (unformatted) text is the unmodified message.
def getIndentedTextForInfo(message: String, level: Int, includeIcon: Boolean, infoIsInsideATest: Boolean) = {
  val formattedText =
    if (includeIcon) {
      val infoProvidedIcon = Resources("infoProvidedIconChar")
      //
      // Inside a test, you want level 1 to stay 1
      // [scalatest] - outermost test (5 milliseconds)
      // [scalatest]   + in outermost test
      //
      // But outside a test, level 1 should be transformed to 0
      // [scalatest] Apple
      // [scalatest] + in Apple
      //
      val indentationLevel =
        level match {
          case 0 => 0
          case 1 if infoIsInsideATest => 1
          case _ => level - 1
        }
      (" " * indentationLevel) + Resources("iconPlusShortName", infoProvidedIcon, message)
      // (" " * (if (level <= 1) level else (level - 1))) + Resources("iconPlusShortName", infoProvidedIcon, message)
    }
    else {
      (" " * level) + message
    }
  IndentedText(formattedText, message, level)
}
// Message to display for a throwable: its own message when present, otherwise a
// localized "<class name> was thrown." fallback.
def getMessageForException(e: Throwable): String =
  Option(e.getMessage).getOrElse(Resources("exceptionThrown", e.getClass.getName))

// One space of indentation per level.
def indentation(level: Int) = " " * level
// Decode suite name enclosed using backtick (`)
// Some(decoded) only when decoding actually changed the name; None otherwise.
def getDecodedName(name:String): Option[String] = {
  val decoded = NameTransformer.decode(name)
  Some(decoded).filter(_ != name)
}
// Fires a TestFailed event built from the throwable's message, with an
// indented-text formatter for console output.
def reportTestFailed(theSuite: Suite, report: Reporter, throwable: Throwable, testName: String, testText: String,
    decodedTestName:Option[String], rerunnable: Option[Rerunner], tracker: Tracker, duration: Long, level: Int, includeIcon: Boolean, location: Option[Location]) {
  val message = getMessageForException(throwable)
  val formatter = getIndentedText(testText, level, includeIcon)
  report(TestFailed(tracker.nextOrdinal(), message, theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName),theSuite.decodedSuiteName, testName, testText, decodedTestName, Some(throwable), Some(duration), Some(formatter), location, rerunnable))
}

// TODO: Possibly separate these out from method tests and function tests, because locations are different
// Update: Doesn't seems to need separation, to be confirmed with Bill.
// Fires a TestStarting event; MotionToSuppress because starts are not normally printed.
def reportTestStarting(theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, testText: String, decodedTestName:Option[String], rerunnable: Option[Rerunner], location: Option[Location]) {
  report(TestStarting(tracker.nextOrdinal(), theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), theSuite.decodedSuiteName, testName, testText, decodedTestName, Some(MotionToSuppress),
    location, rerunnable))
}

// Fires a TestPending event with the caller-supplied formatter.
def reportTestPending(theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, testText: String, decodedTestName:Option[String], duration: Long, formatter: Formatter, location: Option[Location]) {
  report(TestPending(tracker.nextOrdinal(), theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), theSuite.decodedSuiteName, testName, testText, decodedTestName, Some(duration), Some(formatter),
    location))
}

/*
def reportTestCanceled(theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, duration: Long, formatter: Formatter, location: Option[Location]) {

  val message = getMessageForException(throwable)

  report(TestCanceled(tracker.nextOrdinal(), message, theSuite.suiteName, theSuite.suiteID, Some(theSuite.getClass.getName), testName, Some(duration), Some(formatter),
    location))
}
*/

// Fires a TestCanceled event; mirrors reportTestFailed, but for canceled tests.
def reportTestCanceled(theSuite: Suite, report: Reporter, throwable: Throwable, testName: String, testText: String,
    decodedTestName:Option[String], rerunnable: Option[Rerunner], tracker: Tracker, duration: Long, level: Int, includeIcon: Boolean, location: Option[Location]) {
  val message = getMessageForException(throwable)
  val formatter = getIndentedText(testText, level, includeIcon)
  report(TestCanceled(tracker.nextOrdinal(), message, theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), theSuite.decodedSuiteName, testName, testText, decodedTestName, Some(throwable), Some(duration), Some(formatter), location, rerunnable))
}

// Fires a TestSucceeded event with the caller-supplied formatter.
def reportTestSucceeded(theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, testText: String, decodedTestName:Option[String], duration: Long, formatter: Formatter, rerunnable: Option[Rerunner], location: Option[Location]) {
  report(TestSucceeded(tracker.nextOrdinal(), theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), theSuite.decodedSuiteName, testName, testText, decodedTestName, Some(duration), Some(formatter),
    location, rerunnable))
}

// Fires a TestIgnored event, indenting one level shallower to leave room for the icon.
// NOTE(review): this uses the testSucceededIconChar glyph for ignored tests —
// looks deliberate (shared icon), but confirm there is no dedicated ignored icon.
def reportTestIgnored(theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, testText: String, decodedTestName:Option[String], level: Int, location: Option[Location]) {
  val testSucceededIcon = Resources("testSucceededIconChar")
  val formattedText = indentation(level - 1) + Resources("iconPlusShortName", testSucceededIcon, testText)
  report(TestIgnored(tracker.nextOrdinal(), theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), theSuite.decodedSuiteName, testName, testText, decodedTestName, Some(IndentedText(formattedText, testText, level)),
    location))
}
// If not fired in the context of a test, then testName will be None.
// Fires an InfoProvided event, attaching NameInfo only when the caller asks for it.
def reportInfoProvided(
  theSuite: Suite,
  report: Reporter,
  tracker: Tracker,
  testName: Option[String],
  message: String,
  level: Int,
  location: Option[Location],
  includeNameInfo: Boolean,
  includeIcon: Boolean = true,
  aboutAPendingTest: Option[Boolean] = None,
  aboutACanceledTest: Option[Boolean] = None
) {
  // The optional test name is mapped straight into TestNameInfo instead of
  // being pattern matched; None flows through unchanged.
  val nameInfo =
    if (includeNameInfo)
      Some(NameInfo(theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), theSuite.decodedSuiteName,
        testName.map(name => TestNameInfo(name, getDecodedName(name)))))
    else
      None
  report(
    InfoProvided(
      tracker.nextOrdinal(),
      message,
      nameInfo,
      aboutAPendingTest,
      aboutACanceledTest,
      None,
      Some(getIndentedTextForInfo(message, level, includeIcon, testName.isDefined)),
      location
    )
  )
}
// If not fired in the context of a test, then testName will be None
// Fires a MarkupProvided event; NameInfo is attached only when requested, and no
// formatter is sent (see inline note below).
def reportMarkupProvided(
  theSuite: Suite,
  report: Reporter,
  tracker: Tracker,
  testName: Option[String],
  message: String,
  level: Int,
  location: Option[Location],
  includeNameInfo: Boolean,
  aboutAPendingTest: Option[Boolean] = None,
  aboutACanceledTest: Option[Boolean] = None
) {
  report(
    MarkupProvided(
      tracker.nextOrdinal(),
      message,
      if (includeNameInfo)
        Some(NameInfo(theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), theSuite.decodedSuiteName,
          testName match {
            case Some(name) => Some(TestNameInfo(name, getDecodedName(name)))
            case None => None
          }
        ))
      else
        None,
      aboutAPendingTest,
      aboutACanceledTest,
      None, // Some(getIndentedTextForInfo(message, level, includeIcon, testName.isDefined)) for now don't send a formatter
      location
    )
  )
}

// If not fired in the context of a test, then testName will be None
// Fires a ScopeOpened event with an indented formatter for the scope text.
def reportScopeOpened(
  theSuite: Suite,
  report: Reporter,
  tracker: Tracker,
  testName: Option[String],
  message: String,
  level: Int,
  includeIcon: Boolean = true,
  aboutAPendingTest: Option[Boolean] = None,
  aboutACanceledTest: Option[Boolean] = None,
  location: Option[Location]
) {
  report(
    ScopeOpened(
      tracker.nextOrdinal(),
      message,
      NameInfo(theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), theSuite.decodedSuiteName,
        testName match {
          case Some(name) => Some(TestNameInfo(name, getDecodedName(name)))
          case None => None
        }
      ),
      aboutAPendingTest,
      aboutACanceledTest,
      Some(getIndentedTextForInfo(message, level, includeIcon, testName.isDefined)),
      location
    )
  )
}

// If not fired in the context of a test, then testName will be None
// Fires a ScopeClosed event; MotionToSuppress because scope closings are not printed.
def reportScopeClosed(
  theSuite: Suite,
  report: Reporter,
  tracker: Tracker,
  testName: Option[String],
  message: String,
  level: Int,
  includeIcon: Boolean = true,
  aboutAPendingTest: Option[Boolean] = None,
  aboutACanceledTest: Option[Boolean] = None,
  location: Option[Location]
) {
  report(
    ScopeClosed(
      tracker.nextOrdinal(),
      message,
      NameInfo(theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), theSuite.decodedSuiteName,
        testName match {
          case Some(name) => Some(TestNameInfo(name, getDecodedName(name)))
          case None => None
        }
      ),
      aboutAPendingTest,
      aboutACanceledTest,
      Some(MotionToSuppress),
      location
    )
  )
}
/*def getLineInFile(stackTraceList:List[StackTraceElement], sourceFileName:String, methodName: String):Option[LineInFile] = {
val baseStackDepth = stackTraceList.takeWhile(stackTraceElement => sourceFileName != stackTraceElement.getFileName || stackTraceElement.getMethodName != methodName).length
val stackTraceOpt = stackTraceList.drop(baseStackDepth).find(stackTraceElement => stackTraceElement.getMethodName() == "<init>")
stackTraceOpt match {
case Some(stackTrace) => Some(LineInFile(stackTrace.getLineNumber, stackTrace.getFileName))
case None => None
}
}*/
// Extracts a LineInFile location from the stack trace element at stackDepth, or
// None when the index is out of range or the element lacks a usable file name
// or line number.
def getLineInFile(stackTraceList: Array[StackTraceElement], stackDepth: Int) = {
  val elementOpt =
    if (stackDepth >= 0 && stackDepth < stackTraceList.length) Some(stackTraceList(stackDepth))
    else None
  elementOpt match {
    case Some(element) if element.getLineNumber >= 0 && element.getFileName != null =>
      Some(LineInFile(element.getLineNumber, element.getFileName))
    case _ =>
      None
  }
}
}
| epishkin/scalatest-google-code | src/main/scala/org/scalatest/Suite.scala | Scala | apache-2.0 | 141,478 |
/**
* Copyright (C) 2015-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.event
import akka.dispatch.MessageQueue
import akka.dispatch.MailboxType
import akka.dispatch.UnboundedMailbox
import com.typesafe.config.Config
import akka.actor.ActorSystem
import akka.actor.ActorRef
import akka.dispatch.ProducesMessageQueue
import akka.event.Logging.Debug
import akka.event.Logging.LogEvent
// Marker trait: mailboxes with these semantics drain remaining log events to the
// StandardOutLogger when the logger actor stops (see LoggerMailbox.cleanUp).
trait LoggerMessageQueueSemantics

/**
 * INTERNAL API
 */
private[akka] class LoggerMailboxType(settings: ActorSystem.Settings, config: Config) extends MailboxType
  with ProducesMessageQueue[LoggerMailbox] {

  // Both an owner and a system are required, since cleanUp needs them to drain
  // undelivered log events.
  override def create(owner: Option[ActorRef], system: Option[ActorSystem]) = (owner, system) match {
    case (Some(o), Some(s)) ⇒ new LoggerMailbox(o, s)
    case _ ⇒ throw new IllegalArgumentException("no mailbox owner or system given")
  }
}
/**
 * INTERNAL API
 */
private[akka] class LoggerMailbox(owner: ActorRef, system: ActorSystem)
  extends UnboundedMailbox.MessageQueue with LoggerMessageQueueSemantics {

  // Called after the mailbox has been swapped out (logger actor stopping):
  // instead of letting pending LogEvents go to dead letters, forward those at or
  // above the current log level to the StandardOutLogger so they are not lost.
  override def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = {
    if (hasMessages) {
      val logLevel = system.eventStream.logLevel
      var envelope = dequeue
      // Drain all remaining messages to the StandardOutLogger.
      // cleanUp is called after switching out the mailbox, which is why
      // this kind of loop works without a limit.
      val loggingEnabled = Logging.AllLogLevels.contains(logLevel)
      while (envelope ne null) {
        // skip logging if level is OFF
        if (loggingEnabled)
          envelope.message match {
            case e: LogEvent if e.level <= logLevel ⇒
              // Logging.StandardOutLogger is a MinimalActorRef, i.e. not a "real" actor
              Logging.StandardOutLogger.tell(envelope.message, envelope.sender)
            case _ ⇒ // skip
          }

        envelope = dequeue
      }
    }
    super.cleanUp(owner, deadLetters)
  }
}
| rorygraves/perf_tester | corpus/akka/akka-actor/src/main/scala/akka/event/LoggerMailbox.scala | Scala | apache-2.0 | 1,959 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.util.MLTestingUtils
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{Row, SQLContext}
/**
 * MinMaxScaler (min-max normalization):
 * linearly rescales every feature vector into a user-specified [min, max] range.
 */
class MinMaxScalerSuite extends SparkFunSuite with MLlibTestSparkContext {

  test("MinMaxScaler fit basic case") {//basic min-max scaling case
    val sqlContext = new SQLContext(sc)
    /**
     * data:= Array([1.0,0.0,-9.223372036854776E18], [2.0,0.0,0.0], (3,[0,2],[3.0,9.223372036854776E18]), (3,[0],[1.5]))
     */
    val data = Array(
      Vectors.dense(1, 0, Long.MinValue),
      Vectors.dense(2, 0, 0),
      Vectors.sparse(3, Array(0, 2), Array(3, Long.MaxValue)),
      Vectors.sparse(3, Array(0), Array(1.5)))
    /**
     * expected= Array([-5.0,0.0,-5.0], [0.0,0.0,0.0], (3,[0,2],[5.0,5.0]), (3,[0],[-2.5]))
     */
    val expected: Array[Vector] = Array(
      Vectors.dense(-5, 0, -5),
      Vectors.dense(0, 0, 0),
      Vectors.sparse(3, Array(0, 2), Array(5, 5)),
      Vectors.sparse(3, Array(0), Array(-2.5)))
    val df = sqlContext.createDataFrame(data.zip(expected)).toDF("features", "expected")
    val scaler = new MinMaxScaler().setInputCol("features").setOutputCol("scaled")
      //min/max define the user-configurable target range; here data is linearly mapped into [-5, 5]
      .setMin(-5).setMax(5)
    //fit() learns the per-feature range from the DataFrame and produces a Transformer (the fitted model)
    val model = scaler.fit(df)
    //transform() maps the input DataFrame to a new DataFrame containing the scaled column
    model.transform(df).select("expected", "scaled").collect()
      .foreach { case Row(vector1: Vector, vector2: Vector) =>{
        /**
         * [-5.0,0.0,-5.0]|||[-5.0,0.0,-5.0]
         * [0.0,0.0,0.0]|||[0.0,0.0,0.0]
         * (3,[0,2],[5.0,5.0])|||[5.0,0.0,5.0]
         * (3,[0],[-2.5])|||[-2.5,0.0,0.0]
         */
        println(vector1+"|||"+vector2)
        assert(vector1.equals(vector2), "Transformed vector is different with expected.")
      }
    }
    // copied model must have the same parent.
    MLTestingUtils.checkCopy(model)
  }
  //MinMaxScaler linearly rescales feature vectors into the user-specified [min, max] range,
  //so max must be strictly greater than min.
  test("MinMaxScaler arguments max must be larger than min") {
    withClue("arguments max must be larger than min") {
      intercept[IllegalArgumentException] {
        val scaler = new MinMaxScaler().setMin(10).setMax(0)
        scaler.validateParams()
      }
      intercept[IllegalArgumentException] {
        val scaler = new MinMaxScaler().setMin(0).setMax(0)
        scaler.validateParams()
      }
    }
  }
}
| tophua/spark1.52 | mllib/src/test/scala/org/apache/spark/ml/feature/MinMaxScalerSuite.scala | Scala | apache-2.0 | 3,541 |
package collins.models
import org.squeryl.PrimitiveTypeMode._
import org.squeryl.annotations.Column
import org.squeryl.dsl.ast.BinaryOperatorNodeLogicalBoolean
import org.squeryl.dsl.ast.LogicalBoolean
import play.api.libs.json.Json
import collins.models.asset.AssetView
import collins.models.cache.Cache
import collins.models.conversions.IpmiFormat
import collins.models.shared.AddressPool
import collins.models.shared.IpAddressStorage
import collins.models.shared.IpAddressable
import collins.models.shared.Page
import collins.models.shared.PageParams
import collins.util.CryptoCodec
import collins.util.IpAddress
import collins.util.config.IpmiConfig
// One IPMI network identity per asset: credentials plus address/gateway/netmask
// stored as longs (see IpAddress for the long <-> dotted-quad conversion).
// The stored password is encrypted; see decryptedPassword.
case class IpmiInfo(
  @Column("ASSET_ID") assetId: Long,
  username: String,
  password: String,
  gateway: Long,
  address: Long,
  netmask: Long,
  id: Long = 0) extends IpAddressable {

  override def validate() {
    super.validate()
    // Credentials are mandatory, on top of the address validation done by IpAddressable.
    List(username, password).foreach { s =>
      require(s != null && s.length > 0, "Username and Password must not be empty")
    }
  }
  def toJsValue() = Json.toJson(this)
  override def asJson: String = toJsValue.toString

  // Field-wise comparison used in place of equals: compares assetId, gateway,
  // netmask and credentials.
  // NOTE(review): `id` and `address` are not compared — looks deliberate, but
  // confirm against callers before relying on it.
  override def compare(z: Any): Boolean = {
    if (z == null)
      return false
    val ar = z.asInstanceOf[AnyRef]
    if (!ar.getClass.isAssignableFrom(this.getClass))
      false
    else {
      val other = ar.asInstanceOf[IpmiInfo]
      this.assetId == other.assetId && this.gateway == other.gateway &&
        this.netmask == other.netmask && this.username == other.username && this.password == other.password
    }
  }

  // Plaintext password, decrypted with the framework key.
  def decryptedPassword(): String = IpmiInfo.decrypt(password)

  // Copy suitable for API responses: decrypts the password when the caller may
  // see credentials, otherwise masks both fields.
  def withExposedCredentials(exposeCredentials: Boolean = false) = {
    if (exposeCredentials) {
      this.copy(password = decryptedPassword())
    } else {
      this.copy(username = "********", password = "********")
    }
  }
}
object IpmiInfo extends IpAddressStorage[IpmiInfo] with IpAddressKeys[IpmiInfo] {
import org.squeryl.PrimitiveTypeMode._

def storageName = "IpmiInfo" // namespace used for cache keys

// Squeryl table mapping and constraints for ipmi_info.
val tableDef = table[IpmiInfo]("ipmi_info")
on(tableDef)(i => declare(
  i.id is (autoIncremented, primaryKey),
  i.assetId is (unique), // at most one IPMI record per asset
  i.address is (unique), // addresses cannot be shared between assets
  i.gateway is (indexed),
  i.netmask is (indexed)))

// Raw IPMI address-pool configuration, if one is configured.
lazy val AddressConfig = IpmiConfig.get()
// Allocates and persists a new IpmiInfo row for the asset: generates a username
// and an encrypted random password, then picks the next free address in the
// given pool scope. Retries up to 10 times to ride out concurrent allocations
// grabbing the same address.
def createForAsset(asset: Asset, scope: Option[String]): IpmiInfo = inTransaction {
  val assetId = asset.id
  val username = getUsername(asset)
  val password = generateEncryptedPassword()
  createWithRetry(10) { attempt =>
    val (gateway, address, netmask) = getNextAvailableAddress(scope)
    val ipmiInfo = IpmiInfo(
      assetId, username, password, gateway, address, netmask)
    tableDef.insert(ipmiInfo)
  }
}

// Encrypts a plaintext password with the framework crypto key.
def encryptPassword(pass: String): String = {
  CryptoCodec.withKeyFromFramework.Encode(pass)
}
// (attribute, raw value) pairs describing an IPMI query, e.g. (IpmiAddress, "10.0.0.1").
type IpmiQuerySeq = Seq[Tuple2[Enum, String]]

// Pages through assets joined to their IPMI info, filtered both by the generic
// asset finder and by the supplied IPMI attribute/value pairs.
def findAssetsByIpmi(page: PageParams, ipmi: IpmiQuerySeq, finder: AssetFinder): Page[AssetView] = {
  // Shared WHERE clause so the result query and the COUNT query stay in sync.
  def whereClause(assetRow: Asset, ipmiRow: IpmiInfo) = {
    where(
      assetRow.id === ipmiRow.assetId and
        finder.asLogicalBoolean(assetRow) and
        collectParams(ipmi, ipmiRow))
  }
  inTransaction {
    log {
      val results = from(Asset.tableDef, tableDef)((assetRow, ipmiRow) =>
        whereClause(assetRow, ipmiRow)
          select (assetRow)).page(page.offset, page.size).toList
      val totalCount = from(Asset.tableDef, tableDef)((assetRow, ipmiRow) =>
        whereClause(assetRow, ipmiRow)
          compute (count))
      Page(results, page.page, page.offset, totalCount)
    }
  }
}

// Cache-backed lookup by primary key; falls through to the database on a miss.
override def get(i: IpmiInfo): IpmiInfo = Cache.get(findByIdKey(i.id), inTransaction {
  tableDef.lookup(i.id).get
})
type Enum = Enum.Value
object Enum extends Enumeration(1) {
val IpmiAddress = Value("IPMI_ADDRESS")
val IpmiUsername = Value("IPMI_USERNAME")
val IpmiPassword = Value("IPMI_PASSWORD")
val IpmiGateway = Value("IPMI_GATEWAY")
val IpmiNetmask = Value("IPMI_NETMASK")
}
def decrypt(password: String) = {
logger.debug("Decrypting %s".format(password))
CryptoCodec.withKeyFromFramework.Decode(password).getOrElse("")
}
protected def getPasswordLength(): Int = IpmiConfig.passwordLength
protected def generateEncryptedPassword(): String = {
val length = getPasswordLength()
CryptoCodec.withKeyFromFramework.Encode(CryptoCodec.randomString(length))
}
protected def getUsername(asset: Asset): String = {
IpmiConfig.genUsername(asset)
}
override def getConfig(scope: Option[String]): Option[AddressPool] = IpmiConfig.get.flatMap(
addressPool => scope match {
case Some(p) => addressPool.pool(p)
case None => addressPool.defaultPool
}
)
// Converts our query parameters to fragments and parameters for a query
private[this] def collectParams(ipmi: Seq[Tuple2[Enum, String]], ipmiRow: IpmiInfo): LogicalBoolean = {
import Enum._
val results: Seq[LogicalBoolean] = ipmi.map {
case (enum, value) =>
enum match {
case IpmiAddress =>
(ipmiRow.address === IpAddress.toLong(value))
case IpmiUsername =>
(ipmiRow.username === value)
case IpmiGateway =>
(ipmiRow.gateway === IpAddress.toLong(value))
case IpmiNetmask =>
(ipmiRow.netmask === IpAddress.toLong(value))
case e =>
throw new Exception("Unhandled IPMI tag: %s".format(e))
}
}
results.reduceRight((a, b) => new BinaryOperatorNodeLogicalBoolean(a, b, "and"))
}
}
| defect/collins | app/collins/models/IpmiInfo.scala | Scala | apache-2.0 | 5,622 |
package com.peterpotts.mancala
import scala.io.StdIn
/** An immutable mancala board: exactly two [[Side]]s, indexed by player id (0 and 1).
  * All mutating operations return a new Board via `copy`.
  */
case class Board(sides: IndexedSeq[Side]) {
  require(sides.size == 2, "A board has two sides")
  /** Empties `bin` on side `sideId`; returns (seeds removed, updated board). */
  def reap(sideId: Int, bin: Bin): (Int, Board) = {
    val (seeds, side) = sides(sideId).reap(bin)
    seeds -> copy(sides = sides.updated(sideId, side))
  }
  /** Drops one seed into `cup` on side `sideId`, returning the updated board. */
  def sow(sideId: Int, cup: Cup): Board = {
    val side = sides(sideId).sow(cup)
    copy(sides = sides.updated(sideId, side))
  }
  // def next(playerId: Int, sideId: Int, binId)
  /** Console interaction: shows the board and reads the bin number to reap. */
  def readBin(playerId: Int): IO[Bin] = for {
    _ <- putLine(display)
    _ <- put(s"Player $playerId : Reap bin ? ")
    line <- getLine
  } yield Bin(line.toInt)
  /** Prompts the player for a bin, then plays the reap-and-sow move. */
  def reapAndSow(playerId: Int): IO[Board] = readBin(playerId).flatMap(bin => reapAndSow(playerId, bin))
  /** Plays one reap-and-sow move: empties `bin`, sows the removed seeds one per
    * cup walking forward from the reaped bin, then continues recursively
    * depending on where the last seed landed.
    */
  def reapAndSow(playerId: Int, bin: Bin): IO[Board] = {
    val (reapedSeeds, reapedBoard) = reap(playerId, bin)
    // The cups visited, in order, by the reaped seeds (skipping the reaped bin itself).
    val places = Iterator.iterate(Place(playerId, playerId, bin))(_.next).drop(1).take(reapedSeeds).toIndexedSeq
    val sowedBoard = places.foldLeft(reapedBoard) {
      case (board, place) => board.sow(place.sideId, place.cup)
    }
    // NOTE(review): `places.last` throws if reapedSeeds == 0 (reaping an empty
    // bin) — confirm callers cannot select an empty bin.
    val lastPlace = places.last
    lastPlace.cup match {
      case bin@Bin(id) =>
        // Last seed in a non-empty bin: chain another reap from that bin;
        // otherwise the move ends with the sowed board.
        // NOTE(review): returning a plain Board where IO[Board] is expected
        // relies on an implicit lift into IO — confirm IO provides it.
        if (sowedBoard.sides(lastPlace.sideId).bins(id) > 1)
          reapAndSow(playerId, bin)
        else
          sowedBoard
      case Mancala() =>
        // Last seed landed in a mancala: the same player moves again.
        reapAndSow(playerId)
    }
  }
  // def reapAndSow(playerId: Int, binId: Int): Board = {
  // val reap = Move(playerId, Reap(Bin(binId)))
  // val (reapedSeeds, reapedBoard) = play(reap)
  // val places = Iterator.iterate(Place(playerId, playerId, Bin(binId)))(_.next).drop(1).take(reapedSeeds).toIndexedSeq
  // val sows = places.map(place => Move(place.sideId, Sow(place.cup)))
  //
  // val reapedAndSowedBoard = sows.foldLeft(reapedBoard) {
  // case (board, sow) => board.play(sow)._2
  // }
  // println(reapedAndSowedBoard.display)
  // reapedAndSowedBoard
  // }
  /** Renders the board as three tab-separated lines: player 1's bins
    * (reversed), both mancalas, then player 0's bins.
    */
  def display: String = List(
    sides(1).bins.reverse.mkString("\\t", "\\t", ""),
    sides.map(_.mancala).reverse.mkString("", "\\t" * 7, ""),
    sides(0).bins.mkString("\\t", "\\t", "")).mkString("\\n")
}
/** Factory for the standard starting position: two identical initial sides. */
object Board {
  val initial = Board(IndexedSeq.fill(2)(Side.initial))
} | peterpotts/mancala | src/main/scala/com/peterpotts/mancala/Board.scala | Scala | mit | 2,225 |
package com.omearac.producers
import akka.actor._
import com.omearac.producers.ProducerStreamManager.InitializeProducerStream
import com.omearac.settings.Settings
import com.omearac.shared.EventMessages.ActivatedProducerStream
import com.omearac.shared.JsonMessageConversion.Conversion
import com.omearac.shared.KafkaMessages.{
ExampleAppEvent,
KafkaMessage,
CollectorStatus
}
/** Companion for [[ProducerStreamManager]]: actor Props plus its command message. */
object ProducerStreamManager {
  //CommandMessage
  /** Asks the manager to materialize a producer stream for `msgType` and send
    * the resulting stream reference back to `producerActorRef`.
    */
  case class InitializeProducerStream(producerActorRef: ActorRef, msgType: Any)
  def props: Props = Props(new ProducerStreamManager)
}
/** Actor that materializes Kafka producer streams on request.
  *
  * For each supported message type it looks up the producer settings,
  * materializes source→flow→sink, and hands the materialized source back to
  * the requesting actor via [[ActivatedProducerStream]].
  */
class ProducerStreamManager extends Actor with ProducerStream {
  implicit val system = context.system
  //Get Kafka Producer Config Settings
  val settings = Settings(system).KafkaProducers
  //Edit this receive method with any new Streamed message types
  def receive: Receive = {
    case InitializeProducerStream(producerActorRef, KafkaMessage) => {
      //Get producer properties
      val producerProperties = settings.KafkaProducerInfo("KafkaMessage")
      startProducerStream[KafkaMessage](producerActorRef, producerProperties)
    }
    case InitializeProducerStream(producerActorRef, ExampleAppEvent) => {
      //Get producer properties
      val producerProperties = settings.KafkaProducerInfo("ExampleAppEvent")
      startProducerStream[ExampleAppEvent](producerActorRef, producerProperties)
    }
    //for collectorProducer
    case InitializeProducerStream(producerActorRef, CollectorStatus) => {
      val producerProperties = settings.KafkaProducerInfo("CollectorStatus")
      startProducerStream[CollectorStatus](producerActorRef, producerProperties)
    }
    case other =>
      println(s"Producer Stream Manager got unknown message: $other")
  }
  /** Materializes a producer stream for `msgType` (which must have a JSON
    * Conversion instance) and notifies both the requesting actor and local
    * event subscribers of the activated stream and its topic.
    */
  def startProducerStream[msgType: Conversion](
      producerActorSource: ActorRef,
      producerProperties: Map[String, String]) = {
    val streamSource = createStreamSource[msgType]
    val streamFlow = createStreamFlow[msgType](producerProperties)
    val streamSink = createStreamSink(producerProperties)
    val producerStream = streamSource.via(streamFlow).to(streamSink).run()
    //Send the completed stream reference to the actor who wants to publish to it
    val kafkaTopic = producerProperties("publish-topic")
    producerActorSource ! ActivatedProducerStream(producerStream, kafkaTopic)
    publishLocalEvent(ActivatedProducerStream(producerStream, kafkaTopic))
  }
}
| soujiro0725/market-analysis-microservices | src/main/scala/com/omearac/producers/ProducerStreamManager.scala | Scala | apache-2.0 | 2,450 |
import com.bizo.mighty.csv.CSVReader
import java.net.URLEncoder
import org.apache.jena.riot.RDFFormat
import org.apache.jena.riot.RDFDataMgr
import java.io.FileOutputStream
import org.apache.jena.rdf.model.ResourceFactory
import org.apache.jena.rdf.model.Resource
import org.apache.jena.rdf.model.ModelFactory
import org.apache.jena.rdf.model.Model
import org.apache.jena.vocabulary.RDF
import org.apache.jena.vocabulary.OWL
import org.apache.jena.vocabulary.DC
import org.apache.jena.vocabulary.DC_11
import org.apache.jena.vocabulary.RDFS
import org.apache.jena.sparql.vocabulary.FOAF
import com.github.nscala_time.time.Imports._
import org.joda.time.format.ISODateTimeFormat
import org.apache.jena.shared.PrefixMapping
import org.apache.jena.datatypes.xsd.XSDDatatype
import com.bizo.mighty.csv.CSVReaderSettings
import com.bizo.mighty.csv.CSVDictReader
/** Converts the National Library (Kansalliskirjasto) newspaper metadata CSV
  * into RDF (Turtle), one resource per paper, writing the result to kk.ttl.
  */
object KKCSV2RDF extends Anything2RDF {
  // Schema and instance namespaces for the output dataset.
  val sns = "http://ldf.fi/fnewspapers-schema#"
  val ns = "http://ldf.fi/fnewspapers/"
  def main(args: Array[String]): Unit = {
    // Semicolon-separated CSV with a header row; each row describes one paper.
    var wr = CSVDictReader("kansalliskirjastoslmeta.csv")(CSVReaderSettings.Standard.copy(separator=';'))
    for (r <- wr) {
      // Paper resource keyed by ISSN.
      val p = R(ns+"paper_"+r("ISSN").trim)
      p.addProperty(FOAF.page,R(r("Nimekkeen kotisivun URL").trim))
      p.addProperty(RDFS.label,r("Nimeke").trim)
      p.addProperty(P("http://seco.hut.fi/u/eeahone/artikkelihakemisto#KIELI"),R("http://www.lingvoj.org/lang/"+r("Kieli (RFC3066)").trim))
      // Publication place with WGS84 coordinates.
      // NOTE(review): assumes "Koordinaatit" is always "lat,long" with both
      // parts present — rows with missing coordinates would throw here.
      val place = I(ns+"place_"+encode(r("Ilmestymispaikka").trim),r("Ilmestymispaikka").trim,CIDOC.Place)
      val coords = r("Koordinaatit").trim.split(",")
      place.addProperty(WGS84.lat, coords(0).trim, XSDDatatype.XSDdecimal)
      place.addProperty(WGS84.long, coords(1).trim, XSDDatatype.XSDdecimal)
      p.addProperty(P(sns+"ISSN"),r("ISSN"))
      p.addProperty(CIDOC.has_former_or_current_location,place)
      // Publication time span: start date split on "/", end date on "/" or ".".
      // NOTE(review): assumes both date columns always contain three parts.
      val d1 = r("Ilmestymispvm (dd/mm/yyyy)").trim.split("/")
      val d12 = makeDateTime(d1(2), d1(1), d1(0))
      val d2 = r("Julkaisuloppuu (dd/mm/yyyy)").trim.split("[/\\.]")
      val d22 = makeDateTime(d2(2), d2(1), d2(0))
      p.addProperty(CIDOC.has_timeSpan,makeTimeSpan(d1(2)+"-"+d1(1)+"-"+d1(0)+" -- "+d2(2)+"-"+d2(1)+"-"+d2(0),d12,d22));
    }
    // Register readable prefixes and serialize the model as Turtle.
    m.setNsPrefixes(PrefixMapping.Standard)
    m.setNsPrefix("crm",CIDOC.ns)
    m.setNsPrefix("skos",SKOS.ns)
    m.setNsPrefix("wgs84",WGS84.ns)
    m.setNsPrefix("fn",ns)
    m.setNsPrefix("fns",sns)
    RDFDataMgr.write(new FileOutputStream("kk.ttl"), m, RDFFormat.TTL)
  }
}
| jiemakel/anything2rdf | src/main/scala/kkcsv2rdf.scala | Scala | mit | 2,522 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.utils.tf.loaders
import java.nio.ByteOrder
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.tf.Context
import org.tensorflow.framework.NodeDef
import scala.reflect.ClassTag
/** Base class for loaders that translate one TensorFlow op into a BigDL [[Module]]. */
abstract class TensorflowOpsLoader() {
  /** Builds the BigDL module corresponding to `nodeDef`.
    *
    * @param nodeDef   TensorFlow graph node to translate
    * @param byteOrder byte order to use when decoding serialized tensor content
    * @param context   shared translation context
    */
  def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder, context: Context[T])
    (implicit ev: TensorNumeric[T]): Module[T]
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/utils/tf/loaders/TensorflowOpsLoader.scala | Scala | apache-2.0 | 1,107 |
package almhirt.converters
import java.time._
import scalaz._, Scalaz._
import almhirt.common._
import almhirt.almvalidation.kit._
import org.scalatest._
import java.nio.ByteBuffer
/** Round-trip tests for [[DateTimeConverter]]: LocalDateTime ↔ UTC epoch
  * millis and LocalDateTime ↔ ZonedDateTime conversions, including offset
  * and named-zone (Europe/Paris) samples.
  */
class DateTimeTests extends FunSuite with Matchers {
  import DateTimeConverter._
  // Reference point: the Unix epoch as a (zone-less) LocalDateTime.
  val epochZero = LocalDateTime.parse("1970-01-01T00:00")
  test("LocalDatetime(1970-01-01T00:00) should convert to 0 millis(UTC)") {
    val millis = localDateTimeToUtcEpochMillis(epochZero)
    millis should equal(0L)
  }
  test("LocalDatetime(1970-01-01T00:01) should convert to 1000 millis(UTC)") {
    val millis = localDateTimeToUtcEpochMillis(epochZero.plusSeconds(1))
    millis should equal(1000L)
  }
  test("0 millis(UTC) should convert to the correct LocalDateTime 1970-01-01T00:00") {
    val ldt = utcEpochMillisToLocalDateTime(0L)
    ldt should equal(epochZero)
  }
  test("1000 millis(UTC) should convert to the correct LocalDateTime 1970-01-01T00:01") {
    val ldt = utcEpochMillisToLocalDateTime(1000L)
    ldt should equal(epochZero.plusSeconds(1))
  }
  // NOTE(review): the next two test names are missing their closing ")" —
  // cosmetic only, but they are runtime strings so not changed here.
  test("A LocalDateTime(1970-01-01T00:00) must be converted to the correct ZonedDateTime(UTC") {
    val expected = ZonedDateTime.parse("1970-01-01T00:00+00:00")
    val res = localDateTimeToUtcZonedDateTime(epochZero)
    res should equal(expected)
  }
  test("A LocalDateTime(1970-01-01T02:00) must be converted to the correct ZonedDateTime(UTC") {
    val expected = ZonedDateTime.parse("1970-01-01T02:00+00:00")
    val res = localDateTimeToUtcZonedDateTime(epochZero.plusHours(2))
    res should equal(expected)
  }
  test("A ZonedDateTime(UTC) must be converted to the correct LocalDateTime") {
    val sample = ZonedDateTime.parse("1970-01-01T00:00+00:00")
    val expected = LocalDateTime.parse("1970-01-01T00:00")
    zonedDateTimeToUtcLocalDateTime(sample) should equal(expected)
  }
  test("A ZonedDateTime(1970-01-01T02:00+00:00) must be converted to the correct LocalDateTime") {
    val sample = ZonedDateTime.parse("1970-01-01T02:00+00:00")
    val expected = LocalDateTime.parse("1970-01-01T02:00")
    zonedDateTimeToUtcLocalDateTime(sample) should equal(expected)
  }
  // Named zone: Paris was UTC+1 on 1970-01-01, so 02:00 local is 01:00 UTC.
  test("A ZonedDateTime(1970-01-01T02:00 Europe/Paris) must be converted to the correct LocalDateTime") {
    val sample = LocalDateTime.parse("1970-01-01T02:00").atZone(ZoneId.of("Europe/Paris"))
    info(sample.toString)
    val expected = LocalDateTime.parse("1970-01-01T01:00")
    zonedDateTimeToUtcLocalDateTime(sample) should equal(expected)
  }
  test("A ZonedDateTime(+2.0) must be converted to the correct LocalDateTime") {
    val sample = ZonedDateTime.parse("1970-01-01T02:00+02:00")
    val expected = LocalDateTime.parse("1970-01-01T00:00")
    zonedDateTimeToUtcLocalDateTime(sample) should equal(expected)
  }
  test("A ZonedDateTime(-2.0) must be converted to the correct LocalDateTime") {
    val sample = ZonedDateTime.parse("1970-01-01T04:00-02:00")
    val expected = LocalDateTime.parse("1970-01-01T06:00")
    zonedDateTimeToUtcLocalDateTime(sample) should equal(expected)
  }
} | chridou/almhirt | almhirt-common/src/test/scala/almhirt/converters/DateTimeTests.scala | Scala | apache-2.0 | 3,049 |
package object iso8601 {
  /**
   * Signals that a parsed datetime value is invalid.
   *
   * @param datetime the invalid datetime
   * @param msg human-readable description of why the value is invalid
   */
  case class InvalidDateTime(datetime: DateTime, msg: String)
    // Propagate msg to the superclass so getMessage returns it instead of
    // null (previously the description was silently discarded).
    extends IllegalArgumentException(msg)
}
| softprops/iso8601 | src/main/scala/package.scala | Scala | mit | 209 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering
import breeze.linalg.{max, sum, DenseMatrix => BDM, DenseVector => BDV}
import breeze.numerics._
/**
* Utility methods for LDA.
*/
/**
 * Utility methods for LDA (numerically stable helpers over breeze vectors/matrices).
 */
private[spark] object LDAUtils {
  /**
   * Log Sum Exp with overflow protection using the identity:
   * For any a: $\\log \\sum_{n=1}^N \\exp\\{x_n\\} = a + \\log \\sum_{n=1}^N \\exp\\{x_n - a\\}$
   */
  private[clustering] def logSumExp(x: BDV[Double]): Double = {
    // Shift by the max so exp never overflows; `-:-` is breeze's element-wise subtraction.
    val a = max(x)
    a + log(sum(exp(x -:- a)))
  }
  /**
   * For theta ~ Dir(alpha), computes E[log(theta)] given alpha. Currently the implementation
   * uses [[breeze.numerics.digamma]] which is accurate but expensive.
   */
  private[clustering] def dirichletExpectation(alpha: BDV[Double]): BDV[Double] = {
    // E[log theta_k] = digamma(alpha_k) - digamma(sum_k alpha_k)
    digamma(alpha) - digamma(sum(alpha))
  }
  /**
   * Computes [[dirichletExpectation()]] row-wise, assuming each row of alpha are
   * Dirichlet parameters.
   */
  private[spark] def dirichletExpectation(alpha: BDM[Double]): BDM[Double] = {
    // Row sums via breeze broadcasting, then subtract each row's digamma(rowSum)
    // from that row of digamma(alpha) (column-wise broadcast).
    val rowSum = sum(alpha(breeze.linalg.*, ::))
    val digAlpha = digamma(alpha)
    val digRowSum = digamma(rowSum)
    val result = digAlpha(::, breeze.linalg.*) - digRowSum
    result
  }
}
| maropu/spark | mllib/src/main/scala/org/apache/spark/mllib/clustering/LDAUtils.scala | Scala | apache-2.0 | 1,995 |
package com.karasiq.shadowcloud.config
import com.typesafe.config.Config
import com.karasiq.common.configs.ConfigImplicits
import com.karasiq.shadowcloud.providers.SessionProvider
/** Parsed persistence settings.
  *
  * @param rootConfig      raw config section this was read from
  * @param sessionProvider class implementing [[SessionProvider]]
  * @param journalPlugin   akka-persistence journal plugin id ("" = use default)
  * @param snapshotPlugin  akka-persistence snapshot plugin id ("" = use default)
  */
final case class PersistenceConfig(rootConfig: Config,
                                   sessionProvider: Class[SessionProvider],
                                   journalPlugin: String,
                                   snapshotPlugin: String) extends WrappedConfig
/** Factory that reads a [[PersistenceConfig]] from a Typesafe Config section. */
object PersistenceConfig extends WrappedConfigFactory[PersistenceConfig] with ConfigImplicits {
  def apply(config: Config) = {
    // "session-provider" is required; the plugin ids fall back to "" (default plugin).
    val providerClass = config.getClass("session-provider")
    val journal = config.withDefault("", _.getString("journal-plugin"))
    val snapshot = config.withDefault("", _.getString("snapshot-plugin"))
    PersistenceConfig(config, providerClass, journal, snapshot)
  }
}
| Karasiq/shadowcloud | core/src/main/scala/com/karasiq/shadowcloud/config/PersistenceConfig.scala | Scala | apache-2.0 | 796 |
//
// Translator.scala -- Scala object Translator
// Project OrcScala
//
// $Id: Translator.scala 3099 2012-07-21 02:33:18Z laurenyew $
//
// Created by dkitchin on May 27, 2010.
//
// Copyright (c) 2011 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.compile.translate
import orc.ast.ext
import orc.ast.oil._
import orc.ast.oil.named._
import orc.ast.oil.named.Conversions._
import orc.compile.translate.ClassForms._
import orc.compile.translate.PrimitiveForms._
import orc.error.OrcException
import orc.error.OrcExceptionExtension._
import orc.error.compiletime._
import orc.lib.builtin
import orc.values.{ Signal, Field }
import orc.values.sites.{ JavaSiteForm, OrcSiteForm }
import scala.collection.mutable
import scala.collection.immutable._
/** Translates Orc's extended (surface) AST into the named OIL intermediate AST.
  *
  * Recoverable problems are reported through `reportProblem` rather than
  * thrown, so translation can continue past them.
  */
class Translator(val reportProblem: CompilationException with ContinuableSeverity => Unit) {
  /** Translate an extended AST to a named OIL AST.
    * Unbound names default to UnboundVar/UnboundTypevar via the root contexts.
    */
  def translate(extendedAST: ext.Expression): named.Expression = {
    val rootContext: Map[String, Argument] = HashMap.empty withDefault { UnboundVar(_) }
    val rootTypeContext: Map[String, Type] = HashMap.empty withDefault { UnboundTypevar(_) }
    convert(extendedAST)(rootContext, rootTypeContext)
  }
  /** Convert an extended AST expression to a named OIL expression.
    * `context` maps value names to arguments; `typecontext` maps type names to types.
    */
  def convert(e: ext.Expression)(implicit context: Map[String, Argument], typecontext: Map[String, Type]): Expression = {
    e -> {
      case ext.Stop() => Stop()
      case ext.Constant(c) => Constant(c)
      case ext.Variable(x) => context(x)
      case ext.TupleExpr(es) => unfold(es map convert, makeTuple)
      //SL Expression to Named. convert from extended expr to named expr
      case ext.SecurityLevelExpression(e,l) => HasSecurityLevel(convert(e),l)
      case ext.ListExpr(es) => unfold(es map convert, makeList)
      case ext.RecordExpr(es) => {
        // Duplicate record keys are reported but the last occurrence still wins.
        val seen = new scala.collection.mutable.HashSet[String]()
        val tuples = es map
          {
            case (s, e) => {
              if (seen contains s) { reportProblem(DuplicateKeyException(s) at e) } else { seen += s }
              val f = Constant(Field(s))
              unfold(List(f, convert(e)), makeTuple)
            }
          }
        unfold(tuples, makeRecord)
      }
      case ext.Call(target, gs) => {
        // Each argument group becomes one nested call on the previous result.
        var expr = convert(target)
        for (g <- gs) {
          expr = unfold(List(expr), { case List(m) => convertArgumentGroup(m, g) ; case _ => throw new AssertionError("Translator internal failure (convert Call arg group match error)")})
        }
        expr
      }
      case ext.PrefixOperator(opName, exp) => {
        // Unary minus is bound as "0-" to distinguish it from binary subtraction.
        val actualOpName = if (opName == "-") "0-" else opName
        val op = context(actualOpName)
        unfold(List(exp) map convert, { Call(op, _, None) })
      }
      case ext.InfixOperator(l, opName, r) => {
        val op = context(opName)
        unfold(List(l, r) map convert, { Call(op, _, None) })
      }
      // Sequential combinator: without a pattern, with a simple variable, or
      // with a full pattern (which adds a filter on the source side).
      case ext.Sequential(l, None, r) => convert(l) >> convert(r)
      case ext.Sequential(l, Some(ext.VariablePattern(name)), r) => {
        val x = new BoundVar(Some(name))
        val newl = convert(l)
        val newr = convert(r)(context + ((name, x)), typecontext)
        newl > x > newr
      }
      case ext.Sequential(l, Some(p), r) => {
        val x = new BoundVar()
        val (source, dcontext, target) = convertPattern(p, x)
        val newl = convert(l)
        val newr = convert(r)(context ++ dcontext, typecontext)
        source(newl) > x > target(newr)
      }
      // Pruning combinator, with the same three pattern shapes.
      case ext.Pruning(l, None, r) => convert(l) << convert(r)
      case ext.Pruning(l, Some(ext.VariablePattern(name)), r) => {
        val x = new BoundVar(Some(name))
        val newl = convert(l)(context + ((name, x)), typecontext)
        val newr = convert(r)
        newl < x < newr
      }
      case ext.Pruning(l, Some(p), r) => {
        val x = new BoundVar()
        val (source, dcontext, target) = convertPattern(p, x)
        val newl = convert(l)(context ++ dcontext, typecontext)
        val newr = convert(r)
        target(newl) < x < source(newr)
      }
      case ext.Parallel(l, r) => convert(l) || convert(r)
      case ext.Otherwise(l, r) => convert(l) ow convert(r)
      // A lambda becomes an anonymous def bound to a fresh name.
      case lambda: ext.Lambda => {
        val lambdaName = new BoundVar()
        val newdef = AggregateDef(lambda)(this).convert(lambdaName, context, typecontext)
        DeclareDefs(List(newdef), lambdaName)
      }
      // A class body is wrapped in a thunk and passed through MakeSite.
      case ext.DefClassBody(b) => {
        var capThunk = ext.Lambda(None, Nil, None, None, makeClassBody(b, reportProblem))
        convert(ext.Call(
          ext.Call(ext.Constant(builtin.MakeSite), List(ext.Args(None, List(capThunk)))), List(ext.Args(None, Nil))))
      }
      case ext.Conditional(ifE, thenE, elseE) => {
        makeConditional(convert(ifE), convert(thenE), convert(elseE))
      }
      // Mutually recursive def group: names are bound before bodies are converted.
      case ext.DefGroup(defs, body) => {
        val (newdefs, dcontext) = convertDefs(defs)
        val newbody = convert(body)(context ++ dcontext, typecontext)
        DeclareDefs(newdefs, newbody)
      }
      // val p = f  desugars to pruning with pattern p.
      case ext.Declare(ext.Val(p, f), body) => {
        convert(ext.Pruning(body, Some(p), f))
      }
      case ext.Declare(ext.SiteImport(name, sitename), body) => {
        try {
          val site = Constant(OrcSiteForm.resolve(sitename))
          convert(body)(context + { (name, site) }, typecontext)
        } catch {
          case oe: OrcException => throw oe at e
        }
      }
      // A Java class import binds both a value (the site) and a type of the same name.
      case ext.Declare(ext.ClassImport(name, classname), body) => {
        try {
          val u = new BoundTypevar(Some(name))
          val site = Constant(JavaSiteForm.resolve(classname))
          val newbody = convert(body)(context + { (name, site) }, typecontext + { (name, u) })
          DeclareType(u, ClassType(classname), newbody)
        } catch {
          case oe: OrcException => throw oe at e
        }
      }
      // Includes are flattened into nested declarations over the body.
      case ext.Declare(ext.Include(_, decls), body) => convert((decls foldRight body) { ext.Declare })
      case ext.Declare(ext.TypeImport(name, classname), body) => {
        val u = new BoundTypevar(Some(name))
        val newbody = convert(body)(context, typecontext + { (name, u) })
        DeclareType(u, ImportedType(classname), newbody)
      }
      //Declare SL from Extended translate to Named
      //body is the expression we convert to named expression
      case ext.Declare(ext.SecurityLevelDeclaration(name,parents,children), body) => {
        DeclareSecurityLevel(name,parents,children, convert(body))
      }
      // Type aliases: parameterless aliases convert directly; parameterized
      // ones become type abstractions.
      case ext.Declare(decl @ ext.TypeAlias(name, typeformals, t), body) => {
        val u = new BoundTypevar(Some(name))
        val newtype =
          typeformals match {
            case Nil => convertType(t)
            case _ => {
              val (newTypeFormals, dtypecontext) = convertTypeFormals(typeformals, decl)
              val enclosedType = convertType(t)(typecontext ++ dtypecontext)
              TypeAbstraction(newTypeFormals, enclosedType)
            }
          }
        val newbody = convert(body)(context, typecontext + { (name, u) })
        DeclareType(u, newtype, newbody)
      }
      // Datatype declaration: builds the variant type, then binds each
      // constructor name via a tuple pattern over the generated maker sites.
      case ext.Declare(decl @ ext.Datatype(name, typeformals, constructors), body) => {
        val d = new BoundTypevar(Some(name))
        val variantType = {
          val selfVar = new BoundTypevar(Some(name))
          val (newTypeFormals, dtypecontext) = convertTypeFormals(typeformals, decl)
          val newtypecontext = typecontext ++ dtypecontext + { (name, selfVar) }
          val variants =
            for (ext.Constructor(name, types) <- constructors) yield {
              val newtypes = types map {
                case Some(t) => convertType(t)(newtypecontext)
                case None => Top()
              }
              (name, newtypes)
            }
          VariantType(selfVar, newTypeFormals, variants)
        }
        val names = constructors map { _.name }
        val p = ext.TuplePattern(names map { ext.VariablePattern(_) })
        val x = new BoundVar()
        val (source, dcontext, target) = convertPattern(p, x)
        val newbody = convert(body)(context ++ dcontext, typecontext + { (name, d) })
        val makeSites = makeDatatype(d, typeformals.size, constructors, this)
        DeclareType(d, variantType, target(newbody) < x < source(makeSites))
      }
      case ext.Declare(decl, _) => throw (MalformedExpression("Invalid declaration form") at decl)
      case ext.TypeAscription(body, t) => HasType(convert(body), convertType(t))
      case ext.TypeAssertion(body, t) => HasType(convert(body), AssertedType(convertType(t)))
      // A hole captures the current contexts for later filling.
      case ext.Hole => Hole(context, typecontext)
    }
  }
  /** Converts one argument group (call args, field access, or dereference)
    * applied to an already-converted target.
    */
  def convertArgumentGroup(target: Argument, ag: ext.ArgumentGroup)(implicit context: Map[String, Argument], typecontext: Map[String, Type]): Expression = {
    ag match {
      case ext.Args(typeargs, args) => {
        val newtypeargs = typeargs map { _ map convertType }
        unfold(args map convert, { Call(target, _, newtypeargs) })
      }
      case ext.FieldAccess(field) => {
        Call(target, List(Constant(Field(field))), None)
      }
      case ext.Dereference => {
        Call(context("?"), List(target), None)
      }
    }
  }
  /** Convert a list of extended AST def declarations to:
    *
    * a list of named OIL definitions
    * and
    * a mapping from their string names to their new bound names
    */
  def convertDefs(defs: List[ext.DefDeclaration])(implicit context: Map[String, Argument], typecontext: Map[String, Type]): (List[Def], Map[String, BoundVar]) = {
    // Group clauses by name into aggregate defs (multiple clauses per def allowed).
    var defsMap: Map[String, AggregateDef] = HashMap.empty.withDefaultValue(AggregateDef.empty(this))
    for (d <- defs; n = d.name) {
      defsMap = defsMap + { (n, defsMap(n) + d) }
    }
    defsMap.values foreach { _.ClassCheck }
    // we generate these names beforehand since defs can be bound recursively in their own bodies
    val namesMap: Map[String, BoundVar] = Map.empty ++ (for (name <- defsMap.keys) yield (name, new BoundVar(Some(name))))
    val recursiveContext = context ++ namesMap
    val newdefs = for ((n, d) <- defsMap) yield {
      d.convert(namesMap(n), recursiveContext, typecontext)
    }
    (newdefs.toList, namesMap)
  }
  /** Convert an extended AST type to a named OIL type.
    */
  def convertType(t: ext.Type)(implicit typecontext: Map[String, Type]): named.Type = {
    t -> {
      case ext.TypeVariable(name) => typecontext(name)
      case ext.TupleType(ts) => TupleType(ts map convertType)
      case ext.RecordType(entries) => {
        val newEntries = (HashMap.empty ++ entries) mapValues convertType
        RecordType(newEntries)
      }
      case ext.TypeApplication(name, typeactuals) => {
        TypeApplication(typecontext(name), typeactuals map convertType)
      }
      case ext.LambdaType(typeformals, argtypes, returntype) => {
        val (newTypeFormals, dtypecontext) = convertTypeFormals(typeformals, t)
        val newtypecontext = typecontext ++ dtypecontext
        val newArgTypes = argtypes map { convertType(_)(newtypecontext) }
        val newReturnType = convertType(returntype)(newtypecontext)
        FunctionType(newTypeFormals, newArgTypes, newReturnType)
      }
    }
  }
  /** Convert a list of type formal names to:
    *
    * A list of bound type formal variables
    * and
    * A context mapping those names to those vars
    */
  def convertTypeFormals(typeformals: List[String], ast: orc.ast.AST): (List[BoundTypevar], Map[String, BoundTypevar]) = {
    var newTypeFormals: List[BoundTypevar] = Nil
    var formalsMap = new HashMap[String, BoundTypevar]()
    // Walk the formals right-to-left, reporting duplicates as they are found.
    for (name <- typeformals.reverse) {
      if (formalsMap contains name) {
        reportProblem(DuplicateTypeFormalException(name) at ast)
      } else {
        val w = new BoundTypevar(Some(name))
        newTypeFormals = w :: newTypeFormals
        formalsMap = formalsMap + { (name, w) }
      }
    }
    (newTypeFormals, formalsMap)
  }
  // A Conversion wraps an expression (e.g. adds guards or bindings around it).
  type Conversion = Expression => Expression
  val id: Conversion = { e => e }
  /** Convert an extended AST pattern to:
    *
    * A filtering conversion for the source expression
    * and
    * A binding conversion for the target expression,
    * parameterized on the variable carrying the result
    */
  def convertPattern(p: ext.Pattern, bridge: BoundVar)(implicit context: Map[String, Argument], typecontext: Map[String, Type]): (Conversion, Map[String, Argument], Conversion) = {
    // Accumulates name -> bound var; duplicate names are nonlinear and reported.
    var bindingMap: mutable.Map[String, BoundVar] = new mutable.HashMap()
    def bind(name: String, x: BoundVar) {
      if (bindingMap contains name) {
        reportProblem(NonlinearPatternException(name) at p)
      } else {
        bindingMap += { (name, x) }
      }
    }
    // Produces the filter/guard conversion for pattern p matched against `focus`,
    // recording variable bindings through `bind` as a side effect.
    def unravel(p: ext.Pattern, focus: BoundVar): (Conversion) = {
      p match {
        case ext.Wildcard() => {
          id
        }
        case ext.ConstantPattern(c) => {
          val b = new BoundVar();
          { callEq(focus, Constant(c)) > b > callIft(b) >> _ }
        }
        case ext.VariablePattern(name) => {
          bind(name, focus)
          id
        }
        // Empty tuple is the signal constant; 1-tuples are transparent.
        case ext.TuplePattern(Nil) => {
          unravel(ext.ConstantPattern(Signal), focus)
        }
        case ext.TuplePattern(List(p)) => {
          unravel(p, focus)
        }
        case ext.TuplePattern(ps) => {
          /* Test that the pattern's size matches the focus tuple's size */
          val tuplesize = Constant(BigInt(ps.size))
          val sizecheck = { callTupleArityChecker(focus, tuplesize) >> _ }
          /* Match each element of the tuple against its corresponding pattern */
          var elements = id
          for ((p, i) <- ps.zipWithIndex) {
            val y = new BoundVar()
            val bindElement: Conversion = { makeNth(focus, i) > y > _ }
            elements = elements compose bindElement compose unravel(p, y)
          }
          sizecheck compose elements
        }
        // List patterns desugar to nested cons patterns over nil.
        case ext.ListPattern(Nil) => {
          { callIsNil(focus) >> _ }
        }
        case ext.ListPattern(List(p)) => {
          val consp = ext.ConsPattern(p, ext.ListPattern(Nil))
          unravel(consp, focus)
        }
        case ext.ListPattern(ps) => {
          val seed: ext.Pattern = ext.ListPattern(Nil)
          val folded = (ps foldRight seed)(ext.ConsPattern)
          unravel(folded, focus)
        }
        case ext.ConsPattern(ph, pt) => {
          val y = new BoundVar()
          val p = ext.TuplePattern(List(ph, pt));
          { callIsCons(focus) > y > _ } compose unravel(p, y)
        }
        case ext.RecordPattern(elements) => {
          val y = new BoundVar()
          val (labels, patterns) = elements.unzip
          val p = ext.TuplePattern(patterns);
          { callRecordMatcher(focus, labels) > y > _ } compose unravel(p, y)
        }
        // Extractor call pattern: unapply through the bound constructor C.
        case ext.CallPattern(name, args) => {
          val y = new BoundVar()
          val p = ext.TuplePattern(args)
          val C = context(name);
          { makeUnapply(C, focus) > y > _ } compose unravel(p, y)
        }
        case ext.AsPattern(p, name) => {
          bind(name, focus)
          unravel(p, focus)
        }
        //SL for patterns Ex:x@A = 1
        //unravel: turns pattern into an expression
        //compose: attaches SL to the expression
        //ascribe laces expression into _
        case ext.SecurityLevelPattern(p,l) => {
          val ascribe: Conversion = { HasSecurityLevel(_, l) }
          ascribe compose unravel(p, focus)
        }
        case ext.TypedPattern(p, t) => {
          val T = convertType(t)
          val ascribe: Conversion = { HasType(_, T) }
          ascribe compose unravel(p, focus)
        }
      }
    }
    val sourceVar = new BoundVar()
    val filterInto = unravel(p, sourceVar)
    // Shape of the result depends on how many distinct bindings the pattern made.
    bindingMap.values.toList.distinct match {
      case Nil => {
        /* None of the computed results are needed; the pattern had only guards, and no bindings. */
        val sourceConversion: Conversion =
          { _ > sourceVar > filterInto(Constant(Signal)) }
        (sourceConversion, HashMap.empty, id)
      }
      case List(neededResult) => {
        /* Only one result is needed */
        val sourceConversion: Conversion =
          { _ > sourceVar > filterInto(neededResult) }
        val dcontext = HashMap.empty ++ (for ((name, `neededResult`) <- bindingMap) yield { (name, bridge) })
        (sourceConversion, dcontext, id)
      }
      case neededResults => {
        /* More than one result is needed */
        /* Note: This can only occur in a strict pattern.
         * Thus, the source conversion for a non-strict pattern is always the identity function. */
        val sourceConversion: Conversion =
          { _ > sourceVar > filterInto(makeLet(neededResults)) }
        var dcontext: Map[String, Argument] = HashMap.empty
        var targetConversion = id
        // The bridge variable carries a tuple; each binding projects its slot out.
        for ((r, i) <- neededResults.zipWithIndex) {
          val y = new BoundVar()
          for ((name, `r`) <- bindingMap) {
            dcontext = dcontext + { (name, y) }
          }
          targetConversion = targetConversion compose { makeNth(bridge, i) > y > _ }
        }
        (sourceConversion, dcontext, targetConversion)
      }
    }
  }
}
| laurenyew/cOrcS | src/orc/compile/translate/Translator.scala | Scala | bsd-3-clause | 17,580 |
package scala.tasty
package reflect
/** TASTy Reflect tree traverser.
*
* Usage:
* ```
* class MyTraverser[R <: scala.tasty.Reflection & Singleton](val reflect: R)
* extends scala.tasty.reflect.TreeTraverser {
* import reflect.{given _, _}
* override def traverseTree(tree: Tree)(using ctx: Context): Unit = ...
* }
* ```
*/
trait TreeTraverser extends TreeAccumulator[Unit] {
  import reflect._
  /** Visits `tree`. Override to add behavior; call `traverseTreeChildren` to recurse. */
  def traverseTree(tree: Tree)(using ctx: Context): Unit = traverseTreeChildren(tree)
  // Accumulator hook: folding with Unit is just traversal.
  def foldTree(x: Unit, tree: Tree)(using ctx: Context): Unit = traverseTree(tree)
  // Recurses into all children via the accumulator's foldOverTree.
  protected def traverseTreeChildren(tree: Tree)(using ctx: Context): Unit = foldOverTree((), tree)
}
| som-snytt/dotty | library/src/scala/tasty/reflect/TreeTraverser.scala | Scala | apache-2.0 | 699 |
/*******************************************************************************
Copyright (c) 2013-2014, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.DOMHtml
import scala.collection.mutable.{Map=>MMap, HashMap=>MHashMap}
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import org.w3c.dom.Node
import org.w3c.dom.Element
import kr.ac.kaist.jsaf.analysis.cfg.CFG
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
import kr.ac.kaist.jsaf.Shell
/** Abstract-interpretation model of the DOM Level 1 HTMLLabelElement
  * interface: declares the abstract constructor, prototype, global binding
  * and (optionally) a default instance, plus builders that turn concrete
  * DOM nodes into abstract property lists. */
object HTMLLabelElement extends DOM {
  private val name = "HTMLLabelElement"

  /* predefined locations */
  val loc_cons = newSystemRecentLoc(name + "Cons")
  val loc_proto = newSystemRecentLoc(name + "Proto")
  val loc_ins = newSystemRecentLoc(name + "Ins")

  /* constructor */
  private val prop_cons: List[(String, AbsProperty)] = List(
    ("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
    ("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
    ("@extensible", AbsConstValue(PropValue(BoolTrue))),
    ("@hasinstance", AbsConstValue(PropValueNullTop)),
    ("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(0)), F, F, F)))),
    ("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
  )

  /* instance: all HTMLElement properties plus label-specific DOM Level 1 fields */
  private val prop_ins: List[(String, AbsProperty)] =
    HTMLElement.getInsList2() ++ List(
      ("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
      ("@proto", AbsConstValue(PropValue(ObjectValue(loc_proto, F, F, F)))),
      ("@extensible", AbsConstValue(PropValue(BoolTrue))),
      // DOM Level 1
      ("form", AbsConstValue(PropValue(ObjectValue(Value(HTMLFormElement.loc_ins), F, T, T)))),
      ("accessKey", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
      ("htmlFor", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T))))
    )

  /* prototype */
  private val prop_proto: List[(String, AbsProperty)] = List(
    ("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
    ("@proto", AbsConstValue(PropValue(ObjectValue(Value(HTMLElement.loc_proto), F, F, F)))),
    ("@extensible", AbsConstValue(PropValue(BoolTrue)))
  )

  /* global: binds the constructor under its interface name */
  private val prop_global: List[(String, AbsProperty)] = List(
    (name, AbsConstValue(PropValue(ObjectValue(loc_cons, T, F, T))))
  )

  // With the Dommodel2 option a pre-built default instance is also registered.
  def getInitList(): List[(Loc, List[(String, AbsProperty)])] = if(Shell.params.opt_Dommodel2) List(
    (loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global), (loc_ins, prop_ins)
  ) else List(
    (loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global) )

  def getSemanticMap(): Map[String, SemanticFun] = {
    Map()
  }
  def getPreSemanticMap(): Map[String, SemanticFun] = {
    Map()
  }
  def getDefMap(): Map[String, AccessFun] = {
    Map()
  }
  def getUseMap(): Map[String, AccessFun] = {
    Map()
  }

  /* semantics */
  // no function

  /* instance */
  override def getInstance(cfg: CFG): Option[Loc] = Some(newRecentLoc())

  /* list of properties in the instance object */
  override def getInsList(node: Node): List[(String, PropValue)] = node match {
    case e: Element =>
      // This object has all properties of the HTMLElement object
      HTMLElement.getInsList(node) ++ List(
      ("@class", PropValue(AbsString.alpha("Object"))),
      ("@proto", PropValue(ObjectValue(loc_proto, F, F, F))),
      ("@extensible", PropValue(BoolTrue)),
      // DOM Level 1
      // NOTE(review): the DOM property `htmlFor` reflects the HTML "for"
      // attribute; getAttribute("htmlFor") may not read it — TODO confirm.
      ("accessKey", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("accessKey")), T, T, T))),
      ("htmlFor", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("htmlFor")), T, T, T))))
    // TODO: 'form' in DOM Level 1
    case _ => {
      System.err.println("* Warning: " + node.getNodeName + " cannot have instance objects.")
      List()
    }
  }

  // Builds an instance property list from pre-computed abstract property values.
  def getInsList(accessKey: PropValue, htmlFor: PropValue, xpath: PropValue): List[(String, PropValue)] = List(
    ("@class", PropValue(AbsString.alpha("Object"))),
    ("@proto", PropValue(ObjectValue(loc_proto, F, F, F))),
    ("@extensible", PropValue(BoolTrue)),
    // DOM Level 1
    ("accessKey", accessKey),
    ("htmlFor", htmlFor),
    ("xpath", xpath)
  )

  // Default instance: empty-string attributes, on top of the HTMLElement defaults.
  override def default_getInsList(): List[(String, PropValue)] = {
    val accessKey = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
    val htmlFor = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
    val xpath = PropValue(ObjectValue(AbsString.alpha(""), F, F, F))
    // This object has all properties of the HTMLElement object
    HTMLElement.default_getInsList :::
      getInsList(accessKey, htmlFor, xpath)
  }
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/DOMHtml/HTMLLabelElement.scala | Scala | bsd-3-clause | 5,061 |
package utils.pageobjects
/**
* Base class of all the PageFactories, used by utils.pageobjects.Page to determine thanks to a page title which Page Object to build.
*/
trait PageFactory {
  /** Builds the concrete page object representing the page at `url`. */
  def buildPageFromUrl(url: String, ctx: PageObjectsContext): Page
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/utils/pageobjects/PageFactory.scala | Scala | mit | 256 |
package chapter01
/**
 * 1.4 The roots of Scala
 *
 * Java, C#, C, C++
 * The biggest syntactic difference from Java is writing "variable: Type"
 * instead of "Type variable"; this order was chosen to support type inference.
 *
 * - uniform object model: Smalltalk, Ruby
 * - universal nesting: Algol, Simula, Beta, gbeta
 * - uniform access principle: Eiffel
 * - functional programming approach: SML, OCaml, F#
 * - higher-order functions: ML, Haskell
 * - actor-based concurrency library: Erlang
 */
// Placeholder object for the section 1.4 notes above; intentionally empty.
object c01_i04 {
}
/* PimpedScalaTable is a wrapper arround scalas swing.table or Javas JTable (haven't decided jet ;) ) that is meant to evercome all shortcomings I find lacking in scalas swing.table
Copyright (C) 2010 Axel Gschaider
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>.
If you would like to obtain this programm under another license, feel free to contact me. Probably I won't charge you for commercial projects. Just would like to receive a courtesy call.
axel[dot]gschaider[at]gmx[dot]at
or http://github.com/axelGschaider
*/
import at.axelGschaider.pimpedTable._
import scala.swing._
import scala.swing.GridBagPanel.Fill
import event._
import java.awt.Color
import java.util.Comparator
import java.awt.Graphics2D
/** Immutable row payload for the demo table: a numeric value and a display string. */
case class Data(i: Int, s: String)
/** Cell values extracted from a [[Data]] row; `father` links back to the owning row. */
sealed trait Value extends ColumnValue[Data]
case class IntValue(i:Int, father:Row[Data]) extends Value
case class StringValue(s:String, father:Row[Data]) extends Value
/*case class RowData(data:Data) extends Row[Data] {
override val isExpandable = true
override def expandedData() = {
(1 to 10).toList.map(x => Data(data.i, data.s + x.toString))
}
}*/
/*case class UnexpandableRowData(data:Data) extends RowData(data) {
override val isExpandable = false
override def expandedData() = List.empty[RowData]
}*/
/** Common column behaviour for the demo table: sortable, and rendered as a
  * Label whose text is the cell value. String cells additionally get a small
  * green triangle painted in their bottom-right corner. */
sealed trait MyColumns[+Value] extends ColumnDescription[Data, Value] {
  override val isSortable = true
  def renderComponent(data:Row[Data], isSelected: Boolean, focused: Boolean):PimpedRenderer = {
    // Label text: the raw string for string cells, the decimal form for int cells.
    val l = new Label(extractValue(data) match {
      case StringValue(s,_) => s
      case IntValue(i,_) => i.toString
    }) {
      override def paint(g: Graphics2D) {
        super.paint(g)
        // remember the current colour so we can restore it after our overlay
        val c = g.getColor
        val height = this.size.getHeight.toInt
        val width = this.size.getWidth.toInt
        extractValue(data) match {
          case StringValue(_,_) => {
            // mark string cells with a green triangle in the bottom-right corner
            g setColor Color.GREEN
            /*val xPoints = new java.util.ArrayList[Int]
            val yPoints = new java.util.ArrayList[Int]
            xPoints add width
            yPoints add height
            xPoints add width
            yPoints add (height - 10)
            xPoints add (width - 10)
            yPoints add height
            g.fillPolygon(xPoints.toArray[Int], yPoints.toArray, 3)*/
            g.fillPolygon(Array(width, width, width-10), Array(height, height-10, height), 3)
          }
          case _ => {}
        }
        g setColor c
      }
    }
    // Wrap the label so the table can set the background on selection/grouping.
    SetMyBackgroundRenderer(l, (x => {
      l.opaque = true
      l.background = x
    }))
  }
}
/** Column showing the `s` field of a row; sorts lexicographically ascending. */
case class StringColumn(name: String) extends MyColumns[StringValue] {
  def extractValue(x: Row[Data]) = StringValue(x.data.s, x)
  // keep painting the group colour while a row group is expanded
  override val paintGroupColourWhileExpanding = true
  def comparator = Some(new ComparatorRelais[StringValue] {
    // lexicographic order, normalised to -1/0/1
    def compare(o1: StringValue, o2: StringValue): Int = {
      val (a, b) = (o1.s, o2.s)
      if (a == b) 0 else if (a < b) -1 else 1
    }
  })
}
/** Column showing the `i` field of a row; sorts numerically ascending. */
case class IntColumn(name: String) extends MyColumns[IntValue] {
  def extractValue(x: Row[Data]) = IntValue(x.data.i, x)
  // this column is ignored while computing expanded (child) rows
  override val ignoreWhileExpanding = true
  def comparator = Some(new ComparatorRelais[IntValue] {
    // numeric order, normalised to -1/0/1
    def compare(o1: IntValue, o2: IntValue): Int = {
      val (a, b) = (o1.i, o2.i)
      if (a == b) 0 else if (a < b) -1 else 1
    }
  })
}
/** Orders tree leaves by the `i` field of their payload, ascending (-1/0/1). */
object InternalIntColumnComparator extends Comparator[Leaf[Data]] {
  def compare(d1: Leaf[Data], d2: Leaf[Data]): Int = {
    val (a, b) = (d1.data.i, d2.data.i)
    if (a == b) 0 else if (a > b) 1 else -1
  }
}
/** Manual demo application for PimpedTable: builds ~70 flat rows plus a few
  * expandable tree rows, shows them in a two-column table next to a button
  * that toggles expansion of one tree row. */
object Test extends SimpleSwingApplication {
  def top = new MainFrame {
    title = "Table Test"
    //DO SOME INIT
    //MainFleetSummaryDistributer registerClient this
    //SET SIZE AND LOCATION
    val framewidth = 480
    val frameheight = 480
    // Expandable row with an internal comparator for its children.
    val lt1 = LivingTree(Data(101, "201xxx")
                        , (0 to 9).toList.map(y =>
                            Data(y+1, "201xxx"+y.toString))
                        , Some(InternalIntColumnComparator)
                        , true
                       )
    // Expandable row without an internal comparator; toggled by the button below.
    val lt2 = LivingTree(Data(102, "202xxx")
                        , (0 to 9).toList.map(y =>
                            Data(y+1, "202xxx"+y.toString))
                        , None
                        , true
                       )
    // Mix of flat rows (DeadTree) and expandable rows (LivingTree), split by x % 3.
    val data:List[Row[Data]] = (0 to 100).toList.filter(_%3 == 0).map(x =>
        DeadTree(Data(x, (100-x).toString + "xxx"))
      ) ++ (0 to 100).toList.filter(_%3 == 1).map(x =>
        DeadTree(Data(x, (100-x).toString + "xxx"))
      ) ++ (0 to 100).toList.filter(_%3 == 2).map(x =>
        (LivingTree((Data(x, (100-x).toString + "xxx"))
                   ,(0 to 9).toList.map(y =>
                      Data(x, (100-x).toString + "xxx" + y.toString))
                   ,None
                   ,false))
      ) ++ List(lt1, lt2)
      /*++ (101 to 102).toList.map(x =>
        (LivingTree((Data(x, (100+x).toString + "xxx"))
                   ,(0 to 9).toList.map(y =>
                      Data(y+1, (100+x).toString + "xxx" + y.toString))
                   ,None
                   ,true))
      ) */
    /*val data:List[RowData] = (0 to 100).toList.map(x => RowData(Data(x, (100-x).toString + "xxx")){
      isExpandable = true
      expandedData = (0 to 10).toList.map(y => RowData(Data(x, (100-x).toString + "xxx"+y.toString)))
    }) */
    val columns:List[MyColumns[Value]] = List(new IntColumn("some int"), new StringColumn("some string") )
    val table = new PimpedTable(data, columns) {
      showGrid = true
      gridColor = Color.BLACK
      selectionBackground = Color.RED
      selectionForeground = Color.GREEN
      paintExpandColumn = true
    }
    // centre the frame on screen
    val screenSize = java.awt.Toolkit.getDefaultToolkit().getScreenSize()
    location = new java.awt.Point((screenSize.width - framewidth)/2, (screenSize.height - frameheight)/2)
    minimumSize = new java.awt.Dimension(framewidth, frameheight)
    val buttonPannel = new GridBagPanel() {
      // Button toggles expansion of lt2 and forces a table refresh.
      add(new Button(Action("Test") {
            //println("Test:")
            //table.paintExpandColumn = !table.paintExpandColumn
            //table unselectAll// data(0).data*/
            lt2.expanded = !lt2.expanded
            table.refresh
            /*if(table.isFiltered) table.unfilter
            else table filter (_ match {
              case Data(i, _) => i%2 == 0
            })*/
            //table.data = (0 to 101).toList.map(x => RowData(Data(x, (100-x).toString + "xxx")))
          })
        , new Constraints() {
            grid = (0,0)
            gridheight = 1
            gridwidth = 1
            weightx = 1
            weighty = 1
            fill = Fill.Both
          })
    }
    val tablePane = new ScrollPane(table) {
      horizontalScrollBarPolicy = scala.swing.ScrollPane.BarPolicy.AsNeeded
      verticalScrollBarPolicy = scala.swing.ScrollPane.BarPolicy.AsNeeded
      viewportView = table
    }
    contents = new SplitPane(Orientation.Vertical, buttonPannel, tablePane)
    listenTo(table/*.selection*/)
    reactions += {
      case PimpedTableSelectionEvent(_) => {
        //println("\nCLICK.")
        //println(" Adjusting: " + adjusting)
        //println("range: " + range + "\n")
        /*table.selectedData.foreach(_ match {
          case Data(i, s) => println("i:"+i+" s:"+s)
        })*/
      }
      //case _ => println("da ist was faul im Staate Denver")
    }
  }
}
| axelGschaider/PimpedScalaTable | PimpedTableTest.scala | Scala | gpl-3.0 | 8,194 |
// Dotty positive compiler test: old-style extension-method syntax on Int.
object Foo {
  // inline extension method; always yields the constant 5
  inline def (self: Int).foo(that: Int): Int = 5
  // identity extension returning the receiver
  def (self: Int).bar: Int = self
  // exercises chaining the two extensions: 1.foo(2) == 5, then 5.bar == 5
  1.foo(2).bar
}
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.optim
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.dllib.feature.dataset.segmentation.{MaskUtils, RLEMasks}
import com.intel.analytics.bigdl.dllib.nn.ClassNLLCriterion
import com.intel.analytics.bigdl.dllib.nn.AbsCriterion
import com.intel.analytics.bigdl.dllib.nn.abstractnn.Activity
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.feature.transform.vision.image.RoiImageInfo
import com.intel.analytics.bigdl.dllib.feature.transform.vision.image.label.roi.RoiLabel
import com.intel.analytics.bigdl.dllib.utils.Table
import org.apache.commons.lang3.SerializationUtils
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
/**
* A method defined to evaluate the model.
* This trait can be extended by user-defined method. Such
* as Top1Accuracy
*/
trait ValidationMethod[T] extends Serializable {
  /** Evaluates one batch: compares model `output` against `target` and
    * returns a partial result, later merged across batches via `+`. */
  def apply(output: Activity, target: Activity): ValidationResult

  // return the name of this method
  protected def format(): String

  // toString delegates to format(), so subclasses only implement format()
  override def toString(): String = format()

  // deep clone the object (via Java serialization round-trip)
  override def clone(): ValidationMethod[T] = SerializationUtils.clone(this)
}
/**
* A result that calculate the numeric value of a validation method.
* User-defined valuation results must override the + operation and result() method.
* It is executed over the samples in each batch.
*/
trait ValidationResult extends Serializable {
  // return the calculation results over all the samples in the batch
  def result(): (Float, Int) // (Result, TotalNum)

  // scalastyle:off methodName
  /** Merges another partial result into this one (implementations mutate
    * and return `this`). */
  def +(other: ValidationResult): ValidationResult

  // return the name of this trait
  protected def format(): String

  // toString delegates to format()
  override def toString(): String = format()
}
/**
* Represent an accuracy result. Accuracy means a ratio of correct number and total number.
* @param correct correct number
* @param count total count number
*/
/**
 * Accuracy over a set of samples: the ratio of correct predictions to the
 * total number of evaluated samples.
 *
 * @param correct number of correct predictions accumulated so far
 * @param count   total number of samples accumulated so far
 */
class AccuracyResult(private var correct: Int, private var count: Int)
  extends ValidationResult {

  /** @return (accuracy ratio, total sample count) */
  override def result(): (Float, Int) = {
    val accuracy = correct.toFloat / count
    (accuracy, count)
  }

  // scalastyle:off methodName
  /** Accumulates another batch's counts into this result; mutates and returns `this`. */
  override def +(other: ValidationResult): ValidationResult = {
    val that = other.asInstanceOf[AccuracyResult]
    correct += that.correct
    count += that.count
    this
  }
  // scalastyle:on methodName

  override protected def format(): String =
    s"Accuracy(correct: $correct, count: $count, accuracy: ${correct.toDouble / count})"

  /** Value equality on (correct, count); reference equality short-circuits. */
  override def equals(obj: Any): Boolean = obj match {
    case that: AccuracyResult =>
      this.eq(that) || (correct == that.correct && count == that.count)
    case _ => false // covers null and unrelated types
  }

  /** Hash consistent with equals: seeded polynomial over (correct, count). */
  override def hashCode(): Int = {
    val seed = 37
    (1 * seed + correct) * seed + count
  }
}
/**
* This is a metric to measure the accuracy of Tree Neural Network/Recursive Neural Network
*
*/
class TreeNNAccuracy[T: ClassTag]()(
  implicit ev: TensorNumeric[T])
  extends ValidationMethod[T] {
  /** Counts correct predictions for the root node of each tree.
    * Only the first slice along dim 2 of the target is compared
    * (target's column 1 is taken as the root label). */
  override def apply(output: Activity, target: Activity):
  ValidationResult = {
    var correct = 0
    var count = 0

    var _output = output.asInstanceOf[Tensor[T]]
    // root label per sample; assumes target is (batch, nodes...) — TODO confirm
    val _target = target.asInstanceOf[Tensor[T]].select(2, 1)

    if (_output.dim() == 3) {
      // batched case: take the root-node slice, then argmax (or 0.5-threshold
      // when there is a single output unit, i.e. binary classification)
      _output = _output.select(2, 1)
      (if (_output.size(2) == 1) {
        _output.clone().apply1(x => if (ev.isGreater(ev.fromType(0.5), x)) ev.zero else ev.one)
      } else {
        _output.max(2)._2.squeeze()
      }).map(_target, (a, b) => {
        if (a == b) {
          correct += 1
        }
        a
      })
      count += _output.size(1)
    } else if (_output.dim == 2) {
      // single-sample case: one tree, compare its root prediction only
      _output = _output.select(1, 1)
      require(_target.size(1) == 1)
      (if (_output.size(1) == 1) {
        _output.clone().apply1(x => if (ev.isGreater(ev.fromType(0.5), x)) ev.zero else ev.one)
      } else {
        _output.max(1)._2.squeeze()
      }).map(_target, (a, b) => {
        if (a == b) {
          correct += 1
        }
        a
      })
      count += 1
    } else {
      throw new IllegalArgumentException
    }

    new AccuracyResult(correct, count)
  }

  override def format(): String =
    s"TreeNNAccuracy()"
}
/**
* Caculate the percentage that output's max probability index equals target
*/
class Top1Accuracy[T: ClassTag](
  implicit ev: TensorNumeric[T])
  extends ValidationMethod[T] {
  /** Counts samples whose argmax (or 0.5-thresholded single output) equals
    * the target label. */
  override def apply(output: Activity, target: Activity):
  ValidationResult = {
    var correct = 0
    var count = 0

    val _target = target.asInstanceOf[Tensor[T]]
    // If the output batch is larger than the target batch, score only the
    // leading target-sized slice.
    val _output = if (output.toTensor[T].nDimension() != 1 &&
      output.toTensor[T].size().head != _target.size().head) {
      output.toTensor[T].narrow(1, 1, _target.size().head)
    } else {
      output.toTensor[T]
    }

    if (_output.dim() == 2) {
      // batched case: one row of class scores per sample
      (if (_output.size(2) == 1) {
        // single output unit: binary decision at threshold 0.5
        _output.clone().apply1(x => if (ev.isGreater(ev.fromType(0.5), x)) ev.zero else ev.one)
      } else {
        _output.max(2)._2.squeeze()
      }).map(_target, (a, b) => {
        if (a == b) {
          correct += 1
        }
        a
      })
      count += _output.size(1)
    } else if (_output.dim == 1) {
      // single sample
      require(_target.size(1) == 1)
      (if (_output.size(1) == 1) {
        _output.clone().apply1(x => if (ev.isGreater(ev.fromType(0.5), x)) ev.zero else ev.one)
      } else {
        _output.max(1)._2
      }).map(_target, (a, b) => {
        if (a == b) {
          correct += 1
        }
        a
      })
      count += 1
    } else {
      throw new IllegalArgumentException
    }

    new AccuracyResult(correct, count)
  }

  override def format(): String = "Top1Accuracy"
}
/**
 * Calculate the Mean Average Precision (MAP) for multi-class classification.
 * The algorithm follows the VOC Challenge after 2007.
 * Class labels begin with 0.
 *
 * @param k Take the top-k most confident predictions into account
 * @param classes The number of classes
 */
class MeanAveragePrecision[T: ClassTag](k: Int, classes: Int)(
  implicit ev: TensorNumeric[T]) extends ValidationMethod[T] {

  // BUGFIX: the original check was `classes > 0 && classes <= classes`, which
  // compared `classes` with itself and therefore could never fail.
  require(classes > 0, s"The number of classes should be > 0, but got $classes")
  require(k > 0, s"k should be > 0, but got $k")

  /**
   * Collects per-class (confidence, isGroundTruth) pairs for one batch.
   * `output` holds one row of class scores per sample (or a single score
   * vector for a lone sample); `target` holds integer class labels.
   */
  override def apply(output: Activity, target: Activity): ValidationResult = {
    // never reassigned, so a val (was a var)
    val _target = target.asInstanceOf[Tensor[T]].squeezeNewTensor()
    val outTensor = output.toTensor[T]
    // If the output batch is larger than the target batch, score only the
    // leading target-sized slice.
    val _output = if (outTensor.nDimension() != 1 &&
      outTensor.size(1) != _target.size(1)) {
      outTensor.narrow(1, 1, _target.size().head)
    } else {
      outTensor
    }

    require(_output.dim()==1 && _target.nElement() == 1 ||
      _output.size(1) == _target.nElement(), "The number of samples in the output should " +
      "be the same as in the target")

    // number of ground-truth positives per class
    val posCnt = new Array[Int](classes)
    for (i <- 1 to _target.nElement()) {
      val clazz = ev.toType[Float](_target.valueAt(i))
      require(clazz == math.ceil(clazz), s"The class for $i-th test sample should be an integer, "
        + s"got $clazz")
      val intClazz = clazz.toInt
      require(intClazz >= 0 && intClazz < classes, s"The class for $i-th test sample should be "
        + s">= 0 and < $classes, but got $intClazz")
      posCnt(intClazz) += 1
    }

    // per-class list of (confidence, sample-belongs-to-this-class) pairs
    val confidenceArr = (0 until classes).map(_ => new ArrayBuffer[(Float, Boolean)]).toArray
    if (_output.nDimension() == 2) {
      (1 to _output.size(1)).foreach(i => {
        val row = _output.select(1, i)
        val gtClz = ev.toType[Float](_target.valueAt(i))
        for(clz <- 0 until classes) {
          confidenceArr(clz) += ((ev.toType[Float](row.valueAt(clz + 1)), gtClz == clz))
        }
      })
    } else {
      require(_output.dim() == 1, "The output should have 1 or 2 dimensions")
      val row = _output
      val gtClz = ev.toType[Float](_target.valueAt(1))
      for(clz <- 0 until classes) {
        confidenceArr(clz) += ((ev.toType[Float](row.valueAt(clz + 1)), gtClz == clz))
      }
    }
    new MAPValidationResult(classes, k, confidenceArr, posCnt)
  }

  override def format(): String = s"MAP@$k"
}
/** Shared helpers for MAP-style object-detection evaluation. */
object MAPUtil {

  // find top k values & indices in a column of a matrix
  // (returned in descending value order; at most k = min(k, rows) pairs)
  def findTopK(k: Int, arr: Array[Array[Float]], column: Int): Array[(Int, Float)] = {
    // max-heap keyed on the column value
    val q = collection.mutable.PriorityQueue[(Int, Float)]()(Ordering.by[(Int, Float), Float](_._2))
    arr.indices.foreach(i => {
      q.enqueue((i, arr(i)(column)))
    })
    val end = Math.min(k, q.size)
    (1 to end).map(_ => q.dequeue()).toArray
  }

  /**
   * convert the ground truth into parsed GroundTruthRegions
   * @param gtTable one sub-table of ROI labels per image
   * @param classes number of classes; labels must be in [0, classes)
   * @param numIOU number of IOU thresholds evaluated in parallel
   * @param isCOCO if using COCO's algorithm for IOU computation
   * @param isSegmentation build mask (RLE) regions instead of bboxes
   * @return (array of GT regions per image, # of non-difficult GT per class)
   */
  def gtTablesToGroundTruthRegions(gtTable: Table, classes: Int, numIOU: Int, isCOCO: Boolean,
    isSegmentation: Boolean): (Array[ArrayBuffer[GroundTruthRegion]], Array[Int]) = {
    // the number of GT bboxes for each class
    val gtCntByClass = new Array[Int](classes)

    // one image may contain multiple Ground truth bboxes
    val gtImages = (1 to gtTable.length()).map { i =>
      val gtImage = new ArrayBuffer[GroundTruthRegion]()
      val roiLabel = gtTable[Table](i)
      if (roiLabel.length() > 0) {
        val bbox = RoiImageInfo.getBBoxes(roiLabel)
        val tclasses = RoiImageInfo.getClasses(roiLabel)
        val isCrowd = RoiImageInfo.getIsCrowd(roiLabel)
        val masks = if (isSegmentation) RoiImageInfo.getMasks(roiLabel) else null
        val bboxCnt = bbox.size(1)
        require(bboxCnt == tclasses.size(1), "CLASSES of target tables should have the" +
          "same size of the bbox counts")
        require(bboxCnt == isCrowd.nElement(), "ISCROWD of target tables should have the" +
          "same size of the bbox counts")
        require(masks == null || bboxCnt == masks.length, "MASKS of target tables should have the" +
          "same size of the bbox counts")
        for (j <- 1 to bboxCnt) {
          // a 2-row class tensor carries (label, difficulty); 1-row means diff = 0
          val (label, _diff) = if (tclasses.dim() == 2) {
            (tclasses.valueAt(1, j).toInt, tclasses.valueAt(2, j))
          } else {
            (tclasses.valueAt(j).toInt, 0f)
          }
          // crowd regions are treated as "difficult"
          val diff = if (isCrowd.valueAt(j) != 0 || _diff != 0) 1f else 0f
          val newGt = if (isSegmentation) {
            new GroundTruthRLE(numIOU, label, diff, masks(j - 1))
          } else {
            new GroundTruthBBox(isCOCO, numIOU, label, diff, bbox.valueAt(j, 1),
              bbox.valueAt(j, 2), bbox.valueAt(j, 3), bbox.valueAt(j, 4))
          }
          gtImage += newGt
          require(label >= 0 && label < classes, s"Bad label id $label")

          // difficult GT regions are excluded from the per-class positive count
          if (diff == 0) {
            gtCntByClass(label) += 1
          }
        }
      }
      gtImage
    }.toArray
    (gtImages, gtCntByClass)
  }

  /**
   * For a detection, match it with all GT boxes. Record the match in "predictByClass"
   */
  def parseDetection(gtBbox: ArrayBuffer[GroundTruthRegion], label: Int, score: Float, x1: Float,
    y1: Float, x2: Float, y2: Float, mask: RLEMasks, classes: Int, iou: Array[Float],
    predictByClasses: Array[Array[ArrayBuffer[(Float, Boolean)]]]): Unit = {
    require(label >= 0 && label < classes, s"Bad label id $label")
    // matching is done independently at every IOU threshold
    for (i <- iou.indices) {
      // for each GT boxes, try to find a matched one with current prediction
      val matchedGt = gtBbox.toIterator.filter(gt => label == gt.label && gt.canOccupy(i))
        .flatMap(gt => { // calculate and filter out the bbox
          val iouRate = gt.getIOURate(x1, y1, x2, y2, mask)
          if (iouRate >= iou(i)) Iterator.single((gt, iouRate)) else Iterator.empty
        })
        .reduceOption((gtArea1, gtArea2) => { // find max IOU bbox
          // non-difficult GT is preferred over difficult GT; ties go to larger IOU
          if (gtArea1._1.diff != gtArea2._1.diff) {
            if (gtArea1._1.diff > gtArea2._1.diff) gtArea2 else gtArea1
          } else {
            if (gtArea1._2 > gtArea2._2) gtArea1 else gtArea2
          }
        })
        .map(bbox => { // occupy the bbox
          bbox._1.occupy(i)
          bbox._1
        })
      if (matchedGt.isEmpty || matchedGt.get.diff == 0) {
        predictByClasses(i)(label).append((score, matchedGt.isDefined))
      }
      // else: when the prediction matches a "difficult" GT, do nothing
      // it is neither TP nor FP
      // "difficult" is defined in PASCAL VOC dataset, meaning the image is difficult to detect
    }
  }

  /** Decodes a packed detection tensor and invokes `func` once per detection.
    * Row layout: [batchSize, (label, score, x1, y1, x2, y2) * batchSize]. */
  def parseSegmentationTensorResult(outTensor: Tensor[Float],
    func: (Int, Int, Float, Float, Float, Float, Float) => Unit): Unit = {
    require(outTensor.dim() == 2, "the output tensor should have 2 dimensions")
    for (imgId <- 0 until outTensor.size(1)) {
      // for each image
      val batch = outTensor.select(1, imgId + 1)
      val batchSize = batch.valueAt(1).toInt
      var offset = 2
      for (bboxIdx <- 0 until batchSize) {
        // for each predicted bboxes
        val label = batch.valueAt(offset).toInt
        val score = batch.valueAt(offset + 1)
        val x1 = batch.valueAt(offset + 2)
        val y1 = batch.valueAt(offset + 3)
        val x2 = batch.valueAt(offset + 4)
        val y2 = batch.valueAt(offset + 5)
        func(imgId, label, score, x1, y1, x2, y2)
        offset += 6
      }
    }
  }
}
/** Marker hierarchy selecting which MAP interpolation variant to use. */
class MAPType extends Serializable
/** PASCAL VOC 2007 style: 11-point interpolated AP (recall 0.0, 0.1, ..., 1.0). */
object MAPPascalVoc2007 extends MAPType
/** PASCAL VOC 2010+ style: AP averaged over every achievable recall value. */
object MAPPascalVoc2010 extends MAPType
/** COCO style: 101-point interpolation; classes with no GT yield -1 and are skipped. */
object MAPCOCO extends MAPType
/**
* The MAP Validation Result. The results are not calculated until result() or format() is called
* require class label beginning with 0
*/
class MAPValidationResult(
  private val nClass: Int,
  // take the first k samples, or -1 for all samples
  private val k: Int,
  // the predicts for each classes. (Confidence, GT)
  private[bigdl] var predictForClass: Array[ArrayBuffer[(Float, Boolean)]],
  // number of ground-truth positives per class
  private[bigdl] var gtCntForClass: Array[Int],
  private val theType: MAPType = MAPPascalVoc2010,
  // class index excluded from the mean, or -1 for none (e.g. background class)
  private val skipClass: Int = -1,
  private val isSegmentation: Boolean = false
)
  extends ValidationResult {

  if (skipClass < 0) {
    require(skipClass == -1, s"Invalid skipClass $skipClass")
  } else {
    require(skipClass >= 0 && skipClass < nClass, s"Invalid skipClass $skipClass")
  }

  private def sortPredictions(p: ArrayBuffer[(Float, Boolean)]): ArrayBuffer[(Float, Boolean)] = {
    p.sortBy(v => v._1)(Ordering.Float.reverse) // descending order
  }

  /** Computes the average precision for one class according to `theType`.
    * COCO returns -1 for classes without any ground truth. */
  private[bigdl] def calculateClassAP(clz: Int): Float = {
    val posCnt = gtCntForClass
    // for each class, first find top k confident samples
    val sorted = sortPredictions(predictForClass(clz))
    var tp = 0
    val refinedK = if (k > 0) k else sorted.size
    // calculate the max precision for each different recall
    // for each top-j items, calculate the (precision, recall)
    val PnR = sorted.take(refinedK).zipWithIndex.flatMap { case (predict, j) =>
      if (predict._2) {
        // if it is a hit
        tp += 1
        // j + 1 is the total number of samples marked positive by the model
        val precision = tp.toFloat / (j + 1)
        val recall = tp.toFloat / posCnt(clz)
        Iterator.single(recall, precision)
      } else {
        Iterator.empty
      }
    }

    // get Average precision over each different recall
    theType match {
      case _: MAPPascalVoc2007.type =>
        // 11-point interpolation at recall = 0.0, 0.1, ..., 1.0
        (0 to 10).map(r => {
          val recall = 0.1f * r
          // for every (R,P), where R>=recall, get max(P)
          PnR.filter(_._1 >= recall).map(_._2).reduceOption(_ max _).getOrElse(0f)
        })
          .reduceOption(_ + _)
          .map(_ / 11)
          .getOrElse(0f)
      case _: MAPPascalVoc2010.type =>
        // average over every achievable recall level (1/pos, 2/pos, ...)
        (1 to posCnt(clz)).map(r => {
          val recall = r.toFloat / posCnt(clz)
          // for every (R,P), where R>=recall, get max(P)
          PnR.filter(_._1 >= recall).map(_._2).reduceOption(_ max _).getOrElse(0f)
        })
          .reduceOption(_ + _)
          .map(_ / posCnt(clz))
          .getOrElse(0f)
      case _: MAPCOCO.type =>
        if (posCnt(clz) == 0) {
          // sentinel: this class has no GT and must be skipped by callers
          -1f
        } else {
          // 101-point interpolation at recall = 0.00, 0.01, ..., 1.00
          (0 to 100).map(r => {
            val recall = 0.01f * r
            // for every (R,P), where R>=recall, get max(P)
            PnR.filter(_._1 >= recall).map(_._2).reduceOption(_ max _).getOrElse(0f)
          })
            .reduceOption(_ + _)
            .map(_ / 101)
            .getOrElse(0f)
        }
    }
  }

  override def result(): (Float, Int) = {
    // get the indices of top-k confident samples
    val AP = (0 until nClass).filter(_ != skipClass).map { clz => calculateClassAP(clz) }
    // APs are got. Now we get MAP
    val result = theType match {
      case t: MAPCOCO.type =>
        // COCO: average only over classes that have ground truth (AP != -1)
        val filtered = AP.filter(_ != -1f)
        filtered.sum / filtered.length
      case _ => AP.sum / (nClass - (if (skipClass == -1) 0 else 1))
    }
    (result, 1)
  }

  /** Merges another result's predictions into this one, re-trimming each class
    * to the top-k confident entries; GT counts are NOT merged here. */
  private[optim] def mergeWithoutGtCnt(o: MAPValidationResult): MAPValidationResult = {
    require(predictForClass.length == o.predictForClass.length)
    require(gtCntForClass.length == o.gtCntForClass.length)

    for (i <- predictForClass.indices) {
      val (left, right) = (predictForClass(i), o.predictForClass(i))
      left ++= right
      predictForClass(i) = if (k < 0) {
        left
      } else {
        val sorted = sortPredictions(left)
        sorted.take(k)
      }
    }
    this
  }

  // scalastyle:off methodName
  override def +(other: ValidationResult): ValidationResult = {
    val o = other.asInstanceOf[MAPValidationResult]
    mergeWithoutGtCnt(o)
    gtCntForClass.indices.foreach( i => gtCntForClass(i) += o.gtCntForClass(i))
    this
  }
  // scalastyle:on methodName

  override protected def format(): String = {
    val segOrBbox = if (isSegmentation) "segm" else "bbox"
    val resultStr = (0 until nClass).map { clz => calculateClassAP(clz) }.zipWithIndex
      .map { t => s"AP of class ${t._2} = ${t._1}\n"}.reduceOption( _ + _).getOrElse("")
    s"MeanAveragePrecision_$segOrBbox@$k(${result()._1})\n $resultStr"
  }
}
/** One ground-truth region (bbox or mask) of a single image, tracking for each
  * IOU threshold whether a prediction has already been matched to it. */
abstract private[bigdl] class GroundTruthRegion(isCOCO: Boolean, numIOU: Int, val label: Int,
  val diff: Float) {
  // if is false, the region is not matched with any predictions
  // indexed by the IOU threshold index
  private val isOccupied = new Array[Boolean](numIOU)

  /**
   * Returns whether this region can still be matched at the given IOU index.
   * Under COCO, a "difficult"/crowd region (diff == 1) may match any number
   * of predictions.
   *
   * @return true if a new prediction may be matched to this region
   */
  def canOccupy(iouIdx: Int): Boolean = (isCOCO && diff == 1) || !isOccupied(iouIdx)

  /** Marks this region as matched at the given IOU threshold index. */
  def occupy(iouIdx: Int): Unit = {
    isOccupied(iouIdx) = true
  }

  /** get the IOU rate of another region with the current region
   *
   * @param x1 the min x
   * @param y1 the min y
   * @param x2 the max x
   * @param y2 the max y
   * @param rle RLE mask data, can be null
   * @return IOU in [0, 1]
   */
  def getIOURate(x1: Float, y1: Float, x2: Float, y2: Float, rle: RLEMasks = null): Float
}
/** Ground-truth axis-aligned bounding box; extents are pixel-inclusive (+1). */
private[bigdl] class GroundTruthBBox(isCOCO: Boolean, numIOU: Int, label: Int, diff: Float,
  val xmin: Float, val ymin: Float, val xmax: Float, val ymax: Float)
  extends GroundTruthRegion(isCOCO, numIOU, label, diff) {

  // pixel-inclusive area of this ground-truth box
  private val area = (xmax - xmin + 1) * (ymax - ymin + 1)

  override def getIOURate(x1: Float, y1: Float, x2: Float, y2: Float,
    rle: RLEMasks = null): Float = {
    // clipped intersection rectangle, pixel-inclusive; zero when disjoint
    val interW = Math.max(Math.min(xmax, x2) - Math.max(xmin, x1) + 1, 0)
    val interH = Math.max(Math.min(ymax, y2) - Math.max(ymin, y1) + 1, 0)
    val inter = interW * interH
    val detectionArea = (x2 - x1 + 1) * (y2 - y1 + 1)
    // COCO crowd/difficult GT: intersection over the detection area only
    val union = if (isCOCO && diff != 0) detectionArea else detectionArea + area - inter
    inter / union
  }
}
/** Ground-truth segmentation region stored as an RLE mask; IOU is computed on
  * the masks, so the bbox arguments of getIOURate are ignored. */
private[bigdl] class GroundTruthRLE(numIOU: Int, label: Int, diff: Float, rle: RLEMasks)
  extends GroundTruthRegion(true, numIOU, label, diff) {

  override def getIOURate(x1: Float, y1: Float, x2: Float, y2: Float,
    detRLE: RLEMasks): Float = {
    // mask-to-mask IOU; `diff != 0` switches to crowd-style IOU semantics
    MaskUtils.rleIOU(detRLE, rle, diff != 0)
  }
}
/** MAP averaged over several IOU thresholds (e.g. COCO's 0.5:0.05:0.95);
  * delegates the per-threshold computation to one MAPValidationResult each. */
class MAPMultiIOUValidationResult(
  private val nClass: Int,
  // take the first k samples, or -1 for all samples
  private val k: Int,
  // the predicts for each classes.
  // predictForClassIOU(iouIdx)(cls) is an array of (Confidence, GT)
  private val predictForClassIOU: Array[Array[ArrayBuffer[(Float, Boolean)]]],
  // GT counts are shared by all IOU thresholds
  private var gtCntForClass: Array[Int],
  // (first, last) IOU threshold; intermediate values are evenly spaced
  private val iouRange: (Float, Float),
  private val theType: MAPType = MAPPascalVoc2010,
  private val skipClass: Int = -1,
  private val isSegmentation: Boolean = false) extends ValidationResult {

  // one single-IOU result per threshold, all sharing gtCntForClass
  val impl = predictForClassIOU.map(predictForClass => {
    new MAPValidationResult(nClass, k, predictForClass,
      gtCntForClass, theType, skipClass, isSegmentation)
  })

  override def result(): (Float, Int) = (impl.map(_.result()._1).sum / impl.length, 1)

  // scalastyle:off methodName
  override def +(other: ValidationResult): ValidationResult = {
    val o = other.asInstanceOf[MAPMultiIOUValidationResult]
    require(o.predictForClassIOU.length == predictForClassIOU.length,
      "To merge MAPMultiIOUValidationResult, the length of predictForClassIOU should be" +
        "the same")
    // merge predictions per threshold, but add the shared GT counts only once
    impl.zip(o.impl).foreach { case (v1, v2) => v1.mergeWithoutGtCnt(v2) }
    gtCntForClass.indices.foreach( i => gtCntForClass(i) += o.gtCntForClass(i))
    this
  }
  // scalastyle:on methodName

  override protected def format(): String = {
    val step = (iouRange._2 - iouRange._1) / (predictForClassIOU.length - 1)
    val results = impl.map(_.result()._1)
    val resultStr = results.zipWithIndex
      .map { t => s"\t IOU(${iouRange._1 + t._2 * step}) = ${t._1}\n"}
      .reduceOption( _ + _).getOrElse("")
    val segOrBbox = if (isSegmentation) "segm" else "bbox"
    f"MAP_$segOrBbox@IOU(${iouRange._1}%1.3f:$step%1.3f:${iouRange._2}%1.3f)=" +
      s"${results.sum / impl.length}\n$resultStr"
  }
}
/** MeanAveragePrecision for Object Detection
* The class label begins with 0
*
* The expected output from the last layer should be a Tensor[Float] or a Table
* If output is a tensor, it should be [num_of_batch X (1 + maxDetection * 6)] matrix
* The format of the matrix should be [<batch>, <batch>, ...], where each row vector is
* <batch> = [<size_of_batch>, <sample>,...]. Each sample has format:
* <sample> = <label, score, bbox x4>
* imgId is the batch number of the sample. imgId begins with 0.
* Multiple samples may share one imgId
*
* If output is a table, it is a table of tables.
* output(i) is the results of the i-th image in the batch, where i = 1 to sizeof(batch)
* output(i) is a table, which contains the same keys (fields) of image info in the "target"
* Please refer to RoiMiniBatch/RoiImageInfo's documents. Besides, the inner tables also contain
* the scores for the detections in the image.
*
* The "target" (Ground truth) is a table with the same structure of "output", except that
* it does not have "score" field
*
* @param classes the number of classes
* @param topK only take topK confident predictions (-1 for all predictions)
* @param iouThres the IOU thresholds
* @param theType the type of MAP algorithm. (voc2007/voc2010/COCO)
* @param skipClass skip calculating on a specific class (e.g. background)
* the class index starts from 0, or is -1 if no skipping
* @param isSegmentation if check the IOU of segmentations instead of bounding boxes. If true,
* the output and target must have "masks" data
*/
class MeanAveragePrecisionObjectDetection[T: ClassTag](
classes: Int, topK: Int = -1, iouThres: Array[Float] = Array(0.5f),
theType: MAPType = MAPPascalVoc2010, skipClass: Int = -1, isSegmentation: Boolean = false)(
implicit ev: TensorNumeric[T]) extends ValidationMethod[T] {
override def apply(output: Activity, target: Activity): ValidationResult = {
// one image may contain multiple Ground truth bboxes
val (gtImages, gtCntByClass) =
MAPUtil.gtTablesToGroundTruthRegions(target.toTable, classes, iouThres.length,
theType.isInstanceOf[MAPCOCO.type], isSegmentation)
// the predicted bboxes for each classes
// predictByClasses(iouIdx)(classIdx)(bboxNum) is (Confidence, GT)
val predictByClasses = iouThres.map(_iou => {
(0 until classes).map(_ => new ArrayBuffer[(Float, Boolean)]).toArray
})
output match {
case _outTensor: Tensor[_] =>
// Tensor outputs carry no mask data, hence segmentation MAP is unsupported here.
require(!isSegmentation, "Cannot get segmentation data from tensor output for MAP")
val outTensor = _outTensor.asInstanceOf[Tensor[Float]]
// Each parsed detection is matched against the GT regions of its image.
MAPUtil.parseSegmentationTensorResult(outTensor,
(imgIdx, label, score, x1, y1, x2, y2) => {
val gtBbox = gtImages(imgIdx)
MAPUtil.parseDetection(gtBbox, label, score, x1, y1, x2, y2, null, classes, iouThres,
predictByClasses = predictByClasses)
})
case outTable: Table =>
require(gtImages.length == outTable.length(), "The number of images in the output and " +
"in the target should be the same")
// Tables (and tensors inside them) are 1-indexed; gtImages is 0-indexed.
for (imgId <- 1 to outTable.length()) {
val gtBbox = gtImages(imgId - 1)
val imgOut = outTable[Table](imgId)
// if the image contains empty predictions, do nothing
if (imgOut.length() > 0) {
val bboxes = RoiImageInfo.getBBoxes(imgOut)
val scores = RoiImageInfo.getScores(imgOut)
val labels = RoiImageInfo.getClasses(imgOut)
require(bboxes.dim() == 2, "the bbox tensor should have 2 dimensions")
val masks = if (isSegmentation) Some(RoiImageInfo.getMasks(imgOut)) else None
val batchSize = bboxes.size(1)
// NOTE(review): the next two messages render without a space ("the" + "same") and
// name CLASSES/ISCROWD although the checks are on the labels/scores tensors.
require(batchSize == labels.size(1), "CLASSES of target tables should have the" +
"same size of the bbox counts")
require(batchSize == scores.nElement(), "ISCROWD of target tables should have the" +
"same size of the bbox counts")
require(masks.isEmpty || batchSize == masks.get.length, "MASKS of target tables " +
"should have the same size of the bbox counts")
// Collect all detections of this image so they can be matched in descending
// confidence order, as required by the MAP matching rule.
val detections = new ArrayBuffer[(Int, Float, Float, Float, Float,
Float, RLEMasks)]()
for (bboxIdx <- 1 to batchSize) {
val score = scores.valueAt(bboxIdx)
val x1 = bboxes.valueAt(bboxIdx, 1)
val y1 = bboxes.valueAt(bboxIdx, 2)
val x2 = bboxes.valueAt(bboxIdx, 3)
val y2 = bboxes.valueAt(bboxIdx, 4)
val label = labels.valueAt(bboxIdx).toInt
// masks is 0-indexed while the tensors above are 1-indexed
val mask = masks.map(_ (bboxIdx - 1)).orNull
detections.append((label, score, x1, y1, x2, y2, mask))
}
detections.sortBy(v => v._2)(Ordering.Float.reverse).foreach {
case (label, score, x1, y1, x2, y2, mask) =>
MAPUtil.parseDetection(gtBbox, label, score, x1, y1, x2, y2, mask, classes,
iouThres, predictByClasses)
}
}
}
}
// Multiple thresholds => multi-IOU result (COCO style); otherwise a single result.
if (iouThres.length != 1) {
new MAPMultiIOUValidationResult(classes, topK, predictByClasses, gtCntByClass,
(iouThres.head, iouThres.last), theType, skipClass, isSegmentation)
} else {
new MAPValidationResult(classes, topK, predictByClasses.head, gtCntByClass, theType,
skipClass, isSegmentation)
}
}
override protected def format(): String = s"MAPObjectDetection"
}
/** Factory methods for the various mean-average-precision validation methods. */
object MeanAveragePrecision {
  /**
   * Builds a COCO-style MAP validation method for object detection where the IOU
   * between a detection and the ground truth is computed from segmentation masks.
   *
   * @param nClasses the number of classes (including skipped class)
   * @param topK only take topK confident predictions (-1 for all predictions)
   * @param skipClass skip calculating on a specific class (e.g. background)
   *                  the class index starts from 0, or is -1 if no skipping
   * @param iouThres the IOU thresholds as (rangeStart, stepSize, numOfThres), inclusive
   * @return MeanAveragePrecisionObjectDetection
   */
  def cocoSegmentation(nClasses: Int, topK: Int = -1, skipClass: Int = 0,
    iouThres: (Float, Float, Int) = (0.5f, 0.05f, 10))
  : MeanAveragePrecisionObjectDetection[Float] =
    createCOCOMAP(nClasses, topK, skipClass, iouThres, isSegmentation = true)

  /**
   * Builds a COCO-style MAP validation method for object detection where the IOU
   * is computed from bounding boxes.
   *
   * @param nClasses the number of classes (including skipped class)
   * @param topK only take topK confident predictions (-1 for all predictions)
   * @param skipClass skip calculating on a specific class (e.g. background)
   *                  the class index starts from 0, or is -1 if no skipping
   * @param iouThres the IOU thresholds as (rangeStart, stepSize, numOfThres), inclusive
   * @return MeanAveragePrecisionObjectDetection
   */
  def cocoBBox(nClasses: Int, topK: Int = -1, skipClass: Int = 0,
    iouThres: (Float, Float, Int) = (0.5f, 0.05f, 10))
  : MeanAveragePrecisionObjectDetection[Float] =
    createCOCOMAP(nClasses, topK, skipClass, iouThres, isSegmentation = false)

  /**
   * Builds a MAP validation method for classification output/target pairs,
   * following the VOC Challenge conventions used after 2007.
   * Class labels must begin at 0.
   *
   * @param nClasses The number of classes
   * @param topK Take top-k confident predictions into account. If k=-1,
   *             calculate on all predictions
   */
  def classification(nClasses: Int, topK: Int = -1)
  : MeanAveragePrecision[Float] = new MeanAveragePrecision[Float](topK, nClasses)

  /** Shared constructor for the two COCO factories: expands the
   * (start, step, count) triple into an explicit array of IOU thresholds. */
  private def createCOCOMAP(nClasses: Int, topK: Int, skipClass: Int,
    iouThres: (Float, Float, Int), isSegmentation: Boolean)
  : MeanAveragePrecisionObjectDetection[Float] = {
    val (start, step, count) = iouThres
    val thresholds = Array.tabulate(count)(i => start + i * step)
    new MeanAveragePrecisionObjectDetection[Float](nClasses, topK, thresholds,
      MAPCOCO, skipClass, isSegmentation)
  }

  /**
   * Builds a Pascal VOC MAP validation method for object detection.
   *
   * @param nClasses the number of classes
   * @param useVoc2007 if using the algorithm in Voc2007 (11 points). Otherwise, use Voc2010
   * @param topK only take topK confident predictions (-1 for all predictions)
   * @param skipClass skip calculating on a specific class (e.g. background)
   *                  the class index starts from 0, or is -1 if no skipping
   * @return MeanAveragePrecisionObjectDetection
   */
  def pascalVOC(nClasses: Int, useVoc2007: Boolean = false, topK: Int = -1,
    skipClass: Int = 0): MeanAveragePrecisionObjectDetection[Float] = {
    val algorithm = if (useVoc2007) MAPPascalVoc2007 else MAPPascalVoc2010
    new MeanAveragePrecisionObjectDetection[Float](nClasses, topK,
      theType = algorithm, skipClass = skipClass)
  }
}
/**
 * Calculate the percentage of samples whose target label appears among the
 * output's top-5 highest-probability indexes.
 */
class Top5Accuracy[T: ClassTag](
  implicit ev: TensorNumeric[T]) extends ValidationMethod[T] {
  override def apply(output: Activity, target: Activity):
  AccuracyResult = {
    // squeezeNewTensor returns a new tensor, so the caller's target is untouched.
    val _target = target.asInstanceOf[Tensor[T]].squeezeNewTensor()
    // If the output batch is larger than the target batch, only score the
    // first target-sized slice.
    val _output = if (output.toTensor[T].nDimension() != 1 &&
      output.toTensor[T].size(1) != _target.size(1)) {
      output.toTensor[T].narrow(1, 1, _target.size().head)
    } else {
      output.toTensor[T]
    }

    if (_output.dim() == 2) {
      // Batch of samples: indices holds the top-5 class indexes for each row.
      val indices = _output.topk(5, 2, false)._2
      var correct = 0
      var i = 1
      while (i <= _output.size(1)) {
        // The sample is correct when any of the 5 candidate indexes matches.
        var j = 1
        var hit = false
        while (!hit && j <= 5) {
          hit = indices.valueAt(i, j) == _target.valueAt(i)
          j += 1
        }
        if (hit) correct += 1
        i += 1
      }
      new AccuracyResult(correct, _output.size(1))
    } else if (_output.dim == 1) {
      // Single sample (1D output vector).
      require(_target.size(1) == 1,
        s"Top5Accuracy: a 1D output requires exactly one target, but got ${_target.size(1)}")
      val indices = _output.topk(5, 1, false)._2
      var j = 1
      var hit = false
      while (!hit && j <= 5) {
        hit = indices.valueAt(j) == _target.valueAt(1)
        j += 1
      }
      new AccuracyResult(if (hit) 1 else 0, 1)
    } else {
      throw new IllegalArgumentException(
        s"Top5Accuracy: output should have 1 or 2 dimensions, but got ${_output.dim()}")
    }
  }

  override def format(): String = "Top5Accuracy"
}
/**
 * Hit Ratio(HR).
 * HR intuitively measures whether the test item is present on the top-k list
 * of (negNum + 1) scored items.
 *
 * @param k top k.
 * @param negNum number of negative items.
 */
class HitRatio[T: ClassTag](k: Int = 10, negNum: Int = 100)(
  implicit ev: TensorNumeric[T])
  extends ValidationMethod[T] {
  /**
   * Output and target should belong to the same user
   * and have (negNum + 1) elements.
   * Target should have only one positive label, meaning one element is 1 and the
   * others are all 0.
   * A couple of output and target is counted as one record.
   */
  override def apply(output: Activity, target: Activity): ValidationResult = {
    // Fixed: the expected element count is negNum + 1 (not negNum), and the
    // second message now reports the target's count (it previously printed the output's).
    require(output.toTensor[T].nElement() == negNum + 1,
      s"negNum is $negNum, output's nElement should be ${negNum + 1}, but got" +
        s" ${output.toTensor[T].nElement()}")
    require(target.toTensor[T].nElement() == negNum + 1,
      s"negNum is $negNum, target's nElement should be ${negNum + 1}, but got" +
        s" ${target.toTensor[T].nElement()}")
    val o = output.toTensor[T].resize(1 + negNum)
    val t = target.toTensor[T].resize(1 + negNum)

    // Locate the unique positive item; fail loudly if there is none or several.
    var positiveItem = 0
    var positiveCount = 0
    var i = 1
    while (i <= t.nElement()) {
      if (t.valueAt(i) == 1) {
        positiveItem = i
        positiveCount += 1
      }
      i += 1
    }
    require(positiveItem != 0, s"${format()}: no positive item.")
    require(positiveCount == 1, s"${format()}: too many positive items, excepted 1," +
      s" but got $positiveCount")

    val hr = calHitRate(positiveItem, o, k)
    new ContiguousResult(hr, 1, s"HitRatio@$k")
  }

  // Returns 1.0f when the positive item's score ranks within the top k, else 0.0f.
  // The rank is 1 + the number of items scored strictly higher than the positive item.
  private def calHitRate(index: Int, o: Tensor[T], k: Int): Float = {
    var rank = 1
    var i = 1
    val positiveScore = ev.toType[Float](o.valueAt(index))
    // Fixed off-by-one: the original loop used `i < o.nElement()` and, since
    // tensors are 1-indexed, never examined the last element.
    while (i <= o.nElement() && rank <= k) {
      if (ev.toType[Float](o.valueAt(i)) > positiveScore) {
        rank += 1
      }
      i += 1
    }
    if (rank <= k) 1 else 0
  }

  // Fixed: report the configured k instead of the hard-coded "HitRate@10".
  override def format(): String = s"HitRatio@$k"
}
/**
 * Normalized Discounted Cumulative Gain(NDCG).
 * NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.
 *
 * @param k top k.
 * @param negNum number of negative items.
 */
class NDCG[T: ClassTag](k: Int = 10, negNum: Int = 100)(
  implicit ev: TensorNumeric[T])
  extends ValidationMethod[T] {
  /**
   * Output and target should belong to the same user
   * and have (negNum + 1) elements.
   * Target should have only one positive label, meaning one element is 1 and the
   * others are all 0.
   * A couple of output and target is counted as one record.
   */
  override def apply(output: Activity, target: Activity): ValidationResult = {
    // Fixed: the expected element count is negNum + 1 (not negNum), and the
    // second message now reports the target's count (it previously printed the output's).
    require(output.toTensor[T].nElement() == negNum + 1,
      s"negNum is $negNum, output's nElement should be ${negNum + 1}, but got" +
        s" ${output.toTensor[T].nElement()}")
    require(target.toTensor[T].nElement() == negNum + 1,
      s"negNum is $negNum, target's nElement should be ${negNum + 1}, but got" +
        s" ${target.toTensor[T].nElement()}")
    val o = output.toTensor[T].resize(1 + negNum)
    val t = target.toTensor[T].resize(1 + negNum)

    // Locate the unique positive item; fail loudly if there is none or several.
    var positiveItem = 0
    var positiveCount = 0
    var i = 1
    while (i <= t.nElement()) {
      if (t.valueAt(i) == 1) {
        positiveItem = i
        positiveCount += 1
      }
      i += 1
    }
    require(positiveItem != 0, s"${format()}: no positive item.")
    require(positiveCount == 1, s"${format()}: too many positive items, excepted 1," +
      s" but got $positiveCount")

    val ndcg = calNDCG(positiveItem, o, k)
    new ContiguousResult(ndcg, 1, "NDCG")
  }

  // Computes log(2) / log(rank + 1) when the positive item ranks within the top k,
  // otherwise 0. The rank is 1 + the number of items scored strictly higher.
  private def calNDCG(index: Int, o: Tensor[T], k: Int): Float = {
    var ranking = 1
    var i = 1
    val positiveScore = ev.toType[Float](o.valueAt(index))
    // Fixed off-by-one: the original loop used `i < o.nElement()` and, since
    // tensors are 1-indexed, never examined the last element.
    while (i <= o.nElement() && ranking <= k) {
      if (ev.toType[Float](o.valueAt(i)) > positiveScore) {
        ranking += 1
      }
      i += 1
    }
    if (ranking <= k) {
      (math.log(2) / math.log(ranking + 1)).toFloat
    } else {
      0
    }
  }

  override def format(): String = "NDCG"
}
/**
* Uses an accumulated loss as a validation result; averaging, merging and
* formatting are inherited from ContiguousResult with the fixed name "Loss".
*
* @param loss summed loss calculated by the forward function
* @param count number of samples the loss was accumulated over
*/
class LossResult(private var loss: Float, private var count: Int)
extends ContiguousResult(loss, count, name = "Loss")
/**
 * A generic ValidationResult whose payload is a contiguous (summable) float
 * plus an observation count.
 *
 * @param contiResult the accumulated float value (e.g. a summed loss)
 * @param count how many observations have been accumulated
 * @param name label used when rendering the result
 */
class ContiguousResult(
    private var contiResult: Float,
    private var count: Int,
    private val name: String)
  extends ValidationResult {

  /** Average value together with the number of accumulated observations. */
  override def result(): (Float, Int) = (contiResult / count, count)

  // scalastyle:off methodName
  /** Accumulates another ContiguousResult into this one (mutating; returns this). */
  override def +(other: ValidationResult): ValidationResult = {
    val that = other.asInstanceOf[ContiguousResult]
    contiResult += that.contiResult
    count += that.count
    this
  }
  // scalastyle:on methodName

  override protected def format(): String =
    s"($name: $contiResult, count: $count, Average $name: ${contiResult / count})"

  /** Two results are equal when both the accumulated value and count match. */
  override def equals(obj: Any): Boolean = obj match {
    case that: ContiguousResult =>
      this.eq(that) || (contiResult == that.contiResult && count == that.count)
    case _ => false
  }

  override def hashCode(): Int = {
    val seed = 37
    var hash = 1
    hash = hash * seed + contiResult.toInt
    hash * seed + count
  }
}
/**
* This evaluation method calculates the loss of the output with respect to the target.
*
* @param criterion criterion method for evaluation
* The default criterion is [[ClassNLLCriterion]]
*/
class Loss[@specialized(Float, Double)T: ClassTag](
var criterion: Criterion[T] = null)
(implicit ev: TensorNumeric[T]) extends ValidationMethod[T] {
// Fall back to ClassNLLCriterion when the caller does not supply a criterion.
if (criterion == null) criterion = ClassNLLCriterion[T]()
override def apply(output: Activity, target: Activity): LossResult = {
val _target = target.asInstanceOf[Tensor[T]]
// If the output batch is larger than the target batch, evaluate only the first
// target-sized slice — assumes the extra rows are padding; TODO confirm.
val _output = if (output.toTensor[T].nDimension() != 1 &&
output.toTensor[T].size().head != _target.size().head) {
output.toTensor[T].narrow(1, 1, _target.size().head)
} else {
output.toTensor[T]
}
val loss = ev.toType[Float](criterion.forward(_output, _target))
val count = _target.size().head
// Store the summed loss with the sample count so merged results average correctly.
new LossResult(loss * count, count)
}
override def format(): String = "Loss"
}
/**
 * This evaluation method calculates the mean absolute error between the predicted
 * class index (argmax over dimension 2 of the output) and the target.
 */
class MAE[@specialized(Float, Double)T: ClassTag]()
  (implicit ev: TensorNumeric[T]) extends ValidationMethod[T] {
  private val criterion = AbsCriterion[T]()

  /**
   * @param output a 2D tensor of per-class scores; the prediction is the argmax index
   * @param target the expected indices
   * @return the absolute error wrapped in a LossResult with count 1
   */
  override def apply(output: Activity, target: Activity): LossResult = {
    val _output = output.asInstanceOf[Tensor[T]]
    // Only the argmax indices are needed; the max values themselves were unused
    // in the original code, so they are discarded here.
    val (_, maxIndex) = _output.max(2)
    val _target = target.asInstanceOf[Tensor[T]]
    val loss = ev.toType[Float](criterion.forward(maxIndex, _target))
    new LossResult(loss, 1)
  }

  override def format(): String = "MAE"
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/optim/ValidationMethod.scala | Scala | apache-2.0 | 40,876 |
package mesosphere.marathon.core.election.impl
import java.util
import akka.actor.ActorSystem
import akka.event.EventStream
import com.codahale.metrics.MetricRegistry
import mesosphere.chaos.http.HttpConf
import mesosphere.marathon.MarathonConf
import mesosphere.marathon.core.base.{ CurrentRuntime, ShutdownHooks }
import mesosphere.marathon.metrics.Metrics
import org.apache.curator.framework.api.ACLProvider
import org.apache.curator.{ RetrySleeper, RetryPolicy }
import org.apache.curator.framework.{ CuratorFramework, CuratorFrameworkFactory, AuthInfo }
import org.apache.curator.framework.recipes.leader.{ LeaderLatch, LeaderLatchListener }
import org.apache.zookeeper.data.ACL
import org.apache.zookeeper.{ ZooDefs, KeeperException, CreateMode }
import org.slf4j.LoggerFactory
import scala.util.control.NonFatal
import scala.collection.JavaConversions._
/**
 * Leader election backed by Curator's LeaderLatch recipe.
 *
 * A latch is (re)created on every leadership offer; latch callbacks drive
 * startLeadership/stopLeadership in the ElectionServiceBase state machine.
 * On election, a "tombstone" node is additionally written into the legacy
 * twitter-commons election path so that old Marathon instances never believe
 * they are leading.
 */
class CuratorElectionService(
  config: MarathonConf,
  system: ActorSystem,
  eventStream: EventStream,
  http: HttpConf,
  metrics: Metrics = new Metrics(new MetricRegistry),
  hostPort: String,
  backoff: ExponentialBackoff,
  shutdownHooks: ShutdownHooks) extends ElectionServiceBase(
  config, system, eventStream, metrics, backoff, shutdownHooks
) {
  private lazy val log = LoggerFactory.getLogger(getClass.getName)

  private lazy val client = provideCuratorClient()
  // The current latch while this instance is a candidate. All access is guarded
  // by `synchronized` on this service, since Curator invokes callbacks from its
  // own threads.
  private var maybeLatch: Option[LeaderLatch] = None

  /** Asks ZooKeeper for the current leader's host:port, or None when unknown. */
  override def leaderHostPortImpl: Option[String] = synchronized {
    try {
      maybeLatch.flatMap { l =>
        val participant = l.getLeader
        if (participant.isLeader) Some(participant.getId) else None
      }
    } catch {
      case NonFatal(e) =>
        log.error("error while getting current leader", e)
        None
    }
  }

  override def offerLeadershipImpl(): Unit = synchronized {
    log.info("Using HA and therefore offering leadership")
    // If we are already a candidate, close the old latch before creating a new one.
    maybeLatch match {
      case Some(l) =>
        log.info("Offering leadership while being candidate")
        l.close()
      case _ =>
    }

    maybeLatch = Some(new LeaderLatch(
      client, config.zooKeeperLeaderPath + "-curator", hostPort, LeaderLatch.CloseMode.NOTIFY_LEADER
    ))
    maybeLatch.get.addListener(Listener)
    maybeLatch.get.start()
  }

  /**
   * Bridges LeaderLatch callbacks into the ElectionServiceBase lifecycle.
   * Curator calls these from its own threads, hence the explicit
   * synchronization on the enclosing service.
   */
  private object Listener extends LeaderLatchListener {
    override def notLeader(): Unit = CuratorElectionService.this.synchronized {
      log.info(s"Defeated (LeaderLatchListener Interface). New leader: ${leaderHostPort.getOrElse("-")}")

      // remove tombstone for twitter commons
      twitterCommonsTombstone.delete(onlyMyself = true)
      stopLeadership()
    }

    override def isLeader(): Unit = CuratorElectionService.this.synchronized {
      log.info("Elected (LeaderLatchListener Interface)")
      startLeadership(error => CuratorElectionService.this.synchronized {
        maybeLatch match {
          case None => log.error("Abdicating leadership while not being leader")
          case Some(l) =>
            maybeLatch = None
            l.close()
        }
        // stopLeadership() is called in notLeader
      })

      // write a tombstone into the old twitter commons leadership election path which always
      // wins the selection. Check that startLeadership was successful and didn't abdicate.
      if (CuratorElectionService.this.isLeader) {
        twitterCommonsTombstone.create()
      }
    }
  }

  /**
   * Builds and starts the Curator client: world-readable leadership nodes,
   * optional digest authentication, and a retry policy that exits the process
   * on ZooKeeper failure instead of risking an inconsistent state.
   */
  private def provideCuratorClient(): CuratorFramework = {
    log.info(s"Will do leader election through ${config.zkHosts}")

    // let the world read the leadership information as some setups depend on that to find Marathon
    val acl = new util.ArrayList[ACL]()
    acl.addAll(config.zkDefaultCreationACL)
    acl.addAll(ZooDefs.Ids.READ_ACL_UNSAFE)

    val builder = CuratorFrameworkFactory.builder().
      connectString(config.zkHosts).
      sessionTimeoutMs(config.zooKeeperSessionTimeout().toInt).
      aclProvider(new ACLProvider {
        override def getDefaultAcl: util.List[ACL] = acl
        override def getAclForPath(path: String): util.List[ACL] = acl
      }).
      retryPolicy(new RetryPolicy {
        override def allowRetry(retryCount: Int, elapsedTimeMs: Long, sleeper: RetrySleeper): Boolean = {
          log.error("ZooKeeper access failed - Committing suicide to avoid invalidating ZooKeeper state")
          CurrentRuntime.asyncExit()(scala.concurrent.ExecutionContext.global)
          false
        }
      })

    // optionally authenticate
    val client = (config.zkUsername, config.zkPassword) match {
      case (Some(user), Some(pass)) =>
        builder.authorization(List(
          new AuthInfo("digest", (user + ":" + pass).getBytes("UTF-8"))
        )).build()
      case _ =>
        builder.build()
    }
    client.start()
    client.getZookeeperClient.blockUntilConnectedOrTimedOut()
    client
  }

  /**
   * Manages the tombstone node in the legacy twitter-commons election path.
   * The tombstone's member name sorts before any real candidate, so legacy
   * instances always lose the election while this tombstone exists.
   */
  private object twitterCommonsTombstone {
    def memberPath(member: String): String = {
      config.zooKeeperLeaderPath.stripSuffix("/") + "/" + member
    }

    // - precedes 0-9 in ASCII and hence this instance overrules other candidates
    lazy val memberName = "member_-00000000"
    lazy val path = memberPath(memberName)

    var fallbackCreated = false

    def create(): Unit = {
      try {
        delete(onlyMyself = false)
        client.createContainers(config.zooKeeperLeaderPath)

        // Create an ephemeral node which is not removed when losing leadership. This is necessary to avoid a
        // race of old Marathon instances which think that they can become leader in the moment
        // the new instances failover and no tombstone is existing (yet).
        if (!fallbackCreated) {
          client.create().
            creatingParentsIfNeeded().
            withMode(CreateMode.EPHEMERAL_SEQUENTIAL).
            forPath(memberPath("member_-1"), hostPort.getBytes("UTF-8"))
          fallbackCreated = true
        }

        log.info("Creating tombstone for old twitter commons leader election")
        client.create().
          creatingParentsIfNeeded().
          withMode(CreateMode.EPHEMERAL).
          forPath(path, hostPort.getBytes("UTF-8"))
      } catch {
        case e: Exception =>
          log.error(s"Exception while creating tombstone for twitter commons leader election: ${e.getMessage}")
          abdicateLeadership(error = true)
      }
    }

    def delete(onlyMyself: Boolean = false): Unit = {
      Option(client.checkExists().forPath(path)) match {
        case None =>
        case Some(tombstone) =>
          try {
            // Fixed: the node payload is an Array[Byte]; the previous code compared its
            // toString ("[B@..." object identity) with hostPort, which is never equal,
            // so with onlyMyself = true the tombstone was never deleted. Decode the
            // bytes before comparing.
            if (!onlyMyself ||
              new String(client.getData.forPath(path), "UTF-8") == hostPort) {
              log.info("Deleting existing tombstone for old twitter commons leader election")
              client.delete().guaranteed().withVersion(tombstone.getVersion).forPath(path)
            }
          } catch {
            case _: KeeperException.NoNodeException =>
            case _: KeeperException.BadVersionException =>
          }
      }
    }
  }
}
| yp-engineering/marathon | src/main/scala/mesosphere/marathon/core/election/impl/CuratorElectionService.scala | Scala | apache-2.0 | 6,967 |
/*
* Copyright (c) 2013 Bridgewater Associates, LP
*
* Distributed under the terms of the Modified BSD License. The full license is in
* the file COPYING, distributed as part of this software.
*/
package notebook
import akka.actor._
/**
* An actor which is expecting to transition through several states guarded by one or more expected messages, and
* which will stash any unexpected messages to be replayed at each state transition.
*/
abstract class GuardedActor extends Actor with Stash {
// Infix alias for Tuple2 so combined guard results can be written as `A & B`.
type &[+A, +B] = (A, B)
object & {
def unapply[A, B](tuple: A & B) = Some(tuple)
}
// Outcome of offering one message to a Guard:
//   Pass     - message not consumed; the caller should stash it.
//   Continue - consumed; guard advanced, stashed messages stay stashed.
//   Restart  - consumed; guard advanced AND stashed messages must be replayed.
//   Complete - the guard produced its final value.
private sealed trait GuardResult[+A] {
// Maps state transitions (Continue/Restart keep their flavor via ifTransition)
// and completion values (via ifComplete); Pass is propagated unchanged.
def transformResult[B](ifTransition: Guard[A] => Guard[B], ifComplete: A => GuardResult[B]): GuardResult[B] = this match {
case Pass => Pass
case Continue(next) => Continue(ifTransition(next))
case Restart(next) => Restart(ifTransition(next))
case Complete(value) => ifComplete(value)
}
// Falls back to `next` only when this result is Pass (evaluated lazily).
def orElse[B >: A](next: => GuardResult[B]): GuardResult[B] = if (this == Pass) next else this
}
private case object Pass extends GuardResult[Nothing]
private final case class Continue[A](next: Guard[A]) extends GuardResult[A]
private final case class Restart[A](next: Guard[A]) extends GuardResult[A]
private final case class Complete[A](value: A) extends GuardResult[A]
// A message-driven state machine step: feed messages in via `>>:` until it
// eventually completes with an A.
trait Guard[+A] { self =>
// Offers a message to this guard and reports how the guard reacted.
protected[GuardedActor] def >>:(msg: Any): GuardResult[A]
// Waits for both guards (in either message order) and completes with the pair.
// Each message is offered to `self` first, then to `other`.
def &[B](other: Guard[B]): Guard[A & B] = new Guard[A & B] {
// One side has already completed; `remaining` is the still-pending side and
// `result` assembles the final pair from its value.
private class Partial[X](remaining: Guard[X])(result: X => A & B) extends Guard[A & B] {
protected[GuardedActor] def >>:(msg: Any) = msg >>: remaining transformResult (new Partial(_)(result), result andThen Complete.apply)
}
protected[GuardedActor] def >>:(msg: Any) =
(msg >>: self transformResult (_ & other, a => Continue(new Partial(other)((a, _))))) orElse
(msg >>: other transformResult (self & _ , b => Continue(new Partial(self) ((_, b)))))
}
// Completes only when the produced value satisfies f; otherwise the triggering
// message is treated as Pass (and will be stashed by the receive loop).
def filter(f: A => Boolean): Guard[A] = new Guard[A] {
protected[GuardedActor] def >>:(msg: Any) = msg >>: self transformResult (_ filter f, a => if (f(a)) Complete(a) else Pass)
}
def map[B](f: A => B): Guard[B] = new Guard[B] {
protected[GuardedActor] def >>:(msg: Any) = msg >>: self transformResult (_ map f, f andThen Complete.apply)
}
// Sequences guards: completion of `self` yields Restart(f(a)), so stashed
// messages are replayed against the follow-up guard.
def flatMap[B](f: A => Guard[B]): Guard[B] = new Guard[B] {
protected[GuardedActor] def >>:(msg: Any) = msg >>: self transformResult (_ flatMap f, f andThen Restart.apply)
}
}
// Guard that completes with receive(msg) on the first message the partial
// function matches; all other messages Pass.
def get[A](receive: PartialFunction[Any, A]): Guard[A] = new Guard[A] {
protected[GuardedActor] def >>:(msg: Any) = receive.lift(msg) match {
case None => Pass
case Some(x) => Complete(x)
}
}
// Guard that completes with the first message assignable to A (checked via ClassTag).
def getType[A](implicit tag : reflect.ClassTag[A]): Guard[A] = new Guard[A] {
protected[GuardedActor] def >>:(msg: Any) = if (tag.runtimeClass.isInstance(msg)) Complete(msg.asInstanceOf[A]) else Pass
}
// Receive loop driving a Guard[Unit]: stash on Pass, advance on Continue,
// advance and replay stashed messages on Restart, and replay once more when
// the guard finally completes.
private[this] def receiveGuarded(guard: Guard[Unit]): Receive = {
case msg => msg >>: guard match {
case Pass =>
stash()
case Continue(next) =>
context.become(receiveGuarded(next))
case Restart(next) =>
context.become(receiveGuarded(next))
unstashAll()
case Complete(()) =>
unstashAll()
}
}
// When the guard completes with the actor's steady-state Receive, `map`'s
// side effect installs it via context.become.
def receive = receiveGuarded(guard.map(context.become(_)))
// Subclasses define the startup protocol: a guard that eventually yields the
// actor's steady-state Receive.
def guard: Guard[Receive]
} | bigdatagenomics/mango-notebook | modules/subprocess/src/main/scala/notebook/GuardedActor.scala | Scala | apache-2.0 | 3,455 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to a sample of code snippets that meet specific criteria, giving a quick overview of the dataset's contents without deeper analysis.