| code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
package mesosphere.marathon
package api.validation
import mesosphere.UnitTest
import mesosphere.marathon.api.v2.ValidationHelper
import mesosphere.marathon.core.health._
import mesosphere.marathon.core.plugin.PluginManager
import mesosphere.marathon.state._
class AppDefinitionMesosHealthCheckValidationTest extends UnitTest {
lazy val validAppDefinition = AppDefinition.validAppDefinition(Set.empty, ValidationHelper.roleSettings())(PluginManager.None)
"AppDefinitionMesosHealthCheckValidation" should {
"app with 0 Mesos health checks is valid" in {
val f = new Fixture
Given("an app with only Marathon Health Checks")
val app = f.app()
Then("the app is considered valid")
validAppDefinition(app).isSuccess shouldBe true
}
"app with 1 Mesos health check is valid" in {
val f = new Fixture
Given("an app with one health check")
val app = f.app(healthChecks = Set(MesosCommandHealthCheck(command = Command("true"))))
Then("the app is considered valid")
validAppDefinition(app).isSuccess shouldBe true
}
"app with more than 1 non-command Mesos health check is invalid" in {
val f = new Fixture
Given("an app with one health check")
val app = f.app(healthChecks = Set(MesosHttpHealthCheck(port = Some(80)), MesosHttpHealthCheck()))
Then("the app is considered invalid")
validAppDefinition(app).isFailure shouldBe true
}
"app with more than 1 command Mesos health check is valid" in {
val f = new Fixture
Given("an app with one health check")
val app =
f.app(healthChecks = Set(MesosCommandHealthCheck(command = Command("true")), MesosCommandHealthCheck(command = Command("true"))))
Then("the app is considered valid")
validAppDefinition(app).isSuccess shouldBe true
}
"health check with port validates port references" in {
val f = new Fixture
Given("an app with one Mesos Health Check but without port")
val mesosHealthByIndex = f.app(Set(MesosHttpHealthCheck(portIndex = Some(PortReference.ByIndex(0)))))
val marathonHealthByIndex = f.app(Set(MarathonHttpHealthCheck(portIndex = Some(PortReference.ByIndex(0)))))
val mesosHealthNoPort = mesosHealthByIndex.copy(portDefinitions = Seq.empty)
val marathonHealthNoPort = marathonHealthByIndex.copy(portDefinitions = Seq.empty)
Then("the app is considered valid")
validAppDefinition(mesosHealthByIndex).isSuccess shouldBe true
validAppDefinition(marathonHealthByIndex).isSuccess shouldBe true
validAppDefinition(mesosHealthNoPort).isSuccess shouldBe false
validAppDefinition(marathonHealthNoPort).isSuccess shouldBe false
}
}
class Fixture {
def app(healthChecks: Set[HealthCheck] = Set(MarathonHttpHealthCheck())): AppDefinition =
AppDefinition(
id = AbsolutePathId("/test"),
role = "*",
cmd = Some("sleep 1000"),
instances = 1,
healthChecks = healthChecks,
portDefinitions = Seq(PortDefinition(0))
)
}
}
| mesosphere/marathon | src/test/scala/mesosphere/marathon/api/validation/AppDefinitionMesosHealthCheckValidationTest.scala | Scala | apache-2.0 | 3,068 |
// Copied from log4s project and licensed under Apache 2.0 license.
// See LogLevel.scala.license
// Verbatim from: https://github.com/Log4s/log4s/blob/v1.3.4/src/main/scala/org/log4s/LogLevel.scala
package org.log4s
/** A severity level that can be assigned to log statements. */
sealed trait LogLevel {
/** The name of this log level. It is spelled with initial capitals */
def name: String = this.toString
/** The name of the SLF4J method that does logging at this level */
private[log4s] def methodName = name.toLowerCase
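// e.g. (illustrative) Warn.name == "Warn" and Warn.methodName == "warn"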
}
/** The highest logging severity. This generally indicates an
* application or system error that causes undesired outcomes.
* An error generally indicates a bug or an environment
* problem that warrants some kind of immediate intervention.
*/
case object Error extends LogLevel
/** Generally indicates something is not expected but the system is
* able to continue operating. This generally indicates a bug or
* environment problem that does not require urgent intervention.
*/
case object Warn extends LogLevel
/** Indicates normal high-level activity. Generally a single user– or
* system-initiated activity will trigger one or two info-level statements.
* (E.g., one when starting and one when finishing for complex requests.)
*/
case object Info extends LogLevel
/** Log statements that provide the ability to trace the progress and
* behavior involved in tracking a single activity. These are useful for
* debugging general issues, identifying how modules are interacting, etc.
*/
case object Debug extends LogLevel
/** Highly localized log statements useful for tracking the decisions made
* inside a single unit of code. These may occur at a very high frequency.
*/
case object Trace extends LogLevel
| indyscala/sbt-plugin-tour | mima/src/main/scala/org/log4s/LogLevel.scala | Scala | mit | 1,786 |
package pub.ayada.scala.sparkUtils.etl.read
trait ReadProps {
def taskType : String
def id : String
def propsFile : String
def printSchema : Boolean = true
def loadCount : Boolean = false
def repartition : Int = 0
def broadcast : Boolean = false
def forceNoPersist : Boolean = false
}
| k-ayada/SparkETL | pub/ayada/scala/sparkUtils/etl/read/ReadProps.scala | Scala | apache-2.0 | 318 |
package eventstore.examples
import akka.actor.{ Props, ActorSystem }
import eventstore.TransactionActor._
import eventstore.tcp.ConnectionActor
import eventstore.{ EventData, TransactionActor, EventStream, TransactionStart }
object StartTransactionExample extends App {
val system = ActorSystem()
val connection = system.actorOf(ConnectionActor.props(), "connection")
val kickoff = Start(TransactionStart(EventStream.Id("my-stream")))
val transaction = system.actorOf(TransactionActor.props(connection, kickoff), "transaction")
implicit val transactionResult = system.actorOf(Props[TransactionResult], "result")
transaction ! GetTransactionId // replies with `TransactionId(transactionId)`
transaction ! Write(EventData("transaction-event")) // replies with `WriteCompleted`
transaction ! Write(EventData("transaction-event")) // replies with `WriteCompleted`
transaction ! Write(EventData("transaction-event")) // replies with `WriteCompleted`
transaction ! Commit // replies with `CommitCompleted`
}
| pawelkaczor/EventStore.JVM | src/main/scala/eventstore/examples/StartTransactionExample.scala | Scala | bsd-3-clause | 1,025 |
package org.tejo.iza.tests
import akka.actor.{ActorRef, ActorSystem}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import org.scalamock.scalatest.MockFactory
import org.scalatest._
import org.tejo.iza.actor.cirkulerilo.DissenduActor.Msg.CirkuleroMsg
import org.tejo.iza.actor.cirkulerilo.redaktilo.Redaktilo
import org.tejo.iza.actor.di.IzaActorModule
import org.tejo.iza.actor.ws.TrelloService
import org.tejo.iza.rules.ClojureNamespace
import org.tejo.iza.rules.facts._
import org.tejo.model._
import scala.concurrent.Future
import scala.concurrent.duration._
class IzaSuite (_system: ActorSystem) extends TestKit(_system) with ImplicitSender with FunSuiteLike with BeforeAndAfterAll with MockFactory with TestData {
def this() = this(ActorSystem("test"))
val trelloMock = mock[TrelloService]
lazy val dissenduProbe = TestProbe()
class TestModule extends IzaActorModule {
override def actorSystem: ActorSystem = _system
override def trelloService: TrelloService = trelloMock
override val redaktilo: Redaktilo = new Redaktilo {
override def redaktu(cirkulero: Cirkulero): String = cirkuleroText
}
override lazy val dissenduActor: ActorRef = dissenduProbe.ref
override lazy val tejoModel: TEJO = tejoData
}
test("integation test") {
// (trelloMock.actionFacts _).expects(boardId).returning(Future.successful{List[ActionFact]()})
(trelloMock.boardFact _).expects(boardId).returning(Future.successful{BoardFact(boardId)})
(trelloMock.cardFacts _).expects(boardId).returning(Future.successful{cardFacts})
(trelloMock.checklistFacts _).expects(boardId).returning(Future.successful{checklistFacts})
(trelloMock.listFacts _).expects(boardId).returning(Future.successful{List[ListFact](listFact)})
val m = new TestModule
val iza = m.izaActor
import org.tejo.iza.actor.IzaActor.Msg._
iza ! ResetWorkingMemory(ClojureNamespace.toLoadCirkuleriloRules::Nil)
iza ! LoadFacts(boardId)
iza ! FireRules
dissenduProbe.expectMsg(20 seconds, CirkuleroMsg(cirkuleroText))
}
override def afterAll() {
TestKit.shutdownActorSystem(system)
}
}
trait TestData {
val tejoData = TEJO(
aktivuloj = List(
Persono("Łukasz ŻEBROWSKI", "lukasz@tejo.org", None, List(Estrarano(Some(Prezidanto)))),
Persono("Tomasz SZYMULA", "tomasz@tejo.org", None, List(Estrarano(Some(Gxensek))))
),
komisionoj = Nil,
sekcioj = Nil
)
val boardId = "<boardId>"
val listId = "<listId>"
val checklistId = "<checklistId>"
val stirkartoId = "<cardId>"
val listFact = ListFact(listId, name = "Aktuala")
val cardFacts: List[CardFact] = List(
CardFact(stirkartoId, listId, name = "Stirkarto"),
CardFact("1", listId, "tomasz@tejo.org", desc = "Mi faris malmulte."),
CardFact("2", listId, "lukasz@tejo.org", desc = "Mi faris multe.")
)
val checklistFacts: List[(ChecklistFact, List[CheckItemFact])] = List((
ChecklistFact(id = checklistId, cardId = stirkartoId),
List(
CheckItemFact(idx = 0, id = "", checklistId = checklistId, name = "", pos = 1, complete = true),
CheckItemFact(idx = 1, id = "", checklistId = checklistId, name = "", pos = 1, complete = true),
CheckItemFact(idx = 2, id = "", checklistId = checklistId, name = "", pos = 1, complete = true),
CheckItemFact(idx = 3, id = "", checklistId = checklistId, name = "", pos = 1, complete = true),
CheckItemFact(idx = 4, id = "", checklistId = checklistId, name = "", pos = 1, complete = true),
CheckItemFact(idx = 5, id = "", checklistId = checklistId, name = "", pos = 1, complete = false),
CheckItemFact(idx = 6, id = "", checklistId = checklistId, name = "", pos = 1, complete = false)
)
))
val cirkuleroText = "Freŝa, bela cirkulero."
}
| tomaszym/izabela | actor/src/test/scala/org/tejo/iza/tests/IzaSuite.scala | Scala | gpl-2.0 | 3,804 |
package ch.epfl.directembedding
import scala.language.experimental.macros
import scala.reflect.macros.blackbox.Context
package object test {
// For testing purposes
def persisted(x: Any): String = macro Persisted.persisted
def inline[T](x: T): T = macro Persisted.inline[T]
private object Persisted {
def persisted(c: Context)(x: c.Expr[Any]): c.Expr[String] = {
import c.universe._
val methodSym = x.tree match {
case Apply(TypeApply(x, targs), args) =>
x.symbol
case Apply(x, args) =>
x.symbol
case TypeApply(x, targs) =>
x.symbol
case field @ Select(x, y) =>
val symbolAnnotations = field.symbol.annotations.filter(_.tree.tpe <:< c.typeOf[persist])
if (symbolAnnotations.isEmpty)
// unfortunately the annotation goes only to the getter
field.symbol.owner.info.members.filter(x => x.name.toString == field.symbol.name + " ").head
else field.symbol
}
val annotArg = methodSym.annotations.filter(_.tree.tpe <:< c.typeOf[persist]).head.tree.children.tail.head
val q"""(new ch.epfl.directembedding.MethodTree({
$tree;
()}): ${ _ })""" = annotArg
c.Expr(q"${showRaw(tree)}")
}
def inline[T](c: Context)(x: c.Expr[T]): c.Expr[T] = {
import c.universe._
val (methodSym, args) = x.tree match {
case Apply(TypeApply(x, targs), args) =>
(x.symbol, args)
case Apply(x, args) =>
(x.symbol, args)
case TypeApply(x, targs) =>
(x.symbol, Nil)
case field @ Select(x, y) =>
val symbolAnnotations = field.symbol.annotations.filter(_.tree.tpe <:< c.typeOf[persist])
if (symbolAnnotations.isEmpty)
// unfortunately the annotation goes only to the getter
(field.symbol.owner.info.members.filter(x => x.name.toString == field.symbol.name + " ").head, Nil)
else (field.symbol, Nil)
}
val annotArg = methodSym.annotations.filter(_.tree.tpe <:< c.typeOf[persist]).head.tree.children.tail.head
val q"""(new ch.epfl.directembedding.MethodTree({
$tree;
()}): ${ _ })""" = annotArg
val res = Macros.inlineMethod(c)(tree, args, methodSym.asMethod.paramLists.head)
c.Expr[T](res)
}
}
}
| directembedding/directembedding | dsls/src/main/scala/ch/epfl/directembedding/test/package.scala | Scala | bsd-3-clause | 2,332 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Cancelable
import monix.execution.ChannelType.SingleProducer
import monix.reactive.Observable
import monix.reactive.observers.{BufferedSubscriber, Subscriber}
private[reactive] final class BufferIntrospectiveObservable[+A](source: Observable[A], maxSize: Int)
extends Observable[List[A]] {
def unsafeSubscribeFn(subscriber: Subscriber[List[A]]): Cancelable =
source.unsafeSubscribeFn(BufferedSubscriber.batched(subscriber, maxSize, SingleProducer))
}
| alexandru/monifu | monix-reactive/shared/src/main/scala/monix/reactive/internal/operators/BufferIntrospectiveObservable.scala | Scala | apache-2.0 | 1,202 |
package pl.writeonly.son2.vaadin.ui
import com.vaadin.ui.{Button, Component, Label, Panel}
import pl.writeonly.son2.vaadin.util.UIUtil
trait Components extends UIUtil {
val components: Seq[Component]
val inputs: Seq[Component]
val convert: Button
val output: Label
def optionsPanel(components: Seq[Component]): Panel = {
val result = new Panel("Options", optionsHorizontalLayout(components))
setWidth(result)
result
}
def list: List[Component] = List(optionsPanel(components)) ++ inputs ++ List(convert, output)
}
| writeonly/scalare | scalare-adin/src/main/scala/pl/writeonly/son2/vaadin/ui/Components.scala | Scala | artistic-2.0 | 544 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.collector.processor
import com.twitter.finagle.Service
import com.twitter.scrooge.BinaryThriftStructSerializer
import com.twitter.zipkin.common.{Annotation, Span}
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.gen
import org.specs.Specification
import org.specs.mock.{JMocker, ClassMocker}
class ScribeFilterSpec extends Specification with JMocker with ClassMocker {
val serializer = new BinaryThriftStructSerializer[gen.Span] {
def codec = gen.Span
}
val mockService = mock[Service[Span, Unit]]
"ScribeFilter" should {
val category = "zipkin"
val base64 = Seq("CgABAAAAAAAAAHsLAAMAAAADYm9vCgAEAAAAAAAAAcgPAAYMAAAAAQoAAQAAAAAAAAABCwACAAAAA2JhaAAPAAgMAAAAAAA=")
val endline = Seq("CgABAAAAAAAAAHsLAAMAAAADYm9vCgAEAAAAAAAAAcgPAAYMAAAAAQoAAQAAAAAAAAABCwACAAAAA2JhaAAPAAgMAAAAAAA=\n")
val validSpan = Span(123, "boo", 456, None, List(new Annotation(1, "bah", None)), Nil)
val serialized = Seq(serializer.toString(validSpan.toThrift))
val bad = Seq("garbage!")
val filter = new ScribeFilter
"convert gen.LogEntry to Span" in {
expect {
one(mockService).apply(validSpan)
}
filter.apply(base64, mockService)
}
"convert gen.LogEntry with endline to Span" in {
expect {
one(mockService).apply(validSpan)
}
filter.apply(endline, mockService)
}
"convert serialized thrift to Span" in {
expect {
one(mockService).apply(validSpan)
}
filter.apply(serialized, mockService)
}
"deal with garbage" in {
expect {}
filter.apply(bad, mockService)
}
}
}
| pteichman/zipkin | zipkin-collector-scribe/src/test/scala/com/twitter/zipkin/collector/processor/ScribeFilterSpec.scala | Scala | apache-2.0 | 2,257 |
package com.twitter.finagle.loadbalancer
import com.twitter.finagle._
import com.twitter.finagle.service.FailingFactory
import com.twitter.util.{Future, Time}
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
import scala.util.control.NonFatal
/**
* A specialized [[ServiceFactory]] which admits that it backs a
* concrete endpoint. The extra information and functionality provided
* here is used by Finagle's load balancers.
*/
trait EndpointFactory[Req, Rep] extends ServiceFactory[Req, Rep] {
/**
* Returns the address which this endpoint connects to.
*/
def address: Address
/**
* Signals to the endpoint that it should close and rebuild
* its underlying resources. That is, `close` is terminal
* but `remake` is not.
*/
def remake(): Unit
}
/**
* An [[EndpointFactory]] that fails to construct services.
*/
private final class FailingEndpointFactory[Req, Rep](cause: Throwable)
extends EndpointFactory[Req, Rep] {
val address: Address = Address.Failed(cause)
def apply(conn: ClientConnection): Future[Service[Req, Rep]] = Future.exception(cause)
def close(deadline: Time): Future[Unit] = Future.Done
def remake(): Unit = {}
override def status: Status = Status.Open
}
private object LazyEndpointFactory {
sealed trait State[-Req, +Rep]
/**
* Indicates that the underlying resource needs to be materialized.
*/
case object Init extends State[Any, Nothing]
/**
* Indicates that the EndpointFactory is closed and will no longer
* admit any service acquisition requests.
*/
case object Closed extends State[Any, Nothing]
/**
* Indicates that the process of building the underlying resources
* is in progress.
*/
case object Making extends State[Any, Nothing]
/**
* Indicates that the EndpointFactory has a materialized backing
* resource which it will proxy service acquisition requests to.
*/
case class Made[Req, Rep](underlying: ServiceFactory[Req, Rep]) extends State[Req, Rep]
}
/**
* An implementation of [[EndpointFactory]] which is lazy. That is, it delays
* the creation of its implementation until it receives the first service acquisition
* request. This is designed to allow the load balancer to construct the stacks
* for a large collection of endpoints and amortize the cost across requests.
* Note, this isn't related to session establishment. Session establishment is
* lazy and on the request path already, but rather creating a large number of
* objects per namer updates can be expensive.
*/
private final class LazyEndpointFactory[Req, Rep](
mk: () => ServiceFactory[Req, Rep],
val address: Address)
extends EndpointFactory[Req, Rep] {
import LazyEndpointFactory._
private[this] val state = new AtomicReference[State[Req, Rep]](Init)
@tailrec def apply(conn: ClientConnection): Future[Service[Req, Rep]] =
state.get match {
case Init =>
if (state.compareAndSet(Init, Making)) {
val underlying = try mk() catch { case NonFatal(exc) =>
new FailingFactory[Req, Rep](exc)
}
// This is the only place where we can transition from `Making`
// to any other state so this is safe. All other spin loops wait
// for the thread that has entered here to exit the `Making`
// state.
state.set(Made(underlying))
}
apply(conn)
case Making => apply(conn)
case Made(underlying) => underlying(conn)
case Closed => Future.exception(new ServiceClosedException)
}
/**
* Returns the underlying [[ServiceFactory]] if it is
* materialized otherwise None. This is useful for testing.
*/
def self: Option[ServiceFactory[Req, Rep]] = state.get match {
case Made(underlying) => Some(underlying)
case _ => None
}
@tailrec def remake(): Unit = state.get match {
case Init | Closed => // nop
case Making => remake()
case s@Made(underlying) =>
// Note, underlying is responsible for draining any outstanding
// service acquisition requests gracefully.
if (!state.compareAndSet(s, Init)) remake()
else underlying.close()
}
@tailrec def close(when: Time): Future[Unit] = state.get match {
case Closed => Future.Done
case Making => close(when)
case Init =>
if (!state.compareAndSet(Init, Closed)) close(when)
else Future.Done
case s@Made(underlying) =>
if (!state.compareAndSet(s, Closed)) close(when)
else underlying.close(when)
}
override def status: Status = state.get match {
case Init | Making => Status.Open
case Closed => Status.Closed
case Made(underlying) => underlying.status
}
override def toString: String = s"EndpointFactory($address)"
}
| koshelev/finagle | finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/EndpointFactory.scala | Scala | apache-2.0 | 4,768 |
package unfiltered.request
/** Note that extractors based on this ignore anything beyond a semicolon in a header */
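// For example (illustrative): an Accept value of "text/html;q=0.9, application/json"
// is extracted as List("text/html", "application/json").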
class RequestHeader(val name: String) {
def unapply[T](req: HttpRequest[T]) = {
def split(raw: String) = raw.split(",") map {
_.trim.takeWhile { _ != ';' } mkString
}
def headers(e: Iterator[String]): List[String] =
List.fromIterator(e).flatMap(split)
headers(req.headers(name)) match {
case Nil => None
case hs => Some(hs)
}
}
def apply[T](req: HttpRequest[T]) = req.headers(name).toList
}
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.10
object Accept extends RequestHeader("Accept")
object AcceptCharset extends RequestHeader("Accept-Charset")
object AcceptEncoding extends RequestHeader("Accept-Encoding")
object AcceptLanguage extends RequestHeader("Accept-Language")
object Authorization extends RequestHeader("Authorization")
object Connection extends RequestHeader("Connection")
object RequestContentType extends RequestHeader("Content-Type")
object Expect extends RequestHeader("Expect")
object From extends RequestHeader("From")
object Host extends RequestHeader("Host")
object IfMatch extends RequestHeader("If-Match")
object IfModifiedSince extends RequestHeader("If-Modified-Since")
object IfNoneMatch extends RequestHeader("If-None-Match")
object IfRange extends RequestHeader("If-Range")
object IfUnmodifiedSince extends RequestHeader("If-Unmodified-Since")
object MaxForwards extends RequestHeader("Max-Forwards")
object ProxyAuthorization extends RequestHeader("Proxy-Authorization")
object Range extends RequestHeader("Range")
object Referer extends RequestHeader("Referer")
object TE extends RequestHeader("TE")
object Upgrade extends RequestHeader("Upgrade")
object UserAgent extends RequestHeader("User-Agent")
object Via extends RequestHeader("Via")
object XForwardedFor extends RequestHeader("X-Forwarded-For")
object Charset {
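// Extracts the charset parameter from a Content-Type header; for example (illustrative),
// "text/html; charset=utf-8" yields the capture "utf-8".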
val Setting = """.*;.*\bcharset=(\S+).*""".r
def unapply[T](req: HttpRequest[T]) = {
List.fromIterator(req.headers(RequestContentType.name)).flatMap {
case Setting(cs) => (cs, req) :: Nil
case _ => Nil
}.firstOption
}
}
| softprops/Unfiltered | library/src/main/scala/request/headers.scala | Scala | mit | 2,196 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.observers.buffers
import monix.execution.internal.Platform
import monix.reactive.observers.Subscriber
import scala.collection.mutable.ListBuffer
/** A `BufferedSubscriber` implementation for the
* [[monix.reactive.OverflowStrategy.BackPressure BackPressured]]
* buffer overflowStrategy that sends events in bundles.
*/
private[monix] final class BatchedBufferedSubscriber[A] private
(out: Subscriber[List[A]], bufferSize: Int)
extends AbstractBackPressuredBufferedSubscriber[A, List[A]](out, bufferSize) { self =>
override protected def fetchNext(): List[A] =
if (queue.isEmpty) null else {
val buffer = ListBuffer.empty[A]
queue.drainToBuffer(buffer, Platform.recommendedBatchSize)
buffer.toList
}
}
private[monix] object BatchedBufferedSubscriber {
/** Builder for [[BatchedBufferedSubscriber]] */
def apply[A](underlying: Subscriber[List[A]], bufferSize: Int): BatchedBufferedSubscriber[A] =
new BatchedBufferedSubscriber[A](underlying, bufferSize)
}
| Wogan/monix | monix-reactive/js/src/main/scala/monix/reactive/observers/buffers/BatchedBufferedSubscriber.scala | Scala | apache-2.0 | 1,701 |
/*
Copyright (C) 2013-2018 Expedia Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.hotels.styx.admin
import java.nio.file.StandardCopyOption.REPLACE_EXISTING
import java.nio.file.{Files, Paths}
import com.google.common.io.Files.createTempDir
import com.hotels.styx.api.HttpRequest.post
import com.hotels.styx.api.HttpResponseStatus.INTERNAL_SERVER_ERROR
import com.hotels.styx.proxy.backends.file.FileBackedBackendServicesRegistry
import com.hotels.styx.support.ResourcePaths.fixturesHome
import com.hotels.styx.support.configuration._
import com.hotels.styx.{StyxClientSupplier, StyxServer, StyxServerSupport}
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfterAll, FunSpec, Matchers}
import scala.collection.JavaConverters._
import scala.concurrent.duration._
class OriginsReloadCommandSpec extends FunSpec
with StyxServerSupport
with StyxClientSupplier
with Matchers
with BeforeAndAfterAll
with Eventually {
val tempDir = createTempDir()
val originsOk = fixturesHome(classOf[OriginsReloadCommandSpec], "/conf/origins/origins-correct.yml")
val originsNok = fixturesHome(classOf[OriginsReloadCommandSpec], "/conf/origins/origins-incorrect.yml")
val styxOriginsFile = Paths.get(tempDir.toString, "origins.yml")
var styxServer: StyxServer = _
it("Responds with INTERNAL_SERVER_ERROR when the origins cannot be read") {
val fileBasedBackendsRegistry = FileBackedBackendServicesRegistry.create(styxOriginsFile.toString)
styxServer = StyxConfig().startServer(fileBasedBackendsRegistry)
Files.copy(originsNok, styxOriginsFile, REPLACE_EXISTING)
val resp = decodedRequest(post(styxServer.adminURL("/admin/tasks/origins/reload")).build())
resp.status() should be(INTERNAL_SERVER_ERROR)
BackendService.fromJava(fileBasedBackendsRegistry.get().asScala.head) should be(
BackendService(
appId = "app",
path = "/",
connectionPoolConfig = ConnectionPoolSettings(
maxConnectionsPerHost = 45,
maxPendingConnectionsPerHost = 15,
connectTimeoutMillis = 1000,
pendingConnectionTimeoutMillis = 8000
),
healthCheckConfig = HealthCheckConfig(
uri = Some("/version.txt"),
interval = 5.seconds
),
responseTimeout = 60.seconds,
origins = Origins(
Origin("localhost", 9090, appId = "app", id = "app1"),
Origin("localhost", 9091, appId = "app", id = "app2")
)
))
}
override protected def beforeAll(): Unit = {
Files.copy(originsOk, styxOriginsFile)
}
override protected def afterAll(): Unit = {
styxServer.stopAsync().awaitTerminated()
Files.delete(styxOriginsFile)
Files.delete(tempDir.toPath)
}
}
| mikkokar/styx | system-tests/e2e-suite/src/test/scala/com/hotels/styx/admin/OriginsReloadCommandSpec.scala | Scala | apache-2.0 | 3,280 |
package zzz.akka.avionics
/**
* Kevin Ying 2015
*/
import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.{TestActorRef, TestKit, TestLatch, ImplicitSender}
import scala.concurrent.duration._
import scala.concurrent.Await
import org.scalatest.{WordSpecLike, BeforeAndAfterAll}
import org.scalatest.MustMatchers
class AltimeterSpec extends TestKit(ActorSystem("AltimeterSpec"))
with ImplicitSender
with WordSpecLike
with MustMatchers
with BeforeAndAfterAll {
import Altimeter._
override def afterAll() {
system.shutdown()
}
// We'll instantiate a Helper class for every test, making
// things nicely reusable.
class Helper {
object EventSourceSpy {
// The latch gives us fast feedback when
// something happens
val latch = TestLatch(1)
}
// Our special derivation of EventSource gives us the
// hooks into concurrency
trait EventSourceSpy extends EventSource {
def sendEvent[T](event: T): Unit =
EventSourceSpy.latch.countDown()
// We don't care about processing the messages that
// EventSource usually processes so we simply don't
// worry about them.
def eventSourceReceive = Actor.emptyBehavior
}
// The slicedAltimeter constructs our Altimeter with
// the EventSourceSpy
def slicedAltimeter = new Altimeter with EventSourceSpy
// This is a helper method that will give us an ActorRef
// and our plain ol' Altimeter that we can work with
// directly.
def actor() = {
val a = TestActorRef[Altimeter](Props(slicedAltimeter))
(a, a.underlyingActor)
}
}
"Altimeter" should {
"record rate of climb changes" in new Helper {
val (_, real) = actor()
real.receive(RateChange(1f))
real.rateOfClimb must be(real.maxRateOfClimb)
}
"keep rate of climb changes within bounds" in new Helper {
val (_, real) = actor()
real.receive(RateChange(2f))
real.rateOfClimb must be(real.maxRateOfClimb)
}
"calculate altitude changes" in new Helper {
val ref = system.actorOf(Props(Altimeter()))
ref ! EventSource.RegisterListener(testActor)
ref ! RateChange(1f)
fishForMessage() {
case AltitudeUpdate(altitude) if altitude == 0f =>
false
case AltitudeUpdate(altitude) =>
true
}
}
"send events" in new Helper {
val (ref, _) = actor()
Await.ready(EventSourceSpy.latch, 1.second)
EventSourceSpy.latch.isOpen must be(true)
}
}
}
| kevyin/akka-book-wyatt | src/test/scala/zzz/akka/avionics/AltimeterTest.scala | Scala | mit | 2,525 |
package at.fabricate.liftdev.common
package model
import net.liftweb.mapper.LongKeyedMapper
// This is the basic mapper entity type
// especially for use in User (without IdPK)
trait BaseEntityWithTitleDescriptionAndIcon [T <: (BaseEntityWithTitleDescriptionAndIcon[T]) ] extends BaseEntity[T] with BaseEntityWithTitleAndDescription[T] with AddIcon[T]
{
self: T =>
}
trait BaseMetaEntityWithTitleDescriptionAndIcon[ModelType <: ( BaseEntityWithTitleDescriptionAndIcon[ModelType]) ] extends BaseMetaEntity[ModelType] with BaseMetaEntityWithTitleAndDescription[ModelType] with AddIconMeta[ModelType]
{
self: ModelType =>
}
| Fabricate/OpenthingsImplementation | src/main/scala/at/fabricate/liftdev/common/model/BaseEntityWithTitleDescriptionAndIcon.scala | Scala | lgpl-3.0 | 644 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.broadcast
import java.io.Serializable
import scala.reflect.ClassTag
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils
/**
* A broadcast variable. Broadcast variables allow the programmer to keep a read-only variable
* cached on each machine rather than shipping a copy of it with tasks. They can be used, for
* example, to give every node a copy of a large input dataset in an efficient manner. Spark also
* attempts to distribute broadcast variables using efficient broadcast algorithms to reduce
* communication cost.
*
 * In other words, a read-only value is cached on every machine instead of being shipped
 * with each task, and Spark uses efficient broadcast algorithms to cut communication cost.
*
* Broadcast variables are created from a variable `v` by calling
* [[org.apache.spark.SparkContext#broadcast]].
* The broadcast variable is a wrapper around `v`, and its value can be accessed by calling the
* `value` method. The interpreter session below shows this:
*
 * That is, the broadcast variable is created from `v` via
 * [[org.apache.spark.SparkContext#broadcast]]; it wraps `v`, and its value is read through
 * the `value` method, as the interpreter session below shows:
*
* {{{
* scala> val broadcastVar = sc.broadcast(Array(1, 2, 3))
* broadcastVar: org.apache.spark.broadcast.Broadcast[Array[Int]] = Broadcast(0)
*
* scala> broadcastVar.value
* res0: Array[Int] = Array(1, 2, 3)
* }}}
*
* After the broadcast variable is created, it should be used instead of the value `v` in any
* functions run on the cluster so that `v` is not shipped to the nodes more than once.
* In addition, the object `v` should not be modified after it is broadcast in order to ensure
* that all nodes get the same value of the broadcast variable (e.g. if the variable is shipped
* to a new node later).
*
 * In short, use the broadcast variable rather than `v` inside cluster functions so that `v`
 * is not shipped to the nodes more than once, and do not modify `v` after broadcasting so
 * that every node sees the same value (e.g. if the variable is later shipped to a new node).
*
* @param id A unique identifier for the broadcast variable.
* @tparam T Type of the data contained in the broadcast variable.
*/
abstract class Broadcast[T: ClassTag](val id: Long) extends Serializable with Logging {
/**
* Flag signifying whether the broadcast variable is valid
* (that is, not already destroyed) or not.
*/
@volatile private var _isValid = true
private var _destroySite = ""
/** Get the broadcasted value. */
def value: T = {
assertValid()
getValue()
}
/**
* Asynchronously delete cached copies of this broadcast on the executors.
* If the broadcast is used after this is called, it will need to be re-sent to each executor.
 * Note that the value remains stored on the driver; only the executors' cached copies are
 * removed, so the broadcast will be re-sent to each executor if it is used again.
*/
def unpersist() {
unpersist(blocking = false)
}
/**
* Delete cached copies of this broadcast on the executors. If the broadcast is used after
* this is called, it will need to be re-sent to each executor.
 * Unlike the no-argument overload above, this call is not necessarily asynchronous.
* @param blocking Whether to block until unpersisting has completed
*/
def unpersist(blocking: Boolean) {
assertValid()
doUnpersist(blocking)
}
/**
* Destroy all data and metadata related to this broadcast variable. Use this with caution;
* once a broadcast variable has been destroyed, it cannot be used again.
* This method blocks until destroy has completed
 * In other words, destruction is permanent, and this method blocks until it has completed.
*/
def destroy() {
destroy(blocking = true)
}
/**
* Destroy all data and metadata related to this broadcast variable. Use this with caution;
* once a broadcast variable has been destroyed, it cannot be used again.
* @param blocking Whether to block until destroy has completed
*/
private[spark] def destroy(blocking: Boolean) {
assertValid()
_isValid = false
_destroySite = Utils.getCallSite().shortForm
logInfo("Destroying %s (from %s)".format(toString, _destroySite))
doDestroy(blocking)
}
/**
* Whether this Broadcast is actually usable. This should be false once persisted state is
* removed from the driver.
 * That is, it becomes false once the persisted state has been removed from the driver.
*/
private[spark] def isValid: Boolean = {
_isValid
}
/**
* Actually get the broadcasted value. Concrete implementations of Broadcast class must
* define their own way to get the value.
 * Each concrete Broadcast implementation defines its own way of fetching the value.
*/
protected def getValue(): T
/**
* Actually unpersist the broadcasted value on the executors. Concrete implementations of
* Broadcast class must define their own logic to unpersist their own data.
 * Each concrete implementation defines its own logic for unpersisting its data on the executors.
*/
protected def doUnpersist(blocking: Boolean)
/**
* Actually destroy all data and metadata related to this broadcast variable.
* Implementation of Broadcast class must define their own logic to destroy their own
* state.
 * Each concrete implementation defines its own logic for destroying its own state.
*/
protected def doDestroy(blocking: Boolean)
/** Check if this broadcast is valid. If not valid, exception is thrown. */
protected def assertValid() {
if (!_isValid) {
throw new SparkException(
"Attempted to use %s after it was destroyed (%s) ".format(toString, _destroySite))
}
}
override def toString: String = "Broadcast(" + id + ")"
}
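// Illustrative usage sketch (assumed, not part of the original file); `sc` is a SparkContext
// and `lookup` a driver-side Map[String, Int]:
//   val bc = sc.broadcast(lookup)                    // created on the driver
//   rdd.map(x => bc.value.getOrElse(x, 0)).count()   // executors read the cached value
//   bc.unpersist()                                   // drop executor copies; re-sent if used again
//   bc.destroy()                                     // remove everywhere; bc is unusable afterwards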
| MrCodeYu/spark | core/src/main/scala/org/apache/spark/broadcast/Broadcast.scala | Scala | apache-2.0 | 7,020 |
package com.twitter.finagle.memcached.protocol.text
import com.twitter.finagle.framer.{Framer => FinagleFramer}
import com.twitter.finagle.memcached.util.ParserUtils
import com.twitter.io.Buf
import scala.collection.mutable.ArrayBuffer
private[memcached] object Framer {
private sealed trait State
private case object AwaitingTextFrame extends State
private case class AwaitingDataFrame(bytesNeeded: Int) extends State
private val EmptySeq = IndexedSeq.empty[Buf]
private val TokenDelimiter: Byte = ' '
}
/**
 * Frames Bufs into Memcached frames. Memcached frames are one of two types:
 * text frames and data frames. Text frames are delimited by `\r\n`. If a text
 * frame starts with the token `VALUE`, a data frame will follow. The length of the
 * data frame is given by the string representation of the third token in the
 * text frame. The data frame also ends with `\r\n`.
*
* For more information, see https://github.com/memcached/memcached/blob/master/doc/protocol.txt.
*
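 * An illustrative exchange (assumed example, not taken from the original source): the server
 * response `VALUE foo 0 5\r\nhello\r\nEND\r\n` is split into three frames: the text frame
 * `VALUE foo 0 5` (announcing a 5-byte data frame), the data frame `hello`, and the text
 * frame `END`.
 *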
* To simplify the decoding logic, we have decoupled framing and decoding; however, because of the
* complex framing logic, we must partially decode messages during framing to frame correctly.
*
* @note Class contains mutable state. Not thread-safe.
*/
private[memcached] trait Framer extends FinagleFramer {
import Framer._
private[this] var accum: Buf = Buf.Empty
private[this] var state: State = AwaitingTextFrame
protected val byteArrayForBuf2Int: Array[Byte] = ParserUtils.newByteArrayForBuf2Int()
/**
 * Return the number of bytes before `\r\n` (newline), or -1 if no newlines found
*/
private[this] def bytesBeforeLineEnd(buf: Buf): Int = {
val finder = new Buf.Indexed.Processor {
private[this] var prevCh: Byte = _
def apply(byte: Byte): Boolean = {
if (byte == '\n' && prevCh == '\r') {
false
} else {
prevCh = byte
true
}
}
}
val pos = Buf.Indexed.coerce(buf).process(finder)
if (pos == -1) -1 else pos - 1
}
/**
* Using the current accumulation of Bufs, read the next frame. If no frame can be read,
* return null.
*/
private def extractFrame(): Buf =
state match {
case AwaitingTextFrame =>
val frameLength = bytesBeforeLineEnd(accum)
if (frameLength < 0) {
null
} else {
// We have received a text frame. Extract the frame.
val frameBuf: Buf = accum.slice(0, frameLength)
// Remove the extracted frame from the accumulator, stripping the newline (2 chars)
accum = accum.slice(frameLength + 2, accum.length)
val tokens = ParserUtils.split(Buf.ByteArray.Owned.extract(frameBuf), TokenDelimiter)
val bytesNeeded = dataLength(tokens)
// If the frame starts with "VALUE", we expect a data frame to follow,
// of length `bytesNeeded`.
if (bytesNeeded != -1) state = AwaitingDataFrame(bytesNeeded)
frameBuf
}
case AwaitingDataFrame(bytesNeeded) =>
// A data frame ends with `\r\n`, so we must wait for `bytesNeeded + 2` bytes.
if (accum.length >= bytesNeeded + 2) {
// Extract the data frame
val frameBuf: Buf = accum.slice(0, bytesNeeded)
// Remove the extracted frame from the accumulator, stripping the newline (2 chars)
accum = accum.slice(bytesNeeded + 2, accum.length)
state = AwaitingTextFrame
frameBuf
} else {
null
}
}
/**
* Frame a Buf and any accumulated partial frames into as many Memcached frames as possible.
*/
def apply(buf: Buf): IndexedSeq[Buf] = {
accum = accum.concat(buf)
var frame = extractFrame()
if (frame != null) {
// The average Gizmoduck memcached pipeline has 0-1 requests pending, and the average server
// response is split into 2 memcached protocol frames, so we chose 2 as the initial array
// size.
val frames = new ArrayBuffer[Buf](2)
do {
frames += frame
frame = extractFrame()
} while (frame != null)
frames.toIndexedSeq
} else {
EmptySeq
}
}
/**
* Given a sequence of Buf tokens that comprise a Memcached frame,
* return the length of data expected in the next frame, or -1
* if the length cannot be extracted.
*/
def dataLength(tokens: IndexedSeq[Buf]): Int
}
| spockz/finagle | finagle-memcached/src/main/scala/com/twitter/finagle/memcached/protocol/text/Framer.scala | Scala | apache-2.0 | 4,425 |
package com.github.acrisci.commander.errors
class ProgramParseException(message: String) extends RuntimeException(message)
| acrisci/commander-scala | src/main/scala/com/github/acrisci/commander/errors/ProgramParseException.scala | Scala | mit | 119 |
package scalakurs.basics.koans.support
import org.scalatest.Stopper
object Master extends Stopper {
var studentNeedsToMeditate = false
override def apply() = studentNeedsToMeditate
type HasTestNameAndSuiteName = {
val suiteName: String
val testName: String
}
def studentFailed (event: HasTestNameAndSuiteName): String = {
studentNeedsToMeditate = true
meditationMessage(event)
}
private def meditationMessage(event: HasTestNameAndSuiteName) = {
"Please meditate on koan \\"%s\\" of suite \\"%s\\"" format (event.testName, event.suiteName)
}
}
| elacin/scala-kurs | oppgaver/src/test/scala/scalakurs/basics/koans/support/Master.scala | Scala | apache-2.0 | 583 |
package scadoop
import collection._
import System._
import util.Random
import org.apache.hadoop.conf._
import org.apache.hadoop.filecache._
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io._
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat
import org.apache.hadoop.util._
import java.net._
object Implicits {
implicit def text2str(text: Text) = text.toString
implicit def text2richstr(text: Text) = new immutable.WrappedString(text.toString)
implicit def intwritable2int(n: IntWritable) = n.get
}
// Must be serializable so that options etc. can be transmitted to mappers/reducers.
// Uses DelayedInit, similar to Scala's App trait, so the user can write code directly in the app's constructor;
// however, that constructor code is run in run() instead of main() to conform to the Hadoop Tool pattern.
@serializable trait ScadoopApp extends Configured with Tool with DelayedInit {
private var _args: Array[String] = _
protected def args: Array[String] = _args
private val code = new mutable.ListBuffer[() => Unit]
override def delayedInit(body: => Unit) = code += (() => body)
override def run(args: Array[String]): Int = {
this._args = args
for(cmd <- code) cmd()
0 // ignored since client code should call exit
}
def main(args: Array[String]): Unit = { // child code should call exit()
ToolRunner.run(new Configuration, this, args)
}
}
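// Illustrative usage of the pattern above (assumed sketch; wordCountMapper, wordCountReducer and
// the argument layout are hypothetical, not part of this file):
//   object WordCountApp extends ScadoopApp {
//     val pipeline = Pipeline.add("word-count", wordCountMapper, wordCountReducer, None)
//     System.exit(pipeline.runWithExitCode(args(0), args(1), args(2)))
//   }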
object Pipeline {
// enforces key-value types match via scala's type system
// notice that KIn and VIn are bound at the class level here to enforce that IO types are compatible
def add[KIn, VIn, KX, VX, KOut:Manifest, VOut:Manifest](
name: String,
mapper: (Iterator[(KIn,VIn)],MapContext) => Iterator[(KX,VX)],
reducer: (Iterator[(KX,Iterator[VX])],ReduceContext) => Iterator[(KOut, VOut)],
combiner: Option[(Iterator[(KX,Iterator[VX])],ReduceContext) => Iterator[(KX, VX)]]
): Pipeline[KOut,VOut]
= addPriv(List(), name, mapper, reducer, combiner)
private def addPriv[KIn, VIn, KX, VX, KOut:Manifest, VOut:Manifest](
prevJobs: List[Job],
name: String,
mapper: (Iterator[(KIn,VIn)],MapContext) => Iterator[(KX,VX)],
reducer: (Iterator[(KX,Iterator[VX])],ReduceContext) => Iterator[(KOut, VOut)],
combiner: Option[(Iterator[(KX,Iterator[VX])],ReduceContext) => Iterator[(KX, VX)]]
): Pipeline[KOut,VOut] = {
val job = new Job(new Configuration, name)
job.setJarByClass(mapper.getClass)
val mapperBin = "mapper.bin"
IOUtil.write(mapper, mapperBin)
job.getConfiguration.set(SimpleMapper.SERIALIZED_NAME, mapperBin)
DistributedCache.addCacheFile(new URI(mapperBin), job.getConfiguration)
job.setMapperClass(classOf[SimpleMapper[KIn,VIn,KX,VX]])
// REDUCER SPECIFIC
job.setReducerClass(classOf[SimpleReducer[KX,VX,KOut,VOut]])
val reducerBin = "reducer.bin"
IOUtil.write(reducer, reducerBin)
job.getConfiguration.set(SimpleReducer.SERIALIZED_NAME, reducerBin)
DistributedCache.addCacheFile(new URI(reducerBin), job.getConfiguration)
combiner match {
case Some(c) => {
job.setCombinerClass(classOf[SimpleCombiner[KX,VX]])
val combinerBin = "combiner.bin"
IOUtil.write(c, combinerBin)
job.getConfiguration.set(SimpleCombiner.SERIALIZED_NAME, combinerBin)
DistributedCache.addCacheFile(new URI(combinerBin), job.getConfiguration)
}
case None => ;
}
job.setOutputKeyClass(manifest[KOut].erasure)
job.setOutputValueClass(manifest[VOut].erasure)
// MAPPER ONLY
// job.setOutputKeyClass(manifest[KX].erasure)
// job.setOutputValueClass(manifest[VX].erasure)
job.setInputFormatClass(classOf[TextInputFormat])
job.setOutputFormatClass(classOf[TextOutputFormat[_,_]])
new Pipeline[KOut,VOut](prevJobs :+ job)
}
}
class Pipeline[KIn,VIn] private[scadoop] (jobs: List[Job]) {
// TODO: Convenience add methods without reducer, etc.
def add[KX, VX, KOut:Manifest, VOut:Manifest](
name: String,
mapper: (Iterator[(KIn,VIn)],MapContext) => Iterator[(KX,VX)],
reducer: (Iterator[(KX,Iterator[VX])],ReduceContext) => Iterator[(KOut, VOut)],
combiner: Option[(Iterator[(KX,Iterator[VX])],ReduceContext) => Iterator[(KX, VX)]]
): Pipeline[KOut, VOut]
= Pipeline.addPriv(jobs, name, mapper, reducer, combiner)
// NOTE: All HDFS paths
def run(inDir: String, outDir: String, tmpDir: String): Boolean = {
var prevDir = new Path(inDir)
for( (job,i) <- jobs.zipWithIndex) {
// XXX: This mutates an otherwise immutable job
// TODO: Ensure path doesn't exist
val curOutDir = i match {
case _ if(i < jobs.size-1) => new Path(tmpDir, "scadoop-%s".format(Random.alphanumeric.take(10)))
case _ => new Path(outDir)
}
FileInputFormat.addInputPath(job, prevDir)
FileOutputFormat.setOutputPath(job, curOutDir)
if(!job.waitForCompletion(true)) {
err.println("%sERROR: JOB FAILED%s".format(Console.RED, Console.RESET))
return false
}
prevDir = curOutDir
}
return true
}
def runWithExitCode(inDir: String, outDir: String, tmpDir: String): Int = {
run(inDir, outDir, tmpDir) match {
case true => 0
case false => 1
}
}
}
object IOUtil {
import java.io._
def write(obj: AnyRef, filename: String) = {
import java.io._
val out = new ObjectOutputStream(new FileOutputStream(filename))
out.writeObject(obj)
out.close
}
// TODO: Determine most generic type parameters here
def read(inStream: InputStream): AnyRef = {
import java.io._
val in = new ObjectInputStream(inStream)
val obj = in.readObject
in.close
obj
}
}
class Context(hContext: TaskInputOutputContext[_,_,_,_]) {
import collection.JavaConversions._
def getCounter(groupName: String, counterName: String): Counter
= hContext.getCounter(groupName, counterName)
def getTaskAttemptID: TaskAttemptID = hContext.getTaskAttemptID
def setStatus(msg: String) = hContext.setStatus(msg)
def getStatus: String = hContext.getStatus
def getConfiguration: Configuration = hContext.getConfiguration
def conf = getConfiguration
def getJobID: JobID = hContext.getJobID
def getJobName: String = hContext.getJobName
def getNumReduceTasks: Int = hContext.getNumReduceTasks
def getWorkingDirectory: Path = hContext.getWorkingDirectory
def progress: Unit = hContext.progress
// XXX: There might be collisions
private def mapify(paths: Array[Path]): Map[String, Path]
= if(paths == null) Map.empty
else paths.map(path => (path.getName, path) ).toMap
lazy val getDistributedCacheFiles: Map[String, Path] = {
// hack for bug in local runner
if(DistributedCache.getLocalCacheFiles(conf) == null) {
DistributedCache.setLocalFiles(conf, conf.get("mapred.cache.files"));
}
mapify(DistributedCache.getLocalCacheFiles(getConfiguration))
}
lazy val getDistributedCacheArchives: Map[String, Path] = {
// hack for bug in local runner
if(DistributedCache.getLocalCacheArchives(conf) == null) {
DistributedCache.setLocalArchives(conf, conf.get("mapred.cache.archives"));
}
mapify(DistributedCache.getLocalCacheArchives(getConfiguration))
}
}
class MapContext(hContext: org.apache.hadoop.mapreduce.MapContext[_,_,_,_]) extends Context(hContext) {
def getInputSplit: InputSplit = hContext.getInputSplit
}
class ReduceContext(hContext: org.apache.hadoop.mapreduce.ReduceContext[_,_,_,_]) extends Context(hContext);
object SimpleMapper {
val SERIALIZED_NAME = "scadoop.mapper.file"
}
class SimpleMapper[KIn,VIn,KOut,VOut] extends Mapper[KIn,VIn,KOut,VOut] {
override def run(hContext: Mapper[KIn,VIn,KOut,VOut]#Context): Unit = {
super.setup(hContext)
val conf = hContext.getConfiguration
val serializedMapper = conf.get(SimpleMapper.SERIALIZED_NAME)
val context = new scadoop.MapContext(hContext)
val mapperPath = context.getDistributedCacheFiles.get(serializedMapper).get
//throw new RuntimeException("Serialized mapper function 'mapper.bin' not found in distributed cache.")
val inStream = mapperPath.getFileSystem(conf).open(mapperPath)
val delegate = IOUtil.read(inStream).
asInstanceOf[Function2[Iterator[(KIn,VIn)],MapContext,Iterator[(KOut,VOut)]]]
def next() = hContext.nextKeyValue() match {
case true => Some( (hContext.getCurrentKey, hContext.getCurrentValue) )
case false => None
}
// create a *lazy* iterator (all iterators are lazy) over input records
val it = Iterator.continually(next).takeWhile(_ != None).map(_.get)
for( (outKey, outValue) <- delegate(it, context)) {
hContext.write(outKey, outValue)
}
super.cleanup(hContext)
}
}
object SimpleReducer {
val SERIALIZED_NAME = "scadoop.reducer.file"
}
// TODO: Type Parameters
class SimpleReducer[KIn,VIn,KOut,VOut] extends Reducer[KIn,VIn,KOut,VOut] {
def serializedName = SimpleReducer.SERIALIZED_NAME
override def run(hContext: Reducer[KIn,VIn,KOut,VOut]#Context): Unit = {
super.setup(hContext)
val conf = hContext.getConfiguration
val serializedReducer = conf.get(serializedName)
val context = new scadoop.ReduceContext(hContext)
val reducerPath = context.getDistributedCacheFiles.get(serializedReducer).get
//throw new RuntimeException("Serialized reducer function 'reduder.bin' not found in distributed cache.")
val inStream = reducerPath.getFileSystem(conf).open(reducerPath)
val delegate = IOUtil.read(inStream).
asInstanceOf[Function2[Iterator[(KIn,Iterator[VIn])], ReduceContext,Iterator[(KOut,VOut)]]]
import collection.JavaConversions._
def next() = hContext.nextKeyValue() match {
case true => Some( (hContext.getCurrentKey, asScalaIterator(hContext.getValues.iterator)) )
case false => None
}
// create a *lazy* iterator (with view) over input records
val it = Iterator.continually(next).takeWhile(_ != None).map(_.get)
for( (outKey, outValue) <- delegate(it, context)) {
hContext.write(outKey, outValue)
}
super.cleanup(hContext)
}
}
object SimpleCombiner {
val SERIALIZED_NAME = "scadoop.combiner.file"
}
class SimpleCombiner[K,V] extends SimpleReducer[K,V,K,V] {
override def serializedName = SimpleCombiner.SERIALIZED_NAME
}
| jhclark/scadoop | src/scadoop.scala | Scala | lgpl-3.0 | 10,537 |
class A {
val n = {
val z = {
lazy val bb = 1
bb
}
val a = {
lazy val cc = 2
cc
}
lazy val b = {
lazy val dd = 3
dd
}
z
}
}
class B {
locally {
lazy val ms = "as"
ms
}
}
class C {
val things = List("things")
if (things.size < 100) {
lazy val msg = "foo"
msg
}
}
class D {
val things = List("things")
if (things.size < 100) {
if (things.size > 10) {
lazy val msg = "foo"
msg
}
}
}
| folone/dotty | tests/untried/pos/t3670.scala | Scala | bsd-3-clause | 570 |
// Copyright 2015-2016 Ricardo Gladwell.
// Licensed under the GNU Lesser General Public License.
// See the README.md file for more information.
package microtesia.properties
import microtesia.{MicrodataString, Tag}
import scala.util.Success
private[microtesia] trait StringPropertyParsing[N] extends PropertyParsing[N] {
abstract override def parseProperty: PropertyParser[N] = super.parseProperty.orElse{
case element @ Tag(_) => Success(MicrodataString(element.value))
}
}
| rgladwell/microtesia | src/main/scala/microtesia/properties/StringPropertyParsing.scala | Scala | lgpl-3.0 | 490 |
package rta.cron
import java.util.{Arrays, Calendar, Date}
final case class CronExpression(minute: CronExpression.Value, hour: CronExpression.Value,
dayOfMonth: CronExpression.Value, month: CronExpression.Value,
dayOfWeek: CronExpression.Value, year: Option[CronExpression.Value]) {
require(minute.min >= 0 && minute.max <= 59)
require(hour.min >= 0 && hour.max <= 23)
require(dayOfMonth.min >= 1 && dayOfMonth.max <= 31)
require(month.min >= 1 && month.max <= 12)
require(dayOfWeek.min >= 0 && dayOfWeek.max <= 6)
require(year.forall(y => y.min >= 1970 && y.max <= 2099))
private def adjustDate(calendar: Calendar)(field: Int,
value: CronExpression.Value, includeSelf: Boolean, bias: Int): Boolean = {
value.next(calendar.get(field) - bias, includeSelf = includeSelf).fold {
calendar.set(field, value.min + bias)
false
} { v =>
calendar.set(field, v + bias)
true
}
}
def nextDate(from: Date): Option[Date] = {
val calendar = Calendar.getInstance()
calendar.setTime(from)
val adjust = adjustDate(calendar) _
val includeSelf1 = adjust(Calendar.MINUTE, minute, true, 0)
val includeSelf2 = adjust(Calendar.HOUR_OF_DAY, hour, includeSelf1, 0)
val includeSelf3 = adjust(Calendar.DAY_OF_MONTH, dayOfMonth, includeSelf2, 0)
val includeSelf4 = adjust(Calendar.MONTH, month, includeSelf3, -1)
val includeSelf5 = adjust(Calendar.DAY_OF_WEEK, dayOfWeek, includeSelf4, 1)
val success = year.forall(adjust(Calendar.YEAR, _, includeSelf5, 0))
if (success) Some(calendar.getTime) else None
}
}
object CronExpression {
sealed abstract class Value {
def min: Int
def max: Int
def next(from: Int, includeSelf: Boolean): Option[Int]
}
case class List(min: Int, rest: Array[Int]) extends Value {
def max: Int = rest.lastOption.getOrElse(min) // rest is sorted ascending, so its last element is the maximum
def next(from: Int, includeSelf: Boolean) = from match {
case `min` =>
if (includeSelf) Some(min)
else rest.headOption
case _ =>
if (from < min || rest.isEmpty || from > rest.last ||
(!includeSelf && from == rest.last)) None
else {
val idx = Arrays.binarySearch(rest, from)
if (idx >= 0) Some(if (includeSelf) rest(idx) else rest(idx + 1))
else Some(rest(Math.abs(idx) - 1))
}
}
}
case class Range(min: Int, max: Int, step: Int = 1) extends Value {
def next(from: Int, includeSelf: Boolean) = {
if (from < min || from > max || (!includeSelf && from == max)) None
else if ((from - min) % step == 0) {
if (includeSelf) Some(from) else Some(from + step)
} else {
val length = (max - min) / step + 1
val idx = length - ((min + length * step) - from) / step
Some(min + idx * step)
}
}
}
}
| kjanosz/RuleThemAll | utils/src/main/scala/rta/cron/CronExpression.scala | Scala | apache-2.0 | 2,816 |
package io.jfc
import cats.data.{ NonEmptyList, Validated, Xor }
import io.jfc.test.CursorSuite
class CursorTests extends CursorSuite[Cursor] {
def fromJson(j: Json): Cursor = Cursor(j)
def top(c: Cursor): Option[Json] = Some(c.top)
def focus(c: Cursor): Option[Json] = Some(c.focus)
def fromResult(result: Option[Cursor]): Option[Cursor] = result
}
class HCursorTests extends CursorSuite[HCursor] {
def fromJson(j: Json): HCursor = Cursor(j).hcursor
def top(c: HCursor): Option[Json] = Some(c.top)
def focus(c: HCursor): Option[Json] = Some(c.focus)
def fromResult(result: ACursor): Option[HCursor] = result.success
}
class ACursorTests extends CursorSuite[ACursor] {
def fromJson(j: Json): ACursor = Cursor(j).hcursor.acursor
def top(c: ACursor): Option[Json] = c.top
def focus(c: ACursor): Option[Json] = c.focus
def fromResult(result: ACursor): Option[ACursor] = result.success.map(_.acursor)
}
| non/circe | core/src/test/scala/io/jfc/CursorTests.scala | Scala | apache-2.0 | 927 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import scala.collection.mutable
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.{expressions, InternalRow}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils}
import org.apache.spark.sql.types.{StringType, StructType}
/**
* An abstract class that represents [[FileIndex]]s that are aware of partitioned tables.
* It provides the necessary methods to parse partition data based on a set of files.
*
 * @param parameters a set of options to control partition discovery
 * @param userSpecifiedSchema an optional user-specified schema that will be used to provide
* types for the discovered partitions
*/
abstract class PartitioningAwareFileIndex(
sparkSession: SparkSession,
parameters: Map[String, String],
userSpecifiedSchema: Option[StructType],
fileStatusCache: FileStatusCache = NoopCache) extends FileIndex with Logging {
import PartitioningAwareFileIndex.BASE_PATH_PARAM
/** Returns the specification of the partitions inferred from the data. */
def partitionSpec(): PartitionSpec
override def partitionSchema: StructType = partitionSpec().partitionColumns
protected val hadoopConf: Configuration =
sparkSession.sessionState.newHadoopConfWithOptions(parameters)
protected def leafFiles: mutable.LinkedHashMap[Path, FileStatus]
protected def leafDirToChildrenFiles: Map[Path, Array[FileStatus]]
protected lazy val pathGlobFilter = parameters.get("pathGlobFilter").map(new GlobFilter(_))
protected def matchGlobPattern(file: FileStatus): Boolean = {
pathGlobFilter.forall(_.accept(file.getPath))
}
protected lazy val recursiveFileLookup = {
parameters.getOrElse("recursiveFileLookup", "false").toBoolean
}
override def listFiles(
partitionFilters: Seq[Expression], dataFilters: Seq[Expression]): Seq[PartitionDirectory] = {
def isNonEmptyFile(f: FileStatus): Boolean = {
isDataPath(f.getPath) && f.getLen > 0
}
val selectedPartitions = if (partitionSpec().partitionColumns.isEmpty) {
PartitionDirectory(InternalRow.empty, allFiles().filter(isNonEmptyFile)) :: Nil
} else {
if (recursiveFileLookup) {
throw new IllegalArgumentException(
"Datasource with partition do not allow recursive file loading.")
}
prunePartitions(partitionFilters, partitionSpec()).map {
case PartitionPath(values, path) =>
val files: Seq[FileStatus] = leafDirToChildrenFiles.get(path) match {
case Some(existingDir) =>
// Directory has children files in it, return them
existingDir.filter(f => matchGlobPattern(f) && isNonEmptyFile(f))
case None =>
// Directory does not exist, or has no children files
Nil
}
PartitionDirectory(values, files)
}
}
logTrace("Selected files after partition pruning:\\n\\t" + selectedPartitions.mkString("\\n\\t"))
selectedPartitions
}
/** Returns the list of files that will be read when scanning this relation. */
override def inputFiles: Array[String] =
allFiles().map(_.getPath.toUri.toString).toArray
override def sizeInBytes: Long = allFiles().map(_.getLen).sum
def allFiles(): Seq[FileStatus] = {
val files = if (partitionSpec().partitionColumns.isEmpty && !recursiveFileLookup) {
// For each of the root input paths, get the list of files inside them
rootPaths.flatMap { path =>
// Make the path qualified (consistent with listLeafFiles and bulkListLeafFiles).
val fs = path.getFileSystem(hadoopConf)
val qualifiedPathPre = fs.makeQualified(path)
val qualifiedPath: Path = if (qualifiedPathPre.isRoot && !qualifiedPathPre.isAbsolute) {
// SPARK-17613: Always append `Path.SEPARATOR` to the end of parent directories,
// because the `leafFile.getParent` would have returned an absolute path with the
// separator at the end.
new Path(qualifiedPathPre, Path.SEPARATOR)
} else {
qualifiedPathPre
}
// There are three cases possible with each path
// 1. The path is a directory and has children files in it. Then it must be present in
// leafDirToChildrenFiles as those children files will have been found as leaf files.
// Find its children files from leafDirToChildrenFiles and include them.
// 2. The path is a file, then it will be present in leafFiles. Include this path.
// 3. The path is a directory, but has no children files. Do not include this path.
leafDirToChildrenFiles.get(qualifiedPath)
.orElse { leafFiles.get(qualifiedPath).map(Array(_)) }
.getOrElse(Array.empty)
}
} else {
leafFiles.values.toSeq
}
files.filter(matchGlobPattern)
}
protected def inferPartitioning(): PartitionSpec = {
if (recursiveFileLookup) {
PartitionSpec.emptySpec
} else {
// We use leaf dirs containing data files to discover the schema.
val leafDirs = leafDirToChildrenFiles.filter { case (_, files) =>
files.exists(f => isDataPath(f.getPath))
}.keys.toSeq
val caseInsensitiveOptions = CaseInsensitiveMap(parameters)
val timeZoneId = caseInsensitiveOptions.get(DateTimeUtils.TIMEZONE_OPTION)
.getOrElse(sparkSession.sessionState.conf.sessionLocalTimeZone)
PartitioningUtils.parsePartitions(
leafDirs,
typeInference = sparkSession.sessionState.conf.partitionColumnTypeInferenceEnabled,
basePaths = basePaths,
userSpecifiedSchema = userSpecifiedSchema,
caseSensitive = sparkSession.sqlContext.conf.caseSensitiveAnalysis,
validatePartitionColumns = sparkSession.sqlContext.conf.validatePartitionColumns,
timeZoneId = timeZoneId)
}
}
private def prunePartitions(
predicates: Seq[Expression],
partitionSpec: PartitionSpec): Seq[PartitionPath] = {
val PartitionSpec(partitionColumns, partitions) = partitionSpec
val partitionColumnNames = partitionColumns.map(_.name).toSet
val partitionPruningPredicates = predicates.filter {
_.references.map(_.name).toSet.subsetOf(partitionColumnNames)
}
if (partitionPruningPredicates.nonEmpty) {
val predicate = partitionPruningPredicates.reduce(expressions.And)
val boundPredicate = Predicate.createInterpreted(predicate.transform {
case a: AttributeReference =>
val index = partitionColumns.indexWhere(a.name == _.name)
BoundReference(index, partitionColumns(index).dataType, nullable = true)
})
val selected = partitions.filter {
case PartitionPath(values, _) => boundPredicate.eval(values)
}
logInfo {
val total = partitions.length
val selectedSize = selected.length
val percentPruned = (1 - selectedSize.toDouble / total.toDouble) * 100
s"Selected $selectedSize partitions out of $total, " +
s"pruned ${if (total == 0) "0" else s"$percentPruned%"} partitions."
}
selected
} else {
partitions
}
}
/**
* Contains a set of paths that are considered as the base dirs of the input datasets.
* The partitioning discovery logic will make sure it will stop when it reaches any
* base path.
*
* By default, the paths of the dataset provided by users will be base paths.
* Below are three typical examples,
* Case 1) `spark.read.parquet("/path/something=true/")`: the base path will be
* `/path/something=true/`, and the returned DataFrame will not contain a column of `something`.
* Case 2) `spark.read.parquet("/path/something=true/a.parquet")`: the base path will be
* still `/path/something=true/`, and the returned DataFrame will also not contain a column of
* `something`.
* Case 3) `spark.read.parquet("/path/")`: the base path will be `/path/`, and the returned
* DataFrame will have the column of `something`.
*
* Users also can override the basePath by setting `basePath` in the options to pass the new base
* path to the data source.
* For example, `spark.read.option("basePath", "/path/").parquet("/path/something=true/")`,
* and the returned DataFrame will have the column of `something`.
*/
private def basePaths: Set[Path] = {
parameters.get(BASE_PATH_PARAM).map(new Path(_)) match {
case Some(userDefinedBasePath) =>
val fs = userDefinedBasePath.getFileSystem(hadoopConf)
if (!fs.isDirectory(userDefinedBasePath)) {
throw new IllegalArgumentException(s"Option '$BASE_PATH_PARAM' must be a directory")
}
val qualifiedBasePath = fs.makeQualified(userDefinedBasePath)
val qualifiedBasePathStr = qualifiedBasePath.toString
rootPaths
.find(!fs.makeQualified(_).toString.startsWith(qualifiedBasePathStr))
.foreach { rp =>
throw new IllegalArgumentException(
s"Wrong basePath $userDefinedBasePath for the root path: $rp")
}
Set(qualifiedBasePath)
case None =>
rootPaths.map { path =>
// Make the path qualified (consistent with listLeafFiles and bulkListLeafFiles).
val qualifiedPath = path.getFileSystem(hadoopConf).makeQualified(path)
if (leafFiles.contains(qualifiedPath)) qualifiedPath.getParent else qualifiedPath }.toSet
}
}
// SPARK-15895: Metadata files (e.g. Parquet summary files) and temporary files should not be
  // counted as data files, so that they shouldn't participate in partition discovery.
private def isDataPath(path: Path): Boolean = {
val name = path.getName
!((name.startsWith("_") && !name.contains("=")) || name.startsWith("."))
}
}
object PartitioningAwareFileIndex {
val BASE_PATH_PARAM = "basePath"
}
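// Illustrative sketch (not part of the original Spark source): how a caller might set the
// `basePath` option described in the `basePaths` doc above so that partition discovery keeps
// the partition column. The paths used here are hypothetical.
object BasePathUsageExample {
  def read(spark: SparkSession): Unit = {
    // Only the `something=true` subdirectory is read, but anchoring discovery at "/path/"
    // keeps `something` as a partition column in the resulting schema.
    val df = spark.read.option("basePath", "/path/").parquet("/path/something=true/")
    df.printSchema()
  }
}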
|
kevinyu98/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileIndex.scala
|
Scala
|
apache-2.0
| 10,869
|
import scala.reflect.macros.whitebox.Context
import scala.language.experimental.macros
import scala.annotation.StaticAnnotation
object HelloMacro {
def impl(c: Context)(annottees: c.Tree*): c.Tree = {
import c.universe._
annottees match {
case (classDecl: ClassDef) :: Nil =>
val q"$mods class $name[..$tparams] $ctorMods(...$paramss) extends { ..$earlydefns } with ..$bases { $self => ..$body }" = classDecl
q"""
case class $name(...$paramss) extends ..$bases {
..$body
def hello = "Hello"
}
"""
case _ => c.abort(c.enclosingPosition, "Invalid annottee")
}
}
}
class hello extends StaticAnnotation {
def macroTransform(annottees: Any*): Any = macro HelloMacro.impl
}
|
pdalpra/sbt
|
sbt/src/sbt-test/source-dependencies/macro-annotation/macros/src/main/scala/Macros.scala
|
Scala
|
bsd-3-clause
| 761
|
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package entities.items.weapons.types
import com.anathema_roguelike.entities.items.weapons.MetalWeaponMaterial
import com.anathema_roguelike.entities.items.weapons.WeaponMaterial
object MeleeWeaponType {
case class BluntWeapon(name: String, attackSpeed: Double, damage: Double, weight: Double) extends MeleeWeaponType(name, weight, attackSpeed, damage)
case class LongBlade(name: String, attackSpeed: Double, damage: Double, weight: Double) extends MeleeWeaponType(name, weight, attackSpeed, damage)
case class ShortBlade(name: String, attackSpeed: Double, damage: Double, weight: Double) extends MeleeWeaponType(name, weight, attackSpeed, damage)
case class Spear(name: String, attackSpeed: Double, damage: Double, weight: Double) extends MeleeWeaponType(name, weight, attackSpeed, damage)
}
class MeleeWeaponType(name: String, weight: Double, attackSpeed: Double, damage: Double) extends WeaponType(name, weight, attackSpeed, damage) {
override def getRange = 1
override def getMaterialType: Class[_ <: WeaponMaterial] = classOf[MetalWeaponMaterial]
}
|
carlminden/anathema-roguelike
|
src/com/anathema_roguelike/entities/items/weapons/types/MeleeWeaponType.scala
|
Scala
|
gpl-3.0
| 1,947
|
/* *\\
** A Toy Model of Environment **
** https://github.com/cubean/environment-toy-model.git **
\\* */
package Env.Tools
import java.time._
import java.time.format._
/**
* get some date time calculation result
*/
object DateTimeCal {
// the input format of date and time
private val formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
/**
* get current local date and time string
*/
val getCurrentLocalTimeStr = {
formatter.format(LocalDateTime.now())
}
/**
   * parse a local date and time from the given string
*
* @param dtStr local date and time string
*
* @return LocalDateTime
*/
def getLocalDateTime(dtStr: String) = {
try {
LocalDateTime.parse(dtStr, formatter)
} catch {
case ex: DateTimeParseException => {
println(ex)
println("Exception 02: Please input correct format of date and time (eg. 2015-12-23 16:02:12).")
null
}
case ex: Exception =>
println(ex)
null
}
}
/**
   * get LocalDateTime of some key days in a year: 21st March, 21st June, 22nd September, 22nd December
*
* @param dt local date and time
*
   * @return DTOfSomeDay instance
*/
def getDTOfSomeDay(dt: LocalDateTime) = {
try {
val strYear = (dt.getYear).toString()
val DT0321 = LocalDateTime.parse(strYear + "-03-21 00:00:00", formatter);
val DT0621 = LocalDateTime.parse(strYear + "-06-21 00:00:00", formatter);
val DT0922 = LocalDateTime.parse(strYear + "-09-22 00:00:00", formatter);
val DT1222 = LocalDateTime.parse(strYear + "-12-22 00:00:00", formatter);
DTOfSomeDay(DT0321, DT0621, DT0922, DT1222)
} catch {
case ex: DateTimeParseException => {
println(ex)
println("Exception 02: Please input correct format of date and time (eg. 2015-12-23 16:02:12).")
null
}
case ex: Exception =>
println(ex)
null
}
}
/**
   * get days of some key days in a year: Today, 21st March, 21st June, 22nd September, 22nd December
*
* @param dt local date and time
*
   * @return DaysOfSomeDay instance
*/
// def getDaysOfSomeDay(dt: LocalDateTime) = {
//
// val dtAll = getDTOfSomeDay(dt)
//
// DaysOfSomeDay(dt.getDayOfYear, dtAll.dt0321.getDayOfYear, dtAll.dt0621.getDayOfYear,
// dtAll.dt0922.getDayOfYear, dtAll.dt1222.getDayOfYear)
// }
/**
* Get total days in one year
*
* @param dt - local date and time
*
* @return days: 365 or 366
*/
def daysOneYear(dt: LocalDateTime): Int = {
val allDays = getDTOfSomeDay(dt)
if(allDays == null) return 365
    // The number of days in a year is determined by February.
val newDt = if (dt.isAfter(allDays.dt1222)) dt.plusYears(1) else dt
val daysof1231 = LocalDateTime.parse(newDt.getYear.toString() + "-12-31 00:00:00", formatter);
daysof1231.getDayOfYear
}
/**
   * get days from 21st March of this year
*
* @param dt - local date and time
*
* @return days
*/
def getDaysfrom0321(dt: LocalDateTime): Int = {
val dtAll = getDTOfSomeDay(dt)
if(dtAll == null) return 0
if (dt.isEqual(dtAll.dt0321) || dt.isAfter(dtAll.dt0321))
dt.getDayOfYear - dtAll.dt0321.getDayOfYear
else (daysOneYear(dt.minusYears(1)) - dtAll.dt0321.minusYears(1).getDayOfYear) + dt.getDayOfYear
}
/**
   * get relative days from 21st March of this year
*
* @param dt - local date and time
*
   * @return days (+/-), which is no more than half a year
*/
def getRelaDaysfrom0321(dt: LocalDateTime): Int = {
val dtAll = getDTOfSomeDay(dt)
if(dtAll == null) return 0
val daysOfToday = dt.getDayOfYear
    // (Tdays - Tdaysof0321) where Tdays is from the beginning of this year to 21st June.
    // (Tdaysof0922 - Tdays) where Tdays is from 22nd June to 22nd December.
    // (Tdays - Tdaysof0321 of next year - days of next year) where Tdays is from 23rd December to the end of this year.
if (dt.isBefore(dtAll.dt0621.plusDays(1)))
daysOfToday - dtAll.dt0321.getDayOfYear
else if (dt.isBefore(dtAll.dt1222.plusDays(1)))
dtAll.dt0922.getDayOfYear - daysOfToday
else daysOfToday - dtAll.dt0321.plusYears(1).getDayOfYear - daysOneYear(dt.plusYears(1))
}
/**
   * Get the total number of seconds elapsed in the day for a LocalDateTime
*/
def getTotalSecondsOfDay(dt: LocalDateTime): Int = {
dt.getHour * 3600 + dt.getMinute * 60 + dt.getSecond
}
}
/**
 * the LocalDateTime of some key dates in a year: 21st March, 21st June, 22nd September, 22nd December
*/
case class DTOfSomeDay(dt0321: LocalDateTime, dt0621: LocalDateTime, dt0922: LocalDateTime, dt1222: LocalDateTime)
/**
 * the days of some key dates in a year: Today, 21st March, 21st June, 22nd September, 22nd December
*/
//case class DaysOfSomeDay(daysofToday: Int, daysof0321: Int, daysof0621: Int, daysof0922: Int, daysof1222: Int)
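// Illustrative usage sketch (not part of the original toy model): parsing a date string
// and computing the day offsets described above. The date literal is an arbitrary example.
object DateTimeCalExample {
  def main(args: Array[String]): Unit = {
    val dt = DateTimeCal.getLocalDateTime("2015-12-23 16:02:12")
    if (dt != null) {
      println(DateTimeCal.getDaysfrom0321(dt))     // days elapsed since 21st March
      println(DateTimeCal.getRelaDaysfrom0321(dt)) // signed offset, at most about half a year
    }
  }
}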
|
cubean/environment-toy-model
|
src/main/scala/Env/Tools/DateTimeCal.scala
|
Scala
|
apache-2.0
| 5,070
|
package objsets
import common._
import TweetReader._
/**
* A class to represent tweets.
*/
class Tweet(val user: String, val text: String, val retweets: Int) {
override def toString: String =
"User: " + user + "\\n" +
"Text: " + text + " [" + retweets + "]"
}
/**
* This represents a set of objects of type `Tweet` in the form of a binary search
* tree. Every branch in the tree has two children (two `TweetSet`s). There is an
* invariant which always holds: for every branch `b`, all elements in the left
 * subtree are smaller than the tweet at `b`. The elements in the right subtree are
* larger.
*
* Note that the above structure requires us to be able to compare two tweets (we
* need to be able to say which of two tweets is larger, or if they are equal). In
* this implementation, the equality / order of tweets is based on the tweet's text
* (see `def incl`). Hence, a `TweetSet` could not contain two tweets with the same
* text from different users.
*
*
* The advantage of representing sets as binary search trees is that the elements
* of the set can be found quickly. If you want to learn more you can take a look
* at the Wikipedia page [1], but this is not necessary in order to solve this
* assignment.
*
* [1] http://en.wikipedia.org/wiki/Binary_search_tree
*/
abstract class TweetSet {
/**
* This method takes a predicate and returns a subset of all the elements
* in the original set for which the predicate is true.
*
   * Question: Can we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def filter(p: Tweet => Boolean): TweetSet =
filterAcc(p, new Empty)
/**
   * This is a helper method for `filter` that propagates the accumulated tweets.
*/
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet
/**
* Returns a new `TweetSet` that is the union of `TweetSet`s `this` and `that`.
*
   * Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def union(that: TweetSet): TweetSet
/**
* Returns the tweet from this set which has the greatest retweet count.
*
* Calling `mostRetweeted` on an empty set should throw an exception of
* type `java.util.NoSuchElementException`.
*
   * Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def mostRetweeted: Tweet
//helper function for mostRetweeted
def maxRetweets(max: Tweet): Tweet
/**
* Returns a list containing all tweets of this set, sorted by retweet count
* in descending order. In other words, the head of the resulting list should
* have the highest retweet count.
*
* Hint: the method `remove` on TweetSet will be very useful.
   * Question: Should we implement this method here, or should it remain abstract
* and be implemented in the subclasses?
*/
def descendingByRetweet: TweetList
/**
* The following methods are already implemented
*/
/**
* Returns a new `TweetSet` which contains all elements of this set, and the
   * new element `tweet` in case it does not already exist in this set.
*
* If `this.contains(tweet)`, the current set is returned.
*/
def incl(tweet: Tweet): TweetSet
/**
* Returns a new `TweetSet` which excludes `tweet`.
*/
def remove(tweet: Tweet): TweetSet
/**
* Tests if `tweet` exists in this `TweetSet`.
*/
def contains(tweet: Tweet): Boolean
/**
* This method takes a function and applies it to every element in the set.
*/
def foreach(f: Tweet => Unit): Unit
}
class Empty extends TweetSet {
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet = acc
def union(that: TweetSet): TweetSet = that
def mostRetweeted: Tweet = throw new NoSuchElementException
def maxRetweets(max: Tweet): Tweet = max
def descendingByRetweet: TweetList = Nil
/**
* The following methods are already implemented
*/
def contains(tweet: Tweet): Boolean = false
def incl(tweet: Tweet): TweetSet = new NonEmpty(tweet, new Empty, new Empty)
def remove(tweet: Tweet): TweetSet = this
def foreach(f: Tweet => Unit): Unit = ()
}
class NonEmpty(elem: Tweet, left: TweetSet, right: TweetSet) extends TweetSet {
def filterAcc(p: Tweet => Boolean, acc: TweetSet): TweetSet = {
if (p(elem))
right.filterAcc(p, left.filterAcc(p, acc.incl(elem)))
else right.filterAcc(p, left.filterAcc(p, acc))
}
def union(that: TweetSet): TweetSet =
right.union(left.union(that.incl(elem)))
def mostRetweeted: Tweet = maxRetweets(elem)
def maxRetweets(max: Tweet): Tweet =
right.maxRetweets(left.maxRetweets(if (elem.retweets > max.retweets) elem else max))
def descendingByRetweet: TweetList = {
val mostRet = mostRetweeted
new Cons(mostRet, remove(mostRet).descendingByRetweet)
}
/**
* The following methods are already implemented
*/
def contains(x: Tweet): Boolean =
if (x.text < elem.text) left.contains(x)
else if (elem.text < x.text) right.contains(x)
else true
def incl(x: Tweet): TweetSet = {
if (x.text < elem.text) new NonEmpty(elem, left.incl(x), right)
else if (elem.text < x.text) new NonEmpty(elem, left, right.incl(x))
else this
}
def remove(tw: Tweet): TweetSet =
if (tw.text < elem.text) new NonEmpty(elem, left.remove(tw), right)
else if (elem.text < tw.text) new NonEmpty(elem, left, right.remove(tw))
else left.union(right)
def foreach(f: Tweet => Unit): Unit = {
f(elem)
left.foreach(f)
right.foreach(f)
}
}
trait TweetList {
def head: Tweet
def tail: TweetList
def isEmpty: Boolean
def foreach(f: Tweet => Unit): Unit =
if (!isEmpty) {
f(head)
tail.foreach(f)
}
}
object Nil extends TweetList {
def head = throw new java.util.NoSuchElementException("head of EmptyList")
def tail = throw new java.util.NoSuchElementException("tail of EmptyList")
def isEmpty = true
}
class Cons(val head: Tweet, val tail: TweetList) extends TweetList {
def isEmpty = false
}
object GoogleVsApple {
val google = List("android", "Android", "galaxy", "Galaxy", "nexus", "Nexus")
val apple = List("ios", "iOS", "iphone", "iPhone", "ipad", "iPad")
def containsWord(tweet: Tweet, words: List[String]): Boolean = {
if (words.isEmpty) false
else if (tweet.text.contains(words.head)) true
else containsWord(tweet, words.tail)
}
lazy val googleTweets: TweetSet = TweetReader.allTweets.filter(tweet => google.exists(word => tweet.text.contains(word)))
lazy val appleTweets: TweetSet = TweetReader.allTweets.filter(tweet => apple.exists(word => tweet.text.contains(word)))
/**
* A list of all tweets mentioning a keyword from either apple or google,
* sorted by the number of retweets.
*/
lazy val trending: TweetList = googleTweets.union(appleTweets).descendingByRetweet
}
object Main extends App {
// Print the trending tweets
GoogleVsApple.trending foreach println
}
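// Illustrative sketch (not part of the original assignment code): building a small
// TweetSet by hand and ranking it, following the BST invariant described above.
// The tweets are made-up sample data.
object TweetSetExample {
  def demo(): Unit = {
    val t1 = new Tweet("a", "first tweet", 5)
    val t2 = new Tweet("b", "second tweet", 20)
    val t3 = new Tweet("c", "third tweet", 1)
    val ranked = (new Empty).incl(t1).incl(t2).incl(t3).descendingByRetweet
    // The head of the resulting list has the highest retweet count (20 here).
    ranked foreach println
  }
}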
|
macarran/coursera-progfun
|
Week 3/objsets/src/main/scala/objsets/TweetSet.scala
|
Scala
|
mit
| 7,050
|
/*
* Copyright © 2013 by Jörg D. Weisbarth
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License 3 as published by
* the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY.
*
* See the License.txt file for more details.
*/
package sim.app.antDefenseAIs.model
/**
 * Behaviour configuration of a normal Lasius niger ant
*
 * @param maxAggressiveness From this value of the ratio between other ants of the same colony and enemy ants
 *                          onwards, the ant changes state with probability `maxAggressivenessProb`.
* @param minNeutralnessProb Lowest possible probability that an ant ignores others
* @param maxNeutralnessProb Highest possible probability that an ant ignores others
* @param minAggressivenessProb Lowest possible probability that an ant gets aggressive
* @param maxAggressivenessProb Highest possible probability that an ant gets aggressive
*/
private[antDefenseAIs] class LN_RPB_WF_BehaviourConf(
val shoutingRange: Int = 3,
override val maxAggressiveness: Double = 6d,
override val minNeutralnessProb: Double = 0.33d,
override val maxNeutralnessProb: Double = 0.2d,
override val minAggressivenessProb: Double = 0.257d,
override val maxAggressivenessProb: Double = 0.767d,
override val emotionalDwellTime: Int = 8,
override val notBored: Int = 500,
override val alpha: Double = 0.98d,
override val explorationRate: Double = 0.3d,
override val gamma: Double = 0.98d)
extends LN_RPB_BehaviourConf(maxAggressiveness, minNeutralnessProb, maxNeutralnessProb, minAggressivenessProb,
maxAggressivenessProb, emotionalDwellTime, notBored, alpha, explorationRate, gamma)
private[antDefenseAIs] class LN_RPB_WF_Generator(
override val behaviourConf: LN_RPB_WF_BehaviourConf) extends AntGenerator {
/**
   * Creates an LN_RPB_WithFetching worker ant
*
* @param tribeID Tribe the ant belongs to
* @param world World the ant lives on
   * @return LN_RPB_WithFetching
*/
def apply(tribeID: Int, world: World) = new LN_RPB_WithFetching(tribeID, world, behaviourConf)
def apply(ant: Ant) =
behaviourConf match {
case c: LN_RPB_WF_BehaviourConf => new LN_RPB_WithFetching(ant, behaviourConf)
}
}
private[antDefenseAIs] class LN_RPB_WithFetching(
override val tribeID: Int,
override val world: World,
override val behaviourConf: LN_RPB_WF_BehaviourConf)
extends LN_RandomPB(tribeID, world, behaviourConf) {
// Initialise configuration
import behaviourConf._
/**
* Constructs ant with the information of the given ant
*
   * @param ant Ant providing the information for construction
* @return Ant of the same colony in the same simulation
*/
def this(ant: Ant, behaviourConf: LN_RPB_WF_BehaviourConf) = this(ant.tribeID, ant.world, behaviourConf)
protected[this] def followWarPheromone() {
val destiny = chooseDirectionBy(warPheroOf)
if (destiny.isDefined) {
moveTo(destiny.get)
adaptAllPheros()
}
mineRes()
}
override protected[model] def actEconomically() {
val bestWarDir = gradientOf(warPheroOf)
val bestWarPhero = warPheroOf(bestWarDir)
if (bestWarPhero > 0)
followWarPheromone()
else
super.actEconomically()
}
  // Overridden to set war pheromone
override def adaptState() {
import StrictMath.min
    val alpha: Double = min(1d, evalueSituation().get / maxAggressiveness)
val aggressivenessProb = alpha * maxAggressivenessProb + (1 - alpha) * minAggressivenessProb
val neutralnessProb = alpha * maxNeutralnessProb + (1 - alpha) * minNeutralnessProb
assert (aggressivenessProb + neutralnessProb <= 1)
val random = world.random.nextDouble()
emotion = if (random <= aggressivenessProb)
Emotion.aggressive
else if (random <= aggressivenessProb + neutralnessProb) {
shout() // Shout to create a numerical superiority
Emotion.normal
}
else
Emotion.fleeing
nextEmotionChange = emotionalDwellTime
}
/**
* Gives a shout of war pheromones
*/
private[this] def shout() {
import sim.app.antDefenseAIs.maxDistance
val curPos: (Int, Int) = currentPos
val positions: List[(Int, Int)] = world.nearPos(this, shoutingRange).get
def phero(p: (Int, Int)): ((Int, Int), Double) = (p, 1d / (maxDistance(p, curPos).toDouble + 1d))
for ((pos, value) <- positions.map(phero)) {
world.setWarPheroOn(this, pos, value)
}
}
}
|
joergdw/antconflictbeh
|
src/sim/app/antDefenseAIs/model/LN_RPB_WithFetching.scala
|
Scala
|
lgpl-3.0
| 4,618
|
package Project
import scalafx.scene.media.AudioClip
/**
 * Game sounds are kept in this object, which has methods that play a specific sound file if sound is on
*/
object Sound {
var isSoundOn: Boolean = true
//Sound Files
val menuSoundFile = new AudioClip("file:audio/menu.wav")
val shootSoundFile = new AudioClip("file:audio/bullet1.wav")
val explosionSoundFile = new AudioClip("file:audio/explosion.wav")
  //Each method checks whether sound is on and only then plays its sound file
def menuSound() {
if (isSoundOn) menuSoundFile.play
}
def shootSound() {
if (isSoundOn) shootSoundFile.play
}
def explosionSound() {
if (isSoundOn) explosionSoundFile.play
}
}
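// Illustrative sketch (not part of the original game code): flipping the global flag
// mutes every call site without changing them; the calls below are example usages.
object SoundUsageExample {
  def demo(): Unit = {
    Sound.menuSound()      // plays audio/menu.wav while isSoundOn is true
    Sound.isSoundOn = false
    Sound.explosionSound() // now silently skipped
  }
}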
|
TarmoA/ProjectAsteroid
|
Documents/ohjelmointistuff/ProjectAsteroid/src/Project/Sound.scala
|
Scala
|
gpl-3.0
| 688
|
package controllers
import pl.pholda.malpompaaligxilo.ContextJVM
import pl.pholda.malpompaaligxilo.dsl.parser.FormSpecificationParser
import pl.pholda.malpompaaligxilo.form.FormInstanceJVM
import pl.pholda.malpompaaligxilo.i18n.{I18nJVM, Lang}
import play.api.libs.json._
import play.api.mvc._
import views.html
import scala.io.Source
object Main extends Controller {
lazy val spec =
Source.fromInputStream(getClass.getResourceAsStream("/specification.asl")).mkString
implicit val i18n = I18nJVM.fromResources(getClass,
"en" -> "/en.po",
"pl" -> "/pl.po"
)
implicit val context = new ContextJVM()
lazy val specJVM = {
val parser = FormSpecificationParser()
parser(spec).get
}
def index(implicit lang: Lang = "en") = Action {
implicit val formInstance = new FormInstanceJVM(specJVM, {field =>
Seq()
})
Ok(html.index())
}
def submit = Action { implicit request =>
request.body.asJson match {
case Some(json) =>
// case Some(post) =>
implicit val formInstance = new FormInstanceJVM(specJVM, { field =>
json \\ field.name match {
case JsArray(values) =>
values.map{
case JsString(str) =>str
case _ => throw new IllegalArgumentException(s"unexpected token")
}
}
})
Ok(formInstance.validate.toString)
case _ =>
Ok("error")
}
}
def specification() = Action {
Ok(spec)
}
def po(implicit lang: Lang = "en") = Action {
Ok(html.po(i18n, lang))
}
}
|
pholda/MalpompaAligxilo
|
examples/dslPlay/app/controllers/Main.scala
|
Scala
|
gpl-3.0
| 1,569
|
package mypipe
import mypipe.api.consumer.{ BinaryLogConsumer, BinaryLogConsumerListener }
import mypipe.api.event.{ Mutation, InsertMutation }
import mypipe.mysql._
import scala.concurrent.{ Future, Await }
import scala.concurrent.duration._
import mypipe.producer.QueueProducer
import java.util.concurrent.{ TimeUnit, LinkedBlockingQueue }
import akka.actor.ActorDSL._
import akka.pattern.ask
import akka.util.Timeout
import akka.agent.Agent
import scala.collection.mutable.ListBuffer
import org.slf4j.LoggerFactory
import com.github.shyiko.mysql.binlog.event.{ Event ⇒ MEvent, _ }
class LatencySpec extends UnitSpec with DatabaseSpec with ActorSystemSpec {
@volatile var connected = false
val log = LoggerFactory.getLogger(getClass)
val maxLatency = conf.getLong("mypipe.test.max-latency")
val latencies = ListBuffer[Long]()
implicit val timeout = Timeout(1 second)
val maxId = Agent(0)
val insertQueue = new LinkedBlockingQueue[(Int, Long)]()
val binlogQueue = new LinkedBlockingQueue[Mutation]()
case object Insert
case object Consume
case object Quit
"Mypipe" should s"consume messages with a latency lower than $maxLatency millis" in {
// actor1:
// add row into local queue
// add row into mysql
val insertProducer = actor(new Act {
var id = 1
become {
case Insert ⇒ {
try {
val f = db.connection.sendQuery(Queries.INSERT.statement(id = id.toString))
Await.result(f, 1000 millis)
maxId.alter(id)
insertQueue.add((id, System.nanoTime()))
id += 1
} catch { case t: Throwable ⇒ }
self ! Insert
}
case Quit ⇒ sender ! true
}
})
// actor2:
// consumes binlogs from the server and puts the results
// in a local queue for other actors to process
val binlogConsumer = actor(new Act {
val queueProducer = new QueueProducer(binlogQueue)
val consumer = MySQLBinaryLogConsumer(Queries.DATABASE.host, Queries.DATABASE.port, Queries.DATABASE.username, Queries.DATABASE.password)
consumer.registerListener(new BinaryLogConsumerListener[MEvent, BinaryLogFilePosition]() {
override def onMutation(c: BinaryLogConsumer[MEvent, BinaryLogFilePosition], mutation: Mutation): Boolean = {
queueProducer.queue(mutation)
true
}
override def onMutation(c: BinaryLogConsumer[MEvent, BinaryLogFilePosition], mutations: Seq[Mutation]): Boolean = {
queueProducer.queueList(mutations.toList)
true
}
override def onConnect(c: BinaryLogConsumer[MEvent, BinaryLogFilePosition]) { connected = true }
})
val f = Future { consumer.connect() }
become {
case Quit ⇒ {
consumer.disconnect()
Await.result(f, 5 seconds)
sender ! true
}
}
})
// actor3:
    //  poll the local queue and get the latest inserted event
// wait on binlog consumer to hand us back the same event
// calculate latency
val insertConsumer = actor(new Act {
become {
case Consume ⇒ {
val id = insertQueue.poll(1, TimeUnit.SECONDS)
var found = false
while (!found) {
val mutation = binlogQueue.poll(1, TimeUnit.SECONDS)
if (mutation.isInstanceOf[InsertMutation]) {
val colName = mutation.table.primaryKey.get.columns.head.name
val primaryKey = mutation.asInstanceOf[InsertMutation].rows.head.columns(colName).value[Int]
if (id._1 == primaryKey) {
latencies += System.nanoTime() - id._2
found = true
} else {
log.debug(s"Did not find a matching mutation with id = $id, cur val is $primaryKey, will keep looking.")
}
}
}
self ! Consume
}
case Quit ⇒ sender ! true
}
})
binlogConsumer ! Consume
while (!connected) Thread.sleep(10)
insertConsumer ! Consume
insertProducer ! Insert
while (maxId.get() < 100) {
Thread.sleep(10)
}
val future = Future.sequence(List(ask(insertProducer, Quit), ask(binlogConsumer, Quit), ask(insertConsumer, Quit)))
try {
Await.result(future, 30 seconds)
} catch {
case e: Exception ⇒ log.debug("Timed out waiting for actors to shutdown, proceeding anyway.")
}
system.stop(insertProducer)
system.stop(binlogConsumer)
system.stop(insertConsumer)
val latency = latencies.fold(0L)(_ + _) / latencies.size
println(s"Latency: ${latency / 1000000.0} millis ($latency nanos)")
assert(latency / 1000000.0 < maxLatency)
}
}
|
Asana/mypipe
|
mypipe-api/src/test/scala/mypipe/LatencySpec.scala
|
Scala
|
apache-2.0
| 4,738
|
package nn.conv
import java.io.{File, PrintWriter}
import java.nio.file.Files.{createDirectory, exists}
import java.nio.file.Paths.get
import java.util.Calendar
import com.typesafe.scalalogging.Logger
import nn.conv.versions.{Conv1, Conv3, Conv4}
import nn.{PaddedArray, Shape}
//import nn.mysql.Connector
import opencl.executor.{Execute, Executor}
import org.junit.{AfterClass, BeforeClass, Test, Ignore}
import org.junit.Assert.assertEquals
/**
* Created by s1569687 on 01/03/17.
*/
@Ignore
object TestConv {
@BeforeClass def before(): Unit = {
Executor.loadLibrary()
println("Initialize the executor")
Executor.init(/*monaco*/0, 0)
// MySQL is disabled in this version
// nn.conv.mysql.CreateTable()
}
@AfterClass def after(): Unit = {
println("Shutdown the executor")
Executor.shutdown()
// MySQL is disabled in this version
// Connector.close()
}
}
@Ignore
class TestConv {
private val logger = Logger(this.getClass)
val precision: Float = 1f
val codeVersion: Int = 1
  // The tests below are temporarily deprecated since they have not yet been updated to comply with recent changes
/*@Test
def Sanity_Conv(): Unit = {
val (lift_result: Array[Float], runtime) = Execute(1,1)(
Conv1.Seq(2, 3, nn.Linear), input_K, input_b, input_X)
logger.info(f"\\n1. Convolution sanity check.\\nRuntime: $runtime%1.5f ms")
val lift_result3d = nn.group(lift_result, (gold.length, gold.head.length, gold.head.head.length))
for ((gold2d, lift_result2d) <- gold zip lift_result3d) {
logger.info(lift_result2d.flatten.mkString(", "))
logger.info(gold2d.flatten.mkString(", "))
/*for ((gold1d, lift_result1d) <- gold2d zip lift_result2d) {
assertArrayEquals(gold1d, lift_result1d, precision)
}*/
}
}
@Test
def Sanity_CNN_Par(): Unit = {
    /** Build an array of experimental parameters filtering out the experiments that were
* already run or for which the data was not provided; load data for all experiments
*/
var conv: CNN = null
val nKernelsL1: Int = 3
val kernelSize: Int = 3
val nInputs: Int = 2
val cnns = for {
elsPerThreadL1 <- 1 to 16
kernelsPerGroupL1 <- 1 to nKernelsL1
inputTileSize <- kernelSize to input_X.head.head.length
// Check if CNN can be created with the selected parameters (e.g. if WrgGroupSize < maxWrgGroupSize)
if {
try {
conv = new CNN(CNN.Par, Array(nn.ReLU, nn.ReLU), elsPerThreadL1, kernelsPerGroupL1, inputTileSize,
PaddedArray(input_X), Array(input_K), Array(input_b), gold, "")
true
}
catch {
case e: java.lang.IllegalArgumentException =>
false
}
}
} yield conv
logger.info("-----------------------------------------------------------------")
for (conv <- convs)
singleTest(conv)
}
@Test
def TestConv(): Unit = {
// If rerunsAllowed == False, the experiment will not be rerun if result files from previous runs
// are found. Otherwise, new results will be added with a datetime timestamp
val rerunsAllowed: Boolean = true
    /** Build an array of experimental parameters filtering out the experiments that were
* already run or for which the data was not provided; load data for all experiments
*/
var aConv: Conv = null
val nLayers: Int = 2
val nBatches: Int = 2
for {
rerun <- 1 until 10
nKernelsL1 <- 8 until 48 by 4//16 until 17 by 4
kernelSize <- 4 until 64 by 4 //8 until 64 by 4
imageSize <- 8 until 64 by 4//8 until 64 by 8//16 until 512 by 16
pathToInputs = Experiment.getPathToInputs(
nKernelsL1, Shape(size=kernelSize, size=kernelSize), Shape(size=imageSize, size=imageSize))
if exists(get(pathToInputs))
pathToResults = Experiment.getPathToResults(pathToInputs)
// Results dir doesn't exist (then create it) or it does, but reruns are allowed:
if rerunsAllowed || {if (!exists(get(pathToResults))) {
createDirectory(get(pathToResults))
true} else false}
nInputs <- 8 until 9/*104*/ by 32//520 by 32 //512 by 32
// Results dir exists, but doesn't contain results of this experiment or it does, but reruns are allowed:
if rerunsAllowed || new File(pathToResults).listFiles.toList.count {
file => file.getName.endsWith("_n%d.csv".format(nInputs))} == 0
// Load datasets once for all experiments (across all multsPerThread and neuronsPerWrg)
if Experiment.datasetsExist(pathToInputs)
elsPerThreadL1 <- List(1) ++ (4 until 16 by 4)
kernelsPerGroupL1 <- List(1) ++ (1 until nKernelsL1 by 4)// until nKernelsL1 //1 until nKernelsL1
inputTileSize <- kernelSize until imageSize by 4 // kernelSize
// Check if Conv can be created with the selected parameters (e.g. if WrgGroupSize < maxWrgGroupSize)
if {
try {
aConv = new Conv(Conv.Par, Array(nn.ReLU, nn.ReLU), elsPerThreadL1, kernelsPerGroupL1, inputTileSize,
nLayers, nBatches, nInputs, Array(16, nKernelsL1), Array(1, 16),
{
val inputShape: Array[Shape] = Array.fill[Shape](nLayers)(Shape())
inputShape(0) = Shape(size=imageSize, size=imageSize, nChannels=1)
inputShape
}, {for (_ <- 0 until nLayers) yield Shape(size=kernelSize, size=kernelSize)}.toArray,
pathToInputs, pathToResults, Experiment.loadDatasets, aConv)
// logger.info(f"Prepared the experiment (nKernelsL1=$nKernelsL1%d, " +
// f"inputTileSize=$inputTileSize%d, elsPerThreadL1=$elsPerThreadL1%d, " +
// f"kernelsPerGroupL1=$kernelsPerGroupL1%d,\\nkernelSize=$kernelSize%d, " +
// f"nBatches=$nBatches%d, nInputs=$nInputs%d).")
true
}
catch {
case e: java.lang.IllegalArgumentException =>
logger.warn("-----------------------------------------------------------------")
logger.warn(f"Cannot start the experiment (nKernelsL1=$nKernelsL1%d, " +
f"inputTileSize=$inputTileSize%d, elsPerThreadL1=$elsPerThreadL1%d, " +
f"kernelsPerGroupL1=$kernelsPerGroupL1%d,\\nkernelSize=$kernelSize%d, " +
f"nBatches=$nBatches%d, nInputs=$nInputs%d).")
logger.warn(e.getMessage)
logger.warn("SKIPPING EXPERIMENT.")
recordFailureInSQL(e)
false
}
}
} {
try {
singleTest(aConv)
} catch {
case e: opencl.executor.Executor.ExecutorFailureException =>
logger.warn("EXCEPTION: opencl.executor.Executor.ExecutorFailureException")
logger.warn(e.getMessage)
recordFailureInSQL(e)
case e: opencl.executor.DeviceCapabilityException =>
logger.warn("EXCEPTION: opencl.executor.DeviceCapabilityException")
logger.warn(e.getMessage)
recordFailureInSQL(e)
}
}
}
def singleTest(aconv: Conv): Unit = {
logger.info("-----------------------------------------------------------------")
System.out.println(f"Starting the experiment (inputTileSize(first)=${aconv.inputTileSize(0)}%d, " +
f"inputTileSize(last)=${aconv.inputTileSize(aconv.nLayers - 1)}%d, " +
f"nKernels(first)=${aconv.nKernels(0)}%d, " +
f"nKernels(last)=${aconv.nKernels(aconv.nLayers - 1)}%d,\\n" +
f"elsPerThread(first)=${aconv.elsPerThread(0)}%d, " +
f"elsPerThread(last)=${aconv.elsPerThread(aconv.nLayers - 1)}%d, " +
f"kernelsPerGroup(first)=${aconv.kernelsPerGroup(0)}%d, " +
f"kernelsPerGroup(last)=${aconv.kernelsPerGroup(aconv.nLayers - 1)}%d, " +
f"kernelSize=${aconv.kernelShape(aconv.nLayers - 1).s}%d, " +
f"nBatches=${aconv.nBatches}%d, nInputs=${aconv.nInputs}%d, " +
f"imageSize=${aconv.inputShape(0).s}%d).")
val now = Calendar.getInstance().getTime
for (layerNo <- 0 until aconv.nLayers) {
aconv.updateInputs(layerNo)
/* Padding */
aconv.padInputs(layerNo)
val (outputFlat: Array[Float], runtime) =
Execute(
aconv.localSize(0)(layerNo), aconv.localSize(1)(layerNo), aconv.localSize(2)(layerNo),
aconv.globalSize(0)(layerNo), aconv.globalSize(1)(layerNo), aconv.globalSize(2)(layerNo), (true, true))(
aconv.liftConv(
aconv.activationFun(layerNo), aconv.inputShape(layerNo), aconv.kernelShape(layerNo),
aconv.nInputs, aconv.nBatches, aconv.nInChannels(layerNo), aconv.nKernels(layerNo),
Tile(kernels_per_group=aconv.kernelsPerGroup(layerNo), els_per_thread=aconv.elsPerThread(layerNo),
inputTileSize=aconv.inputTileSize(layerNo), inputTileSlideStep=aconv.inputTileStep(layerNo),
nInputTilesPerDim=aconv.nTilesPerDim(layerNo),
n_windows_per_tile_per_dim=aconv.nWindowsPerTilePerDim(layerNo))),
aconv.kWeights(layerNo), aconv.kBiases(layerNo), aconv.inputs(layerNo).padded)
aconv.runTimes(layerNo) = runtime
/* Group and unpad */
aconv.outputs = {
def getShapedOutputs = nn.group(outputFlat, (aconv.nBatches, aconv.nInputs, aconv.outputShape(layerNo).hPadded,
aconv.outputShape(layerNo).wPadded, aconv.outputShape(layerNo).ch)).map(
batch => batch.map(
input => input.map(
row => row.slice(0, aconv.outputShape(layerNo).wNonPadded)
).slice(0, aconv.outputShape(layerNo).hNonPadded)
))
if (aconv.outputs == null) Array(PaddedArray(getShapedOutputs)) else
aconv.outputs :+ PaddedArray(getShapedOutputs)
}
logger.info(f"Layer $layerNo%d runtime: $runtime%1.5f ms")
}
logger.info("")
/* Check and save results */
var testFailed: Boolean = false
for {
(liftBatch, targetBatch, batch_no) <-
(aconv.outputs.last.nonPadded, aconv.targets, 0 to aconv.targets.length).zipped.toList
(liftResult, targetResult, input_no) <- (liftBatch, targetBatch, 0 to targetBatch.length).zipped.toList
(liftRow, targetRow, row_no) <- (liftResult, targetResult, 0 to targetResult.length).zipped.toList
(liftElement, targetElement, el_no) <- (liftRow, targetRow, 0 to targetRow.length).zipped.toList
} {
// logger.info(f"target $batch_no%d,$input_no%d,$row_no%d,$el_no%d: " + targetElement.mkString(", "))
// logger.info(f"actual $batch_no%d,$input_no%d,$row_no%d,$el_no%d: " + liftElement.mkString(", "))
for {(liftElementKernel, targetElementKernel, elk_no) <-
(liftElement, targetElement, 0 to targetElement.length).zipped.toList} {
try {
// assertArrayEquals(f"Batch $batch_no%d input $input_no%d row $row_no%d element $el_no%d: " +
// f"the lift output is different to the target output", targetElement, liftElement, precision)
assertEquals("", targetElementKernel, liftElementKernel, precision)
}
catch {
case e: AssertionError =>
logger.info(f"$batch_no%d,$input_no%d,$row_no%d,$el_no%d,$elk_no%d: " +
targetElementKernel + " != " + liftElementKernel)
testFailed = true
}
}
}
if (!testFailed)
logger.info(f"SUCCESS. Processed ${aconv.nInputs}%d inputs, the results were equal to targets " +
f"(precision=$precision%1.4f).")
/* JSON */
if (aconv.pathToResults != "") {
var pw: PrintWriter = null
if (aconv.pathToResults != "") {
val file = new File(nn.resultsFilename(aconv.pathToResults, aconv.nInputs))
file.getParentFile.mkdirs()
pw = new PrintWriter(file)
}
pw.write("device_name,n_batches,n_inputs," + {
for (layerNo <- 0 until aconv.nLayers) yield f"n_kernels_l$layerNo%d"
}.mkString(",") + "," + {
for (layerNo <- 0 until aconv.nLayers) yield f"kernel_size_l$layerNo%d"
}.mkString(",") + "," +
"input_tile_size_l1,input_tile_step_l1,els_per_thread_l1,kernels_per_group_l1," + {
for (layerNo <- 0 until aconv.nLayers) yield f"runtime_l$layerNo%d"
}.mkString(",") + ",tag\\n")
pw.write(nn.deviceName + "," + f"${aconv.nBatches}%d,${aconv.nInputs}%d,")
for (layerNo <- 0 until aconv.nLayers)
pw.write(f"${aconv.nKernels(layerNo)}%d,")
for (layerNo <- 0 until aconv.nLayers)
pw.write(f"${aconv.kernelShape(layerNo).s}%d,")
pw.write(f"${aconv.inputTileSize(aconv.nLayers - 1)}%d,${aconv.inputTileStep(aconv.nLayers - 1)}%d," +
f"${aconv.elsPerThread(aconv.nLayers - 1)}%d,${aconv.kernelsPerGroup(1)}%d")
for (layerNo <- 0 until aconv.nLayers)
pw.write(f",${aconv.runTimes(layerNo)}%1.5f")
pw.write(f",$codeVersion\\n")
pw.close()
if (!testFailed)
new File(nn.resultsFilename(aconv.pathToResults, aconv.nInputs)).delete()
}
/* SQL */
Connector.statement.execute("INSERT INTO lift_results_conv " +
"(batches, images, imagesize, kernels_l0, kernels_l1, kernelsize_l0, kernelsize_l1, " +
"elsperthread_l0, elsperthread_l1, kernelspergroup_l0, kernelspergroup_l1, inputtilesize_l0, " +
"inputtilesize_l1, ran, success, runtime_l0, runtime_l1, code_version, datetime) " +
f"VALUES (${aconv.nBatches}%d, ${aconv.nInputs}%d, ${aconv.inputShape(0).s}%d, ${aconv.nKernels(0)}%d, " +
f"${aconv.nKernels(1)}%d, ${aconv.kernelShape(0).s}%d, ${aconv.kernelShape(1).s}, ${aconv.elsPerThread(0)}%d, " +
f"${aconv.elsPerThread(1)}%d, ${aconv.kernelsPerGroup(0)}%d, ${aconv.kernelsPerGroup(1)}%d, " +
f"${aconv.inputTileSize(0)}%d, ${aconv.inputTileSize(1)}%d, true, ${!testFailed}%b, ${aconv.runTimes(0)}%1.5f, " +
f"${aconv.runTimes(1)}%1.5f, $codeVersion%d, " +
f"'${new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(now)}%s');")
}*/
def recordFailureInSQL(e: Exception): Unit = {
/* SQL */
// Commented out due to MySQL being disabled in this version
// Connector.statement.execute("INSERT INTO lift_results_conv (ran, abort_reason) VALUES " +
// "(false, '" + e.getClass.getSimpleName + ": " + e.getMessage + "')")
}
}
|
lift-project/lift
|
src/test/nn/conv/TestConv.scala
|
Scala
|
mit
| 14,180
|
package com.mesosphere.cosmos
import com.netaporter.uri.dsl._
import com.twitter.conversions.storage._
import com.twitter.finagle.http.RequestBuilder
import com.twitter.finagle.http.Status
import com.twitter.util.Await
import com.twitter.util.Return
import org.scalatest.FreeSpec
final class ServicesIntegrationSpec extends FreeSpec {
"Services" - {
"adminRouterClient should" - {
"be able to connect to an https site" in {
val url = "https://www.google.com"
val Return(client) = Services.adminRouterClient(url, 5.megabytes)
val request = RequestBuilder().url(url).buildGet()
val response = Await.result(client(request))
assertResult(response.status)(Status.Ok)
}
}
}
}
|
takirala/cosmos
|
cosmos-integration-tests/src/main/scala/com/mesosphere/cosmos/ServicesIntegrationSpec.scala
|
Scala
|
apache-2.0
| 738
|
/*
* The MIT License
*
* Copyright (c) 2017 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.fulcrumgenomics
/** Writer trait to standardize how to interact with classes that write out objects. */
@deprecated(message="Use com.fulcrumgenomics.commons.io.Writer instead.", since="0.8.1")
trait Writer[A] extends com.fulcrumgenomics.commons.io.Writer[A]
|
fulcrumgenomics/fgbio
|
src/main/scala/com/fulcrumgenomics/Writer.scala
|
Scala
|
mit
| 1,419
|
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.controllers.registration
import iht.config.AppConfig
import iht.connector.{CachingConnector, IhtConnector}
import iht.metrics.IhtMetrics
import iht.testhelpers.MockObjectBuilder
import iht.utils.IhtSection
import iht.{FakeIhtApp, TestUtils}
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito._
import org.scalatest.BeforeAndAfterEach
import org.scalatestplus.mockito.MockitoSugar
import play.api.i18n.{I18nSupport, Lang, MessagesApi}
import play.api.mvc.{AnyContentAsEmpty, MessagesControllerComponents}
import play.api.test.{FakeHeaders, FakeRequest}
import uk.gov.hmrc.auth.core.{AuthenticateHeaderParser, PlayAuthConnector}
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.Future
trait RegistrationControllerTest extends FakeIhtApp with MockitoSugar with TestUtils with BeforeAndAfterEach with I18nSupport with MockObjectBuilder {
implicit val messagesApi: MessagesApi = app.injector.instanceOf[MessagesApi]
def loginUrl = buildLoginUrl(IhtSection.Registration)
implicit val headerCarrier = FakeHeaders()
implicit val hc = new HeaderCarrier
val referrerURL = "http://localhost:9070/inheritance-tax/registration/addExecutor"
val host = "localhost:9070"
val mockCachingConnector: CachingConnector = mock[CachingConnector]
val mockAuthConnector: PlayAuthConnector = mock[PlayAuthConnector]
val mockIhtMetrics: IhtMetrics = mock[IhtMetrics]
val mockMessagesApi: MessagesApi = mock[MessagesApi]
val mockIhtConnector: IhtConnector = mock[IhtConnector]
val mockControllerComponents: MessagesControllerComponents = app.injector.instanceOf[MessagesControllerComponents]
implicit val mockAppConfig: AppConfig = app.injector.instanceOf[AppConfig]
implicit val lang = Lang("en")
override def beforeEach(): Unit = {
reset(mockCachingConnector)
reset(mockAuthConnector)
reset(mockIhtConnector)
reset(mockIhtMetrics)
reset(mockMessagesApi)
super.beforeEach()
}
override def createFakeRequest(isAuthorised: Boolean = true, referer: Option[String] = None, authRetrieveNino: Boolean = true): FakeRequest[AnyContentAsEmpty.type] = {
if (isAuthorised) {
if (authRetrieveNino) {
when(mockAuthConnector.authorise[Option[String]](any(), any())(any(), any())).thenReturn(Future.successful(Some(fakeNino)))
} else {
when(mockAuthConnector.authorise[Unit](any(), any())(any(), any())).thenReturn(Future.successful(()))
}
} else {
when(mockAuthConnector.authorise(any(), any())(any(), any())).thenReturn(Future.failed(AuthenticateHeaderParser.parse(Map())))
}
super.createFakeRequest(isAuthorised, referer, authRetrieveNino)
}
def request = createFakeRequest(isAuthorised = true)
def unauthorisedRequest = createFakeRequest(isAuthorised = false)
}
|
hmrc/iht-frontend
|
test/iht/controllers/registration/RegistrationControllerTest.scala
|
Scala
|
apache-2.0
| 3,401
|
package io.transwarp.midas.constant.midas.params
object ProcessParams {
val CronExpr = "cron expression"
val GlobalVars = "global"
val MaxRows = "max data rows"
}
|
transwarpio/rapidminer
|
api-driver/src/main/scala/io/transwarp/midas/constant/midas/params/ProcessParams.scala
|
Scala
|
gpl-3.0
| 170
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.catalyst.expressions.NamedExpression
import org.scalatest.Matchers._
import org.apache.spark.sql.execution.Project
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
class ColumnExpressionSuite extends QueryTest with SharedSQLContext {
import testImplicits._
private lazy val booleanData = {
sqlContext.createDataFrame(sparkContext.parallelize(
Row(false, false) ::
Row(false, true) ::
Row(true, false) ::
Row(true, true) :: Nil),
StructType(Seq(StructField("a", BooleanType), StructField("b", BooleanType))))
}
test("column names with space") {
val df = Seq((1, "a")).toDF("name with space", "name.with.dot")
checkAnswer(
df.select(df("name with space")),
Row(1) :: Nil)
checkAnswer(
df.select($"name with space"),
Row(1) :: Nil)
checkAnswer(
df.select(col("name with space")),
Row(1) :: Nil)
checkAnswer(
df.select("name with space"),
Row(1) :: Nil)
checkAnswer(
df.select(expr("`name with space`")),
Row(1) :: Nil)
}
test("column names with dot") {
val df = Seq((1, "a")).toDF("name with space", "name.with.dot").as("a")
checkAnswer(
df.select(df("`name.with.dot`")),
Row("a") :: Nil)
checkAnswer(
df.select($"`name.with.dot`"),
Row("a") :: Nil)
checkAnswer(
df.select(col("`name.with.dot`")),
Row("a") :: Nil)
checkAnswer(
df.select("`name.with.dot`"),
Row("a") :: Nil)
checkAnswer(
df.select(expr("`name.with.dot`")),
Row("a") :: Nil)
checkAnswer(
df.select(df("a.`name.with.dot`")),
Row("a") :: Nil)
checkAnswer(
df.select($"a.`name.with.dot`"),
Row("a") :: Nil)
checkAnswer(
df.select(col("a.`name.with.dot`")),
Row("a") :: Nil)
checkAnswer(
df.select("a.`name.with.dot`"),
Row("a") :: Nil)
checkAnswer(
df.select(expr("a.`name.with.dot`")),
Row("a") :: Nil)
}
test("alias") {
val df = Seq((1, Seq(1, 2, 3))).toDF("a", "intList")
assert(df.select(df("a").as("b")).columns.head === "b")
assert(df.select(df("a").alias("b")).columns.head === "b")
}
test("as propagates metadata") {
val metadata = new MetadataBuilder
metadata.putString("key", "value")
val origCol = $"a".as("b", metadata.build())
val newCol = origCol.as("c")
assert(newCol.expr.asInstanceOf[NamedExpression].metadata.getString("key") === "value")
}
test("single explode") {
val df = Seq((1, Seq(1, 2, 3))).toDF("a", "intList")
checkAnswer(
df.select(explode('intList)),
Row(1) :: Row(2) :: Row(3) :: Nil)
}
test("explode and other columns") {
val df = Seq((1, Seq(1, 2, 3))).toDF("a", "intList")
checkAnswer(
df.select($"a", explode('intList)),
Row(1, 1) ::
Row(1, 2) ::
Row(1, 3) :: Nil)
checkAnswer(
df.select($"*", explode('intList)),
Row(1, Seq(1, 2, 3), 1) ::
Row(1, Seq(1, 2, 3), 2) ::
Row(1, Seq(1, 2, 3), 3) :: Nil)
}
test("aliased explode") {
val df = Seq((1, Seq(1, 2, 3))).toDF("a", "intList")
checkAnswer(
df.select(explode('intList).as('int)).select('int),
Row(1) :: Row(2) :: Row(3) :: Nil)
checkAnswer(
df.select(explode('intList).as('int)).select(sum('int)),
Row(6) :: Nil)
}
test("explode on map") {
val df = Seq((1, Map("a" -> "b"))).toDF("a", "map")
checkAnswer(
df.select(explode('map)),
Row("a", "b"))
}
test("explode on map with aliases") {
val df = Seq((1, Map("a" -> "b"))).toDF("a", "map")
checkAnswer(
df.select(explode('map).as("key1" :: "value1" :: Nil)).select("key1", "value1"),
Row("a", "b"))
}
test("self join explode") {
val df = Seq((1, Seq(1, 2, 3))).toDF("a", "intList")
val exploded = df.select(explode('intList).as('i))
checkAnswer(
exploded.join(exploded, exploded("i") === exploded("i")).agg(count("*")),
Row(3) :: Nil)
}
test("collect on column produced by a binary operator") {
val df = Seq((1, 2, 3)).toDF("a", "b", "c")
checkAnswer(df.select(df("a") + df("b")), Seq(Row(3)))
checkAnswer(df.select(df("a") + df("b").as("c")), Seq(Row(3)))
}
test("star") {
checkAnswer(testData.select($"*"), testData.collect().toSeq)
}
test("star qualified by data frame object") {
val df = testData.toDF
val goldAnswer = df.collect().toSeq
checkAnswer(df.select(df("*")), goldAnswer)
val df1 = df.select(df("*"), lit("abcd").as("litCol"))
checkAnswer(df1.select(df("*")), goldAnswer)
}
test("star qualified by table name") {
checkAnswer(testData.as("testData").select($"testData.*"), testData.collect().toSeq)
}
test("+") {
checkAnswer(
testData2.select($"a" + 1),
testData2.collect().toSeq.map(r => Row(r.getInt(0) + 1)))
checkAnswer(
testData2.select($"a" + $"b" + 2),
testData2.collect().toSeq.map(r => Row(r.getInt(0) + r.getInt(1) + 2)))
}
test("-") {
checkAnswer(
testData2.select($"a" - 1),
testData2.collect().toSeq.map(r => Row(r.getInt(0) - 1)))
checkAnswer(
testData2.select($"a" - $"b" - 2),
testData2.collect().toSeq.map(r => Row(r.getInt(0) - r.getInt(1) - 2)))
}
test("*") {
checkAnswer(
testData2.select($"a" * 10),
testData2.collect().toSeq.map(r => Row(r.getInt(0) * 10)))
checkAnswer(
testData2.select($"a" * $"b"),
testData2.collect().toSeq.map(r => Row(r.getInt(0) * r.getInt(1))))
}
test("/") {
checkAnswer(
testData2.select($"a" / 2),
testData2.collect().toSeq.map(r => Row(r.getInt(0).toDouble / 2)))
checkAnswer(
testData2.select($"a" / $"b"),
testData2.collect().toSeq.map(r => Row(r.getInt(0).toDouble / r.getInt(1))))
}
test("%") {
checkAnswer(
testData2.select($"a" % 2),
testData2.collect().toSeq.map(r => Row(r.getInt(0) % 2)))
checkAnswer(
testData2.select($"a" % $"b"),
testData2.collect().toSeq.map(r => Row(r.getInt(0) % r.getInt(1))))
}
test("unary -") {
checkAnswer(
testData2.select(-$"a"),
testData2.collect().toSeq.map(r => Row(-r.getInt(0))))
}
test("unary !") {
checkAnswer(
complexData.select(!$"b"),
complexData.collect().toSeq.map(r => Row(!r.getBoolean(3))))
}
test("isNull") {
checkAnswer(
nullStrings.toDF.where($"s".isNull),
nullStrings.collect().toSeq.filter(r => r.getString(1) eq null))
checkAnswer(
sql("select isnull(null), isnull(1)"),
Row(true, false))
}
test("isNotNull") {
checkAnswer(
nullStrings.toDF.where($"s".isNotNull),
nullStrings.collect().toSeq.filter(r => r.getString(1) ne null))
checkAnswer(
sql("select isnotnull(null), isnotnull('a')"),
Row(false, true))
}
test("isNaN") {
val testData = sqlContext.createDataFrame(sparkContext.parallelize(
Row(Double.NaN, Float.NaN) ::
Row(math.log(-1), math.log(-3).toFloat) ::
Row(null, null) ::
Row(Double.MaxValue, Float.MinValue):: Nil),
StructType(Seq(StructField("a", DoubleType), StructField("b", FloatType))))
checkAnswer(
testData.select($"a".isNaN, $"b".isNaN),
Row(true, true) :: Row(true, true) :: Row(false, false) :: Row(false, false) :: Nil)
checkAnswer(
testData.select(isNaN($"a"), isNaN($"b")),
Row(true, true) :: Row(true, true) :: Row(false, false) :: Row(false, false) :: Nil)
checkAnswer(
sql("select isnan(15), isnan('invalid')"),
Row(false, false))
}
test("nanvl") {
val testData = sqlContext.createDataFrame(sparkContext.parallelize(
Row(null, 3.0, Double.NaN, Double.PositiveInfinity, 1.0f, 4) :: Nil),
StructType(Seq(StructField("a", DoubleType), StructField("b", DoubleType),
StructField("c", DoubleType), StructField("d", DoubleType),
StructField("e", FloatType), StructField("f", IntegerType))))
checkAnswer(
testData.select(
nanvl($"a", lit(5)), nanvl($"b", lit(10)), nanvl(lit(10), $"b"),
nanvl($"c", lit(null).cast(DoubleType)), nanvl($"d", lit(10)),
nanvl($"b", $"e"), nanvl($"e", $"f")),
Row(null, 3.0, 10.0, null, Double.PositiveInfinity, 3.0, 1.0)
)
testData.registerTempTable("t")
checkAnswer(
sql(
"select nanvl(a, 5), nanvl(b, 10), nanvl(10, b), nanvl(c, null), nanvl(d, 10), " +
" nanvl(b, e), nanvl(e, f) from t"),
Row(null, 3.0, 10.0, null, Double.PositiveInfinity, 3.0, 1.0)
)
}
test("===") {
checkAnswer(
testData2.filter($"a" === 1),
testData2.collect().toSeq.filter(r => r.getInt(0) == 1))
checkAnswer(
testData2.filter($"a" === $"b"),
testData2.collect().toSeq.filter(r => r.getInt(0) == r.getInt(1)))
}
test("<=>") {
checkAnswer(
testData2.filter($"a" === 1),
testData2.collect().toSeq.filter(r => r.getInt(0) == 1))
checkAnswer(
testData2.filter($"a" === $"b"),
testData2.collect().toSeq.filter(r => r.getInt(0) == r.getInt(1)))
}
test("!==") {
val nullData = sqlContext.createDataFrame(sparkContext.parallelize(
Row(1, 1) ::
Row(1, 2) ::
Row(1, null) ::
Row(null, null) :: Nil),
StructType(Seq(StructField("a", IntegerType), StructField("b", IntegerType))))
checkAnswer(
nullData.filter($"b" <=> 1),
Row(1, 1) :: Nil)
checkAnswer(
nullData.filter($"b" <=> null),
Row(1, null) :: Row(null, null) :: Nil)
checkAnswer(
nullData.filter($"a" <=> $"b"),
Row(1, 1) :: Row(null, null) :: Nil)
val nullData2 = sqlContext.createDataFrame(sparkContext.parallelize(
Row("abc") ::
Row(null) ::
Row("xyz") :: Nil),
StructType(Seq(StructField("a", StringType, true))))
checkAnswer(
nullData2.filter($"a" <=> null),
Row(null) :: Nil)
}
test(">") {
checkAnswer(
testData2.filter($"a" > 1),
testData2.collect().toSeq.filter(r => r.getInt(0) > 1))
checkAnswer(
testData2.filter($"a" > $"b"),
testData2.collect().toSeq.filter(r => r.getInt(0) > r.getInt(1)))
}
test(">=") {
checkAnswer(
testData2.filter($"a" >= 1),
testData2.collect().toSeq.filter(r => r.getInt(0) >= 1))
checkAnswer(
testData2.filter($"a" >= $"b"),
testData2.collect().toSeq.filter(r => r.getInt(0) >= r.getInt(1)))
}
test("<") {
checkAnswer(
testData2.filter($"a" < 2),
testData2.collect().toSeq.filter(r => r.getInt(0) < 2))
checkAnswer(
testData2.filter($"a" < $"b"),
testData2.collect().toSeq.filter(r => r.getInt(0) < r.getInt(1)))
}
test("<=") {
checkAnswer(
testData2.filter($"a" <= 2),
testData2.collect().toSeq.filter(r => r.getInt(0) <= 2))
checkAnswer(
testData2.filter($"a" <= $"b"),
testData2.collect().toSeq.filter(r => r.getInt(0) <= r.getInt(1)))
}
test("between") {
val testData = sparkContext.parallelize(
(0, 1, 2) ::
(1, 2, 3) ::
(2, 1, 0) ::
(2, 2, 4) ::
(3, 1, 6) ::
(3, 2, 0) :: Nil).toDF("a", "b", "c")
val expectAnswer = testData.collect().toSeq.
filter(r => r.getInt(0) >= r.getInt(1) && r.getInt(0) <= r.getInt(2))
checkAnswer(testData.filter($"a".between($"b", $"c")), expectAnswer)
}
test("in") {
val df = Seq((1, "x"), (2, "y"), (3, "z")).toDF("a", "b")
checkAnswer(df.filter($"a".isin(1, 2)),
df.collect().toSeq.filter(r => r.getInt(0) == 1 || r.getInt(0) == 2))
checkAnswer(df.filter($"a".isin(3, 2)),
df.collect().toSeq.filter(r => r.getInt(0) == 3 || r.getInt(0) == 2))
checkAnswer(df.filter($"a".isin(3, 1)),
df.collect().toSeq.filter(r => r.getInt(0) == 3 || r.getInt(0) == 1))
checkAnswer(df.filter($"b".isin("y", "x")),
df.collect().toSeq.filter(r => r.getString(1) == "y" || r.getString(1) == "x"))
checkAnswer(df.filter($"b".isin("z", "x")),
df.collect().toSeq.filter(r => r.getString(1) == "z" || r.getString(1) == "x"))
checkAnswer(df.filter($"b".isin("z", "y")),
df.collect().toSeq.filter(r => r.getString(1) == "z" || r.getString(1) == "y"))
val df2 = Seq((1, Seq(1)), (2, Seq(2)), (3, Seq(3))).toDF("a", "b")
intercept[AnalysisException] {
df2.filter($"a".isin($"b"))
}
}
test("&&") {
checkAnswer(
booleanData.filter($"a" && true),
Row(true, false) :: Row(true, true) :: Nil)
checkAnswer(
booleanData.filter($"a" && false),
Nil)
checkAnswer(
booleanData.filter($"a" && $"b"),
Row(true, true) :: Nil)
}
test("||") {
checkAnswer(
booleanData.filter($"a" || true),
booleanData.collect())
checkAnswer(
booleanData.filter($"a" || false),
Row(true, false) :: Row(true, true) :: Nil)
checkAnswer(
booleanData.filter($"a" || $"b"),
Row(false, true) :: Row(true, false) :: Row(true, true) :: Nil)
}
test("SPARK-7321 when conditional statements") {
val testData = (1 to 3).map(i => (i, i.toString)).toDF("key", "value")
checkAnswer(
testData.select(when($"key" === 1, -1).when($"key" === 2, -2).otherwise(0)),
Seq(Row(-1), Row(-2), Row(0))
)
// Without the ending otherwise, return null for unmatched conditions.
// Also test putting a non-literal value in the expression.
checkAnswer(
testData.select(when($"key" === 1, lit(0) - $"key").when($"key" === 2, -2)),
Seq(Row(-1), Row(-2), Row(null))
)
// Test error handling for invalid expressions.
intercept[IllegalArgumentException] { $"key".when($"key" === 1, -1) }
intercept[IllegalArgumentException] { $"key".otherwise(-1) }
intercept[IllegalArgumentException] { when($"key" === 1, -1).otherwise(-1).otherwise(-1) }
}
test("sqrt") {
checkAnswer(
testData.select(sqrt('key)).orderBy('key.asc),
(1 to 100).map(n => Row(math.sqrt(n)))
)
checkAnswer(
testData.select(sqrt('value), 'key).orderBy('key.asc, 'value.asc),
(1 to 100).map(n => Row(math.sqrt(n), n))
)
checkAnswer(
testData.select(sqrt(lit(null))),
(1 to 100).map(_ => Row(null))
)
}
test("upper") {
checkAnswer(
lowerCaseData.select(upper('l)),
('a' to 'd').map(c => Row(c.toString.toUpperCase))
)
checkAnswer(
testData.select(upper('value), 'key),
(1 to 100).map(n => Row(n.toString, n))
)
checkAnswer(
testData.select(upper(lit(null))),
(1 to 100).map(_ => Row(null))
)
checkAnswer(
sql("SELECT upper('aB'), ucase('cDe')"),
Row("AB", "CDE"))
}
test("lower") {
checkAnswer(
upperCaseData.select(lower('L)),
('A' to 'F').map(c => Row(c.toString.toLowerCase))
)
checkAnswer(
testData.select(lower('value), 'key),
(1 to 100).map(n => Row(n.toString, n))
)
checkAnswer(
testData.select(lower(lit(null))),
(1 to 100).map(_ => Row(null))
)
checkAnswer(
sql("SELECT lower('aB'), lcase('cDe')"),
Row("ab", "cde"))
}
test("monotonicallyIncreasingId") {
// Make sure we have 2 partitions, each with 2 records.
val df = sparkContext.parallelize(Seq[Int](), 2).mapPartitions { _ =>
Iterator(Tuple1(1), Tuple1(2))
}.toDF("a")
checkAnswer(
df.select(monotonicallyIncreasingId()),
Row(0L) :: Row(1L) :: Row((1L << 33) + 0L) :: Row((1L << 33) + 1L) :: Nil
)
checkAnswer(
df.select(expr("monotonically_increasing_id()")),
Row(0L) :: Row(1L) :: Row((1L << 33) + 0L) :: Row((1L << 33) + 1L) :: Nil
)
}
test("sparkPartitionId") {
// Make sure we have 2 partitions, each with 2 records.
val df = sparkContext.parallelize(Seq[Int](), 2).mapPartitions { _ =>
Iterator(Tuple1(1), Tuple1(2))
}.toDF("a")
checkAnswer(
df.select(sparkPartitionId()),
Row(0) :: Row(0) :: Row(1) :: Row(1) :: Nil
)
}
test("InputFileName") {
withTempPath { dir =>
val data = sparkContext.parallelize(0 to 10).toDF("id")
data.write.parquet(dir.getCanonicalPath)
val answer = sqlContext.read.parquet(dir.getCanonicalPath).select(inputFileName())
.head.getString(0)
assert(answer.contains(dir.getCanonicalPath))
checkAnswer(data.select(inputFileName()).limit(1), Row(""))
}
}
test("columns can be compared") {
assert('key.desc == 'key.desc)
assert('key.desc != 'key.asc)
}
test("alias with metadata") {
val metadata = new MetadataBuilder()
.putString("originName", "value")
.build()
val schema = testData
.select($"*", col("value").as("abc", metadata))
.schema
assert(schema("value").metadata === Metadata.empty)
assert(schema("abc").metadata === metadata)
}
test("rand") {
val randCol = testData.select($"key", rand(5L).as("rand"))
randCol.columns.length should be (2)
val rows = randCol.collect()
rows.foreach { row =>
assert(row.getDouble(1) <= 1.0)
assert(row.getDouble(1) >= 0.0)
}
def checkNumProjects(df: DataFrame, expectedNumProjects: Int): Unit = {
val projects = df.queryExecution.executedPlan.collect {
case tungstenProject: Project => tungstenProject
}
assert(projects.size === expectedNumProjects)
}
// We first create a plan with two Projects.
// Project [rand + 1 AS rand1, rand - 1 AS rand2]
// Project [key, (Rand 5 + 1) AS rand]
// LogicalRDD [key, value]
// Because Rand function is not deterministic, the column rand is not deterministic.
// So, in the optimizer, we will not collapse Project [rand + 1 AS rand1, rand - 1 AS rand2]
// and Project [key, Rand 5 AS rand]. The final plan still has two Projects.
val dfWithTwoProjects =
testData
.select($"key", (rand(5L) + 1).as("rand"))
.select(($"rand" + 1).as("rand1"), ($"rand" - 1).as("rand2"))
checkNumProjects(dfWithTwoProjects, 2)
// Now, we add one more project rand1 - rand2 on top of the query plan.
// Since rand1 and rand2 are deterministic (they basically apply +/- to the generated
// rand value), we can collapse rand1 - rand2 to the Project generating rand1 and rand2.
// So, the plan will be optimized from ...
// Project [(rand1 - rand2) AS (rand1 - rand2)]
// Project [rand + 1 AS rand1, rand - 1 AS rand2]
// Project [key, (Rand 5 + 1) AS rand]
// LogicalRDD [key, value]
// to ...
// Project [((rand + 1 AS rand1) - (rand - 1 AS rand2)) AS (rand1 - rand2)]
// Project [key, Rand 5 AS rand]
// LogicalRDD [key, value]
val dfWithThreeProjects = dfWithTwoProjects.select($"rand1" - $"rand2")
checkNumProjects(dfWithThreeProjects, 2)
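// Whatever value rand takes, (rand + 1) - (rand - 1) is exactly 2, so every collected row
// should be 2.0; the tolerance below only absorbs floating-point noise.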
dfWithThreeProjects.collect().foreach { row =>
assert(row.getDouble(0) === 2.0 +- 0.0001)
}
}
test("randn") {
val randCol = testData.select('key, randn(5L).as("rand"))
randCol.columns.length should be (2)
val rows = randCol.collect()
rows.foreach { row =>
assert(row.getDouble(1) <= 4.0)
assert(row.getDouble(1) >= -4.0)
}
}
test("bitwiseAND") {
checkAnswer(
testData2.select($"a".bitwiseAND(75)),
testData2.collect().toSeq.map(r => Row(r.getInt(0) & 75)))
checkAnswer(
testData2.select($"a".bitwiseAND($"b").bitwiseAND(22)),
testData2.collect().toSeq.map(r => Row(r.getInt(0) & r.getInt(1) & 22)))
}
test("bitwiseOR") {
checkAnswer(
testData2.select($"a".bitwiseOR(170)),
testData2.collect().toSeq.map(r => Row(r.getInt(0) | 170)))
checkAnswer(
testData2.select($"a".bitwiseOR($"b").bitwiseOR(42)),
testData2.collect().toSeq.map(r => Row(r.getInt(0) | r.getInt(1) | 42)))
}
test("bitwiseXOR") {
checkAnswer(
testData2.select($"a".bitwiseXOR(112)),
testData2.collect().toSeq.map(r => Row(r.getInt(0) ^ 112)))
checkAnswer(
testData2.select($"a".bitwiseXOR($"b").bitwiseXOR(39)),
testData2.collect().toSeq.map(r => Row(r.getInt(0) ^ r.getInt(1) ^ 39)))
}
}
|
chenc10/Spark-PAF
|
sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
|
Scala
|
apache-2.0
| 21,109
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.boot
import akka.actor.{ActorRef, ActorSystem}
import com.typesafe.config.Config
import org.apache.toree.boot.layer._
import org.apache.toree.interpreter.Interpreter
import org.apache.toree.kernel.api.Kernel
import org.apache.toree.kernel.protocol.v5.KernelStatusType._
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.security.KernelSecurityManager
import org.apache.toree.utils.LogLike
import org.apache.spark.repl.Main
import org.zeromq.ZMQ
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.Try
class KernelBootstrap(config: Config) extends LogLike {
this: BareInitialization with ComponentInitialization
with HandlerInitialization with HookInitialization =>
private val DefaultActorSystemName = "spark-kernel-actor-system"
private var actorSystem: ActorSystem = _
private var actorLoader: ActorLoader = _
private var kernelMessageRelayActor: ActorRef = _
private var statusDispatch: ActorRef = _
private var kernel: Kernel = _
private var interpreters: Seq[Interpreter] = Nil
private val rootDir = Main.rootDir
private val outputDir = Main.outputDir
/**
* Initializes all kernel systems.
*/
def initialize() = {
// TODO: Investigate potential to initialize System out/err/in to capture
// Console DynamicVariable initialization (since takes System fields)
// and redirect it to a workable location (like an actor) with the
// thread's current information attached
//
// E.G. System.setOut(customPrintStream) ... all new threads will have
// customPrintStream as their initial Console.out value
//
// ENSURE THAT WE SET THE RIGHT SPARK PROPERTIES
val execUri = System.getenv("SPARK_EXECUTOR_URI")
System.setProperty("spark.repl.class.outputDir", outputDir.getAbsolutePath)
if (execUri != null) {
System.setProperty("spark.executor.uri", execUri)
}
displayVersionInfo()
// Do this first to support shutting down quickly before entire system
// is ready
initializeShutdownHook()
// Initialize the bare minimum to report a starting message
val (actorSystem, actorLoader, kernelMessageRelayActor, statusDispatch) =
initializeBare(
config = config,
actorSystemName = DefaultActorSystemName
)
this.actorSystem = actorSystem
this.actorLoader = actorLoader
this.kernelMessageRelayActor = kernelMessageRelayActor
this.statusDispatch = statusDispatch
// Indicate that the kernel is now starting
publishStatus(KernelStatusType.Starting)
// Initialize components needed elsewhere
val (commStorage, commRegistrar, commManager, interpreter,
kernel, dependencyDownloader,
magicManager, pluginManager, responseMap) =
initializeComponents(
config = config,
actorLoader = actorLoader
)
this.interpreters ++= Seq(interpreter)
this.kernel = kernel
// Initialize our handlers that take care of processing messages
initializeHandlers(
actorSystem = actorSystem,
actorLoader = actorLoader,
kernel = kernel,
interpreter = interpreter,
commRegistrar = commRegistrar,
commStorage = commStorage,
pluginManager = pluginManager,
magicManager = magicManager,
responseMap = responseMap
)
// Initialize our non-shutdown hooks that handle various JVM events
initializeHooks(
interpreter = interpreter
)
logger.debug("Initializing security manager")
System.setSecurityManager(new KernelSecurityManager)
logger.debug("Running postInit for interpreters")
interpreters foreach {_.postInit()}
logger.info("Marking relay as ready for receiving messages")
kernelMessageRelayActor ! true
this
}
/**
* Shuts down all kernel systems.
*/
def shutdown() = {
logger.info("Shutting down interpreters")
Try(interpreters.foreach(_.stop())).failed.foreach(
logger.error("Failed to shutdown interpreters", _: Throwable)
)
logger.info("Shutting down actor system")
Try(actorSystem.terminate()).failed.foreach(
logger.error("Failed to shutdown actor system", _: Throwable)
)
this
}
/**
* Waits for the main actor system to terminate.
*/
def waitForTermination() = {
logger.debug("Waiting for actor system to terminate")
// actorSystem.awaitTermination()
Await.result(actorSystem.whenTerminated, Duration.Inf)
this
}
private def publishStatus(
status: KernelStatusType,
parentHeader: Option[ParentHeader] = None
): Unit = {
parentHeader match {
case Some(header) => statusDispatch ! ((status, header))
case None => statusDispatch ! status
}
}
@inline private def displayVersionInfo() = {
logger.info("Kernel version: " + SparkKernelInfo.implementationVersion)
logger.info("Scala version: " + SparkKernelInfo.scalaVersion)
logger.info("ZeroMQ (JeroMQ) version: " + ZMQ.getVersionString)
}
}
|
chipsenkbeil/incubator-toree
|
kernel/src/main/scala/org/apache/toree/boot/KernelBootstrap.scala
|
Scala
|
apache-2.0
| 6,060
|
package spark.scheduler.cluster
import spark.{Utils, Logging, SparkContext}
import spark.deploy.client.{Client, ClientListener}
import spark.deploy.{Command, ApplicationDescription}
import scala.collection.mutable.HashMap
private[spark] class SparkDeploySchedulerBackend(
scheduler: ClusterScheduler,
sc: SparkContext,
master: String,
appName: String)
extends StandaloneSchedulerBackend(scheduler, sc.env.actorSystem)
with ClientListener
with Logging {
var client: Client = null
var stopping = false
var shutdownCallback : (SparkDeploySchedulerBackend) => Unit = _
val maxCores = System.getProperty("spark.cores.max", Int.MaxValue.toString).toInt
override def start() {
super.start()
// The endpoint for executors to talk to us
val driverUrl = "akka://spark@%s:%s/user/%s".format(
System.getProperty("spark.driver.host"), System.getProperty("spark.driver.port"),
StandaloneSchedulerBackend.ACTOR_NAME)
val args = Seq(driverUrl, "{{WORKER_URL}}", "{{EXECUTOR_ID}}", "{{HOSTNAME}}", "{{CORES}}")
val command = Command("spark.executor.StandaloneExecutorBackend", args, sc.executorEnvs)
val sparkHome = sc.getSparkHome().getOrElse(
throw new IllegalArgumentException("Must supply Spark home for Spark standalone"))
val appDesc = new ApplicationDescription(appName, maxCores, executorMemory, command, sparkHome)
client = new Client(sc.env.actorSystem, master, appDesc, this)
client.start()
}
override def stop() {
stopping = true
super.stop()
client.stop()
if (shutdownCallback != null) {
shutdownCallback(this)
}
}
override def connected(appId: String) {
logInfo("Connected to Spark cluster with app ID " + appId)
}
override def disconnected() {
if (!stopping) {
logError("Disconnected from Spark cluster!")
scheduler.error("Disconnected from Spark cluster")
}
}
override def executorAdded(executorId: String, workerId: String, host: String, cores: Int, memory: Int) {
logInfo("Granted executor ID %s on host %s with %d cores, %s RAM".format(
executorId, host, cores, Utils.memoryMegabytesToString(memory)))
}
override def executorRemoved(executorId: String, message: String, exitStatus: Option[Int]) {
val reason: ExecutorLossReason = exitStatus match {
case Some(code) => ExecutorExited(code)
case None => SlaveLost(message)
}
logInfo("Executor %s removed: %s".format(executorId, message))
removeExecutor(executorId, reason.toString)
}
}
|
prabeesh/Spark-Kestrel
|
core/src/main/scala/spark/scheduler/cluster/SparkDeploySchedulerBackend.scala
|
Scala
|
bsd-3-clause
| 2,545
|
package whisk.docker
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Second, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }
import whisk.docker.test.DockerTestKit
class MongodbServiceSpec extends FlatSpec with Matchers with BeforeAndAfterAll with ScalaFutures
with DockerTestKit
with DockerMongodbService {
implicit val pc = PatienceConfig(Span(20, Seconds), Span(1, Second))
"mongodb node" should "be ready with log line checker" in {
mongodbContainer.isReady().futureValue shouldBe true
}
}
|
kiequoo/docker-it-scala
|
src/test/scala/whisk/docker/MongodbServiceSpec.scala
|
Scala
|
mit
| 572
|
/*
* Copyright (c) 2013-2014, ARM Limited
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.arm.carp.apps.optimizer.passes
import com.arm.carp.pencil._
import scala.collection.mutable.ListBuffer
import scala.collection.mutable.HashMap
/** Perform global constant propagation. */
object GCP extends Pass("gcp") {
private val clones = ListBuffer[Function]()
/**
* Create new version of the function, replacing constant arguments by
* assignments inside function body.
*
* For example for call:
* foo(1, i)
* to the function
* void foo (int a, int i){...}
* the following function will be generated:
* void foo_clone0 (int i)
* {
* int a = 1;
* ....
* }
*/
private class Propagator(val args: Seq[Expression]) extends FunctionCloner {
private val scalars = HashMap[ScalarVariableDef, ScalarExpression]()
override def walkFunction(in: Function) = {
val params = ListBuffer[Variable]()
(in.params, args).zipped.foreach((p, a) => {
(p, a) match {
case (p: ScalarVariableDef, cst: ScalarExpression with Constant) => scalars.put (p, cst)
case _ => params += p
}
})
in.params = walkFunctionArguments(params)
in.ops = walkFunctionBody(in.ops)
Some(in)
}
override def walkScalarVariable (in: ScalarVariableRef) = {
scalars.get(in.variable) match {
case None => super.walkScalarVariable(in)
case Some(cst) => (convertScalar(cst, in.expType), None)
}
}
}
val config = WalkerConfig.expressions
/** Try to create partially-specialized function for each call. */
override def walkCallExpression(in: CallExpression) = {
val args = in.args.map(walkExpression(_)._1)
val nargs = args.filter(!_.isInstanceOf[Constant]) // Non-constant arguments
if (args.size == nargs.size || in.func.ops.isEmpty) {
(in.copy(args = args), None)
} else {
val actor = new Propagator(args)
val copy = actor.cloneFunction(in.func, in.func.name + "_cloned")
clones += copy
set_changed
(new CallExpression(copy, nargs), None)
}
}
override def walkFunctions(in: Traversable[Function]) = {
clones.clear
super.walkFunctions(in) ++ clones.toList
}
}
|
Meinersbur/pencil
|
src/scala/com/arm/carp/apps/optimizer/passes/GCP.scala
|
Scala
|
mit
| 3,319
|
package com.datastax.spark.connector
import org.scalatest.{Matchers, WordSpec}
import com.datastax.spark.connector.cql._
import com.datastax.spark.connector.types.{TimestampType, VarCharType, IntType}
class ColumnSelectorSpec extends WordSpec with Matchers {
"A ColumnSelector#selectFrom method" should {
val column1 = ColumnDef("c1", PartitionKeyColumn, IntType)
val column2 = ColumnDef("c2", PartitionKeyColumn, VarCharType)
val column3 = ColumnDef("c3", ClusteringColumn(0), VarCharType)
val column4 = ColumnDef("c4", ClusteringColumn(1), VarCharType)
val column5 = ColumnDef("c5", RegularColumn, VarCharType)
val column6 = ColumnDef("c6", RegularColumn, TimestampType)
val tableDef = TableDef("keyspace", "table", Seq(column1, column2), Seq(column3, column4), Seq(column5, column6))
"return all columns" in {
val columns = AllColumns.selectFrom(tableDef)
columns should equal(tableDef.columns.map(_.ref))
}
"return partition key columns" in {
val columns = PartitionKeyColumns.selectFrom(tableDef)
columns should equal(tableDef.partitionKey.map(_.ref))
}
"return some columns" in {
val columns = SomeColumns("c1", "c3", "c5").selectFrom(tableDef)
columns.map(_.columnName) should equal(Seq("c1", "c3", "c5"))
}
"throw a NoSuchElementException when selected column name is invalid" in {
a[NoSuchElementException] should be thrownBy {
SomeColumns("c1", "c3", "unknown_column").selectFrom(tableDef)
}
}
}
}
|
Stratio/spark-cassandra-connector
|
spark-cassandra-connector/src/test/scala/com/datastax/spark/connector/ColumnSelectorSpec.scala
|
Scala
|
apache-2.0
| 1,541
|
package org.eigengo.sogx.core
import org.springframework.integration.annotation.{Header, Payload}
import java.util.Collections
import org.eigengo.sogx.ContentTypes._
import org.eigengo.sogx._
import java.util
class ChunkDecoder(mjpegDecoder: MJPEGDecoder) {
/**
* Takes the chunk arriving on a particular correlationId, examines its content type, and attempts to decode
* as many still frames as possible now that we hold all previous chunks plus the newly arrived one.
*
* @param correlationId the correlation id
* @param contentType the content type
* @param chunk the new chunk
* @return collection of individual frames (represented as JPEG data)
*/
def decodeFrame(@Header correlationId: CorrelationId, @Header("content-type") contentType: String,
@Payload chunk: ChunkData): util.Collection[ImageData] = contentType match {
case `video/mjpeg` => decodeMJPEGFrames(correlationId, chunk)
case `image/*` => decodeSingleImage(correlationId, chunk)
}
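// Illustrative call shapes only (the backticked content-type constants come from
// org.eigengo.sogx.ContentTypes; `chunkDecoder`, `correlationId` and `chunk` are placeholder names):
//   chunkDecoder.decodeFrame(correlationId, `video/mjpeg`, chunk) // zero or more decoded JPEG frames
//   chunkDecoder.decodeFrame(correlationId, `image/*`, chunk)     // the single image passed through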
private def decodeSingleImage(correlationId: CorrelationId, chunk: ChunkData): util.Collection[ImageData] = Collections.singletonList(chunk)
private def decodeMJPEGFrames(correlationId: CorrelationId, chunk: ChunkData): util.Collection[ImageData] = mjpegDecoder.decodeFrames(correlationId, chunk)
}
|
eigengo/springone2gx2013
|
jvm/src/main/scala/org/eigengo/sogx/core/ChunkDecoder.scala
|
Scala
|
apache-2.0
| 1,321
|
package com.generativists.thirdway.core
import org.scalatest.{FunSpec, Matchers}
class EventSpec extends FunSpec with Matchers {
describe("An Event") {
it("should be ordered by time then order") {
val activity = new NoOp[String]
Event(0.0, 0, activity) should be < Event(1.0, 0, activity)
Event(0.0, 2, activity) should be > Event(0.0, 1, activity)
Event(5.0, 5, activity) should be > Event(3.0, 1, activity)
}
}
}
|
generativists/ThirdWay
|
src/test/scala/com/generativists/thirdway/core/EventSpec.scala
|
Scala
|
mit
| 455
|
package com.mentatlabs.nsa
package scalac
package options
/* -Xno-forwarders
* ===============
* 2.8.0 - 2.8.2: Do not generate static forwarders in mirror classes
* 2.9.0 - 2.12.0: Do not generate static forwarders in mirror classes.
*/
case object ScalacXNoForwarders
extends ScalacOptionBoolean("-Xno-forwarders", ScalacVersions.`2.8.0`)
|
melezov/sbt-nsa
|
nsa-core/src/main/scala/com/mentatlabs/nsa/scalac/options/advanced/ScalacXNoForwarders.scala
|
Scala
|
bsd-3-clause
| 357
|
package org.pgscala
package builder
package converters
object PGElemConverterBuilder
extends PGConverterBuilder {
val scalaClazz = "Elem"
override val imports = "import scala.xml.Elem"
val defaultValue = "null // no sane default for XML"
}
|
melezov/pgscala
|
builder/src/main/scala/org/pgscala/builder/converters/scala/PGElemConverterBuilder.scala
|
Scala
|
bsd-3-clause
| 254
|
import swing._
import swing.event.{KeyReleased, Key, SelectionChanged}
import swing.BorderPanel.Position._
import swing.ListView.IntervalMode
import javax.swing.Icon
import swing.event.WindowOpened
import scala.concurrent._
import ExecutionContext.Implicits.global
import java.util.concurrent.atomic.AtomicReference
import org.scilab.forge.jlatexmath.{TeXFormula, TeXConstants, TeXIcon}
/*calc_import*/
import Parser._
import PrintCalc._
import Proofsearch.derTree
class SequentListDialog(owner: Window = null, list : List[(Rule, List[Sequent])], session:CalcSession = CalcSession()) extends Dialog(owner) {
var pair:Option[(Rule, List[Sequent])] = None
modal = true
val listView = new ListView[(Icon, Rule, List[Sequent])]() {
val m = session.abbrevMap.toMap.map{case (k,v) => (k, session.stripBrackets(structureToString(v, PrintCalc.ASCII)))}
listData = for((r,l) <- list) yield (new TeXFormula(ruleToString(r) + " - "+ l.map( session.sequentToIconStr(_, m) ).mkString(", ")).createTeXIcon(TeXConstants.STYLE_DISPLAY, 15), r, l)
renderer = ListView.Renderer(_._1)
selection.intervalMode = IntervalMode.Single
}
val b = new Button("Select Sequent") {
enabled = false
}
listenTo(listView.selection, listView.keys)
reactions += {
case KeyReleased(s, Key.Enter, _, _) => if(b.enabled) close()
case SelectionChanged(`listView`) =>
val sel = listView.selection.items(0)
pair = Some((sel._2, sel._3))
if(!b.enabled){
b.enabled = true
b.action = Action("Select Sequent"){close()}
}
}
contents = new BorderPanel {
layout(new Label("Select a rule to apply:")) = North
layout(listView) = Center
layout(new FlowPanel(FlowPanel.Alignment.Right)( b )) = South
}
centerOnScreen()
open()
}
class FormulaInputDialog(owner: Window = null) extends Dialog(owner) {
var formula:Option[Formula] = None
modal = true
val in = new TextField {
text = ""
columns = 25
//horizontalAlignment = Alignment.Right
}
val inL = new Label
listenTo(in.keys)
reactions += {
case KeyReleased(`in`, k, _, _) =>
parseFormula(in.text) match {
case Some(r) =>
formula = Some(r)
val latex = formulaToString(r)
inL.icon = new TeXFormula(latex).createTeXIcon(TeXConstants.STYLE_DISPLAY, 15)
case None => ;
}
}
contents = new BorderPanel {
layout(new BoxPanel(Orientation.Horizontal) {
border = Swing.EmptyBorder(5,5,5,5)
contents += in
contents += inL
}) = Center
layout(new FlowPanel(FlowPanel.Alignment.Right)( Button("Use Formula") { close() } )) = South
}
centerOnScreen()
open()
}
/*/*uncommentL?Agent*/
class AgentInputDialog(owner: Window = null) extends Dialog(owner) {
var agent:Option[Agent] = None
modal = true
val in = new TextField {
text = ""
columns = 25
//horizontalAlignment = Alignment.Right
}
val inL = new Label
listenTo(in.keys)
reactions += {
case KeyReleased(`in`, k, _, _) =>
parseAgent(in.text) match {
case Some(r) =>
agent = Some(r)
val latex = agentToString(r)
inL.icon = new TeXFormula(latex).createTeXIcon(TeXConstants.STYLE_DISPLAY, 15)
case None => ;
}
}
contents = new BorderPanel {
layout(new BoxPanel(Orientation.Horizontal) {
border = Swing.EmptyBorder(5,5,5,5)
contents += in
contents += inL
}) = Center
layout(new FlowPanel(FlowPanel.Alignment.Right)( Button("Use Agent") { close() } )) = South
}
centerOnScreen()
open()
}
/*uncommentR?Agent*/*/
class SequentInputDialog(owner: Window = null) extends Dialog(owner) {
var sequent:Option[Sequent] = None
modal = true
val in = new TextField {
text = ""
columns = 25
//horizontalAlignment = Alignment.Right
}
val inL = new Label
listenTo(in.keys)
reactions += {
case KeyReleased(`in`, Key.Enter, _, _) =>
close()
case KeyReleased(`in`, k, m, _) =>
parseSequent(in.text) match {
case Some(r) =>
sequent = Some(r)
val latex = sequentToString(r)
inL.icon = new TeXFormula(latex).createTeXIcon(TeXConstants.STYLE_DISPLAY, 15)
case None => ;
}
}
contents = new BorderPanel {
layout(new BoxPanel(Orientation.Horizontal) {
border = Swing.EmptyBorder(5,5,5,5)
contents += in
contents += inL
}) = Center
layout(new FlowPanel(FlowPanel.Alignment.Right)( Button("Use Sequent") { close() } )) = South
}
centerOnScreen()
open()
}
class RuleSelectDialog(owner: Window = null, list : List[(Rule, List[Sequent])] ) extends Dialog(owner) {
var pair:Option[(Rule, List[Sequent])] = None
modal = true
val listView = new ListView[(Icon, Rule, List[Sequent])]() {
listData = for((r,l) <- list) yield (new TeXFormula(ruleToString(r)).createTeXIcon(TeXConstants.STYLE_DISPLAY, 15), r, l)
renderer = ListView.Renderer(_._1)
selection.intervalMode = IntervalMode.Single
}
val b = new Button("Select Rule") {
enabled = false
}
listenTo(listView.selection)
reactions += {
case SelectionChanged(`listView`) =>
val sel = listView.selection.items(0)
pair = Some((sel._2, sel._3))
if(!b.enabled){
b.enabled = true
b.action = Action("Select Rule"){close()}
}
}
contents = new BorderPanel {
layout(new Label("Select a rule to apply:")) = North
layout(listView) = Center
layout(new FlowPanel(FlowPanel.Alignment.Right)( b )) = South
}
centerOnScreen()
open()
}
class PSDialog(owner: Window = null, locale : List[Locale] = List(Empty()), seq : Sequent, depth : Int = 5, useRules : List[Rule] = ruleList) extends Dialog(owner) {
// the following code (interruptableFuture) is from http://stackoverflow.com/questions/16020964/cancellation-with-future-and-promise-in-scala
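// The pair returned below is (future, cancel): invoking cancel() interrupts the thread that is
// currently running `fun` and fails the promise with a CancellationException, which is how the
// dialog aborts a long-running proof search.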
def interruptableFuture[T](fun: () => T): (Future[T], () => Boolean) = {
val p = Promise[T]()
val f = p.future
val aref = new AtomicReference[Thread](null)
p tryCompleteWith Future {
val thread = Thread.currentThread
aref.synchronized { aref.set(thread) }
try fun() finally {
val wasInterrupted = (aref.synchronized { aref getAndSet null }) ne thread
// Deal with the interrupted flag of this thread in the desired way; wasInterrupted records whether cancel() interrupted us.
}
}
(f, () => {
aref.synchronized { Option(aref getAndSet null) foreach { _.interrupt() } }
p.tryFailure(new CancellationException)
})
}
var pt:Option[Prooftree] = None
modal = true
var cancel :() => Boolean = {() => true}
listenTo(this)
reactions += {
case WindowOpened(_) =>
val (f, c) = interruptableFuture[Option[Prooftree]] { () =>
derTree(depth, locale, seq, 0, useRules)
}
cancel = c
f.onSuccess {
case result =>
pt = result
close()
}
f.onFailure {
case ex =>
println(ex.getClass)
close()
}
}
override def closeOperation {
cancel()
super.closeOperation
}
contents = new BorderPanel {
layout(new BoxPanel(Orientation.Horizontal) {
border = Swing.EmptyBorder(5,5,5,5)
contents += new Label("Searching for a Prooftree... ")
contents += new ProgressBar{
indeterminate = true
}
}) = Center
layout(new FlowPanel(FlowPanel.Alignment.Right)( Button("Cancel") { cancel(); close() } )) = South
}
centerOnScreen()
open()
}
class MacroAddDialog(owner: Window = null, pt : Prooftree, adding : Boolean = true, macroName : String = "", abbrevs : Option[Map[String, Structure]] = None, editable : Boolean = false) extends Dialog(owner) {
var rule : Option[String] = None
val session = CalcSession()
session.currentPT = pt
if(abbrevs.isDefined) {
session.abbrevMap ++= abbrevs.get
session.abbrevsOn = true
}
preferredSize = new java.awt.Dimension(400, 300)
val ptPanel = new ProofTreePanel(session= session, editable=editable)
ptPanel.build()
modal = true
val in = new TextField {
text = ""
columns = 25
//horizontalAlignment = Alignment.Right
}
contents = new BorderPanel {
if (adding) layout(new Label("Save selected PT as macro?")) = North
else layout(new Label("Macro " + macroName)) = North
layout(new ScrollPane(ptPanel){border = Swing.EmptyBorder(0, 0, 0, 0)}) = Center
if (adding) {
layout(new BoxPanel(Orientation.Horizontal) {
border = Swing.EmptyBorder(5,5,5,5)
contents += new Label("Macro name:")
contents += in
contents += Button("Save") { rule = Some(in.text); close() }
contents += Button("Cancel") { close() }
}) = South
}
//layout(new FlowPanel(FlowPanel.Alignment.Right)( )) = South
}
centerOnScreen()
open()
}
class SequentTreeViewDialog(owner: Window = null, sequent : Sequent, selecting:Boolean = false) extends Dialog(owner) {
preferredSize = new java.awt.Dimension(400, 300)
val seqPanel = new SequentViewPanel(sequent=sequent, editable=selecting)
seqPanel.build()
modal = selecting
lazy val fresh = sequent_fresh_name(seqPanel.sequent)
var tuple:Option[(Sequent, Option[Structure])] = None
contents = new BorderPanel {
layout(new ScrollPane(seqPanel){border = Swing.EmptyBorder(0, 0, 0, 0)}) = Center
if(selecting) layout( Button("Display Selected") { tuple = seqPanel.rebuildSeqent(seqPanel.tree.getRoot(), fresh); close() } ) = South
}
centerOnScreen()
open()
}
|
goodlyrottenapple/calculus-toolbox
|
template/gui/Dialogs.scala
|
Scala
|
mit
| 9,614
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.table
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.execution.command.{MetadataCommand, ShowTablesCommand}
private[sql] case class CarbonShowTablesCommand(showTablesCommand: ShowTablesCommand)
extends MetadataCommand {
override val output: Seq[Attribute] = showTablesCommand.output
override def processMetadata(sparkSession: SparkSession): Seq[Row] = {
val rows = showTablesCommand.run(sparkSession)
val externalCatalog = sparkSession.sharedState.externalCatalog
// this method checks whether the table is a main table or an MV, based on the "isVisible" table property
def isMainTable(db: String, table: String) = {
var isMainTable = true
try {
isMainTable = externalCatalog.getTable(db, table).storage.properties
.getOrElse("isVisible", true).toString.toBoolean
} catch {
case ex: Throwable =>
// ignore the exception for show tables
}
isMainTable
}
// tables will be filtered for all the MVs to show only main tables
rows.filter(row => isMainTable(row.get(0).toString, row.get(1).toString))
}
override protected def opName: String = "SHOW TABLES"
}
|
zzcclp/carbondata
|
integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonShowTablesCommand.scala
|
Scala
|
apache-2.0
| 2,080
|
package dhg.ccg.tag.learn
import dhg.util._
import math.pow
import scalaz.{ \\/ => _, _ }
import scalaz.Scalaz._
import dhg.ccg.cat._
import dhg.ccg.rule._
import dhg.ccg.prob._
import dhg.ccg.tagdict.TagDictionary
/**
* Assign `combinableTransitionMass` proportion of the probability mass of
* the delegate TransitionInitializer to combining transitions, and the rest
* to non-combining transitions.
*
* For example,
* If combinableTransitionMass = 0.95
* and the can-combine total mass is 0.6 and the can't-combine total mass is 0.4,
* Then:
* 1. A transition X that can combine and has probability 0.2 will take one-third
* of the can-combine total mass (0.2/0.6), meaning that in the final result
* it will need to take one-third of the allocated can-combine mass (0.95),
* meaning that p(X) = (0.2/0.6)*0.95 = 0.2*(0.95/0.6)
* 2. A transition X that can't combine and has probability 0.2 will take one-half
* of the can't-combine total mass (0.2/0.4), meaning that in the final result
* it will need to take one-half of the allocated can't-combine mass (0.05),
* meaning that p(X) = (0.2/0.4)*0.05 = 0.2*(0.05/0.4)
*/
class CcgCombinabilityTrInitializer(
delegateInitializer: TransitionInitializer[Cat],
canCombine: CatCanCombine,
combinableTransitionMass: Double = 0.95, // amount of probability mass reserved for combinable transitions
totalSmoothing: LogDouble) extends TransitionInitializer[Cat] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Cat])]], initialTagdict: TagDictionary[Cat]) = {
val tagdict = initialTagdict.withWords(sentences.flatten.map(_._1).toSet).withTags(sentences.flatten.flatMap(_._2).toSet)
val delegate = delegateInitializer.fromKnownSupertagSets(sentences, tagdict)
new CcgCombinabilityTransitionConditionalLogProbabilityDistribution(delegate, tagdict, canCombine, combinableTransitionMass, totalSmoothing)
}
override def toString = f"CcgCombinabilityTrInitializer($delegateInitializer, $canCombine, combinableTransitionMass=$combinableTransitionMass, totalSmoothing=$totalSmoothing)"
}
/**
* This is only used by the above CcgCombinabilityTrInitializer
*/
class CcgCombinabilityTransitionConditionalLogProbabilityDistribution(
delegate: ConditionalLogProbabilityDistribution[Cat, Cat],
tagdict: TagDictionary[Cat],
canCombine: CatCanCombine,
combinableTransitionMass: Double,
totalSmoothing: LogDouble)
extends ConditionalLogProbabilityDistribution[Cat, Cat] {
private[this] def getCombinableSplitSums(t1: Cat): (LogDouble, LogDouble) = {
val (can, cant) =
(tagdict.allTags + tagdict.endTag).mapTo { t2 =>
delegate(t2, t1)
}.toMap.partition { case (t2, p) => canCombine(t1, t2) }
val canTotal = can.values.sum + totalSmoothing
val cantTotal = cant.values.sum + totalSmoothing
// if (Set("NP")(t1.toString)) {
// println(f"CcgCombinabilityTransitionConditionalLogProbabilityDistribution.getCombinableSplitSums: for t1=$t1,")
// println(f" can=$can")
// println(f" cant=$cant")
// println(f" canTotal=$canTotal")
// println(f" cantTotal=$cantTotal")
// }
// println(f"CcgCombinabilityTransitionConditionalLogProbabilityDistribution.getCombinableSplitSums: for t1=$t1, canTotal > combinableTransitionMass (${canTotal.toDouble}%.4f > $combinableTransitionMass%.4f)")
// println(f"CcgCombinabilityTransitionConditionalLogProbabilityDistribution.getCombinableSplitSums: for t1=$t1, cantTotal < 1-combinableTransitionMass (${cantTotal.toDouble}%.4f < ${1 - combinableTransitionMass}%.4f)")
val (canZ, cantZ) =
if (canTotal.toDouble > combinableTransitionMass || cantTotal.toDouble < (1 - combinableTransitionMass)) {
// if (canTotal.toDouble > combinableTransitionMass) println(f"CcgCombinabilityTransitionConditionalLogProbabilityDistribution.getCombinableSplitSums: for t1=$t1, canTotal > combinableTransitionMass (${canTotal.toDouble}%.4f > $combinableTransitionMass%.4f)")
// if (cantTotal.toDouble < (1 - combinableTransitionMass)) println(f"CcgCombinabilityTransitionConditionalLogProbabilityDistribution.getCombinableSplitSums: for t1=$t1, cantTotal < 1-combinableTransitionMass (${cantTotal.toDouble}%.4f < ${1 - combinableTransitionMass}%.4f)")
(LogDouble.one, LogDouble.one) // TODO: THIS IS A TERRIBLE HACK
}
else {
val canZ =
if (canTotal.isZero) {
// println(f"CcgCombinabilityTransitionConditionalLogProbabilityDistribution.getCombinableSplitSums: for t1=$t1, canTotal is zero")
LogDouble.one // TODO: THIS IS A TERRIBLE HACK
}
else {
assert(canTotal.nonZero, f"canTotal is zero for t1=$t1")
canTotal / LogDouble(combinableTransitionMass)
}
val cantZ =
if (cantTotal.isZero) {
// println(f"CcgCombinabilityTransitionConditionalLogProbabilityDistribution.getCombinableSplitSums: for t1=$t1, cantTotal is zero")
LogDouble.one // TODO: THIS IS A TERRIBLE HACK
}
else {
assert(cantTotal.nonZero, f"cantTotal is zero for t1=$t1")
cantTotal / LogDouble(1.0 - combinableTransitionMass)
}
(canZ, cantZ)
}
(canZ, cantZ)
}
//private[this]
val combinableSplitSums: Map[Cat, (LogDouble, LogDouble)] = (tagdict.allTags + tagdict.startTag).mapTo(getCombinableSplitSums).toMap
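// The (canZ, cantZ) normalizers are precomputed for every tag that can serve as the conditioning
// category; apply() falls back to computing them on demand for categories outside this map.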
def apply(x: Cat, given: Cat): LogDouble = {
val (canZ, cantZ) = combinableSplitSums.getOrElse(given, getCombinableSplitSums(given))
val z = if (canCombine(given, x)) canZ else cantZ
val p = delegate(x, given)
p / z
}
def sample(given: Cat): Cat = ???
}
//
//
//
/**
* P(Tag1->Tag2) = P(Tag2), where P(Tag2) is initialized by tagPriorInitializer
*/
class TagPriorTrInitializer[Tag](tagPriorInitializer: TagPriorInitializer[Tag]) extends TransitionInitializer[Tag] {
def fromKnownSupertagSets(sentences: Vector[Vector[(Word, Set[Tag])]], initialTagdict: TagDictionary[Tag]) = {
val tagdict = initialTagdict.withWords(sentences.flatten.map(_._1).toSet).withTags(sentences.flatten.flatMap(_._2).toSet)
new SimpleConditionalLogProbabilityDistribution[Tag, Tag](Map.empty[Tag, LogProbabilityDistribution[Tag]], tagPriorInitializer.fromKnownSupertagSets(sentences, tagdict), Some(tagdict.excludedTags + tagdict.endTag))
}
override def toString = f"TagPriorTrInitializer($tagPriorInitializer)"
}
|
dhgarrette/2015-ccg-parsing
|
src/main/scala/dhg/ccg/tag/learn/CcgHmmInitialization.scala
|
Scala
|
apache-2.0
| 6,601
|
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.pacmin.overlapping
import org.bdgenomics.adam.util.SequenceUtils
import org.bdgenomics.formats.avro.AlignmentRecord
import org.bdgenomics.pacmin.utils.PacMinFunSuite
import scala.math.abs
import scala.util.Random
class OverlapperSuite extends PacMinFunSuite {
val kmerLength = 15
def randomString(seed: Int, len: Int): (String, Random) = {
val r = new Random(seed)
((0 until len).map(i => r.nextInt(4))
.map(i => i match {
case 0 => "A"
case 1 => "C"
case 2 => "G"
case _ => "T"
}).reduceLeft(_ + _), r)
}
test("compute overlap between two strings from the same strand") {
val rString = randomString(10, 1100)._1
val r1 = MinHashableRead(0L, AlignmentRecord.newBuilder()
.setSequence(rString.take(1000))
.build(), kmerLength)
val r2 = MinHashableRead(1L, AlignmentRecord.newBuilder()
.setSequence(rString.takeRight(1000))
.build(), kmerLength)
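// r1 covers bases [0, 1000) and r2 covers [100, 1100) of the same 1100 bp string, so the two
// reads truly overlap over 1000 + 1000 - 1100 = 900 bases, matching estimatedSize below.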
val readAlignmentOpt = Overlapper.alignReads((r1, r2))
assert(readAlignmentOpt.isDefined)
val readAlignment = readAlignmentOpt.get
assert(readAlignment.srcId === 0L)
assert(readAlignment.dstId === 1L)
assert(readAlignment.attr.estimatedSize === 900)
assert(!readAlignment.attr.switchesStrands)
assert(readAlignment.attr.correspondance.filter(p => p._1.length > 10).length === 1)
assert(readAlignment.attr.correspondance.head._1 === (0 to 867))
assert(readAlignment.attr.correspondance.head._2 === (100 to 967))
}
test("only test two reads if they meet ordering restrictions") {
val r1 = MinHashableRead(0L, AlignmentRecord.newBuilder()
.build(), kmerLength)
val r2 = MinHashableRead(1L, AlignmentRecord.newBuilder()
.build(), kmerLength)
assert(Overlapper.alignReads(r2, r1).isEmpty)
assert(Overlapper.alignReads(r1, r1).isEmpty)
assert(Overlapper.alignReads(r2, r2).isEmpty)
}
test("compute overlap between two strings from different strands") {
val rString = randomString(10, 1100)._1
val r1 = MinHashableRead(0L, AlignmentRecord.newBuilder()
.setSequence(rString.take(1000))
.build(), kmerLength)
val r2 = MinHashableRead(1L, AlignmentRecord.newBuilder()
.setSequence(SequenceUtils.reverseComplement(rString.takeRight(1000)))
.build(), kmerLength)
val readAlignmentOpt = Overlapper.alignReads((r1, r2))
assert(readAlignmentOpt.isDefined)
val readAlignment = readAlignmentOpt.get
assert(readAlignment.srcId === 0L)
assert(readAlignment.dstId === 1L)
assert(readAlignment.attr.estimatedSize === 900)
assert(readAlignment.attr.switchesStrands)
assert(readAlignment.attr.correspondance.filter(p => p._1.length > 10).length === 1)
assert(readAlignment.attr.correspondance.head._1 === (0 to 867))
assert(readAlignment.attr.correspondance.head._2 === (33 to 900))
}
sparkTest("compute overlaps for ten 1000 bp reads, all drawn from the same strand") {
val baseString = randomString(123, 2000)._1
var read = -1
val reads = sc.parallelize(baseString
.sliding(1000, 100)
.toSeq
.map(s => {
read += 1
AlignmentRecord.newBuilder()
.setStart(read)
.setSequence(s)
.build()
}))
val overlapGraph = Overlapper(reads, 0.5, 256, kmerLength, None, Some(123456L))
val edges = overlapGraph.edges
assert(edges.count === 27)
assert(!edges.map(ev => ev.attr.switchesStrands).reduce(_ || _))
}
sparkTest("compute overlaps for ten 1000 bp reads, drawn from different strands") {
val (baseString, rv) = randomString(123, 2000)
var read = -1
val reads = sc.parallelize(baseString
.sliding(1000, 100)
.toSeq
.map(s => {
read += 1
val flipStrand = rv.nextBoolean()
val fs = if (flipStrand) {
SequenceUtils.reverseComplement(s)
} else {
s
}
AlignmentRecord.newBuilder()
.setStart(read)
.setSequence(fs)
.build()
}))
val overlapGraph = Overlapper(reads, 0.5, 256, kmerLength, None, Some(123456L))
val edges = overlapGraph.edges
assert(edges.count === 27)
assert(edges.map(ev => ev.attr.switchesStrands).reduce(_ || _))
assert(!edges.map(ev => ev.attr.switchesStrands).reduce(_ && _))
}
}
|
bigdatagenomics/PacMin
|
pacmin-core/src/test/scala/org/bdgenomics/pacmin/overlapping/OverlapperSuite.scala
|
Scala
|
apache-2.0
| 5,156
|
package nodes.learning
import breeze.linalg._
import edu.berkeley.cs.amplab.mlmatrix.{RowPartition, NormalEquations, BlockCoordinateDescent, RowPartitionedMatrix}
import nodes.stats.{StandardScalerModel, StandardScaler}
import org.apache.spark.rdd.RDD
import nodes.util.{VectorSplitter, Identity}
import utils.{MatrixUtils, Stats}
import workflow.{WeightedNode, Transformer, LabelEstimator}
/**
* Transformer that applies a linear model to an input.
* Different from [[LinearMapper]] in that the matrix representing the transformation
* is split into a seq.
*
* @param xs The chunks of the matrix representing the linear model
* @param blockSize blockSize to split data before applying transformations
* @param bOpt optional intercept term to be added
* @param featureScalersOpt optional seq of transformers to be applied before transformation
*/
class BlockLinearMapper(
val xs: Seq[DenseMatrix[Double]],
val blockSize: Int,
val bOpt: Option[DenseVector[Double]] = None,
val featureScalersOpt: Option[Seq[Transformer[DenseVector[Double], DenseVector[Double]]]] = None)
extends Transformer[DenseVector[Double], DenseVector[Double]] {
// Use identity nodes if we don't need to do scaling
val featureScalers = featureScalersOpt.getOrElse(
Seq.fill(xs.length)(new Identity[DenseVector[Double]]))
val vectorSplitter = new VectorSplitter(blockSize)
/**
* Applies the linear model to feature vectors large enough to have been split into several RDDs.
*
* @param in RDD of vectors to apply the model to
* @return the output vectors
*/
override def apply(in: RDD[DenseVector[Double]]): RDD[DenseVector[Double]] = {
apply(vectorSplitter(in))
}
/**
* Applies the linear model to feature vectors large enough to have been split into several RDDs.
*
* @param ins RDD of vectors to apply the model to, split into same size as model blocks
* @return the output vectors
*/
def apply(in: Seq[RDD[DenseVector[Double]]]): RDD[DenseVector[Double]] = {
val res = in.zip(xs.zip(featureScalers)).map {
case (rdd, xScaler) => {
val (x, scaler) = xScaler
val modelBroadcast = rdd.context.broadcast(x)
scaler(rdd).mapPartitions(rows => {
MatrixUtils.rowsToMatrixIter(rows).map(_ * modelBroadcast.value)
})
}
}
val matOut = res.reduceLeft((sum, next) => sum.zip(next).map(c => c._1 + c._2))
// Add the intercept here
val bBroadcast = matOut.context.broadcast(bOpt)
val matOutWithIntercept = matOut.map { mat =>
bOpt.map { b =>
mat(*, ::) :+= b
mat
}.getOrElse(mat)
}
matOutWithIntercept.flatMap(x => MatrixUtils.matrixToRowArray(x))
}
override def apply(in: DenseVector[Double]): DenseVector[Double] = {
val res = vectorSplitter.splitVector(in).zip(xs.zip(featureScalers)).map {
case (in, xScaler) => {
xScaler._1.t * xScaler._2(in)
}
}
val out = res.reduceLeft((sum, next) => sum + next)
bOpt.map { b =>
out += b
out
}.getOrElse(out)
}
/**
* Applies the linear model to feature vectors. After processing chunk i of every vector,
* applies the evaluator to the intermediate output vector.
*
* @param in input RDD
* @param evaluator function applied to the intermediate output vectors
*/
def applyAndEvaluate(in: RDD[DenseVector[Double]], evaluator: (RDD[DenseVector[Double]]) => Unit) {
applyAndEvaluate(vectorSplitter(in), evaluator)
}
/**
* Applies the linear model to feature vectors. After processing chunk i of every vector,
* applies the evaluator to the intermediate output vector.
*
* @param in sequence of input RDD chunks
* @param evaluator function applied to the intermediate output vectors
*/
def applyAndEvaluate(
in: Seq[RDD[DenseVector[Double]]],
evaluator: (RDD[DenseVector[Double]]) => Unit) {
val res = in.zip(xs.zip(featureScalers)).map {
case (rdd, xScaler) => {
val modelBroadcast = rdd.context.broadcast(xScaler._1)
xScaler._2(rdd).mapPartitions(rows => {
MatrixUtils.rowsToMatrixIter(rows).map(_ * modelBroadcast.value)
})
}
}
var prev: Option[RDD[DenseMatrix[Double]]] = None
for (next <- res) {
val sum = prev match {
case Some(prevVal) => prevVal.zip(next).map(c => c._1 + c._2).cache()
case None => next.cache()
}
// NOTE: We should only add the intercept once. So do it right before
// we call the evaluator but don't cache this
val sumAndIntercept = sum.map { mat =>
bOpt.map { b =>
mat(*, ::) :+= b
mat
}.getOrElse(mat)
}
evaluator.apply(sumAndIntercept.flatMap(x => MatrixUtils.matrixToRowArray(x)))
prev.map(_.unpersist())
prev = Some(sum)
}
prev.map(_.unpersist())
}
}
object BlockLeastSquaresEstimator {
def computeCost(
trainingFeatures: Seq[RDD[DenseVector[Double]]],
trainingLabels: RDD[DenseVector[Double]],
lambda: Double,
xs: Seq[DenseMatrix[Double]],
bOpt: Option[DenseVector[Double]]): Double = {
val nTrain = trainingLabels.count
val res = trainingFeatures.zip(xs).map {
case (rdd, x) => {
val modelBroadcast = rdd.context.broadcast(x)
rdd.mapPartitions(rows => {
MatrixUtils.rowsToMatrixIter(rows).map(_ * modelBroadcast.value)
})
}
}
val matOut = res.reduceLeft((sum, next) => sum.zip(next).map(c => c._1 + c._2))
// Add the intercept here
val bBroadcast = matOut.context.broadcast(bOpt)
val matOutWithIntercept = matOut.map { mat =>
bOpt.map { b =>
mat(*, ::) :+= b
mat
}.getOrElse(mat)
}
val axb = matOutWithIntercept.flatMap(x => MatrixUtils.matrixToRowArray(x))
val cost = axb.zip(trainingLabels).map { part =>
val axb = part._1
val labels = part._2
val out = axb - labels
math.pow(norm(out), 2)
}.reduce(_ + _)
if (lambda == 0) {
cost/(2.0*nTrain.toDouble)
} else {
val wNorm = xs.map(part => math.pow(norm(part.toDenseVector), 2)).reduce(_+_)
cost/(2.0*nTrain.toDouble) + lambda/2.0 * wNorm
}
}
}
/**
* Fits a least squares model using block coordinate descent with the provided
* training features and labels.
*
* @param blockSize size of block to use in the solver
* @param numIter number of iterations of the solver to run
* @param lambda L2-regularization to use
* @param numFeaturesOpt optional total number of features; when absent it is determined from the data
*/
class BlockLeastSquaresEstimator(blockSize: Int, numIter: Int, lambda: Double = 0.0, numFeaturesOpt: Option[Int] = None)
extends LabelEstimator[DenseVector[Double], DenseVector[Double], DenseVector[Double]]
with WeightedNode
with CostModel {
override val weight = (3*numIter)+1
/**
* Fit a model using blocks of features and labels provided.
*
* @param trainingFeatures feature blocks to use in RDDs.
* @param trainingLabels RDD of labels to use.
*/
def fit(
trainingFeatures: Seq[RDD[DenseVector[Double]]],
trainingLabels: RDD[DenseVector[Double]]): BlockLinearMapper = {
val labelScaler = new StandardScaler(normalizeStdDev = false).fit(trainingLabels)
// Find out numRows, numCols once
val b = RowPartitionedMatrix.fromArray(
labelScaler.apply(trainingLabels).map(_.toArray)).cache()
val numRows = Some(b.numRows())
val numCols = Some(blockSize.toLong)
// NOTE: This will cause trainingFeatures to be evaluated twice
// which might not be optimal if it's not cached
val featureScalers = trainingFeatures.map { rdd =>
new StandardScaler(normalizeStdDev = false).fit(rdd)
}
val A = trainingFeatures.zip(featureScalers).map { case (rdd, scaler) =>
new RowPartitionedMatrix(scaler.apply(rdd).mapPartitions { rows =>
MatrixUtils.rowsToMatrixIter(rows)
}.map(RowPartition), numRows, numCols)
}
val bcd = new BlockCoordinateDescent()
val models = if (numIter > 1) {
bcd.solveLeastSquaresWithL2(
A, b, Array(lambda), numIter, new NormalEquations()).transpose
} else {
bcd.solveOnePassL2(A.iterator, b, Array(lambda), new NormalEquations()).toSeq.transpose
}
new BlockLinearMapper(models.head, blockSize, Some(labelScaler.mean), Some(featureScalers))
}
/**
* Fit a model after splitting training data into appropriate blocks.
*
* @param trainingFeatures training data to use in one RDD.
* @param trainingLabels labels for training data in a RDD.
*/
override def fit(
trainingFeatures: RDD[DenseVector[Double]],
trainingLabels: RDD[DenseVector[Double]]): BlockLinearMapper = {
val vectorSplitter = new VectorSplitter(blockSize, numFeaturesOpt)
val featureBlocks = vectorSplitter.apply(trainingFeatures)
fit(featureBlocks, trainingLabels)
}
def fit(
trainingFeatures: RDD[DenseVector[Double]],
trainingLabels: RDD[DenseVector[Double]],
numFeaturesOpt: Option[Int]): BlockLinearMapper = {
val vectorSplitter = new VectorSplitter(blockSize, numFeaturesOpt)
val featureBlocks = vectorSplitter.apply(trainingFeatures)
fit(featureBlocks, trainingLabels)
}
override def cost(
n: Long,
d: Int,
k: Int,
sparsity: Double,
numMachines: Int,
cpuWeight: Double,
memWeight: Double,
networkWeight: Double)
: Double = {
val flops = n.toDouble * d * (blockSize + k) / numMachines
val bytesScanned = n.toDouble * d / numMachines + (d.toDouble * k)
val network = 2.0 * (d.toDouble * (blockSize + k)) * math.log(numMachines) / math.log(2.0)
numIter * (math.max(cpuWeight * flops, memWeight * bytesScanned) + networkWeight * network)
}
}
|
tomerk/keystone
|
src/main/scala/nodes/learning/BlockLinearMapper.scala
|
Scala
|
apache-2.0
| 9,593
|
package com.github.mdr.mash.ns.collections
import com.github.mdr.mash.completions.CompletionSpec
import com.github.mdr.mash.functions.{ BoundParams, MashFunction, Parameter, ParameterModel }
import com.github.mdr.mash.inference._
import com.github.mdr.mash.ns.core.objectClass.GetMethod
import com.github.mdr.mash.runtime._
object DeselectFunction extends MashFunction("collections.deselect") {
object Params {
val Fields = Parameter(
nameOpt = Some("fields"),
summaryOpt = Some("Fields from the object"),
isVariadic = true,
variadicAtLeastOne = true)
val Target = Parameter(
nameOpt = Some("target"),
summaryOpt = Some("Object or sequence of objects to remove fields from"))
}
import Params._
val params = ParameterModel(Fields, Target)
def call(boundParams: BoundParams): MashValue = {
val fields: Seq[MashValue] = boundParams.validateSequence(Fields)
boundParams(Target) match {
case xs: MashList ⇒ xs.map(doDeselect(_, fields))
case x ⇒ doDeselect(x, fields)
}
}
private def doDeselect(value: MashValue, fields: Seq[MashValue]): MashValue = value match {
case obj: MashObject ⇒ MashObject.of(obj.immutableFields.filterNot(fields contains _._1))
case _ ⇒ value
}
override def getCompletionSpecs(argPos: Int, arguments: TypedArguments) = {
val argBindings = params.bindTypes(arguments)
val completionSpecOpt =
for {
param ← argBindings.paramAt(argPos)
if param == Fields
targetType ← argBindings.getType(Target)
actualTargetType = targetType match {
case Type.Seq(elemType) ⇒ elemType
case _ ⇒ targetType
}
} yield CompletionSpec.Items(GetMethod.getFields(actualTargetType))
completionSpecOpt.toSeq
}
override def summaryOpt = Some("Remove fields from an object or sequence of objects")
}
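// Illustrative note (not part of the original file): doDeselect above keeps every field whose name is
// not in the given list, so (field names here are made up) an object { foo: 1, bar: 2, baz: 3 } with
// fields = Seq("bar") becomes { foo: 1, baz: 3 }, and a MashList of objects is mapped element-wise.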
|
mdr/mash
|
src/main/scala/com/github/mdr/mash/ns/collections/DeselectFunction.scala
|
Scala
|
mit
| 1,941
|
package org.linnando.sunmap
import google.maps
import google.maps.canvaslayer._
import org.scalajs.dom
import scala.scalajs.js
class SunMapOverlay(map: maps.Map) {
val rectLatLng = new maps.LatLng(40, -95)
val rectWidth = 6.5
private val options = CanvasLayerOptions(
map = map,
resizeHandler = resize,
updateHandler = update
)
val canvasLayer = new CanvasLayer(options)
val context: dom.CanvasRenderingContext2D = canvasLayer.canvas.getContext("2d") match {
case c: dom.CanvasRenderingContext2D => c
case _ => throw new Error
}
def resize(): Unit = {}
def update(): Unit = {
val canvasWidth = canvasLayer.canvas.width
val canvasHeight = canvasLayer.canvas.height
context.clearRect(0, 0, canvasWidth, canvasHeight)
context.setTransform(1, 0, 0, 1, 0, 0)
val scale = math.pow(2, map.getZoom())
context.scale(scale, scale)
val mapProjection = map.getProjection()
val offset = mapProjection.fromLatLngToPoint(canvasLayer.getTopLeft())
context.translate(-offset.x, -offset.y)
drawNightShadow(canvasWidth, canvasHeight, mapProjection)
}
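// Note (added, assuming the standard Maps JavaScript API convention): map.getProjection() maps
// lat/lng into a 256x256 "world" coordinate space at zoom 0, so scaling the context by 2^zoom and
// translating by the projected top-left corner lets the drawing code below work directly in world
// coordinates via fromPointToLatLng.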
private def drawNightShadow(canvasWidth: Int, canvasHeight: Int, mapProjection: maps.Projection) = {
val time = js.Date.now()
//val time = new js.Date(2017,5,22,14,0).getTime()
val calculator = SunCalculator(time)
context.fillStyle = "rgba(0, 0, 0, 0.5)"
(0 until canvasWidth) foreach { x =>
(0 until canvasHeight) foreach { y =>
val latLng = mapProjection.fromPointToLatLng(new maps.Point(x, y))
if (!calculator.isDaylight(latLng.lat(), latLng.lng()))
context.fillRect(x, y, 1, 1)
}
}
}
}
|
linnando/sunmap
|
sunmap/src/main/scala/org/linnando/sunmap/SunMapOverlay.scala
|
Scala
|
apache-2.0
| 1,674
|
package org.littlewings.javaee7.service
import javax.enterprise.context.RequestScoped
@RequestScoped
class CalcService {
def add(left: Int, right: Int): Int =
left + right
def multiply(left: Int, right: Int): Int =
left * right
}
|
kazuhira-r/javaee7-scala-examples
|
bean-manager-lookup/src/main/scala/org/littlewings/javaee7/service/CalcService.scala
|
Scala
|
mit
| 245
|
package uni.big_data.spark.betweenness_centrality
import org.apache.spark.graphx._
import uni.big_data.spark.sssp.SingleSourceShortestPath
/**
* Implements a solution to calculate betweenness centrality values for a given graph.
* [https://en.wikipedia.org/wiki/Betweenness_centrality]
*
* @example Given a simple graph we can calculate the betweenness centrality values:
* {{{
*
* //Define Vertices
* val vertices: RDD[(VertexId,Int)] = sc.parallelize(
* Array(
* (1L,0),
* (2L,0),
* (3L,0),
* (4L,0),
* (5L,0)
* )
* )
*
* //Define edges and edge weights
* val relationships: RDD[Edge[Double]] = sc.parallelize(
* Array(
* Edge(1L, 2L, 1.0),
* Edge(1L, 3L, 1.0),
* Edge(3L, 4L, 1.0),
* Edge(2L, 4L, 1.0),
* Edge(3L, 5L, 1.0),
* Edge(4L, 5L, 2.0)
* )
* )
*
* // Generate a GraphX graph object
* val graph = Graph(vertices, relationships)
*
* // Run calculations
* val centralityValues = BetweennessCentrality.run(graph)
*
* println("ID \t Betweenness Centrality")
* centralityValues.vertices.collect.foreach( (data) => {
* println(s"${data._1} \t ${data._2}")
* })
* }}}
*
*/
object BetweennessCentrality {
/**
*
* Given a weighted directed graph, this calculates the betweenness centrality for every vertex in the graph.
* This value counts the shortest paths between all pairs of vertices that pass through the given vertex
* and can be used as a measure of vertex importance.
* The values are calculated using the following algorithm:
*
* (1) For every vertex v in the graph
* (2) calculate the shortest path to all other vertices
* (3) traverse these shortest paths in opposite direction and increase the counter for every vertex on that path
*
* @tparam T the vertex data type
*
* @param graph the input graph, edge type must be Double (weight).
*
* @return the resulting graph with betweenness centrality values attached to each vertex
*
*/
def run[T](graph: Graph[T,Double]): Graph[Double, Double] ={
//Initialize the betweenness graph with value 0 for every vertex
var centralityValues = graph.mapVertices( (id, _) => 0.0)
var i = 1
// For every vertex in the graph
graph.vertices.collect().foreach { (vertex) =>
//calculate the shortest paths
val sssp = SingleSourceShortestPath.run(graph, vertex._1)
//calculate the centrality values for these paths
val singleCentralityValues = calculateBetwennessCentrality(sssp, vertex._1)
//merge local centrality values with the global ones
centralityValues = centralityValues.joinVertices(singleCentralityValues.vertices)(
(id, oldValue, newValue) => oldValue + newValue
)
}
centralityValues
}
/**
* Given a graph annotated with shortest paths from one source vertex, calculate betweenness centrality values for each vertex.
*
* @param graph the input graph; vertex type is (Double, Array[VertexId]), edge type must be Double (weight).
* @param sourceId the source vertex whose shortest paths are traversed
*
* @return the resulting graph with betweenness centrality values attached to each vertex
*
*/
private def calculateBetwennessCentrality(graph: Graph[(Double,Array[VertexId]),Double], sourceId: VertexId)
: Graph[Double, Double] =
{
//initialize calculation graph (centrality value, value of last message, predecessors)
val centralityValues = graph.mapVertices( (_, value) => (0.0,0.0,value._2))
def vertexProgramm(id:VertexId, nodeData:(Double, Double,Array[VertexId]), newData: Double): (Double, Double, Array[VertexId]) = {
// if it's the initial message, initialize the last message value with 1/#predecessors
if(newData == -1.0)
(0.0, 1.0 / nodeData._3.length, nodeData._3)
// for every other message, add the message value to the centrality value and set the value to be sent next
else
(nodeData._1 + newData, newData / nodeData._3.length, nodeData._3)
}
def sendMsg(triplet: EdgeTriplet[(Double, Double, Array[VertexId]),Double]): Iterator[(VertexId,Double)] = {
// Send the value to be sent to every predecessor
if(triplet.dstAttr._2 > 0 && triplet.dstAttr._3.contains(triplet.srcId) && !(triplet.srcId==sourceId)) {
Iterator( (triplet.srcId, triplet.dstAttr._2), (triplet.dstId,0.0) )
} else {
Iterator.empty
}
}
def msgCombiner(a:Double, b:Double): Double = {
a+b
}
Pregel(centralityValues,-1.0)(
vertexProgramm,sendMsg,msgCombiner
).mapVertices((_,values) => values._1)
}
}
|
DarthMax/Graph-Centrality-Measures
|
spark/betweenness-centrality/src/main/scala/uni/big_data/spark/betweenness_centrality/BetweennessCentrality.scala
|
Scala
|
mit
| 4,640
|
/*
* ____ ____ _____ ____ ___ ____
* | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog.mimir
import org.specs2.mutable._
import com.precog.common._
import com.precog.yggdrasil._
import scalaz._
trait NormalizationSpecs[M[+_]] extends Specification
with EvaluatorTestSupport[M]
with LongIdMemoryDatasetConsumer[M] { self =>
import dag._
import instructions._
import library._
def testEval(graph: DepGraph): Set[SEvent] = {
consumeEval(graph, defaultEvaluationContext) match {
case Success(results) => results
case Failure(error) => throw error
}
}
private val line = Line(1, 1, "")
private def load(path: String) =
dag.AbsoluteLoad(Const(CString(path))(line))(line)
// note: more comprehensive `summary` and `normalization` tests found in muspelheim
"summary" should {
"work with heterogeneous numeric types" in {
val input = dag.Morph1(Summary, load("/hom/numbersHet"))(line)
val result = testEval(input)
result must haveSize(1)
result must haveAllElementsLike {
case (ids, SObject(obj)) => {
ids must haveSize(0)
obj.keySet mustEqual Set("model1")
obj("model1") must beLike { case SObject(summary) =>
summary.keySet mustEqual Set("count", "stdDev", "min", "max", "mean")
summary("count") must beLike { case SDecimal(d) =>
d.toDouble mustEqual(13)
}
summary("mean") must beLike { case SDecimal(d) =>
d.toDouble mustEqual(-37940.51855769231)
}
summary("stdDev") must beLike { case SDecimal(d) =>
d.toDouble mustEqual(133416.18997644997)
}
summary("min") must beLike { case SDecimal(d) =>
d.toDouble mustEqual(-500000)
}
summary("max") must beLike { case SDecimal(d) =>
d.toDouble mustEqual(9999)
}
}
}
case _ => ko
}
}
}
"normalization" should {
"denormalized normalized data with two summaries" in {
val summary1 = dag.Morph1(Summary, load("/hom/numbersHet"))(line)
val summary2 = dag.Morph1(Summary, load("/hom/numbers"))(line)
val model1 = dag.Join(DerefObject, Cross(None),
summary1,
Const(CString("model1"))(line))(line)
val model2 = dag.Join(DerefObject, Cross(None),
summary2,
Const(CString("model1"))(line))(line)
val summaries = dag.IUI(true, model1, model2)(line)
def makeNorm(summary: DepGraph) = {
dag.Morph2(Normalization,
load("hom/numbers"),
summary)(line)
}
val input1 = makeNorm(model1)
val input2 = makeNorm(model2)
val expected = dag.IUI(true, input1, input2)(line)
val input = makeNorm(summaries)
val result = testEval(input)
result must haveSize(10)
val resultValue = result collect {
case (ids, value) if ids.size == 1 => value
}
val expectedValue = testEval(expected) collect {
case (ids, value) if ids.size == 1 => value
}
expectedValue mustEqual resultValue
}
}
}
object NormalizationSpecs extends NormalizationSpecs[test.YId] with test.YIdInstances
|
precog/platform
|
mimir/src/test/scala/com/precog/mimir/NormalizationSpecs.scala
|
Scala
|
agpl-3.0
| 4,265
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.orbeon.apache.xerces.xni
/**
* A structure that holds the components of an XML Namespaces qualified
* name.
*
* To be used correctly, the strings must be identical references for
* equal strings. Within the parser, these values are considered symbols
* and should always be retrieved from the `SymbolTable`.
*/
class QName extends Cloneable {
/**
* The qname prefix. For example, the prefix for the qname "a:foo"
* is "a".
*/
var prefix: String = _
/**
* The qname localpart. For example, the localpart for the qname "a:foo"
* is "foo".
*/
var localpart: String = _
/**
* The qname rawname. For example, the rawname for the qname "a:foo"
* is "a:foo".
*/
var rawname: String = _
/**
* The URI to which the qname prefix is bound. This binding must be
* performed by an XML Namespaces aware processor.
*/
var uri: String = _
clear()
/**
Constructs a QName with the specified values.
*/
def this(
prefix : String,
localpart : String,
rawname : String,
uri : String) = {
this()
setValues(prefix, localpart, rawname, uri)
}
/**
Constructs a copy of the specified QName.
*/
def this(qname: QName) = {
this()
setValues(qname)
}
/**
* Convenience method to set the values of the qname components.
*
* @param qname The qualified name to be copied.
*/
def setValues(qname: QName): Unit = {
prefix = qname.prefix
localpart = qname.localpart
rawname = qname.rawname
uri = qname.uri
}
/**
* Convenience method to set the values of the qname components.
*
* @param prefix The qname prefix. (e.g. "a")
* @param localpart The qname localpart. (e.g. "foo")
* @param rawname The qname rawname. (e.g. "a:foo")
* @param uri The URI binding. (e.g. "http://foo.com/mybinding")
*/
def setValues(
prefix : String,
localpart : String,
rawname : String,
uri : String
): Unit = {
this.prefix = prefix
this.localpart = localpart
this.rawname = rawname
this.uri = uri
}
/**
Clears the values of the qname components.
*/
def clear(): Unit = {
prefix = null
localpart = null
rawname = null
uri = null
}
/**
Returns a clone of this object.
*/
override def clone(): AnyRef = new QName(this)
/**
Returns the hashcode for this object.
*/
override def hashCode(): Int =
if (uri ne null)
uri.hashCode + (if (localpart ne null) localpart.hashCode else 0)
else
if (rawname ne null) rawname.hashCode else 0
/**
Returns true if the two objects are equal.
*/
override def equals(other: Any): Boolean =
other match {
case qname: QName if qname.uri ne null =>
uri == qname.uri && localpart == qname.localpart
case qname: QName =>
rawname == qname.rawname
case _ =>
false
}
/**
Returns a string representation of this object.
*/
override def toString: String = {
val str = new StringBuffer()
var comma = false
if (prefix ne null) {
str.append("prefix=\\"").append(prefix).append('"')
comma = true
}
if (localpart ne null) {
if (comma) {
str.append(',')
}
str.append("localpart=\\"").append(localpart).append('"')
comma = true
}
if (rawname ne null) {
if (comma) {
str.append(',')
}
str.append("rawname=\\"").append(rawname).append('"')
comma = true
}
if (uri ne null) {
if (comma) {
str.append(',')
}
str.append("uri=\\"").append(uri).append('"')
}
str.toString
}
}
|
ebruchez/darius-xml.js
|
xerces/shared/src/main/scala/org/orbeon/apache/xerces/xni/QName.scala
|
Scala
|
apache-2.0
| 4,511
|
package service
import model._
import scala.slick.driver.H2Driver.simple._
import Database.threadLocalSession
trait ActivityService {
def getActivitiesByUser(activityUserName: String, isPublic: Boolean): List[Activity] =
Activities
.innerJoin(Repositories).on((t1, t2) => t1.byRepository(t2.userName, t2.repositoryName))
.filter { case (t1, t2) =>
if(isPublic){
(t1.activityUserName is activityUserName.bind) && (t2.isPrivate is false.bind)
} else {
(t1.activityUserName is activityUserName.bind)
}
}
.sortBy { case (t1, t2) => t1.activityId desc }
.map { case (t1, t2) => t1 }
.take(30)
.list
def getRecentActivities(): List[Activity] =
Activities
.innerJoin(Repositories).on((t1, t2) => t1.byRepository(t2.userName, t2.repositoryName))
.filter { case (t1, t2) => t2.isPrivate is false.bind }
.sortBy { case (t1, t2) => t1.activityId desc }
.map { case (t1, t2) => t1 }
.take(30)
.list
def recordCreateRepositoryActivity(userName: String, repositoryName: String, activityUserName: String): Unit =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"create_repository",
s"[user:${activityUserName}] created [repo:${userName}/${repositoryName}]",
None,
currentDate)
def recordCreateIssueActivity(userName: String, repositoryName: String, activityUserName: String, issueId: Int, title: String): Unit =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"open_issue",
s"[user:${activityUserName}] opened issue [issue:${userName}/${repositoryName}#${issueId}]",
Some(title),
currentDate)
def recordCloseIssueActivity(userName: String, repositoryName: String, activityUserName: String, issueId: Int, title: String): Unit =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"close_issue",
s"[user:${activityUserName}] closed issue [issue:${userName}/${repositoryName}#${issueId}]",
Some(title),
currentDate)
def recordClosePullRequestActivity(userName: String, repositoryName: String, activityUserName: String, issueId: Int, title: String): Unit =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"close_issue",
s"[user:${activityUserName}] closed pull request [pullreq:${userName}/${repositoryName}#${issueId}]",
Some(title),
currentDate)
def recordReopenIssueActivity(userName: String, repositoryName: String, activityUserName: String, issueId: Int, title: String): Unit =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"reopen_issue",
s"[user:${activityUserName}] reopened issue [issue:${userName}/${repositoryName}#${issueId}]",
Some(title),
currentDate)
def recordCommentIssueActivity(userName: String, repositoryName: String, activityUserName: String, issueId: Int, comment: String): Unit =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"comment_issue",
s"[user:${activityUserName}] commented on issue [issue:${userName}/${repositoryName}#${issueId}]",
Some(cut(comment, 200)),
currentDate)
def recordCommentPullRequestActivity(userName: String, repositoryName: String, activityUserName: String, issueId: Int, comment: String): Unit =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"comment_issue",
s"[user:${activityUserName}] commented on pull request [pullreq:${userName}/${repositoryName}#${issueId}]",
Some(cut(comment, 200)),
currentDate)
def recordCreateWikiPageActivity(userName: String, repositoryName: String, activityUserName: String, pageName: String) =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"create_wiki",
s"[user:${activityUserName}] created the [repo:${userName}/${repositoryName}] wiki",
Some(pageName),
currentDate)
def recordEditWikiPageActivity(userName: String, repositoryName: String, activityUserName: String, pageName: String, commitId: String) =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"edit_wiki",
s"[user:${activityUserName}] edited the [repo:${userName}/${repositoryName}] wiki",
Some(pageName + ":" + commitId),
currentDate)
def recordPushActivity(userName: String, repositoryName: String, activityUserName: String,
branchName: String, commits: List[util.JGitUtil.CommitInfo]) =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"push",
s"[user:${activityUserName}] pushed to [branch:${userName}/${repositoryName}#${branchName}] at [repo:${userName}/${repositoryName}]",
Some(commits.map { commit => commit.id + ":" + commit.shortMessage }.mkString("\n")),
currentDate)
def recordCreateTagActivity(userName: String, repositoryName: String, activityUserName: String,
tagName: String, commits: List[util.JGitUtil.CommitInfo]) =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"create_tag",
s"[user:${activityUserName}] created tag [tag:${userName}/${repositoryName}#${tagName}] at [repo:${userName}/${repositoryName}]",
None,
currentDate)
def recordDeleteTagActivity(userName: String, repositoryName: String, activityUserName: String,
tagName: String, commits: List[util.JGitUtil.CommitInfo]) =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"delete_tag",
s"[user:${activityUserName}] deleted tag ${tagName} at [repo:${userName}/${repositoryName}]",
None,
currentDate)
def recordCreateBranchActivity(userName: String, repositoryName: String, activityUserName: String, branchName: String) =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"create_branch",
s"[user:${activityUserName}] created branch [branch:${userName}/${repositoryName}#${branchName}] at [repo:${userName}/${repositoryName}]",
None,
currentDate)
def recordDeleteBranchActivity(userName: String, repositoryName: String, activityUserName: String, branchName: String) =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"delete_branch",
s"[user:${activityUserName}] deleted branch ${branchName} at [repo:${userName}/${repositoryName}]",
None,
currentDate)
def recordForkActivity(userName: String, repositoryName: String, activityUserName: String) =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"fork",
s"[user:${activityUserName}] forked [repo:${userName}/${repositoryName}] to [repo:${activityUserName}/${repositoryName}]",
None,
currentDate)
def recordPullRequestActivity(userName: String, repositoryName: String, activityUserName: String, issueId: Int, title: String): Unit =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"open_pullreq",
s"[user:${activityUserName}] opened pull request [pullreq:${userName}/${repositoryName}#${issueId}]",
Some(title),
currentDate)
def recordMergeActivity(userName: String, repositoryName: String, activityUserName: String, issueId: Int, message: String): Unit =
Activities.autoInc insert(userName, repositoryName, activityUserName,
"merge_pullreq",
s"[user:${activityUserName}] merged pull request [pullreq:${userName}/${repositoryName}#${issueId}]",
Some(message),
currentDate)
private def cut(value: String, length: Int): String =
if(value.length > length) value.substring(0, length) + "..." else value
}
|
ihad28/gitbucket
|
src/main/scala/service/ActivityService.scala
|
Scala
|
apache-2.0
| 7,734
|
/*
* Copyright (C) 2013-2015 by Michael Hombre Brinkmann
*/
package net.twibs.util
object SortOrder extends Enumeration {
type SortOrder = Value
val NotSortable = Value
val Unsorted = Value
val Ascending = Value
val Descending = Value
}
|
hombre/twibs
|
twibs-util/src/main/scala/net/twibs/util/SortOrder.scala
|
Scala
|
apache-2.0
| 251
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator.group
import java.util.UUID
import java.util.concurrent.locks.ReentrantLock
import kafka.common.OffsetAndMetadata
import kafka.utils.{CoreUtils, Logging, nonthreadsafe}
import org.apache.kafka.common.TopicPartition
import scala.collection.{Seq, immutable, mutable}
private[group] sealed trait GroupState
/**
* Group is preparing to rebalance
*
* action: respond to heartbeats with REBALANCE_IN_PROGRESS
* respond to sync group with REBALANCE_IN_PROGRESS
* remove member on leave group request
* park join group requests from new or existing members until all expected members have joined
* allow offset commits from previous generation
* allow offset fetch requests
* transition: some members have joined by the timeout => CompletingRebalance
* all members have left the group => Empty
* group is removed by partition emigration => Dead
*/
private[group] case object PreparingRebalance extends GroupState
/**
* Group is awaiting state assignment from the leader
*
* action: respond to heartbeats with REBALANCE_IN_PROGRESS
* respond to offset commits with REBALANCE_IN_PROGRESS
* park sync group requests from followers until transition to Stable
* allow offset fetch requests
* transition: sync group with state assignment received from leader => Stable
* join group from new member or existing member with updated metadata => PreparingRebalance
* leave group from existing member => PreparingRebalance
* member failure detected => PreparingRebalance
* group is removed by partition emigration => Dead
*/
private[group] case object CompletingRebalance extends GroupState
/**
* Group is stable
*
* action: respond to member heartbeats normally
* respond to sync group from any member with current assignment
* respond to join group from followers with matching metadata with current group metadata
* allow offset commits from member of current generation
* allow offset fetch requests
* transition: member failure detected via heartbeat => PreparingRebalance
* leave group from existing member => PreparingRebalance
* leader join-group received => PreparingRebalance
* follower join-group with new metadata => PreparingRebalance
* group is removed by partition emigration => Dead
*/
private[group] case object Stable extends GroupState
/**
* Group has no more members and its metadata is being removed
*
* action: respond to join group with UNKNOWN_MEMBER_ID
* respond to sync group with UNKNOWN_MEMBER_ID
* respond to heartbeat with UNKNOWN_MEMBER_ID
* respond to leave group with UNKNOWN_MEMBER_ID
* respond to offset commit with UNKNOWN_MEMBER_ID
* allow offset fetch requests
* transition: Dead is a final state before group metadata is cleaned up, so there are no transitions
*/
private[group] case object Dead extends GroupState
/**
* Group has no more members, but lingers until all offsets have expired. This state
* also represents groups which use Kafka only for offset commits and have no members.
*
* action: respond normally to join group from new members
* respond to sync group with UNKNOWN_MEMBER_ID
* respond to heartbeat with UNKNOWN_MEMBER_ID
* respond to leave group with UNKNOWN_MEMBER_ID
* respond to offset commit with UNKNOWN_MEMBER_ID
* allow offset fetch requests
* transition: last offsets removed in periodic expiration task => Dead
* join group from a new member => PreparingRebalance
* group is removed by partition emigration => Dead
* group is removed by expiration => Dead
*/
private[group] case object Empty extends GroupState
private object GroupMetadata {
private val validPreviousStates: Map[GroupState, Set[GroupState]] =
Map(Dead -> Set(Stable, PreparingRebalance, CompletingRebalance, Empty, Dead),
CompletingRebalance -> Set(PreparingRebalance),
Stable -> Set(CompletingRebalance),
PreparingRebalance -> Set(Stable, CompletingRebalance, Empty),
Empty -> Set(PreparingRebalance))
}
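// Illustrative note (not part of the original file): validPreviousStates encodes the state machine
// described in the GroupState comments above, and transitionTo (further below) enforces it via
// assertValidTransition. For example:
//
// Empty -> PreparingRebalance is allowed (Empty is a valid previous state of PreparingRebalance),
// Empty -> Stable is rejected (only CompletingRebalance may move to Stable), so
// assertValidTransition would throw IllegalStateException for that transition.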
/**
* Case class used to represent group metadata for the ListGroups API
*/
case class GroupOverview(groupId: String,
protocolType: String)
/**
* Case class used to represent group metadata for the DescribeGroup API
*/
case class GroupSummary(state: String,
protocolType: String,
protocol: String,
members: List[MemberSummary])
/**
* We cache offset commits along with their commit record offset. This enables us to ensure that the latest offset
* commit is always materialized when we have a mix of transactional and regular offset commits. Without preserving
* information of the commit record offset, compaction of the offsets topic itself may result in the wrong offset commit
* being materialized.
*/
case class CommitRecordMetadataAndOffset(appendedBatchOffset: Option[Long], offsetAndMetadata: OffsetAndMetadata) {
def olderThan(that: CommitRecordMetadataAndOffset) : Boolean = appendedBatchOffset.get < that.appendedBatchOffset.get
}
/**
* Group contains the following metadata:
*
* Membership metadata:
* 1. Members registered in this group
* 2. Current protocol assigned to the group (e.g. partition assignment strategy for consumers)
* 3. Protocol metadata associated with group members
*
* State metadata:
* 1. group state
* 2. generation id
* 3. leader id
*/
@nonthreadsafe
private[group] class GroupMetadata(val groupId: String, initialState: GroupState = Empty) extends Logging {
private var state: GroupState = initialState
private[group] val lock = new ReentrantLock
private val members = new mutable.HashMap[String, MemberMetadata]
private val offsets = new mutable.HashMap[TopicPartition, CommitRecordMetadataAndOffset]
private val pendingOffsetCommits = new mutable.HashMap[TopicPartition, OffsetAndMetadata]
private val pendingTransactionalOffsetCommits = new mutable.HashMap[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]()
private var receivedTransactionalOffsetCommits = false
private var receivedConsumerOffsetCommits = false
var protocolType: Option[String] = None
var generationId = 0
var leaderId: String = null
var protocol: String = null
var newMemberAdded: Boolean = false
def inLock[T](fun: => T): T = CoreUtils.inLock(lock)(fun)
def is(groupState: GroupState) = state == groupState
def not(groupState: GroupState) = state != groupState
def has(memberId: String) = members.contains(memberId)
def get(memberId: String) = members(memberId)
def add(member: MemberMetadata) {
if (members.isEmpty)
this.protocolType = Some(member.protocolType)
assert(groupId == member.groupId)
assert(this.protocolType.orNull == member.protocolType)
assert(supportsProtocols(member.protocols))
if (leaderId == null)
leaderId = member.memberId
members.put(member.memberId, member)
}
def remove(memberId: String) {
members.remove(memberId)
if (memberId == leaderId) {
leaderId = if (members.isEmpty) {
null
} else {
members.keys.head
}
}
}
def currentState = state
def notYetRejoinedMembers = members.values.filter(_.awaitingJoinCallback == null).toList
def allMembers = members.keySet
def allMemberMetadata = members.values.toList
def rebalanceTimeoutMs = members.values.foldLeft(0) { (timeout, member) =>
timeout.max(member.rebalanceTimeoutMs)
}
// TODO: decide if ids should be predictable or random
def generateMemberIdSuffix = UUID.randomUUID().toString
def canRebalance = GroupMetadata.validPreviousStates(PreparingRebalance).contains(state)
def transitionTo(groupState: GroupState) {
assertValidTransition(groupState)
state = groupState
}
def selectProtocol: String = {
if (members.isEmpty)
throw new IllegalStateException("Cannot select protocol for empty group")
// select the protocol for this group which is supported by all members
val candidates = candidateProtocols
// let each member vote for one of the protocols and choose the one with the most votes
val votes: List[(String, Int)] = allMemberMetadata
.map(_.vote(candidates))
.groupBy(identity)
.mapValues(_.size)
.toList
votes.maxBy(_._2)._1
}
private def candidateProtocols = {
// get the set of protocols that are commonly supported by all members
allMemberMetadata
.map(_.protocols)
.reduceLeft((commonProtocols, protocols) => commonProtocols & protocols)
}
def supportsProtocols(memberProtocols: Set[String]) = {
members.isEmpty || (memberProtocols & candidateProtocols).nonEmpty
}
def initNextGeneration() = {
assert(notYetRejoinedMembers == List.empty[MemberMetadata])
if (members.nonEmpty) {
generationId += 1
protocol = selectProtocol
transitionTo(CompletingRebalance)
} else {
generationId += 1
protocol = null
transitionTo(Empty)
}
receivedConsumerOffsetCommits = false
receivedTransactionalOffsetCommits = false
}
def currentMemberMetadata: Map[String, Array[Byte]] = {
if (is(Dead) || is(PreparingRebalance))
throw new IllegalStateException("Cannot obtain member metadata for group in state %s".format(state))
members.map{ case (memberId, memberMetadata) => (memberId, memberMetadata.metadata(protocol))}.toMap
}
def summary: GroupSummary = {
if (is(Stable)) {
val members = this.members.values.map { member => member.summary(protocol) }.toList
GroupSummary(state.toString, protocolType.getOrElse(""), protocol, members)
} else {
val members = this.members.values.map{ member => member.summaryNoMetadata() }.toList
GroupSummary(state.toString, protocolType.getOrElse(""), GroupCoordinator.NoProtocol, members)
}
}
def overview: GroupOverview = {
GroupOverview(groupId, protocolType.getOrElse(""))
}
def initializeOffsets(offsets: collection.Map[TopicPartition, CommitRecordMetadataAndOffset],
pendingTxnOffsets: Map[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]) {
this.offsets ++= offsets
this.pendingTransactionalOffsetCommits ++= pendingTxnOffsets
}
def onOffsetCommitAppend(topicPartition: TopicPartition, offsetWithCommitRecordMetadata: CommitRecordMetadataAndOffset) {
if (pendingOffsetCommits.contains(topicPartition)) {
if (offsetWithCommitRecordMetadata.appendedBatchOffset.isEmpty)
throw new IllegalStateException("Cannot complete offset commit write without providing the metadata of the record " +
"in the log.")
if (!offsets.contains(topicPartition) || offsets(topicPartition).olderThan(offsetWithCommitRecordMetadata))
offsets.put(topicPartition, offsetWithCommitRecordMetadata)
}
pendingOffsetCommits.get(topicPartition) match {
case Some(stagedOffset) if offsetWithCommitRecordMetadata.offsetAndMetadata == stagedOffset =>
pendingOffsetCommits.remove(topicPartition)
case _ =>
// The pendingOffsetCommits for this partition could be empty if the topic was deleted, in which case
// its entries would be removed from the cache by the `removeOffsets` method.
}
}
def failPendingOffsetWrite(topicPartition: TopicPartition, offset: OffsetAndMetadata): Unit = {
pendingOffsetCommits.get(topicPartition) match {
case Some(pendingOffset) if offset == pendingOffset => pendingOffsetCommits.remove(topicPartition)
case _ =>
}
}
def prepareOffsetCommit(offsets: Map[TopicPartition, OffsetAndMetadata]) {
receivedConsumerOffsetCommits = true
pendingOffsetCommits ++= offsets
}
def prepareTxnOffsetCommit(producerId: Long, offsets: Map[TopicPartition, OffsetAndMetadata]) {
trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offsets $offsets is pending")
receivedTransactionalOffsetCommits = true
val producerOffsets = pendingTransactionalOffsetCommits.getOrElseUpdate(producerId,
mutable.Map.empty[TopicPartition, CommitRecordMetadataAndOffset])
offsets.foreach { case (topicPartition, offsetAndMetadata) =>
producerOffsets.put(topicPartition, CommitRecordMetadataAndOffset(None, offsetAndMetadata))
}
}
def hasReceivedConsistentOffsetCommits : Boolean = {
!receivedConsumerOffsetCommits || !receivedTransactionalOffsetCommits
}
/* Remove a pending transactional offset commit if the actual offset commit record was not written to the log.
* We will return an error and the client will retry the request, potentially to a different coordinator.
*/
def failPendingTxnOffsetCommit(producerId: Long, topicPartition: TopicPartition): Unit = {
pendingTransactionalOffsetCommits.get(producerId) match {
case Some(pendingOffsets) =>
val pendingOffsetCommit = pendingOffsets.remove(topicPartition)
trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offsets $pendingOffsetCommit failed " +
s"to be appended to the log")
if (pendingOffsets.isEmpty)
pendingTransactionalOffsetCommits.remove(producerId)
case _ =>
// We may hit this case if the partition in question has emigrated already.
}
}
def onTxnOffsetCommitAppend(producerId: Long, topicPartition: TopicPartition,
commitRecordMetadataAndOffset: CommitRecordMetadataAndOffset) {
pendingTransactionalOffsetCommits.get(producerId) match {
case Some(pendingOffset) =>
if (pendingOffset.contains(topicPartition)
&& pendingOffset(topicPartition).offsetAndMetadata == commitRecordMetadataAndOffset.offsetAndMetadata)
pendingOffset.update(topicPartition, commitRecordMetadataAndOffset)
case _ =>
// We may hit this case if the partition in question has emigrated.
}
}
/* Complete a pending transactional offset commit. This is called after a commit or abort marker is fully written
* to the log.
*/
def completePendingTxnOffsetCommit(producerId: Long, isCommit: Boolean): Unit = {
val pendingOffsetsOpt = pendingTransactionalOffsetCommits.remove(producerId)
if (isCommit) {
pendingOffsetsOpt.foreach { pendingOffsets =>
pendingOffsets.foreach { case (topicPartition, commitRecordMetadataAndOffset) =>
if (commitRecordMetadataAndOffset.appendedBatchOffset.isEmpty)
throw new IllegalStateException(s"Trying to complete a transactional offset commit for producerId $producerId " +
s"and groupId $groupId even though the offset commit record itself hasn't been appended to the log.")
val currentOffsetOpt = offsets.get(topicPartition)
if (currentOffsetOpt.forall(_.olderThan(commitRecordMetadataAndOffset))) {
trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offset $commitRecordMetadataAndOffset " +
"committed and loaded into the cache.")
offsets.put(topicPartition, commitRecordMetadataAndOffset)
} else {
trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offset $commitRecordMetadataAndOffset " +
s"committed, but not loaded since its offset is older than current offset $currentOffsetOpt.")
}
}
}
} else {
trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offsets $pendingOffsetsOpt aborted")
}
}
def activeProducers = pendingTransactionalOffsetCommits.keySet
def hasPendingOffsetCommitsFromProducer(producerId: Long) =
pendingTransactionalOffsetCommits.contains(producerId)
def removeOffsets(topicPartitions: Seq[TopicPartition]): immutable.Map[TopicPartition, OffsetAndMetadata] = {
topicPartitions.flatMap { topicPartition =>
pendingOffsetCommits.remove(topicPartition)
pendingTransactionalOffsetCommits.foreach { case (_, pendingOffsets) =>
pendingOffsets.remove(topicPartition)
}
val removedOffset = offsets.remove(topicPartition)
removedOffset.map(topicPartition -> _.offsetAndMetadata)
}.toMap
}
def removeExpiredOffsets(startMs: Long) : Map[TopicPartition, OffsetAndMetadata] = {
val expiredOffsets = offsets
.filter {
case (topicPartition, commitRecordMetadataAndOffset) =>
commitRecordMetadataAndOffset.offsetAndMetadata.expireTimestamp < startMs && !pendingOffsetCommits.contains(topicPartition)
}
.map {
case (topicPartition, commitRecordOffsetAndMetadata) =>
(topicPartition, commitRecordOffsetAndMetadata.offsetAndMetadata)
}
offsets --= expiredOffsets.keySet
expiredOffsets.toMap
}
def allOffsets = offsets.map { case (topicPartition, commitRecordMetadataAndOffset) =>
(topicPartition, commitRecordMetadataAndOffset.offsetAndMetadata)
}.toMap
def offset(topicPartition: TopicPartition): Option[OffsetAndMetadata] = offsets.get(topicPartition).map(_.offsetAndMetadata)
// visible for testing
private[group] def offsetWithRecordMetadata(topicPartition: TopicPartition): Option[CommitRecordMetadataAndOffset] = offsets.get(topicPartition)
def numOffsets = offsets.size
def hasOffsets = offsets.nonEmpty || pendingOffsetCommits.nonEmpty || pendingTransactionalOffsetCommits.nonEmpty
private def assertValidTransition(targetState: GroupState) {
if (!GroupMetadata.validPreviousStates(targetState).contains(state))
throw new IllegalStateException("Group %s should be in the %s states before moving to %s state. Instead it is in %s state"
.format(groupId, GroupMetadata.validPreviousStates(targetState).mkString(","), targetState, state))
}
override def toString: String = {
"GroupMetadata(" +
s"groupId=$groupId, " +
s"generation=$generationId, " +
s"protocolType=$protocolType, " +
s"currentState=$currentState, " +
s"members=$members)"
}
}
|
themarkypantz/kafka
|
core/src/main/scala/kafka/coordinator/group/GroupMetadata.scala
|
Scala
|
apache-2.0
| 19,108
|
/* Title: Pure/term_xml.scala
Author: Makarius
XML data representation of lambda terms.
*/
package isabelle
object Term_XML
{
import Term._
object Encode
{
import XML.Encode._
val sort: T[Sort] = list(string)
def typ: T[Typ] =
variant[Typ](List(
{ case Type(a, b) => (List(a), list(typ)(b)) },
{ case TFree(a, b) => (List(a), sort(b)) },
{ case TVar((a, b), c) => (List(a, int_atom(b)), sort(c)) }))
def term: T[Term] =
variant[Term](List(
{ case Const(a, b) => (List(a), typ(b)) },
{ case Free(a, b) => (List(a), typ(b)) },
{ case Var((a, b), c) => (List(a, int_atom(b)), typ(c)) },
{ case Bound(a) => (List(int_atom(a)), Nil) },
{ case Abs(a, b, c) => (List(a), pair(typ, term)(b, c)) },
{ case App(a, b) => (Nil, pair(term, term)(a, b)) }))
}
object Decode
{
import XML.Decode._
val sort: T[Sort] = list(string)
def typ: T[Typ] =
variant[Typ](List(
{ case (List(a), b) => Type(a, list(typ)(b)) },
{ case (List(a), b) => TFree(a, sort(b)) },
{ case (List(a, b), c) => TVar((a, int_atom(b)), sort(c)) }))
def term: T[Term] =
variant[Term](List(
{ case (List(a), b) => Const(a, typ(b)) },
{ case (List(a), b) => Free(a, typ(b)) },
{ case (List(a, b), c) => Var((a, int_atom(b)), typ(c)) },
{ case (List(a), Nil) => Bound(int_atom(a)) },
{ case (List(a), b) => val (c, d) = pair(typ, term)(b); Abs(a, c, d) },
{ case (Nil, a) => val (b, c) = pair(term, term)(a); App(b, c) }))
}
}
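// Illustrative sketch (not part of the original file): Encode.term and Decode.term are intended to be
// mutually inverse, e.g. a small lambda term round-trips through its XML body representation
// (the type name "dummyT" below is made up):
//
// val t: Term = Abs("x", Type("dummyT", Nil), Bound(0))
// val body = Term_XML.Encode.term(t)
// assert(Term_XML.Decode.term(body) == t)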
|
MerelyAPseudonym/isabelle
|
src/Pure/term_xml.scala
|
Scala
|
bsd-3-clause
| 1,620
|
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers
import play.api.mvc.{Action, Controller, InjectedController}
import javax.inject._
/**
* Created with IntelliJ IDEA.
* User: israel
* Date: 10/23/13
* Time: 11:05 AM
* To change this template use File | Settings | File Templates.
*/
@Singleton
class Help @Inject() extends InjectedController {
def handleHelp(page: String) = Action { request =>
page match {
case "in" => Ok(views.txt._in(request))
case "out" => Ok(views.txt._out(request))
case "cmd" => Ok(views.txt._cmd(request))
case "sp" => Ok(views.txt._sp(request))
case "sparql" => Ok(views.html._sparql(request))
case "zz" => Ok(views.txt.zz(request))
case _ => NotFound
}
}
def iiBlockedRequests = Action {
// TODO: this is not enough, since one can submit a bulk of infotons or an RDF document which contains an infoton named "ii" or "ii/*"
req =>
BadRequest("\\"ii\\" is reserved for infoton id's retrieval.")
}
}
|
hochgi/CM-Well
|
server/cmwell-ws/app/controllers/Help.scala
|
Scala
|
apache-2.0
| 1,618
|
package io.github.datamoth.dm.imp
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File
import java.io.FileWriter
import java.io.FileReader
import java.nio.file.Files
import java.nio.file.Paths
import java.nio.charset.StandardCharsets
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import com.github.mustachejava.DefaultMustacheFactory
import com.github.mustachejava.DefaultMustacheVisitor
import com.github.mustachejava.Mustache
import com.github.mustachejava.MustacheException
import com.github.mustachejava.MustacheVisitor
import com.github.mustachejava.TemplateContext
import com.github.mustachejava.codes.ExtendNameCode
import com.github.mustachejava.codes.ValueCode
import com.github.mustachejava.reflect.MissingWrapper
import com.github.mustachejava.reflect.ReflectionObjectHandler
import com.github.mustachejava.util.Wrapper
import scala.collection.JavaConverters._
import io.github.datamoth.dm.api
object Renderer {
case class Result(errors: List[api.Error], plugins: List[api.Plugin], files: List[api.Location])
case class Config(sysvars: com.typesafe.config.Config)
def create(sysvars: com.typesafe.config.Config): Renderer = {
new Renderer(Config(sysvars))
}
}
class Renderer(cfg: Renderer.Config) {
import scala.collection.mutable.HashMap
import scala.collection.mutable.ArrayBuffer
private val L = LoggerFactory.getLogger(classOf[Renderer])
private type Scope = java.util.Map[String, Object]
def render(srcDir: File, dstDir: File, profile: String): Renderer.Result = {
L.debug("Render: profile {}, from {} into {}", profile, srcDir, dstDir)
errors.clear()
renderTree(profile, srcDir, dstDir)
return Renderer.Result(errors = errors.toList, plugins = plugins.toList, files = files.toList)
}
private def renderTree(profile: String, srcDir: File, dstDir: File): Unit = {
val srcRoot = srcDir
val dstRoot = dstDir
def walk(srcDir: File, dstDir: File, parentCfg: Config, prof: String, tr: (File, File, Scope) => Unit): Unit = {
def transform = (f: File, scope: Scope) => {
val relPath = srcDir.toURI.relativize(f.toURI).getPath
val srcFile = srcDir.toPath.resolve(relPath).toFile
val dstFile = dstDir.toPath.resolve(relPath).toFile
val curPath = new File(dstRoot.toURI.relativize(dstFile.toURI).getPath).getParentFile
scope.put("CURRENT_DIR", if (curPath == null) "" else curPath.getName)
scope.put("CURRENT_PATH", new File(scope.get("PROJECT_DIR").toString, if (curPath == null) "" else curPath.toString).toString)
tr(srcFile, dstFile, scope)
}
def walk(dir: File, parentCfg: Config): Unit = {
if (dir.getName.startsWith(".")) return
val cfgPath = new File(dir, ".conf")
var cnf = parentCfg
if (cfgPath.exists) {
L.debug("Try to load config: {}", cfgPath)
cnf = ConfigFactory.parseFile(cfgPath).withFallback(parentCfg).resolve
if (cnf.hasPath(prof)) {
cnf = cnf.getConfig(prof).withFallback(parentCfg)
}
cnf = cnf.resolve
}
dir.listFiles.filter(_.isDirectory).map(walk(_, cnf))
dir.listFiles.filter(_.isFile).map(transform(_, cnf.root().unwrapped))
}
walk(srcDir, parentCfg)
}
walk(srcDir, dstDir, cfg.sysvars, profile, (srcFile: File, dstFile: File, scope: Scope) => {
val name = srcDir.toURI.relativize(srcFile.toURI).getPath
if (name.endsWith(".plug") || name.endsWith(".plug")) {
val plugName = srcFile.getName.split("\\\\.(?=[^\\\\.]+$)")(0)
L.debug("Render plugin: {}", srcFile)
val content = new String(Files.readAllBytes(srcFile.toPath), StandardCharsets.UTF_8)
val conf = ConfigFactory.parseFile(srcFile).withFallback(ConfigFactory.parseMap(scope)).resolve
val pdir = new File(srcDir, conf.getString("run"))
val plugConf = conf.getConfig(s"with.${profile}").root.unwrapped
plugins += api.Plugin(location = api.Location(name), run = api.Location(conf.getString("run")), conf = Z.render(conf))
walk(pdir, dstFile.getParentFile, cfg.sysvars, profile, (srcFile: File, dstFile: File, sc: Scope) => {
scope.asScala.keys.foreach{ k => if (!sc.containsKey(k)) { sc.put(k, scope.get(k)) } }
plugConf.asScala.keys.foreach{ k => sc.put(k, plugConf.get(k)) }
sc.put("PLUGIN_INSTANCE_PATH", name.split("\\\\.(?=[^\\\\.]+$)")(0))
sc.put("PLUGIN_INSTANCE_NAME", plugName)
renderFile(srcFile, dstFile, name, sc)
})
} else {
renderFile(srcFile, dstFile, name, scope)
}
})
}
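// Illustrative note (not part of the original file): each directory may carry a ".conf" that is merged
// over its parent's config, and a section named after the active profile takes precedence. For example
// (keys below are made up), if a parent directory's .conf sets db.host = "localhost" and a child
// directory's .conf contains prod { db.host = "db.internal" }, then templates under the child directory
// see {{db.host}} as "db.internal" when rendering the "prod" profile and as "localhost" otherwise.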
private def rndstr(len: Int): String = scala.util.Random.alphanumeric.take(len).mkString
private def renderFile(srcFile: File, dstFile: File, name: String, scope: Scope): Unit = {
dstFile.getParentFile.mkdirs()
for (
out <- AsResource(new FileWriter(dstFile));
tml <- AsResource(new FileReader(srcFile))
) {
val muf = new DefaultMustacheFactory {
override def encode(value: String, writer: java.io.Writer): Unit = {
value match {
case "_rnd_alnum_5_" => writer.write(rndstr(5))
case "_rnd_alnum_10_" => writer.write(rndstr(10))
case "_rnd_alnum_15_" => writer.write(rndstr(15))
case "_rnd_alnum_20_" => writer.write(rndstr(20))
case _ => writer.write(value)
}
}
override def createMustacheVisitor(): MustacheVisitor = new DefaultMustacheVisitor(this) {
override def value(tc: TemplateContext, path: String, encoded: Boolean): Unit = {
if (!contains(path, scope)) {
// TODO: Systematize error codes and kinds
errors += api.VarNotFoundError(
location = Some(api.Location(tc.file, Some(tc.line), None))
, code = -1
, kind = "render error"
, message = s"Variable not found: ${path}"
)
}
list.add(new ValueCode(tc, df, path, encoded))
}
override def name(tc: TemplateContext, path: String, mustache: Mustache): Unit = {
list.add(new ExtendNameCode(tc, df, mustache, path));
}
}
}
// muf.setObjectHandler(new ReflectionObjectHandler{})
val mus = muf.compile(tml, name)
try {
mus.execute(out, scope)
files += api.Location(file = name)
} catch {
case e: MustacheException =>
errors += api.RenderError(
location = Some(api.Location(name, None, None))
, code = -1
, kind = "render error"
, message = e.getMessage
)
}
out.flush
}
}
private val errors = ArrayBuffer[api.Error]()
private val plugins = ArrayBuffer[api.Plugin]()
private val files = ArrayBuffer[api.Location]()
// Check whether a dotted variable path (e.g. "a.b.c") can be resolved in the nested scope map.
private def contains(path: String, map: java.util.Map[String, Object]): Boolean = {
// String.split expects a regex, so the dot must be escaped to split on literal dots
val items = path.split("\\\\.")
var next: java.util.Map[String, Object] = map
for (i <- items.indices) {
val value = next.get(items(i))
if (value == null) {
return false
}
if (i < items.length - 1) {
value match {
case m: java.util.Map[_, _] => next = m.asInstanceOf[java.util.Map[String, Object]]
case _ => return false
}
}
}
return true
}
}
|
datamoth/datamoth
|
datamot/src/main/scala/io/github/datamoth/dm/imp/Renderer.scala
|
Scala
|
apache-2.0
| 6,763
|
package de.m7w3.signal
import javafx.stage.Stage
import de.m7w3.signal.account.AccountHelper
import de.m7w3.signal.events.GroupsSyncedEvent
import de.m7w3.signal.store.model.{Group, GroupMember, GroupWithMembers}
import de.m7w3.signal.store.{DBActionRunner, SignalDesktopApplicationStore, SignalDesktopProtocolStore}
import org.junit.Test
import org.mockito.Mockito
import org.scalatest.concurrent.Eventually
import org.scalatest.junit.{AssertionsForJUnit, JUnitSuiteLike}
import org.scalatest.mockito.MockitoSugar
import org.testfx.api.FxAssert._
import org.testfx.framework.junit.ApplicationTest
import org.testfx.matcher.base.NodeMatchers._
import org.testfx.util.WaitForAsyncUtils
import scala.reflect.runtime.universe._
import scalafx.Includes._
import scalafx.scene.Scene
import scalafx.stage.{Stage => SStage}
import scalafxml.core.{DependenciesByType, FXMLView}
class ChatsListTest extends ApplicationTest
with JUnitSuiteLike
with AssertionsForJUnit
with MockitoSugar
with Eventually {
val groups: Seq[GroupWithMembers] = Seq(
GroupWithMembers(
Group(1, Array[Byte](1, 2, 3), Some("group1"), None, true),
Seq(
GroupMember("member1", 1),
GroupMember("member2", 2)
)
),
GroupWithMembers(
Group(2, Array[Byte](1, 2, 3, 4), Some("group2"), None, true),
Seq(
GroupMember("member2", 1),
GroupMember("member3", 2)
)
),
GroupWithMembers(
Group(3, Array[Byte](1, 2, 3, 4, 5), Some("group3"), None, false),
Seq(
GroupMember("member2", 1),
GroupMember("member3", 2),
GroupMember("member4", 3)
)
)
)
val appStore: SignalDesktopApplicationStore = mock[SignalDesktopApplicationStore]
val appContext: ApplicationContext = new ApplicationContext(
TestMessageSender,
mock[AccountHelper],
mock[DBActionRunner],
mock[SignalDesktopProtocolStore],
appStore
)
override def start(stage: Stage): Unit = {
Mockito.when(appStore.getGroups).thenReturn(groups.slice(0,2))
Mockito.when(appStore.getContacts).thenReturn(Seq.empty)
val lxmlUri = getClass.getResource("/de/m7w3/signal/recent_chats_list.fxml")
require(lxmlUri != null, "lxmlUri not found")
val dependencies = Map[Type, Any](
typeOf[ApplicationContext] -> appContext
)
val root = FXMLView(lxmlUri, new DependenciesByType(dependencies))
val scene = new Scene(root)
val sStage = new SStage(stage)
sStage.setScene(scene)
sStage.show()
}
@Test
def showUpdateGroupsOnGroupSync(): Unit = {
// just assert that stuff is there
verifyThat("#chatsListView", isVisible)
verifyThat("#newChatBtn", isVisible)
clickOn("#newChatBtn")
WaitForAsyncUtils.waitForFxEvents()
verifyThat("group1", isVisible)
verifyThat("group2", isVisible)
Mockito.when(appStore.getGroups).thenReturn(groups)
appContext.publishEvent(GroupsSyncedEvent)
WaitForAsyncUtils.waitForFxEvents()
verifyThat("group1", isVisible)
verifyThat("group2", isVisible)
verifyThat("group3", isVisible)
}
}
|
ayoub-benali/signal-desktop-client
|
src/test/scala/de/m7w3/signal/ChatsListTest.scala
|
Scala
|
apache-2.0
| 3,089
|
package aecor.example.account
import aecor.MonadActionReject
import aecor.data.Folded.syntax._
import aecor.data._
import aecor.example.account.AccountEvent._
import aecor.example.account.EventsourcedAlgebra.AccountState
import aecor.example.account.Rejection._
import aecor.example.common.Amount
import cats.Monad
import cats.implicits._
final class EventsourcedAlgebra[F[_]](
implicit F: MonadActionReject[F, Option[AccountState], AccountEvent, Rejection]
) extends Algebra[F] {
import F._
override def open(checkBalance: Boolean): F[Unit] =
read.flatMap {
case None =>
append(AccountOpened(checkBalance))
case Some(_) =>
ignore
}
override def credit(transactionId: AccountTransactionId, amount: Amount): F[Unit] =
read.flatMap {
case Some(account) =>
if (account.hasProcessedTransaction(transactionId)) {
ignore
} else {
append(AccountCredited(transactionId, amount))
}
case None =>
reject(AccountDoesNotExist)
}
override def debit(transactionId: AccountTransactionId, amount: Amount): F[Unit] =
read.flatMap {
case Some(account) =>
if (account.hasProcessedTransaction(transactionId)) {
ignore
} else {
if (account.hasFunds(amount)) {
append(AccountDebited(transactionId, amount))
} else {
reject(InsufficientFunds)
}
}
case None =>
reject(AccountDoesNotExist)
}
}
object EventsourcedAlgebra {
def behavior[F[_]: Monad]: EventsourcedBehavior[EitherK[Algebra, Rejection, *[_]], F, Option[
AccountState
], AccountEvent] =
EventsourcedBehavior
.rejectable(new EventsourcedAlgebra, AccountState.fold)
val tagging: Tagging[AccountId] = Tagging.const[AccountId](EventTag("Account"))
final val rootAccountId: AccountId = AccountId("ROOT")
final case class AccountState(balance: Amount,
processedTransactions: Set[AccountTransactionId],
checkBalance: Boolean) {
def hasProcessedTransaction(transactionId: AccountTransactionId): Boolean =
processedTransactions.contains(transactionId)
def hasFunds(amount: Amount): Boolean =
!checkBalance || balance >= amount
def applyEvent(event: AccountEvent): Folded[AccountState] = event match {
case AccountOpened(_) => impossible
case AccountDebited(transactionId, amount) =>
copy(
balance = balance - amount,
processedTransactions = processedTransactions + transactionId
).next
case AccountCredited(transactionId, amount) =>
copy(
balance = balance + amount,
processedTransactions = processedTransactions + transactionId
).next
}
}
object AccountState {
def fromEvent(event: AccountEvent): Folded[AccountState] = event match {
case AccountOpened(checkBalance) => AccountState(Amount.zero, Set.empty, checkBalance).next
case _ => impossible
}
val fold: Fold[Folded, Option[AccountState], AccountEvent] =
Fold.optional(fromEvent)(_.applyEvent(_))
}
}
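// Hedged sketch (not part of the original source): illustrates that the fold's
// initial state can only be produced from AccountOpened; any other first event
// folds to impossible (see AccountState.fromEvent above).
object EventsourcedAlgebraExample {
  val opened: Folded[AccountState] =
    AccountState.fromEvent(AccountOpened(true))
}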
|
notxcain/aecor
|
modules/example/src/main/scala/aecor/example/account/EventsourcedAlgebra.scala
|
Scala
|
mit
| 3,196
|
/*
* This file is part of Zoe Assistant - https://github.com/guluc3m/gul-zoe
*
* Copyright (c) 2013 David Muñoz Díaz <david@gul.es>
*
* This file is distributed under the MIT LICENSE
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.voiser.zoe
import org.clapper.argot._
import ArgotConverters._
import java.io.FileInputStream
object Launcher {
val parser = new ArgotParser("Zoe server")
val opPort = parser.option[Int](List("p", "port"), "n", "TCP port")
val opDomain = parser.option[String](List("d", "domain"), "name", "Server domain")
val opGateway = parser.option[String](List("g", "gateway"), "name", "Gateway domain")
val opConfFile = parser.option[String](List("c", "conf"), "name", "zoe.conf path")
def usage {
println("Parameters:")
println
println(" -p <int>")
println(" --port <int> Zoe server port. (optional, default 30000)")
println
println(" -d <string>")
println(" --domain <string> Zoe server domain (required)")
println
println(" -g <string>")
println(" --gateway <string> Server gateway domain (required)")
println
println(" -c <path>")
println(" --conf <path> zoe.conf path (optional)")
}
def parse(args: Array[String]) {
parser.parse(args)
val port = opPort value match {
case None => 30000
case Some(p) => p
}
val domain = opDomain value match {
case None => throw new Exception("Domain needed")
case Some(s) => s
}
val gateway = opGateway value match {
case None => throw new Exception("Gateway needed")
case Some(s) => s
}
val confpath = opConfFile value match {
case None => "(none)"
case Some(s) => s
}
val conf = opConfFile value match {
case None => Conf()
case Some(s) => ConfFileReader(new FileInputStream(s))
}
val server = new Server(port, domain, gateway, conf)
println("Starting server on port " + port)
println(" conf file: " + confpath)
println(" domain: " + domain)
println(" gateway: " + gateway)
server start
}
def main(args: Array[String]): Unit = {
try {
parse(args)
}
catch {
case e:Exception => usage
}
}
}
|
voiser/gul-zoe-server
|
src/main/scala/org/voiser/zoe/Launcher.scala
|
Scala
|
mit
| 3,392
|
package util
object StringUtil {
implicit class StringExtensions(val s: String) {
    import scala.util.control.Exception._
def toIntOpt:Option[Int] = catching(classOf[NumberFormatException]) opt s.toInt
def toLongOpt:Option[Long] = catching(classOf[NumberFormatException]) opt s.toLong
}
}
|
vokhotnikov/sevstone-play
|
app/util/StringUtil.scala
|
Scala
|
mit
| 358
|
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.tables
import com.websudos.phantom.builder.query.InsertQuery
import com.websudos.phantom.dsl._
case class Primitive(
pkey: String,
long: Long,
boolean: Boolean,
bDecimal: BigDecimal,
double: Double,
float: Float,
inet: java.net.InetAddress,
int: Int,
date: java.util.Date,
uuid: java.util.UUID,
bi: BigInt
)
sealed class Primitives extends CassandraTable[ConcretePrimitives, Primitive] {
object pkey extends StringColumn(this) with PartitionKey[String]
object long extends LongColumn(this)
object boolean extends BooleanColumn(this)
object bDecimal extends BigDecimalColumn(this)
object double extends DoubleColumn(this)
object float extends FloatColumn(this)
object inet extends InetAddressColumn(this)
object int extends IntColumn(this)
object date extends DateColumn(this)
object uuid extends UUIDColumn(this)
object bi extends BigIntColumn(this)
override def fromRow(r: Row): Primitive = {
Primitive(
pkey = pkey(r),
long = long(r),
boolean = boolean(r),
bDecimal = bDecimal(r),
double = double(r),
float = float(r),
inet = inet(r),
int = int(r),
date = date(r),
uuid = uuid(r),
bi = bi(r)
)
}
}
abstract class ConcretePrimitives extends Primitives with RootConnector {
override val tableName = "Primitives"
def store(row: Primitive): InsertQuery.Default[ConcretePrimitives, Primitive] = {
insert
.value(_.pkey, row.pkey)
.value(_.long, row.long)
.value(_.boolean, row.boolean)
.value(_.bDecimal, row.bDecimal)
.value(_.double, row.double)
.value(_.float, row.float)
.value(_.inet, row.inet)
.value(_.int, row.int)
.value(_.date, row.date)
.value(_.uuid, row.uuid)
.value(_.bi, row.bi)
}
}
|
levinson/phantom
|
phantom-dsl/src/test/scala/com/websudos/phantom/tables/Primitives.scala
|
Scala
|
bsd-2-clause
| 3,334
|
package unfiltered.netty.request
import unfiltered.netty
import unfiltered.netty.{ Http => NHttp, ExceptionHandler }
import unfiltered.netty.cycle.ThreadPool
import unfiltered.request.{ Path => UFPath, POST, & }
import unfiltered.response.{ Pass, ResponseString }
import unfiltered.specs2.netty.Served
import dispatch.classic._
import dispatch.classic.mime.Mime._
import java.io.{ File => JFile }
import io.netty.buffer.Unpooled
import io.netty.channel.{ ChannelFutureListener, ChannelHandlerContext }
import io.netty.channel.ChannelHandler.Sharable
import io.netty.handler.codec.http.{ DefaultFullHttpResponse, HttpResponseStatus, HttpVersion }
import org.specs2.mutable.Specification
import scala.util.control.NonFatal
object NoChunkAggregatorSpec extends Specification
with Served {
trait ExpectedServerErrorResponse { self: ExceptionHandler =>
def onException(ctx: ChannelHandlerContext, t: Throwable) {
val ch = ctx.channel
if (ch.isOpen) try {
println("expected exception occured: '%s'" format t.getMessage())
val res = new DefaultFullHttpResponse(
HttpVersion.HTTP_1_1, HttpResponseStatus.INTERNAL_SERVER_ERROR,
Unpooled.copiedBuffer(
HttpResponseStatus.INTERNAL_SERVER_ERROR.toString.getBytes("utf-8")))
ch.write(res).addListener(ChannelFutureListener.CLOSE)
} catch {
case NonFatal(_) => ch.close()
}
}
}
  // note(doug): this would previously trigger a 500 error but no longer does due to the fact that the default pipeline includes chunk aggregation
@Sharable
class ExpectedErrorAsyncPlan extends netty.async.Plan with ExpectedServerErrorResponse {
def intent = {
case POST(UFPath("/async/upload")) =>
Pass
}
}
  // note(doug): this would previously trigger a 500 error but no longer does due to the fact that the default pipeline includes chunk aggregation
@Sharable
class ExpectedErrorCyclePlan extends netty.cycle.Plan with ThreadPool with ExpectedServerErrorResponse {
def intent = {
case POST(UFPath("/cycle/upload") & MultiPart(req)) =>
MultiPartParams.Disk(req).files("f") match {
case Seq(f, _*) => ResponseString(
"disk read file f named %s with content type %s" format(
f.name, f.contentType))
case f => ResponseString("what's f?")
}
}
}
def setup = {
_.plan(new ExpectedErrorAsyncPlan)
.plan(netty.cycle.Planify({
case POST(UFPath("/cycle/upload")) => Pass
}))
.plan(netty.cycle.MultiPartDecoder({
case POST(UFPath("/cycle/upload") & MultiPart(req)) => netty.cycle.MultipartPlan.Pass
}))
.plan(netty.async.MultiPartDecoder({
case POST(UFPath("/async/upload") & MultiPart(req)) => netty.async.MultipartPlan.Pass
}))
.plan(netty.async.Planify {
case r@POST(UFPath("/async/upload") & MultiPart(req)) =>
MultiPartParams.Disk(req).files("f") match {
case Seq(f, _*) => r.respond(ResponseString(
"disk read file f named %s with content type %s" format(
f.name, f.contentType)))
case f => r.respond(ResponseString("what's f?"))
}
})
.plan(new ExpectedErrorCyclePlan)
}
"When receiving multipart requests with no chunk aggregator, regular netty plans" should {
step {
val out = new JFile("netty-upload-test-out.txt")
if (out.exists) out.delete
}
// note(doug): in netty3 versions of unfiltered this would result in a 500 error
"respond with a 200 when no chunk aggregator is used in a cycle plan" in {
val http = new dispatch.classic.Http with NoLogging
val file = new JFile(getClass.getResource("/netty-upload-big-text-test.txt").toURI)
file.exists must_==true
try {
http x (host / "cycle" / "upload" <<* ("f", file, "text/plain") >| ) {
case (code,_,_,_) =>
code must_== 200
}
} finally { http.shutdown }
success
}
// note(doug): in netty3 versions of unfiltered this would result in a 500 error
"respond with a 200 when no chunk aggregator is used in an async plan" in {
val http = new dispatch.classic.Http with NoLogging
val file = new JFile(getClass.getResource("/netty-upload-big-text-test.txt").toURI)
file.exists must_==true
try {
http x (host / "async" / "upload" <<* ("f", file, "text/plain") >| ) {
case (code,_,_,_) =>
code must_== 200
}
} finally { http.shutdown }
success
}
"handle multipart uploads which are not chunked" in {
/** This assumes Dispatch doesn't build a chunked request because the data is small */
val file = new JFile(getClass.getResource("/netty-upload-test.txt").toURI)
file.exists must_==true
http(host / "async" / "upload" <<* ("f", file, "text/plain") as_str) must_== "disk read file f named netty-upload-test.txt with content type text/plain"
http(host / "cycle" / "upload" <<* ("f", file, "text/plain") as_str) must_== "disk read file f named netty-upload-test.txt with content type text/plain"
}
}
}
|
peel/unfiltered
|
netty-uploads/src/test/scala/NoChunkAggregatorSpec.scala
|
Scala
|
mit
| 5,141
|
package ru.yandex.mysqlDiff
package util
trait StringImplicits {
implicit def stringExtras(string: String) = new StringExtras(string)
}
object StringImplicits extends StringImplicits
class StringExtras(string: String) {
import string._
def % (args: Any*) =
String.format(string, args.toArray.asInstanceOf[Array[Object]]: _*)
// XXX: add more escapes
def unescapeJava =
replace("\\\\n", "\\n").replace("\\\\t", "\\t").replace("\\\\0", "\\0")
def escapeJava =
replace("\\n", "\\\\n").replace("\\t", "\\\\t").replace("\\0", "\\\\0")
}
object StringTests extends MySpecification {
import StringImplicits._
"%" in {
"distance between %s and %s is %d km" % ("Moscow", "Kiev", 757) must_== "distance between Moscow and Kiev is 757 km"
}
"unescapeJava" in {
"a\\\\t\\\\tb".unescapeJava must_== "a\\t\\tb"
"a\\t\\tb".escapeJava must_== "a\\\\t\\\\tb"
}
}
// vim: set ts=4 sw=4 et:
|
hkerem/mysql-diff
|
src/main/scala/ru/yandex/mysqlDiff/util/string.scala
|
Scala
|
bsd-3-clause
| 969
|
package org.jetbrains.sbt.project.template.techhub
import javax.swing.Icon
import com.intellij.ide.util.projectWizard.AbstractModuleBuilder
import com.intellij.openapi.ui.ValidationInfo
import com.intellij.platform.ProjectTemplate
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.sbt.SbtBundle
class TechHubProjectTemplate extends ProjectTemplate {
override def getName: String = SbtBundle.message("sbt.techhub.lightbend.project.starter")
override def getDescription: String = SbtBundle.message("sbt.techhub.sbt.based.project.from.a.lightbend.tech.hub.template")
override def getIcon: Icon = Icons.LIGHTBEND_LOGO
override def validateSettings(): ValidationInfo = null
override def createModuleBuilder(): AbstractModuleBuilder = new TechHubModuleBuilder
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/sbt/project/template/techhub/TechHubProjectTemplate.scala
|
Scala
|
apache-2.0
| 790
|
package com.geishatokyo.sqlgen.external.s3
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.auth.BasicAWSCredentials
import java.io.ByteArrayInputStream
import com.amazonaws.services.s3.model.{ObjectMetadata, CannedAccessControlList, PutObjectRequest}
import com.geishatokyo.sqlgen.external.FileUploader
import com.geishatokyo.sqlgen.logger.Logger
import com.geishatokyo.sqlgen.util.FileUtil
/**
*
* User: takeshita
* Create: 12/01/31 14:44
*/
class AmazonS3Uploader(bucketName: String, accessKey: String, secretKey: String) extends FileUploader {
var s3: AmazonS3Client = null
connect()
def connect() = {
s3 = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey))
Logger.log("Connect to amazon s3 (bucketname = %s)".format(bucketName))
if (!s3.doesBucketExist(bucketName)) {
Logger.log("Bucket:%s is not found!".format(bucketName))
throw new Exception("Bucket:%s is not found!".format(bucketName))
}
}
def exist_?(filename: String) = {
try {
val s3Obj = s3.getObject(bucketName, filename)
if (s3Obj != null) s3Obj.getObjectContent.close()
s3Obj != null
} catch {
case e: Exception => {
Logger.log("file:%s is not found".format(filename))
false
}
}
}
def upload(key: String, data: Array[Byte]): Boolean = {
if (key == null) {
Logger.log("key is null")
return false
}
if (data == null || data.length == 0) {
Logger.log("Data is empty")
return false
}
val keyOnS3 = key
Logger.log("Upload to s3 /%s/%s".format(bucketName, keyOnS3))
val req = new PutObjectRequest(bucketName, keyOnS3,
new ByteArrayInputStream(data), generateObjectMetadata(key, data))
val result = s3.putObject(req)
Logger.log("Success to upload : " + keyOnS3)
addPermissions(keyOnS3)
true
}
def getContentType(key: String) = try {
val (dir, name, ext) = FileUtil.splitPathAndNameAndExt(key)
ext.substring(1) match {
case "svg" => "image/svg+xml"
case "png" => "image/png"
case "jpg" | "jpeg" => "image/jpeg"
case "zip" => "application/zip"
case "xml" => "application/xml"
case _ => "image/svg+xml"
}
} catch {
case e : Throwable => "image/svg+xml"
}
def generateObjectMetadata(key: String, data: Array[Byte]) = {
val metadata = new ObjectMetadata()
metadata.setContentLength(data.length)
metadata.setContentType(getContentType(key))
metadata
}
private def addPermissions(key: String) = {
s3.setObjectAcl(bucketName, key, CannedAccessControlList.PublicRead)
}
}
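// Hedged usage sketch (not part of the original source): bucket name, credentials
// and the uploaded payload are placeholders; upload() returns true on success.
object AmazonS3UploaderExample {
  def main(args: Array[String]): Unit = {
    val uploader = new AmazonS3Uploader("my-bucket", "MY_ACCESS_KEY", "MY_SECRET_KEY")
    val ok = uploader.upload("images/logo.svg", "<svg xmlns=\"http://www.w3.org/2000/svg\"/>".getBytes("UTF-8"))
    println("uploaded: " + ok)
  }
}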
|
geishatokyo/sql-generator
|
src/main/scala/com/geishatokyo/sqlgen/external/s3/AmazonS3Uploader.scala
|
Scala
|
mit
| 2,643
|
package ch.epfl.yinyang.api
import reflect.runtime.universe.Symbol
/**
* Marker trait for DSLs in which all holes are used for optimizations and
* therefore lifted. This implies that if a DSL program has at least one hole,
* it won't be compiled at compile time. All variables are treated as
* [[RequiredStaticCompVar]] with the default guard.
*
* NOTE: DSLs that inherit this trait will not be reflectively instantiated
* at compile time.
*/
trait FullyStaged { this: BaseYinYang =>
override def compilationVars(symbols: List[Symbol]): List[VarType] =
throw new RuntimeException("This method must not be called!!!")
}
|
vjovanov/scala-yinyang
|
components/yin-yang/src/api/FullyStaged.scala
|
Scala
|
bsd-3-clause
| 637
|
package com.seanshubin.todo.sample.server
trait HttpServer {
def start()
def join()
def stop()
}
|
SeanShubin/javascript-todo-samples
|
server/src/main/scala/com/seanshubin/todo/sample/server/HttpServer.scala
|
Scala
|
unlicense
| 106
|
package com.fortysevendeg.scala.android.ui.textstyles
import android.os.Bundle
import android.support.v7.app.AppCompatActivity
import android.view.MenuItem
import macroid.Contexts
class TextStylesActivity
extends AppCompatActivity
with Contexts[AppCompatActivity]
with Layout {
override def onCreate(savedInstanceState: Bundle) = {
super.onCreate(savedInstanceState)
setContentView(layout)
toolBar map setSupportActionBar
getSupportActionBar.setDisplayHomeAsUpEnabled(true)
}
  override def onOptionsItemSelected(item: MenuItem): Boolean =
    item.getItemId match {
      case android.R.id.home =>
        // Home/up button: close this activity and report the event as handled
        finish()
        true
      case _ =>
        super.onOptionsItemSelected(item)
    }
}
|
wvandrunen/scala-android
|
src/main/scala/com/fortysevendeg/scala/android/ui/textstyles/TextStylesActivity.scala
|
Scala
|
apache-2.0
| 726
|
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers
import java.net.URLEncoder
import com.mohiva.play.silhouette.api._
import com.mohiva.play.silhouette.api.util.{ ExtractableRequest, HTTPLayer }
import com.mohiva.play.silhouette.impl.exceptions.UnexpectedResponseException
import com.mohiva.play.silhouette.impl.providers.OpenIDProvider._
import play.api.libs.concurrent.Execution.Implicits._
import play.api.mvc._
import scala.concurrent.Future
/**
* Base implementation for all OpenID providers.
*/
trait OpenIDProvider extends SocialProvider with Logger {
/**
* The type of the auth info.
*/
type A = OpenIDInfo
/**
* The settings type.
*/
type Settings = OpenIDSettings
/**
* The HTTP layer implementation.
*/
protected val httpLayer: HTTPLayer
/**
* The OpenID service implementation.
*/
protected val service: OpenIDService
/**
* Starts the authentication process.
*
* @param request The current request.
* @tparam B The type of the request body.
* @return Either a Result or the auth info from the provider.
*/
def authenticate[B]()(implicit request: ExtractableRequest[B]): Future[Either[Result, OpenIDInfo]] = {
request.extractString(Mode) match {
// Tries to verify the user after the provider has redirected back to the application
case Some(_) => service.verifiedID.map(info => Right(info)).recover {
case e => throw new UnexpectedResponseException(ErrorVerification.format(id, e.getMessage), e)
}
// Starts the OpenID authentication process
case None =>
// Either we get the openID from request or we use the provider ID to retrieve the redirect URL
val openID = request.extractString(OpenID).getOrElse(settings.providerURL)
service.redirectURL(openID, resolveCallbackURL(settings.callbackURL)).map { url =>
val redirect = Results.Redirect(fix3749(url))
logger.debug("[Silhouette][%s] Redirecting to: %s".format(id, url))
Left(redirect)
}.recover {
case e => throw new UnexpectedResponseException(ErrorRedirectURL.format(id, e.getMessage), e)
}
}
}
/**
* A temporary fix for: https://github.com/playframework/playframework/pull/3749
*
* @see https://github.com/playframework/playframework/issues/3740
* @see http://stackoverflow.com/questions/22041522/steam-openid-and-play-framework
* @param url The URL to fix.
* @param request The request.
* @tparam B The type of the request body.
* @return The fixed URL.
*/
def fix3749[B](url: String)(implicit request: ExtractableRequest[B]) = {
if (request.extractString(OpenID).isDefined) {
// We've found a non-unique ID so this bug doesn't affect us
url
} else {
// We use "OpenID Provider driven identifier selection", so this bug affects us
val search = URLEncoder.encode(settings.providerURL, "UTF-8")
val replace = URLEncoder.encode("http://specs.openid.net/auth/2.0/identifier_select", "UTF-8")
url
.replace("openid.claimed_id=" + search, "openid.claimed_id=" + replace)
.replace("openid.identity=" + search, "openid.identity=" + replace)
}
}
}
/**
* The OpenIDProvider companion object.
*/
object OpenIDProvider {
/**
* The error messages.
*/
val ErrorVerification = "[Silhouette][%s] Error verifying the ID: %s"
val ErrorRedirectURL = "[Silhouette][%s] Error retrieving the redirect URL: %s"
/**
* The OpenID constants.
*/
val Mode = "openid.mode"
val OpenID = "openID"
}
/**
* The OpenID service trait.
*/
trait OpenIDService {
/**
* Retrieve the URL where the user should be redirected to start the OpenID authentication process.
*
* @param openID The OpenID to use for authentication.
* @param resolvedCallbackURL The full callback URL to the application after a successful authentication.
* @return The redirect URL where the user should be redirected to start the OpenID authentication process.
*/
def redirectURL(openID: String, resolvedCallbackURL: String): Future[String]
/**
* From a request corresponding to the callback from the OpenID server, check the identity of the current user.
*
* @param request The current request.
* @tparam B The type of the request body.
   * @return An OpenIDInfo in case of success, an Exception otherwise.
*/
def verifiedID[B](implicit request: Request[B]): Future[OpenIDInfo]
}
/**
* The OpenID settings.
*
* @param providerURL The OpenID provider URL used if no openID was given. @see https://willnorris.com/2009/07/openid-directed-identity-identifier-select
* @param callbackURL The callback URL to the application after a successful authentication on the OpenID provider.
* The URL can be a relative path which will be resolved against the current request's host.
* @param axRequired Required attributes to return from the provider after a successful authentication.
* @param axOptional Optional attributes to return from the provider after a successful authentication.
 * @param realm A URL pattern that represents the part of URL-space for which an OpenID Authentication request is valid.
*/
case class OpenIDSettings(
providerURL: String,
callbackURL: String,
axRequired: Seq[(String, String)] = Seq.empty,
axOptional: Seq[(String, String)] = Seq.empty,
realm: Option[String] = None)
/**
* The OpenID details.
*
* @param id The openID.
* @param attributes The attributes returned from the provider.
*/
case class OpenIDInfo(id: String, attributes: Map[String, String]) extends AuthInfo
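// Hedged configuration sketch (not part of the original source): settings for an
// "identifier select" provider such as Steam; both URLs are placeholders.
object OpenIDProviderSettingsExample {
  val settings = OpenIDSettings(
    providerURL = "https://steamcommunity.com/openid/",
    callbackURL = "https://example.com/authenticate/steam",
    realm = Some("https://example.com")
  )
}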
|
rfranco/play-silhouette
|
silhouette/app/com/mohiva/play/silhouette/impl/providers/OpenIDProvider.scala
|
Scala
|
apache-2.0
| 6,245
|
package molt.tokenize
// We find all of the terminal symbols and make sure we split on
// them, then assume everything in between is contiguous.
// We are restricting atoms from containing any of our terminal
// symbols.
class BasicTokenizer(tokens: Set[String]) extends Tokenizer {
private def getOverlaps(toks: List[String]): List[(String, String)] = toks match {
case Nil => Nil
case head :: tail => {
val prefixes = head.scanLeft("")(_ + _).filterNot(_.isEmpty)
val suffixes = head.scanRight("")(_ + _).filterNot(_.isEmpty)
val culprits = tail.filter(_.contains(head)) ++
tail.filter(tok => prefixes.exists(prefix => tok.endsWith(prefix))) ++
tail.filter(tok => suffixes.exists(suffix => tok.startsWith(suffix)))
culprits.map(x => (head, x)) ++ getOverlaps(tail)
}
}
val overlaps = getOverlaps(tokens.toList).toSet
val warning =
if(!overlaps.isEmpty) {
val str = s"Warning: tokens have overlap: $overlaps"
Console.err.println(str)
str
}
else "No token overlap detected :)"
// tokenization as described above
override def tokenizations(s: String): Set[Seq[String]] = {
// split on spaces. this is a reversible decision.
val unTokenizedStringVector = s.split("\\s+").toList
// to turn a single string into a list with the specified terminal split out
def splitSingleString(str: String, tok: String): List[String] = {
if (str.isEmpty)
Nil
else if (!str.contains(tok) || str.equals(tok))
List(str)
else {
val (head, tail) = str.splitAt(str.indexOf(tok))
val remainder = tok :: splitSingleString(tail.substring(tok.length), tok)
if (head.isEmpty)
remainder
else
head :: remainder
}
}
// does the above function over a list of strings to get a new list with all of the tokens
def splitOutToken(strs: List[String], tok: String): List[String] = {
strs.flatMap(splitSingleString(_, tok))
}
// we do the splitting for every terminal and get our final token list
Set(tokens.foldLeft(unTokenizedStringVector)(splitOutToken))
}
}
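// Hedged usage sketch (not part of the original source): splitting on the terminal
// symbols "(" and ")" while leaving the atoms in between intact.
object BasicTokenizerExample {
  def main(args: Array[String]): Unit = {
    val tokenizer = new BasicTokenizer(Set("(", ")"))
    // Expected: Set(List("(", "foo", "bar", ")"))
    println(tokenizer.tokenizations("(foo bar)"))
  }
}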
|
julianmichael/molt
|
molt/shared/src/main/scala/molt/tokenize/BasicTokenizer.scala
|
Scala
|
mit
| 2,160
|
package keystoneml.nodes.images
import breeze.linalg._
import keystoneml.workflow.Transformer
import keystoneml.utils.ChannelMajorArrayVectorizedImage
import keystoneml.utils.Image
import keystoneml.utils.ImageUtils
/**
 * Computes the local color statistic (LCS) on a regularly spaced grid [1]:
* "...each patch is also subdivided into 16 square sub-regions and each sub-region
* is described with the means and standard deviations of the 3 RGB
* channels, which leads to a 96 dimensional feature vector."
*
* [1] Clinchant S, Csurka G, Perronnin F, Renders JM XRCE's
* participation to ImageEval. In: ImageEval Workshop at CVIR. 2007
*
* Based on code by Ben Recht <brecht@cs.berkeley.edu>
*
* @param stride Stride for keypoints on the grid
* @param strideStart Starting offset for the keypoints
* @param subPatchSize Size of neighborhood for each keypoint is -2*subPatchSize to subPatchSize
*/
class LCSExtractor(
val stride: Int,
val strideStart: Int,
val subPatchSize: Int)
extends Transformer[Image, DenseMatrix[Float]] {
// Updates sq in place to avoid memory allocation
def getSd(sq: Image, means: Image) = {
require(sq.metadata.numChannels == 1)
var i = 0
while (i < sq.metadata.xDim) {
var j = 0
while (j < sq.metadata.yDim) {
val sqOld = sq.get(i, j, 0)
val mOld = means.get(i, j, 0)
val px = math.sqrt(math.max(sqOld - mOld * mOld, 0))
sq.put(i, j, 0, px)
j = j + 1
}
i = i + 1
}
sq
}
def apply(image: Image): DenseMatrix[Float] = {
val onesVector = Array.fill(subPatchSize)(1.0 / subPatchSize)
val xDim = image.metadata.xDim
val yDim = image.metadata.yDim
val numChannels = image.metadata.numChannels
// Typically this is (256 - 29 * 2) / 4 = 50
val xPoolRange = strideStart until (xDim - strideStart) by stride
val yPoolRange = strideStart until (yDim - strideStart) by stride
val numPoolsX = xPoolRange.length
val numPoolsY = yPoolRange.length
// Typically -2*6 + 3 -1 = -10
val subPatchStart = -2*subPatchSize + subPatchSize/2 - 1
// Typically 6 + 3 - 1 = 8
val subPatchEnd = subPatchSize + subPatchSize/2 - 1
val subPatchStride = subPatchSize
val subPatchRange = subPatchStart to subPatchEnd by subPatchStride
// Typically this is (8 - (-10)) / 6 = 4
val numNeighborhoodX = subPatchRange.length
val numNeighborhoodY = subPatchRange.length
// Typically this is 4 * 4 * 3 * 2 = 96
val numLCSValues = numNeighborhoodX * numNeighborhoodY * numChannels * 2
// Get means, stds for every channel
val channelSplitImgs = ImageUtils.splitChannels(image)
val channelSplitImgsSq = channelSplitImgs.map { img =>
ImageUtils.mapPixels(img, x => x * x)
}
val means = new Array[Image](numChannels)
val stds = new Array[Image](numChannels)
var c = 0
while (c < numChannels) {
val conv = ImageUtils.conv2D(channelSplitImgs(c), onesVector, onesVector)
means(c) = conv
val sq = ImageUtils.conv2D(channelSplitImgsSq(c), onesVector, onesVector)
stds(c) = getSd(sq, means(c))
c = c + 1
}
val lcsValues = new DenseMatrix[Float](numLCSValues, numPoolsX * numPoolsY)
var lcsIdx = 0
// Start at strideStart in (x, y) and
for (x <- strideStart until (xDim - strideStart) by stride;
y <- strideStart until (yDim - strideStart) by stride) {
// This is our keyPoint
val xPos = x
val yPos = y
// Get keypoint ids
val xKeyPoint = (xPos - strideStart)/stride
val yKeyPoint = (yPos - strideStart)/stride
// For each channel, get the neighborhood means and std. deviations
var c = 0
lcsIdx = 0
while (c < numChannels) {
// For this xPos, yPos get means, stdevs of all neighbors
for (nx <- subPatchRange;
ny <- subPatchRange) {
// lcsValues(lcsIdx) = means(c).get((xPos + nx), (yPos + ny), 0)
lcsValues(lcsIdx, xKeyPoint * numPoolsY + yKeyPoint) =
means(c).get((xPos + nx), (yPos + ny), 0).toFloat
lcsIdx = lcsIdx + 1
lcsValues(lcsIdx, xKeyPoint * numPoolsY + yKeyPoint) =
stds(c).get((xPos + nx), (yPos + ny), 0).toFloat
lcsIdx = lcsIdx + 1
}
c = c + 1
}
}
lcsValues
}
}
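// Hedged usage sketch (not part of the original source): parameter values are
// inferred from the "Typically ..." comments above (a 256x256 RGB input).
object LCSExtractorExample {
  val lcs = new LCSExtractor(stride = 4, strideStart = 29, subPatchSize = 6)
  // For a 256x256 RGB image, applying lcs to it would yield a DenseMatrix[Float]
  // of size 96 x (50 * 50): one 96-dimensional LCS descriptor per keypoint.
}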
|
amplab/keystone
|
src/main/scala/keystoneml/nodes/images/LCSExtractor.scala
|
Scala
|
apache-2.0
| 4,379
|
package im.mange.driveby.driver
trait NakedElement {
def attribute(name: String): String
def clear()
def click()
def enter(value: String)
def isDisplayed: Boolean
def isEnabled: Boolean
def hasFocus: Boolean
def childrenCount: Int
//TODO: sort out this shonky mess
def option(value: String): Option[NakedElement]
// def scrollTo()
def text: String
def yAxisLocation: Int
}
|
alltonp/driveby
|
src/main/scala/im/mange/driveby/driver/NakedElement.scala
|
Scala
|
apache-2.0
| 399
|
package redbot.utils
import java.util.concurrent.Executors
import scala.concurrent.{Future, Promise}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.Try
object TimerUtils {
private val scheduler = Executors.newSingleThreadScheduledExecutor()
private val isFailureDefault: PartialFunction[Try[_], Boolean] = {case x => x.isFailure}
  def tryWithBackoff[T](initialBackoff: FiniteDuration,
                        maxBackoff: Duration,
                        backoffIncrement: Long => Long = _ * 2)
                       (operation: => Future[T],
                        isFailure: PartialFunction[Try[T], Boolean] = isFailureDefault): Future[T] =
    operation.transformWith { res =>
      // A result counts as a failure only when isFailure is defined at it and returns true.
      val failed = isFailure.applyOrElse(res, (_: Try[T]) => false)
      // Retry only when the result is a failure and the backoff ceiling has not been reached;
      // otherwise complete with the result as-is.
      if (failed && maxBackoff.toMillis > initialBackoff.toMillis) {
        val next = backoffIncrement(initialBackoff.toMillis)
        timer(next.millis).flatMap(_ =>
          tryWithBackoff(next.millis, maxBackoff, backoffIncrement)(operation, isFailure))
      } else
        Future.fromTry(res)
    }
def timer(at: FiniteDuration): Future[Unit] = {
val res = Promise[Unit]()
scheduler.schedule(() => res.success(()), at.length, at.unit)
res.future
}
}
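// Hedged usage sketch (not part of the original file): retries a Future-producing
// call with exponentially growing waits (500ms, 1s, 2s, ...) until it succeeds or
// the 10 second backoff ceiling is reached. flakyCall is a made-up stand-in.
object TimerUtilsExample {
  def flakyCall(): Future[Int] = Future.successful(42)
  val eventually: Future[Int] =
    TimerUtils.tryWithBackoff(initialBackoff = 500.millis, maxBackoff = 10.seconds)(flakyCall())
}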
|
JamesGallicchio/RedBot
|
src/main/scala/redbot/utils/TimerUtils.scala
|
Scala
|
mit
| 1,284
|
trait A
class B(val x: Int) {
self: A =>
def this() = this()
}
object Test extends B(2) with A {
def main(args: Array[String]) { }
}
|
felixmulder/scala
|
test/pending/run/t4460.scala
|
Scala
|
bsd-3-clause
| 145
|
/*******************************************************************************
* Copyright 2017 Capital One Services, LLC and Bitwise, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package hydrograph.engine.spark.flow
import java.io.IOException
import java.util.Properties
import hydrograph.engine.core.core.{HydrographJob, HydrographRuntimeService}
import hydrograph.engine.core.flowmanipulation.{FlowManipulationContext, FlowManipulationHandler}
import hydrograph.engine.core.helper.JAXBTraversal
import hydrograph.engine.core.props.OrderedProperties
import hydrograph.engine.core.schemapropagation.SchemaFieldHandler
import hydrograph.engine.core.utilities.OrderedPropertiesHelper
import hydrograph.engine.spark.components.adapter.factory.AdapterFactory
import hydrograph.engine.spark.components.base.SparkFlow
import hydrograph.engine.spark.executiontracking.plugin.{CommandComponentsDefaultPlugin, ExecutionTrackingListener, ExecutionTrackingPlugin, HydrographCommandListener}
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.matching.Regex
/**
* The Class HydrographRuntime.
*
* @author Bitwise
*
*/
class HydrographRuntime extends HydrographRuntimeService {
private val EXECUTION_TRACKING: String = "hydrograph.execution.tracking"
private val LOG: Logger = LoggerFactory.getLogger(classOf[HydrographRuntime])
private var flowManipulationContext: FlowManipulationContext = null;
// private var flows: mutable.LinkedHashSet[SparkFlow] = null
private var flowBuilder:FlowBuilder = null
var executionTrackingListener : ExecutionTrackingListener = null
var hydrographListener : HydrographCommandListener = new CommandComponentsDefaultPlugin
override def kill(): Unit = {
LOG.info("Kill signal received")
if (RuntimeContext.instance.sparkSession != null) {
LOG.info("Killing Spark jobs")
RuntimeContext.instance.sparkSession.stop()
}
else {
LOG.info("No Spark jobs present to kill. Exiting code.")
System.exit(0)
}
}
override def initialize(properties: Properties, args: Array[String], hydrographJob: HydrographJob,
jobId: String, udfPath: String): Unit = {
val configProperties = getSparkProperties(hydrographJob,properties)
val sparkSessionBuilder: SparkSession.Builder = SparkSession.builder()
.appName(hydrographJob.getJAXBObject.getName)
.config(configProperties)
val schemaFieldHandler = new SchemaFieldHandler(
hydrographJob.getJAXBObject().getInputsOrOutputsOrStraightPulls())
    // assign the class field (not a shadowing local) so oncomplete() can clean up temp paths
    flowManipulationContext = new FlowManipulationContext(hydrographJob, args, schemaFieldHandler, jobId)
val flowManipulationHandler = new FlowManipulationHandler
val updatedHydrographJob=flowManipulationHandler.execute(flowManipulationContext)
val adapterFactory = AdapterFactory(updatedHydrographJob.getJAXBObject)
val traversal = new JAXBTraversal(updatedHydrographJob.getJAXBObject());
val sparkSession: SparkSession = enableHiveSupport(sparkSessionBuilder, traversal, properties).getOrCreate()
getAndSetHadoopProperties(sparkSession,hydrographJob,properties)
val runtimeContext = RuntimeContext(adapterFactory, traversal, updatedHydrographJob,
flowManipulationContext.getSchemaFieldHandler, sparkSession)
val EXECUTION_TRACKING = "hydrograph.execution.tracking";
// val oproperties = OrderedPropertiesHelper.getOrderedProperties("RegisterPlugin.properties")
// val executionTrackingPluginName = oproperties.getProperty(EXECUTION_TRACKING)
// val trackingInstance = Class.forName(executionTrackingPluginName).newInstance()
// executionTrackingListener = trackingInstance.asInstanceOf[ExecutionTrackingPlugin]
// executionTrackingListener.addListener(runtimeContext)
if (getExecutionTrackingClass(EXECUTION_TRACKING) != null) {
/*var */executionTrackingListener = classLoader(getExecutionTrackingClass(EXECUTION_TRACKING)).asInstanceOf[ExecutionTrackingListener]
val trackingInstance = Class.forName(getExecutionTrackingClass(EXECUTION_TRACKING)).newInstance()
executionTrackingListener = trackingInstance.asInstanceOf[ExecutionTrackingPlugin]
executionTrackingListener.addListener(runtimeContext)
hydrographListener = trackingInstance.asInstanceOf[ExecutionTrackingPlugin]
}
}
def enableHiveSupport(sessionBuilder: SparkSession.Builder, traversal: JAXBTraversal, properties: Properties): SparkSession.Builder = {
LOG.trace("In method checkAndEnableHiveSupport()")
if (traversal.isHiveComponentPresentInFlow) {
LOG.debug("Hive components are present in flow. Enabling Hive support in SparkSession with warehouse location "+properties.getProperty("hydrograph.hive.warehouse"))
sessionBuilder
.config("spark.sql.warehouse.dir", properties.getProperty("hydrograph.hive.warehouse"))
.enableHiveSupport()
} else {
sessionBuilder
.config("spark.sql.warehouse.dir", properties.getProperty("hydrograph.tmp.warehouse"))
}
sessionBuilder
}
override def prepareToExecute(): Unit = {
LOG.info("Building spark flows")
flowBuilder = FlowBuilder(RuntimeContext.instance, hydrographListener)
LOG.info("Spark flows built successfully")
}
override def execute(): Unit = {
/*if (GeneralUtilities.IsArgOptionPresent(args, CommandLineOptionsProcessor.OPTION_NO_EXECUTION)) {
LOG.info(CommandLineOptionsProcessor.OPTION_NO_EXECUTION + " option is provided so skipping execution")
return
}*/
/*for (sparkFlow <- flows) {
try{
hydrographListener.start(sparkFlow)
// HydrographFlowPlugin.getComps()
sparkFlow.execute()
hydrographListener.end(sparkFlow)
/* for(accumulator <- sparkFlow.getAccumulatorOnFlow()){
accumulator.reset()
}*/
}
catch{case e: Exception => {
hydrographListener.failComponentsOfFlow(sparkFlow)
// executionTrackingListener.getStatus().asScala.foreach(println)
throw e
}
}
}*/
flowBuilder.buildAndExecuteFlows()
// RuntimeContext.instance.sparkSession.sparkContext.longAccumulator
RuntimeContext.instance.sparkSession.stop()
// executionTrackingListener.getStatus().asScala.foreach(println)
}
override def getExecutionStatus: AnyRef = {
if (executionTrackingListener != null)
return executionTrackingListener.getStatus()
return null
}
override def oncomplete(): Unit = {
//Deleting TempPath For Debug
if (flowManipulationContext != null && flowManipulationContext.getTmpPath != null) {
flowManipulationContext.getTmpPath.asScala.foreach(tmpPath => {
val fullPath: Path = new Path(tmpPath)
// do not delete the root directory
if (fullPath.depth != 0) {
var fileSystem: FileSystem = null
LOG.info("Deleting temp path:" + tmpPath)
try {
fileSystem = FileSystem.get(RuntimeContext.instance.sparkSession.sparkContext.hadoopConfiguration)
// fileSystem.delete(fullPath, true)
}
catch {
case exception: NullPointerException => {
throw new RuntimeException(exception)
}
case e: IOException => {
throw new RuntimeException(e)
}
}
}
})
}
}
def classLoader[T](className: String): T = {
val clazz = Class.forName(className).getDeclaredConstructors
clazz(0).setAccessible(true)
clazz(0).newInstance().asInstanceOf[T]
}
def getSparkProperties(hydrographJob: HydrographJob, properties: Properties): SparkConf = {
val configProperties = new SparkConf()
val sparkProperties = properties.entrySet().asScala
for (property <- sparkProperties) {
if(property.getKey.toString.startsWith("spark.") || property.getKey.toString.startsWith("hydrograph.")){
configProperties.set(property.getKey.toString.trim, property.getValue.toString.trim)
}
}
val runttimeProperties = hydrographJob.getJAXBObject().getRuntimeProperties
if (runttimeProperties != null) {
for (runtimeProperty <- runttimeProperties.getProperty.asScala) {
if(runtimeProperty.getName.startsWith("spark.") || runtimeProperty.getName.startsWith("hydrograph.")){
configProperties.set(runtimeProperty.getName.trim, runtimeProperty.getValue.trim)
}
}
}
configProperties
}
def getAndSetHadoopProperties(sparkSession: SparkSession, hydrographJob: HydrographJob, properties: Properties): Unit = {
val configProperties = sparkSession.sparkContext.hadoopConfiguration
val sparkProperties = properties.entrySet().asScala
for (property <- sparkProperties) {
property.getKey.toString match {
case a if(!a.startsWith("spark.")) => {
          val pattern: Regex = "(.*)\\((.*)\\)".r
val matchIterator = pattern.findAllIn(property.getValue.toString)
if (matchIterator.hasNext) {
while (matchIterator.hasNext) {
matchIterator.group(1) match {
case x if (x.endsWith("OrElse")) => {
val arrayOfParams: Array[String] = matchIterator.group(2).split(",")
              configProperties.set(property.getKey.toString.trim, sys.env.getOrElse(arrayOfParams(0).trim.replaceAll("\"", ""), arrayOfParams(1).trim.replaceAll("\"", "")).trim)
}
case y if (y.endsWith("get")) => {
              configProperties.set(property.getKey.toString.trim, sys.env.get(matchIterator.group(2).replaceAll("\"", "")).get.trim)
}
}
matchIterator.next()
}
}
else{
configProperties.set(property.getKey.toString.trim, property.getValue.toString.trim)
}
}
case _ =>
}
}
val runttimeProperties = hydrographJob.getJAXBObject().getRuntimeProperties
if (runttimeProperties != null) {
for (runtimeProperty <- runttimeProperties.getProperty.asScala) {
runtimeProperty.getName match {
case a if(!a.startsWith("spark.")) => {
            val pattern: Regex = "(.*)\\((.*)\\)".r
val matchIterator = pattern.findAllIn(runtimeProperty.getValue)
if (matchIterator.hasNext) {
while (matchIterator.hasNext) {
matchIterator.group(1) match {
case x if (x.endsWith("OrElse")) => {
val arrayOfParams: Array[String] = matchIterator.group(2).split(",")
                configProperties.set(runtimeProperty.getName.trim, sys.env.getOrElse(arrayOfParams(0).replaceAll("\"", ""), arrayOfParams(1).replaceAll("\"", "")).trim)
}
case y if (y.endsWith("get")) => {
                configProperties.set(runtimeProperty.getName.trim, sys.env.get(matchIterator.group(2).replaceAll("\"", "")).get.trim)
}
}
matchIterator.next()
}
}
else{
configProperties.set(runtimeProperty.getName.trim, runtimeProperty.getValue.trim)
}
}
case _ =>
}
}
}
}
def getExecutionTrackingClass(executionTrackingKey: String): String = {
var properties: OrderedProperties = new OrderedProperties
try {
properties = OrderedPropertiesHelper.getOrderedProperties("RegisterPlugin.properties")
}
catch {
case e: IOException => {
throw new RuntimeException("Error reading the properties file: RegisterPlugin.properties" , e)
}
}
properties.getProperty(executionTrackingKey)
}
}
|
capitalone/Hydrograph
|
hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/flow/HydrographRuntime.scala
|
Scala
|
apache-2.0
| 12,595
|
import sbt._
import Keys._
import Tests._
import Defaults._
import java.io.{ CharArrayWriter, PrintWriter }
object Ticket543Test extends Build {
val marker = new File("marker")
val check = TaskKey[Unit]("check", "Check correct error has been returned.")
lazy val root = Project("root", file("."), settings = defaultSettings ++ Seq(
libraryDependencies += "org.scalatest" %% "scalatest" % "1.8" % "test",
scalaVersion := "2.9.2",
fork := true,
testListeners += new TestReportListener {
def testEvent(event: TestEvent) {
for (e <- event.detail.filter(_.status == sbt.testing.Status.Failure)) {
if (e.throwable != null && e.throwable.isDefined) {
val caw = new CharArrayWriter
e.throwable.get.printStackTrace(new PrintWriter(caw))
if (caw.toString.contains("Test.scala:"))
marker.createNewFile()
}
}
}
def startGroup(name: String) {}
def endGroup(name: String, t: Throwable) {}
def endGroup(name: String, result: TestResult.Value) {}
},
check := {
val exists = marker.exists
marker.delete()
if (!exists) error("Null or invalid error had been returned previously")
}
))
}
|
jaceklaskowski/sbt
|
sbt/src/sbt-test/tests/t543/project/Ticket543Test.scala
|
Scala
|
bsd-3-clause
| 1,161
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.analysis.EliminateSubQueries
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.{LeftSemi, PlanTest, LeftOuter, RightOuter}
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.types.IntegerType
class FilterPushdownSuite extends PlanTest {
object Optimize extends RuleExecutor[LogicalPlan] {
val batches =
Batch("Subqueries", Once,
EliminateSubQueries) ::
Batch("Filter Pushdown", Once,
SamplePushDown,
CombineFilters,
PushPredicateThroughProject,
BooleanSimplification,
PushPredicateThroughJoin,
PushPredicateThroughGenerate,
PushPredicateThroughAggregate,
ColumnPruning,
ProjectCollapsing) :: Nil
}
val testRelation = LocalRelation('a.int, 'b.int, 'c.int)
val testRelation1 = LocalRelation('d.int)
// This test already passes.
test("eliminate subqueries") {
val originalQuery =
testRelation
.subquery('y)
.select('a)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.select('a.attr)
.analyze
comparePlans(optimized, correctAnswer)
}
test("column pruning for group") {
val originalQuery =
testRelation
.groupBy('a)('a, count('b))
.select('a)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.select('a)
.groupBy('a)('a)
.select('a).analyze
comparePlans(optimized, correctAnswer)
}
test("column pruning for group with alias") {
val originalQuery =
testRelation
.groupBy('a)('a as 'c, count('b))
.select('c)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.select('a)
.groupBy('a)('a as 'c)
.select('c).analyze
comparePlans(optimized, correctAnswer)
}
test("column pruning for Project(ne, Limit)") {
val originalQuery =
testRelation
.select('a, 'b)
.limit(2)
.select('a)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.select('a)
.limit(2).analyze
comparePlans(optimized, correctAnswer)
}
// After this line is unimplemented.
test("simple push down") {
val originalQuery =
testRelation
.select('a)
.where('a === 1)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.where('a === 1)
.select('a)
.analyze
comparePlans(optimized, correctAnswer)
}
test("can't push without rewrite") {
val originalQuery =
testRelation
.select('a + 'b as 'e)
.where('e === 1)
.analyze
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.where('a + 'b === 1)
.select('a + 'b as 'e)
.analyze
comparePlans(optimized, correctAnswer)
}
test("nondeterministic: can't push down filter with nondeterministic condition through project") {
val originalQuery = testRelation
.select(Rand(10).as('rand), 'a)
.where('rand > 5 || 'a > 5)
.analyze
val optimized = Optimize.execute(originalQuery)
comparePlans(optimized, originalQuery)
}
test("nondeterministic: can't push down filter through project with nondeterministic field") {
val originalQuery = testRelation
.select(Rand(10).as('rand), 'a)
.where('a > 5)
.analyze
val optimized = Optimize.execute(originalQuery)
comparePlans(optimized, originalQuery)
}
test("filters: combines filters") {
val originalQuery = testRelation
.select('a)
.where('a === 1)
.where('a === 2)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.where('a === 1 && 'a === 2)
.select('a).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push to either side") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y)
.where("x.b".attr === 1)
.where("y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 1)
val right = testRelation.where('b === 2)
val correctAnswer =
left.join(right).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push to one side") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y)
.where("x.b".attr === 1)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 1)
val right = testRelation
val correctAnswer =
left.join(right).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push to one side after transformCondition") {
val x = testRelation.subquery('x)
val y = testRelation1.subquery('y)
val originalQuery = {
x.join(y)
.where(("x.a".attr === 1 && "y.d".attr === "x.b".attr) ||
("x.a".attr === 1 && "y.d".attr === "x.c".attr))
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('a === 1)
val right = testRelation1
val correctAnswer =
left.join(right, condition = Some("d".attr === "b".attr || "d".attr === "c".attr)).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: rewrite filter to push to either side") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y)
.where("x.b".attr === 1 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 1)
val right = testRelation.where('b === 2)
val correctAnswer =
left.join(right).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left semi join") {
val x = testRelation.subquery('x)
val y = testRelation1.subquery('y)
val originalQuery = {
x.join(y, LeftSemi, Option("x.a".attr === "y.d".attr && "x.b".attr >= 1 && "y.d".attr >= 2))
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b >= 1)
val right = testRelation1.where('d >= 2)
val correctAnswer =
left.join(right, LeftSemi, Option("a".attr === "d".attr)).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left outer join #1") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, LeftOuter)
.where("x.b".attr === 1 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 1)
val correctAnswer =
left.join(y, LeftOuter).where("y.b".attr === 2).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down right outer join #1") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, RightOuter)
.where("x.b".attr === 1 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val right = testRelation.where('b === 2).subquery('d)
val correctAnswer =
x.join(right, RightOuter).where("x.b".attr === 1).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left outer join #2") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, LeftOuter, Some("x.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 2).subquery('d)
val correctAnswer =
left.join(y, LeftOuter, Some("d.b".attr === 1)).where("y.b".attr === 2).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down right outer join #2") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, RightOuter, Some("y.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val right = testRelation.where('b === 2).subquery('d)
val correctAnswer =
x.join(right, RightOuter, Some("d.b".attr === 1)).where("x.b".attr === 2).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left outer join #3") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, LeftOuter, Some("y.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 2).subquery('l)
val right = testRelation.where('b === 1).subquery('r)
val correctAnswer =
left.join(right, LeftOuter).where("r.b".attr === 2).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down right outer join #3") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, RightOuter, Some("y.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val right = testRelation.where('b === 2).subquery('r)
val correctAnswer =
x.join(right, RightOuter, Some("r.b".attr === 1)).where("x.b".attr === 2).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left outer join #4") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, LeftOuter, Some("y.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 2).subquery('l)
val right = testRelation.where('b === 1).subquery('r)
val correctAnswer =
left.join(right, LeftOuter).where("r.b".attr === 2 && "l.c".attr === "r.c".attr).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down right outer join #4") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, RightOuter, Some("y.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.subquery('l)
val right = testRelation.where('b === 2).subquery('r)
val correctAnswer =
left.join(right, RightOuter, Some("r.b".attr === 1)).
where("l.b".attr === 2 && "l.c".attr === "r.c".attr).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left outer join #5") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, LeftOuter, Some("y.b".attr === 1 && "x.a".attr === 3))
.where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 2).subquery('l)
val right = testRelation.where('b === 1).subquery('r)
val correctAnswer =
left.join(right, LeftOuter, Some("l.a".attr===3)).
where("r.b".attr === 2 && "l.c".attr === "r.c".attr).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down right outer join #5") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, RightOuter, Some("y.b".attr === 1 && "x.a".attr === 3))
.where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('a === 3).subquery('l)
val right = testRelation.where('b === 2).subquery('r)
val correctAnswer =
left.join(right, RightOuter, Some("r.b".attr === 1)).
where("l.b".attr === 2 && "l.c".attr === "r.c".attr).analyze
comparePlans(optimized, correctAnswer)
}
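  // Taken together, the outer-join tests above check the asymmetry of the pushdown rules:
  // WHERE-clause predicates on the preserved side are pushed into that side, join-condition
  // predicates that reference only the null-supplying side are pushed into that side, and
  // WHERE-clause predicates on the null-supplying side (or referencing both sides) stay
  // above the join.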
test("joins: can't push down") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, condition = Some("x.b".attr === "y.b".attr))
}
val optimized = Optimize.execute(originalQuery.analyze)
comparePlans(analysis.EliminateSubQueries(originalQuery.analyze), optimized)
}
test("joins: conjunctive predicates") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y)
.where(("x.b".attr === "y.b".attr) && ("x.a".attr === 1) && ("y.a".attr === 1))
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('a === 1).subquery('x)
val right = testRelation.where('a === 1).subquery('y)
val correctAnswer =
left.join(right, condition = Some("x.b".attr === "y.b".attr))
.analyze
comparePlans(optimized, analysis.EliminateSubQueries(correctAnswer))
}
test("joins: conjunctive predicates #2") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y)
.where(("x.b".attr === "y.b".attr) && ("x.a".attr === 1))
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('a === 1).subquery('x)
val right = testRelation.subquery('y)
val correctAnswer =
left.join(right, condition = Some("x.b".attr === "y.b".attr))
.analyze
comparePlans(optimized, analysis.EliminateSubQueries(correctAnswer))
}
test("joins: conjunctive predicates #3") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val z = testRelation.subquery('z)
val originalQuery = {
z.join(x.join(y))
.where(("x.b".attr === "y.b".attr) && ("x.a".attr === 1) &&
("z.a".attr >= 3) && ("z.a".attr === "x.b".attr))
}
val optimized = Optimize.execute(originalQuery.analyze)
val lleft = testRelation.where('a >= 3).subquery('z)
val left = testRelation.where('a === 1).subquery('x)
val right = testRelation.subquery('y)
val correctAnswer =
lleft.join(
left.join(right, condition = Some("x.b".attr === "y.b".attr)),
condition = Some("z.a".attr === "x.b".attr))
.analyze
comparePlans(optimized, analysis.EliminateSubQueries(correctAnswer))
}
val testRelationWithArrayType = LocalRelation('a.int, 'b.int, 'c_arr.array(IntegerType))
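  // The "generate:" tests below check how predicates interact with Generate (explode):
  // a filter that references no generated column is pushed below the Generate, a conjunction
  // is split so that only the conjuncts touching generated columns stay above it, and a
  // predicate that cannot be separated from the generated column (here a disjunction
  // involving it) is left where it is.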
test("generate: predicate referenced no generated column") {
val originalQuery = {
testRelationWithArrayType
.generate(Explode('c_arr), true, false, Some("arr"))
.where(('b >= 5) && ('a > 6))
}
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = {
testRelationWithArrayType
.where(('b >= 5) && ('a > 6))
.generate(Explode('c_arr), true, false, Some("arr")).analyze
}
comparePlans(optimized, correctAnswer)
}
test("generate: part of conjuncts referenced generated column") {
val generator = Explode('c_arr)
val originalQuery = {
testRelationWithArrayType
.generate(generator, true, false, Some("arr"))
.where(('b >= 5) && ('c > 6))
}
val optimized = Optimize.execute(originalQuery.analyze)
val referenceResult = {
testRelationWithArrayType
.where('b >= 5)
.generate(generator, true, false, Some("arr"))
.where('c > 6).analyze
}
    // Newly generated columns get different expression ids each time the plan is analyzed,
    // so e.g. comparePlans(originalQuery.analyze, originalQuery.analyze) fails.
    // Instead, we check the operators manually here.
// Filter("c" > 6)
assertResult(classOf[Filter])(optimized.getClass)
assertResult(1)(optimized.asInstanceOf[Filter].condition.references.size)
assertResult("c"){
optimized.asInstanceOf[Filter].condition.references.toSeq(0).name
}
// the rest part
comparePlans(optimized.children(0), referenceResult.children(0))
}
test("generate: all conjuncts referenced generated column") {
val originalQuery = {
testRelationWithArrayType
.generate(Explode('c_arr), true, false, Some("arr"))
.where(('c > 6) || ('b > 5)).analyze
}
val optimized = Optimize.execute(originalQuery)
comparePlans(optimized, originalQuery)
}
test("push down project past sort") {
val x = testRelation.subquery('x)
// push down valid
val originalQuery = {
x.select('a, 'b)
.sortBy(SortOrder('a, Ascending))
.select('a)
}
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
x.select('a)
.sortBy(SortOrder('a, Ascending)).analyze
comparePlans(optimized, analysis.EliminateSubQueries(correctAnswer))
// push down invalid
val originalQuery1 = {
x.select('a, 'b)
.sortBy(SortOrder('a, Ascending))
.select('b)
}
val optimized1 = Optimize.execute(originalQuery1.analyze)
val correctAnswer1 =
x.select('a, 'b)
.sortBy(SortOrder('a, Ascending))
.select('b).analyze
comparePlans(optimized1, analysis.EliminateSubQueries(correctAnswer1))
}
test("push project and filter down into sample") {
val x = testRelation.subquery('x)
val originalQuery =
Sample(0.0, 0.6, false, 11L, x).select('a)
val originalQueryAnalyzed = EliminateSubQueries(analysis.SimpleAnalyzer.execute(originalQuery))
val optimized = Optimize.execute(originalQueryAnalyzed)
val correctAnswer =
Sample(0.0, 0.6, false, 11L, x.select('a))
comparePlans(optimized, correctAnswer.analyze)
}
test("aggregate: push down filter when filter on group by expression") {
val originalQuery = testRelation
.groupBy('a)('a, count('b) as 'c)
.select('a, 'c)
.where('a === 2)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.where('a === 2)
.groupBy('a)('a, count('b) as 'c)
.analyze
comparePlans(optimized, correctAnswer)
}
test("aggregate: don't push down filter when filter not on group by expression") {
val originalQuery = testRelation
.select('a, 'b)
.groupBy('a)('a, count('b) as 'c)
.where('c === 2L)
val optimized = Optimize.execute(originalQuery.analyze)
comparePlans(optimized, originalQuery.analyze)
}
test("aggregate: push down filters partially which are subset of group by expressions") {
val originalQuery = testRelation
.select('a, 'b)
.groupBy('a)('a, count('b) as 'c)
.where('c === 2L && 'a === 3)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.select('a, 'b)
.where('a === 3)
.groupBy('a)('a, count('b) as 'c)
.where('c === 2L)
.analyze
comparePlans(optimized, correctAnswer)
}
test("aggregate: push down filters with alias") {
val originalQuery = testRelation
.select('a, 'b)
.groupBy('a)(('a + 1) as 'aa, count('b) as 'c)
.where(('c === 2L || 'aa > 4) && 'aa < 3)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.select('a, 'b)
.where('a + 1 < 3)
.groupBy('a)(('a + 1) as 'aa, count('b) as 'c)
.where('c === 2L || 'aa > 4)
.analyze
comparePlans(optimized, correctAnswer)
}
test("aggregate: push down filters with literal") {
val originalQuery = testRelation
.select('a, 'b)
.groupBy('a)('a, count('b) as 'c, "s" as 'd)
.where('c === 2L && 'd === "s")
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.select('a, 'b)
.where("s" === "s")
.groupBy('a)('a, count('b) as 'c, "s" as 'd)
.where('c === 2L)
.analyze
comparePlans(optimized, correctAnswer)
}
test("aggregate: don't push down filters that are nondeterministic") {
val originalQuery = testRelation
.select('a, 'b)
.groupBy('a)('a + Rand(10) as 'aa, count('b) as 'c, Rand(11).as("rnd"))
.where('c === 2L && 'aa + Rand(10).as("rnd") === 3 && 'rnd === 5)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.select('a, 'b)
.groupBy('a)('a + Rand(10) as 'aa, count('b) as 'c, Rand(11).as("rnd"))
.where('c === 2L && 'aa + Rand(10).as("rnd") === 3 && 'rnd === 5)
.analyze
comparePlans(optimized, correctAnswer)
}
}
|
chenc10/Spark-PAF
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala
|
Scala
|
apache-2.0
| 22,404
|
/**
* Copyright (C) 2014 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.schema
import java.net.URL
import org.orbeon.msv.grammar.Grammar
import org.orbeon.msv.reader.GrammarReaderController
import org.orbeon.oxf.cache.CacheKey
import org.orbeon.oxf.externalcontext.URLRewriter
import org.orbeon.oxf.processor.validation.SchemaValidationException
import org.orbeon.oxf.resources.URLFactory
import org.orbeon.oxf.util.NetUtils
import org.orbeon.oxf.xforms.{XFormsModelSchemaValidator, XFormsUtils, XFormsContainingDocument}
import org.orbeon.oxf.xml.XMLParsing
import org.orbeon.oxf.xml.dom4j.LocationData
import org.xml.sax.{InputSource, Locator}
import scala.util.control.NonFatal
case class SchemaInfo(grammar: Grammar, dependencies: SchemaDependencies)
case class SchemaKey(urlString: String) extends CacheKey
class MSVGrammarReaderController(
containingDocument: XFormsContainingDocument,
dependencies : SchemaDependencies,
baseURL : Option[String]
) extends GrammarReaderController {
def resolveEntity(publicId: String, systemId: String): InputSource = {
// The base URL is specified if the top-level schema is not inline. Imports resolve against the location of that
// base URL. If the base URL is not specified, the top-level schema is inline, and we resolve the imports as
// service URLs, as we do for a top-level schema and instances.
val url =
baseURL match {
case Some(baseURL) ⇒
URLFactory.createURL(baseURL, systemId)
case None ⇒
URLFactory.createURL(XFormsUtils.resolveServiceURL(containingDocument, null, systemId, URLRewriter.REWRITE_MODE_ABSOLUTE))
}
dependencies.addInclude(url)
XMLParsing.ENTITY_RESOLVER.resolveEntity("", url.toString)
}
def warning(locators: Array[Locator], message: String): Unit = {
def locToString(loc: Locator) =
loc.getSystemId + ", line " + loc.getLineNumber + ", column " + loc.getColumnNumber
val formatted =
locators match {
case Array() ⇒ message
case locators ⇒ s"${locators map locToString mkString ", "}: $message"
}
XFormsModelSchemaValidator.logger.warn(formatted)
}
def error(locators: Array[Locator], message: String, exception: Exception): Unit = {
val locationData = locators.headOption map LocationData.createIfPresent
throw new SchemaValidationException(message, exception, locationData.orNull)
}
}
class SchemaDependencies {
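  // Each include seen while reading the grammar is remembered together with its
  // last-modified time, so a cached schema can later be checked for staleness.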
private var includes: List[(URL, Long)] = Nil
def addInclude(url: URL): Unit =
includes ::= url → NetUtils.getLastModified(url)
def areIncludesUnchanged: Boolean = {
def isUnchanged(url: URL, last: Long) =
try
NetUtils.getLastModified(url) == last
catch {
// If an include is missing it may just be the case that it isn't included anymore _and_ it has been
// removed. So, we return `false` and then on a reparse we will find out the truth.
case NonFatal(e) ⇒ false
}
includes forall (isUnchanged _).tupled
}
}
|
wesley1001/orbeon-forms
|
src/main/scala/org/orbeon/oxf/xforms/schema/MSVGrammarReaderController.scala
|
Scala
|
lgpl-2.1
| 3,672
|
package tastytest
class >>>[A] // some symbolic operator representing a "pull"
|
scala/scala
|
test/tasty/neg-isolated/src-3-B/publicSymbolicClass.scala
|
Scala
|
apache-2.0
| 80
|
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar
import slamdata.Predef._
import quasar.contrib.pathy.{ADir, AFile}
import quasar.fp._
import quasar.qscript.MapFuncCore._
import matryoshka._
import matryoshka.data._
import matryoshka.implicits._
import matryoshka.patterns._
import scalaz._, Scalaz._
/** The various representations of an arbitrary query, as seen by the filesystem
* connectors, along with the operations for dealing with them.
*
* There are a few patterns that are worth noting:
* - `(src: A, ..., lBranch: FreeQS[T], rBranch: FreeQS[T], ...)` – used in
* operations that combine multiple data sources (notably joins and unions).
* This holds the divergent parts of the data sources in the branches, with
* [[SrcHole]] indicating a reference back to the common `src` of the two
 * branches. A [[SrcHole]] is not required to be present.
* - `Free[F, A]` – we use this structure as a restricted form of variable
* binding, where `F` is some pattern functor, and `A` is some enumeration
* that has a specific referent. E.g., [[FreeMap]] is a recursive structure
* of [[MapFunc]] that has a single “variable”, [[SrcHole]], which (usually)
* refers to the `src` parameter of that operation. [[JoinFunc]], [[FreeQS]],
* and the `repair` parameter to [[Reduce]] behave similarly.
* - We use the type parameter `QS[_]` to indicate QScript, as well as the type
* parameters `IN[_]` and `OUT[_]` to indicate the input and output
* coproducts in transformations where they can be different.
*/
// NB: Here we no longer care about provenance. Backends can’t do anything with
// it, so we simply represent joins and crosses directly. This also means
// that we don’t need to model certain things – project_d is just a
// data-level function, nest_d & swap_d only modify provenance and so are
// irrelevant here, and autojoin_d has been replaced with a lower-level join
// operation that doesn’t include the cross portion.
package object qscript {
/** This type is _only_ used for join branch-like structures. It’s an
* unfortunate consequence of not having mutually-recursive data structures.
* Once we do, this can go away. It should _not_ be used in other situations.
*
* NB: We're using the "alias" method of building the coproduct here as it
* provides a modest reduction in compilation time (~15%) for this module.
*/
type QScriptTotal[T[_[_]], A] = Coproduct[QScriptCore[T, ?] , QScriptTotal0[T, ?], A]
type QScriptTotal0[T[_[_]], A] = Coproduct[ProjectBucket[T, ?] , QScriptTotal1[T, ?], A]
type QScriptTotal1[T[_[_]], A] = Coproduct[ThetaJoin[T, ?] , QScriptTotal2[T, ?], A]
type QScriptTotal2[T[_[_]], A] = Coproduct[EquiJoin[T, ?] , QScriptTotal3[T, ?], A]
type QScriptTotal3[T[_[_]], A] = Coproduct[Const[ShiftedRead[ADir], ?] , QScriptTotal4[T, ?], A]
type QScriptTotal4[T[_[_]], A] = Coproduct[Const[ShiftedRead[AFile], ?], QScriptTotal5[T, ?], A]
type QScriptTotal5[T[_[_]], A] = Coproduct[Const[Read[ADir], ?] , QScriptTotal6[T, ?], A]
type QScriptTotal6[T[_[_]], A] = Coproduct[Const[Read[AFile], ?] , Const[DeadEnd, ?] , A]
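  // Reading the chain above top-down, QScriptTotal is the coproduct of QScriptCore,
  // ProjectBucket, ThetaJoin, EquiJoin, the ShiftedRead (dir/file) constants, the
  // Read (dir/file) constants, and DeadEnd.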
object QCT {
def apply[T[_[_]], A](qc: QScriptCore[T, A]): QScriptTotal[T, A] =
Inject[QScriptCore[T, ?], QScriptTotal[T, ?]].inj(qc)
def unapply[T[_[_]], A](qt: QScriptTotal[T, A]): Option[QScriptCore[T, A]] =
Inject[QScriptCore[T, ?], QScriptTotal[T, ?]].prj(qt)
}
/** Initial QScript. */
// FIXME should not include `Read[ADir]`
type QScriptEducated[T[_[_]], A] =
    (QScriptCore[T, ?] :\: ThetaJoin[T, ?] :\: Const[Read[ADir], ?] :/: Const[Read[AFile], ?])#M[A]
def educatedToTotal[T[_[_]]]
: Injectable.Aux[QScriptEducated[T, ?], QScriptTotal[T, ?]] =
    ::\::[QScriptCore[T, ?]](::\::[ThetaJoin[T, ?]](::/::[T, Const[Read[ADir], ?], Const[Read[AFile], ?]]))
object QCE {
def apply[T[_[_]], A](qc: QScriptCore[T, A]): QScriptEducated[T, A] =
Inject[QScriptCore[T, ?], QScriptEducated[T, ?]].inj(qc)
def unapply[T[_[_]], A](qt: QScriptEducated[T, A]): Option[QScriptCore[T, A]] =
Inject[QScriptCore[T, ?], QScriptEducated[T, ?]].prj(qt)
}
/** QScript that has not gone through Read conversion. */
type QScript[T[_[_]], A] =
    (QScriptCore[T, ?] :\: ThetaJoin[T, ?] :/: Const[DeadEnd, ?])#M[A]
implicit def qScriptToQscriptTotal[T[_[_]]]
: Injectable.Aux[QScript[T, ?], QScriptTotal[T, ?]] =
    ::\::[QScriptCore[T, ?]](::/::[T, ThetaJoin[T, ?], Const[DeadEnd, ?]])
/** QScript that has gone through Read conversion.
*
* NB: Once QScriptTotal goes away, this could become parametric in the path type.
*/
type QScriptRead[T[_[_]], A] =
    (QScriptCore[T, ?] :\: ThetaJoin[T, ?] :\: Const[Read[ADir], ?] :/: Const[Read[AFile], ?])#M[A]
implicit def qScriptReadToQscriptTotal[T[_[_]]]: Injectable.Aux[QScriptRead[T, ?], QScriptTotal[T, ?]] =
    ::\::[QScriptCore[T, ?]](::\::[ThetaJoin[T, ?]](::/::[T, Const[Read[ADir], ?], Const[Read[AFile], ?]]))
/** QScript that has gone through Read conversion and shifted conversion.
*
* NB: Once QScriptTotal goes away, this could become parametric in the path type.
*/
type QScriptShiftRead[T[_[_]], A] =
    (QScriptCore[T, ?] :\: ThetaJoin[T, ?] :\: Const[ShiftedRead[ADir], ?] :/: Const[ShiftedRead[AFile], ?])#M[A]
implicit def qScriptShiftReadToQScriptTotal[T[_[_]]]: Injectable.Aux[QScriptShiftRead[T, ?], QScriptTotal[T, ?]] =
    ::\::[QScriptCore[T, ?]](::\::[ThetaJoin[T, ?]](::/::[T, Const[ShiftedRead[ADir], ?], Const[ShiftedRead[AFile], ?]]))
type MapFunc[T[_[_]], A] = (MapFuncCore[T, ?] :/: MapFuncDerived[T, ?])#M[A]
object MFC {
def apply[T[_[_]], A](mfc: MapFuncCore[T, A]): MapFunc[T, A] =
Inject[MapFuncCore[T, ?], MapFunc[T, ?]].inj(mfc)
def unapply[T[_[_]], A](mf: MapFunc[T, A]): Option[MapFuncCore[T, A]] =
Inject[MapFuncCore[T, ?], MapFunc[T, ?]].prj(mf)
}
object MFD {
def apply[T[_[_]], A](mfc: MapFuncDerived[T, A]): MapFunc[T, A] =
Inject[MapFuncDerived[T, ?], MapFunc[T, ?]].inj(mfc)
def unapply[T[_[_]], A](mf: MapFunc[T, A]): Option[MapFuncDerived[T, A]] =
Inject[MapFuncDerived[T, ?], MapFunc[T, ?]].prj(mf)
}
type RecFreeMapA[T[_[_]], A] = Free[RecFreeS[MapFunc[T, ?], ?], A]
type RecFreeMap[T[_[_]]] = RecFreeMapA[T, Hole]
type FreeQS[T[_[_]]] = Free[QScriptTotal[T, ?], Hole]
type FreeMapA[T[_[_]], A] = Free[MapFunc[T, ?], A]
type FreeMap[T[_[_]]] = FreeMapA[T, Hole]
type JoinFunc[T[_[_]]] = FreeMapA[T, JoinSide]
type CoEnvQS[T[_[_]], A] = CoEnv[Hole, QScriptTotal[T, ?], A]
type CoEnvMapA[T[_[_]], A, B] = CoEnv[A, MapFunc[T, ?], B]
type CoEnvMap[T[_[_]], A] = CoEnvMapA[T, Hole, A]
object ExtractFunc {
def unapply[T[_[_]], A](fma: FreeMapA[T, A]): Option[MapFuncCore[T, FreeMapA[T, A]]] = fma match {
      case Embed(CoEnv(\/-(MFC(func)))) => Some(func)
case _ => None
}
}
def HoleF[T[_[_]]]: FreeMap[T] = Free.point[MapFunc[T, ?], Hole](SrcHole)
def HoleQS[T[_[_]]]: FreeQS[T] = Free.point[QScriptTotal[T, ?], Hole](SrcHole)
def LeftSideF[T[_[_]]]: JoinFunc[T] =
Free.point[MapFunc[T, ?], JoinSide](LeftSide)
def RightSideF[T[_[_]]]: JoinFunc[T] =
Free.point[MapFunc[T, ?], JoinSide](RightSide)
  def ReduceIndexF[T[_[_]]](i: Int \/ Int): FreeMapA[T, ReduceIndex] =
Free.point[MapFunc[T, ?], ReduceIndex](ReduceIndex(i))
def rebase[M[_]: Bind, A](in: M[A], key: M[A]): M[A] = in >> key
def rebaseT[T[_[_]]: BirecursiveT, F[_]: Traverse](
target: FreeQS[T])(
src: T[F])(
implicit FI: Injectable.Aux[F, QScriptTotal[T, ?]]):
Option[T[F]] =
target.as(src.transAna[T[QScriptTotal[T, ?]]](FI.inject)).cata(recover(_.embed)).transAnaM(FI project _)
def rebaseTCo[T[_[_]]: BirecursiveT, F[_]: Traverse]
(target: FreeQS[T])
(srcCo: T[CoEnv[Hole, F, ?]])
(implicit FI: Injectable.Aux[F, QScriptTotal[T, ?]])
: Option[T[CoEnv[Hole, F, ?]]] =
// TODO: with the right instances & types everywhere, this should look like
// target.transAnaM(_.htraverse(FI project _)) ∘ (_ >> srcCo)
target.cataM[Option, T[CoEnv[Hole, F, ?]]](
CoEnv.htraverse(λ[QScriptTotal[T, ?] ~> (Option ∘ F)#λ](FI.project(_))).apply(_) ∘ (_.embed)) ∘
(targ => (targ.convertTo[Free[F, Hole]] >> srcCo.convertTo[Free[F, Hole]]).convertTo[T[CoEnv[Hole, F, ?]]])
/** A variant of `repeatedly` that works with `Inject` instances. */
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
def injectRepeatedly[F [_], G[_], A]
(op: F[A] => Option[G[A]])
(implicit F: F :<: G)
: F[A] => G[A] =
fa => op(fa).fold(F.inj(fa))(ga => F.prj(ga).fold(ga)(injectRepeatedly(op)))
// Helpers for creating `Injectable` instances
  object ::\:: {
def apply[F[_]] = new Aux[F]
final class Aux[F[_]] {
def apply[T[_[_]], G[_]]
(i: Injectable.Aux[G, QScriptTotal[T, ?]])
(implicit F: F :<: QScriptTotal[T, ?])
: Injectable.Aux[Coproduct[F, G, ?], QScriptTotal[T, ?]] =
Injectable.coproduct(Injectable.inject[F, QScriptTotal[T, ?]], i)
}
}
def ::/::[T[_[_]], F[_], G[_]]
(implicit F: F :<: QScriptTotal[T, ?], G: G :<: QScriptTotal[T, ?])
: Injectable.Aux[Coproduct[F, G, ?], QScriptTotal[T, ?]] =
Injectable.coproduct(
Injectable.inject[F, QScriptTotal[T, ?]],
Injectable.inject[G, QScriptTotal[T, ?]])
}
|
jedesah/Quasar
|
connector/src/main/scala/quasar/qscript/package.scala
|
Scala
|
apache-2.0
| 10,121
|
package pureconfig.module.magnolia.auto
import scala.language.experimental.macros
import scala.reflect.ClassTag
import magnolia._
import pureconfig.generic.{CoproductHint, ProductHint}
import pureconfig.module.magnolia.{ExportedMagnolia, MagnoliaConfigWriter}
import pureconfig.{ConfigWriter, Exported}
/** An object that, when imported, provides implicit `ConfigWriter` instances for value classes, tuples, case classes
* and sealed traits. The generation of `ConfigWriter`s is done by Magnolia.
*/
object writer {
type Typeclass[A] = ConfigWriter[A]
def combine[A: ProductHint](ctx: CaseClass[ConfigWriter, A]): ConfigWriter[A] =
MagnoliaConfigWriter.combine(ctx)
def dispatch[A: ClassTag: CoproductHint](ctx: SealedTrait[ConfigWriter, A]): ConfigWriter[A] =
MagnoliaConfigWriter.dispatch(ctx)
implicit def exportWriter[A]: Exported[ConfigWriter[A]] = macro ExportedMagnolia.exportedMagnolia[ConfigWriter, A]
}
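/** A minimal usage sketch of the derivation above, assuming the usual pureconfig setup;
  * `ServerConfig` and `WriterUsageSketch` are hypothetical example names. Importing
  * `writer._` brings the exported Magnolia derivation into scope, and `ConfigWriter[A]`
  * summons it through pureconfig's `Exported` mechanism.
  */
private object WriterUsageSketch {
  import com.typesafe.config.ConfigValue
  import pureconfig.ConfigWriter
  import writer._

  final case class ServerConfig(host: String, port: Int)

  // Renders the case class to a Typesafe Config value, e.g. {"host":"localhost","port":8080}.
  val rendered: ConfigValue = ConfigWriter[ServerConfig].to(ServerConfig("localhost", 8080))
}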
|
pureconfig/pureconfig
|
modules/magnolia/src/main/scala/pureconfig/module/magnolia/auto/writer.scala
|
Scala
|
mpl-2.0
| 940
|
/*******************************************************************************
Copyright (c) 2012-2014, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.DOMHtml
import scala.collection.mutable.{Map=>MMap, HashMap=>MHashMap}
import kr.ac.kaist.jsaf.analysis.typing._
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.typing.ControlPoint
import org.w3c.dom.Node
import org.w3c.dom.Element
import kr.ac.kaist.jsaf.analysis.typing.models.DOMCore.{DOMDocument, DOMNodeList}
import org.w3c.dom.html.{HTMLDocument => HTMLDoc}
import kr.ac.kaist.jsaf.analysis.cfg.{CFG, CFGExpr, InternalError}
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.analysis.typing.models.AbsBuiltinFunc
import kr.ac.kaist.jsaf.Shell
object HTMLDocument extends DOM {
private val name = "HTMLDocument"
/* predefined locations */
val loc_cons = newSystemRecentLoc(name + "Cons")
val loc_proto = newSystemRecentLoc(name + "Proto")
val loc_ins = newSystemRecentLoc(name + "Ins")
val loc_ins2 = newSystemRecentLoc(name + "2Ins")
val GlobalDocumentLoc = if(Shell.params.opt_Dommodel2) loc_ins
else newSystemRecentLoc(name + "Global")
/* constructor */
private val prop_cons: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("@hasinstance", AbsConstValue(PropValueNullTop)),
("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(0)), F, F, F)))),
("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
)
  /* prototype */
private val prop_proto: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(DOMDocument.loc_proto), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("open", AbsBuiltinFunc("HTMLDocument.open", 0)),
("close", AbsBuiltinFunc("HTMLDocument.close", 0)),
("write", AbsBuiltinFunc("HTMLDocument.write", 1)),
("writeln", AbsBuiltinFunc("HTMLDocument.writeln", 1)),
("getElementsByName", AbsBuiltinFunc("HTMLDocument.getElementsByName", 1))
)
/* instance */
private val prop_ins: List[(String, AbsProperty)] =
DOMDocument.getInsList2() ++ List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(loc_proto, F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
// DOM Level 1
("title", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("referer", AbsConstValue(PropValue(ObjectValue(StrTop, F, T, T)))),
("domain", AbsConstValue(PropValue(ObjectValue(StrTop, F, T, T)))),
("images", AbsConstValue(PropValue(ObjectValue(Value(HTMLCollection.loc_ins), F, T, T)))),
("applets", AbsConstValue(PropValue(ObjectValue(Value(HTMLCollection.loc_ins), F, T, T)))),
("links", AbsConstValue(PropValue(ObjectValue(Value(HTMLCollection.loc_ins), F, T, T)))),
("forms", AbsConstValue(PropValue(ObjectValue(Value(HTMLCollection.loc_ins), F, T, T)))),
("anchors", AbsConstValue(PropValue(ObjectValue(Value(HTMLCollection.loc_ins), F, T, T)))),
("documentElement", AbsConstValue(PropValue(ObjectValue(Value(HTMLHtmlElement.loc_ins), F, T, T)))),
("body", AbsConstValue(PropValue(ObjectValue(Value(HTMLBodyElement.loc_ins), F, T, T)))),
("cookie", AbsConstValue(PropValue(ObjectValue(Value(StrTop), T, T, T)))),
("width", AbsConstValue(PropValue(ObjectValue(UInt, T, T, T)))),
("height", AbsConstValue(PropValue(ObjectValue(UInt, T, T, T))))
)
/* list of instance properties */
private val prop_ins2: List[(String, AbsProperty)] =
List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(loc_proto, F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue)))
)
/* global */
private val prop_global: List[(String, AbsProperty)] = List(
(name, AbsConstValue(PropValue(ObjectValue(loc_cons, T, F, T))))
)
def getInitList(): List[(Loc, List[(String, AbsProperty)])] = if(Shell.params.opt_Dommodel2) List(
(loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global), (loc_ins, prop_ins), (loc_ins2, prop_ins2)
) else List(
(loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global) )
def getSemanticMap(): Map[String, SemanticFun] = {
Map(
//TODO: not yet implemented
//case "HTMLDocument.open" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.close" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.write" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.writeln" => ((h,ctx),(he,ctxe))
"HTMLDocument.getElementsByName" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val lset_env = h(SinglePureLocalLoc)("@env")._2._2
val set_addr = lset_env.foldLeft[Set[Address]](Set())((a, l) => a + locToAddr(l))
if (set_addr.size > 1) throw new InternalError("API heap allocation: Size of env address is " + set_addr.size)
val addr_env = (cp._1._1, set_addr.head)
val addr1 = cfg.getAPIAddress(addr_env, 0)
/* arguments */
val s_name = Helper.toString(Helper.toPrimitive_better(h, getArgValue(h, ctx, args, "0")))
if (s_name </ StrBot) {
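          // Two modeling modes for getElementsByName: with opt_Dommodel2 a fresh
          // HTMLCollection is allocated whose numeric indices may point to every element
          // registered under the name in the name table; otherwise the name table is
          // consulted directly, and an empty NodeList is also allocated when the name may
          // be absent, so the (imprecise) result covers both outcomes.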
if(Shell.params.opt_Dommodel2){
val lset_env = h(SinglePureLocalLoc)("@env")._2._2
val l_r = addrToLoc(addr1, Recent)
val (h_1, ctx_1) = Helper.Oldify(h, ctx, addr1)
val lset = Helper.Proto(h, NameTableLoc, s_name)._2
val proplist = HTMLCollection.getInsList(0)
val obj = proplist.foldLeft(Obj.empty)((o, p) => o.update(p._1, p._2))
val new_obj = if(lset.size > 0)
obj.update("length", PropValue(ObjectValue(Value(UInt), F, T, T))).update(
NumStr, PropValue(ObjectValue(Value(lset), T, T, T)))
else obj
val h_2 = h_1.update(l_r, new_obj)
((Helper.ReturnStore(h_2, Value(l_r)), ctx_1), (he, ctxe))
}
else {
val obj_table = h(NameTableLoc)
val propv_element = obj_table(s_name)
val abs_element = obj_table.domIn(s_name)
val (h_1, ctx_1, v_empty) =
if (BoolFalse <= abs_element) {
val l_r = addrToLoc(addr1, Recent)
val (_h, _ctx) = Helper.Oldify(h, ctx, addr1)
/* empty NodeList */
val o_empty = DOMNodeList.getInsList(0).foldLeft(Obj.empty)((o, pv) =>
o.update(pv._1, pv._2))
val _h1 = _h.update(l_r, o_empty)
(_h1, _ctx, Value(l_r))
} else (h, ctx, ValueBot)
/* imprecise semantic */
((Helper.ReturnStore(h_1, propv_element._1._1 + v_empty), ctx_1), (he, ctxe))
}
}
else
((HeapBot, ContextBot), (he, ctxe))
})
)
}
def getPreSemanticMap(): Map[String, SemanticFun] = {
Map(
//TODO: not yet implemented
//case "HTMLDocument.open" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.close" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.write" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.writeln" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.getElementsByName" => ((h,ctx),(he,ctxe))
)
}
def getDefMap(): Map[String, AccessFun] = {
Map(
//TODO: not yet implemented
//case "HTMLDocument.open" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.close" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.write" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.writeln" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.getElementsByName" => ((h,ctx),(he,ctxe))
)
}
def getUseMap(): Map[String, AccessFun] = {
Map(
//TODO: not yet implemented
//case "HTMLDocument.open" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.close" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.write" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.writeln" => ((h,ctx),(he,ctxe))
//case "HTMLDocument.getElementsByName" => ((h,ctx),(he,ctxe))
)
}
/* instance */
// only one 'document' can be present in the heap
var loc_ins_status: Option[Loc] = None
override def getInstance(cfg: CFG): Option[Loc] = {
val loc_ins = GlobalDocumentLoc //addrToLoc(cfg.newProgramAddr, Recent)
loc_ins_status=Some(loc_ins)
loc_ins_status
}
def getInstance(): Option[Loc] = loc_ins_status
/* list of properties in the instance object */
override def getInsList(node: Node): List[(String, PropValue)] = node match {
case d: HTMLDoc =>
val referrer = d.getReferrer
val domain = d.getDomain
val URL = d.getURL
// This instance object has all properties of the Document object
DOMDocument.getInsList(node) ++ List(
("@class", PropValue(AbsString.alpha("Object"))),
("@proto", PropValue(ObjectValue(loc_proto, F, F, F))),
("@extensible", PropValue(BoolTrue)),
// DOM Level 1
("title", PropValue(ObjectValue(StrTop, T, T, T))),
("referrer", PropValue(ObjectValue(AbsString.alpha(if(referrer!=null) referrer else ""), F, T, T))),
("domain", PropValue(ObjectValue(AbsString.alpha(if(domain!=null) domain else ""), F, T, T))),
("URL", PropValue(ObjectValue(AbsString.alpha(if(URL!=null) URL else ""), F, T, T))),
("cookie", PropValue(ObjectValue(AbsString.alpha(""), T, T, T))),
("body", PropValue(ObjectValue(Value(NullTop), T, T, T))),
// 'compatMode' in WHATWG DOM Living Standard
("compatMode", PropValue(ObjectValue(OtherStr, T, T, T)))
        // the 'all', 'forms', 'images', and 'body' properties are updated in DOMBuilder
)
// TODO: 'applets', 'links', 'anchors' in DOM Level 1
case _ => {
System.err.println("* Warning: " + node.getNodeName + " cannot be an instance of HTMLDocument.")
List()
}
}
/* list of instance properties */
def getInsList(): List[(String, PropValue)] = {
List(
("@class", PropValue(AbsString.alpha("Object"))),
("@proto", PropValue(ObjectValue(loc_proto, F, F, F))),
("@extensible", PropValue(BoolTrue))
)
}
}
|
darkrsw/safe
|
src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/DOMHtml/HTMLDocument.scala
|
Scala
|
bsd-3-clause
| 11,354
|
/*
* jFin - bond math
*
* Copyright (C) 2005-2008, 2012 Morgan Brown Consultancy Ltd.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mbc.jfin
import org.specs2.mutable.SpecificationWithJUnit
import org.scala_tools.time.Imports._
import org.scala_tools.time.Imports.Period._
import org.specs2.matcher.{MatchResult, DataTables}
import org.scala_tools.time.Imports
trait TheTest {
this: SpecificationWithJUnit =>
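  // For a generated schedule, checks that period `index` runs from the expected start date
  // to the expected end date, with both dates given as (year, month, day) triples.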
val test: List[(LocalDate,LocalDate)] => (Int, (Int, Int, Int), (Int, Int, Int)) => MatchResult[Imports.LocalDate] = schedule => {
    case (index, (ys, ms, ds), (ye, me, de)) =>
schedule(index)._1 must be equalTo (new LocalDate(ys, ms, ds))
      schedule(index)._2 must be equalTo (new LocalDate(ye, me, de))
}
}
class ScheduleSpec extends SpecificationWithJUnit with DataTables with TheTest {
"An annual schedule with no stub from 2006-3-28 to 2011-3-28" should {
val schedule = ScheduleGenerator.listToPeriods( ScheduleGenerator.generateNormalScheduleList(new LocalDate(2006, 3, 28), new LocalDate(2011, 3, 28), years(1), NoStub) )
"contain 5 periods" in {
schedule must have size (5)
}
"match the following periods" in {
"period" | "start" | "end" |
0 !(2006, 3, 28) !(2007, 3, 28) |
1 !(2007, 3, 28) !(2008, 3, 28) |
2 !(2008, 3, 28) !(2009, 3, 28) |
3 !(2009, 3, 28) !(2010, 3, 28) |
4 !(2010, 3, 28) !(2011, 3, 28) |> test(schedule)
}
}
"A quarterly schedule with no stub from 2006-03-28 to 2007-03-28" should {
val schedule = ScheduleGenerator.listToPeriods( ScheduleGenerator.generateNormalScheduleList(new LocalDate(2006, 3, 28), new LocalDate(2007, 3, 28), months(3), NoStub) )
"contain 4 periods" in {
schedule must have size (4)
}
"match the following periods" in {
"period" | "start" | "end" |
0 !(2006, 3, 28) !(2006, 6, 28) |
1 !(2006, 6, 28) !(2006, 9, 28) |
2 !(2006, 9, 28) !(2006, 12, 28) |
3 !(2006, 12, 28) !(2007, 3, 28) |> test(schedule)
}
}
"A quarterly schedule with no stub from 2006-03-31 to 2007-03-31" should {
val schedule = ScheduleGenerator.listToPeriods( ScheduleGenerator.generateNormalScheduleList(new LocalDate(2006, 3, 31), new LocalDate(2007, 3, 31), months(3), NoStub) )
"contain 4 periods" in {
schedule must have size (4)
}
"match the following periods" in {
"period" | "start" | "end" |
0 !(2006, 3, 31) !(2006, 6, 30) |
1 !(2006, 6, 30) !(2006, 9, 30) |
2 !(2006, 9, 30) !(2006, 12, 31) |
3 !(2006, 12, 31) !(2007, 3, 31) |> test(schedule)
}
}
"An annual schedule with no stub from 2006-03-28 to 2011-02-28" should {
"fail to generate an irregular schedule" in {
ScheduleGenerator.generateNormalScheduleList(new LocalDate(2006, 3, 28), new LocalDate(2011, 2, 28), years(1), NoStub) must throwA[AssertionError]
}
}
"An annual schedule with a short last stub from 2006-3-28 to 2011-4-28" should {
val schedule = ScheduleGenerator.listToPeriods( ScheduleGenerator.generateNormalScheduleList(new LocalDate(2006, 3, 28), new LocalDate(2011, 4, 28), years(1), ShortLast) )
"contain 6 periods" in {
schedule must have size (6)
}
"match the following periods" in {
"period" | "start" | "end" |
0 !(2006, 3, 28) !(2007, 3, 28) |
1 !(2007, 3, 28) !(2008, 3, 28) |
2 !(2008, 3, 28) !(2009, 3, 28) |
3 !(2009, 3, 28) !(2010, 3, 28) |
4 !(2010, 3, 28) !(2011, 3, 28) |
5 !(2011, 3, 28) !(2011, 4, 28) |> test(schedule)
}
}
"An annual schedule with a long last stub from 2006-3-28 to 2011-4-28" should {
val schedule = ScheduleGenerator.listToPeriods( ScheduleGenerator.generateNormalScheduleList(new LocalDate(2006, 3, 28), new LocalDate(2011, 4, 28), years(1), LongLast) )
"contain 5 periods" in {
schedule must have size (5)
}
"match the following periods" in {
"period" | "start" | "end" |
0 !(2006, 3, 28) !(2007, 3, 28) |
1 !(2007, 3, 28) !(2008, 3, 28) |
2 !(2008, 3, 28) !(2009, 3, 28) |
3 !(2009, 3, 28) !(2010, 3, 28) |
4 !(2010, 3, 28) !(2011, 4, 28) |> test(schedule)
}
}
"An annual schedule with a short first stub from 2006-3-28 to 2011-4-28" should {
val schedule = ScheduleGenerator.listToPeriods( ScheduleGenerator.generateNormalScheduleList(new LocalDate(2006, 3, 28), new LocalDate(2011, 4, 28), years(1), ShortFirst) )
"contain 6 periods" in {
schedule must have size (6)
}
"match the following periods" in {
"period" | "start" | "end" |
0 !(2006, 3, 28) !(2006, 4, 28) |
1 !(2006, 4, 28) !(2007, 4, 28) |
2 !(2007, 4, 28) !(2008, 4, 28) |
3 !(2008, 4, 28) !(2009, 4, 28) |
4 !(2009, 4, 28) !(2010, 4, 28) |
5 !(2010, 4, 28) !(2011, 4, 28) |> test(schedule)
}
}
"An annual schedule with a long first stub from 2006-3-28 to 2011-4-28" should {
val schedule = ScheduleGenerator.listToPeriods(ScheduleGenerator.generateNormalScheduleList(new LocalDate(2006, 3, 28), new LocalDate(2011, 4, 28), years(1), LongFirst))
"contain 5 periods" in {
schedule must have size (5)
}
"match the following periods" in {
"period" | "start" | "end" |
0 !(2006, 3, 28) !(2007, 4, 28) |
1 !(2007, 4, 28) !(2008, 4, 28) |
2 !(2008, 4, 28) !(2009, 4, 28) |
3 !(2009, 4, 28) !(2010, 4, 28) |
4 !(2010, 4, 28) !(2011, 4, 28) |> test(schedule)
}
}
}
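/** A minimal additional sketch (not one of the original specifications) following the same
  * generate-then-pair pattern used above; the expected period count follows directly from
  * a six-month step over a one-year span.
  */
class SemiAnnualScheduleSketch extends SpecificationWithJUnit {
  "A semi-annual schedule with no stub from 2006-3-28 to 2007-3-28" should {
    val schedule = ScheduleGenerator.listToPeriods( ScheduleGenerator.generateNormalScheduleList(new LocalDate(2006, 3, 28), new LocalDate(2007, 3, 28), months(6), NoStub) )
    "contain 2 periods" in {
      schedule must have size (2)
    }
  }
}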
|
mbcltd/jfin
|
src/test/scala/mbc/jfin/ScheduleSpec.scala
|
Scala
|
agpl-3.0
| 6,213
|
package com.criteo.slab.lib
import com.criteo.slab.lib.graphite.{DataPoint, GraphiteMetric}
import com.criteo.slab.utils.Jsonable
import org.json4s.DefaultFormats
import org.scalatest.{FlatSpec, Matchers}
import scala.util.Success
class GraphiteMetricSpec extends FlatSpec with Matchers {
"JSON serializer" should "be able to read json" in {
val json = """[{"target":"metric.one", "datapoints":[[1.0, 2000], [null, 2060]]}]""".stripMargin.replace("\\n", "")
val formats = DefaultFormats ++ Jsonable[GraphiteMetric].serializers
val r = Jsonable.parse[List[GraphiteMetric]](json, formats)
r shouldEqual Success(List(GraphiteMetric("metric.one", List(
DataPoint(Some(1.0), 2000),
DataPoint(None, 2060)
))))
}
}
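/** A small companion sketch exercising the same read path with an empty datapoints array,
  * assuming the serializers accept one; `GraphiteMetricEmptySpec` is a hypothetical name.
  */
class GraphiteMetricEmptySpec extends FlatSpec with Matchers {
  "JSON serializer" should "be able to read a metric with no datapoints" in {
    val json = """[{"target":"metric.two", "datapoints":[]}]"""
    val formats = DefaultFormats ++ Jsonable[GraphiteMetric].serializers
    val r = Jsonable.parse[List[GraphiteMetric]](json, formats)
    r shouldEqual Success(List(GraphiteMetric("metric.two", Nil)))
  }
}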
|
criteo/slab
|
src/test/scala/com/criteo/slab/lib/GraphiteMetricSpec.scala
|
Scala
|
apache-2.0
| 746
|
/*
* Copyright 2001-2012 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import reflect.macros.Context
/**
 * Trait that contains <code>require</code>, <code>requireState</code>, and <code>requireNonNull</code> methods for checking pre-conditions
* that give descriptive error messages extracted via a macro.
*
* <p>These methods of trait <code>Requirements</code> aim to improve error messages provided when a pre-condition check fails at runtime in
* production code. Although it is recommended practice to supply helpful error messages when doing pre-condition checks, often people
* don't. Instead of this:
*
* <pre class="stREPL">
* scala> val length = 5
* length: Int = 5
*
* scala> val idx = 6
* idx: Int = 6
*
* scala> require(idx >= 0 && idx <= length, "index, " + idx + ", was less than zero or greater than or equal to length, " + length)
* java.lang.IllegalArgumentException: <strong>requirement failed: index, 6, was less than zero or greater than or equal to length, 5</strong>
* at scala.Predef$.require(Predef.scala:233)
* ...
* </pre>
*
* <p>
* People write simply:
* </p>
*
* <pre class="stREPL">
* scala> require(idx >= 0 && idx <= length)
* java.lang.IllegalArgumentException: <strong>requirement failed</strong>
* at scala.Predef$.require(Predef.scala:221)
* ...
* </pre>
*
* <p>
* Note that the detail message of the <code>IllegalArgumentException</code> thrown by the previous line of code is simply, <code>"requirement failed"</code>.
* Such messages often end up in a log file or bug report, where a better error message can save time in debugging the problem.
* By importing the members of <code>Requirements</code> (or mixing in its companion trait), you'll get a more helpful error message
* extracted by a macro, whether or not a clue message is provided:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalactic._
* import org.scalactic._
*
* scala> import Requirements._
* import Requirements._
*
* scala> require(idx >= 0 && idx <= length)
* java.lang.IllegalArgumentException: <strong>6 was greater than or equal to 0, but 6 was not less than or equal to 5</strong>
* at org.scalactic.Requirements$RequirementsHelper.macroRequire(Requirements.scala:56)
* ...
*
* scala> require(idx >= 0 && idx <= length, "(hopefully that helps)")
* java.lang.IllegalArgumentException: <strong>6 was greater than or equal to 0, but 6 was not less than or equal to 5 (hopefully that helps)</strong>
* at org.scalactic.Requirements$RequirementsHelper.macroRequire(Requirements.scala:56)
* ...
* </pre>
*
* <p>
* The <code>requireState</code> method provides identical error messages to <code>require</code>, but throws
* <code>IllegalStateException</code> instead of <code>IllegalArgumentException</code>:
* </p>
*
* <pre class="stREPL">
* scala> val connectionOpen = false
* connectionOpen: Boolean = false
*
* scala> requireState(connectionOpen)
* java.lang.IllegalStateException: <strong>connectionOpen was false</strong>
* at org.scalactic.Requirements$RequirementsHelper.macroRequireState(Requirements.scala:71)
* ...
* </pre>
*
* <p>
* Thus, whereas the <code>require</code> methods throw the Java platform's standard exception indicating a passed argument
* violated a precondition, <code>IllegalArgumentException</code>, the <code>requireState</code> methods throw the standard
* exception indicating an object's method was invoked when the object was in an inappropriate state for that method,
* <code>IllegalStateException</code>.
* </p>
*
* <p>
* The <code>requireNonNull</code> method takes one or more variables as arguments and throws <code>NullPointerException</code>
* with an error messages that includes the variable names if any are <code>null</code>. Here's an example:
* </p>
*
* <pre class="stREPL">
* scala> val e: String = null
* e: String = null
*
* scala> val f: java.util.Date = null
* f: java.util.Date = null
*
* scala> requireNonNull(a, b, c, d, e, f)
* java.lang.NullPointerException: <strong>e and f were null</strong>
* at org.scalactic.Requirements$RequirementsHelper.macroRequireNonNull(Requirements.scala:101)
* ...
* </pre>
*
* <p>
* Although trait <code>Requirements</code> can help you debug problems that occur in production, bear in mind that a much
* better alternative is to make it impossible for such events to occur at all. Use the type system to ensure that all
* pre-conditions are met so that the compiler can find broken pre-conditions and point them out with compiler error messages.
* When this is not possible or practical, however, trait <code>Requirements</code> is helpful.
* </p>
*/
trait Requirements {
import language.experimental.macros
/**
* Helper class used by code generated by the <code>require</code> macro.
*/
class RequirementsHelper {
private def append(currentMessage: String, clue: Any): String = {
val clueStr = clue.toString
if (clueStr.isEmpty)
currentMessage
else {
val firstChar = clueStr.head
if (firstChar.isWhitespace || firstChar == '.' || firstChar == ',' || firstChar == ';' || currentMessage.isEmpty)
currentMessage + clueStr
else
currentMessage + " " + clueStr
}
}
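    // For example, append("6 was not less than 5", ", sorry") yields
    // "6 was not less than 5, sorry", while append("6 was not less than 5", "sorry")
    // yields "6 was not less than 5 sorry": a separating space is added only when the
    // clue does not already start with whitespace or leading punctuation.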
/**
* Require that the passed in <code>Bool</code> is <code>true</code>, else fail with <code>IllegalArgumentException</code>.
*
* @param bool the <code>Bool</code> to check as requirement
* @param clue optional clue to be included in <code>IllegalArgumentException</code>'s error message when the requirement failed
*/
def macroRequire(bool: Bool, clue: Any) {
if (clue == null)
throw new NullPointerException("clue was null")
if (!bool.value) {
val failureMessage = if (Bool.isSimpleWithoutExpressionText(bool)) append("", clue) else append(bool.failureMessage, clue)
throw new IllegalArgumentException(if (failureMessage.isEmpty) FailureMessages("expressionWasFalse") else failureMessage)
}
}
/**
* Require that the passed in <code>Bool</code> is <code>true</code>, else fail with <code>IllegalStateException</code>.
*
* @param bool the <code>Bool</code> to check as requirement
* @param clue optional clue to be included in <code>IllegalStateException</code>'s error message when the requirement failed
*/
def macroRequireState(bool: Bool, clue: Any) {
if (clue == null)
throw new NullPointerException("clue was null")
if (!bool.value) {
val failureMessage = if (Bool.isSimpleWithoutExpressionText(bool)) append("", clue) else append(bool.failureMessage, clue)
throw new IllegalStateException(if (failureMessage.isEmpty) FailureMessages("expressionWasFalse") else failureMessage)
}
}
/**
* Require that all of the passed in arguments are not <code>null</code>, else fail with <code>NullPointerException</code>.
*
     * @param variableNames names of the variables passed, as they appear in the source
* @param arguments arguments to check for <code>null</code> value
*/
def macroRequireNonNull(variableNames: Array[String], arguments: Array[Any]) {
val nullList = arguments.zipWithIndex.filter { case (e, idx) =>
e == null
}
val nullCount = nullList.size
if (nullCount > 0) {
val nullVariableNames = nullList.map { case (e, idx) =>
variableNames(idx)
}
val errorMessage =
if (nullCount == 1)
FailureMessages("wasNull", UnquotedString(nullVariableNames(0)))
else if (nullCount == 2) {
val combinedVariableNames = Resources("and", nullVariableNames.head, nullVariableNames.last)
FailureMessages("wereNull", UnquotedString(combinedVariableNames))
}
else {
val combinedVariableNames = Resources("commaAnd", nullVariableNames.dropRight(1).mkString(Resources("comma")), nullVariableNames.last)
FailureMessages("wereNull", UnquotedString(combinedVariableNames))
}
throw new NullPointerException(errorMessage)
}
}
}
/**
* Helper instance used by code generated by macro assertion.
*/
val requirementsHelper = new RequirementsHelper
/**
* Require that a boolean condition is true about an argument passed to a method, function, or constructor.
*
* <p>
* If the condition is <code>true</code>, this method returns normally.
* Else, it throws <code>IllegalArgumentException</code>.
* </p>
*
* <p>
* This method is implemented in terms of a Scala macro that will generate an error message.
* See the main documentation for this trait for examples.
* </p>
*
* @param condition the boolean condition to check as requirement
* @throws IllegalArgumentException if the condition is <code>false</code>.
*/
def require(condition: Boolean): Unit = macro RequirementsMacro.require
/**
* Require that a boolean condition about an argument passed to a method, function, or constructor,
* and described in the given <code>clue</code>, is true.
*
* If the condition is <code>true</code>, this method returns normally.
* Else, it throws <code>IllegalArgumentException</code> with the
* <code>String</code> obtained by invoking <code>toString</code> on the
* specified <code>clue</code> and appending that to the macro-generated
* error message as the exception's detail message.
*
* @param condition the boolean condition to check as requirement
   * @param clue an object whose <code>toString</code> method returns a message to include in a failure report.
* @throws IllegalArgumentException if the condition is <code>false</code>.
   * @throws NullPointerException if <code>clue</code> is <code>null</code>.
*/
def require(condition: Boolean, clue: Any): Unit = macro RequirementsMacro.requireWithClue
/**
* Require that a boolean condition is true about the state of an object on which a method has been invoked.
*
* <p>
* If the condition is <code>true</code>, this method returns normally.
* Else, it throws <code>IllegalStateException</code>.
* </p>
*
* <p>
* This method is implemented in terms of a Scala macro that will generate an error message.
* </p>
*
* @param condition the boolean condition to check as requirement
* @throws IllegalStateException if the condition is <code>false</code>.
*/
def requireState(condition: Boolean): Unit = macro RequirementsMacro.requireState
/**
* Require that a boolean condition about the state of an object on which a method has been
* invoked, and described in the given <code>clue</code>, is true.
*
* <p>
* If the condition is <code>true</code>, this method returns normally.
* Else, it throws <code>IllegalStateException</code> with the
* <code>String</code> obtained by invoking <code>toString</code> on the
* specified <code>clue</code> appended to the macro-generated error message
* as the exception's detail message.
* </p>
*
* @param condition the boolean condition to check as a requirement
* @param clue an object whose <code>toString</code> method returns a message to include in a failure report.
* @throws IllegalStateException if the condition is <code>false</code>.
   * @throws NullPointerException if <code>clue</code> is <code>null</code>.
*/
def requireState(condition: Boolean, clue: Any): Unit = macro RequirementsMacro.requireStateWithClue
/**
* Require that all passed arguments are non-null.
*
* <p>
* If none of the passed arguments are <code>null</code>, this method returns normally.
* Else, it throws <code>NullPointerException</code> with an error message that includes the name
* (as it appeared in the source) of each argument that was <code>null</code>.
* </p>
*
* @param arguments arguments to check for <code>null</code> value
* @throws NullPointerException if any of the arguments are <code>null</code>.
*/
def requireNonNull(arguments: Any*): Unit = macro RequirementsMacro.requireNonNull
}
/**
* Macro implementation that provides rich error message for boolean expression requirements.
*/
private[scalactic] object RequirementsMacro {
/**
* Provides requirement implementation for <code>Requirements.require(booleanExpr: Boolean)</code>, with rich error message.
*
* @param context macro context
* @param condition original condition expression
* @return transformed expression that performs the requirement check and throw <code>IllegalArgumentException</code> with rich error message if requirement failed
*/
def require(context: Context)(condition: context.Expr[Boolean]): context.Expr[Unit] =
new BooleanMacro[context.type](context, "requirementsHelper").genMacro(condition, "macroRequire", context.literal(""))
/**
* Provides requirement implementation for <code>Requirements.require(booleanExpr: Boolean, clue: Any)</code>, with rich error message.
*
* @param context macro context
* @param condition original condition expression
* @param clue original clue expression
* @return transformed expression that performs the requirement check and throw <code>IllegalArgumentException</code> with rich error message (clue included) if requirement failed
*/
def requireWithClue(context: Context)(condition: context.Expr[Boolean], clue: context.Expr[Any]): context.Expr[Unit] =
new BooleanMacro[context.type](context, "requirementsHelper").genMacro(condition, "macroRequire", clue)
/**
* Provides requirement implementation for <code>Requirements.requireState(booleanExpr: Boolean)</code>, with rich error message.
*
* @param context macro context
* @param condition original condition expression
* @return transformed expression that performs the requirement check and throw <code>IllegalStateException</code> with rich error message if requirement failed
*/
def requireState(context: Context)(condition: context.Expr[Boolean]): context.Expr[Unit] =
new BooleanMacro[context.type](context, "requirementsHelper").genMacro(condition, "macroRequireState", context.literal(""))
/**
* Provides requirement implementation for <code>Requirements.requireState(booleanExpr: Boolean, clue: Any)</code>, with rich error message.
*
* @param context macro context
* @param condition original condition expression
* @param clue original clue expression
* @return transformed expression that performs the requirement check and throw <code>IllegalStateException</code> with rich error message (clue included) if requirement failed
*/
def requireStateWithClue(context: Context)(condition: context.Expr[Boolean], clue: context.Expr[Any]): context.Expr[Unit] =
new BooleanMacro[context.type](context, "requirementsHelper").genMacro(condition, "macroRequireState", clue)
/**
* Provides requirement implementation for <code>Requirements.requireNonNull(arguments: Any*)</code>, with rich error message.
*
* @param context macro context
* @param arguments original arguments expression(s)
* @return transformed expression that performs the requirement check and throw <code>NullPointerException</code> with rich error message if requirement failed
*/
def requireNonNull(context: Context)(arguments: context.Expr[Any]*): context.Expr[Unit] = {
import context.universe._
    // generate an AST that creates an array containing the argument names as they appear in source (obtained by calling 'show')
// for example, if you have:
// val a = "1"
// val b = null
// val c = "3"
// requireNonNull(a, b, c)
// it will generate the following code:
//
// Array("a", "b", "c")
val variablesNamesArray =
Apply(
Select(
Ident("Array"),
newTermName("apply")
),
List(arguments.map(e => context.literal(show(e.tree)).tree): _*)
)
    // generate an AST that creates an array containing the argument values
// for example, if you have:
// val a = "1"
// val b = null
// val c = "3"
// requireNonNull(a, b, c)
// it will generate the following code:
//
// Array(a, b, c)
val argumentsArray =
Apply(
Select(
Ident("Array"),
newTermName("apply")
),
List(arguments.map(e => e.tree): _*)
)
// Generate AST to call requirementsHelper.macroRequireNonNull and pass in both variable names and values array:
//
// requirementsHelper.macroRequireNonNull(variableNamesArray, valuesArray)
context.Expr(
Apply(
Select(
Ident("requirementsHelper"),
newTermName("macroRequireNonNull")
),
List(variablesNamesArray, argumentsArray)
)
)
}
}
/**
* Companion object that facilitates the importing of <code>Requirements</code> members as
* an alternative to mixing it in. One use case is to import <code>Requirements</code> members so you can use
* them in the Scala interpreter:
*
* <pre class="stREPL">
* $scala -classpath scalatest.jar
* Welcome to Scala version 2.10.3.final (Java HotSpot(TM) Client VM, Java xxxxxx).
* Type in expressions to have them evaluated.
* Type :help for more information.
*
* scala> import org.scalactic.Requirements._
* import org.scalactic.Requirements._
*
* scala> val a = 1
* a: Int = 1
*
* scala> require(a == 2)
* java.lang.IllegalArgumentException: 1 did not equal 2
* at org.scalactic.Requirements$RequirementsHelper.macroRequire(Requirements.scala:56)
* at .<init>(<console>:20)
* at .<clinit>(<console>)
* at .<init>(<console>:7)
* at .<clinit>(<console>)
* at $print(<console>)
* at sun.reflect.NativeMethodAccessorImpl.invoke...
*/
object Requirements extends Requirements
|
travisbrown/scalatest
|
src/main/scala/org/scalactic/Requirements.scala
|
Scala
|
apache-2.0
| 18,611
|
package mesosphere.marathon
package core.group
import javax.inject.Provider
import akka.Done
import akka.event.EventStream
import mesosphere.AkkaUnitTest
import mesosphere.marathon.core.async.ExecutionContexts
import mesosphere.marathon.core.event.GroupChangeSuccess
import mesosphere.marathon.core.group.impl.GroupManagerImpl
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state._
import mesosphere.marathon.storage.repository.GroupRepository
import mesosphere.marathon.test.GroupCreation
import scala.concurrent.{ Future, Promise }
class GroupManagerTest extends AkkaUnitTest with GroupCreation {
class Fixture(
val servicePortsRange: Range = 1000.to(20000),
val initialRoot: Option[RootGroup] = Some(RootGroup.empty)) {
val config = AllConf.withTestConfig("--local_port_min", servicePortsRange.min.toString,
"--local_port_max", (servicePortsRange.max).toString)
val groupRepository = mock[GroupRepository]
val deploymentService = mock[DeploymentService]
val eventStream = mock[EventStream]
val groupManager = new GroupManagerImpl(config, initialRoot, groupRepository, new Provider[DeploymentService] {
override def get(): DeploymentService = deploymentService
})(eventStream, ExecutionContexts.global)
}
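  // The fixture wires a real GroupManagerImpl against mocked GroupRepository and
  // DeploymentService instances, so each test below can stub repository/deployment
  // calls and then verify which root-group versions get stored.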
"GroupManager" should {
"return None as a root group, if the initial group has not been passed to it" in new Fixture(initialRoot = None) {
groupManager.rootGroupOption() shouldBe None
}
"Don't store invalid groups" in new Fixture {
val app1 = AppDefinition("/app1".toPath)
val rootGroup = createRootGroup(Map(app1.id -> app1), groups = Set(createGroup("/app1".toPath)), validate = false)
groupRepository.root() returns Future.successful(createRootGroup())
intercept[ValidationFailedException] {
throw groupManager.updateRoot(PathId.empty, _.putGroup(rootGroup, rootGroup.version), rootGroup.version, force = false).failed.futureValue
}
verify(groupRepository, times(0)).storeRoot(any, any, any, any, any)
}
"return multiple apps when asked" in {
val app1 = AppDefinition("/app1".toPath, cmd = Some("sleep"))
val app2 = AppDefinition("/app2".toPath, cmd = Some("sleep"))
val rootGroup = createRootGroup(Map(app1.id -> app1, app2.id -> app2))
val f = new Fixture(initialRoot = Some(rootGroup))
f.groupManager.apps(Set(app1.id, app2.id)) should be(Map(app1.id -> Some(app1), app2.id -> Some(app2)))
}
"publishes GroupChangeSuccess with the appropriate GID on successful deployment" in new Fixture {
val app: AppDefinition = AppDefinition("/group/app1".toPath, cmd = Some("sleep 3"), portDefinitions = Seq.empty)
val group = createGroup("/group".toPath, apps = Map(app.id -> app), version = Timestamp(1))
groupRepository.root() returns Future.successful(createRootGroup())
deploymentService.deploy(any, any) returns Future.successful(Done)
val appWithVersionInfo = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
val groupWithVersionInfo = createRootGroup(
version = Timestamp(1),
groups = Set(
createGroup(
"/group".toPath, apps = Map(appWithVersionInfo.id -> appWithVersionInfo), version = Timestamp(1))))
groupRepository.storeRootVersion(any, any, any) returns Future.successful(Done)
groupRepository.storeRoot(any, any, any, any, any) returns Future.successful(Done)
val groupChangeSuccess = Promise[GroupChangeSuccess]
eventStream.publish(any).answers {
case Array(change: GroupChangeSuccess) =>
groupChangeSuccess.success(change)
case _ =>
???
}
groupManager.updateRoot(PathId.empty, _.putGroup(group, version = Timestamp(1)), version = Timestamp(1), force = false).futureValue
verify(groupRepository).storeRoot(groupWithVersionInfo, Seq(appWithVersionInfo), Nil, Nil, Nil)
verify(groupRepository).storeRootVersion(groupWithVersionInfo, Seq(appWithVersionInfo), Nil)
groupChangeSuccess.future.
futureValue.
groupId shouldBe PathId.empty
}
"Store new apps with correct version infos in groupRepo and appRepo" in new Fixture {
val app: AppDefinition = AppDefinition("/app1".toPath, cmd = Some("sleep 3"), portDefinitions = Seq.empty)
val rootGroup = createRootGroup(Map(app.id -> app), version = Timestamp(1))
groupRepository.root() returns Future.successful(createRootGroup())
deploymentService.deploy(any, any) returns Future.successful(Done)
val appWithVersionInfo = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
val groupWithVersionInfo = createRootGroup(Map(
appWithVersionInfo.id -> appWithVersionInfo), version = Timestamp(1))
groupRepository.storeRootVersion(any, any, any) returns Future.successful(Done)
groupRepository.storeRoot(any, any, any, any, any) returns Future.successful(Done)
groupManager.updateRoot(PathId.empty, _.putGroup(rootGroup, version = Timestamp(1)), version = Timestamp(1), force = false).futureValue
verify(groupRepository).storeRoot(groupWithVersionInfo, Seq(appWithVersionInfo), Nil, Nil, Nil)
verify(groupRepository).storeRootVersion(groupWithVersionInfo, Seq(appWithVersionInfo), Nil)
}
"Expunge removed apps from appRepo" in new Fixture(initialRoot = Option({
val app: AppDefinition = AppDefinition("/app1".toPath, cmd = Some("sleep 3"), portDefinitions = Seq.empty)
createRootGroup(Map(app.id -> app), version = Timestamp(1))
})) {
val groupEmpty = createRootGroup(version = Timestamp(1))
deploymentService.deploy(any, any) returns Future.successful(Done)
groupRepository.storeRootVersion(any, any, any) returns Future.successful(Done)
groupRepository.storeRoot(any, any, any, any, any) returns Future.successful(Done)
groupManager.updateRoot(PathId.empty, _.putGroup(groupEmpty, version = Timestamp(1)), Timestamp(1), force = false).futureValue
verify(groupRepository).storeRootVersion(groupEmpty, Nil, Nil)
verify(groupRepository).storeRoot(groupEmpty, Nil, Seq("/app1".toPath), Nil, Nil)
}
}
}
|
janisz/marathon
|
src/test/scala/mesosphere/marathon/core/group/GroupManagerTest.scala
|
Scala
|
apache-2.0
| 6,217
|
package ddd.support.domain
import ddd.support.domain.IdResolution.EntityIdResolver
import ddd.support.domain.command.Command
class AggregateIdResolution[A] extends EntityIdResolution[A] {
override def entityIdResolver: EntityIdResolver = {
super.entityIdResolver.orElse {
case c: Command => c.aggregateId
}
}
}
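// Usage sketch with hypothetical command/aggregate types (not defined in this file,
// and the concrete type of aggregateId is assumed here):
//   case class CreateOrder(aggregateId: String) extends Command
//   new AggregateIdResolution[Order].entityIdResolver(CreateOrder("order-1")) // "order-1"
// i.e. any Command is routed to the aggregate identified by its aggregateId.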
|
pawelkaczor/ddd-leaven-akka
|
src/main/scala/ddd/support/domain/AggregateIdResolution.scala
|
Scala
|
mit
| 333
|
package spatial.codegen.pirgen
import argon.core._
import argon.nodes._
import spatial.aliases._
import spatial.metadata._
import spatial.nodes._
import spatial.utils._
import virtualized.SourceContext
import scala.collection.mutable
class PIRMemoryAnalyzer(implicit val codegen:PIRCodegen) extends PIRTraversal {
override val name = "PIR Memory Analyzer"
var IR = codegen.IR
override def preprocess[S:Type](b: Block[S]): Block[S] = {
super.preprocess(b)
}
override def postprocess[S:Type](b: Block[S]): Block[S] = {
super.postprocess(b)
}
override protected def visit(lhs: Sym[_], rhs: Op[_]) = {
lhs match {
case lhs if isRemoteMem(lhs) =>
dbgblk(s"${qdef(lhs)}") {
markInnerDim(lhs)
setOuterDims(lhs)
setNumOuterBanks(lhs)
setStaticBank(lhs)
}
case _ =>
}
super.visit(lhs, rhs)
}
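  // containsInnerInd reports whether an address expression (transitively) depends on a
  // loop bound of an inner controller; markInnerDim below uses it to record which
  // address dimension varies inside the innermost loop as the memory's inner dimension.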
def containsInnerInd(ind:Expr):Boolean = dbgblk(s"containsInnerInd($ind)") {
ind match {
case b:Bound[_] =>
val ctrl = ctrlOf(ind).get.node
extractInnerBounds(ctrl).contains(b)
case Def(d) => d.allInputs.exists(containsInnerInd)
case e => false
}
}
def extractInnerBounds(ctrl:Expr) = ctrl match {
case ctrl if !isInnerControl(ctrl) => Nil
case Def(UnrolledForeach(en, cchain, func, iters, valids)) =>
iters.last
case Def(UnrolledReduce(en, cchain, accum, func, iters, valids)) =>
iters.last
case _ => Nil
}
def markInnerDim(mem:Expr) = {
(readersOf(mem) ++ writersOf(mem)).map(_.node).foreach { access =>
dbgblk(s"markInnerDim(access=$access)") {
val inds:Seq[Expr] = access match {
case Def(ParLocalReader((mem, Some(inds::_), _)::_)) => inds
case Def(ParLocalWriter((mem, _, Some(inds::_), _)::_)) => inds
case Def(ParLocalReader((mem, None, _)::_)) => Nil
case Def(ParLocalWriter((mem, _, None, _)::_)) => Nil
}
if (inds.isEmpty) { //FIFO
innerDimOf(mem) = 0
dbgs(s"innerDim = 0")
}
inds.zipWithIndex.foreach { case (ind, dim) =>
if (containsInnerInd(ind)) {
dbgs(s"innerDim = $dim")
innerDimOf(mem) = dim
}
}
}
}
}
def setOuterDims(mem:Expr) = dbgblk(s"setOuterDims") {
val numDim = mem match {
case Def(SRAMNew(dims)) => dims.size
case Def(FIFONew(size)) => 1
}
outerDimsOf(mem) = (0 until numDim).toSeq.filterNot { _ == innerDimOf(mem) }
}
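  // setNumOuterBanks records, per duplicate of the memory, how many banks the outer
  // dimensions contribute: the product of the per-dimension bank counts for a
  // BankedMemory, or the single bank count for a DiagonalMemory.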
def setNumOuterBanks(mem:Expr) = numOuterBanksOf(mem) = dbgblk(s"setNumOuterBanks($mem)") {
duplicatesOf(mem).zipWithIndex.map { case (m, i) =>
m match {
case m@BankedMemory(dims, depth, isAccum) =>
dbgs(s"BankedMemory # banks:${dims.map {
case Banking(strides, banks, _) => s"(strides=$strides, banks=$banks)"
}.mkString(",")}")
val outerDims = outerDimsOf(mem)
outerDims.map{ dim => dims(dim).banks}.product
case DiagonalMemory(strides, banks, depth, isAccum) =>
banks
}
}
}
def setStaticBank(mem:Expr):Unit = {
(readersOf(mem) ++ writersOf(mem)).map(_.node).foreach { access =>
if (isFIFO(mem)) staticBanksOf(access) = Seq(0)
else setStaticBank(mem, access)
}
}
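  // setStaticBank(mem, access) computes, for every duplicate the access dispatches to,
  // the outer banks the access can touch statically: when the iterator indexing an
  // outer dimension is parallelized, its lane index pins the bank; otherwise every
  // bank of that dimension remains possible. The per-dimension choices are then
  // flattened into a single list of bank ids by indComb below.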
def setStaticBank(mem:Expr, access:Expr):Unit = staticBanksOf(access) = dbgblk(s"setStaticBankOf($mem, $access)") {
val instIds = getDispatches(mem, access)
val insts = duplicatesOf(mem).zipWithIndex.filter { case (inst, instId) =>
instIds.contains(instId)
}.map { _._1 }
val addr = access match {
case ParLocalReader(List((_, Some(addr), _))) => addr
case ParLocalWriter(List((_, _, Some(addr), _))) => addr
}
insts.flatMap { inst =>
inst match {
case m@BankedMemory(dims, depth, isAccum) =>
val inds = Seq.tabulate(dims.size) { i => addr.map { _(i) } }
dbgs(s"addr=$addr inds=$inds")
dbgs(s"BankedMemory # banks:${dims.map {
case Banking(strides, banks, _) => s"(strides=$strides, banks=$banks)"
}.mkString(",")}")
val outerInds = outerDimsOf(mem).map { dim => (inds(dim), dims(dim), dim) }
// A list of (bankIndex, # banks) for each outer dimension
val bankInds = outerInds.map { case (inds, memory, dim) =>
val vind::_ = inds
val Banking(stride, banks, _) = memory
dbgs(s"ctrlOf($vind)=${ctrlOf(vind)}")
val bankInds = ctrlOf(vind) match {
case Some((ctrl, _)) =>
val parIdxs = itersOf(ctrl).get.map { iters =>
(iters.indexOf(vind), iters.size)
}.filter { _._1 >= 0 }
dbgs(s"itersOf($ctrl)=${itersOf(ctrl)}")
              assert(parIdxs.size == 1, s"$vind doesn't belong to $ctrl but ctrlOf($vind) = $ctrl!")
val (iterIdx, iterPar) = parIdxs.head
if (iterPar==1) {
(0 until banks).map { b => (b, banks)}.toList
} else {
List((iterIdx, banks))
}
case None =>
(0 until banks).map { b => (b, banks)}.toList
}
dbgs(s"dim=$dim banks=${bankInds}")
bankInds
}
dbgs(s"bankInds=$bankInds")
// Compute the combination of flatten bankIndex
def indComb(inds:List[List[(Int, Int)]], prevDims:List[(Int, Int)]):List[Int] = {
if (inds.isEmpty) {
val (inds, banks) = prevDims.unzip
List(flattenND(inds, banks));
} else {
val headDim::restDims = inds
headDim.flatMap { bank => indComb(restDims, prevDims :+ bank) }
}
}
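          // For example (hypothetical values, assuming flattenND linearizes row-major):
          //   indComb(List(List((0,2), (1,2)), List((1,3))), Nil)
          //   == List(flattenND(List(0,1), List(2,3)), flattenND(List(1,1), List(2,3)))
          //   == List(0*3 + 1, 1*3 + 1) == List(1, 4)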
val banks = indComb(bankInds.toList, Nil)
dbgs(s"access=$access uses banks=$banks for inst=$inst")
banks
case DiagonalMemory(strides, banks, depth, isAccum) =>
//TODO
throw new Exception(s"Plasticine doesn't support diagonal banking at the moment!")
}
}
}
}
|
stanford-ppl/spatial-lang
|
spatial/core/src/spatial/codegen/pirgen/PIRMemoryAnalyzer.scala
|
Scala
|
mit
| 6,160
|
/**
* Copyright (c) 2007-2011 Eric Torreborre <etorreborre@yahoo.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of
* the Software. Neither the name of specs nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
* TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package org.specs.specification
import org.specs.util.Property
import org.specs._
class baseSpecificationSpec extends org.spex.Specification {
def threeSpecs = List(new Specification{}, new Specification{}, new Specification{})
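  // threeSpecs builds three fresh, unrelated specifications so that each example can
  // set up its own include relationships without interference from the others.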
"Specifications" can {
"not be included in each other way" in {
      val s1 :: s2 :: x = threeSpecs
s1.include(s2)
s2.include(s1)
s1 contains s2 must beTrue
s2 contains s1 aka "specs2 contains specs1" must beFalse
}
"not be included in a cycle chain" in {
      val s1 :: s2 :: s3 :: x = threeSpecs
s1.include(s2)
s2.include(s3)
s3.include(s1)
s1 contains s2 must beTrue
s2 contains s3 must beTrue
s3 contains s1 aka "specs3 contains specs1" must beFalse
}
"return the list of its parent specifications starting with the most immediate one" in {
      val s1 :: s2 :: s3 :: x = threeSpecs
s1.include(s2)
s2.include(s3)
s3.parentSpecifications must_== List(s2, s1)
}
"share examples between sus using 'behave like'" in {
object s extends Specification {
var expectedContext = ""
var currentContext = ""
val c1 = beforeContext { currentContext = "c1" }
val c2 = beforeContext { currentContext = "c2" }
"the first sus"->-(c1) should {
"have one example using the context" in {
currentContext must be_==(expectedContext).when(expectedContext == 2)
}
}
"the second sus"->-(c2) should {
expectedContext = "c2"
behave like "the first sus"
"have one example using the context" in { currentContext must_== expectedContext }
}
"the third sus"->-(c2) should {
expectedContext = "c3"
behave like "the first sus"
}
}
s.systems(1).failures must be empty;
s.systems(2).failures aka "a system with improper expectations" must be empty
}
}
}
|
yyuu/specs
|
src/test/scala/org/specs/specification/baseSpecificationSpec.scala
|
Scala
|
mit
| 3,360
|
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.invariant._
object ConcatVariationsAbs {
def genL(n: BigInt): BigInt = {
require(n >= 0)
if (n == 0)
BigInt(2)
else
4 + genL(n - 1)
} ensuring (res => tmpl((a, b) => res <= a * n + b))
def append(l1: BigInt, l2: BigInt): BigInt = {
require(l1 >= 0 && l2 >= 0)
if (l1 == 0)
BigInt(3)
else
append(l1 - 1, l2 + 1) + 5
} ensuring (res => tmpl((a, b) => res <= a * l1 + b))
def f_good(m: BigInt, n: BigInt): BigInt = {
require(0 <= m && 0 <= n)
if (m == 0) BigInt(2)
else {
val t1 = genL(n)
val t2 = f_good(m - 1, n)
val t3 = append(n, n * (m - 1))
(t1 + t2 + t3 + 6)
}
} ensuring (res => tmpl((a, b, c, d) => res <= a * (n * m) + b * n + c * m + d))
def f_worst(m: BigInt, n: BigInt): BigInt = {
require(0 <= m && 0 <= n)
if (m == 0) BigInt(2)
else {
val t1 = genL(n)
val t2 = f_worst(m - 1, n)
val t3 = append(n * (m - 1), n)
(t1 + t2 + t3 + 6)
}
} ensuring (res => tmpl((a, c, d, e, f) => res <= a * ((n * m) * m) + c * (n * m) + d * n + e * m + f))
}
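// The two variants differ only in the argument order passed to append: f_good appends
// the freshly generated list of size n (append's cost is linear in its first argument),
// so its template is linear in n*m, while f_worst appends the accumulated list of size
// n*(m-1) on every iteration, which the template bounds with a term quadratic in m
// (a*((n*m)*m) + ...).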
|
regb/leon
|
src/test/resources/regression/orb/numerical/ConcatVariationsAbs.scala
|
Scala
|
gpl-3.0
| 1,154
|
// Equites, a Scala chess playground
// Copyright © 2013 Frank S. Thomas <frank@timepit.eu>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package eu.timepit.equites
package util
// format: OFF
trait AlgebraicAbbr {
def K: WhiteKing = Piece(White, King)
def Q: WhiteQueen = Piece(White, Queen)
def R: WhiteRook = Piece(White, Rook)
def B: WhiteBishop = Piece(White, Bishop)
def N: WhiteKnight = Piece(White, Knight)
def P: WhitePawn = Piece(White, Pawn)
def k: BlackKing = Piece(Black, King)
def q: BlackQueen = Piece(Black, Queen)
def r: BlackRook = Piece(Black, Rook)
def b: BlackBishop = Piece(Black, Bishop)
def n: BlackKnight = Piece(Black, Knight)
def p: BlackPawn = Piece(Black, Pawn)
}
trait FigurineAbbr {
import PieceAbbr.Algebraic._
def ♔ : WhiteKing = K
def ♕ : WhiteQueen = Q
def ♖ : WhiteRook = R
def ♗ : WhiteBishop = B
def ♘ : WhiteKnight = N
def ♙ : WhitePawn = P
def ♚ : BlackKing = k
def ♛ : BlackQueen = q
def ♜ : BlackRook = r
def ♝ : BlackBishop = b
def ♞ : BlackKnight = n
def ♟ : BlackPawn = p
}
trait TextualAbbr {
import PieceAbbr.Algebraic._
def king (color: Color): AnyKing = Piece(color, King)
def queen (color: Color): AnyQueen = Piece(color, Queen)
def rook (color: Color): AnyRook = Piece(color, Rook)
def bishop(color: Color): AnyBishop = Piece(color, Bishop)
def knight(color: Color): AnyKnight = Piece(color, Knight)
def pawn (color: Color): AnyPawn = Piece(color, Pawn)
def whiteKing: WhiteKing = K
def whiteQueen: WhiteQueen = Q
def whiteRook: WhiteRook = R
def whiteBishop: WhiteBishop = B
def whiteKnight: WhiteKnight = N
def whitePawn: WhitePawn = P
def blackKing: BlackKing = k
def blackQueen: BlackQueen = q
def blackRook: BlackRook = r
def blackBishop: BlackBishop = b
def blackKnight: BlackKnight = n
def blackPawn: BlackPawn = p
}
trait WikiAbbr {
import PieceAbbr.Algebraic._
def kl: WhiteKing = K
def ql: WhiteQueen = Q
def rl: WhiteRook = R
def bl: WhiteBishop = B
def nl: WhiteKnight = N
def pl: WhitePawn = P
def kd: BlackKing = k
def qd: BlackQueen = q
def rd: BlackRook = r
def bd: BlackBishop = b
def nd: BlackKnight = n
def pd: BlackPawn = p
}
// format: ON
object PieceAbbr {
object Algebraic extends AlgebraicAbbr
object Figurine extends FigurineAbbr
object Textual extends TextualAbbr
object Wiki extends WikiAbbr
}
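// Usage sketch: importing one abbreviation style brings its shorthands into scope, e.g.
//   import eu.timepit.equites.util.PieceAbbr.Figurine._
//   val knight = ♘   // Piece(White, Knight), same piece as PieceAbbr.Algebraic.N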
|
equites-chess/equites-core
|
src/main/scala/eu/timepit/equites/util/PieceAbbr.scala
|
Scala
|
gpl-3.0
| 3,116
|