code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package io.getquill.context.qzio
import io.getquill.context.ZioJdbc._
import io.getquill.context.jdbc.JdbcComposition
import io.getquill.context.sql.idiom.SqlIdiom
import io.getquill.context.{ ExecutionInfo, PrepareContext, ProtoContext, StreamingContext, TranslateContextMacro }
import io.getquill.{ NamingStrategy, ReturnAction }
import zio.Exit.{ Failure, Success }
import zio.stream.ZStream
import zio.{ FiberRef, Has, Runtime, UIO, ZIO, ZManaged }
import java.sql.{ Array => _, _ }
import javax.sql.DataSource
import scala.util.Try
/**
* Quill context that executes JDBC queries inside of ZIO. Unlike most other contexts
* that require passing in a Data Source, this context takes in a java.sql.Connection
* as a resource dependency which can be provided later (see `ZioJdbc` for helper methods
* that assist in doing this).
*
* The resource dependency itself is just a `Has[Connection]`. Since this is frequently used
* The type `QIO[T]` i.e. Quill-IO has been defined as an alias for `ZIO[Has[Connection], SQLException, T]`.
*
* Since in most JDBC use-cases, a connection-pool datasource i.e. Hikari is used it would actually
* be much more useful to interact with `ZIO[Has[DataSource], SQLException, T]`.
* The extension method `.onDataSource` in `io.getquill.context.ZioJdbc.QuillZioExt` will perform this conversion
* (for even more brevity use `onDS` which is an alias for this method).
* {{{
* import ZioJdbc._
* val zioDs = DataSourceLayer.fromPrefix("testPostgresDB")
* MyZioContext.run(query[Person]).onDataSource.provideCustomLayer(zioDs)
* }}}
*
* If you are using a Plain Scala app however, you will need to manually run it e.g. using zio.Runtime
* {{{
* Runtime.default.unsafeRun(MyZioContext.run(query[Person]).provideLayer(zioDs))
* }}}
*
* Note however that the one exception to these cases is the `prepare` methods, where a `ZIO[Has[Connection], SQLException, PreparedStatement]`
* is being returned. In those situations the acquire-action-release pattern does not make any sense because the `PreparedStatement`
* is only held open while its host-connection exists.
*/
abstract class ZioJdbcContext[Dialect <: SqlIdiom, Naming <: NamingStrategy] extends ZioContext[Dialect, Naming]
with JdbcComposition[Dialect, Naming]
with ProtoContext[Dialect, Naming]
with StreamingContext[Dialect, Naming]
with PrepareContext
with TranslateContextMacro {
override type StreamResult[T] = ZStream[Environment, Error, T]
override type Result[T] = ZIO[Environment, Error, T]
override type RunQueryResult[T] = List[T]
override type RunQuerySingleResult[T] = T
override type RunActionResult = Long
override type RunActionReturningResult[T] = T
override type RunBatchActionResult = List[Long]
override type RunBatchActionReturningResult[T] = List[T]
override type Error = SQLException
override type Environment = Has[DataSource]
override type PrepareRow = PreparedStatement
override type ResultRow = ResultSet
override type TranslateResult[T] = ZIO[Environment, Error, T]
override type PrepareQueryResult = QCIO[PrepareRow]
override type PrepareActionResult = QCIO[PrepareRow]
override type PrepareBatchActionResult = QCIO[List[PrepareRow]]
override type Session = Connection
val currentConnection: FiberRef[Option[Connection]] =
Runtime.default.unsafeRun(FiberRef.make(None))
val underlying: ZioJdbcUnderlyingContext[Dialect, Naming]
override def close() = ()
override def probe(sql: String): Try[_] = underlying.probe(sql)
def executeAction(sql: String, prepare: Prepare = identityPrepare)(info: ExecutionInfo, dc: Runner): QIO[Long] =
onConnection(underlying.executeAction(sql, prepare)(info, dc))
def executeQuery[T](sql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor)(info: ExecutionInfo, dc: Runner): QIO[List[T]] =
onConnection(underlying.executeQuery[T](sql, prepare, extractor)(info, dc))
override def executeQuerySingle[T](sql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor)(info: ExecutionInfo, dc: Runner): QIO[T] =
onConnection(underlying.executeQuerySingle[T](sql, prepare, extractor)(info, dc))
override def translateQuery[T](statement: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor, prettyPrint: Boolean = false)(executionInfo: ExecutionInfo, dc: Runner): TranslateResult[String] =
onConnection(underlying.translateQuery[T](statement, prepare, extractor, prettyPrint)(executionInfo, dc))
override def translateBatchQuery(groups: List[BatchGroup], prettyPrint: Boolean = false)(executionInfo: ExecutionInfo, dc: Runner): TranslateResult[List[String]] =
onConnection(underlying.translateBatchQuery(groups.asInstanceOf[List[ZioJdbcContext.this.underlying.BatchGroup]], prettyPrint)(executionInfo, dc))
def streamQuery[T](fetchSize: Option[Int], sql: String, prepare: Prepare = identityPrepare, extractor: Extractor[T] = identityExtractor)(info: ExecutionInfo, dc: Runner): QStream[T] =
onConnectionStream(underlying.streamQuery[T](fetchSize, sql, prepare, extractor)(info, dc))
def executeActionReturning[O](sql: String, prepare: Prepare = identityPrepare, extractor: Extractor[O], returningBehavior: ReturnAction)(info: ExecutionInfo, dc: Runner): QIO[O] =
onConnection(underlying.executeActionReturning[O](sql, prepare, extractor, returningBehavior)(info, dc))
def executeBatchAction(groups: List[BatchGroup])(info: ExecutionInfo, dc: Runner): QIO[List[Long]] =
onConnection(underlying.executeBatchAction(groups.asInstanceOf[List[ZioJdbcContext.this.underlying.BatchGroup]])(info, dc))
def executeBatchActionReturning[T](groups: List[BatchGroupReturning], extractor: Extractor[T])(info: ExecutionInfo, dc: Runner): QIO[List[T]] =
onConnection(underlying.executeBatchActionReturning[T](groups.asInstanceOf[List[ZioJdbcContext.this.underlying.BatchGroupReturning]], extractor)(info, dc))
def prepareQuery(sql: String, prepare: Prepare)(info: ExecutionInfo, dc: Runner): QCIO[PreparedStatement] =
underlying.prepareQuery(sql, prepare)(info, dc)
def prepareAction(sql: String, prepare: Prepare)(info: ExecutionInfo, dc: Runner): QCIO[PreparedStatement] =
underlying.prepareAction(sql, prepare)(info, dc)
def prepareBatchAction(groups: List[BatchGroup])(info: ExecutionInfo, dc: Runner): QCIO[List[PreparedStatement]] =
underlying.prepareBatchAction(groups.asInstanceOf[List[ZioJdbcContext.this.underlying.BatchGroup]])(info, dc)
private[getquill] def prepareParams(statement: String, prepare: Prepare): QCIO[Seq[String]] =
underlying.prepareParams(statement, prepare)
/**
* Execute instructions in a transaction. For example, to add a Person row to the database and return
* the contents of the Person table immediately after that:
* {{{
* val a = run(query[Person].insert(Person(...)): ZIO[Has[DataSource], SQLException, Long]
* val b = run(query[Person]): ZIO[Has[DataSource], SQLException, Person]
* transaction(a *> b): ZIO[Has[DataSource], SQLException, Person]
* }}}
*
* The order of operations run in the case that a new connection needs to be acquired is as follows:
* <pre>
* getDS from env,
* acquire-connection,
* set-no-autocommit(connection),
* put-into-fiberref(connection),
* op - the corresponding execute_ method which will execute and pull connection from the fiberref,
* remove-from-fiberref(connection),
* set-prev-autocommit(connection),
* release-conn
* </pre>
*/
def transaction[R <: Has[DataSource], A](op: ZIO[R, Throwable, A]): ZIO[R, Throwable, A] = {
  withBlocking(currentConnection.get.flatMap {
    // We can just return the op in the case that there is already a connection set on the fiber ref
    // because the op is execute___ which will lookup the connection from the fiber ref via onConnection/onConnectionStream
    // This will typically happen for nested transactions e.g. transaction(transaction(a *> b) *> c)
    case Some(connection) => op
    case None =>
      // Build a managed scope that: acquires a connection from the environment's DataSource,
      // disables auto-commit (restoring it on release), and publishes the connection on the
      // fiber-ref so the inner execute___ calls pick it up.
      val connection = for {
        env <- ZIO.service[DataSource].toManaged_
        connection <- managedBestEffort(blockingEffect(env.getConnection))
        // Get the current value of auto-commit
        prevAutoCommit <- blockingEffect(connection.getAutoCommit).toManaged_
        // Disable auto-commit since we need to be able to roll back. Once everything is done, set it
        // to whatever the previous value was.
        _ <- ZManaged.make(blockingEffect(connection.setAutoCommit(false))) { _ =>
          blockingEffect(connection.setAutoCommit(prevAutoCommit)).orDie
        }
        _ <- ZManaged.make(currentConnection.set(Some(connection))) { _ =>
          // Note. We are failing the fiber if auto-commit reset fails (the `.orDie` on the
          // setAutoCommit release above). For some circumstances this may be too aggressive.
          // If the connection pool e.g. Hikari resets this property for a recycled connection anyway doing it here
          // might not be necessary
          currentConnection.set(None)
        }
        // Once the `use` of this outer-ZManaged is done, commit on success, rollback on failure.
        // NOTE(review): commit()/rollback() are wrapped in UIO; if they throw, the fiber dies
        // rather than failing with SQLException — confirm this is intended.
        _ <- ZManaged.finalizerExit {
          case Success(_) => withBlocking(UIO(connection.commit()))
          case Failure(cause) => withBlocking(UIO(connection.rollback()))
        }
      } yield ()
      // Run `op` inside the managed scope; the scope's finalizers run afterwards in reverse order.
      connection.use_(op)
  })
}
/** Runs a connection-requiring effect. If a connection is already on the fiber-ref
 *  (i.e. we are inside `transaction`), it is provided directly; otherwise a connection
 *  is provisioned from the environment's DataSource via `DataSourceLayer.live`.
 *  Either way the effect is shifted onto the blocking executor.
 */
private def onConnection[T](qlio: ZIO[Has[Connection], SQLException, T]): ZIO[Has[DataSource], SQLException, T] =
  currentConnection.get.flatMap {
    case Some(connection) =>
      withBlocking(qlio.provide(Has(connection)))
    case None =>
      withBlocking(qlio.provideLayer(DataSourceLayer.live))
  }
/** Stream analogue of `onConnection`: reuses the fiber-ref connection when present,
 *  otherwise provisions one from the DataSource layer. `streamBlocker` shifts the
 *  stream onto the blocking executor first.
 */
private def onConnectionStream[T](qstream: ZStream[Has[Connection], SQLException, T]): ZStream[Has[DataSource], SQLException, T] =
  streamBlocker *> ZStream.fromEffect(currentConnection.get).flatMap {
    case Some(connection) =>
      qstream.provide(Has(connection))
    case None =>
      // Failures from providing the layer that are not SQLException are treated as defects.
      qstream.provideLayer(DataSourceLayer.live).refineToOrDie[SQLException]
  }
} | getquill/quill | quill-jdbc-zio/src/main/scala/io/getquill/context/qzio/ZioJdbcContext.scala | Scala | apache-2.0 | 10,233 |
// Starter Code for Exercise 9
// From "Class Arguments" atom
import com.atomicscala.AtomicTest._
squareThem(2) is 4
squareThem(2, 4) is 20
squareThem(1, 2, 4) is 21
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/22_ClassArguments/Starter-9.scala | Scala | apache-2.0 | 167 |
package debug
object HelloWorld {
/** Entry point: prints the greeting "Hello, World" twice. */
def main(args: Array[String]): Unit = {
  // Emit the greeting twice, exactly as the original pair of println calls did.
  (1 to 2).foreach(_ => println("Hello, World"))
}
} | stephenh/scala-ide | org.scala-ide.sdt.debug.tests/test-workspace/debug/src/debug/HelloWorld.scala | Scala | bsd-3-clause | 140 |
package reductions
import java.util.concurrent._
import scala.collection._
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import common._
import ParallelParenthesesBalancing._
@RunWith(classOf[JUnitRunner])
class ParallelParenthesesBalancingSuite extends FunSuite {
test("balance should work for empty string") {
def check(input: String, expected: Boolean) =
assert(balance(input.toArray) == expected,
s"balance($input) should be $expected")
check("", true)
}
test("balance should work for string of length 1") {
def check(input: String, expected: Boolean) =
assert(balance(input.toArray) == expected,
s"balance($input) should be $expected")
check("(", false)
check(")", false)
check(".", true)
}
test("balance should work for string of length 2") {
def check(input: String, expected: Boolean) =
assert(balance(input.toArray) == expected,
s"balance($input) should be $expected")
check("()", true)
check(")(", false)
check("((", false)
check("))", false)
check(".)", false)
check(".(", false)
check("(.", false)
check(").", false)
}
} | matija94/show-me-the-code | scala_practice/reductions/src/test/scala/reductions/ParallelParenthesesBalancingSuite.scala | Scala | mit | 1,212 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.jms.request
import io.gatling.jms.JmsCheck
import io.gatling.core.session.Expression
/**
* JmsAttributes carries around the JMS settings.
* <p>
* As the JmsReqReplyBuilder is building a request from the DSL, it uses this object
* to represent the in progress request. Once the request is built it can then be used
* so that the JmsReqReplyAction knows exactly what message to send.
*
* @author jasonk@bluedevel.com
*/
case class JmsAttributes(
  requestName: String, // name under which this request is reported
  destination: JmsDestination, // destination the message is sent to
  replyDestination: JmsDestination, // destination the reply is expected on
  selector: Option[String], // optional JMS message selector expression
  message: JmsMessage, // the message payload to send
  messageProperties: Map[Expression[String], Expression[Any]] = Map.empty, // extra message properties (name -> value)
  jmsType: Option[Expression[String]] = None, // optional JMSType header value
  checks: List[JmsCheck] = Nil // checks applied to the response
)
| ryez/gatling | gatling-jms/src/main/scala/io/gatling/jms/request/JmsAttributes.scala | Scala | apache-2.0 | 1,479 |
package org.analogweb.scala
import java.net.URI
import scala.concurrent.Future
import org.junit.runner.RunWith
import org.specs2.mutable._
import org.specs2.runner.JUnitRunner
import org.specs2.mock.Mockito
import org.analogweb._
import org.analogweb.core._
@RunWith(classOf[JUnitRunner])
class RouteExtensionsSpec extends Specification with Mockito {
trait mocks extends org.specs2.specification.Scope {
val rc =
mock[RequestContext]
val rvr =
mock[RequestValueResolvers]
val im =
mock[ScalaInvocationMetadata]
val tc =
mock[TypeMapperContext]
val parameterResolver =
mock[RequestValueResolver]
val pathResolver =
mock[RequestValueResolver]
val r = new Request(rc, rvr, im, tc)
}
"Resolve with ParameterValueResolver" in new mocks {
val qp = mock[Parameters]
rc.getQueryParameters returns qp
qp.getValues("foo") returns java.util.Collections
.emptyList()
qp.getValues("baa") returns java.util.Arrays
.asList("baz")
class A extends Resolvers with RouteExtensions {
import analogweb._
val route = get("/foo") { implicit r =>
param("baa")
}
}
new A().route
.invoke(r) must_== "baz"
}
"Resolve with PathVariableValueResolver" in new mocks {
val qp = mock[Parameters]
rc.getQueryParameters returns qp
qp.getValues("bar") returns java.util.Collections
.emptyList()
val mp = mock[MatrixParameters]
rc.getMatrixParameters() returns mp
mp.getValues("bar") returns java.util.Collections.emptyList()
rc.getRequestMethod() returns "GET"
val dp = new DefaultRequestPath(URI.create("/"), URI.create("foo/baz"), "GET")
val rpd = RequestPathDefinition.define("/", "foo/{bar}")
im.getDefinedPath() returns rpd
rc.getRequestPath() returns dp
class A extends Resolvers with RouteExtensions {
import analogweb._
val route = get("/foo/{bar}") { implicit r =>
param("bar")
}
}
new A().route
.invoke(r) must_== "baz"
}
"Not Resolved" in new mocks {
val qp = mock[Parameters]
rc.getQueryParameters returns qp
qp.getValues("bar") returns java.util.Collections
.emptyList()
val mp = mock[MatrixParameters]
rc.getMatrixParameters returns mp
mp.getValues("bar") returns java.util.Collections.emptyList()
rc.getRequestMethod() returns "GET"
val dp = new DefaultRequestPath(URI.create("/"), URI.create("foo"), "GET")
val rpd = RequestPathDefinition.define("/", "foo")
im.getDefinedPath() returns rpd
rc.getRequestPath() returns dp
class A extends Resolvers with RouteExtensions {
import analogweb._
val route = get("/foo") { implicit r =>
param("bar")
}
}
new A().route
.invoke(r) must_== ""
}
"Passed With" in new mocks {
class A extends Resolvers with RouteExtensions {
import analogweb._
val route = get("/foo") { r =>
implicit val copied =
r.copy(passedWith = Map("foo" -> "bar", "baz" -> true))
passedWith[String]("foo")
}
}
new A().route
.invoke(r) must_== Some("bar")
}
"Passed With Nothing" in new mocks {
class A extends Resolvers with RouteExtensions {
import analogweb._
val route = get("/foo") { r =>
implicit val copied =
r.copy(passedWith = Map("foo" -> "bar", "baz" -> true))
passedWith[String]("bar")
}
}
new A().route
.invoke(r) must_== None
}
"Converting Future to Renderable" in new mocks {
class A extends Resolvers with Responses with RouteExtensions {
import analogweb._
val route = get[RenderableFuture]("/foo") { r =>
Future
.successful(Ok(asText("hoge")))
.asRenderable
}
}
new A().route
.invoke(r)
.isInstanceOf[RenderableFuture] must beTrue
}
}
| analogweb/scala-plugin | core/src/test/scala/org/analogweb/scala/RouteExtensionsSpec.scala | Scala | mit | 3,900 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js API **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
/**
* All doc-comments marked as "MDN" are by Mozilla Contributors,
* distributed under the Creative Commons Attribution-ShareAlike license from
* https://developer.mozilla.org/en-US/docs/Web/Reference/API
*/
package scala.scalajs.js
import scala.language.dynamics
import annotation.{JSBracketAccess, JSBracketCall}
/** Dynamically typed JavaScript value.
*
* Values of this trait accept all possible JavaScript operations in a
* dynamically typed way. You can read and write any field, call any method,
* apply any JavaScript operator to values of this type.
*/
@native
sealed trait Dynamic extends Any with scala.Dynamic {
/** Calls a method of this object. */
@JSBracketCall
def applyDynamic(name: String)(args: Any*): Dynamic = native
/** Reads a field of this object. */
@JSBracketAccess
def selectDynamic(name: String): Dynamic = native
/** Writes a field of this object. */
@JSBracketAccess
def updateDynamic(name: String)(value: Any): Unit = native
/** Calls this object as a callable. */
def apply(args: Any*): Dynamic = native
def unary_!(): Dynamic = native
def unary_+(): Dynamic = native
def unary_-(): Dynamic = native
def unary_~(): Dynamic = native
def +(that: Dynamic): Dynamic = native
def -(that: Dynamic): Dynamic = native
def *(that: Dynamic): Dynamic = native
def /(that: Dynamic): Dynamic = native
def %(that: Dynamic): Dynamic = native
def <<(that: Dynamic): Dynamic = native
def >>(that: Dynamic): Dynamic = native
def >>>(that: Dynamic): Dynamic = native
def &(that: Dynamic): Dynamic = native
def |(that: Dynamic): Dynamic = native
def ^(that: Dynamic): Dynamic = native
def <(that: Dynamic): Dynamic = native
def >(that: Dynamic): Dynamic = native
def <=(that: Dynamic): Dynamic = native
def >=(that: Dynamic): Dynamic = native
def &&(that: Dynamic): Dynamic = native
def ||(that: Dynamic): Dynamic = native
// Work around the annoying implicits in Predef in Scala 2.10.
def x: Dynamic = native
def x_=(value: Any): Unit = native
}
/** Factory for dynamically typed JavaScript values. */
object Dynamic {
/** Dynamic view of the global scope. */
@inline def global: Dynamic = scala.scalajs.runtime.environmentInfo.global
/** Instantiates a new object of a JavaScript class. */
def newInstance(clazz: Dynamic)(args: Any*): Object with Dynamic = sys.error("stub")
/** Creates a new object with a literal syntax.
*
* For example,
* js.Dynamic.literal(foo = 3, bar = "foobar")
* returns the JavaScript object
* {foo: 3, bar: "foobar"}
*/
object literal extends scala.Dynamic { // scalastyle:ignore
/** literal creation like this:
* js.Dynamic.literal(name1 = "value", name2 = "value")
*/
def applyDynamicNamed(name: String)(
fields: (String, Any)*): Object with Dynamic = sys.error("stub")
/** literal creation like this:
* js.Dynamic.literal("name1" -> "value", "name2" -> "value")
*
* Note that this could be simply `def apply`, but this would make the
* applyDynamicNamed fail, since a call with named arguments would
* be routed to the `def apply`, rather than def dynamic version.
*/
def applyDynamic(name: String)(
fields: (String, Any)*): Object with Dynamic = sys.error("stub")
}
}
| lrytz/scala-js | library/src/main/scala/scala/scalajs/js/Dynamic.scala | Scala | bsd-3-clause | 3,857 |
package chapter10
object WordCount {
  /** Partial word-count result for a chunk of text (chapter 10 exercise):
    * a chunk either contains no complete word yet, or some complete words
    * plus partial words dangling at its edges.
    */
  sealed trait WC
  /** A run of characters in which no complete word has been seen. */
  case class Stub(chars: String) extends WC
  /** `words` complete words, plus partial words on the left (`lStub`) and right (`rStub`) boundaries. */
  case class Part(lStub: String, words: Int, rStub: String) extends WC
}
| amolnayak311/functional-programming-in-scala | src/chapter10/WordCount.scala | Scala | unlicense | 177 |
package jp.co.dwango.s99
import Util.map
import Util.split
import P04.length
object P28a {
  /** Merge sort built on this project's hand-rolled list utilities
    * (`split`, `length` — see Util / P04) instead of the standard library.
    *
    * @param lt strict "less than" predicate defining the order
    */
  def mergeSort[T](list: List[T])(lt: (T, T) => Boolean): List[T] = {
    // Merges two lists that are each already sorted by `lt`. When neither head is
    // strictly smaller (a tie), the element from `list2` is emitted first.
    // NOTE(review): merge is not tail-recursive; very long lists may overflow the stack.
    def merge(list1: List[T], list2: List[T]): List[T] = {
      (list1, list2) match {
        case (x :: xs, y :: ys) if lt(x, y) => x :: merge(xs, list2)
        case (x :: xs, y :: ys) => y :: merge(list1, ys)
        case (Nil, ys) => ys
        case (xs, Nil) => xs
      }
    }
    // Sorts `list` whose length is known to be `len`: split at the midpoint,
    // sort both halves recursively, then merge them.
    def mergeSort1(list: List[T], len: Int): List[T] = len match {
      case 0 => Nil
      case 1 => list
      case _ =>
        val m = len / 2
        val (fst, snd) = split(m, list)
        merge(mergeSort1(fst, m), mergeSort1(snd, len - m))
    }
    mergeSort1(list, length(list))
  }

  /** P28a: sorts a list of lists by ascending sublist length. */
  def lsort[T](list: List[List[T]]): List[List[T]] = {
    // Decorate each sublist with its length, sort on the length, then undecorate.
    map(mergeSort(map(list)(x => (x, length(x))))(_._2 < _._2))(_._1)
  }
}
| dwango/S99 | src/main/scala/jp/co/dwango/s99/P28a.scala | Scala | mit | 929 |
package org.eknet.publet.gitr.webui.scripts
import java.util.concurrent.TimeUnit
/**
* From `startMillis` until now.
*
* @author <a href="mailto:eike.kettner@gmail.com">Eike Kettner</a>
* @since 29.05.12 10:03
*
*/
object Duration {

  /** A time unit that converts counts to and from milliseconds.
    *
    * @param milliFactor number of milliseconds in one unit; must be positive
    * @param name        singular, lower-case unit name used for display
    */
  abstract sealed class Unit(val milliFactor: Long, val name: String) {
    // Bug fix: the message previously said "must not be negative" although the
    // condition also rejects zero; it now matches the actual requirement.
    require(milliFactor > 0, "milliFactor must be positive")

    /** Converts `n` of this unit into milliseconds. */
    def toMillis(n: Long) = n * milliFactor

    /** Converts `n` of this unit into `unit`, truncating towards zero. */
    def convert(n: Long, unit: Unit) = toMillis(n) / unit.milliFactor

    override def toString = name.toUpperCase
  }

  // NOTE: Year and Month are calendar approximations (365 resp. 30 days).
  object Year extends Unit(365L * 24 * 60 * 60 * 1000, "year")
  object Month extends Unit(30L * 24 * 60 * 60 * 1000, "month")
  object Day extends Unit(24L * 60 * 60 * 1000, "day")
  object Hour extends Unit(60L * 60 * 1000, "hour")
  object Minute extends Unit(60L * 1000, "minute")
  object Second extends Unit(1000, "second")
  object Millis extends Unit(1, "millisecond")

  /** All units ordered from largest to smallest; `NumberWithUnit.age` relies on this ordering. */
  val units = List(Year, Month, Day, Hour, Minute, Second, Millis)

  /** Adds unit-suffix constructors to numbers, e.g. `5.minutes`, `1.day`. */
  final class NumberExtra(number: Long) {
    def millis = new NumberWithUnit(number, Millis)
    def seconds = new NumberWithUnit(number, Second)
    def second = seconds
    def minutes = new NumberWithUnit(number, Minute)
    def minute = minutes
    def hours = new NumberWithUnit(number, Hour)
    def hour = hours
    def days = new NumberWithUnit(number, Day)
    def day = days
    def months = new NumberWithUnit(number, Month)
    def month = months
    def years = new NumberWithUnit(number, Year)
    def year = years
  }

  /** An amount of time expressed in some unit (milliseconds by default). */
  final case class NumberWithUnit(number: Long, unit: Unit = Millis) {

    /** This amount expressed in milliseconds. */
    lazy val inMillis = unit.toMillis(number)

    /** This amount expressed in `unit`, truncating towards zero. */
    def in(unit: Unit) = this.unit.convert(number, unit)

    // Lazily pairs each unit (largest first) with this amount converted into it.
    private lazy val ageStream = units.toStream.map(u => u -> in(u))

    /** The largest unit in which this amount is at least 1, with the truncated count
      * in that unit. When no unit yields a positive count (zero or negative amounts),
      * falls back to the original (unit, number) pair.
      */
    lazy val age = ageStream.find(t => t._2 > 0).getOrElse((unit, number))

    /** Human-readable rendering, e.g. "3 days" or "1 hour"; sub-second ages render as "Moments ago". */
    lazy val ageString = age._1 match {
      case Millis => "Moments ago"
      case _ => age._2 + " " + age._1.name + (if (age._2 > 1) "s" else "")
    }

    /** The span from this instant (interpreted as an epoch offset) until `nwu`, in milliseconds. */
    def until(nwu: NumberWithUnit) = NumberWithUnit(nwu.inMillis - this.inMillis, Millis)

    /** The span from this instant (interpreted as an epoch offset) until now. */
    def untilNow = NumberWithUnit(System.currentTimeMillis() - this.inMillis)
  }

  // Implicit enrichments enabling e.g. `90.seconds` and `5L.minutes`.
  implicit def intWithMillis(n: Int) = new NumberExtra(n)
  implicit def longWithMillis(n: Long) = new NumberExtra(n)
}
| eikek/publet | gitr-web/src/main/scala/org/eknet/publet/gitr/webui/scripts/Duration.scala | Scala | apache-2.0 | 2,293 |
package net.fehmicansaglam.tepkin.protocol.message
import java.nio.ByteOrder
import akka.util.ByteString
import net.fehmicansaglam.bson.Writable
/** A wire-protocol message of the Tepkin driver. Concrete messages supply an
 *  opCode and a body; `encode` prepends the standard 16-byte header
 *  (messageLength, requestID, responseTo, opCode) to the encoded body.
 */
trait Message extends Writable {
  // The wire protocol is little-endian throughout.
  implicit val byteOrder = ByteOrder.LITTLE_ENDIAN

  // Unique id for this request, generated once per message instance.
  val requestID: Int = RequestIDGenerator.generate

  /** Id of the request this message is a response to. */
  def responseTo: Int

  /** Operation code identifying the message type on the wire. */
  def opCode: Int

  /** Encodes everything after the 16-byte header. */
  def encodeBody: ByteString

  override def encode: ByteString = {
    val body = encodeBody
    ByteString.newBuilder
      .putInt(body.length + 16) // header length is always 16
      .putInt(requestID)
      .putInt(responseTo)
      .putInt(opCode)
      .append(body)
      .result()
  }
}
| cancobanoglu/tepkin | tepkin/src/main/scala/net/fehmicansaglam/tepkin/protocol/message/Message.scala | Scala | apache-2.0 | 626 |
package mesosphere.marathon
package core.instance.update
import mesosphere.marathon.core.event.{InstanceChanged, MarathonEvent, MesosStatusUpdateEvent}
import mesosphere.marathon.core.instance.Instance
import mesosphere.marathon.core.instance.Instance.InstanceState
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.state.Timestamp
import org.apache.mesos.Protos.TaskState
import scala.collection.immutable.Seq
object InstanceChangedEventsGenerator {

  /** Derives the Marathon events caused by an instance update.
    *
    * @param instance      the instance after the update
    * @param task          the task the update refers to, if any
    * @param now           wall-clock time used to stamp the task status event
    * @param previousState instance state before the update; `None` counts as changed
    * @return an [[InstanceChanged]] event when condition or goal changed, plus a
    *         [[MesosStatusUpdateEvent]] for the task when one is given
    */
  def events(instance: Instance, task: Option[Task], now: Timestamp, previousState: Option[InstanceState]): Seq[MarathonEvent] = {
    // Without a previous state we conservatively report a change.
    val stateChanged = previousState.fold(true) { previous =>
      previous.condition != instance.state.condition ||
        previous.goal != instance.state.goal
    }
    val runSpecId = instance.runSpecId
    val version = instance.runSpecVersion
    val instanceEvent: Seq[MarathonEvent] = if (stateChanged) {
      Seq(InstanceChanged(
        id = instance.instanceId,
        runSpecVersion = version,
        runSpecId = runSpecId,
        condition = instance.state.condition,
        instance = instance
      ))
    } else Nil
    // With no task, only the (possibly empty) instance event is emitted.
    task.fold(instanceEvent) { task =>
      val maybeTaskStatus = task.status.mesosStatus
      val ports = task.status.networkInfo.hostPorts
      // The hostname may be absent; fall back to "unknown".
      val host = instance.hostname.getOrElse("unknown")
      val ipAddresses = task.status.networkInfo.ipAddresses
      // Slave id and message are empty strings when no Mesos status is available yet.
      val slaveId = maybeTaskStatus.fold("")(_.getSlaveId.getValue)
      val message = maybeTaskStatus.fold("")(status => if (status.hasMessage) status.getMessage else "")
      val state = task.status.
        mesosStatus.map(_.getState).
        getOrElse(TaskState.TASK_STAGING) // should return TASK_KILLED when resident task is killed... but TASK_STAGING if state not yet known
      val taskEvent = MesosStatusUpdateEvent(
        slaveId,
        task.taskId,
        state,
        message,
        appId = runSpecId,
        host,
        ipAddresses,
        ports = ports,
        version = version.toString,
        timestamp = now.toString
      )
      // Task status event first, then the instance-change event (if any).
      taskEvent +: instanceEvent
    }
  }
}
| gsantovena/marathon | src/main/scala/mesosphere/marathon/core/instance/update/InstanceChangedEventsGenerator.scala | Scala | apache-2.0 | 2,111 |
package zzz.akka.avionics
import akka.actor.ActorSelection
import akka.actor.{ ActorLogging, ActorRef, ActorSystem, Props }
import akka.testkit.{ ImplicitSender, ScopedTestKit, TestActorRef, TestKit }
import org.specs2.mutable.Specification
class PilotSpec extends Specification {
import Pilot.ReadyToGo
isolated
"Pilot".title
"" should {
"respond with GiveMeControl when sent ReadyToGo" in new pilotContext(ActorSystem("TestPilot")) {
var fakeAltimeter = system.actorSelection("/Controls/Altimeter")
var fakeControls = system.actorSelection("/Controls/ControlSurfaces")
var fakeAutopilot = system.actorSelection("/Controls/Autopilot")
val pilot = TestActorRef(Props(TestPilot(testActor, fakeAutopilot, fakeControls, fakeAltimeter)), "Pilot")
pilot ! Pilot.ReadyToGo
expectMsg(Plane.GiveMeControl)
}
"accept a Controls message" in new pilotContext(ActorSystem("TestPilot")) {
var fakeAltimeter = system.actorSelection("/Controls/Altimeter")
var fakeControls = system.actorSelection("/Controls/ControlSurfaces")
var fakeAutopilot = system.actorSelection("/Controls/Autopilot")
val pilot = TestActorRef(Props(TestPilot(testActor, fakeAutopilot, fakeControls, fakeAltimeter)), "Pilot")
val copilot = TestActorRef(Props(Copilot(testActor, fakeAutopilot, fakeControls, fakeAltimeter)), "TestPilot")
pilot receive Plane.Controls(fakeControls)
expectNoMsg
}
}
}
object TestPilot {
def apply(
plane: ActorRef,
autopilot: ActorSelection,
heading: ActorSelection,
altimeter: ActorSelection) = new Pilot(plane, autopilot, heading, altimeter) with DrinkingProvider with FlyingProvider
}
class pilotContext(val actorSystem: ActorSystem) extends TestKit(actorSystem) with ScopedTestKit with ImplicitSender
| jackcviers/learning-akka | src/test/scala/zzz/akka/avionics/PilotSpec.scala | Scala | apache-2.0 | 1,834 |
package me.reminisce.gameboard.questions
import java.util.concurrent.TimeUnit
import akka.testkit.{TestActorRef, TestProbe}
import com.github.nscala_time.time.Imports._
import me.reminisce.database.MongoCollections
import me.reminisce.database.MongoDBEntities.{FBPage, FBPageLike}
import me.reminisce.database.MongoDBFormats._
import me.reminisce.gameboard.board.GameboardEntities.{PageSubject, TimelineQuestion}
import me.reminisce.gameboard.questions.QuestionGenerator.{CreateQuestion, NotEnoughData}
import org.joda.time.DateTime
import org.scalatest.DoNotDiscover
import reactivemongo.api.collections.bson.BSONCollection
import reactivemongo.api.commands.WriteConcern
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
@DoNotDiscover
class WhenDidYouLikeThisPageSpec extends QuestionTester("WhenDidYouLikeThisPageSpec") {
val userId = "TestUserWhenDidYouLikeThisPage"
"WhenDidYouLikeThisPage" must {
"not create question when there is no like." in {
testWithDb {
db =>
val itemId = "This page does not exist"
val actorRef = TestActorRef(WhenDidYouLikeThisPage.props(db))
val testProbe = TestProbe()
testProbe.send(actorRef, CreateQuestion(userId, itemId))
testProbe.expectMsg(NotEnoughData(s"Page or pagelike not found : user $userId, page $itemId"))
}
}
"not create question when there is no page." in {
testWithDb {
db =>
val itemId = "This page does not exist"
val pageLikesCollection = db[BSONCollection](MongoCollections.fbPageLikes)
val likedTime = DateTime.now
val pageLike = FBPageLike(None, userId, itemId, likedTime)
Await.result(pageLikesCollection.update(pageLike, pageLike, WriteConcern.Acknowledged, upsert = true), Duration(10, TimeUnit.SECONDS))
val actorRef = TestActorRef(WhenDidYouLikeThisPage.props(db))
val testProbe = TestProbe()
testProbe.send(actorRef, CreateQuestion(userId, itemId))
testProbe.expectMsg(NotEnoughData(s"Page or pagelike not found : user $userId, page $itemId"))
}
}
"create a valid question when the data is there." in {
testWithDb {
db =>
val pagesCollection = db[BSONCollection](MongoCollections.fbPages)
val itemId = s"PageId"
val page = FBPage(None, itemId, Some(s"Cool page with id ID"), None, 1)
Await.result(pagesCollection.update(page, page, WriteConcern.Acknowledged, upsert = true), Duration(10, TimeUnit.SECONDS))
val pageLikesCollection = db[BSONCollection](MongoCollections.fbPageLikes)
val likedTime = DateTime.now
val pageLike = FBPageLike(None, userId, itemId, likedTime)
Await.result(pageLikesCollection.update(pageLike, pageLike, WriteConcern.Acknowledged, upsert = true), Duration(10, TimeUnit.SECONDS))
val actorRef = TestActorRef(WhenDidYouLikeThisPage.props(db))
val testProbe = TestProbe()
testProbe.send(actorRef, CreateQuestion(userId, itemId))
checkFinished[TimelineQuestion](testProbe) {
question =>
checkSubject[PageSubject](question.subject) {
subject =>
val answer = question.answer
assert(subject.name == page.name.getOrElse(""))
val formatter = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ssZ").withZone(DateTimeZone.UTC)
assert(likedTime.toString(formatter) == answer)
}
}
}
}
}
} | reminisceme/game-creator | src/test/scala/me/reminisce/gameboard/questions/WhenDidYouLikeThisPageSpec.scala | Scala | apache-2.0 | 3,645 |
package com.softwaremill.thegarden.json4s.serializers
import org.json4s._
import scala.reflect.ClassTag
// TODO convert implicit param to a context bound
/** json4s serializer that (de)serializes a Java enum `T` by its constant name.
  *
  * Serialization turns any `java.lang.Enum` value into a `JString` of its `name()`;
  * deserialization accepts a `JString` only when the target type is exactly `T`
  * and resolves it via `Enum.valueOf`.
  */
class JavaEnumNameSerializer[T <: Enum[T]](implicit ct: ClassTag[T]) extends Serializer[T] {

  import JsonDSL._

  // Runtime class of T, recovered from the named ClassTag (no need for `implicitly`).
  private lazy val enumClass: Class[T] = ct.runtimeClass.asInstanceOf[Class[T]]

  override def deserialize(implicit format: Formats): PartialFunction[(TypeInfo, JValue), T] = {
    // `ti.clazz == enumClass` already implies the class is an enum, so the former
    // separate `isEnum` check was redundant and has been dropped.
    case (ti: TypeInfo, v: JString) if ti.clazz == enumClass =>
      Enum.valueOf(enumClass, v.values)
  }

  override def serialize(implicit format: Formats): PartialFunction[Any, JValue] = {
    // NOTE(review): this case matches ANY Java enum, not just T; with several such
    // serializers registered the first one wins (the output is identical either way).
    case e: Enum[_] =>
      e.name()
  }
}
| maciej/the-garden | garden-json4s/src/main/scala/com/softwaremill/thegarden/json4s/serializers/JavaEnumNameSerializer.scala | Scala | mit | 805 |
package pl.newicom.dddd.coordination
import pl.newicom.dddd.aggregate.Command
import pl.newicom.dddd.office.OfficeRegistryImpl
import pl.newicom.dddd.process.EnqueueCommand
/**
 * Makes a command deliverable: a command whose handling office is available
 * in the cluster passes through unchanged, any other command is wrapped in an
 * [[EnqueueCommand]] addressed at the office's id and department.
 */
class ToDeliverableCommandTransformation(officeRegistry: OfficeRegistryImpl) extends (Command => Command) {

  def apply(command: Command): Command = {
    val targetOffice = officeRegistry.commandHandlerId(command)
    if (officeRegistry.isOfficeAvailableInCluster(targetOffice.id))
      command
    else
      EnqueueCommand(command, targetOffice.id, targetOffice.department)
  }
}
| pawelkaczor/akka-ddd | akka-ddd-core/src/main/scala/pl/newicom/dddd/coordination/ToDeliverableCommandTransformation.scala | Scala | mit | 563 |
package fpinScala.FindFirstString
/** Demonstrates a tail-recursive linear search over an array of strings. */
object FindFirstString {

  /**
   * Returns the index of the first element of `ss` equal to `key`,
   * or -1 when no element matches.
   */
  def findFirst(ss: Array[String], key: String): Int = {
    @annotation.tailrec
    def loop(idx: Int): Int =
      if (idx >= ss.length) -1
      else if (ss(idx) == key) idx
      else loop(idx + 1)
    loop(0)
  }

  def main(args: Array[String]): Unit = {
    val samples = Array("abc", "qwe")
    println(findFirst(samples, "abc"))
  }
}
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scalanlp;
package serialization;
/**
* Reads a row as a series of readers. Note that the returned readers cannot
* be cached or accessed out of order, because they are a view on a single
* underlying stream.
*
* @author dramage
*/
trait TableRowReader extends Iterator[TableCellReader] {
  // Re-wraps Iterator.take so the prefix is still typed as a TableRowReader.
  // The wrapper is a view over the same underlying stream, so the prefix must
  // be consumed in order and exactly once.
  override def take(n : Int) : TableRowReader = {
    val took = super.take(n);
    new TableRowReader {
      override def hasNext = took.hasNext;
      override def next = took.next;
    }
  }
}
object TableRowReader {
  /** Views a sequence of strings as a row, one cell reader per string. */
  implicit def fromStrings(strings : Iterable[String]) : TableRowReader = {
    val iter = strings.iterator;
    new TableRowReader {
      override def hasNext = iter.hasNext;
      override def next = TableCellReader.fromString(iter.next);
    }
  }
}
/**
* Reads a row in a table.
*
* @author dramage
*/
trait TableRowReadable[V] extends Readable[TableRowReader,V] {
  /** Returns a header describing this row; None when no header is known. */
  def header : Option[List[String]] = None;
}
/**
* Low priority conversions of cell readable to row readable.
*
* @author dramage
*/
trait LowPriorityTableRowReadableImplicits {
  // Fallback instance: any fixed-width multi-cell readable can read a whole
  // row, provided the row contains exactly rc.size cells (checked below).
  implicit def anyTableMultiCellReadable[V](implicit rc : TableMultiCellReadable[V])
  : TableRowReadable[V] = new TableRowReadable[V] {
    override def read(row : TableRowReader) = {
      val rv = rc.read(row.take(rc.size));
      require(!row.hasNext, "Wrong number of cells in row.");
      rv;
    }
  }
}
object TableRowReadable extends LowPriorityTableRowReadableImplicits {
  type Input = TableRowReader;

  // Tuple instances: each leading component consumes exactly its own number
  // of cells via take(size); only the last component is a full row readable,
  // so variable-width values (collections) can sit in tail position.
  implicit def forTuple2[A,B]
  (implicit ra : TableMultiCellReadable[A], rb : TableRowReadable[B])
  : TableRowReadable[(A,B)] = new TableRowReadable[(A,B)] {
    override def read(row : Input) =
      (ra.read(row.take(ra.size)),
       rb.read(row));
  }

  implicit def forTuple3[A,B,C]
  (implicit ra : TableMultiCellReadable[A], rb : TableMultiCellReadable[B],
   rc : TableRowReadable[C])
  : TableRowReadable[(A,B,C)] = new TableRowReadable[(A,B,C)] {
    override def read(row : Input) =
      (ra.read(row.take(ra.size)),
       rb.read(row.take(rb.size)),
       rc.read(row));
  }

  implicit def forTuple4[A,B,C,D]
  (implicit ra : TableMultiCellReadable[A], rb : TableMultiCellReadable[B],
   rc : TableMultiCellReadable[C], rd : TableRowReadable[D])
  : TableRowReadable[(A,B,C,D)] = new TableRowReadable[(A,B,C,D)] {
    override def read(row : Input) =
      (ra.read(row.take(ra.size)),
       rb.read(row.take(rb.size)),
       rc.read(row.take(rc.size)),
       rd.read(row));
  }

  implicit def forTuple5[A,B,C,D,E]
  (implicit ra : TableMultiCellReadable[A], rb : TableMultiCellReadable[B],
   rc : TableMultiCellReadable[C], rd : TableMultiCellReadable[D], re : TableRowReadable[E])
  : TableRowReadable[(A,B,C,D,E)] = new TableRowReadable[(A,B,C,D,E)] {
    override def read(row : Input) =
      (ra.read(row.take(ra.size)),
       rb.read(row.take(rb.size)),
       rc.read(row.take(rc.size)),
       rd.read(row.take(rd.size)),
       re.read(row));
  }

  // Collection instances: consume cr.size cells per element until the row
  // is exhausted, so a collection must be the last value in the row.
  implicit def forArray[A](implicit cr : TableMultiCellReadable[A], cm : ClassManifest[A])
  : TableRowReadable[Array[A]] = new TableRowReadable[Array[A]] {
    override def read(row : Input) = {
      val builder = scala.collection.mutable.ArrayBuilder.make[A];
      while (row.hasNext) {
        builder += cr.read(row.take(cr.size));
      }
      builder.result;
    }
  }

  implicit def forIterable[A](implicit cr : TableMultiCellReadable[A])
  : TableRowReadable[Iterable[A]] = new TableRowReadable[Iterable[A]] {
    override def read(row : Input) = {
      val builder = Iterable.newBuilder[A];
      while (row.hasNext) {
        builder += cr.read(row.take(cr.size));
      }
      builder.result;
    }
  }

  implicit def forList[A](implicit cr : TableMultiCellReadable[A])
  : TableRowReadable[List[A]] = new TableRowReadable[List[A]] {
    override def read(row : Input) = {
      val builder = List.newBuilder[A];
      while (row.hasNext) {
        builder += cr.read(row.take(cr.size));
      }
      builder.result;
    }
  }
}
/**
* Writes a delimited row to output. Call next before writing
* to each cell. Then call finish when done with the row.
*
* @author dramage
*/
trait TableRowWriter {
  /** Returns the writer for the next cell; call before writing each cell. */
  def next() : TableCellWriter;
  /** Terminates the row; call once after the last cell has been written. */
  def finish();
}
/**
* For writing a row of a table.
*
* @author dramage
*/
trait TableRowWritable[V] extends Writable[TableRowWriter, V] {
  /** Returns a header describing this row; None when no header is known. */
  def header : Option[List[String]] = None;
}
/**
* Low priority Writable conversions.
*
* @author dramage
*/
trait LowPriorityTableRowWritableImplicits {
  // Fallback instance: any multi-cell writable writes its cells and then
  // terminates the row.
  implicit def anyTableMultiCellWritable[V](implicit wc : TableMultiCellWritable[V])
  : TableRowWritable[V] = new TableRowWritable[V] {
    def write(writer : TableRowWriter, value : V) = {
      wc.write(writer, value);
      writer.finish;
    }
  }
}
object TableRowWritable extends LowPriorityTableRowWritableImplicits {
  type Output = TableRowWriter

  // Tuple instances: each leading component writes its own cells; only the
  // last component's instance is a full row writable, which ends the row.
  implicit def forTuple2[A,B]
  (implicit wa : TableMultiCellWritable[A],
   wb : TableRowWritable[B])
  : TableRowWritable[(A,B)] = new TableRowWritable[(A,B)] {
    def write(writer : Output, v : (A,B)) = {
      wa.write(writer, v._1);
      wb.write(writer, v._2);
    }
  }

  implicit def forTuple3[A,B,C]
  (implicit wa : TableMultiCellWritable[A],
   wb : TableMultiCellWritable[B],
   wc : TableRowWritable[C])
  : TableRowWritable[(A,B,C)] = new TableRowWritable[(A,B,C)] {
    def write(writer : Output, v : (A,B,C)) = {
      wa.write(writer, v._1);
      wb.write(writer, v._2);
      wc.write(writer, v._3);
    }
  }

  implicit def forTuple4[A,B,C,D]
  (implicit wa : TableMultiCellWritable[A],
   wb : TableMultiCellWritable[B],
   wc : TableMultiCellWritable[C],
   wd : TableRowWritable[D])
  : TableRowWritable[(A,B,C,D)] = new TableRowWritable[(A,B,C,D)] {
    def write(writer : Output, v : (A,B,C,D)) = {
      wa.write(writer, v._1);
      wb.write(writer, v._2);
      wc.write(writer, v._3);
      wd.write(writer, v._4);
    }
  }

  implicit def forTuple5[A,B,C,D,E]
  (implicit wa : TableMultiCellWritable[A],
   wb : TableMultiCellWritable[B],
   wc : TableMultiCellWritable[C],
   wd : TableMultiCellWritable[D],
   we : TableRowWritable[E])
  : TableRowWritable[(A,B,C,D,E)] = new TableRowWritable[(A,B,C,D,E)] {
    def write(writer : Output, v : (A,B,C,D,E)) = {
      wa.write(writer, v._1);
      wb.write(writer, v._2);
      wc.write(writer, v._3);
      wd.write(writer, v._4);
      we.write(writer, v._5);
    }
  }

  // Collection instances: write every element's cells, then end the row, so
  // a collection must be the last value in the row.
  implicit def forArray[A:TableMultiCellWritable]
  : TableRowWritable[Array[A]] = new TableRowWritable[Array[A]] {
    override def write(writer : Output, coll : Array[A]) = {
      for (v <- coll) {
        implicitly[TableMultiCellWritable[A]].write(writer, v);
      }
      writer.finish;
    }
  }

  implicit def forIterable[A:TableMultiCellWritable]
  : TableRowWritable[Iterable[A]] = new TableRowWritable[Iterable[A]] {
    override def write(writer : Output, coll : Iterable[A]) = {
      for (v <- coll) {
        implicitly[TableMultiCellWritable[A]].write(writer, v);
      }
      writer.finish;
    }
  }

  implicit def forTraversable[A:TableMultiCellWritable]
  : TableRowWritable[Traversable[A]] = new TableRowWritable[Traversable[A]] {
    override def write(writer : Output, coll : Traversable[A]) = {
      for (v <- coll) {
        implicitly[TableMultiCellWritable[A]].write(writer, v);
      }
      writer.finish;
    }
  }

  implicit def forList[A:TableMultiCellWritable]
  : TableRowWritable[List[A]] = new TableRowWritable[List[A]] {
    override def write(writer : Output, coll : List[A]) = {
      for (v <- coll) {
        implicitly[TableMultiCellWritable[A]].write(writer, v);
      }
      writer.finish;
    }
  }
}
/**
* A trait for companion objects to case classes that want to support
* reading and writing table headers.
*
* Example:
*
* case class MyRow(id : String, count : Int, values : Array[Double]);
* object MyRow extends TableRowCompanion[MyRow,(String,Int,Array[Double])];
*
* @author dramage
*/
trait TableRowCompanion[This,Format] { self =>
  import scalanlp.util.CanPack;

  // The companion's `apply` method, invoked reflectively to construct This.
  // NOTE(review): getMethods returns methods in no particular order, so with
  // overloaded apply methods the selection is arbitrary -- confirm callers
  // only mix this into companions with a single apply.
  private val method = try {
    this.getClass.getMethods.filter(_.getName == "apply").head;
  } catch {
    // Catch only recoverable failures (e.g. NoSuchElementException from
    // `head`); the original caught Throwable, swallowing fatal errors too.
    case scala.util.control.NonFatal(ex) =>
      throw new IllegalArgumentException("No apply method.");
  }

  // Parameter names of `apply`, recovered from bytecode via Paranamer;
  // these double as the column names of the header.
  private val names : List[String] =
    new com.thoughtworks.paranamer.BytecodeReadingParanamer().lookupParameterNames(method).toList;

  val header : Option[List[String]] =
    Some(names);

  /** Reads a Format row and invokes the companion's apply to build a This. */
  class CompanionReadable(implicit trr : TableRowReadable[Format], cp : CanPack[Format])
  extends TableRowReadable[This] {
    override def header = self.header;
    override def read(in : TableRowReader) = {
      val packed = implicitly[TableRowReadable[Format]].read(in);
      val unpacked = implicitly[CanPack[Format]].unpack(packed);
      // Pass the companion instance as receiver: `apply` is an instance
      // method on the companion object, so a null receiver would NPE.
      // (For static forwarders the receiver argument is simply ignored.)
      method.invoke(self, unpacked.asInstanceOf[List[Object]].toArray[Object] :_*).asInstanceOf[This];
    }
  }

  // Cached instance, created and read under this object's monitor so the
  // write is safely published; the original checked the non-volatile field
  // outside synchronized, which could race or observe a stale null.
  // NOTE(review): the cache ignores which implicit instances were supplied.
  private var _readable : CompanionReadable = null;
  implicit def readable(implicit trr : TableRowReadable[Format], cp : CanPack[Format]) : TableRowReadable[This] = synchronized {
    if (_readable == null) { _readable = new CompanionReadable(); }
    _readable;
  }

  /** Writes a This by extracting its fields by name as a Format row. */
  class CompanionWritable(implicit trw : TableRowWritable[Format], cp : CanPack[Format], cm : Manifest[This])
  extends TableRowWritable[This] {
    override def header = self.header;
    override def write(out : TableRowWriter, value : This) = {
      // Fields are read via the public accessors matching apply's parameter names.
      val unpacked : List[Any] =
        names.map(name => implicitly[Manifest[This]].erasure.getMethod(name).invoke(value));
      val packed = implicitly[CanPack[Format]].pack(unpacked);
      implicitly[TableRowWritable[Format]].write(out, packed);
    }
  }

  // Same synchronized caching discipline as for _readable above.
  private var _writable : CompanionWritable = null;
  implicit def writable(implicit trw : TableRowWritable[Format], cp : CanPack[Format], cm : Manifest[This]) : TableRowWritable[This] = synchronized {
    if (_writable == null) { _writable = new CompanionWritable(); }
    _writable;
  }
}
| MLnick/scalanlp-core | data/src/main/scala/scalanlp/serialization/TableRowSerialization.scala | Scala | apache-2.0 | 10,757 |
package scanalyzer
package analysis
package dominance
import cfg._
import util._
import scala.collection.mutable.{Set, Map, Queue}
/**
* Analysis that computes for each BasicBlock bb the set of BasicBlocks that
* dominate bb.
*
* Non-strict dominance is computet, i.e. each BasicBlock dominates itself.
*/
class DominanceAnalysis(fun: Function)
  extends BlockAnalysis[Set[BasicBlock]] {

  // Iterative forward dataflow: dominator sets start large and only shrink
  // until a fixed point is reached.
  override def run(): Unit = {
    val allBlocks = Set[BasicBlock]()
    fun foreach ( bb => allBlocks += bb )

    // Initializes blocktab for every block; presumably each entry starts as
    // the full block set -- TODO confirm against BlockAnalysis.populateBlockTable.
    populateBlockTable(fun, allBlocks.clone)
    // The entry block is dominated only by itself.
    blocktab(fun.first) = Set[BasicBlock](fun.first)

    // Worklist of blocks whose dominator set may still propagate changes.
    val queue = new Queue[BasicBlock]
    queue ++= allBlocks

    while (! queue.isEmpty) {
      val current = queue.dequeue
      val value = blocktab(current)
      val succs = current.getSuccessors()
      succs foreach (bb => {
        val entry = blocktab(bb)
        val sizeBefore = entry.size
        // dom(bb) := dom(bb) ∩ ({bb} ∪ dom(current)); in-place, shrink-only.
        entry retain (b => b == bb || (value contains b))
        // Re-queue bb only if its set actually changed and it is not queued.
        if (sizeBefore != entry.size && ! (queue contains bb)) {
          queue += bb
        }
      })
    }
  }

  /**
   * Returns a copy of the resulting mapping.
   */
  def getMapping(): Map[BasicBlock, Set[BasicBlock]] = {
    blocktab.clone
  }

  // Human-readable dump: one "block -> {dominators}" line per block.
  override def getResult(): String = {
    var res = ""
    for ((k, v) <- blocktab)
      res += k.Name + " -> " + Util.strItBB(v) + "\\n"
    res
  }
}
| fabian-r/scanalyzer | src/main/scala/analysis/DominanceAnalysis.scala | Scala | mit | 1,381 |
package com.nikolastojiljkovic.quilltrait
import com.nikolastojiljkovic.quilltrait.model.{ Page, SampleTraitWithNestable, SeoSupport }
import io.getquill.context.sql.SqlContext
trait PageSqlSpec extends PageSpec {
  // SQL-flavoured quill context with annotated-trait support; supplied by the
  // concrete spec implementation.
  implicit val context: SqlContext[_, _] with AnnotatedTraitSupport

  import context._

  // Self-join: for each page that has a parent, yield the parent's id.
  val `Ex 1` =
    quote {
      for {
        c <- query[Page with SeoSupport with SampleTraitWithNestable]
        p <- query[Page with SeoSupport with SampleTraitWithNestable] if p.id == c.parentId
      } yield p.id
    }

  val `Ex 1 expected result` = List(Some(1), Some(2), Some(2))

  // All pages, ordered by id.
  val `Ex 2` =
    quote {
      for {
        c <- query[Page with SeoSupport with SampleTraitWithNestable].sortBy(p => p.id)
      } yield c
    }

  val `Ex 2 expected result` = testEntries
}
| nstojiljkovic/quill-trait | quill-trait-core/jvm/src/test/scala/com/nikolastojiljkovic/quilltrait/PageSqlSpec.scala | Scala | apache-2.0 | 790 |
package water.sparkling.itest.standalone
import hex.Distribution
import org.apache.spark.h2o._
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import water.sparkling.itest.SparkITest
/**
* Test for Jira Hex-Dev 100 : Import airlines data and run a host of classification models,
* including GBM, GLM, and Deep Learning.
*/
@RunWith(classOf[JUnitRunner])
class HexDev100TestSuite extends FunSuite with SparkITest {
ignore("HEX-DEV 100 test") {
launch( "water.sparkling.itest.standalone.HexDev100Test",
env {
// spark.master is passed via environment
// Configure Standalone environment
conf("spark.standalone.max.executor.failures", 1) // In fail of executor, fail the test
conf("spark.executor.instances", 8)
conf("spark.executor.memory", "7g")
conf("spark.ext.h2o.cluster.size", 8)
}
)
}
}
object HexDev100Test {

  /**
   * Imports the airlines dataset from HDFS, trains Deep Learning, GLM and
   * GBM classifiers of departure delay, scores each model on the training
   * data, then shuts Spark and H2O down.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("HexDev100Test")
    val sc = new SparkContext(conf)
    val h2oContext = H2OContext.getOrCreate(sc)
    import h2oContext._

    // Import all year airlines into H2O
    val path = "hdfs://mr-0xd6-precise1.0xdata.loc:8020/datasets/airlines/airlines_all.csv"
    val uri = new java.net.URI(path)
    val airlinesData = new H2OFrame(uri)

    // Pass into Spark to drop unused columns
    implicit val sqlContext = new SQLContext(sc)
    val airlinesDataFrame = asDataFrame(airlinesData)
    airlinesDataFrame.registerTempTable("AirlinesDataTable")

    // Keep only "Year", "Month", "DayOfWeek", "Origin", "Dest",
    // "UniqueCarrier", "Distance", "FlightNum", "IsDepDelayed"
    val airlinesTable = sqlContext.sql(
      """SELECT
        |f.Year, f.Month, f.DayOfWeek,
        |f.Origin, f.Dest, f.UniqueCarrier,
        |f.Distance, f.FlightNum, f.IsDepDelayed
        |FROM AirlinesDataTable f""".stripMargin)

    // Run deep learning to produce model classifying delayed flights
    import hex.deeplearning.DeepLearning
    import hex.deeplearning.DeepLearningParameters
    val dlParams = new DeepLearningParameters()
    dlParams._epochs = 10
    dlParams._train = airlinesTable
    dlParams._response_column = 'IsDepDelayed
    // Create a job
    val dl = new DeepLearning(dlParams)
    val dlModel = dl.trainModel.get

    // Use model to estimate delay on training data
    val predDLH2OFrame = dlModel.score(airlinesTable)('predict)
    val predDLFromModel = asRDD[DoubleHolder](predDLH2OFrame).collect.map(_.result.getOrElse(Double.NaN))

    // Run GLM to produce model classifying delayed flights
    import hex.glm.GLM
    import hex.glm.GLMModel.GLMParameters
    import hex.glm.GLMModel.GLMParameters.Family
    val glmParams = new GLMParameters(Family.binomial)
    glmParams._train = airlinesTable
    glmParams._response_column = 'IsDepDelayed
    glmParams._alpha = Array[Double](0.5)
    val glm = new GLM(glmParams)
    val glmModel = glm.trainModel().get()

    // Use model to estimate delay on training data
    val predGLMH2OFrame = glmModel.score(airlinesTable)('predict)
    val predGLMFromModel = asRDD[DoubleHolder](predGLMH2OFrame).collect.map(_.result.getOrElse(Double.NaN))

    // Use GBM to produce model classifying delayed flights
    import hex.tree.gbm.GBM
    import hex.tree.gbm.GBMModel.GBMParameters
    val gbmParams = new GBMParameters()
    gbmParams._train = airlinesTable
    gbmParams._response_column = 'IsDepDelayed
    gbmParams._ntrees = 10
    gbmParams._distribution = Distribution.Family.bernoulli
    val gbm = new GBM(gbmParams)
    val gbmModel = gbm.trainModel.get

    // Use model to estimate delay on training data.
    // FIX: score with gbmModel here -- the original scored glmModel again,
    // so the "GBM predictions" were actually GLM predictions.
    val predGBMH2OFrame = gbmModel.score(airlinesTable)('predict)
    val predGBMFromModel = asRDD[DoubleHolder](predGBMH2OFrame).collect.map(_.result.getOrElse(Double.NaN))

    println("""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""")
    println("Finished running GLM,GBM, and Deep Learning on airlines dataset.")
    println("""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""")
    sc.stop()
    // Shutdown H2O explicitly (at least the driver)
    water.H2O.shutdown(0)
  }
}
| nilbody/sparkling-water | examples/src/integTest/scala/water/sparkling/itest/standalone/HexDev100Test.scala | Scala | apache-2.0 | 4,409 |
import scala.concurrent.Await
import scala.concurrent.duration.{FiniteDuration, SECONDS}
import akka.actor.{Actor, ActorLogging, ActorSelection}
import akka.util.Timeout
import akka.pattern.{ask, pipe}
package hyperion {
// Base pipe that maintains a single integer counter. Subclasses decide, via
// count(), which messages increment it. Supports Reset (zero the counter)
// and StatsRequest (reply with the current value).
abstract class Counter extends Pipe {
  // Mutable actor state; only touched from the actor's message handler.
  var counter = 0

  def process = {
    case Message(data) =>
      count(data)
    case Reset =>
      counter = 0
    case StatsRequest => {
      sender ! StatsResponse(Map[String,Int]("counter" -> counter))
    }
  }

  // Hook: inspect the message fields and update `counter` as appropriate.
  def count(data: Map[String, String])
}
/** Counter that increments once for every message it receives. */
class MessageCounter(id: String) extends Counter {
  def selfId = id

  def count(data: Map[String, String]) = {
    counter += 1
  }
}
/**
 * Counter that increments only for messages whose field `name` is present
 * and whose value matches the regex `value`.
 */
class FieldValueCounter(id: String, name: String, value: String) extends Counter {
  def selfId = id

  def count(data: Map[String, String]) = {
    if (data.get(name).exists(_.matches(value))) counter += 1
  }
}
/**
 * Pipe that tracks, per observed value of field `name`, how many messages
 * carried that value. StatsRequest replies with an immutable snapshot.
 */
class FieldStatistics(id: String, name: String) extends Pipe {
  def selfId = id

  val stats: scala.collection.mutable.Map[String, Int] = scala.collection.mutable.Map[String, Int]()

  def process = {
    case Message(data) =>
      data.get(name) foreach { fieldValue =>
        stats.update(fieldValue, stats.getOrElse(fieldValue, 0) + 1)
      }
    case StatsRequest => {
      sender ! StatsResponse(stats.toMap)
    }
  }
}
// Samples an underlying Counter actor every `tick` and keeps the last
// `backlogsize` per-tick deltas; StatsRequest replies with their average.
class AverageCounter(id: String, counter: ActorSelection, tick: FiniteDuration, backlogsize: Int) extends Actor with ActorLogging with Tickable {
  implicit val timeout = Timeout(FiniteDuration(1, SECONDS))
  def selfId = id
  // Most recent per-tick deltas, newest first; bounded by backlogsize.
  var backlog = List[Int]()
  // Absolute counter value seen at the previous tick.
  var lastData = 0

  def updateBacklog = {
    log.debug("Updating backlog in " + self.path.toString)
    // NOTE(review): Await.result inside an actor blocks its thread for up to
    // 1s per tick; consider the ask-pipeTo pattern instead -- confirm.
    val currentData = Await.result(counter ? StatsRequest, timeout.duration).asInstanceOf[StatsResponse].values("counter")
    backlog = (currentData - lastData) :: (backlog take (backlogsize - 1))
    log.debug("Backlog in " + self.path.toString + " : " + backlog)
    lastData = currentData
    // NOTE(review): looks like a leftover debug print to stdout; candidate
    // for removal or demotion to log.debug.
    println("Ticked")
  }

  override def preStart = {
    super.preStart()
    startTicking(tick, tick);
  }

  // Average of the recorded deltas; 0 when nothing has been sampled yet.
  def countAverage = if (backlog.size != 0) (backlog.sum / backlog.size) else 0

  def receive = {
    case StatsRequest => sender ! StatsResponse(Map[String, Int]( "counter" -> countAverage))
    case Tick => updateBacklog
  }

  override def postStop = stopTicking
}
} | talien/hyperion | src/main/scala/pipes/counter.scala | Scala | lgpl-3.0 | 2,507 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.tools.status
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.accumulo.data.AccumuloDataStore
import org.locationtech.geomesa.accumulo.tools.{AccumuloDataStoreCommand, AccumuloDataStoreParams}
import org.locationtech.geomesa.accumulo.tools.status.AccumuloGetSftConfigCommand.AccumuloGetSftConfigParameters
import org.locationtech.geomesa.tools.RequiredTypeNameParam
import org.locationtech.geomesa.tools.status.{GetSftConfigCommand, GetSftConfigParams}
/** Accumulo-backed variant of the generic get-SFT-config CLI command. */
class AccumuloGetSftConfigCommand extends GetSftConfigCommand[AccumuloDataStore] with AccumuloDataStoreCommand {
  override val params = new AccumuloGetSftConfigParameters
}
object AccumuloGetSftConfigCommand {
  // JCommander parameter bundle: Accumulo connection params plus the generic
  // get-config params and the required type name.
  @Parameters(commandDescription = "Get the SimpleFeatureType definition of a schema")
  class AccumuloGetSftConfigParameters extends AccumuloDataStoreParams with GetSftConfigParams with RequiredTypeNameParam
}
| locationtech/geomesa | geomesa-accumulo/geomesa-accumulo-tools/src/main/scala/org/locationtech/geomesa/accumulo/tools/status/AccumuloGetSftConfigCommand.scala | Scala | apache-2.0 | 1,408 |
/**
* Trait Parameters: https://dotty.epfl.ch/docs/reference/other-new-features/trait-parameters.html
*/
// Exercises Scala 3 (Dotty)-only features so the build fails fast if this
// file is ever compiled with Scala 2.
object TraitParams {

  // Scala 3 feature under test: a trait with a constructor parameter.
  trait Base(val msg: String)

  class A extends Base("Hello")
  class B extends Base("Dotty!")

  // Union types only exist in Dotty, so there's no chance that this will accidentally be compiled with Scala 2
  private def printMessages(msgs: (A | B)*) = println(msgs.map(_.msg).mkString(" "))

  def test: Unit = {
    printMessages(new A, new B)

    // Sanity check the classpath: this won't run if the dotty jar is not present.
    val x: Int => Int = z => z
    x(1)
  }
}
| sbt/sbt | sbt-app/src/sbt-test/plugins/dotty/src/main/scala-3/TraitParams.scala | Scala | apache-2.0 | 609 |
package org.apache.mesos.chronos.scheduler.jobs
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import java.util.concurrent.{TimeUnit, Executors, Future}
import java.util.logging.{Level, Logger}
import akka.actor.ActorSystem
import org.apache.mesos.chronos.scheduler.graph.JobGraph
import org.apache.mesos.chronos.scheduler.mesos.MesosDriverFactory
import org.apache.mesos.chronos.scheduler.state.PersistenceStore
import com.google.common.util.concurrent.AbstractIdleService
import com.google.inject.Inject
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.leader.{LeaderLatch, LeaderLatchListener}
import org.apache.mesos.Protos.TaskStatus
import org.joda.time.format.DateTimeFormat
import org.joda.time.{DateTime, DateTimeZone, Duration, Period}
import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer
/**
* Constructs concrete tasks given a list of schedules and a global scheduleHorizon.
* The schedule horizon represents the advance-time the schedule is constructed.
*
* A lot of the methods in this class are broken into small pieces to allow for better unit testing.
* @author Florian Leibert (flo@leibert.de)
*/
class JobScheduler @Inject()(val scheduleHorizon: Period,
val taskManager: TaskManager,
val jobGraph: JobGraph,
val persistenceStore: PersistenceStore,
val mesosDriver: MesosDriverFactory = null,
val curator: CuratorFramework = null,
val leaderLatch: LeaderLatch = null,
val leaderPath: String = null,
val jobsObserver: JobsObserver.Observer,
val failureRetryDelay: Long = 60000,
val disableAfterFailures: Long = 0,
val jobMetrics: JobMetrics)
//Allows us to let Chaos manage the lifecycle of this class.
extends AbstractIdleService {
  // Single-threaded pool running the main scheduling loop.
  val localExecutor = Executors.newFixedThreadPool(1)
  // Handle on the currently running scheduler-loop task, if any.
  val schedulerThreadFuture = new AtomicReference[Future[_]]
  // Executor on which leadership callbacks run.
  val leaderExecutor = Executors.newSingleThreadExecutor()
  //This acts as the lock
  val lock = new Object
  val actorSystem = ActorSystem()
  // Akka scheduler used to delay re-enqueueing of failed tasks.
  val akkaScheduler = actorSystem.scheduler
  //TODO(FL): Take some methods out of this class.
  val running = new AtomicBoolean(false)
  val leader = new AtomicBoolean(false)
  private[this] val log = Logger.getLogger(getClass.getName)
  // Active schedule streams, one per enabled schedule-based job.
  var streams: List[ScheduleStream] = List()
  def isLeader: Boolean = leader.get()

  // Id of the current leader as reported by the ZooKeeper leader latch.
  // NOTE(review): any ZK failure terminates the whole JVM via System.exit(1);
  // confirm this is intended before reusing this method.
  def getLeader: String = {
    try {
      leaderLatch.getLeader.getId
    } catch {
      case e: Exception =>
        log.log(Level.SEVERE, "Error trying to talk to zookeeper. Exiting.", e)
        System.exit(1)
        null
    }
  }

  // True when the job encoded in the task id exists in the graph and is
  // marked async; false for unknown jobs.
  def isTaskAsync(taskId: String): Boolean = {
    val TaskUtils.taskIdPattern(_, _, jobName, _) = taskId
    jobGraph.lookupVertex(jobName) match {
      case Some(baseJob: BaseJob) => baseJob.async
      case _ => false
    }
  }
/**
* Update job definition
* @param oldJob job definition
* @param newJob new job definition
*/
  def updateJob(oldJob: BaseJob, newJob: BaseJob) {
    //TODO(FL): Ensure we're using job-ids rather than relying on jobs names for identification.
    assert(newJob.name == oldJob.name, "Renaming jobs is currently not supported!")
    newJob match {
      case scheduleBasedJob: ScheduleBasedJob =>
        lock.synchronized {
          if (!scheduleBasedJob.disabled) {
            // Rebuild this job's schedule stream from "now" and splice it in,
            // replacing any previous stream belonging to the same job name.
            val newStreams = List(JobUtils.makeScheduleStream(scheduleBasedJob, DateTime.now(DateTimeZone.UTC)))
              .filter(_.nonEmpty).map(_.get)
            if (newStreams.nonEmpty) {
              log.info("updating ScheduleBasedJob:" + scheduleBasedJob.toString)
              val tmpStreams = streams.filter(_.head._2 != scheduleBasedJob.name)
              streams = iteration(DateTime.now(DateTimeZone.UTC), newStreams ++ tmpStreams)
            }
          } else {
            // Disabled jobs simply have their stream removed.
            log.info("updating ScheduleBasedJob:" + scheduleBasedJob.toString)
            val tmpStreams = streams.filter(_.head._2 != scheduleBasedJob.name)
            streams = iteration(DateTime.now(DateTimeZone.UTC), tmpStreams)
          }
        }
      case _ =>
    }
    // Swap the vertex in the job graph and persist the new definition.
    replaceJob(oldJob, newJob)
  }
  // Clears all in-memory scheduling state (streams and job graph); when
  // purgeQueue is set, also drops locally queued tasks.
  def reset(purgeQueue: Boolean = false) {
    lock.synchronized {
      streams = List()
      jobGraph.reset()
      if (purgeQueue) {
        log.warning("Purging locally queued tasks!")
        taskManager.flush()
      }
    }
  }

  // Convenience overload registering a single job.
  def registerJob(job: BaseJob, persist: Boolean, dateTime: DateTime) {
    registerJob(List(job), persist, dateTime)
  }
/**
* This method should be used to register jobs.
*/
  def registerJob(jobs: List[BaseJob], persist: Boolean = false, dateTime: DateTime = DateTime.now(DateTimeZone.UTC)) {
    lock.synchronized {
      require(isLeader, "Cannot register a job with this scheduler, not the leader!")
      // Split incoming jobs by kind; anything else is a programming error.
      val scheduleBasedJobs = ListBuffer[ScheduleBasedJob]()
      val dependencyBasedJobs = ListBuffer[DependencyBasedJob]()

      jobs.foreach {
        case x: DependencyBasedJob =>
          dependencyBasedJobs += x
        case x: ScheduleBasedJob =>
          scheduleBasedJobs += x
        case x: Any =>
          throw new IllegalStateException("Error, job is neither ScheduleBased nor DependencyBased:" + x.toString)
      }

      if (scheduleBasedJobs.nonEmpty) {
        // Build schedule streams for every enabled job, add all jobs to the
        // graph (optionally persisting), then install the new streams.
        val newStreams = scheduleBasedJobs.filter(!_.disabled).map(JobUtils.makeScheduleStream(_, dateTime)).filter(_.nonEmpty).map(_.get)
        scheduleBasedJobs.foreach({
          job =>
            jobGraph.addVertex(job)
            if (persist) {
              log.info("Persisting job:" + job.name)
              persistenceStore.persistJob(job)
            }
        })
        if (newStreams.nonEmpty) {
          addSchedule(dateTime, newStreams.toList)
        }
      }

      if (dependencyBasedJobs.nonEmpty) {
        // Add each job to the graph and wire up edges from all of its parents.
        dependencyBasedJobs.foreach({
          job =>
            val parents = jobGraph.parentJobs(job)
            log.info("Job parent: [ %s ], name: %s, command: %s".format(job.parents.mkString(","), job.name, job.command))
            jobGraph.addVertex(job)
            parents.foreach(x => jobGraph.addDependency(x.name, job.name))
            if (persist) {
              log.info("Persisting job:" + job.name)
              persistenceStore.persistJob(job)
            }
        })
      }
    }
  }
  // Removes a job: detaches it from multi-parent dependent jobs, removes the
  // vertex and its schedule stream, cancels queued tasks and notifies
  // observers; optionally removes it from the persistence store.
  def deregisterJob(job: BaseJob, persist: Boolean = false) {
    require(isLeader, "Cannot deregister a job with this scheduler, not the leader!")
    lock.synchronized {
      log.info("Removing vertex")
      // Children that depend on more than one parent merely lose this parent;
      // (single-parent children are handled by removing the vertex below).
      jobGraph.getChildren(job.name)
        .map(x => jobGraph.lookupVertex(x).get)
        .filter {
          case j: DependencyBasedJob => true
          case _ => false
        }
        .map(x => x.asInstanceOf[DependencyBasedJob])
        .filter(x => x.parents.size > 1)
        .foreach({
          childJob =>
            log.info("Updating job %s".format(job.name))
            val copy = childJob.copy(parents = childJob.parents.filter(_ != job.name))
            updateJob(childJob, copy)
        })

      jobGraph.removeVertex(job)

      job match {
        case scheduledJob: ScheduleBasedJob =>
          removeSchedule(scheduledJob)
          log.info("Removed schedule based job")
          log.info("Size of streams:" + streams.size)
        case dependencyBasedJob: DependencyBasedJob =>
          //TODO(FL): Check if there are empty edges.
          log.info("Job removed from dependency graph.")
        case _: Any =>
          throw new IllegalArgumentException("Cannot handle the job type")
      }

      // Drop any queued or pending tasks belonging to the removed job.
      taskManager.cancelTasks(job)
      taskManager.removeTasks(job)
      jobsObserver.apply(JobRemoved(job))
      if (persist) {
        log.info("Removing job from underlying state abstraction:" + job.name)
        persistenceStore.removeJob(job)
      }
    }
  }
  // Reacts to a task entering the running state: notifies observers and, for
  // dependency-based jobs, resets the recorded invocations of its parents.
  def handleStartedTask(taskStatus: TaskStatus) {
    val taskId = taskStatus.getTaskId.getValue
    // Ignore task ids produced by older/incompatible versions.
    if (!TaskUtils.isValidVersion(taskId)) {
      log.warning("Found old or invalid task, ignoring!")
      return
    }
    val jobName = TaskUtils.getJobNameForTaskId(taskId)
    val jobOption = jobGraph.lookupVertex(jobName)
    if (jobOption.isEmpty) {
      log.warning("Job '%s' no longer registered.".format(jobName))
    } else {
      val job = jobOption.get
      val (_, _, attempt, _) = TaskUtils.parseTaskId(taskId)
      jobsObserver.apply(JobStarted(job, taskStatus, attempt))

      job match {
        case j: DependencyBasedJob =>
          jobGraph.resetDependencyInvocations(j.name)
        case _ =>
      }
    }
  }
/**
* Takes care of follow-up actions for a finished task, i.e. update the job schedule in the persistence store or
* launch tasks for dependent jobs
*/
  def handleFinishedTask(taskStatus: TaskStatus, taskDate: Option[DateTime] = None) {
    // `taskDate` is purely for unit testing
    val taskId = taskStatus.getTaskId.getValue
    // Ignore task ids produced by older/incompatible versions.
    if (!TaskUtils.isValidVersion(taskId)) {
      log.warning("Found old or invalid task, ignoring!")
      return
    }
    val jobName = TaskUtils.getJobNameForTaskId(taskId)
    val jobOption = jobGraph.lookupVertex(jobName)
    if (jobOption.isEmpty) {
      log.warning("Job '%s' no longer registered.".format(jobName))
    } else {
      // Record success metrics (elapsed time derived from the task id's
      // embedded start timestamp) and notify observers.
      val (_, start, attempt, _) = TaskUtils.parseTaskId(taskId)
      jobMetrics.updateJobStat(jobName, timeMs = DateTime.now(DateTimeZone.UTC).getMillis - start)
      jobMetrics.updateJobStatus(jobName, success = true)

      val job = jobOption.get
      jobsObserver.apply(JobFinished(job, taskStatus, attempt))

      // Bump success counters and clear the error-streak on the stored job.
      val newJob = job match {
        case job: ScheduleBasedJob =>
          job.copy(successCount = job.successCount + 1,
            errorsSinceLastSuccess = 0,
            lastSuccess = DateTime.now(DateTimeZone.UTC).toString)
        case job: DependencyBasedJob =>
          job.copy(successCount = job.successCount + 1,
            errorsSinceLastSuccess = 0,
            lastSuccess = DateTime.now(DateTimeZone.UTC).toString)
        case _ =>
          throw new IllegalArgumentException("Cannot handle unknown task type")
      }
      replaceJob(job, newJob)
      // Launch any dependent jobs that are now runnable.
      processDependencies(jobName, taskDate)

      log.fine("Cleaning up finished task '%s'".format(taskId))

      /* TODO(FL): Fix.
         Cleanup potentially exhausted job. Note, if X tasks were fired within a short period of time (~ execution time
         of the job, the first returning Finished-task may trigger deletion of the job! This is a known limitation and
         needs some work but should only affect long running frequent finite jobs or short finite jobs with a tiny pause
         in between */
      job match {
        case job: ScheduleBasedJob =>
          // A schedule with zero remaining recurrences is disabled rather
          // than deleted, and its owner is notified.
          val scheduleBasedJob: ScheduleBasedJob = newJob.asInstanceOf[ScheduleBasedJob]
          Iso8601Expressions.parse(scheduleBasedJob.schedule, scheduleBasedJob.scheduleTimeZone) match {
            case Some((recurrences, _, _)) =>
              if (recurrences == 0) {
                log.info("Disabling job that reached a zero-recurrence count!")
                val disabledJob: ScheduleBasedJob = scheduleBasedJob.copy(disabled = true)
                jobsObserver.apply(JobDisabled(job, """Job '%s' has exhausted all of its recurrences and has been disabled.
                                                      |Please consider either removing your job, or updating its schedule and re-enabling it.
                                                    """.stripMargin.format(job.name)))
                replaceJob(scheduleBasedJob, disabledJob)
              }
            case None =>
          }
        case _ =>
      }
    }
  }
  // Atomically swaps a job's definition in the graph and persists the new one.
  def replaceJob(oldJob: BaseJob, newJob: BaseJob) {
    lock.synchronized {
      jobGraph.replaceVertex(oldJob, newJob)
      persistenceStore.persistJob(newJob)
    }
  }

  // Enqueues a task for every executable, enabled child of the finished job.
  private def processDependencies(jobName: String, taskDate: Option[DateTime]) {
    val dependents = jobGraph.getExecutableChildren(jobName)
    if (dependents.nonEmpty) {
      log.fine("%s has dependents: %s .".format(jobName, dependents.mkString(",")))
      dependents.foreach {
        //TODO(FL): Ensure that the job for the given x exists. Lock.
        x =>
          val dependentJob = jobGraph.getJobForName(x).get
          if (!dependentJob.disabled) {
            // taskDate is only supplied from tests; production uses "now".
            val date = taskDate match {
              case Some(d) => d
              case None => DateTime.now(DateTimeZone.UTC)
            }
            taskManager.enqueue(TaskUtils.getTaskId(dependentJob,
              date), dependentJob.highPriority)

            log.fine("Enqueued depedent job." + x)
          }
      }
    } else {
      log.fine("%s does not have any ready dependents.".format(jobName))
    }
  }
  /** Handles a failed task status update: either schedules a delayed retry
    * (when attempts remain and the job has no error newer than its last success)
    * or records the failure, disabling the job once `disableAfterFailures`
    * consecutive errors are reached.
    */
  def handleFailedTask(taskStatus: TaskStatus) {
    val taskId = taskStatus.getTaskId.getValue
    if (!TaskUtils.isValidVersion(taskId)) {
      log.warning("Found old or invalid task, ignoring!")
    } else {
      val (jobName, _, attempt, _) = TaskUtils.parseTaskId(taskId)
      log.warning("Task of job: %s failed.".format(jobName))
      val jobOption = jobGraph.lookupVertex(jobName)
      jobOption match {
        case Some(job) =>
          jobsObserver.apply(JobFailed(Right(job), taskStatus, attempt))
          val hasAttemptsLeft: Boolean = attempt < job.retries
          // True when both timestamps are present and parse, and the last success
          // is not older than the last error (i.e. the job recovered since then).
          val hadRecentSuccess: Boolean = try {
            job.lastError.length > 0 && job.lastSuccess.length > 0 &&
              (DateTime.parse(job.lastSuccess).getMillis - DateTime.parse(job.lastError).getMillis) >= 0
          } catch {
            case ex: IllegalArgumentException =>
              log.warning(s"Couldn't parse last run date from ${job.name}")
              false
            case _: Exception => false
          }
          if (hasAttemptsLeft && (job.lastError.length == 0 || hadRecentSuccess)) {
            log.warning("Retrying job: %s, attempt: %d".format(jobName, attempt))
            /* Schedule the retry up to 60 seconds in the future */
            val delayDuration = new Duration(failureRetryDelay)
            val newTaskId = TaskUtils.getTaskId(job, DateTime.now(DateTimeZone.UTC)
              .plus(delayDuration), attempt + 1)
            // The retry is enqueued later via the Akka scheduler so it runs
            // after the configured failureRetryDelay has elapsed.
            val delayedTask = new Runnable {
              def run() {
                log.info(s"Enqueuing failed task $newTaskId")
                taskManager.persistTask(newTaskId, job)
                taskManager.enqueue(newTaskId, job.highPriority)
              }
            }
            implicit val executor = actorSystem.dispatcher
            akkaScheduler.scheduleOnce(
              delay = scala.concurrent.duration.Duration(delayDuration.getMillis, TimeUnit.MILLISECONDS),
              runnable = delayedTask)
          } else {
            // Out of retries: bump error counters and optionally disable the job.
            val disableJob =
              (disableAfterFailures > 0) && (job.errorsSinceLastSuccess + 1 >= disableAfterFailures)
            val lastErrorTime = DateTime.now(DateTimeZone.UTC)
            val newJob = {
              job match {
                case job: ScheduleBasedJob =>
                  job.copy(errorCount = job.errorCount + 1,
                    errorsSinceLastSuccess = job.errorsSinceLastSuccess + 1,
                    lastError = lastErrorTime.toString, disabled = disableJob)
                case job: DependencyBasedJob =>
                  job.copy(errorCount = job.errorCount + 1,
                    errorsSinceLastSuccess = job.errorsSinceLastSuccess + 1,
                    lastError = lastErrorTime.toString, disabled = disableJob)
                case _ => throw new IllegalArgumentException("Cannot handle unknown task type")
              }
            }
            updateJob(job, newJob)
            // Soft-error jobs still trigger their dependents even on failure.
            if (job.softError) processDependencies(jobName, Option(lastErrorTime))
            // Handle failure by either disabling the job and notifying the owner,
            // or just notifying the owner.
            if (disableJob) {
              log.warning("Job failed beyond retries! Job will now be disabled after "
                + newJob.errorsSinceLastSuccess + " failures (disableAfterFailures=" + disableAfterFailures + ").")
              val msg = "\\nFailed at '%s', %d failures since last success\\nTask id: %s\\n"
                .format(DateTime.now(DateTimeZone.UTC), newJob.errorsSinceLastSuccess, taskId)
              jobsObserver.apply(JobDisabled(job, TaskUtils.appendSchedulerMessage(msg, taskStatus)))
            } else {
              log.warning("Job failed beyond retries!")
              jobsObserver.apply(JobRetriesExhausted(job, taskStatus, attempt))
            }
            jobMetrics.updateJobStatus(jobName, success = false)
          }
        case None =>
          log.warning("Could not find job for task: %s Job may have been deleted while task was in flight!"
            .format(taskId))
      }
    }
  }
/**
* Task has been killed. Do appropriate cleanup
* Possible reasons for task being killed:
* -invoked kill via task manager API
* -job is deleted
*/
def handleKilledTask(taskStatus: TaskStatus) {
val taskId = taskStatus.getTaskId.getValue
if (!TaskUtils.isValidVersion(taskId)) {
log.warning("Found old or invalid task, ignoring!")
return
}
val (jobName, start, attempt, _) = TaskUtils.parseTaskId(taskId)
val jobOption = jobGraph.lookupVertex(jobName)
jobsObserver.apply(JobFailed(jobOption.toRight(jobName), taskStatus, attempt))
}
/**
* Iterates through the stream for the given DateTime and a list of schedules, removing old schedules and acting on
* the available schedules.
* @param dateTime for which to process schedules
* @param schedules schedules to be processed
* @return list of updated schedules
*/
def iteration(dateTime: DateTime, schedules: List[ScheduleStream]): List[ScheduleStream] = {
log.info("Checking schedules with time horizon:%s".format(scheduleHorizon.toString))
removeOldSchedules(schedules.map(s => scheduleStream(dateTime, s)))
}
  /** Main scheduler loop: advances all schedule streams, then sleeps for one
    * schedule horizon, until `running` is flipped to false.
    *
    * @param dateSupplier supplies "now"; injected so callers/tests control time
    */
  def run(dateSupplier: () => DateTime) {
    log.info("Starting run loop for JobScheduler. CurrentTime: %s".format(DateTime.now(DateTimeZone.UTC)))
    while (running.get) {
      // `streams` is shared mutable state; updates happen under the lock.
      lock.synchronized {
        log.info("Size of streams: %d".format(streams.size))
        streams = iteration(dateSupplier(), streams)
      }
      Thread.sleep(scheduleHorizon.toStandardDuration.getMillis)
      //TODO(FL): This can be inaccurate if the horizon >= 1D on daylight savings day and when leap seconds are introduced.
    }
    log.info("No longer running.")
  }
/**
* Given a stream and a DateTime(@see org.joda.DateTime), this method returns a 2-tuple with a ScheduleTask and
* a clipped schedule stream in case that the ScheduleTask was not none. Returns no task and the input stream,
* if nothing needs scheduling within the time horizon.
* @param now time to start iteration with
* @param stream schedule stream
* @return
*/
  @tailrec
  final def next(now: DateTime, stream: ScheduleStream): (Option[ScheduledTask], Option[ScheduleStream]) = {
    val (schedule, jobName, scheduleTimeZone) = stream.head
    log.info("Calling next for stream: %s, jobname: %s".format(stream.schedule, jobName))
    assert(schedule != null && !schedule.equals(""), "No valid schedule found: " + schedule)
    assert(jobName != null, "BaseJob cannot be null")
    var jobOption: Option[BaseJob] = None
    //TODO(FL): wrap with lock.
    try {
      jobOption = jobGraph.lookupVertex(jobName)
      if (jobOption.isEmpty) {
        log.warning("-----------------------------------")
        log.warning("Warning, no job found in graph for:" + jobName)
        log.warning("-----------------------------------")
        //This might happen during loading stage in case of failover.
        return (None, None)
      }
    } catch {
      case ex: IllegalArgumentException =>
        log.warning(s"Corrupt job in stream for $jobName")
    }
    Iso8601Expressions.parse(schedule, scheduleTimeZone) match {
      case Some((recurrences, nextDate, _)) =>
        log.finest("Recurrences: '%d', next date: '%s'".format(recurrences, stream.schedule))
        //nextDate has to be > (now - epsilon) & < (now + timehorizon) , for it to be scheduled!
        if (recurrences == 0) {
          log.info("Finished all recurrences of job '%s'".format(jobName))
          //We're not removing the job here because it may still be required if a pending task fails.
          (None, None)
        } else {
          val job = jobOption.get
          val scheduleWindowBegin = now.minus(job.epsilon)
          val scheduleWindowEnd = now.plus(scheduleHorizon)
          // Within the window [now - epsilon, now + horizon]: schedule the task now.
          if (nextDate.isAfter(scheduleWindowBegin) && nextDate.isBefore(scheduleWindowEnd)) {
            log.info("Task ready for scheduling: %s".format(nextDate))
            //TODO(FL): Rethink passing the dispatch queue all the way down to the ScheduledTask.
            val task = new ScheduledTask(TaskUtils.getTaskId(job, nextDate), nextDate, job, taskManager)
            return (Some(task), stream.tail)
          }
          // Next instance is too far in the future
          // Needs to be scheduled at a later time, after schedule horizon.
          if (!nextDate.isBefore(now)) {
            return (None, Some(stream))
          }
          // Next instance is too far in the past (beyond epsilon)
          //TODO(FL): Think about the semantics here and see if it always makes sense to skip ahead of missed schedules.
          log.fine("No need to work on schedule: '%s' yet".format(nextDate))
          jobsObserver.apply(JobSkipped(job, nextDate))
          // Skip the missed occurrence and recurse on the tail of the stream.
          val tail = stream.tail
          if (tail.isEmpty) {
            //TODO(FL): Verify that this can go.
            persistenceStore.removeJob(job)
            log.warning("\\n\\nWARNING\\n\\nReached the tail of the streams which should have been never reached \\n\\n")
            (None, None)
          } else {
            log.info("tail: " + tail.get.schedule + " now: " + now)
            next(now, tail.get)
          }
        }
      case None =>
        log.warning(s"Couldn't parse date for $jobName")
        (None, Some(stream))
    }
  }
def removeSchedule(deletedStream: BaseJob) {
lock.synchronized {
log.fine("Removing schedules: ")
streams = streams.filter(_.jobName != deletedStream.name)
log.fine("Size of streams: %d".format(streams.size))
}
}
//Begin Service interface
  /** Registers this instance as a leadership candidate. The actual scheduler
    * work only starts in `onElected`, invoked by the latch listener below. */
  override def startUp() {
    assert(!running.get, "This scheduler is already running!")
    log.info("Trying to become leader.")
    leaderLatch.addListener(new LeaderLatchListener {
      override def notLeader(): Unit = {
        leader.set(false)
        onDefeated()
      }
      override def isLeader(): Unit = {
        leader.set(true)
        onElected()
      }
    }, leaderExecutor)
    leaderLatch.start()
  }
  /** Stops the run loop, relinquishes leadership (notifying the next leader)
    * and shuts down the leader-callback executor. */
  override def shutDown() {
    running.set(false)
    log.info("Shutting down job scheduler")
    leaderLatch.close(LeaderLatch.CloseMode.NOTIFY_LEADER)
    leaderExecutor.shutdown()
  }
//Begin Leader interface, which is required for CandidateImpl.
  /** Called when leadership is lost: stops the driver and the scheduler thread,
    * and clears the in-memory job graph for a later rebuild. */
  def onDefeated() {
    mesosDriver.close()
    log.info("Defeated. Not the current leader.")
    running.set(false)
    jobGraph.reset() // So we can rebuild it later.
    schedulerThreadFuture.get.cancel(true)
  }
  /** Called when this instance wins leadership: restores tasks and jobs from
    * the persistence store, starts the scheduler background thread and the
    * mesos driver. Exits the process if state cannot be loaded. */
  def onElected() {
    log.info("Elected as leader.")
    running.set(true)
    lock.synchronized {
      try {
        //It's important to load the tasks first, otherwise a job that's due will trigger a task right away.
        log.info("Loading tasks")
        TaskUtils.loadTasks(taskManager, persistenceStore)
        log.info("Loading jobs")
        JobUtils.loadJobs(this, persistenceStore)
      } catch {
        case e: Exception =>
          // A leader with partial state is worse than no leader: bail out hard.
          log.log(Level.SEVERE, "Loading tasks or jobs failed. Exiting.", e)
          System.exit(1)
      }
    }
    val jobScheduler = this
    //Consider making this a background thread or control via an executor.
    val f = localExecutor.submit(
      new Thread() {
        override def run() {
          log.info("Running background thread")
          val dateSupplier = () => {
            DateTime.now(DateTimeZone.UTC)
          }
          jobScheduler.run(dateSupplier)
        }
      })
    // Keep the future so onDefeated can cancel the loop when leadership is lost.
    schedulerThreadFuture.set(f)
    log.info("Starting chronos driver")
    mesosDriver.start()
  }
// Generates a new ScheduleStream based on a DateTime and a ScheduleStream. Side effects of this method
// are that a new Job may be persisted in the underlying persistence store and a task might get dispatched.
  /** Repeatedly pulls due tasks from the stream at time `now`, dispatching each
    * to the task manager and persisting the clipped schedule, until no more
    * tasks are due or the stream is exhausted. Returns the remaining stream. */
  @tailrec
  private final def scheduleStream(now: DateTime, s: ScheduleStream): Option[ScheduleStream] = {
    val (taskOption, stream) = next(now, s)
    if (taskOption.isEmpty) {
      stream
    } else {
      val encapsulatedJob = taskOption.get.job
      log.info("Scheduling:" + taskOption.get.job.name)
      taskManager.scheduleDelayedTask(taskOption.get, taskManager.getMillisUntilExecution(taskOption.get.due), persist = true)
      /*TODO(FL): This needs some refactoring. Ideally, the task should only be persisted once it has been submitted
               to chronos, however if we were to do this with the current design, there could be missed tasks if
               the scheduler went down before having fired off the jobs, since we're scheduling ahead of time.
               Instead we persist the tasks right away, which also has the disadvantage of us maybe executing a job
               twice IFF the scheduler goes down after the jobs have been submitted to chronos and stored in the queue
               and us still being unavailable before the failover timeout. Thus we set the failover timeout to one
               week. This means we should receive a chronos message of a successful task as long as we're not down for
               more than a week for the above mentioned scenario.
           E.g. Schedule 5seconds into the future
            j1 -> R10/20:00:00/PT1S
           19:00:56: queue(j1t1, j1t2, j1t3, j1t4, j1t5)
           19:00:56: persist(R5/20:00:05/PT1S)
           19:00:56: persist(j1t1, j1t2, j1t3, j1t4, j1t5)
           19:00:57: DOWN
           19:00:58: UP
           ...
      */
      /* TODO(FL): The invocation count only represents the number of job invocations, not the number of successful
                  executions. When a scheduler starts up, it needs to verify that there are no pending tasks.
         This isn't really transactional but should be sufficiently reliable for most usecases. To outline why it is not
         really transactional. To fix this, we need to add a new state into ZK that stores the successful tasks.
      */
      encapsulatedJob match {
        case job: ScheduleBasedJob =>
          // Store the job with its clipped (advanced) schedule so a restart
          // resumes from the next occurrence rather than re-firing this one.
          val updatedJob = job.copy(stream.get.schedule)
          log.info("Saving updated job:" + updatedJob)
          persistenceStore.persistJob(updatedJob)
          jobGraph.replaceVertex(encapsulatedJob, updatedJob)
        case _ =>
          log.warning(s"Job ${encapsulatedJob.name} is not a scheduled job!")
      }
      if (stream.isEmpty) {
        return stream
      }
      scheduleStream(now, stream.get)
    }
  }
//End Service interface
private def removeOldSchedules(scheduleStreams: List[Option[ScheduleStream]]): List[ScheduleStream] = {
log.fine("Filtering out empty streams")
scheduleStreams.filter(s => s.isDefined && s.get.tail.isDefined).map(_.get)
}
/**
* Adds a List of ScheduleStream and runs a iteration at the current time.
* @param now time from which to evaluate schedule
* @param newStreams new schedules to be evaluated
*/
  private def addSchedule(now: DateTime, newStreams: List[ScheduleStream]) {
    log.info("Adding schedule for time:" + now.toString(DateTimeFormat.fullTime()))
    lock.synchronized {
      log.fine("Starting iteration")
      // Prepend the new streams so they are evaluated in the same pass as the
      // existing ones; `iteration` also prunes streams that are already exhausted.
      streams = iteration(now, newStreams ++ streams)
      log.fine("Size of streams: %d".format(streams.size))
    }
  }
//End Leader interface
}
| anapsix/chronos | src/main/scala/org/apache/mesos/chronos/scheduler/jobs/JobScheduler.scala | Scala | apache-2.0 | 28,353 |
package com.kalmanb.peerdev
import com.kalmanb.test.AkkaSpec
import akka.testkit._
import java.io.File
/** Verifies that WorkspaceManager loads every file from its workspace directory
  * on startup, with contents and checksums intact. */
class WorkspaceManagerTest extends AkkaSpec {
  describe("workspace manager") {
    // NOTE(review): "form" in the description below looks like a typo for "from";
    // left unchanged here because test descriptions are reported at runtime.
    it("should correctly load all files form workspace") {
      // The fixture directory contains exactly two files: abc.txt and 123.txt.
      val workspaceManager = TestActorRef(new WorkspaceManager(new File("./src/test/resources/workspace")))
      val files = workspaceManager.underlyingActor.files
      files.size should be(2)
      // Buf appears to be (name, contents, md5-hex) — TODO confirm against Buf's definition.
      files should contain(Buf("abc.txt", "abc\ndef\n", "f72fe788e136ba9e53518afa8b407eac"))
      files should contain(Buf("123.txt", "123\n456\n", "c010aff9dc6276fdb7efefd1a2757658"))
    }
  }
}
| kalmanb/remote-peer-dev | src/test/scala/com/kalmanb/peerdev/WorkspaceManagerTest.scala | Scala | apache-2.0 | 670 |
/** Prints the first 11 Fibonacci numbers, each followed by a space, then a newline. */
object Fib extends App {
  // Lazily generate the sequence from (current, next) pairs instead of
  // threading two mutable vars through a while loop.
  val fibs = Iterator.iterate((0, 1)) { case (a, b) => (b, a + b) }.map(_._1)
  // mkString with a trailing " " reproduces the original output byte-for-byte.
  println(fibs.take(11).mkString("", " ", " "))
}
| grzegorzbalcerek/scala-exercises | Fib/Fib.scala | Scala | bsd-2-clause | 202 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.http
import java.io.File
import com.typesafe.config.ConfigFactory
import org.specs2.mutable.Specification
import play.api.{ Configuration, Environment, Mode, PlayException }
import play.api.mvc.Cookie.SameSite
import play.core.cookie.encoding.{ ClientCookieDecoder, ClientCookieEncoder, ServerCookieDecoder, ServerCookieEncoder }
class HttpConfigurationSpec extends Specification {

  "HttpConfiguration" should {

    import scala.collection.JavaConverters._

    // Baseline property set: every supported play.http.* setting with a
    // non-default value, so each example can assert that its setting is read.
    def properties = {
      Map(
        "play.http.context" -> "/",
        "play.http.parser.maxMemoryBuffer" -> "10k",
        "play.http.parser.maxDiskBuffer" -> "20k",
        "play.http.actionComposition.controllerAnnotationsFirst" -> "true",
        "play.http.actionComposition.executeActionCreatorActionFirst" -> "true",
        "play.http.cookies.strict" -> "true",
        "play.http.session.cookieName" -> "PLAY_SESSION",
        "play.http.session.secure" -> "true",
        "play.http.session.maxAge" -> "10s",
        "play.http.session.httpOnly" -> "true",
        "play.http.session.domain" -> "playframework.com",
        "play.http.session.path" -> "/session",
        "play.http.session.sameSite" -> "lax",
        "play.http.session.jwt.signatureAlgorithm" -> "HS256",
        "play.http.session.jwt.expiresAfter" -> null,
        "play.http.session.jwt.clockSkew" -> "30s",
        "play.http.session.jwt.dataClaim" -> "data",
        "play.http.flash.cookieName" -> "PLAY_FLASH",
        "play.http.flash.secure" -> "true",
        "play.http.flash.httpOnly" -> "true",
        "play.http.flash.domain" -> "playframework.com",
        "play.http.flash.path" -> "/flash",
        "play.http.flash.sameSite" -> "lax",
        "play.http.flash.jwt.signatureAlgorithm" -> "HS256",
        "play.http.flash.jwt.expiresAfter" -> null,
        "play.http.flash.jwt.clockSkew" -> "30s",
        "play.http.flash.jwt.dataClaim" -> "data",
        "play.http.fileMimeTypes" -> "foo=text/foo",
        "play.http.secret.key" -> "mysecret",
        "play.http.secret.provider" -> "HmacSHA1"
      )
    }

    val configuration = new Configuration(ConfigFactory.parseMap(properties.asJava))
    val environment: Environment = Environment.simple(new File("."), Mode.Prod)

    // Every positive example previously rebuilt the provider inline; this helper
    // keeps the specs DRY while still constructing a fresh instance per call.
    def httpConfiguration =
      new HttpConfiguration.HttpConfigurationProvider(configuration, environment).get

    // Provider built from the baseline with the given overrides applied; used by
    // the negative examples asserting that invalid settings are rejected.
    def providerWith(overrides: (String, String)*) = {
      val config = properties ++ overrides
      val wrongConfiguration = Configuration(ConfigFactory.parseMap(config.asJava))
      new HttpConfiguration.HttpConfigurationProvider(wrongConfiguration, environment)
    }

    "configure a context" in {
      httpConfiguration.context must beEqualTo("/")
    }

    "throw an error when context does not starts with /" in {
      providerWith("play.http.context" -> "something").get must throwA[PlayException]
    }

    "configure a session path" in {
      httpConfiguration.session.path must beEqualTo("/session")
    }

    "throw an error when session path does not starts with /" in {
      providerWith("play.http.session.path" -> "something").get must throwA[PlayException]
    }

    "configure a flash path" in {
      httpConfiguration.flash.path must beEqualTo("/flash")
    }

    "throw an error when flash path does not starts with /" in {
      providerWith("play.http.flash.path" -> "something").get must throwA[PlayException]
    }

    "throw an error when context includes a mimetype config setting" in {
      providerWith("mimetype" -> "something").get must throwA[PlayException]
    }

    "configure max memory buffer" in {
      httpConfiguration.parser.maxMemoryBuffer must beEqualTo(10 * 1024)
    }

    "configure max disk buffer" in {
      httpConfiguration.parser.maxDiskBuffer must beEqualTo(20 * 1024)
    }

    "configure cookies encoder/decoder" in {
      httpConfiguration.cookies.strict must beTrue
    }

    "configure session should set" in {
      "cookie name" in {
        httpConfiguration.session.cookieName must beEqualTo("PLAY_SESSION")
      }
      "cookie security" in {
        httpConfiguration.session.secure must beTrue
      }
      "cookie maxAge" in {
        httpConfiguration.session.maxAge.map(_.toSeconds) must beEqualTo(Some(10))
      }
      "cookie httpOnly" in {
        httpConfiguration.session.httpOnly must beTrue
      }
      "cookie domain" in {
        httpConfiguration.session.domain must beEqualTo(Some("playframework.com"))
      }
      "cookie samesite" in {
        httpConfiguration.session.sameSite must beSome(SameSite.Lax)
      }
    }

    "configure flash should set" in {
      "cookie name" in {
        httpConfiguration.flash.cookieName must beEqualTo("PLAY_FLASH")
      }
      "cookie security" in {
        httpConfiguration.flash.secure must beTrue
      }
      "cookie httpOnly" in {
        httpConfiguration.flash.httpOnly must beTrue
      }
      "cookie samesite" in {
        httpConfiguration.flash.sameSite must beSome(SameSite.Lax)
      }
    }

    "configure action composition" in {
      "controller annotations first" in {
        httpConfiguration.actionComposition.controllerAnnotationsFirst must beTrue
      }
      "execute request handler action first" in {
        httpConfiguration.actionComposition.executeActionCreatorActionFirst must beTrue
      }
    }

    "configure mime types" in {
      "for server encoder" in {
        httpConfiguration.fileMimeTypes.mimeTypes must beEqualTo(Map("foo" -> "text/foo"))
      }
    }
  }

  "Cookies configuration" should {
    "be configured as strict" in {
      val cookieConfiguration = CookiesConfiguration(strict = true)
      "for server encoder" in {
        cookieConfiguration.serverEncoder must beEqualTo(ServerCookieEncoder.STRICT)
      }
      "for server decoder" in {
        cookieConfiguration.serverDecoder must beEqualTo(ServerCookieDecoder.STRICT)
      }
      "for client encoder" in {
        cookieConfiguration.clientEncoder must beEqualTo(ClientCookieEncoder.STRICT)
      }
      "for client decoder" in {
        cookieConfiguration.clientDecoder must beEqualTo(ClientCookieDecoder.STRICT)
      }
    }
    "be configured as lax" in {
      val cookieConfiguration = CookiesConfiguration(strict = false)
      "for server encoder" in {
        cookieConfiguration.serverEncoder must beEqualTo(ServerCookieEncoder.LAX)
      }
      "for server decoder" in {
        cookieConfiguration.serverDecoder must beEqualTo(ServerCookieDecoder.LAX)
      }
      "for client encoder" in {
        cookieConfiguration.clientEncoder must beEqualTo(ClientCookieEncoder.LAX)
      }
      "for client decoder" in {
        cookieConfiguration.clientDecoder must beEqualTo(ClientCookieDecoder.LAX)
      }
    }
  }
}
| zaneli/playframework | framework/src/play/src/test/scala/play/api/http/HttpConfigurationSpec.scala | Scala | apache-2.0 | 9,355 |
package org.orbeon.apache.xerces.util
object HexUtils {
  /** Renders an Int as a signed lowercase hexadecimal string,
    * e.g. 255 -> "ff" and -255 -> "-ff".
    *
    * The conversion runs entirely in the non-positive domain (mirroring the
    * JDK's Integer.toString implementation): a positive input is negated up
    * front so that Int.MinValue, whose magnitude is not representable as a
    * positive Int, is still converted correctly.
    */
  def toHexString(_i: Int): String = {
    var i = _i
    val buf = new Array[Char](33) // sign + up to 32 digits (generic JDK-style buffer)
    val negative = i < 0
    var charPos = 32 // digits are written right-to-left
    if (! negative)
      i = -i
    while (i <= -Radix) {
      buf(charPos) = HexDigits(-(i % Radix))
      charPos -= 1
      i = i / Radix
    }
    buf(charPos) = HexDigits(-i) // most significant digit
    if (negative) {
      charPos -= 1
      buf(charPos) = '-'
    }
    new String(buf, charPos, 33 - charPos)
  }
  private val Radix = 16
  private val HexDigits = Array[Char]('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f')
}
| ebruchez/darius-xml.js | xerces/shared/src/main/scala/org/orbeon/apache/xerces/util/HexUtils.scala | Scala | apache-2.0 | 638 |
/*
* Copyright (c) 2013-14 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import org.junit.Test
import org.junit.Assert._
import scala.collection.mutable.ListBuffer
import test._
class LazyTests {

  /** `Lazy` defers evaluation of the implicit body until `.value` is accessed:
    * the effect (3) must fire between the call-site effects (2) and (4). */
  @Test
  def testEffectOrder {
    val effects = ListBuffer[Int]()
    implicit def lazyInt: Lazy[Int] = Lazy[Int]{ effects += 3 ; 23 }
    def summonLazyInt(implicit li: Lazy[Int]): Int = {
      effects += 2
      val i = li.value
      effects += 4
      i
    }
    effects += 1
    val i = summonLazyInt
    effects += 5
    assertEquals(23, i)
    assertEquals(List(1, 2, 3, 4, 5), effects.toList)
  }

  /** Passing a `def` where `Lazy[Int]` is expected defers its evaluation
    * until `.value` (effect 3 fires inside the callee, after effect 2). */
  @Test
  def testDefConversion {
    val effects = ListBuffer[Int]()
    def effectfulInt: Int = { effects += 3 ; 23 }
    def useEffectfulInt(li: Lazy[Int]): Int = {
      effects += 2
      val i = li.value
      effects += 4
      i
    }
    effects += 1
    val i = useEffectfulInt(effectfulInt)
    effects += 5
    assertEquals(23, i)
    assertEquals(List(1, 2, 3, 4, 5), effects.toList)
  }

  /** Same as above, with a `lazy val` argument. */
  @Test
  def testLazyConversion {
    val effects = ListBuffer[Int]()
    lazy val effectfulInt: Int = { effects += 3 ; 23 }
    def useEffectfulInt(li: Lazy[Int]): Int = {
      effects += 2
      val i = li.value
      effects += 4
      i
    }
    effects += 1
    val i = useEffectfulInt(effectfulInt)
    effects += 5
    assertEquals(23, i)
    assertEquals(List(1, 2, 3, 4, 5), effects.toList)
  }

  /** An inline block argument is evaluated at the call site (effect 2)
    * before the callee runs (effects 3 and 4). */
  @Test
  def testInlineConversion {
    val effects = ListBuffer[Int]()
    def useEffectfulInt(li: Lazy[Int]): Int = {
      effects += 3
      val i = li.value
      effects += 4
      i
    }
    effects += 1
    val i = useEffectfulInt({ effects += 2 ; 23 })
    effects += 5
    assertEquals(23, i)
    assertEquals(List(1, 2, 3, 4, 5), effects.toList)
  }

  // Minimal cons-list ADT used to exercise `Lazy` with recursive implicits.
  sealed trait List[+T]
  case class Cons[T](hd: T, tl: List[T]) extends List[T]
  sealed trait Nil extends List[Nothing]
  case object Nil extends Nil

  // Show type class: the Cons and List instances below are mutually recursive
  // and only resolve because each requests its dependency via Lazy.
  trait Show[T] {
    def apply(t: T): String
  }
  def show[T](t: T)(implicit s: Show[T]) = s(t)
  implicit def showInt: Show[Int] = new Show[Int] {
    def apply(t: Int) = t.toString
  }
  implicit def showNil: Show[Nil] = new Show[Nil] {
    def apply(t: Nil) = "Nil"
  }
  implicit def showCons[T](implicit st: Lazy[Show[T]], sl: Lazy[Show[List[T]]]): Show[Cons[T]] = new Show[Cons[T]] {
    def apply(t: Cons[T]) = s"Cons(${show(t.hd)(st.value)}, ${show(t.tl)(sl.value)})"
  }
  implicit def showList[T](implicit sc: Lazy[Show[Cons[T]]]): Show[List[T]] = new Show[List[T]] {
    def apply(t: List[T]) = t match {
      case n: Nil => show(n)
      case c: Cons[T] => show(c)(sc.value)
    }
  }

  /** The mutually recursive Show instances resolve and render a full list. */
  @Test
  def testRecursive {
    val l: List[Int] = Cons(1, Cons(2, Cons(3, Nil)))
    val sl = show(l)
    assertEquals("Cons(1, Cons(2, Cons(3, Nil)))", sl)
  }

  trait Foo[T]
  object Foo {
    implicit def mkFoo[T]: Foo[T] = new Foo[T] {}
  }

  /** `Lazy.values` materialises several instances as an HList, and the extracted
    * values are the same instances found by later implicit searches. */
  @Test
  def testMultiple {
    val foos = Lazy.values[Foo[Int] :: Foo[String] :: Foo[Boolean] :: HNil]
    implicit val x :: y :: z :: HNil = foos
    typed[Foo[Int]](x)
    typed[Foo[String]](y)
    typed[Foo[Boolean]](z)
    val x1 = implicitly[Foo[Int]]
    val y1 = implicitly[Foo[String]]
    val z1 = implicitly[Foo[Boolean]]
    assertTrue(x1 eq x)
    assertTrue(y1 eq y)
    assertTrue(z1 eq z)
  }

  trait Bar[A] { def foo(a: A): Unit }
  object Bar {
    implicit val intBar = new Bar[Int] { def foo(x: Int) = () }
  }

  /** Eta-expansion of a method on a lazily summoned instance compiles. */
  @Test
  def testEta {
    implicitly[Lazy[Bar[Int]]].value.foo _
  }

  trait Baz[T] {
    type U
  }
  object Baz {
    def apply[T, U](t: T)(implicit bt: Lazy[Aux[T, U]]): Aux[T, U] = bt.value
    type Aux[T, U0] = Baz[T] { type U = U0 }
    implicit val bazIS: Aux[Int, String] = new Baz[Int] { type U = String }
    implicit val bazBD: Aux[Boolean, Double] = new Baz[Boolean] { type U = Double }
  }

  /** `Lazy` preserves refined (Aux) types through implicit resolution. */
  @Test
  def testAux {
    val bIS = Baz(23)
    typed[Baz.Aux[Int, String]](bIS)
    val bBD = Baz(true)
    typed[Baz.Aux[Boolean, Double]](bBD)
  }
}
| japgolly/shapeless | core/src/test/scala/shapeless/lazy.scala | Scala | apache-2.0 | 4,560 |
package org.bitcoins.spvnode.util
import java.net.InetAddress
import akka.util.{ByteString, CompactByteString}
import org.bitcoins.core.util.BitcoinSLogger
import org.bitcoins.spvnode.NetworkMessage
import scala.annotation.tailrec
import scala.util.{Failure, Success, Try}
/**
* Created by chris on 6/3/16.
*/
trait BitcoinSpvNodeUtil {

  private val logger = BitcoinSLogger.logger

  /**
    * Writes an ip address to the representation that the p2p network requires.
    * An IPv6 address is in big endian byte order; an IPv4 address is mapped to
    * an IPv4-mapped IPv6 address first.
    * https://en.wikipedia.org/wiki/IPv6#IPv4-mapped_IPv6_addresses
    *
    * @param iNetAddress the address to serialize
    * @return the 16-byte network representation of the address
    */
  def writeAddress(iNetAddress: InetAddress): Seq[Byte] = {
    if (iNetAddress.getAddress.size == 4) {
      // IPv4-mapped IPv6: an 80-bit zero prefix, 16 one bits, then the 4 IPv4 octets.
      val zeroBytes = for (_ <- 0 until 10) yield 0.toByte
      val oneBytes: Seq[Byte] = Seq(0xff.toByte, 0xff.toByte)
      zeroBytes ++ oneBytes ++ iNetAddress.getAddress
    } else iNetAddress.getAddress
  }

  /**
    * Akka delivers TCP data as an arbitrary byte stream; there is no 1:1
    * relationship between received frames and bitcoin protocol messages.
    * Parses as many complete messages as possible out of `bytes`.
    *
    * @param bytes the raw bytes received so far
    * @return the parsed [[NetworkMessage]]s plus any unaligned leftover bytes,
    *         which must be prepended to the next frame before parsing again
    */
  def parseIndividualMessages(bytes: Seq[Byte]): (Seq[NetworkMessage], Seq[Byte]) = {
    @tailrec
    def loop(remainingBytes: Seq[Byte], accum: Seq[NetworkMessage]): (Seq[NetworkMessage], Seq[Byte]) = {
      // isEmpty instead of `length <= 0`: length is O(n) on a Seq backed by a List.
      if (remainingBytes.isEmpty) (accum.reverse, remainingBytes)
      else {
        Try(NetworkMessage(remainingBytes)) match {
          case Success(message) =>
            if (message.header.payloadSize.toInt != message.payload.bytes.size) {
              // Partial payload: the tcp frame was not aligned, so keep the
              // bytes buffered and wait for the remainder to arrive.
              (accum.reverse, remainingBytes)
            } else {
              loop(remainingBytes.drop(message.bytes.length), message +: accum)
            }
          case Failure(exception) =>
            logger.debug("Failed to parse network message, could be because tcp frame isn't aligned")
            logger.debug(exception.getMessage)
            //this case means that our TCP frame was not aligned with bitcoin protocol
            //return the unaligned bytes so we can apply them to the next tcp frame of bytes we receive
            //http://stackoverflow.com/a/37979529/967713
            (accum.reverse, remainingBytes)
        }
      }
    }
    val (messages, remainingBytes) = loop(bytes, Nil)
    logger.debug("Parsed messages: " + messages)
    (messages, remainingBytes)
  }

  /**
    * Wraps our Seq[Byte] into a compact akka [[ByteString]] object.
    */
  def buildByteString(bytes: Seq[Byte]): ByteString = {
    CompactByteString(bytes.toArray)
  }

  /**
    * Creates a unique actor name by stripping spaces from the class name and
    * appending a random integer suffix.
    */
  def createActorName(className: String): String = {
    className.replace(" ", "") + "-" + scala.util.Random.nextInt
  }

  /**
    * Creates a unique actor name for the given class.
    */
  def createActorName(className: Class[_]): String = createActorName(className.toString)
}

object BitcoinSpvNodeUtil extends BitcoinSpvNodeUtil
| Christewart/bitcoin-s-spv-node | src/main/scala/org/bitcoins/spvnode/util/BitcoinSpvNodeUtil.scala | Scala | mit | 3,847 |
package io.github.karlhigley.lexrank
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.feature.{HashingTF, IDF}
import org.apache.spark.mllib.linalg.{SparseVector, Vector}
case class SentenceFeatures(id: Long, docId: String, features: SparseVector)
class Featurizer(numStopwords: Int = 0) extends Serializable {
private val hashingTF = new HashingTF()
private val byIDF = Ordering[Double].on[(Int,Double)](_._2)
def apply(tokens: RDD[SentenceTokens]) : RDD[SentenceFeatures] = {
val idf = new IDF(minDocFreq = 2)
val termFrequencies = tokens.map(t => {
(t.id, t.docId, hashingTF.transform(t.tokens))
})
val idfModel = idf.fit(termFrequencies.map({ case (_, _, tf) => tf }))
val stopwordIndices = identifyStopwords(idfModel.idf.toSparse, numStopwords)
termFrequencies
.map({
case (id, docId, tf) =>
val tfidf = idfModel.transform(tf).toSparse
val features = removeStopwords(tfidf, stopwordIndices)
SentenceFeatures(id, docId, features)
})
.filter(_.features.indices.size > 0)
}
// Returns the hashed feature index `token` maps to (same hashing as used in apply).
def indexOf(token: String): Int = {
hashingTF.indexOf(token)
}
// Picks the indices of the `numStopwords` terms with the LOWEST IDF scores
// (i.e. the terms appearing in the most documents), to treat as stopwords.
private def identifyStopwords(idf: SparseVector, numStopwords: Int) = {
featureTuples(idf).sorted(byIDF).take(numStopwords).map(_._1)
}
// Drops the (index, value) entries of stopword dimensions from `tf`.
// The stopword indices are materialized into a Set once, so membership is
// O(1) per feature entry instead of a linear scan of the array — this runs
// for every feature of every sentence vector.
private def removeStopwords(tf: SparseVector, stopwordIndices: Array[Int]) = {
  val stopwordSet = stopwordIndices.toSet
  val (indices, values) =
    featureTuples(tf)
      .filterNot { case (index, _) => stopwordSet.contains(index) }
      .unzip
  new SparseVector(tf.size, indices.toArray, values.toArray)
}
// Pairs each active feature index with its value: Array[(index, value)].
private def featureTuples(featureVector: SparseVector) = {
featureVector.indices.zip(featureVector.values)
}
} | karlhigley/lexrank-summarizer | src/main/scala/io/github/karlhigley/lexrank/Featurizer.scala | Scala | mit | 1,748 |
/*
There's nothing particularly bad about this implementation,
except that it's somewhat monolithic and easy to get wrong.
Where possible, we prefer to assemble functions like this using
combinations of other functions. It makes the code more obviously
correct and easier to read and understand. Notice that in this
implementation we need special purpose logic to break out of our
loops early. In Chapter 5 we'll discuss ways of composing functions
like this from simpler components, without giving up the efficiency
of having the resulting functions work in one pass over the data.
It's good to specify some properties about these functions.
For example, do you expect these expressions to be true?
(xs append ys) startsWith xs
xs startsWith Nil
(xs append ys append zs) hasSubsequence ys
xs hasSubsequence Nil
*/
/* Whether `prefix` is a prefix of `l`: an empty prefix matches anything,
 * otherwise both heads must agree and the tails must match in turn. */
@annotation.tailrec
def startsWith[A](l: List[A], prefix: List[A]): Boolean = (l, prefix) match {
  case (_, Nil) => true
  case (Cons(x, xs), Cons(y, ys)) =>
    if (x == y) startsWith(xs, ys) else false
  case _ => false
}
/* Whether `sub` occurs contiguously anywhere inside `sup`, checked by
 * sliding a startsWith test along every suffix of `sup`. */
@annotation.tailrec
def hasSubsequence[A](sup: List[A], sub: List[A]): Boolean =
  if (startsWith(sup, sub)) true
  else sup match {
    case Cons(_, tail) => hasSubsequence(tail, sub)
    case Nil => sub == Nil
  }
| lucaviolanti/scala-redbook | answerkey/datastructures/24.answer.scala | Scala | mit | 1,255 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package repositories.application
import model.{ ApplicationRoute, SchemeId }
import model.ApplicationRoute._
import model.ApplicationStatus.ApplicationStatus
import model.Candidate
import model.EvaluationResults.Result
import model.persisted._
import org.joda.time.LocalDate
import reactivemongo.bson.{ BSONDocument, _ }
import repositories._
// BSON readers for application documents. NOTE: the notification reader uses
// Option.get on every field, so a document missing any of those fields will
// throw NoSuchElementException rather than yield a domain error.
trait GeneralApplicationRepoBSONReader extends BaseBSONReader {
// Reads the minimal fields needed to notify a candidate about their application.
implicit val toApplicationForNotification: BSONDocumentReader[ApplicationForNotification] = bsonReader {
(doc: BSONDocument) => {
val applicationId = doc.getAs[String]("applicationId").get
val userId = doc.getAs[String]("userId").get
val applicationStatus = doc.getAs[ApplicationStatus]("applicationStatus").get
val personalDetailsRoot = doc.getAs[BSONDocument]("personal-details").get
val preferredName = personalDetailsRoot.getAs[String]("preferredName").get
ApplicationForNotification(applicationId, userId, preferredName, applicationStatus)
}
}
// Reads a Candidate; most fields are optional and default to None / "" when absent.
implicit val toCandidate: BSONDocumentReader[Candidate] = bsonReader {
(doc: BSONDocument) => {
val userId = doc.getAs[String]("userId").getOrElse("")
val applicationId = doc.getAs[String]("applicationId")
val testAccountId = doc.getAs[String]("testAccountId")
// If the application has no applicationRoute it is legacy data and
// must be interpreted as Faststream.
val applicationRoute = doc.getAs[ApplicationRoute]("applicationRoute").getOrElse(ApplicationRoute.Faststream)
val applicationStatus = doc.getAs[String]("applicationStatus")
val psRoot = doc.getAs[BSONDocument]("personal-details")
val firstName = psRoot.flatMap(_.getAs[String]("firstName"))
val lastName = psRoot.flatMap(_.getAs[String]("lastName"))
val preferredName = psRoot.flatMap(_.getAs[String]("preferredName"))
val dateOfBirth = psRoot.flatMap(_.getAs[LocalDate]("dateOfBirth"))
Candidate(userId, applicationId, testAccountId, None, firstName, lastName, preferredName, dateOfBirth, None, None, None,
Some(applicationRoute), applicationStatus)
}
}
}
| hmrc/fset-faststream | app/repositories/application/GeneralApplicationRepoBSONReader.scala | Scala | apache-2.0 | 2,750 |
package com.eevolution.context.dictionary.domain.api.service
import com.eevolution.context.dictionary._
import com.eevolution.context.dictionary.domain.model.Entity
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: victor.perez@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
* Created by victor.perez@e-evolution.com , www.e-evolution.com
*/
/**
* Entity Service
*/
trait EntityService extends api.Service[Entity, Int] {
// Returns the attributes of the entity identified by `id`.
// NOTE(review): the Object result type is untyped — presumably a collection
// of attribute rows; confirm against implementations and narrow if possible.
def getAttributes(id: Int) : Object
} | adempiere/ADReactiveSystem | dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/api/service/EntityService.scala | Scala | gpl-3.0 | 1,203 |
/*
* Copyright 2015 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.pattern
package object timeoutpolicy {

  /** Shorthand for the fixed (non-adaptive) timeout rule. */
  val fixedRule = FixedTimeoutRule

  /**
   * Conversions that let numeric literals read as timeout rules,
   * e.g. `3.sigma` or `95.percentile`.
   * Public members carry explicit result types, per API best practice.
   */
  trait TimeoutRuleConversions extends Any {

    /**
     * Timeout rule based on a given sigma (standard deviation) of previous
     * response times.
     * @return the timeout rule
     */
    def sigma: TimeoutRule

    /**
     * Timeout rule based on a percentile of previous response times.
     * @return the timeout rule
     */
    def percentile: TimeoutRule

    /**
     * Alias of [[sigma]].
     * @return the timeout rule
     */
    def σ: TimeoutRule = sigma

    /**
     * Alias of [[percentile]].
     * @return the timeout rule
     */
    def percent: TimeoutRule = percentile

    /**
     * Alias of [[percentile]].
     * @return the timeout rule
     */
    def `%ile`: TimeoutRule = percentile
  }

  /** Enables timeout-rule syntax on Int literals, e.g. `2.sigma`. */
  implicit final class TimeoutRuleInt(private val n: Int) extends AnyVal with TimeoutRuleConversions {
    override def sigma: TimeoutRule = SigmaTimeoutRule(n)
    override def percentile: TimeoutRule = PercentileTimeoutRule(n.toDouble)
  }

  /** Enables timeout-rule syntax on Double literals, e.g. `2.5.sigma`. */
  implicit final class TimeoutRuleDouble(private val n: Double) extends AnyVal with TimeoutRuleConversions {
    override def sigma: TimeoutRule = SigmaTimeoutRule(n)
    override def percentile: TimeoutRule = PercentileTimeoutRule(n)
  }
}
| keshin/squbs | squbs-pattern/src/main/scala/org/squbs/pattern/timeoutpolicy/package.scala | Scala | apache-2.0 | 1,733 |
package za.jwatson.glycanoweb.react.bootstrap
import japgolly.scalajs.react.extra.Reusability
// Bootstrap CSS vocabulary: contextual styles (btn-primary, alert-danger, ...)
// and component sizes (btn-lg, ...), each rendering to its CSS suffix.
object Bootstrap {
// Contextual style; toString yields the CSS suffix (e.g. "primary").
case class Style(name: String) { override def toString = name }
object Default extends Style("default")
object Primary extends Style("primary")
object Success extends Style("success")
object Info extends Style("info")
object Warning extends Style("warning")
object Danger extends Style("danger")
object LinkStyle extends Style("link")
object Style {
// Styles are value-compared for scalajs-react re-render decisions.
implicit val reusability: Reusability[Style] = Reusability.by_==
}
// Component size; toString yields the CSS suffix (e.g. "lg").
case class Size(name: String) { override def toString = name }
object Lg extends Size("lg")
object Md extends Size("md")
object Sm extends Size("sm")
object Xs extends Size("xs")
object Size {
// Sizes are value-compared for scalajs-react re-render decisions.
implicit val reusability: Reusability[Size] = Reusability.by_==
}
}
| james-za/glycano | core/src/main/scala/za/jwatson/glycanoweb/react/bootstrap/Bootstrap.scala | Scala | mit | 836 |
/*
* Copyright 2015 the original author or authors.
* @https://github.com/scouter-project/scouter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.server.core
import java.util
import java.util.function.Consumer
import scouter.lang.pack.{XLogDiscardTypes, XLogPack, XLogProfilePack, XLogProfilePack2}
import scouter.server.util.ThreadScala
import scouter.server.{Configure, Logger}
import scouter.util.{LongKeyLinkedMap, RequestQueue}
// Daemon that re-processes previously delayed profile packs: packs whose
// discard decision resolved to DISCARD_NONE are forwarded on to ProfileCore.
object ProfileDelayingsRecoverCore {
val conf = Configure.getInstance();
// Bounded queue (capacity 5) of batches, keyed maps of delayed profile packs.
val queue = new RequestQueue[LongKeyLinkedMap[util.List[XLogProfilePack2]]](5);
ThreadScala.startDaemon("scouter.server.core.ProfileDelayingRecoverCore", {CoreRun.running}) {
// Blocks until a batch is available, then reports queue depth.
val packsMap = queue.get();
ServerStat.put("profile.core-r.queue", queue.size());
if (Configure.WORKABLE) {
val enumeration = packsMap.values();
while(enumeration.hasMoreElements) {
val packs = enumeration.nextElement();
packs.forEach(new Consumer[XLogProfilePack2] {
override def accept(pack: XLogProfilePack2): Unit = {
// Only packs that escaped discarding are recovered.
if (pack.discardType == XLogDiscardTypes.DISCARD_NONE) {
ProfileCore.add(pack);
}
}
});
}
}
}
// Enqueues a batch for recovery; logs (and drops the batch) when the queue is full.
def add(packsMap: LongKeyLinkedMap[util.List[XLogProfilePack2]]) {
val ok = queue.put(packsMap);
if (!ok) {
Logger.println("S110-1", 10, "queue exceeded!!");
}
}
}
| scouter-project/scouter | scouter.server/src/main/scala/scouter/server/core/ProfileDelayingsRecoverCore.scala | Scala | apache-2.0 | 2,107 |
package it.polimi.genomics.core.DataStructures.Builtin
import it.polimi.genomics.core.DataStructures.RegionAggregate.RegionsToRegion
/**
* Created by pietro on 20/07/15.
*/
// Factory for MAP aggregation functions producing a RegionsToRegion aggregate.
trait MapFunctionFactory {
/**
* provides a nullary map aggregation function
* @param name name of the function
* @param output_name optionally the name of the new field in the schema
* @return the aggregate function
*/
def get(name:String, output_name : Option[String]):RegionsToRegion
/**
* provides a unary map aggregation function
* @param name name of the function
* @param position the position of the field which is the input of the function
* @param out_name optionally the name of the new field in the schema
* @return the aggregate function, configured to use the provided field
*/
def get(name:String, position:Int, out_name : Option[String]):RegionsToRegion
}
| DEIB-GECO/GMQL | GMQL-Core/src/main/scala/it/polimi/genomics/core/DataStructures/Builtin/MapFunctionFactory.scala | Scala | apache-2.0 | 818 |
package com.originate.scalypher.where
import com.originate.scalypher.Label
import com.originate.scalypher.util.Exceptions.MismatchedInterpolatedStringWithReferences
import com.originate.scalypher.PropertyName
import com.originate.scalypher.types.Identifiable
import com.originate.scalypher.types.IdentifiableMap
import com.originate.scalypher.util.Exceptions.IdentifierDoesntExistException
import com.originate.scalypher.path.{AnyNode, Node, Relationship}
import scala.language.implicitConversions
// A single WHERE-clause condition that renders itself to a Cypher fragment.
sealed trait Condition {
// Renders this condition using the identifier names assigned in `identifiableMap`.
def toQuery(identifiableMap: IdentifiableMap): String
// All graph objects this condition refers to (used to build the identifier map).
def identifiables: Set[Identifiable]
}
object Condition {
// Lets a bare Condition (or Option of one) be used wherever a Where clause
// is expected — intentional DSL sugar.
implicit def toWhere(condition: Condition): Where =
Where(condition)
implicit def optionToWhere(condition: Option[Condition]): Option[Where] =
condition map (Where(_))
}
// Renders `ref1 <comparator> ref2`, e.g. `a.age > b.age`.
case class Comparison(reference1: Reference, comparator: Comparator, reference2: Reference) extends Condition {
  def toQuery(identifiableMap: IdentifiableMap): String =
    s"${reference1.toQuery(identifiableMap)} ${comparator.toQuery} ${reference2.toQuery(identifiableMap)}"

  def identifiables: Set[Identifiable] =
    reference1.getReferenceable.toSet ++ reference2.getReferenceable.toSet
}
// Renders a null check on a reference, e.g. `a.name IS NULL`.
case class NullCondition(reference: Reference, check: NullCheck) extends Condition {
  def toQuery(identifiableMap: IdentifiableMap): String =
    s"${reference.toQuery(identifiableMap)} ${check.toQuery}"

  def identifiables: Set[Identifiable] =
    reference.getReferenceable.toSet
}
// Renders a predicate over a collection, e.g. `ALL (x IN coll WHERE ...)`.
case class PredicateCondition(
predicate: Predicate,
projection: Collection,
where: ObjectReference => Where
) extends Condition {
def toQuery(identifiableMap: IdentifiableMap): String = {
// Bind a fresh anonymous node to the loop variable "x" so the
// caller-supplied `where` can reference it.
// NOTE(review): the identifier "x" is hard-coded; presumably it cannot
// collide with names already in identifiableMap — confirm.
val identifier = "x"
val identifiable = AnyNode()
val adjustedMap = identifiableMap + (identifiable -> identifier)
val conditionString = where(ObjectReference(identifiable)).toQuery(adjustedMap)
Seq(
predicate.toQuery,
s"($identifier IN ${projection.toQuery(identifiableMap)} WHERE $conditionString)"
) mkString " "
}
def identifiables: Set[Identifiable] =
projection.identifiables
}
// A raw expression string with `?` placeholders, each substituted in order by
// the rendered form of the corresponding reference. Throws if the number of
// `?` characters does not match the number of references.
// NOTE(review): there is no escape for a literal '?' in the string.
case class Expression(string: String, references: Reference*) extends Condition {
def toQuery(identifiableMap: IdentifiableMap): String = {
val questionMarksCount = (string filter (_ == '?')).size
if (questionMarksCount != references.size)
throw new MismatchedInterpolatedStringWithReferences(string, questionMarksCount, references.size)
else {
// Substitute one placeholder per reference, left to right.
val expression = references.foldLeft(string) { (acc, reference) =>
// avoiding replaceFirst because it has special handling of escape characters:
// http://docs.oracle.com/javase/7/docs/api/java/lang/String.html#replaceFirst%28java.lang.String,%20java.lang.String%29
val pieces = acc.split("[?]", 2)
Seq(pieces.lift(0), Some(reference.toQuery(identifiableMap)), pieces.lift(1)).flatten mkString ""
}
s"($expression)"
}
}
def identifiables: Set[Identifiable] =
(references flatMap (_.getReferenceable)).toSet
}
// Matches nodes with no relationships (optionally restricted to `labels`);
// renders as `NOT (n)-[:LABELS]-()`.
case class HasNoRelationships(node: Node, labels: Seq[Label] = Seq.empty) extends Condition {
  def toQuery(identifiableMap: IdentifiableMap): String = {
    // The node must already have an identifier assigned by the query builder.
    val identifier = identifiableMap.getOrElse(node, throw new IdentifierDoesntExistException())
    val labelsQuery = Relationship.kindsToQuery(labels)
    s"NOT ($identifier)-[$labelsQuery]-()"
  }

  def identifiables: Set[Identifiable] = Set(node)

  // Returns a copy with one more label restriction appended.
  def withLabel(label: Label): HasNoRelationships = copy(labels = labels :+ label)
}
| Originate/scalypher | src/main/scala/where/Condition.scala | Scala | mit | 3,578 |
package com.whitepages.cloudmanager.action
import com.whitepages.cloudmanager.client.SolrRequestHelpers
import com.whitepages.cloudmanager.state.ClusterManager
import org.apache.solr.client.solrj.impl.CloudSolrServer
import org.apache.solr.common.params.ModifiableSolrParams
import org.apache.solr.common.params.CollectionParams.CollectionAction
// Creates or updates a Solr collection alias so that `aliasName` points at
// exactly the collections in `aliasTo`.
case class UpdateAlias(aliasName: String, aliasTo: Seq[String]) extends Action {
override val preConditions: List[StateCondition] = List(
StateCondition("Target collection names exist",
(state) => aliasTo.forall(Conditions.collectionExists(_)(state))
)
)
override def execute(clusterManager: ClusterManager): Boolean = {
// CREATEALIAS both creates and replaces an existing alias.
val params = new ModifiableSolrParams
params.set("action", CollectionAction.CREATEALIAS.toString)
params.set("name", aliasName)
params.set("collections", aliasTo.mkString(","))
val success = SolrRequestHelpers.submitRequest(clusterManager.client, params)
// aliases aren't part of the cluster state object, so can't do this in postConditions
val aliases = clusterManager.aliasMap
// Verify membership order-insensitively by comparing sorted collection lists.
success &&
aliases.contains(aliasName) &&
aliases(aliasName).split(",").sorted.mkString(",") == aliasTo.sorted.mkString(",")
}
override val postConditions: List[StateCondition] = List()
override def toString = s"UpdateAlias: alias: $aliasName collections: " + aliasTo.mkString(",")
}
| randomstatistic/solrcloud_manager | src/main/scala/com/whitepages/cloudmanager/action/UpdateAlias.scala | Scala | apache-2.0 | 1,407 |
package x7c1.wheat.modern.sequence
import x7c1.wheat.modern.features.HasShortLength
import scala.language.higherKinds
// Minimal indexed-sequence abstraction: a length plus safe positional lookup.
trait Sequence[+A]{
// Number of elements in the sequence.
def length: Int
// Element at `position`, or None when the position is out of range.
def findAt(position: Int): Option[A]
}
object Sequence {

  /**
   * Adapts a standard Seq to this Sequence trait.
   * Seq#lift performs exactly the bounds-checked lookup the previous
   * hand-rolled isDefinedAt/apply match did: Some(xs(position)) when the
   * index is defined, None otherwise.
   */
  def from[A](xs: Seq[A]): Sequence[A] = new Sequence[A] {
    override def findAt(position: Int) = xs.lift(position)
    override def length: Int = xs.length
  }

  // Syntax enrichments plus the implicit evidence objects they rely on.
  implicit class traverse[A: HasShortLength](
    override protected val underlying: Sequence[A]) extends SequenceTraverser[A]

  implicit class map[A, F[_] <: Sequence[_]](
    override protected val underlying: F[A]) extends SequenceMapping[A, F]

  implicit object canMapFrom extends DefaultCanMapFrom

  implicit class filter[A: HasShortLength, F[_] <: Sequence[_]](
    override protected val underlying: F[A]) extends SequenceFilter[A, F]

  implicit object canFilterFrom extends DefaultCanFilterFrom

  implicit class slice[A, F[_] <: Sequence[_]](
    override protected val underlying: F[A] ) extends SequenceSlice[A, F]

  implicit object canSliceFrom extends DefaultCanSliceFrom

  implicit object canDelegate extends DefaultCanDelegate
}
| x7c1/Linen | wheat-modern/src/main/scala/x7c1/wheat/modern/sequence/Sequence.scala | Scala | mit | 1,214 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.logical
import java.util.{List => JList}
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core.{SetOp, Union}
import org.apache.calcite.rel.logical.LogicalUnion
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.flink.table.plan.nodes.FlinkConventions
import scala.collection.JavaConverters._
// Flink's logical representation of a Calcite Union in the LOGICAL convention.
class FlinkLogicalUnion(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    inputs: JList[RelNode],
    all: Boolean)
  extends Union(cluster, traitSet, inputs, all)
  with FlinkLogicalRel {

  override def copy(traitSet: RelTraitSet, inputs: JList[RelNode], all: Boolean): SetOp =
    new FlinkLogicalUnion(cluster, traitSet, inputs, all)

  // Cost model: proportional to the summed row counts over all inputs.
  override def computeSelfCost(planner: RelOptPlanner, metadata: RelMetadataQuery): RelOptCost = {
    val totalRows = this.getInputs.asScala.foldLeft(0.0) { (acc, input) =>
      acc + metadata.getRowCount(input)
    }
    planner.getCostFactory.makeCost(totalRows, 0, 0)
  }
}
// Converts a Calcite LogicalUnion into FlinkLogicalUnion (LOGICAL convention).
private class FlinkLogicalUnionConverter
extends ConverterRule(
classOf[LogicalUnion],
Convention.NONE,
FlinkConventions.LOGICAL,
"FlinkLogicalUnionConverter") {
/**
* Only translate UNION ALL.
* (UNION DISTINCT is deliberately not matched by this rule.)
*/
override def matches(call: RelOptRuleCall): Boolean = {
val union: LogicalUnion = call.rel(0).asInstanceOf[LogicalUnion]
union.all
}
override def convert(rel: RelNode): RelNode = {
val union = rel.asInstanceOf[LogicalUnion]
val traitSet = rel.getTraitSet.replace(FlinkConventions.LOGICAL)
// Each input is itself converted to the LOGICAL convention.
val newInputs = union.getInputs.asScala
.map(input => RelOptRule.convert(input, FlinkConventions.LOGICAL)).asJava
new FlinkLogicalUnion(rel.getCluster, traitSet, newInputs, union.all)
}
}
object FlinkLogicalUnion {
// Rule instance registered with the planner.
val CONVERTER: ConverterRule = new FlinkLogicalUnionConverter()
// Builds a FlinkLogicalUnion from pre-converted inputs.
// Requires at least one input: the cluster is taken from inputs.get(0).
def create(inputs: JList[RelNode], all: Boolean): FlinkLogicalUnion = {
val cluster: RelOptCluster = inputs.get(0).getCluster
val traitSet: RelTraitSet = cluster.traitSetOf(FlinkConventions.LOGICAL)
new FlinkLogicalUnion(cluster, traitSet, inputs, all)
}
}
| jinglining/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/nodes/logical/FlinkLogicalUnion.scala | Scala | apache-2.0 | 3,045 |
package teststate
import java.time.Instant
import scala.annotation.nowarn
import scala.concurrent.duration._
import teststate.Exports._
import teststate.TestUtil._
import utest._
object RetryTest extends TestSuite {
// Debug tracer for this spec; the println is intentionally disabled.
// @nowarn silences the "unused parameter" warning while it is disabled.
@nowarn("cat=unused")
def debug(s: => String = ""): Unit =
() // println(s)
// Injects a configurable number of failures into a wrapped computation:
// after simFail(n), the next n invocations of apply throw; later calls succeed.
// Also counts total invocations (calls).
class SimFailure(desc: String) {
var _calls = 0
def calls() = _calls
var _fails = 0
// Arm the next `i` invocations to fail.
def simFail(i: Int): Unit = {
_fails = i
debug(s"SimFailure on $desc: Setting fails to ${_fails}")
}
// var _skips = 0
// def simSkip(i: Int): Unit = {
// _skips = i
// debug(s"SimFailure on $desc: Setting skips to ${_skips}")
// }
// Run `a`, counting the call, and throw while armed failures remain.
def apply[A](a: => A): A = {
_calls += 1
if (_fails > 0) {
_fails -= 1
debug(s"SimFailure on $desc: ${_fails} failures remaining")
sys error s"SimFailure on $desc: ${_fails} failures remaining"
// } else if (_skips > 0) {
// _skips -= 1
// debug(s"SimFailure on $desc: ${_skips} skips remaining")
// None
} else
a
}
}
// Mutable system-under-test: an integer counter with injectable failures on
// read (failOnValue) and increment (failOnInc), an invariant that can be
// temporarily invalidated, and a queue of deferred updates that apply one
// step per value() read (simulating eventually-visible state).
class Ref {
var _value = 0
val failOnValue = new SimFailure("value()")
val failOnInc = new SimFailure("inc()")
var _invariantKO = 0
var _onValue = List.empty[() => Unit]
// False while an invalidation is pending; each check consumes one KO tick.
def invariantOk(): Boolean = synchronized {
if (_invariantKO > 0) {
_invariantKO -= 1
false
} else
true
}
// Make the next 3 invariant checks fail.
def invalidateInvariant() = synchronized {
_invariantKO = 3
}
// Defer setting _value: three no-op reads must happen before the new value lands.
def queueUpdate(newValue: Int): Unit = synchronized {
_onValue =
_onValue :::
(() => debug("updated queued 1/3")) ::
(() => debug("updated queued 2/3")) ::
(() => debug("updated queued 3/3")) ::
(() => _value = newValue) ::
Nil
}
// Read the counter; pops and runs one queued update step per call, and may
// throw if failOnValue is armed.
def value(): Int = synchronized {
_onValue match {
case h :: t =>
_onValue = t
h()
case _ =>
}
// debug("="*200)
// new Exception().printStackTrace()
// debug("="*200)
val v = failOnValue(_value)
debug(s"value() = $v (calls=${failOnValue.calls()})")
v
}
// Increment the counter; may throw if failOnInc is armed.
def inc(): Unit = synchronized {
debug("inc ← " + _value)
failOnInc(_value += 1)
debug("inc → " + _value)
}
// Snapshot for the observer; note this performs a real value() read and an
// invariant check, so it consumes queued updates / armed failures.
def toObs(): Obs = synchronized {
val v = value()
val o = Obs(
value = v,
valueCalls = failOnValue.calls(),
incCalls = failOnInc.calls(),
invariantOk = invariantOk())
// value2 = () => value2())
debug("toObs() -> "+o)
o
}
// Apply a setup function and return this, for fluent construction.
def apply(f: Ref => Unit): this.type = {
f(this)
this
}
}
// Snapshot of Ref as seen by the test observer.
case class Obs(value: Int, valueCalls: Int, incCalls: Int, invariantOk: Boolean)
/*
var _value2 = 100
val failOnValue2 = new SimFailure("value2()")
def value2(): Int = synchronized { failOnValue2(_value2) }
val value2 = dsl.focus("value2").value(_.obs.value2())
val failOnValue2 = dsl.action("failOnValue2")(_.ref.failOnValue2.simFail(3))
case class Obs(value: Int, valueCalls: Int, incCalls: Int, invariantOk: Boolean, value2: () => Int)
*/
// Shared DSL fixtures: focuses over the observation, canned actions with
// failure injection, and the retry policies exercised by the tests below.
type State = Int
val dsl = Dsl[Ref, Obs, State]
val value = dsl.focus("value").value(_.obs.value)
val valueCalls = dsl.focus("valueCalls").value(_.obs.valueCalls)
val incCalls = dsl.focus("incCalls").value(_.obs.incCalls)
val invariantOk = dsl.focus("invariantOk").value(_.obs.invariantOk)
val invariant = invariantOk.assert(true)
// Arm 3 failures on reads / increments respectively.
val failOnValue = dsl.action("failOnValue.simFail(3)")(_.ref.failOnValue.simFail(3))
val failOnInc = dsl.action("failOnInc.simFail(3)")(_.ref.failOnInc.simFail(3)) +> valueCalls.assert.increment +> value.assert.noChange
val inc = dsl.action("inc")(_.ref.inc())
val queueUpdate = dsl.action("queueUpdate")(_.ref.queueUpdate(123))
// inc variants with the expected observation deltas under each failure mode.
val incNormal = inc +> incCalls.assert.increment +> valueCalls.assert.increment +> value.assert.increment
val incFailOnValue = inc +> incCalls.assert.increment +> valueCalls.assert.increaseBy(4) +> value.assert.increment
val incFailOnInc = inc +> incCalls.assert.increment +> valueCalls.assert.increment +> value.assert.increment // incCalls +1 only cos that's what a successful action does
val retryPolicy = Retry.Policy.fixedIntervalAndAttempts(Duration.Zero, 3)
val insufficientRetryPolicy = Retry.Policy.fixedIntervalAndAttempts(Duration.Zero, 2)
val hugeRetryPolicy = Retry.Policy.fixedIntervalAndAttempts(Duration.Zero, 11)
val observer = Observer((_: Ref).toObs())
def mkTest(plan: dsl.Plan, refMod: Ref => Unit = _ => ()) =
plan.addInvariants(invariant).test(observer).withInitialState(0).withLazyRef((new Ref)(refMod))
// Asserts the given plan passes WITH the standard retry policy and fails
// WITHOUT retries (proving the retry machinery is what saves it).
def assertRetryWorks(plan: dsl.Plan, refMod: Ref => Unit = _ => ()): Unit = {
_assertRetryWorks(plan, refMod)
()
}
// Same as assertRetryWorks but returns the passing report for further checks.
def _assertRetryWorks(plan: dsl.Plan, refMod: Ref => Unit = _ => ()): Report[String] = {
val test = mkTest(plan, refMod)
debug()
// With appropriate retry
val retryResult: Report[String] = test.withRetryPolicy(retryPolicy).run()
retryResult.assert()
// No retry
val resultWithoutRetry = test.run()
assert(resultWithoutRetry.failed)
debug()
// TODO Too little retry
// val resultWithInsufficientRetry = test.withRetryPolicy(insufficientRetryPolicy).run()
// assert(resultWithInsufficientRetry.failed)
// debug()
retryResult
}
// Produces a Ref setup function that throws on its first 3 invocations,
// then succeeds — used to exercise retries during ref acquisition.
def explodingRef(): Ref => Unit = {
var i = 3
_ =>
if (i > 0) {
i -= 1
???
} else
()
}
// Builds a Retry.Ctx whose history is all instants but the last, with the
// last instant as the most recent attempt.
def retryCtx(i1: Instant, in: Instant*): Retry.Ctx = {
val is = i1 +: in.toVector
Retry.Ctx(Retry.Scope.Action, is.init, is.last)
}
// Instant +/- Duration arithmetic sugar for the policy tests.
implicit class InstantExt(private val self: Instant) extends AnyVal {
def +[B](d: Duration): Instant = self.plusMillis(d.toMillis)
def -[B](d: Duration): Instant = self.minusMillis(d.toMillis)
}
override def tests = Tests {
// A huge retry count must not blow the stack on a permanently-failing check.
"stackSafe" - {
val dsl = Dsl[Unit, Unit, Unit]
val fail = Some("fail")
val result: Report[String] =
Plan.action(dsl.emptyAction +> dsl.point("fail")(_ => fail))
.testU
.stateless
.withRetryPolicy(Retry.Policy.fixedIntervalAndAttempts(Duration.Zero, teststate.Platform.StackTestSize))
.runU()
// println(result.format)
assert(result.failed)
}
// Pure tests of Policy.fixedIntervalWithTimeout.nextTry scheduling decisions.
"policy" - {
"timeout" - {
val interval = 1 second
val timeout = 6 seconds
val policy = Retry.Policy.fixedIntervalWithTimeout(interval, timeout)
val now = Instant.now()
// Each duration is "how long ago" an attempt happened; expects the next try time.
def test(ds: Duration*)(expect: Option[Instant]) = {
val is = ds.map(now - _)
val ctx = retryCtx(is.head, is.tail: _*)
val actual = policy.nextTry(ctx, now)
assert(actual == expect)
}
// +--- 1s --+ (interval)
// | . |
// -200ms | +800ms
"interval1" - test(200.millis)(Some(now + 800.millis))
// 4s + interval is in the past! result should be now
// -4s | +0ms
"limitToNow" - test(4.seconds)(Some(now))
// +-------------- 6s --------------+
// +-- 5.3s --+-- 0.5s --|-- 0.2s --+
// | | . |
// -5.8s -0.5s | +0.2s
"limitToTimeout" - test(5.8.seconds,0.5.seconds)(Some(now + 200.millis))
// +-- 4s --+-- 1s --+ (interval)
// | | . |
// -4.3s -0.3s | +0.7s
"interval2" - test(4.3.seconds, 0.3.seconds)(Some(now + 0.7.seconds))
// +-- 9s --+-- 0s --+ (scheduler caused huge delay, try once after deadline)
// | | |
// -9s | +0s
"overOnce" - test(9.seconds)(Some(now))
// +--- 6s ---+---- 1s ----+ (stop, already tried once past deadline)
// | | | |
// -6.8s deadline -0.2s |
"overTwice" - test(6.8.seconds, 0.2.seconds)(None)
}
}
// Retries during the initial phase: ref acquisition, first observation,
// and initial invariant check.
"initial" - {
"ref" - {
val refMod = explodingRef()
val test = dsl.emptyPlan.addInvariants(invariant).test(observer).withInitialState(0).withLazyRef((new Ref)(refMod))
val result: Report[String] = test.withRetryPolicy(retryPolicy).run()
assert(!result.failed)
}
"obs" - {
val plan = Plan.action(dsl.emptyAction <+ valueCalls.assert(4))
assertRetryWorks(plan, _.failOnValue.simFail(3))
}
"invariant" - {
assertRetryWorks(dsl.emptyPlan, _.invalidateInvariant())
}
}
// Retries during the action phase: failing refs, observations, actions,
// subtests, groups, pre/post conditions, invariants and state updates.
"action" - {
"ref" - {
val ref = new Ref
var refFn = (_: Ref) => ()
val plan = Plan.action(dsl.action("hack")(_ => refFn = explodingRef()) >> dsl.emptyAction)
val test = plan.addInvariants(invariant).test(observer).withInitialState(0).withRefByName(ref(refFn))
val result: Report[String] = test.withRetryPolicy(retryPolicy).run()
result.assert()
}
"obs" - {
val plan = Plan.action(failOnValue >> dsl.emptyAction +> valueCalls.assert(5))
assertRetryWorks(plan)
}
"actionSingle" - {
val plan = Plan.action(incNormal >> failOnInc >> incFailOnInc >> incNormal)
assertRetryWorks(plan)
}
"actionSubtest" - {
val subtest = Plan.action(failOnInc >> incFailOnInc).asAction("subtest")
val plan = Plan.action(incNormal >> subtest >> incNormal)
assertRetryWorks(plan)
}
"actionGroup" - {
val group = (failOnInc >> incFailOnInc >> incNormal).times(4)
val plan = Plan.action(incNormal >> group >> incNormal)
assertRetryWorks(plan)
}
"actionDueToBadObs" - {
val plan = Plan.action(queueUpdate >> dsl.action("Throw unless obs.value = 123")(x => assert(x.obs.value == 123)))
assertRetryWorks(plan)
}
"preCondFail" - {
val plan = Plan.action(queueUpdate >> (dsl.emptyAction <+ value.assert(123)))
assertRetryWorks(plan)
}
"postCondFail" - {
val plan = Plan.action(queueUpdate +> value.assert(123))
assertRetryWorks(plan)
}
"postEmptyFail" - {
val plan = Plan.action(queueUpdate >> dsl.emptyAction +> value.assert(123))
assertRetryWorks(plan)
}
// Actually no, checks are supposed to be pure!
// 'preCondCrash {
// val plan = Plan.action(failOnValue2 >> (dsl.emptyAction <+ value2.assert(100)))
// assertRetryWorks(plan)
// }
// 'postCondCrash {
// val plan = Plan.action(failOnValue2 >> (dsl.emptyAction +> value2.assert(100)))
// assertRetryWorks(plan)
// }
"invariant" - {
val plan = Plan.action(dsl.action("invalidate invariant")(_.ref.invalidateInvariant()))
assertRetryWorks(plan)
}
// When both the action and the subsequent observation fail, the report must
// show both errors after retries are exhausted.
"reportObsErrorAfterActionError" - {
val plan = Plan.action(dsl.action("I love my little one, Nim") { x =>
x.ref.failOnValue.simFail(3)
???
})
val report = mkTest(plan).withRetryPolicy(retryPolicy).run()
assertRun(report,
"""
|✓ Initial state.
| ✓ invariantOk should be true.
|✘ I love my little one, Nim
| ✘ Action -- scala.NotImplementedError: an implementation is missing
| ✘ Observation -- java.lang.RuntimeException: SimFailure on value(): 0 failures remaining
|Performed 1 action, 1 check (with 3 retries).
""".stripMargin)
}
// A state update attached to a retried action must be applied exactly once.
"state" - {
val plan = Plan.action(
(failOnInc >> incFailOnInc >> incNormal).updateState(_ + 1) >>
dsl.action("blah")(_ => ()) +> dsl.focus("state").value(_.state).assert(1)
)
assertRetryWorks(plan)
}
}
}
}
| japgolly/test-state | core/shared/src/test/scala/teststate/RetryTest.scala | Scala | apache-2.0 | 11,866 |
/*******************************************************************************
* Copyright (c) 2014 Łukasz Szpakowski.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
******************************************************************************/
package pl.luckboy.purfuncor.backend.interp.spec
import scala.util.parsing.input.OffsetPosition
import scalaz._
import scalaz.Scalaz._
import org.scalatest.FlatSpec
import org.scalatest.Inside
import org.scalatest.matchers.ShouldMatchers
import pl.luckboy.purfuncor.common._
import pl.luckboy.purfuncor.frontend.kinder
import pl.luckboy.purfuncor.frontend.instant
import pl.luckboy.purfuncor.frontend.SimpleTerm
import pl.luckboy.purfuncor.frontend.TypeSimpleTerm
import pl.luckboy.purfuncor.frontend.AbstractCombinator
import pl.luckboy.purfuncor.frontend.parser
import pl.luckboy.purfuncor.frontend.resolver
import pl.luckboy.purfuncor.frontend.resolver.Symbol
import pl.luckboy.purfuncor.frontend.resolver.GlobalSymbol
import pl.luckboy.purfuncor.frontend.resolver.LocalSymbol
import pl.luckboy.purfuncor.backend.interp._
import pl.luckboy.purfuncor.common.Tree
import pl.luckboy.purfuncor.backend.interp.Value
/**
 * Behaviour-style specification of the interpreter back-end.
 *
 * The whole suite is written as a single shared-behaviour method
 * (`interpreter`) so the same battery of tests can be instantiated for
 * different environment / data representations via `should behave like`
 * (see the bottom of the file).
 */
class InterpreterSpec extends FlatSpec with ShouldMatchers with Inside
{
  // Shared-behaviour method. The type parameters abstract over the tree /
  // combinator / environment representations; the function arguments supply
  // the tree transformer (f2) and term transformer (g4) for a given initial
  // data value. NOTE(review): the exact meaning of T..D is defined by the
  // single concrete instantiation at the bottom of this file — confirm there.
  def interpreter[T, U, V, W, X, C, E, D](emptyEnv: E, initData: D)(makeData: String => ValidationNel[AbstractError, D])(f2: D => Tree[GlobalSymbol, AbstractCombinator[Symbol, parser.LambdaInfo, TypeSimpleTerm[Symbol, parser.TypeLambdaInfo]], resolver.TreeInfo[parser.TypeLambdaInfo, resolver.TypeTreeInfo]] => State[E, ValidationNel[AbstractError, Tree[T, AbstractCombinator[U, V, W], X]]])(g4: D => (Term[SimpleTerm[Symbol, parser.LambdaInfo, TypeSimpleTerm[Symbol, parser.TypeLambdaInfo]]], E) => ValidationNel[AbstractError, Term[SimpleTerm[U, V, W]]])(implicit init: Initializer[NoValue[U, V, W, C], T, AbstractCombinator[U, V, W], E], eval: Evaluator[SimpleTerm[U, V, W], E, Value[U, V, W, C]], envSt: EnvironmentState[E, T, Value[U, V, W, C], InstanceValue[U, V, W, C]], enval: Environmental[E, Value[U, V, W, C]])
  {
    //TODO: add a test for the global variable contains the lambda-expression with the reference to itself
    //TODO: add a test for the global variable contains the let-expression with the reference to itself
    // Specialize the transformers with the initial data once for all tests.
    val f = f2(initData)
    val g3 = g4(initData)
    it should "interpret the term string" in {
      val (env, res) = Interpreter.interpretTermString("#iAdd 2 (#iMul 3 4)")(g3).run(emptyEnv)
      res should be ===(IntValue(14).success)
    }
    it should "interpret the tree string" in {
      val (env, res) = Interpreter.interpretTreeString("""
        f = #iAdd g h
        g = #iMul h 2
        h = #iSub 7 4
        """)(f).run(emptyEnv)
      res should be ===(().success.success)
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("f"))) should be ===(IntValue(9))
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("g"))) should be ===(IntValue(6))
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("h"))) should be ===(IntValue(3))
    }
    it should "initialize all independent variables" in {
      val (env, res) = Interpreter.interpretTreeString("f = 10; g = 20; h = 30")(f).run(emptyEnv)
      res should be ===(().success.success)
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("f"))) should be ===(IntValue(10))
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("g"))) should be ===(IntValue(20))
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("h"))) should be ===(IntValue(30))
    }
    it should "initialize all dependent variables" in {
      val (env, res) = Interpreter.interpretTreeString("""
        f = #iAdd (#iAdd k (g 1 2)) (j 3 4)
        g x y = #iMul (h x) y
        h x = #iAdd (#iNeg x) i
        i = 5
        j x y = #intFromDouble (#dDiv (#doubleFromInt (#iAdd 6 x)) (#doubleFromInt (#iSub y k)))
        k = #iAdd (l 7) (h 8)
        l x = #iAdd x 3
        """)(f).run(emptyEnv)
      res should be ===(().success.success)
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("f"))) should be ===(IntValue(12))
      inside(enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("g")))) { case CombinatorValue(_, _, _) => () }
      inside(enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("h")))) { case CombinatorValue(_, _, _) => () }
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("i"))) should be ===(IntValue(5))
      inside(enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("j")))) { case CombinatorValue(_, _, _) => () }
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("k"))) should be ===(IntValue(7))
      inside(enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("l")))) { case CombinatorValue(_, _, _) => () }
    }
    it should "interpret the term string with the global variables" in {
      val s = """
        f = 1
        g = 2
        h x = #iMul x 3
        """
      val (env, res) = Interpreter.interpretTreeString(s)(f).run(emptyEnv)
      // A fresh data value is derived from the same source so the term
      // transformer sees the tree's global definitions.
      val res2 = makeData(s)
      inside(res2) {
        case Success(data) =>
          val (env2, res3) = Interpreter.interpretTermString("#iAdd f (#iSub (h 10) g)")(g4(data)).run(env)
          res3 should be ===(IntValue(29).success)
      }
    }
    it should "interpret the let-expressions" in {
      val (env, res) = Interpreter.interpretTreeString("""
        f = let
            a = 10
            b = 20
          in
            #iMul (let
                c = 30
              in
                #iAdd (#iAdd a b) c) a
        """)(f).run(emptyEnv)
      res should be ===(().success.success)
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("f"))) should be ===(IntValue(600))
    }
    it should "interpret the applications of the lambda expressions" in {
      val (env, res) = Interpreter.interpretTreeString("""
        f = let
            a = 1
          in
            let
              b = \\x => #iAdd x a
              c = 3
              d = 4
            in
              (\\x y => #iAdd (#iMul (b x) c) (#iMul x y)) d (#iAdd d 5)
        """)(f).run(emptyEnv)
      res should be ===(().success.success)
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("f"))) should be ===(IntValue(51))
    }
    it should "interpret the partial applications" in {
      val (env, res) = Interpreter.interpretTreeString("""
        f x y z = #iAdd (#iMul x y) z
        g = let
            a = f 3
            b = \\x y => #iSub x y
          in
            let
              c = b 5
              d = a 6
            in
              #iAdd (#iAdd (a 7 8) (a 9 10)) (#iMul (c 11) (d 12))
        """)(f).run(emptyEnv)
      res should be ===(().success.success)
      enval.globalVarValueFromEnvironment(env)(GlobalSymbol(NonEmptyList("g"))) should be ===(IntValue(-114))
    }
    it should "interpret the term with the covered local variables" in {
      val (env, res) = Interpreter.interpretTermString("""
        let
          a = 14
          b = 11
        in
          #iMul (let
              a = 3
              b = 4
            in
              #iAdd a b) (#iSub a b)
        """)(g3).run(emptyEnv)
      res should be ===(IntValue(21).success)
    }
    // The following "complain" tests are ignored; they exercise stack traces
    // produced on runtime errors (e.g. division by zero).
    ignore should "complain at the term" in {
      val (env, res) = Interpreter.interpretTermString("#iAdd (#iDiv 1 0) 2")(g3).run(emptyEnv)
      inside(res) {
        case Success(noValue: NoValue[U, V, W, C]) =>
          noValue.msg should be ===("divided by zero")
          inside(noValue.stackTrace) { case List(StackTraceElement(None, None, OffsetPosition(_, _))) => () }
      }
    }
    ignore should "complain at the combinator without the arguments" in {
      val (env, res) = Interpreter.interpretTreeString("f = #iSub 1 (#iDiv 2 0)")(f).run(emptyEnv)
      inside(res) {
        case Success(Failure(noValue)) =>
          noValue.msg should be ===("divided by zero")
          inside(noValue.stackTrace) { case List(StackTraceElement(None, Some(GlobalSymbol(NonEmptyList("f"))), OffsetPosition(_, _))) => () }
      }
    }
    ignore should "complain at the combinator with the two arguments" in {
      val (env, res) = Interpreter.interpretTreeString("f x y = #iAdd (#iDiv x y) y")(f).run(emptyEnv)
      val (env2, res2) = Interpreter.interpretTermString("f 10 0")(g3).run(env)
      inside(res2) {
        case Success(noValue: NoValue[U, V, W, C]) =>
          noValue.msg should be ===("divided by zero")
          inside(noValue.stackTrace) {
            case List(stackTraceElem1, stackTraceElem2) =>
              inside(stackTraceElem1) { case StackTraceElement(None, Some(GlobalSymbol(NonEmptyList("f"))), OffsetPosition(_, _)) => () }
              inside(stackTraceElem2) { case StackTraceElement(None, None, OffsetPosition(_, _)) => () }
          }
      }
    }
    ignore should "complain at the combinator that is applied at the other combinator" in {
      val (env, res) = Interpreter.interpretTreeString("""
        f x y = #iAdd (#iDiv x y) y
        g x = f x 0
        """)(f).run(emptyEnv)
      val (env2, res2) = Interpreter.interpretTermString("g 10")(g3).run(env)
      inside(res2) {
        case Success(noValue: NoValue[U, V, W, C]) =>
          noValue.msg should be ===("divided by zero")
          inside(noValue.stackTrace) {
            case List(stackTraceElem1, stackTraceElem2, stackTraceElem3) =>
              inside(stackTraceElem1) { case StackTraceElement(None, Some(GlobalSymbol(NonEmptyList("f"))), OffsetPosition(_, _)) => () }
              inside(stackTraceElem2) { case StackTraceElement(None, Some(GlobalSymbol(NonEmptyList("g"))), OffsetPosition(_, _)) => () }
              inside(stackTraceElem3) { case StackTraceElement(None, None, OffsetPosition(_, _)) => () }
          }
      }
    }
    ignore should "complain at the lambda expression" in {
      val (env, res) = Interpreter.interpretTermString("(\\\\x => #iDiv 1 x) 0")(g3).run(emptyEnv)
      inside(res) {
        case Success(noValue: NoValue[U, V, W, C]) =>
          noValue.msg should be ===("divided by zero")
          inside(noValue.stackTrace) {
            case List(stackTraceElem1, stackTraceElem2) =>
              inside(stackTraceElem1) { case StackTraceElement(None, None, OffsetPosition(_, _)) => () }
              inside(stackTraceElem2) { case StackTraceElement(None, None, OffsetPosition(_, _)) => () }
          }
      }
    }
    it should "interpret the string of the typed term" in {
      val (env, res) = Interpreter.interpretTermString("#iAdd 2 ((#iMul 3 4): ##& (##| #Zero #NonZero) #Int)")(g3).run(emptyEnv)
      res should be ===(IntValue(14).success)
    }
    it should "interpret the string with the construct-expressions" in {
      val (env, res) = Interpreter.interpretTreeString("""
        unittype 2 T
        unittype 0 U
        instance select \\t1 t2 => ##| (##& (T t1 t2) (tuple 2 t1 t2)) (##& U tuple 0) construct {
          \\t1 t2 => ##& (T t1 t2) (tuple 2 t1 t2)
          ##& U tuple 0
        }
        """)(f).run(emptyEnv)
      val (env2, res2) = Interpreter.interpretTermString("""
        tuple 2 (construct 2 'a' 'b': ##& (T #Char #Char) (tuple 2 #Char #Char)) (construct 0: ##& U tuple 0)
        """)(g3).run(env)
      res2 should be ===(TupleValue(Vector(
        ConstructValue(0, Vector(CharValue('a'), CharValue('b'))),
        ConstructValue(1, Vector()))).success)
    }
    it should "interpret the string with the select-expression" in {
      val (env, res) = Interpreter.interpretTreeString("""
        unittype 2 T
        unittype 0 U
        unittype 0 V
        instance select \\t1 t2 => ##| (##| (##& (T t1 t2) (tuple 2 t1 t2)) (##& U tuple 0)) (##& V tuple 0) construct {
          \\t1 t2 => ##& (T t1 t2) (tuple 2 t1 t2)
          ##& U tuple 0
          ##& V tuple 0
        }
        U = (construct 0: ##& U tuple 0): \\t1 t2 => ##| (##| (##& (T t1 t2) (tuple 2 t1 t2)) (##& U tuple 0)) (##& V tuple 0)
        """)(f).run(emptyEnv)
      val (env2, res2) = Interpreter.interpretTermString("""
        U select {
          (x: \\t1 t2 => ##& (T t1 t2) (tuple 2 t1 t2)) => 1
          (x: ##& U tuple 0) => 2
          (x: ##& V tuple 0) => 3
        }
        """)(g3).run(env)
      res2 should be ===(IntValue(2).success)
    }
    it should "interpret the string with the applications of the ad-hoc polymorphic combinators" in {
      val (env, res) = Interpreter.interpretTreeString("""
        poly f
        poly g
        instance f => h
        instance f => i
        instance g => j
        h = #iAdd
        i = #lSub
        j = 'a'
        (k: ##& (##| #Zero #NonZero) #Int) = 1
        (l: ##& (##| #Zero #NonZero) #Int) = 2
        (m: ##& (##| #Zero #NonZero) #Long) = 4L
        (n: ##& (##| #Zero #NonZero) #Long) = 3L
        """)(f).run(emptyEnv)
      val (env2, res2) = Interpreter.interpretTermString("""
        tuple 3 ((f k l): ##& (##| #Zero #NonZero) #Int) ((f m n): ##& (##| #Zero #NonZero) #Long) (g: #Char)
        """)(g3).run(env)
      res2 should be ===(TupleValue(Vector(IntValue(3), LongValue(1L), CharValue('a'))).success)
    }
    it should "interpret the string with the applications of the combinators with the instance arguments" in {
      val (env, res) = Interpreter.interpretTreeString("""
        f g x y = g (\\z => i z x) (j y)
        h g x = g (k x) (l g x)
        poly i
        j x = x select {
          (y: ##& T tuple 0) => 1: ##& (##| #Zero #NonZero) #Int
          (y: \\t1 t2 t3 => ##& (U t2 t3) (tuple 2 t2 t3)) => 2: ##& (##| #Zero #NonZero) #Int
        }
        poly k
        l g x = g x m
        poly m
        instance i => n
        instance k => o
        instance m => p
        n = #iMul
        o = #zNot
        p = true
        unittype 0 T
        unittype 2 U
        instance select ##| (##& T tuple 0) (##& (U #Char #Char) (tuple 2 #Char #Char)) construct {
          ##& T tuple 0
          ##& (U #Char #Char) (tuple 2 #Char #Char)
        }
        U x y = (construct 2 x y: ##& (U #Char #Char) (tuple 2 #Char #Char)): ##| (##& T tuple 0) (##& (U #Char #Char) (tuple 2 #Char #Char))
        """)(f).run(emptyEnv)
      val (env2, res2) = Interpreter.interpretTermString("""
        tuple 2 (f (\\g => #iAdd (g (3: ##& (##| #Zero #NonZero) #Int))) (2: ##& (##| #Zero #NonZero) #Int) (U 'a' 'b')) (h #zXor true)
        """)(g3).run(env)
      res2 should be ===(TupleValue(Vector(IntValue(8), BooleanValue(false))).success)
    }
    it should "interpret the string with the select-expressions for integers" in {
      val (env, res) = Interpreter.interpretTermString("""
        (#iAdd 1 2) select {
          (x: ##& #Zero #Int) => 1
          (x: ##& #NonZero #Int) =>
            (#iSub (#iDiv 9 x) 3) select {
              (y: ##& #Zero #Int) => 2
              (y: ##& #NonZero #Int) => 3
            }
        }
        """)(g3).run(emptyEnv)
      res should be ===(IntValue(2).success)
    }
    it should "interpret the string with the select-expressions for arrays" in {
      val (env, res) = Interpreter.interpretTermString("""
        (#array 0L 'a') select {
          (x: ##& #Empty (#Array #Char)) =>
            (#array 2L 'b') select {
              (y: ##& #Empty (#Array #Char)) => 1
              (y: ##& #NonEmpty (#Array #Char)) => 2
            }
          (x: ##& #NonEmpty (#Array #Char)) => 3
        }
        """)(g3).run(emptyEnv)
      res should be ===(IntValue(2).success)
    }
  }
  // The single concrete instantiation of the shared behaviour above.
  "An Interpreter" should behave like interpreter(SymbolEnvironment.empty[instant.LambdaInfo[parser.LambdaInfo, LocalSymbol, GlobalSymbol, GlobalSymbol], TypeSimpleTerm[Symbol, kinder.TypeLambdaInfo[parser.TypeLambdaInfo, LocalSymbol]], kinder.TypeLambdaInfo[parser.TypeLambdaInfo, LocalSymbol]], ())(_ => ().successNel)(_ => Interpreter.statefullyTransformToSymbolTree)(_ => Interpreter.transformToSymbolTerm3)
}
| luckboy/Purfuncor | src/test/scala/pl/luckboy/purfuncor/backend/interp/spec/InterpreterSpec.scala | Scala | mpl-2.0 | 15,277 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.ChartDataSource
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 07/11/17.
*/
/**
* Chart Data Source Repository
* @param session
* @param executionContext
*/
class ChartDataSourceRepository (session: JdbcSession)(implicit executionContext: ExecutionContext)
  extends api.repository.ChartDataSourceRepository[ChartDataSource , Int]
    with ChartDataSourceMapping {

  /**
    * Gets a Chart Data Source by its integer id.
    * The returned future fails with NoSuchElementException when no row matches.
    */
  def getById(id: Int): Future[ChartDataSource] = {
    Future(run(queryChartDataSource.filter(_.chartDataSourceId == lift(id))).headOption.get)
  }

  /**
    * Gets a Chart Data Source by its UUID.
    * The returned future fails with NoSuchElementException when no row matches.
    */
  def getByUUID(uuid: UUID): Future[ChartDataSource] = {
    Future(run(queryChartDataSource.filter(_.uuid == lift(uuid.toString))).headOption.get)
  }

  /**
    * Gets Chart Data Sources matching the given id.
    * Fix: the previous implementation ignored the id argument and returned
    * every row, exactly like getAll().
    */
  def getByChartDataSourceId(id : Int) : Future[List[ChartDataSource]] = {
    Future(run(queryChartDataSource.filter(_.chartDataSourceId == lift(id))))
  }

  /** Gets all Chart Data Sources. */
  def getAll() : Future[List[ChartDataSource]] = {
    Future(run(queryChartDataSource))
  }

  /**
    * Gets one page of Chart Data Sources.
    * Fix: pages are now exactly pageSize rows; previously the helper was
    * called with limit = (page + 1) * pageSize and applied
    * drop(offset).take(limit), returning up to (page + 1) * pageSize rows.
    */
  def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[ChartDataSource]] = {
    val offset = page * pageSize
    for {
      count <- countChartDataSource()
      elements <- if (offset > count) Future.successful(Nil)
      else selectChartDataSource(offset, pageSize)
    } yield {
      PaginatedSequence(elements, page, pageSize, count)
    }
  }

  /** Counts all Chart Data Source rows. */
  private def countChartDataSource() = {
    Future(run(queryChartDataSource.size).toInt)
  }

  /**
    * Selects one page of rows. Pagination is pushed into the query itself
    * (OFFSET/LIMIT) instead of materialising the whole table and slicing it
    * in memory as before.
    */
  private def selectChartDataSource(offset: Int, pageSize: Int): Future[Seq[ChartDataSource]] = {
    Future(run(queryChartDataSource.drop(lift(offset)).take(lift(pageSize))).toSeq)
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/ChartDataSourceRepository.scala | Scala | gpl-3.0 | 2,863 |
package dpla.ingestion3.executors
import java.time.LocalDateTime
import com.databricks.spark.avro._
import dpla.ingestion3.dataStorage.OutputHelper
import dpla.ingestion3.model.{ModelConverter, jsonlRecord}
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.storage.StorageLevel
import scala.util.{Failure, Success}
trait DeleteExecutor extends Serializable {

  /**
    * Removes the given DPLA item ids from an enriched Avro dump and writes the
    * surviving records back out as JSON-L.
    *
    * @param sparkConf Spark configuration
    * @param dataIn Path of the enriched Avro data to filter
    * @param dataOut Root location to save the JSON-L output under
    * @param deleteIds Comma-separated DPLA item ids to delete (bare ids,
    *                  without the "http://dp.la/api/items/" prefix; values are
    *                  not trimmed, so whitespace around commas would break
    *                  matching — TODO confirm callers pass a clean list)
    * @param shortName Provider shortname
    * @param logger Logger object
    * @return Path of the activity directory the JSON-L files were written to
    */
  def executeDelete(sparkConf: SparkConf,
                    dataIn: String,
                    dataOut: String,
                    deleteIds: String,
                    shortName: String,
                    logger: Logger): String = {

    // This start time is used for documentation and output file naming.
    val startDateTime: LocalDateTime = LocalDateTime.now

    // Output for this process in new jsonl sans deleteIds
    val outputHelper: OutputHelper =
      new OutputHelper(dataOut, shortName, "jsonl", startDateTime)

    val outputPath: String = outputHelper.activityPath

    logger.info("Starting delete")

    val spark = SparkSession
      .builder()
      .config(sparkConf)
      .getOrCreate()

    // Needed for the implicit Encoder[String] used by the .map(...) below.
    import spark.implicits._

    val sc = spark.sparkContext

    val enrichedRows: DataFrame = spark.read.avro(dataIn)

    // delete items
    // Expand bare ids into the full dplaUri form matched against column 0.
    val deleteUris = deleteIds.split(",").map(id => s"http://dp.la/api/items/$id")

    logger.info(s"Sourced enrichment data from $dataIn")
    logger.info(s"Deleting ${deleteUris.length} IDs from enriched data")
    logger.info(s"Saving to $outputPath")

    val indexRecords: Dataset[String] = enrichedRows
      .filter(row => !deleteUris.contains(row.getString(0))) // filter out rows where dplaUri matches
      .map(row => {
        val record = ModelConverter.toModel(row)
        jsonlRecord(record)
      })
      // Persisted because the dataset is evaluated twice: once for the count
      // below and once for the write. NOTE(review): it is never unpersisted.
      .persist(StorageLevel.MEMORY_AND_DISK_SER)

    val indexCount = indexRecords.count

    logger.info(s"Saved $indexCount to JSONL export")

    // This should always write out as #text() because if we use #json() then the
    // data will be written out inside a JSON object (e.g. {'value': <doc>}) which is
    // invalid for our use
    indexRecords.write.text(outputPath)

    // Create and write manifest.
    val manifestOpts: Map[String, String] = Map(
      "Activity" -> "JSON-L",
      "Provider" -> shortName,
      "Record count" -> indexCount.toString,
      "Input" -> dataIn
    )

    outputHelper.writeManifest(manifestOpts) match {
      case Success(s) => logger.info(s"Manifest written to $s")
      case Failure(f) => logger.warn(s"Manifest failed to write: $f")
    }

    sc.stop()
    logger.info("JSON-L export complete")

    // Return output path of jsonl files.
    outputPath
  }
}
| dpla/ingestion3 | src/main/scala/dpla/ingestion3/executors/DeleteExecutor.scala | Scala | mit | 3,029 |
package cz.kamenitxan.jakon.core.task
import java.sql.Date
import java.util.concurrent.TimeUnit
import cz.kamenitxan.jakon.core.database.DBHelper
import cz.kamenitxan.jakon.webui.entity.ResetPasswordEmailEntity
/** Periodic task (every 6 hours) that purges expired password-reset requests. */
class ResetPasswordRequestCleanerTask extends AbstractTask(6, TimeUnit.HOURS) {
  // Entity class made implicitly available for DBHelper.selectDeep.
  private implicit val cls: Class[ResetPasswordEmailEntity] = classOf[ResetPasswordEmailEntity]

  // language=SQL
  private val sql = "SELECT * FROM ResetPasswordEmailEntity where expirationDate <= ?"

  /** Loads every request whose expiration date is in the past and deletes it. */
  override def start(): Unit = {
    DBHelper.withDbConnection { implicit conn =>
      val statement = conn.prepareStatement(sql)
      statement.setDate(1, new Date(System.currentTimeMillis()))
      val expiredRequests = DBHelper.selectDeep(statement)
      expiredRequests.foreach(request => request.delete())
    }
  }
}
| kamenitxan/Jakon | modules/backend/src/main/scala/cz/kamenitxan/jakon/core/task/ResetPasswordRequestCleanerTask.scala | Scala | bsd-3-clause | 758 |
package exercices
import support.HandsOnSuite
class e00_start extends HandsOnSuite {

  // Welcome banner shown to participants. Throughout the workshop, `__` marks
  // a missing value and `???` marks a missing implementation.
  private val welcomeMessage =
    """ _________ .__ .__
      | / _____/ ____ _____ | | _____ ____ | | _____ ______ ______
      | \_____ \_/ ___\\__ \ | | \__ \ _/ ___\| | \__ \ / ___// ___/
      | / \ \___ / __ \| |__/ __ \_ \ \___| |__/ __ \_\___ \ \___ \
      | /_______ /\___ >____ /____(____ / \___ >____(____ /____ >____ >
      | \/ \/ \/ \/ \/ \/ \/ \/
      |
      | Bienvenue dans ce Hand's on Scala.
      |
      |
      | Dans ce handson il faudra remplir les trous :
      | - les `__` sont à remplacer par la valeur manquante
      | - les `???` sont à remplacer par une implémentation de code
      |
      |
      | Have fun!
      """.stripMargin

  exercice("Scala class") {
    DeleteMeToContinue(welcomeMessage)
  }
}
| loicknuchel/scala-class | src/test/scala/exercices/e00_start.scala | Scala | unlicense | 1,010 |
package io.muvr.exercise
import io.muvr.{CommonPathDirectives, CommonProtocolMarshallers}
import spray.httpx.SprayJsonSupport
import spray.json._
import spray.routing._
/** Spray JSON (un)marshalling for the exercise protocol types. */
trait ExerciseProtocolMarshallers extends SprayJsonSupport with CommonProtocolMarshallers with CommonPathDirectives {
  import spray.json.DefaultJsonProtocol._

  /** Matches a UUID path segment as a [[SessionId]]. */
  val SessionIdValue: PathMatcher1[SessionId] = JavaUUID.map(SessionId.apply)

  /** A [[SessionId]] is serialized as its plain string form. */
  implicit object SessionIdFormat extends RootJsonFormat[SessionId] {
    override def write(obj: SessionId): JsValue = JsString(obj.toString)

    override def read(json: JsValue): SessionId = (json: @unchecked) match {
      case JsString(x) ⇒ SessionId(x)
    }
  }

  implicit val exerciseModelFormat = jsonFormat3(ExerciseModel)
  implicit val resistanceExerciseSessionFormat = jsonFormat4(ResistanceExerciseSession)
  implicit val resistanceExerciseFormat = jsonFormat1(ResistanceExercise)
  implicit val classifiedResistanceExerciseFormat = jsonFormat6(ClassifiedResistanceExercise)

  /** Sensor data is either a bare number (one-dimensional) or a three-field object. */
  implicit object SensorDataFormat extends JsonFormat[SensorData] {
    private val threedFormat = jsonFormat3(Threed)

    override def write(obj: SensorData): JsValue = obj match {
      case Oned(value) ⇒ JsNumber(value)
      case x: Threed ⇒ threedFormat.write(x)
    }

    override def read(json: JsValue): SensorData = json match {
      case JsNumber(value) ⇒ Oned(value.intValue())
      case _ ⇒ threedFormat.read(json)
    }
  }

  implicit val fusedSensorDataFormat = jsonFormat5(FusedSensorData)
  implicit val resistanceExerciseSetExampleFormat = jsonFormat3(ResistanceExerciseExample)
  implicit val entireResistanceExerciseSessionFormat = jsonFormat3(EntireResistanceExerciseSession)

  /**
   * Marshalling of Spark suggestions
   */
  implicit object SuggestionsToResponseMarshaller extends RootJsonFormat[Suggestions] {
    private val session = JsString("session")
    private val intensity = JsString("intensity")
    import SuggestionSource._

    implicit object SuggestionSourceFormat extends RootJsonFormat[SuggestionSource] {
      private val trainer = JsString("trainer")
      private val programme = JsString("programme")
      private val history = JsString("history")

      override def write(obj: SuggestionSource): JsValue = obj match {
        case Trainer(notes) ⇒ JsObject("kind" → trainer, "notes" → JsString(notes))
        case Programme ⇒ JsObject("kind" → programme)
        case History ⇒ JsObject("kind" → history)
      }

      override def read(json: JsValue): SuggestionSource = {
        val obj = json.asJsObject
        (obj.fields("kind"): @unchecked) match {
          // Fix: read the notes with convertTo[String] instead of
          // JsValue.toString(), which yielded the JSON-encoded form including
          // the surrounding quotes and therefore broke the write/read
          // round trip (write stores the notes as a plain JsString).
          case `trainer` ⇒ Trainer(obj.fields("notes").convertTo[String])
          case `programme` ⇒ Programme
          case `history` ⇒ History
        }
      }
    }

    private val sessionFormat = jsonFormat4(Suggestion.Session)
    private val intensityFormat = jsonFormat4(Suggestion.Intensity)

    override def write(obj: Suggestions): JsValue = JsArray(obj.suggestions.map {
      case s: Suggestion.Session ⇒
        JsObject("kind" → session, "value" → sessionFormat.write(s))
      case i: Suggestion.Intensity =>
        JsObject("kind" → intensity, "value" → intensityFormat.write(i))
    }: _*)

    override def read(json: JsValue): Suggestions = (json: @unchecked) match {
      case JsArray(elements) ⇒ Suggestions(elements.map { element ⇒
        element.asJsObject.getFields("kind", "value") match {
          case Seq(`session`, s) ⇒ sessionFormat.read(s)
          case Seq(`intensity`, i) ⇒ intensityFormat.read(i)
        }
      }.toList)
    }
  }
}
| boonhero/muvr-server | exercise-protocol-marshalling/src/main/scala/io/muvr/exercise/ExerciseProtocolMarshallers.scala | Scala | bsd-3-clause | 3,904 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.util.Locale

import scala.collection.JavaConverters._

import org.apache.spark.annotation.{Experimental, InterfaceStability}
import org.apache.spark.sql.{AnalysisException, Dataset, ForeachWriter}
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.execution.streaming._
/**
* :: Experimental ::
* Interface used to write a streaming `Dataset` to external storage systems (e.g. file systems,
* key-value stores, etc). Use `Dataset.writeStream` to access this.
*
* @since 2.0.0
*/
@Experimental
@InterfaceStability.Evolving
final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
private val df = ds.toDF()
/**
* Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.
* - `OutputMode.Append()`: only the new rows in the streaming DataFrame/Dataset will be
* written to the sink
* - `OutputMode.Complete()`: all the rows in the streaming DataFrame/Dataset will be written
* to the sink every time these is some updates
* - `OutputMode.Update()`: only the rows that were updated in the streaming DataFrame/Dataset
* will be written to the sink every time there are some updates. If
* the query doesn't contain aggregations, it will be equivalent to
* `OutputMode.Append()` mode.
*
* @since 2.0.0
*/
def outputMode(outputMode: OutputMode): DataStreamWriter[T] = {
this.outputMode = outputMode
this
}
/**
* Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.
* - `append`: only the new rows in the streaming DataFrame/Dataset will be written to
* the sink
* - `complete`: all the rows in the streaming DataFrame/Dataset will be written to the sink
* every time these is some updates
* - `update`: only the rows that were updated in the streaming DataFrame/Dataset will
* be written to the sink every time there are some updates. If the query doesn't
* contain aggregations, it will be equivalent to `append` mode.
* @since 2.0.0
*/
def outputMode(outputMode: String): DataStreamWriter[T] = {
this.outputMode = outputMode.toLowerCase match {
case "append" =>
OutputMode.Append
case "complete" =>
OutputMode.Complete
case "update" =>
OutputMode.Update
case _ =>
throw new IllegalArgumentException(s"Unknown output mode $outputMode. " +
"Accepted output modes are 'append', 'complete', 'update'")
}
this
}
/**
* Set the trigger for the stream query. The default value is `ProcessingTime(0)` and it will run
* the query as fast as possible.
*
* Scala Example:
* {{{
* df.writeStream.trigger(ProcessingTime("10 seconds"))
*
* import scala.concurrent.duration._
* df.writeStream.trigger(ProcessingTime(10.seconds))
* }}}
*
* Java Example:
* {{{
* df.writeStream().trigger(ProcessingTime.create("10 seconds"))
*
* import java.util.concurrent.TimeUnit
* df.writeStream().trigger(ProcessingTime.create(10, TimeUnit.SECONDS))
* }}}
*
* @since 2.0.0
*/
def trigger(trigger: Trigger): DataStreamWriter[T] = {
this.trigger = trigger
this
}
/**
* Specifies the name of the [[StreamingQuery]] that can be started with `start()`.
* This name must be unique among all the currently active queries in the associated SQLContext.
*
* @since 2.0.0
*/
def queryName(queryName: String): DataStreamWriter[T] = {
this.extraOptions += ("queryName" -> queryName)
this
}
/**
* Specifies the underlying output data source.
*
* @since 2.0.0
*/
def format(source: String): DataStreamWriter[T] = {
this.source = source
this
}
/**
* Partitions the output by the given columns on the file system. If specified, the output is
* laid out on the file system similar to Hive's partitioning scheme. As an example, when we
* partition a dataset by year and then month, the directory layout would look like:
*
* - year=2016/month=01/
* - year=2016/month=02/
*
* Partitioning is one of the most widely used techniques to optimize physical data layout.
* It provides a coarse-grained index for skipping unnecessary data reads when queries have
* predicates on the partitioned columns. In order for partitioning to work well, the number
* of distinct values in each column should typically be less than tens of thousands.
*
* @since 2.0.0
*/
@scala.annotation.varargs
def partitionBy(colNames: String*): DataStreamWriter[T] = {
this.partitioningColumns = Option(colNames)
this
}
/**
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: String): DataStreamWriter[T] = {
this.extraOptions += (key -> value)
this
}
/**
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Boolean): DataStreamWriter[T] = option(key, value.toString)
/**
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Long): DataStreamWriter[T] = option(key, value.toString)
/**
* Adds an output option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Double): DataStreamWriter[T] = option(key, value.toString)
/**
* (Scala-specific) Adds output options for the underlying data source.
*
* @since 2.0.0
*/
def options(options: scala.collection.Map[String, String]): DataStreamWriter[T] = {
this.extraOptions ++= options
this
}
/**
* Adds output options for the underlying data source.
*
* @since 2.0.0
*/
def options(options: java.util.Map[String, String]): DataStreamWriter[T] = {
this.options(options.asScala)
this
}
/**
* Starts the execution of the streaming query, which will continually output results to the given
* path as new data arrives. The returned [[StreamingQuery]] object can be used to interact with
* the stream.
*
* @since 2.0.0
*/
def start(path: String): StreamingQuery = {
option("path", path).start()
}
/**
* Starts the execution of the streaming query, which will continually output results to the given
* path as new data arrives. The returned [[StreamingQuery]] object can be used to interact with
* the stream.
*
* @since 2.0.0
*/
def start(): StreamingQuery = {
if (source == "memory") {
assertNotPartitioned("memory")
if (extraOptions.get("queryName").isEmpty) {
throw new AnalysisException("queryName must be specified for memory sink")
}
val sink = new MemorySink(df.schema, outputMode)
val resultDf = Dataset.ofRows(df.sparkSession, new MemoryPlan(sink))
val chkpointLoc = extraOptions.get("checkpointLocation")
val recoverFromChkpoint = outputMode == OutputMode.Complete()
val query = df.sparkSession.sessionState.streamingQueryManager.startQuery(
extraOptions.get("queryName"),
chkpointLoc,
df,
sink,
outputMode,
useTempCheckpointLocation = true,
recoverFromCheckpointLocation = recoverFromChkpoint,
trigger = trigger)
resultDf.createOrReplaceTempView(query.name)
query
} else if (source == "foreach") {
assertNotPartitioned("foreach")
val sink = new ForeachSink[T](foreachWriter)(ds.exprEnc)
df.sparkSession.sessionState.streamingQueryManager.startQuery(
extraOptions.get("queryName"),
extraOptions.get("checkpointLocation"),
df,
sink,
outputMode,
useTempCheckpointLocation = true,
trigger = trigger)
} else {
val (useTempCheckpointLocation, recoverFromCheckpointLocation) =
if (source == "console") {
(true, false)
} else {
(false, true)
}
val dataSource =
DataSource(
df.sparkSession,
className = source,
options = extraOptions.toMap,
partitionColumns = normalizedParCols.getOrElse(Nil))
df.sparkSession.sessionState.streamingQueryManager.startQuery(
extraOptions.get("queryName"),
extraOptions.get("checkpointLocation"),
df,
dataSource.createSink(outputMode),
outputMode,
useTempCheckpointLocation = useTempCheckpointLocation,
recoverFromCheckpointLocation = recoverFromCheckpointLocation,
trigger = trigger)
}
}
/**
 * Starts the execution of the streaming query, which will continually send results to the given
 * `ForeachWriter` as new data arrives. The `ForeachWriter` can be used to send the data
 * generated by the `DataFrame`/`Dataset` to an external system.
 *
 * Scala example:
 * {{{
 *   datasetOfString.writeStream.foreach(new ForeachWriter[String] {
 *
 *     def open(partitionId: Long, version: Long): Boolean = {
 *       // open connection
 *     }
 *
 *     def process(record: String) = {
 *       // write string to connection
 *     }
 *
 *     def close(errorOrNull: Throwable): Unit = {
 *       // close the connection
 *     }
 *   }).start()
 * }}}
 *
 * Java example:
 * {{{
 *   datasetOfString.writeStream().foreach(new ForeachWriter<String>() {
 *
 *     @Override
 *     public boolean open(long partitionId, long version) {
 *       // open connection
 *     }
 *
 *     @Override
 *     public void process(String value) {
 *       // write string to connection
 *     }
 *
 *     @Override
 *     public void close(Throwable errorOrNull) {
 *       // close the connection
 *     }
 *   }).start();
 * }}}
 *
 * @since 2.0.0
 */
def foreach(writer: ForeachWriter[T]): DataStreamWriter[T] = {
  // Record the sink type first (same assignment order as before the guard).
  this.source = "foreach"
  if (writer == null) {
    throw new IllegalArgumentException("foreach writer cannot be null")
  }
  // Clean the writer closure so it is serializable before it is shipped to executors.
  this.foreachWriter = ds.sparkSession.sparkContext.clean(writer)
  this
}
// Partitioning columns with their names normalized against the schema, if any were set.
private def normalizedParCols: Option[Seq[String]] =
  partitioningColumns.map(cols => cols.map(col => normalize(col, "Partition")))
/**
 * The given column name may not be equal to any of the existing column names if we were in
 * case-insensitive context. Normalize the given column name to the real one so that we don't
 * need to care about case sensitivity afterwards.
 */
private def normalize(columnName: String, columnType: String): String = {
  val validColumnNames = df.logicalPlan.output.map(_.name)
  // Let the session's resolver decide equality (it encapsulates case sensitivity).
  val resolved = validColumnNames.find { candidate =>
    df.sparkSession.sessionState.analyzer.resolver(candidate, columnName)
  }
  resolved match {
    case Some(realName) => realName
    case None =>
      throw new AnalysisException(s"$columnType column $columnName not found in " +
        s"existing columns (${validColumnNames.mkString(", ")})")
  }
}
// Fails fast when partitioning columns were configured for an operation that cannot use them.
private def assertNotPartitioned(operation: String): Unit =
  partitioningColumns.foreach { _ =>
    throw new AnalysisException(s"'$operation' does not support partitioning")
  }
///////////////////////////////////////////////////////////////////////////////////////
// Builder pattern config options
///////////////////////////////////////////////////////////////////////////////////////
// Sink to write to; defaults to the session's configured default data source.
private var source: String = df.sparkSession.sessionState.conf.defaultDataSourceName
// How results are materialized into the sink (Append unless overridden).
private var outputMode: OutputMode = OutputMode.Append
// Trigger interval; 0L means run micro-batches as fast as possible.
private var trigger: Trigger = ProcessingTime(0L)
// Free-form sink options, e.g. "queryName" and "checkpointLocation".
private var extraOptions = new scala.collection.mutable.HashMap[String, String]
// Set only when source == "foreach"; holds the user-supplied (closure-cleaned) writer.
private var foreachWriter: ForeachWriter[T] = null
// Columns to partition output by, when supported by the chosen sink.
private var partitioningColumns: Option[Seq[String]] = None
}
| SnappyDataInc/spark | sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala | Scala | apache-2.0 | 12,780 |
package org.usagram.clarify
import org.usagram.clarify.error.Error
import org.scalatest._
// Unit tests for Invalid: a validation result that always reports failure.
class InvalidSpec extends FunSpec {
  import Matchers._
  // Shared fixture: an Invalid wrapping an arbitrary value, with no tags and the generic Error.
  private val invalid = Invalid("a value", Tags.empty, Error)
  describe("#isValid") {
    it("returns always false") {
      invalid.isValid should be(false)
    }
  }
  describe("#isInvalid") {
    it("returns always true") {
      invalid.isInvalid should be(true)
    }
  }
  describe("#message") {
    it("returns the message of #error") {
      // "(no label)" presumably reflects the empty Tags of the fixture — verify against Invalid.message.
      invalid.message should be("(no label) has an error")
    }
  }
}
| takkkun/clarify | core/src/test/scala/org/usagram/clarify/InvalidSpec.scala | Scala | mit | 569 |
/* *\
** \ \ / _) \ \ / \ | **
** \ \ / | __ \ _ \ __| \ \ / |\/ | **
** \ \ / | | | __/ | \ \ / | | **
** \_/ _| .__/ \___| _| \_/ _| _| **
** _| **
** **
** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **
** **
** http://www.vipervm.org **
** GPLv3 **
\* */
package org.vipervm.bindings.opencl
import com.sun.jna.ptr.{IntByReference, PointerByReference}
import com.sun.jna.{Pointer, Structure, PointerType, NativeLong, Memory}
import com.sun.jna.Pointer.NULL
import scala.collection.immutable._
/**
 * Wraps an OpenCL sampler object. Construction calls clCreateSampler immediately and
 * fails (via checkError) if the native call reports an error.
 */
class Sampler(val context: Context, val normCoords:Boolean, val addressingMode:Int, val filterMode:Int) extends Entity with Retainable with Info {
  import Wrapper._
  import Sampler._
  import OpenCL._
  // Retain/release/info hooks required by the Retainable and Info mixins.
  protected val retainFunc = clRetainSampler _
  protected val releaseFunc = clReleaseSampler _
  protected val infoFunc = clGetSamplerInfo(peer, _:Int, _:Int, _:Pointer, _:Pointer)
  // OpenCL takes an int flag (CL_TRUE/CL_FALSE), not a JVM boolean, for normalized coords.
  private val nc = if (normCoords) CL_TRUE else CL_FALSE
  // Out-parameter that receives the error code from clCreateSampler.
  private val err = new IntByReference
  val peer = clCreateSampler(context.peer, nc, addressingMode, filterMode, err.getPointer)
  checkError(err.getValue)
  // Current OpenCL reference count of this sampler object.
  def referenceCount = getIntInfo(CL_SAMPLER_REFERENCE_COUNT)
}
// OpenCL constants used by Sampler (values as defined in cl.h).
object Sampler {
  // cl_addressing_mode values: how out-of-range image coordinates are handled.
  val CL_ADDRESS_NONE = 0x1130
  val CL_ADDRESS_CLAMP_TO_EDGE = 0x1131
  val CL_ADDRESS_CLAMP = 0x1132
  val CL_ADDRESS_REPEAT = 0x1133
  // cl_sampler_info query keys for clGetSamplerInfo.
  val CL_SAMPLER_REFERENCE_COUNT = 0x1150
  val CL_SAMPLER_CONTEXT = 0x1151
  val CL_SAMPLER_NORMALIZED_COORDS = 0x1152
  val CL_SAMPLER_ADDRESSING_MODE = 0x1153
  val CL_SAMPLER_FILTER_MODE = 0x1154
  // cl_filter_mode values: interpolation used when sampling between texels.
  val CL_FILTER_NEAREST = 0x1140
  val CL_FILTER_LINEAR = 0x1141
}
| hsyl20/Scala_ViperVM | src/main/scala/org/vipervm/bindings/opencl/Sampler.scala | Scala | gpl-3.0 | 2,248 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import kafka.utils._
import kafka.message._
import org.junit._
import org.junit.Assert._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.runners.Parameterized.Parameters
import org.apache.kafka.common.record.{CompressionType, MemoryRecords, RecordBatch, SimpleRecord}
import org.apache.kafka.common.utils.Utils
import java.util.{Collection, Properties}
import kafka.server.{BrokerTopicStats, FetchLogEnd, LogDirFailureChannel}
import scala.collection.JavaConverters._
// Parameterized over every (message codec, broker codec) pair — see the companion's parameters().
@RunWith(value = classOf[Parameterized])
class BrokerCompressionTest(messageCompression: String, brokerCompression: String) {
  // Fresh temp dir and partition log dir per test instance; cleaned up in tearDown.
  val tmpDir = TestUtils.tempDir()
  val logDir = TestUtils.randomPartitionLogDir(tmpDir)
  val time = new MockTime(0, 0)
  val logConfig = LogConfig()
  @After
  def tearDown(): Unit = {
    Utils.delete(tmpDir)
  }
  /**
   * Test broker-side compression configuration
   */
  @Test
  def testBrokerSideCompression(): Unit = {
    val messageCompressionCode = CompressionCodec.getCompressionCodec(messageCompression)
    val logProps = new Properties()
    logProps.put(LogConfig.CompressionTypeProp, brokerCompression)
    /*configure broker-side compression */
    val log = Log(logDir, LogConfig(logProps), logStartOffset = 0L, recoveryPoint = 0L, scheduler = time.scheduler,
      time = time, brokerTopicStats = new BrokerTopicStats, maxProducerIdExpirationMs = 60 * 60 * 1000,
      producerIdExpirationCheckIntervalMs = LogManager.ProducerIdExpirationCheckIntervalMs,
      logDirFailureChannel = new LogDirFailureChannel(10))
    /* append two messages */
    log.appendAsLeader(MemoryRecords.withRecords(CompressionType.forId(messageCompressionCode.codec), 0,
      new SimpleRecord("hello".getBytes), new SimpleRecord("there".getBytes)), leaderEpoch = 0)
    // Reads the first record batch at the given offset from the log.
    def readBatch(offset: Int): RecordBatch = {
      val fetchInfo = log.read(offset,
        maxLength = 4096,
        isolation = FetchLogEnd,
        minOneMessage = true)
      fetchInfo.records.batches.iterator.next()
    }
    if (!brokerCompression.equals("producer")) {
      // Broker-configured codec wins: the stored batch must carry that codec id.
      val brokerCompressionCode = BrokerCompressionCodec.getCompressionCodec(brokerCompression)
      assertEquals("Compression at offset 0 should produce " + brokerCompressionCode.name, brokerCompressionCode.codec, readBatch(0).compressionType.id)
    }
    else
      // "producer" config keeps whatever codec the producer used for the batch.
      assertEquals("Compression at offset 0 should produce " + messageCompressionCode.name, messageCompressionCode.codec, readBatch(0).compressionType.id)
  }
}
object BrokerCompressionTest {
  // Supplies the full cross product of broker compression options and message codecs
  // to the parameterized runner as (messageCompression, brokerCompression) rows.
  @Parameters
  def parameters: Collection[Array[String]] = {
    val rows = BrokerCompressionCodec.brokerCompressionOptions.flatMap { brokerCompression =>
      CompressionType.values.map { messageCompression =>
        Array(messageCompression.name, brokerCompression)
      }
    }
    rows.asJava
  }
}
| noslowerdna/kafka | core/src/test/scala/unit/kafka/log/BrokerCompressionTest.scala | Scala | apache-2.0 | 3,646 |
package com.webtrends.harness.component.kafka.mock
import java.io.File
import kafka.server.KafkaConfig
import kafka.server.KafkaServerStartable
import java.util.Properties
// Starts an embedded ZooKeeper and Kafka broker for tests; construction performs the startup.
class KafkaLocal {
  // Broker settings come from the classpath resource kafkaLocal.properties.
  val kafkaProperties: Properties = new Properties()
  kafkaProperties.load(getClass.getResourceAsStream("/kafkaLocal.properties"))
  val kafkaConfig = new KafkaConfig(kafkaProperties)
  val logDirs = kafkaProperties.getProperty("log.dirs")
  // NOTE(review): this File is constructed and immediately discarded; java.io.File's
  // constructor touches nothing on disk, so this line is a no-op. Presumably .mkdirs()
  // (or an assignment) was intended — confirm before relying on this directory existing.
  new File(logDirs + "/local-topic")
  //start local zookeeper
  println("Starting local zookeeper...")
  val zookeeper = new ZookeeperLocal()
  //start local kafka broker
  println("Starting local kafka broker...")
  val kafka = KafkaServerStartable.fromProps(kafkaProperties)
  kafka.startup()
  println("Local Kafka Up, Ready to Mock")
  // Shuts the broker down. NOTE(review): the embedded ZooKeeper is not stopped here.
  def stop() = { //stop kafka broker
    System.out.println("stopping kafka...")
    kafka.shutdown()
    System.out.println("done")
  }
}
| malibuworkcrew/wookiee-kafka | src/test/scala/com/webtrends/harness/component/kafka/mock/KafkaLocal.scala | Scala | apache-2.0 | 915 |
/*
* This file is part of Evo2DSim.
*
* Evo2DSim is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Evo2DSim is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Evo2DSim. If not, see <http://www.gnu.org/licenses/>.
*/
package org.vastness.evo2dsim.core.environment.mixins.settings
import org.jbox2d.common.Vec2
import org.vastness.evo2dsim.core.simulator.Simulator.Flags
// Baseline arena/food parameters shared by environments that do not override them.
trait DefaultSettings extends Settings{
  // Arena center; together with halfSize this defines the playable square.
  override val origin = new Vec2(1.515f,1.515f)
  override val halfSize = 1.5f
  // Agents spawn within 80% of the arena half-size, keeping them off the walls.
  override def spawnSize = halfSize*0.8f
  override val foodRadius: Float = 0.17f
  // Food items are spaced two radii apart so they cannot overlap.
  override def foodOffset: Float = 2f*foodRadius
  // Ranges are derived from each other: activation slightly beyond the food radius,
  // smell slightly beyond activation.
  override def activationRange: Float = foodRadius * 1.3f
  override def smellRange: Float = activationRange * 1.3f
  override def artificialSmellMemory: Boolean = false
  override def simFlags = Flags()
  // Effectively no cap on the number of agents by default.
  override def agentLimit = Int.MaxValue
}
| vchuravy/Evo2DSim | core/src/main/scala/org/vastness/evo2dsim/core/environment/mixins/settings/DefaultSettings.scala | Scala | mit | 1,354 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.spark.serialization
import java.beans.Introspector
import java.lang.reflect.Method
import scala.collection.mutable.HashMap
import scala.reflect.runtime.universe._
import org.apache.commons.logging.LogFactory
/**
 * Reflection helpers that classify runtime objects as Scala case classes or JavaBeans
 * and extract their properties as name -> value maps. Results are memoized per Class.
 */
private[spark] object ReflectionUtils {

  // NOTE(review): these caches are plain mutable maps accessed without synchronization;
  // confirm single-threaded use or guard them if this object is shared across threads.
  val caseClassCache = new HashMap[Class[_], (Boolean, Iterable[String])]
  val javaBeanCache = new HashMap[Class[_], Array[(String, Method)]]

  // SI-6240: scala-reflect runtime mirrors are not thread-safe, so all reflective
  // lookups are serialized on this lock object.
  protected[spark] object ReflectionLock

  /** True if the class symbol of `clazz` is a Scala case class (the reliable check). */
  private def checkCaseClass(clazz: Class[_]): Boolean = {
    ReflectionLock.synchronized {
      // reliable case class identifier only happens through class symbols...
      runtimeMirror(clazz.getClassLoader()).classSymbol(clazz).isCaseClass
    }
  }

  /** Names of the case accessors of `clazz`, in declaration order. */
  private def doGetCaseClassInfo(clazz: Class[_]): Iterable[String] = {
    ReflectionLock.synchronized {
      val t = runtimeMirror(clazz.getClassLoader()).classSymbol(clazz).toType
      // `decls` was called `declarations` in older scala-reflect versions; probe for both.
      val decls = try {
        t.getClass.getMethod("decls")
      } catch {
        case _: Throwable => t.getClass.getMethod("declarations")
      }
      val scopes: Iterable[Symbol] = decls.invoke(t).asInstanceOf[Iterable[Symbol]]
      scopes.collect {
        case m: MethodSymbol if m.isCaseAccessor => m.name.toString()
      }
    }
  }

  /**
   * Fallback heuristic used when the class-symbol check fails (e.g. missing
   * @ScalaSignature): a case class is Serializable and has exactly `arity` synthetic
   * `copy$default$N` methods.
   *
   * BUG FIX: the original evaluated `false` for non-Serializable classes but discarded
   * the value (it was neither returned nor the last expression), so the Serializable
   * guard never took effect. The guard is now part of the returned expression.
   */
  private def isCaseClassInsideACompanionModule(clazz: Class[_], arity: Int): Boolean = {
    classOf[Serializable].isAssignableFrom(clazz) && {
      // check 'copy' synthetic methods - they are public so go with getMethods
      val copyMethods = clazz.getMethods.collect {
        case m: Method if m.getName.startsWith("copy$default$") => m.getName
      }
      arity == copyMethods.length
    }
  }

  // TODO: this is a hack since we expect the field declaration order to be according to
  // the source but there's no guarantee
  private def caseClassInfoInsideACompanionModule(clazz: Class[_], arity: Int): Iterable[String] = {
    // fields are private so use the 'declared' variant; the first `arity` fields are
    // assumed to be the constructor parameters
    clazz.getDeclaredFields.take(arity).map(_.getName)
  }

  /** Zips the accessor names with the product's values into a Map. */
  private def doGetCaseClassValues(target: AnyRef, props: Iterable[String]) = {
    val product = target.asInstanceOf[Product].productIterator
    val tuples = for (y <- props) yield (y, product.next)
    tuples.toMap
  }

  /** Memoized (isCaseClass, accessorNames) lookup for the product's runtime class. */
  private def checkCaseClassCache(p: Product) = {
    caseClassCache.getOrElseUpdate(p.getClass, {
      if (checkCaseClass(p.getClass)) {
        (true, doGetCaseClassInfo(p.getClass))
      } else if (isCaseClassInsideACompanionModule(p.getClass, p.productArity)) {
        // Todo: Fix this logger usage
        LogFactory.getLog(classOf[ScalaValueWriter]).warn(
          String.format("[%s] is detected as a case class in Java but not in Scala and thus " +
            "its properties might be detected incorrectly - make sure the @ScalaSignature is available within the class bytecode " +
            "and/or consider moving the case class from its companion object/module", p.getClass))
        (true, caseClassInfoInsideACompanionModule(p.getClass(), p.productArity))
      } else {
        (false, null)
      }
    })
  }

  /** Whether `p`'s runtime class is (or looks like) a Scala case class. */
  def isCaseClass(p: Product) = {
    checkCaseClassCache(p)._1
  }

  /** Map of accessor name -> value for a case class instance. */
  def caseClassValues(p: Product) = {
    doGetCaseClassValues(p.asInstanceOf[AnyRef], checkCaseClassCache(p)._2)
  }

  /** Memoized readable-property lookup for the value's runtime class. */
  private def checkJavaBeansCache(o: Any) = {
    javaBeanCache.getOrElseUpdate(o.getClass, {
      javaBeansInfo(o.getClass)
    })
  }

  /** True if the value exposes at least one readable JavaBean property. */
  def isJavaBean(value: Any) = {
    !checkJavaBeansCache(value).isEmpty
  }

  /** Map of property name -> value for a JavaBean. */
  def javaBeanAsMap(value: Any) = {
    javaBeansValues(value, checkJavaBeansCache(value))
  }

  // Readable (non-`class`) bean properties, sorted by name for deterministic output.
  private def javaBeansInfo(clazz: Class[_]) = {
    Introspector.getBeanInfo(clazz).getPropertyDescriptors().collect {
      case pd if (pd.getName != "class" && pd.getReadMethod() != null) => (pd.getName, pd.getReadMethod)
    }.sortBy(_._1)
  }

  private def javaBeansValues(target: Any, info: Array[(String, Method)]) = {
    info.map(in => (in._1, in._2.invoke(target))).toMap
  }
}
| elastic/elasticsearch-hadoop | spark/core/src/main/scala/org/elasticsearch/spark/serialization/ReflectionUtils.scala | Scala | apache-2.0 | 4,979 |
/*
* Copyright (C) 2017 LREN CHUV for Human Brain Project
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ch.chuv.lren.woken.core.model.jobs
import ch.chuv.lren.woken.messages.query.MiningQuery
import ch.chuv.lren.woken.messages.query.filters.FilterRule
import ch.chuv.lren.woken.messages.variables.VariableMetaData
import ch.chuv.lren.woken.service.FeaturesTableService
import scala.language.higherKinds
/**
 * A validation job will trigger validation of a PFA model
 *
 * @param jobId                unique identifier of this job
 * @param featuresTableService table service providing the features used for validation
 * @param query                the mining query whose model is being validated
 * @param metadata             metadata of the variables involved in the query
 */
case class ValidationJob[F[_]](
    override val jobId: String,
    featuresTableService: FeaturesTableService[F],
    query: MiningQuery,
    metadata: List[VariableMetaData]
) extends Job[MiningQuery] {
  // Filters come straight from the wrapped query.
  override def filters: Option[FilterRule] = query.filters
}
object ValidationJob {
  // Pseudo-algorithm code identifying validation jobs.
  val algorithmCode = "_validation_"
}
| LREN-CHUV/workflow | src/main/scala/ch/chuv/lren/woken/core/model/jobs/ValidationJob.scala | Scala | apache-2.0 | 1,445 |
package looty
package views
import looty.views.loot.{Filters, Containers, Columns}
import org.scalajs.jquery.{JQueryEventObject, JQuery}
import scala.scalajs.js
//////////////////////////////////////////////////////////////
// Copyright (c) 2014 Ben Jackman, Jeff Gomberg
// All Rights Reserved
// please contact ben@jackman.biz or jeff@cgtanalytics.com
// for licensing inquiries
// Created by bjackman @ 8/24/14 6:35 PM
//////////////////////////////////////////////////////////////
// Toolbar pane letting the user load/save/delete named column "views" (optionally
// including the active filters) via a select2 dropdown and four buttons.
class LoadSavePane(columns: Columns, containers: Containers, filters: Filters) {
  // Builds the pane's DOM, wires all button handlers, and returns the root element.
  def start(): JQuery = {
    //Load / Save Stuff
    val loadSaveDiv = jq("""<div title="Views are customizable groups of columns" class="load-save"></div>""")
    val loadDiv = jq("<div></div>")
    val loadBtn = jq("""<a href="javascript:void(0)" class="ls-btn" title="Loads the view with the name">Load</a>""")
    val saveBtn = jq("""<a href="javascript:void(0)" class="ls-btn" title="Saves the currently visible columns as a view">Save</a>""")
    val deleteBtn = jq("""<a href="javascript:void(0)" class="ls-btn del" title="Deletes the current view">Delete</a>""")
    val saveWithFiltersBtn = jq("""<a href="javascript:void(0)" class="ls-btn" title="Saves the currently visible columns as well as any filters that are currently active">Save+Filters</a>""")
    loadSaveDiv.append("""<span title="Views are customizable groups of columns" class="view-span">Views: </span>""")
    loadSaveDiv.append(loadDiv)
    loadSaveDiv.append(loadBtn)
    loadSaveDiv.append(saveBtn)
    loadSaveDiv.append(saveWithFiltersBtn)
    loadSaveDiv.append(deleteBtn)
    // Shorthand for building untyped JS object literals passed to select2.
    val O = js.Dynamic.literal
    val loadSel = loadDiv.asJsDyn.select2(O(
      width = 220,
      placeholder = "Name Selected Columns",
      // select2 query callback: offer saved view names matching the typed term, plus a
      // "New: <term>" entry when the term matches no existing name.
      query = { (q: js.Dynamic) =>
        val names = LootViewSaver.getAllNames
        val term = q.term.asInstanceOf[String]
        val create = if (term.nonEmpty && names.forall(_.toLowerCase != term.toLowerCase)) {
          List(O(id = term, text = s"New: $term"))
        } else {
          Nil
        }
        val vs = (create ++ names.filter(_.toLowerCase.startsWith(term.toLowerCase)).map(n => O(id = n, text = n))).toJsArr
        q.callback(O(results = vs))
      }: js.Function
    ))
    // Load: show only the saved columns, then (when stored) restore column filters
    // and visible containers, refreshing after each restored group.
    loadBtn.on("click", () => {
      val name = loadSel.`val`().asInstanceOf[String]
      if (name != null && name.nonEmpty) {
        LootViewSaver.load(name)(colId => columns.get(colId)) foreach {
          case (cols, colFilters, conIds) =>
            columns.all.foreach(_.hide())
            cols.foreach(_.show())
            colFilters.foreach { colFilters =>
              filters.clearColumnFilters()
              colFilters.foreach { colFilter =>
                filters.addColFilter(colFilter)
              }
              filters.refresh()
            }
            conIds.foreach { conIds =>
              containers.all.foreach(_.hide())
              conIds.foreach { conId =>
                containers.get(conId).foreach(_.show())
              }
              filters.refresh()
            }
            Alerter.info(s"Loaded view: $name")
        }
        false
      }
    })
    // Save: persist only the currently visible columns under the chosen name (no filters).
    saveBtn.on("click", () => {
      val name = loadSel.`val`().asInstanceOf[String]
      if (name != null && name.nonEmpty) {
        LootViewSaver.save(name, columns.visible, None, None)
        Alerter.info(s"Saved view: $name")
      }
      false
    })
    // Save+Filters: also persist the current column filters and container filters.
    saveWithFiltersBtn.on("click", () => {
      val name = loadSel.`val`().asInstanceOf[String]
      if (name != null && name.nonEmpty) {
        LootViewSaver.save(
          name,
          columns.visible,
          columnFilters = Some(filters.columnFilters.values.toVector),
          containerFilters = Some(filters.containerFilters.toVector)
        )
        Alerter.info(s"Saved view: $name")
      }
      false
    })
    // Delete: remove the named view from storage.
    deleteBtn.on("click", () => {
      val name = loadSel.`val`().asInstanceOf[String]
      if (name != null && name.nonEmpty) {
        LootViewSaver.delete(name)
        Alerter.info(s"Deleted view: $name")
      }
      false
    })
    loadSaveDiv
  }
}
| benjaminjackman/looty | looty/src/main/scala/looty/views/LoadSavePane.scala | Scala | gpl-2.0 | 4,132 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.render
import laika.ast._
/** Default renderer implementation for the Formatted AST output format.
 *
 * @author Jens Halm
 */
object ASTRenderer extends ((TextFormatter, Element) => String) {
  /** The maximum width of a single text element.
   * For any text that exceeds this limit only the beginning
   * and end of the line will be displayed up to the maximum
   * number of characters allowed. This increases readability
   * for the majority of cases where primarily the document
   * structure is relevant.
   */
  val maxTextWidth = 50
  // Internal wrapper that renders a list of child elements under a synthetic heading.
  private case class Content (content: Seq[Element], desc: String, options: Options = NoOpt) extends Element with ElementContainer[Element] {
    type Self = Content
    def withOptions(options: Options): Content = copy(options = options)
  }
  // Renders a single AST element (and, recursively, its children) to a text outline.
  def apply (fmt: TextFormatter, element: Element): String = {
    // Sentinel: "no attribute excluded" marker for the attributes helper below.
    object NoRef
    // Renders non-empty id/styles of an Options value, e.g. "Id(x) + Styles(a,b)".
    def options (opt: Options): String = {
      List(
        opt.id map ("Id("+_+")"),
        if (opt.styles.isEmpty) None else Some(opt.styles.mkString("Styles(",",",")"))
      ) filter (_.isDefined) map (_.get) mkString " + "
    }
    // Renders the non-element constructor attributes of a node, skipping `exclude`
    // and empty options; returns "" when nothing remains.
    def attributes (attr: Iterator[Any], exclude: AnyRef = NoRef): String = {
      def prep (value: Any) = value match { case opt: Options => options(opt); case other => other }
      val it = attr.asInstanceOf[Iterator[AnyRef]]
      val res = it
        .filter(_ ne exclude)
        .filter(_ != NoOpt)
        .map(prep)
        .mkString ("(", ",", ")")
      if (res == "()") "" else res
    }
    // Renders a container node: its own attributes, a child-count summary, and its
    // children indented one level.
    def elementContainerDesc (con: ElementContainer[Element], elementType: String): String = {
      val (elements, rest) = con.productIterator partition (_.isInstanceOf[Element])
      val prefix = con.productPrefix + attributes(rest, con.content)
      val contentDesc = s" - $elementType: ${con.content.length.toString}"
      if (elements.nonEmpty) prefix + fmt.indentedChildren(elements.toList.asInstanceOf[Seq[Element]] ++
        List(Content(con.content, "Content" + contentDesc)))
      else prefix + contentDesc + fmt.indentedChildren(con.content)
    }
    // Renders a text node, truncating around the middle once it exceeds maxTextWidth.
    def textContainerDesc (con: TextContainer): String = {
      val start = con match {
        case CodeSpan(_, categories, _) =>
          val props = if (categories.isEmpty) ""
          else categories.toSeq.map(_.name).sorted.mkString("(", ", ", ")")
          s"CodeSpan$props - '"
        case _ =>
          con.productPrefix + attributes(con.productIterator, con.content) + " - '"
      }
      val text = con.content.replace("\\n", "|")
      val len = text.length
      if (len <= maxTextWidth) start + text + "'"
      else start + text.substring(0, maxTextWidth / 2) + " [...] " + text.substring(len - maxTextWidth / 2) + "'"
    }
    // Fallback rendering for any other node: prefix, attributes, indented element children.
    def renderElement (e: Element): String = {
      val (elements, rest) = e.productIterator partition (_.isInstanceOf[Element])
      e.productPrefix + attributes(rest) + fmt.indentedChildren(elements.toList.asInstanceOf[Seq[Element]])
    }
    // Renders a node with several named child lists, each under its own heading.
    def lists (desc: String, lists: (Seq[Element], String)*): String =
      desc + fmt.indentedChildren(lists map { case (elems, d) => Content(elems, d + elems.length) })
    // Dispatch: special-cased nodes first, then the generic container/text fallbacks.
    element match {
      case QuotedBlock(content,attr,_) => lists("QuotedBlock", (content, "Content - Blocks: "), (attr, "Attribution - Spans: "))
      case DefinitionListItem(term,defn,_)=> lists("Item", (term, "Term - Spans: "), (defn, "Definition - Blocks: "))
      case SectionNumber(pos, opt) => "SectionNumber" + attributes(Seq(pos.mkString("."), opt).iterator)
      case bc: BlockContainer => elementContainerDesc(bc, "Blocks")
      case sc: SpanContainer => elementContainerDesc(sc, "Spans")
      case tsc: TemplateSpanContainer => elementContainerDesc(tsc, "TemplateSpans")
      case tc: TextContainer => textContainerDesc(tc)
      case Content(content,desc,_) => desc + fmt.indentedChildren(content)
      case ec: ElementContainer[_] => elementContainerDesc(ec, "Elements")
      case e => renderElement(e)
    }
  }
}
| planet42/Laika | core/shared/src/main/scala/laika/render/ASTRenderer.scala | Scala | apache-2.0 | 4,762 |
package quizleague.domain
import java.time.Year
case class Season(
id:String,
startYear:Year,
endYear:Year,
text:Ref[Text],
calendar:List[CalendarEvent],
retired:Boolean = false
) extends Entity | gumdrop/quizleague-maintain | shared/src/main/scala/quizleague/domain/Season.scala | Scala | mit | 220 |
package sri.universal.components
import chandu0101.macros.tojs.JSMacro
import sri.core.{React, ReactNode}
import sri.universal.ReactUniversal
import scala.scalajs.js
// Facade for the React Native Modal component. Each field maps to a native prop;
// the JSMacro call converts the case class fields into the JS props object, so the
// field names/order must stay aligned with the native component's props.
case class Modal(visible: js.UndefOr[Boolean] = js.undefined,
                 style: js.UndefOr[js.Any] = js.undefined,
                 animationType: js.UndefOr[ModalAnimationType] = js.undefined,
                 onDismiss: js.UndefOr[() => _] = js.undefined,
                 ref: js.UndefOr[ModalM => _] = js.undefined,
                 key: js.UndefOr[String] = js.undefined,
                 transparent: js.UndefOr[Boolean] = js.undefined) {
  // Creates the React element for this modal with the given children.
  def apply(children: ReactNode*) = {
    val props = JSMacro[Modal](this)
    React.createElement(ReactUniversal.Modal,props,children: _*)
  }
}
// Mounted Modal instance methods/vars (none exposed yet); facade for the native component ref.
@js.native
trait ModalM extends js.Object
// Type-safe wrapper for the native "animationType" prop values; AnyVal avoids boxing,
// and the private constructor restricts instances to the companion's constants.
class ModalAnimationType private(val value : String) extends AnyVal
object ModalAnimationType {
val SLIDE = new ModalAnimationType("slide")
val FADE = new ModalAnimationType("fade")
val NONE = new ModalAnimationType("none")
} | chandu0101/sri | universal/src/main/scala/sri/universal/components/Modal.scala | Scala | apache-2.0 | 1,075 |
package com.landoop.streamreactor.connect.hive.parquet
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.kafka.connect.data.{Schema, Struct}
import org.apache.parquet.io.api.{Converter, GroupConverter}
import scala.collection.JavaConverters._
/**
 * Parquet group converter for the root record. Child converters accumulate field
 * values into a scratch map during a record read; end() assembles them into a
 * Kafka Connect Struct, exposed through `struct`.
 */
class RootGroupConverter(schema: Schema) extends GroupConverter with StrictLogging {
  require(schema.`type`() == Schema.Type.STRUCT)

  // Holds the struct for the most recently completed record; populated by end().
  var struct: Struct = _

  // Scratch buffer of field name -> converted value for the record being read.
  private val builder = scala.collection.mutable.Map.empty[String, Any]
  private val converters = schema.fields.asScala.map(Converters.get(_, builder)).toIndexedSeq

  override def getConverter(k: Int): Converter = converters(k)

  // A new record starts: discard any values left over from the previous one.
  override def start(): Unit = builder.clear()

  override def end(): Unit = struct = {
    val result = new Struct(schema)
    // foreach (not map): only the side effect of put matters; fields missing from the
    // builder are written as null. The original wrapped put in a try/catch that only
    // rethrew the same exception — removed as a no-op.
    schema.fields.asScala.foreach { field =>
      result.put(field, builder.getOrElse(field.name, null))
    }
    result
  }
}
| CodeSmell/stream-reactor | kafka-connect-hive/connector/src/main/scala/com/landoop/streamreactor/connect/hive/parquet/RootGroupConverter.scala | Scala | apache-2.0 | 1,013 |
/*******************************************************************
* See the NOTICE file distributed with this work for additional *
* information regarding Copyright ownership. The author and/or *
* authors license this file to you under the terms of the Apache *
* License Version 2.0 (the "License"); you may not use this file *
* except in compliance with the License. You may obtain a copy *
* of the License at: *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, *
* software distributed under the License is distributed on an *
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, *
* either express or implied. See the License for the specific *
* language governing permissions and limitations under the *
* License. *
*******************************************************************/
package org.scalatra.scalate.sbt
import sbt._
import plugins.JvmPlugin
// sbt AutoPlugin wiring for Scalate template support.
object SbtScalate extends AutoPlugin {
  // Requires the JVM plugin and must be enabled explicitly by projects (noTrigger).
  override val requires: Plugins = JvmPlugin
  override val trigger: PluginTrigger = noTrigger
  // Keys auto-imported into build definitions of projects that enable this plugin.
  val autoImport = ScalateImport
  // Default settings contributed to every project that enables the plugin.
  override val projectSettings: Seq[Setting[_]] = ScalateDefaults.scalateSettings
}
| arashi01/sbt-scalate | src/main/scala/org/scalatra/scalate/sbt/SbtScalate.scala | Scala | apache-2.0 | 1,504 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.server
import java.util.concurrent.atomic.AtomicInteger
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.Materializer
import com.lightbend.lagom.internal.scaladsl.server.ScaladslServiceRouter
import com.lightbend.lagom.scaladsl.api.transport._
import com.lightbend.lagom.scaladsl.api.Service
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.server.mocks._
import com.lightbend.lagom.scaladsl.server.testkit.FakeRequest
import org.scalatest.AsyncFlatSpec
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import play.api.http.HttpConfiguration
import play.api.mvc
import play.api.mvc.Handler
import play.api.mvc.PlayBodyParsers
import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration._
/**
* This test relies on DefaultExceptionSerializer so in case of failure some information is lost on de/ser. Check the
* status code of the response (won't be 200) and locate the suspect line of code where that status code is launched.
*/
class ScaladslStrictServiceRouterSpec extends AsyncFlatSpec with Matchers with BeforeAndAfterAll {
private val system = ActorSystem("ScaladslServiceRouterSpec")
private implicit val ec: ExecutionContext = system.dispatcher
private implicit val mat: Materializer = ActorMaterializer.create(system)
protected override def afterAll(): Unit = {
Await.ready(system.terminate(), 10.seconds)
super.afterAll()
}
behavior.of("ScaladslServiceRouter")
it should "serve a non-filtered Strict request" in {
// this test is canary
val hardcodedResponse = "a response"
val service = new SimpleStrictService {
override def simpleGet(): ServiceCall[NotUsed, String] = ServiceCall { _ =>
Future.successful(hardcodedResponse)
}
}
val x: mvc.EssentialAction => mvc.RequestHeader => Future[mvc.Result] = { (action) => (rh) =>
action(rh).run()
}
runRequest(service)(x) {
_ should be(mvc.Results.Ok(hardcodedResponse))
}
}
it should "propagate headers altered by a Play Filter down to the ServiceImpl. [String message]" in {
// this test makes sure headers in request and response are added and they are added in the appropriate order.
// This test only uses Play filters.
val atomicInt = new AtomicInteger(0)
val hardcodedResponse = "a response"
val service = new SimpleStrictService {
override def simpleGet(): ServerServiceCall[NotUsed, String] = ServerServiceCall { (reqHeader, _) =>
Future {
reqHeader.getHeader(VerboseHeaderPlayFilter.addedOnRequest) should be(Some("1"))
}.recoverWith {
case t => Future.failed(BadRequest(s"Assertion failed: ${t.getMessage}"))
}
.map { _ =>
(ResponseHeader.Ok.withHeader("in-service", atomicInt.incrementAndGet().toString), hardcodedResponse)
}
}
}
val x: mvc.EssentialAction => mvc.RequestHeader => Future[mvc.Result] = { (action) =>
new VerboseHeaderPlayFilter(atomicInt, mat).apply(rh => action(rh).run())
}
runRequest(service)(x) {
_ should be(
mvc.Results
.Ok(hardcodedResponse)
.withHeaders(
("in-service", "2"),
(VerboseHeaderPlayFilter.addedOnResponse, "3")
)
)
}
}
it should "propagate headers altered by a Play Filter and a Lagom HeaderFilter down to the ServiceImpl (invoking Play Filter first). [String message]" in {
  // This test makes sure headers in request and response are added, and that they are added
  // in the appropriate order: Play filter first, then the Lagom HeaderFilter, then the service.
  // The shared counter is stamped into headers at each step, so values encode execution order.
  val atomicInt = new AtomicInteger(0)
  val hardcodedResponse = "a response"
  val service = new FilteredStrictService(atomicInt) {
    override def simpleGet(): ServerServiceCall[NotUsed, String] = ServerServiceCall { (reqHeader, _) =>
      Future {
        // Both request headers must already be present: "1" from the Play filter and "2"
        // from the Lagom filter, proving the Play filter ran first.
        (
          reqHeader.getHeader(VerboseHeaderPlayFilter.addedOnRequest),
          reqHeader.getHeader(VerboseHeaderLagomFilter.addedOnRequest)
        ) should be(
          (Some("1"), Some("2"))
        )
        // When this assertion fails, the AssertionException is mapped to a BadRequest but the
        // exception serializer looses the exception message. Use the status code to locate the
        // cause of failure.
        // "1" and "2" are set on play filter and lagom filter respectively
      }.recoverWith {
        case t => Future.failed(BadRequest(s"Assertion failed: ${t.getMessage}"))
      }
        .map { _ =>
          // if both headers are present, OK is returned with a new header from the service.
          // the filters will add two more headers.
          (ResponseHeader.Ok.withHeader("in-service", atomicInt.incrementAndGet().toString), hardcodedResponse)
        }
    }
  }
  // Wraps the routed EssentialAction in the Play filter under test; the Lagom HeaderFilter
  // presumably comes from FilteredStrictService — confirm against its descriptor.
  val x: mvc.EssentialAction => mvc.RequestHeader => Future[mvc.Result] = { (action) =>
    new VerboseHeaderPlayFilter(atomicInt, mat).apply(rh => action(rh).run())
  }
  runRequest(service)(x) {
    _ should be(
      // when everything works as expected, the service receives 2 headers with values '1' and '2' and responds
      // with three headers '3', '4' and '5'. In case of failure, some headers may still be added on the way out
      // so make sure to check the status code on the response for more details on the cause of the error.
      mvc.Results
        .Ok(hardcodedResponse)
        .withHeaders(
          ("in-service", "3"), // when this is missing it means the ServiceImpl code failed == missing request headers
          (VerboseHeaderLagomFilter.addedOnResponse, "4"),
          (VerboseHeaderPlayFilter.addedOnResponse, "5")
        )
    )
  }
}
// ---------------------------------------------------------------------------------------------------
/**
 * Builds a router for `service`, issues a fake GET against `PathProvider.PATH`,
 * resolves the handler to an `EssentialAction`, runs it through `x` (typically a
 * Play-filter wrapper) and finally applies `block` to the resulting `Result`.
 */
private def runRequest[T](
  service: Service
)(x: mvc.EssentialAction => mvc.RequestHeader => Future[mvc.Result])(block: mvc.Result => T): Future[T] = {
  val httpConfig = HttpConfiguration.createWithDefaults()
  val parsers = PlayBodyParsers()
  val router = new ScaladslServiceRouter(service.descriptor, service, httpConfig, parsers)
  val req: mvc.Request[NotUsed] = new FakeRequest(method = "GET", path = PathProvider.PATH)
  val handler = router.routes(req)
  // Anything other than an EssentialAction coming out of the router is a test bug.
  val futureResult: Future[mvc.Result] = Handler.applyStages(req, handler) match {
    case (_, action: mvc.EssentialAction) => x(action)(req)
    case _ => Future.failed(new AssertionError("Not an EssentialAction."))
  }
  futureResult.map(block)
}
}
| lagom/lagom | service/scaladsl/server/src/test/scala/com/lightbend/lagom/scaladsl/server/ScaladslStrictServiceRouterSpec.scala | Scala | apache-2.0 | 6,945 |
package se.gigurra.gpt.shmtransmitter
import se.culvertsoft.mnet.{Message, NodeSettings}
import se.culvertsoft.mnet.backend.WebsockBackendSettings
import se.culvertsoft.mnet.client.MNetClient
import se.gigurra.gpt.common.{NetworkNames, ReadConfigFile, SaveConfigFile, Serializer, SharedMemory}
import se.gigurra.gpt.model.shm.common.ShmMsg
import se.gigurra.gpt.model.shm.transmitter.ShmTransmitterCfg
import scala.collection.JavaConversions.{asScalaBuffer, bufferAsJavaList}
import scala.collection.mutable.{ArrayBuffer, HashMap}
/**
 * Standalone app that periodically snapshots a set of shared-memory segments and
 * pushes their contents, as serialized [[ShmMsg]] messages, to every connected
 * SHM_RECEIVER route of an MNet client.
 */
object ShmTransmitter {

  // One reusable read buffer per shared-memory segment, allocated in openShms
  // and sized to the segment so no per-iteration allocation is needed.
  val readBuffers = new HashMap[SharedMemory, Array[Byte]]
  // Latest serialized message per segment, rebuilt on every loop iteration.
  val msgs = new HashMap[SharedMemory, Message]

  /**
   * Opens every named shared-memory segment, blocking until each one becomes
   * valid (re-trying roughly once per second), and allocates a read buffer
   * matching each segment's size.
   */
  def openShms(names: Seq[String]) = {

    def tryOpenShm(name: String) = {
      new SharedMemory(name, 0, false)
    }

    println(s"Opening ${names.length} shared memories:")
    val shms = new ArrayBuffer[SharedMemory]()
    for (s <- names) {
      print(s"Opening '$s'...")
      var shm = tryOpenShm(s)
      // Busy-wait: reopen, then pause 1s, until the segment reports itself valid.
      while (!shm.valid) {
        shm = tryOpenShm(s)
        Thread.sleep(1000)
      }
      println(s"done! (${shm.size} bytes)")
      shms += shm
      readBuffers.put(shm, new Array[Byte](shm.size))
    }
    shms
  }

  /**
   * Entry point: loads (or creates) the transmitter configuration, connects an
   * MNet client to the configured targets, opens the configured shared-memory
   * segments and then loops forever (~15 ms period) snapshotting each segment
   * and sending it to every connected SHM_RECEIVER route.
   */
  def main(args: Array[String]) {

    val cfgFileName = "gpt-shmtransmitter-cfg.json"
    // Fall back to a default config if the file is missing/unreadable, then write
    // it back so a template exists on disk after the first run.
    val cfg = ReadConfigFile[ShmTransmitterCfg](cfgFileName).getOrElse(new ShmTransmitterCfg)
    SaveConfigFile(cfgFileName, cfg)

    val nodeSettings = new NodeSettings().setName(NetworkNames.SHM_TRANSMITTER)
    // Outbound-only websocket client: no listen port, connect to configured targets.
    val wsSettings = new WebsockBackendSettings().unsetListenPort()
    wsSettings.getConnectTo().addAll(cfg.getTargets.map(se.gigurra.gpt.common.NetworkAddr2Url.apply))
    val client = new MNetClient(wsSettings, nodeSettings).start()

    val shms = openShms(cfg.getShms)

    while (true) {

      // Create shm msgs: snapshot each segment into its buffer and serialize.
      for (shm <- shms) {
        val readBuf = readBuffers(shm)
        shm.read(readBuf, readBuf.length)
        val msg = Serializer.writeBinary(new ShmMsg()
          .setData(readBuffers(shm))
          .setName(shm.name)
          .setSize(shm.size))
          .setSenderId(client.id)
        msgs.put(shm, msg)
      }

      // Send shms, but only to connected receiver routes with no buffered data
      // still pending (skips slow consumers instead of queueing behind them).
      for (route <- client.getRoutes) {
        if (route.isConnected && !route.hasBufferedData && route.name == NetworkNames.SHM_RECEIVER) {
          for (shm <- shms) {
            route.send(msgs(shm).setTargetId(route.endpointId))
          }
        }
      }

      Thread.sleep(15)
    }
  }
}
| GiGurra/gpt | gpt-shmtransmitter/src/main/scala/se/gigurra/gpt/shmtransmitter/ShmTransmitter.scala | Scala | gpl-2.0 | 2,477 |
package io.github.tailhq.dynaml.modelpipe
import breeze.linalg.{DenseMatrix, DenseVector}
import breeze.stats.distributions.{ContinuousDistr, Moments}
import io.github.tailhq.dynaml.algebra.{PartitionedPSDMatrix, PartitionedVector}
import io.github.tailhq.dynaml.models.gp.AbstractGPRegressionModel
import io.github.tailhq.dynaml.models.stp.{AbstractSTPRegressionModel, MVStudentsTModel}
import io.github.tailhq.dynaml.models.{
ContinuousProcessModel, GenContinuousMixtureModel,
SecondOrderProcessModel, StochasticProcessMixtureModel}
import io.github.tailhq.dynaml.optimization.GloballyOptimizable
import io.github.tailhq.dynaml.pipes.DataPipe2
import io.github.tailhq.dynaml.probability.{ContinuousRVWithDistr, MatrixTRV, MultGaussianPRV, MultStudentsTPRV}
import io.github.tailhq.dynaml.probability.distributions.{
BlockedMultiVariateGaussian, BlockedMultivariateStudentsT,
HasErrorBars, MatrixT}
import scala.reflect.ClassTag
/**
  * Mixture Pipe takes a sequence of stochastic process models
  * and associated probability weights and returns a mixture model.
  *
  * @tparam T Type of the models' training data.
  * @tparam I Index set / input domain of the underlying processes.
  * @tparam Y Output type of a single model prediction.
  * @tparam YDomain Domain of the joint (finite-dimensional) predictive distribution.
  * @tparam YDomainVar Type representing the second moment (variance) over `YDomain`.
  * @tparam BaseDistr Predictive distribution type: must have moments and error bars.
  * @tparam W1 Random-variable wrapper around `BaseDistr`.
  * @tparam BaseProcess Concrete process model type being mixed; must be a
  *                     continuous, second-order, globally optimizable process.
  * @author tailhq date 22/06/2017.
  * */
abstract class MixturePipe[
  T, I: ClassTag, Y, YDomain, YDomainVar,
  BaseDistr <: ContinuousDistr[YDomain]
    with Moments[YDomain, YDomainVar]
    with HasErrorBars[YDomain],
  W1 <: ContinuousRVWithDistr[YDomain, BaseDistr],
  BaseProcess <: ContinuousProcessModel[T, I, Y, W1]
    with SecondOrderProcessModel[T, I, Y, Double, DenseMatrix[Double], W1]
    with GloballyOptimizable] extends
  DataPipe2[Seq[BaseProcess], DenseVector[Double],
    GenContinuousMixtureModel[
      T, I, Y, YDomain, YDomainVar,
      BaseDistr, W1, BaseProcess]]
/** [[MixturePipe]] specialised to Gaussian process regression models:
  * combines [[AbstractGPRegressionModel]] instances into a GP mixture. */
class GPMixturePipe[T, I: ClassTag] extends
  MixturePipe[T, I, Double, PartitionedVector, PartitionedPSDMatrix,
    BlockedMultiVariateGaussian, MultGaussianPRV,
    AbstractGPRegressionModel[T, I]] {

  // Delegates construction to the StochasticProcessMixtureModel factory.
  override def run(
    models: Seq[AbstractGPRegressionModel[T, I]],
    weights: DenseVector[Double]) =
    StochasticProcessMixtureModel(models, weights)
}
/** [[MixturePipe]] specialised to Student's T process regression models:
  * combines [[AbstractSTPRegressionModel]] instances into a mixture. */
class StudentTMixturePipe[T, I: ClassTag] extends
  MixturePipe[T, I, Double, PartitionedVector, PartitionedPSDMatrix,
    BlockedMultivariateStudentsT, MultStudentsTPRV,
    AbstractSTPRegressionModel[T, I]] {

  // Delegates construction to the StochasticProcessMixtureModel factory.
  override def run(
    models: Seq[AbstractSTPRegressionModel[T, I]],
    weights: DenseVector[Double]) =
    StochasticProcessMixtureModel(models, weights)
}
/** [[MixturePipe]] specialised to multi-output (matrix-valued) Student's T
  * models: combines [[MVStudentsTModel]] instances into a mixture. */
class MVStudentsTMixturePipe[T, I: ClassTag] extends
  MixturePipe[
    T, I, DenseVector[Double], DenseMatrix[Double],
    (DenseMatrix[Double], DenseMatrix[Double]),
    MatrixT, MatrixTRV,
    MVStudentsTModel[T, I]] {

  // Delegates construction to the StochasticProcessMixtureModel factory.
  override def run(
    models: Seq[MVStudentsTModel[T, I]],
    weights: DenseVector[Double]) =
    StochasticProcessMixtureModel(models, weights)
}
| mandar2812/DynaML | dynaml-core/src/main/scala/io/github/tailhq/dynaml/modelpipe/MixturePipe.scala | Scala | apache-2.0 | 2,756 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kafka
import java.io.Closeable
import kafka.server.KafkaConfig
import kafka.utils.TestUtils
import kafka.zk.EmbeddedZookeeper
import org.apache.kafka.common.network.ListenerName
import org.locationtech.geomesa.utils.io.PathUtils
/**
 * In-process Kafka broker (with an embedded Zookeeper) for use in tests.
 * Connect strings are exposed via `zookeepers` and `brokers`; call `close()`
 * to tear everything down and delete the broker's temporary log directory.
 */
class EmbeddedKafka extends Closeable {

  private val zookeeper = new EmbeddedZookeeper()
  // Zookeeper connect string for the embedded instance.
  val zookeepers = s"127.0.0.1:${zookeeper.port}"

  // Temporary directory holding the broker's log segments; removed in close().
  private val logs = TestUtils.tempDir()

  private val server = {
    val config = TestUtils.createBrokerConfig(1, zookeepers)
    // Single partition for the internal offsets topic; random free port; allow
    // topic deletion so tests can clean up after themselves.
    config.setProperty("offsets.topic.num.partitions", "1")
    config.setProperty("listeners", s"PLAINTEXT://127.0.0.1:${TestUtils.RandomPort}")
    config.setProperty("log.dirs", logs.getAbsolutePath)
    config.setProperty("delete.topic.enable", "true")
    TestUtils.createServer(new KafkaConfig(config))
  }

  // Broker connect string, using whatever port the PLAINTEXT listener bound to.
  val brokers = s"127.0.0.1:${server.boundPort(ListenerName.normalised("PLAINTEXT"))}"

  // for kafka 1.0.0:
  // import org.apache.kafka.common.network.ListenerName
  // import org.apache.kafka.common.security.auth.SecurityProtocol
  // val brokers = s"127.0.0.1:${server.socketServer.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT))}"

  // Best-effort shutdown: failures in either component are deliberately
  // swallowed so one broken teardown step never masks the other or the test result.
  override def close(): Unit = {
    try { server.shutdown() } catch { case _: Throwable => }
    try { zookeeper.shutdown() } catch { case _: Throwable => }
    PathUtils.deleteRecursively(logs.toPath)
  }
}
| aheyne/geomesa | geomesa-kafka/geomesa-kafka-datastore/src/test/scala/org/locationtech/geomesa/kafka/EmbeddedKafka.scala | Scala | apache-2.0 | 1,885 |
package com.outr.iconsole
import io.youi.app.{CommunicationManager, YouIApplication}
trait IConsoleApplication extends YouIApplication {
  // Communication manager for IConsoleCommunication, registered with the
  // application's connectivity layer. NOTE(review): presumably bridges the
  // console between server and client — confirm against youi's docs.
  val iConsoleCommunication: CommunicationManager[IConsoleCommunication] = connectivity.communication[IConsoleCommunication]
}
package sims.test.gui
import processing.core.PApplet
import scala.collection.mutable.ArrayBuffer
import sims.math._
import sims.test.gui.scenes._
import sims.test.gui.RichShape._
import sims.test.gui.RichJoint._
/**
 * Keeps track of the active demo [[Scene]] and mirrors its physics world into
 * renderable [[Graphical]] objects. Assigning to `currentScene` performs the
 * full teardown of the old scene and wiring/initialisation of the new one.
 */
class SceneManager(implicit top: PApplet) {

  /* Contains objects that will be rendered on `draw()`. */
  private var _graphicals = new ArrayBuffer[Graphical[_]]
  def graphicals: Seq[Graphical[_]] = _graphicals

  /* Current scene. */
  private var _currentScene: Scene = EmptyScene

  /* Get current scene. */
  def currentScene = _currentScene

  /* Set current scene: tear down the old one (reactions, world contents,
   * graphical mirrors, custom exit hook), subscribe to the new scene's world
   * so added/removed bodies and joints keep `_graphicals` in sync, then run
   * the new scene's init() and make it current. Order matters here. */
  def currentScene_=(newScene: Scene) = {
    // remove reactions
    currentScene.deafTo(currentScene.world)
    currentScene.reactions.clear()
    // empty world
    currentScene.world.clear()
    // clear graphical objects
    _graphicals.clear()
    // custom exit behavior
    currentScene.exit()
    // add new reactions to create / remove graphical objects
    newScene.listenTo(newScene.world)
    newScene.reactions += {
      case BodyAdded(newScene.world, body) => for (s <- body.shapes) _graphicals += s.toGraphical
      case BodyRemoved(newScene.world, body) => for (s <- body.shapes) {
        // locate the graphical that mirrors this physical shape and drop it
        val index = _graphicals.findIndexOf((g: Graphical[_]) => g match {
          case gs: GraphicalShape => gs.physical == s
          case _ => false
        })
        _graphicals.remove(index)
      }
      case JointAdded(newScene.world, joint) => _graphicals += joint.toGraphical
      case JointRemoved(newScene.world, joint) => {
        // locate the graphical that mirrors this joint and drop it
        val index = _graphicals.findIndexOf((g: Graphical[_]) => g match {
          case gj: GraphicalJoint => gj.physical == joint
          case _ => false
        })
        _graphicals.remove(index)
      }
    }
    // custom initialization
    newScene.init()
    // set current scene
    _currentScene = newScene
    println("set scene to '" + currentScene.name + "'")
  }

  // Index into `scenes`; wrapped with `mod` so next/previous cycle forever.
  private var currentSceneIndex = 0

  // All selectable demo scenes, in cycling order.
  val scenes = List(
    BasicScene,
    CollisionScene,
    LongCollisionScene,
    CloudScene,
    PyramidScene,
    ShiftedStackScene,
    JointScene
  )

  /** Advances to the next scene (wraps around). */
  def nextScene() = {
    currentSceneIndex += 1
    currentScene = scenes(mod(currentSceneIndex, scenes.length))
  }

  /** Steps back to the previous scene (wraps around). */
  def previousScene() = {
    currentSceneIndex -= 1
    currentScene = scenes(mod(currentSceneIndex, scenes.length))
  }

  /** Re-runs teardown + init for the current scene via the setter. */
  def restartScene() = {
    currentScene = currentScene
  }
}
package io.scalac.amqp
/** A consumer's response to a delivered message, identified by its delivery tag. */
sealed trait Confirm

/** Acknowledges the message identified by `tag`. */
final case class Ack(tag: DeliveryTag) extends Confirm

/** Rejects the message identified by `tag`. */
final case class Reject(tag: DeliveryTag) extends Confirm

/** Rejects the message identified by `tag`, requesting redelivery.
  * NOTE(review): the exact broker-side requeue semantics are not visible here — confirm. */
final case class Requeue(tag: DeliveryTag) extends Confirm
/*
* The MIT License
*
* Copyright (c) 2016 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package dagr.tasks.gatk
import dagr.tasks.DagrDef
import DagrDef._
import scala.collection.mutable.ListBuffer
/** Factory methods for building [[GenotypeGvcfs]] tasks. */
object GenotypeGvcfs {
  /** Constructs a GenotypeGvcfs for genotyping multiple samples concurrently. */
  def apply(ref: PathToFasta, intervals: Option[PathToIntervals], gvcfs: Seq[PathToVcf], vcf: PathToVcf, dbSnpVcf: Option[PathToVcf]): GenotypeGvcfs =
    new GenotypeGvcfs(ref, intervals, gvcfs, vcf, dbSnpVcf)

  /** Constructs a GenotypeGvcfs for genotyping a single sample. */
  def apply(ref: PathToFasta, intervals: Option[PathToIntervals], gvcf: PathToVcf, vcf: PathToVcf, dbSnpVcf: Option[PathToVcf]): GenotypeGvcfs =
    apply(ref, intervals, gvcfs = Seq(gvcf), vcf = vcf, dbSnpVcf = dbSnpVcf)
}
/** Genotypes one or more GVCFs concurrently.
  *
  * @param ref       the reference FASTA.
  * @param intervals optional intervals restricting genotyping.
  * @param gvcfs     the input GVCFs (exactly one when running GATK 4+).
  * @param vcf       the output VCF path.
  * @param dbSnpVcf  optional dbSNP VCF passed via `--dbsnp`.
  */
class GenotypeGvcfs private (ref: PathToFasta,
                             intervals: Option[PathToIntervals],
                             val gvcfs: Seq[PathToVcf],
                             val vcf: PathToVcf,
                             val dbSnpVcf: Option[PathToVcf] = None)
  extends GatkTask("GenotypeGVCFs", ref, intervals=intervals) {

  // GATK 4+ no longer accepts multiple GVCF inputs for GenotypeGVCFs.
  require(gvcfs.length == 1 || gatkMajorVersion < 4, "GenotypeGVCFs only supports one GVCF at a time with GATK version 4+.")

  /** Appends the walker-specific command-line arguments to `buffer`. */
  override protected def addWalkerArgs(buffer: ListBuffer[Any]): Unit = {
    // Args that are common to all versions
    dbSnpVcf.foreach(v => buffer.append("--dbsnp", v.toAbsolutePath))
    gvcfs.foreach(gvcf => buffer.append("-V", gvcf.toAbsolutePath))

    // The output flag was renamed between GATK 3 ("--out") and GATK 4 ("--output").
    gatkMajorVersion match {
      case n if n < 4 =>
        buffer.append("--out", vcf.toAbsolutePath)
      case n if n >= 4 =>
        buffer.append("--output", vcf.toAbsolutePath)
    }
  }
}
| fulcrumgenomics/dagr | tasks/src/main/scala/dagr/tasks/gatk/GenotypeGvcfs.scala | Scala | mit | 2,854 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.pipes
import org.neo4j.cypher.internal.compiler.v2_3.spi.{Operations, QueryContext}
import org.neo4j.cypher.internal.frontend.v2_3.test_helpers.CypherFunSuite
import org.neo4j.graphdb.{Node, Relationship}
import org.mockito.Mockito
import org.neo4j.cypher.internal.compiler.v2_3.commands.expressions.{Collection, Literal}
import org.neo4j.cypher.internal.compiler.v2_3.ExecutionContext
/**
 * Unit tests for DirectedRelationshipByIdSeekPipe: verifies single-id seeks,
 * multi-id seeks, and null-id handling, using mocked query-context operations.
 */
class DirectedDirectedRelationshipByIdSeekPipeTest extends CypherFunSuite {

  implicit val monitor = mock[PipeMonitor]

  import Mockito.when

  test("should seek relationship by id") {
    // given: a query context whose relationshipOps resolves id 17 to a mocked relationship
    val (startNode, rel, endNode) = getRelWithNodes
    val relOps = when(mock[Operations[Relationship]].getById(17)).thenReturn(rel).getMock[Operations[Relationship]]
    val to = "to"
    val from = "from"
    val queryState = QueryStateHelper.emptyWith(
      query = when(mock[QueryContext].relationshipOps).thenReturn(relOps).getMock[QueryContext]
    )

    // when
    val result: Iterator[ExecutionContext] =
      DirectedRelationshipByIdSeekPipe("a", SingleSeekArg(Literal(17)), to, from)().createResults(queryState)

    // then: the pipe binds the relationship plus its start/end nodes
    result.toList should equal(List(Map("a" -> rel, "to" -> endNode, "from" -> startNode)))
  }

  test("should seek relationships by multiple ids") {
    // given: ids 42 and 21 resolve to two distinct mocked relationships
    val (s1, r1, e1) = getRelWithNodes
    val (s2, r2, e2) = getRelWithNodes
    val relationshipOps = mock[Operations[Relationship]]
    val to = "to"
    val from = "from"

    when(relationshipOps.getById(42)).thenReturn(r1)
    when(relationshipOps.getById(21)).thenReturn(r2)
    val queryState = QueryStateHelper.emptyWith(
      query = when(mock[QueryContext].relationshipOps).thenReturn(relationshipOps).getMock[QueryContext]
    )
    val relName = "a"

    // whens
    val result =
      DirectedRelationshipByIdSeekPipe(relName, ManySeekArgs(Collection(Literal(42), Literal(21))), to, from)().createResults(queryState)

    // then: one row per id, in the order the ids were supplied
    result.toList should equal(List(
      Map(relName -> r1, to -> e1, from -> s1),
      Map(relName -> r2, to -> e2, from -> s2)
    ))
  }

  test("handle null") {
    // given: no stubbing — a null seek id should never reach relationshipOps
    val to = "to"
    val from = "from"
    val relationshipOps = mock[Operations[Relationship]]
    val queryState = QueryStateHelper.emptyWith(
      query = when(mock[QueryContext].relationshipOps).thenReturn(relationshipOps).getMock[QueryContext]
    )

    // when
    val result: Iterator[ExecutionContext] =
      DirectedRelationshipByIdSeekPipe("a", SingleSeekArg(Literal(null)), to, from)().createResults(queryState)

    // then: a null id yields no rows rather than an error
    result.toList should be(empty)
  }

  // Builds a mocked relationship wired to mocked start/end nodes.
  private def getRelWithNodes:(Node,Relationship,Node) = {
    val rel = mock[Relationship]
    val startNode = mock[Node]
    val endNode = mock[Node]

    when(rel.getStartNode).thenReturn(startNode)
    when(rel.getEndNode).thenReturn(endNode)

    (startNode, rel, endNode)
  }
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/test/scala/org/neo4j/cypher/internal/compiler/v2_3/pipes/DirectedDirectedRelationshipByIdSeekPipeTest.scala | Scala | apache-2.0 | 3,726 |
package org.scaladebugger.api.lowlevel.requests.properties
import org.scaladebugger.api.lowlevel.requests.properties.processors.EnabledPropertyProcessor
/**
 * Represents an argument used to set the enabled status of a JDI request.
 *
 * @param value The value to use for the enabled status of the request
 */
case class EnabledProperty(value: Boolean) extends JDIRequestProperty {
  /**
   * Creates a new JDI request processor based on this property.
   *
   * @return The new JDI request processor instance
   */
  override def toProcessor: JDIRequestPropertyProcessor =
    new EnabledPropertyProcessor(this)
}
| chipsenkbeil/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/lowlevel/requests/properties/EnabledProperty.scala | Scala | apache-2.0 | 612 |
package com.twitter.finagle.builder
import scala.collection.mutable.HashSet
import scala.collection.JavaConversions._
import java.util.concurrent.Executors
import java.util.logging.Logger
import java.net.SocketAddress
import javax.net.ssl.{SSLContext, SSLEngine}
import org.jboss.netty.bootstrap.ServerBootstrap
import org.jboss.netty.channel._
import org.jboss.netty.channel.socket.nio._
import org.jboss.netty.handler.ssl._
import org.jboss.netty.handler.timeout.ReadTimeoutHandler
import com.twitter.util.Duration
import com.twitter.conversions.time._
import com.twitter.finagle._
import com.twitter.finagle.channel.{
WriteCompletionTimeoutHandler, ChannelStatsHandler,
ChannelRequestStatsHandler, ChannelOpenConnectionsHandler,
OpenConnectionsHealthThresholds}
import com.twitter.finagle.health.{HealthEvent, NullHealthEventCallback}
import com.twitter.finagle.tracing.{Tracer, TracingFilter, NullTracer}
import com.twitter.finagle.util.Conversions._
import com.twitter.finagle.util._
import com.twitter.finagle.util.Timer._
import com.twitter.util.{Future, Promise}
import com.twitter.concurrent.AsyncSemaphore
import channel.{ChannelClosingHandler, ServiceToChannelHandler, ChannelSemaphoreHandler}
import service.{ExpiringService, TimeoutFilter, StatsFilter, ProxyService}
import stats.{StatsReceiver, NullStatsReceiver, GlobalStatsReceiver}
import exception._
import ssl.{Engine, Ssl, SslIdentifierHandler, SslShutdownHandler}
/** Handle to a bound, running server produced by [[ServerBuilder.build]]. */
trait Server {
  /**
   * Close the underlying server gracefully with the given grace
   * period. close() will drain the current channels, waiting up to
   * ``timeout'', after which channels are forcibly closed.
   *
   * @param timeout maximum time to wait for in-flight channels to drain
   *                before forcing them closed (defaults to unbounded).
   */
  def close(timeout: Duration = Duration.MaxValue)
}
/**
* Factory for [[com.twitter.finagle.builder.ServerBuilder]] instances
*/
/**
 * Factory for [[com.twitter.finagle.builder.ServerBuilder]] instances
 */
object ServerBuilder {
  // A builder whose three mandatory fields (codec, bindTo, name) are all set,
  // and which may therefore be passed to safeBuild.
  type Complete[Req, Rep] = ServerBuilder[
    Req, Rep, ServerConfig.Yes,
    ServerConfig.Yes, ServerConfig.Yes]

  def apply() = new ServerBuilder()
  // Java-friendly alias for apply().
  def get() = apply()

  /**
   * Provides a typesafe `build` for Java.
   */
  def safeBuild[Req, Rep](service: Service[Req, Rep], builder: Complete[Req, Rep]): Server =
    builder.build(service)

  // Shared default channel factory: a lazily-created, reference-counted NIO
  // server socket factory backed by cached thread pools for boss and workers.
  val defaultChannelFactory =
    new ReferenceCountedChannelFactory(
      new LazyRevivableChannelFactory(() =>
        new NioServerSocketChannelFactory(
          Executors.newCachedThreadPool(),
          Executors.newCachedThreadPool())))
}
object ServerConfig {
  // Phantom type used as compile-time evidence that a mandatory builder field
  // (codec, bindTo, or name) has been supplied.
  sealed abstract trait Yes
  type FullySpecified[Req, Rep] = ServerConfig[Req, Rep, Yes, Yes, Yes]
}
/**
 * A configuration object that represents what shall be built.
 *
 * The phantom type parameters `HasCodec`, `HasBindTo` and `HasName` track at
 * compile time whether the corresponding mandatory field has been supplied
 * (see [[ServerConfig.Yes]]); `validated` performs the same check at runtime.
 */
final case class ServerConfig[Req, Rep, HasCodec, HasBindTo, HasName](
  private val _codecFactory: Option[CodecFactory[Req, Rep]#Server] = None,
  private val _statsReceiver: Option[StatsReceiver] = None,
  private val _exceptionReceiver: Option[ServerExceptionReceiverBuilder] = None,
  private val _name: Option[String] = None,
  private val _sendBufferSize: Option[Int] = None,
  private val _recvBufferSize: Option[Int] = None,
  private val _keepAlive: Option[Boolean] = None,
  private val _backlog: Option[Int] = None,
  private val _bindTo: Option[SocketAddress] = None,
  private val _logger: Option[Logger] = None,
  private val _tls: Option[(String, String, String, String)] = None,
  private val _channelFactory: ReferenceCountedChannelFactory = ServerBuilder.defaultChannelFactory,
  private val _maxConcurrentRequests: Option[Int] = None,
  private val _healthEventCallback: HealthEvent => Unit = NullHealthEventCallback,
  private val _hostConnectionMaxIdleTime: Option[Duration] = None,
  private val _hostConnectionMaxLifeTime: Option[Duration] = None,
  private val _openConnectionsHealthThresholds: Option[OpenConnectionsHealthThresholds] = None,
  private val _requestTimeout: Option[Duration] = None,
  private val _readTimeout: Option[Duration] = None,
  private val _writeCompletionTimeout: Option[Duration] = None,
  private val _tracerFactory: Tracer.Factory = () => NullTracer)
{
  import ServerConfig._

  /**
   * The Scala compiler errors if the case class members don't have underscores.
   * Nevertheless, we want a friendly public API so we create delegators without
   * underscores.
   */
  val codecFactory = _codecFactory
  val statsReceiver = _statsReceiver
  val exceptionReceiver = _exceptionReceiver
  val name = _name
  val sendBufferSize = _sendBufferSize
  val recvBufferSize = _recvBufferSize
  val keepAlive = _keepAlive
  val backlog = _backlog
  val bindTo = _bindTo
  val logger = _logger
  val tls = _tls
  val channelFactory = _channelFactory
  val maxConcurrentRequests = _maxConcurrentRequests
  val healthEventCallback = _healthEventCallback
  val hostConnectionMaxIdleTime = _hostConnectionMaxIdleTime
  val hostConnectionMaxLifeTime = _hostConnectionMaxLifeTime
  val openConnectionsHealthThresholds = _openConnectionsHealthThresholds
  val requestTimeout = _requestTimeout
  val readTimeout = _readTimeout
  val writeCompletionTimeout = _writeCompletionTimeout
  val tracerFactory = _tracerFactory

  /**
   * A map view of every configuration field, keyed by field name. Fields that
   * are always present (channelFactory, healthEventCallback, tracerFactory)
   * are wrapped in Some so the map is uniformly Option-valued; toString below
   * relies on this to render only populated fields.
   */
  def toMap = Map(
    "codecFactory" -> _codecFactory,
    "statsReceiver" -> _statsReceiver,
    "exceptionReceiver" -> _exceptionReceiver,
    "name" -> _name,
    "sendBufferSize" -> _sendBufferSize,
    "recvBufferSize" -> _recvBufferSize,
    "keepAlive" -> _keepAlive,
    "backlog" -> _backlog,
    "bindTo" -> _bindTo,
    "logger" -> _logger,
    "tls" -> _tls,
    "channelFactory" -> Some(_channelFactory),
    "maxConcurrentRequests" -> _maxConcurrentRequests,
    // Was previously unwrapped, so toString silently dropped this entry;
    // wrapping in Some makes it consistent with the other always-set fields.
    "healthEventCallback" -> Some(_healthEventCallback),
    "hostConnectionMaxIdleTime" -> _hostConnectionMaxIdleTime,
    "hostConnectionMaxLifeTime" -> _hostConnectionMaxLifeTime,
    "openConnectionsHealthThresholds" -> _openConnectionsHealthThresholds,
    "requestTimeout" -> _requestTimeout,
    "readTimeout" -> _readTimeout,
    "writeCompletionTimeout" -> _writeCompletionTimeout,
    "tracerFactory" -> Some(_tracerFactory)
  )

  // Renders only the fields that have been set (i.e. map values that are Some).
  override def toString = {
    "ServerConfig(%s)".format(
      toMap flatMap {
        case (k, Some(v)) =>
          Some("%s=%s".format(k, v))
        case _ =>
          None
      } mkString(", "))
  }

  /**
   * Asserts at runtime that the three mandatory fields (codec, bindTo, name)
   * have been supplied, returning this config re-typed as fully specified.
   *
   * @throws IncompleteSpecification if any mandatory field is missing.
   */
  def validated: ServerConfig[Req, Rep, Yes, Yes, Yes] = {
    _codecFactory getOrElse { throw new IncompleteSpecification("No codec was specified") }
    _bindTo getOrElse { throw new IncompleteSpecification("No bindTo was specified") }
    _name getOrElse { throw new IncompleteSpecification("No name was specified") }
    copy()
  }
}
/**
* A handy Builder for constructing Servers (i.e., binding Services to
* a port). This class is subclassable. Override copy() and build()
* to do your own dirty work.
*
* The main class to use is [[com.twitter.finagle.builder.ServerBuilder]], as so
* {{{
* ServerBuilder()
* .codec(Http)
* .hostConnectionMaxLifeTime(5.minutes)
* .readTimeout(2.minutes)
* .name("servicename")
* .bindTo(new InetSocketAddress(serverPort))
* .build(plusOneService)
* }}}
*
* The `ServerBuilder` requires the definition of `codec`, `bindTo`
* and `name`. In Scala, these are statically type
* checked, and in Java the lack of any of the above causes a runtime
* error.
*
* The `build` method uses an implicit argument to statically
* typecheck the builder (to ensure completeness, see above). The Java
* compiler cannot provide such implicit, so we provide a separate
* function in Java to accomplish this. Thus, the Java code for the
* above is
*
* {{{
* ServerBuilder.safeBuild(
* plusOneService,
* ServerBuilder.get()
* .codec(Http)
* .hostConnectionMaxLifeTime(5.minutes)
* .readTimeout(2.minutes)
* .name("servicename")
* .bindTo(new InetSocketAddress(serverPort)));
* }}}
*
* Alternatively, using the `unsafeBuild` method on `ServerBuilder`
* verifies the builder dynamically, resulting in a runtime error
* instead of a compiler error.
*/
class ServerBuilder[Req, Rep, HasCodec, HasBindTo, HasName] private[builder](
val config: ServerConfig[Req, Rep, HasCodec, HasBindTo, HasName]
) {
import ServerConfig._
// Convenient aliases.
type FullySpecifiedConfig = FullySpecified[Req, Rep]
type ThisConfig = ServerConfig[Req, Rep, HasCodec, HasBindTo, HasName]
type This = ServerBuilder[Req, Rep, HasCodec, HasBindTo, HasName]
private[builder] def this() = this(new ServerConfig)
override def toString() = "ServerBuilder(%s)".format(config.toString)
protected def copy[Req1, Rep1, HasCodec1, HasBindTo1, HasName1](
config: ServerConfig[Req1, Rep1, HasCodec1, HasBindTo1, HasName1]
): ServerBuilder[Req1, Rep1, HasCodec1, HasBindTo1, HasName1] =
new ServerBuilder(config)
protected def withConfig[Req1, Rep1, HasCodec1, HasBindTo1, HasName1](
f: ServerConfig[Req, Rep, HasCodec, HasBindTo, HasName] =>
ServerConfig[Req1, Rep1, HasCodec1, HasBindTo1, HasName1]
): ServerBuilder[Req1, Rep1, HasCodec1, HasBindTo1, HasName1] = copy(f(config))
def codec[Req1, Rep1](
codec: Codec[Req1, Rep1]
): ServerBuilder[Req1, Rep1, Yes, HasBindTo, HasName] =
withConfig(_.copy(_codecFactory = Some(Function.const(codec) _)))
def codec[Req1, Rep1](
codecFactory: CodecFactory[Req1, Rep1]#Server
): ServerBuilder[Req1, Rep1, Yes, HasBindTo, HasName] =
withConfig(_.copy(_codecFactory = Some(codecFactory)))
def codec[Req1, Rep1](
codecFactory: CodecFactory[Req1, Rep1]
): ServerBuilder[Req1, Rep1, Yes, HasBindTo, HasName] =
withConfig(_.copy(_codecFactory = Some(codecFactory.server)))
def reportTo(receiver: StatsReceiver): This =
withConfig(_.copy(_statsReceiver = Some(receiver)))
def name(value: String): ServerBuilder[Req, Rep, HasCodec, HasBindTo, Yes] =
withConfig(_.copy(_name = Some(value)))
def sendBufferSize(value: Int): This =
withConfig(_.copy(_sendBufferSize = Some(value)))
def recvBufferSize(value: Int): This =
withConfig(_.copy(_recvBufferSize = Some(value)))
def keepAlive(value: Boolean): This =
withConfig(_.copy(_keepAlive = Some(value)))
def backlog(value: Int): This =
withConfig(_.copy(_backlog = Some(value)))
def bindTo(address: SocketAddress): ServerBuilder[Req, Rep, HasCodec, Yes, HasName] =
withConfig(_.copy(_bindTo = Some(address)))
def channelFactory(cf: ReferenceCountedChannelFactory): This =
withConfig(_.copy(_channelFactory = cf))
def logger(logger: Logger): This =
withConfig(_.copy(_logger = Some(logger)))
def tls(certificatePath: String, keyPath: String,
caCertificatePath: String = null, ciphers: String = null): This =
withConfig(_.copy(_tls = Some(certificatePath, keyPath, caCertificatePath, ciphers)))
def maxConcurrentRequests(max: Int): This =
withConfig(_.copy(_maxConcurrentRequests = Some(max)))
def healthEventCallback(callback: HealthEvent => Unit): This =
withConfig(_.copy(_healthEventCallback = callback))
def hostConnectionMaxIdleTime(howlong: Duration): This =
withConfig(_.copy(_hostConnectionMaxIdleTime = Some(howlong)))
def hostConnectionMaxLifeTime(howlong: Duration): This =
withConfig(_.copy(_hostConnectionMaxLifeTime = Some(howlong)))
def openConnectionsHealthThresholds(thresholds: OpenConnectionsHealthThresholds): This =
withConfig(_.copy(_openConnectionsHealthThresholds = Some(thresholds)))
def requestTimeout(howlong: Duration): This =
withConfig(_.copy(_requestTimeout = Some(howlong)))
def readTimeout(howlong: Duration): This =
withConfig(_.copy(_readTimeout = Some(howlong)))
def writeCompletionTimeout(howlong: Duration): This =
withConfig(_.copy(_writeCompletionTimeout = Some(howlong)))
def exceptionReceiver(erFactory: ServerExceptionReceiverBuilder): This =
withConfig(_.copy(_exceptionReceiver = Some(erFactory)))
def tracerFactory(factory: Tracer.Factory): This =
withConfig(_.copy(_tracerFactory = factory))
@deprecated("Use tracerFactory instead")
def tracer(factory: Tracer.Factory): This =
withConfig(_.copy(_tracerFactory = factory))
@deprecated("Use tracerFactory instead")
def tracer(tracer: Tracer): This =
withConfig(_.copy(_tracerFactory = () => tracer))
/**
* Construct the Server, given the provided Service.
*/
def build(service: Service[Req, Rep]) (
implicit THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION:
ThisConfig =:= FullySpecifiedConfig
): Server = build { () =>
new ServiceProxy[Req, Rep](service) {
// release() is meaningless on connectionless services.
override def release() = ()
}
}
/**
* Construct the Server, given the provided Service factory.
*/
def build(serviceFactory: () => Service[Req, Rep])(
implicit THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION:
ThisConfig =:= FullySpecifiedConfig
): Server = build(_ => serviceFactory())
/**
 * Construct the Server, given the provided ServiceFactory. This
 * is useful if the protocol is stateful (e.g., requires authentication
 * or supports transactions).
 *
 * This is the terminal build() overload: the other variants delegate here.
 * It assembles the Netty ServerBootstrap and per-channel pipeline (stats,
 * TLS, timeouts, request serialization, queueing), composes the filter
 * stack around each connection's service, binds the listening socket, and
 * returns a Server handle that can drain and shut everything down.
 */
def build(serviceFactory: (ClientConnection) => Service[Req, Rep])(
implicit THE_BUILDER_IS_NOT_FULLY_SPECIFIED_SEE_ServerBuilder_DOCUMENTATION:
ThisConfig =:= FullySpecifiedConfig
): Server = {
// Register the configured stats receiver globally, scoped under "finagle".
config.statsReceiver foreach { sr =>
GlobalStatsReceiver.register(sr.scope("finagle"))
}
// Scope stats under the server's name, when one was given.
val scopedStatsReceiver =
config.statsReceiver map { sr => config.name map (sr.scope(_)) getOrElse sr }
val codecConfig = ServerCodecConfig(
serviceName = config.name.get,
boundAddress = config.bindTo.get)
val codec = config.codecFactory.get(codecConfig)
// Reference-count the (possibly shared) channel factory for this server.
val cf = config.channelFactory
cf.acquire()
val bs = new ServerBootstrap(new ChannelFactoryToServerChannelFactory(cf))
// bs.setOption("soLinger", 0) // XXX: (TODO)
bs.setOption("reuseAddress", true)
bs.setOption("child.tcpNoDelay", true)
config.backlog.foreach { s => bs.setOption("backlog", s) }
config.sendBufferSize foreach { s => bs.setOption("child.sendBufferSize", s) }
config.recvBufferSize foreach { s => bs.setOption("child.receiveBufferSize", s) }
config.keepAlive.foreach { s => bs.setOption("child.keepAlive", s) }
// When maxConcurrentRequests is set, build a shared semaphore-backed
// handler plus gauges reporting in-flight concurrency and queue depth.
// TODO: we need something akin to a max queue depth.
val queueingChannelHandlerAndGauges =
config.maxConcurrentRequests map { maxConcurrentRequests =>
val semaphore = new AsyncSemaphore(maxConcurrentRequests)
val gauges = scopedStatsReceiver.toList flatMap { sr =>
sr.addGauge("request_concurrency") {
maxConcurrentRequests - semaphore.numPermitsAvailable
} :: sr.addGauge("request_queue_size") {
semaphore.numWaiters
} :: Nil
}
(new ChannelSemaphoreHandler(semaphore), gauges)
}
val queueingChannelHandler = queueingChannelHandlerAndGauges map { case (q, _) => q }
val gauges = queueingChannelHandlerAndGauges.toList flatMap { case (_, g) => g }
// Handle onto a live channel: drain (graceful) or close (forced).
trait ChannelHandle {
def drain(): Future[Unit]
def close()
}
val scopedOrNullStatsReceiver = scopedStatsReceiver getOrElse NullStatsReceiver
val channels = new HashSet[ChannelHandle]
// We share some filters & handlers for cumulative stats.
val statsFilter = scopedStatsReceiver map { new StatsFilter[Req, Rep](_) }
val channelStatsHandler = scopedStatsReceiver map { new ChannelStatsHandler(_) }
val channelRequestStatsHandler = scopedStatsReceiver map { new ChannelRequestStatsHandler(_) }
// health-measuring handler
val channelOpenConnectionsHandler = config.openConnectionsHealthThresholds map {
new ChannelOpenConnectionsHandler(_, config.healthEventCallback, scopedOrNullStatsReceiver)
}
val tracer = config.tracerFactory()
// The pipeline factory runs once per accepted connection; everything built
// inside getPipeline is per-channel unless it was created (shared) above.
bs.setPipelineFactory(new ChannelPipelineFactory {
def getPipeline = {
val pipeline = codec.pipelineFactory.getPipeline
config.logger foreach { logger =>
pipeline.addFirst(
"channelLogger", ChannelSnooper(config.name getOrElse "server")(logger.info))
}
channelOpenConnectionsHandler foreach { handler =>
pipeline.addFirst("channelOpenConnectionsHandler", handler)
}
channelStatsHandler foreach { handler =>
pipeline.addFirst("channelStatsHandler", handler)
}
// XXX/TODO: add stats for both read & write completion
// timeouts.
// Note that the timeout is *after* request decoding. This
// prevents death from clients trying to DoS by slowly
// trickling in bytes to our (accumulating) codec.
config.readTimeout foreach { howlong =>
val (timeoutValue, timeoutUnit) = howlong.inTimeUnit
pipeline.addLast(
"readTimeout",
new ReadTimeoutHandler(Timer.defaultNettyTimer, timeoutValue, timeoutUnit))
}
config.writeCompletionTimeout foreach { howlong =>
pipeline.addLast(
"writeCompletionTimeout",
new WriteCompletionTimeoutHandler(Timer.default, howlong))
}
// SSL comes first so that ChannelSnooper gets plaintext
config.tls foreach { case (certificatePath, keyPath, caCertificatePath, ciphers) =>
val engine: Engine = Ssl.server(certificatePath, keyPath, caCertificatePath, ciphers)
engine.self.setUseClientMode(false)
engine.self.setEnableSessionCreation(true)
val handler = new SslHandler(engine.self)
// Certain engine implementations need to handle renegotiation internally,
// as Netty's TLS protocol parser implementation confuses renegotiation and
// notification events. Renegotiation will be enabled for those Engines with
// a true handlesRenegotiation value.
handler.setEnableRenegotiation(engine.handlesRenegotiation)
pipeline.addFirst("ssl", handler)
// Netty's SslHandler does not provide SSLEngine implementations any hints that they
// are no longer needed (namely, upon disconnection.) Since some engine implementations
// make use of objects that are not managed by the JVM's memory manager, we need to
// know when memory can be released. The SslShutdownHandler will invoke the shutdown
// method on implementations that define shutdown(): Unit.
pipeline.addFirst(
"sslShutdown",
new SslShutdownHandler(engine)
)
// Information useful for debugging SSL issues, such as the certificate, cipher spec,
// remote address is provided to the SSLEngine implementation by the SslIdentifierHandler.
// The SslIdentifierHandler will invoke the setIdentifier method on implementations
// that define setIdentifier(String): Unit.
pipeline.addFirst(
"sslIdentifier",
new SslIdentifierHandler(engine, certificatePath, ciphers)
)
}
// Serialization keeps the codecs honest.
pipeline.addLast("requestSerializing", new ChannelSemaphoreHandler(new AsyncSemaphore(1)))
// Add this after the serialization to get an accurate request
// count.
channelRequestStatsHandler foreach { handler =>
pipeline.addLast("channelRequestStatsHandler", handler)
}
// Add the (shared) queueing handler *after* request
// serialization as it assumes at most one outstanding request
// per channel.
queueingChannelHandler foreach { pipeline.addLast("queue", _) }
/*
* this is a wrapper for the factory-created service for this connection, which we'll
* build once we get an "open" event from netty.
*/
val postponedService = new Promise[Service[Req, Rep]]
// Compose the service stack.
var service: Service[Req, Rep] = {
new ProxyService(postponedService flatMap { s => codec.prepareService(s) })
}
// Add the exception service at the bottom layer
// This is not required, but arguably the best style
val exceptionFilter = new ExceptionFilter[Req, Rep] (
config.exceptionReceiver map {
_(config.name.get, config.bindTo.get)
} getOrElse {
NullExceptionReceiver
}
)
service = exceptionFilter andThen service
statsFilter foreach { sf =>
service = sf andThen service
}
// We add the idle time after the codec. This ensures that a
// client couldn't DoS us by sending lots of little messages
// that don't produce a request object for some time. In other
// words, the idle time refers to the idle time from the view
// of the protocol.
// TODO: can we share closing handler instances with the
// channelHandler?
val closingHandler = new ChannelClosingHandler
pipeline.addLast("closingHandler", closingHandler)
if (config.hostConnectionMaxIdleTime.isDefined ||
config.hostConnectionMaxLifeTime.isDefined) {
service =
new ExpiringService(
service,
config.hostConnectionMaxIdleTime,
config.hostConnectionMaxLifeTime,
Timer.default,
scopedOrNullStatsReceiver.scope("expired")
) {
override def expired() { closingHandler.close() }
}
}
config.requestTimeout foreach { duration =>
val e = new IndividualRequestTimeoutException(config.name getOrElse "server", duration)
service = (new TimeoutFilter(duration, e)) andThen service
}
// This has to go last (ie. first in the stack) so that
// protocol-specific trace support can override our generic
// one here.
service = (new TracingFilter(tracer)) andThen service
val channelHandler = new ServiceToChannelHandler(
service, postponedService, serviceFactory,
scopedOrNullStatsReceiver, Logger.getLogger(getClass.getName))
/*
* Register the channel so we can wait for them for a drain. We close the socket but wait
* for all handlers to complete (to drain them individually.) Note: this would be
* complicated by the presence of pipelining.
*/
val handle = new ChannelHandle {
def close() =
channelHandler.close()
def drain() = {
channelHandler.drain()
channelHandler.onShutdown
}
}
channels.synchronized { channels += handle }
// Drop the handle from the registry once the channel fully shuts down.
channelHandler.onShutdown ensure {
channels.synchronized {
channels.remove(handle)
}
}
pipeline.addLast("channelHandler", channelHandler)
pipeline
}
})
val serverChannel = bs.bind(config.bindTo.get)
Timer.default.acquire()
new Server {
def close(timeout: Duration = Duration.MaxValue) = {
// According to NETTY-256, the following sequence of operations
// has no race conditions.
//
// - close the server socket (awaitUninterruptibly)
// - close all open channels (awaitUninterruptibly)
// - releaseExternalResources
//
// We modify this a little bit, to allow for graceful draining,
// closing open channels only after the grace period.
//
// The next step here is to do a half-closed socket: we want to
// suspend reading, but not writing to a socket. This may be
// important for protocols that do any pipelining, and may
// queue in their codecs.
// On cursory inspection of the relevant Netty code, this
// should never block (it is little more than a close() syscall
// on the FD).
serverChannel.close().awaitUninterruptibly()
// At this point, no new channels may be created.
val joined = Future.join(channels.synchronized { channels toArray } map { _.drain() })
// Wait for all channels to shut down.
joined.get(timeout)
// Force close any remaining connections. Don't wait for
// success. Buffer channels into an array to avoid
// deadlocking.
channels.synchronized { channels toArray } foreach { _.close() }
// Release any gauges we've created.
gauges foreach { _.remove() }
bs.releaseExternalResources()
Timer.default.stop()
tracer.release()
}
override def toString = "Server(%s)".format(config.toString)
}
}
/**
 * Construct the Server, deferring builder-completeness checks to runtime:
 * config.validated performs the checks that build() would otherwise
 * require as compile-time evidence.
 */
def unsafeBuild(service: Service[Req, Rep]): Server = {
  val checked = withConfig(cfg => cfg.validated)
  checked.build(service)
}
}
| enachb/finagle_2.9_durgh | finagle-core/src/main/scala/com/twitter/finagle/builder/ServerBuilder.scala | Scala | apache-2.0 | 26,390 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples
import org.apache.spark.sql.SparkSession
/**
 * Usage: BroadcastTest [partitions] [numElem] [blockSize]
 *
 * Repeatedly broadcasts a large Int array, has each task report the
 * broadcast array's length, and prints the per-iteration timing.
 */
object BroadcastTest {
  // Explicit ": Unit =" replaces the deprecated procedure syntax.
  def main(args: Array[String]): Unit = {
    // Optional third argument tunes spark.broadcast.blockSize.
    val blockSize = if (args.length > 2) args(2) else "4096"

    val spark = SparkSession
      .builder()
      .appName("Broadcast Test")
      .config("spark.broadcast.blockSize", blockSize)
      .getOrCreate()

    val sc = spark.sparkContext

    val slices = if (args.length > 0) args(0).toInt else 2
    val num = if (args.length > 1) args(1).toInt else 1000000

    val arr1 = (0 until num).toArray

    for (i <- 0 until 3) {
      println("Iteration " + i)
      println("===========")
      val startTime = System.nanoTime
      val barr1 = sc.broadcast(arr1)
      val observedSizes = sc.parallelize(1 to 10, slices).map(_ => barr1.value.length)
      // Collect the small RDD so we can print the observed sizes locally.
      // Named "size" so it no longer shadows the iteration counter `i`.
      observedSizes.collect().foreach(size => println(size))
      println("Iteration %d took %.0f milliseconds".format(i, (System.nanoTime - startTime) / 1E6))
    }

    spark.stop()
  }
}
// scalastyle:on println
| aokolnychyi/spark | examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala | Scala | apache-2.0 | 1,975 |
/*
* Copyright 2007-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package builtin
package snippet
import scala.language.existentials
import http.{S, DispatchSnippet, LiftRules}
import http.js._
import sitemap._
import util._
import common._
import scala.xml._
import JsCmds._
import JE._
import Helpers._
/**
* <p>This built-in snippet can be used to render a menu representing your SiteMap.
* There are three main snippet methods that you can use:</p>
*
* <ul>
* <li>builder - Renders the entire SiteMap, optionally expanding all child menus</li>
* <li>group - Renders the MenuItems corresponding to the specified group.</li>
* <li>item - Renders the specific named MenuItem</li>
* </ul>
*
* <p>More detailed usage of each method is provided below</p>
*/
object Menu extends DispatchSnippet {
/** Maps each snippet method name to its implementation. */
def dispatch: DispatchIt = {
  case "json" => jsonMenu
  case "group" => group
  case "item" => item
  case "title" => title
  case "builder" => builder
}
/**
* <p>This snippet method renders a menu representing your SiteMap contents. The
* menu is rendered as a set of nested unordered lists (<ul />). By
* default, it only renders nested child menus for menus that match the current request path.
* You can add the "expandAll" attribute to the snippet tag to force full expansion of
* all child menus. Additionally, you can use the following attribute prefixes to further customize
* the generated list and list item elements:</p>
*
* <ul>
* <li>top - Adds the specified attribute to the top-level <ul> element that makes up the menu</li>
* <li>ul - Adds the specified attribute to each <ul> element (top-level and nested children) that makes up the menu</li>
* <li>li - Adds the specified attribute to each <li> element for the menu</li>
* <li>li_item - Adds the specified attribute to the current page’s menu item</li>
* <li>outer_tag - the tag for the outer XML element (ul by default)</li>
* <li>inner_tag - the tag for the inner XML element (li by default)</li>
* <li>li_path - Adds the specified attribute to the current page’s breadcrumb path. The
* breadcrumb path is the set of menu items leading to this one.</li>
* <li>linkToSelf - False by default, but available as 'true' to generate link to the current page</li>
* <li>level - Controls the level of menus that will be output. "0" is the top-level menu, "1" is children of
* the current menu item, and so on. Child menus will be expanded unless the "expand" attribute is set to <pre>false</pre>.</li>
* <li>expand - Controls whether or not to expand child menus. Defaults to <pre>true</pre>.</li>
* </ul>
*
* <p>If you are using designer friendly invocation, you can access the namespaced attributes: <br/>
* <div class="lift:Menu?li_item:class=foo+bar">menu</div>
* </p>
*
* <p>For a simple, default menu, simply add</p>
*
* <pre>
* <lift:Menu.builder />
* </pre>
*
* <p>To your template. You can render the entire sitemap with</p>
*
* <pre>
* <lift:Menu.builder expandAll="true" />
* </pre>
*
* <p>Customizing the elements is handled through the prefixed attributes described above.
* For instance, you could make the current page menu item red:</p>
*
* <pre>
* <lift:Menu.builder li_item:style="color: red;" />
* </pre>
*/
def builder(info: NodeSeq): NodeSeq = {
// Tag names and expansion behavior are all overridable via snippet attributes.
val outerTag: String = S.attr("outer_tag") openOr "ul"
val innerTag: String = S.attr("inner_tag") openOr "li"
val expandAll = (S.attr("expandAll") or S.attr("expandall")).isDefined
val linkToSelf: Boolean = (S.attr("linkToSelf") or S.attr("linktoself")).map(Helpers.toBoolean) openOr false
val expandAny: Boolean = S.attr("expand").map(Helpers.toBoolean) openOr true
val level: Box[Int] = for (lvs <- S.attr("level"); i <- Helpers.asInt(lvs)) yield i
// Decide what to render: a single named item, a named group, or the
// menu for the current request.
val toRender: Seq[MenuItem] = (S.attr("item"), S.attr("group")) match {
case (Full(item), _) =>
for{
sm <- LiftRules.siteMap.toList
req <- S.request.toList
loc <- sm.findLoc(item).toList
item <- buildItemMenu(loc, req.location, expandAll)
} yield item
case (_, Full(group)) =>
for{
sm <- LiftRules.siteMap.toList
loc <- sm.locForGroup(group)
req <- S.request.toList
item <- buildItemMenu(loc, req.location, expandAll)
} yield item
case _ => renderWhat(expandAll)
}
def ifExpandCurrent(f: => NodeSeq): NodeSeq = if (expandAny || expandAll) f else NodeSeq.Empty
def ifExpandAll(f: => NodeSeq): NodeSeq = if (expandAll) f else NodeSeq.Empty
toRender.toList match {
case Nil if S.attr("group").isDefined => NodeSeq.Empty
case Nil => Text("No Navigation Defined.")
case xs =>
val liMap = S.prefixedAttrsToMap("li")
val li = S.mapToAttrs(liMap)
// Renders one menu item as an <innerTag> element, choosing link vs. plain
// text and which prefixed attribute set to apply depending on whether the
// item is a placeholder, the current page, or on the current path.
def buildANavItem(i: MenuItem) = {
i match {
// Per Loc.PlaceHolder, placeholder implies HideIfNoKids
case m@MenuItem(text, uri, kids, _, _, _) if m.placeholder_? && kids.isEmpty => NodeSeq.Empty
case m@MenuItem(text, uri, kids, _, _, _) if m.placeholder_? =>
Helpers.addCssClass(i.cssClass,
Elem(null, innerTag, Null, TopScope, true,
// Is a placeholder useful if we don't display the kids? I say no (DCB, 20101108)
<xml:group> <span>{text}</span>{buildUlLine(kids)}</xml:group>) %
(if (m.path) S.prefixedAttrsToMetaData("li_path", liMap) else Null) %
(if (m.current) S.prefixedAttrsToMetaData("li_item", liMap) else Null))
case MenuItem(text, uri, kids, true, _, _) if linkToSelf =>
Helpers.addCssClass(i.cssClass,
Elem(null, innerTag, Null, TopScope, true,
<xml:group> <a href={uri}>{text}</a>{ifExpandCurrent(buildUlLine(kids))}</xml:group>) %
S.prefixedAttrsToMetaData("li_item", liMap))
case MenuItem(text, uri, kids, true, _, _) =>
Helpers.addCssClass(i.cssClass,
Elem(null, innerTag, Null, TopScope, true,
<xml:group> <span>{text}</span>{ifExpandCurrent(buildUlLine(kids))}</xml:group>) %
S.prefixedAttrsToMetaData("li_item", liMap))
// Not current, but on the path, so we need to expand children to show the current one
case MenuItem(text, uri, kids, _, true, _) =>
Helpers.addCssClass(i.cssClass,
Elem(null, innerTag, Null, TopScope, true,
<xml:group> <a href={uri}>{text}</a>{buildUlLine(kids)}</xml:group>) %
S.prefixedAttrsToMetaData("li_path", liMap))
case MenuItem(text, uri, kids, _, _, _) =>
Helpers.addCssClass(i.cssClass,
Elem(null, innerTag, Null, TopScope, true,
<xml:group> <a href={uri}>{text}</a>{ifExpandAll(buildUlLine(kids))}</xml:group>) % li)
}
}
// Wraps a sequence of items in the configured outer tag (skipped when the
// outer tag name is empty).
def buildUlLine(in: Seq[MenuItem]): NodeSeq =
if (in.isEmpty) {
NodeSeq.Empty
} else {
if (outerTag.length > 0) {
Elem(null, outerTag, Null, TopScope, true,
<xml:group>{in.flatMap(buildANavItem)}</xml:group>) %
S.prefixedAttrsToMetaData("ul")
} else {
in.flatMap(buildANavItem)
}
}
// When a positive "level" is given, descend that many levels before rendering.
val realMenuItems = level match {
case Full(lvl) if lvl > 0 =>
def findKids(cur: Seq[MenuItem], depth: Int): Seq[MenuItem] = if (depth == 0) cur
else findKids(cur.flatMap(mi => mi.kids), depth - 1)
findKids(xs, lvl)
case _ => xs
}
// Apply "top"-prefixed attributes to the outermost element, when there is one.
buildUlLine(realMenuItems) match {
case top: Elem => top % S.prefixedAttrsToMetaData("top")
case other => other
}
}
}
// Builds the MenuItem (plus expanded children, when requested) for a single Loc.
private def buildItemMenu [A] (loc : Loc[A], currLoc: Box[Loc[_]], expandAll : Boolean) : List[MenuItem] = {
  val isCurrent = currLoc == Full(loc)
  val kids: List[MenuItem] =
    if (expandAll) loc.buildKidMenuItems(loc.menu.kids) else Nil
  loc.buildItem(kids, isCurrent, isCurrent).toList
}
// With expandAll we rebuild the entire sitemap menu; otherwise only the
// menu lines for the current request. No request (or sitemap) yields Nil.
private def renderWhat(expandAll: Boolean): Seq[MenuItem] = {
  val lines =
    if (expandAll)
      for (sm <- LiftRules.siteMap; req <- S.request)
        yield sm.buildMenu(req.location).lines
    else
      S.request.map(_.buildMenu.lines)
  lines openOr Nil
}
/**
 * Renders the fully-expanded menu as a JSON object assigned to a JavaScript
 * variable named by the "var" attribute (defaulting to "lift_menu").
 */
def jsonMenu(ignore: NodeSeq): NodeSeq = {
  val toRender = renderWhat(true)

  // Converts a single MenuItem (and, recursively, its children) to a JsObj.
  def itemToJson(in: MenuItem): JsExp = in match {
    case MenuItem(text, uri, kids, current, path, _) =>
      JsObj("text" -> text.toString,
            "uri" -> uri.toString,
            "children" -> itemsToJson(kids),
            "current" -> current,
            "cssClass" -> Str(in.cssClass openOr ""),
            "placeholder" -> in.placeholder_?,
            "path" -> path)
  }

  def itemsToJson(in: Seq[MenuItem]): JsExp =
    JsArray(in.map(itemToJson) :_*)

  Script(JsCrVar(S.attr("var") openOr "lift_menu",
                 JsObj("menu" -> itemsToJson(toRender))))
}
/**
* <p>Renders the title for the current request path (location). You can use this to
* automatically set the title for your page based on your SiteMap:</p>
*
* <pre>
* ⋮
* <head>
* <title><lift:Menu.title /></title>
* </head>
* ⋮
* </pre>
* <p>HTML5 does not support tags inside the <title> tag,
* so you must do:
* </p>
*
* <pre>
* <head>
* <title class="lift:Menu.title">The page named %*% is being displayed</title>
* </head>
* </pre>
* <p>
* And Lift will substitute the title at the %*% marker if the marker exists, otherwise
* append the title to the contents of the <title> tag.
* </p>
*/
def title(text: NodeSeq): NodeSeq = {
  // Title of the Loc matched by the current request, when there is one.
  val locTitle = S.request.flatMap(_.location).map(_.title)
  text match {
    case TitleText(attrs, template) =>
      // Substitute the page title at the first %*% marker (or append it when
      // no marker is present), preserving the original <title> attributes.
      locTitle.map { titleNodes =>
        val titleStr = titleNodes.text
        val marker = template.indexOf("%*%")
        val body =
          if (marker >= 0) template.take(marker) + titleStr + template.drop(marker + 3)
          else template + " " + titleStr
        <title>{body}</title> % attrs
      } openOr text
    case _ =>
      locTitle openOr Text("")
  }
}
// Extractor matching a NodeSeq that is exactly one un-prefixed <title>
// element whose content is empty or a single text Atom; yields the
// element's attributes and its text content.
private object TitleText {
  def unapply(in: NodeSeq): Option[(MetaData, String)] =
    in match {
      case Seq(e: Elem) if e.prefix == null && e.label == "title" =>
        e.child match {
          case Seq() => Some(e.attributes -> "")
          case Seq(_: Atom[_]) => Some(e.attributes -> e.child.text)
          case _ => None
        }
      case _ => None
    }
}
/**
* Renders a group of menu items. You specify a group using the LocGroup LocItem
* case class on your Menu Loc:
*
* {{{
* val menus =
* Menu(Loc("a",...,...,LocGroup("test"))) ::
* Menu(Loc("b",...,...,LocGroup("test"))) ::
* Menu(Loc("c",...,...,LocGroup("test"))) :: Nil
* }}}
*
* You can then render with the group snippet:
*
* {{{
* <nav data-lift="Menu.group?group=test">
* <ul>
* <li>
* <a href="/sample/link">Bound menu item</a>
* </li>
* </ul>
* </nav>
* }}}
*
* By default, menu items bind the href and text of an `a` element in
* the template, and iterates over `li` elements. You can customize
* these settings using the `repeatedSelector`, `linkSelector`, and
* `hrefSelector` parameters; for example:
*
* {{{
* <p data-lift="Menu.group?group=test&repeatedSelector=p&linkSelector=p&hrefSelector=[data-link]"
* data-link="/sample/link">
* Bound menu item
* </p>
* }}}
*
* These selectors are CSS selector transform selectors. `repeatedSelector`
* and `linkSelector` are the left-hand-side, while `hrefSelector` is the
* second part, which indicates what will be replaced by the href text.
* For example, the above would roughly yield a transform that looks like:
*
* {{{
* "p" #> {
* "p [data-link]" #> <menu href> &
* "p *" #> <menu text> &
* }
* }}}
*/
def group: CssSel = {
  // Selector parts are overridable via snippet attributes; see the scaladoc above.
  val repeatedSelector = S.attr("repeatedSelector") openOr "li"
  val linkSelector = S.attr("linkSelector") openOr "a"
  val hrefSelector = S.attr("hrefSelector") openOr "[href]"

  // One CssSel per Loc in the named group, binding the link's href and text.
  val boundItems =
    for {
      group <- S.attr("group").toList
      siteMap <- LiftRules.siteMap.toList
      loc <- siteMap.locForGroup(group)
      link <- loc.createDefaultLink
      linkText <- loc.linkText
    } yield {
      s"$linkSelector $hrefSelector" #> link &
      s"$linkSelector *" #> linkText
    }

  repeatedSelector #> boundItems
}
/**
* <p>Renders a specific, named item, based on the name given in the Menu's Loc parameter:</p>
*
* <pre>
* val menus =
* Menu(Loc("a",...,...,LocGroup("test"))) ::
* Menu(Loc("b",...,...,LocGroup("test"))) ::
* Menu(Loc("c",...,...,LocGroup("test"))) :: Nil
* </pre>
*
* <p>You can then select the item using the name attribute:</p>
*
* <pre>
* <lift:Menu.item name="b" />
* </pre>
*
* <p>The menu item is rendered as an anchor tag (<a />). The text for the link
* defaults to the named Menu's Loc.linkText, but you can specify your own link text
* by providing contents to the tag:</p>
*
* <pre>
* <lift:Menu.item name="b">This is a link</lift:Menu.item>
* </pre>
*
* <p>Additionally you can customize
* the tag using attributes prefixed with "a":</p>
*
* <pre>
* <lift:Menu.item name="b" a:style="color: red;" />
* </pre>
*
* <p>The param attribute may be used with Menu Locs that are
* CovertableLoc to parameterize the link</p>
*
* <p>Normally, the Menu item is not shown on pages that match its Menu's Loc. You can
* set the "donthide" attribute on the tag to force it to show text only (same text as normal,
* but not in an anchor tag)</p>
*
*
* <p>Alternatively, you can set the "linkToSelf" attribute to "true" to force a link. You
* can specify your own link text with the tag's contents. Note that <b>case is significant</b>, so
* make sure you specify "linkToSelf" and not "linktoself".</p>
*
*/
def item(_text: NodeSeq): NodeSeq = {
val donthide = S.attr("donthide").map(Helpers.toBoolean) openOr false
val linkToSelf = (S.attr("linkToSelf") or S.attr("linktoself")).map(Helpers.toBoolean) openOr false
// Strip any wrapping <a> from the supplied template so only the link text remains.
val text = ("a" #> ((n: NodeSeq) => n match {
case e: Elem => e.child
case xs => xs
})).apply(_text)
for {
name <- S.attr("name").toList
} yield {
type T = Q forSome { type Q }
// Builds a link for the given loc
def buildLink[T](loc : Loc[T]) = {
Group(SiteMap.buildLink(name, text) match {
case e : Elem =>
Helpers.addCssClass(loc.cssClassForMenuItem,
e % S.prefixedAttrsToMetaData("a"))
case x => x
})
}
// Three cases, in priority order: a "param" attribute against a
// ConvertableLoc, the current page's own Loc (honoring linkToSelf /
// donthide), and a plain link to the named Loc.
(S.request.flatMap(_.location), S.attr("param"), SiteMap.findAndTestLoc(name)) match {
case (_, Full(param), Full(loc: Loc[T] with ConvertableLoc[T])) => {
(for {
pv <- loc.convert(param)
link <- loc.createLink(pv)
} yield
Helpers.addCssClass(loc.cssClassForMenuItem,
<a href={link}></a> %
S.prefixedAttrsToMetaData("a"))) openOr
Text("")
}
case (Full(loc), _, _) if loc.name == name => {
(linkToSelf, donthide) match {
case (true, _) => buildLink(loc)
case (_, true) => {
// Show text only: the template's contents when given, otherwise the Loc's link text.
if (!text.isEmpty) {
Group(text)
} else {
Group(loc.linkText openOr Text(loc.name))
}
}
case _ => Text("")
}
}
case (Full(loc), _, _) => buildLink(loc)
case _ => Text("")
}
}
}
}
| lzpfmh/framework-2 | web/webkit/src/main/scala/net/liftweb/builtin/snippet/Menu.scala | Scala | apache-2.0 | 17,453 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.health
/**
 * Enumerates the formats a health-check response can be rendered in:
 * FULL, NAGIOS and LB.
 */
object HealthResponseType extends Enumeration {
  type HealthResponseType = Value

  // Declared individually; ids (0, 1, 2) and names match the multi-val form.
  val FULL = Value
  val NAGIOS = Value
  val LB = Value
}
import HealthResponseType._
case class HealthRequest(typ: HealthResponseType)
| Crashfreak/wookiee | wookiee-core/src/main/scala/com/webtrends/harness/health/HealthRequest.scala | Scala | apache-2.0 | 972 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.builder.serializers
import com.websudos.phantom.builder.QueryBuilder
import com.websudos.phantom.builder.query.{CQLQuery, QueryBuilderTest}
import com.websudos.phantom.tables.TestDatabase
/** Verifies the CQL strings produced by QueryBuilder for SELECT queries. */
class SelectQueryBuilderTest extends QueryBuilderTest {

  "The Select query builder" - {

    "should allow serialising SELECT query clauses" - {

      "should allow selecting a sequence of columns" in {
        val query = QueryBuilder.Select.select("t", "k", "test", "test2", "test3").queryString
        query shouldEqual "SELECT test, test2, test3 FROM k.t"
      }

      "should create a SELECT * query if no specific columns are selected" in {
        val query = QueryBuilder.Select.select("t", "k").queryString
        query shouldEqual "SELECT * FROM k.t"
      }

      "should serialize a SELECT COUNT query given a table name and a keyspace" in {
        val query = QueryBuilder.Select.count("t", "k").queryString
        query shouldEqual "SELECT COUNT(*) FROM k.t"
      }

      "should serialise a SELECT DISTINCT query from a table, keyspace and column sequence" in {
        val query = QueryBuilder.Select.distinct("t", "k", "test", "test1").queryString
        query shouldEqual "SELECT DISTINCT test, test1 FROM k.t"
      }
    }

    "should allow defining an ordering clause on a selection" - {

      "should allow specifying ASCENDING ordering" in {
        val query = QueryBuilder.Select.Ordering.ascending("test").queryString
        query shouldEqual "test ASC"
      }

      "should allow specifying DESCENDING ordering" in {
        val query = QueryBuilder.Select.Ordering.descending("test").queryString
        query shouldEqual "test DESC"
      }

      "should chain an ORDER BY clause to a CQLQuery" in {
        val base = TestDatabase.basicTable.select.all().qb
        val ordered = QueryBuilder.Select.Ordering.orderBy(
          base, QueryBuilder.Select.Ordering.descending(TestDatabase.basicTable.id.name)
        )
        ordered.queryString shouldEqual s"SELECT * FROM phantom.basicTable ORDER BY ${TestDatabase.basicTable.id.name} DESC"
      }

      "should allow specifying multiple orderBy clauses in a single select query" in {
        val orderClauses = Seq(
          QueryBuilder.Select.Ordering.ascending("test"),
          QueryBuilder.Select.Ordering.ascending("test_2"),
          QueryBuilder.Select.Ordering.descending("test_3")
        )
        val query = QueryBuilder.Select.Ordering.orderBy(orderClauses: _*).queryString
        query shouldEqual "ORDER BY (test ASC, test_2 ASC, test_3 DESC)"
      }
    }

    "should allow specifying Selection options" - {

      "should allow specifying an ALLOW FILTERING clause on an existing query" in {
        val base = CQLQuery("SELECT * FROM k.t")
        QueryBuilder.Select.allowFiltering(base).queryString shouldEqual "SELECT * FROM k.t ALLOW FILTERING"
      }

      "should allow creating a dateOf select clause" in {
        QueryBuilder.Select.dateOf("test").queryString shouldEqual "dateOf(test)"
      }

      "should allow creating a blobAsText select clause from a string" in {
        QueryBuilder.Select.blobAsText("test").queryString shouldEqual "blobAsText(test)"
      }

      "should allow creating a blobAsText select clause from another CQLQuery" in {
        QueryBuilder.Select.blobAsText(CQLQuery("test")).queryString shouldEqual "blobAsText(test)"
      }
    }
  }
}
| levinson/phantom | phantom-dsl/src/test/scala/com/websudos/phantom/builder/serializers/SelectQueryBuilderTest.scala | Scala | bsd-2-clause | 4,875 |
/*
* Copyright 2013 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.storehaus.redis
import com.twitter.algebird.Semigroup
import com.twitter.util.{Future, Time}
import com.twitter.finagle.redis.Client
import com.twitter.io.Buf
import com.twitter.storehaus.algebra.MergeableStore
object RedisSortedSetStore {
/** Factory: builds a sorted-set store backed by the given finagle-redis client. */
def apply(client: Client): RedisSortedSetStore =
new RedisSortedSetStore(client)
}
/** A Store representation of a redis sorted set
* where keys represent the name of the set and values
* represent both the member's name and score within the set
*/
class RedisSortedSetStore(client: Client)
extends MergeableStore[Buf, Seq[(Buf, Double)]] {
def semigroup: Semigroup[Seq[(Buf, Double)]] =
implicitly[Semigroup[Seq[(Buf, Double)]]]
/** Returns the whole set as a seq of (member, score) pairs.
* An empty set is represented as None. */
override def get(k: Buf): Future[Option[Seq[(Buf, Double)]]] =
// zRange over the full index range [0, -1] with scores; the Left side of the
// result carries the scored tuples. An empty result is filtered down to None.
client.zRange(k, 0L, -1L, true).map(
_.left.toOption.map( _.asTuples).filter(_.nonEmpty)
)
/** Replaces or deletes the whole set. Setting the set effectively results
* in a delete of the previous set's key and multiple calls to zAdd for each member. */
override def put(kv: (Buf, Option[Seq[(Buf, Double)]])): Future[Unit] =
kv match {
case (set, Some(scorings)) =>
// Delete first, then re-add every member via the membership store's multiPut.
// NOTE(review): duplicate members in `scorings` collapse when converted .toMap.
client.dels(Seq(set)).flatMap { _ =>
Future.collect(members.multiPut(scorings.map {
case (member, score) => ((set, member), Some(score))
}.toMap).values.toSeq).unit
}
case (set, None) =>
client.dels(Seq(set)).unit
}
/** Performs a zIncrBy operation on a set for a seq of members */
override def merge(
kv: (Buf, Seq[(Buf, Double)])
): Future[Option[Seq[(Buf, Double)]]] =
Future.collect(kv._2.map {
case (member, by) =>
client.zIncrBy(kv._1, by, member)
.map {
case Some(res) => member -> (res - by) // get the value before
case None => member -> 0.0
}
}).map(Some(_))
/** @return a mergeable store backed by redis with this store's client */
def members: MergeableStore[(Buf, Buf), Double] =
new RedisSortedSetMembershipStore(client)
/** @return a mergeable store for a given set with this store's client */
def members(set: Buf): MergeableStore[Buf, Double] =
new RedisSortedSetMembershipView(client, set)
// quit the redis session, then release the client's resources.
override def close(t: Time): Future[Unit] = client.quit.foreach { _ => client.close() }
}
/** An unpivoted-like member-oriented view of a redis sorted set bound to a specific
* set. Keys represent members. Values represent the members score
* within the given set. Work is delegated to an underlying
* RedisSortedSetMembershipStore. For multiPuts containing deletes, it is more
* efficient to use a RedisSortedSetMembershipStore directly.
*
* These stores also have mergeable semantics via zIncrBy for a member's
* score.
*/
/** A member-oriented view over one fixed sorted set: keys are members and
 *  values are their scores. Every operation delegates to a
 *  [[RedisSortedSetMembershipStore]] using the composite key (set, member).
 */
class RedisSortedSetMembershipView(client: Client, set: Buf)
  extends MergeableStore[Buf, Double] {

  // Composite-keyed delegate store; constructed lazily on first use.
  private lazy val delegate = new RedisSortedSetMembershipStore(client)

  // Widens a bare member key into the (set, member) key the delegate expects.
  private def keyFor(member: Buf): (Buf, Buf) = (set, member)

  def semigroup: Semigroup[Double] = implicitly[Semigroup[Double]]

  override def get(k: Buf): Future[Option[Double]] =
    delegate.get(keyFor(k))

  override def put(kv: (Buf, Option[Double])): Future[Unit] = {
    val (member, score) = kv
    delegate.put((keyFor(member), score))
  }

  override def merge(kv: (Buf, Double)): Future[Option[Double]] = {
    val (member, by) = kv
    delegate.merge((keyFor(member), by))
  }

  // Quit the redis session, then release the client's resources.
  override def close(t: Time): Future[Unit] =
    client.quit.foreach { _ => client.close() }
}
/** An unpivoted-like member-oriented view of redis sorted sets.
* Keys represent the both a name of the set and the member.
* Values represent the member's current score within a set.
* An absent score also indicates an absence of membership in the set.
*
* These stores also have mergeable semantics via zIncrBy for a member's
* score
*/
/** An unpivoted-like member-oriented view of redis sorted sets.
 *  Keys represent both the name of the set and the member.
 *  Values represent the member's current score within a set.
 *  An absent score also indicates an absence of membership in the set.
 *
 *  These stores also have mergeable semantics via zIncrBy for a member's
 *  score.
 */
class RedisSortedSetMembershipStore(client: Client)
  extends MergeableStore[(Buf, Buf), Double] {
  def semigroup: Semigroup[Double] = implicitly[Semigroup[Double]]

  /** @return a member's score or None if the member is not in the set */
  override def get(k: (Buf, Buf)): Future[Option[Double]] =
    client.zScore(k._1, k._2).map(_.map(_.doubleValue()))

  /** Partitions a map of multiPut pivoted values into
   *  a two item tuple of (deletes, stores), multimapped
   *  by a key computed from K1 via `by`.
   *
   *  This makes partitioning deletes and sets for pivoted multiPuts
   *  easier for stores that can perform batch operations on collections
   *  of InnerK values keyed by OutterK, where V indicates membership
   *  of InnerK within OutterK.
   *
   *  ( general enough to go into PivotOpts )
   */
  def multiPutPartitioned[OutterK, InnerK, K1 <: (OutterK, InnerK), V, IndexK](
    kv: Map[K1, Option[V]])(by: K1 => IndexK)
    : (Map[IndexK, List[(K1, Option[V])]], Map[IndexK, List[(K1, Option[V])]]) = {
    // Fresh default-to-Nil multimap for each accumulator component.
    def emptyMap = Map.empty[IndexK, List[(K1, Option[V])]].withDefaultValue(Nil)
    // foldLeft replaces the deprecated `/:` operator; the accumulation is unchanged:
    // Some(_) values go to the "storing" side, None values to the "deleting" side.
    kv.foldLeft((emptyMap, emptyMap)) {
      case ((deleting, storing), (key, value @ Some(_))) =>
        val index = by(key)
        (deleting, storing.updated(index, (key, value) :: storing(index)))
      case ((deleting, storing), (key, _)) =>
        val index = by(key)
        (deleting.updated(index, (key, None) :: deleting(index)), storing)
    }
  }

  /** Adds or removes members from sets with an initial scoring. A score of None indicates the
   *  member should be removed from the set */
  override def multiPut[K1 <: (Buf, Buf)](
    kv: Map[K1, Option[Double]]): Map[K1, Future[Unit]] = {
    // we are exploiting redis's built-in support for removals (zRem)
    // by partitioning deletions and updates into 2 maps indexed by the first
    // component of the composite key, the key of the set
    val (del, persist) =
      multiPutPartitioned[Buf, Buf, K1, Double, Buf](kv)(_._1)
    del.flatMap {
      case (k, members) =>
        // One batched zRem per set; every member shares the same future.
        val value = client.zRem(k, members.map(_._1._2))
        members.map(_._1 -> value.unit)
    } ++ persist.flatMap {
      case (k, members) =>
        members.map {
          case (k1, score) =>
            // a per-InnerK operation; score.get is safe here because the
            // "persist" partition only ever contains Some values.
            k1 -> client.zAdd(k, score.get, k1._2).unit
        }
    }
  }

  /** Performs a zIncrBy operation on a set for a given member,
   *  returning the score the member had before the increment. */
  override def merge(kv: ((Buf, Buf), Double)): Future[Option[Double]] =
    client.zIncrBy(kv._1._1, kv._2, kv._1._2).map {
      _.map { res => res - kv._2 }
    }

  // Quit the redis session, then release the client's resources.
  override def close(t: Time): Future[Unit] = client.quit.foreach { _ => client.close() }
}
| twitter/storehaus | storehaus-redis/src/main/scala/com/twitter/storehaus/redis/RedisSortedSetStore.scala | Scala | apache-2.0 | 7,207 |
package lore.lsp.utils
import lore.lsp.LanguageServerContext
import org.eclipse.lsp4j.{MessageParams, MessageType}
/**
* MessageToaster allows showing message toasters to the client. This directly reaches the language user in the GUI.
*/
object MessageToaster {
  /** Displays an informational toast in the client's GUI, prefixed with "Lore: ". */
  def info(message: String)(implicit context: LanguageServerContext): Unit = {
    val params = new MessageParams(MessageType.Info, s"Lore: $message")
    context.client.showMessage(params)
  }
}
| marcopennekamp/lore | lsp/server/src/lore/lsp/utils/MessageToaster.scala | Scala | mit | 581 |
package com.mlh.clustering.actor
import java.time.Instant
import akka.actor.{Actor, ActorLogging}
import akka.pattern.ask
import com.mlh.clustering.ExceptionUtil
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
import scala.util.{Failure, Success}
/**
* Created by pek on 2017/10/20.
*/
// Demo actor: every 5 seconds (after an initial 5s delay) it sends itself "start",
// then asks actors user/actorB_1 .. user/actorB_9 and logs each reply or failure.
class ActorA
extends Actor
with ActorLogging {
// Schedules the recurring "start" tick to self.
// NOTE(review): this schedule is never cancelled in postStop — confirm it is
// intentional that the tick outlives restarts.
context.system.scheduler.schedule(5 second, 5000 milliseconds, self, "start")
// Timeout applied to every `?` (ask) below.
implicit val timeout = akka.util.Timeout(1000 milliseconds)
def receive: Receive = {
case "start" =>
(1 to 10) foreach{
id =>
// NOTE(review): id 10 is deliberately skipped by this guard — presumably to
// exercise the "no such actor" / timeout path elsewhere; confirm the intent.
if (id != 10){
log.info("======start===== millisecond : {} id : {}", Instant.now().toEpochMilli.toString, id)
// Ask the corresponding actorB by selection; replies are expected to be Strings.
(context.system.actorSelection("user/actorB_%d" format id) ? id).mapTo[String].onComplete {
case Success(x) => log.info("==========ActorStart=========== result : {}", x)
case Failure(ex) => {
log.error("======error===== millisecond : {} id : {} \\n {}", Instant.now().toEpochMilli.toString, id,ExceptionUtil.stackTraceString(ex))
}
}
// Logged immediately after issuing the ask — the reply arrives asynchronously.
log.info("=======end====== millisecond : {} id : {}", Instant.now().toEpochMilli.toString, id)
}
}
case _ => log.info("Unsupported message.")
}
}
package io.udash.benchmarks.properties
import io.udash._
import japgolly.scalajs.benchmark._
import japgolly.scalajs.benchmark.gui._
import scalatags.JsDom.all._
// Micro-benchmarks for udash Property creation, listening and rendering,
// run via scalajs-benchmark's GUI suite.
object PropertyParameters {
// Recursive model used by the model-property benchmarks.
case class Entity(i: Int, s: String, r: Entity)
object Entity extends HasModelPropertyCreator[Entity]
// Attaches a listener and touches the value so the work is not optimised away.
private def listenProperty(p: ReadableProperty[String]) = {
val r = p.listen(_ => ())
p.get + r.isActive.toString
}
// Same, but also reads a nested sub-property (r.r.s) of the model.
private def listenModelProperty(p: ReadableModelProperty[Entity]) = {
val r = p.listen(_ => ())
p.roSubProp(_.r.r.s).get + r.isActive.toString
}
// Attaches both value and structure listeners and reads the third element.
private def listenSeqProperty(p: ReadableSeqProperty[String]) = {
val r = p.listen(_ => ())
val r2 = p.listenStructure(_ => ())
p.elemProperties(2).get + r.isActive.toString + r2.isActive.toString
}
// Each benchmark below repeats its operation 999 times per measurement.
private val createStandardProperty = Benchmark("create a standard property") {
for (_ <- 1 until 1000) Property("asd")
}
private val listenStandardProperty = Benchmark("listen to a standard property") {
for (_ <- 1 until 1000) listenProperty(Property("asd"))
}
// Baseline: DOM rendering cost without any property binding.
private val renderDiv = Benchmark("render a div") {
for (_ <- 1 until 1000) div().render
}
private val renderStandardProperty = Benchmark("render a standard property") {
for (_ <- 1 until 1000) div(bind(Property("asd"))).render
}
private val renderNullProperty = Benchmark("render a null property") {
for (_ <- 1 until 1000) div(bind(Property(null.asInstanceOf[String]))).render
}
private val listenImmutableProperty = Benchmark("listen to an immutable property") {
for (_ <- 1 until 1000) listenProperty("asd".toProperty)
}
private val listenStandardModelProperty = Benchmark("listen to a standard model property") {
for (_ <- 1 until 1000) listenModelProperty(ModelProperty(Entity(5, "asd", Entity(5, "asd", Entity(5, "asd", null)))))
}
private val listenImmutableModelProperty = Benchmark("listen to an immutable model property") {
for (_ <- 1 until 1000) listenModelProperty(Entity(5, "asd", Entity(5, "asd", Entity(5, "asd", null))).toModelProperty)
}
private val listenStandardSeqProperty = Benchmark("listen to a standard seq property") {
for (_ <- 1 until 1000) listenSeqProperty(SeqProperty("A", "B", "C"))
}
private val listenImmutableSeqProperty = Benchmark("listen to an immutable seq property") {
for (_ <- 1 until 1000) listenSeqProperty(Seq("A", "B", "C").toSeqProperty)
}
// GUI suite aggregating all of the benchmarks above.
val suite = GuiSuite(
Suite("PropertyParameters")(
createStandardProperty,
listenStandardProperty,
renderDiv,
renderStandardProperty,
renderNullProperty,
listenImmutableProperty,
listenStandardModelProperty,
listenImmutableModelProperty,
listenStandardSeqProperty,
listenImmutableSeqProperty
)
)
}
| UdashFramework/udash-core | benchmarks/.js/src/main/scala/io/udash/benchmarks/properties/PropertyParameters.scala | Scala | apache-2.0 | 2,811 |
package com.twitter.finagle.mux
import java.util.HashMap
/**
* TagMaps maintains a mapping between tags and elements of type `T`.
* Tags are acquired from- and released to- `set`. TagMap maintains
* the first `fastSize` tags in an array for efficient access.
*/
private trait TagMap[T] extends Iterable[(Int, T)] {
/** Acquires a free tag from the underlying set and maps it to `el`;
 *  None when no tag is available. */
def map(el: T): Option[Int]
/** Replaces the element mapped at `tag` with `newEl`, returning the previous
 *  element; None when `tag` is not currently mapped. */
def maybeRemap(tag: Int, newEl: T): Option[T]
/** Removes the element mapped at `tag` (if any) and releases the tag back to
 *  the set; returns the removed element. */
def unmap(tag: Int): Option[T]
}
private object TagMap {
/** Builds a TagMap over `set`. Tags below `fastSize + set.range.start` live in
 *  a flat array for O(1) access; the rest fall back to a HashMap.
 *  All mutating operations are synchronized on the returned instance. */
def apply[T <: Object: ClassManifest](
set: TagSet,
fastSize: Int = 256
): TagMap[T] = new TagMap[T] {
require(fastSize >= 0)
// Fast path storage for the first `fastSize` tags.
private[this] val fast = new Array[T](fastSize)
// Overflow storage for tags beyond the fast window.
private[this] val fallback = new HashMap[Int, T]
// Tags start at set.range.start, so array indices are offset by this amount.
private[this] val fastOff = set.range.start
private[this] def inFast(tag: Int): Boolean = tag < fastSize+fastOff
private[this] def getFast(tag: Int): T = fast(tag-fastOff)
private[this] def setFast(tag: Int, el: T) { fast(tag-fastOff) = el }
def map(el: T): Option[Int] = synchronized {
set.acquire() map { tag =>
if (inFast(tag))
setFast(tag, el)
else
fallback.put(tag, el)
tag
}
}
def maybeRemap(tag: Int, newEl: T): Option[T] = synchronized {
// Early return while holding the monitor: tag not mapped, nothing to swap.
if (!contains(tag)) return None
val oldEl = if (inFast(tag)) {
val oldEl = getFast(tag)
setFast(tag, newEl)
oldEl
} else {
val oldEl = fallback.remove(tag)
fallback.put(tag, newEl)
oldEl
}
Some(oldEl)
}
def unmap(tag: Int): Option[T] = synchronized {
// Clear the slot (null marks "empty" in the fast array), then release the tag.
val res = if (inFast(tag)) {
val el = getFast(tag)
setFast(tag, null.asInstanceOf[T])
Option(el)
} else
Option(fallback.remove(tag))
set.release(tag)
res
}
private[this] def contains(tag: Int) =
(inFast(tag) && getFast(tag) != null) || fallback.containsKey(tag)
// Iterates over acquired tags, skipping slots whose element is null.
def iterator: Iterator[(Int, T)] = set.iterator flatMap { tag =>
synchronized {
val el = if (inFast(tag)) getFast(tag) else fallback.get(tag)
if (el == null) Iterable.empty
else Iterator.single((tag, el))
}
}
}
}
| firebase/finagle | finagle-mux/src/main/scala/com/twitter/finagle/mux/TagMap.scala | Scala | apache-2.0 | 2,176 |
package com.sksamuel.scapegoat.inspections.akka
import com.sksamuel.scapegoat.{ Inspection, InspectionContext, Inspector }
/** @author Stephen Samuel */
// Scapegoat inspection intended to flag closures that capture Akka's `sender`.
class AkkaSenderClosure extends Inspection {
override def inspector(context: InspectionContext): Inspector = new Inspector(context) {
import context.global._
override def postTyperTraverser = Some apply new context.Traverser {
override def inspect(tree: Tree): Unit = {
tree match {
// NOTE(review): this arm matches every single-parameter function literal and
// does nothing — it neither reports a warning nor recurses into `body`, so
// function bodies are never traversed. Confirm whether a context.warn(...)
// call (and/or continue(body)) is missing here.
case Function(List(ValDef(_, _, _, _)), body) =>
case _ => continue(tree)
}
}
}
}
}
| pwwpche/scalac-scapegoat-plugin | src/main/scala/com/sksamuel/scapegoat/inspections/akka/AkkaSenderClosure.scala | Scala | apache-2.0 | 629 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.cassandra
import java.net.InetSocketAddress
import com.datastax.driver.core.{HostDistance, PoolingOptions, Cluster}
import com.datastax.driver.core.policies.{RoundRobinPolicy, DCAwareRoundRobinPolicy, LatencyAwarePolicy, TokenAwarePolicy}
import com.google.common.net.HostAndPort
import com.twitter.app.App
import com.twitter.finagle.stats.{DefaultStatsReceiver, StatsReceiver}
import com.twitter.util.Duration
import com.twitter.zipkin.storage.cassandra._
import com.twitter.zipkin.storage.cassandra.CassandraSpanStoreDefaults._
import org.twitter.zipkin.storage.cassandra.Repository
import org.twitter.zipkin.storage.cassandra.ZipkinRetryPolicy
import scala.collection.JavaConversions
import scala.collection.JavaConverters._
// Mixin for zipkin collector/query Apps: declares the cassandra-related CLI flags
// and builds span/dependency stores sharing one lazily-created Repository.
trait CassandraSpanStoreFactory {self: App =>
val ensureSchema = flag[Boolean] ("zipkin.store.cassandra.ensureSchema", false, "ensures schema exists")
val keyspace = flag[String] ("zipkin.store.cassandra.keyspace", KeyspaceName, "name of the keyspace to use")
val cassandraDest = flag[String] ("zipkin.store.cassandra.dest", "localhost:9042", "dest of the cassandra cluster; comma-separated list of host:port pairs")
val cassandraSpanTtl = flag[Duration] ("zipkin.store.cassandra.spanTTL", SpanTtl, "length of time cassandra should store spans")
val cassandraIndexTtl = flag[Duration] ("zipkin.store.cassandra.indexTTL", IndexTtl, "length of time cassandra should store span indexes")
val cassandraMaxTraceCols = flag[Int] ("zipkin.store.cassandra.maxTraceCols", MaxTraceCols, "max number of spans to return from a query")
// The two flags below have no default value: only the name and help text are given,
// which is why their use further down is guarded by .isDefined.
val cassandraUsername = flag[String] ("zipkin.store.cassandra.username", "cassandra authentication user name")
val cassandraPassword = flag[String] ("zipkin.store.cassandra.password", "cassandra authentication password")
val cassandraLocalDc = flag[String] ("zipkin.store.cassandra.localDc", "name of the datacenter that will be considered \\"local\\" for load balancing")
val cassandraMaxConnections = flag[Int] ("zipkin.store.cassandra.maxConnections", MaxConnections, "max pooled connections per datacenter-local host")
// eagerly makes network connections, so lazy; shared by both store factories below.
private[this] lazy val lazyRepository = new Repository(keyspace(), createClusterBuilder().build(), ensureSchema())
def newCassandraStore(stats: StatsReceiver = DefaultStatsReceiver.scope("CassandraSpanStore")) = {
new CassandraSpanStore(stats.scope(keyspace()), cassandraSpanTtl(), cassandraIndexTtl(), cassandraMaxTraceCols()) {
override lazy val repository = lazyRepository
}
}
def newCassandraDependencies(stats: StatsReceiver = DefaultStatsReceiver.scope("CassandraDependencyStore")) = {
new CassandraDependencyStore() {
override lazy val repository = lazyRepository
}
}
// Assembles the datastax Cluster.Builder: contact points, optional credentials,
// retry policy, token/latency-aware load balancing and pooling limits.
def createClusterBuilder(): Cluster.Builder = {
val builder = Cluster.builder()
val contactPoints = parseContactPoints()
val defaultPort = findConnectPort(contactPoints)
builder.addContactPointsWithPorts(contactPoints)
builder.withPort(defaultPort) // This ends up config.protocolOptions.port
if (cassandraUsername.isDefined && cassandraPassword.isDefined)
builder.withCredentials(cassandraUsername(), cassandraPassword())
builder.withRetryPolicy(ZipkinRetryPolicy.INSTANCE)
builder.withLoadBalancingPolicy(new TokenAwarePolicy(new LatencyAwarePolicy.Builder(
if (cassandraLocalDc.isDefined)
DCAwareRoundRobinPolicy.builder().withLocalDc(cassandraLocalDc()).build()
else
new RoundRobinPolicy()
).build()))
builder.withPoolingOptions(new PoolingOptions().setMaxConnectionsPerHost(
HostDistance.LOCAL, cassandraMaxConnections()
))
}
// Splits the comma-separated dest flag into socket addresses; port defaults to 9042.
def parseContactPoints() = {
JavaConversions.seqAsJavaList(cassandraDest().split(",")
.map(HostAndPort.fromString)
.map(cp => new java.net.InetSocketAddress(cp.getHostText, cp.getPortOrDefault(9042))))
}
/** Returns the consistent port across all contact points or 9042 */
def findConnectPort(contactPoints: java.util.List[InetSocketAddress]) = {
val ports = contactPoints.asScala.map(_.getPort).toSet
if (ports.size == 1) {
ports.head
} else {
9042
}
}
}
| prat0318/zipkin | zipkin-cassandra/src/main/scala/com/twitter/zipkin/cassandra/CassandraSpanStoreFactory.scala | Scala | apache-2.0 | 4,908 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.learning.reproduction
import de.fuberlin.wiwiss.silk.util.DPair
import de.fuberlin.wiwiss.silk.learning.individual.ComparisonNode
/**
* A crossover operator which combines the thresholds of two comparisons.
*/
/** Crossover operator that averages the thresholds of two comparison nodes,
 *  producing a copy of the source node carrying the averaged threshold. */
case class ThresholdCrossover() extends NodePairCrossoverOperator[ComparisonNode] {
  def crossover(nodes: DPair[ComparisonNode]) = {
    val combined = nodes.source.threshold + nodes.target.threshold
    nodes.source.copy(threshold = combined / 2)
  }
}
| fusepoolP3/p3-silk | silk-learning/src/main/scala/de/fuberlin/wiwiss/silk/learning/reproduction/ThresholdCrossover.scala | Scala | apache-2.0 | 1,042 |
package com.github.mdr.mash.ns.os.pathClass
import com.github.mdr.mash.functions.{ BoundParams, FunctionHelpers, MashMethod, ParameterModel }
import com.github.mdr.mash.ns.core.StringClass
import com.github.mdr.mash.runtime.{ MashString, MashValue }
import org.apache.commons.io.FilenameUtils
object BaseNameMethod extends MashMethod("baseName") {

  val params = ParameterModel.Empty

  /** Returns the final path segment of the target with its extension stripped. */
  def call(target: MashValue, boundParams: BoundParams): MashString = {
    val path = FunctionHelpers.interpretAsPath(target)
    val fileName = path.getFileName.toString
    MashString(FilenameUtils.getBaseName(fileName))
  }

  override def typeInferenceStrategy = StringClass

  override def summaryOpt = Some("Name without extension")

}
| mdr/mash | src/main/scala/com/github/mdr/mash/ns/os/pathClass/BaseNameMethod.scala | Scala | mit | 703 |
/*
* Licensed to Cloudera, Inc. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Cloudera, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.hue.livy.repl.sparkr
import java.nio.file.Files
import java.util.concurrent.locks.ReentrantLock
import com.cloudera.hue.livy.repl.process.ProcessInterpreter
import org.apache.commons.codec.binary.Base64
import org.json4s.jackson.JsonMethods._
import org.json4s.jackson.Serialization.write
import org.json4s.{JValue, _}
import scala.annotation.tailrec
import scala.io.Source
private object SparkRInterpreter {
// Sentinel printed after each command so the reader knows where output ends.
val LIVY_END_MARKER = "----LIVY_END_OF_COMMAND----"
// R statement that emits the sentinel.
val PRINT_MARKER = f"""print("$LIVY_END_MARKER")"""
// What the sentinel looks like in R's console output ([1] "...").
val EXPECTED_OUTPUT = f"""[1] "$LIVY_END_MARKER""""
// Unanchored regex of R plotting functions: a match means the command is
// plotting and its output should be redirected to a png device.
val PLOT_REGEX = (
"(" +
"(?:bagplot)|" +
"(?:barplot)|" +
"(?:boxplot)|" +
"(?:dotchart)|" +
"(?:hist)|" +
"(?:lines)|" +
"(?:pie)|" +
"(?:pie3D)|" +
"(?:plot)|" +
"(?:qqline)|" +
"(?:qqnorm)|" +
"(?:scatterplot)|" +
"(?:scatterplot3d)|" +
"(?:scatterplot\\\\.matrix)|" +
"(?:splom)|" +
"(?:stripchart)|" +
"(?:vioplot)" +
")"
).r.unanchored
}
// Drives an external R process over stdin/stdout: writes commands, reads until a
// sentinel marker, strips ANSI color codes, and captures plot output as base64 PNGs.
private class SparkRInterpreter(process: Process)
extends ProcessInterpreter(process)
{
import SparkRInterpreter._
implicit val formats = DefaultFormats
// Number of commands executed so far; reset when the interpreter becomes ready.
private[this] var executionCount = 0
final override protected def waitUntilReady(): Unit = {
// Set the option to catch and ignore errors instead of halting.
sendExecuteRequest("options(error = dump.frames)")
executionCount = 0
}
// Executes one R command and returns a Jupyter-style JSON result:
// "ok" with text (and optionally a png) on success, "error" on Error,
// None if the process exited. Synchronized: one command at a time.
override protected def sendExecuteRequest(command: String): Option[JValue] = synchronized {
var code = command
// Create a image file if this command is trying to plot.
val tempFile = PLOT_REGEX.findFirstIn(code).map { case _ =>
val tempFile = Files.createTempFile("", ".png")
val tempFileString = tempFile.toAbsolutePath
// Wrap the command so the plot is rendered into the temp png device.
code = f"""png("$tempFileString")\\n$code\\ndev.off()"""
tempFile
}
try {
executionCount += 1
var content = Map(
"text/plain" -> (sendRequest(code) + takeErrorLines())
)
// If we rendered anything, pass along the last image.
tempFile.foreach { case file =>
val bytes = Files.readAllBytes(file)
if (bytes.nonEmpty) {
val image = Base64.encodeBase64String(bytes)
content = content + (("image/png", image))
}
}
Some(parse(write(
Map(
"status" -> "ok",
"execution_count" -> (executionCount - 1),
"data" -> content
))))
} catch {
case e: Error =>
Some(parse(write(
Map(
"status" -> "error",
"ename" -> "Error",
"evalue" -> e.output,
"data" -> Map(
"text/plain" -> takeErrorLines()
)
))))
case e: Exited =>
None
} finally {
// Always clean up the temp plot file, even on failure.
tempFile.foreach(Files.delete)
}
}
// Writes the code plus the sentinel print, then reads output up to the sentinel.
private def sendRequest(code: String): String = {
stdin.println(code)
stdin.flush()
stdin.println(PRINT_MARKER)
stdin.flush()
readTo(EXPECTED_OUTPUT)
}
// Asks R to quit and drains remaining stdout until EOF.
override protected def sendShutdownRequest() = {
stdin.println("q()")
stdin.flush()
while (stdout.readLine() != null) {}
}
// Accumulates stdout characters until `marker` is seen, stripping ANSI color
// escapes on the fly; returns the text before the marker (without framing newlines).
@tailrec
private def readTo(marker: String, output: StringBuilder = StringBuilder.newBuilder): String = {
var char = readChar(output)
// Remove any ANSI color codes which match the pattern "\\u001b\\\\[[0-9;]*[mG]".
// It would be easier to do this with a regex, but unfortunately I don't see an easy way to do
// without copying the StringBuilder into a string for each character.
if (char == '\\u001b') {
if (readChar(output) == '[') {
char = readDigits(output)
if (char == 'm' || char == 'G') {
output.delete(output.lastIndexOf('\\u001b'), output.length)
}
}
}
if (output.endsWith(marker)) {
val result = output.toString()
result.substring(0, result.length - marker.length)
.stripPrefix("\\n")
.stripSuffix("\\n")
} else {
readTo(marker, output)
}
}
// Reads a single character from the process stdout; EOF raises Exited with
// whatever output was collected so far.
private def readChar(output: StringBuilder): Char = {
val byte = stdout.read()
if (byte == -1) {
throw new Exited(output.toString())
} else {
val char = byte.toChar
output.append(char)
char
}
}
// Consumes a run of digit characters (part of an ANSI escape) and returns the
// first non-digit character encountered.
@tailrec
private def readDigits(output: StringBuilder): Char = {
val byte = stdout.read()
if (byte == -1) {
throw new Exited(output.toString())
}
val char = byte.toChar
if (('0' to '9').contains(char)) {
output.append(char)
readDigits(output)
} else {
char
}
}
// Control-flow exceptions carrying the output collected before the event.
private class Exited(val output: String) extends Exception {}
private class Error(val output: String) extends Exception {}
// Guards stderrLines, which is shared with the stderr reader thread below.
private[this] val _lock = new ReentrantLock()
private[this] var stderrLines = Seq[String]()
// Atomically drains the buffered stderr lines and returns them joined.
private def takeErrorLines(): String = {
var lines: Seq[String] = null
_lock.lock()
try {
lines = stderrLines
stderrLines = Seq[String]()
} finally {
_lock.unlock()
}
lines.mkString("\\n")
}
// Daemon thread that continuously buffers the R process's stderr.
private[this] val stderrThread = new Thread("sparkr stderr thread") {
override def run() = {
val lines = Source.fromInputStream(process.getErrorStream).getLines()
for (line <- lines) {
_lock.lock()
try {
stderrLines :+= line
} finally {
_lock.unlock()
}
}
}
}
stderrThread.setDaemon(true)
stderrThread.start()
}
| x303597316/hue | apps/spark/java/livy-repl/src/main/scala/com/cloudera/hue/livy/repl/sparkr/SparkRInterpreter.scala | Scala | apache-2.0 | 6,270 |
import java.io._
import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.{ActorMaterializer, Materializer}
import com.typesafe.config.{Config, ConfigFactory}
import scala.concurrent.ExecutionContextExecutor
import scala.sys.process.Process
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.server.Directives._
trait types {
// Face-embedding feature vector as produced by getTorchVector.
type Vector = List[Double]
}
/** HTTP service: /add stores a face embedding under a name, /verify matches an
 *  uploaded face against all stored embeddings; everything else serves static files.
 */
trait Service extends types with db with processCom{
  implicit val system: ActorSystem
  implicit def executor: ExecutionContextExecutor
  implicit val materializer: Materializer
  def config: Config
  val logger: LoggingAdapter
  // Minimum confidence (see `confidence`) required to accept a match.
  val THRESHOLD = 0.35
  val routes =
    // CORS: allow any origin for the demo frontend.
    respondWithHeader(
      RawHeader("Access-Control-Allow-Origin", "*")
    ) { (path("add") & post) {
      uploadedFile("image") {
        case (metadata, file) =>
          formFields('name.as[String]) { (name) =>
            // Embed the uploaded image and persist (name, vector) in the db.
            val vector = getTorchVector(file.toString)
            addPerson(name, vector)
            file.delete()
            complete(s"Successfully uploaded a picture for user: $name")
          }
      }
    } ~ (path("verify") & post) {
      uploadedFile("image") {
        case (metadata, file) =>
          // Find the best-matching stored person for the uploaded face.
          val (name, res) = check(file.toString)
          file.delete()
          if (res > THRESHOLD) {
            // (removed dead locals `conf`/`c` that were computed here and never used)
            complete(s"Enjoy your ride, $name!")
          } else
            complete(s"STOP! (could be $name with confidence $res)")
      }
    } ~ {
      pathPrefix("") {
        // optionally compresses the response with Gzip or Deflate
        // if the client accepts compressed responses
        encodeResponse {
          // serve up static content from a JAR resource
          getFromResourceDirectory("static/build")
        }
      }
    }
  }

  /** Computes the face-embedding vector for an image file via processCom. */
  private def getTorchVector(filePath: String): Vector = {
    writeToStream(filePath)
    //val luaScriptDir = "/home/markus/techfest/vgg_face_torch"
    //val output = Process("th demo.lua " + filePath, new File(luaScriptDir)).!!
    //output.split("\\n").toList.dropRight(1).map(_.toDouble)
  }

  /** Returns the stored person whose embedding is closest to the image's,
   *  together with the match confidence in (0, 1]. */
  def check(filePath: String): (String, Double) = {
    // 1. calculate vector for image
    val vector = getTorchVector(filePath: String)
    // get all from db
    val list = getAll.get
    // find closest vector: keep the entry with the highest confidence
    val res = list.foldLeft("", 0.0) {
      case ((name, currentHighestConfidence), (id, name2, vec)) =>
        val d = confidence(dist(vector, vec))
        if (d > currentHighestConfidence) (name2, d)
        else (name, currentHighestConfidence)
    }
    logger.info(s"detected ${res._1} with distance ${res._2} ")
    res
  }

  /** Squared euclidean distance between two vectors (pairs beyond the shorter
   *  vector's length are ignored by zip). */
  def dist(vec1: Vector, vec2: Vector): Double = {
    vec1.zip(vec2).foldLeft(0.0) {
      case (sum, (x, y)) => sum + (x - y) * (x - y)
    }
  }

  /** Maps a distance to a confidence in (0, 1]: exp(-x), so 0 distance -> 1. */
  def confidence(x: Double): Double = {
    Math.exp(-x)
  }
}
// App entry point: wires the actor system, initialises the db and face
// detection, then binds the HTTP routes from Service.
object FlixFace extends App with Service with db{
override implicit val system = ActorSystem()
override implicit val executor = system.dispatcher
override implicit val materializer = ActorMaterializer()
override val config = ConfigFactory.load()
override val logger = Logging(system, getClass)
init DB and the external face-detection pipeline before serving requests.
/*
addPerson("test", List(2.1,2.2,4.7))
addPerson("markus", List(0.0,0.0,0.2,47.47))
logger.info(getAll.get mkString ",")
*/
logger.info(dist(List(2.1, 2.2, 2.5), List(2.1, 2.2, 2.5)).toString)
Http().bindAndHandle(routes, config.getString("http.interface"), config.getInt("http.port"))
}
| Sukram21/FlixFace | src/main/scala/FlixFace.scala | Scala | mit | 3,736 |
// Functor instance for the Reader functor Reader[A, ?] (kind-projector syntax):
// fmap lifts f by post-composing it with the reader h.
implicit def readerFunctor[A] = new Functor[Reader[A, ?]] {
  def fmap[X, B](f: X => B)(h: Reader[A, X]): Reader[A, B] =
    f compose h
}
| hmemcpy/milewski-ctfp-pdf | src/content/2.4/code/scala/snippet02.scala | Scala | gpl-3.0 | 139 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
import org.scalacheck._
import Prop.{ Exception => _, _ }
import Gen.{ alphaNumChar, frequency, nonEmptyListOf }
import java.io.File
import sbt.internal.TestLogger
import sbt.io.{ IO, Path }
import OutputStrategy._
import sbt.internal.util.Util._
// Property test: forking a JVM with an arbitrarily long (but trimmed) classpath
// succeeds when a -cp/-classpath option is passed, and fails (exit 1) otherwise.
object ForkTest extends Properties("Fork") {
/**
* Heuristic for limiting the length of the classpath string.
* Longer than this will hit hard limits in the total space
* allowed for process initialization, which includes environment variables, at least on linux.
*/
final val MaximumClasspathLength = 100000
// Generators: option name is -cp, -classpath, or (rarely) absent.
lazy val genOptionName = frequency((9, "-cp".some), (9, "-classpath".some), (1, none))
lazy val pathElement = nonEmptyListOf(alphaNumChar).map(_.mkString)
lazy val path = nonEmptyListOf(pathElement).map(_.mkString(File.separator))
lazy val genRelClasspath = nonEmptyListOf(path)
// Classpath entries the forked JVM genuinely needs: scala-library and this test's classes.
lazy val requiredEntries =
IO.classLocationPath[scala.Option[_]].toFile ::
IO.classLocationPath[sbt.exit.type].toFile ::
Nil
// Forked main class and its argument (exit status 0).
lazy val mainAndArgs =
"sbt.exit" ::
"0" ::
Nil
property("Arbitrary length classpath successfully passed.") =
forAllNoShrink(genOptionName, genRelClasspath) {
(optionName: Option[String], relCP: List[String]) =>
IO.withTemporaryDirectory { dir =>
TestLogger { log =>
val withScala = requiredEntries ::: relCP.map(rel => new File(dir, rel))
val absClasspath = trimClasspath(Path.makeString(withScala))
val args = optionName.map(_ :: absClasspath :: Nil).toList.flatten ++ mainAndArgs
val config = ForkOptions().withOutputStrategy(LoggedOutput(log))
val exitCode = try Fork.java(config, args)
catch { case e: Exception => e.printStackTrace; 1 }
// Without a classpath option the forked JVM cannot find sbt.exit -> exit 1.
val expectedCode = if (optionName.isEmpty) 1 else 0
s"temporary directory: ${dir.getAbsolutePath}" |:
s"required classpath: ${requiredEntries.mkString("\n\t", "\n\t", "")}" |:
s"main and args: ${mainAndArgs.mkString(" ")}" |:
s"args length: ${args.mkString(" ").length}" |:
s"exitCode: $exitCode, expected: $expectedCode" |:
(exitCode == expectedCode)
}
}
}
// Truncates the classpath at the last path-separator before the length limit,
// keeping only whole entries.
private[this] def trimClasspath(cp: String): String =
if (cp.length > MaximumClasspathLength) {
val lastEntryI = cp.lastIndexOf(File.pathSeparatorChar.toInt, MaximumClasspathLength)
if (lastEntryI > 0)
cp.substring(0, lastEntryI)
else
cp
} else
cp
}
// Object used in the tests
// Test fixture: terminates the JVM with the exit status given as the first argument.
object exit {
  def main(args: Array[String]): Unit =
    sys.exit(args(0).toInt)
}
| xuwei-k/xsbt | run/src/test/scala/sbt/ForkTest.scala | Scala | apache-2.0 | 2,849 |
/**
*
* ${FILE_NAME}
* Ledger wallet
*
* Created by Pierre Pollastri on 09/01/15.
*
* The MIT License (MIT)
*
* Copyright (c) 2015 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package co.ledger.wallet.core.utils
import android.app.Activity
import android.content.Context
import android.graphics.drawable.Drawable
import android.app.Fragment
import android.view.View
import scala.reflect.ClassTag
/** Typed Android resource accessor: resolves a resource id against whatever
 *  kind of context object it was built with (View, Activity, Fragment or
 *  Context) and as whatever target type the caller asks for.
 */
class TR(id: Int, context: AnyRef) {
  /** Resolves the resource as an instance of `A`, dispatching on the runtime
   *  type of the wrapped context object.
   *  NOTE(review): a context that is none of View/Activity/Fragment/Context
   *  throws a MatchError here — confirm callers can never pass another type.
   */
  def as[A](implicit ct: ClassTag[A]): A = {
    context match {
      case v: View => fromView(v, ct)
      case a: Activity => fromActivity(a, ct)
      case f: Fragment => fromFragment(f, ct)
      case c: Context => fromContext(c, ct)
    }
  }
  /** Resolves the id as a color, using whichever Context the wrapped object
   *  can provide. */
  def asColor: Int = {
    val myContext = context match {
      case v: View => v.getContext
      case a: Activity => a
      case f: Fragment => f.getActivity
      case c: Context => c
    }
    myContext.getResources.getColor(id)
  }
  // When A is a View subtype, look the id up inside this view's hierarchy;
  // anything else falls back to plain resource lookup via the view's Context.
  private def fromView[A](v: View, classTag: ClassTag[A]): A = {
    classTag match {
      case view if classOf[View].isAssignableFrom(classTag.runtimeClass) => v.findViewById(id).asInstanceOf[A]
      case _ => fromContext(v.getContext, classTag)
    }
  }
  // Same dispatch as fromView, but searching the Activity's content view.
  private def fromActivity[A](a: Activity, classTag: ClassTag[A]): A = {
    classTag match {
      case view if classOf[View].isAssignableFrom(classTag.runtimeClass) => a.findViewById(id).asInstanceOf[A]
      case _ => fromContext(a, classTag)
    }
  }
  // Same dispatch again, searching the Fragment's root view.
  // NOTE(review): getView is null before onCreateView — presumably callers
  // only use this after the view exists; confirm.
  private def fromFragment[A](f: Fragment, classTag: ClassTag[A]): A = {
    classTag match {
      case view if classOf[View].isAssignableFrom(classTag.runtimeClass) => f.getView.findViewById(id).asInstanceOf[A]
      case _ => fromContext(f.getActivity, classTag)
    }
  }
  // Resolve plain resources by requested type: String, Float (dimension),
  // Int (color) or Drawable. Any other A throws a MatchError.
  private def fromContext[A](c: Context, classTag: ClassTag[A]): A = {
    classTag match {
      case string if classOf[String] == classTag.runtimeClass => c.getResources.getString(id).asInstanceOf[A]
      case float if classOf[Float] == classTag.runtimeClass => c.getResources.getDimension(id).asInstanceOf[A]
      case int if classOf[Int] == classTag.runtimeClass => c.getResources.getColor(id).asInstanceOf[A]
      case drawable if classOf[Drawable] == classTag.runtimeClass => c.getResources.getDrawable(id).asInstanceOf[A]
    }
  }
}
object TR {
  // Implicit builder: takes whichever of context/fragment/view is available;
  // the last non-null argument wins, so view > fragment > context.
  def apply(id: Int)(implicit context: Context, fragment: Fragment = null, view: View = null): TR = new TR(id, getHighestPriorityArgument(context, fragment, view))
  // Explicit builders for each supported context kind.
  def apply(context: Context, id: Int): TR = new TR(id, context)
  def apply(view: View, id: Int): TR = new TR(id, view)
  def apply(fragment: Fragment, id: Int): TR = new TR(id, fragment)
def getHighestPriorityArgument(args: AnyRef*): AnyRef = {
var out: AnyRef = null
for (arg <- args) {
if (arg != null)
out = arg
}
out
}
} | LedgerHQ/ledger-wallet-android | app/src/main/scala/co/ledger/wallet/core/utils/TR.scala | Scala | mit | 3,860 |
package dbtarzan.gui.browsingtable
import scalafx.scene.control.SplitPane
import scalafx.scene.layout.BorderPane
import scalafx.Includes._
import scalafx.geometry.Orientation
import dbtarzan.gui.info.Info
/* splitter with the foreign keys view on the top and the info view on the bottom */
class ForeignKeysInfoSplitter(foreignKeys : BorderPane, info: Info) {
  // Split pane built once at construction time and reused thereafter.
  private val center = buildCenter()
  /** The widget to embed in the surrounding layout. */
  def control : SplitPane = center
  /* builds the split panel */
  private def buildCenter() = new SplitPane {
    items ++= List(foreignKeys, info.control)
    orientation() = Orientation.VERTICAL
    maxHeight = Double.MaxValue
    maxWidth = Double.MaxValue
    // Give roughly 80% of the height to the foreign-keys view.
    dividerPositions = 0.8
    // Keep the info pane's size fixed when the parent window resizes.
    SplitPane.setResizableWithParent(info.control, false)
  }
} | aferrandi/dbtarzan | src/main/scala/dbtarzan/gui/browsingtable/ForeignKeysInfoSplitter.scala | Scala | apache-2.0 | 762 |
package spatutorial.shared
import boopickle.Default._
// Outcome of a server call, serialised over the wire with BooPickle.
sealed trait ReturnResult
case object Success extends ReturnResult
case object Failure extends ReturnResult
object ReturnResult {
  // Pickler for the whole sealed hierarchy, generated once and shared
  // implicitly by all (de)serialisation sites.
  // NOTE(review): the name `priorityPickler` looks like a copy-paste
  // leftover; renaming would be source-compatible for implicit resolution
  // but could break explicit references, so it is only flagged here.
  implicit val priorityPickler: Pickler[ReturnResult] = generatePickler[ReturnResult]
}
| MikaelMayer/scalajs-spa-tutorial | shared/src/main/scala/spatutorial/shared/ReturnResult.scala | Scala | apache-2.0 | 277 |
package sjc.delta.cats
import scala.language.higherKinds
import sjc.delta.Delta
import sjc.delta.Delta.Aux
import cats.Apply
object syntax {
  // Enriches a Delta.Aux with the ability to be lifted into any Apply functor.
  implicit class DeltaCatsOps[A, B](val value: Aux[A, B]) extends AnyVal {
    /** Lifts a delta on plain values to a delta on values wrapped in `F`. */
    def liftA[F[_]: Apply]: Aux[F[A], F[B]] = new DeltaApply[F, A, B](value)
  }
  // Applies the underlying delta pointwise inside F via Apply#map2.
  private case class DeltaApply[F[_]: Apply, A, B](delta: Aux[A, B]) extends Delta[F[A]] {
    type Out = F[B]
    def apply(leftF: F[A], rightF: F[A]): F[B] = Apply[F].map2(leftF, rightF)(delta.apply)
  }
} | stacycurl/delta | cats/src/main/scala/sjc/delta/cats/syntax.scala | Scala | apache-2.0 | 510 |
package example
import example.ScalaJSCode._
import japgolly.scalajs.react._
import org.scalajs.dom
import org.scalajs.dom.html._
import shared.UltraRapidImage
import scala.collection.mutable.ArrayBuffer
import scala.scalajs.js
import scala.util.Random
/** React backend for the multiple-choice rapid-image task: drives the
 *  cross / image / mask / question / rest cycle via timers and records the
 *  subject's reaction times into a Report2.
 */
class MultiChoiceBackend(stateController: BackendScope[_, MultiChoiceState], var clicked: Boolean, var report: scala.Option[Report2]) {
  // Start/end timestamps (ms) of the current image presentation; their
  // difference is the reaction time written to the report.
  var startTime: Long = 0
  var endTime: Long = 0
  // Timestamp used only for debug timing measurements.
  var debugTime: Long = 0
  var currentInterval: Int = 0
  val random = new Random()
  // Index into `durations`: bumped after each wrong answer (capped at the
  // last entry) and reset to 0 when a question completes.
  var currentImageDurationInd = 0
  // Image display durations in milliseconds, shortest (hardest) first.
  val durations = List(33, 53, 80, 105, 500)
  questionId = ""
def nextCorrectAnswer: Int = new Random().nextInt(3) + 1
def nextImage1(e: ReactEventI): Unit = {
e.preventDefault()
stateController.modState(s => {
if (s.correctAnswer == 1) {
val next = new RestPeriod(random.nextInt(1500) + 500)
clearAndSetInterval(interval, next.getDuration, new ArrayBuffer[Int](), s.res._2.length)
MultiChoiceState((null, s.res._2),
next,
s.questionType, s.numberOfQuestions, nextCorrectAnswer)
} else {
val next = new Cross(500)
if (currentImageDurationInd < 4)
currentImageDurationInd += 1
next.setDuration(durations(currentImageDurationInd))
clearAndSetInterval(interval, next.getDuration, new ArrayBuffer[Int](), s.res._2.length)
MultiChoiceState((s.res._1, s.res._2),
next,
s.questionType, s.numberOfQuestions, nextCorrectAnswer)
}
})
}
def nextImage2(e: ReactEventI): Unit = {
e.preventDefault()
stateController.modState(s => {
if (s.correctAnswer == 2) {
val next = new RestPeriod(random.nextInt(1500) + 500)
clearAndSetInterval(interval, next.getDuration, new ArrayBuffer[Int](), s.res._2.length)
MultiChoiceState((null, s.res._2),
next,
s.questionType, s.numberOfQuestions, nextCorrectAnswer)
} else {
val next = new Cross(500)
if (currentImageDurationInd < 4)
currentImageDurationInd += 1
next.setDuration(durations(currentImageDurationInd))
clearAndSetInterval(interval, next.getDuration, new ArrayBuffer[Int](), s.res._2.length)
MultiChoiceState((s.res._1, s.res._2),
next,
s.questionType, s.numberOfQuestions, nextCorrectAnswer)
}
})
}
def nextImage3(e: ReactEventI): Unit = {
e.preventDefault()
stateController.modState(s => {
if (s.correctAnswer == 3) {
val next = new RestPeriod(random.nextInt(1500) + 500)
clearAndSetInterval(interval, next.getDuration, new ArrayBuffer[Int](), s.res._2.length)
MultiChoiceState((null, s.res._2),
next,
s.questionType, s.numberOfQuestions, nextCorrectAnswer)
} else {
val next = new Cross(500)
if (currentImageDurationInd < 4)
currentImageDurationInd += 1
next.setDuration(durations(currentImageDurationInd))
clearAndSetInterval(interval, next.getDuration, new ArrayBuffer[Int](), s.res._2.length)
MultiChoiceState((s.res._1, s.res._2),
next,
s.questionType, s.numberOfQuestions, nextCorrectAnswer)
}
})
}
  // The signed-in user's id, read from a DOM element rendered by the server.
  val user = getElementById[Heading]("user")
  val userID: String = user.getAttribute("data-user-id")
  // Handle of the currently scheduled timer, if any.
  var interval: js.UndefOr[js.timers.SetIntervalHandle] =
    js.undefined
def clearAndSetInterval(interval: js.UndefOr[js.timers.SetIntervalHandle], duration: Int,
questionTypes: ArrayBuffer[Int], questionMargin: Int): Unit = {
js.timers.clearInterval(interval.get)
this.interval = js.timers.setInterval(duration)(showPicture(questionTypes, questionMargin))
}
def clearInterval(interval: js.UndefOr[js.timers.SetIntervalHandle]): Unit = {
js.timers.clearInterval(interval.get)
}
def fromBooleanToInt(b: Boolean): Int = if (b) 1 else 0
def extractImageType(image: UltraRapidImage): Int = Integer.parseInt(image.imageType)
  /** Timer callback: advances the experiment's state machine one step.
   *  The transition depends on what is currently displayed (rest period,
   *  fixation cross, image, mask, choice question, or other). Behaviour is
   *  strictly order-dependent: the timer is re-armed and several mutable
   *  fields (timestamps, questionId, currentImageDurationInd) are updated
   *  inside each transition.
   */
  def showPicture(questionTypes: ArrayBuffer[Int], questionMargin: Int) =
    stateController.modState(s => {
      s.whatToShow match {
        case r: RestPeriod => {
          // A question just finished: record its reaction time and the
          // duration it was answered at, then reset the difficulty index.
          report.get.addAnswerToReport(questionId, (endTime - startTime).toString, durations(currentImageDurationInd), System.currentTimeMillis() - debugTime)
          currentImageDurationInd = 0
          val next = r.moveToNext()
          clearAndSetInterval(interval, next.getDuration, questionTypes, questionMargin)
          debugTime = System.currentTimeMillis()
          // Pick the next question if any remain; -1 marks the run as done.
          if (!s.res._2.isEmpty)
            MultiChoiceState(GlobalRecognitionTest.getRandomQuestion(s.res._2), next,
              s.questionType, s.numberOfQuestions - 1, s.correctAnswer)
          else
            MultiChoiceState((null, null), next,
              s.questionType, -1, s.correctAnswer)
        }
        case f: Cross => {
          // Cross just ended: the image is about to appear, so start the
          // reaction-time clock and remember which question is on screen.
          debugTime = System.currentTimeMillis()
          startTime = System.currentTimeMillis()
          questionId = s.res._1.imageType
          val nextState = f.moveToNext()
          currentInterval = nextState.getDuration
          println(currentInterval)
          clearAndSetInterval(interval, nextState.getDuration, questionTypes, questionMargin)
          MultiChoiceState(s.res, nextState, s.questionType, s.numberOfQuestions, s.correctAnswer)
        }
        case i: ImageQ => {
          // Image just ended: stop the reaction-time clock and move on.
          endTime = System.currentTimeMillis()
          val nextState = i.moveToNext()
          clearAndSetInterval(interval, nextState.getDuration, questionTypes, questionMargin)
          MultiChoiceState(s.res, nextState, s.questionType, s.numberOfQuestions, s.correctAnswer)
        }
        case m: Mask => {
          val nextState = m.moveToNext()
          clearAndSetInterval(interval, nextState.getDuration, questionTypes, questionMargin)
          MultiChoiceState(s.res, nextState, s.questionType, s.numberOfQuestions, s.correctAnswer)
        }
        case n: ChoiceQuestion => {
          // Stay on the question (duration -1) until the subject answers.
          MultiChoiceState(s.res, ChoiceQuestion(-1), s.questionType, s.numberOfQuestions, s.correctAnswer)
        }
        case w: WhatToShow2 => {
          questionId = s.res._1.imageType
          val nextState = w.moveToNext()
          clearAndSetInterval(interval, nextState.getDuration, questionTypes, questionMargin)
          MultiChoiceState(s.res, nextState, s.questionType, s.numberOfQuestions, s.correctAnswer)
        }
      }
    })
  /** One-time setup: clears cookies, lazily creates the report for the
   *  current user (only on first call), and arms the first timer.
   */
  def init(state: MultiChoiceState, questionTypes: ArrayBuffer[Int], questionMargin: Int) = {
    dom.document.cookie = ""
    report match {
      case None => report = Some(new Report2(userID))
      case _ =>
    }
    interval = js.timers.setInterval(state.whatToShow.getDuration)(showPicture(questionTypes, questionMargin))
  }
} | MysterionRise/psycho-test-framework | psycho-test-client/src/main/scala/example/MultiChoiceBackend.scala | Scala | mit | 6,801 |
package com.nthportal.euler
package h0.t3
import com.nthportal.euler.maths.{NumericFormat, streams}
import scala.annotation.tailrec
/** Project Euler problem 38: the largest 1-to-9 pandigital 9-digit number
 *  that can be formed as the concatenated product of an integer with
 *  (1, 2, ..., n). Bases >= 10^5 cannot work, so the search stops there.
 */
object Problem38 extends ProjectEulerProblem {
  private val maxLength = 9

  override def apply(): Long = {
    streams.naturals()
      .takeWhile(_ < 10 ** 5)
      .map(concatenatedProduct)
      .filter(product => product.length == maxLength && NumericFormat.isPandigital(product))
      .map(_.toLong)
      .max
  }

  /** Concatenates base*1, base*2, ... for as long as the result still fits
   *  within `maxLength` digits; the first product that would overflow the
   *  limit is not appended.
   */
  private def concatenatedProduct(base: Long): String = {
    val digits = new StringBuilder
    var multiplier = 1
    var full = false
    while (!full) {
      val part = (base * multiplier).toString
      if (digits.length + part.length > maxLength) full = true
      else {
        digits ++= part
        multiplier += 1
      }
    }
    digits.toString
  }
}
| NthPortal/euler-n-scala | src/main/scala/com/nthportal/euler/h0/t3/Problem38.scala | Scala | mit | 835 |
/**
* ____ __ ____ ____ ____,,___ ____ __ __ ____
* ( _ \\ /__\\ (_ )(_ _)( ___)/ __) ( _ \\( )( )( _ \\ Read
* ) / /(__)\\ / /_ _)(_ )__) \\__ \\ )___/ )(__)( ) _ < README.txt
* (_)\\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt
*/
package razie.wiki.model
import com.novus.salat._
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import model.CMDWID
import org.bson.types.ObjectId
import razie.base.data.TripleIdx
import razie.db.RazSalatContext._
import razie.diesel.dom.DieselAssets
import razie.hosting.WikiReactors
import razie.tconf.TSpecRef
import razie.wiki.Services
/**
* a wiki id, a pair of cat and name - can reference a wiki entry or a section of an entry
*
* THIS IS how topics are referenced everywhere in code. In DB they are reference with UWID
*
* format is parent/realm.cat:name#section
*
* assumption is that when we link between wikis, we'll have wiki/parent/cat:name#section -
*
* NOTE: equals does not look at parent !!!
*
* @param sourceUrl not persisted/read, just temporary
*/
case class WID(
  cat: String,
  name: String,
  parent: Option[ObjectId] = None,
  section: Option[String] = None,
  realm: Option[String] = None,
  @transient sourceUrl: Option[String] = None) {
  override def toString = "[[" + wpath + "]]"
  // Salat/Mongo representation, computed once.
  lazy val grated = grater[WID].asDBObject(this)
  lazy val findParent = parent flatMap (p => Wikis(getRealm).find(p))
  // Parent WID: first try the in-memory index, then fall back to loading
  // the parent entry from the wiki store.
  lazy val parentWid = parent flatMap (p => WikiIndex.withIndex(getRealm)
  { index => index.find { case (a, b, c) => c == p }.map(_._2) }) orElse (findParent map (_.wid))
  /** clone with new sourceUrl */
  def withSourceUrl(s: String) = this.copy(sourceUrl = Some(s))
  /** find a parent of the given category */
  def parentOf(category: String => Boolean) = {
    // Walk one step up: keep the parent if its cat matches, otherwise climb.
    def f(p: Option[WID]) = if (p.isEmpty) None else p.filter(x => category(x.cat)).orElse(p.flatMap(_.parentWid))
    f(parentWid)
  }
  // Per-instance page cache: outer Option = "was anything cached yet",
  // inner Option = the (possibly absent) page itself.
  private val _cachedPage : AtomicReference[Option[Option[WikiEntry]]] = new AtomicReference[Option[Option[WikiEntry]]](None)
  def hasCachedPage = _cachedPage.get().nonEmpty
  def resetCachedPage(newp:Option[WikiEntry]) = _cachedPage.set(Some(newp))
  /** find the page for this, if any - respects the NOCATS */
  def page : Option[WikiEntry] = {
    if(hasCachedPage) _cachedPage.get().get
    else {
      // Try the shared wiki cache first, when enabled in configuration.
      val w = if (Services.config.cacheWikis) {
        WikiCache.getEntry(this.wpathFullNoSection + ".page").map { x => x
        }
      } else None
      w.orElse {
        val ret = findPageNocache
        _cachedPage.compareAndSet(None, Some(ret))
        // the page may not be processed in the context of a user, so we can't cache it
        ret
      }
    }
  }
  /** find the page without using caches, if any - respects the NOCATS */
  def findPageNocache : Option[WikiEntry] = {
    val ret = {
      if (cat.isEmpty)
        findId flatMap Wikis(getRealm).find // special for NOCATS
      else
        Wikis(getRealm).find(this)
    }
    ret
  }
  def isEmpty = cat == "?" && name == "?" || cat == "-" && name == "-"
  /** should this entry be indexed in memory */
  def shouldIndex = !(Wikis.PERSISTED contains cat)
  /** find the full section, if one is referenced */
  def findSection = section.flatMap { s => page.flatMap(_.sections.find(_.name == s)) }
  /** get textual content, unprocessed, of this object, if found */
  def content = section.map { s =>
    // allow that section contains signature
    val sar = s.split(":", 2)
    page.flatMap(p =>
      p.sections.find(t => t.name == s && (sar.size < 2 || t.signature == sar.last)) orElse
      p.templateSections.find(t => t.name == s && (sar.size < 2 || t.signature == sar.last))).map(
      _.content) getOrElse
      s"`[Section $s not found in $toString!]`"
  } orElse {
    page.map(_.content)
  }
  /** withRealm - convienience builder. Note that you can't override a category prefix */
  def r(r: String) =
    if (CAT.unapply(cat).flatMap(_.realm).isDefined || r.length <= 0) this
    else this.copy(realm = Some(r))
  // def r(r:String) = if(Wikis.DFLT == r || CAT.unapply(cat).flatMap(_.realm).isDefined) this else this.copy(realm
  // = Some(r))
  /** withRealm - dn't overwrite realm if set already */
  def defaultRealmTo(r: String) =
    if (realm.isDefined) this
    else this.r(r)
  /** withRealm - dn't overwrite realm if set already or if you pass in empty or None */
  def defaultRealmTo(r: Option[String]) =
    if (realm.isDefined || r.isEmpty || r.exists(_.length == 0)) this
    else this.r(r.get)
  /** if wid has no realm, should get the realm or the default - note taht the CAT prefix rules */
  def getRealm = realm orElse CAT.unapply(cat).flatMap(_.realm) getOrElse Wikis.DFLT
  /** find the ID for this page, if any - respects the NOCATS */
  def findId =
    if (ObjectId.isValid(name)) Some(new ObjectId(name))
    else findCatId().map(_._2)
  /** find the category, if missing */
  def findCat = findCatId().map(_._1)
  /** find the ID for this page, if any - respects the NOCATS */
  def findId (curRealm:String) =
    if(ObjectId.isValid(name)) Some(new ObjectId(name))
    else findCatId(curRealm).map(_._2)
  /** find the category, if missing */
  def findCat (curRealm:String) = findCatId(curRealm).map(_._1)
  /** find the proper category and ID for this wid (name, or name and cat etc) */
  private def findCatId(curRealm:String=""): Option[(String, ObjectId)] = {
    // Index query: direct lookup when a cat is known, otherwise search by
    // name among the NOCATS categories.
    def q = {idx: TripleIdx[String, WID, ObjectId] =>
      if(! cat.isEmpty)
        idx.get2(name, this).map((cat, _))
      else {
        // get by name and see if cat we found is NOCATS
        idx.get1k(name).filter(x=>
          WID.NOCATS.contains(x.cat)
        ).headOption.flatMap(x=>
          idx.get2(name, x)).map((cat, _)
        )
        //todo maybe forget this branch and enhance equals to look at nocats ?
      }
    }
    //todo performance of these is horrendous
    //todo i should ignore curRealm if I have my own
    // first current realm
    if(curRealm.isEmpty) {
      realm.orElse(CAT.unapply(cat).flatMap(_.realm)).map{r=>
        // was there a
        WikiIndex.withIndex(r)(q) orElse Wikis.find(this).map(x=>(x.category, x._id))
      } getOrElse {
        // try all indexes real quick
        var y : Option[(String, ObjectId)] = None
        WikiReactors.reactors.map(_._2.wiki.index).find{x=>
          x.withIndex(q).map{catid=>
            y = Some(catid)
            y
          }.isDefined
        }
        y orElse Wikis.find(this).map(x=>(x.category, x._id))
      }
    }
    else
      WikiIndex.withIndex(curRealm)(q) orElse WikiIndex.withIndex(getRealm)(q) orElse Wikis.find(this).map(x=>(x.category, x._id))
  }
  /** some topics don't use cats */
  // NOTE: equality deliberately ignores `parent` (see class doc) and treats
  // a missing realm on either side as a wildcard.
  override def equals (other:Any) = other match {
    case o: WID =>
      this.cat == o.cat && this.name == o.name &&
        (this.getRealm == o.getRealm || this.realm.isEmpty || o.realm.isEmpty) &&
        (this.section.isEmpty && o.section.isEmpty || this.section == o.section)
    case _ => false
  }
  def uwid = findCatId() map {x=>UWID(x._1, x._2, realm)}
  /** cat with realm */
  def cats = if(realm.exists(_.length > 0)) (realm.get + "." + cat) else cat
  /** format into nice url */
  def wpath: String = parentWid.map(_.wpath + "/").getOrElse("") + (
    if (cat != null && cat.length > 0 && !WID.NOCATS.contains(cat)) (cats + ":") else "") + name + (section.map("#" + _).getOrElse(""))
  /** this one used for simple cats with /w/:realm */
  def wpathnocats: String = parentWid.map(_.wpath + "/").getOrElse("") + (
    if (cat != null && cat.length > 0 && !WID.NOCATS.contains(cat)) (cat + ":") else "") + name + (section.map("#" + _).getOrElse(""))
  /** full categories allways, with realm prefix if not RK */
  def wpathFull: String = wpathFullNoSection +
    (section.map("#" + _).getOrElse(""))
  /** full path but without the section part */
  def wpathFullNoSection: String = parentWid.map(_.wpath + "/").getOrElse("") + (
    if (cat != null && cat.length > 0 ) (cats + ":") else "") + name
  def formatted = this.copy(name=Wikis.formatName(this))
  /** even when running in localhost mode, use the remote url */
  def urlRemote: String = {
    val hasRealm = realm.isDefined && WikiReactors(realm.get).websiteProps.prop("domain").exists(_.length > 0)
    "http://" + {
      if (hasRealm)
        WikiReactors(realm.get).websiteProps.prop("domain").get
      else
        //todo current realm
        Services.config.hostport
    } + "/" + {
      if (realm.isDefined) DieselAssets.mkLink(this, wpath)
      else canonpath
    }
  }
  /** remote url when pointing at a specific server, not the default cloud */
  def urlForTarget(target: String): String = {
    if ("www.dieselapps.com".equals(target))
      urlRemote // reuse this to use proper realm domains
    else
      "http://" + {
        target
      } + "/" + {
        if (realm.isDefined) DieselAssets.mkLink(this, wpath)
        else canonpath
      }
  }
  /** the canonical URL with the proper hostname for reactor */
  def urlForEdit: String = intUrl(DieselAssets.mkEditLink)
  /** the canonical URL with the proper hostname for reactor */
  def url: String = intUrl(DieselAssets.mkLink)
  /** find hostport */
  def hostport(forceRemote: Boolean = false): String = {
    val hasRealm = realm.isDefined && WikiReactors(realm.get).websiteProps.prop("domain").exists(_.length > 0)
    (if (hasRealm && (forceRemote || !Services.config.isLocalhost)) {
      "http://" + WikiReactors(realm.get).websiteProps.prop("domain").get
    } else {
      //todo current realm
      // todo in localhost, it may run wiht a different IP/port to don't use the hostport, just relative
      // Services.config.hostport
      ""
    })
  }
  /** the canonical URL with the proper hostname for reactor */
  private def intUrl(func: (WID, String) => String): String = {
    hostport() + "/" + {
      if (realm.isDefined) func(this, wpath)
      else canonpath
    }
  }
  //todo this is stupid
  def urlRelative : String = urlRelative(Wikis.RK)
  /** use when coming from a known realm */
  def urlRelative (fromRealm:String) : String =
    "/" + {
      if(realm.exists(_ != fromRealm)) DieselAssets.mkLink(this, wpathFull)
      else canonpath
    }
  /** canonical path - may be different from wpath */
  def canonpath =
    if(parent.isEmpty && WID.PATHCATS.contains(cat))
      s"$cat/$name" + (section.map("#" + _).getOrElse(""))
    else
      DieselAssets.mkLink(this, wpathnocats)
  def ahref: String = "<a href=\\"" + url + "\\">" + toString + "</a>"
  def ahrefRelative (fromRealm:String=Wikis.RK): String = "<a href=\\"" + urlRelative(fromRealm) + "\\">" + toString + "</a>"
  def ahrefNice (fromRealm:String=Wikis.RK): String = "<a href=\\"" + urlRelative(fromRealm) + "\\">" + getLabel() + "</a>"
  /** helper to get a label, if defined or the default provided */
  //todo labels should not be in domain but in index...
  def label(action: String, alt: String) = WikiReactors(getRealm).wiki.labelFor(this, action).getOrElse(alt)
  def label(action: String) = WikiReactors(getRealm).wiki.labelFor(this, action).getOrElse(action)
  // this is my label
  def getLabel() = WikiReactors(getRealm).wiki.label(this)
  def toSpecPath = WID.WidSpecPath(this)
  def w: String = urlRelative
  def w(shouldCount: Boolean = true): String = urlRelative + (if (!shouldCount) "?count=0" else "")
}
/** wid utils */
object WID {
  /** SEO optimization - WIDs in these categories do not require the category in wpath */
  final val NOCATS = Array("Blog", "Post")
  final val PATHCATS = Array("Club", "Pro", "School", "Talk", "Topic", "Session", "Drill", "Pathway") // timid start to shift to /cat/name
  //cat:name#section OR cat:name::section - # doesn't go through path
  private val REGEX =
    """([^/:\\]]*[:])?([^#|\\]]+)(#[^|\\]]+)?""".r
  /** parse a wid
   *
   * @param a the list of wids from a path, parent to child */
  private def widFromSeg(a: Array[String], curRealm: String = "") = {
    // Parse each path segment into a (possibly realm-prefixed) WID.
    val w = a.map { x =>
      x match {
        case REGEX(c, n, s) => {
          val cat =
            if (c == null) ""
            else c.replaceFirst("[^.]+\\\\.", "").replaceFirst(":", "")
          val name =
            if (n == null) ""
            else if (cat.length <= 0) n.replaceFirst("[^.]+\\\\.", "")
            else n
          WID(
            cat,
            name,
            None,
            Option(s).filter(_.length > 1).map(_.substring(1)),
            // if cat has realm, use it
            if (c != null && c.contains("."))
              Some(c.replaceFirst("\\\\..*", ""))
            else if (cat.length <= 0 && n != null && n.contains("."))
              // only if cat is not specified
              Some(n.replaceFirst("\\\\..*", ""))
            else None)
        }
        case _ => empty
      }
    }
    // Fold the segments left-to-right, linking each WID to its parent's id.
    val res = w.foldLeft[Option[WID]](None)((x, y) => Some(WID(y.cat, y.name, x.flatMap(_.findId(curRealm)), y.section,
      //always follow parent's realm if provided
      x.flatMap(_.realm).orElse(y.realm))))
    res
  }
  /** parse WID from path */
  def fromPath(path: String): Option[WID] = fromPath(path, "")
  /** parse WID from path */
  def fromPath(path: String, curRealm: String): Option[WID] = {
    if (path == null || path.length() == 0)
      None
    else {
      val a = path.split("/")
      widFromSeg(a, curRealm)
    }
  }
  /** more weird parse - never returs a None, also use realm as default, if none in wpath */
  def fromPathWithRealm(path: String, curRealm: String): Option[WID] = {
    val ret = if (path == null || path.length() == 0)
      Some(WID("",""))
    else {
      val a = path.split("/")
      widFromSeg(a, curRealm)
    }
    if(ret.exists(_.realm.isDefined)) ret
    else ret.map(_.r(curRealm))
  }
  /** parse CMDWID in path */
  def cmdfromPath(path: String): Option[CMDWID] = {
    if (path == null || path.length() == 0)
      None
    else {
      // Split the path around a recognized command token and parse the
      // leading part as a WID.
      def splitIt(tag: String, path: String) = {
        val b = path split tag
        val a = b.head split "/"
        CMDWID(b.headOption, widFromSeg(a), tag.replaceAllLiterally("/", ""), b.tail.headOption.getOrElse(""))
      }
      // TODO optimize this copy/paste later
      //todo if the name contains the sequence /debug this won't work - should check i.e. /debug$
      Array("/xp/", "/xpl/", "/tag/", "/react/").collectFirst {
        case tag if path contains tag => splitIt(tag, path)
      } orElse
        Array("/rss.xml", "/debug", "/edit", "/content", "/included", "/dualView").collectFirst {
          case tag if path endsWith tag => splitIt(tag, path)
        } orElse
        Some(CMDWID(Some(path), widFromSeg(path split "/"), "", ""))
    }
  }
  final val wikip2 = """\\[\\[alias:([^\\]]*)\\]\\]"""
  final val wikip2r = wikip2.r
  /** is this just an alias?
   *
   * an alias is a topic that only contains the alias markup: [[alias:xxx]]
   */
  def alias(content: String): Option[WID] = {
    if (wikip2r.findFirstMatchIn(content).isDefined) {
      // if (content.matches(wikip2)) {
      val wikip2r(wpath) = content
      WID.fromPath(wpath)
    } else None
  }
  /** is this just an alias?
   *
   * an alias is a topic that only contains the alias markup: [[alias:xxx]]
   *
   * todo is it faster to check startsWith and then pattern?
   */
  def isAlias(content: String): Boolean =
    content.startsWith("[[alias:") && wikip2r.findFirstMatchIn(content).isDefined
  // Sentinel WID used for unparsable segments.
  final val empty = WID("-", "-")
  // Adapts a WID to the TSpecRef interface used by the spec subsystem.
  implicit class WidSpecPath(val wid: WID) extends TSpecRef {
    def source: String = ""
    def wpath: String = wid.wpath
    def key: String = wid.name
    def realm: String = wid.getRealm
    def ver: Option[String] = None
    def draft: Option[String] = None
    def ahref: Option[String] = Some(wid.ahref)
    def category: String = wid.cat
    override def toString = wid.toString
  }
  // NOTE(review): `.get` here throws on unparsable wpaths — presumably spec
  // paths are always valid; confirm.
  implicit def fromSpecPath (s:TSpecRef) : WID = fromPath(s.wpath).get
  implicit def fromSpecPathList (s:List[TSpecRef]) : List[WID] = s.map(fromSpecPath)
}
/** a unique ID - it is less verbose than the WID - used in data modelling.
*
* also, having a wid means a page exists or existed
*/
case class UWID(cat: String, id:ObjectId, realm:Option[String]=None) {
  /** find the name and build a wid - possibly expensive for non-indexed topics */
  def findWid = {
    WikiIndex.withIndex(getRealm) { idx =>
      idx.find((_,_,x)=>x == id).map(_._2)
    } orElse Wikis(getRealm).findById(cat, id).map(_.wid)
  }
  /** force finding or building a surrogate wid */
  lazy val wid = findWid orElse Some(WID(cat, id.toString).copy(realm=realm)) // used in too many places to refactor properly
  def nameOrId = wid.map(_.name).getOrElse(id.toString)
  lazy val grated = grater[UWID].asDBObject(this) //.copy(realm=None)) // todo I erase the realm for backwards compatibility
  lazy val page = Wikis(getRealm).find(this)
  /** get the realm or the default */
  def getRealm = realm.getOrElse(Wikis.DFLT)
  /** withRealm - convienience builder */
  def r(r:String) = if(Wikis.DFLT == r) this else this.copy(realm=Some(r))
  /** some topics don't use cats */
  // NOTE: equality compares only the ObjectId, ignoring cat and realm,
  // so entries can change category without breaking references.
  override def equals (other:Any) = other match {
    case o: UWID => this.id == o.id // this way you can change cats without much impact
    case _ => false
  }
}
object UWID {
  // Sentinel used where a UWID is required but none applies; note each call
  // site sees the same instance with one fixed random ObjectId.
  final val empty = UWID("?", new ObjectId())
}
/** a wrapper for categories, since they can now have a realm */
case class CAT(cat: String, realm:Option[String]) { // don't give realm a None defaut, eh? see object.apply
  // realm is None when the category string carried no "realm." prefix.
  /** get the realm or the default */
  def getRealm = realm.getOrElse(Wikis.DFLT)
}
object CAT {
  /** Extractor form of `apply`; always succeeds. */
  def unapply (cat:String) : Option[CAT] = Some(apply(cat))

  /** Splits a "realm.cat" string into its parts; a bare category gets no
   *  realm. Only the first two dot-separated parts are considered.
   */
  def apply (cat:String) : CAT =
    if (!cat.contains(".")) CAT(cat, None)
    else {
      val parts = cat.split("\\\\.")
      CAT(parts(1), Some(parts(0)))
    }
}
| razie/diesel-rx | diesel/src/main/scala/razie/wiki/model/WID.scala | Scala | apache-2.0 | 17,958 |
package dbis.pig.cep.ops
import dbis.pig.cep.engines._
import scala.reflect.ClassTag
import dbis.pig.cep.ops.SelectionStrategy._
import dbis.pig.cep.nfa.NFAController
import dbis.pig.backends.{SchemaClass => Event}
/** Base configuration for a CEP run: owns the match collector and selects
 *  the engine implementation corresponding to the requested strategy.
 */
abstract class EngineConf[T <: Event: ClassTag](nfa: NFAController[T], sstr: SelectionStrategy) {
  // Accumulates the matches produced by the engine.
  val collector: MatchCollector[T] = new MatchCollector()
  // Engine chosen from the selection strategy; unsupported strategies fail fast.
  var engine: CEPEngine[T] = sstr match {
    case SelectionStrategy.FirstMatch => new FirstMatch(nfa, collector)
    case SelectionStrategy.AllMatches => new AnyMatch(nfa, collector)
    case SelectionStrategy.NextMatches => new NextMatch(nfa, collector)
    case SelectionStrategy.ContiguityMatches => new ContiguityMatch(nfa, collector)
    case _ => throw new Exception("The Strategy is not supported")
  }
}
/*
trait EngineConfig [T] extends EngineConf[T] {
implicit def event: Event
}*/ | ksattler/piglet | ceplib/src/main/scala/dbis/pig/cep/ops/EngineConf.scala | Scala | apache-2.0 | 932 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a quick overview of the dataset's contents rather than a deep analysis.