code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.v_standard.scalikejdbc.orm
import com.v_standard.scalikejdbc.orm.adapter.DBAdapter
import com.v_standard.scalikejdbc.orm.condition.{Condition, LogicalExpressionCondition, ValueEqualCondition}
import com.v_standard.scalikejdbc.orm.definition.{AbstractEntityDefinition, EntityBase, TableBase}
import com.v_standard.scalikejdbc.orm.query.Query
import scala.collection.mutable.ListBuffer
import scala.collection.JavaConverters._
/**
* SQL 生成オブジェクト。
*/
object SqlBuilder {
  /**
   * Builds an INSERT statement.
   *
   * Auto-numbered columns are skipped, as are columns whose value is `null` or empty.
   *
   * @param entityDef table definition
   * @param e entity holding the values to insert
   * @return tuple of the SQL string and the ordered parameter values
   */
  def insertStatement[Entity <: TableBase](entityDef: AbstractEntityDefinition[Entity], e: Entity): (String, Seq[Any]) = {
    val params = ListBuffer.empty[Any]
    val colNames = new StringBuilder
    entityDef.columns.foreach { c =>
      val value = c.value(e)
      // NOTE(review): `!value.isEmpty` suggests column values are Option-like; empty values are
      // simply omitted from the INSERT — confirm against the column definition type.
      if (!c.autoNumber && value != null && !value.isEmpty) {
        if (colNames.length > 0) colNames.append(", ")
        colNames.append(c.name)
        params += convertParameterType(value)
      }
    }
    // One "?" placeholder per collected parameter, in the same order as the column list.
    ("INSERT INTO " + entityDef.entityName + "(" + colNames + ") VALUES(" +
      Array.fill(params.size)("?").mkString(", ") + ")", params)
  }

  /**
   * Builds an UPDATE statement.
   *
   * Primary-key columns are never part of the SET clause; they are collected and used
   * for the WHERE clause when no explicit condition is supplied.
   *
   * @param entityDef table definition
   * @param e entity holding the new values
   * @param cond optional WHERE condition; when `None` the WHERE is derived from the primary key
   * @param adapter DB adapter
   * @return tuple of the SQL string and the ordered parameter values
   */
  def updateStatement[Entity <: TableBase](entityDef: AbstractEntityDefinition[Entity], e: Entity,
      cond: Option[Condition], adapter: DBAdapter): (String, Seq[Any]) = {
    val params = ListBuffer.empty[Any]
    val colVals = new StringBuilder
    val keys = ListBuffer.empty[String]
    entityDef.columns.foreach { c =>
      val value = c.value(e)
      if (!c.primaryKey.isEmpty) keys += c.name
      // Unlike insert, a `null` value IS included here (sets the column to NULL).
      // NOTE(review): presumably intentional so updates can clear a column — verify.
      else if (!c.autoNumber && (value == null || !value.isEmpty)) {
        if (colVals.length > 0) colVals.append(", ")
        colVals.append(c.name).append(" = ?")
        params += convertParameterType(value)
      }
    }
    // createUpdateWhere appends its own parameters to `params` as a side effect.
    ("UPDATE " + entityDef.entityName + " SET " + colVals + createUpdateWhere(entityDef, Option(e), cond, keys,
      params, adapter), params)
  }

  /**
   * Builds a DELETE statement.
   *
   * @param entityDef table definition
   * @param e optional entity; required when `cond` is `None` so the WHERE clause can be
   *          derived from its primary-key values
   * @param cond optional WHERE condition
   * @param adapter DB adapter
   * @return tuple of the SQL string and the ordered parameter values
   */
  def deleteStatement[Entity <: TableBase](entityDef: AbstractEntityDefinition[Entity], e: Option[Entity],
      cond: Option[Condition], adapter: DBAdapter): (String, Seq[Any]) = {
    val params = ListBuffer.empty[Any]
    val keys = ListBuffer.empty[String]
    // Collect primary-key column names (primaryKey appears to be Option-like).
    entityDef.columns.foreach(c => c.primaryKey.foreach(v => keys += c.name))
    ("DELETE FROM " + entityDef.entityName + createUpdateWhere(entityDef, e, cond, keys, params, adapter), params)
  }

  /**
   * Builds a SELECT statement.
   *
   * @param query query definition (columns, FROM, WHERE, ORDER BY, paging, distinct flag)
   * @param adapter DB adapter; also supplies the backend-specific paging clause
   * @return tuple of the SQL string and the ordered parameter values
   */
  def selectStatement(query: Query, adapter: DBAdapter): (String, Seq[Any]) = {
    val sql = new StringBuilder("SELECT ")
    val params = ListBuffer.empty[Any]
    // DISTINCT selects drop key columns from the projection.
    if (query.distinct) sql.append("DISTINCT ").append(query.columnsSqlWithoutKey.mkString(", "))
    else sql.append(query.columnsSql.mkString(", "))
    sql.append(" FROM ").append(query.fromSql)
    // cond is a (sqlText, values) tuple; an empty sqlText means no WHERE clause.
    val cond = query.whereCondition.sql(true, adapter)
    if (cond._1.length > 0) {
      sql.append(" WHERE ").append(cond._1)
      if (!cond._2.isEmpty) params ++= cond._2.map(convertParameterType(_))
    }
    val orderBy = query.orderBySql.mkString(", ")
    if (orderBy.length > 0) sql.append(" ORDER BY ").append(orderBy)
    // The adapter may append extra paging parameters to `params`.
    sql.append(adapter.pagingClause(query, params))
    (sql.toString, params)
  }

  /**
   * Builds a SELECT count(*) statement.
   *
   * For DISTINCT queries the count is taken over a derived table aliased `distinct0`.
   *
   * @param query query definition
   * @param adapter DB adapter
   * @return tuple of the SQL string and the ordered parameter values
   */
  def selectCountStatement(query: Query, adapter: DBAdapter): (String, Seq[Any]) = {
    val sql = new StringBuilder("SELECT count(*) FROM ")
    val params = ListBuffer.empty[Any]
    if (query.distinct) sql.append("(SELECT DISTINCT ").append(query.columnsSqlWithoutKey.mkString(", ")).append(" FROM ")
    sql.append(query.fromSql)
    val cond = query.whereCondition.sql(true, adapter)
    if (cond._1.length > 0) {
      sql.append(" WHERE ").append(cond._1)
      if (!cond._2.isEmpty) params ++= cond._2.map(convertParameterType(_))
    }
    // Close the derived table and give it the mandatory alias.
    if (query.distinct) sql.append(") distinct0")
    (sql.toString, params)
  }

  /**
   * Escapes a value for use inside a SQL LIKE pattern.
   *
   * `%`, `_` and the escape character itself are each prefixed with `escapeChar`.
   *
   * @param value search value (must not be null)
   * @param escapeChar escape character
   * @return the escaped string
   */
  def likeEscape(value: String, escapeChar: Char): String = {
    require(value != null, "value is null")
    val sb = new StringBuilder
    value.foreach(ch => ch match {
      case '%' => sb.append(escapeChar).append(ch)
      case '_' => sb.append(escapeChar).append(ch)
      case ch if ch == escapeChar => sb.append(escapeChar).append(ch)
      case ch => sb.append(ch)
    })
    sb.toString
  }

  /**
   * Builds the WHERE clause for UPDATE/DELETE statements.
   *
   * When no condition is supplied, an equality condition over the primary-key
   * columns of the entity is generated instead. Bound values are appended to
   * `params` as a side effect.
   *
   * @param entityDef table definition
   * @param e optional entity, required when `cond` is empty
   * @param cond optional condition
   * @param keys primary-key column names
   * @param params mutable sink the bound parameter values are appended to
   * @param adapter DB adapter
   * @return the WHERE clause text (including the leading " WHERE "), or "" when empty
   * @throws IllegalArgumentException when both `cond` and `e` are empty
   * @throws IllegalStateException when the generated key condition is empty (no keys)
   */
  private def createUpdateWhere[Entity <: EntityBase](entityDef: AbstractEntityDefinition[Entity], e: Option[Entity],
      cond: Option[Condition], keys: Seq[String], params: ListBuffer[Any], adapter: DBAdapter): String = {
    val where = cond.map { v =>
      val c = v.sql(false, adapter)
      if (c == Condition.EMPTY) ""
      else {
        if (!c._2.isEmpty) params ++= c._2.map(convertParameterType(_))
        c._1
      }
    }.getOrElse {
      if (e.isEmpty) throw new IllegalArgumentException("cond and entity is empty.")
      // Fall back to "pk1 = ? AND pk2 = ? ..." built from the entity's key values.
      val logicalCond = new LogicalExpressionCondition
      keys.foreach(cn => logicalCond.add(new ValueEqualCondition(cn, entityDef.columnsMap(cn).value(e.get))))
      val newCond = logicalCond.sql(false, adapter)
      if (newCond == Condition.EMPTY) throw new IllegalStateException("keys is empty.")
      if (!newCond._2.isEmpty) params ++= newCond._2.map(convertParameterType(_))
      newCond._1
    }
    if (where.isEmpty) "" else " WHERE " + where
  }

  /**
   * Normalizes a value for JDBC parameter binding.
   *
   * `None` becomes `null`, `Some` is unwrapped (recursively), and Scala
   * collections are converted to their Java counterparts; anything else is
   * passed through unchanged.
   *
   * @param value raw value
   * @return JDBC-friendly value
   */
  private[orm] def convertParameterType(value: Any): Any = value match {
    case None => null
    case Some(v) => convertParameterType(v)
    case v: Seq[_] => v.asJava
    case v: Map[_, _] => v.asJava
    case v => v
  }
}
| VanishStandard/scalikejdbc-orm | src/main/scala/com/v_standard/scalikejdbc/orm/SqlBuilder.scala | Scala | bsd-3-clause | 6,702 |
/*
* Accio is a platform to launch computer science experiments.
* Copyright (C) 2016-2018 Vincent Primault <v.primault@ucl.ac.uk>
*
* Accio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Accio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Accio. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.cnrs.liris.infra.thriftserver
import com.twitter.util.Future
/**
 * An [[AuthStrategy]] that trusts every caller: the supplied credentials string is
 * parsed directly into a [[UserInfo]] without any verification.
 */
object TrustAuthStrategy extends AuthStrategy {
  override def authenticate(credentials: String): Future[Option[UserInfo]] =
    // Future.apply captures any parse exception inside the returned future
    // instead of throwing synchronously.
    Future {
      Some(UserInfo.parse(credentials))
    }
}
| privamov/accio | accio/java/fr/cnrs/liris/infra/thriftserver/TrustAuthStrategy.scala | Scala | gpl-3.0 | 1,019 |
package cn.gridx.scala.lang.implicits.conversions
/**
* Created by tao on 11/24/15.
*/
/** Immutable holder for a prompt message string. */
class Prompt(val msg: String)
| TaoXiao/Scala | lang/src/main/scala/cn/gridx/scala/lang/implicits/conversions/Prompt.scala | Scala | apache-2.0 | 125 |
package slick.dbio
import org.reactivestreams.Subscription
import scala.collection.mutable.ArrayBuffer
import scala.language.higherKinds
import scala.collection.generic.{CanBuild, CanBuildFrom}
import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future}
import slick.SlickException
import slick.basic.BasicBackend
import slick.util.{DumpInfo, Dumpable, ignoreFollowOnError}
import scala.util.{Try, Failure, Success}
import scala.util.control.NonFatal
/** A Database I/O Action that can be executed on a database. The DBIOAction type allows a
* separation of execution logic and resource usage management logic from composition logic.
* DBIOActions can be composed with methods such as `andThen`, `andFinally` and `flatMap`.
* Individual parts of a composite DBIOAction are always executed serially on a single database,
* but possibly in different database sessions, unless the session is pinned either explicitly
* (using `withPinnedSession`) or implicitly (e.g. through a transaction).
*
* The actual implementation base type for all Actions is `DBIOAction`. `StreamingDBIO` and
* `DBIO` are type aliases which discard the effect type (and the streaming result type in the
* latter case) to make DBIOAction types easier to write when these features are not needed. All
* primitive DBIOActions and all DBIOActions produced by the standard combinators in Slick have
* correct Effect types and are streaming (if possible).
*
* @tparam R The result type when executing the DBIOAction and fully materializing the result.
* @tparam S An encoding of the result type for streaming results. If this action is capable of
* streaming, it is `Streaming[T]` for an element type `T`. For non-streaming
* DBIOActions it is `NoStream`.
* @tparam E The DBIOAction's effect type, e.g. `Effect.Read with Effect.Write`. When composing
* actions, the correct combined effect type will be inferred. Effects can be used in
* user code, e.g. to automatically direct all read-only Actions to a slave database
* and write Actions to the master copy.
*/
sealed trait DBIOAction[+R, +S <: NoStream, -E <: Effect] extends Dumpable {
  /** Transform the result of a successful execution of this action. If this action fails, the
    * resulting action also fails. */
  def map[R2](f: R => R2)(implicit executor: ExecutionContext): DBIOAction[R2, NoStream, E] =
    flatMap[R2, NoStream, E](r => SuccessAction[R2](f(r)))

  /** Use the result produced by the successful execution of this action to compute and then
    * run the next action in sequence. The resulting action fails if either this action, the
    * computation, or the computed action fails. */
  def flatMap[R2, S2 <: NoStream, E2 <: Effect](f: R => DBIOAction[R2, S2, E2])(implicit executor: ExecutionContext): DBIOAction[R2, S2, E with E2] =
    FlatMapAction[R2, S2, R, E with E2](this, f, executor)

  /** Creates a new DBIOAction with one level of nesting flattened, this method is equivalent
    * to `flatMap(identity)`. */
  def flatten[R2, S2 <: NoStream, E2 <: Effect](implicit ev : R <:< DBIOAction[R2,S2,E2]) = flatMap(ev)(DBIO.sameThreadExecutionContext)

  /** Run another action after this action, if it completed successfully, and return the result
    * of the second action. If either of the two actions fails, the resulting action also fails. */
  def andThen[R2, S2 <: NoStream, E2 <: Effect](a: DBIOAction[R2, S2, E2]): DBIOAction[R2, S2, E with E2] = a match {
    // Merge with an existing AndThenAction to keep long chains as one flat sequence.
    case AndThenAction(as2) => AndThenAction[R2, S2, E with E2](this +: as2)
    case a => AndThenAction[R2, S2, E with E2](Vector(this, a))
  }

  /** Run another action after this action, if it completed successfully, and return the result
    * of both actions. If either of the two actions fails, the resulting action also fails. */
  def zip[R2, E2 <: Effect](a: DBIOAction[R2, NoStream, E2]): DBIOAction[(R, R2), NoStream, E with E2] =
    // Both results are collected by a SequenceAction and then cast back to their static types.
    SequenceAction[Any, ArrayBuffer[Any], E with E2](Vector(this, a)).map { r =>
      (r(0).asInstanceOf[R], r(1).asInstanceOf[R2])
    } (DBIO.sameThreadExecutionContext)

  /** Run another action after this action, if it completed successfully, and zip the result
    * of both actions with a function `f`, then create a new DBIOAction holding this result,
    * If either of the two actions fails, the resulting action also fails. */
  def zipWith[R2, E2 <: Effect,R3](a: DBIOAction[R2, NoStream, E2])(f:(R,R2) =>R3)(implicit executor: ExecutionContext): DBIOAction[R3, NoStream, E with E2] =
    SequenceAction[Any, ArrayBuffer[Any], E with E2](Vector(this, a)).map { r =>
      f(r(0).asInstanceOf[R], r(1).asInstanceOf[R2])
    } (executor)

  /** Run another action after this action, whether it succeeds or fails, and then return the
    * result of the first action. If the first action fails, its failure is propagated, whether
    * the second action fails or succeeds. If the first action succeeds, a failure of the second
    * action is propagated. */
  def andFinally[E2 <: Effect](a: DBIOAction[_, NoStream, E2]): DBIOAction[R, S, E with E2] =
    cleanUp[E2](_ => a)(DBIO.sameThreadExecutionContext)

  /** Run another action after this action, whether it succeeds or fails, in order to clean up or
    * transform an error produced by this action. The clean-up action is computed from the failure
    * of this action, wrapped in `Some`, or `None` if this action succeeded.
    *
    * @param keepFailure If this action returns successfully, the resulting action also returns
    *                    successfully unless the clean-up action fails. If this action fails and
    *                    `keepFailure` is set to `true` (the default), the resulting action fails
    *                    with the same error, no matter whether the clean-up action succeeds or
    *                    fails. If `keepFailure` is set to `false`, an error from the clean-up
    *                    action will override the error from this action. */
  def cleanUp[E2 <: Effect](f: Option[Throwable] => DBIOAction[_, NoStream, E2], keepFailure: Boolean = true)(implicit executor: ExecutionContext): DBIOAction[R, S, E with E2] =
    CleanUpAction[R, S, E with E2](this, f, keepFailure, executor)

  /** A shortcut for `andThen`. */
  final def >> [R2, S2 <: NoStream, E2 <: Effect](a: DBIOAction[R2, S2, E2]): DBIOAction[R2, S2, E with E2] =
    andThen[R2, S2, E2](a)

  /** Filter the result of this action with the given predicate. If the predicate matches, the
    * original result is returned, otherwise the resulting action fails with a
    * NoSuchElementException. */
  final def filter(p: R => Boolean)(implicit executor: ExecutionContext): DBIOAction[R, NoStream, E] =
    withFilter(p)

  /** Same as `filter`; also enables guards in for-comprehensions over DBIOActions. */
  def withFilter(p: R => Boolean)(implicit executor: ExecutionContext): DBIOAction[R, NoStream, E] =
    flatMap(v => if(p(v)) SuccessAction(v) else throw new NoSuchElementException("Action.withFilter failed"))

  /** Transform the result of a successful execution of this action, if the given partial function is defined at that value,
    * otherwise, the result DBIOAction will fail with a `NoSuchElementException`.
    *
    * If this action fails, the resulting action also fails. */
  def collect[R2](pf: PartialFunction[R,R2])(implicit executor: ExecutionContext): DBIOAction[R2, NoStream, E] =
    map(r1 => pf.applyOrElse(r1,(r:R) => throw new NoSuchElementException(s"DBIOAction.collect partial function is not defined at: $r")))

  /** Return an action which contains the Throwable with which this action failed as its result.
    * If this action succeeded, the resulting action fails with a NoSuchElementException. */
  def failed: DBIOAction[Throwable, NoStream, E] = FailedAction[E](this)

  /** Convert a successful result `v` of this action into a successful result `Success(v)` and a
    * failure `t` into a successful result `Failure(t)`. This is the most generic combinator that
    * can be used for error recovery. If possible, use [[andFinally]] or [[cleanUp]] instead,
    * because those combinators, unlike `asTry`, support streaming. */
  def asTry: DBIOAction[Try[R], NoStream, E] = AsTryAction[R, E](this)

  /** Use a pinned database session when running this action. If it is composed of multiple
    * database actions, they will all use the same session, even when sequenced with non-database
    * actions. For non-composite or non-database actions, this has no effect. */
  def withPinnedSession: DBIOAction[R, S, E] = DBIO.Pin andThen this andFinally DBIO.Unpin

  /** Get a wrapping action which has a name that will be included in log output. */
  def named(name: String): DBIOAction[R, S, E] =
    NamedAction[R, S, E](this, name)

  /** Get the equivalent non-fused action if this action has been fused, otherwise this
    * action is returned. */
  def nonFusedEquivalentAction: DBIOAction[R, S, E] = this

  /** Whether or not this action should be included in log output by default. */
  def isLogged: Boolean = false
}
object DBIOAction {
  /** Convert a `Future` to a [[DBIOAction]]. */
  def from[R](f: Future[R]): DBIOAction[R, NoStream, Effect] = FutureAction[R](f)

  /** Lift a constant value to a [[DBIOAction]]. */
  def successful[R](v: R): DBIOAction[R, NoStream, Effect] = SuccessAction[R](v)

  /** Create a [[DBIOAction]] that always fails. */
  def failed(t: Throwable): DBIOAction[Nothing, NoStream, Effect] = FailureAction(t)

  /** Partition the input actions into maximal runs of consecutive synchronous or
    * consecutive asynchronous actions, preserving order. Each inner Vector holds one
    * such run; synchronous runs can later be fused into a single action. */
  private[this] def groupBySynchronicity[R, E <: Effect](in: TraversableOnce[DBIOAction[R, NoStream, E]]): Vector[Vector[DBIOAction[R, NoStream, E]]] = {
    var state = 0 // no current = 0, sync = 1, async = 2
    var current: mutable.Builder[DBIOAction[R, NoStream, E], Vector[DBIOAction[R, NoStream, E]]] = null
    val total = Vector.newBuilder[Vector[DBIOAction[R, NoStream, E]]]
    (in: TraversableOnce[Any]).foreach { a =>
      val msgState = if(a.isInstanceOf[SynchronousDatabaseAction[_, _, _, _]]) 1 else 2
      // A change of synchronicity closes the current run and starts a new one.
      if(msgState != state) {
        if(state != 0) total += current.result()
        current = Vector.newBuilder
        state = msgState
      }
      current += a.asInstanceOf[DBIOAction[R, NoStream, E]]
    }
    // Flush the final (possibly only) run.
    if(state != 0) total += current.result()
    total.result()
  }

  /** Transform a `Option[ DBIO[R] ]` into a `DBIO[ Option[R] ]`. */
  def sequenceOption[R, E <: Effect](in: Option[DBIOAction[R, NoStream, E]]): DBIOAction[Option[R], NoStream, E] = {
    implicit val ec = DBIO.sameThreadExecutionContext
    sequence(in.toList).map(_.headOption)
  }

  /** Transform a `TraversableOnce[ DBIO[R] ]` into a `DBIO[ TraversableOnce[R] ]`.
    * Consecutive synchronous actions are fused so they run in a single database session
    * visit instead of one round trip per action. */
  def sequence[R, M[+_] <: TraversableOnce[_], E <: Effect](in: M[DBIOAction[R, NoStream, E]])(implicit cbf: CanBuildFrom[M[DBIOAction[R, NoStream, E]], R, M[R]]): DBIOAction[M[R], NoStream, E] = {
    implicit val ec = DBIO.sameThreadExecutionContext
    // Run a single run of actions and collect the results directly into the target collection M.
    def sequenceGroupAsM(g: Vector[DBIOAction[R, NoStream, E]]): DBIOAction[M[R], NoStream, E] = {
      if(g.head.isInstanceOf[SynchronousDatabaseAction[_, _, _, _]]) { // fuse synchronous group
        new SynchronousDatabaseAction.Fused[M[R], NoStream, BasicBackend, E] {
          def run(context: BasicBackend#Context) = {
            val b = cbf()
            g.foreach(a => b += a.asInstanceOf[SynchronousDatabaseAction[R, NoStream, BasicBackend, E]].run(context))
            b.result()
          }
          override def nonFusedEquivalentAction = SequenceAction[R, M[R], E](g)
        }
      } else SequenceAction[R, M[R], E](g)
    }
    // Run one run of actions, collecting results into an intermediate Seq (used when
    // several runs must later be concatenated into the final collection).
    def sequenceGroupAsSeq(g: Vector[DBIOAction[R, NoStream, E]]): DBIOAction[Seq[R], NoStream, E] = {
      if(g.length == 1) {
        if(g.head.isInstanceOf[SynchronousDatabaseAction[_, _, _, _]]) { // fuse synchronous group
          new SynchronousDatabaseAction.Fused[Seq[R], NoStream, BasicBackend, E] {
            def run(context: BasicBackend#Context) =
              g.head.asInstanceOf[SynchronousDatabaseAction[R, NoStream, BasicBackend, E]].run(context) :: Nil
            override def nonFusedEquivalentAction = g.head.map(_ :: Nil)
          }
        } else g.head.map(_ :: Nil)
      } else {
        if(g.head.isInstanceOf[SynchronousDatabaseAction[_, _, _, _]]) { // fuse synchronous group
          new SynchronousDatabaseAction.Fused[Seq[R], NoStream, BasicBackend, E] {
            def run(context: BasicBackend#Context) = {
              val b = new ArrayBuffer[R](g.length)
              g.foreach(a => b += a.asInstanceOf[SynchronousDatabaseAction[R, NoStream, BasicBackend, E]].run(context))
              b
            }
            override def nonFusedEquivalentAction = SequenceAction[R, Seq[R], E](g)
          }
        } else SequenceAction[R, Seq[R], E](g)
      }
    }
    val grouped = groupBySynchronicity[R, E](in.asInstanceOf[TraversableOnce[DBIOAction[R, NoStream, E]]])
    grouped.length match {
      case 0 => DBIO.successful(cbf().result())
      // A single homogeneous run can build the result collection directly.
      case 1 => sequenceGroupAsM(grouped.head)
      // Multiple runs: thread a mutable builder through a flatMap chain and
      // append each run's Seq of results to it.
      case n =>
        grouped.foldLeft(DBIO.successful(cbf(in)): DBIOAction[mutable.Builder[R, M[R]], NoStream, E]) { (ar, g) =>
          for (r <- ar; ge <- sequenceGroupAsSeq(g)) yield r ++= ge
        } map (_.result)
    }
  }

  /** A simpler version of `sequence` that takes a number of DBIOActions with any return type as
    * varargs and returns a DBIOAction that performs the individual actions in sequence, returning
    * `()` in the end. */
  def seq[E <: Effect](actions: DBIOAction[_, NoStream, E]*): DBIOAction[Unit, NoStream, E] = {
    // Turn one run into a single action; `forceUnit` ensures the run ends by producing ().
    def sequenceGroup(g: Vector[DBIOAction[Any, NoStream, E]], forceUnit: Boolean): DBIOAction[Any, NoStream, E] = {
      if(g.length == 1 && !forceUnit) g.head
      else if(g.head.isInstanceOf[SynchronousDatabaseAction[_, _, _, _]]) sequenceSync(g)
      else if(forceUnit) AndThenAction[Any, NoStream, E](g :+ DBIO.successful(()))
      else AndThenAction[Any, NoStream, E](g)
    }
    // Fuse a synchronous run into one action; results are discarded, yielding ().
    def sequenceSync(g: Vector[DBIOAction[Any, NoStream, E]]): DBIOAction[Unit, NoStream, E] = {
      new SynchronousDatabaseAction.Fused[Unit, NoStream, BasicBackend, E] {
        def run(context: BasicBackend#Context) = {
          g.foreach(_.asInstanceOf[SynchronousDatabaseAction[Any, NoStream, BasicBackend, E]].run(context))
        }
        override def nonFusedEquivalentAction = AndThenAction[Unit, NoStream, E](g)
      }
    }
    if(actions.isEmpty) DBIO.successful(()) else {
      // The trailing successful(()) guarantees a Unit result and is fused into the last run.
      val grouped = groupBySynchronicity[Any, E](actions :+ DBIO.successful(()))
      grouped.length match {
        case 1 => sequenceGroup(grouped.head, true).asInstanceOf[DBIOAction[Unit, NoStream, E]]
        case n =>
          val last = grouped.length - 1
          val as = grouped.iterator.zipWithIndex.map { case (g, i) => sequenceGroup(g, i == last) }.toVector
          AndThenAction[Unit, NoStream, E](as)
      }
    }
  }

  /** Create a DBIOAction that runs some other actions in sequence and combines their results
    * with the given function. */
  def fold[T, E <: Effect](actions: Seq[DBIOAction[T, NoStream, E]], zero: T)(f: (T, T) => T)(implicit ec: ExecutionContext): DBIOAction[T, NoStream, E] =
    actions.foldLeft[DBIOAction[T, NoStream, E]](DBIO.successful(zero)) { (za, va) => za.flatMap(z => va.map(v => f(z, v))) }

  /** A DBIOAction that pins the current session */
  private[slick] object Pin extends SynchronousDatabaseAction[Unit, NoStream, BasicBackend, Effect] {
    def run(context: BasicBackend#Context): Unit = context.pin
    def getDumpInfo = DumpInfo(name = "SynchronousDatabaseAction.Pin")
  }

  /** A DBIOAction that unpins the current session */
  private[slick] object Unpin extends SynchronousDatabaseAction[Unit, NoStream, BasicBackend, Effect] {
    def run(context: BasicBackend#Context): Unit = context.unpin
    def getDumpInfo = DumpInfo(name = "SynchronousDatabaseAction.Unpin")
  }

  /** An ExecutionContext used internally for executing plumbing operations during DBIOAction
    * composition. Runs tasks on the submitting thread via a thread-local trampoline to
    * avoid both context switches and unbounded stack growth. */
  private[slick] object sameThreadExecutionContext extends ExecutionContext {
    // Pending tasks for the thread currently inside runTrampoline; null when not trampolining.
    private[this] val trampoline = new ThreadLocal[List[Runnable]]

    // Run `first` and then drain any tasks that were scheduled while it (or its
    // successors) executed. Only the first failure is kept and rethrown at the end.
    private[this] def runTrampoline(first: Runnable): Unit = {
      trampoline.set(Nil)
      try {
        var err: Throwable = null
        var r = first
        while(r ne null) {
          try r.run() catch { case t: Throwable => err = t }
          trampoline.get() match {
            case r2 :: rest =>
              trampoline.set(rest)
              r = r2
            case _ => r = null
          }
        }
        if(err ne null) throw err
      } finally trampoline.set(null)
    }

    override def execute(runnable: Runnable): Unit = trampoline.get() match {
      // Not currently trampolining on this thread: start a new trampoline loop.
      case null => runTrampoline(runnable)
      // Already inside runTrampoline: queue the task instead of recursing.
      case r => trampoline.set(runnable :: r)
    }

    override def reportFailure(t: Throwable): Unit = throw t
  }
}
/** A DBIOAction that represents an actual database operation. Concrete
  * implementations are backend-specific. */
trait DatabaseAction[+R, +S <: NoStream, -E <: Effect] extends DBIOAction[R, S, E] {
  // Real database operations are logged by default, unlike pure combinators.
  override def isLogged: Boolean = true
}
/** A primitive DBIOAction that completes immediately with a pre-computed value,
  * without touching the database. */
case class SuccessAction[+R](value: R) extends SynchronousDatabaseAction[R, NoStream, BasicBackend, Effect] {
  def run(ctx: BasicBackend#Context): R = value
  def getDumpInfo = DumpInfo("success", String.valueOf(value))
}
/** A primitive DBIOAction that always fails by throwing the given Throwable. */
case class FailureAction(t: Throwable) extends SynchronousDatabaseAction[Nothing, NoStream, BasicBackend, Effect] {
  def run(ctx: BasicBackend#Context): Nothing = throw t
  def getDumpInfo = DumpInfo("failure", String.valueOf(t))
}
/** An asynchronous DBIOAction whose result is supplied by an already-created Future. */
case class FutureAction[+R](f: Future[R]) extends DBIOAction[R, NoStream, Effect] {
  override def isLogged = true
  def getDumpInfo = DumpInfo("future", String.valueOf(f))
}
/** A DBIOAction that represents a `flatMap` operation for sequencing in the DBIOAction monad.
  * `f` is invoked on `executor` with the result of `base` to obtain the follow-up action. */
case class FlatMapAction[+R, +S <: NoStream, P, -E <: Effect](base: DBIOAction[P, NoStream, E], f: P => DBIOAction[R, S, E], executor: ExecutionContext) extends DBIOAction[R, S, E] {
  def getDumpInfo = DumpInfo("flatMap", String.valueOf(f), children = Vector(("base", base)))
}
/** A DBIOAction that represents a `seq` or `andThen` operation for sequencing in the DBIOAction
  * monad. Unlike `SequenceAction` it only keeps the result of the final action. */
case class AndThenAction[R, +S <: NoStream, -E <: Effect](as: IndexedSeq[DBIOAction[Any, NoStream, E]]) extends DBIOAction[R, S, E] {
  override def andThen[R2, S2 <: NoStream, E2 <: Effect](a: DBIOAction[R2, S2, E2]): DBIOAction[R2, S2, E with E2] =
    a match {
      // Merge with another AndThenAction so chains stay a single flat sequence.
      case AndThenAction(tail) => AndThenAction[R2, S2, E with E2](as ++ tail)
      case other => AndThenAction[R2, S2, E with E2](as :+ other)
    }
  def getDumpInfo = DumpInfo("andThen", children = as.zipWithIndex.map { case (action, idx) => (String.valueOf(idx + 1), action) })
}
/** A DBIOAction that represents a `sequence` operation for sequencing in the DBIOAction monad.
  * Runs all sub-actions in order and collects their results into a collection of type `R2`. */
case class SequenceAction[R, +R2, -E <: Effect](as: IndexedSeq[DBIOAction[R, NoStream, E]])(implicit val cbf: CanBuild[R, R2]) extends DBIOAction[R2, NoStream, E] {
  def getDumpInfo = DumpInfo("sequence", children = as.zipWithIndex.map { case (a, i) => (String.valueOf(i+1), a) })
}
/** A DBIOAction that represents a `cleanUp` operation for sequencing in the DBIOAction monad.
  * `f` receives `Some(failure)` or `None` from running `base` and computes the clean-up
  * action; see [[DBIOAction.cleanUp]] for the meaning of `keepFailure`. */
case class CleanUpAction[+R, +S <: NoStream, -E <: Effect](base: DBIOAction[R, S, E], f: Option[Throwable] => DBIOAction[_, NoStream, E], keepFailure: Boolean, executor: ExecutionContext) extends DBIOAction[R, S, E] {
  def getDumpInfo = DumpInfo("cleanUp", children = Vector(("try", base)))
}
/** A DBIOAction that represents a `failed` operation: it succeeds with the Throwable
  * that `a` failed with. */
case class FailedAction[-E <: Effect](a: DBIOAction[_, NoStream, E]) extends DBIOAction[Throwable, NoStream, E] {
  def getDumpInfo = DumpInfo("failed", children = Vector(("base", a)))
}
/** A DBIOAction that represents an `asTry` operation: the outcome of `a` is reified
  * as a `Success` or `Failure` value. */
case class AsTryAction[+R, -E <: Effect](a: DBIOAction[R, NoStream, E]) extends DBIOAction[Try[R], NoStream, E] {
  def getDumpInfo = DumpInfo("asTry")
}
/** A DBIOAction that attaches a name for logging purposes to another action.
  * Named actions are always logged. */
case class NamedAction[+R, +S <: NoStream, -E <: Effect](a: DBIOAction[R, S, E], name: String) extends DBIOAction[R, S, E] {
  def getDumpInfo = DumpInfo("named", mainInfo = DumpInfo.highlight(name))
  override def isLogged = true
}
/** The base trait for the context object passed to synchronous database actions by the
  * execution engine. */
trait ActionContext {
  // Number of outstanding `pin` calls; the session counts as pinned while this is positive.
  private[this] var pinCount = 0

  /** Check if the session is pinned. May only be called from a synchronous action context. */
  final def isPinned = pinCount > 0

  /** Pin the current session. Multiple calls to `pin` may be nested. The same number of calls
    * to `unpin` is required in order to mark the session as not pinned anymore. A pinned
    * session will not be released at the end of a primitive database action. Instead, the same
    * pinned session is passed to all subsequent actions until it is unpinned. Note that pinning
    * does not force an actual database connection to be opened. This still happens on demand.
    * May only be called from a synchronous action context. */
  final def pin: Unit = pinCount += 1

  /** Unpin this session once. May only be called from a synchronous action context. */
  final def unpin: Unit = pinCount -= 1
}
/** An ActionContext with extra functionality required for streaming DBIOActions. */
trait StreamingActionContext extends ActionContext {
  /** Emit a single result of the stream. Any Exception thrown by this method should be passed on
    * to the caller. */
  def emit(v: Any): Unit

  /** Get the Reactive Streams Subscription for this stream. */
  def subscription: Subscription
}
/** A synchronous database action provides a function from an `ActionContext` to the result
* type. `BasicBackend.DatabaseDef.run` supports this kind of action out of the box
* through `BasicBackend.DatabaseDef.runSynchronousDatabaseAction` so that `run` does not
* need to be extended if all primitive database actions can be expressed in this way. These
* actions also implement construction-time fusion for the `andFinally`, `andThen`, `asTry`,
* `failed`, `withPinnedSession` and `zip` operations.
*
* The execution engine ensures that an [[ActionContext]] is never used concurrently and that
* all state changes performed by one invocation of a SynchronousDatabaseAction are visible
* to the next invocation of the same or a different SynchronousDatabaseAction. */
trait SynchronousDatabaseAction[+R, +S <: NoStream, -B <: BasicBackend, -E <: Effect] extends DatabaseAction[R, S, E] { self =>
/** The type used by this action for the state of a suspended stream. A call to `emitStream`
* produces such a state which is then fed back into the next call. */
type StreamState >: Null <: AnyRef
/** Run this action synchronously and produce a result, or throw an Exception to indicate a
* failure. */
def run(context: B#Context): R
/** Run this action synchronously and emit results to the context. This methods may throw an
* Exception to indicate a failure.
*
* @param limit The maximum number of results to emit, or Long.MaxValue for no limit.
* @param state The state returned by a previous invocation of this method, or `null` if
* a new stream should be produced.
* @return A stream state if there are potentially more results available, or null if the
* stream is finished. */
def emitStream(context: B#StreamingContext, limit: Long, state: StreamState): StreamState =
throw new SlickException("Internal error: Streaming is not supported by this Action")
/** Dispose of a `StreamState` when a streaming action is cancelled. Whenever `emitStream`
* returns `null` or throws an Exception, it needs to dispose of the state itself. This
* method will not be called in these cases. */
def cancelStream(context: B#StreamingContext, state: StreamState): Unit = ()
/** Whether or not this action supports streaming results. An action with a `Streaming` result
* type must either support streaming directly or have a [[nonFusedEquivalentAction]] which
* supports streaming. This flag is not used if the Action has a `NoStream` result type. */
def supportsStreaming: Boolean = true
override def andThen[R2, S2 <: NoStream, E2 <: Effect](a: DBIOAction[R2, S2, E2]): DBIOAction[R2, S2, E with E2] = a match {
case a: SynchronousDatabaseAction.FusedAndThenAction[_, _, _, _] =>
new SynchronousDatabaseAction.FusedAndThenAction[R2, S2, B, E with E2](
self.asInstanceOf[SynchronousDatabaseAction[Any, S2, B, E with E2]] +:
a.as.asInstanceOf[IndexedSeq[SynchronousDatabaseAction[Any, S2, B, E with E2]]])
case a: SynchronousDatabaseAction[_, _, _, _] =>
new SynchronousDatabaseAction.FusedAndThenAction[R2, S2, B, E with E2](
Vector(self.asInstanceOf[SynchronousDatabaseAction[Any, S2, B, E with E2]],
a.asInstanceOf[SynchronousDatabaseAction[Any, S2, B, E with E2]]))
case a => super.andThen[R2, S2, E2](a)
}
private[this] def superZip[R2, E2 <: Effect](a: DBIOAction[R2, NoStream, E2]) = super.zip[R2, E2](a)
override def zip[R2, E2 <: Effect](a: DBIOAction[R2, NoStream, E2]): DBIOAction[(R, R2), NoStream, E with E2] = a match {
case a: SynchronousDatabaseAction[_, _, _, _] => new SynchronousDatabaseAction.Fused[(R, R2), NoStream, B, E with E2] {
def run(context: B#Context): (R, R2) = {
val r1 = self.run(context)
val r2 = a.asInstanceOf[SynchronousDatabaseAction[R2, NoStream, B, E2]].run(context)
(r1, r2)
}
override def nonFusedEquivalentAction: DBIOAction[(R, R2), NoStream, E with E2] = superZip(a)
}
case a => superZip(a)
}
// Non-fused fallback, reachable from inside the anonymous class below.
private[this] def superAndFinally[E2 <: Effect](a: DBIOAction[_, NoStream, E2]) = super.andFinally[E2](a)
/** Fuse `andFinally`: `a` always runs after `self`. If `self` fails, `a` is still
  * executed but any error it throws itself is discarded (`ignoreFollowOnError`) and
  * the original exception is rethrown. */
override def andFinally[E2 <: Effect](a: DBIOAction[_, NoStream, E2]): DBIOAction[R, S, E with E2] = a match {
case a: SynchronousDatabaseAction[_, _, _, _] => new SynchronousDatabaseAction.Fused[R, S, B, E with E2] {
def run(context: B#Context): R = {
val res = try self.run(context) catch {
case NonFatal(ex) =>
// Run the cleanup action for its side effects only; its own failure
// must not mask the original exception.
try a.asInstanceOf[SynchronousDatabaseAction[Any, NoStream, B, E2]].run(context) catch ignoreFollowOnError
throw ex
}
a.asInstanceOf[SynchronousDatabaseAction[Any, S, B, E2]].run(context)
res
}
override def nonFusedEquivalentAction: DBIOAction[R, S, E with E2] = superAndFinally(a)
}
case a => superAndFinally(a)
}
// Non-fused fallback, reachable from inside the anonymous class below.
private[this] def superWithPinnedSession = super.withPinnedSession
/** Fuse `withPinnedSession`: pin the context around the execution of `self`,
  * making sure it is unpinned on both the success and the failure path. */
override def withPinnedSession: DBIOAction[R, S, E] = new SynchronousDatabaseAction.Fused[R, S, B, E] {
def run(context: B#Context): R = {
context.pin
val res = try self.run(context) catch {
case NonFatal(ex) =>
context.unpin
throw ex
}
context.unpin
res
}
override def nonFusedEquivalentAction = superWithPinnedSession
}
// Non-fused fallback, reachable from inside the anonymous class below.
private[this] def superFailed: DBIOAction[Throwable, NoStream, E] = super.failed
/** Fuse `failed`: run `self` and return the exception it throws as the result.
  * If `self` completes normally, fail with `NoSuchElementException` -- the `ok`
  * flag guarantees that this particular exception escapes the handler below
  * instead of being returned as a value. */
override def failed: DBIOAction[Throwable, NoStream, E] = new SynchronousDatabaseAction.Fused[Throwable, NoStream, B, E] {
def run(context: B#Context): Throwable = {
var ok = false
try {
self.run(context)
ok = true
throw new NoSuchElementException("Action.failed (fused) not completed with a Throwable")
} catch {
case NonFatal(ex) if !ok => ex
}
}
override def nonFusedEquivalentAction = superFailed
}
// Non-fused fallback, reachable from inside the anonymous class below.
private[this] def superAsTry: DBIOAction[Try[R], NoStream, E] = super.asTry
/** Fuse `asTry`: materialize the outcome of `self` as `Success`/`Failure`
  * instead of letting non-fatal exceptions propagate. */
override def asTry: DBIOAction[Try[R], NoStream, E] = new SynchronousDatabaseAction.Fused[Try[R], NoStream, B, E] {
def run(context: B#Context): Try[R] = {
try Success(self.run(context)) catch {
case NonFatal(ex) => Failure(ex)
}
}
override def nonFusedEquivalentAction = superAsTry
}
}
/** Fusion helpers for synchronous database actions: fused wrappers and the
  * unsafe combinator fusion used by the action compiler. */
object SynchronousDatabaseAction {
/** A fused SynchronousDatabaseAction */
trait Fused[+R, +S <: NoStream, B <: BasicBackend, -E <: Effect] extends SynchronousDatabaseAction[R, S, B, E] {
def getDumpInfo = DumpInfo(name = "SynchronousDatabaseAction.Fused", children = Vector(("non-fused", nonFusedEquivalentAction)))
// Fused actions never stream directly; streaming goes through nonFusedEquivalentAction.
override def supportsStreaming: Boolean = false
}
/** A sequence of synchronous actions fused into one: runs every action in order on
  * the same context and returns the result of the last one. */
class FusedAndThenAction[+R, +S <: NoStream, B <: BasicBackend, -E <: Effect](val as: IndexedSeq[SynchronousDatabaseAction[Any, S, B, E]]) extends Fused[R, S, B, E] {
def run(context: B#Context): R = {
var res: Any = null
as.foreach(a => res = a.run(context))
res.asInstanceOf[R]
}
override def nonFusedEquivalentAction: DBIOAction[R, S, E] = AndThenAction[R, S, E](as)
// Chaining further synchronous actions keeps the fused sequence flat.
override def andThen[R2, S2 <: NoStream, E2 <: Effect](a: DBIOAction[R2, S2, E2]): DBIOAction[R2, S2, E with E2] = a match {
case a: SynchronousDatabaseAction.FusedAndThenAction[_, _, _, _] =>
new SynchronousDatabaseAction.FusedAndThenAction[R2, S2, B, E with E2](
as.asInstanceOf[IndexedSeq[SynchronousDatabaseAction[Any, S2, B, E with E2]]] ++
a.as.asInstanceOf[IndexedSeq[SynchronousDatabaseAction[Any, S2, B, E with E2]]])
case a: SynchronousDatabaseAction[_, _, _, _] =>
new SynchronousDatabaseAction.FusedAndThenAction[R2, S2, B, E with E2](
as.asInstanceOf[IndexedSeq[SynchronousDatabaseAction[Any, S2, B, E with E2]]] :+
a.asInstanceOf[SynchronousDatabaseAction[Any, S2, B, E with E2]])
case a => super.andThen(a)
}
}
/** Fuse `flatMap` / `map`, `cleanUp` and `filter` / `withFilter` combinators if they use
* `DBIO.sameThreadExecutionContext` and produce a `SynchronousDatabaseAction` in their
* evaluation function (where applicable). This cannot be verified at fusion time, so a wrongly
* fused action can fail with a `ClassCastException` during evaluation. */
private[slick] def fuseUnsafe[R, S <: NoStream, E <: Effect](a: DBIOAction[R, S, E]): DBIOAction[R, S, E] = {
a match {
case FlatMapAction(base: SynchronousDatabaseAction[_, _, _, _], f, ec) if ec eq DBIO.sameThreadExecutionContext =>
new SynchronousDatabaseAction.Fused[R, S, BasicBackend, E] {
def run(context: BasicBackend#Context): R = {
val b = base.asInstanceOf[SynchronousDatabaseAction[Any, NoStream, BasicBackend, Effect]].run(context)
val a2 = f(b)
// May throw ClassCastException if f did not return a synchronous action (documented above).
a2.asInstanceOf[SynchronousDatabaseAction[R, S, BasicBackend, E]].run(context)
}
override def nonFusedEquivalentAction = a
}
case CleanUpAction(base: SynchronousDatabaseAction[_, _, _, _], f, keepFailure, ec) if ec eq DBIO.sameThreadExecutionContext =>
new SynchronousDatabaseAction.Fused[R, S, BasicBackend, E] {
def run(context: BasicBackend#Context): R = {
val res = try {
base.asInstanceOf[SynchronousDatabaseAction[R, S, BasicBackend, Effect]].run(context)
} catch { case NonFatal(ex) =>
try {
val a2 = f(Some(ex))
a2.asInstanceOf[SynchronousDatabaseAction[Any, NoStream, BasicBackend, Effect]].run(context)
} catch { case NonFatal(_) if keepFailure => () }
throw ex
}
val a2 = f(None)
a2.asInstanceOf[SynchronousDatabaseAction[Any, NoStream, BasicBackend, Effect]].run(context)
res
}
override def nonFusedEquivalentAction = a
}
// Anything else is left unfused.
case a => a
}
}
}
| nafg/slick | slick/src/main/scala/slick/dbio/DBIOAction.scala | Scala | bsd-2-clause | 31,806 |
package model
import skinny.orm._, feature._
import scalikejdbc._
import org.joda.time._
/**
 * Password record for a login user.
 *
 * @param loginUserInfoId id of the associated login user info record (primary key)
 * @param password encrypted password string (per the original update() docs)
 */
case class LoginUserPassword(
loginUserInfoId: Long,
password: String
)
/**
 * Mapper for the `login_user_password` table, keyed by `loginUserInfoId`.
 *
 * Note: with ScalikeJDBC/Skinny ORM, `autoConstruct(rs, rn)` could replace the
 * hand-written [[extract]] below. If you switch to it, remember to exclude
 * association fields, e.g. `autoConstruct(rs, rn, "company")` skips "company".
 */
object LoginUserPassword extends SkinnyCRUDMapper[LoginUserPassword] {
  override lazy val tableName = "login_user_password"
  override lazy val defaultAlias = createAlias("lup")
  override lazy val primaryKeyFieldName = "loginUserInfoId"

  /** Maps one result-set row onto a [[LoginUserPassword]]. */
  override def extract(rs: WrappedResultSet, rn: ResultName[LoginUserPassword]): LoginUserPassword =
    LoginUserPassword(
      loginUserInfoId = rs.get(rn.loginUserInfoId),
      password = rs.get(rn.password)
    )

  /**
   * Inserts the given record.
   *
   * @param entity record to persist
   * @param session DB session
   */
  def create(entity: LoginUserPassword)(implicit session: DBSession): Unit = {
    createWithAttributes(
      Symbol("loginUserInfoId") -> entity.loginUserInfoId,
      Symbol("password") -> entity.password
    )
  }

  /**
   * Updates the password of an existing record.
   *
   * @param loginUserInfoId login user info id (primary key)
   * @param password encrypted password
   * @param session DB session
   */
  def update(loginUserInfoId: Long, password: String)(implicit session: DBSession): Unit = {
    updateById(loginUserInfoId).withAttributes(
      Symbol("password") -> password
    )
  }
}
| nemuzuka/vss-kanban | src/main/scala/model/LoginUserPassword.scala | Scala | mit | 1,872 |
package de.zalando.model
import de.zalando.apifirst.Application._
import de.zalando.apifirst.Domain._
import de.zalando.apifirst.ParameterPlace
import de.zalando.apifirst.naming._
import de.zalando.apifirst.Hypermedia._
import de.zalando.apifirst.Http._
import de.zalando.apifirst.Security
import java.net.URL
import Security._
//noinspection ScalaStyle
// NOTE(review): this object appears to be machine-generated from the
// "nakadi.yaml" Swagger/OpenAPI specification (cf. packageName below);
// prefer regenerating it from the spec over editing it by hand.
object nakadi_yaml extends WithModel {
// API type definitions, keyed by their reference path.
def types = Map[Reference, Type](
Reference("⌿definitions⌿EventMetaData") →
TypeDef(Reference("⌿definitions⌿EventMetaData"),
Seq(
Field(Reference("⌿definitions⌿EventMetaData⌿root_id"), TypeRef(Reference("⌿definitions⌿EventMetaData⌿parent_id"))),
Field(Reference("⌿definitions⌿EventMetaData⌿parent_id"), TypeRef(Reference("⌿definitions⌿EventMetaData⌿parent_id"))),
Field(Reference("⌿definitions⌿EventMetaData⌿scopes"), TypeRef(Reference("⌿definitions⌿EventMetaData⌿scopes"))),
Field(Reference("⌿definitions⌿EventMetaData⌿id"), TypeRef(Reference("⌿definitions⌿EventMetaData⌿parent_id"))),
Field(Reference("⌿definitions⌿EventMetaData⌿created"), TypeRef(Reference("⌿definitions⌿Event⌿event_type")))
), TypeMeta(Some("Named types: 5"), List())),
Reference("⌿definitions⌿Topic") →
TypeDef(Reference("⌿definitions⌿Topic"),
Seq(
Field(Reference("⌿definitions⌿Topic⌿name"), Str(None, TypeMeta(Some("Topic name"), List())))
), TypeMeta(Some("Named types: 1"), List())),
Reference("⌿definitions⌿Metrics") →
TypeDef(Reference("⌿definitions⌿Metrics"),
Seq(
Field(Reference("⌿definitions⌿Metrics⌿name"), TypeRef(Reference("⌿definitions⌿Event⌿event_type")))
), TypeMeta(Some("Named types: 1"), List())),
Reference("⌿definitions⌿Event") →
TypeDef(Reference("⌿definitions⌿Event"),
Seq(
Field(Reference("⌿definitions⌿Event⌿event_type"), TypeRef(Reference("⌿definitions⌿Event⌿event_type"))),
Field(Reference("⌿definitions⌿Event⌿partitioning_key"), TypeRef(Reference("⌿definitions⌿Event⌿event_type"))),
Field(Reference("⌿definitions⌿Event⌿metadata"), TypeRef(Reference("⌿definitions⌿Event⌿metadata")))
), TypeMeta(Some("Named types: 3"), List())),
Reference("⌿definitions⌿Cursor") →
TypeDef(Reference("⌿definitions⌿Cursor"),
Seq(
Field(Reference("⌿definitions⌿Cursor⌿partition"), Str(None, TypeMeta(None, List()))),
Field(Reference("⌿definitions⌿Cursor⌿offset"), Str(None, TypeMeta(None, List())))
), TypeMeta(Some("Named types: 2"), List())),
Reference("⌿definitions⌿Problem") →
TypeDef(Reference("⌿definitions⌿Problem"),
Seq(
Field(Reference("⌿definitions⌿Problem⌿detail"), Str(None, TypeMeta(Some("Problem description"), List())))
), TypeMeta(Some("Named types: 1"), List())),
Reference("⌿definitions⌿TopicPartition") →
TypeDef(Reference("⌿definitions⌿TopicPartition"),
Seq(
Field(Reference("⌿definitions⌿TopicPartition⌿partition"), Str(None, TypeMeta(None, List()))),
Field(Reference("⌿definitions⌿TopicPartition⌿oldest_available_offset"), Str(None, TypeMeta(None, List()))),
Field(Reference("⌿definitions⌿TopicPartition⌿newest_available_offset"), Str(None, TypeMeta(None, List())))
), TypeMeta(Some("Named types: 3"), List())),
Reference("⌿definitions⌿SimpleStreamEvent") →
TypeDef(Reference("⌿definitions⌿SimpleStreamEvent"),
Seq(
Field(Reference("⌿definitions⌿SimpleStreamEvent⌿cursor"), TypeRef(Reference("⌿definitions⌿Cursor"))),
Field(Reference("⌿definitions⌿SimpleStreamEvent⌿events"), TypeRef(Reference("⌿definitions⌿SimpleStreamEvent⌿events")))
), TypeMeta(Some("Named types: 2"), List())),
Reference("⌿definitions⌿Event⌿event_type") →
Opt(Str(None, TypeMeta(None, List())), TypeMeta(None, List())),
Reference("⌿definitions⌿EventMetaData⌿parent_id") →
Opt(UUID(TypeMeta(Some("uuid"), List())), TypeMeta(None, List())),
Reference("⌿definitions⌿Event⌿metadata") →
Opt(TypeRef(Reference("⌿definitions⌿EventMetaData")), TypeMeta(None, List())),
Reference("⌿definitions⌿SimpleStreamEvent⌿events") →
Opt(TypeRef(Reference("⌿definitions⌿SimpleStreamEvent⌿events⌿Opt")), TypeMeta(None, List())),
Reference("⌿definitions⌿EventMetaData⌿scopes") →
Opt(TypeRef(Reference("⌿definitions⌿EventMetaData⌿scopes⌿Opt")), TypeMeta(None, List())),
Reference("⌿paths⌿/topics/{topic}/events/batch⌿post⌿topic") →
Str(None, TypeMeta(None, List())),
Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_timeout") →
Opt(Intgr(TypeMeta(Some("int32"), List())), TypeMeta(None, List())),
Reference("⌿paths⌿/topics/{topic}/events⌿get⌿batch_limit") →
Intgr(TypeMeta(Some("int32"), List())),
Reference("⌿definitions⌿SimpleStreamEvent⌿events⌿Opt") →
Arr(TypeRef(Reference("⌿definitions⌿Event")), TypeMeta(None, List()), "csv"),
Reference("⌿definitions⌿EventMetaData⌿scopes⌿Opt") →
Arr(Str(None, TypeMeta(None, List())), TypeMeta(None, List()), "csv"),
Reference("⌿paths⌿/topics/{topic}/events/batch⌿post⌿event") →
Opt(TypeRef(Reference("⌿definitions⌿Event")), TypeMeta(None, List())),
Reference("⌿paths⌿/topics/{topic}/events⌿post⌿responses⌿201") →
Null(TypeMeta(None, List())),
Reference("⌿paths⌿/topics/{topic}/partitions⌿get⌿responses⌿200") →
ArrResult(TypeRef(Reference("⌿definitions⌿TopicPartition")), TypeMeta(None, List())),
Reference("⌿paths⌿/topics⌿get⌿responses⌿200") →
ArrResult(TypeRef(Reference("⌿definitions⌿Topic")), TypeMeta(None, List()))
)
// Request parameter definitions, keyed by parameter reference.
def parameters = Map[ParameterRef, Parameter](
ParameterRef( Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿start_from")) → Parameter("start_from", Str(None, TypeMeta(None, List())), None, None, ".+", encode = true, ParameterPlace.withName("query")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/partitions⌿get⌿topic")) → Parameter("topic", Str(None, TypeMeta(None, List())), None, None, "[^/]+", encode = true, ParameterPlace.withName("path")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_timeout")) → Parameter("stream_timeout", TypeRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_timeout")), None, None, ".+", encode = true, ParameterPlace.withName("query")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_limit")) → Parameter("stream_limit", TypeRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_timeout")), None, None, ".+", encode = true, ParameterPlace.withName("query")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/events⌿get⌿batch_flush_timeout")) → Parameter("batch_flush_timeout", TypeRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_timeout")), None, None, ".+", encode = true, ParameterPlace.withName("query")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/partitions/{partition}⌿get⌿topic")) → Parameter("topic", Str(None, TypeMeta(None, List())), None, None, "[^/]+", encode = true, ParameterPlace.withName("path")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/events⌿get⌿x_nakadi_cursors")) → Parameter("x_nakadi_cursors", Str(None, TypeMeta(None, List())), None, None, ".+", encode = false, ParameterPlace.withName("header")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/events⌿get⌿batch_limit")) → Parameter("batch_limit", Intgr(TypeMeta(Some("int32"), List())), None, Some("1"), ".+", encode = true, ParameterPlace.withName("query")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿partition")) → Parameter("partition", Str(None, TypeMeta(None, List())), None, None, "[^/]+", encode = true, ParameterPlace.withName("path")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿stream_limit")) → Parameter("stream_limit", TypeRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_timeout")), None, None, ".+", encode = true, ParameterPlace.withName("query")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/partitions/{partition}⌿get⌿partition")) → Parameter("partition", Str(None, TypeMeta(None, List())), None, None, "[^/]+", encode = true, ParameterPlace.withName("path")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/events⌿get⌿batch_keep_alive_limit")) → Parameter("batch_keep_alive_limit", TypeRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_timeout")), None, None, ".+", encode = true, ParameterPlace.withName("query")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/events⌿post⌿event")) → Parameter("event", TypeRef(Reference("⌿paths⌿/topics/{topic}/events/batch⌿post⌿event")), None, None, ".+", encode = false, ParameterPlace.withName("body")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/events/batch⌿post⌿topic")) → Parameter("topic", Str(None, TypeMeta(None, List())), None, None, "[^/]+", encode = true, ParameterPlace.withName("path")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/events/batch⌿post⌿event")) → Parameter("event", TypeRef(Reference("⌿paths⌿/topics/{topic}/events/batch⌿post⌿event")), None, None, ".+", encode = false, ParameterPlace.withName("body")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿topic")) → Parameter("topic", Str(None, TypeMeta(None, List())), None, None, "[^/]+", encode = true, ParameterPlace.withName("path")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿batch_limit")) → Parameter("batch_limit", Intgr(TypeMeta(Some("int32"), List())), None, Some("1"), ".+", encode = true, ParameterPlace.withName("query")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/events⌿post⌿topic")) → Parameter("topic", Str(None, TypeMeta(None, List())), None, None, "[^/]+", encode = true, ParameterPlace.withName("path")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿batch_flush_timeout")) → Parameter("batch_flush_timeout", TypeRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_timeout")), None, None, ".+", encode = true, ParameterPlace.withName("query")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/events⌿get⌿topic")) → Parameter("topic", Str(None, TypeMeta(None, List())), None, None, "[^/]+", encode = true, ParameterPlace.withName("path")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿stream_timeout")) → Parameter("stream_timeout", TypeRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_timeout")), None, None, ".+", encode = true, ParameterPlace.withName("query")),
ParameterRef( Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿batch_keep_alive_limit")) → Parameter("batch_keep_alive_limit", TypeRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_timeout")), None, None, ".+", encode = true, ParameterPlace.withName("query"))
)
// NOTE(review): no base path in the spec; the generator emits a raw null here.
def basePath: String =null
def discriminators: DiscriminatorLookupTable = Map[Reference, Reference](
)
def securityDefinitions: SecurityDefinitionsTable = Map[String, Security.Definition](
)
def stateTransitions: StateTransitionsTable = Map[State, Map[State, TransitionProperties]]()
// One ApiCall per (HTTP method, path) operation of the API.
def calls: Seq[ApiCall] = Seq(
ApiCall(GET, Path(Reference("⌿metrics")),
HandlerCall(
"nakadi.yaml",
"NakadiYaml",
instantiate = false,
"nakadiHackGet_metrics",parameters =
Seq(
)
),
Set(MimeType("application/json")),
Set(MimeType("application/json")),
Map.empty[String, Seq[Class[Exception]]],
TypesResponseInfo(
Map[Int, ParameterRef](
401 -> ParameterRef(Reference("⌿definitions⌿Problem")),
503 -> ParameterRef(Reference("⌿definitions⌿Problem")),
200 -> ParameterRef(Reference("⌿definitions⌿Metrics"))
), None),
StateResponseInfo(
Map[Int, State](
200 -> Self,
401 -> Self,
503 -> Self
), None),
Set.empty[Security.Constraint]),
ApiCall(GET, Path(Reference("⌿topics⌿{topic}⌿partitions⌿{partition}⌿events")),
HandlerCall(
"nakadi.yaml",
"NakadiYaml",
instantiate = false,
"nakadiHackGet_events_from_single_partition",parameters =
Seq(
ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿start_from")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿partition")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿stream_limit")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿topic")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿batch_limit")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿batch_flush_timeout")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿stream_timeout")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions/{partition}/events⌿get⌿batch_keep_alive_limit"))
)
),
Set(MimeType("application/json")),
Set(MimeType("application/json")),
Map.empty[String, Seq[Class[Exception]]],
TypesResponseInfo(
Map[Int, ParameterRef](
500 -> ParameterRef(Reference("⌿definitions⌿Problem")),
404 -> ParameterRef(Reference("⌿definitions⌿Problem")),
401 -> ParameterRef(Reference("⌿definitions⌿Problem")),
400 -> ParameterRef(Reference("⌿definitions⌿Problem")),
200 -> ParameterRef(Reference("⌿definitions⌿SimpleStreamEvent"))
), None),
StateResponseInfo(
Map[Int, State](
500 -> Self,
404 -> Self,
401 -> Self,
400 -> Self,
200 -> Self
), None),
Set.empty[Security.Constraint]),
ApiCall(GET, Path(Reference("⌿topics⌿{topic}⌿partitions⌿{partition}")),
HandlerCall(
"nakadi.yaml",
"NakadiYaml",
instantiate = false,
"nakadiHackGet_partition",parameters =
Seq(
ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions/{partition}⌿get⌿topic")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions/{partition}⌿get⌿partition"))
)
),
Set(MimeType("application/json")),
Set(MimeType("application/json")),
Map.empty[String, Seq[Class[Exception]]],
TypesResponseInfo(
Map[Int, ParameterRef](
200 -> ParameterRef(Reference("⌿definitions⌿TopicPartition"))
), None),
StateResponseInfo(
Map[Int, State](
200 -> Self
), None),
Set.empty[Security.Constraint]),
ApiCall(GET, Path(Reference("⌿topics")),
HandlerCall(
"nakadi.yaml",
"NakadiYaml",
instantiate = false,
"nakadiHackGet_topics",parameters =
Seq(
)
),
Set(MimeType("application/json")),
Set(MimeType("application/json")),
Map.empty[String, Seq[Class[Exception]]],
TypesResponseInfo(
Map[Int, ParameterRef](
200 -> ParameterRef(Reference("⌿paths⌿/topics⌿get⌿responses⌿200")),
401 -> ParameterRef(Reference("⌿definitions⌿Problem")),
503 -> ParameterRef(Reference("⌿definitions⌿Problem"))
), None),
StateResponseInfo(
Map[Int, State](
200 -> Self,
401 -> Self,
503 -> Self
), None),
Set.empty[Security.Constraint]),
ApiCall(GET, Path(Reference("⌿topics⌿{topic}⌿events")),
HandlerCall(
"nakadi.yaml",
"NakadiYaml",
instantiate = false,
"nakadiHackGet_events_from_multiple_partitions",parameters =
Seq(
ParameterRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_timeout")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿stream_limit")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿batch_flush_timeout")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿x_nakadi_cursors")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿batch_limit")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿batch_keep_alive_limit")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/events⌿get⌿topic"))
)
),
Set(MimeType("application/json")),
Set(MimeType("application/json")),
Map.empty[String, Seq[Class[Exception]]],
TypesResponseInfo(
Map[Int, ParameterRef](
500 -> ParameterRef(Reference("⌿definitions⌿Problem")),
404 -> ParameterRef(Reference("⌿definitions⌿Problem")),
401 -> ParameterRef(Reference("⌿definitions⌿Problem")),
400 -> ParameterRef(Reference("⌿definitions⌿Problem")),
200 -> ParameterRef(Reference("⌿definitions⌿SimpleStreamEvent"))
), None),
StateResponseInfo(
Map[Int, State](
500 -> Self,
404 -> Self,
401 -> Self,
400 -> Self,
200 -> Self
), None),
Set.empty[Security.Constraint]),
ApiCall(POST, Path(Reference("⌿topics⌿{topic}⌿events")),
HandlerCall(
"nakadi.yaml",
"NakadiYaml",
instantiate = false,
"nakadiHackPost_event",parameters =
Seq(
ParameterRef(Reference("⌿paths⌿/topics/{topic}/events⌿post⌿topic")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/events⌿post⌿event"))
)
),
Set(MimeType("application/json")),
Set(MimeType("application/json")),
Map.empty[String, Seq[Class[Exception]]],
TypesResponseInfo(
Map[Int, ParameterRef](
201 -> ParameterRef(Reference("⌿paths⌿/topics/{topic}/events⌿post⌿responses⌿201")),
403 -> ParameterRef(Reference("⌿definitions⌿Problem")),
503 -> ParameterRef(Reference("⌿definitions⌿Problem")),
401 -> ParameterRef(Reference("⌿definitions⌿Problem")),
422 -> ParameterRef(Reference("⌿definitions⌿Problem"))
), None),
StateResponseInfo(
Map[Int, State](
201 -> Self,
403 -> Self,
503 -> Self,
401 -> Self,
422 -> Self
), None),
Set.empty[Security.Constraint]),
ApiCall(GET, Path(Reference("⌿topics⌿{topic}⌿partitions")),
HandlerCall(
"nakadi.yaml",
"NakadiYaml",
instantiate = false,
"nakadiHackGet_partitions",parameters =
Seq(
ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions⌿get⌿topic"))
)
),
Set(MimeType("application/json")),
Set(MimeType("application/json")),
Map.empty[String, Seq[Class[Exception]]],
TypesResponseInfo(
Map[Int, ParameterRef](
200 -> ParameterRef(Reference("⌿paths⌿/topics/{topic}/partitions⌿get⌿responses⌿200"))
), None),
StateResponseInfo(
Map[Int, State](
200 -> Self
), None),
Set.empty[Security.Constraint]),
ApiCall(POST, Path(Reference("⌿topics⌿{topic}⌿events⌿batch")),
HandlerCall(
"nakadi.yaml",
"NakadiYaml",
instantiate = false,
"nakadiHackPost_events",parameters =
Seq(
ParameterRef(Reference("⌿paths⌿/topics/{topic}/events/batch⌿post⌿topic")),
ParameterRef(Reference("⌿paths⌿/topics/{topic}/events/batch⌿post⌿event"))
)
),
Set(MimeType("application/json")),
Set(MimeType("application/json")),
Map.empty[String, Seq[Class[Exception]]],
TypesResponseInfo(
Map[Int, ParameterRef](
201 -> ParameterRef(Reference("⌿paths⌿/topics/{topic}/events⌿post⌿responses⌿201")),
403 -> ParameterRef(Reference("⌿definitions⌿Problem")),
503 -> ParameterRef(Reference("⌿definitions⌿Problem")),
401 -> ParameterRef(Reference("⌿definitions⌿Problem")),
422 -> ParameterRef(Reference("⌿definitions⌿Problem"))
), None),
StateResponseInfo(
Map[Int, State](
201 -> Self,
403 -> Self,
503 -> Self,
401 -> Self,
422 -> Self
), None),
Set.empty[Security.Constraint]))
def packageName: Option[String] = Some("nakadi.yaml")
// Assembles the complete API model from the pieces above.
def model = new StrictModel(calls, types, parameters, discriminators, basePath, packageName, stateTransitions, securityDefinitions)
} | zalando/play-swagger | play-scala-generator/src/test/scala/model/resources.nakadi_yaml.scala | Scala | mit | 20,115 |
package org.jetbrains.plugins.scala.lang.psi.api.toplevel
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScMember
/**
* PSI element representing the early-definitions clause of a Scala template
* (the block of definitions preceding `with`/the class body, e.g.
* `new { val x = 1 } with T`).
*
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
trait ScEarlyDefinitions extends ScalaPsiElement {
/** The members declared inside this early-definitions block. */
def members: Seq[ScMember]
}
package epam.idobrovolskiy.wikipedia.trending.time.extractor.stack
import epam.idobrovolskiy.wikipedia.trending.time.WikiDate
/**
* Created by Igor_Dobrovolskiy on 08.08.2017.
*
* Stackable extractor mixin recognising phrases of the form
* "in YEAR and YEAR" and contributing both years as date extractions
* (and, via [[interpretDateAsRange]], as range extractions).
*/
trait InAndYearExtrMixin extends BasicStackedDatesExtractor {
// Matches "in <3-4 digit year> and <3-4 digit year>", capturing both years.
// NOTE(review): inside a triple-quoted (raw) string, `\\w` denotes a literal
// backslash followed by `w`, not the word-character class; confirm whether
// single-backslash `\w`, `\s`, `\d` were intended (possibly an escaping
// artifact introduced when this file was copied).
private val re = """(?:[^\\w]|^)in\\s+(\\d{3,4})\\s+and\\s+(\\d{3,4})(?:[\\s\\.,]|$)""".r
// Every regex match yields two extractions, one per captured year.
def extractInAndYearDates(id: Int, s: String): Iterator[DateExtraction] = (
for {
cMatch <- (re findAllIn s).matchData
} yield Iterator(
DateExtraction(WikiDate.AD(cMatch.group(1).toInt), cMatch.start(1), cMatch),
DateExtraction(WikiDate.AD(cMatch.group(2).toInt), cMatch.start(2), cMatch)
)
).flatMap(x => x)
// Reinterprets each extracted date as a (degenerate) date range.
def extractInAndYearRanges(id: Int, s: String): Iterator[RangeExtraction] =
extractInAndYearDates(id, s).map(interpretDateAsRange(_))
// Stackable-trait hooks: append this mixin's extractions to whatever the
// rest of the extractor stack produced.
abstract override protected def appendDates(id: Int, s: String, it: Iterator[DateExtraction]) =
super.appendDates(id, s, it ++ extractInAndYearDates(id, s))
abstract override protected def appendRanges(id: Int, s: String, it: Iterator[RangeExtraction]) =
super.appendRanges(id, s, it ++ extractInAndYearRanges(id, s))
}
| igor-dobrovolskiy-epam/wikipedia-analysis-scala-core | src/main/scala/epam/idobrovolskiy/wikipedia/trending/time/extractor/stack/InAndYearExtrMixin.scala | Scala | apache-2.0 | 1,166 |
package nlpdata.datasets.ptb3
import cats._
import cats.arrow.FunctionK
import cats.implicits._
import nlpdata.util._
import scala.util.Try
import scala.language.implicitConversions
import java.nio.file.{Files, Path, Paths}
/** Interprets PTB3 service requests against a local copy of the Penn Treebank 3
  * rooted at `location`, producing `Try` values. */
class PTB3FileSystemInterpreter(location: Path) extends (PTB3ServiceRequestA ~> Try) {
def apply[A](request: PTB3ServiceRequestA[A]): Try[A] = request match {
case GetFile(path) => getFile(path)
case GetAllPaths => allPTBPaths
}
// Root of the parsed (.MRG) annotation files inside the treebank distribution.
private[this] val annotationPath =
location.resolve(Paths.get("TREEBANK_3/PARSED/MRG"))
import com.softwaremill.macmemo.memoize
import com.softwaremill.macmemo.MemoCacheBuilder
// Guava-backed cache used by the @memoize macro below.
private[this] implicit val cacheProvider =
MemoCacheBuilder.guavaMemoCacheBuilder
import scala.concurrent.duration._
import scala.language.postfixOps
// Reads and parses a single annotation file; results are memoized (up to 1000
// entries, each expiring after an hour). Throws if the file is missing or
// unparsable -- callers should go through getFile, which wraps this in Try.
@memoize(maxSize = 1000, expiresAfter = 1 hour)
private[this] def getFileUnsafe(path: PTB3Path): PTB3File = {
val fullPath = path match {
case WSJPath(section, number) =>
annotationPath.resolve(f"WSJ/${section}%02d/WSJ_${section}%02d${number}%02d.MRG")
case BrownPath(domain, number) =>
annotationPath.resolve(f"BROWN/$domain/${domain}${number}%02d.MRG")
}
val fileResource = loadFile(fullPath).map(Parsing.readFile(path, _))
fileResource.tried.get
}
private[this] def getFile(path: PTB3Path): Try[PTB3File] =
Try(getFileUnsafe(path))
// Enumerates every .MRG file under the WSJ and BROWN sections, deriving
// PTB3Path identifiers from the directory and file naming conventions.
private[this] def allPTBPaths = Try {
val wsjPrefix = annotationPath.resolve("WSJ")
val wsjPaths = for {
sectionName <- new java.io.File(wsjPrefix.toString).listFiles
.map(_.getName)
.iterator
sectionFolder = new java.io.File(wsjPrefix.resolve(sectionName).toString)
if sectionFolder.isDirectory
sectionNumber <- Try(sectionName.toInt).toOption.iterator
fileName <- sectionFolder.listFiles.map(_.getName).iterator
if fileName.endsWith(".MRG")
// filename format: WSJ_SSNN.MRG
// where SS is section number and NN is file number
// so file number is substring at [6, 8)
fileNumber <- Try(fileName.substring(6, 8).toInt).toOption.iterator
} yield WSJPath(sectionNumber, fileNumber)
val brownPrefix = annotationPath.resolve("BROWN")
val brownPaths = for {
domain <- new java.io.File(brownPrefix.toString).listFiles
.map(_.getName)
.iterator
domainFolder = new java.io.File(brownPrefix.resolve(domain).toString)
if domainFolder.isDirectory
fileName <- domainFolder.listFiles.map(_.getName).iterator
if fileName.endsWith(".MRG")
// filename format: DDNN.MRG
// where DD is domain and NN is file number
// so file number is substring at [2, 4)
fileNumber <- Try(fileName.substring(2, 4).toInt).toOption.iterator
} yield BrownPath(domain, fileNumber)
(wsjPaths ++ brownPaths).toList
}
}
/** PTB3 service backed by the local filesystem: interprets requests with a
  * [[PTB3FileSystemInterpreter]] into `Try` values. */
class PTB3FileSystemService(
location: Path
) extends InterpretedPTB3Service[Try](
new PTB3FileSystemInterpreter(location)
)
| julianmichael/nlpdata | nlpdata/src-jvm/nlpdata/datasets/ptb3/PTB3FileSystemService.scala | Scala | mit | 3,019 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.tailhq.dynaml.pipes
/**
* @author tailhq on 17/11/15.
*
* Represents an abstract iterable data pipeline: a [[DataPipe]] consuming an
* `Iterable` of source records and producing a result of type `K`.
*
* @tparam I The type of a single source record
* @tparam J The result type of applying [[pipe]] to a single record
* @tparam K The overall result type of running the pipeline
*
*/
trait IterableDataPipe[I, J, K] extends DataPipe[Iterable[I], K]{
/**
* The per-record function applied by the pipeline
* (a mapping, predicate or side effect, depending
* on the concrete subtype).
*
* */
val pipe: I => J
/**
* Runs the pipeline over the given collection of
* records and produces the overall result.
* */
override def run(data: Iterable[I]): K
}
/**
* A pipeline which takes an [[Iterable]] of data and
* performs the scala `map` operation, applying [[pipe]]
* to every element.
* */
trait IterableMapPipe[I, J] extends IterableDataPipe[I, J, Iterable[J]] {
override def run(data: Iterable[I]): Iterable[J] = data.map(pipe)
}
/**
* A pipeline which takes an [[Iterable]] of data and
* performs the scala `flatMap` operation, applying [[pipe]]
* to every element and flattening the results.
* */
trait IterableFlatMapPipe[I, J] extends IterableDataPipe[I, Iterable[J], Iterable[J]] {
override def run(data: Iterable[I]) = data.flatMap(pipe)
}
/**
* A pipeline which filters an [[Iterable]] of data, keeping
* only the elements for which [[pipe]] returns `true`.
* */
trait IterableFilterPipe[I] extends IterableDataPipe[I, Boolean, Iterable[I]] {
override def run(data: Iterable[I]): Iterable[I] = data.filter(pipe)
}
/**
* A pipeline which splits an [[Iterable]] of data into the pair
* (elements satisfying [[pipe]], elements not satisfying it).
* */
trait IterablePartitionPipe[I] extends IterableDataPipe[I, Boolean, (Iterable[I], Iterable[I])] {
override def run(data: Iterable[I]): (Iterable[I], Iterable[I]) = data.partition(pipe)
}
/**
* A pipeline which runs the side-effecting [[pipe]] on every
* element of an [[Iterable]] and returns `Unit`.
* */
trait IterableSideEffectPipe[I] extends IterableDataPipe[I, Unit, Unit] {
override def run(data: Iterable[I]): Unit = data.foreach(pipe)
}
/**
 * Factory methods for building iterable-based pipes from plain functions
 * or from existing [[DataPipe]] instances.
 */
object IterableDataPipe {

  /** Converts any traversable collection into its `Iterable` view. */
  def toIterablePipe[I, S <: Traversable[I]] =
    new DataPipe[S, Iterable[I]] {
      override def run(data: S) = data.toIterable
    }

  // Mapping pipes: transform each element from the source domain to a new one.

  def apply[I, J](mapFunc: I => J): IterableMapPipe[I, J] =
    new IterableMapPipe[I, J] { val pipe = mapFunc }

  def apply[I, J](map: DataPipe[I, J]): IterableMapPipe[I, J] =
    new IterableMapPipe[I, J] { val pipe = (element: I) => map.run(element) }

  // Filtering pipes: keep only the elements satisfying the predicate.

  def apply[I](mapFunc: I => Boolean): IterableFilterPipe[I] =
    new IterableFilterPipe[I] { val pipe = mapFunc }

  def apply[I](mapFunc: DataPipe[I, Boolean]): IterableFilterPipe[I] =
    new IterableFilterPipe[I] { val pipe = (element: I) => mapFunc.run(element) }

  // Side-effecting pipes: run an action for every element.

  def apply[I](seFunc: I => Unit): IterableSideEffectPipe[I] =
    new IterableSideEffectPipe[I] { val pipe = seFunc }

  def apply[I](seFunc: SideEffectPipe[I]): IterableSideEffectPipe[I] =
    new IterableSideEffectPipe[I] { val pipe = (element: I) => seFunc.run(element) }
}
/** Factories for [[IterableFlatMapPipe]] from a function or a [[DataPipe]]. */
object IterableFlatMapPipe {

  def apply[I, J](mapFunc: I => Iterable[J]): IterableFlatMapPipe[I, J] =
    new IterableFlatMapPipe[I, J] {
      override val pipe = mapFunc
    }

  def apply[I, J](mapFunc: DataPipe[I, Iterable[J]]): IterableFlatMapPipe[I, J] =
    new IterableFlatMapPipe[I, J] {
      override val pipe = mapFunc.run _
    }
}
/** Factories for [[IterablePartitionPipe]] from a predicate or a [[DataPipe]]. */
object IterablePartitionPipe {

  def apply[I](mapFunc: (I) => Boolean): IterablePartitionPipe[I] =
    new IterablePartitionPipe[I] {
      val pipe = mapFunc
    }

  def apply[I](mapFunc: DataPipe[I, Boolean]): IterablePartitionPipe[I] =
    new IterablePartitionPipe[I] {
      val pipe = mapFunc.run _
    }
}
/** Pipe that splits an iterable of pairs into a pair of iterables. */
class UnzipIterable[I, J] extends
  IterableDataPipe[(I, J), (I, J), (Iterable[I], Iterable[J])] {
  // Records pass through untouched; the real work happens in run.
  val pipe = (record: (I, J)) => record
  override def run(data: Iterable[(I, J)]): (Iterable[I], Iterable[J]) =
    (data.map(_._1), data.map(_._2))
}
object UnzipIterable {
  /** Factory: creates a pipe that unzips an iterable of pairs. */
  def apply[I, J] = new UnzipIterable[I, J]
} | mandar2812/DynaML | dynaml-pipes/src/main/scala/io/github/tailhq/dynaml/pipes/IterableDataPipe.scala | Scala | apache-2.0 | 4,341 |
package org.dama.datasynth.common.generators.property.empirical
import java.io.InputStreamReader
import org.dama.datasynth.common.utils.FileUtils.File
/**
* Created by aprat on 12/05/17.
*/
/**
  * Property generator producing Long values drawn from an empirical
  * distribution read from `file`, with records split on `separator`.
  * Each raw string value is parsed with `str.toLong`.
  * NOTE(review): behaviour on malformed (non-numeric) lines is decided by
  * DistributionBasedGenerator — confirm before relying on it.
  */
class LongGenerator(file : File, separator : String )
  extends DistributionBasedGenerator[Long](str => str.toLong, file, separator )
| DAMA-UPC/DataSynth | src/main/scala/org/dama/datasynth/common/generators/property/empirical/LongGenerator.scala | Scala | gpl-3.0 | 331 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.utils.geotools
import java.util.{Date, Locale, UUID}
import com.typesafe.config.{Config, ConfigFactory}
import com.vividsolutions.jts.geom._
import org.geotools.feature.AttributeTypeBuilder
import org.geotools.feature.simple.SimpleFeatureTypeBuilder
import org.geotools.referencing.CRS
import org.geotools.referencing.crs.DefaultGeographicCRS
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.SpecParser.{ListAttributeType, MapAttributeType, SimpleAttributeType}
import org.locationtech.geomesa.utils.stats.Cardinality.Cardinality
import org.locationtech.geomesa.utils.stats.IndexCoverage.IndexCoverage
import org.locationtech.geomesa.utils.stats.{Cardinality, IndexCoverage}
import org.opengis.feature.`type`.{AttributeDescriptor, GeometryDescriptor}
import org.opengis.feature.simple.SimpleFeatureType
import scala.collection.JavaConversions._
import scala.util.Try
import scala.util.parsing.combinator.JavaTokenParsers
object SimpleFeatureTypes {

  import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors._

  // User-data keys controlling splitting of the backing table.
  val TABLE_SPLITTER = "table.splitter.class"
  val TABLE_SPLITTER_OPTIONS = "table.splitter.options"

  // User-data key naming the attribute used as the default date field.
  val DEFAULT_DATE_FIELD = "geomesa_index_start_time"

  // Per-attribute option keys parsed from spec strings (e.g. "name:Int:index=true").
  val OPT_INDEX_VALUE = "index-value"
  val OPT_INDEX = "index"
  val OPT_CARDINALITY = "cardinality"

  // User-data keys recording the parameterized types of List/Map attributes.
  val USER_DATA_LIST_TYPE = "subtype"
  val USER_DATA_MAP_KEY_TYPE = "keyclass"
  val USER_DATA_MAP_VALUE_TYPE = "valueclass"
  /**
   * Builds a [[SimpleFeatureType]] from a typesafe Config containing a
   * "type-name" entry and a list of field configs (see [[getFieldConfig]]).
   */
  def createType(conf: Config): SimpleFeatureType = {
    val nameSpec = conf.getString("type-name")
    val (namespace, name) = buildTypeName(nameSpec)
    val specParser = new SpecParser
    val fields = getFieldConfig(conf).map { fc => buildField(fc, specParser) }
    createType(namespace, name, fields, Seq())
  }
def getFieldConfig(conf: Config): Seq[Config] =
if(conf.hasPath("fields")) conf.getConfigList("fields")
else conf.getConfigList("attributes")
def buildField(conf: Config, specParser: SpecParser): AttributeSpec = conf.getString("type") match {
case t if simpleTypeMap.contains(t) => SimpleAttributeSpec(conf)
case t if geometryTypeMap.contains(t) => GeomAttributeSpec(conf)
case t if specParser.parse(specParser.listType, t).successful =>
ListAttributeSpec(conf)
case t if specParser.parse(specParser.mapType, t).successful =>
MapAttributeSpec(conf)
}
  /**
   * Builds a [[SimpleFeatureType]] from a (possibly namespaced) name and a
   * comma-separated spec string such as "id:Integer,*geom:Point:srid=4326".
   */
  def createType(nameSpec: String, spec: String): SimpleFeatureType = {
    val (namespace, name) = buildTypeName(nameSpec)
    val FeatureSpec(attributeSpecs, opts) = parse(spec)
    createType(namespace, name, attributeSpecs, opts)
  }
def buildTypeName(nameSpec: String): (String, String) = {
val nsIndex = nameSpec.lastIndexOf(':')
val (namespace, name) = if (nsIndex == -1 || nsIndex == nameSpec.length - 1) {
(null, nameSpec)
} else {
(nameSpec.substring(0, nsIndex), nameSpec.substring(nsIndex + 1))
}
(namespace, name)
}
def createType(namespace: String, name: String, attributeSpecs: Seq[AttributeSpec], opts: Seq[FeatureOption]): SimpleFeatureType = {
val geomAttributes = attributeSpecs.collect { case g: GeomAttributeSpec => g}
val defaultGeom = geomAttributes.find(_.default).orElse(geomAttributes.headOption)
val dateAttributes = attributeSpecs.collect {
case s: SimpleAttributeSpec if s.clazz == classOf[Date] => s
}
val defaultDate = dateAttributes.headOption // TODO GEOMESA-594 allow for setting default date field
val b = new SimpleFeatureTypeBuilder()
b.setNamespaceURI(namespace)
b.setName(name)
b.addAll(attributeSpecs.map(_.toAttribute))
defaultGeom.foreach { dg => b.setDefaultGeometry(dg.name)}
val sft = b.buildFeatureType()
defaultDate.foreach(dt => sft.getUserData.put(DEFAULT_DATE_FIELD, dt.name))
opts.map(_.decorateSFT(sft))
sft
}
  /** Encodes a feature type back into its comma-separated spec string. */
  def encodeType(sft: SimpleFeatureType): String =
    sft.getAttributeDescriptors.map { ad => AttributeSpecFactory.fromAttributeDescriptor(sft, ad).toSpec }.mkString(",")

  /** Non-geometry attributes flagged for secondary (attribute) indexing. */
  def getSecondaryIndexedAttributes(sft: SimpleFeatureType): Seq[AttributeDescriptor] =
    sft.getAttributeDescriptors.filter(ad => ad.isIndexed && !ad.isInstanceOf[GeometryDescriptor])
  /** Reverse-engineers an [[AttributeSpec]] from a live attribute descriptor. */
  object AttributeSpecFactory {
    def fromAttributeDescriptor(sft: SimpleFeatureType, ad: AttributeDescriptor) = ad.getType match {
      // Primitive bindings (String, Integer, Date, ...) become simple specs.
      case t if simpleTypeMap.contains(t.getBinding.getSimpleName) =>
        SimpleAttributeSpec(
          ad.getLocalName,
          ad.getType.getBinding,
          ad.getIndexCoverage(),
          ad.getIndexValue(),
          ad.getCardinality()
        )
      // Geometries: srid is 4326 only when the CRS matches EPSG:4326/WGS84,
      // otherwise -1 (unspecified).
      case t if geometryTypeMap.contains(t.getBinding.getSimpleName) =>
        val crs = Option(ad.asInstanceOf[GeometryDescriptor].getCoordinateReferenceSystem)
          .filter(crs => crs == CRS_EPSG_4326 || crs == DefaultGeographicCRS.WGS84)
        val srid = if (crs.isDefined) 4326 else -1
        GeomAttributeSpec(
          ad.getLocalName,
          ad.getType.getBinding,
          srid,
          sft.getGeometryDescriptor.equals(ad)
        )
      case t if t.getBinding.equals(classOf[java.util.List[_]]) =>
        ListAttributeSpec(
          ad.getLocalName,
          ad.getCollectionType().get,
          ad.getIndexCoverage(),
          ad.getCardinality()
        )
      case t if t.getBinding.equals(classOf[java.util.Map[_, _]]) =>
        // NOTE(review): irrefutable Some(...) pattern — assumes map type
        // metadata is always present on map-typed descriptors; confirm.
        val Some((keyType, valueType)) = ad.getMapTypes()
        MapAttributeSpec(
          ad.getLocalName,
          keyType,
          valueType,
          ad.getIndexCoverage(),
          ad.getCardinality()
        )
    }
  }
  /** Common contract of all parsed attribute specifications. */
  sealed trait AttributeSpec {
    def name: String
    def clazz: Class[_]
    def index: IndexCoverage
    def indexValue: Boolean
    def cardinality: Cardinality

    /** Materializes this spec as a geotools attribute descriptor. */
    def toAttribute: AttributeDescriptor

    /** Renders this spec back into its spec-string form. */
    def toSpec: String

    // Serializes the index-related options as ":key=value" suffixes;
    // default values are omitted so round-tripping stays compact.
    protected def getIndexSpec = {
      val builder = new StringBuilder()
      index match {
        case IndexCoverage.NONE => // don't append
        case _ => builder.append(s":$OPT_INDEX=$index")
      }
      if (indexValue) {
        builder.append(s":$OPT_INDEX_VALUE=$indexValue")
      }
      if (cardinality == Cardinality.LOW || cardinality == Cardinality.HIGH) {
        builder.append(s":$OPT_CARDINALITY=$cardinality")
      }
      builder.toString()
    }
  }

  /** Value-class helper dispatching `copy` across the sealed hierarchy. */
  implicit class AttributeCopyable(val attrSpec: AttributeSpec) extends AnyVal {
    def copy(): AttributeSpec = attrSpec match {
      case o: SimpleAttributeSpec => o.copy()
      case o: GeomAttributeSpec => o.copy()
      case o: ListAttributeSpec => o.copy()
      case o: MapAttributeSpec => o.copy()
    }
  }
  object AttributeSpec {
    // Option defaults merged (via Config fallback) under every attribute config.
    val defaults = Map[String, AnyRef](
      OPT_INDEX -> IndexCoverage.NONE.toString,
      OPT_INDEX_VALUE -> java.lang.Boolean.FALSE,
      OPT_CARDINALITY -> Cardinality.UNKNOWN.toString,
      "srid" -> Integer.valueOf(4326),
      "default" -> java.lang.Boolean.FALSE
    )
    val fallback = ConfigFactory.parseMap(defaults)

    // back compatible with string or boolean
    // ("index" may be a coverage name or a legacy true/false flag; true maps to JOIN).
    def getIndexCoverage(conf: Config) =
      Try(conf.getString(OPT_INDEX)).flatMap(o => Try(IndexCoverage.withName(o.toLowerCase(Locale.US))))
        .orElse(Try(if (conf.getBoolean(OPT_INDEX)) IndexCoverage.JOIN else IndexCoverage.NONE))
        .getOrElse(IndexCoverage.NONE)
  }
  // Marker for attribute specs that are not geometries (simple, list, map).
  sealed trait NonGeomAttributeSpec extends AttributeSpec

  object SimpleAttributeSpec {
    /** Builds a primitive-typed spec from a field config. */
    def apply(in: Config): SimpleAttributeSpec = {
      val conf = in.withFallback(AttributeSpec.fallback)
      val name = conf.getString("name")
      val attrType = conf.getString("type")
      val index = AttributeSpec.getIndexCoverage(conf)
      val indexValue = conf.getBoolean(OPT_INDEX_VALUE)
      val cardinality = Cardinality.withName(conf.getString(OPT_CARDINALITY).toLowerCase(Locale.US))
      SimpleAttributeSpec(name, simpleTypeMap(attrType), index, indexValue, cardinality)
    }
  }

  /** Spec for a primitive-typed attribute (String, Integer, Date, ...). */
  case class SimpleAttributeSpec(name: String,
                                 clazz: Class[_],
                                 index: IndexCoverage,
                                 indexValue: Boolean,
                                 cardinality: Cardinality) extends NonGeomAttributeSpec {

    override def toAttribute: AttributeDescriptor =
      new AttributeTypeBuilder()
        .binding(clazz)
        .indexCoverage(index)
        .indexValue(indexValue)
        .cardinality(cardinality)
        .buildDescriptor(name)

    override def toSpec = s"$name:${typeEncode(clazz)}$getIndexSpec"
  }
  object ListAttributeSpec {
    private val specParser = new SpecParser

    /** Builds a list spec from a field config; element type defaults to String. */
    def apply(in: Config): ListAttributeSpec = {
      val conf = in.withFallback(AttributeSpec.fallback)
      val name = conf.getString("name")
      val attributeType = specParser.parse(specParser.listType, conf.getString("type")).getOrElse(ListAttributeType(SimpleAttributeType("string")))
      val index = AttributeSpec.getIndexCoverage(conf)
      val cardinality = Cardinality.withName(conf.getString(OPT_CARDINALITY).toLowerCase(Locale.US))
      ListAttributeSpec(name, simpleTypeMap(attributeType.p.t), index, cardinality)
    }
  }

  /** Spec for a java.util.List attribute with a simple element type. */
  case class ListAttributeSpec(name: String,
                               subClass: Class[_],
                               index: IndexCoverage,
                               cardinality: Cardinality) extends NonGeomAttributeSpec {
    val clazz = classOf[java.util.List[_]]
    // currently we only allow simple types in the ST IDX for simplicity - revisit if it becomes a use-case
    val indexValue = false

    override def toAttribute: AttributeDescriptor = {
      new AttributeTypeBuilder()
        .binding(clazz)
        .indexCoverage(index)
        .indexValue(indexValue)
        .cardinality(cardinality)
        .collectionType(subClass)
        .buildDescriptor(name)
    }

    override def toSpec = s"$name:List[${subClass.getSimpleName}]$getIndexSpec"
  }
  object MapAttributeSpec {
    private val specParser = new SpecParser
    // Fallback when the map's parameterized types can't be parsed.
    private val defaultType = MapAttributeType(SimpleAttributeType("string"), SimpleAttributeType("string"))

    /** Builds a map spec from a field config; key/value types default to String. */
    def apply(in: Config): MapAttributeSpec = {
      val conf = in.withFallback(AttributeSpec.fallback)
      val name = conf.getString("name")
      val attributeType = specParser.parse(specParser.mapType, conf.getString("type")).getOrElse(defaultType)
      val index = AttributeSpec.getIndexCoverage(conf)
      val cardinality = Cardinality.withName(conf.getString(OPT_CARDINALITY).toLowerCase(Locale.US))
      MapAttributeSpec(name, simpleTypeMap(attributeType.kt.t), simpleTypeMap(attributeType.vt.t), index, cardinality)
    }
  }

  /** Spec for a java.util.Map attribute with simple key/value types. */
  case class MapAttributeSpec(name: String,
                              keyClass: Class[_],
                              valueClass: Class[_],
                              index: IndexCoverage,
                              cardinality: Cardinality) extends NonGeomAttributeSpec {
    val clazz = classOf[java.util.Map[_, _]]
    // currently we only allow simple types in the ST IDX for simplicity - revisit if it becomes a use-case
    val indexValue = false

    override def toAttribute: AttributeDescriptor = {
      new AttributeTypeBuilder()
        .binding(clazz)
        .indexCoverage(index)
        .indexValue(indexValue)
        .cardinality(cardinality)
        .mapTypes(keyClass, valueClass)
        .buildDescriptor(name)
    }

    override def toSpec =
      s"$name:Map[${keyClass.getSimpleName},${valueClass.getSimpleName}]$getIndexSpec"
  }
  object GeomAttributeSpec {
    /** Builds a geometry spec from a field config (srid/default via fallback). */
    def apply(in: Config): GeomAttributeSpec = {
      val conf = in.withFallback(AttributeSpec.fallback)
      val name = conf.getString("name")
      val attrType = conf.getString("type")
      val srid = conf.getInt("srid")
      val default = conf.getBoolean("default")
      GeomAttributeSpec(name, geometryTypeMap(attrType), srid, default)
    }
  }

  /** Spec for a geometry attribute; only SRID 4326 (or -1 = unspecified) is accepted. */
  case class GeomAttributeSpec(name: String, clazz: Class[_], srid: Int, default: Boolean)
      extends AttributeSpec {
    // The default geometry gets full index coverage and is stored in the index value.
    val index = if (default) IndexCoverage.FULL else IndexCoverage.NONE
    val indexValue = default
    val cardinality = Cardinality.UNKNOWN

    override def toAttribute: AttributeDescriptor = {
      // NOTE(review): -1 is silently allowed here (meaning "unspecified");
      // the error message only mentions 4326 — confirm wording is intended.
      if (!(srid == 4326 || srid == -1)) {
        throw new IllegalArgumentException(s"Invalid SRID '$srid'. Only 4326 is supported.")
      }
      val b = new AttributeTypeBuilder()
      b.binding(clazz)
        .indexCoverage(index)
        .indexValue(indexValue)
        .cardinality(cardinality)
        .crs(CRS_EPSG_4326)
        .buildDescriptor(name)
    }

    override def toSpec = {
      val star = if (default) "*" else ""
      s"$star$name:${typeEncode(clazz)}:srid=$srid$getIndexSpec"
    }
  }
  /** Post-construction decoration applied to a freshly built feature type. */
  sealed trait FeatureOption {
    def decorateSFT(sft: SimpleFeatureType): Unit
  }

  /** Records a table-splitter class and its options in the type's user data. */
  case class Splitter(splitterClazz: String, options: Map[String, String]) extends FeatureOption {
    override def decorateSFT(sft: SimpleFeatureType): Unit = {
      sft.getUserData.put(TABLE_SPLITTER, splitterClazz)
      sft.getUserData.put(TABLE_SPLITTER_OPTIONS, options)
    }
  }

  /** Result of parsing a spec string: attribute specs plus feature-level options. */
  case class FeatureSpec(attributes: Seq[AttributeSpec], opts: Seq[FeatureOption])
  // Canonical spec-string name for each supported binding class.
  private val typeEncode: Map[Class[_], String] = Map(
    classOf[java.lang.String] -> "String",
    classOf[java.lang.Integer] -> "Integer",
    classOf[java.lang.Double] -> "Double",
    classOf[java.lang.Long] -> "Long",
    classOf[java.lang.Float] -> "Float",
    classOf[java.lang.Boolean] -> "Boolean",
    classOf[UUID] -> "UUID",
    classOf[Geometry] -> "Geometry",
    classOf[Point] -> "Point",
    classOf[LineString] -> "LineString",
    classOf[Polygon] -> "Polygon",
    classOf[MultiPoint] -> "MultiPoint",
    classOf[MultiLineString] -> "MultiLineString",
    classOf[MultiPolygon] -> "MultiPolygon",
    classOf[GeometryCollection] -> "GeometryCollection",
    classOf[Date] -> "Date",
    classOf[java.util.List[_]] -> "List",
    classOf[java.util.Map[_, _]] -> "Map"
  )

  // Accepted primitive type aliases -> boxed java binding class.
  private val simpleTypeMap = Map(
    "String" -> classOf[java.lang.String],
    "java.lang.String" -> classOf[java.lang.String],
    "string" -> classOf[java.lang.String],
    "Integer" -> classOf[java.lang.Integer],
    "java.lang.Integer" -> classOf[java.lang.Integer],
    "int" -> classOf[java.lang.Integer],
    "Int" -> classOf[java.lang.Integer],
    "0" -> classOf[java.lang.Integer],
    "Long" -> classOf[java.lang.Long],
    "java.lang.Long" -> classOf[java.lang.Long],
    "long" -> classOf[java.lang.Long],
    "Double" -> classOf[java.lang.Double],
    "java.lang.Double" -> classOf[java.lang.Double],
    "double" -> classOf[java.lang.Double],
    "0.0" -> classOf[java.lang.Double],
    "Float" -> classOf[java.lang.Float],
    "java.lang.Float" -> classOf[java.lang.Float],
    "float" -> classOf[java.lang.Float],
    "0.0f" -> classOf[java.lang.Float],
    "Boolean" -> classOf[java.lang.Boolean],
    "java.lang.Boolean" -> classOf[java.lang.Boolean],
    "true" -> classOf[java.lang.Boolean],
    "false" -> classOf[java.lang.Boolean],
    "UUID" -> classOf[UUID],
    "Date" -> classOf[Date]
  )

  // Accepted geometry type names -> JTS binding class.
  private val geometryTypeMap = Map(
    "Geometry" -> classOf[Geometry],
    "Point" -> classOf[Point],
    "LineString" -> classOf[LineString],
    "Polygon" -> classOf[Polygon],
    "MultiPoint" -> classOf[MultiPoint],
    "MultiLineString" -> classOf[MultiLineString],
    "MultiPolygon" -> classOf[MultiPolygon],
    "GeometryCollection" -> classOf[GeometryCollection]
  )

  // Accepted outer-type aliases for parameterized list/map attributes.
  private val listTypeMap = Seq("list", "List", "java.util.List").map { n => (n, classOf[java.util.List[_]]) }.toMap
  private val mapTypeMap = Seq("map", "Map", "java.util.Map").map { n => (n, classOf[java.util.Map[_, _]]) }.toMap
  /** AST nodes and option helpers shared by the spec grammar. */
  object SpecParser {
    /** Attribute name; `default = true` marks the '*'-prefixed default geometry. */
    case class Name(s: String, default: Boolean = false)

    sealed trait AttributeType
    case class GeometryAttributeType(t: String) extends AttributeType
    case class SimpleAttributeType(t: String) extends AttributeType
    case class ListAttributeType(p: SimpleAttributeType) extends AttributeType
    case class MapAttributeType(kt: SimpleAttributeType, vt: SimpleAttributeType) extends AttributeType

    // Unknown cardinality names fall back to UNKNOWN rather than failing.
    def optionToCardinality(options: Map[String, String]) = options.get(OPT_CARDINALITY)
      .flatMap(c => Try(Cardinality.withName(c.toLowerCase(Locale.US))).toOption)
      .getOrElse(Cardinality.UNKNOWN)

    // Accepts either a coverage name or a legacy boolean; true means JOIN.
    def optionToIndexCoverage(options: Map[String, String]) = {
      val o = options.getOrElse(OPT_INDEX, IndexCoverage.NONE.toString)
      val fromName = Try(IndexCoverage.withName(o.toLowerCase(Locale.US)))
      fromName.getOrElse(if (Try(o.toBoolean).getOrElse(false)) IndexCoverage.JOIN else IndexCoverage.NONE)
    }
  }
  /**
   * Combinator grammar for spec strings. Parser ordering matters: longer
   * literals must win over their prefixes (e.g. "Integer" before "Int"),
   * hence the sorted-reverse alternation below.
   */
  private class SpecParser extends JavaTokenParsers {

    import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.SpecParser._

    /*
     Valid specs can have attributes that look like the following:
     "id:Integer:opt1=v1:opt2=v2,*geom:Geometry:srid=4326,ct:List[String]:index=true,mt:Map[String,Double]:index=false"
     */

    private val SEP = ":"

    def nonDefaultAttributeName = """[^:,]+""".r ^^ { n => Name(n) } // simple name
    def defaultAttributeName = ("*" ~ nonDefaultAttributeName) ^^ { // for *geom
      case "*" ~ Name(n, _) => Name(n, default = true)
    }

    // matches any of the primitive types defined in simpleTypeMap
    // order matters so that Integer is matched before Int
    val simpleType =
      simpleTypeMap
        .keys
        .toList
        .sorted
        .reverse
        .map(literal)
        .reduce(_ | _) ^^ { case s => SimpleAttributeType(s) }

    // matches any of the geometry types defined in geometryTypeMap
    val geometryType =
      geometryTypeMap
        .keys
        .toList
        .sorted
        .reverse
        .map(literal)
        .reduce(_ | _) ^^ { case g => GeometryAttributeType(g) }

    // valid lists
    def listTypeOuter: Parser[String] = listTypeMap.keys.map(literal).reduce(_ | _)
    // valid maps
    def mapTypeOuter: Parser[String] = mapTypeMap.keys.map(literal).reduce(_ | _)

    // list type matches "List[String]" or "List" (which defaults to parameterized type String)
    def listType = listTypeOuter ~> ("[" ~> simpleType <~ "]").? ^^ {
      case st => ListAttributeType(st.getOrElse(SimpleAttributeType("String")))
    }
    // map type matches "Map[String,String]" (defaults to Map[String,String] if parameterized types not specified
    def mapType = mapTypeOuter ~> ("[" ~> simpleType ~ "," ~ simpleType <~ "]").? ^^ {
      case Some(kt ~ "," ~ vt) => MapAttributeType(kt, vt)
      case None => MapAttributeType(SimpleAttributeType("String"), SimpleAttributeType("String"))
    }

    // either a list or a map
    def complexType = listType | mapType
    // simple type or geometry type or complex types
    def attrType = simpleType | geometryType | complexType

    // converts options into key/values
    def option = ("[a-zA-Z_.-]+".r <~ "=") ~ "[^:,;]+".r ^^ { case k ~ v => (k, v) }
    // builds a map of key/values
    def options = repsep(option, SEP) ^^ { kvs => kvs.toMap }
    // options map or empty map if no options specified
    def optionsOrEmptyMap = (SEP ~> options).? ^^ {
      case Some(opts) => opts
      case None => Map.empty[String, String]
    }

    // either a name with default prefix or regular name
    def name = defaultAttributeName | nonDefaultAttributeName

    // builds a GeometrySpec
    def geometryAttribute = (name ~ SEP ~ geometryType ~ optionsOrEmptyMap) ^^ {
      case Name(n, default) ~ SEP ~ GeometryAttributeType(t) ~ options =>
        val srid = options.getOrElse("srid", "4326").toInt
        GeomAttributeSpec(n, geometryTypeMap(t), srid, default)
    }

    // builds a NonGeomAttributeSpec for primitive types
    def simpleAttribute = (name ~ SEP ~ simpleType ~ optionsOrEmptyMap) ^^ {
      case Name(n, default) ~ SEP ~ SimpleAttributeType(t) ~ options =>
        val indexed = optionToIndexCoverage(options)
        val stIndexed = options.getOrElse(OPT_INDEX_VALUE, "false").toBoolean
        val cardinality = optionToCardinality(options)
        SimpleAttributeSpec(n, simpleTypeMap(t), indexed, stIndexed, cardinality)
    }

    // builds a NonGeomAttributeSpec for complex types
    def complexAttribute = (name ~ SEP ~ complexType ~ optionsOrEmptyMap) ^^ {
      case Name(n, default) ~ SEP ~ ListAttributeType(SimpleAttributeType(t)) ~ options =>
        val indexed = optionToIndexCoverage(options)
        val cardinality = optionToCardinality(options)
        ListAttributeSpec(n, simpleTypeMap(t), indexed, cardinality)
      case Name(n, default) ~ SEP ~ MapAttributeType(SimpleAttributeType(kt), SimpleAttributeType(vt)) ~ options =>
        val indexed = optionToIndexCoverage(options)
        val cardinality = optionToCardinality(options)
        MapAttributeSpec(n, simpleTypeMap(kt), simpleTypeMap(vt), indexed, cardinality)
    }

    // any attribute
    def attribute = geometryAttribute | complexAttribute | simpleAttribute

    // "table.splitter=org.locationtech.geomesa.data.DigitSplitter,table.splitter.options=fmt:%02d,"
    def splitter = (TABLE_SPLITTER ~ "=") ~> "[^,]*".r
    def splitterOption = ("[^,:]*".r <~ ":") ~ "[^,]*".r ^^ { case k ~ v => (k, v) }
    def splitterOptions = (TABLE_SPLITTER_OPTIONS ~ "=") ~> repsep(splitterOption, ",") ^^ { opts => opts.toMap }
    def featureOptions = (splitter <~ ",") ~ splitterOptions.? ^^ {
      case splitter ~ opts => Splitter(splitter, opts.getOrElse(Map.empty[String, String]))
    }

    // a full SFT spec
    def spec = repsep(attribute, ",") ~ (";" ~> featureOptions).? ^^ {
      case attrs ~ fOpts => FeatureSpec(attrs, fOpts.toSeq)
    }

    // Strips '|' margins and whitespace before parsing.
    // NOTE(review): the regex literal below looks dataset-escaped; as written
    // it would not match plain whitespace — confirm against upstream source.
    def strip(s: String) = s.stripMargin('|').replaceAll("\\\\s*", "")

    def parse(s: String): FeatureSpec = parse(spec, strip(s)) match {
      case Success(t, r) if r.atEnd => t
      case Error(msg, r) if r.atEnd => throw new IllegalArgumentException(msg)
      case Failure(msg, r) if r.atEnd => throw new IllegalArgumentException(msg)
      case other => throw new IllegalArgumentException(s"Malformed attribute: $other")
    }
  }
def parse(s: String): FeatureSpec = new SpecParser().parse(s)
} | kevinwheeler/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/geotools/SimpleFeatureTypes.scala | Scala | apache-2.0 | 23,746 |
package scodec.protocols
package ip
package v6
import scala.util.Try
import scalaz.\\/
import scalaz.syntax.std.option._
import scodec.bits._
import scodec.Codec
import scodec.codecs
/** A 128-bit IPv6 address held as exactly 16 bytes. */
case class Address(bytes: ByteVector) {
  require(bytes.size == 16)

  /**
   * Renders colon-separated 16-bit hex groups, collapsing the longest run
   * (length > 1) of all-zero groups into "::".
   */
  override def toString = {
    // Run-length encodes consecutive equal values as (value, startIndex, runLength).
    def condense[A](xs: List[(A, Int)]): List[(A, Int, Int)] = xs match {
      case Nil => Nil
      case h :: t =>
        val segment = t takeWhile { case (x, _) => x == h._1 }
        (h._1, h._2, segment.size + 1) +: condense(t.drop(segment.size))
    }
    // NOTE(review): "^0+" -> "0" collapses a leading zero-run to a single '0'
    // (e.g. "00ff" -> "0ff"), which is not the fully minimal RFC 5952 form —
    // confirm before changing, since it alters toString output.
    def show(octets: List[ByteVector]): String =
      octets.map { _.toHex.replaceAll("^0+", "0") }.mkString(":")
    val grp = bytes.grouped(2).toList
    // Only zero runs of 2+ groups are eligible for "::" compression.
    val condensedZeroes = condense(grp.zipWithIndex).filter { case (octet, _, size) => octet == hex"0000" && size > 1 }
    if (condensedZeroes.isEmpty) {
      show(grp)
    } else {
      // Compress the longest run; the remaining halves render normally.
      val (_, idx, size) = condensedZeroes.maxBy { case (_, _, size) => size }
      show(grp.take(idx)) ++ "::" ++ show(grp.drop(idx + size))
    }
  }
}
object Address {
  // Binary codec: a fixed 16-byte field.
  implicit val codec: Codec[Address] = codecs.bytes(16).as[Address]

  /** Parses a textual address; IPv4 literals are mapped into IPv6 space. */
  def fromString(str: String): String \\/ Address = {
    // FIXME: this implementation erroneously supports hostnames and can be slow as a result
    val result = Try {
      java.net.InetAddress.getByName(str) match {
        case v6: java.net.Inet6Address => Address(ByteVector(v6.getAddress))
        case v4: java.net.Inet4Address => ip.v4.Address(ByteVector(v4.getAddress).toInt()).toV6
      }
    }.toOption
    result.toRightDisjunction(s"invalid IPv6 address: $str")
  }

  /** Unsafe variant: throws IllegalArgumentException instead of returning a left. */
  def fromStringValid(str: String): Address =
    fromString(str).valueOr { err => throw new IllegalArgumentException(err) }
}
| jrudnick/scodec-protocols | src/main/scala/scodec/protocols/ip/v6/Address.scala | Scala | bsd-3-clause | 1,732 |
/*
* Copyright 2015 - 2017 Pablo Alcaraz
* Mysql to Postgres converter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mysql2postgresql.converter
import com.github.mysql2postgresql.converter.config.TestEnvironmentChecker
import org.springframework.boot.{ApplicationArguments, ApplicationRunner}
import org.springframework.stereotype.Component
/**
 * Help task.
 *
 * Prints CLI usage when the app is launched with --help or with no
 * arguments at all; skipped entirely in the test environment.
 */
@Component
class HelpTaskRunner extends ApplicationRunner {

  override def run(args: ApplicationArguments): Unit = {
    // Only show help outside tests, and only when requested (or no args given).
    if( !TestEnvironmentChecker.isTestEnvironment && (args.containsOption("help") || args.getSourceArgs.isEmpty)) {
      showHelp()
    }
  }

  // Prints the usage text to stdout; '|' margins are stripped at runtime.
  def showHelp(): Unit = {
    println(
      """
        |Converts a sql script from MySql to Postgres.
        |
        |Invoke it with the following parameters.
        |
        |--help
        | Show this help.
        |
        |--convert
        | Convert incoming mysql file into a postgres file.
        | Add the following optional parameters:
        |
        | --in-mysql=input.mysql
        | Mysql input file to convert. Generate this file using mysqldump command.
        |
        | --out-postgres=output.psql
        | Postgres output file.
        |
        |Example of parameters:
        |
        | --convert --in-mysql=/path/to/mysql.sql --out-postgres=/path/to/generated-postgres.psql
        |
      """.stripMargin)
  }
}
| pabloa/mysql2postgresql | src/main/scala/com/github/mysql2postgresql/converter/HelpTaskRunner.scala | Scala | apache-2.0 | 1,980 |
package org.tribbloid.spookystuff.actions
import org.tribbloid.spookystuff.pages.Page
import org.tribbloid.spookystuff.session.Session
/**
* Created by peng on 1/21/15.
*/
/**
 * An action that checks some condition against the session without
 * producing any output pages of its own.
 */
abstract class Assertion extends Action {

  // Assertions export nothing, so there are no output names.
  final override def outputNames = Set()

  final override def trunk = None //can't be ommitted

  // Runs the check, then reports an empty page sequence.
  final override def doExe(session: Session): Seq[Page] = {
    exeWithoutPage(session: Session)
    Seq()
  }

  /** Performs the actual check; implementations should throw/assert on failure. */
  def exeWithoutPage(session: Session): Unit
}
/** Fails via `assert` when `condition` does not hold for a fresh snapshot of the current page. */
case class Assert(condition: Page => Boolean) extends Assertion {
  override def exeWithoutPage(session: Session): Unit = {
    // Snapshot the session's current page and test the predicate against it.
    // NOTE(review): Predef.assert is elidable with -Xdisable-assertions /
    // -Xelide-below — confirm builds never elide it, or this check vanishes.
    val page = DefaultSnapshot.apply(session).head.asInstanceOf[Page]
    assert(condition(page))
  }
} | chenUT/spookystuff | core/src/main/scala/org/tribbloid/spookystuff/actions/Assertion.scala | Scala | apache-2.0 | 706 |
package com.sksamuel.scapegoat.inspections.collections
import com.sksamuel.scapegoat.{ Inspection, InspectionContext, Inspector, Levels }
/** @author Stephen Samuel */
/**
 * Inspection flagging literal `Set(...)` constructions that contain
 * duplicate values, e.g. `Set(1, 2, 1)`.
 *
 * Fix: corrected the typo "overwriten" -> "overwritten" in the
 * user-facing warning message.
 *
 * @author Stephen Samuel
 */
class DuplicateSetValue extends Inspection {

  def inspector(context: InspectionContext): Inspector = new Inspector(context) {
    override def postTyperTraverser = Some apply new context.Traverser {

      import context.global._

      // Duplicates exist when distinct values are fewer than the arguments.
      // Literal constants compare by value; other trees compare by the tree
      // object itself — NOTE(review): confirm tree equality semantics suffice
      // for non-literal arguments.
      private def hasDuplicates(trees: List[Tree]): Boolean = {
        val values: Set[Any] = trees.map {
          case Literal(Constant(x)) => x
          case x => x
        }.toSet
        values.size < trees.size
      }

      private def warn(tree: Tree) = {
        context.warn("Duplicated set value", tree.pos, Levels.Warning,
          "A set value is overwritten by a later entry: " + tree.toString().take(100), DuplicateSetValue.this)
      }

      // Matches applications of `Set.apply(...)` and inspects the arguments.
      override def inspect(tree: Tree): Unit = {
        tree match {
          case Apply(TypeApply(Select(Select(_, TermName("Set")), TermName("apply")), _), args) if hasDuplicates(args) => warn(tree)
          case _ => continue(tree)
        }
      }
    }
  }
}
| pwwpche/scalac-scapegoat-plugin | src/main/scala/com/sksamuel/scapegoat/inspections/collections/DuplicateSetValue.scala | Scala | apache-2.0 | 1,147 |
package feeds.acl
import drt.server.feeds.acl.AclFeed.latestFileForPort
import drt.shared._
import net.schmizz.sshj.sftp.{FileAttributes, RemoteResourceInfo, SFTPClient}
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import scala.collection.JavaConverters._
/** Verifies that ACL latest-file selection skips files below a size threshold. */
class AclFeedMinFileSizeSpec extends Specification with Mockito {

  val sftp = mock[SFTPClient]

  "Given a latest ACL file that is greater than the byte threshold then it should be returned" >> {
    val mockLatest = mockFileWithSize(10000L, "MANS20_HOMEOFFICEROLL180_20200406.zip", "latest")
    val mockFile = mockFileWithSize(20000L, "MANS20_HOMEOFFICEROLL180_20200405.zip", "previous")
    sftp.ls("/180_Days/") returns List(mockLatest, mockFile).asJava

    val latestFilePathId = latestFileForPort(sftp, PortCode("MAN"), 100L)

    latestFilePathId === "latest"
  }

  "Given a latest ACL file that is lower than the byte threshold then it should return the latest file that is above the threshold" >> {
    val mockLatest = mockFileWithSize(10000L, "MANS20_HOMEOFFICEROLL180_20200406.zip", "latest")
    val mockFile = mockFileWithSize(20000L, "MANS20_HOMEOFFICEROLL180_20200405.zip", "previous")
    sftp.ls("/180_Days/") returns List(mockLatest, mockFile).asJava

    // Threshold of 15000 bytes excludes the 10000-byte latest file.
    val latestFilePathId = latestFileForPort(sftp, PortCode("MAN"), 15000L)

    latestFilePathId === "previous"
  }

  // Builds a Mockito stub resembling one SFTP listing entry of the given size.
  def mockFileWithSize(bytes: Long, fileName: String, pathId: String): RemoteResourceInfo = {
    val mockFile = mock[RemoteResourceInfo]
    val mockAttributes = new FileAttributes.Builder()
      .withSize(bytes)
      .build()
    mockFile.getName returns fileName
    mockFile.getPath returns pathId
    mockFile.getAttributes returns mockAttributes
    mockFile
  }
}
| UKHomeOffice/drt-scalajs-spa-exploration | server/src/test/scala/feeds/acl/AclFeedMinFileSizeSpec.scala | Scala | apache-2.0 | 1,757 |
package com.arcusys.valamis.web.servlet.admin
import javax.servlet.http.HttpServletResponse
import com.arcusys.learn.liferay.util.{PortalUtilHelper, PortletName}
import com.arcusys.valamis.lrssupport.lrs.service.LrsRegistration
import com.arcusys.valamis.lrs.tincan.AuthorizationScope
import com.arcusys.valamis.lrssupport.lrsEndpoint.model.{AuthType, AuthorizationType, LrsEndpoint}
import com.arcusys.valamis.lrssupport.lrsEndpoint.service.LrsEndpointService
import com.arcusys.valamis.settings.service.SettingService
import com.arcusys.valamis.web.portlet.base.ViewPermission
import com.arcusys.valamis.web.servlet.base.{BaseApiController, PermissionUtil}
/**
 * Admin settings endpoints (LRS, issuer, Google API, LTI, beta studio).
 * GET returns the current LRS endpoint info; POST updates one settings
 * group selected by the :type path segment and responds 204 No Content.
 */
class AdminServlet extends BaseApiController {

  lazy val endpointService = inject[LrsEndpointService]
  lazy val settingsManager = inject[SettingService]
  lazy val lrsRegistration = inject[LrsRegistration]

  // Parsed view of the current request's admin parameters.
  private def adminRequest = AdminRequest(this)

  get("/administering/settings/lrs(/)") {
    implicit val companyId = getCompanyId
    jsonAction(lrsRegistration.getLrsEndpointInfo(AuthorizationScope.All,
      Some(request),
      PortalUtilHelper.getLocalHostUrl))
  }

  post("/administering/settings/:type(/)") {
    requirePortletPermission(ViewPermission, PortletName.AdminView)
    implicit val companyId = getCompanyId
    // Dispatch on the settings group named in the URL.
    adminRequest.settingType match {
      case AdminSettingType.Issuer => updateIssuerSettings
      case AdminSettingType.GoogleAPI => updateGoogleAPISettings
      case AdminSettingType.Lti => updateLTISettings
      case AdminSettingType.Lrs => updateLrsSettings
      case AdminSettingType.BetaStudio => updateStudioSettings
    }
    halt(HttpServletResponse.SC_NO_CONTENT)
  }

  // Switches between the built-in LRS and an external one (basic or OAuth auth).
  private def updateLrsSettings(implicit companyId: Long) = {
    val adminRequest = AdminRequest(this)
    if (!adminRequest.isExternalLrs) {
      val customHost = adminRequest.customHost
      endpointService.switchToInternal(customHost)
    } else {
      val endpoint = adminRequest.authType match {
        case AuthorizationType.BASIC => LrsEndpoint(
          endpoint = adminRequest.endPoint,
          auth = AuthType.BASIC,
          key = adminRequest.login,
          secret = adminRequest.password)
        case AuthorizationType.OAUTH => LrsEndpoint(
          endpoint = adminRequest.endPoint,
          auth = AuthType.OAUTH,
          key = adminRequest.clientId,
          secret = adminRequest.clientSecret)
      }
      endpointService.setEndpoint(endpoint)
    }
  }

  // Persists open-badge issuer identity fields.
  private def updateIssuerSettings(implicit companyId: Long) = {
    settingsManager.setIssuerName(adminRequest.issuerName)
    settingsManager.setIssuerURL(adminRequest.issuerUrl)
    settingsManager.setIssuerEmail(adminRequest.issuerEmail)
  }

  // Persists Google API credentials.
  private def updateGoogleAPISettings(implicit companyId: Long) = {
    settingsManager.setGoogleClientId(adminRequest.googleClientId)
    settingsManager.setGoogleAppId(adminRequest.googleAppId)
    settingsManager.setGoogleApiKey(adminRequest.googleApiKey)
  }

  // Persists LTI launch/protocol parameters.
  private def updateLTISettings(implicit companyId: Long) = {
    settingsManager.setLtiLaunchPresentationReturnUrl(adminRequest.ltiLaunchPresentationReturnUrl)
    settingsManager.setLtiMessageType(adminRequest.ltiMessageType)
    settingsManager.setLtiVersion(adminRequest.ltiVersion)
    settingsManager.setLtiOauthSignatureMethod(adminRequest.ltiOauthSignatureMethod)
    settingsManager.setLtiOauthVersion(adminRequest.ltiOauthVersion)
  }

  // Persists the beta studio URL.
  private def updateStudioSettings(implicit companyId: Long) = {
    settingsManager.setBetaStudioUrl(adminRequest.betaStudioUrl)
  }
}
| arcusys/JSCORM | valamis-portlets/src/main/scala/com/arcusys/valamis/web/servlet/admin/AdminServlet.scala | Scala | gpl-3.0 | 3,576 |
// Negative compiler test (neg/): the code below is intentionally ill-typed.
// It exercises the volatile-type override checks in RefChecks and must NOT
// be "fixed" — the expected outcome is a compile error.
class B
class C(x: String) extends B

abstract class A {
  // T is an abstract type member bounded between C (below) and B (above).
  class D { type T >: C <: B }
  val x: D
  var y: x.T = new C("abc")
}

class Volatile extends A {
  type A >: Null
  // test (1.4), pt 2 in RefChecks
  // Overrides val x with the compound (volatile) type A with D; presumably
  // this is what the checker must reject — confirm against the test's
  // expected error output.
  val x: A with D = null
}
| yusuke2255/dotty | tests/untried/neg/volatile_no_override.scala | Scala | bsd-3-clause | 236 |
package com.twitter.finatra.example
import com.twitter.inject.Test
import com.twitter.inject.app.EmbeddedApp
class HelloWorldAppFeatureTest extends Test {

  // Deliberately a def rather than a val: each test case must run against a
  // brand-new EmbeddedApp, because the wrapped application is stateful and
  // sharing one instance across test cases can make them non-deterministic.
  def app = new EmbeddedApp(new SampleApp)

  def app(underlying: SampleApp) = new EmbeddedApp(underlying)

  test("SampleApp#print help") {
    // Requesting help always terminates with a non-zero exit code, which
    // surfaces here as an exception.
    intercept[Exception] {
      app.main("help" -> "true")
    }
  }

  test("SampleApp#run") {
    val sampleApp = new SampleApp
    app(sampleApp).main("username" -> "jdoe")
    val produced: Seq[Int] = sampleApp.getQueue
    produced should equal(Seq(1, 2, 3, 4, 5, 6))
  }
}
| twitter/finatra | examples/injectable-app/scala/src/test/scala/com/twitter/finatra/example/HelloWorldAppFeatureTest.scala | Scala | apache-2.0 | 911 |
package com.netflix.aspyker.tools.github
import org.slf4j.LoggerFactory
import play.api.libs.json._
import org.kohsuke.github._
import scala.collection.JavaConversions._
import java.util.Date
/**
 * Ad-hoc reporting tool: walks a GitHub organisation's public repositories
 * and logs per-repository statistics (forks, stars, contributors, issue and
 * pull-request close times) as a JSON document.
 */
object QueryGitHub {
  def logger = LoggerFactory.getLogger(getClass)

  //def githubOrg = "netflix"
  def githubOrg = "facebook"

  // Entry point: connects using ambient credentials, lists the org's repos,
  // keeps only the public ones and walks them.
  def main(args: Array[String]): Unit = {
    val github = GitHub.connect()
    val orgs = github.getOrganization(githubOrg)
    val repos = orgs.listRepositories(100).asList()
    val (privateRepos, publicRepos) = repos.partition { repo => repo.isPrivate() }
    walkRepos(publicRepos.toList)
    System.exit(0)
  }

  // Full commit scan: returns (commit count, days since last commit,
  // distinct contributor logins). Expensive — fetches every commit.
  // TODO: Is there a faster way to only pull the last commit?
  def commitInfo(repo: GHRepository) : (Int, Int, List[String]) = {
    val commits = repo.listCommits().asList()
    val orderedCommits = commits.sortBy(_.getCommitShortInfo.getCommitter().getDate())
    val lastCommitDate = orderedCommits(orderedCommits.length - 1).getCommitShortInfo().getCommitter().getDate()
    //logger.debug(s"commits, first = ${orderedCommits(0).getSHA1}, last = ${orderedCommits(orderedCommits.length - 1).getSHA1()}")
    val daysSinceLastCommit = daysBetween(lastCommitDate, new Date())
    logger.debug(s"  daysSinceLastCommit = ${daysSinceLastCommit}")
    // Commits without a resolvable GitHub author (e.g. email-only) are skipped.
    val contributors = commits.filter { commit => Option(commit.getAuthor()).isDefined }
    val contributorLogins = contributors.map(contributor => contributor.getAuthor().getLogin()).distinct
    logger.debug(s"  numContribitors = ${contributorLogins.length}, contributorEmails = ${contributorLogins}")
    (commits.length, daysSinceLastCommit, contributorLogins.toList)
  }

  // Cheaper variant: derives the last-commit date from branch heads only.
  // NOTE(review): commit count and contributors are stubbed out here — it
  // returns -1 commits and an empty contributor list (contributorLogins is
  // None, so None.toList is Nil). The commented code below suggests this is
  // unfinished. Also, lastCommits.max would throw on a repo whose only
  // branch is gh-pages (empty collection) — TODO confirm.
  // TODO: Is there a faster way to only pull the last commit?
  def commitInfo2(repo: GHRepository) : (Int, Int, List[String]) = {
    val branches = repo.getBranches().filterKeys { key => key != "gh-pages" }
    val lastCommits = branches.map({
      case (branchName, branch) => {
        val lastCommit = branch.getSHA1()
        val lastCommitDate = repo.getCommit(lastCommit).getCommitShortInfo.getCommitter.getDate()
        logger.debug(s"last commit date for ${branchName} repo was ${lastCommitDate}")
        lastCommitDate
      }
    })
    val lastCommitDate = lastCommits.max
    //logger.debug(s"commits, first = ${orderedCommits(0).getSHA1}, last = ${orderedCommits(orderedCommits.length - 1).getSHA1()}")
    val daysSinceLastCommit = daysBetween(lastCommitDate, new Date())
    logger.debug(s"  daysSinceLastCommit = ${daysSinceLastCommit}")
    val contributorLogins = None;
    // val contributorLogins = repo.listCollaborators().map { collaborator => collaborator.getLogin }
    // val contributors = commits.filter { commit => Option(commit.getAuthor()).isDefined }
    // val contributorLogins = contributors.map(contributor => contributor.getAuthor().getLogin()).distinct
    // logger.debug(s"  numContribitors = ${contributorLogins.length}, contributorEmails = ${contributorLogins}")
    (-1, daysSinceLastCommit, contributorLogins.toList)
  }

  // Stub: issue statistics disabled; returns sentinel (-1, -1).
  def getClosedIssuesStats(repo: GHRepository) : (Int, Int) = {
    (-1, -1)
  }

  // Real implementation (currently unused by walkRepos): returns
  // (number of closed issues, average days from open to close).
  def getClosedIssuesStats2(repo: GHRepository) : (Int, Int) = {
    val closedIssues = repo.getIssues(GHIssueState.CLOSED)
    val timeToCloseIssue = closedIssues.map(issue => {
      val opened = issue.getCreatedAt()
      val closed = issue.getClosedAt()
      val difference = daysBetween(opened, closed)
      difference
    })
    val sumIssues = timeToCloseIssue.sum
    // Guard against division by zero when there are no closed issues.
    val avgIssues = timeToCloseIssue.size match {
      case 0 => 0
      case _ => sumIssues / timeToCloseIssue.size
    }
    logger.debug(s"  avg days to close ${closedIssues.size()} issues = ${avgIssues} days")
    (closedIssues.size(), avgIssues)
  }

  // Stub: pull-request statistics disabled; returns sentinel (-1, -1).
  def getClosedPullRequestsStats(repo: GHRepository) : (Int, Int) = {
    (-1, -1)
  }

  // Real implementation (currently unused by walkRepos): returns
  // (number of closed PRs, average days from open to close).
  def getClosedPullRequestsStats2(repo: GHRepository) : (Int, Int) = {
    // TODO: Look at refactoring with above into function
    val closedPRs = repo.getPullRequests(GHIssueState.CLOSED)
    val timeToClosePR = closedPRs.map(pr => {
      val opened = pr.getCreatedAt()
      val closed = pr.getClosedAt()
      val difference = daysBetween(opened, closed)
      difference
    })
    val sumPRs = timeToClosePR.sum
    val avgPRs = timeToClosePR.size match {
      case 0 => 0
      case _ => sumPRs / timeToClosePR.size
    }
    logger.debug(s"  avg days to close ${closedPRs.size()} pull requests = ${avgPRs} days")
    (closedPRs.size, avgPRs)
  }

  // Builds one JSON object per repository and logs the aggregate document.
  def walkRepos(repos: List[GHRepository]) : Unit = {
    val reposJsonSeq = repos.map(repo => {
      logger.info(s"repo = ${repo.getName()}, forks = ${repo.getForks}, stars = ${repo.getWatchers}")
      val openPullRequests = repo.getPullRequests(GHIssueState.OPEN)
      logger.debug(s"  openIssues = ${repo.getOpenIssueCount()}, openPullRequests = ${openPullRequests.size()}")
      val (numCommits, daysSinceLastCommit, contributorLogins) = commitInfo2(repo)
      val (closedIssuesSize, avgIssues) = getClosedIssuesStats(repo)
      val (closedPRsSize, avgPRs) = getClosedPullRequestsStats(repo)
      val repoJson: JsValue = Json.obj(
        "name" -> repo.getName(),
        "forks" -> repo.getForks(),
        "stars" -> repo.getWatchers(),
        "numContributors" -> contributorLogins.length,
        "issues" -> Json.obj(
          "openCount" -> repo.getOpenIssueCount(),
          "closedCount" -> closedIssuesSize,
          "avgTimeToCloseInDays" -> avgIssues
        ),
        "pullRequests" -> Json.obj(
          "openCount" -> openPullRequests.size(),
          "closedCount" -> closedPRsSize,
          "avgTimeToCloseInDays" -> avgPRs
        ),
        "commits" -> Json.obj(
          "daysSinceLastCommit" -> daysSinceLastCommit
        ),
        "contributors" -> contributorLogins.toSeq
      )
      logger.debug("repo json = " + repoJson)
      repoJson
    })
    // NOTE(review): toGMTString is deprecated in java.util.Date.
    val allRepos = Json.obj("asOf" -> new Date().toGMTString(), "repos" -> reposJsonSeq)
    logger.info(s"reposJson = ${allRepos}")
  }

  // Whole days between two dates; truncates toward zero, negative if the
  // arguments are passed in the wrong order.
  def daysBetween(smaller: Date, bigger: Date): Int = {
    val diff = (bigger.getTime() - smaller.getTime()) / (1000 * 60 * 60 * 24)
    diff.toInt
  }
}
| aspyker/github-scraper | src/main/scala/com/netflix/aspyker/tools/github/QueryGitHub.scala | Scala | apache-2.0 | 6,373 |
package gapt.examples.tip.isaplanner
import gapt.expr._
import gapt.proofs.context.update.InductiveType
import gapt.proofs.gaptic._
/**
 * TIP/IsaPlanner benchmark prop_27: membership in a list is preserved when
 * another list is appended in front of it, i.e.
 * elem(x, ys) → elem(x, append(xs, ys)). Proved by induction on xs.
 */
object prop_27 extends TacticsProof {

  // Background theory: Peano naturals and cons-lists of naturals, plus the
  // function/predicate symbols used in the axioms below.
  ctx += InductiveType( ty"Nat", hoc"Z:Nat", hoc"S:Nat>Nat" )
  ctx += hoc"p:Nat>Nat"
  ctx += InductiveType( ty"list", hoc"nil:list", hoc"cons:Nat>list>list" )
  ctx += hoc"head:list>Nat"
  ctx += hoc"tail:list>list"
  ctx += hoc"'equal' :Nat>Nat>o"
  ctx += hoc"'elem' :Nat>list>o"
  ctx += hoc"'append' :list>list>list"

  // Defining axioms for the symbols above, ending with the goal sequent.
  val sequent =
    hols"""
      def_p: ∀x p(S(x)) = x,
      def_head: ∀x0 ∀x1 (head(cons(x0:Nat, x1:list): list): Nat) = x0,
      def_tail: ∀x0 ∀x1 (tail(cons(x0:Nat, x1:list): list): list) = x1,
      def_equal_1: equal(Z, Z),
      def_equal_2: ∀x ¬equal(Z, S(x)),
      def_equal_3: ∀x ¬equal(S(x), Z),
      def_equal_4: ∀x ∀y ((equal(S(x), S(y)) → equal(x, y)) ∧ (equal(x, y) → equal(S(x), S(y)))),
      def_elem_1: ∀x ¬elem(x:Nat, nil:list),
      def_elem_2: ∀x ∀z ∀xs ((elem(x:Nat, cons(z:Nat, xs:list): list) → equal(x, z) ∨ elem(x, xs)) ∧ (equal(x, z) ∨ elem(x, xs) → elem(x, cons(z, xs)))),
      def_append_1: ∀y (append(nil:list, y:list): list) = y,
      def_append_2: ∀z ∀xs ∀y (append(cons(z:Nat, xs:list): list, y:list): list) = cons(z, append(xs, y)) ,
      ax_nat: ∀x ¬Z = S(x),
      ax_list: ∀y0 ∀y1 ¬(nil:list) = cons(y0:Nat, y1:list)
      :-
      goal: ∀x ∀xs ∀ys (elem(x:Nat, ys:list) → elem(x, append(xs:list, ys)))
    """

  // Tactic script: the order of the steps is significant.
  val proof = Lemma( sequent ) {
    allR
    allR
    induction( hov"xs:list" )
    // Base case xs = nil: append(nil, ys) rewrites to ys, so the goal
    // follows directly from its own antecedent.
    allR
    allL( "def_append_1", le"ys:list" )
    eql( "def_append_1_0", "goal" ).fromLeftToRight
    impR
    axiomLog
    // Step case xs = cons(x_0, xs_0): unfold append and elem, then close
    // via the induction hypothesis IHxs_0.
    allR
    allL( "def_append_2", le"x_0:Nat", le"xs_0:list", le"ys:list" )
    eql( "def_append_2_0", "goal" )
    allL( "def_elem_2", le"x:Nat", le"x_0:Nat", le"append(xs_0:list, ys:list):list" )
    andL( "def_elem_2_0" )
    impR
    impL( "def_elem_2_0_1" )
    orR
    allL( "IHxs_0", le"ys:list" )
    impL( "IHxs_0_0" )
    axiomLog
    axiomLog
    axiomLog
  }
}
| gapt/gapt | examples/tip/isaplanner/prop_27.scala | Scala | gpl-3.0 | 2,104 |
/**
 * Decomposes a non-negative number of seconds into (hours, minutes, seconds)
 * via repeated subtraction, with Leon-style require/ensuring contracts.
 */
object SecondsToTime {

  /** Total seconds represented by h hours, m minutes and s seconds. */
  def toSecs(h: Int, m: Int, s: Int) = h * 3600 + m * 60 + s

  /** Specification: (h, m, s) is a well-formed decomposition of t. */
  def prop(t: Int, h: Int, m: Int, s: Int) : Boolean =
    toSecs(h, m, s) == t &&
    m >= 0 && m < 60 &&
    s >= 0 && s < 60

  /** Public entry point; requires a non-negative total. */
  def secondsToTime(total : Int) = {
    require(total >= 0)
    rec(total, total, 0, 0)
  } ensuring (res => prop(total, res._1, res._2, res._3))

  /**
   * Worker loop: r is the remaining seconds still to be distributed.
   * The precondition is the loop invariant; in particular, once minutes
   * have started accumulating (m > 0), r + m * 60 stays below one hour.
   */
  def rec(total : Int, r : Int, h : Int, m : Int) : (Int, Int, Int) = {
    require(
      total == toSecs(h, m, r) &&
      m >= 0 && m < 60 &&
      h >= 0 && r >= 0 &&
      (m == 0 || r + m * 60 < 3600)
    )
    if (r < 60) {
      (h, m, r)
    } else if (r < 3600) {
      rec(total, r - 60, h, m + 1)
    } else {
      rec(total, r - 3600, h + 1, m)
    }
  } ensuring { res =>
    val (hh, mm, ss) = res
    prop(total, hh, mm, ss)
  }
}
| epfl-lara/leon | testcases/repair/SecondsToTime/SecondsToTime.scala | Scala | gpl-3.0 | 821 |
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Author: Spiros Tzavellas
*/
package com.tzavellas.coeus.core
package interception
import org.junit.Test
import com.tzavellas.coeus.mvc.WebRequest
import com.tzavellas.coeus.test.MockInterceptor
class RequestFilterTest {

  val request: RequestContext = new RequestContext(null, null, null)

  // Builds a mock interceptor whose RequestFilter always answers `result`.
  private def interceptorAccepting(result: Boolean) =
    new MockInterceptor with RequestFilter {
      def accept(wr: WebRequest) = result
    }

  // Drives the interceptor through all three lifecycle phases.
  private def runAllPhases(interceptor: MockInterceptor with RequestFilter) = {
    interceptor.preHandle(request)
    interceptor.postHandle(request)
    interceptor.afterRender(request)
  }

  @Test
  def filter_chooses_not_to_execute_the_interceptors_for_the_request(): Unit = {
    val interceptor = interceptorAccepting(false)
    runAllPhases(interceptor)
    assert(! interceptor.wasCalled, "interceptor was called")
  }

  @Test
  def filter_chooses_to_execute_the_interceptors_for_the_request(): Unit = {
    val interceptor = interceptorAccepting(true)
    runAllPhases(interceptor)
    assert(interceptor.wasCalled, "interceptor did not get called")
  }
}
| sptz45/coeus | src/test/scala/com/tzavellas/coeus/core/interception/RequestFilterTest.scala | Scala | apache-2.0 | 1,156 |
/* _____ _
* | ___| __ __ _ _ __ ___ (_) __ _ _ __
* | |_ | '__/ _` | '_ ` _ \\| |/ _` | '_ \\
* | _|| | | (_| | | | | | | | (_| | | | |
* |_| |_| \\__,_|_| |_| |_|_|\\__,_|_| |_|
*
* Copyright 2014 Pellucid Analytics
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package framian
package reduce
import scala.annotation.tailrec
import scala.reflect.ClassTag
import spire._
import spire.compat._
import spire.algebra.{Field, Order}
import spire.syntax.field._
import spire.syntax.order._
/**
 * Reducer computing Tukey-style outlier fences from the interquartile range:
 * lower fence = Q1 - k * IQR, upper fence = Q3 + k * IQR. Reduces to a pair
 * of optional fences; a fence is present only when at least one data point
 * lies at or beyond it.
 *
 * @param k fence multiplier (1.5 is the conventional choice)
 */
final class Outliers[A: Field: Order: ClassTag](k: Double) extends SimpleReducer[A, (Option[A], Option[A])] {

  // Quantile reducer configured for the first and third quartiles.
  val quantiler = new Quantile[A](Seq(.25, .75))

  def reduce(data: Array[A]): Value[(Option[A], Option[A])] = {
    val (_, q1) :: (_, q3) :: Nil = quantiler.quantiles(data)
    val iqr = q3 - q1
    val lowerFence = q1 - (k * iqr)
    val upperFence = q3 + (k * iqr)
    // Use exists instead of filter + length: we only need to know whether
    // any point crosses a fence, so there is no reason to materialise the
    // filtered arrays.
    val lower = if (data.exists(_ <= lowerFence)) Some(lowerFence) else None
    val upper = if (data.exists(_ >= upperFence)) Some(upperFence) else None
    Value((lower, upper))
  }
}
| codeaudit/framian | framian/src/main/scala/framian/reduce/Outliers.scala | Scala | apache-2.0 | 1,665 |
/*
* Copyright 2009-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package http {
import _root_.net.liftweb.common._
import util._
import _root_.scala.reflect.Manifest
/**
 * A base trait for a Factory. A Factory is both an Injector and
 * a collection of FactoryMaker instances. The FactoryMaker instances auto-register
 * with the Injector. This provides both concrete Maker/Vendor functionality as
 * well as Injector functionality.
 */
trait Factory extends SimpleInjector {

  /**
   * Create an object or val that is a subclass of the FactoryMaker to
   * generate factory for a particular class as well as define session and
   * request specific vendors and use doWith to define the vendor just for
   * the scope of the call.
   *
   * Instantiating a FactoryMaker registers it with the enclosing Injector
   * (see registerInjection in the constructor body), keyed by T's Manifest.
   */
  abstract class FactoryMaker[T](_default: () => T)
    (implicit man: Manifest[T]) extends StackableMaker[T] with Vendor[T] {
    registerInjection(this)(man)

    /**
     * The default function for vending an instance
     */
    object default extends PSettableValueHolder[() => T] {
      // Current default producer; mutable so it can be replaced via set.
      private var value = _default

      def get = value

      def is = get

      def set(v: () => T): () => T = {
        value = v
        v
      }
    }

    /**
     * The session-specific Maker for creating an instance
     */
    object session extends SessionVar[Maker[T]](Empty)

    /**
     * The request specific Maker for creating an instance
     */
    object request extends RequestVar[Maker[T]](Empty)

    // Scoped lookup order: request first, then session.
    private val _sub: List[PValueHolder[Maker[T]]] = List(request, session)

    /**
     * Vend an instance (falls back to the default producer when no maker
     * yields a value).
     */
    implicit def vend: T = make openOr default.is.apply()

    /**
     * Make a Box of the instance.
     *
     * Resolution order: makers stacked on this instance, then the
     * request/session scoped makers, then the default producer.
     */
    override implicit def make: Box[T] = super.make or find(_sub) or Full(default.is.apply())
  }
}
}
}
| jeppenejsum/liftweb | framework/lift-base/lift-webkit/src/main/scala/net/liftweb/http/Factory.scala | Scala | apache-2.0 | 2,386 |
package com.eevolution.context.dictionary.infrastructure.repository
import com.eevolution.context.dictionary.domain.model.HouseKeeping
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 20/10/17.
*/
/**
 * House Keeping Mapping
 *
 * Quill schema mapping for the AD_HouseKeeping table: binds each
 * HouseKeeping field to its physical database column name. The column
 * strings must match the database schema exactly — do not edit them
 * without a corresponding schema change.
 */
trait HouseKeepingMapping {
  val queryHouseKeeping = quote {
    querySchema[HouseKeeping]("AD_HouseKeeping",
      _.houseKeepingId -> "AD_HouseKeeping_ID",
      _.tenantId -> "AD_Client_ID",
      _.organizationId -> "AD_Org_ID",
      _.entityId -> "AD_Table_ID",
      _.backupFolder -> "BackupFolder",
      _.created -> "Created",
      _.createdBy -> "CreatedBy",
      _.description -> "Description",
      _.help -> "Help",
      _.isActive -> "IsActive",
      _.isExportXMLBackup -> "IsExportXMLBackup",
      _.isSaveInHistoric -> "IsSaveInHistoric",
      _.lastDeleted -> "LastDeleted",
      _.lastRun -> "LastRun",
      _.name -> "Name",
      _.processing -> "Processing",
      _.updated -> "Updated",
      _.updatedBy -> "UpdatedBy",
      _.value -> "Value",
      _.whereClause -> "WhereClause",
      _.uuid -> "UUID")
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/HouseKeepingMapping.scala | Scala | gpl-3.0 | 2,012 |
package net.scalytica.symbiotic.components
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import scalacss.Defaults._
import scalacss.ScalaCssReact._
/**
 * A simple icon-only button component: renders a <button> carrying the
 * default button style, an <i> element with the given icon class, any extra
 * attributes, and an onClick handler.
 */
object IconButton {

  object Style extends StyleSheet.Inline {

    import dsl._

    val defaultButton =
      style("icon-btn-default")(addClassNames("btn", "btn-default"))
  }

  /**
   * @param iconCls CSS class(es) for the inner <i> icon element
   * @param attrs   extra tag modifiers applied to the <button>
   * @param onPress click handler
   */
  case class Props(
      iconCls: String,
      attrs: Seq[TagMod] = Seq.empty,
      onPress: (ReactEventI) => Callback
  )

  val component = ReactComponentB[Props]("IconButton").stateless.render_P { props =>
    <.button(
      ^.`type` := "button",
      Style.defaultButton,
      ^.onClick ==> props.onPress,
      props.attrs,
      <.i(^.className := props.iconCls)
    )
  }.build

  def apply(props: Props) = component(props)

  def apply(
      iconCls: String,
      attrs: Seq[TagMod],
      onPress: (ReactEventI) => Callback
  ) = component(Props(iconCls, attrs, onPress))

  // Convenience overload with a no-op click handler.
  def apply(iconCls: String, attrs: Seq[TagMod]) =
    component(Props(iconCls, attrs, (_: ReactEventI) => Callback.empty))
}
| kpmeen/symbiotic | examples/symbiotic-client/src/main/scala/net/scalytica/symbiotic/components/IconButton.scala | Scala | apache-2.0 | 1,082 |
package part2
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/**
 * Exercise scaffold: every case is `pending` until the Expr / Calculator /
 * IntCalculator implementations exist. The commented fixtures and
 * assertions document the behaviour the implementations must satisfy;
 * uncomment them as the exercise is completed.
 */
class Exercise12CalculatorSpec extends AnyFlatSpec with Matchers {
  // Sample expression trees used by the tests below:
  // val calc1 = Add(Num(1.1), Mul(Num(2.2), Num(3.3)))
  // val calc2 = Mul(Add(Num(1.1), Num(2.2)), Num(3.3))
  // val calc3 = Div(Num(1.0), Num(0.0))
  // val calc4 = Sqrt(Num(-1.0))

  "stringify" should "stringify an expression" in {
    pending
    // calc1.stringify should equal("1.1 + 2.2 * 3.3")
  }

  "Calculator.eval" should "return a double or some weird infinite thing" in {
    pending
    // Calculator.eval(calc1) should equal(1.1 + 2.2 * 3.3)
    // Calculator.eval(calc2) should equal((1.1 + 2.2) * 3.3)
    // Calculator.eval(calc3).isPosInfinity should equal(true)
    // Calculator.eval(calc4).isNaN should equal(true)
  }

  "IntCalculator.eval" should "return an integer or throw an exception" in {
    pending
    // IntCalculator.eval(calc1) should equal(1 + 2 * 3)
    // IntCalculator.eval(calc2) should equal((1 + 2) * 3)
    // intercept[ArithmeticException] { IntCalculator.eval(calc3) }
    // IntCalculator.eval(calc4) should equal(0) // Double.NaN.toInt is apparently 0!
  }

  "pythag" should "produce the correct form of expression" in {
    pending
    // IntCalculator.eval(Expr.pythag(3, 4)) should equal(5)
    // IntCalculator.eval(Expr.pythag(5, 12)) should equal(13)
  }

  "factorial" should "produce the correct form of expression" in {
    pending
    // IntCalculator.eval(Expr.factorial(5)) should equal(5 * 4 * 3 * 2 * 1)
    // IntCalculator.eval(Expr.factorial(3)) should equal(3 * 2 * 1)
    // IntCalculator.eval(Expr.factorial(1)) should equal(1)
  }

  "stringify" should "parenthesise expression correctly (harder)" in {
    pending
    // calc2.stringify should equal("(1.1 + 2.2) * 3.3")
  }
}
| underscoreio/essential-scala-code | src/test/scala/part2/Exercise12CalculatorSpec.scala | Scala | apache-2.0 | 1,824 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs105.boxes
import uk.gov.hmrc.ct.box._
case class AC7992(value: Option[Boolean]) extends CtBoxIdentifier(name = "Enter Advances and credits note?")
with CtOptionalBoolean
with Input
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs105/boxes/AC7992.scala | Scala | apache-2.0 | 827 |
package be.wegenenverkeer.atomium.format
/**
 * Representation of the content element in a Atom feed.
 *
 * Covariant in T, so a Content of a subtype can be used wherever a Content
 * of its supertype is expected.
 *
 * @param value the content value
 * @param `type` the content type
 * @tparam T the type of entry
 */
case class Content[+T](value: T, `type`: String)
| joachimvda/atomium | modules/format/src/main/scala/be/wegenenverkeer/atomium/format/Content.scala | Scala | mit | 258 |
package com.scalableminds.webknossos.datastore.services
import java.io._
import java.nio.file.Path
import com.google.gson.JsonParseException
import com.google.gson.stream.JsonReader
import com.scalableminds.webknossos.datastore.models.datasource.DataLayerMapping
import com.typesafe.scalalogging.LazyLogging
import net.liftweb.common.{Box, Failure}
import scala.collection.mutable
/**
 * Streaming (gson JsonReader) parser for data-layer mapping files of the
 * shape { "name": ..., "classes": [[id, id, ...], ...] }. Unknown top-level
 * keys are skipped.
 */
object MappingParser extends LazyLogging {

  /**
   * Parses a mapping from a Reader; raw long ids are converted with
   * fromLongFn. The reader is always closed. Parse/runtime exceptions are
   * logged and returned as Failure.
   *
   * NOTE(review): when "name" or "classes" is absent, parseImpl yields None,
   * which reaches callers as Empty via the implicit Option->Box conversion
   * rather than as a descriptive Failure — confirm this is intended.
   */
  def parse[T](r: Reader, fromLongFn: Long => T): Box[DataLayerMapping[T]] =
    try {
      parseImpl(r, fromLongFn)
    } catch {
      case e: JsonParseException =>
        logger.error(s"Parse exception while parsing mapping: ${e.getMessage}.")
        Failure(e.getMessage)
      case e: Exception =>
        logger.error(s"Unknown exception while parsing mapping: ${e.getMessage}.")
        Failure(e.getMessage)
    } finally {
      r.close()
    }

  /** Convenience overload: parse a mapping from a file path. */
  def parse[T](p: Path, fromLongFn: Long => T): Box[DataLayerMapping[T]] =
    parse(new FileReader(new File(p.toString)), fromLongFn)

  /** Convenience overload: parse a mapping from an in-memory byte array. */
  def parse[T](a: Array[Byte], fromLongFn: Long => T): Box[DataLayerMapping[T]] =
    parse(new InputStreamReader(new ByteArrayInputStream(a)), fromLongFn)

  // Walks the top-level JSON object, collecting "name" and "classes";
  // mutable locals because JsonReader is a forward-only streaming API.
  private def parseImpl[T](r: Reader, fromLongFn: Long => T): Box[DataLayerMapping[T]] = {
    val start = System.currentTimeMillis()
    val jsonReader = new JsonReader(r)

    var nameOpt: Option[String] = None
    var classesOpt: Option[Map[T, T]] = None

    jsonReader.beginObject()
    while (jsonReader.hasNext) {
      jsonReader.nextName() match {
        case "name" =>
          nameOpt = Some(jsonReader.nextString())
        case "classes" =>
          classesOpt = Some(parseClasses(jsonReader, fromLongFn))
        case _ =>
          jsonReader.skipValue()
      }
    }
    jsonReader.endObject()

    val end = System.currentTimeMillis()
    logger.info(s"Mapping parsing took ${end - start} ms")

    for {
      name <- nameOpt
      classes <- classesOpt
    } yield {
      DataLayerMapping(name, classes)
    }
  }

  // Parses the "classes" array-of-arrays. Within each inner array, every id
  // is mapped to the array's first id (the representative); the
  // representative itself is not added as a key.
  private def parseClasses[T](jsonReader: JsonReader, fromLongFn: Long => T): Map[T, T] = {
    val mapping = mutable.HashMap[T, T]()
    jsonReader.beginArray()
    while (jsonReader.hasNext) {
      jsonReader.beginArray()
      var firstIdOpt: Option[T] = None
      while (jsonReader.hasNext) {
        val currentId = fromLongFn(jsonReader.nextLong())
        firstIdOpt match {
          case Some(firstId) =>
            mapping.put(currentId, firstId)
          case _ =>
            firstIdOpt = Some(currentId)
        }
      }
      jsonReader.endArray()
    }
    jsonReader.endArray()
    mapping.toMap
  }
}
| scalableminds/webknossos | webknossos-datastore/app/com/scalableminds/webknossos/datastore/services/MappingParser.scala | Scala | agpl-3.0 | 2,647 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 DavidGamba
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.gambaeng.utils
import org.slf4j.Logger
import org.slf4j.LoggerFactory
/**
 * Perl-Getopt-style command-line parser. Option specs are given as a map
 * from spec strings to Symbols (or procedures); the trailing "=X" of a spec
 * key encodes the value type: =s string, =i int, =f double, =p procedure;
 * a key without "=" is a boolean flag. Parsing stops consuming options at
 * "--"; unrecognised "-..." arguments are warned about and ignored; all
 * other arguments are returned as leftovers.
 */
object OptionParser {
  val logger = LoggerFactory.getLogger(this.getClass.getName)

  // Parsed result: Symbol -> value.
  type OptionMap = Map[Symbol, Any]
  // Option specification: spec string -> Symbol or () => Unit procedure.
  type OptionMapBuilder = Map[String, Any]

  /**
   * Parses args against option_map.
   *
   * @return (recognised options, remaining non-option arguments)
   */
  def getOptions(args: Array[String], option_map: OptionMapBuilder): Tuple2[OptionMap, Array[String]] = {
    logger.debug(s"""[getOptions] Received args: ${args.mkString(",")}""")
    logger.debug(s"""[getOptions] Received map: $option_map""")
    parseOptions(args.toList, option_map)
  }

  /**
   * Regex helpers over the spec map: an incoming argument (with any "=value"
   * suffix stripped) is matched against the spec keys to find its entry.
   */
  implicit class OptionMapImprovements(val m: OptionMapBuilder) {
    // Finds the spec key matching the given argument, or "" if none.
    def match_key(opt: String): String = {
      val stripped_opt = if(opt.contains("=")) opt.split("=")(0) else opt
      val s = m.keys.find(_.matches(s"""[^-]*$stripped_opt(\\\\|.*)?(=.)?""")).getOrElse("")
      logger.trace(s"match_key: $opt -> $s")
      s
    }

    // Looks up the Symbol for the matching spec key, if any.
    def match_get(opt: String): Option[Symbol] = {
      val s = m.get(m.match_key(opt))
      logger.trace(s"match_get: $opt -> $s")
      s.asInstanceOf[Option[Symbol]]
    }

    // Looks up the procedure for the matching spec key.
    // NOTE(review): calls .get on the Option — throws if the key is absent,
    // so callers must check match_key first (parseOptions does).
    def match_get_function(opt: String): () => Unit = {
      val s = m.get(m.match_key(opt))
      logger.trace(s"match_get_function: $opt -> $s")
      s.get.asInstanceOf[() => Unit]
    }

    // Like match_get but assumes the key exists.
    def match_apply(opt: String): Symbol = {
      val s = m(m.match_key(opt))
      logger.trace(s"match_apply: $opt -> $s")
      s.asInstanceOf[Symbol]
    }

    // Check allows to stop checking for options, e.g. -- is passed.
    def is_option(opt: String, check: Boolean = true): Boolean = {
      val ret = check && m.match_key(opt).matches("^-.*")
      logger.trace(s"is_option: $opt -> $ret")
      ret
    }

    // If the option definition has p for procedure it is a function
    def is_function(opt: String): Boolean = {
      val ret = m.match_key(opt).matches(".*=p$")
      logger.trace(s"is_function: $opt -> $ret")
      ret
    }

    // If the option definition doesn't have the '=' symbol, it is just a flag
    def is_flag(opt: String): Boolean = {
      val ret = !m.match_key(opt).matches(".*=.$")
      logger.trace(s"is_flag: $opt -> $ret")
      ret
    }

    // Converts the raw string value per the spec key's type suffix
    // (=i Int, =f Double, =s String).
    def cast_value(opt: String, value: String): Any = {
      val key = m.match_key(opt)
      val ret = if (key.matches(".*=i$")) {
        logger.trace("toInt")
        value.toInt
      } else if (key.matches(".*=f$")) {
        logger.trace("toDouble")
        value.toDouble
      } else if (key.matches(".*=s$")) {
        logger.trace("string")
        value
      }
      val ret_type = ret.getClass
      logger.trace(s"cast_value: $opt, $value, type: $ret_type ")
      ret
    }
  }

  // Recursive worker: walks the argument list, accumulating parsed options
  // and skipped (non-option) arguments. Each case below is ordered from
  // most to least specific; the order is significant.
  private def parseOptions(args: List[String],
      option_map: OptionMapBuilder,
      options: OptionMap = Map[Symbol, String](),
      skip: Array[String] = Array[String]()): Tuple2[OptionMap, Array[String]] = {
    logger.trace(s"""[parseOptions] args: $args""")
    logger.trace(s"""[parseOptions] options: $options""")
    logger.trace(s"""[parseOptions] skip: ${skip.mkString(",")}""")
    args match {
      // Empty list
      case Nil => Tuple2(options, skip)
      // Stop on --
      case opt :: tail if opt == "--" => Tuple2(options, skip ++: tail.toArray)
      // Options with values after "=". e.g --opt=value
      case opt :: tail if option_map.is_option(opt) &&
          !option_map.is_flag(opt) &&
          !option_map.is_function(opt) &&
          option_map.match_get(opt) != None &&
          opt.contains("=") => {
        logger.debug(s"Argument $opt maps to an option with value.")
        parseOptions(tail, option_map,
          skip = skip,
          options = options ++ Map(option_map.match_apply(opt) -> option_map.cast_value(opt, opt.split("=")(1))))
      }
      // Flags
      case opt :: tail if option_map.is_option(opt) &&
          option_map.is_flag(opt) &&
          option_map.match_get(opt) != None => {
        logger.debug(s"Argument $opt maps to a flag.")
        parseOptions(tail, option_map,
          skip = skip,
          options = options ++ Map(option_map.match_apply(opt) -> true))
      }
      // Options with functions
      case opt :: tail if option_map.is_option(opt) &&
          option_map.is_function(opt) &&
          option_map.match_key(opt) != "" => {
        logger.debug(s"Argument $opt maps to a function call.")
        option_map.match_get_function(opt)()
        parseOptions(tail, option_map,
          skip = skip,
          options = options)
      }
      // Options with values
      case opt :: value :: tail if option_map.is_option(opt) &&
          !option_map.is_flag(opt) &&
          option_map.match_get(opt) != None => {
        logger.debug(s"Argument $opt maps to an option with value.")
        parseOptions(tail, option_map,
          skip = skip,
          options = options ++ Map(option_map.match_apply(opt) -> option_map.cast_value(opt, value)))
      }
      // Options with missing values
      case opt :: tail if option_map.is_option(opt) &&
          !option_map.is_flag(opt) &&
          option_map.match_get(opt) != None => {
        logger.debug(s"Argument $opt maps to an option with missing value.")
        Console.err.println(s"Option $opt requires an argument")
        parseOptions(tail, option_map,
          skip = skip,
          options = options)
      }
      // Warn on unknown options and ignore them
      case opt :: tail if !option_map.is_option(opt) && opt.startsWith("-") => {
        logger.debug(s"Argument $opt maps to an unknown option.")
        if (opt.contains("="))
          Console.err.println(s"""Unknown option: ${opt.split("=")(0)}""")
        else
          Console.err.println(s"Unknown option: $opt")
        parseOptions(tail, option_map,
          options = options,
          skip = skip)
      }
      // Skip extra arguments
      case opt :: tail if !option_map.is_option(opt) => {
        logger.debug(s"Argument $opt is not an option.")
        parseOptions(tail, option_map,
          options = options,
          skip = skip :+ opt)
      }
    }
  }
}
| DavidGamba/scala-getoptions | src/main/scala/scala-getoptions/optparse.scala | Scala | mit | 7,460 |
package com.ee.assets.transformers
/** A named piece of content with an optional last-modified timestamp. */
trait Element[A] {
  def path: String
  def contents: A
  def lastModified: Option[Long]
}

/** Base class carrying the three Element fields as constructor parameters. */
abstract class BaseElement[A](val path: String,
                              val contents: A,
                              val lastModified: Option[Long]) extends Element[A]

/** A concrete element with actual contents. */
case class ContentElement[A](val path: String,
                             val contents: A,
                             val lastModified: Option[Long]) extends Element[A]

/**
 * An element that is only a path: no contents, no timestamp.
 * Fix: pass the unit value `()` instead of the `Unit` companion object,
 * which only compiled via value discarding (a compiler warning).
 */
case class PathElement(override val path: String) extends BaseElement[Unit](path, (), None)

/** An element that has been deployed: contents and timestamp are gone. */
trait DeployedElement extends Element[Unit] {
  override def lastModified: Option[Long] = None
  // Same fix as PathElement: return the unit value, not the companion object.
  override def contents: Unit = ()
}

case class SimpleDeployedElement(val path: String) extends DeployedElement
| edeustace/assets-loader | plugin/app/com/ee/assets/transformers/Element.scala | Scala | mit | 781 |
package fpinscala.lazyness
import org.specs2.Specification
/**
 * Chapter 5 (laziness) exercises for the custom Stream implementation.
 *
 * Fixes relative to the previous revision:
 *  - the scanRight example was never executed because `$exer48` was missing
 *    from the s2 string; it is now referenced,
 *  - `exer48` used plain `==` (a discarded Boolean) instead of `must_==`,
 *  - several descriptions were corrected to match the code they run
 *    (takeWhileU, tails) and typos fixed (parameter, startsWith, exists,
 *    the Fibonacci listing).
 */
class StreamSpec extends Specification { def is = s2"""
  Stream[T] related exercises for Chapter 5:
  Must create a List[A] from a Stream[A] $exer1
  take should return:
     a new Stream with the first 3 elements from Stream(1, 2, 3, 4, 5) $exer2
     an empty Stream after taking 2 from Stream() $exer3
  drop should:
     return a new Stream with the last 3 elements from Stream(1, 2, 3, 4, 5) $exer4
     return the whole Stream after dropping 0 elements from Stream(1, 2, 3) $exer5
  takeWhile should:
     return Stream(1) using predicate `if e == 1` on Stream(1, 2, 3) $exer6
     return Stream() using predicate `if e == 2` on Stream(1, 2, 3) $exer7
  forAll should:
     return true using predicate `e > 0` on Stream(1, 2, 3) $exer8
     return false using predicate `e > 0` on Stream(1, 2, 0, 4, 5) and the predicate must be executed only 3 times $exer9
  takeWhile2 should:
     return Stream(1, 2, 3) when predicate is `e < 4` on Stream(1, 2, 3, 4, 5) $exer10
     return Stream() when predicate is `e < 0` on Stream(1, 2, 3) $exer11
  headOption should:
     return Some(1) for Stream(1, 2, 3) $exer12
     return None for Stream() $exer13
  map should:
     return Stream(1, 4, 9) `f(x) => x^2` for Stream(1, 2, 3) $exer14
  filter should:
     return Stream() for predicate `x < 0` on Stream(1, 2, 3) $exer15
     return Stream(1, 3) for predicate `x.isOdd` on Stream(1, 2, 3) $exer16
  append should:
     return Stream(1, 2, 3, 4) given Stream(3, 4) to an existing Stream(1, 2) $exer17
  flatMap should:
     return Stream(2, 3, 4) given Stream(1, 2, 3) and the f(x) => x + 1 $exer18
  exists should return true when:
     the predicate is (x > 2) on Stream(-1, 0, 1, 2, 3) $exer22
  exists should return false when:
     the predicate is (x > 2) on Stream(-1, 0, 1, 2) $exer23
  find should return Some(1) when:
     given Stream(1, 2, 3, 4) and predicate (x == 1 || x == 4) $exer24
     given Stream(1, 2, 3, 4) and predicate (x == 1) $exer25
  find should return None when:
     given Stream(1, 2, 3, 4) and predicate (x == 0) $exer26
     given Stream() and predicate (x == 0) $exer27
  given `ones`, recursively defined as val ones = Stream.cons(1, ones), the following expressions should not hang:
     ones.map(_ + 1).exists(_ % 2 == 0) $exer19
     ones.takeWhile(_ == 1) $exer20
     ones.forAll(_ != 1) $exer21
  Stream.constant should:
     not hang when instantiated $exer28
     return Stream(1, 1, 1, 1) when take(4) is invoked on the created constant stream $exer29
  Stream.from should:
     not hang when instantiated $exer30
     return Stream(5, 6, 7, 8) when take(4) is invoked on the created stream $exer31
  Stream.fibs should:
     return the first 7 elements of the Fibonacci sequence $exer32
  onesU (ones implemented using unfold) expressions should:
     return the Stream(1, 1, 1, 1) after invoking take(4) $exer33
  constantU (constant implemented using unfold) expressions should:
     return the Stream(5, 5, 5, 5) after invoking take(4) $exer34
  fibsU (fibs implemented using unfold) expressions should:
     return the Stream(0, 1, 1, 2, 3, 5, 8) after invoking take(7) $exer35
  mapU should:
     return Stream(1, 4, 9) `f(x) => x^2` for Stream(1, 2, 3) $exer36
  takeWhileU should:
     return Stream(1) using predicate `if e == 1` on Stream(1, 2, 3) $exer37
     return Stream() using predicate `if e == 2` on Stream(1, 2, 3) $exer38
  takeU should:
     a new Stream with the first 3 elements from Stream(1, 2, 3, 4, 5) $exer39
     an empty Stream after taking 2 from Stream() $exer40
  zipWith should:
     return Stream(2, 4, 6) for Stream(1, 2, 3) and parameter Stream(1, 2, 3) $exer41
     return Stream(2, 4, 6) for Stream(1, 2, 3, 4) and parameter Stream(1, 2, 3) $exer42
     return Stream(2, 4, 6) for Stream(1, 2, 3) and parameter Stream(1, 2, 3, 4) $exer43
  zipAll should:
     return Stream(Some(1) -> Some('a'), None -> Some('b')) for Stream(1) and given parameter Stream('a', 'b') $exer44
  startsWith should:
     return true for Stream(1, 2) when invoked on Stream(1, 2, 3) $exer45
     return false for Stream(2, 3) when invoked on Stream(1, 2, 3) $exer46
  tails should:
     return Stream(Stream(1, 2), Stream(2)) for Stream(1, 2) $exer47
  scanRight should:
     return Stream(6, 5, 3, 0) when invoked like `Stream(1, 2, 3).scanRight(0)(_ + _)` $exer48
  """

  // Infinite recursive stream of ones used by the non-termination checks (exer19-21).
  val ones: Stream[Integer] = Stream.cons(1, ones)

  def exer1 =
    Stream(1, 2, 3, 4, 5).toList must_== List(1, 2, 3, 4, 5)
  def exer2 =
    Stream(1, 2, 3, 4, 5).take(3).toList must_== List(1, 2, 3)
  def exer3 =
    Stream.empty.take(2) must_== Stream.empty
  def exer4 =
    Stream(1, 2, 3, 4, 5).drop(2).toList must_== List(3, 4, 5)
  def exer5 =
    Stream(1, 2, 3).drop(0).toList must_== List(1, 2, 3)
  def exer6 =
    Stream(1, 2, 3).takeWhile(_ == 1).toList must_== List(1)
  def exer7 =
    Stream(1, 2, 3).takeWhile(_ == 2).toList must_== List()
  def exer8 =
    Stream(1, 2, 3).forAll(_ > 0) must_== true
  def exer9 = {
    val s: Stream[Int] = Stream(1, 2, 0, 4, 5)
    var count: Int = 0
    // forAll must short-circuit: the predicate runs only until the first false.
    s.forAll(a => {
      count += 1
      a > 0
    }) must_== false
    count must_== 3
  }
  def exer10 =
    Stream(1, 2, 3, 4, 5).takeWhile2(_ < 4).toList must_== List(1, 2, 3)
  def exer11 =
    Stream(1, 2, 3).takeWhile2(_ < 0).toList must_== List()
  def exer12 =
    Stream(1, 2, 3).headOption() must_== Some(1)
  def exer13 =
    Stream().headOption() must_== None
  def exer14 =
    Stream(1, 2, 3).map(x => x * x).toList must_== List(1, 4, 9)
  def exer15 =
    Stream(1, 2, 3).filter(_ < 0).toList must_== List()
  def exer16 =
    Stream(1, 2, 3).filter(x => (x % 2) == 1).toList must_== List(1, 3)
  def exer17 =
    Stream(1, 2).append(Stream(3, 4)).toList must_== List(1, 2, 3, 4)
  def exer18 =
    Stream(1, 2, 3).flatMap(x => Stream(x + 1)).toList must_== List(2, 3, 4)
  def exer19 =
    ones.map(_ + 1).exists(_ % 2 == 0) must_== true
  def exer20 = {
    val stream = ones.takeWhile(_ == 1)
    stream must_!= Stream.empty
  }
  def exer21 = {
    val stream = ones.forAll(_ != 1)
    stream must_!= Stream.empty
  }
  def exer22 =
    Stream(-1, 0, 1, 2, 3).exists(_ > 2) must_== true
  def exer23 =
    Stream(-1, 0, 1, 2).exists(_ > 2) must_== false
  def exer24 =
    Stream(1, 2, 3, 4).find(x => x == 1 || x == 4) must_== Some(1)
  def exer25 =
    Stream(1, 2, 3, 4).find(_ == 1) must_== Some(1)
  def exer26 =
    Stream(1, 2, 3, 4).find(_ == 0) must_== None
  def exer27 =
    Stream().find(_ == 0) must_== None
  def exer28 = {
    val c = Stream.constant(1)
    c must_!= Stream.empty
  }
  def exer29 =
    Stream.constant(1).take(4).toList must_== List(1, 1, 1, 1)
  def exer30 = {
    val f = Stream.from(5)
    f must_!= Stream.empty
  }
  def exer31 =
    Stream.from(5).take(4).toList must_== List(5, 6, 7, 8)
  def exer32 =
    Stream.fibs.take(7).toList must_== List(0, 1, 1, 2, 3, 5, 8)
  def exer33 =
    Stream.onesU.take(4).toList must_== List(1, 1, 1, 1)
  def exer34 =
    Stream.constantU(5).take(4).toList must_== List(5, 5, 5, 5)
  def exer35 =
    Stream.fibsU.take(7).toList must_== List(0, 1, 1, 2, 3, 5, 8)
  def exer36 =
    Stream(1, 2, 3).mapU(x => x * x).toList must_== List(1, 4, 9)
  def exer37 =
    Stream(1, 2, 3).takeWhileU(_ == 1).toList must_== List(1)
  def exer38 =
    Stream(1, 2, 3).takeWhileU(_ == 2).toList must_== List()
  def exer39 =
    Stream(1, 2, 3, 4, 5).takeU(3).toList must_== List(1, 2, 3)
  def exer40 =
    Stream.empty.takeU(2) must_== Stream.empty
  def exer41 =
    Stream(1, 2, 3).zipWith(Stream(1, 2, 3))( _ + _ ).toList must_== List(2, 4, 6)
  def exer42 =
    Stream(1, 2, 3, 4).zipWith(Stream(1, 2, 3))( _ + _ ).toList must_== List(2, 4, 6)
  def exer43 =
    Stream(1, 2, 3).zipWith(Stream(1, 2, 3, 4))( _ + _ ).toList must_== List(2, 4, 6)
  def exer44 =
    Stream(1).zipAll(Stream('a', 'b')).toList must_== List(Some(1) -> Some('a'), None -> Some('b'))
  def exer45 =
    Stream(1, 2, 3) startsWith Stream(1, 2) must_== true
  def exer46 =
    Stream(1, 2, 3) startsWith Stream(2, 3) must_== false
  def exer47 =
    Stream(1, 2).tails.toList.map(s => s.toList) must_== List(List(1, 2), List(2))
  def exer48 =
    Stream(1, 2, 3).scanRight(0)(_ + _).toList must_== List(6, 5, 3, 0)
} | higuaro/scala-training | src/test/scala/fpinscala/lazyness/StreamSpec.scala | Scala | mit | 8,480 |
/*
Copyright (c) 2013-2016 Karol M. Stasiak
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package io.github.karols.units.arrays
import io.github.karols.units._
import scala.collection.mutable._
/** Factory and utility methods for [[DoubleAArray]]. */
object DoubleAArray {
	/** Creates an array of given elements. */
	def apply[A<:AffineSpace](elems: DoubleA[A]*) = new DoubleAArray[A](elems.map{_.value}.toArray)
	/** Creates an array from raw doubles, tagging each element with the affine space `A`. */
	def at[A<:AffineSpace](elems: Double*) = new DoubleAArray[A](Array[Double](elems:_*))
	/** Concatenates all arrays into a single array. */
	def concat[A<:AffineSpace](arrays: DoubleAArray[A]*) =
		new DoubleAArray[A](Array.concat(arrays.map{_.underlying}: _*))
	/** Copies `length` elements from one array to another.
	  * (Explicit `: Unit =` replaces the deprecated procedure syntax.) */
	def copy[A<:AffineSpace](
		src: DoubleAArray[A], srcPos: Int,
		dest: DoubleAArray[A], destPos: Int,
		length: Int): Unit = {
		Array.copy(src.underlying, srcPos, dest.underlying, destPos, length)
	}
	/** Returns an array of length 0. */
	def empty[A<:AffineSpace] = new DoubleAArray[A](Array.empty[Double])
	/** Returns an array that contains the results of some element computation a number of times;
	  * the by-name `elem` is re-evaluated for every slot. */
	def fill[A<:AffineSpace](n: Int)(elem: =>DoubleA[A]) = {
		new DoubleAArray(Array.fill(n)(elem.value))
	}
	/** Returns an array that contains a constant element a number of times;
	  * `elem` is evaluated exactly once. */
	def fillUniform[A<:AffineSpace](n: Int)(elem: DoubleA[A]) = {
		val elemValue = elem.value
		new DoubleAArray(Array.fill(n)(elemValue))
	}
	/** Enables sequence pattern matching: `case DoubleAArray(a, b, c)`. */
	def unapplySeq[A<:AffineSpace](arr: DoubleAArray[A]) = Some(arr)
	//TODO: more
}
/** Mutable builder producing a [[DoubleAArray]], backed by an unboxed double builder. */
class DoubleAArrayBuilder[A<:AffineSpace] extends Builder[DoubleA[A], DoubleAArray[A]] {
	val underlying = new ArrayBuilder.ofDouble
	/** Appends one element (unboxed) and returns this builder for chaining. */
	def +=(elem: DoubleA[A]) = {
		underlying += elem.value
		this
	}
	/** Discards all elements added so far. Explicit `: Unit` and `()` because
	  * the delegated `clear()` is side-effecting. */
	def clear(): Unit = underlying.clear()
	/** Builds the result array from the accumulated values. */
	def result() = new DoubleAArray[A](underlying.result())
}
/** Mutable fixed-size array of unboxed [[io.github.karols.units.DoubleA]]. */
final class DoubleAArray[A<:AffineSpace] private[arrays] (private[arrays] val underlying: Array[Double])
	extends IndexedSeq[DoubleA[A]]
	with ArrayLike[DoubleA[A], DoubleAArray[A]]{
	/** Creates a zero-initialised array of the given length.
	  * (`= this(...)` replaces the deprecated procedure-style constructor body.) */
	def this(length: Int) = this(new Array[Double](length))
	override val length = underlying.length
	override def stringPrefix = "DoubleAArray"
	override def newBuilder = new DoubleAArrayBuilder[A]
	/** Returns the element at `index`, tagged with the affine space `A`. */
	def apply(index: Int) = underlying(index).at[A]
	/** Replaces the element at `index` (explicit `: Unit =` instead of procedure syntax). */
	def update(index: Int, elem: DoubleA[A]): Unit = {
		underlying(index) = elem.value
	}
	/** The average of all values in the array; NaN for an empty array (0.0 / 0). */
	def avg = DoubleA[A](underlying.sum / length)
} | KarolS/units | units/src/main/scala/io/github/karols/units/arrays/DoubleAArray.scala | Scala | mit | 3,438 |
/*
* Copyright 2015 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.redshift
import java.net.URI
import java.sql.Connection
import scala.util.Random
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, FileSystem}
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.test.TestHiveContext
import org.scalatest.{BeforeAndAfterEach, BeforeAndAfterAll, Matchers}
/**
 * Base class for writing integration tests which run against a real Redshift cluster.
 *
 * Lifecycle: `beforeAll` creates a local SparkContext (with S3 credentials wired into
 * the Hadoop configuration) and a shared JDBC connection; `beforeEach` creates a fresh
 * TestHiveContext; `afterAll` deletes the S3 scratch directory and tears everything down.
 * Required environment variables are read eagerly at construction time and the suite
 * fails fast if any is missing.
 */
trait IntegrationSuiteBase
  extends QueryTest
  with Matchers
  with BeforeAndAfterAll
  with BeforeAndAfterEach {
  // Reads a required configuration value from the environment, failing the suite if unset.
  protected def loadConfigFromEnv(envVarName: String): String = {
    Option(System.getenv(envVarName)).getOrElse {
      fail(s"Must set $envVarName environment variable")
    }
  }
  // The following configurations must be set in order to run these tests. In Travis, these
  // environment variables are set using Travis's encrypted environment variables feature:
  // http://docs.travis-ci.com/user/environment-variables/#Encrypted-Variables
  // JDBC URL listed in the AWS console (should not contain username and password).
  protected val AWS_REDSHIFT_JDBC_URL: String = loadConfigFromEnv("AWS_REDSHIFT_JDBC_URL")
  protected val AWS_REDSHIFT_USER: String = loadConfigFromEnv("AWS_REDSHIFT_USER")
  protected val AWS_REDSHIFT_PASSWORD: String = loadConfigFromEnv("AWS_REDSHIFT_PASSWORD")
  protected val AWS_ACCESS_KEY_ID: String = loadConfigFromEnv("TEST_AWS_ACCESS_KEY_ID")
  protected val AWS_SECRET_ACCESS_KEY: String = loadConfigFromEnv("TEST_AWS_SECRET_ACCESS_KEY")
  // Path to a directory in S3 (e.g. 's3n://bucket-name/path/to/scratch/space').
  private val AWS_S3_SCRATCH_SPACE: String = loadConfigFromEnv("AWS_S3_SCRATCH_SPACE")
  require(AWS_S3_SCRATCH_SPACE.contains("s3n"), "must use s3n:// URL")
  // Full JDBC URL including credentials; never log this value.
  protected val jdbcUrl: String = {
    s"$AWS_REDSHIFT_JDBC_URL?user=$AWS_REDSHIFT_USER&password=$AWS_REDSHIFT_PASSWORD"
  }
  /**
   * Random suffix appended to table and directory names in order to avoid collisions
   * between separate Travis builds.
   */
  protected val randomSuffix: String = Math.abs(Random.nextLong()).toString
  // Per-build S3 scratch directory; deleted in afterAll.
  protected val tempDir: String = AWS_S3_SCRATCH_SPACE + randomSuffix + "/"
  /**
   * Spark Context with Hadoop file overridden to point at our local test data file for this suite,
   * no-matter what temp directory was generated and requested.
   */
  protected var sc: SparkContext = _
  // Recreated fresh for every test in beforeEach.
  protected var sqlContext: SQLContext = _
  // Shared JDBC connection to the Redshift cluster, opened once in beforeAll.
  protected var conn: Connection = _
  override def beforeAll(): Unit = {
    super.beforeAll()
    sc = new SparkContext("local", "RedshiftSourceSuite")
    // Bypass Hadoop's FileSystem caching mechanism so that we don't cache the credentials:
    sc.hadoopConfiguration.setBoolean("fs.s3.impl.disable.cache", true)
    sc.hadoopConfiguration.setBoolean("fs.s3n.impl.disable.cache", true)
    sc.hadoopConfiguration.set("fs.s3n.awsAccessKeyId", AWS_ACCESS_KEY_ID)
    sc.hadoopConfiguration.set("fs.s3n.awsSecretAccessKey", AWS_SECRET_ACCESS_KEY)
    conn = DefaultJDBCWrapper.getConnector(None, jdbcUrl)
  }
  override def afterAll(): Unit = {
    // Nested try/finally blocks guarantee each cleanup step (S3 scratch deletion,
    // JDBC close, SparkContext stop, super teardown) runs even if an earlier one throws.
    try {
      val conf = new Configuration(false)
      conf.set("fs.s3n.awsAccessKeyId", AWS_ACCESS_KEY_ID)
      conf.set("fs.s3n.awsSecretAccessKey", AWS_SECRET_ACCESS_KEY)
      // Bypass Hadoop's FileSystem caching mechanism so that we don't cache the credentials:
      conf.setBoolean("fs.s3.impl.disable.cache", true)
      conf.setBoolean("fs.s3n.impl.disable.cache", true)
      val fs = FileSystem.get(URI.create(tempDir), conf)
      fs.delete(new Path(tempDir), true)
      fs.close()
    } finally {
      try {
        conn.close()
      } finally {
        try {
          sc.stop()
        } finally {
          super.afterAll()
        }
      }
    }
  }
  override protected def beforeEach(): Unit = {
    super.beforeEach()
    sqlContext = new TestHiveContext(sc)
  }
}
| huaxingao/spark-redshift | src/it/scala/com/databricks/spark/redshift/IntegrationSuiteBase.scala | Scala | apache-2.0 | 4,559 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package detailedtopics.configuration.gzipencoding
import akka.stream.ActorMaterializer
import play.api.test._
/**
 * Documentation tests for configuring Play's GzipFilter with a custom
 * shouldGzip strategy. The `//#should-gzip` markers delimit the snippet
 * included in the manual and must not be moved or edited.
 */
class GzipEncoding extends PlaySpecification {
  import javax.inject.Inject
  import play.api.http.DefaultHttpFilters
  import play.filters.gzip.GzipFilter
  // Sample filter chain containing only the gzip filter.
  class Filters @Inject() (gzipFilter: GzipFilter)
    extends DefaultHttpFilters(gzipFilter)
  "gzip filter" should {
    "allow custom strategies for when to gzip (Scala)" in {
      import play.api.mvc._
      running() { app =>
        implicit val mat = ActorMaterializer()(app.actorSystem)
        def Action = app.injector.instanceOf[DefaultActionBuilder]
        val filter =
        //#should-gzip
        new GzipFilter(shouldGzip = (request, response) =>
          response.body.contentType.exists(_.startsWith("text/html")))
        //#should-gzip
        // "foo" is served as text/plain, so the strategy must NOT gzip it:
        // no Content-Encoding header is expected.
        header(CONTENT_ENCODING,
          filter(Action(Results.Ok("foo")))(gzipRequest).run()
        ) must beNone
      }
    }
    "allow custom strategies for when to gzip (Java)" in {
      import play.api.mvc._
      val app = play.api.inject.guice.GuiceApplicationBuilder().build()
      running(app) {
        implicit val mat = ActorMaterializer()(app.actorSystem)
        def Action = app.injector.instanceOf[DefaultActionBuilder]
        // CustomFilters is the Java-side sample; its first filter is the gzip filter.
        val filter = (new CustomFilters(mat)).getFilters.get(0)
        // Same expectation as the Scala case: text/plain response stays unencoded.
        header(CONTENT_ENCODING,
          filter(Action(Results.Ok("foo")))(gzipRequest).run()
        ) must beNone
      }
    }
  }
  // A request advertising gzip support, so encoding depends solely on the strategy.
  def gzipRequest = FakeRequest().withHeaders(ACCEPT_ENCODING -> "gzip")
}
| Shenker93/playframework | documentation/manual/working/commonGuide/filters/code/GzipEncoding.scala | Scala | apache-2.0 | 1,646 |
package net.kemuridama.kafcon.protocol
import net.kemuridama.kafcon.model.SystemMetrics
/** JSON (de)serialisation support for [[SystemMetrics]]. */
trait SystemMetricsJsonProtocol extends JsonProtocol {
  // jsonFormat8 derives the format from SystemMetrics' 8-parameter constructor.
  implicit val systemMetricsFormat = jsonFormat8(SystemMetrics)
}
| kemuridama/kafcon | src/main/scala/net/kemuridama/kafcon/protocol/SystemMetricsJsonProtocol.scala | Scala | mit | 213 |
/*
* ActorInputStream.scala
*
* Updated: Sep 19, 2014
*
* Copyright (c) 2014, CodeMettle
*/
package com.codemettle.akkasolr
package util
import java.io.InputStream
import com.codemettle.akkasolr.util.ActorInputStream._
import akka.actor._
import akka.pattern._
import akka.util.{ByteString, Timeout}
import scala.concurrent.Await
import scala.concurrent.duration._
/**
* @author steven
*
*/
/** Message protocol and buffering actor backing [[ActorInputStream]]. */
object ActorInputStream {
    // Producer -> buffer: append a chunk of bytes.
    private case class EnqueueBytes(bytes: ByteString)
    // Producer -> buffer: no further bytes will arrive (end of stream).
    private case object TriggerStreamComplete
    // Reader -> buffer: request up to `max` bytes.
    private case class DequeueBytes(max: Int)
    // Buffer -> reader: the dequeued bytes, or None to signal end of stream.
    private case class DequeuedBytes(bytes: Option[ByteString])
    /**
     * Accumulates enqueued bytes and answers DequeueBytes requests.
     * A request arriving while the buffer is empty (and the stream is not yet
     * finished) is stashed; it is replayed via unstashAll() once data arrives
     * or the stream is marked complete.
     */
    private class ByteBuffer extends Actor with Stash {
        private var byteStr = ByteString.empty
        private var finished = false
        def receive = {
            case TriggerStreamComplete =>
                finished = true
                unstashAll() // wake any stashed reader so it can observe EOF
            case EnqueueBytes(bytes) =>
                byteStr ++= bytes
                unstashAll() // wake any stashed reader now that data is available
            case DequeueBytes(max) =>
                if (byteStr.isEmpty && finished)
                    sender() ! DequeuedBytes(None) // end of stream
                else if (byteStr.isEmpty)
                    stash() // no data yet; retry once more messages arrive
                else if (byteStr.size <= max) {
                    // Whole buffer fits in the request: hand it over and reset.
                    sender() ! DequeuedBytes(Some(byteStr))
                    byteStr = ByteString.empty
                } else {
                    // More buffered than requested: send `max` bytes, keep the rest.
                    val (toSend, toKeep) = byteStr splitAt max
                    sender() ! DequeuedBytes(Some(toSend))
                    byteStr = toKeep
                }
        }
    }
}
/**
 * A [[java.io.InputStream]] fed asynchronously through an actor: producers push
 * chunks with [[enqueueBytes]] and signal completion with [[streamFinished]];
 * reads block until data is available, the stream ends, or the 90s ask timeout
 * expires.
 */
class ActorInputStream(implicit arf: ActorRefFactory) extends InputStream {
    private implicit val timeout = Timeout(90.seconds)
    private val buffer = arf.actorOf(Props[ByteBuffer])
    /** Signals that no further bytes will be enqueued; readers then see EOF. */
    def streamFinished(): Unit = buffer ! TriggerStreamComplete
    /** Appends a chunk of bytes to the stream. */
    def enqueueBytes(bytes: ByteString): Unit = buffer ! EnqueueBytes(bytes)
    /** Stops the buffering actor; subsequent reads will fail with a timeout. */
    override def close(): Unit = buffer ! PoisonPill
    override def read(): Int = {
        val arr = new Array[Byte](1)
        // Mask to 0-255 as required by the InputStream contract. Returning the raw
        // signed byte (as before) falsely reported EOF (-1) for 0xFF and negative
        // values for every byte >= 0x80.
        if (read(arr) == -1) -1 else arr(0) & 0xff
    }
    override def read(b: Array[Byte], off: Int, len: Int): Int = {
        if (b == null)
            throw new NullPointerException
        else if (off < 0 || len < 0 || len > b.length - off)
            throw new IndexOutOfBoundsException
        else if (len == 0)
            0
        else {
            // Block until the buffer actor answers; the ask's 90s timeout bounds the
            // wait even though Await uses Duration.Inf (the future fails on timeout).
            Await.result(buffer ? DequeueBytes(len), Duration.Inf) match {
                case DequeuedBytes(None) => -1
                case DequeuedBytes(Some(byteStr)) =>
                    val bytes = byteStr.toArray
                    System.arraycopy(bytes, 0, b, off, bytes.length)
                    bytes.length
            }
        }
    }
}
| CodeMettle/akka-solr | src/main/scala/com/codemettle/akkasolr/util/ActorInputStream.scala | Scala | apache-2.0 | 2,748 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.zk
import java.io.File
import java.util._
import org.apache.zookeeper._
import org.apache.zookeeper.data._
import org.apache.zookeeper.server.ZooKeeperServer
import org.apache.zookeeper.server.NIOServerCnxn
import org.apache.zookeeper.ZooDefs
import kafka.TestUtils
import kafka.utils._
import org.I0Itec.zkclient.ZkClient
import java.net.InetSocketAddress
/**
 * In-process ZooKeeper server for tests. Listens on the port parsed from
 * `connectString` (expected form "host:port"), stores snapshots and logs in
 * fresh temp directories, and exposes a connected [[ZkClient]].
 */
class EmbeddedZookeeper(val connectString: String) {
  val snapshotDir = TestUtils.tempDir()
  val logDir = TestUtils.tempDir()
  val zookeeper = new ZooKeeperServer(snapshotDir, logDir, 200)
  // connectString must contain a ':'; the segment after the first one is the port.
  val port = connectString.split(":")(1).toInt
  val factory = new NIOServerCnxn.Factory(new InetSocketAddress(port))
  factory.startup(zookeeper)
  val client = new ZkClient(connectString)
  client.setZkSerializer(StringSerializer)
  /** Closes the client, stops the server and removes the temp directories. */
  def shutdown(): Unit = {
    // Close the client first (it was previously leaked) while the server is still up.
    client.close()
    factory.shutdown()
    Utils.rm(logDir)
    Utils.rm(snapshotDir)
  }
}
| jinfei21/kafka | test/unit/kafka/zk/EmbeddedZookeeper.scala | Scala | apache-2.0 | 1,491 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import org.scalatestplus.play.PlaySpec
import play.api.libs.json._
import uk.gov.hmrc.http.cache.client.CacheMap
class CascadeUpsertSpec extends PlaySpec {
"using the apply method for a key that has no special function" when {
"the key doesn't already exists" should {
"add the key to the cache map" in {
val originalCacheMap = new CacheMap("id", Map())
val cascadeUpsert = new CascadeUpsert
val result = cascadeUpsert("key", "value", originalCacheMap)
result.data mustBe Map("key" -> JsString("value"))
}
}
"data already exists for that key" must {
"replace the value held against the key" in {
val originalCacheMap = new CacheMap("id", Map("key" -> JsString("original value")))
val cascadeUpsert = new CascadeUpsert
val result = cascadeUpsert("key", "new value", originalCacheMap)
result.data mustBe Map("key" -> JsString("new value"))
}
}
}
"addRepeatedValue" when {
"the key doesn't already exist" must {
"add the key to the cache map and save the value in a sequence" in {
val originalCacheMap = new CacheMap("id", Map())
val cascadeUpsert = new CascadeUpsert
val result = cascadeUpsert.addRepeatedValue("key", "value", originalCacheMap)
result.data mustBe Map("key" -> Json.toJson(Seq("value")))
}
}
"the key already exists" must {
"add the new value to the existing sequence" in {
val originalCacheMap = new CacheMap("id", Map("key" -> Json.toJson(Seq("value"))))
val cascadeUpsert = new CascadeUpsert
val result = cascadeUpsert.addRepeatedValue("key", "new value", originalCacheMap)
result.data mustBe Map("key" -> Json.toJson(Seq("value", "new value")))
}
}
}
}
| hmrc/vat-registration-frontend | test/utils/CascadeUpsertSpec.scala | Scala | apache-2.0 | 2,412 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io._
import java.net.URI
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
import com.google.common.io.ByteStreams
import org.apache.commons.io.FileUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, FSDataInputStream, Path}
import org.scalatest.{BeforeAndAfterEach, Matchers}
import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.TestUtils
import org.apache.spark.TestUtils.JavaSourceFromString
import org.apache.spark.api.r.RUtils
import org.apache.spark.deploy.SparkSubmit._
import org.apache.spark.deploy.SparkSubmitUtils.MavenCoordinate
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.UI._
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.scheduler.EventLoggingListener
import org.apache.spark.util.{CommandLineUtils, ResetSystemProperties, Utils}
/**
 * Mixin for testing code paths that call SparkSubmit's exit function: runs
 * `main` on a background thread with `exitFn` stubbed out and asserts that the
 * expected message was either printed (clean exit) or carried by the thrown
 * exception. `exitFn` is always restored afterwards.
 */
trait TestPrematureExit {
  suite: SparkFunSuite =>

  // Sink that discards all bytes; backs BufferPrintStream below.
  private val noOpOutputStream = new OutputStream {
    def write(b: Int): Unit = {}
  }

  /** Simple PrintStream that reads data into a buffer */
  private class BufferPrintStream extends PrintStream(noOpOutputStream) {
    var lineBuffer = ArrayBuffer[String]()
    // scalastyle:off println
    override def println(line: String): Unit = {
      lineBuffer += line
    }
    // scalastyle:on println
  }

  /** Returns true if the script exits and the given search string is printed. */
  private[spark] def testPrematureExit(
      input: Array[String],
      searchString: String,
      mainObject: CommandLineUtils = SparkSubmit): Unit = {
    val printStream = new BufferPrintStream()
    mainObject.printStream = printStream

    // Stub out the exit function so a "clean" exit is recorded instead of killing the JVM.
    @volatile var exitedCleanly = false
    val original = mainObject.exitFn
    mainObject.exitFn = (_) => exitedCleanly = true
    try {
      @volatile var exception: Exception = null
      val thread = new Thread {
        override def run(): Unit = try {
          mainObject.main(input)
        } catch {
          // Capture the exception to check whether the exception contains searchString or not
          case e: Exception => exception = e
        }
      }
      thread.start()
      thread.join()
      if (exitedCleanly) {
        // Clean exit: the message must have been printed.
        val joined = printStream.lineBuffer.mkString("\\n")
        assert(joined.contains(searchString))
      } else {
        // Abnormal exit: the thrown exception must mention the message,
        // otherwise rethrow it so the test fails with the real cause.
        assert(exception != null)
        if (!exception.getMessage.contains(searchString)) {
          throw exception
        }
      }
    } finally {
      mainObject.exitFn = original
    }
  }
}
// Note: this suite mixes in ResetSystemProperties because SparkSubmit.main() sets a bunch
// of properties that needed to be cleared after tests.
class SparkSubmitSuite
extends SparkFunSuite
with Matchers
with BeforeAndAfterEach
with ResetSystemProperties
with TimeLimits
with TestPrematureExit {
import SparkSubmitSuite._
// Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
implicit val defaultSignaler: Signaler = ThreadSignaler
private val emptyIvySettings = File.createTempFile("ivy", ".xml")
FileUtils.write(emptyIvySettings, "<ivysettings />", StandardCharsets.UTF_8)
private val submit = new SparkSubmit()
override def beforeEach() {
super.beforeEach()
}
// scalastyle:off println
test("prints usage on empty input") {
testPrematureExit(Array.empty[String], "Usage: spark-submit")
}
test("prints usage with only --help") {
testPrematureExit(Array("--help"), "Usage: spark-submit")
}
test("prints error with unrecognized options") {
testPrematureExit(Array("--blarg"), "Unrecognized option '--blarg'")
testPrematureExit(Array("-bleg"), "Unrecognized option '-bleg'")
}
test("handle binary specified but not class") {
val jar = TestUtils.createJarWithClasses(Seq("SparkSubmitClassA"))
testPrematureExit(Array(jar.toString()), "No main class")
}
test("handles arguments with --key=val") {
val clArgs = Seq(
"--jars=one.jar,two.jar,three.jar",
"--name=myApp",
"--class=org.FooBar",
SparkLauncher.NO_RESOURCE)
val appArgs = new SparkSubmitArguments(clArgs)
appArgs.jars should include regex (".*one.jar,.*two.jar,.*three.jar")
appArgs.name should be ("myApp")
}
test("handles arguments to user program") {
val clArgs = Seq(
"--name", "myApp",
"--class", "Foo",
"userjar.jar",
"some",
"--weird", "args")
val appArgs = new SparkSubmitArguments(clArgs)
appArgs.childArgs should be (Seq("some", "--weird", "args"))
}
test("handles arguments to user program with name collision") {
val clArgs = Seq(
"--name", "myApp",
"--class", "Foo",
"userjar.jar",
"--master", "local",
"some",
"--weird", "args")
val appArgs = new SparkSubmitArguments(clArgs)
appArgs.childArgs should be (Seq("--master", "local", "some", "--weird", "args"))
}
test("print the right queue name") {
val clArgs = Seq(
"--name", "myApp",
"--class", "Foo",
"--conf", "spark.yarn.queue=thequeue",
"userjar.jar")
val appArgs = new SparkSubmitArguments(clArgs)
appArgs.queue should be ("thequeue")
appArgs.toString should include ("thequeue")
}
test("SPARK-24241: do not fail fast if executor num is 0 when dynamic allocation is enabled") {
val clArgs1 = Seq(
"--name", "myApp",
"--class", "Foo",
"--num-executors", "0",
"--conf", s"${DYN_ALLOCATION_ENABLED.key}=true",
"thejar.jar")
new SparkSubmitArguments(clArgs1)
val clArgs2 = Seq(
"--name", "myApp",
"--class", "Foo",
"--num-executors", "0",
"--conf", s"${DYN_ALLOCATION_ENABLED.key}=false",
"thejar.jar")
val e = intercept[SparkException](new SparkSubmitArguments(clArgs2))
assert(e.getMessage.contains("Number of executors must be a positive number"))
}
test("specify deploy mode through configuration") {
val clArgs = Seq(
"--master", "yarn",
"--conf", "spark.submit.deployMode=client",
"--class", "org.SomeClass",
"thejar.jar"
)
val appArgs = new SparkSubmitArguments(clArgs)
val (_, _, conf, _) = submit.prepareSubmitEnvironment(appArgs)
appArgs.deployMode should be ("client")
conf.get(SUBMIT_DEPLOY_MODE) should be ("client")
// Both cmd line and configuration are specified, cmdline option takes the priority
val clArgs1 = Seq(
"--master", "yarn",
"--deploy-mode", "cluster",
"--conf", "spark.submit.deployMode=client",
"--class", "org.SomeClass",
"thejar.jar"
)
val appArgs1 = new SparkSubmitArguments(clArgs1)
val (_, _, conf1, _) = submit.prepareSubmitEnvironment(appArgs1)
appArgs1.deployMode should be ("cluster")
conf1.get(SUBMIT_DEPLOY_MODE) should be ("cluster")
// Neither cmdline nor configuration are specified, client mode is the default choice
val clArgs2 = Seq(
"--master", "yarn",
"--class", "org.SomeClass",
"thejar.jar"
)
val appArgs2 = new SparkSubmitArguments(clArgs2)
appArgs2.deployMode should be (null)
val (_, _, conf2, _) = submit.prepareSubmitEnvironment(appArgs2)
appArgs2.deployMode should be ("client")
conf2.get(SUBMIT_DEPLOY_MODE) should be ("client")
}
test("handles YARN cluster mode") {
  // Submit with every major YARN option in cluster mode and verify that
  // prepareSubmitEnvironment translates CLI flags into the YARN client's
  // child arguments, the local classpath, and Spark conf entries.
  val clArgs = Seq(
    "--deploy-mode", "cluster",
    "--master", "yarn",
    "--executor-memory", "5g",
    "--executor-cores", "5",
    "--class", "org.SomeClass",
    "--jars", "one.jar,two.jar,three.jar",
    "--driver-memory", "4g",
    "--queue", "thequeue",
    "--files", "file1.txt,file2.txt",
    "--archives", "archive1.txt,archive2.txt",
    "--num-executors", "6",
    "--name", "beauty",
    "--conf", "spark.ui.enabled=false",
    "thejar.jar",
    "arg1", "arg2")
  val appArgs = new SparkSubmitArguments(clArgs)
  val (childArgs, classpath, conf, mainClass) = submit.prepareSubmitEnvironment(appArgs)
  val childArgsStr = childArgs.mkString(" ")
  childArgsStr should include ("--class org.SomeClass")
  childArgsStr should include ("--arg arg1 --arg arg2")
  childArgsStr should include regex ("--jar .*thejar.jar")
  mainClass should be (SparkSubmit.YARN_CLUSTER_SUBMIT_CLASS)
  // In yarn cluster mode, also adding jars to classpath
  classpath(0) should endWith ("thejar.jar")
  classpath(1) should endWith ("one.jar")
  classpath(2) should endWith ("two.jar")
  classpath(3) should endWith ("three.jar")
  conf.get("spark.executor.memory") should be ("5g")
  conf.get("spark.driver.memory") should be ("4g")
  conf.get("spark.executor.cores") should be ("5")
  conf.get("spark.yarn.queue") should be ("thequeue")
  conf.get("spark.yarn.dist.jars") should include regex (".*one.jar,.*two.jar,.*three.jar")
  conf.get("spark.yarn.dist.files") should include regex (".*file1.txt,.*file2.txt")
  conf.get("spark.yarn.dist.archives") should include regex (".*archive1.txt,.*archive2.txt")
  conf.get("spark.app.name") should be ("beauty")
  conf.get(UI_ENABLED) should be (false)
  sys.props("SPARK_SUBMIT") should be ("true")
}
test("handles YARN client mode") {
  // Submit with every major YARN option in client mode and verify that the
  // user class is run directly, the user jar plus --jars land on the local
  // classpath, and CLI flags are mapped onto Spark conf entries.
  val clArgs = Seq(
    "--deploy-mode", "client",
    "--master", "yarn",
    "--executor-memory", "5g",
    "--executor-cores", "5",
    "--class", "org.SomeClass",
    "--jars", "one.jar,two.jar,three.jar",
    "--driver-memory", "4g",
    "--queue", "thequeue",
    "--files", "file1.txt,file2.txt",
    "--archives", "archive1.txt,archive2.txt",
    "--num-executors", "6",
    "--name", "trill",
    "--conf", "spark.ui.enabled=false",
    "thejar.jar",
    "arg1", "arg2")
  val appArgs = new SparkSubmitArguments(clArgs)
  val (childArgs, classpath, conf, mainClass) = submit.prepareSubmitEnvironment(appArgs)
  childArgs.mkString(" ") should be ("arg1 arg2")
  mainClass should be ("org.SomeClass")
  classpath should have length (4)
  classpath(0) should endWith ("thejar.jar")
  classpath(1) should endWith ("one.jar")
  classpath(2) should endWith ("two.jar")
  classpath(3) should endWith ("three.jar")
  conf.get("spark.app.name") should be ("trill")
  conf.get("spark.executor.memory") should be ("5g")
  conf.get("spark.executor.cores") should be ("5")
  conf.get("spark.yarn.queue") should be ("thequeue")
  conf.get("spark.executor.instances") should be ("6")
  conf.get("spark.yarn.dist.files") should include regex (".*file1.txt,.*file2.txt")
  conf.get("spark.yarn.dist.archives") should include regex (".*archive1.txt,.*archive2.txt")
  // BUG FIX: this matcher was previously split across two lines as
  // `should include` <newline> `regex (...)`. Scala's semicolon inference
  // terminates the statement after `include`, leaving the regex operand
  // dangling, so the dist.jars value was never actually asserted. Keeping
  // the whole matcher inside one parenthesized expression makes it execute.
  conf.get("spark.yarn.dist.jars") should (include regex
    (".*one.jar,.*two.jar,.*three.jar,.*thejar.jar"))
  conf.get(UI_ENABLED) should be (false)
  sys.props("SPARK_SUBMIT") should be ("true")
}
// Exercises the modern REST submission gateway path of standalone cluster mode.
test("handles standalone cluster mode") { testStandaloneCluster(useRest = true) }
// Exercises the pre-1.3 legacy (non-REST) gateway path of standalone cluster mode.
test("handles legacy standalone cluster mode") { testStandaloneCluster(useRest = false) }
/**
 * Test whether the launch environment is correctly set up in standalone cluster mode.
 *
 * Checks the child-process arguments, the main class selected by SparkSubmit, and the
 * conf keys propagated by prepareSubmitEnvironment, for either submission gateway.
 *
 * @param useRest whether to use the REST submission gateway introduced in Spark 1.3
 */
private def testStandaloneCluster(useRest: Boolean): Unit = {
  val clArgs = Seq(
    "--deploy-mode", "cluster",
    "--master", "spark://h:p",
    "--class", "org.SomeClass",
    "--supervise",
    "--driver-memory", "4g",
    "--driver-cores", "5",
    "--conf", "spark.ui.enabled=false",
    "thejar.jar",
    "arg1", "arg2")
  val appArgs = new SparkSubmitArguments(clArgs)
  appArgs.useRest = useRest
  val (childArgs, classpath, conf, mainClass) = submit.prepareSubmitEnvironment(appArgs)
  val childArgsStr = childArgs.mkString(" ")
  if (useRest) {
    // REST gateway: primary resource, main class and app args pass through verbatim.
    childArgsStr should endWith ("thejar.jar org.SomeClass arg1 arg2")
    mainClass should be (SparkSubmit.REST_CLUSTER_SUBMIT_CLASS)
  } else {
    // Legacy gateway: flags are rewritten into the Client's "launch" command line.
    childArgsStr should startWith ("--supervise --memory 4g --cores 5")
    childArgsStr should include regex "launch spark://h:p .*thejar.jar org.SomeClass arg1 arg2"
    mainClass should be (SparkSubmit.STANDALONE_CLUSTER_SUBMIT_CLASS)
  }
  // In cluster mode nothing is added to the submitter's local classpath.
  classpath should have size 0
  sys.props("SPARK_SUBMIT") should be ("true")
  val confMap = conf.getAll.toMap
  confMap.keys should contain ("spark.master")
  confMap.keys should contain ("spark.app.name")
  confMap.keys should contain (JARS.key)
  confMap.keys should contain ("spark.driver.memory")
  confMap.keys should contain ("spark.driver.cores")
  confMap.keys should contain ("spark.driver.supervise")
  confMap.keys should contain (UI_ENABLED.key)
  confMap.keys should contain (SUBMIT_DEPLOY_MODE.key)
  conf.get(UI_ENABLED) should be (false)
}
test("handles standalone client mode") {
  // Standalone client mode runs the user class directly: only the user jar is
  // on the classpath and resource flags become conf entries.
  val clArgs = Seq(
    "--deploy-mode", "client",
    "--master", "spark://h:p",
    "--executor-memory", "5g",
    "--total-executor-cores", "5",
    "--class", "org.SomeClass",
    "--driver-memory", "4g",
    "--conf", "spark.ui.enabled=false",
    "thejar.jar",
    "arg1", "arg2")
  val appArgs = new SparkSubmitArguments(clArgs)
  val (childArgs, classpath, conf, mainClass) = submit.prepareSubmitEnvironment(appArgs)
  childArgs.mkString(" ") should be ("arg1 arg2")
  mainClass should be ("org.SomeClass")
  classpath should have length (1)
  classpath(0) should endWith ("thejar.jar")
  conf.get("spark.executor.memory") should be ("5g")
  conf.get("spark.cores.max") should be ("5")
  conf.get(UI_ENABLED) should be (false)
}
test("handles mesos client mode") {
  // Mesos client mode should run the user class directly with the user jar on
  // the classpath, mapping resource flags onto the corresponding conf entries.
  val submitArgs = new SparkSubmitArguments(Seq(
    "--deploy-mode", "client",
    "--master", "mesos://h:p",
    "--executor-memory", "5g",
    "--total-executor-cores", "5",
    "--class", "org.SomeClass",
    "--driver-memory", "4g",
    "--conf", "spark.ui.enabled=false",
    "thejar.jar",
    "arg1", "arg2"))
  val (childArgs, classpath, conf, mainClass) = submit.prepareSubmitEnvironment(submitArgs)
  childArgs.mkString(" ") shouldBe "arg1 arg2"
  mainClass shouldBe "org.SomeClass"
  classpath should have length (1)
  classpath.head should endWith ("thejar.jar")
  conf.get("spark.executor.memory") shouldBe "5g"
  conf.get("spark.cores.max") shouldBe "5"
  conf.get(UI_ENABLED) shouldBe false
}
test("handles k8s cluster mode") {
  // Kubernetes cluster mode: the primary resource, main class and app args are
  // passed as paired child arguments, and the k8s master URL is normalized.
  val clArgs = Seq(
    "--deploy-mode", "cluster",
    "--master", "k8s://host:port",
    "--executor-memory", "5g",
    "--class", "org.SomeClass",
    "--driver-memory", "4g",
    "--conf", "spark.kubernetes.namespace=spark",
    "--conf", "spark.kubernetes.driver.container.image=bar",
    "/home/thejar.jar",
    "arg1")
  val appArgs = new SparkSubmitArguments(clArgs)
  val (childArgs, classpath, conf, mainClass) = submit.prepareSubmitEnvironment(appArgs)
  // Child args come in "--flag value" pairs; fold them into a map for assertions.
  val childArgsMap = childArgs.grouped(2).map(a => a(0) -> a(1)).toMap
  childArgsMap.get("--primary-java-resource") should be (Some("file:/home/thejar.jar"))
  childArgsMap.get("--main-class") should be (Some("org.SomeClass"))
  childArgsMap.get("--arg") should be (Some("arg1"))
  mainClass should be (KUBERNETES_CLUSTER_SUBMIT_CLASS)
  classpath should have length (0)
  // "k8s://host:port" is rewritten to an https URL for the API server.
  conf.get("spark.master") should be ("k8s://https://host:port")
  conf.get("spark.executor.memory") should be ("5g")
  conf.get("spark.driver.memory") should be ("4g")
  conf.get("spark.kubernetes.namespace") should be ("spark")
  conf.get("spark.kubernetes.driver.container.image") should be ("bar")
}
test("handles confs with flag equivalents") {
  // When both a dedicated CLI flag and its --conf equivalent are given, the
  // dedicated flag wins (executor memory); a conf-only value is still honored
  // (master).
  val clArgs = Seq(
    "--deploy-mode", "cluster",
    "--executor-memory", "5g",
    "--class", "org.SomeClass",
    "--conf", "spark.executor.memory=4g",
    "--conf", "spark.master=yarn",
    "thejar.jar",
    "arg1", "arg2")
  val appArgs = new SparkSubmitArguments(clArgs)
  val (_, _, conf, mainClass) = submit.prepareSubmitEnvironment(appArgs)
  conf.get("spark.executor.memory") should be ("5g")
  conf.get("spark.master") should be ("yarn")
  conf.get(SUBMIT_DEPLOY_MODE) should be ("cluster")
  mainClass should be (SparkSubmit.YARN_CLUSTER_SUBMIT_CLASS)
}
test("SPARK-21568 ConsoleProgressBar should be enabled only in shells") {
  // The progress bar must default on for the REPL main class and stay unset
  // for ordinary applications; verify both the conf and the live SparkContext.
  // Unset from system properties since this config is defined in the root pom's test config.
  sys.props -= UI_SHOW_CONSOLE_PROGRESS.key
  val clArgs1 = Seq("--class", "org.apache.spark.repl.Main", "spark-shell")
  val appArgs1 = new SparkSubmitArguments(clArgs1)
  val (_, _, conf1, _) = submit.prepareSubmitEnvironment(appArgs1)
  conf1.get(UI_SHOW_CONSOLE_PROGRESS) should be (true)
  var sc1: SparkContext = null
  try {
    sc1 = new SparkContext(conf1)
    assert(sc1.progressBar.isDefined)
  } finally {
    if (sc1 != null) {
      sc1.stop()
    }
  }
  val clArgs2 = Seq("--class", "org.SomeClass", "thejar.jar")
  val appArgs2 = new SparkSubmitArguments(clArgs2)
  val (_, _, conf2, _) = submit.prepareSubmitEnvironment(appArgs2)
  assert(!conf2.contains(UI_SHOW_CONSOLE_PROGRESS))
  var sc2: SparkContext = null
  try {
    sc2 = new SparkContext(conf2)
    assert(!sc2.progressBar.isDefined)
  } finally {
    if (sc2 != null) {
      sc2.stop()
    }
  }
}
test("launch simple application with spark-submit") {
  // End-to-end smoke test: spawn a real spark-submit process running a
  // trivial application on a local master.
  val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
  val args = Seq(
    "--class", SimpleApplicationTest.getClass.getName.stripSuffix("$"),
    "--name", "testApp",
    "--master", "local",
    "--conf", "spark.ui.enabled=false",
    "--conf", "spark.master.rest.enabled=false",
    unusedJar.toString)
  runSparkSubmit(args)
}
test("launch simple application with spark-submit with redaction") {
  // Run a real spark-submit with a secret in the executor env and event
  // logging enabled, then scan the produced event log to make sure the
  // secret value was redacted before being written out.
  val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
  val fileSystem = Utils.getHadoopFileSystem("/",
    SparkHadoopUtil.get.newConfiguration(new SparkConf()))
  withTempDir { testDir =>
    val testDirPath = new Path(testDir.getAbsolutePath())
    val args = Seq(
      "--class", SimpleApplicationTest.getClass.getName.stripSuffix("$"),
      "--name", "testApp",
      "--master", "local",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--conf", "spark.executorEnv.HADOOP_CREDSTORE_PASSWORD=secret_password",
      "--conf", "spark.eventLog.enabled=true",
      "--conf", "spark.eventLog.testing=true",
      "--conf", s"spark.eventLog.dir=${testDirPath.toUri.toString}",
      "--conf", "spark.hadoop.fs.defaultFS=unsupported://example.com",
      unusedJar.toString)
    runSparkSubmit(args)
    val listStatus = fileSystem.listStatus(testDirPath)
    val logData = EventLoggingListener.openEventLog(listStatus.last.getPath, fileSystem)
    // The raw secret must never appear in any line of the event log.
    Source.fromInputStream(logData).getLines().foreach { line =>
      assert(!line.contains("secret_password"))
    }
  }
}
test("includes jars passed in through --jars") {
  // Run on a local cluster; JarCreationTest loads the two classes by name on
  // executors, so this fails unless --jars are actually distributed.
  val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
  val jar1 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassA"))
  val jar2 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassB"))
  val jarsString = Seq(jar1, jar2).map(j => j.toString).mkString(",")
  val args = Seq(
    "--class", JarCreationTest.getClass.getName.stripSuffix("$"),
    "--name", "testApp",
    "--master", "local-cluster[2,1,1024]",
    "--conf", "spark.ui.enabled=false",
    "--conf", "spark.master.rest.enabled=false",
    "--jars", jarsString,
    unusedJar.toString, "SparkSubmitClassA", "SparkSubmitClassB")
  runSparkSubmit(args)
}
// SPARK-7287
test("includes jars passed in through --packages") {
  // Resolve a Maven coordinate (plus a transitive dep) from a local test Ivy
  // repository and verify both resolved classes are usable on executors.
  val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
  val main = MavenCoordinate("my.great.lib", "mylib", "0.1")
  val dep = MavenCoordinate("my.great.dep", "mylib", "0.1")
  IvyTestUtils.withRepository(main, Some(dep.toString), None) { repo =>
    val args = Seq(
      "--class", JarCreationTest.getClass.getName.stripSuffix("$"),
      "--name", "testApp",
      "--master", "local-cluster[2,1,1024]",
      "--packages", Seq(main, dep).mkString(","),
      "--repositories", repo,
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--conf", s"spark.jars.ivySettings=${emptyIvySettings.getAbsolutePath()}",
      unusedJar.toString,
      "my.great.lib.MyLib", "my.great.dep.MyLib")
    runSparkSubmit(args)
  }
}
test("includes jars passed through spark.jars.packages and spark.jars.repositories") {
  // Same as the --packages test, but driven purely through --conf keys
  // (spark.jars.packages / spark.jars.repositories) instead of CLI flags.
  val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
  val main = MavenCoordinate("my.great.lib", "mylib", "0.1")
  val dep = MavenCoordinate("my.great.dep", "mylib", "0.1")
  IvyTestUtils.withRepository(main, Some(dep.toString), None) { repo =>
    val args = Seq(
      "--class", JarCreationTest.getClass.getName.stripSuffix("$"),
      "--name", "testApp",
      "--master", "local-cluster[2,1,1024]",
      "--conf", "spark.jars.packages=my.great.lib:mylib:0.1,my.great.dep:mylib:0.1",
      "--conf", s"spark.jars.repositories=$repo",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--conf", s"spark.jars.ivySettings=${emptyIvySettings.getAbsolutePath()}",
      unusedJar.toString,
      "my.great.lib.MyLib", "my.great.dep.MyLib")
    runSparkSubmit(args)
  }
}
// TODO(SPARK-9603): Building a package is flaky on Jenkins Maven builds.
// See https://gist.github.com/shivaram/3a2fecce60768a603dac for a error log
ignore("correctly builds R packages included in a jar with --packages") {
  // Requires a local R + SparkR installation; resolves an R-enabled package
  // from a test Ivy repo and runs an R script that exercises it.
  assume(RUtils.isRInstalled, "R isn't installed on this machine.")
  assume(RUtils.isSparkRInstalled, "SparkR is not installed in this build.")
  val main = MavenCoordinate("my.great.lib", "mylib", "0.1")
  val sparkHome = sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!"))
  val rScriptDir = Seq(
    sparkHome, "R", "pkg", "tests", "fulltests", "packageInAJarTest.R").mkString(File.separator)
  assert(new File(rScriptDir).exists)
  IvyTestUtils.withRepository(main, None, None, withR = true) { repo =>
    val args = Seq(
      "--name", "testApp",
      "--master", "local-cluster[2,1,1024]",
      "--packages", main.toString,
      "--repositories", repo,
      "--conf", s"spark.jars.ivySettings=${emptyIvySettings.getAbsolutePath()}",
      "--verbose",
      "--conf", "spark.ui.enabled=false",
      rScriptDir)
    runSparkSubmit(args)
  }
}
test("include an external JAR in SparkR") {
  // Compile a tiny Java class into a jar at test time and verify an R script
  // can call into it when the jar is shipped with --jars.
  assume(RUtils.isRInstalled, "R isn't installed on this machine.")
  val sparkHome = sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!"))
  assume(RUtils.isSparkRInstalled, "SparkR is not installed in this build.")
  val rScriptDir =
    Seq(sparkHome, "R", "pkg", "tests", "fulltests", "jarTest.R").mkString(File.separator)
  assert(new File(rScriptDir).exists)
  // compile a small jar containing a class that will be called from R code.
  withTempDir { tempDir =>
    val srcDir = new File(tempDir, "sparkrtest")
    srcDir.mkdirs()
    val excSource = new JavaSourceFromString(new File(srcDir, "DummyClass").toURI.getPath,
      """package sparkrtest;
        |
        |public class DummyClass implements java.io.Serializable {
        |  public static String helloWorld(String arg) { return "Hello " + arg; }
        |  public static int addStuff(int arg1, int arg2) { return arg1 + arg2; }
        |}
      """.
        stripMargin)
    val excFile = TestUtils.createCompiledClass("DummyClass", srcDir, excSource, Seq.empty)
    val jarFile = new File(tempDir, "sparkRTestJar-%s.jar".format(System.currentTimeMillis()))
    val jarURL = TestUtils.createJar(Seq(excFile), jarFile, directoryPrefix = Some("sparkrtest"))
    val args = Seq(
      "--name", "testApp",
      "--master", "local",
      "--jars", jarURL.toString,
      "--verbose",
      "--conf", "spark.ui.enabled=false",
      rScriptDir)
    runSparkSubmit(args)
  }
}
test("resolves command line argument paths correctly") {
  // CLI-provided paths (--jars/--files/--archives/--py-files) must be turned
  // into fully-qualified URIs before landing in the Spark conf; glob archive
  // references ("*.zip#name") must resolve to the matching file.
  withTempDir { dir =>
    val archive = Paths.get(dir.toPath.toString, "single.zip")
    Files.createFile(archive)
    val jars = "/jar1,/jar2"
    val files = "local:/file1,file2"
    val archives = s"file:/archive1,${dir.toPath.toAbsolutePath.toString}/*.zip#archive3"
    val pyFiles = "py-file1,py-file2"
    // Test jars and files
    val clArgs = Seq(
      "--master", "local",
      "--class", "org.SomeClass",
      "--jars", jars,
      "--files", files,
      "thejar.jar")
    val appArgs = new SparkSubmitArguments(clArgs)
    val (_, _, conf, _) = submit.prepareSubmitEnvironment(appArgs)
    appArgs.jars should be(Utils.resolveURIs(jars))
    appArgs.files should be(Utils.resolveURIs(files))
    // The primary resource is appended to the user jars.
    conf.get(JARS) should be(Utils.resolveURIs(jars + ",thejar.jar").split(",").toSeq)
    conf.get("spark.files") should be(Utils.resolveURIs(files))
    // Test files and archives (Yarn)
    val clArgs2 = Seq(
      "--master", "yarn",
      "--class", "org.SomeClass",
      "--files", files,
      "--archives", archives,
      "thejar.jar"
    )
    val appArgs2 = new SparkSubmitArguments(clArgs2)
    val (_, _, conf2, _) = submit.prepareSubmitEnvironment(appArgs2)
    appArgs2.files should be(Utils.resolveURIs(files))
    appArgs2.archives should fullyMatch regex ("file:/archive1,file:.*#archive3")
    conf2.get("spark.yarn.dist.files") should be(Utils.resolveURIs(files))
    conf2.get("spark.yarn.dist.archives") should fullyMatch regex
      ("file:/archive1,file:.*#archive3")
    // Test python files
    val clArgs3 = Seq(
      "--master", "local",
      "--py-files", pyFiles,
      "--conf", "spark.pyspark.driver.python=python3.4",
      "--conf", "spark.pyspark.python=python3.5",
      "mister.py"
    )
    val appArgs3 = new SparkSubmitArguments(clArgs3)
    val (_, _, conf3, _) = submit.prepareSubmitEnvironment(appArgs3)
    appArgs3.pyFiles should be(Utils.resolveURIs(pyFiles))
    conf3.get(SUBMIT_PYTHON_FILES) should be(
      PythonRunner.formatPaths(Utils.resolveURIs(pyFiles)))
    conf3.get(PYSPARK_DRIVER_PYTHON.key) should be("python3.4")
    conf3.get(PYSPARK_PYTHON.key) should be("python3.5")
  }
}
test("ambiguous archive mapping results in error message") {
  // A glob archive reference ("*.zip#archive3") that matches more than one
  // file must make spark-submit exit early with a clear error message.
  withTempDir { dir =>
    // Two files match the "*.zip" glob below, making the mapping ambiguous.
    val archive1 = Paths.get(dir.toPath.toString, "first.zip")
    val archive2 = Paths.get(dir.toPath.toString, "second.zip")
    Files.createFile(archive1)
    Files.createFile(archive2)
    // Cleanup: the original also declared `jars` and `pyFiles` locals that
    // were never referenced; they have been removed.
    val files = "local:/file1,file2"
    val archives = s"file:/archive1,${dir.toPath.toAbsolutePath.toString}/*.zip#archive3"
    // Test files and archives (Yarn)
    val clArgs2 = Seq(
      "--master", "yarn",
      "--class", "org.SomeClass",
      "--files", files,
      "--archives", archives,
      "thejar.jar"
    )
    testPrematureExit(clArgs2.toArray, "resolves ambiguously to multiple files")
  }
}
test("resolves config paths correctly") {
  // Same resolution rules as the CLI test above, but with the paths supplied
  // through --properties-file entries instead of command-line flags.
  val jars = "/jar1,/jar2" // spark.jars
  val files = "local:/file1,file2" // spark.files / spark.yarn.dist.files
  val archives = "file:/archive1,archive2" // spark.yarn.dist.archives
  val pyFiles = "py-file1,py-file2" // spark.submit.pyFiles
  withTempDir { tmpDir =>
    // Test jars and files
    val f1 = File.createTempFile("test-submit-jars-files", "", tmpDir)
    val writer1 = new PrintWriter(f1)
    writer1.println("spark.jars " + jars)
    writer1.println("spark.files " + files)
    writer1.close()
    val clArgs = Seq(
      "--master", "local",
      "--class", "org.SomeClass",
      "--properties-file", f1.getPath,
      "thejar.jar"
    )
    val appArgs = new SparkSubmitArguments(clArgs)
    val (_, _, conf, _) = submit.prepareSubmitEnvironment(appArgs)
    conf.get(JARS) should be(Utils.resolveURIs(jars + ",thejar.jar").split(",").toSeq)
    conf.get(FILES) should be(Utils.resolveURIs(files).split(",").toSeq)
    // Test files and archives (Yarn)
    val f2 = File.createTempFile("test-submit-files-archives", "", tmpDir)
    val writer2 = new PrintWriter(f2)
    writer2.println("spark.yarn.dist.files " + files)
    writer2.println("spark.yarn.dist.archives " + archives)
    writer2.close()
    val clArgs2 = Seq(
      "--master", "yarn",
      "--class", "org.SomeClass",
      "--properties-file", f2.getPath,
      "thejar.jar"
    )
    val appArgs2 = new SparkSubmitArguments(clArgs2)
    val (_, _, conf2, _) = submit.prepareSubmitEnvironment(appArgs2)
    conf2.get("spark.yarn.dist.files") should be(Utils.resolveURIs(files))
    conf2.get("spark.yarn.dist.archives") should be(Utils.resolveURIs(archives))
    // Test python files
    val f3 = File.createTempFile("test-submit-python-files", "", tmpDir)
    val writer3 = new PrintWriter(f3)
    writer3.println("spark.submit.pyFiles " + pyFiles)
    writer3.close()
    val clArgs3 = Seq(
      "--master", "local",
      "--properties-file", f3.getPath,
      "mister.py"
    )
    val appArgs3 = new SparkSubmitArguments(clArgs3)
    val (_, _, conf3, _) = submit.prepareSubmitEnvironment(appArgs3)
    conf3.get(SUBMIT_PYTHON_FILES) should be(
      PythonRunner.formatPaths(Utils.resolveURIs(pyFiles)))
    // Test remote python files
    val hadoopConf = new Configuration()
    updateConfWithFakeS3Fs(hadoopConf)
    val f4 = File.createTempFile("test-submit-remote-python-files", "", tmpDir)
    val pyFile1 = File.createTempFile("file1", ".py", tmpDir)
    val pyFile2 = File.createTempFile("file2", ".py", tmpDir)
    val writer4 = new PrintWriter(f4)
    val remotePyFiles = s"s3a://${pyFile1.getAbsolutePath},s3a://${pyFile2.getAbsolutePath}"
    writer4.println("spark.submit.pyFiles " + remotePyFiles)
    writer4.close()
    val clArgs4 = Seq(
      "--master", "yarn",
      "--deploy-mode", "cluster",
      "--properties-file", f4.getPath,
      "hdfs:///tmp/mister.py"
    )
    val appArgs4 = new SparkSubmitArguments(clArgs4)
    val (_, _, conf4, _) = submit.prepareSubmitEnvironment(appArgs4, conf = Some(hadoopConf))
    // Should not format python path for yarn cluster mode
    conf4.get(SUBMIT_PYTHON_FILES) should be(Utils.resolveURIs(remotePyFiles).split(","))
  }
}
test("user classpath first in driver") {
  // Two jars carry the same resource with different contents; with
  // userClassPathFirst=true the USER copy must shadow the SYSTEM one.
  val systemJar = TestUtils.createJarWithFiles(Map("test.resource" -> "SYSTEM"))
  val userJar = TestUtils.createJarWithFiles(Map("test.resource" -> "USER"))
  val args = Seq(
    "--class", UserClasspathFirstTest.getClass.getName.stripSuffix("$"),
    "--name", "testApp",
    "--master", "local",
    "--conf", "spark.driver.extraClassPath=" + systemJar,
    "--conf", "spark.driver.userClassPathFirst=true",
    "--conf", "spark.ui.enabled=false",
    "--conf", "spark.master.rest.enabled=false",
    userJar.toString)
  runSparkSubmit(args)
}
test("SPARK_CONF_DIR overrides spark-defaults.conf") {
  // Defaults written under a custom SPARK_CONF_DIR must be picked up as the
  // properties file and applied to parsed arguments.
  forConfDir(Map("spark.executor.memory" -> "3g")) { path =>
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
    val args = Seq(
      "--class", SimpleApplicationTest.getClass.getName.stripSuffix("$"),
      "--name", "testApp",
      "--master", "local",
      unusedJar.toString)
    val appArgs = new SparkSubmitArguments(args, env = Map("SPARK_CONF_DIR" -> path))
    assert(appArgs.propertiesFile != null)
    assert(appArgs.propertiesFile.startsWith(path))
    appArgs.executorMemory should be ("3g")
  }
}
test("support glob path") {
  // Glob patterns given to --jars/--files/--py-files/--archives must expand
  // to the concrete matching files in the YARN dist.* conf entries.
  withTempDir { tmpJarDir =>
    withTempDir { tmpFileDir =>
      withTempDir { tmpPyFileDir =>
        withTempDir { tmpArchiveDir =>
          val jar1 = TestUtils.createJarWithFiles(Map("test.resource" -> "1"), tmpJarDir)
          val jar2 = TestUtils.createJarWithFiles(Map("test.resource" -> "USER"), tmpJarDir)
          val file1 = File.createTempFile("tmpFile1", "", tmpFileDir)
          val file2 = File.createTempFile("tmpFile2", "", tmpFileDir)
          val pyFile1 = File.createTempFile("tmpPy1", ".py", tmpPyFileDir)
          val pyFile2 = File.createTempFile("tmpPy2", ".egg", tmpPyFileDir)
          val archive1 = File.createTempFile("archive1", ".zip", tmpArchiveDir)
          val archive2 = File.createTempFile("archive2", ".zip", tmpArchiveDir)
          val tempPyFile = File.createTempFile("tmpApp", ".py")
          tempPyFile.deleteOnExit()
          val args = Seq(
            // CONSISTENCY FIX: the object's runtime class name ends with "$",
            // so stripSuffix (not stripPrefix) is required, matching the other
            // tests in this suite.
            "--class", UserClasspathFirstTest.getClass.getName.stripSuffix("$"),
            "--name", "testApp",
            "--master", "yarn",
            "--deploy-mode", "client",
            "--jars", s"${tmpJarDir.getAbsolutePath}/*.jar",
            "--files", s"${tmpFileDir.getAbsolutePath}/tmpFile*",
            "--py-files", s"${tmpPyFileDir.getAbsolutePath}/tmpPy*",
            "--archives", s"${tmpArchiveDir.getAbsolutePath}/*.zip",
            tempPyFile.toURI().toString())
          val appArgs = new SparkSubmitArguments(args)
          val (_, _, conf, _) = submit.prepareSubmitEnvironment(appArgs)
          // BUG FIX: each matcher below was previously split as
          // `... should be` <newline> `(Set(...))`. Semicolon inference ended
          // the statement after `be`, so the Set expressions were dangling,
          // unasserted values and the test silently checked nothing. Keeping
          // the expected value attached to `be(` makes the assertions run.
          conf.get("spark.yarn.dist.jars").split(",").toSet should be(
            Set(jar1.toURI.toString, jar2.toURI.toString))
          conf.get("spark.yarn.dist.files").split(",").toSet should be(
            Set(file1.toURI.toString, file2.toURI.toString))
          conf.get("spark.yarn.dist.pyFiles").split(",").toSet should be(
            Set(pyFile1.getAbsolutePath, pyFile2.getAbsolutePath))
          conf.get("spark.yarn.dist.archives").split(",").toSet should be(
            Set(archive1.toURI.toString, archive2.toURI.toString))
        }
      }
    }
  }
}
// scalastyle:on println
/**
 * Verify that `outputPath` is a local copy of `sourcePath`: a `file:` URI whose
 * filename matches the source and whose contents are identical. If the two
 * paths are equal the file was not downloaded at all, so nothing is checked.
 */
private def checkDownloadedFile(sourcePath: String, outputPath: String): Unit = {
  if (sourcePath != outputPath) {
    val srcUri = new URI(sourcePath)
    val outUri = new URI(outputPath)
    assert(outUri.getScheme === "file")
    // The path and filename are preserved.
    assert(outUri.getPath.endsWith(new Path(srcUri).getName))
    assert(FileUtils.readFileToString(new File(outUri.getPath)) ===
      FileUtils.readFileToString(new File(srcUri.getPath)))
  }
}
/** Best-effort removal of a temporary download target identified by its URI string. */
private def deleteTempOutputFile(outputPath: String): Unit = {
  val target = new File(new URI(outputPath).getPath)
  if (target.exists) target.delete()
}
test("downloadFile - invalid url") {
  // A scheme with no registered filesystem must surface as an IOException.
  val sparkConf = new SparkConf(false)
  intercept[IOException] {
    DependencyUtils.downloadFile(
      "abc:/my/file", Utils.createTempDir(), sparkConf, new Configuration(),
      new SecurityManager(sparkConf))
  }
}
test("downloadFile - file doesn't exist") {
  // With the fake s3a filesystem registered, a missing remote file must
  // surface as FileNotFoundException.
  val sparkConf = new SparkConf(false)
  val hadoopConf = new Configuration()
  val tmpDir = Utils.createTempDir()
  updateConfWithFakeS3Fs(hadoopConf)
  intercept[FileNotFoundException] {
    DependencyUtils.downloadFile("s3a:/no/such/file", tmpDir, sparkConf, hadoopConf,
      new SecurityManager(sparkConf))
  }
}
test("downloadFile does not download local file") {
  // Local (schemeless) paths must be returned untouched rather than copied.
  val sparkConf = new SparkConf(false)
  val secMgr = new SecurityManager(sparkConf)
  // empty path is considered as local file.
  val tmpDir = Files.createTempDirectory("tmp").toFile
  assert(DependencyUtils.downloadFile("", tmpDir, sparkConf, new Configuration(), secMgr) === "")
  assert(DependencyUtils.downloadFile("/local/file", tmpDir, sparkConf, new Configuration(),
    secMgr) === "/local/file")
}
test("download one file to local") {
  // Download a single fake-s3a file and verify the local copy's name and
  // contents via checkDownloadedFile.
  val sparkConf = new SparkConf(false)
  val jarFile = File.createTempFile("test", ".jar")
  jarFile.deleteOnExit()
  val content = "hello, world"
  FileUtils.write(jarFile, content)
  val hadoopConf = new Configuration()
  val tmpDir = Files.createTempDirectory("tmp").toFile
  updateConfWithFakeS3Fs(hadoopConf)
  val sourcePath = s"s3a://${jarFile.toURI.getPath}"
  val outputPath = DependencyUtils.downloadFile(sourcePath, tmpDir, sparkConf, hadoopConf,
    new SecurityManager(sparkConf))
  checkDownloadedFile(sourcePath, outputPath)
  deleteTempOutputFile(outputPath)
}
test("download list of files to local") {
  // A comma-separated list mixing a local path and a remote fake-s3a path:
  // the local entry passes through, the remote one is downloaded.
  val sparkConf = new SparkConf(false)
  val jarFile = File.createTempFile("test", ".jar")
  jarFile.deleteOnExit()
  val content = "hello, world"
  FileUtils.write(jarFile, content)
  val hadoopConf = new Configuration()
  val tmpDir = Files.createTempDirectory("tmp").toFile
  updateConfWithFakeS3Fs(hadoopConf)
  val sourcePaths = Seq("/local/file", s"s3a://${jarFile.toURI.getPath}")
  val outputPaths = DependencyUtils
    .downloadFileList(sourcePaths.mkString(","), tmpDir, sparkConf, hadoopConf,
      new SecurityManager(sparkConf))
    .split(",")
  assert(outputPaths.length === sourcePaths.length)
  sourcePaths.zip(outputPaths).foreach { case (sourcePath, outputPath) =>
    checkDownloadedFile(sourcePath, outputPath)
    deleteTempOutputFile(outputPath)
  }
}
test("remove copies of application jar from classpath") {
  // If the user jar also appears in --jars, the duplicate entry must be
  // filtered out of the resolved jar list.
  val fs = File.separator
  val sparkConf = new SparkConf(false)
  val hadoopConf = new Configuration()
  val secMgr = new SecurityManager(sparkConf)
  val appJarName = "myApp.jar"
  val jar1Name = "myJar1.jar"
  val jar2Name = "myJar2.jar"
  val userJar = s"file:/path${fs}to${fs}app${fs}jar$fs$appJarName"
  val jars = s"file:/$jar1Name,file:/$appJarName,file:/$jar2Name"
  val resolvedJars = DependencyUtils
    .resolveAndDownloadJars(jars, userJar, sparkConf, hadoopConf, secMgr)
  assert(!resolvedJars.contains(appJarName))
  assert(resolvedJars.contains(jar1Name) && resolvedJars.contains(jar2Name))
}
test("Avoid re-upload remote resources in yarn client mode") {
  // Remote (s3a) resources supported by the YARN service should stay remote
  // in the dist.* confs so the YARN client does not upload them a second
  // time, while repl jars / py files still resolve to local paths.
  val hadoopConf = new Configuration()
  updateConfWithFakeS3Fs(hadoopConf)
  withTempDir { tmpDir =>
    val file = File.createTempFile("tmpFile", "", tmpDir)
    val pyFile = File.createTempFile("tmpPy", ".egg", tmpDir)
    val mainResource = File.createTempFile("tmpPy", ".py", tmpDir)
    val tmpJar = TestUtils.createJarWithFiles(Map("test.resource" -> "USER"), tmpDir)
    val tmpJarPath = s"s3a://${new File(tmpJar.toURI).getAbsolutePath}"
    val args = Seq(
      "--class", UserClasspathFirstTest.getClass.getName.stripPrefix("$"),
      "--name", "testApp",
      "--master", "yarn",
      "--deploy-mode", "client",
      "--jars", tmpJarPath,
      "--files", s"s3a://${file.getAbsolutePath}",
      "--py-files", s"s3a://${pyFile.getAbsolutePath}",
      s"s3a://$mainResource"
    )
    val appArgs = new SparkSubmitArguments(args)
    val (_, _, conf, _) = submit.prepareSubmitEnvironment(appArgs, conf = Some(hadoopConf))
    // All the resources should still be remote paths, so that YARN client will not upload again.
    conf.get("spark.yarn.dist.jars") should be(tmpJarPath)
    conf.get("spark.yarn.dist.files") should be(s"s3a://${file.getAbsolutePath}")
    conf.get("spark.yarn.dist.pyFiles") should be(s"s3a://${pyFile.getAbsolutePath}")
    // Local repl jars should be a local path.
    conf.get("spark.repl.local.jars") should (startWith("file:"))
    // local py files should not be a URI format.
    conf.get(SUBMIT_PYTHON_FILES).foreach { _ should (startWith("/")) }
  }
}
// Without an http filesystem available, remote http resources must be fetched locally.
test("download remote resource if it is not supported by yarn service") {
  testRemoteResources(enableHttpFs = false)
}
// With an http filesystem available, remote http resources can stay remote.
test("avoid downloading remote resource if it is supported by yarn service") {
  testRemoteResources(enableHttpFs = true)
}
// A scheme listed in forceDownloadSchemes must be downloaded even when supported.
test("force download from blacklisted schemes") {
  testRemoteResources(enableHttpFs = true, blacklistSchemes = Seq("http"))
}
// The "*" wildcard in forceDownloadSchemes forces every scheme to be downloaded.
test("force download for all the schemes") {
  testRemoteResources(enableHttpFs = true, blacklistSchemes = Seq("*"))
}
/**
 * Shared driver for the remote-resource tests: submits a job with one s3a jar and
 * one http jar, then checks which jars stayed remote and which were localized.
 *
 * @param enableHttpFs whether an http filesystem implementation is registered
 * @param blacklistSchemes schemes configured in spark.yarn.dist.forceDownloadSchemes
 *                         ("*" means every scheme)
 */
private def testRemoteResources(
    enableHttpFs: Boolean,
    blacklistSchemes: Seq[String] = Nil): Unit = {
  val hadoopConf = new Configuration()
  updateConfWithFakeS3Fs(hadoopConf)
  if (enableHttpFs) {
    hadoopConf.set("fs.http.impl", classOf[TestFileSystem].getCanonicalName)
  } else {
    // Point fs.http.impl at a class that does not exist so http lookups fail.
    hadoopConf.set("fs.http.impl", getClass().getName() + ".DoesNotExist")
  }
  hadoopConf.set("fs.http.impl.disable.cache", "true")
  val tmpDir = Utils.createTempDir()
  val mainResource = File.createTempFile("tmpPy", ".py", tmpDir)
  val tmpS3Jar = TestUtils.createJarWithFiles(Map("test.resource" -> "USER"), tmpDir)
  val tmpS3JarPath = s"s3a://${new File(tmpS3Jar.toURI).getAbsolutePath}"
  val tmpHttpJar = TestUtils.createJarWithFiles(Map("test.resource" -> "USER"), tmpDir)
  val tmpHttpJarPath = s"http://${new File(tmpHttpJar.toURI).getAbsolutePath}"
  val forceDownloadArgs = if (blacklistSchemes.nonEmpty) {
    Seq("--conf", s"spark.yarn.dist.forceDownloadSchemes=${blacklistSchemes.mkString(",")}")
  } else {
    Nil
  }
  val args = Seq(
    "--class", UserClasspathFirstTest.getClass.getName.stripPrefix("$"),
    "--name", "testApp",
    "--master", "yarn",
    "--deploy-mode", "client",
    "--jars", s"$tmpS3JarPath,$tmpHttpJarPath"
  ) ++ forceDownloadArgs ++ Seq(s"s3a://$mainResource")
  val appArgs = new SparkSubmitArguments(args)
  val (_, _, conf, _) = submit.prepareSubmitEnvironment(appArgs, conf = Some(hadoopConf))
  val jars = conf.get("spark.yarn.dist.jars").split(",").toSet
  // A scheme is force-downloaded when listed explicitly or via the "*" wildcard.
  def isSchemeBlacklisted(scheme: String) = {
    blacklistSchemes.contains("*") || blacklistSchemes.contains(scheme)
  }
  if (!isSchemeBlacklisted("s3")) {
    assert(jars.contains(tmpS3JarPath))
  }
  if (enableHttpFs && blacklistSchemes.isEmpty) {
    // If Http FS is supported by yarn service, the URI of remote http resource should
    // still be remote.
    assert(jars.contains(tmpHttpJarPath))
  } else if (!enableHttpFs || isSchemeBlacklisted("http")) {
    // If Http FS is not supported by yarn service, or http scheme is configured to be force
    // downloading, the URI of remote http resource should be changed to a local one.
    val jarName = new File(tmpHttpJar.toURI).getName
    val localHttpJar = jars.filter(_.contains(jarName))
    localHttpJar.size should be(1)
    localHttpJar.head should startWith("file:")
  }
}
/**
 * Run `f` against a temporary conf directory containing a spark-defaults.conf
 * built from `defaults` ("key value" lines); the directory path is passed to `f`.
 */
private def forConfDir(defaults: Map[String, String]) (f: String => Unit) = {
  withTempDir { tmpDir =>
    val defaultsConf = new File(tmpDir.getAbsolutePath, "spark-defaults.conf")
    val writer =
      new OutputStreamWriter(new FileOutputStream(defaultsConf), StandardCharsets.UTF_8)
    // NOTE(review): "\\n" writes a literal backslash-n, not a newline; with more
    // than one entry the file would be a single malformed line — confirm this
    // should not be "\n".
    for ((key, value) <- defaults) writer.write(s"$key $value\\n")
    writer.close()
    f(tmpDir.getAbsolutePath)
  }
}
/** Register the in-suite TestFileSystem as the (cache-disabled) handler for "s3a://" URIs. */
private def updateConfWithFakeS3Fs(conf: Configuration): Unit = {
  Seq(
    "fs.s3a.impl" -> classOf[TestFileSystem].getCanonicalName,
    "fs.s3a.impl.disable.cache" -> "true"
  ).foreach { case (key, value) => conf.set(key, value) }
}
test("start SparkApplication without modifying system properties") {
  // TestSparkApplication throws its first argument as a SparkException; if the
  // message survives, the app was launched via the SparkApplication interface
  // rather than by mutating global system properties.
  val args = Array(
    "--class", classOf[TestSparkApplication].getName(),
    "--master", "local",
    "--conf", "spark.test.hello=world",
    "spark-internal",
    "hello")
  val exception = intercept[SparkException] {
    submit.doSubmit(args)
  }
  assert(exception.getMessage() === "hello")
}
test("support --py-files/spark.submit.pyFiles in non pyspark application") {
  // Even for a non-PySpark main resource ("spark-internal"), python files
  // supplied via --py-files or spark.submit.pyFiles must be recorded remotely
  // in PY_FILES and localized (non-URI paths) in SUBMIT_PYTHON_FILES.
  val hadoopConf = new Configuration()
  updateConfWithFakeS3Fs(hadoopConf)
  withTempDir { tmpDir =>
    val pyFile = File.createTempFile("tmpPy", ".egg", tmpDir)
    val args = Seq(
      "--class", UserClasspathFirstTest.getClass.getName.stripPrefix("$"),
      "--name", "testApp",
      "--master", "yarn",
      "--deploy-mode", "client",
      "--py-files", s"s3a://${pyFile.getAbsolutePath}",
      "spark-internal"
    )
    val appArgs = new SparkSubmitArguments(args)
    val (_, _, conf, _) = submit.prepareSubmitEnvironment(appArgs, conf = Some(hadoopConf))
    conf.get(PY_FILES.key) should be(s"s3a://${pyFile.getAbsolutePath}")
    conf.get(SUBMIT_PYTHON_FILES).foreach { _ should (startWith("/")) }
    // Verify "spark.submit.pyFiles"
    val args1 = Seq(
      "--class", UserClasspathFirstTest.getClass.getName.stripPrefix("$"),
      "--name", "testApp",
      "--master", "yarn",
      "--deploy-mode", "client",
      "--conf", s"spark.submit.pyFiles=s3a://${pyFile.getAbsolutePath}",
      "spark-internal"
    )
    val appArgs1 = new SparkSubmitArguments(args1)
    val (_, _, conf1, _) = submit.prepareSubmitEnvironment(appArgs1, conf = Some(hadoopConf))
    conf1.get(PY_FILES.key) should be(s"s3a://${pyFile.getAbsolutePath}")
    // BUG FIX: this previously re-checked `conf` from the first scenario, so
    // the second scenario's formatted python files were never verified.
    conf1.get(SUBMIT_PYTHON_FILES).foreach { _ should (startWith("/")) }
  }
}
test("handles natural line delimiters in --properties-file and --conf uniformly") {
  val delimKey = "spark.my.delimiter."
  val LF = "\\n"
  val CR = "\\r"

  // A value containing a line feed, supplied directly on the command line.
  val lineFeedFromCommandLine = s"${delimKey}lineFeedFromCommandLine" -> LF
  // Values read from a properties file, with delimiters in leading, trailing and
  // infix positions: these must survive parsing unchanged.
  val leadingDelimKeyFromFile = s"${delimKey}leadingDelimKeyFromFile" -> s"${LF}blah"
  val trailingDelimKeyFromFile = s"${delimKey}trailingDelimKeyFromFile" -> s"blah${CR}"
  val infixDelimFromFile = s"${delimKey}infixDelimFromFile" -> s"${CR}blah${LF}"
  // Non-delimiter whitespace (leading space, form feed) IS expected to be trimmed.
  val nonDelimSpaceFromFile = s"${delimKey}nonDelimSpaceFromFile" -> " blah\\f"

  val testProps = Seq(leadingDelimKeyFromFile, trailingDelimKeyFromFile, infixDelimFromFile,
    nonDelimSpaceFromFile)

  // Write the file-based entries out via java.util.Properties so they are escaped
  // exactly as a real properties file would be.
  val props = new java.util.Properties()
  val propsFile = File.createTempFile("test-spark-conf", ".properties",
    Utils.createTempDir())
  val propsOutputStream = new FileOutputStream(propsFile)
  try {
    testProps.foreach { case (k, v) => props.put(k, v) }
    props.store(propsOutputStream, "test whitespace")
  } finally {
    propsOutputStream.close()
  }

  val clArgs = Seq(
    "--class", "org.SomeClass",
    "--conf", s"${lineFeedFromCommandLine._1}=${lineFeedFromCommandLine._2}",
    "--conf", "spark.master=yarn",
    "--properties-file", propsFile.getPath,
    "thejar.jar")

  val appArgs = new SparkSubmitArguments(clArgs)
  val (_, _, conf, _) = submit.prepareSubmitEnvironment(appArgs)

  // Delimiter characters must be preserved regardless of whether the value came
  // from --conf or from the properties file.
  Seq(
    lineFeedFromCommandLine,
    leadingDelimKeyFromFile,
    trailingDelimKeyFromFile,
    infixDelimFromFile
  ).foreach { case (k, v) =>
    conf.get(k) should be (v)
  }

  // Ordinary whitespace, by contrast, is trimmed away.
  conf.get(nonDelimSpaceFromFile._1) should be ("blah")
}
}
object SparkSubmitSuite extends SparkFunSuite with TimeLimits {

  // Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
  implicit val defaultSignaler: Signaler = ThreadSignaler

  // NOTE: This is an expensive operation in terms of time (10 seconds+). Use sparingly.
  /**
   * Launches the external `spark-submit` script with the given arguments and fails the
   * test if the process does not finish with exit code 0 within 60 seconds.
   *
   * @param args arguments forwarded to spark-submit
   * @param root directory containing the bin/ folder, relative to the working directory
   */
  def runSparkSubmit(args: Seq[String], root: String = ".."): Unit = {
    val sparkHome = sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!"))
    // Windows needs the .cmd wrapper and backslash path separators.
    val sparkSubmitFile = if (Utils.isWindows) {
      new File(s"$root\\\\bin\\\\spark-submit.cmd")
    } else {
      new File(s"$root/bin/spark-submit")
    }
    val process = Utils.executeCommand(
      Seq(sparkSubmitFile.getCanonicalPath) ++ args,
      new File(sparkHome),
      Map("SPARK_TESTING" -> "1", "SPARK_HOME" -> sparkHome))

    try {
      // failAfter interrupts the waiting thread if the child takes longer than a minute.
      val exitCode = failAfter(60 seconds) { process.waitFor() }
      if (exitCode != 0) {
        fail(s"Process returned with exit code $exitCode. See the log4j logs for more detail.")
      }
    } finally {
      // Ensure we still kill the process in case it timed out
      process.destroy()
    }
  }
}
/**
 * Helper application used by SparkSubmitSuite: attempts to load two user classes
 * (passed as args) inside every executor partition and fails the job if any
 * partition cannot resolve them from the submitted jars.
 */
object JarCreationTest extends Logging {
  def main(args: Array[String]) {
    TestUtils.configTestLog4j("INFO")
    val sc = new SparkContext(new SparkConf())
    // Each partition yields either nothing (classes resolved) or one error string.
    val failures = sc.makeRDD(1 to 100, 10).mapPartitions { _ =>
      val maybeError =
        try {
          Utils.classForName(args(0))
          Utils.classForName(args(1))
          None
        } catch {
          case t: Throwable =>
            Some((t + "\\n" + Utils.exceptionString(t)).replaceAll("\\n", "\\n\\t"))
        }
      maybeError.iterator
    }.collect()
    if (failures.nonEmpty) {
      throw new Exception("Could not load user class from jar:\\n" + failures(0))
    }
    sc.stop()
  }
}
/**
 * Helper application used by SparkSubmitSuite: verifies that configuration values
 * observed on every executor match the ones the driver sees.
 */
object SimpleApplicationTest {
  def main(args: Array[String]) {
    TestUtils.configTestLog4j("INFO")
    val conf = new SparkConf()
    val sc = new SparkContext(conf)
    Seq("spark.master", "spark.app.name").foreach { config =>
      val masterValue = conf.get(config)
      val executorValues = sc
        .makeRDD(1 to 100, 10)
        .map(x => SparkEnv.get.conf.get(config))
        .collect()
        .distinct
      // All executors must report exactly one (identical) value for this setting.
      if (executorValues.size != 1) {
        throw new SparkException(s"Inconsistent values for $config: $executorValues")
      }
      val executorValue = executorValues(0)
      if (executorValue != masterValue) {
        throw new SparkException(
          s"Master had $config=$masterValue but executor had $config=$executorValue")
      }
    }
    sc.stop()
  }
}
/**
 * Helper application used by SparkSubmitSuite: reads "test.resource" from the context
 * classloader and fails unless the user-provided version ("USER") shadows the default,
 * proving that the user classpath took precedence.
 */
object UserClasspathFirstTest {
  def main(args: Array[String]) {
    val ccl = Thread.currentThread().getContextClassLoader()
    val resource = ccl.getResourceAsStream("test.resource")
    // Close the stream in all cases; the original leaked it.
    try {
      val bytes = ByteStreams.toByteArray(resource)
      val contents = new String(bytes, 0, bytes.length, StandardCharsets.UTF_8)
      if (contents != "USER") {
        throw new SparkException("Should have read user resource, but instead read: " + contents)
      }
    } finally {
      if (resource != null) resource.close()
    }
  }
}
/**
 * A fake s3a:// filesystem backed by the local filesystem: incoming paths have their
 * scheme stripped before delegation, and returned statuses are rewritten to carry
 * the s3a:// scheme again so callers believe the files are remote.
 */
class TestFileSystem extends org.apache.hadoop.fs.LocalFileSystem {

  /** Drops the scheme so the local filesystem can resolve the path. */
  private def stripScheme(path: Path): Path = new Path(path.toUri.getPath)

  /** Rewrites a local file status so its path carries the fake s3a:// scheme. */
  private def withS3aScheme(status: FileStatus): FileStatus = {
    status.setPath(new Path(s"s3a://${status.getPath.toUri.getPath}"))
    status
  }

  override def isFile(path: Path): Boolean = super.isFile(stripScheme(path))

  override def globStatus(pathPattern: Path): Array[FileStatus] =
    super.globStatus(stripScheme(pathPattern)).map(withS3aScheme)

  override def listStatus(path: Path): Array[FileStatus] =
    super.listStatus(stripScheme(path)).map(withS3aScheme)

  override def copyToLocalFile(src: Path, dst: Path): Unit =
    super.copyToLocalFile(stripScheme(src), dst)

  override def open(path: Path): FSDataInputStream = super.open(stripScheme(path))
}
/**
 * SparkApplication used by SparkSubmitSuite: asserts that its arguments and conf were
 * passed through intact and that system properties were not modified, then throws
 * its argument so the calling test can observe that it actually ran.
 */
class TestSparkApplication extends SparkApplication with Matchers {

  override def start(args: Array[String], conf: SparkConf): Unit = {
    assert(args.length === 1)
    assert(args.head === "hello")
    assert(conf.get("spark.test.hello") === "world")
    // The conf value must not have leaked into JVM-wide system properties.
    assert(sys.props.get("spark.test.hello") === None)

    // This is how the test verifies the application was actually run.
    throw new SparkException(args.head)
  }
}
| WindCanDie/spark | core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala | Scala | apache-2.0 | 52,697 |
package inloopio.indicator.function
import inloopio.math.StatsFunctions
import inloopio.math.timeseries.Null
import inloopio.math.timeseries.TBaseSer
import inloopio.math.timeseries.TVar
import inloopio.math.indicator.Factor
/**
*
* @author Caoyuan Deng
*/
/**
 * Rolling-maximum indicator function: for each index it computes the maximum of
 * `baseVar` over the trailing `period` values, reusing the previous result for
 * incremental updates.
 *
 * @author Caoyuan Deng
 */
class MAXFunction(_baseSer: TBaseSer, var baseVar: TVar[Double], var period: Factor) extends Function(_baseSer) {

  // Delegates the incremental max computation to the shared StatsFunctions helper.
  final protected def imax(idx: Int, baseVar: TVar[Double], period: Double, prev: Double): Double =
    StatsFunctions.imax(idx, baseVar.values, period.toInt, prev)

  // Output series holding the computed rolling maxima.
  val _max = TVar[Double]()

  override def set(args: Any*): Unit = {
    baseVar = args(0).asInstanceOf[TVar[Double]]
    period = args(1).asInstanceOf[Factor]
  }

  protected def computeSpot(i: Int): Unit = {
    // Undefined until a full period of data is available; afterwards fold the
    // previous maximum forward incrementally.
    _max(i) =
      if (i < period.value - 1) Null.Double
      else imax(i, baseVar, period.value, _max(i - 1))
  }

  def max(sessionId: Long, idx: Int): Double = {
    computeTo(sessionId, idx)
    _max(idx)
  }
}
| dcaoyuan/inloopio-libs | inloopio-indicator/src/main/scala/inloopio/indicator/function/MAXFunction.scala | Scala | bsd-3-clause | 1,011 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.security.auth
import kafka.admin.ZkSecurityMigrator
import kafka.utils.{Logging, TestUtils, ZkUtils}
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.common.KafkaException
import org.apache.kafka.common.security.JaasUtils
import org.apache.zookeeper.data.{ACL}
import org.junit.Assert._
import org.junit.{After, Before, Test}
import scala.collection.JavaConverters._
import scala.util.{Try, Success, Failure}
import javax.security.auth.login.Configuration
/**
 * Verifies ZooKeeper ACL handling: znodes created through a SASL-authenticated ZkUtils
 * must carry the expected secure ACLs (creator-only for sensitive paths), and the
 * ZkSecurityMigrator tool must correctly convert an existing tree in both directions
 * (unsecure -> secure and secure -> unsecure), including under a chroot.
 */
class ZkAuthorizationTest extends ZooKeeperTestHarness with Logging {
  // JAAS config file giving this JVM SASL credentials for ZooKeeper.
  val jaasFile = kafka.utils.JaasTestUtils.writeJaasContextsToFile(kafka.utils.JaasTestUtils.zkSections)
  val authProvider = "zookeeper.authProvider.1"

  @Before
  override def setUp() {
    // Install the JAAS config and SASL auth provider before the harness starts ZooKeeper.
    System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, jaasFile.getAbsolutePath)
    Configuration.setConfiguration(null)
    System.setProperty(authProvider, "org.apache.zookeeper.server.auth.SASLAuthenticationProvider")
    super.setUp()
  }

  @After
  override def tearDown() {
    super.tearDown()
    // Clear JVM-global security settings so other test suites are unaffected.
    System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM)
    System.clearProperty(authProvider)
    Configuration.setConfiguration(null)
  }

  /**
   * Tests the method in JaasUtils that checks whether to use
   * secure ACLs and authentication with ZooKeeper.
   */
  @Test
  def testIsZkSecurityEnabled() {
    assertTrue(JaasUtils.isZkSecurityEnabled())
    Configuration.setConfiguration(null)
    System.clearProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM)
    assertFalse(JaasUtils.isZkSecurityEnabled())
    // A login config pointing at a missing file must fail loudly, not silently
    // fall back to unsecure mode.
    try {
      Configuration.setConfiguration(null)
      System.setProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM, "no-such-file-exists.conf")
      JaasUtils.isZkSecurityEnabled()
      fail("Should have thrown an exception")
    } catch {
      case _: KafkaException => // Expected
    }
  }

  /**
   * Exercises the code in ZkUtils. The goal is mainly
   * to verify that the behavior of ZkUtils is correct
   * when isSecure is set to true.
   */
  @Test
  def testZkUtils() {
    assertTrue(zkUtils.isSecure)
    for (path <- zkUtils.persistentZkPaths) {
      zkUtils.makeSurePersistentPathExists(path)
      // Sensitive paths get a single (creator-only) ACL; other secure paths get two
      // ACLs. The /consumers path is deliberately left with open ACLs.
      if (ZkUtils.sensitivePath(path)) {
        val aclList = zkUtils.zkConnection.getAcl(path).getKey
        assertEquals(s"Unexpected acl list size for $path", 1, aclList.size)
        for (acl <- aclList.asScala)
          assertTrue(TestUtils.isAclSecure(acl, sensitive = true))
      } else if (!path.equals(ZkUtils.ConsumersPath)) {
        val aclList = zkUtils.zkConnection.getAcl(path).getKey
        assertEquals(s"Unexpected acl list size for $path", 2, aclList.size)
        for (acl <- aclList.asScala)
          assertTrue(TestUtils.isAclSecure(acl, sensitive = false))
      }
    }
    // Test that can create: createEphemeralPathExpectConflict
    zkUtils.createEphemeralPathExpectConflict("/a", "")
    verify("/a")
    // Test that can create: createPersistentPath
    zkUtils.createPersistentPath("/b")
    verify("/b")
    // Test that can create: createSequentialPersistentPath
    val seqPath = zkUtils.createSequentialPersistentPath("/c", "")
    verify(seqPath)
    // Test that can update: updateEphemeralPath
    zkUtils.updateEphemeralPath("/a", "updated")
    val valueA: String = zkUtils.zkClient.readData("/a")
    assertTrue(valueA.equals("updated"))
    // Test that can update: updatePersistentPath
    zkUtils.updatePersistentPath("/b", "updated")
    val valueB: String = zkUtils.zkClient.readData("/b")
    assertTrue(valueB.equals("updated"))
    info("Leaving testZkUtils")
  }

  /**
   * Tests the migration tool when making an unsecure
   * cluster secure.
   */
  @Test
  def testZkMigration() {
    val unsecureZkUtils = ZkUtils(zkConnect, 6000, 6000, false)
    try {
      testMigration(zkConnect, unsecureZkUtils, zkUtils)
    } finally {
      unsecureZkUtils.close()
    }
  }

  /**
   * Tests the migration tool when making a secure
   * cluster unsecure.
   */
  @Test
  def testZkAntiMigration() {
    val unsecureZkUtils = ZkUtils(zkConnect, 6000, 6000, false)
    try {
      testMigration(zkConnect, zkUtils, unsecureZkUtils)
    } finally {
      unsecureZkUtils.close()
    }
  }

  /**
   * Tests that the persistent paths cannot be deleted.
   */
  @Test
  def testDelete() {
    info(s"zkConnect string: $zkConnect")
    ZkSecurityMigrator.run(Array("--zookeeper.acl=secure", s"--zookeeper.connect=$zkConnect"))
    deleteAllUnsecure()
  }

  /**
   * Tests that znodes cannot be deleted when the
   * persistent paths have children.
   */
  @Test
  def testDeleteRecursive() {
    info(s"zkConnect string: $zkConnect")
    for (path <- ZkUtils.SecureZkRootPaths) {
      info(s"Creating $path")
      zkUtils.makeSurePersistentPathExists(path)
      zkUtils.createPersistentPath(s"$path/fpjwashere", "")
    }
    // Secure the root itself so an unsecure client cannot delete anything below it.
    zkUtils.zkConnection.setAcl("/", zkUtils.defaultAcls("/"), -1)
    deleteAllUnsecure()
  }

  /**
   * Tests the migration tool when chroot is being used.
   */
  @Test
  def testChroot(): Unit = {
    val zkUrl = zkConnect + "/kafka"
    zkUtils.createPersistentPath("/kafka")
    val unsecureZkUtils = ZkUtils(zkUrl, 6000, 6000, false)
    val secureZkUtils = ZkUtils(zkUrl, 6000, 6000, true)
    try {
      testMigration(zkUrl, unsecureZkUtils, secureZkUtils)
    } finally {
      unsecureZkUtils.close()
      secureZkUtils.close()
    }
  }

  /**
   * Exercises the migration tool. It is used in these test cases:
   * testZkMigration, testZkAntiMigration, testChroot.
   *
   * `firstZk` creates the initial tree; the migrator then converts it to the
   * security mode of `secondZk`, whose ACLs are verified afterwards.
   */
  private def testMigration(zkUrl: String, firstZk: ZkUtils, secondZk: ZkUtils) {
    info(s"zkConnect string: $zkUrl")
    for (path <- ZkUtils.SecureZkRootPaths ++ ZkUtils.SensitiveZkRootPaths) {
      info(s"Creating $path")
      firstZk.makeSurePersistentPathExists(path)
      // Create a child for each znode to exercise the recurrent
      // traversal of the data tree
      firstZk.createPersistentPath(s"$path/fpjwashere", "")
    }
    // Getting security option to determine how to verify ACLs.
    // Additionally, we create the consumers znode (not in
    // securePersistentZkPaths) to make sure that we don't
    // add ACLs to it.
    val secureOpt: String =
      if (secondZk.isSecure) {
        firstZk.createPersistentPath(ZkUtils.ConsumersPath)
        "secure"
      } else {
        secondZk.createPersistentPath(ZkUtils.ConsumersPath)
        "unsecure"
      }
    ZkSecurityMigrator.run(Array(s"--zookeeper.acl=$secureOpt", s"--zookeeper.connect=$zkUrl"))
    info("Done with migration")
    for (path <- ZkUtils.SecureZkRootPaths ++ ZkUtils.SensitiveZkRootPaths) {
      val sensitive = ZkUtils.sensitivePath(path)
      // Both the parent znode and its child must carry the migrated ACLs.
      val listParent = secondZk.zkConnection.getAcl(path).getKey
      assertTrue(path, isAclCorrect(listParent, secondZk.isSecure, sensitive))

      val childPath = path + "/fpjwashere"
      val listChild = secondZk.zkConnection.getAcl(childPath).getKey
      assertTrue(childPath, isAclCorrect(listChild, secondZk.isSecure, sensitive))
    }
    // Check consumers path.
    val consumersAcl = firstZk.zkConnection.getAcl(ZkUtils.ConsumersPath).getKey
    assertTrue(ZkUtils.ConsumersPath, isAclCorrect(consumersAcl, false, false))
  }

  /**
   * Verifies that the path has the appropriate secure ACL.
   */
  private def verify(path: String): Boolean = {
    val sensitive = ZkUtils.sensitivePath(path)
    val list = zkUtils.zkConnection.getAcl(path).getKey
    list.asScala.forall(TestUtils.isAclSecure(_, sensitive))
  }

  /**
   * Verifies ACL. Secure non-sensitive paths have two ACLs (world-readable plus
   * sasl-writable); secure sensitive and unsecure paths have exactly one.
   */
  private def isAclCorrect(list: java.util.List[ACL], secure: Boolean, sensitive: Boolean): Boolean = {
    val isListSizeCorrect =
      if (secure && !sensitive)
        list.size == 2
      else
        list.size == 1
    isListSizeCorrect && list.asScala.forall(
      if (secure)
        TestUtils.isAclSecure(_, sensitive)
      else
        TestUtils.isAclUnsecure
    )
  }

  /**
   * Sets up and starts the recursive execution of deletes.
   * This is used in the testDelete and testDeleteRecursive
   * test cases.
   */
  private def deleteAllUnsecure() {
    // Temporarily disable SASL on the client so deletes are attempted unauthenticated.
    System.setProperty(JaasUtils.ZK_SASL_CLIENT, "false")
    val unsecureZkUtils = ZkUtils(zkConnect, 6000, 6000, false)
    val result: Try[Boolean] = {
      deleteRecursive(unsecureZkUtils, "/")
    }
    // Clean up before leaving the test case
    unsecureZkUtils.close()
    System.clearProperty(JaasUtils.ZK_SASL_CLIENT)

    // Fail the test if able to delete
    result match {
      case Success(_) => // All done
      case Failure(e) => fail(e.getMessage)
    }
  }

  /**
   * Tries to delete znodes recursively.
   * Returns Success when every delete attempt was rejected (the expected outcome)
   * and Failure as soon as any znode could actually be deleted.
   */
  private def deleteRecursive(zkUtils: ZkUtils, path: String): Try[Boolean] = {
    info(s"Deleting $path")
    var result: Try[Boolean] = Success(true)
    // Visit children first (depth-first) so parents are only attempted when empty.
    for (child <- zkUtils.getChildren(path))
      result = (path match {
        case "/" => deleteRecursive(zkUtils, s"/$child")
        case path => deleteRecursive(zkUtils, s"$path/$child")
      }) match {
        case Success(_) => result
        case Failure(e) => Failure(e)
      }
    path match {
      // Do not try to delete the root
      case "/" => result
      // For all other paths, try to delete it
      case path =>
        try {
          zkUtils.deletePath(path)
          // Deletion succeeding means the ACLs failed to protect the znode.
          Failure(new Exception(s"Have been able to delete $path"))
        } catch {
          case _: Exception => result
        }
    }
  }
}
| MyPureCloud/kafka | core/src/test/scala/unit/kafka/security/auth/ZkAuthorizationTest.scala | Scala | apache-2.0 | 10,238 |
import java.io.File
import java.nio.file._
import Modes._
import com.jsuereth.sbtpgp.PgpKeys
import sbt.Keys._
import sbt._
import complete.DefaultParsers._
import pl.project13.scala.sbt.JmhPlugin
import pl.project13.scala.sbt.JmhPlugin.JmhKeys.Jmh
import sbt.Package.ManifestAttributes
import sbt.plugins.SbtPlugin
import sbt.ScriptedPlugin.autoImport._
import xerial.sbt.pack.PackPlugin
import xerial.sbt.pack.PackPlugin.autoImport._
import xerial.sbt.Sonatype.autoImport._
import dotty.tools.sbtplugin.DottyPlugin.autoImport._
import dotty.tools.sbtplugin.DottyPlugin.makeScalaInstance
import dotty.tools.sbtplugin.DottyIDEPlugin.{ installCodeExtension, prepareCommand, runProcess }
import dotty.tools.sbtplugin.DottyIDEPlugin.autoImport._
import org.scalajs.sbtplugin.ScalaJSPlugin
import org.scalajs.sbtplugin.ScalaJSPlugin.autoImport._
import sbtbuildinfo.BuildInfoPlugin
import sbtbuildinfo.BuildInfoPlugin.autoImport._
import scala.util.Properties.isJavaAtLeast
/**
 * sbt auto-plugin applied to the Scala.js projects of this build: it layers the
 * bootstrapped-dotty settings on top of ScalaJSPlugin and swaps the Scala 2
 * Scala.js toolchain pieces (compiler plugin, JUnit runtime) for their
 * dotty-compatible equivalents.
 */
object MyScalaJSPlugin extends AutoPlugin {
  import Build._

  override def requires: Plugins = ScalaJSPlugin

  override def projectSettings: Seq[Setting[_]] = Def.settings(
    commonBootstrappedSettings,

    /* Remove the Scala.js compiler plugin for scalac, and enable the
     * Scala.js back-end of dotty instead.
     */
    libraryDependencies := {
      val deps = libraryDependencies.value
      deps.filterNot(_.name.startsWith("scalajs-compiler")).map(_.withDottyCompat(scalaVersion.value))
    },
    scalacOptions += "-scalajs",

    // Replace the JVM JUnit dependency by the Scala.js one
    libraryDependencies ~= {
      _.filter(!_.name.startsWith("junit-interface"))
    },
    libraryDependencies +=
      ("org.scala-js" %% "scalajs-junit-test-runtime" % scalaJSVersion % "test").withDottyCompat(scalaVersion.value),

    // Typecheck the Scala.js IR found on the classpath
    scalaJSLinkerConfig ~= (_.withCheckIR(true)),

    // Exclude all these projects from `configureIDE/launchIDE` since they
    // take time to compile, print a bunch of warnings, and are rarely edited.
    excludeFromIDE := true
  )
}
object Build {
val referenceVersion = "0.24.0-bin-20200407-2352d90-NIGHTLY"
val baseVersion = "0.24.0"
val baseSbtDottyVersion = "0.4.2"
// Versions used by the vscode extension to create a new project
// This should be the latest published releases.
// TODO: Have the vscode extension fetch these numbers from the Internet
// instead of hardcoding them ?
val publishedDottyVersion = referenceVersion
val publishedSbtDottyVersion = "0.3.4"
/** scala-library version required to compile Dotty.
*
* Both the non-bootstrapped and bootstrapped version should match, unless
* we're in the process of upgrading to a new major version of
* scala-library.
*/
def stdlibVersion(implicit mode: Mode): String = mode match {
case NonBootstrapped => "2.13.1"
case Bootstrapped => "2.13.1"
}
val dottyOrganization = "ch.epfl.lamp"
val dottyGithubUrl = "https://github.com/lampepfl/dotty"
val isRelease = sys.env.get("RELEASEBUILD") == Some("yes")
val dottyVersion = {
def isNightly = sys.env.get("NIGHTLYBUILD") == Some("yes")
if (isRelease)
baseVersion
else if (isNightly)
baseVersion + "-bin-" + VersionUtil.commitDate + "-" + VersionUtil.gitHash + "-NIGHTLY"
else
baseVersion + "-bin-SNAPSHOT"
}
val dottyNonBootstrappedVersion = dottyVersion + "-nonbootstrapped"
val sbtDottyName = "sbt-dotty"
val sbtDottyVersion = {
if (isRelease) baseSbtDottyVersion else baseSbtDottyVersion + "-SNAPSHOT"
}
val agentOptions = List(
// "-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=5005"
// "-agentpath:/home/dark/opt/yjp-2013-build-13072/bin/linux-x86-64/libyjpagent.so"
// "-agentpath:/Applications/YourKit_Java_Profiler_2015_build_15052.app/Contents/Resources/bin/mac/libyjpagent.jnilib",
// "-XX:+HeapDumpOnOutOfMemoryError", "-Xmx1g", "-Xss2m"
)
// Packages all subprojects to their jars
val packageAll = taskKey[Map[String, String]]("Package everything needed to run tests")
// Run tests with filter through vulpix test suite
val testCompilation = inputKey[Unit]("runs integration test with the supplied filter")
// Spawns a repl with the correct classpath
val repl = inputKey[Unit]("run the REPL with correct classpath")
// Used to compile files similar to ./bin/dotc script
val dotc = inputKey[Unit]("run the compiler using the correct classpath, or the user supplied classpath")
// Used to run binaries similar to ./bin/dotr script
val dotr = inputKey[Unit]("run compiled binary using the correct classpath, or the user supplied classpath")
// Compiles the documentation and static site
val genDocs = inputKey[Unit]("run dottydoc to generate static documentation site")
// Shorthand for compiling a docs site
val dottydoc = inputKey[Unit]("run dottydoc")
// Only available in vscode-dotty
val unpublish = taskKey[Unit]("Unpublish a package")
// Settings used to configure the test language server
val ideTestsCompilerVersion = taskKey[String]("Compiler version to use in IDE tests")
val ideTestsCompilerArguments = taskKey[Seq[String]]("Compiler arguments to use in IDE tests")
val ideTestsDependencyClasspath = taskKey[Seq[File]]("Dependency classpath to use in IDE tests")
val fetchScalaJSSource = taskKey[File]("Fetch the sources of Scala.js")
lazy val SourceDeps = config("sourcedeps")
// Settings shared by the build (scoped in ThisBuild). Used in build.sbt
lazy val thisBuildSettings = Def.settings(
organization := dottyOrganization,
organizationName := "LAMP/EPFL",
organizationHomepage := Some(url("http://lamp.epfl.ch")),
scalacOptions ++= Seq(
"-feature",
"-deprecation",
"-unchecked",
"-Xfatal-warnings",
"-encoding", "UTF8",
"-language:existentials,higherKinds,implicitConversions,postfixOps"
),
javacOptions in (Compile, compile) ++= Seq("-Xlint:unchecked", "-Xlint:deprecation"),
// Override `runCode` from sbt-dotty to use the language-server and
// vscode extension from the source repository of dotty instead of a
// published version.
runCode := (run in `dotty-language-server`).toTask("").value,
// Avoid various sbt craziness involving classloaders and parallelism
fork in run := true,
fork in Test := true,
parallelExecution in Test := false,
// enable verbose exception messages for JUnit
testOptions in Test += Tests.Argument(TestFrameworks.JUnit, "-a", "-v"),
)
// Settings shared globally (scoped in Global). Used in build.sbt
lazy val globalSettings = Def.settings(
onLoad := (onLoad in Global).value andThen { state =>
def exists(submodule: String) = {
val path = Paths.get(submodule)
Files.exists(path) && {
val fileStream = Files.list(path)
try fileStream.iterator().hasNext
finally fileStream.close()
}
}
// Copy default configuration from .vscode-template/ unless configuration files already exist in .vscode/
sbt.IO.copyDirectory(new File(".vscode-template/"), new File(".vscode/"), overwrite = false)
state
},
// I find supershell more distracting than helpful
useSuperShell := false,
// Credentials to release to Sonatype
credentials ++= (
for {
username <- sys.env.get("SONATYPE_USER")
password <- sys.env.get("SONATYPE_PW")
} yield Credentials("Sonatype Nexus Repository Manager", "oss.sonatype.org", username, password)
).toList,
PgpKeys.pgpPassphrase := sys.env.get("PGP_PW").map(_.toCharArray()),
PgpKeys.useGpgPinentry := true,
javaOptions ++= {
val ciOptions = // propagate if this is a CI build
sys.props.get("dotty.drone.mem") match {
case Some(prop) => List("-Xmx" + prop)
case _ => List()
}
agentOptions ::: ciOptions
}
)
lazy val commonSettings = publishSettings ++ Seq(
scalaSource in Compile := baseDirectory.value / "src",
scalaSource in Test := baseDirectory.value / "test",
javaSource in Compile := baseDirectory.value / "src",
javaSource in Test := baseDirectory.value / "test",
resourceDirectory in Compile := baseDirectory.value / "resources",
resourceDirectory in Test := baseDirectory.value / "test-resources",
// Disable scaladoc generation, it's way too slow and we'll replace it
// by dottydoc anyway. We still publish an empty -javadoc.jar to make
// sonatype happy.
sources in (Compile, doc) := Seq(),
// Prevent sbt from rewriting our dependencies
scalaModuleInfo ~= (_.map(_.withOverrideScalaVersion(false))),
libraryDependencies += "com.novocode" % "junit-interface" % "0.11" % Test,
// If someone puts a source file at the root (e.g., for manual testing),
// don't pick it up as part of any project.
sourcesInBase := false,
)
// Settings used for projects compiled only with Java
lazy val commonJavaSettings = commonSettings ++ Seq(
version := dottyVersion,
scalaVersion := referenceVersion,
// Do not append Scala versions to the generated artifacts
crossPaths := false,
// Do not depend on the Scala library
autoScalaLibrary := false,
excludeFromIDE := true
)
// Settings used when compiling dotty (both non-boostrapped and bootstrapped)
lazy val commonDottySettings = commonSettings ++ Seq(
// Manually set the standard library to use
autoScalaLibrary := false
)
lazy val commonScala2Settings = commonSettings ++ Seq(
scalaVersion := stdlibVersion(Bootstrapped),
moduleName ~= { _.stripSuffix("-scala2") },
version := dottyVersion,
target := baseDirectory.value / ".." / "out" / "scala-2" / name.value,
)
// Settings used when compiling dotty with the reference compiler
lazy val commonNonBootstrappedSettings = commonDottySettings ++ Seq(
unmanagedSourceDirectories in Compile += baseDirectory.value / "src-non-bootstrapped",
version := dottyNonBootstrappedVersion,
scalaVersion := referenceVersion,
excludeFromIDE := true,
)
// Settings used when compiling dotty with a non-bootstrapped dotty
lazy val commonBootstrappedSettings = commonDottySettings ++ Seq(
unmanagedSourceDirectories in Compile += baseDirectory.value / "src-bootstrapped",
version := dottyVersion,
scalaVersion := dottyNonBootstrappedVersion,
scalaCompilerBridgeBinaryJar := {
Some((packageBin in (`dotty-sbt-bridge`, Compile)).value)
},
// Use the same name as the non-bootstrapped projects for the artifacts
moduleName ~= { _.stripSuffix("-bootstrapped") },
// Enforce that the only Scala 2 classfiles we unpickle come from scala-library
/*
scalacOptions ++= {
val cp = (dependencyClasspath in `dotty-library` in Compile).value
val scalaLib = findArtifactPath(cp, "scala-library")
Seq("-Yscala2-unpickler", scalaLib)
},
*/
// sbt gets very unhappy if two projects use the same target
target := baseDirectory.value / ".." / "out" / "bootstrap" / name.value,
// Compile using the non-bootstrapped and non-published dotty
managedScalaInstance := false,
scalaInstance := {
val externalNonBootstrappedDeps = externalDependencyClasspath.in(`dotty-doc`, Compile).value
val scalaLibrary = findArtifact(externalNonBootstrappedDeps, "scala-library")
// IMPORTANT: We need to use actual jars to form the ScalaInstance and not
// just directories containing classfiles because sbt maintains a cache of
// compiler instances. This cache is invalidated based on timestamps
// however this is only implemented on jars, directories are never
// invalidated.
val tastyCore = packageBin.in(`tasty-core`, Compile).value
val dottyLibrary = packageBin.in(`dotty-library`, Compile).value
val dottyInterfaces = packageBin.in(`dotty-interfaces`, Compile).value
val dottyCompiler = packageBin.in(`dotty-compiler`, Compile).value
val dottyDoc = packageBin.in(`dotty-doc`, Compile).value
val allJars = Seq(tastyCore, dottyLibrary, dottyInterfaces, dottyCompiler, dottyDoc) ++ externalNonBootstrappedDeps.map(_.data)
makeScalaInstance(
state.value,
scalaVersion.value,
scalaLibrary,
dottyLibrary,
dottyCompiler,
allJars
)
},
// sbt-dotty defines `scalaInstance in doc` so we need to override it manually
scalaInstance in doc := scalaInstance.value,
)
lazy val commonBenchmarkSettings = Seq(
outputStrategy := Some(StdoutOutput),
mainClass in (Jmh, run) := Some("dotty.tools.benchmarks.Bench"), // custom main for jmh:run
javaOptions += "-DBENCH_COMPILER_CLASS_PATH=" + Attributed.data((fullClasspath in (`dotty-bootstrapped`, Compile)).value).mkString("", File.pathSeparator, ""),
javaOptions += "-DBENCH_CLASS_PATH=" + Attributed.data((fullClasspath in (`dotty-library-bootstrapped`, Compile)).value).mkString("", File.pathSeparator, "")
)
// sbt >= 0.13.12 will automatically rewrite transitive dependencies on
// any version in any organization of scala{-library,-compiler,-reflect,p}
// to have organization `scalaOrganization` and version `scalaVersion`
// (see https://github.com/sbt/sbt/pull/2634).
// This means that we need to provide dummy artefacts for these projects,
// otherwise users will get compilation errors if they happen to transitively
// depend on one of these projects.
lazy val commonDummySettings = commonBootstrappedSettings ++ Seq(
crossPaths := false,
libraryDependencies := Seq()
)
/** Projects -------------------------------------------------------------- */
val dottyCompilerBootstrappedRef = LocalProject("dotty-compiler-bootstrapped")
/** External dependencies we may want to put on the compiler classpath. */
def externalCompilerClasspathTask: Def.Initialize[Task[Def.Classpath]] =
// Even if we're running the non-bootstrapped compiler, we want the
// dependencies of the bootstrapped compiler since we want to put them on
// the compiler classpath, not the JVM classpath.
externalDependencyClasspath.in(dottyCompilerBootstrappedRef, Runtime)
// The root project:
// - aggregates other projects so that "compile", "test", etc are run on all projects at once.
// - publishes its own empty artifact "dotty" that depends on "dotty-library" and "dotty-compiler",
// this is only necessary for compatibility with sbt which currently hardcodes the "dotty" artifact name
lazy val dotty = project.in(file(".")).asDottyRoot(NonBootstrapped)
lazy val `dotty-bootstrapped` = project.asDottyRoot(Bootstrapped)
lazy val `dotty-interfaces` = project.in(file("interfaces")).
settings(commonJavaSettings)
private lazy val dottydocClasspath = Def.task {
val jars = (packageAll in `dotty-compiler`).value
val dottyLib = jars("dotty-library")
val otherDeps = (dependencyClasspath in Compile).value.map(_.data).mkString(File.pathSeparator)
val externalDeps = externalCompilerClasspathTask.value
dottyLib + File.pathSeparator + findArtifactPath(externalDeps, "scala-library")
}
lazy val commonDocSettings = Seq(
baseDirectory in (Compile, run) := baseDirectory.value / "..",
baseDirectory in Test := baseDirectory.value / "..",
libraryDependencies ++= {
val flexmarkVersion = "0.42.12"
Seq(
"com.vladsch.flexmark" % "flexmark" % flexmarkVersion,
"com.vladsch.flexmark" % "flexmark-ext-gfm-tasklist" % flexmarkVersion,
"com.vladsch.flexmark" % "flexmark-ext-gfm-tables" % flexmarkVersion,
"com.vladsch.flexmark" % "flexmark-ext-autolink" % flexmarkVersion,
"com.vladsch.flexmark" % "flexmark-ext-anchorlink" % flexmarkVersion,
"com.vladsch.flexmark" % "flexmark-ext-emoji" % flexmarkVersion,
"com.vladsch.flexmark" % "flexmark-ext-gfm-strikethrough" % flexmarkVersion,
"com.vladsch.flexmark" % "flexmark-ext-yaml-front-matter" % flexmarkVersion,
Dependencies.`jackson-dataformat-yaml`,
"nl.big-o" % "liqp" % "0.6.7"
)
}
)
// Settings for the doc-tool projects. Defines two input tasks:
//  - `genDocs`: generates the full dotty.epfl.ch documentation site,
//  - `dottydoc`: runs the dottydoc main directly with user-supplied args.
def dottyDocSettings(implicit mode: Mode) = Seq(
  connectInput in run := true,
  outputStrategy := Some(StdoutOutput),
  javaOptions ++= (javaOptions in `dotty-compiler`).value,
  // Larger stack: doc generation recurses deeply over the library sources.
  javaOptions += "-Xss3m",
  genDocs := Def.inputTaskDyn {
    val dottydocExtraArgs = spaceDelimited("<arg>").parsed
    // Make majorVersion available at dotty.epfl.ch/versions/latest-nightly-base
    // Used by sbt-dotty to resolve the latest nightly
    val majorVersion = baseVersion.take(baseVersion.lastIndexOf('.'))
    IO.write(file("./docs/_site/versions/latest-nightly-base"), majorVersion)
    // This file is used by GitHub Pages when the page is available in a custom domain
    IO.write(file("./docs/_site/CNAME"), "dotty.epfl.ch")
    // The library sources are what the generated API docs are built from.
    val sources = unmanagedSources.in(dottyLibrary, Compile).value
    val args = Seq(
      "-siteroot", "docs",
      "-project", "Dotty",
      "-project-version", dottyVersion,
      "-project-url", dottyGithubUrl,
      "-project-logo", "dotty-logo.svg",
      "-classpath", dottydocClasspath.value,
      "-Yerased-terms"
    ) ++ dottydocExtraArgs
    (runMain in Compile).toTask(
      s""" dotty.tools.dottydoc.Main ${args.mkString(" ")} ${sources.mkString(" ")}"""
    )
  }.evaluated,
  dottydoc := Def.inputTaskDyn {
    val args = spaceDelimited("<arg>").parsed
    val cp = dottydocClasspath.value
    (runMain in Compile).toTask(s" dotty.tools.dottydoc.Main -classpath $cp " + args.mkString(" "))
  }.evaluated,
)
// The documentation tool, in both bootstrap stages.
lazy val `dotty-doc` = project.in(file("doc-tool")).asDottyDoc(NonBootstrapped)
lazy val `dotty-doc-bootstrapped` = project.in(file("doc-tool")).asDottyDoc(Bootstrapped)
/** Select the doc-tool project matching the current bootstrap `mode`. */
def dottyDoc(implicit mode: Mode): Project =
  if (mode == NonBootstrapped) `dotty-doc` else `dotty-doc-bootstrapped`
/** Find an artifact with the given `name` in `classpath`.
 *
 *  Throws a `MessageOnlyException` when no classpath entry carries an
 *  artifact with that name.
 */
def findArtifact(classpath: Def.Classpath, name: String): File = {
  val matching = classpath.find(_.get(artifact.key).exists(_.name == name))
  val entry = matching.getOrElse(throw new MessageOnlyException(s"Artifact for $name not found in $classpath"))
  entry.data
}
/** Like `findArtifact` but returns the absolute path of the entry as a string */
def findArtifactPath(classpath: Def.Classpath, name: String): String = {
  val artifactFile = findArtifact(classpath, name)
  artifactFile.getAbsolutePath
}
// Settings shared between dotty-compiler and dotty-compiler-bootstrapped
lazy val commonDottyCompilerSettings = Seq(
  // set system in/out for repl
  connectInput in run := true,
  outputStrategy := Some(StdoutOutput),
  // Generate compiler.properties, used by sbt
  resourceGenerators in Compile += Def.task {
    import java.util._
    import java.text._
    val file = (resourceManaged in Compile).value / "compiler.properties"
    val dateFormat = new SimpleDateFormat("yyyyMMdd-HHmmss")
    dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"))
    val contents = //2.11.11.v20170413-090219-8a413ba7cc
      s"""version.number=${version.value}
         |maven.version.number=${version.value}
         |git.hash=${VersionUtil.gitHash}
         |copyright.string=Copyright 2002-${Calendar.getInstance().get(Calendar.YEAR)}, LAMP/EPFL
       """.stripMargin
    // Only rewrite the file when its contents changed, to avoid needless
    // downstream rebuilds.
    if (!(file.exists && IO.read(file) == contents)) {
      IO.write(file, contents)
    }
    Seq(file)
  }.taskValue,
  // get libraries onboard
  libraryDependencies ++= Seq(
    "org.scala-lang.modules" % "scala-asm" % "7.3.1-scala-1", // used by the backend
    Dependencies.`compiler-interface`,
    "org.jline" % "jline-reader" % "3.9.0", // used by the REPL
    "org.jline" % "jline-terminal" % "3.9.0",
    "org.jline" % "jline-terminal-jna" % "3.9.0" // needed for Windows
  ),
  // For convenience, change the baseDirectory when running the compiler
  baseDirectory in (Compile, run) := baseDirectory.value / "..",
  // And when running the tests
  baseDirectory in Test := baseDirectory.value / "..",
  test in Test := {
    // Exclude VulpixMetaTests
    (testOnly in Test).toTask(" -- --exclude-categories=dotty.VulpixMetaTests").value
  },
  testOptions in Test += Tests.Argument(
    TestFrameworks.JUnit,
    "--run-listener=dotty.tools.ContextEscapeDetector",
  ),
  // Spawn new JVM in run and test
  // Add git-hash used to package the distribution to the manifest to know it in runtime and report it in REPL
  packageOptions += ManifestAttributes(("Git-Hash", VersionUtil.gitHash)),
  // System properties consumed by the compiler test suite: locations of the
  // packaged jars and of the external dependencies.
  javaOptions ++= {
    val managedSrcDir = {
      // Populate the directory
      (managedSources in Compile).value
      (sourceManaged in Compile).value
    }
    val externalDeps = externalCompilerClasspathTask.value
    val jars = packageAll.value
    Seq(
      "-Ddotty.tests.dottyCompilerManagedSources=" + managedSrcDir,
      "-Ddotty.tests.classes.dottyInterfaces=" + jars("dotty-interfaces"),
      "-Ddotty.tests.classes.dottyLibrary=" + jars("dotty-library"),
      "-Ddotty.tests.classes.dottyCompiler=" + jars("dotty-compiler"),
      "-Ddotty.tests.classes.tastyCore=" + jars("tasty-core"),
      "-Ddotty.tests.classes.compilerInterface=" + findArtifactPath(externalDeps, "compiler-interface"),
      "-Ddotty.tests.classes.scalaLibrary=" + findArtifactPath(externalDeps, "scala-library"),
      "-Ddotty.tests.classes.scalaAsm=" + findArtifactPath(externalDeps, "scala-asm"),
      "-Ddotty.tests.classes.jlineTerminal=" + findArtifactPath(externalDeps, "jline-terminal"),
      "-Ddotty.tests.classes.jlineReader=" + findArtifactPath(externalDeps, "jline-reader"),
    )
  },
  javaOptions += (
    s"-Ddotty.tools.dotc.semanticdb.test=${(ThisBuild / baseDirectory).value/"tests"/"semanticdb"}"
  ),
  // Input task wrapping the compilation test suites; see the --help text
  // below for the supported flags.
  testCompilation := Def.inputTaskDyn {
    val args = spaceDelimited("<arg>").parsed
    if (args.contains("--help")) {
      println(
        s"""
           |usage: testCompilation [--help] [--from-tasty] [--update-checkfiles] [<filter>]
           |
           |By default runs tests in dotty.tools.dotc.*CompilationTests excluding tests tagged with dotty.SlowTests.
           |
           |  --help                show this message
           |  --from-tasty          runs tests in dotty.tools.dotc.FromTastyTests
           |  --update-checkfiles   override the checkfiles that did not match with the current output
           |  <filter>              substring of the path of the tests file
           |
         """.stripMargin
      )
      // Dummy task invocation so the dynamic task still returns a task.
      (testOnly in Test).toTask(" not.a.test")
    }
    else {
      val updateCheckfile = args.contains("--update-checkfiles")
      val fromTasty = args.contains("--from-tasty")
      val args1 = if (updateCheckfile | fromTasty) args.filter(x => x != "--update-checkfiles" && x != "--from-tasty") else args
      val test = if (fromTasty) "dotty.tools.dotc.FromTastyTests" else "dotty.tools.dotc.*CompilationTests"
      val cmd = s" $test -- --exclude-categories=dotty.SlowTests" +
        (if (updateCheckfile) " -Ddotty.tests.updateCheckfiles=TRUE" else "") +
        (if (args1.nonEmpty) " -Ddotty.tests.filter=" + args1.mkString(" ") else "")
      (testOnly in Test).toTask(cmd)
    }
  }.evaluated,
  // Run a compiled dotty application with the dotty/scala libraries on its
  // classpath (and optionally the whole compiler with `-with-compiler`).
  dotr := {
    val args: List[String] = spaceDelimited("<arg>").parsed.toList
    val externalDeps = externalCompilerClasspathTask.value
    val jars = packageAll.value
    val scalaLib = findArtifactPath(externalDeps, "scala-library")
    val dottyLib = jars("dotty-library")
    def run(args: List[String]): Unit = {
      val fullArgs = insertClasspathInArgs(args, List(".", dottyLib, scalaLib).mkString(File.pathSeparator))
      runProcess("java" :: fullArgs, wait = true)
    }
    if (args.isEmpty) {
      println("Couldn't run `dotr` without args. Use `repl` to run the repl or add args to run the dotty application")
    } else if (scalaLib == "") {
      println("Couldn't find scala-library on classpath, please run using script in bin dir instead")
    } else if (args.contains("-with-compiler")) {
      val args1 = args.filter(_ != "-with-compiler")
      val asm = findArtifactPath(externalDeps, "scala-asm")
      val dottyCompiler = jars("dotty-compiler")
      val dottyStaging = jars("dotty-staging")
      val dottyTastyInspector = jars("dotty-tasty-inspector")
      val dottyInterfaces = jars("dotty-interfaces")
      val tastyCore = jars("tasty-core")
      run(insertClasspathInArgs(args1, List(dottyCompiler, dottyInterfaces, asm, dottyStaging, dottyTastyInspector, tastyCore).mkString(File.pathSeparator)))
    } else run(args)
  },
  run := dotc.evaluated,
  dotc := runCompilerMain().evaluated,
  repl := runCompilerMain(repl = true).evaluated,
  /* Add the sources of scalajs-ir.
   * To guarantee that dotty can bootstrap without depending on a version
   * of scalajs-ir built with a different Scala compiler, we add its
   * sources instead of depending on the binaries.
   */
  ivyConfigurations += SourceDeps.hide,
  transitiveClassifiers := Seq("sources"),
  libraryDependencies +=
    ("org.scala-js" %% "scalajs-ir" % scalaJSVersion % "sourcedeps").withDottyCompat(scalaVersion.value),
  // Unpack the scalajs-ir source jar into managed sources (cached on the
  // jar's timestamp) so it is compiled together with this project.
  sourceGenerators in Compile += Def.task {
    val s = streams.value
    val cacheDir = s.cacheDirectory
    val trgDir = (sourceManaged in Compile).value / "scalajs-ir-src"
    val report = updateClassifiers.value
    val scalaJSIRSourcesJar = report.select(
      configuration = configurationFilter("sourcedeps"),
      module = (_: ModuleID).name.startsWith("scalajs-ir_"),
      artifact = artifactFilter(`type` = "src")).headOption.getOrElse {
      sys.error(s"Could not fetch scalajs-ir sources")
    }
    FileFunction.cached(cacheDir / s"fetchScalaJSIRSource",
      FilesInfo.lastModified, FilesInfo.exists) { dependencies =>
      s.log.info(s"Unpacking scalajs-ir sources to $trgDir...")
      if (trgDir.exists)
        IO.delete(trgDir)
      IO.createDirectory(trgDir)
      IO.unzip(scalaJSIRSourcesJar, trgDir)
      (trgDir ** "*.scala").get.toSet
    } (Set(scalaJSIRSourcesJar)).toSeq
  }.taskValue,
)
// Dynamic input task backing `run`, `dotc` and `repl`: selects the main class
// from the mode flags, assembles the extra classpath (library, and optionally
// the full compiler) and forwards the remaining arguments.
def runCompilerMain(repl: Boolean = false) = Def.inputTaskDyn {
  val log = streams.value.log
  val externalDeps = externalCompilerClasspathTask.value
  val jars = packageAll.value
  val scalaLib = findArtifactPath(externalDeps, "scala-library")
  val dottyLib = jars("dotty-library")
  val dottyCompiler = jars("dotty-compiler")
  val args0: List[String] = spaceDelimited("<arg>").parsed.toList
  val decompile = args0.contains("-decompile")
  val printTasty = args0.contains("-print-tasty")
  val debugFromTasty = args0.contains("-Ythrough-tasty")
  // Strip the mode flags consumed here; -print-tasty is deliberately kept,
  // it is also understood by the decompiler main.
  val args = args0.filter(arg => arg != "-repl" && arg != "-decompile" &&
    arg != "-with-compiler" && arg != "-Ythrough-tasty")
  val main =
    if (repl) "dotty.tools.repl.Main"
    else if (decompile || printTasty) "dotty.tools.dotc.decompiler.Main"
    else if (debugFromTasty) "dotty.tools.dotc.fromtasty.Debug"
    else "dotty.tools.dotc.Main"
  var extraClasspath = Seq(scalaLib, dottyLib)
  // The decompiler reads tasty from the current directory unless the user
  // provided an explicit classpath.
  if ((decompile || printTasty) && !args.contains("-classpath"))
    extraClasspath ++= Seq(".")
  if (args0.contains("-with-compiler")) {
    if (scalaVersion.value == referenceVersion) {
      log.error("-with-compiler should only be used with a bootstrapped compiler")
    }
    val dottyInterfaces = jars("dotty-interfaces")
    val dottyStaging = jars("dotty-staging")
    val dottyTastyInspector = jars("dotty-tasty-inspector")
    val tastyCore = jars("tasty-core")
    val asm = findArtifactPath(externalDeps, "scala-asm")
    extraClasspath ++= Seq(dottyCompiler, dottyInterfaces, asm, dottyStaging, dottyTastyInspector, tastyCore)
  }
  val fullArgs = main :: insertClasspathInArgs(args, extraClasspath.mkString(File.pathSeparator))
  (runMain in Compile).toTask(fullArgs.mkString(" ", " ", ""))
}
/** Prepend a `-classpath` option to `args`, merging with any classpath the
 *  caller already supplied (the caller's entries keep precedence over `cp`).
 */
def insertClasspathInArgs(args: List[String], cp: String): List[String] = {
  val (prefix, suffix) = args.span(_ != "-classpath")
  val mergedCp = suffix match {
    case _ :: existing :: _ => existing + File.pathSeparator + cp
    case _                  => cp
  }
  "-classpath" :: mergedCp :: prefix ::: suffix.drop(2)
}
// Compiler settings specific to the non-bootstrapped stage.
lazy val nonBootstrapedDottyCompilerSettings = commonDottyCompilerSettings ++ Seq(
  // packageAll packages all and then returns a map with the abs location
  packageAll := Def.taskDyn { // Use a dynamic task to avoid loops when loading the settings
    Def.task {
      Map(
        "dotty-interfaces" -> packageBin.in(`dotty-interfaces`, Compile).value,
        "dotty-compiler" -> packageBin.in(Compile).value,
        "tasty-core" -> packageBin.in(`tasty-core`, Compile).value,
        // NOTE: Using dotty-library-bootstrapped here is intentional: when
        // running the compiler, we should always have the bootstrapped
        // library on the compiler classpath since the non-bootstrapped one
        // may not be binary-compatible.
        "dotty-library" -> packageBin.in(`dotty-library-bootstrapped`, Compile).value
      ).mapValues(_.getAbsolutePath)
    }
  }.value,
  // Tests requiring a bootstrapped compiler cannot run in this stage.
  testOptions in Test += Tests.Argument(
    TestFrameworks.JUnit,
    "--exclude-categories=dotty.BootstrappedOnlyTests",
  ),
  // increase stack size for non-bootstrapped compiler, because some code
  // is only tail-recursive after bootstrap
  javaOptions in Test += "-Xss2m"
)
// Compiler settings specific to the bootstrapped stage: the staging and
// tasty-inspector jars exist only here, so extend packageAll and the test
// system properties accordingly.
lazy val bootstrapedDottyCompilerSettings = commonDottyCompilerSettings ++ Seq(
  javaOptions ++= {
    val jars = packageAll.value
    Seq(
      "-Ddotty.tests.classes.dottyStaging=" + jars("dotty-staging"),
      "-Ddotty.tests.classes.dottyTastyInspector=" + jars("dotty-tasty-inspector"),
    )
  },
  // Reuse the non-bootstrapped map, overriding the entries that must point
  // at bootstrapped artifacts.
  packageAll := {
    packageAll.in(`dotty-compiler`).value ++ Seq(
      "dotty-compiler" -> packageBin.in(Compile).value.getAbsolutePath,
      "dotty-staging" -> packageBin.in(LocalProject("dotty-staging"), Compile).value.getAbsolutePath,
      "dotty-tasty-inspector" -> packageBin.in(LocalProject("dotty-tasty-inspector"), Compile).value.getAbsolutePath,
      "tasty-core" -> packageBin.in(LocalProject("tasty-core-bootstrapped"), Compile).value.getAbsolutePath,
    )
  }
)
/** Select the compiler settings matching the current bootstrap `mode`. */
def dottyCompilerSettings(implicit mode: Mode): sbt.Def.SettingsDefinition =
  mode match {
    case NonBootstrapped => nonBootstrapedDottyCompilerSettings
    case Bootstrapped    => bootstrapedDottyCompilerSettings
  }
// The compiler itself, in both bootstrap stages (same source directory).
lazy val `dotty-compiler` = project.in(file("compiler")).asDottyCompiler(NonBootstrapped)
lazy val `dotty-compiler-bootstrapped` = project.in(file("compiler")).asDottyCompiler(Bootstrapped)
/** Select the compiler project matching the current bootstrap `mode`. */
def dottyCompiler(implicit mode: Mode): Project =
  if (mode == NonBootstrapped) `dotty-compiler` else `dotty-compiler-bootstrapped`
// Settings shared between dotty-library, dotty-library-bootstrapped and dotty-library-bootstrappedJS
lazy val dottyLibrarySettings = Seq(
  scalacOptions in Compile ++= Seq(
    // Needed so that the library sources are visible when `dotty.tools.dotc.core.Definitions#init` is called
    "-sourcepath", (sourceDirectories in Compile).value.map(_.getAbsolutePath).distinct.mkString(File.pathSeparator),
    // support declaration of scala.compiletime.erasedValue
    "-Yerased-terms"
  ),
)
// The standard library, in both bootstrap stages (same source directory).
lazy val `dotty-library` = project.in(file("library")).asDottyLibrary(NonBootstrapped)
lazy val `dotty-library-bootstrapped`: Project = project.in(file("library")).asDottyLibrary(Bootstrapped)
/** Select the library project matching the current bootstrap `mode`. */
def dottyLibrary(implicit mode: Mode): Project =
  if (mode == NonBootstrapped) `dotty-library` else `dotty-library-bootstrapped`
/** The dotty standard library compiled with the Scala.js back-end, to produce
 *  the corresponding .sjsir files.
 *
 *  This artifact must be on the classpath on every "Dotty.js" project.
 *
 *  Currently, only a very small fraction of the dotty library is actually
 *  included in this project, and hence available to Dotty.js projects. More
 *  will be added in the future as things are confirmed to be supported.
 */
lazy val `dotty-library-bootstrappedJS`: Project = project.in(file("library-js")).
  asDottyLibrary(Bootstrapped).
  enablePlugins(MyScalaJSPlugin).
  settings(
    // Compile from the same sources as the JVM bootstrapped library.
    unmanagedSourceDirectories in Compile :=
      (unmanagedSourceDirectories in (`dotty-library-bootstrapped`, Compile)).value,
  )
// tasty-core must cross-compile: extend an existing -language: flag with
// Scala2Compat, or fall back to -source:3.0-migration when none is set.
lazy val tastyCoreSettings = Seq(
  scalacOptions ~= { old =>
    val (language, other) = old.partition(_.startsWith("-language:"))
    // NOTE(review): only the first -language: option is kept; any further
    // ones are discarded — confirm at most one is ever configured.
    other :+ (language.headOption.map(_ + ",Scala2Compat").getOrElse("-source:3.0-migration"))
  }
)
// The tasty reader/writer, in both bootstrap stages plus a Scala 2 build.
lazy val `tasty-core` = project.in(file("tasty")).asTastyCore(NonBootstrapped)
lazy val `tasty-core-bootstrapped`: Project = project.in(file("tasty")).asTastyCore(Bootstrapped)
lazy val `tasty-core-scala2`: Project = project.in(file("tasty")).asTastyCoreScala2
/** Select the tasty-core project matching the current bootstrap `mode`. */
def tastyCore(implicit mode: Mode): Project =
  if (mode == NonBootstrapped) `tasty-core` else `tasty-core-bootstrapped`
// Runtime support for staging (run-time multi-stage programming).
lazy val `dotty-staging` = project.in(file("staging")).
  withCommonSettings(Bootstrapped).
  // We want the compiler to be present in the compiler classpath when compiling this project but not
  // when compiling a project that depends on dotty-staging (see sbt-dotty/sbt-test/sbt-dotty/quoted-example-project),
  // but we always need it to be present on the JVM classpath at runtime.
  dependsOn(dottyCompiler(Bootstrapped) % "provided; compile->runtime; test->test").
  settings(commonBootstrappedSettings).
  settings(
    javaOptions := (javaOptions in `dotty-compiler-bootstrapped`).value
  )
// Library for inspecting TASTy files of compiled code.
lazy val `dotty-tasty-inspector` = project.in(file("tasty-inspector")).
  withCommonSettings(Bootstrapped).
  // We want the compiler to be present in the compiler classpath when compiling this project but not
  // when compiling a project that depends on dotty-tasty-inspector (see sbt-dotty/sbt-test/sbt-dotty/tasty-inspector-example-project),
  // but we always need it to be present on the JVM classpath at runtime.
  dependsOn(dottyCompiler(Bootstrapped) % "provided; compile->runtime; test->test").
  settings(commonBootstrappedSettings).
  settings(
    javaOptions := (javaOptions in `dotty-compiler-bootstrapped`).value
  )
// Compiler bridge allowing sbt/zinc to drive the dotty compiler.
lazy val `dotty-sbt-bridge` = project.in(file("sbt-bridge/src")).
  // We cannot depend on any bootstrapped project to compile the bridge, since the
  // bridge is needed to compile these projects.
  dependsOn(dottyDoc(NonBootstrapped) % Provided).
  settings(commonJavaSettings).
  settings(
    description := "sbt compiler bridge for Dotty",
    sources in Test := Seq(),
    // The project root doubles as the source root for both languages.
    scalaSource in Compile := baseDirectory.value,
    javaSource in Compile := baseDirectory.value,
    // Referring to the other project using a string avoids an infinite loop
    // when sbt reads the settings.
    test in Test := (test in (LocalProject("dotty-sbt-bridge-tests"), Test)).value,
    libraryDependencies += Dependencies.`compiler-interface` % Provided
  )
// We use a separate project for the bridge tests since they can only be run
// with the bootstrapped library on the classpath.
lazy val `dotty-sbt-bridge-tests` = project.in(file("sbt-bridge/test")).
  dependsOn(dottyCompiler(Bootstrapped) % Test).
  settings(commonBootstrappedSettings).
  settings(
    sources in Compile := Seq(),
    scalaSource in Test := baseDirectory.value,
    javaSource in Test := baseDirectory.value,
    // Tests disabled until zinc-api-info cross-compiles with 2.13,
    // alternatively we could just copy in sources the part of zinc-api-info we need.
    sources in Test := Seq(),
    // libraryDependencies += (Dependencies.`zinc-api-info` % Test).withDottyCompat(scalaVersion.value)
  )
// The Dotty IDE language server (LSP implementation on top of lsp4j).
lazy val `dotty-language-server` = project.in(file("language-server")).
  dependsOn(dottyCompiler(Bootstrapped)).
  settings(commonBootstrappedSettings).
  settings(
    // Sources representing the shared configuration file used to communicate between the sbt-dotty
    // plugin and the language server
    unmanagedSourceDirectories in Compile += baseDirectory.value / "../sbt-dotty/src/dotty/tools/sbtplugin/config",
    libraryDependencies ++= Seq(
      "org.eclipse.lsp4j" % "org.eclipse.lsp4j" % "0.6.0",
      Dependencies.`jackson-databind`
    ),
    // Work around https://github.com/eclipse/lsp4j/issues/295
    dependencyOverrides += "org.eclipse.xtend" % "org.eclipse.xtend.lib" % "2.16.0",
    javaOptions := (javaOptions in `dotty-compiler-bootstrapped`).value,
    // `run` launches VSCode in extension-development mode, pointing it at this
    // language server instance.
    run := Def.inputTaskDyn {
      val inputArgs = spaceDelimited("<arg>").parsed
      val mainClass = "dotty.tools.languageserver.Main"
      val extensionPath = (baseDirectory in `vscode-dotty`).value.getAbsolutePath
      val codeArgs =
        s"--extensionDevelopmentPath=$extensionPath" +:
          (if (inputArgs.isEmpty) List((baseDirectory.value / "..").getAbsolutePath) else inputArgs)
      val clientCommand = prepareCommand(codeCommand.value ++ codeArgs)
      val allArgs = "-client_command" +: clientCommand
      runTask(Runtime, mainClass, allArgs: _*)
    }.dependsOn(compile in (`vscode-dotty`, Compile)).evaluated
  ).
  settings(
    // Build-info keys consumed by the IDE test infrastructure.
    ideTestsCompilerVersion := (version in `dotty-compiler`).value,
    ideTestsCompilerArguments := Seq(),
    ideTestsDependencyClasspath := {
      val dottyLib = (classDirectory in `dotty-library-bootstrapped` in Compile).value
      val scalaLib =
        (dependencyClasspath in `dotty-library-bootstrapped` in Compile)
          .value
          .map(_.data)
          // FIX: the pattern was "scala-library.*\\\\.jar", whose regex
          // ("scala-library.*\\.jar") requires a literal backslash inside the
          // file name and therefore never matched any jar; the corrected
          // pattern matches names like "scala-library-2.13.1.jar".
          .filter(_.getName.matches("scala-library.*\\.jar"))
          .toList
      dottyLib :: scalaLib
    },
    buildInfoKeys in Test := Seq[BuildInfoKey](
      ideTestsCompilerVersion,
      ideTestsCompilerArguments,
      ideTestsDependencyClasspath
    ),
    buildInfoPackage in Test := "dotty.tools.languageserver.util.server",
    BuildInfoPlugin.buildInfoScopedSettings(Test),
    BuildInfoPlugin.buildInfoDefaultSettings
  )
/** A sandbox to play with the Scala.js back-end of dotty.
 *
 *  This sandbox is compiled with dotty with support for Scala.js. It can be
 *  used like any regular Scala.js project. In particular, `fastOptJS` will
 *  produce a .js file, and `run` will run the JavaScript code with a JS VM.
 *
 *  Simply running `dotty/run -scalajs` without this sandbox is not very
 *  useful, as that would not provide the linker and JS runners.
 */
lazy val sjsSandbox = project.in(file("sandbox/scalajs")).
  enablePlugins(MyScalaJSPlugin).
  dependsOn(`dotty-library-bootstrappedJS`).
  settings(
    // Required to run Scala.js tests.
    fork in Test := false,
    scalaJSUseMainModuleInitializer := true,
  )
/** Scala.js test suite.
 *
 *  This project downloads the sources of the upstream Scala.js test suite,
 *  and tests them with the dotty Scala.js back-end. Currently, only a very
 *  small fraction of the upstream test suite is actually compiled and run.
 *  It will grow in the future, as more stuff is confirmed to be supported.
 */
lazy val sjsJUnitTests = project.in(file("tests/sjs-junit")).
  enablePlugins(MyScalaJSPlugin).
  dependsOn(`dotty-library-bootstrappedJS`).
  settings(
    scalacOptions --= Seq("-Xfatal-warnings", "-deprecation"),
    // Required to run Scala.js tests.
    fork in Test := false,
    sourceDirectory in fetchScalaJSSource := target.value / s"scala-js-src-$scalaJSVersion",
    // Clone the upstream scala-js repo (if needed) and check out the tag
    // matching the scalaJSVersion in use.
    fetchScalaJSSource := {
      import org.eclipse.jgit.api._
      val s = streams.value
      val ver = scalaJSVersion
      val trgDir = (sourceDirectory in fetchScalaJSSource).value
      if (!trgDir.exists) {
        s.log.info(s"Fetching Scala.js source version $ver")
        IO.createDirectory(trgDir)
        new CloneCommand()
          .setDirectory(trgDir)
          .setURI("https://github.com/scala-js/scala-js.git")
          .call()
      }
      // Checkout proper ref. We do this anyway so we fail if something is wrong
      val git = Git.open(trgDir)
      s.log.info(s"Checking out Scala.js source version $ver")
      git.checkout().setName(s"v$ver").call()
      trgDir
    },
    // We need JUnit in the Compile configuration
    libraryDependencies +=
      ("org.scala-js" %% "scalajs-junit-test-runtime" % scalaJSVersion).withDottyCompat(scalaVersion.value),
    // Generate the BuildInfo object the upstream test suite expects, derived
    // from the active linker configuration.
    sourceGenerators in Compile += Def.task {
      import org.scalajs.linker.interface.CheckedBehavior
      val stage = scalaJSStage.value
      val linkerConfig = stage match {
        case FastOptStage => (scalaJSLinkerConfig in (Compile, fastOptJS)).value
        case FullOptStage => (scalaJSLinkerConfig in (Compile, fullOptJS)).value
      }
      val moduleKind = linkerConfig.moduleKind
      val sems = linkerConfig.semantics
      ConstantHolderGenerator.generate(
        (sourceManaged in Compile).value,
        "org.scalajs.testsuite.utils.BuildInfo",
        "scalaVersion" -> scalaVersion.value,
        "hasSourceMaps" -> false, //MyScalaJSPlugin.wantSourceMaps.value,
        "isNoModule" -> (moduleKind == ModuleKind.NoModule),
        "isESModule" -> (moduleKind == ModuleKind.ESModule),
        "isCommonJSModule" -> (moduleKind == ModuleKind.CommonJSModule),
        "isFullOpt" -> (stage == FullOptStage),
        "compliantAsInstanceOfs" -> (sems.asInstanceOfs == CheckedBehavior.Compliant),
        "compliantArrayIndexOutOfBounds" -> (sems.arrayIndexOutOfBounds == CheckedBehavior.Compliant),
        "compliantModuleInit" -> (sems.moduleInit == CheckedBehavior.Compliant),
        "strictFloats" -> sems.strictFloats,
        "productionMode" -> sems.productionMode,
        "es2015" -> linkerConfig.esFeatures.useECMAScript2015,
      )
    }.taskValue,
    // Upstream main sources, minus files known not to compile yet.
    managedSources in Compile ++= {
      val dir = fetchScalaJSSource.value / "test-suite/js/src/main/scala"
      val filter = (
        ("*.scala": FileFilter)
          -- "Typechecking*.scala"
          -- "NonNativeTypeTestSeparateRun.scala"
      )
      (dir ** filter).get
    },
    // Upstream test sources; the exclusion lists enumerate tests that are
    // not yet supported by the dotty Scala.js back-end.
    managedSources in Test ++= {
      val dir = fetchScalaJSSource.value / "test-suite"
      (
        (dir / "shared/src/test/scala/org/scalajs/testsuite/compiler" ** (("*.scala":FileFilter) -- "RegressionTest.scala" -- "ReflectiveCallTest.scala")).get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/javalib/lang" ** "*.scala").get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/javalib/io" ** (("*.scala": FileFilter) -- "ReadersTest.scala")).get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/javalib/math" ** "*.scala").get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/javalib/net" ** "*.scala").get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/javalib/security" ** "*.scala").get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/javalib/util/regex" ** "*.scala").get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/javalib/util/concurrent" ** "*.scala").get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/javalib/util" * (("*.scala": FileFilter)
          -- "AbstractListTest.scala" -- "AbstractMapTest.scala" -- "AbstractSetTest.scala" -- "ArrayDequeTest.scala" -- "ArrayListTest.scala"
          -- "CollectionsOnCheckedCollectionTest.scala" -- "CollectionsOnCheckedListTest.scala" -- "CollectionsOnCheckedMapTest.scala" -- "CollectionsOnCheckedSetTest.scala"
          -- "CollectionsOnCollectionsTest.scala" -- "CollectionsOnListsTest.scala" -- "CollectionsOnMapsTest.scala" -- "CollectionsOnSetFromMapTest.scala" -- "CollectionsOnSetsTest.scala"
          -- "CollectionsOnSynchronizedCollectionTest.scala" -- "CollectionsOnSynchronizedListTest.scala" -- "CollectionsOnSynchronizedMapTest.scala" -- "CollectionsOnSynchronizedSetTest.scala" -- "CollectionsTest.scala"
          -- "DequeTest.scala" -- "EventObjectTest.scala" -- "FormatterTest.scala" -- "HashMapTest.scala" -- "HashSetTest.scala" -- "IdentityHashMapTest.scala"
          -- "LinkedHashMapTest.scala" -- "LinkedHashSetTest.scala" -- "LinkedListTest.scala"
          -- "PriorityQueueTest.scala" -- "SortedMapTest.scala" -- "SortedSetTest.scala" -- "TreeSetTest.scala")).get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/utils" ** "*.scala").get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/junit" ** "*.scala").get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/niobuffer" ** (("*.scala": FileFilter) -- "ByteBufferTest.scala")).get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/niocharset" ** (("*.scala": FileFilter) -- "BaseCharsetTest.scala" -- "Latin1Test.scala" -- "USASCIITest.scala" -- "UTF16Test.scala" -- "UTF8Test.scala")).get
        ++ (dir / "shared/src/test/scala/org/scalajs/testsuite/scalalib" ** (("*.scala": FileFilter) -- "ArrayBuilderTest.scala" -- "ClassTagTest.scala" -- "EnumerationTest.scala" -- "SymbolTest.scala")).get
        ++ (dir / "shared/src/test/require-sam" ** "*.scala").get
        ++ (dir / "shared/src/test/require-jdk8/org/scalajs/testsuite/compiler" ** (("*.scala": FileFilter) -- "DefaultMethodsTest.scala")).get
        ++ (dir / "shared/src/test/require-jdk8/org/scalajs/testsuite/javalib/lang" ** "*.scala").get
        ++ (dir / "shared/src/test/require-jdk8/org/scalajs/testsuite/javalib/util" ** (("*.scala": FileFilter) -- "CollectionsOnCopyOnWriteArrayListTestOnJDK8.scala")).get
        ++ (dir / "shared/src/test/require-jdk7/org/scalajs/testsuite/javalib/io" ** "*.scala").get
        ++ (dir / "shared/src/test/require-jdk7/org/scalajs/testsuite/javalib/lang" ** "*.scala").get
        ++ (dir / "shared/src/test/require-jdk7/org/scalajs/testsuite/javalib/util" ** (("*.scala": FileFilter) -- "ObjectsTestOnJDK7.scala")).get
      )
    }
  )
// Benchmark projects and the tastydoc tool with its input project.
lazy val `dotty-bench` = project.in(file("bench")).asDottyBench(NonBootstrapped)
lazy val `dotty-bench-bootstrapped` = project.in(file("bench")).asDottyBench(Bootstrapped)
lazy val `dotty-bench-run` = project.in(file("bench-run")).asDottyBench(Bootstrapped)
lazy val `dotty-tastydoc` = project.in(file("tastydoc")).asDottyTastydoc(Bootstrapped)
lazy val `dotty-tastydoc-input` = project.in(file("tastydoc/input")).asDottyTastydocInput(Bootstrapped)
// Depend on dotty-library so that sbt projects using dotty automatically
// depend on the dotty-library
lazy val `scala-library` = project.
  dependsOn(`dotty-library-bootstrapped`).
  settings(commonDummySettings).
  settings(
    // Need a direct dependency on the real scala-library even though we indirectly
    // depend on it via dotty-library, because sbt may rewrite dependencies
    // (see https://github.com/sbt/sbt/pull/2634), but won't rewrite the direct
    // dependencies of scala-library (see https://github.com/sbt/sbt/pull/2897)
    libraryDependencies += "org.scala-lang" % "scala-library" % stdlibVersion(Bootstrapped)
  )
// Dummy projects published under the Scala 2 artifact names, so dependency
// resolution in downstream builds finds something sensible.
lazy val `scala-compiler` = project.
  settings(commonDummySettings)
lazy val `scala-reflect` = project.
  settings(commonDummySettings).
  settings(
    libraryDependencies := Seq("org.scala-lang" % "scala-reflect" % stdlibVersion(Bootstrapped))
  )
lazy val scalap = project.
  settings(commonDummySettings).
  settings(
    libraryDependencies := Seq("org.scala-lang" % "scalap" % stdlibVersion(Bootstrapped))
  )
// sbt plugin to use Dotty in your own build, see
// https://github.com/lampepfl/dotty-example-project for usage.
lazy val `sbt-dotty` = project.in(file("sbt-dotty")).
  enablePlugins(SbtPlugin).
  settings(commonSettings).
  settings(
    name := sbtDottyName,
    version := sbtDottyVersion,
    // Keep in sync with inject-sbt-dotty.sbt
    libraryDependencies ++= Seq(
      Dependencies.`jackson-databind`,
      Dependencies.`compiler-interface`
    ),
    // Shared config sources for communicating with the language server.
    unmanagedSourceDirectories in Compile +=
      baseDirectory.value / "../language-server/src/dotty/tools/languageserver/config",
    sbtTestDirectory := baseDirectory.value / "sbt-test",
    scriptedLaunchOpts ++= Seq(
      "-Dplugin.version=" + version.value,
      "-Dplugin.scalaVersion=" + dottyVersion,
      "-Dsbt.boot.directory=" + ((baseDirectory in ThisBuild).value / ".sbt-scripted").getAbsolutePath // Workaround sbt/sbt#3469
    ),
    // Pass along ivy home and repositories settings to sbt instances run from the tests
    scriptedLaunchOpts ++= {
      val repositoryPath = (io.Path.userHome / ".sbt" / "repositories").absolutePath
      s"-Dsbt.repository.config=$repositoryPath" ::
        ivyPaths.value.ivyHome.map("-Dsbt.ivy.home=" + _.getAbsolutePath).toList
    },
    scriptedBufferLog := true,
    // The scripted tests resolve the locally published artifacts, so publish
    // everything they might need before running.
    scripted := scripted.dependsOn(
      publishLocal in `dotty-sbt-bridge`,
      publishLocal in `dotty-interfaces`,
      publishLocal in `dotty-compiler-bootstrapped`,
      publishLocal in `dotty-library-bootstrapped`,
      publishLocal in `tasty-core-bootstrapped`,
      publishLocal in `dotty-staging`,
      publishLocal in `dotty-tasty-inspector`,
      publishLocal in `scala-library`,
      publishLocal in `scala-reflect`,
      publishLocal in `dotty-doc-bootstrapped`,
      publishLocal in `dotty-bootstrapped` // Needed because sbt currently hardcodes the dotty artifact
    ).evaluated
  )
// The VSCode extension for the Dotty IDE; builds and packages via npm/vsce.
lazy val `vscode-dotty` = project.in(file("vscode-dotty")).
  settings(commonSettings).
  settings(
    version := "0.1.17-snapshot", // Keep in sync with package.json
    autoScalaLibrary := false,
    publishArtifact := false,
    resourceGenerators in Compile += Def.task {
      // Resources that will be copied when bootstrapping a new project
      val buildSbtFile = baseDirectory.value / "out" / "build.sbt"
      IO.write(buildSbtFile,
        s"""scalaVersion := "$publishedDottyVersion"""")
      val dottyPluginSbtFile = baseDirectory.value / "out" / "dotty-plugin.sbt"
      IO.write(dottyPluginSbtFile,
        s"""addSbtPlugin("$dottyOrganization" % "$sbtDottyName" % "$publishedSbtDottyVersion")""")
      Seq(buildSbtFile, dottyPluginSbtFile)
    },
    // "Compile" = run npm install (when package.json changed), then the
    // TypeScript compiler; produces no JVM classes.
    compile in Compile := Def.task {
      val workingDir = baseDirectory.value
      val coursier = workingDir / "out" / "coursier"
      val packageJson = workingDir / "package.json"
      if (!coursier.exists || packageJson.lastModified > coursier.lastModified)
        runProcess(Seq("npm", "install"), wait = true, directory = Some(workingDir))
      val tsc = workingDir / "node_modules" / ".bin" / "tsc"
      runProcess(Seq(tsc.getAbsolutePath, "--pretty", "--project", workingDir.getAbsolutePath), wait = true)
      // vscode-dotty depends on scala-lang.scala for syntax highlighting,
      // this is not automatically installed when starting the extension in development mode
      // (--extensionDevelopmentPath=...)
      installCodeExtension(codeCommand.value, "scala-lang.scala")
      sbt.internal.inc.Analysis.Empty
    }.dependsOn(managedResources in Compile).value,
    // Package / publish the extension with vsce instead of the usual jars.
    sbt.Keys.`package`:= {
      runProcess(Seq("vsce", "package"), wait = true, directory = Some(baseDirectory.value))
      baseDirectory.value / s"dotty-${version.value}.vsix"
    },
    unpublish := {
      runProcess(Seq("vsce", "unpublish"), wait = true, directory = Some(baseDirectory.value))
    },
    publish := {
      runProcess(Seq("vsce", "publish"), wait = true, directory = Some(baseDirectory.value))
    },
    // `run` opens VSCode with the extension loaded in development mode.
    run := Def.inputTask {
      val inputArgs = spaceDelimited("<arg>").parsed
      val codeArgs = if (inputArgs.isEmpty) List((baseDirectory.value / "..").getAbsolutePath) else inputArgs
      val extensionPath = baseDirectory.value.getAbsolutePath
      val processArgs = List(s"--extensionDevelopmentPath=$extensionPath") ++ codeArgs
      runProcess(codeCommand.value ++ processArgs, wait = true)
    }.dependsOn(compile in Compile).evaluated
  )
// Task keys used by the community-build project below.
val prepareCommunityBuild = taskKey[Unit]("Publish local the compiler and the sbt plugin. Also store the versions of the published local artefacts in two files, community-build/{dotty-bootstrapped.version,sbt-dotty-sbt}.")
val updateCommunityBuild = taskKey[Unit]("Updates the community build.")
// Harness that builds a set of open-source projects against the locally
// published compiler to detect regressions.
lazy val `community-build` = project.in(file("community-build")).
  dependsOn(dottyLibrary(Bootstrapped)).
  settings(commonBootstrappedSettings).
  settings(
    // Publish everything the community projects need, then record the
    // published versions in files the harness reads.
    prepareCommunityBuild := {
      (publishLocal in `dotty-sbt-bridge`).value
      (publishLocal in `dotty-interfaces`).value
      (publishLocal in `scala-library`).value
      (publishLocal in `scala-reflect`).value
      (publishLocal in `tasty-core-bootstrapped`).value
      (publishLocal in `dotty-library-bootstrapped`).value
      (publishLocal in `dotty-doc-bootstrapped`).value
      (publishLocal in `dotty-compiler-bootstrapped`).value
      (publishLocal in `sbt-dotty`).value
      (publishLocal in `dotty-bootstrapped`).value
      // (publishLocal in `dotty-staging`).value
      val pluginText =
        s"""updateOptions in Global ~= (_.withLatestSnapshots(false))
           |addSbtPlugin("ch.epfl.lamp" % "sbt-dotty" % "$sbtDottyVersion")""".stripMargin
      IO.write(baseDirectory.value / "sbt-dotty-sbt", pluginText)
      IO.write(baseDirectory.value / "dotty-bootstrapped.version", dottyVersion)
    },
    updateCommunityBuild := testOnly.in(Test).toTask(
      " dotty.communitybuild.CommunityBuildUpdate -- --include-categories=dotty.communitybuild.UpdateCategory").value,
    testOptions in Test += Tests.Argument(
      TestFrameworks.JUnit,
      "--include-categories=dotty.communitybuild.TestCategory",
    ),
    // Always (re)publish local artifacts before running the tests.
    (Test / testOnly) := ((Test / testOnly) dependsOn prepareCommunityBuild).evaluated,
    (Test / test    ) := ((Test / test    ) dependsOn prepareCommunityBuild).value,
    javaOptions ++= {
      // Propagate the ivy cache directory setting to the tests, which will
      // then propagate it further to the sbt instances they will spawn.
      val sbtProps = Option(System.getProperty("sbt.ivy.home")) match {
        case Some(ivyHome) =>
          Seq(s"-Dsbt.ivy.home=$ivyHome")
        case _ =>
          Seq()
      }
      sbtProps
    }
  )
// Settings shared by every published module: Sonatype/Maven publishing
// configuration, license, SCM coordinates and the developer list.
lazy val publishSettings = Seq(
  publishMavenStyle := true,
  isSnapshot := version.value.contains("SNAPSHOT"),
  publishTo := sonatypePublishToBundle.value,
  // Allow re-publishing the same version (e.g. CI retries, snapshots).
  publishConfiguration ~= (_.withOverwrite(true)),
  publishLocalConfiguration ~= (_.withOverwrite(true)),
  publishArtifact in Test := false,
  homepage := Some(url(dottyGithubUrl)),
  licenses += (("Apache-2.0",
    url("https://www.apache.org/licenses/LICENSE-2.0"))),
  scmInfo := Some(
    ScmInfo(
      url(dottyGithubUrl),
      "scm:git:git@github.com:lampepfl/dotty.git"
    )
  ),
  developers := List(
    Developer(
      id = "odersky",
      name = "Martin Odersky",
      email = "martin.odersky@epfl.ch",
      url = url("https://github.com/odersky")
    ),
    Developer(
      id = "DarkDimius",
      name = "Dmitry Petrashko",
      email = "me@d-d.me",
      url = url("https://d-d.me")
    ),
    Developer(
      id = "smarter",
      name = "Guillaume Martres",
      email = "smarter@ubuntu.com",
      url = url("http://guillaume.martres.me")
    ),
    Developer(
      id = "felixmulder",
      name = "Felix Mulder",
      email = "felix.mulder@gmail.com",
      url = url("http://felixmulder.com")
    ),
    Developer(
      id = "liufengyun",
      name = "Liu Fengyun",
      email = "liu@fengy.me",
      url = url("https://fengy.me")
    ),
    Developer(
      id = "nicolasstucki",
      name = "Nicolas Stucki",
      email = "nicolas.stucki@gmail.com",
      url = url("https://github.com/nicolasstucki")
    ),
    Developer(
      id = "OlivierBlanvillain",
      name = "Olivier Blanvillain",
      email = "olivier.blanvillain@gmail.com",
      url = url("https://github.com/OlivierBlanvillain")
    ),
    Developer(
      id = "biboudis",
      name = "Aggelos Biboudis",
      email = "aggelos.biboudis@epfl.ch",
      url = url("http://biboudis.github.io")
    ),
    Developer(
      id = "allanrenucci",
      name = "Allan Renucci",
      email = "allan.renucci@gmail.com",
      url = url("https://github.com/allanrenucci")
    ),
    Developer(
      id = "Duhemm",
      name = "Martin Duhem",
      email = "martin.duhem@gmail.com",
      url = url("https://github.com/Duhemm")
    )
  )
)
// Settings shared by the binary-distribution projects (sbt-pack plugin).
lazy val commonDistSettings = Seq(
  packMain := Map(),
  publishArtifact := false,
  packGenerateMakefile := false,
  packExpandedClasspath := true,
  packArchiveName := "dotty-" + dottyVersion
)
// Binary distribution built from the non-bootstrapped compiler.
lazy val dist = project.asDist(NonBootstrapped)
  .settings(
    packResourceDir += (baseDirectory.value / "bin" -> "bin"),
  )
// Binary distribution built from the bootstrapped compiler; reuses the
// launcher scripts of the non-bootstrapped `dist` project.
lazy val `dist-bootstrapped` = project.asDist(Bootstrapped)
  .settings(
    packResourceDir += ((baseDirectory in dist).value / "bin" -> "bin"),
  )
// Extension methods giving each kind of project in this build a uniform,
// named definition (root, compiler, library, doc, dist, ...).
implicit class ProjectDefinitions(val project: Project) extends AnyVal {

  // The aggregate root project for one bootstrap mode.
  // FIXME: we do not aggregate `bin` because its tests delete jars, thus breaking other tests
  def asDottyRoot(implicit mode: Mode): Project = project.withCommonSettings.
    aggregate(`dotty-interfaces`, dottyLibrary, dottyCompiler, tastyCore, dottyDoc, `dotty-sbt-bridge`).
    bootstrappedAggregate(`scala-library`, `scala-compiler`, `scala-reflect`, scalap,
      `dotty-language-server`, `dotty-staging`, `dotty-tasty-inspector`, `dotty-tastydoc`).
    dependsOn(tastyCore).
    dependsOn(dottyCompiler).
    dependsOn(dottyLibrary).
    nonBootstrappedSettings(
      addCommandAlias("run", "dotty-compiler/run"),
      // Clean everything by default
      addCommandAlias("clean", ";dotty/clean;dotty-bootstrapped/clean"),
      // `publishLocal` on the non-bootstrapped compiler does not produce a
      // working distribution (it can't in general, since there's no guarantee
      // that the non-bootstrapped library is compatible with the
      // non-bootstrapped compiler), so publish the bootstrapped one by
      // default.
      addCommandAlias("publishLocal", "dotty-bootstrapped/publishLocal"),
    )

  // The compiler project for one bootstrap mode.
  def asDottyCompiler(implicit mode: Mode): Project = project.withCommonSettings.
    dependsOn(`dotty-interfaces`).
    dependsOn(dottyLibrary).
    dependsOn(tastyCore).
    settings(dottyCompilerSettings)

  // The standard library project; also pulls in the Scala 2 stdlib.
  def asDottyLibrary(implicit mode: Mode): Project = project.withCommonSettings.
    settings(
      libraryDependencies += "org.scala-lang" % "scala-library" % stdlibVersion
    ).
    settings(dottyLibrarySettings)

  def asTastyCore(implicit mode: Mode): Project = project.withCommonSettings.
    dependsOn(dottyLibrary).
    settings(tastyCoreSettings)

  def asTastyCoreScala2: Project = project.settings(commonScala2Settings)

  def asDottyDoc(implicit mode: Mode): Project = project.withCommonSettings.
    dependsOn(dottyCompiler, dottyCompiler % "test->test").
    settings(commonDocSettings).
    settings(dottyDocSettings)

  // JMH benchmark project.
  def asDottyBench(implicit mode: Mode): Project = project.withCommonSettings.
    dependsOn(dottyCompiler).
    settings(commonBenchmarkSettings).
    enablePlugins(JmhPlugin)

  def asDottyTastydoc(implicit mode: Mode): Project = project.withCommonSettings.
    aggregate(`dotty-tastydoc-input`).
    dependsOn(dottyCompiler).
    dependsOn(`dotty-tasty-inspector`).
    settings(commonDocSettings)

  def asDottyTastydocInput(implicit mode: Mode): Project = project.withCommonSettings.
    dependsOn(dottyCompiler)

  // Binary distribution project packaged with sbt-pack.
  def asDist(implicit mode: Mode): Project = project.
    enablePlugins(PackPlugin).
    withCommonSettings.
    dependsOn(`dotty-interfaces`, dottyCompiler, dottyLibrary, tastyCore, `dotty-staging`, `dotty-tasty-inspector`, dottyDoc).
    settings(commonDistSettings).
    bootstrappedSettings(
      target := baseDirectory.value / "target" // override setting in commonBootstrappedSettings
    )

  // Applies the settings matching the requested bootstrap mode.
  def withCommonSettings(implicit mode: Mode): Project = project.settings(mode match {
    case NonBootstrapped => commonNonBootstrappedSettings
    case Bootstrapped => commonBootstrappedSettings
  })
}
}
| som-snytt/dotty | project/Build.scala | Scala | apache-2.0 | 61,867 |
package org.scaladebugger.api.profiles.traits.info
import com.sun.jdi.{PrimitiveValue, Value}
import scala.util.{Failure, Success, Try}
/**
 * Profile-level view of a primitive (or primitive-like) JDI value.
 *
 * Provides accessors for converting the remote value to a local JVM value
 * and a family of predicates identifying the underlying primitive type.
 */
trait PrimitiveInfo extends ValueInfo with CommonInfo {
  /**
   * Converts the current profile instance to a representation of
   * low-level Java instead of a higher-level abstraction.
   *
   * @return The profile instance providing an implementation corresponding
   *         to Java
   */
  override def toJavaInfo: PrimitiveInfo

  /**
   * Returns the JDI representation this profile instance wraps.
   *
   * @return The JDI instance
   */
  override def toJdiInstance: Value

  /**
   * Returns the type information for the value.
   *
   * @return The profile containing type information
   */
  override def `type`: PrimitiveTypeInfo

  /**
   * Attempts to convert the remote value into a value local to this JVM.
   *
   * @return Success containing the value as a local instance,
   *         otherwise a failure
   */
  override def tryToLocalValue: Try[AnyVal] = Try(toLocalValue)

  /**
   * Returns the value as a value local to this JVM.
   *
   * @return The value as a local instance
   */
  override def toLocalValue: AnyVal

  /** @return True if the primitive is a boolean, otherwise false */
  def isBoolean: Boolean

  /** @return True if the primitive is a byte, otherwise false */
  def isByte: Boolean

  /** @return True if the primitive is a char, otherwise false */
  def isChar: Boolean

  /** @return True if the primitive is a double, otherwise false */
  def isDouble: Boolean

  /** @return True if the primitive is a float, otherwise false */
  def isFloat: Boolean

  /** @return True if the primitive is a integer, otherwise false */
  def isInteger: Boolean

  /** @return True if the primitive is a long, otherwise false */
  def isLong: Boolean

  /** @return True if the primitive is a short, otherwise false */
  def isShort: Boolean

  /**
   * Produces a human-readable rendering of the value: chars are wrapped in
   * single quotes, other primitives use their natural string form, and any
   * failure to fetch the value renders as "<ERROR>".
   *
   * @return The human-readable description
   */
  override def toPrettyString: String =
    tryToLocalValue
      .map(v => if (isChar) s"'$v'" else v.toString)
      .getOrElse("<ERROR>")
}
| ensime/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/profiles/traits/info/PrimitiveInfo.scala | Scala | apache-2.0 | 2,864 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package model.exchange.sift
import model.exchange.sift.SiftAnswersStatus.SiftAnswersStatus
import play.api.libs.json._
/** Lifecycle status of a candidate's sift answers: editable or submitted. */
object SiftAnswersStatus extends Enumeration {
  type SiftAnswersStatus = Value
  val DRAFT, SUBMITTED = Value
  // JSON (de)serialisation: the enum value is represented by its name as a JSON string.
  implicit val siftAnswersStatusFormat = new Format[SiftAnswersStatus] {
    def reads(json: JsValue) = JsSuccess(SiftAnswersStatus.withName(json.as[String]))
    def writes(myEnum: SiftAnswersStatus) = JsString(myEnum.toString)
  }
}
/**
 * Exchange (API-facing) representation of a candidate's sift answers.
 *
 * @param applicationId identifier of the application the answers belong to
 * @param status        whether the answers are still DRAFT or SUBMITTED
 * @param generalAnswers answers to the general questions, if provided
 * @param schemeAnswers  scheme-specific answers keyed by scheme id
 */
case class SiftAnswers(
  applicationId: String,
  status: SiftAnswersStatus,
  generalAnswers: Option[GeneralQuestionsAnswers],
  schemeAnswers: Map[String, SchemeSpecificAnswer])
object SiftAnswers {
  implicit val siftAnswersFormat = Json.format[SiftAnswers]

  /** Converts the persisted (database) model into this exchange model. */
  def apply(a: model.persisted.sift.SiftAnswers): SiftAnswers = {
    SiftAnswers(
      a.applicationId,
      // Statuses share names across the persisted and exchange enumerations.
      model.exchange.sift.SiftAnswersStatus.withName(a.status.toString),
      a.generalAnswers.map(model.exchange.sift.GeneralQuestionsAnswers(_)),
      a.schemeAnswers.map{
        case (k: String, v: model.persisted.sift.SchemeSpecificAnswer) => (k, model.exchange.sift.SchemeSpecificAnswer(v.rawText))
      }
    )
  }
}
| hmrc/fset-faststream | app/model/exchange/sift/SiftAnswers.scala | Scala | apache-2.0 | 1,772 |
/**
* Copyright (c) 2011, Mikael Svahn, Softhouse Consulting AB
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package se.softhouse.garden.orchid.scala.orientdb
import org.springframework.context.annotation.Profile
import org.springframework.stereotype.Component
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentPool
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx
import javax.annotation.PostConstruct
import javax.annotation.PreDestroy
import com.orientechnologies.orient.core.config.OGlobalConfiguration
/**
 * Abstraction over a pool of OrientDB document database connections.
 *
 * @author Mikael Svahn
 */
trait DatabasePool {
  /** Borrows a connection; the caller is responsible for releasing it. */
  def acquire: ODatabaseDocumentTx
  /** Returns a previously acquired connection. */
  def release(db: ODatabaseDocumentTx)
  /** Runs `f` with the given connection, or a freshly acquired one when none is supplied. */
  def execute[T](f: ODatabaseDocumentTx => T)(implicit db: ODatabaseDocumentTx = null): T
  /** Runs `f` inside a transaction on a freshly acquired connection. */
  def txn[T](f: ODatabaseDocumentTx => T): T
}
/**
 * [[DatabasePool]] backed by OrientDB's ODatabaseDocumentPool, connecting to
 * a local "remote:" database with default admin credentials.
 */
@Component
@Profile(Array("production"))
class LocalDatabasePool extends DatabasePool {
  private val pool: ODatabaseDocumentPool = new ODatabaseDocumentPool()
  val path: String = "remote:localhost/db"

  @PostConstruct
  def postConstruct() {
    pool.setup()
    // NOTE(review): this instance is never used, opened or closed — it looks
    // like dead code; confirm against usage before removing.
    val db = new ODatabaseDocumentTx(path)
  }

  @PreDestroy
  def preDestroy() {
    pool.close()
  }

  override def acquire: ODatabaseDocumentTx = {
    pool.acquire(path, "admin", "admin")
  }

  override def release(db: ODatabaseDocumentTx) {
    db.close
  }

  /**
   * Runs `f` with the supplied database when one is given; otherwise acquires
   * a connection for the duration of the call and closes it afterwards.
   */
  override def execute[T](f: ODatabaseDocumentTx => T)(implicit db: ODatabaseDocumentTx = null): T = {
    if (db == null) {
      // Named distinctly so it does not shadow the implicit parameter `db`.
      val acquired = acquire
      try {
        f(acquired)
      } finally {
        acquired.close()
      }
    } else {
      f(db)
    }
  }

  /**
   * Runs `f` inside a transaction on a freshly acquired connection.
   *
   * Fix: the original called `db.rollback` unconditionally in `finally`,
   * i.e. also after a successful commit. The transaction is now rolled back
   * only when `f` or the commit fails; the connection is always closed.
   */
  override def txn[T](f: ODatabaseDocumentTx => T): T = {
    implicit val db = acquire
    try {
      db.begin
      val result = f(db)
      db.commit
      result
    } catch {
      case e: Throwable =>
        db.rollback
        throw e
    } finally {
      db.close()
    }
  }
}
| Softhouse/orchid | se.softhouse.garden.orchid.scala.orientdb/src/main/scala/se/softhouse/garden/orchid/scala/orientdb/DatabasePool.scala | Scala | mit | 2,690 |
package scrabble
import util.Random
import scala.util.{ Try, Success, Failure }
//@TODO: Think about how to generalise this to other languages. Perhaps using configuration files...
/**
 * A Scrabble letter bag.
 *
 * @param letters the tiles currently in the bag, in draw order
 * @param size    the number of tiles remaining
 * @param tileSet lookup from a character to its canonical tile
 */
case class LetterBag(letters: List[Tile], size: Int, tileSet: Map[Char, Tile]) {

  override def toString = letters.toString

  /** The bag's contents rendered as a plain string of letters. */
  lazy val lettersAsString = letters.map(_.letter).mkString

  /**
   * Draws up to `num` tiles from the front of the bag.
   *
   * @return the tiles drawn (possibly fewer than `num` if the bag runs out)
   *         together with the depleted bag
   */
  def remove(num: Int): (List[Tile], LetterBag) = {
    val (taken, remaining) = letters.splitAt(num)
    val shrunk = copy(letters = remaining, size = (size - num) max 0)
    (taken, shrunk)
  }

  /**
   * Swaps `exchanged` tiles for the same number of tiles from the bag.
   * Fails when the bag holds fewer tiles than are being exchanged; on success
   * the returned tiles are drawn first, then the exchanged tiles are shuffled
   * back into the remainder, so the bag's size is unchanged.
   */
  def exchange(exchanged: List[Tile]): Try[(List[Tile], LetterBag)] =
    if (exchanged.size > size) Failure(BagNotFullEnoughToExchange())
    else {
      val (handedOut, depleted) = remove(exchanged.size)
      val reshuffled = util.Random.shuffle(exchanged ::: depleted.letters)
      Success((handedOut, copy(letters = reshuffled)))
    }

  /** Looks up the canonical tile for `letter`, if it exists in the tile set. */
  def letterFor(letter: Char): Option[Tile] = tileSet.get(letter)
}
object LetterBag {
  // The full English tile distribution, expanded into one Tile per physical
  // tile (e.g. 12 'E's worth 1 point each, 2 blanks worth 0).
  private val letters: List[Tile] = {
    // (Letter, Value, Distribution)
    val blankPoints = List(('_', 0, 2))
    val onePoints = List('E', 'A', 'I', 'O', 'N', 'R', 'T', 'L', 'S', 'U') zip List(12, 9, 9, 8, 6, 6, 6, 4, 4, 4) map { case (x, y) => (x, 1, y) }
    val twoPoints = List(('D', 2, 4), ('G', 2, 3))
    val threePoints = List('B', 'C', 'M', 'P').map(ch => (ch, 3, 2))
    val fourPoints = List('F', 'H', 'V', 'W', 'Y').map(ch => (ch, 4, 2))
    val fivePoints = List('K').map(ch => (ch, 5, 1))
    val eightPoints = List('J', 'X').map(ch => (ch, 8, 1))
    val tenPoints = List('Q', 'Z').map(ch => (ch, 10, 1))
    val all: List[(Char, Int, Int)] = blankPoints ::: onePoints ::: twoPoints ::: threePoints ::: fourPoints ::: fivePoints ::: eightPoints ::: tenPoints
    // Yield a list of all the letters in the bag, using the distribution to yield the right number of letters
    all.foldLeft(List.empty[Tile]) {
      case (list, (chr: Char, vl: Int, dst: Int)) =>
        List.fill(dst)(if (chr == '_') BlankLetter(chr) else Letter(chr, vl)) ::: list
    }
  }
  // Canonical character -> tile lookup derived from the distribution above.
  private val tileSet: Map[Char, Tile] = letters.map { tile => tile.letter -> tile } toMap

  /** Returns a new LetterBag in its intial state. List is in randomised order. */
  def init: LetterBag = {
    // Construct with a randomised list
    LetterBag(util.Random.shuffle(letters), 100, tileSet)
  }

  /**
   * Constructs a letter bag from a string of letters in the order they should be taken from the bag.
   * Returns None if a character in the string is not part of the tile set
   */
  def fromLetters(letters: String, tileSet: Map[Char, Tile]): Option[LetterBag] = {
    // Walks the (reversed) characters, prepending each resolved tile so the
    // final bag preserves the original draw order.
    def buildLetterBag(letters: List[Char], bag: LetterBag): Option[LetterBag] = {
      letters match {
        case Nil => Some(bag.copy(letters = bag.letters))
        case c :: cs =>
          val tile = tileSet.get(c)
          tile.fold[Option[LetterBag]](None) {
            case t =>
              buildLetterBag(cs, bag.copy(letters = t :: bag.letters, size = bag.size + 1))
          }
      }
    }
    buildLetterBag(letters.toList reverse, LetterBag(Nil, 0, tileSet))
  }

  //@TODO: Placeholder for other language generalisation
  def apply(filePath: String): LetterBag = ???

  // Ad-hoc manual check: print a freshly shuffled bag.
  def main(args: Array[String]) {
    val bag = LetterBag.init
    println(bag.lettersAsString)
  }
} | ornicar/scalascrabble | src/main/scala/LetterBag.scala | Scala | gpl-2.0 | 3,832 |
/*
Copyright 2008-2012 E-Hentai.org
http://forums.e-hentai.org/
ehentai@gmail.com
This file is part of Hentai@Home.
Hentai@Home is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Hentai@Home is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Hentai@Home. If not, see <http://www.gnu.org/licenses/>.
*/
package org.hath.base
//import scala.collection.mutable.MutableList
import java.util.Date
import java.util.TimeZone
import java.text.SimpleDateFormat
//import java.util.List
import java.util.ArrayList
import java.io.File
import java.io.PrintStream
import java.io.OutputStream
import java.io.FileWriter
/** Listener notified with each formatted line written to the console output. */
trait OutListener extends java.util.EventListener {
  def outputWritten(entry:String):Unit
}
/**
 * Central console and file logging facility. On initialisation it replaces
 * System.out/System.err with wrappers that timestamp every line (UTC,
 * ISO 8601), forward it to registered [[OutListener]]s and append it to
 * rotating output/error log files.
 */
object Out {
  // Severity bit flags.
  val DEBUG = 1
  val INFO = 2
  val WARNING = 4
  val ERROR = 8
  // Masks selecting which severities reach the output log, the error log,
  // the console, and which get verbose caller (stack frame) information.
  val LOGOUT = DEBUG | INFO | WARNING | ERROR
  val LOGERR = WARNING | ERROR
  val OUTPUT = INFO | WARNING | ERROR
  val VERBOSE = ERROR

  private var suppressedOutput = 0
  private var overridden = false
  private var writeLogs = false
  // Original System.out/System.err, kept so failures in the logging path can
  // still be reported without looping through the overridden streams.
  private var def_out: PrintStream = null
  private var def_err: PrintStream = null
  private var or_out: OutPrintStream = null
  private var or_err: OutPrintStream = null
  private var logout: FileWriter = null
  private var logerr: FileWriter = null
  private var logout_count = 0
  private var logerr_count = 0
  private var sdf: SimpleDateFormat = null
  private var outListeners: Set[OutListener] = null

  // Object initialisation: ensure the data directory exists before any log
  // file is opened, then take over the standard streams.
  try {
    Settings.initializeDataDir()
  } catch {
    case ioe: java.io.IOException => {
      System.err.println("Could not create data directory. Please check file access permissions and free disk space.")
      System.exit(-1)
    }
  }
  overrideDefaultOutput()

  /** Installs the timestamping stream wrappers and opens the log files (idempotent). */
  def overrideDefaultOutput() {
    if(overridden) {
      return
    }
    writeLogs = true
    overridden = true
    outListeners = Set[OutListener]()
    suppressedOutput = 0
    sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'") // ISO 8601
    sdf.setTimeZone(TimeZone.getTimeZone("UTC"))
    def_out = System.out
    def_err = System.err
    or_out = new OutPrintStream(def_out, "out", INFO)
    or_err = new OutPrintStream(def_err, "ERR", ERROR)
    System.setOut(or_out)
    System.setErr(or_err)
    logout = startLogger(Settings.getOutputLogPath())
    logerr = startLogger(Settings.getErrorLogPath())
  }

  def addOutListener(listener: OutListener) {
    outListeners.synchronized {
      outListeners += listener
    }
  }

  def removeOutListener(listener: OutListener) {
    outListeners.synchronized {
      outListeners -= listener
    }
  }

  /** Stops writing to the output log after a final flush. */
  def disableLogging() {
    if(writeLogs) {
      info("Logging ended.")
      writeLogs = false
      flushLogs()
    }
  }

  // Best-effort flush of the output log; failures are ignored.
  def flushLogs() {
    try {
      logout.flush()
    } catch {
      case e: Exception =>
    }
  }

  /**
   * Opens a fresh log file at `logfile`, first deleting rotated copies
   * (`<file>.0`..`<file>.3`, `<file>.old`) and renaming the current file to
   * `<file>.old`. Returns null when the file cannot be opened.
   */
  private def startLogger(logfile: String): FileWriter = {
    // delete existing old logs
    List(0, 1, 2, 3).foreach(
      (i:Int) => (new File(logfile + "." + i)).delete()
    )
    (new File(logfile + ".old")).delete()
    (new File(logfile)).renameTo(new File(logfile + ".old"))
    var logger: FileWriter = null
    if(logfile != null && logfile.length() > 0) {
      try {
        logger = new FileWriter(logfile, true)
      } catch {
        case e: java.io.IOException => {
          e.printStackTrace()
          System.err.println("Failed to open log file " + logfile)
        }
      }
    }
    if(logger != null) {
      Out.info("Started logging to " + logfile)
      log("", logger)
      log("*********************************************************", logger)
      log(sdf.format(new Date()) + " Logging started", logger)
    }
    return logger
  }

  // Closes a log writer; returns false when the close fails (rotation aborted).
  private def stopLogger(logger: FileWriter): Boolean = {
    try {
      logger.close()
    } catch {
      case e: Exception => {
        e.printStackTrace(def_err)
        def_err.println("Unable to close file writer handle: Cannot rotate log.")
        return false
      }
    }
    return true
  }

  // Convenience entry points, one per severity level.
  def debug(x: String) = or_out.println(x, "debug", DEBUG)
  def info(x: String) = or_out.println(x, "info", INFO)
  def warning(x: String) = or_out.println(x, "WARN", WARNING)
  def error(x: String) = or_out.println(x, "ERROR", ERROR)

  /** Appends `data` to the applicable log file(s), rotating every 100000 entries. */
  private def log(data: String, severity: Int) {
    if( ((severity & LOGOUT) > 0) && writeLogs ) {
      log(data, logout, false)
      logout_count += 1
      if(logout_count > 100000) {
        logout_count = 0
        def_out.println("Rotating output logfile...")
        if(stopLogger(logout)) {
          logout = startLogger(Settings.getOutputLogPath())
          def_out.println("Output logfile rotated.")
        }
      }
    }
    if ((severity & LOGERR) > 0) {
      // Error-log entries are flushed immediately.
      log(data, logerr, true)
      logerr_count += 1
      if(logerr_count > 100000) {
        logerr_count = 0
        def_out.println("Rotating error logfile...")
        if(stopLogger(logerr)) {
          logerr = startLogger(Settings.getErrorLogPath())
          def_out.println("Error logfile rotated.")
        }
      }
    }
  }

  private def log(data: String, writer: FileWriter) {
    log(data, writer, false)
  }

  private def log(data: String, writer: FileWriter, flush: Boolean) {
    if(writer != null) {
      writer.synchronized {
        try {
          // NOTE(review): "\\n" writes a literal backslash-n rather than a
          // newline — this may be an escaping artifact of how this file was
          // captured; confirm against the original source.
          writer.write(data + "\\n")
          if(flush) {
            writer.flush()
          }
        } catch {
          case ioe: java.io.IOException =>{
            // IMPORTANT: writes to the default System.err to prevent loops
            ioe.printStackTrace(def_err)
          }
        }
      }
    }
  }

  /**
   * For severities in the VERBOSE mask, returns a "{frame} " prefix naming
   * the first stack frame outside this logging machinery; otherwise "".
   */
  def verbose(severity: Int): String = {
    if ((severity & VERBOSE) == 0) "" else {
      //java.lang.StackTraceElement[]
      val ste = java.lang.Thread.currentThread().getStackTrace()
      val last = ste.find((el:StackTraceElement) => el.getClassName match{
        case "org.hath.base.Out" => false
        case "org.hath.base.Out$OutPrintStream" => false
        case "java.lang.Thread" => false
        case _ => true
      })
      last match {
        case Some(el) =>
          if (el.getClassName() == "java.lang.Throwable") "" else "{" + el + "} "
        case None => "{Unknown Source}"
      }
    }
  }

  /**
   * PrintStream replacement that prefixes each printed line with a UTC
   * timestamp and the stream's tag, notifies registered listeners and
   * forwards the line to the log files.
   */
  private class OutPrintStream(ps:PrintStream, name:String, severity:Int) extends PrintStream(ps) {
    override def println(x:String) = println(x, name, severity)
    override def println(x:Boolean) = println(String.valueOf(x))
    override def println(x: Array[Char]) = println(String.valueOf(x))
    override def println(x: Double) = println(String.valueOf(x))
    override def println(x: Float) = println(String.valueOf(x))
    override def println(x: Char) = println(String.valueOf(x))
    override def println(x: Int) = println(String.valueOf(x))
    override def println(x: Long) = println(String.valueOf(x))
    override def println(x: Any) = println(String.valueOf(x))
    def println(x:String, name:String):Unit = println(x, name, severity)
    def println(x:String, name:String, severity:Int):Unit = {
      if(x == null) {
        return
      }
      // Console output can be suppressed per-severity; file logging cannot.
      val output = (severity & Out.OUTPUT & ~Out.suppressedOutput) > 0
      val log = (severity & (Out.LOGOUT | Out.LOGERR)) > 0
      if(output || log) {
        outListeners.synchronized {
          val v = Out.verbose(severity)
          x.split("\\n").foreach((s: String) => {
            val data = sdf.format(new Date()) + " [" + name + "] " + v + s
            if(output) {
              ps.println(data)
              outListeners.foreach(_.outputWritten(data))
            }
            if(log) {
              Out.log(data, severity)
            }
          })
        }
      }
    }
  }
}
| lain-dono/hath-scala | src/main/scala/Out.scala | Scala | gpl-3.0 | 8,130 |
package webserviceclients.paymentsolve
import play.api.libs.json.Json
/**
 * Request payload for querying the payment solve web service.
 *
 * @param transNo      transaction number
 * @param trxRef       transaction reference
 * @param isPrimaryUrl whether the primary service URL is being used
 */
case class PaymentSolveGetRequest(transNo: String,
  trxRef: String,
  isPrimaryUrl: Boolean)
object PaymentSolveGetRequest {
  // Play JSON serialisation derived from the case class fields.
  implicit val JsonFormat = Json.format[PaymentSolveGetRequest]
} | dvla/vrm-retention-online | app/webserviceclients/paymentsolve/PaymentSolveGetRequest.scala | Scala | mit | 329 |
package scala.in.programming.assertion
/**
 * Demonstrates Scala's `assert` for precondition checking.
 *
 * @author loustler
 * @since 04/14/2017 20:34
 */
object UserAssertion {

  /** Fails with an AssertionError carrying the message "s is null" when `s` is null. */
  def testNonNull(s: String): Unit =
    assert(s != null, "s is null")

  def main(args: Array[String]): Unit = {
    // testNonNull(null) // Would throw an AssertionError.
  }
}
| loustler/scala | src/main/scala/scala/in/programming/assertion/UserAssertion.scala | Scala | mit | 301 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Input}
// Input box holding the optional "Cost Of Sales" figure for the computation.
case class CP8(value: Option[Int]) extends CtBoxIdentifier(name = "Cost Of Sales") with CtOptionalInteger with Input
object CP8 {
  /** Convenience constructor wrapping a definite value in the optional box. */
  def apply(int: Int): CP8 = CP8(Option(int))
}
| scottcutts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP8.scala | Scala | apache-2.0 | 891 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import com.intel.analytics.bigdl.dllib.nn.abstractnn.TensorCriterion
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
 * Measures the mean absolute value of the element-wise difference between
 * input and target (L1 loss).
 *
 * @param sizeAverage when true the loss and gradient are averaged over the
 *                    number of elements; otherwise they are summed
 */
@SerialVersionUID( - 7860793007567513758L)
class AbsCriterion[@specialized(Float, Double) T: ClassTag](val sizeAverage: Boolean = true)
  (implicit ev: TensorNumeric[T]) extends TensorCriterion[T] {

  // Scratch tensor reused across forward passes; excluded from serialization.
  @transient
  private var buffer: Tensor[T] = null

  override def updateOutput(input: Tensor[T], target : Tensor[T]): T = {
    if (null == buffer) buffer = Tensor[T]()
    // buffer := |target - input|. `mul(input, -1)` overwrites the buffer's
    // previous contents, so the former `.add(input)` priming step was dead
    // work and has been removed.
    buffer.resizeAs(input)
    buffer.mul(input, ev.fromType[Int](-1)).add(target).abs()
    output = buffer.sum()
    if (sizeAverage) output = ev.divide(output, ev.fromType[Int](input.nElement()))
    output
  }

  override def updateGradInput(input: Tensor[T], target: Tensor[T]): Tensor[T] = {
    gradInput.resizeAs(input).zero()
    // Normalisation factor matching the averaging applied in updateOutput.
    val norm: Double = if (sizeAverage) 1.0 / input.nElement() else 1.0
    // gradInput currently holds z = target - input; the gradient w.r.t. the
    // input is -sign(z) (scaled by norm), written in place below.
    gradInput.mul(input, ev.fromType[Int](-1)).add(target)
    require(gradInput.isContiguous(), "AbsCriterion: gradInput should be contiguous")
    val bufferArray = gradInput.storage().array()
    val bufferOffset = gradInput.storageOffset() - 1
    var i = 0
    while (i < gradInput.nElement()) {
      // Fix: read with the same storage offset used for the write. The
      // original read `bufferArray(i)` ignored the offset and was wrong
      // whenever the tensor does not start at the beginning of its storage.
      val z = bufferArray(i + bufferOffset)
      bufferArray(i + bufferOffset) = ev.times(ev.fromType(norm),
        if (ev.isGreater(z, ev.fromType(0))) ev.fromType(-1) else ev.fromType(1))
      i += 1
    }
    gradInput
  }

  override def canEqual(other: Any): Boolean = other.isInstanceOf[AbsCriterion[T]]

  override def equals(other: Any): Boolean = other match {
    case that: AbsCriterion[T] =>
      super.equals(that) &&
        (that canEqual this) &&
        sizeAverage == that.sizeAverage
    case _ => false
  }

  override def hashCode(): Int = {
    def getHashCode(a: Any): Int = if (a == null) 0 else a.hashCode()
    val state = Seq(super.hashCode(), sizeAverage)
    state.map(getHashCode).foldLeft(0)((a, b) => 31 * a + b)
  }
}
object AbsCriterion {
  /** Factory mirroring the class constructor. */
  def apply[@specialized(Float, Double) T: ClassTag](
      sizeAverage: Boolean = true)(implicit ev: TensorNumeric[T]) : AbsCriterion[T] = {
    new AbsCriterion[T](sizeAverage)
  }
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/AbsCriterion.scala | Scala | apache-2.0 | 3,060 |
package infrastracture
import anorm.SqlParser._
import anorm._
import models.User
import play.api.Play.current
import play.api.db._
import scala.util.Try
/** Anorm-based data access object for the `user` table. */
class UserDao {

  // Maps a result-set row (id, name, mail) to a User instance.
  private val simple: RowParser[User] = {
    (long("id") ~ str("name") ~ str("mail")).map {
      case id ~ name ~ mail =>
        User(id = Option(id), name = name, mail = mail)
    }
  }

  /** Returns every row of the `user` table. */
  def findAll(): Try[Seq[User]] = Try {
    DB.withConnection() { implicit conn =>
      SQL("SELECT * FROM user").as(simple *)
    }
  }

  /** Returns the user with the given id, or None when no row matches. */
  def findById(id: Long): Try[Option[User]] = Try {
    DB.withConnection() { implicit conn =>
      SQL("SELECT * FROM user WHERE id = {id}").on('id -> id).as(simple.singleOpt)
    }
  }

  /** Inserts a new user; returns the number of affected rows. */
  def insert(user: User): Try[Int] = Try {
    DB.withConnection() { implicit conn =>
      SQL("INSERT INTO user(name, mail) values({name}, {mail})")
        .on('name -> user.name, 'mail -> user.mail)
        .executeUpdate()
    }
  }

  /** Updates an existing user by id; assumes `user.id` is defined (`.get`). */
  def update(user: User): Try[Int] = Try {
    DB.withConnection() { implicit conn =>
      SQL("UPDATE user SET name = {name}, mail = {mail} WHERE id = {id}")
        .on('id -> user.id.get, 'name -> user.name, 'mail -> user.mail)
        .executeUpdate()
    }
  }

  /** Deletes the user with the given id; returns the number of affected rows. */
  def delete(id: Long): Try[Int] = Try {
    DB.withConnection() { implicit conn =>
      SQL("DELETE FROM user WHERE id = {id}").on('id -> id).executeUpdate()
    }
  }
}
| tarugo07/play-anorm-sample | app/infrastracture/UserDao.scala | Scala | mit | 1,373 |
package interretis.intro.api
import interretis.utils.SeparateSparkContext
import org.scalatest.Matchers
import org.apache.spark.rdd.RDD
import scala.math.pow
import language.postfixOps
class TransformationsSuite extends SeparateSparkContext with Letters with Matchers {
  // Each test exercises one Spark RDD transformation using a
  // given/when/then layout; `f.sc` is the per-test SparkContext fixture.
  "map" should "transform whole input with function" in { f =>
    // given
    val numbers = f.sc parallelize (1 to 5)
    // when
    val squares = numbers map (pow(_, 2))
    // then
    squares.collect shouldBe Array(1, 4, 9, 16, 25)
  }

  "filter" should "leave only elements that fulfill condition" in { f =>
    // given
    val numbers = f.sc parallelize (1 to 5)
    // when
    val even = numbers filter (_ % 2 == 0)
    // then
    even.collect shouldBe Array(2, 4)
  }

  "flatMap" should "transform whole input with function and flatten results" in { f =>
    // given
    val numbers = f.sc parallelize (1 to 5)
    // when
    val sequences = numbers flatMap (1 to _)
    // then
    sequences.collect shouldBe Array(1, 1, 2, 1, 2, 3, 1, 2, 3, 4, 1, 2, 3, 4, 5)
  }

  "sample" should "should return fraction of data" in { f =>
    // given
    val numbers = f.sc parallelize (1 to 5)
    // when: fixed seed (0) keeps the sampled subset deterministic
    val single = numbers sample (false, 0.2, 0)
    // then
    single.collect should contain oneOf (1, 2, 3, 4, 5)
  }

  "union" should "sum the data from two sets" in { f =>
    // given
    val numbers1 = f.sc parallelize (1 to 3)
    val numbers2 = f.sc parallelize (4 to 5)
    // when
    val numbers = numbers1 union numbers2
    // then
    numbers.collect shouldBe Array(1, 2, 3, 4, 5)
  }

  "distinct" should "return distinct elemements from dataset" in { f =>
    // given
    val numbers = f.sc parallelize Array(1, 1, 2, 3, 3, 4, 5, 5)
    // when
    val distinctNumbers = numbers distinct
    // then
    val collected = distinctNumbers.collect.toList
    collected should contain allOf (1, 2, 3, 4, 5)
  }

  "groupByKey" should "return values grouped by first elements of pairs" in { f =>
    // given
    val pairs = f.sc parallelize Array((1, A), (2, A), (1, B), (3, B))
    // when
    val grouped = pairs groupByKey
    // then
    grouped.collect should contain allOf ((1, List(A, B)), (2, List(A)), (3, List(B)))
  }

  "reduceByKey" should "return dataset with values aggregated by function" in { f =>
    // given
    val pairs = f.sc parallelize Array((1, A), (2, A), (1, B), (3, B))
    // when
    val aggregated = pairs reduceByKey (_ + _)
    // then
    aggregated.collect should contain allOf ((1, "AB"), (2, A), (3, B))
  }

  "sortByKey" should "return datased sorted by key" in { f =>
    // given
    val pairs = f.sc parallelize Array((4, A), (2, A), (1, B), (3, B))
    // when
    val sorted = pairs sortByKey ()
    // then
    sorted.collect shouldBe Array((1, B), (2, A), (3, B), (4, A))
  }

  "join" should "join datasets by key" in { f =>
    // given
    val setA = f.sc parallelize Array((1, A), (2, B), (3, C), (1, D))
    val setB = f.sc parallelize Array((1, List('d')), (3, Nil), (4, List('z')), (1, List('t', 'z')))
    // when: inner join — keys present in both sides only, one pair per combination
    val joined = setA join setB collect
    // then
    joined should have size 5
    joined should contain((1, (A, List('d'))))
    joined should contain((1, (A, List('t', 'z'))))
    joined should contain((1, (D, List('d'))))
    joined should contain((1, (D, List('t', 'z'))))
    joined should contain((3, (C, Nil)))
  }

  "cogroup" should "group datasets" in { f =>
    // given
    val setA = f.sc parallelize Array((1, A), (2, B), (3, C), (1, D))
    val setB = f.sc parallelize Array((1, List('d')), (3, Nil), (4, List('z')), (1, List('t', 'z')))
    // when: every key from either side appears once, with both value groups
    val cogrouped = setA cogroup setB collect
    // then
    cogrouped should have size 4
    cogrouped should contain(1, (List(A, D), List(List('d'), List('t', 'z'))))
    cogrouped should contain(2, (List(B), List()))
    cogrouped should contain(3, (List(C), List(Nil)))
    cogrouped should contain(4, (List(), List(List('z'))))
  }
"cartesian" should "give product of two datasets" in { f =>
// given
val setA = f.sc parallelize Array(1, 2, 3)
val setB = f.sc parallelize Array(A, B)
// when
val product = setA cartesian setB
// then
product.collect should contain allOf ((1, A), (1, B), (2, A), (2, B), (3, A), (3, B))
}
}
| MarekDudek/spark-certification | src/test/scala/interretis/intro/api/TransformationsSuite.scala | Scala | mit | 4,339 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.optimize.program
import org.apache.flink.table.calcite.FlinkContext
/**
  * A FlinkOptimizeContext allows to obtain table environment information when optimizing.
  *
  * Currently a marker trait: it declares no members of its own and only
  * refines [[org.apache.flink.table.calcite.FlinkContext]].
  */
trait FlinkOptimizeContext extends FlinkContext {
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/optimize/program/FlinkOptimizeContext.scala | Scala | apache-2.0 | 1,066 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.support.matching
/**
 * Strategy based on the Case of the First Character being Uppercase.
 *
 * Mixin composition of UpperCaseStrategy, StringMatch and FirstCharacter;
 * declares no members of its own.
 */
trait FirstCharacterUpperCaseMatchStrategy extends UpperCaseStrategy with StringMatch with FirstCharacter
| adarro/ddo-calc | subprojects/common/ddo-util/src/main/scala/io/truthencode/ddo/support/matching/FirstCharacterUpperCaseMatchStrategy.scala | Scala | apache-2.0 | 872 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.utils.bin
import com.typesafe.scalalogging.LazyLogging
/**
 * Sorts aggregated bin arrays.
 *
 * A "bin" is a fixed-size chunk of binSize bytes; arrays hold bins
 * back-to-back and are ordered by the value compared at chunk offset + 4
 * (the date, see compare below).
 */
object BinSorter extends LazyLogging {

  /**
   * If the length of an array to be sorted is less than this
   * constant, insertion sort is used in preference to Quicksort.
   *
   * This length is 'logical' length, so the array is really binSize * length
   */
  private val INSERTION_SORT_THRESHOLD = 3

  // Per-thread scratch buffer used to hold one chunk during swaps/inserts;
  // sized for the largest bin (24 bytes) so any smaller binSize fits too.
  private val swapBuffers = new ThreadLocal[Array[Byte]]() {
    override def initialValue(): Array[Byte] = Array.ofDim[Byte](24) // the larger bin size
  }

  // Ordering over (array, offset) chunk references; arguments are reversed so
  // the (max-) priority queue dequeues the smallest-dated chunk first.
  private val priorityOrdering = new Ordering[(Array[Byte], Int)]() {
    override def compare(x: (Array[Byte], Int), y: (Array[Byte], Int)): Int =
      BinSorter.compare(y._1, y._2, x._1, x._2) // reverse for priority queue
  }
  /**
   * Compares two bin chunks by date.
   *
   * @param left array holding the first chunk
   * @param leftOffset start of the first chunk within left
   * @param right array holding the second chunk
   * @param rightOffset start of the second chunk within right
   * @return -1, 0 or 1 as the left chunk's date is less than, equal to, or
   *         greater than the right chunk's date
   */
  def compare(left: Array[Byte], leftOffset: Int, right: Array[Byte], rightOffset: Int): Int =
    compareIntLittleEndian(left, leftOffset + 4, right, rightOffset + 4) // offset + 4 is dtg
/**
* Comparison based on the integer encoding used by ByteBuffer
* original code is in private/protected java.nio packages
*/
private def compareIntLittleEndian(left: Array[Byte],
leftOffset: Int,
right: Array[Byte],
rightOffset: Int): Int = {
val l3 = left(leftOffset + 3)
val r3 = right(rightOffset + 3)
if (l3 < r3) {
return -1
} else if (l3 > r3) {
return 1
}
val l2 = left(leftOffset + 2) & 0xff
val r2 = right(rightOffset + 2) & 0xff
if (l2 < r2) {
return -1
} else if (l2 > r2) {
return 1
}
val l1 = left(leftOffset + 1) & 0xff
val r1 = right(rightOffset + 1) & 0xff
if (l1 < r1) {
return -1
} else if (l1 > r1) {
return 1
}
val l0 = left(leftOffset) & 0xff
val r0 = right(rightOffset) & 0xff
if (l0 == r0) {
0
} else if (l0 < r0) {
-1
} else {
1
}
}
  /**
   * Takes a sequence of (already sorted) aggregates and combines them in a final sort. Uses
   * a priority queue to compare the head element across each aggregate.
   *
   * @param aggregates the pre-sorted bin arrays to merge (k-way merge)
   * @param binSize size in bytes of a single bin chunk
   * @return lazy iterator of (array, offset) references, one per chunk,
   *         in ascending date order across all aggregates
   */
  def mergeSort(aggregates: Iterator[Array[Byte]], binSize: Int): Iterator[(Array[Byte], Int)] = {
    if (aggregates.isEmpty) {
      return Iterator.empty
    }
    val queue = new scala.collection.mutable.PriorityQueue[(Array[Byte], Int)]()(priorityOrdering)
    val sizes = scala.collection.mutable.ArrayBuffer.empty[Int]
    // seed the queue with the first chunk (offset 0) of every aggregate
    while (aggregates.hasNext) {
      val next = aggregates.next()
      sizes.append(next.length / binSize)
      queue.enqueue((next, 0))
    }
    logger.debug(s"Got back ${queue.length} aggregates with an average size of ${sizes.sum / sizes.length}" +
        s" chunks and a median size of ${sizes.sorted.apply(sizes.length / 2)} chunks")
    new Iterator[(Array[Byte], Int)] {
      override def hasNext: Boolean = queue.nonEmpty
      override def next(): (Array[Byte], Int) = {
        val (aggregate, offset) = queue.dequeue()
        // re-enqueue the next chunk of the same aggregate, if any remains
        if (offset < aggregate.length - binSize) {
          queue.enqueue((aggregate, offset + binSize))
        }
        (aggregate, offset)
      }
    }
  }
/**
* Performs a merge sort into a new byte array
*/
def mergeSort(left: Array[Byte], right: Array[Byte], binSize: Int): Array[Byte] = {
if (left.length == 0) {
return right
} else if (right.length == 0) {
return left
}
val result = Array.ofDim[Byte](left.length + right.length)
var (leftIndex, rightIndex, resultIndex) = (0, 0, 0)
while (leftIndex < left.length && rightIndex < right.length) {
if (compare(left, leftIndex, right, rightIndex) > 0) {
System.arraycopy(right, rightIndex, result, resultIndex, binSize)
rightIndex += binSize
} else {
System.arraycopy(left, leftIndex, result, resultIndex, binSize)
leftIndex += binSize
}
resultIndex += binSize
}
while (leftIndex < left.length) {
System.arraycopy(left, leftIndex, result, resultIndex, binSize)
leftIndex += binSize
resultIndex += binSize
}
while (rightIndex < right.length) {
System.arraycopy(right, rightIndex, result, resultIndex, binSize)
rightIndex += binSize
resultIndex += binSize
}
result
}
  /**
   * Sorts the specified range of the array by Dual-Pivot Quicksort.
   * Modified version of java's DualPivotQuicksort
   *
   * @param bytes the array to be sorted
   * @param left the index of the first element, inclusive, to be sorted
   * @param right the index of the last element, inclusive, to be sorted
   * @param binSize size in bytes of one bin chunk; left/right are byte
   *                offsets of chunk starts
   */
  def quickSort(bytes: Array[Byte], left: Int, right: Int, binSize: Int): Unit =
    quickSort(bytes, left, right, binSize, leftmost = true)
  /**
   * Optimized for non-leftmost insertion sort
   *
   * In-place sort of binSize-byte chunks between byte offsets left and right
   * (both inclusive chunk starts); chunks are ordered via compare (by date).
   * leftmost indicates whether this range is the leftmost of the overall sort,
   * which selects the insertion-sort variant for tiny ranges.
   */
  def quickSort(bytes: Array[Byte], left: Int, right: Int, binSize: Int, leftmost: Boolean): Unit = {

    // logical number of chunks in the [left, right] range
    val length = (right + binSize - left) / binSize

    if (length < INSERTION_SORT_THRESHOLD) {
      // Use insertion sort on tiny arrays
      if (leftmost) {
        // Traditional (without sentinel) insertion sort is used in case of the leftmost part
        var i = left + binSize
        while (i <= right) {
          var j = i
          val ai = getThreadLocalChunk(bytes, i, binSize)
          while (j > left && compare(bytes, j - binSize, ai, 0) > 0) {
            System.arraycopy(bytes, j - binSize, bytes, j, binSize)
            j -= binSize
          }
          if (j != i) {
            // we don't need to copy if nothing moved
            System.arraycopy(ai, 0, bytes, j, binSize)
          }
          i += binSize
        }
      } else {
        // optimized insertions sort when we know we have 'sentinel' elements to the left
        /*
         * Every element from adjoining part plays the role
         * of sentinel, therefore this allows us to avoid the
         * left range check on each iteration. Moreover, we use
         * the more optimized algorithm, so called pair insertion
         * sort, which is faster (in the context of Quicksort)
         * than traditional implementation of insertion sort.
         */
        // Skip the longest ascending sequence
        var i = left
        do {
          if (i >= right) {
            return
          }
        } while ({ i += binSize; compare(bytes, i , bytes, i - binSize) >= 0 })

        // a1 holds the larger chunk of the current pair, a2 the smaller
        val a1 = Array.ofDim[Byte](binSize)
        val a2 = Array.ofDim[Byte](binSize)

        var k = i
        while ({ i += binSize; i } <= right) {
          if (compare(bytes, k, bytes, i) < 0) {
            System.arraycopy(bytes, k, a2, 0, binSize)
            System.arraycopy(bytes, i, a1, 0, binSize)
          } else {
            System.arraycopy(bytes, k, a1, 0, binSize)
            System.arraycopy(bytes, i, a2, 0, binSize)
          }
          while ({ k -= binSize; compare(a1, 0, bytes, k) < 0 }) {
            System.arraycopy(bytes, k, bytes, k + 2 * binSize, binSize)
          }
          k += binSize
          System.arraycopy(a1, 0, bytes, k + binSize, binSize)
          while ({ k -= binSize; compare(a2, 0, bytes, k) < 0 }) {
            System.arraycopy(bytes, k, bytes, k + binSize, binSize)
          }
          System.arraycopy(a2, 0, bytes, k + binSize, binSize)

          i += binSize
          k = i
        }

        // insert the trailing chunk if the range had an odd remainder
        var j = right
        val last = getThreadLocalChunk(bytes, j, binSize)
        while ({ j -= binSize; compare(last, 0, bytes, j) < 0 }) {
          System.arraycopy(bytes, j, bytes, j + binSize, binSize)
        }
        System.arraycopy(last, 0, bytes, j + binSize, binSize)
      }
      return
    }

    /*
     * Sort five evenly spaced elements around (and including) the
     * center element in the range. These elements will be used for
     * pivot selection as described below. The choice for spacing
     * these elements was empirically determined to work well on
     * a wide variety of inputs.
     */
    val seventh = (length / 7) * binSize

    val e3 = (((left + right) / binSize) / 2) * binSize // The midpoint
    val e2 = e3 - seventh
    val e1 = e2 - seventh
    val e4 = e3 + seventh
    val e5 = e4 + seventh

    // swap the chunks at the two byte offsets, via the thread-local buffer
    def swap(left: Int, right: Int) = {
      val chunk = getThreadLocalChunk(bytes, left, binSize)
      System.arraycopy(bytes, right, bytes, left, binSize)
      System.arraycopy(chunk, 0, bytes, right, binSize)
    }

    // Sort these elements using insertion sort
    if (compare(bytes, e2, bytes, e1) < 0) { swap(e2, e1) }

    if (compare(bytes, e3, bytes, e2) < 0) { swap(e3, e2)
      if (compare(bytes, e2, bytes, e1) < 0) { swap(e2, e1) }
    }
    if (compare(bytes, e4, bytes, e3) < 0) { swap(e4, e3)
      if (compare(bytes, e3, bytes, e2) < 0) { swap(e3, e2)
        if (compare(bytes, e2, bytes, e1) < 0) {swap(e2, e1) }
      }
    }
    if (compare(bytes, e5, bytes, e4) < 0) { swap(e5, e4)
      if (compare(bytes, e4, bytes, e3) < 0) { swap(e4, e3)
        if (compare(bytes, e3, bytes, e2) < 0) { swap(e3, e2)
          if (compare(bytes, e2, bytes, e1) < 0) { swap(e2, e1) }
        }
      }
    }

    // Pointers
    var less  = left  // The index of the first element of center part
    var great = right // The index before the first element of right part

    if (compare(bytes, e1, bytes, e2) != 0 && compare(bytes, e2, bytes, e3) != 0 &&
        compare(bytes, e3, bytes, e4) != 0 && compare(bytes, e4, bytes, e5) != 0 ) {
      /*
       * Use the second and fourth of the five sorted elements as pivots.
       * These values are inexpensive approximations of the first and
       * second terciles of the array. Note that pivot1 <= pivot2.
       */
      val pivot1 = Array.ofDim[Byte](binSize)
      System.arraycopy(bytes, e2, pivot1, 0, binSize)
      val pivot2 = Array.ofDim[Byte](binSize)
      System.arraycopy(bytes, e4, pivot2, 0, binSize)

      /*
       * The first and the last elements to be sorted are moved to the
       * locations formerly occupied by the pivots. When partitioning
       * is complete, the pivots are swapped back into their final
       * positions, and excluded from subsequent sorting.
       */
      System.arraycopy(bytes, left, bytes, e2, binSize)
      System.arraycopy(bytes, right, bytes, e4, binSize)

      // Skip elements, which are less or greater than pivot values.
      while ({ less += binSize; compare(bytes, less, pivot1, 0) < 0 }) {}
      while ({ great -= binSize; compare(bytes, great, pivot2, 0) > 0 }) {}

      /*
       * Partitioning:
       *
       *   left part           center part                   right part
       * +--------------------------------------------------------------+
       * |  < pivot1  |  pivot1 <= && <= pivot2  |    ?    |  > pivot2  |
       * +--------------------------------------------------------------+
       *               ^                          ^       ^
       *               |                          |       |
       *              less                        k     great
       *
       * Invariants:
       *
       *              all in (left, less)   < pivot1
       *    pivot1 <= all in [less, k)     <= pivot2
       *              all in (great, right) > pivot2
       *
       * Pointer k is the first index of ?-part.
       */
      var k = less - binSize
      var loop = true
      while (loop && { k += binSize; k } <= great) {
        val ak = getThreadLocalChunk(bytes, k, binSize)
        if (compare(ak, 0, pivot1, 0) < 0) { // Move a[k] to left part
          System.arraycopy(bytes, less, bytes, k, binSize)
          System.arraycopy(ak, 0, bytes, less, binSize)
          less += binSize
        } else if (compare(ak, 0, pivot2, 0) > 0) { // Move a[k] to right part
          while (loop && compare(bytes, great, pivot2, 0) > 0) {
            if (great == k) {
              loop = false
            }
            great -= binSize
          }
          if (loop) {
            if (compare(bytes, great, pivot1, 0) < 0) { // a[great] <= pivot2
              System.arraycopy(bytes, less, bytes, k, binSize)
              System.arraycopy(bytes, great, bytes, less, binSize)
              less += binSize
            } else { // pivot1 <= a[great] <= pivot2
              System.arraycopy(bytes, great, bytes, k, binSize)
            }
            System.arraycopy(ak, 0, bytes, great, binSize)
            great -= binSize
          }
        }
      }

      // Swap pivots into their final positions
      System.arraycopy(bytes, less - binSize, bytes, left, binSize)
      System.arraycopy(pivot1, 0, bytes, less - binSize, binSize)
      System.arraycopy(bytes, great + binSize, bytes, right, binSize)
      System.arraycopy(pivot2, 0, bytes, great + binSize, binSize)

      // Sort left and right parts recursively, excluding known pivots
      quickSort(bytes, left, less - 2 * binSize, binSize, leftmost)
      quickSort(bytes, great + 2 * binSize, right, binSize, leftmost = false)

      /*
       * If center part is too large (comprises > 4/7 of the array),
       * swap internal pivot values to ends.
       */
      if (less < e1 && e5 < great) {
        // Skip elements, which are equal to pivot values.
        while (compare(bytes, less, pivot1, 0) == 0) { less += binSize }
        while (compare(bytes, great, pivot2, 0) == 0) { great -= binSize }

        /*
         * Partitioning:
         *
         *   left part         center part                  right part
         * +----------------------------------------------------------+
         * | == pivot1 |  pivot1 < && < pivot2  |    ?    | == pivot2 |
         * +----------------------------------------------------------+
         *              ^                        ^       ^
         *              |                        |       |
         *             less                      k     great
         *
         * Invariants:
         *
         *              all in (*,  less) == pivot1
         *     pivot1 < all in [less,  k)  < pivot2
         *              all in (great, *) == pivot2
         *
         * Pointer k is the first index of ?-part.
         */
        var k = less - binSize
        loop = true
        while (loop && { k += binSize; k } <= great) {
          val ak = getThreadLocalChunk(bytes, k, binSize)
          if (compare(ak, 0, pivot1, 0) == 0) { // Move a[k] to left part
            System.arraycopy(bytes, less, bytes, k, binSize)
            System.arraycopy(ak, 0, bytes, less, binSize)
            less += binSize
          } else if (compare(ak, 0, pivot2, 0) == 0) { // Move a[k] to right part
            while (loop && compare(bytes, great, pivot2, 0) == 0) {
              if (great == k) {
                loop = false
              }
              great -= binSize
            }
            if (loop) {
              if (compare(bytes, great, pivot1, 0) == 0) { // a[great] < pivot2
                System.arraycopy(bytes, less, bytes, k, binSize)
                System.arraycopy(bytes, great, bytes, less, binSize)
                less += binSize
              } else { // pivot1 < a[great] < pivot2
                System.arraycopy(bytes, great, bytes, k, binSize)
              }
              System.arraycopy(ak, 0, bytes, great, binSize)
              great -= binSize
            }
          }
        }
      }

      // Sort center part recursively
      quickSort(bytes, less, great, binSize, leftmost = false)

    } else { // Partitioning with one pivot
      /*
       * Use the third of the five sorted elements as pivot.
       * This value is inexpensive approximation of the median.
       */
      val pivot = Array.ofDim[Byte](binSize)
      System.arraycopy(bytes, e3, pivot, 0, binSize)

      /*
       * Partitioning degenerates to the traditional 3-way
       * (or "Dutch National Flag") schema:
       *
       *   left part    center part              right part
       * +-------------------------------------------------+
       * |  < pivot  |   == pivot   |     ?    |  > pivot  |
       * +-------------------------------------------------+
       *              ^              ^        ^
       *              |              |        |
       *             less            k      great
       *
       * Invariants:
       *
       *   all in (left, less)   < pivot
       *   all in [less, k)     == pivot
       *   all in (great, right) > pivot
       *
       * Pointer k is the first index of ?-part.
       */
      var k = less
      var loop = true
      while (loop && k <= great) {
        val comp = compare(bytes, k, pivot, 0)
        if (comp != 0) {
          val ak = getThreadLocalChunk(bytes, k, binSize)
          if (comp < 0) { // Move a[k] to left part
            System.arraycopy(bytes, less, bytes, k, binSize)
            System.arraycopy(ak, 0, bytes, less, binSize)
            less += binSize
          } else { // a[k] > pivot - Move a[k] to right part
            while (loop && compare(bytes, great, pivot, 0) > 0) {
              if (k == great) {
                loop = false
              }
              great -= binSize
            }
            if (loop) {
              if (compare(bytes, great, pivot, 0) < 0) { // a[great] <= pivot
                System.arraycopy(bytes, less, bytes, k, binSize)
                System.arraycopy(bytes, great, bytes, less, binSize)
                less += binSize
              } else { // a[great] == pivot
                System.arraycopy(bytes, great, bytes, k, binSize)
              }
              System.arraycopy(ak, 0, bytes, great, binSize)
              great -= binSize
            }
          }
        }
        k += binSize
      }

      /*
       * Sort left and right parts recursively.
       * All elements from center part are equal
       * and, therefore, already sorted.
       */
      quickSort(bytes, left, less - binSize, binSize, leftmost)
      quickSort(bytes, great + binSize, right, binSize, leftmost = false)
    }
  }
  // take care - uses thread-local state
  /**
   * Copies the binSize chunk at offset into the shared per-thread swap buffer
   * and returns that buffer; the result is overwritten by the next call on
   * the same thread.
   */
  private def getThreadLocalChunk(bytes: Array[Byte], offset: Int, binSize: Int): Array[Byte] = {
    val chunk = swapBuffers.get()
    System.arraycopy(bytes, offset, chunk, 0, binSize)
    chunk
  }
} | elahrvivaz/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/utils/bin/BinSorter.scala | Scala | apache-2.0 | 18,830 |
package com.aesthetikx.android.canopy.view
import android.graphics.Color
object DefaultColorProvider extends ColorProvider {

  // Fixed seven-entry palette; one color per depth level before wrapping.
  val colors: List[Int] = List(
    Color.rgb(201, 94, 94),
    Color.rgb(201, 139, 94),
    Color.rgb(200, 184, 91),
    Color.rgb(143, 206, 80),
    Color.rgb(87, 199, 167),
    Color.rgb(91, 140, 200),
    Color.rgb(143, 87, 199)
  )

  /** Returns the palette color for the given depth, cycling through the list. */
  override def getColor(depth: Int): Int = {
    val slot = depth % colors.size
    colors(slot)
  }
}
| Aesthetikx/canopy | library/src/main/scala/com/aesthetikx/android/canopy/view/DefaultColorProvider.scala | Scala | gpl-2.0 | 456 |
// Classic cons-list ADT expressed as a Scala 3 parameterized enum.
enum List[T] {
  case Cons(x: T, xs: List[T])
  case Nil()
}
object Test {
  import List.*
  // the list 1 :: 2 :: 3 :: Nil built from the enum constructors
  val xs = Cons(1, Cons(2, Cons(3, Nil())))
  // prints xs using the enum cases' generated toString
  def main(args: Array[String]) = println(xs)
}
| dotty-staging/dotty | tests/run/enum-List1.scala | Scala | apache-2.0 | 183 |
package com.nickelsoftware.bettercare4me.utils
import java.io.File
import java.io.FileReader
import scala.collection.JavaConversions.mapAsScalaMap
import scala.util.Random
import org.joda.time.DateTime
import org.joda.time.Days
import org.joda.time.Interval
import org.yaml.snakeyaml.constructor.SafeConstructor
import com.github.tototoshi.csv.CSVReader
import com.github.tototoshi.csv.CSVWriter
import com.nickelsoftware.bettercare4me.hedis.Scorecard
import com.nickelsoftware.bettercare4me.models.Claim
import play.api.Logger
import scalax.file.Path
/**
 * Object to read properties from environment
 *
 * Paths come from environment variables with defaults: BC4ME_DATA_DIR
 * (default "./data"), and BC4ME_CASSANDRA_CONFIG / BC4ME_SPARK_CONFIG
 * (defaults "cassandra.yaml" / "spark.yaml"), both resolved under dataDir.
 */
object Properties {
  // root data directory; the config paths below are resolved under it
  val dataDir: Path = Path.fromString(scala.util.Properties.envOrElse("BC4ME_DATA_DIR", "./data"))
  val cassandraConfig: Path = dataDir / scala.util.Properties.envOrElse("BC4ME_CASSANDRA_CONFIG", "cassandra.yaml")
  val sparkConfig: Path = dataDir / scala.util.Properties.envOrElse("BC4ME_SPARK_CONFIG", "spark.yaml")
}
object Utils {

  /**
   * Loads a YAML file into a string-keyed map using SnakeYAML's SafeConstructor.
   *
   * @param fname path of the YAML file to read
   * @return the top-level YAML mapping as a Scala Map; an empty Map when
   *         loading fails (the exception is logged, not rethrown)
   */
  def loadYamlConfig(fname: String): Map[String, Object] = {
    try {
      val yaml = new org.yaml.snakeyaml.Yaml(new SafeConstructor())
      val reader = new FileReader(fname)
      try {
        yaml.load(reader).asInstanceOf[java.util.Map[String, Object]].toMap
      } finally {
        reader.close() // the FileReader was previously leaked (never closed)
      }
    } catch {
      case ex: Exception =>
        Logger.error("Utils.loadYamlConfig: Exception caught while loading cassandra config: " + ex.getMessage())
        Map()
    }
  }
  /**
   * @param from date of the start of the interval
   * @param to date of the end date of the interval
   * @return the number of days between from and to dates
   */
  def daysBetween(from: DateTime, to: DateTime): Int = Days.daysBetween(from, to).getDays()
  /**
   * @return an interval of the given number of months leading up to (and
   *         including all of) date
   */
  def getIntervalFromMonths(months: Int, date: DateTime): Interval = {
    // end at date + 1 day so the whole of `date` falls inside the interval
    val temp = date.plusDays(1)
    new Interval(temp.minusMonths(months), temp)
  }
  /**
   * @return an interval of the given number of years leading up to (and
   *         including all of) date
   */
  def getIntervalFromYears(years: Int, date: DateTime): Interval = {
    // end at date + 1 day so the whole of `date` falls inside the interval
    val temp = date.plusDays(1)
    new Interval(temp.minusYears(years), temp)
  }
  /**
   * @return an interval of the given number of days leading up to (and
   *         including all of) date
   */
  def getIntervalFromDays(days: Int, date: DateTime): Interval = {
    // end at date + 1 day so the whole of `date` falls inside the interval
    val temp = date.plusDays(1)
    new Interval(temp.minusDays(days), temp)
  }
def add2Map[C](s: String, c: C, map: Map[String, List[C]]): Map[String, List[C]] = {
val l = map.getOrElse(s, List())
map + (s -> (c :: l))
}
def add2Map[C](s: String, l: List[C], map: Map[String, List[C]]): Map[String, List[C]] = {
if (l.isEmpty) map
else {
val l2 = map.getOrElse(s, List.empty)
if (l2.isEmpty) map + (s -> l)
else map + (s -> List.concat(l, l2))
}
}
/**
* Utility method to pick randomly one item from the list
*/
def pickOne[A](items: List[A]): A = items(Random.nextInt(items.size))
/**
* Utility method to filter all claims in code2Claims with codes (keys of code2Claims) that are in filterCodes
*
* @param code2Claims is the mapping of clinical codes to matching claims (from PatientHistory)
* @param filterCodes is the set of clinical codes that we retain from code2Claims
* @param f is the filter function applied to claims that have the filtered clinical codes (second level of filtering)
* @returns All the claims that match both the clinical codes and the filter function f
*/
def filterClaims[C](code2Claims: Map[String, List[C]], filterCodes: Set[String], f: (C) => Boolean): List[C] = {
def loop(l: List[C], m: Map[String, List[C]]): List[C] = {
if (m.isEmpty) l
else {
val (k, v) = m.head
if (filterCodes.contains(k)) loop(List.concat(v.filter(f), l), m.tail)
else loop(l, m.tail)
}
}
loop(List.empty, code2Claims)
}
/**
* @returns true if have nbr claims with different dates in claims
*/
def hasDifferentDates(nbr: Int, claims: List[Claim]): Boolean = {
def loop(dates: Set[DateTime], c: List[Claim]): Boolean = {
if (dates.size == nbr) true
else {
if (c.isEmpty) false
else {
if (!dates.contains(c.head.date)) loop(dates + c.head.date, c.tail)
else loop(dates, c.tail)
}
}
}
loop(Set(), claims)
}
/**
* Utility method to increase readability in the HEDIS Rule classes.
*
* Simply fold all the rules and build up the scorecard from an initial value
*
* @param scorecard the initial scorecard on which we build up additional scores from the list of rules
* @param rules is the list of predicates that adds contributions to the scorecard
* @returns the build up scorecard
*/
def applyRules(scorecard: Scorecard, rules: List[(Scorecard) => Scorecard]): Scorecard = rules.foldLeft(scorecard)({ (s, f) => f(s) })
//
// import com.nickelsoftware.bettercare4me.utils.Utils._
// flattenFile("./data/asm.ndc.c.csv", "./data/out.csv")
//
def flattenFile(from: String, to: String): Unit = {
import com.github.tototoshi.csv.CSVReader
import com.github.tototoshi.csv.CSVWriter
import java.io.File
val l = CSVReader.open(new File(from)).all().flatten
val w = CSVWriter.open(new File(to))
w.writeAll(List(l))
w.close
}
//
// import com.nickelsoftware.bettercare4me.utils.Utils._
// extractNDC("carbamazepine", "./data/MPM_D_2014_(final).csv", "./data/out.csv")
//
def extractNDC(name: String, from: String, to: String): Unit = {
import com.github.tototoshi.csv.CSVReader
import com.github.tototoshi.csv.CSVWriter
import java.io.File
val l = CSVReader.open(new File(from)).all()
val f = for (r <- l if (r(2).toLowerCase().startsWith(name))) yield r(0)
val w = CSVWriter.open(new File(to))
w.writeAll(List(f))
w.close
}
} | reactivecore01/bettercare4.me | play/app/com/nickelsoftware/bettercare4me/utils/Utils.scala | Scala | apache-2.0 | 5,780 |
/**
* ____ __ ____ ____ ____,,___ ____ __ __ ____
* ( _ \\ /__\\ (_ )(_ _)( ___)/ __) ( _ \\( )( )( _ \\ Read
* ) / /(__)\\ / /_ _)(_ )__) \\__ \\ )___/ )(__)( ) _ < README.txt
* (_)\\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt
*/
package controllers
import com.typesafe.config.ConfigValue
import model.{UserWiki, Users}
import org.bson.types.ObjectId
import play.api.mvc.Action
import razie.Logging
import razie.audit.Audit
import razie.db._
import razie.hosting.{Website, WikiReactors}
import razie.tconf.Visibility
import razie.wiki.admin.SendEmail
import razie.wiki.model._
import razie.wiki.model.features.WForm
import razie.wiki.util.PlayTools
import razie.wiki.{Config, Enc, Sec, Services}
/** overall settings and state for this deployment
  *
  * One named setting value, optionally scoped to a user (uid) and/or a realm.
  */
@RTable
case class DieselSettings(uid: Option[String], realm: Option[String], name: String, value: String, _id: ObjectId =
    new ObjectId) {

  /** upsert: overwrite the value of an existing (uid, realm, name) record, or create a new one */
  def set() = {
    import razie.db.tx.txn
    ROne[DieselSettings]("uid" -> uid, "realm" -> realm, "name" -> name).map { s =>
      // todo cluster propagate notification?
      RUpdate[DieselSettings](s.copy(value = this.value))
    }.getOrElse {
      RCreate[DieselSettings](this)
    }
  }
}
/** lookup helpers for persisted DieselSettings records */
object DieselSettings {

  /** value of the (uid, realm, name) setting, or default when no record exists */
  def findOrElse(uid: Option[String], realm: Option[String], name: String, default: String): String =
    find(uid, realm, name).getOrElse(default)

  /** value of the (uid, realm, name) setting, if a record exists */
  def find(uid: Option[String], realm: Option[String], name: String): Option[String] =
    ROne[DieselSettings]("uid" -> uid, "realm" -> realm, "name" -> name).map(_.value)
}
/** realm/reactor controller */
object Realm extends RazController with Logging {

  // treat an Option[Boolean] as true only when it is Some(true);
  // used implicitly by the permission checks in this controller
  implicit def obtob(o: Option[Boolean]): Boolean = o.exists(_ == true)

  val RK: String = Wikis.RK
  /** POSTed - start creating a new wiki with template. most parms are captured as form fields, in queryParms
    * @param cat is the category to create
    * @param templateWpath is the wpath to the template to use, usually a section
    * @param torspec is Spec vs Template
    * @param realm is the realm the new topic is created in (ignored when cat is "Reactor",
    *        where the new reactor's own name is used)
    */
  def createR2(cat:String, templateWpath: String, torspec:String, realm:String="rk") = FAUR { implicit request =>
    val au = request.au.get
    val data = PlayTools.postData(request.req)
    val name = data("name")
    // new reactors live in their own realm; other topics go into the given realm
    val wid =
      if(cat == "Reactor") WID(cat, name).r(name).formatted // the wid to create
      else WID(cat, name).r(realm).formatted // the wid to create

    // validation chain: any failed step short-circuits to noPerm below
    (for (
      au <- activeUser;
      isNew <- Wikis.find(wid).isEmpty orErr "Reactor with same name already created";
      hasQuota <- (au.isAdmin || au.quota.canUpdate) orCorr cNoQuotaUpdates;
      r1 <- au.hasPerm(Perm.uWiki) orCorr cNoPermission;
      n1 <- name.matches("[a-zA-Z0-9_ -]+") orErr "no special characters in the name";
      n2 <- (name.length >= 3 && name.length < 30) orErr "name too short or too long";
      twid <- WID.fromPath(templateWpath) orErr s"template/spec wpath $templateWpath not parsed";
      tw <- Wikis(realm).find(twid) orErr s"template/spec $twid not found";
      hasQuota <- (au.isAdmin || au.quota.canUpdate) orCorr cNoQuotaUpdates
    ) yield {

      // template substitution parameters: defaults overlaid with the POSTed form data
      val parms =
        (
          if("Reactor" == cat) Map(
            "reactor"-> name,
            "realm"-> name,
            "access"-> data.getOrElse("access","Public")
          ) else Map.empty
          ) ++ Map(
          "name" -> name,
          "description"-> data.getOrElse("description","no description")
        ) ++ data

      import com.typesafe.config.ConfigFactory
      import scala.collection.JavaConversions._

      var pages : Iterable[WikiEntry] = Nil
      var addMods : Iterable[String] = Nil

      // do we have pages to create?
      if(tw.section("section", "pages").isDefined) {
        // helper to read string fields out of the parsed HOCON page objects
        implicit class RConfValue (c:ConfigValue) {
          def s (k:String) =
            Option(c.unwrapped().asInstanceOf[java.util.HashMap[String,String]].get(k)).mkString
        }

        val iwikisc = tw.section("section", "pages").get.content
        // substitution
        val wikisc = parms.foldLeft(iwikisc)((a,b)=>a.replaceAll("\\\\{\\\\{\\\\$\\\\$"+b._1+"\\\\}\\\\}", b._2))
        val wikis = ConfigFactory.parseString(wikisc).resolveWith(ConfigFactory.parseMap(parms))

        // instantiate one WikiEntry per declared page, from its named template
        pages = wikis.getObject("pages") map {t =>
          val (n, page) = t
          val cat = page s "category"
          val name = page s "name"
          val label = page s "label"
          val tm = page s "template"
          val tags = (cat.toLowerCase :: (page s "tags").split(",").toList).distinct
          val co = Wikis.template(tm, parms)
          WikiEntry(cat, name, label, "md", co, au._id, tags.distinct.toSeq, name, 1, wid.parent,
            Map("owner" -> au.id,
              WikiEntry.PROP_WVIS -> Visibility.MODERATOR))
          // the realm is changed later just before saving
        }

        if(wikis.hasPath("addMods"))
          addMods = wikis.getObject("addMods") map {t =>
            val (n, page) = t
            val tm = page s "template"
            tm
          }
      }

      // the main page: either one of the declared pages with the same name, or
      // one generated from the template/spec content
      val mainPage = pages.find(_.name == name) getOrElse {
        // if the main page doesn't have a custom template, use a default
        val weCo =
          if ("Reactor" == cat && templateWpath.endsWith("#form"))
            Wikis.template(templateWpath, parms) // use the form that captured it
          else if("Spec" == torspec) {
            // for Specs, if there is a template, use it - otherwise just include the form
            tw.sections.find(_.name == "template").map {sec=>
              if(templateWpath.endsWith("#form"))
                "[[template:"+templateWpath.replaceFirst("#form$", "#template") +"]]\\n" // for Specs - just include the form...
              else
                Wikis.template(templateWpath+"#template", parms)
            } getOrElse "[[include:"+templateWpath+"]]\\n" // for Specs - just include the form...
          }
          else if("Template" == torspec) {
            tw.sections.find(_.name == "template").map {sec=>
              if(templateWpath.endsWith("#form"))
                Wikis.template(templateWpath.replaceFirst("#form$", "#template"), parms)
              else
                Wikis.template(templateWpath+"#template", parms)
            } getOrElse "[[include:"+templateWpath+"]]\\n" // for Specs - just include the form...
          }
          else
            Wikis.template(s"Category:$cat#template", parms) // last ditch attempt to find some overrides

        var we = WikiEntry(wid.cat, wid.name, s"$name", "md", weCo, au._id, Seq(), realm, 1, wid.parent,
          Map("owner" -> au.id,
            WikiEntry.PROP_WVIS -> Visibility.PRIVATE))

        // add the form fields as formDAta to the main page, if this came from a form
        if(templateWpath.endsWith("#form")) {
          //todo validate data, use errors etc - this requires some rework on forms code
          //        val (newData, errors) = wf.validate(data2)
          val j = new org.json.JSONObject()
          parms.foreach(t => j.put(t._1, t._2))
          we = we.copy(content = we.content + "\\n\\n"+WForm.formData(j))
        }
        we
      }

      // todo visibility? public unless you pay 20$ account

      // persist everything in one transaction; for reactors, register the new
      // realm and grant the creator moderator rights on it
      razie.db.tx(s"create.$cat", au.userName) { implicit txn =>
        UserWiki(au._id, mainPage.uwid, "Admin").create
        if ("Reactor" == cat) {
          mainPage.copy(realm=name).create // create first, before using the reactor just below
          WikiReactors.add(name, mainPage)
          pages = pages.filter(_.name != name) map (_.copy (realm=name))
          // this will also copy verified etc
          request.au.get.update(
            request.au.get.addPerm(
              name, Perm.Moderator.s
            ).copy(realms=au.realms+name)
          )
        } else {
          WikiUtil.applyStagedLinks(mainPage.wid,
            mainPage).create // create first, before using the reactor just below
        }
        cleanAuth(request.au)
        Services ! WikiAudit("CREATE_FROM_TEMPLATE", mainPage.wid.wpath, Some(au._id))

        pages foreach {p=>
          WikiUtil.applyStagedLinks(p.wid, p).create
        }
      }

      // notifications: parent watchers and site admin
      SendEmail.withSession(request.realm) { implicit mailSession =>
        au.quota.incUpdates
        au.shouldEmailParent("Everything").map(parent => Emailer.sendEmailChildUpdatedWiki(parent, au, wid)) // ::: notifyFollowers (we)
        Emailer.tellAdmin("New REACTOR", au.userName, wid.ahref)
      }

      // apply any additional mods declared by the template
      val x = addMods.toList
      if(x.nonEmpty) {
        x.map(m=>
          addMod2(m, name).apply(request.req).value.get.get
        )
      }

      if("Reactor" == cat)
        Redirect(s"/wikie/switchRealm/$name", SEE_OTHER)
      else
        Redirect(wid.urlRelative(request.realm))
    }) getOrElse
      noPerm(wid, s"Cant' create your $cat ...")
  }
def sso(id:String, token:String) = Action { request =>
val tok = Sec.dec(new String(Sec.decBase64(token)))
val res = Some(id).filter(_ == tok).flatMap {conn=>
// extract user id and check with token
val uid = Enc.fromSession(id)
Users.findUserByEmailEnc(uid).map { u =>
Audit.logdb("USER_LOGIN.SSO", u.userName, u.firstName + " " + u.lastName + " realm: " + request.host)
debug("SSO.conn=" + (Services.config.CONNECTED -> Enc.toSession(u.email)))
Redirect("/").withSession(Services.config.CONNECTED -> Enc.toSession(u.email))
}
} getOrElse Redirect("/")
res
}
/** switch to new reactor and sso */
def switchRealm(realm:String) = FAUR { implicit request =>
// if active and owns target, then sso -
if (Services.config.isLocalhost) {
Config.isimulateHost = s"$realm.dieselapps.com"
DieselSettings(None, None, "isimulateHost", Config.isimulateHost).set
Redirect("/", SEE_OTHER)
} else {
// send user id and encripted
val conn = request.session.get(Services.config.CONNECTED).mkString
val token = Sec.encBase64(Sec.enc(conn))
val w = Website.forRealm(realm)
val url = w.map(_.url).getOrElse(s"http://$realm.dieselapps.com")
Redirect(s"$url/wikie/sso/$conn?token=" + token , SEE_OTHER)
}
}
/** simulate this realm as default */
def setRealm(realm:String) = FAUR { implicit request =>
if(request.au.exists(_.isAdmin)) {
// if active and owns target, then sso -
Config.isimulateHost = s"$realm.dieselapps.com"
Redirect("/", SEE_OTHER)
} else
Unauthorized("meh")
}
/** start wizard to add module to reactor */
def addMod1(realm:String) = FAUR("add mod") { implicit request =>
for (
au <- request.au;
can <- controllers.WikiUtil.canEdit(WID("Reactor", realm), auth, None);
r1 <- au.hasPerm(Perm.uWiki) orCorr cNoPermission;
twid <- Some(WID("Reactor", realm).r(realm));
uwid <- twid.uwid orErr s"template/spec $realm not found"
) yield {
ROK.k apply { implicit stok =>
views.html.wiki.wikieAddModule(realm)
}
}
}
/** POSTed - add module to a reactor
* @param module is the mod to add
* @param realm is the id of the reactor to add to
*/
def addMod2(module:String, realm:String) = FAUR("adding mod") { implicit request =>
val data = PlayTools.postData(request.req)
val wid = WID("Reactor", realm).r(realm)
for (
au <- request.au;
twid <- WID.fromPath(module) orErr s"$module not a proper WID";
tw <- Wikis.dflt.find(twid) orErr s"Module $twid not found";
reactor <- Wikis.find(wid) orErr s"Reactor $realm not found";
can <- controllers.WikiUtil.canEdit(wid, auth, Some(reactor));
r1 <- au.hasPerm(Perm.uWiki) orCorr cNoPermission;
hasQuota <- (au.isAdmin || au.quota.canUpdate) orCorr cNoQuotaUpdates
) yield {
val parms = Map(
"reactor"-> realm,
"realm"-> realm,
"description"-> data.getOrElse("description","no description")
) ++ data
import com.typesafe.config.ConfigFactory
import scala.collection.JavaConversions._
var pages : Iterable[WikiEntry] = Nil
// do we have pages to create?
if(tw.section("section", "pages").isDefined) {
implicit class RConfValue (c:ConfigValue) {
def s (k:String) = c.unwrapped().asInstanceOf[java.util.HashMap[String,String]].get(k)
}
val iwikisc = tw.section("section", "pages").get.content
val wikisc = parms.foldLeft(iwikisc)((a,b)=>a.replaceAll("\\\\$\\\\{"+b._1+"\\\\}", b._2))
val wikis = ConfigFactory.parseString(wikisc).resolveWith(ConfigFactory.parseMap(parms))
pages = wikis.getObject("pages") flatMap {t =>
val (n, page) = t
val cat = page s "category"
val name = page s "name"
val label = page s "label"
val tm = page s "template"
val co = Wikis.template(tm, parms)
// if page with same exists, forget it (make sure it didn't fallback to another realm)
if(WID(cat,name).r(realm).page.exists(_.realm == realm)) Nil
else List(WikiEntry(cat, name, label, "md", co, au._id, Seq(), realm, 1, None,
Map("owner" -> au.id,
WikiEntry.PROP_WVIS -> Visibility.PRIVATE))
)
}
}
pages = pages.map(we=> we.cloneProps(we.props ++ Map("owner" -> au.id), au._id))
// todo visibility? public unless you pay 20$ account
razie.db.tx(s"addMod.$module", au.userName) { implicit txn =>
pages foreach{we=>
we.create
Services ! WikiAudit(WikiAudit.CREATE_WIKI, we.wid.wpathFull, Some(au._id), None, Some(we))
}
}
Services ! WikiAudit("CREATE_MOD", tw.wid.wpath, Some(au._id))
Services ! WikiAudit(WikiAudit.UPD_EDIT, reactor.wid.wpathFull, Some(au._id), None, Some(reactor), Some(reactor))
// clean caches etc
SendEmail.withSession(request.realm) { implicit mailSession =>
au.quota.incUpdates
au.shouldEmailParent("Everything").map(parent => Emailer.sendEmailChildUpdatedWiki(parent, au, reactor.wid)) // ::: notifyFollowers (we)
Emailer.tellAdmin("ADD MOD", au.userName, reactor.wid.ahref)
}
Redirect(controllers.WikiUtil.w(reactor.wid, true)).flashing("count" -> "0")
}
}
}
| razie/diesel-hydra | wiki/app/controllers/Realm.scala | Scala | apache-2.0 | 14,364 |
/*
Simple application that prints the Nth (N <= 5)
Sierpinski triangle, with a size of 32 x 63
Adrian deWynter, 2017
*/
object Application {
val rows = 32
val cols = 64
/** Prints `rows` lines: each line is `cols - 1` characters wide and holds a
  * centred band of '1's that widens by one column per line, padded with '_'.
  * The `x` parameter is not used by the current implementation. */
def printARow(x: Int) = {
  for (row <- 0 until rows) {
    val sb = new StringBuilder
    for (col <- 1 until cols) {
      // outside the band centred on cols/2 -> padding, inside -> '1'
      if (col < (cols / 2) - row || col > (cols / 2) + row) sb.append("_")
      else sb.append("1")
    }
    sb.append("\\n")
    println(sb.toString)
  }
}
/** Prints the row pattern once per column index (1 to cols).
  *
  * Fix: the original body read `1 to cols) foreach ...` — the opening
  * parenthesis was missing, so the file did not compile.
  *
  * NOTE(review): the parameter `n` is currently unused even though the file
  * header says the Nth triangle should be printed — confirm intended use.
  */
def printTriangle(n : Int = 3) = {
  (1 to cols) foreach (x => printARow(x))
}
/** Entry point: prints one pattern via printARow.
  * `N` is only referenced by the commented-out loop below. */
def main(args: Array[String]): Unit = {
  var N = 0
  printARow(10)
  //(N-1 to 1 by -1) foreach (n => printTriangle(n/2))
}
} | adewynter/Tools | Algorithms/Exercises/Scala/Sierpinski.scala | Scala | mit | 662 |
package demo
package routes
import demo.components.{ReactTreeViewInfo, ReactTreeViewDemo}
import demo.pages.ReactTreeViewPage
import japgolly.scalajs.react.extra.router.RouterConfigDsl
/** Routes for the ReactTreeView demo section: one Info page and one Demo page. */
object ReactTreeViewRouteModule {

  case object Info extends LeftRoute("Info", "info", () => ReactTreeViewInfo())
  case object Demo extends LeftRoute("Demo", "demo", () => ReactTreeViewDemo())

  /** Left-hand navigation entries, in display order. */
  val menu: List[LeftRoute] = List(Info, Demo)

  /** One static route per menu entry, combined with the router's `|` alternative. */
  val routes = RouterConfigDsl[LeftRoute].buildRule { dsl =>
    import dsl._
    menu
      .map(item => staticRoute(item.route, item) ~> renderR(router => ReactTreeViewPage(item, router)))
      .reduce(_ | _)
  }
}
| elacin/scalajs-react-components | demo/src/main/scala/demo/routes/ReactTreeViewRouteModule.scala | Scala | apache-2.0 | 627 |
package net.kwas.impatient.ch4
import org.scalatest._
/** Smoke test: running the chapter's `main` with no arguments must not throw. */
class ExercisesSpec extends FlatSpec with Matchers {
  "Chapter 4 exercises" should "not explode" in {
    Exercises.main(Array.empty)
  }
}
| dkwasny/ScalaImpatient | src/test/scala/net/kwas/impatient/ch4/ExercisesSpec.scala | Scala | mit | 197 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.csv
import java.nio.charset.StandardCharsets
import java.time.ZoneId
import java.util.Locale
import com.univocity.parsers.csv.{CsvParserSettings, CsvWriterSettings, UnescapedQuoteHandling}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.LegacyBehaviorPolicy
class CSVOptions(
@transient val parameters: CaseInsensitiveMap[String],
val columnPruning: Boolean,
defaultTimeZoneId: String,
defaultColumnNameOfCorruptRecord: String)
extends Logging with Serializable {
/** Convenience constructor: wraps the plain Map in a CaseInsensitiveMap and
  * falls back to the current session's corrupt-record column name. */
def this(
    parameters: Map[String, String],
    columnPruning: Boolean,
    defaultTimeZoneId: String) = {
  this(
    CaseInsensitiveMap(parameters),
    columnPruning,
    defaultTimeZoneId,
    SQLConf.get.columnNameOfCorruptRecord)
}
/** Convenience constructor: wraps the plain Map in a CaseInsensitiveMap,
  * keeping the caller-supplied corrupt-record column name. */
def this(
    parameters: Map[String, String],
    columnPruning: Boolean,
    defaultTimeZoneId: String,
    defaultColumnNameOfCorruptRecord: String) = {
  this(
    CaseInsensitiveMap(parameters),
    columnPruning,
    defaultTimeZoneId,
    defaultColumnNameOfCorruptRecord)
}
/** Reads a single-character option: missing/null -> default, empty string ->
  * NUL character, exactly one char -> that char, longer -> error. */
private def getChar(paramName: String, default: Char): Char =
  parameters.get(paramName) match {
    case None | Some(null) => default
    case Some("") => '\u0000'
    case Some(value) if value.length == 1 => value.charAt(0)
    case _ => throw QueryExecutionErrors.paramExceedOneCharError(paramName)
  }
/** Reads an integer option: missing/null -> default, otherwise parses the
  * value, converting a NumberFormatException into a query-execution error. */
private def getInt(paramName: String, default: Int): Int =
  parameters.get(paramName) match {
    case None | Some(null) => default
    case Some(value) =>
      try value.toInt
      catch {
        case e: NumberFormatException =>
          throw QueryExecutionErrors.paramIsNotIntegerError(paramName, value)
      }
  }
/** Reads a boolean option: missing falls back to `default`; the value must be
  * a case-insensitive "true"/"false", anything else is an error. */
private def getBool(paramName: String, default: Boolean = false): Boolean = {
  parameters.getOrElse(paramName, default.toString) match {
    case null => default
    case param => param.toLowerCase(Locale.ROOT) match {
      case "true" => true
      case "false" => false
      case _ => throw QueryExecutionErrors.paramIsNotBooleanValueError(paramName)
    }
  }
}
// ---- separator / parsing basics -------------------------------------------
// "sep" takes precedence over the legacy "delimiter" alias.
val delimiter = CSVExprUtils.toDelimiterStr(
  parameters.getOrElse("sep", parameters.getOrElse("delimiter", ",")))
val parseMode: ParseMode =
  parameters.get("mode").map(ParseMode.fromString).getOrElse(PermissiveMode)
// "encoding" takes precedence over the legacy "charset" alias.
val charset = parameters.getOrElse("encoding",
  parameters.getOrElse("charset", StandardCharsets.UTF_8.name()))
val quote = getChar("quote", '\"')
val escape = getChar("escape", '\\')
// Same single-char semantics as getChar, except empty means "unset" (None).
val charToEscapeQuoteEscaping = parameters.get("charToEscapeQuoteEscaping") match {
  case None => None
  case Some(null) => None
  case Some(value) if value.length == 0 => None
  case Some(value) if value.length == 1 => Some(value.charAt(0))
  case _ => throw QueryExecutionErrors.paramExceedOneCharError("charToEscapeQuoteEscaping")
}
// NUL means "no comment character" (see isCommentSet below).
val comment = getChar("comment", '\u0000')
val headerFlag = getBool("header")
val inferSchemaFlag = getBool("inferSchema")
val ignoreLeadingWhiteSpaceInRead = getBool("ignoreLeadingWhiteSpace", default = false)
val ignoreTrailingWhiteSpaceInRead = getBool("ignoreTrailingWhiteSpace", default = false)
// For write, both options were `true` by default. We leave it as `true` for
// backwards compatibility.
val ignoreLeadingWhiteSpaceFlagInWrite = getBool("ignoreLeadingWhiteSpace", default = true)
val ignoreTrailingWhiteSpaceFlagInWrite = getBool("ignoreTrailingWhiteSpace", default = true)
val columnNameOfCorruptRecord =
  parameters.getOrElse("columnNameOfCorruptRecord", defaultColumnNameOfCorruptRecord)
// ---- special value representations ----------------------------------------
val nullValue = parameters.getOrElse("nullValue", "")
val nanValue = parameters.getOrElse("nanValue", "NaN")
val positiveInf = parameters.getOrElse("positiveInf", "Inf")
val negativeInf = parameters.getOrElse("negativeInf", "-Inf")
// "compression" takes precedence over the legacy "codec" alias.
val compressionCodec: Option[String] = {
  val name = parameters.get("compression").orElse(parameters.get("codec"))
  name.map(CompressionCodecs.getCodecClassName)
}
// ---- time zone, locale and date/timestamp formats -------------------------
val zoneId: ZoneId = DateTimeUtils.getZoneId(
  parameters.getOrElse(DateTimeUtils.TIMEZONE_OPTION, defaultTimeZoneId))
// A language tag in IETF BCP 47 format
val locale: Locale = parameters.get("locale").map(Locale.forLanguageTag).getOrElse(Locale.US)
val dateFormatInRead: Option[String] = parameters.get("dateFormat")
val dateFormatInWrite: String = parameters.getOrElse("dateFormat", DateFormatter.defaultPattern)
// Under the LEGACY parser policy a concrete default pattern is forced for
// reads; otherwise "no pattern" lets the new parser infer the format.
val timestampFormatInRead: Option[String] =
  if (SQLConf.get.legacyTimeParserPolicy == LegacyBehaviorPolicy.LEGACY) {
    Some(parameters.getOrElse("timestampFormat",
      s"${DateFormatter.defaultPattern}'T'HH:mm:ss.SSSXXX"))
  } else {
    parameters.get("timestampFormat")
  }
val timestampFormatInWrite: String = parameters.getOrElse("timestampFormat",
  if (SQLConf.get.legacyTimeParserPolicy == LegacyBehaviorPolicy.LEGACY) {
    s"${DateFormatter.defaultPattern}'T'HH:mm:ss.SSSXXX"
  } else {
    s"${DateFormatter.defaultPattern}'T'HH:mm:ss[.SSS][XXX]"
  })
val timestampNTZFormatInRead: Option[String] = parameters.get("timestampNTZFormat")
val timestampNTZFormatInWrite: String = parameters.getOrElse("timestampNTZFormat",
  s"${DateFormatter.defaultPattern}'T'HH:mm:ss[.SSS]")
// ---- parser limits and quoting behaviour ----------------------------------
val multiLine = parameters.get("multiLine").map(_.toBoolean).getOrElse(false)
val maxColumns = getInt("maxColumns", 20480)
val maxCharsPerColumn = getInt("maxCharsPerColumn", -1)
val escapeQuotes = getBool("escapeQuotes", true)
val quoteAll = getBool("quoteAll", false)
/**
 * The max error content length in CSV parser/writer exception message.
 */
val maxErrorContentLength = 1000
val isCommentSet = this.comment != '\u0000'
val samplingRatio =
  parameters.get("samplingRatio").map(_.toDouble).getOrElse(1.0)
/**
 * Forcibly apply the specified or inferred schema to datasource files.
 * If the option is enabled, headers of CSV files will be ignored.
 */
val enforceSchema = getBool("enforceSchema", default = true)
/**
 * String representation of an empty value in read and in write.
 */
val emptyValue = parameters.get("emptyValue")
/**
 * The string is returned when CSV reader doesn't have any characters for input value,
 * or an empty quoted string `""`. Default value is empty string.
 */
val emptyValueInRead = emptyValue.getOrElse("")
/**
 * The value is used instead of an empty string in write. Default value is `""`
 */
val emptyValueInWrite = emptyValue.getOrElse("\"\"")
/**
 * A string between two consecutive records. (The original comment said
 * "JSON records"; this is the CSV `lineSep` option.)
 */
val lineSeparator: Option[String] = parameters.get("lineSep").map { sep =>
  require(sep.nonEmpty, "'lineSep' cannot be an empty string.")
  require(sep.length == 1, "'lineSep' can contain only 1 character.")
  sep
}
val lineSeparatorInRead: Option[Array[Byte]] = lineSeparator.map { lineSep =>
  lineSep.getBytes(charset)
}
val lineSeparatorInWrite: Option[String] = lineSeparator
val inputBufferSize: Option[Int] = parameters.get("inputBufferSize").map(_.toInt)
  .orElse(SQLConf.get.getConf(SQLConf.CSV_INPUT_BUFFER_SIZE))
/**
 * The handling method to be used when unescaped quotes are found in the input.
 */
val unescapedQuoteHandling: UnescapedQuoteHandling = UnescapedQuoteHandling.valueOf(parameters
  .getOrElse("unescapedQuoteHandling", "STOP_AT_DELIMITER").toUpperCase(Locale.ROOT))
/** Materializes the parsed options into univocity CsvWriterSettings. */
def asWriterSettings: CsvWriterSettings = {
  val settings = new CsvWriterSettings()

  // CSV format: delimiter, quoting and (optional) comment character.
  val fmt = settings.getFormat
  fmt.setDelimiter(delimiter)
  fmt.setQuote(quote)
  fmt.setQuoteEscape(escape)
  charToEscapeQuoteEscaping.foreach(fmt.setCharToEscapeQuoteEscaping)
  if (isCommentSet) fmt.setComment(comment)
  lineSeparatorInWrite.foreach(fmt.setLineSeparator)

  // Writer behaviour.
  settings.setIgnoreLeadingWhitespaces(ignoreLeadingWhiteSpaceFlagInWrite)
  settings.setIgnoreTrailingWhitespaces(ignoreTrailingWhiteSpaceFlagInWrite)
  settings.setNullValue(nullValue)
  settings.setEmptyValue(emptyValueInWrite)
  settings.setSkipEmptyLines(true)
  settings.setQuoteAllFields(quoteAll)
  settings.setQuoteEscapingEnabled(escapeQuotes)
  settings.setErrorContentLength(maxErrorContentLength)
  settings
}
/** Materializes the parsed options into univocity CsvParserSettings. */
def asParserSettings: CsvParserSettings = {
  val settings = new CsvParserSettings()
  val format = settings.getFormat
  format.setDelimiter(delimiter)
  format.setQuote(quote)
  format.setQuoteEscape(escape)
  lineSeparator.foreach(format.setLineSeparator)
  charToEscapeQuoteEscaping.foreach(format.setCharToEscapeQuoteEscaping)
  if (isCommentSet) {
    format.setComment(comment)
  } else {
    // No comment character configured: disable comment processing entirely.
    settings.setCommentProcessingEnabled(false)
  }
  settings.setIgnoreLeadingWhitespaces(ignoreLeadingWhiteSpaceInRead)
  settings.setIgnoreTrailingWhitespaces(ignoreTrailingWhiteSpaceInRead)
  settings.setReadInputOnSeparateThread(false)
  inputBufferSize.foreach(settings.setInputBufferSize)
  settings.setMaxColumns(maxColumns)
  settings.setNullValue(nullValue)
  settings.setEmptyValue(emptyValueInRead)
  settings.setMaxCharsPerColumn(maxCharsPerColumn)
  settings.setUnescapedQuoteHandling(unescapedQuoteHandling)
  // Auto-detect the line separator only when none was given and multiLine is on.
  settings.setLineSeparatorDetectionEnabled(lineSeparatorInRead.isEmpty && multiLine)
  lineSeparatorInRead.foreach { _ =>
    settings.setNormalizeLineEndingsWithinQuotes(!multiLine)
  }
  settings.setErrorContentLength(maxErrorContentLength)
  settings
}
}
| mahak/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVOptions.scala | Scala | apache-2.0 | 10,805 |
package scala_code
/**
* Created by chanjinpark on 2016. 8. 16..
*/
/** Demo class for implicit parameters: `add` prints the sum of this
  * instance's value and the (possibly implicitly supplied) argument's value. */
class X(val i: Int) {
  def add(implicit other: X) = println(other.i + i)
}
object X {
  // Companion-object implicit: found via implicit scope when no closer
  // implicit X is available at the call site.
  implicit def xx = new X(3)
}
/** Type class with a binary combine operation. */
abstract class SemiGroup[A] {
  def add(x: A, y: A): A
}
/** SemiGroup extended with an identity element `unit`. */
abstract class Monoid[A] extends SemiGroup[A] {
  def unit: A
}
/** Demonstrates implicit parameter resolution: companion-object implicits,
  * locally defined implicits, and type-class style implicit objects. */
object ImplicitParameters {
  def main(args: Array[String]): Unit = {
    /*
     When a method requires an implicit there are several ways that the implicit is resolved.
     One way is to search for an implicit definition in the companion object of the required type.
     For example: def x(implicit m:MyClass) parameter m will search local scope, class hierarchy and
     the MyClass companion object for an implicit val or def. (More on implicit resolution later).
     */
    println("--- 1")
    new X(3).add
    val other = new {
      def print(implicit x: X) = println(x.i)
    }
    other.print
    // A local implicit definition; subsequent calls can resolve against it.
    implicit def x = new X(32)
    other.print
    println("--- 2")
    /*
     The example below defines a `sum` method that folds the items of a list
     using the monoid's `add` and `unit` methods.  Note that implicit values
     cannot be top-level: they must be members of some template, hence the
     implicit objects defined locally here.
     */
    implicit object StringMonoid extends Monoid[String] {
      def add(x: String, y: String): String = x concat y
      def unit: String = ""
    }
    implicit object IntMonoid extends Monoid[Int] {
      def add(x: Int, y: Int): Int = x + y
      def unit: Int = 0
    }
    // Recursive fold over the list; the Monoid instance is picked implicitly.
    def sum[A](xs: List[A])(implicit m: Monoid[A]): A = {
      if (xs.isEmpty) m.unit
      else m.add (xs.head, sum (xs.tail) )
    }
    println(sum(List(1, 2, 3)))
    println(sum(List("a", "b", "c")))
  }
}
| chanjin/IntelliTraj | src/main/scala/scala_code/ImplicitParameters.scala | Scala | apache-2.0 | 1,761 |
package dao
import model.{Airport, Country, Runaway}
import scala.concurrent.Future
/** Asynchronous data-access interface for airports, countries and runaways. */
trait DAO {
  def allAirports(): Future[List[Airport]]
  def allCountries(): Future[List[Country]]
  def allRunaways(): Future[List[Runaway]]
  // NOTE(review): despite the method name, the parameter is a country code --
  // confirm the lookup key against the implementation.
  def findAirportsByName(countryCode: String): Future[List[(Airport, List[Runaway])]]
  /** (Country, Int) pairs; by the name, the Int counts that country's airports. */
  def allCountriesSortedByNumberOfAirports(): Future[List[(Country, Int)]]
  def typeOfSurfacesPerCountry(): Future[List[(Country, List[String])]]
  def topIdentifications(): Future[List[(String, Int)]]
}
| MysterionRise/airport-dangerzone | app/dao/DAO.scala | Scala | mit | 528 |
package skinny.micro.test
import java.io.{ NotSerializableException, ObjectOutputStream }
import javax.servlet.http.{ HttpSessionAttributeListener, HttpSessionBindingEvent }
/*
* Taken from https://gist.github.com/3485500, Thanks @LeifWarner
*/
/** Attempts to serialize every session attribute on add/remove/replace so
  * that a non-serializable value fails fast via sys.error. */
object SessionSerializingListener extends HttpSessionAttributeListener {

  // Output is discarded; only serialization success/failure matters.
  val oos = new ObjectOutputStream(NullOut)

  def attributeAdded(event: HttpSessionBindingEvent) { serializeSession(event) }

  def attributeRemoved(event: HttpSessionBindingEvent) { serializeSession(event) }

  def attributeReplaced(event: HttpSessionBindingEvent) { serializeSession(event) }

  def serializeSession(event: HttpSessionBindingEvent) {
    try oos.writeObject(event.getValue)
    catch {
      case e: NotSerializableException =>
        sys.error("Can't serialize session key '" + event.getName + "' value of type " + e.getMessage)
    }
  }
}
| xerial/skinny-micro | micro-test/src/main/scala/skinny/micro/test/SessionSerializingListener.scala | Scala | bsd-2-clause | 914 |
package generic
import java.io.{DataInputStream,DataOutputStream}
import scala.collection.IterableFactory
import scala.collection.mutable.ArrayBuffer
import Shapes.*
/** Type-class based binary (de)serialization over the generic shape
  * representation (Sum/Prod/EnumValue from Shapes). */
object Serialization {

  /** Type class: write a T to a DataOutputStream / read it back. */
  trait Serializable[T] {
    def write(x: T, out: DataOutputStream): Unit
    def read(in: DataInputStream): T
  }

  // Unit carries no information: nothing is written or read.
  implicit val UnitSerializable: Serializable[Unit] =
    new Serializable[Unit] {
      def write(x: Unit, out: DataOutputStream) = ()
      def read(in: DataInputStream) = ()
    }

  // Singleton types also need no bytes; reading returns the sole value.
  implicit def SingleSerializable[T](implicit
    ev1: Singleton[T]
  ): Serializable[T] = new Serializable[T] {
    def write(x: T, out: DataOutputStream) = ()
    def read(in: DataInputStream) = ev1.value
  }

  // Enum values are encoded by their short tag.
  implicit def EnumValueSerializable[T]: Serializable[EnumValue[T]] =
    new Serializable[EnumValue[T]] {
      def write(x: EnumValue[T], out: DataOutputStream) = out.writeShort(x.tag)
      def read(in: DataInputStream) = EnumValue(in.readShort())
    }

  implicit val BooleanSerializable: Serializable[Boolean] =
    new Serializable[Boolean] {
      def write(x: Boolean, out: DataOutputStream) = out.writeBoolean(x)
      def read(in: DataInputStream) = in.readBoolean()
    }

  implicit val IntSerializable: Serializable[Int] =
    new Serializable[Int] {
      def write(x: Int, out: DataOutputStream) = out.writeInt(x)
      def read(in: DataInputStream) = in.readInt()
    }

  implicit val StringSerializable: Serializable[String] =
    new Serializable[String] {
      def write(x: String, out: DataOutputStream) = out.writeUTF(x)
      def read(in: DataInputStream) = in.readUTF()
    }

  // Recursive types go through their unfolding into a shape U.
  // (Deliberately not implicit -- presumably to avoid divergence; confirm.)
  def RecSerializable[T, U](implicit
    ev1: T unfolds U,
    ev2: Serializable[U]
  ): Serializable[T] =
    new Serializable[T] {
      def write(x: T, out: DataOutputStream) = ev2.write(ev1.toShape(x), out)
      def read(in: DataInputStream) = ev1.fromShape(ev2.read(in))
    }

  // Types with a generic shape are serialized via that shape.
  implicit def ShapedSerializable[T, U](implicit
    ev1: T shaped U,
    ev2: Serializable[U]
  ): Serializable[T] =
    new Serializable[T] {
      def write(x: T, out: DataOutputStream) = ev2.write(ev1.toShape(x), out)
      def read(in: DataInputStream) = ev1.fromShape(ev2.read(in))
    }

  // Sums are tagged with one boolean (false = Fst, true = Snd).
  implicit def SumSerializable[T, U](implicit
    // parameters need to be call by name, or we get a recursive lazy val definition in materialized code
    ev1: => Serializable[T],
    ev2: => Serializable[U]
  ): Serializable[Sum[T, U]] =
    new Serializable[Sum[T, U]] {
      def write(x: Sum[T, U], out: DataOutputStream): Unit = x match {
        case Fst(y) => out.writeBoolean(false); ev1.write(y, out)
        case Snd(y) => out.writeBoolean(true); ev2.write(y, out)
      }
      def read(in: DataInputStream) = in.readBoolean() match {
        case false => Fst(ev1.read(in))
        case true => Snd(ev2.read(in))
      }
    }

  // Products are written field-by-field, in order.
  implicit def ProdSerializable[T, U](implicit
    ev1: Serializable[T],
    ev2: Serializable[U]
  ): Serializable[Prod[T, U]] =
    new Serializable[Prod[T, U]] {
      def write(x: Prod[T, U], out: DataOutputStream): Unit = {
        ev1.write(x.fst, out)
        ev2.write(x.snd, out)
      }
      def read(in: DataInputStream) = {
        Prod(ev1.read(in), ev2.read(in))
      }
    }

  // Collections are length-prefixed, then element-by-element; reading
  // rebuilds through the collection's factory.
  implicit def IterableSerializable[I[X] <: Iterable[X], Elem](implicit
    ev1: IterableFactory[I],
    ev2: Serializable[Elem]
  ): Serializable[I[Elem]] =
    new Serializable[I[Elem]] {
      def write(xs: I[Elem], out: DataOutputStream) = {
        out.writeInt(xs.size)
        xs.foreach(ev2.write(_, out))
      }
      def read(in: DataInputStream) = {
        val bldr = ev1.newBuilder[Elem]
        for (i <- 0 until in.readInt()) bldr += ev2.read(in)
        bldr.result()
      }
    }
}
| dotty-staging/dotty | tests/run/generic/Serialization.scala | Scala | apache-2.0 | 3,738 |
package org.scalacoin.marshallers.transaction
import org.scalacoin.protocol.BitcoinAddress
import org.scalacoin.protocol.transaction.TransactionOutputMeta
import org.scalatest.{FlatSpec, MustMatchers}
import spray.json._
/**
* Created by Tom on 1/12/2016.
*/
/** Checks that TransactionOutputMetaMarshaller parses a gettxout-style JSON
  * document into a TransactionOutputMeta with the expected field values. */
class TransactionOutputMetaMarshallerTest extends FlatSpec with MustMatchers {
  // Raw JSON fixture for one transaction output.
  val str =
    """
      |{
      |    "bestblock" : "000000000000078233dfa9376fe6bc3b68e2bbda04700b5e663a1d4c8b322e62",
      |    "confirmations" : 1,
      |    "value" : 0.75829574,
      |    "scriptPubKey" : {
      |        "asm" : "OP_HASH160 5a81f53ac1ecf0312a2a4df29a734b8f2c0d8c93 OP_EQUAL",
      |        "hex" : "a9145a81f53ac1ecf0312a2a4df29a734b8f2c0d8c9387",
      |        "reqSigs" : 1,
      |        "type" : "scripthash",
      |        "addresses" : [
      |            "2N1VnVBccBVPrWgPgLaszLk2UMwEHTXTAuG"
      |        ]
      |    },
      |    "version" : 1,
      |    "coinbase" : false
      |}
    """.stripMargin
  val json = str.parseJson
  "TransactionOutputMetaMarshaller" must "parse meta information for tx output" in {
    val meta : TransactionOutputMeta = TransactionOutputMetaMarshaller.TransactionOutputMetaFormatter.read(json)
    meta.bestBlock must be ("000000000000078233dfa9376fe6bc3b68e2bbda04700b5e663a1d4c8b322e62")
    meta.confirmations must be (1)
    meta.value.value must be (0.75829574)
    meta.scriptPubKey.asm must be ("OP_HASH160 5a81f53ac1ecf0312a2a4df29a734b8f2c0d8c93 OP_EQUAL")
    meta.scriptPubKey.hex must be ("a9145a81f53ac1ecf0312a2a4df29a734b8f2c0d8c9387")
    meta.version must be (1)
    meta.coinbase must be (false)
  }
}
| TomMcCabe/scalacoin | src/test/scala/org/scalacoin/marshallers/transaction/TransactionOutputMetaMarshallerTest.scala | Scala | mit | 1,644 |
package lib
/** A name with an optional version.  Ordering compares parsed version tags;
  * an unversioned name sorts after a versioned one. */
case class VersionedName(
  name: String,
  version: Option[String] = None
) extends Ordered[VersionedName] {

  private val versionTag = version.map(VersionTag(_))

  /** "name" when unversioned, otherwise "name:version". */
  val label = version.fold(name)(v => s"$name:$v")

  def compare(that: VersionedName) = (versionTag, that.versionTag) match {
    case (None, None)       => 0
    case (None, Some(_))    => 1
    case (Some(_), None)    => -1
    case (Some(a), Some(b)) => a.compare(b)
  }
}
| Seanstoppable/apidoc-generator | lib/src/main/scala/VersionedName.scala | Scala | mit | 595 |
package org.scalafmt.config
import metaconfig.ConfCodec
/** Docstring style setting; decoded from configuration via `reader`. */
sealed abstract class Docstrings

object Docstrings {
  // Config codec accepting exactly the three alternatives declared below.
  implicit val reader: ConfCodec[Docstrings] =
    ReaderUtil.oneOf[Docstrings](JavaDoc, ScalaDoc, preserve)
  case object JavaDoc extends Docstrings
  case object ScalaDoc extends Docstrings
  case object preserve extends Docstrings
}
| olafurpg/scalafmt | scalafmt-core/shared/src/main/scala/org/scalafmt/config/Docstrings.scala | Scala | apache-2.0 | 348 |
package org.akka.essentials.stm.transactor.example2
import scala.concurrent.Await
import scala.concurrent.duration._
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.OneForOneStrategy
import akka.actor.Props
import akka.actor.SupervisorStrategy._
import akka.pattern.ask
import akka.transactor.CoordinatedTransactionException
import akka.util.Timeout
class BankActor extends Actor with ActorLogging {
val transferActor = context.actorOf(Props[TransferActor], name = "TransferActor")
implicit val timeout = Timeout(5 seconds)
def receive = {
  // Transfers are delegated fire-and-forget to the child transactor.
  case transfer: TransferMsg =>
    transferActor ! transfer
  // Balance queries ask the child and block for the reply, then print it.
  // NOTE(review): Await.result inside receive blocks the actor thread -- an
  // akka anti-pattern; consider the ask + pipeTo pattern instead.
  case balance: AccountBalance =>
    val future = (transferActor ? balance).mapTo[AccountBalance]
    val account = Await.result(future, timeout.duration)
    println("Account #" + account.accountNumber + " , Balance #" + account.accountBalance)
}
// Supervision: keep the child running through transaction/state failures,
// stop it on bad arguments, escalate anything else.
override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 10 seconds) {
  case _: CoordinatedTransactionException => Resume
  case _: IllegalStateException => Resume
  case _: IllegalArgumentException => Stop
  case _: Exception => Escalate
}
} | rokumar7/trial | AkkaSTMExample/src/main/scala/org/akka/essentials/stm/transactor/example2/BankActor.scala | Scala | unlicense | 1,190 |
package controllers
import play.api.i18n.{Messages, MessagesProvider}
import scala.concurrent.ExecutionContext.Implicits.global
import javax.inject.{Inject, Singleton}
import controllers.NeedLogin.Authenticated
import models._
import play.api.mvc._
import play.api.libs.ws._
import play.api.db.Database
@Singleton
class LoginAgentController @Inject() (
cc: MessagesControllerComponents,
loginAgentTable: LoginAgentTable,
ws: WSClient,
storeUserRepo: StoreUserRepo,
loginSessionRepo: LoginSessionRepo,
db: Database
) extends MessagesAbstractController(cc) {
def loginOffice365(code: String) = Action.async { implicit request: MessagesRequest[AnyContent] =>
println("LoginAgent.loginOffice365() code: '" + code + "'")
loginAgentTable.office365Agent match {
case None => throw new RuntimeException("Office365 login agent is not defined.")
case Some(office365: Office365LoginAgent) =>
office365.aquireToken(ws, code).flatMap { resp =>
office365.retrieveUserEmail(ws, resp.accessToken).map { email =>
db.withConnection { implicit conn =>
storeUserRepo.getByEmail(email) match {
case None =>
Redirect(routes.Admin.startLogin("/")).flashing("errorMessage" -> Messages("unregisteredUserEmail", email))
case Some(user) => {
val resp = Redirect("/").withSession {
(loginSessionRepo.loginUserKey,
loginSessionRepo.serialize(user.id.get, System.currentTimeMillis + loginSessionRepo.sessionTimeout))
}
val msg = Messages("welcome")
if (! msg.isEmpty) resp.flashing("message" -> msg) else resp
resp
}
}
}
}
}
}
}
}
| ruimo/store2 | app/controllers/LoginAgentController.scala | Scala | apache-2.0 | 1,828 |
package com.geeksville.akka
import scala.collection.mutable.Publisher
import akka.actor.Actor
import akka.actor.ActorRef
/**
* Similar to the classic akka EventBus but simplified
*/
/** Forwards published events to subscribed actors, dropping subscriptions
  * whose target actor has terminated. */
class EventStream(implicit sender: ActorRef = Actor.noSender) extends Publisher[Any] {

  // NOTE(review): this private member is never used -- candidate for removal.
  private val publisher = new {}

  // Send to actor when our event comes in
  class Subscriber(val dest: ActorRef) extends Sub {
    override def notify(p: Pub, evt: Any) {
      //println(s"Publishing from $sender to $dest with $evt")
      if (!dest.isTerminated)
        dest ! evt
      else
        removeSubscription(this) // Our actor died - remove our filter
    }
  }

  /**
   * @param isInterested - if not specified we look at the actor's partial function to see what it understands
   */
  def subscribe(a: ActorRef, receiver: PartialFunction[Any, Unit]): Subscriber = subscribe(a, receiver.isDefinedAt _)

  /**
   * @param isInterested - if not specified we look at the actor's partial function to see what it understands
   */
  def subscribe(a: ActorRef, isInterested: Any => Boolean): Subscriber = {
    // A null filter means "interested in everything".
    val isInt = if (isInterested != null)
      isInterested
    else { x: Any => true }
    val sub = new Subscriber(a)
    super.subscribe(sub, isInt)
    sub
  }

  /**
   * @param isInterested - if not specified we look at the actor's partial function to see what it understands
   */
  def subscribe(a: Actor): Subscriber = subscribe(a.self, a.receive)

  // Delegates to Publisher.publish, notifying all matching subscribers.
  override def publish(x: Any) { super.publish(x) }
}
| geeksville/arduleader | common/src/main/scala/com/geeksville/akka/EventStream.scala | Scala | gpl-3.0 | 1,507 |
package org.powlab.jeye.decode
import org.powlab.jeye.core._
import org.powlab.jeye.decode.expression._
import scala.collection.mutable.ListBuffer
import org.powlab.jeye.decode.LocalVariableStore.LocalVariableStore
import scala.collection.mutable.ArrayBuffer
import org.powlab.jeye.decode.graph.OpcodeTree
import org.powlab.jeye.decode.decoders.ExtraInfo
/** Context threaded through method decoding: the owning class file and
  * method, frame/local-variable state, naming, the opcode tree, a draft
  * flag, plus extra decoder info and the local-variable trace. */
case class MethodContext(classFile: ClassFile, method: MemberInfo,
  frames: FrameStack, localVariables: LocalVariableStore, namer: Namer,
  tree: OpcodeTree, draft: Boolean,
  extra: ExtraInfo, trace: LocalVariableTrace) {
} | powlab/jeye | src/main/scala/org/powlab/jeye/decode/MethodContext.scala | Scala | apache-2.0 | 651 |
package com.aristocrat.mandrill.requests.Messages
import com.aristocrat.mandrill.requests.MandrillRequest
case class CancelScheduled(key: String, id: String) extends MandrillRequest
| thepratt/mandrill | src/main/scala/com/aristocrat/mandrill/requests/Messages/CancelScheduled.scala | Scala | mit | 184 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.common
import org.specs.Specification
import com.twitter.zipkin.Constants
/**
 * Unit tests for Span: service-name extraction, merging of partial spans,
 * annotation ordering, duration computation and client/server validation.
 */
class SpanSpec extends Specification {
  // A span holding a single annotation, used by the lookup example below
  val annotationValue = "NONSENSE"
  val expectedAnnotation = Annotation(1, annotationValue, Some(Endpoint(1, 2, "service")))
  val expectedSpan = Span(12345, "methodcall", 666, None,
    List(expectedAnnotation), Nil)
  // Three time-ordered annotations; the second uses a mixed-case service name
  val annotation1 = Annotation(1, "value1", Some(Endpoint(1, 2, "service")))
  val annotation2 = Annotation(2, "value2", Some(Endpoint(3, 4, "Service"))) // upper case service name
  val annotation3 = Annotation(3, "value3", Some(Endpoint(5, 6, "service")))
  val spanWith3Annotations = Span(12345, "methodcall", 666, None,
    List(annotation1, annotation2, annotation3), Nil)
  "Span" should {
    "serviceNames is lowercase" in {
      // "service" and "Service" must collapse into a single lowercased name
      val names = spanWith3Annotations.serviceNames
      names.size mustBe 1
      names.toSeq(0) mustBe "service"
    }
    // NOTE(review): the description reads "serviceNames" but the body exercises
    // getAnnotationsAsMap - consider renaming this example
    "serviceNames" in {
      val map = expectedSpan.getAnnotationsAsMap
      val actualAnnotation = map.get(annotationValue).get
      expectedAnnotation mustEqual actualAnnotation
    }
    "merge two span parts" in {
      val ann1 = Annotation(1, "value1", Some(Endpoint(1, 2, "service")))
      val ann2 = Annotation(2, "value2", Some(Endpoint(3, 4, "service")))
      val span1 = Span(12345, "", 666, None, List(ann1), Nil)
      val span2 = Span(12345, "methodcall", 666, None, List(ann2), Nil)
      val expectedSpan = Span(12345, "methodcall", 666, None, List(ann1, ann2), Nil)
      val actualSpan = span1.mergeSpan(span2)
      actualSpan mustEqual expectedSpan
    }
    "merge span with Unknown span name with known span name" in {
      val span1 = Span(1, "Unknown", 2, None, List(), Seq())
      val span2 = Span(1, "get", 2, None, List(), Seq())
      // The concrete name must win regardless of merge order
      span1.mergeSpan(span2).name mustEqual "get"
      span2.mergeSpan(span1).name mustEqual "get"
    }
    "return the first annotation" in {
      annotation1 mustEqual spanWith3Annotations.firstAnnotation.get
    }
    "return the last annotation" in {
      annotation3 mustEqual spanWith3Annotations.lastAnnotation.get
    }
    "know this is not a client side span" in {
      val spanSr = Span(1, "n", 2, None, List(Annotation(1, Constants.ServerRecv, None)), Nil)
      spanSr.isClientSide mustEqual false
    }
    "get duration" in {
      // duration = last annotation timestamp (3) - first annotation timestamp (1)
      spanWith3Annotations.duration mustEqual Some(2)
    }
    // NOTE(review): "duration duration" in the description below looks like a typo
    "don't get duration duration when there are no annotations" in {
      val span = Span(1, "n", 2, None, List(), Nil)
      span.duration mustEqual None
    }
    "validate span" in {
      // A valid span carries exactly one of each core annotation (cs, sr, ss, cr)
      val cs = Annotation(1, Constants.ClientSend, None)
      val sr = Annotation(2, Constants.ServerRecv, None)
      val ss = Annotation(3, Constants.ServerSend, None)
      val cr = Annotation(4, Constants.ClientRecv, None)
      val cs2 = Annotation(5, Constants.ClientSend, None)
      val s1 = Span(1, "i", 123, None, List(cs, sr, ss, cr), Nil)
      s1.isValid mustEqual true
      // A duplicate ClientSend makes the span invalid
      val s3 = Span(1, "i", 123, None, List(cs, sr, ss, cr, cs2), Nil)
      s3.isValid mustEqual false
    }
  }
}
| martindale/zipkin | zipkin-common/src/test/scala/com/twitter/zipkin/common/SpanSpec.scala | Scala | apache-2.0 | 3,698 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.filter
import java.util.Date
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom.Coordinate
import org.geotools.data.Query
import org.geotools.factory.{CommonFactoryFinder, Hints}
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.geotools.filter.text.ecql.ECQL
import org.geotools.geometry.jts.JTSFactoryFinder
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithDataStore
import org.locationtech.geomesa.accumulo.filter.TestFilters._
import org.locationtech.geomesa.accumulo.iterators.TestData
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.features.avro.AvroSimpleFeatureFactory
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeature
import org.opengis.filter._
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
/**
 * For each CQL predicate, checks that the number of features returned by a
 * datastore query equals the number matched by evaluating the same filter
 * in memory over the full feature set.
 */
class FilterTester extends Specification with TestWithDataStore with LazyLogging {
  override val spec = SimpleFeatureTypes.encodeType(TestData.featureType)
  // The in-memory "ground truth" feature set, also loaded into the datastore below
  val mediumDataFeatures: Seq[SimpleFeature] =
    TestData.mediumData.map(TestData.createSF).map(f => new ScalaSimpleFeature(f.getID, sft, f.getAttributes.toArray))
  addFeatures(mediumDataFeatures)
  "Filters" should {
    "filter correctly for all predicates" >> {
      runTest(goodSpatialPredicates)
    }
    "filter correctly for AND geom predicates" >> {
      runTest(andedSpatialPredicates)
    }
    "filter correctly for OR geom predicates" >> {
      runTest(oredSpatialPredicates)
    }
    "filter correctly for OR geom predicates with projections" >> {
      runTest(oredSpatialPredicates, Array("geom"))
    }
    "filter correctly for basic temporal predicates" >> {
      runTest(temporalPredicates)
    }
    "filter correctly for basic spatiotemporal predicates" >> {
      runTest(spatioTemporalPredicates)
    }
    // NOTE(review): "spariotemporal" in the description below looks like a typo
    "filter correctly for basic spariotemporal predicates with namespaces" >> {
      runTest(spatioTemporalPredicatesWithNS)
    }
    "filter correctly for attribute predicates" >> {
      runTest(attributePredicates)
    }
    "filter correctly for attribute and geometric predicates" >> {
      runTest(attributeAndGeometricPredicates)
    }
    "filter correctly for attribute and geometric predicates with namespaces" >> {
      runTest(attributeAndGeometricPredicatesWithNS)
    }
    "filter correctly for DWITHIN predicates" >> {
      runTest(dwithinPointPredicates)
    }
    "filter correctly for ID predicates" >> {
      runTest(idPredicates)
    }
  }
  // Asserts datastore query hit count == in-memory filter hit count;
  // `projection` (nullable) optionally restricts the queried properties
  def compareFilter(filter: Filter, projection: Array[String]) = {
    val filterCount = mediumDataFeatures.count(filter.evaluate)
    val query = new Query(sftName, filter)
    Option(projection).foreach(query.setPropertyNames)
    val queryCount = fs.getFeatures(query).size
    logger.debug(s"\\nFilter: ${ECQL.toCQL(filter)}\\nFullData size: ${mediumDataFeatures.size}: " +
      s"filter hits: $filterCount query hits: $queryCount")
    queryCount mustEqual filterCount
  }
  // Parses each CQL string and runs compareFilter over all of them
  def runTest(filters: Seq[String], projection: Array[String] = null) =
    forall(filters.map(ECQL.toFilter))(compareFilter(_, projection))
}
@RunWith(classOf[JUnitRunner])
/**
 * Checks feature-ID queries against a datastore seeded with three features
 * (ids "1", "2", "3"): single-id lookup, multi-id lookup, and the
 * intersection semantics of ANDed id filters.
 */
class IdQueryTest extends Specification with TestWithDataStore {
  override val spec = "age:Int:index=true,name:String:index=true,dtg:Date,*geom:Point:srid=4326"
  val ff = CommonFactoryFinder.getFilterFactory2
  val geomBuilder = JTSFactoryFinder.getGeometryFactory
  val builder = new SimpleFeatureBuilder(sft, new AvroSimpleFeatureFactory)
  // (feature id, [age, name, dtg], geometry) triples used to seed the store
  val data = List(
    ("1", Array(10, "johndoe", new Date), geomBuilder.createPoint(new Coordinate(10, 10))),
    ("2", Array(20, "janedoe", new Date), geomBuilder.createPoint(new Coordinate(20, 20))),
    ("3", Array(30, "johnrdoe", new Date), geomBuilder.createPoint(new Coordinate(20, 20)))
  )
  val features = data.map { case (id, attrs, geom) =>
    builder.reset()
    builder.addAll(attrs.asInstanceOf[Array[AnyRef]])
    val f = builder.buildFeature(id)
    f.setDefaultGeometry(geom)
    // Keep the ids above instead of letting the store generate its own
    f.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
    f
  }
  addFeatures(features)
  "Id queries" should {
    "use record table to return a result" >> {
      val idQ = ff.id(ff.featureId("2"))
      val res = fs.getFeatures(idQ).features().toList
      res.length mustEqual 1
      res.head.getID mustEqual "2"
    }
    "handle multiple ids correctly" >> {
      val idQ = ff.id(ff.featureId("1"), ff.featureId("3"))
      val res = fs.getFeatures(idQ).features().toList
      res.length mustEqual 2
      res.map(_.getID) must contain ("1", "3")
    }
    "return no events when multiple IDs ANDed result in no intersection" >> {
      // {1,3} AND {2} is empty, so the query must return nothing
      val idQ1 = ff.id(ff.featureId("1"), ff.featureId("3"))
      val idQ2 = ff.id(ff.featureId("2"))
      val idQ = ff.and(idQ1, idQ2)
      val qRes = fs.getFeatures(idQ)
      val res= qRes.features().toList
      res.length mustEqual 0
    }
  }
}
| vpipkt/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/filter/FilterTester.scala | Scala | apache-2.0 | 5,603 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Ack.{Continue, Stop}
import monix.execution.atomic.Atomic
import monix.execution.atomic.PaddingStrategy.LeftRight128
import scala.util.control.NonFatal
import monix.execution.{Ack, Cancelable}
import monix.execution.exceptions.CompositeException
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
import scala.annotation.tailrec
import scala.concurrent.{Future, Promise}
import scala.util.Failure
/** Implementation for `Observable.flatScan`.
 *
 * Implementation is based on [[MapTaskObservable]].
 *
 * Tricky concurrency handling within, here be dragons!
 *
 * @param source the upstream observable whose elements drive the scan
 * @param initial builds the seed state; evaluated once per subscription
 * @param op maps the current state and the next upstream element to an
 *        observable of new states; each emitted state replaces the current one
 * @param delayErrors when true, errors are accumulated and only signaled
 *        once the stream completes
 */
private[reactive] final class FlatScanObservable[A,R](
  source: Observable[A],
  initial: () => R,
  op: (R, A) => Observable[R], delayErrors: Boolean)
  extends Observable[R] {
  def unsafeSubscribeFn(out: Subscriber[R]): Cancelable = {
    // streamErrors guards the observer contract: only an exception thrown by
    // `initial()` itself (i.e. before we subscribed) may be streamed downstream
    var streamErrors = true
    try {
      val value = initial()
      streamErrors = false
      subscribeWithState(out, value)
    } catch {
      case NonFatal(ex) if streamErrors =>
        out.onError(ex)
        Cancelable.empty
    }
  }
  def subscribeWithState(out: Subscriber[R], initial: R): Cancelable = {
    val subscriber = new FlatScanSubscriber(out, initial)
    val mainSubscription = source.unsafeSubscribeFn(subscriber)
    // Cancelling tears down both the upstream subscription and any active child
    Cancelable { () =>
      try mainSubscription.cancel()
      finally subscriber.cancel()
    }
  }
  private final class FlatScanSubscriber(out: Subscriber[R], initial: R)
    extends Subscriber[A] with Cancelable { self =>
    import ConcatMapObservable.FlatMapState
    import ConcatMapObservable.FlatMapState._
    implicit val scheduler = out.scheduler
    // For gathering errors (only allocated when delayErrors is on)
    private[this] val errors =
      if (delayErrors) Atomic(List.empty[Throwable])
      else null
    // Boolean for keeping the `isActive` state, needed because we could miss
    // out on seeing a `Cancelled` state due to the `lazySet` instructions,
    // making the visibility of the `Cancelled` state thread-unsafe!
    private[this] val isActive = Atomic(true)
    // For synchronizing our internal state machine, padded
    // in order to avoid the false sharing problem
    private[this] val stateRef = Atomic.withPadding(
      WaitOnNextChild(Continue) : FlatMapState,
      LeftRight128)
    // Mutable reference to the current state; updated by the child
    // subscriber for every state it emits
    private[this] var currentState = initial
    /** For canceling the current active task, in case there is any. Here
     * we can afford a `compareAndSet`, not being a big deal since
     * cancellation only happens once.
     */
    def cancel(): Unit = {
      // The cancellation is a two-phase process
      if (isActive.getAndSet(false)) cancelState()
    }
    @tailrec private def cancelState(): Unit = {
      stateRef.get match {
        case current @ Active(ref) =>
          if (stateRef.compareAndSet(current, Cancelled)) {
            ref.cancel()
          } else {
            // $COVERAGE-OFF$
            cancelState() // retry
            // $COVERAGE-ON$
          }
        case current @ WaitComplete(_, ref) =>
          if (ref != null) {
            if (stateRef.compareAndSet(current, Cancelled)) {
              ref.cancel()
            } else {
              // $COVERAGE-OFF$
              cancelState() // retry
              // $COVERAGE-ON$
            }
          }
        case current @ (WaitOnNextChild(_) | WaitOnActiveChild) =>
          if (!stateRef.compareAndSet(current, Cancelled)) {
            // $COVERAGE-OFF$
            cancelState() // retry
            // $COVERAGE-ON$
          }
        case Cancelled =>
          // $COVERAGE-OFF$
          () // do nothing else
          // $COVERAGE-ON$
      }
    }
    def onNext(elem: A): Future[Ack] = {
      // For protecting against user code, without violating the
      // observer's contract, by marking the boundary after which
      // we can no longer stream errors downstream
      var streamErrors = true
      // WARN: Concurrent cancellation might have happened, due
      // to the `Cancelled` state being thread-unsafe because of
      // the logic using `lazySet` below; hence the extra check
      if (!isActive.get) {
        Stop
      } else try {
        val asyncUpstreamAck = Promise[Ack]()
        // `op` is user code - may throw; handled by the catch below
        val child = op(currentState, elem)
        // No longer allowed to stream errors downstream
        streamErrors = false
        // Simple, ordered write - we cannot use WaitOnNext as the start of an
        // iteration because we cannot detect synchronous execution below;
        // WARN: this can override the `Cancelled` status!
        stateRef.lazySet(WaitOnActiveChild)
        // Shoot first, ask questions later :-)
        val cancellable = child.unsafeSubscribeFn(new ChildSubscriber(out, asyncUpstreamAck))
        // Execution already started at this point This `getAndSet` is
        // concurrent with the task being finished (the `getAndSet` in
        // the Task.flatMap above), but not with the `getAndSet`
        // happening in `onComplete` and `onError`, therefore a
        // `WaitComplete` state is invalid here. The state we do
        // expect most of the time is either `WaitOnNext` or
        // `WaitActiveTask`.
        stateRef.getAndSet(Active(cancellable)) match {
          case previous @ WaitOnNextChild(ack) =>
            // Task execution was synchronous, w00t, so redo state!
            //
            // NOTE: we don't need to worry about cancellation here, b/c we
            // have no child active and the cancellation of the parent stream
            // is not our concern
            stateRef.lazySet(previous)
            ack.syncTryFlatten
          case WaitOnActiveChild =>
            // Expected outcome for async observables ...
            //
            // Concurrent cancellation might have happened, the `Cancelled` state
            // being thread-unsafe, hence this check;
            //
            // WARN: the assumption is that if the `Cancelled` state was set
            // right before `lazySet(WaitOnActiveChild)`, then we would see
            // `isActive == false` here b/c it was updated before `stateRef` (JMM);
            // And if `stateRef = Cancelled` happened afterwards, then we should
            // see it in the outer match statement
            if (isActive.get) {
              asyncUpstreamAck.future.syncTryFlatten
            } else {
              cancelState()
              Stop
            }
          case WaitComplete(_, _) =>
            // Branch that can happen in case the child has finished
            // already in error, so stop further onNext events.
            stateRef.lazySet(Cancelled) // GC purposes
            Stop
          case Cancelled =>
            // Race condition, oops, now cancel the active task
            cancelState()
            Stop
          case state @ Active(_) =>
            // This should never, ever happen!
            // Something is screwed up in our state machine :-(
            // $COVERAGE-OFF$
            reportInvalidState(state, "onNext")
            Stop
            // $COVERAGE-ON$
        }
      } catch { case ex if NonFatal(ex) =>
        if (streamErrors) {
          onError(ex)
          Stop
        } else {
          scheduler.reportFailure(ex)
          Stop
        }
      }
    }
    // Shared completion logic for onComplete/onError on the main subscriber;
    // `ex` is None for normal completion
    private def signalFinish(ex: Option[Throwable]): Unit = {
      // It's fine to fetch the current cancelable like this because
      // this can only give us the cancelable of the active child and
      // the only race condition that can happen is for the child to
      // set this to `null` between this `get` and the upcoming
      // `getAndSet`, which is totally fine
      val childRef = stateRef.get match {
        case Active(ref) => ref
        case WaitComplete(_,ref) => ref
        case _ => null
      }
      // Can have a race condition with the `onComplete` / `onError`
      // signal in the child, but this works fine because of the
      // no-concurrent clause in the protocol of communication. So
      // either we have exactly one active child, in which case it
      // will be responsible for sending the final signal, or we don't
      // have any active child, in which case it is the responsibility
      // of the main subscriber to do it right here
      stateRef.getAndSet(WaitComplete(ex, childRef)) match {
        case WaitOnNextChild(_) =>
          // In this state we know we have no active task, so we are
          // free to signal the final event
          if (ex.isEmpty) sendOnComplete() else out.onError(ex.get)
          // GC purposes: we no longer need the cancelable reference!
          stateRef.lazySet(Cancelled)
        case Active(_) =>
          // On this branch we've got an active child that needs to finish.
          //
          // WARN: Concurrent cancellation might have happened and because the
          // `Cancelled` state is thread unsafe, we need a second check.
          // Assumption is that `isActive = false` would be visible in case of
          // a race condition!
          if (!isActive.get) cancelState()
        case WaitComplete(_,_) =>
          // This branch happens if the child has triggered the completion
          // event already, thus there's nothing for us left to do.
          // GC purposes: we no longer need `childRef`.
          stateRef.lazySet(Cancelled)
        case Cancelled =>
          // Oops, cancellation happened, cancel!
          cancelState()
          // GC purposes: we no longer need `childRef`.
          stateRef.lazySet(Cancelled)
        case WaitOnActiveChild =>
          // Something is screwed up in our state machine :-(
          // $COVERAGE-OFF$
          reportInvalidState(WaitOnActiveChild, "signalFinish")
          // $COVERAGE-ON$
      }
    }
    def onComplete(): Unit =
      signalFinish(None)
    def onError(ex: Throwable): Unit =
      if (!delayErrors) signalFinish(Some(ex)) else {
        // Errors are accumulated and reported at completion time
        errors.transform(list => ex :: list)
        signalFinish(None)
      }
    private def sendOnComplete(): Unit = {
      if (!delayErrors) out.onComplete() else
        this.errors.get match {
          case Nil => out.onComplete()
          case list => out.onError(CompositeException(list))
        }
    }
    private def reportInvalidState(state: FlatMapState, method: String): Unit = {
      // $COVERAGE-OFF$
      cancelState()
      scheduler.reportFailure(
        new IllegalStateException(
          s"State $state in the Monix ConcatMap.$method implementation is invalid, " +
            "due to either a broken Subscriber implementation, or a bug, " +
            "please open an issue, see: https://monix.io"
        ))
      // $COVERAGE-ON$
    }
    // Subscriber for one child observable produced by `op`; updates
    // `currentState` on every element and arbitrates the upstream ack
    private final class ChildSubscriber(out: Subscriber[R], asyncUpstreamAck: Promise[Ack])
      extends Subscriber[R] {
      implicit val scheduler = out.scheduler
      private[this] var ack: Future[Ack] = Continue
      // Reusable reference to stop creating function references for each `onNext`
      private[this] val onStopOrFailureRef = (err: Option[Throwable]) => {
        if (err.isDefined) out.scheduler.reportFailure(err.get)
        signalChildOnComplete(Stop, isStop = true)
      }
      def onNext(elem: R) = {
        self.currentState = elem
        ack = out.onNext(elem)
        ack.syncOnStopOrFailure(onStopOrFailureRef)
      }
      def onComplete(): Unit =
        signalChildOnComplete(ack, isStop = false)
      def onError(ex: Throwable): Unit =
        if (!delayErrors) signalChildOnError(ex) else {
          errors.transform(list => ex :: list)
          onComplete()
        }
      private def signalChildOnError(ex: Throwable): Unit = {
        // The cancelable passed in WaitComplete here can be `null`
        // because it would only replace the child's own cancelable
        stateRef.getAndSet(WaitComplete(Some(ex), null)) match {
          case WaitOnActiveChild | WaitOnNextChild(_) | Active(_) =>
            // Branch happens while the main subscriber is still
            // active; the `getAndSet(WaitComplete)` however will
            // stop it and we are free to send the final error
            out.onError(ex)
            asyncUpstreamAck.trySuccess(Stop)
          case WaitComplete(otherEx, _) =>
            // Branch happens when the main subscriber has already
            // finished - we were in `Active` until now, so it is
            // the child's responsibility to finish! But if an
            // exception also happened on main subscriber, we need
            // to log it somewhere!
            otherEx.foreach(scheduler.reportFailure)
            // Send our immediate error downstream and stop everything
            out.onError(ex)
            asyncUpstreamAck.trySuccess(Stop)
          case Cancelled =>
            // User cancelled, but we have to log errors somewhere
            scheduler.reportFailure(ex)
        }
      }
      private def signalChildOnComplete(ack: Future[Ack], isStop: Boolean): Unit = {
        // This assignment must happen after `onNext`, otherwise
        // we can end with a race condition with `onComplete`
        stateRef.getAndSet(WaitOnNextChild(ack)) match {
          case WaitOnActiveChild =>
            () // Optimization, do nothing else
          case WaitOnNextChild(_) | Active(_) =>
            // Branch happens when the main subscriber is still
            // active and this child is thus giving it permission
            // to continue with the next child observable
            ack.value match {
              case Some(result) =>
                asyncUpstreamAck.tryComplete(result)
              case None =>
                asyncUpstreamAck.tryCompleteWith(ack)
            }
          case Cancelled =>
            asyncUpstreamAck.trySuccess(Stop)
          case WaitComplete(exOpt, _) =>
            // An `onComplete` or `onError` event happened since
            // `onNext` was called, so we are now responsible for
            // signaling it downstream. Note that we've set
            // `WaitOnNext` above, which would make one wonder if
            // we couldn't have a problem with the logic in
            // `onComplete` or `onError`, but if we are seeing
            // this state, it means that these calls already
            // happened, so we can't have a race condition.
            if (!isStop) exOpt match {
              case None => sendOnComplete()
              case Some(ex) => out.onError(ex)
            }
            else ack.value match {
              case Some(Failure(ex)) =>
                // An error happened and we need to report it somewhere
                scheduler.reportFailure(ex)
              case _ =>
                () // do nothing else
            }
        }
      }
    }
  }
}
| Wogan/monix | monix-reactive/shared/src/main/scala/monix/reactive/internal/operators/FlatScanObservable.scala | Scala | apache-2.0 | 15,535 |
package cromwell
import java.util.UUID
import java.util.concurrent.atomic.AtomicInteger
import akka.actor.{Actor, ActorRef, ActorSystem, Props, Terminated}
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.testkit._
import com.typesafe.config.{Config, ConfigFactory}
import cromwell.CromwellTestKitSpec._
import cromwell.core._
import cromwell.core.path.BetterFileMethods.Cmds
import cromwell.core.path.DefaultPathBuilder
import cromwell.docker.DockerHashActor.DockerHashSuccessResponse
import cromwell.docker.{DockerHashRequest, DockerHashResult}
import cromwell.engine.backend.BackendConfigurationEntry
import cromwell.engine.workflow.WorkflowManagerActor.RetrieveNewWorkflows
import cromwell.engine.workflow.lifecycle.execution.callcaching.CallCacheReadActor.{CacheLookupNoHit, CacheLookupRequest}
import cromwell.engine.workflow.lifecycle.execution.callcaching.CallCacheWriteActor.SaveCallCacheHashes
import cromwell.engine.workflow.lifecycle.execution.callcaching.CallCacheWriteSuccess
import cromwell.engine.workflow.workflowstore.WorkflowStoreSubmitActor.WorkflowSubmittedToStore
import cromwell.engine.workflow.workflowstore.{InMemoryWorkflowStore, WorkflowStoreActor}
import cromwell.jobstore.JobStoreActor.{JobStoreWriteSuccess, JobStoreWriterCommand}
import cromwell.server.{CromwellRootActor, CromwellSystem}
import cromwell.services.ServiceRegistryActor
import cromwell.services.metadata.MetadataService._
import cromwell.subworkflowstore.EmptySubWorkflowStoreActor
import cromwell.util.SampleWdl
import cromwell.webservice.metadata.MetadataBuilderActor
import cromwell.webservice.metadata.MetadataBuilderActor.{BuiltMetadataResponse, FailedMetadataResponse, MetadataBuilderActorResponse}
import org.scalactic.Equality
import org.scalatest._
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Millis, Seconds, Span}
import spray.json._
import wom.core.FullyQualifiedName
import wom.types._
import wom.values._
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.language.postfixOps
import scala.util.matching.Regex
case class OutputNotFoundException(outputFqn: String, actualOutputs: String) extends RuntimeException(s"Expected output $outputFqn was not found in: '$actualOutputs'")
case class LogNotFoundException(log: String) extends RuntimeException(s"Expected log $log was not found")
/**
 * Shared fixtures for Cromwell actor-system tests: the test akka
 * configuration, log-event interception helpers, sample backend
 * configurations and the singleton service registry (see the long
 * comment further down for why the registry is a singleton).
 */
object CromwellTestKitSpec {
  // Akka config used by every TestWorkflowManagerSystem below
  val ConfigText =
    """
      |akka {
      |  loggers = ["akka.testkit.TestEventListener"]
      |  loglevel = "INFO"
      |  actor {
      |    debug {
      |       receive = on
      |    }
      |  }
      |  dispatchers {
      |    # A dispatcher for actors performing blocking io operations
      |    # Prevents the whole system from being slowed down when waiting for responses from external resources for instance
      |    io-dispatcher {
      |      type = Dispatcher
      |      executor = "fork-join-executor"
      |      # Using the forkjoin defaults, this can be tuned if we wish
      |    }
      |
      |    # A dispatcher for actors handling API operations
      |    # Keeps the API responsive regardless of the load of workflows being run
      |    api-dispatcher {
      |      type = Dispatcher
      |      executor = "fork-join-executor"
      |    }
      |
      |    # A dispatcher for engine actors
      |    # Because backends behavior is unpredictable (potentially blocking, slow) the engine runs
      |    # on its own dispatcher to prevent backends from affecting its performance.
      |    engine-dispatcher {
      |      type = Dispatcher
      |      executor = "fork-join-executor"
      |    }
      |
      |    # A dispatcher used by supported backend actors
      |    backend-dispatcher {
      |      type = Dispatcher
      |      executor = "fork-join-executor"
      |    }
      |
      |    # Note that without further configuration, backend actors run on the default dispatcher
      |  }
      |  test {
      |    # Some of our tests fire off a message, then expect a particular event message within 3s (the default).
      |    # Especially on CI, the metadata test does not seem to be returning in time. So, overriding the timeouts
      |    # with slightly higher values. Alternatively, could also adjust the akka.test.timefactor only in CI.
      |    filter-leeway = 10s
      |    single-expect-default = 5s
      |    default-timeout = 10s
      |  }
      |}
      |
      |services {}
    """.stripMargin
  // Default ask/await timeout used throughout the test kit
  val TimeoutDuration = 60 seconds
  // Gives each TestWorkflowManagerSystem a unique actor-system name
  private val testWorkflowManagerSystemCount = new AtomicInteger()
  class TestWorkflowManagerSystem extends CromwellSystem {
    override protected def systemName: String = "test-system-" + testWorkflowManagerSystemCount.incrementAndGet()
    override protected def newActorSystem() = ActorSystem(systemName, ConfigFactory.parseString(CromwellTestKitSpec.ConfigText))
    /**
     * Do NOT shut down the test actor system inside the normal flow.
     * The actor system will be externally shutdown outside the block.
     */
    // -Ywarn-value-discard
    override def shutdownActorSystem(): Future[Terminated] = { Future.successful(null) }
    def shutdownTestActorSystem() = super.shutdownActorSystem()
  }
  /**
   * Wait for exactly one occurrence of the specified info pattern in the specified block. The block is in its own
   * parameter list for usage syntax reasons.
   */
  def waitForInfo[T](pattern: String, occurrences: Int = 1)(block: => T)(implicit system: ActorSystem): T = {
    EventFilter.info(pattern = pattern, occurrences = occurrences).intercept {
      block
    }
  }
  /**
   * Wait for occurrence(s) of the specified warning pattern in the specified block. The block is in its own parameter
   * list for usage syntax reasons.
   */
  def waitForWarning[T](pattern: String, occurrences: Int = 1)(block: => T)(implicit system: ActorSystem): T = {
    EventFilter.warning(pattern = pattern, occurrences = occurrences).intercept {
      block
    }
  }
  /**
   * Akka TestKit appears to be unable to match errors generated by `log.error(Throwable, String)` with the normal
   * `EventFilter.error(...).intercept {...}` mechanism since `EventFilter.error` forces the use of a dummy exception
   * that never matches a real exception. This method works around that problem by building an `ErrorFilter` more
   * explicitly to allow the caller to specify a `Throwable` class.
   */
  def waitForErrorWithException[T](pattern: String,
                                   throwableClass: Class[_ <: Throwable] = classOf[Throwable],
                                   occurrences: Int = 1)
                                  (block: => T)
                                  (implicit system: ActorSystem): T = {
    val regex = Right[String, Regex](pattern.r)
    ErrorFilter(throwableClass, source = None, message = regex, complete = false)(occurrences = occurrences).intercept {
      block
    }
  }
  /**
   * Special case for validating outputs. Used when the test wants to check that an output exists, but doesn't care what
   * the actual value was.
   */
  lazy val AnyValueIsFine: WomValue = WomString("Today you are you! That is truer than true! There is no one alive who is you-er than you!")
  // Substitutes <<PWD>> / <<UUID>> placeholders inside WomString values
  def replaceVariables(womValue: WomValue, workflowId: WorkflowId): WomValue = {
    womValue match {
      case WomString(value) => WomString(replaceVariables(value, workflowId))
      case _ => womValue
    }
  }
  def replaceVariables(value: String, workflowId: WorkflowId): String = {
    val variables = Map("PWD" -> Cmds.pwd, "UUID" -> workflowId)
    variables.foldLeft(value) {
      case (result, (variableName, variableValue)) => result.replace(s"<<$variableName>>", s"$variableValue")
    }
  }
  lazy val DefaultConfig = ConfigFactory.load
  // Sample Local backend configuration for tests
  lazy val DefaultLocalBackendConfig = ConfigFactory.parseString(
    """
      | {
      |   root: "cromwell-executions"
      |
      |   filesystems {
      |     local {
      |       localization: [
      |         "hard-link", "soft-link", "copy"
      |       ]
      |     }
      |   }
      | }
    """.stripMargin)
  // Sample JES (Google Genomics) backend configuration for tests
  lazy val JesBackendConfig = ConfigFactory.parseString(
    """
      |{
      |  // Google project
      |  project = "my-cromwell-workflows"
      |
      |  // Base bucket for workflow executions
      |  root = "gs://my-cromwell-workflows-bucket"
      |
      |  // Polling for completion backs-off gradually for slower-running jobs.
      |  // This is the maximum polling interval (in seconds):
      |  maximum-polling-interval = 600
      |
      |  // Optional Dockerhub Credentials. Can be used to access private docker images.
      |  dockerhub {
      |    // account = ""
      |    // token = ""
      |  }
      |
      |  genomics {
      |    // A reference to an auth defined in the `google` stanza at the top. This auth is used to create
      |    // Pipelines and manipulate auth JSONs.
      |    auth = "service-account"
      |
      |    // Endpoint for APIs, no reason to change this unless directed by Google.
      |    endpoint-url = "https://genomics.googleapis.com/"
      |  }
      |
      |  filesystems {
      |    gcs {
      |      // A reference to a potentially different auth for manipulating files via engine functions.
      |      auth = "user-via-refresh"
      |    }
      |  }
      |}
    """.stripMargin)
  lazy val JesBackendConfigEntry = BackendConfigurationEntry(
    name = "JES",
    lifecycleActorFactoryClass = "cromwell.backend.impl.jes.JesBackendLifecycleActorFactory",
    JesBackendConfig
  )
  /**
   * It turns out that tests using the metadata refresh actor running in parallel don't work so well if there are more
   * than one refresh actor. After many fits and starts the author decided that discretion was the better part of valor
   * as it is difficult to only singleton-ize the refresher or even the metadata service itself.
   *
   * Instead, doing things the "old way" for the tests, setting up a separate actor system and a singleton service
   * registry - this will be used by the CromwellRootActor within tests.
   *
   * :'(
   */
  private val ServiceRegistryActorSystem = akka.actor.ActorSystem("cromwell-service-registry-system")
  val ServiceRegistryActorInstance = {
    ServiceRegistryActorSystem.actorOf(ServiceRegistryActor.props(ConfigFactory.load()), "ServiceRegistryActor")
  }
  // Root actor wired with the singleton service registry and an in-memory workflow store
  class TestCromwellRootActor(config: Config)(implicit materializer: ActorMaterializer) extends CromwellRootActor(false, false) {
    override val serverMode = true
    override lazy val serviceRegistryActor = ServiceRegistryActorInstance
    override lazy val workflowStore = new InMemoryWorkflowStore
    // Submits the sources to the workflow store, nudges the manager to pick
    // them up, and returns the assigned workflow id (blocks up to TimeoutDuration)
    def submitWorkflow(sources: WorkflowSourceFilesWithoutImports): WorkflowId = {
      val submitMessage = WorkflowStoreActor.SubmitWorkflow(sources)
      val result = Await.result(workflowStoreActor.ask(submitMessage)(TimeoutDuration), Duration.Inf).asInstanceOf[WorkflowSubmittedToStore].workflowId
      workflowManagerActor ! RetrieveNewWorkflows
      result
    }
  }
  // Fresh manager system per spec instance (see CromwellTestKitSpec's constructor)
  def defaultTwms = new CromwellTestKitSpec.TestWorkflowManagerSystem()
}
abstract class CromwellTestKitWordSpec extends CromwellTestKitSpec with WordSpecLike
abstract class CromwellTestKitSpec(val twms: TestWorkflowManagerSystem = defaultTwms) extends TestKit(twms.actorSystem)
  with DefaultTimeout with ImplicitSender with Matchers with ScalaFutures with Eventually with Suite with OneInstancePerTest with BeforeAndAfterAll {

  // Tear down the per-suite actor system; the trailing () discards the shutdown result.
  override protected def afterAll() = { twms.shutdownTestActorSystem(); () }

  implicit val defaultPatience = PatienceConfig(timeout = Span(200, Seconds), interval = Span(1000, Millis))
  implicit val ec = system.dispatcher
  implicit val materializer = twms.materializer

  // Stand-in actors for collaborators the tests never exercise.
  val dummyServiceRegistryActor = system.actorOf(Props.empty)
  val dummyLogCopyRouter = system.actorOf(Props.empty)

  // Allow to use shouldEqual between 2 WdlTypes while acknowledging for edge cases
  implicit val wdlTypeSoftEquality = new Equality[WomType] {
    override def areEqual(a: WomType, b: Any): Boolean = (a, b) match {
      // String and File types are considered interchangeable here
      case (WomStringType | WomFileType, WomFileType | WomStringType) => true
      case (arr1: WomArrayType, arr2: WomArrayType) => areEqual(arr1.memberType, arr2.memberType)
      case (map1: WomMapType, map2: WomMapType) => areEqual(map1.valueType, map2.valueType)
      case _ => a == b
    }
  }

  // Allow to use shouldEqual between 2 WdlValues while acknowledging for edge cases and checking for WomType compatibility
  implicit val wdlEquality = new Equality[WomValue] {
    // Files compare by file name only, not by full path.
    def fileEquality(f1: String, f2: String) =
      DefaultPathBuilder.get(f1).getFileName == DefaultPathBuilder.get(f2).getFileName

    override def areEqual(a: WomValue, b: Any): Boolean = {
      val typeEquality = b match {
        case v: WomValue => wdlTypeSoftEquality.areEqual(a.womType, v.womType)
        case _ => false
      }

      val valueEquality = (a, b) match {
        case (_: WomFile, expectedFile: WomFile) => fileEquality(a.valueString, expectedFile.valueString)
        case (_: WomString, expectedFile: WomFile) => fileEquality(a.valueString, expectedFile.valueString)
        case (array: WomArray, expectedArray: WomArray) =>
          (array.value.length == expectedArray.value.length) &&
            array.value.zip(expectedArray.value).map(Function.tupled(areEqual)).forall(identity)
        case (map: WomMap, expectedMap: WomMap) =>
          // NOTE(review): each key is looked up twice (get + apply);
          // expectedMap.value.get(k).exists(areEqual(v, _)) would do it once.
          val mapped = map.value.map {
            case (k, v) => expectedMap.value.get(k).isDefined && areEqual(v, expectedMap.value(k))
          }
          (map.value.size == expectedMap.value.size) && mapped.forall(identity)
        case _ => a == b
      }

      typeEquality && valueEquality
    }
  }

  // Fresh root actor per run with a unique name so parallel runs don't collide.
  private def buildCromwellRootActor(config: Config) = {
    TestActorRef(new TestCromwellRootActor(config), name = "TestCromwellRootActor" + UUID.randomUUID().toString)
  }

  /**
   * Submits the sample WDL, waits (via `eventually`) for the workflow to reach
   * `terminalState`, and returns its outputs as read back from metadata.
   */
  def runWdl(sampleWdl: SampleWdl,
             runtime: String = "",
             workflowOptions: String = "{}",
             customLabels: String = "{}",
             terminalState: WorkflowState = WorkflowSucceeded,
             config: Config = DefaultConfig,
             patienceConfig: PatienceConfig = defaultPatience)(implicit ec: ExecutionContext): Map[FullyQualifiedName, WomValue] = {
    val rootActor = buildCromwellRootActor(config)
    val sources = WorkflowSourceFilesWithoutImports(
      workflowSource = sampleWdl.workflowSource(runtime),
      workflowType = Option("WDL"),
      workflowTypeVersion = None,
      inputsJson = sampleWdl.workflowJson,
      workflowOptionsJson = workflowOptions,
      labelsJson = customLabels,
      warnings = Vector.empty)
    val workflowId = rootActor.underlyingActor.submitWorkflow(sources)
    eventually { verifyWorkflowState(rootActor.underlyingActor.serviceRegistryActor, workflowId, terminalState) } (config = patienceConfig, pos = implicitly[org.scalactic.source.Position])
    val outcome = getWorkflowOutputsFromMetadata(workflowId, rootActor.underlyingActor.serviceRegistryActor)
    system.stop(rootActor)
    // And return the outcome:
    outcome
  }

  /**
   * Like runWdl, but also asserts the produced outputs against `expectedOutputs`
   * (and, when allowOtherOutputs is false, that no extra outputs were produced).
   * Returns the workflow id.
   */
  def runWdlAndAssertOutputs(sampleWdl: SampleWdl,
                             eventFilter: EventFilter,
                             expectedOutputs: Map[FullyQualifiedName, WomValue],
                             runtime: String = "",
                             workflowOptions: String = "{}",
                             allowOtherOutputs: Boolean = true,
                             terminalState: WorkflowState = WorkflowSucceeded,
                             config: Config = DefaultConfig,
                             patienceConfig: PatienceConfig = defaultPatience)
                            (implicit ec: ExecutionContext): WorkflowId = {
    val rootActor = buildCromwellRootActor(config)
    val sources = sampleWdl.asWorkflowSources(runtime, workflowOptions)
    val workflowId = rootActor.underlyingActor.submitWorkflow(sources)
    eventually { verifyWorkflowState(rootActor.underlyingActor.serviceRegistryActor, workflowId, terminalState) } (config = patienceConfig, pos = implicitly[org.scalactic.source.Position])
    val outputs = getWorkflowOutputsFromMetadata(workflowId, rootActor.underlyingActor.serviceRegistryActor)
    val actualOutputNames = outputs.keys mkString ", "
    val expectedOutputNames = expectedOutputs.keys mkString " "

    // Every expected output must be present and (unless AnyValueIsFine) equal.
    expectedOutputs foreach { case (outputFqn, expectedValue) =>
      val actualValue = outputs.getOrElse(outputFqn, throw OutputNotFoundException(outputFqn, actualOutputNames))
      if (expectedValue != AnyValueIsFine) actualValue shouldEqual replaceVariables(expectedValue, workflowId)
    }
    if (!allowOtherOutputs) {
      outputs foreach { case (actualFqn, actualValue) =>
        val expectedValue = expectedOutputs.getOrElse(actualFqn, throw new RuntimeException(s"Actual output $actualFqn was not wanted in '$expectedOutputNames'"))
        if (expectedValue != AnyValueIsFine) actualValue shouldEqual expectedValue
      }
    }
    system.stop(rootActor)
    workflowId
  }

  /**
   * Verifies that a state is correct. // TODO: There must be a better way...?
   */
  private def verifyWorkflowState(serviceRegistryActor: ActorRef, workflowId: WorkflowId, expectedState: WorkflowState)(implicit ec: ExecutionContext): Unit = {
    // Blocks on the service registry for the current workflow status.
    def getWorkflowState(workflowId: WorkflowId, serviceRegistryActor: ActorRef)(implicit ec: ExecutionContext): WorkflowState = {
      val statusResponse = serviceRegistryActor.ask(GetStatus(workflowId))(TimeoutDuration).collect {
        case StatusLookupResponse(_, state) => state
        case f => throw new RuntimeException(s"Unexpected status response for $workflowId: $f")
      }
      Await.result(statusResponse, TimeoutDuration)
    }

    getWorkflowState(workflowId, serviceRegistryActor) should equal (expectedState)
    ()
  }

  // Spins up a throwaway MetadataBuilderActor, asks it for the workflow outputs and
  // converts the resulting JSON fields into WomValues.
  private def getWorkflowOutputsFromMetadata(id: WorkflowId, serviceRegistryActor: ActorRef): Map[FullyQualifiedName, WomValue] = {
    val mba = system.actorOf(MetadataBuilderActor.props(serviceRegistryActor))
    val response = mba.ask(WorkflowOutputs(id)).mapTo[MetadataBuilderActorResponse] collect {
      case BuiltMetadataResponse(r) => r
      case FailedMetadataResponse(e) => throw e
    }
    val jsObject = Await.result(response, TimeoutDuration)
    system.stop(mba)
    jsObject.getFields(WorkflowMetadataKeys.Outputs).toList match {
      case head::_ => head.asInstanceOf[JsObject].fields.map( x => (x._1, jsValueToWdlValue(x._2)))
      case _ => Map.empty
    }
  }

  // Best-effort JSON -> WomValue conversion used for output comparison.
  private def jsValueToWdlValue(jsValue: JsValue): WomValue = {
    jsValue match {
      case str: JsString => WomString(str.value)
      // whole numbers become WomInteger, everything else WomFloat
      case JsNumber(number) if number.scale == 0 => WomInteger(number.intValue)
      case JsNumber(number) => WomFloat(number.doubleValue)
      case JsBoolean(bool) => WomBoolean(bool)
      case array: JsArray =>
        val valuesArray = array.elements.map(jsValueToWdlValue)
        // element type inferred from the first element; empty arrays default to String
        if (valuesArray.isEmpty) WomArray(WomArrayType(WomStringType), Seq.empty)
        else WomArray(WomArrayType(valuesArray.head.womType), valuesArray)
      case map: JsObject =>
        // TODO: currently assuming all keys are String. But that's not WDL-complete...
        val valuesMap: Map[WomValue, WomValue] = map.fields.map { case (fieldName, fieldValue) => (WomString(fieldName), jsValueToWdlValue(fieldValue)) }
        if (valuesMap.isEmpty) WomMap(WomMapType(WomStringType, WomStringType), Map.empty)
        else WomMap(WomMapType(WomStringType, valuesMap.head._2.womType), valuesMap)
    }
  }
}
// Test double for the job store: unconditionally acknowledges every write command.
class AlwaysHappyJobStoreActor extends Actor {
  override def receive: Receive = {
    case command: JobStoreWriterCommand => sender() ! JobStoreWriteSuccess(command)
  }
}
// Props factory; note this intentionally creates an EmptySubWorkflowStoreActor.
object AlwaysHappySubWorkflowStoreActor {
  def props: Props = Props(new EmptySubWorkflowStoreActor)
}
// Props factory for the always-successful job store double.
object AlwaysHappyJobStoreActor {
  def props: Props = Props(new AlwaysHappyJobStoreActor)
}
// Test double: every call-cache lookup misses.
class EmptyCallCacheReadActor extends Actor {
  override def receive: Receive = {
    case _: CacheLookupRequest => sender ! CacheLookupNoHit
  }
}
// Test double: every call-cache hash save succeeds.
class EmptyCallCacheWriteActor extends Actor {
  override def receive: Receive = {
    case SaveCallCacheHashes => sender ! CallCacheWriteSuccess
  }
}
// Props factory for the always-miss cache reader.
object EmptyCallCacheReadActor {
  def props: Props = Props(new EmptyCallCacheReadActor)
}
// Props factory for the always-success cache writer.
object EmptyCallCacheWriteActor {
  def props: Props = Props(new EmptyCallCacheWriteActor)
}
// Test double: answers every docker hash request with a fixed dummy ("alg"/"hash") result.
class EmptyDockerHashActor extends Actor {
  override def receive: Receive = {
    case request: DockerHashRequest => sender ! DockerHashSuccessResponse(DockerHashResult("alg", "hash"), request)
  }
}
// Props factory for the dummy docker-hash actor.
object EmptyDockerHashActor {
  def props: Props = Props(new EmptyDockerHashActor)
}
| ohsu-comp-bio/cromwell | engine/src/test/scala/cromwell/CromwellTestKitSpec.scala | Scala | bsd-3-clause | 20,873 |
package marge
/**
*
* User: mikio
* Date: 4/11/11
* Time: 2:58 PM
*/
// Wrapper intended to carry enriched operations on a Seq[Double]; currently empty —
// the actual operations live on the companion object below.
class SeqDoubleFunctions(seq: Seq[Double]) {
}
object SeqDoubleFunctions {
  /** Arithmetic mean of `s`; NaN for an empty sequence (0.0 / 0). */
  def mean(s: Seq[Double]): Double = {
    val total = s.foldLeft(0.0)(_ + _)
    total / s.size
  }

  /** Index of the first maximal element of `s`; throws on an empty sequence. */
  def argmax(s: Seq[Double]): Int = {
    val indexed = s.zipWithIndex
    indexed.maxBy { case (value, _) => value }._2
  }
}
| mikiobraun/marge | src/main/scala/marge/SeqDoubleFunctions.scala | Scala | mit | 275 |
package uk.org.nbn.nbnv.importer.validation
import uk.org.nbn.nbnv.importer.testing.BaseFunSuite
import uk.org.nbn.nbnv.importer.records.NbnRecord
import org.mockito.Mockito._
import uk.org.nbn.nbnv.importer.fidelity.ResultLevel
class Nbnv67ValidatorSuite extends BaseFunSuite {

  /**
   * Mocks an NbnRecord whose sensitiveOccurrenceRaw returns `raw` and runs
   * Nbnv67Validator against it, returning the validation result.
   * Factored out of the four tests below, which only differed in this value.
   */
  private def validateRaw(raw: Option[String]) = {
    val record = mock[NbnRecord]
    when(record.sensitiveOccurrenceRaw).thenReturn(raw)
    val v = new Nbnv67Validator
    v.validate(record)
  }

  // Typo fix: test names previously said "Nvnv67"; the class under test is Nbnv67Validator.
  test("Nbnv67 should validate when sensitiveOccurrenceRaw is null") {
    validateRaw(None).level should be (ResultLevel.DEBUG)
  }

  test("Nbnv67 should validate when sensitiveOccurrenceRaw is true") {
    validateRaw(Some("true")).level should be (ResultLevel.DEBUG)
  }

  test("Nbnv67 should validate when sensitiveOccurrenceRaw is false") {
    validateRaw(Some("false")).level should be (ResultLevel.DEBUG)
  }

  test("Nbnv67 should not validate when sensitiveOccurrenceRaw is not null true or false") {
    validateRaw(Some("fgadg")).level should be (ResultLevel.ERROR)
  }
}
| JNCC-dev-team/nbn-importer | importer/src/test/scala/uk/org/nbn/nbnv/importer/validation/Nbnv67ValidatorSuite.scala | Scala | apache-2.0 | 1,470 |
// Shorthand alias + companion value for immutable Vector, used throughout this sketch.
type Vec[+A] = collection.immutable.Vector[A]
val Vec = collection.immutable.Vector
/**
 * An N-dimensional variable stored as a flat row-major vector.
 *
 * @param data  flat storage; must contain exactly shape.product elements
 * @param shape extent of each dimension
 */
class Variable[A](val data: Vector[A], val shape: Vector[Int]) {
  require(data.size == shape.product)

  /**
   * Reads a hyper-rectangular selection, one Range per dimension, flattened in
   * row-major order. Each range must lie within the corresponding dimension.
   *
   * Fix: replaced the deprecated `/:` fold-left operator (removed in newer Scala
   * versions) with the equivalent `foldLeft` in both places; behavior is unchanged.
   */
  def read(sections: Vector[Range]): Vector[A] = {
    require(sections.size == shape.size)
    require(sections.zipWithIndex.forall { case (r, ri) => r.forall(i => i >= 0 && i < shape(ri)) })
    // total number of selected elements (zero-dimensional selection reads nothing)
    val sz = if (sections.isEmpty) 0 else sections.foldLeft(1)(_ * _.size)
    // fold from the fastest-varying (last) dimension outward
    val zip = (shape zip sections).reverse
    Vector.tabulate(sz) { i =>
      // accumulate (flat index, dimension multiplier, selection multiplier)
      val (j, _, _) = zip.foldLeft((0, 1, 1)) { case ((res, m, n), (dim, r)) =>
        val add = r((i / n) % r.size) * m
        (res + add, m * dim, n * r.size)
      }
      data(j)
    }
  }
}
// Worksheet sanity checks: an 8x8x8 variable holding 1..512 in row-major order.
val v = new Variable[Int](Vector(1 to 512: _*), Vector(8, 8, 8))
assert(v.read(Vector(0 until 0, 0 until 0, 0 until 0)) == Vector())
assert(v.read(Vector(0 to 0, 0 to 0, 0 to 2)) == Vector(1, 2, 3))
assert(v.read(Vector(1 to 1, 1 to 1, 0 to 2)) == Vector(73, 74, 75))
// A 2x3x4 sub-selection used in the examples below.
val selSome = Vector(
  2 until 4, // size 2
  3 until 6, // size 3
  4 until 8 // size 4
)
assert(v.read(selSome) == Vector(
  157, 158, 159, 160,
  165, 166, 167, 168,
  173, 174, 175, 176,
  221, 222, 223, 224,
  229, 230, 231, 232,
  237, 238, 239, 240
))
// Abstraction over sequential, chunked reading of a flattened selection.
trait ChunkReader[A] {
  // Returns the next `chunkSize` elements of the flattened selection.
  def read(chunkSize: Int): Vector[A]
}
// Intended contract for a ChunkReader over selSome (24 elements read as 4x5 + 4).
// NOTE(review): `(1 to 4).map(c.read(5))` maps the ints 1..4 through the *returned
// Vector* (Vector extends Int => A) instead of invoking read four times — presumably
// `(1 to 4).flatMap(_ => c.read(5))` was intended; confirm before enabling the call.
def test(c: ChunkReader[Int]): Unit =
  assert((1 to 4).map(c.read(5)) ++ c.read(4) == Vector(
    157, 158, 159, 160,
    165, 166, 167, 168,
    173, 174, 175, 176,
    221, 222, 223, 224,
    229, 230, 231, 232,
    237, 238, 239, 240
  ))
// test(???)
/**
 * Converts a flat position within the selection into the per-dimension indices
 * (relative to each range) of the selected element, row-major order.
 */
def calcInSection(pos: Int, sections: Vector[Range]): Vector[Int] = {
  val dimSizes = sections.map(_.size)
  // divisor for each dimension = product of the sizes of all faster dimensions
  val divisors = dimSizes.scanRight(1)(_ * _).tail
  (dimSizes zip divisors).map { case (mod, div) => (pos / div) % mod }
}
// Sanity-check calcInSection against the full selection of v (all of 8x8x8).
val selAll = v.shape.map(0 until _)
calcInSection(1, selAll)
(0 until 24).map(i => calcInSection(i, selAll)).foreach(println)
//////////////////////////////////////////////
val selSome = Vector(
2 until 4, // size 2
3 until 6, // size 3
4 until 8 // size 4
)
calcInSection( 0, selSome) // [0 0 0]
calcInSection( 4, selSome) // [0 1 0]
calcInSection( 5, selSome) // [0 1 1]
calcInSection( 9, selSome) // [0 2 1]
// this is the most interesting case as the highest dimension index changes
// ; can we do this with two reads?
calcInSection(10, selSome) // [0 2 2]
calcInSection(14, selSome) // [1 0 2]
calcInSection(15, selSome) // [1 0 3]
calcInSection(19, selSome) // [1 1 3]
// this is an interesting case because only the lowest dim index changes
// ; this should be solved with a single read
calcInSection(20, selSome) // [1 2 0]
calcInSection(23, selSome) // [1 2 3]
/*---
Algorithm:
- s0 := calculate the start indices for current `pos`, as done above through `calcInSection`
- i0 := in s0, find the right-most dim index where the index (elem in s0) is > 0; if there is none, i0 := 0
- numProc := min((sizes(i0) - s0(i0)) * mods(i0), chunkRemain)
- ...
let's see this attempt in the above example (reading 4 times 5 elements, then 4):
*/
(pos = 0, chunk = 5) --> s0 := [0 0 0]; i0 := 0; numProc = min(2 - 0) * 12 = 24, 5) = 5
// that doesn't help; we should also look at s1 (stop indices)
val expected = v.read(selSome)
assert(expected.size == expected.distinct.size)
// e.g. (2, 3, 4) --> 4 + 3 * 8 + 2 * 64 + 1 = 157 OK
// e.g. (3, 5, 7) --> 7 + 5 * 8 + 3 * 64 + 1 = 240 OK
// let's look at a hypothetical four dimensional example to understand
// whether might need up to N instead of up to three reads:
s0 := [1 1 1 1]
s1 := [2 2 2 2]
// "normalized" "floor-s1"
// last read would be ("s1f to s1")
s1f := [2 0 0 0]
// "empirically":
(10 to 14).map(i => calcInSection(i, selSome)).foreach(println)
Vector(0, 2, 2)
Vector(0, 2, 3)
Vector(1, 0, 0)
Vector(1, 0, 1)
Vector(1, 0, 2)
// we can chunk this up into contiguous blocks as long as no element "decreases"
// with respect to the initial element; hence:
// first read:
Vector(0, 2, 2)
Vector(0, 2, 3)
// second read:
Vector(1, 0, 0)
Vector(1, 0, 1)
Vector(1, 0, 2)
//
(20 to 23).map(i => calcInSection(i, selSome)).foreach(println)
// first and only read
Vector(1, 2, 0)
Vector(1, 2, 1)
Vector(1, 2, 2)
Vector(1, 2, 3)
// let's find a case that requires three reads
(5 to 12).map(i => calcInSection(i, selSome)).foreach(println)
// first read
Vector(0, 1, 1)
Vector(0, 1, 2)
Vector(0, 1, 3)
// second read
Vector(0, 2, 0)
Vector(0, 2, 1)
Vector(0, 2, 2)
Vector(0, 2, 3)
// third read
Vector(1, 0, 0)
// it would thus seem that the worst case is N reads where N = rank of variable
/////////////////////////////////////////////////////
/////////////////////////////////////////////////////
/**
 * Samples `in` through `by`: drops `by.start` elements, widens the step by
 * `by.step`, and keeps `by.size` elements.
 *
 * Fix: uses the public Range / Range.inclusive factory methods instead of
 * constructing Range subclasses directly (the original resorted to `new` because
 * the copy method is protected); results are identical.
 */
def sampleRange(in: Range, by: Range): Range = {
  val drop = by.start
  val stepM = by.step
  require(drop >= 0 && stepM > 0)
  val in1 = in.drop(drop)
  val in2 =
    if (stepM == 1) in1
    else if (in1.isInclusive) Range.inclusive(in1.start, in1.end, in1.step * stepM)
    else Range(in1.start, in1.end, in1.step * stepM)
  in2.take(by.size)
}
/**
 * ChunkReader over the selection `in` of variable `v`, issuing as few contiguous
 * `v.read` calls as possible (a new read is flushed whenever a dimension index
 * would decrease relative to the current block's start).
 */
class Impl[A](v: Variable[A], in: Vec[Range])
  extends ChunkReader[A] {

  // BUG FIX: the original required `v.shape.size == sections.size`, but `sections`
  // is not in scope here (it is a parameter of Variable.read); the selection held
  // by this reader is `in`.
  require(v.shape.size == in.size)

  private var pos: Int = 0 // advanced by readNext

  /** Total number of elements selected by `in`. */
  val size: Int = in.map(_.size).product

  /** Rewinds the reader to the beginning of the selection. */
  def reset(): Unit = pos = 0

  def read(chunkSize: Int): Vec[A] = {
    require(chunkSize >= 0)
    if (chunkSize == 0) return Vec.empty
    val stop = pos + chunkSize
    var stats = 0
    // Reads the contiguous sub-selection spanning multi-indices s0..s1 (inclusive).
    def flush(s0: Vec[Int], s1: Vec[Int], res: Vec[A]): Vec[A] = {
      val by = (s0, s1).zipped.map(_ to _)
      val sampled = (in, by).zipped.map(sampleRange(_, _))
      val r = v.read(sampled)
      stats += 1
      res ++ r
    }
    @annotation.tailrec
    def loop(s0: Vec[Int], s1: Vec[Int], p: Int, res: Vec[A]): Vec[A] =
      if (p == stop) flush(s0, s1, res)
      else {
        val s2 = calcInSection(p, in)
        // still contiguous while no dimension index decreases w.r.t. the block start
        val ok = (s0, s2).zipped.forall(_ <= _)
        println(s"s2 = $s2; ok = $ok")
        if (ok) loop(s0, s2, p + 1, res)
        else loop(s2, s2, p + 1, flush(s0, s1, res))
      }
    val s00 = calcInSection(pos, in)
    println(s"s00 = $s00")
    val res = loop(s00, s00, pos + 1, Vec.empty)
    pos = stop
    println(s"# of reads: $stats")
    res
  }
}
val i = new Impl(v, selSome)
i.read(5) // wrong
/////////////////////////////////////////////////
// the above doesn't take the ending into account
// ; does it?
/////////////////////////////////////////////////
// "empirically":
(0 to 4).map(i => calcInSection(i, selSome)).foreach(println)
// first
Vector(0, 0, 0)
Vector(0, 0, 1)
Vector(0, 0, 2)
Vector(0, 0, 3)
// second
Vector(0, 1, 0)
| iem-projects/sysson | sketches/FlatReading.scala | Scala | gpl-3.0 | 6,809 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.metrics.config
import java.net.InetSocketAddress
import java.util.Locale
import java.util.concurrent.TimeUnit
import com.codahale.metrics.Slf4jReporter.LoggingLevel
import com.codahale.metrics.ganglia.GangliaReporter
import com.codahale.metrics.graphite.{Graphite, GraphiteReporter}
import com.codahale.metrics.{MetricRegistry, _}
import com.typesafe.config.{ConfigFactory, Config}
import com.typesafe.scalalogging.LazyLogging
import info.ganglia.gmetric4j.gmetric.GMetric
import info.ganglia.gmetric4j.gmetric.GMetric.UDPAddressingMode
import org.locationtech.geomesa.metrics.reporters.{DelimitedFileReporter, AccumuloReporter}
import org.slf4j.LoggerFactory
@deprecated("Will be removed without replacement")
object MetricsConfig extends LazyLogging {

  /** Default config path under which reporter definitions are looked up. */
  val ConfigPath = "geomesa.metrics.reporters"

  /**
   * Creates one reporter per child entry found under `path` (or directly under
   * `config` when `path` is None). Entries that fail to parse are logged and skipped.
   *
   * @param config   configuration containing the reporter definitions
   * @param registry metric registry the reporters will publish from
   * @param path     optional path under `config` holding the reporter entries
   * @param start    when true, reporters with a positive `interval` are started
   */
  def reporters(config: Config,
                registry: MetricRegistry,
                path: Option[String] = Some(ConfigPath),
                start: Boolean = true): Seq[ScheduledReporter] = {
    val reporters = path match {
      case Some(p) => if (config.hasPath(p)) config.getConfig(p) else ConfigFactory.empty()
      case None => config
    }
    // explicit JavaConverters instead of the deprecated implicit JavaConversions
    import scala.collection.JavaConverters._
    reporters.root.keySet.asScala.toSeq.flatMap { path =>
      try {
        Some(createReporter(reporters.getConfig(path), registry, start))
      } catch {
        case e: Exception =>
          logger.warn("Invalid reporter config", e)
          None
      }
    }
  }

  /** Builds (and optionally starts) a single reporter from its config entry. */
  private def createReporter(config: Config, registry: MetricRegistry, start: Boolean): ScheduledReporter = {
    // `units` acts as a shared fallback for both rate and duration units
    val rate = timeUnit(config.getString(if (config.hasPath("rate-units")) "rate-units" else "units"))
    val duration = timeUnit(config.getString(if (config.hasPath("duration-units")) "duration-units" else "units"))
    val interval = if (config.hasPath("interval")) config.getInt("interval") else -1

    val typ = config.getString("type").toLowerCase(Locale.US)

    val reporter = if (typ == "console") {
      ConsoleReporter.forRegistry(registry)
          .convertRatesTo(rate)
          .convertDurationsTo(duration)
          .build()
    } else if (typ == "slf4j") {
      val logger = LoggerFactory.getLogger(config.getString("logger"))
      val level = if (config.hasPath("level")) {
        // Locale.US for consistency with the `type` parsing above
        LoggingLevel.valueOf(config.getString("level").toUpperCase(Locale.US))
      } else {
        LoggingLevel.DEBUG
      }
      Slf4jReporter.forRegistry(registry)
          .outputTo(logger)
          .convertRatesTo(rate)
          .convertDurationsTo(duration)
          .withLoggingLevel(level)
          .build()
    } else if (typ == "delimited-text") {
      val path = config.getString("output")
      val aggregate = if (config.hasPath("aggregate")) config.getBoolean("aggregate") else true
      val tabs = if (config.hasPath("tabs")) config.getBoolean("tabs") else true
      val builder = DelimitedFileReporter.forRegistry(registry)
          .convertRatesTo(rate)
          .convertDurationsTo(duration)
          .aggregate(aggregate)
      if (tabs) {
        builder.withTabs()
      } else {
        builder.withCommas()
      }
      builder.build(path)
    } else if (typ == "graphite") {
      // expects "host:port"; the exact-length match fails fast on malformed urls
      // (replaces the obscure `val url +: Nil :+ port = ...` pattern, same semantics)
      val Array(url, portString) = config.getString("url").split(":")
      val graphite = new Graphite(new InetSocketAddress(url, portString.toInt))
      val prefix = if (config.hasPath("prefix")) config.getString("prefix") else null
      GraphiteReporter.forRegistry(registry)
          .prefixedWith(prefix)
          .convertRatesTo(rate)
          .convertDurationsTo(duration)
          .build(graphite)
    } else if (typ == "ganglia") {
      val group = config.getString("group")
      val port = config.getInt("port")
      val mode = if (config.hasPath("addressing-mode") &&
          config.getString("addressing-mode").equalsIgnoreCase("unicast")) {
        UDPAddressingMode.UNICAST
      } else {
        UDPAddressingMode.MULTICAST
      }
      val ttl = config.getInt("ttl")
      val is311 = if (config.hasPath("ganglia311")) config.getBoolean("ganglia311") else true
      // port is already an Int (redundant .toInt removed)
      val ganglia = new GMetric(group, port, mode, ttl, is311)
      GangliaReporter.forRegistry(registry)
          .convertRatesTo(rate)
          .convertDurationsTo(duration)
          .build(ganglia)
    } else if (typ == "accumulo") {
      val instance = config.getString("instanceId")
      val zoos = config.getString("zookeepers")
      val user = config.getString("user")
      val password = config.getString("password")
      val table = config.getString("tableName")
      val builder = AccumuloReporter.forRegistry(registry)
          .convertRatesTo(rate)
          .convertDurationsTo(duration)
          .writeToTable(table)
      if (config.hasPath("visibilities")) {
        builder.withVisibilities(config.getString("visibilities"))
      }
      if (config.hasPath("mock") && config.getBoolean("mock")) {
        builder.mock(true)
      }
      builder.build(instance, zoos, user, password)
    } else {
      throw new RuntimeException(s"No reporter type '$typ' defined")
    }

    if (start && interval > 0) {
      reporter.start(interval, TimeUnit.SECONDS)
    }

    reporter
  }

  // Parses e.g. "seconds" into TimeUnit.SECONDS using the standard enum lookup
  // (replaces field reflection; invalid units are still caught by `reporters`).
  private def timeUnit(unit: String): TimeUnit =
    TimeUnit.valueOf(unit.toUpperCase(Locale.US))
}
| elahrvivaz/geomesa | geomesa-metrics/src/main/scala/org/locationtech/geomesa/metrics/config/MetricsConfig.scala | Scala | apache-2.0 | 5,888 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.k8s
import java.util.concurrent.{ScheduledExecutorService, TimeUnit}
import scala.concurrent.Future
import io.fabric8.kubernetes.api.model.Pod
import io.fabric8.kubernetes.client.KubernetesClient
import org.apache.spark.SparkContext
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.KubernetesUtils
import org.apache.spark.deploy.k8s.submit.KubernetesClientUtils
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.internal.config.SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.rpc.{RpcAddress, RpcCallContext}
import org.apache.spark.scheduler.{ExecutorKilled, ExecutorLossReason, TaskSchedulerImpl}
import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, SchedulerBackendUtils}
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RegisterExecutor
import org.apache.spark.util.{ThreadUtils, Utils}
// Coarse-grained scheduler backend that manages executors as Kubernetes pods via the
// injected pod allocator, lifecycle manager, and snapshot sources.
private[spark] class KubernetesClusterSchedulerBackend(
    scheduler: TaskSchedulerImpl,
    sc: SparkContext,
    kubernetesClient: KubernetesClient,
    executorService: ScheduledExecutorService,
    snapshotsStore: ExecutorPodsSnapshotsStore,
    podAllocator: ExecutorPodsAllocator,
    lifecycleEventHandler: ExecutorPodsLifecycleManager,
    watchEvents: ExecutorPodsWatchSnapshotSource,
    pollEvents: ExecutorPodsPollingSnapshotSource)
  extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv) {

  // Default to 0.8 when the user has not configured a minimum registered-resources ratio.
  protected override val minRegisteredRatio =
    if (conf.get(SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO).isEmpty) {
      0.8
    } else {
      super.minRegisteredRatio
    }

  private val initialExecutors = SchedulerBackendUtils.getInitialTargetExecutorNumber(conf)

  private val shouldDeleteDriverService = conf.get(KUBERNETES_DRIVER_SERVICE_DELETE_ON_TERMINATION)

  private val shouldDeleteExecutors = conf.get(KUBERNETES_DELETE_EXECUTORS)

  private val defaultProfile = scheduler.sc.resourceProfileManager.defaultResourceProfile

  // Allow removeExecutor to be accessible by ExecutorPodsLifecycleEventHandler
  private[k8s] def doRemoveExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
    removeExecutor(executorId, reason)
  }

  // Creates the executor config map (spark conf dir files), owner-referenced to the
  // driver pod so Kubernetes garbage-collects it with the application.
  private def setUpExecutorConfigMap(driverPod: Option[Pod]): Unit = {
    val configMapName = KubernetesClientUtils.configMapNameExecutor
    val confFilesMap = KubernetesClientUtils
      .buildSparkConfDirFilesMap(configMapName, conf, Map.empty)
    val labels =
      Map(SPARK_APP_ID_LABEL -> applicationId(), SPARK_ROLE_LABEL -> SPARK_POD_EXECUTOR_ROLE)
    val configMap = KubernetesClientUtils.buildConfigMap(configMapName, confFilesMap, labels)
    KubernetesUtils.addOwnerReference(driverPod.orNull, Seq(configMap))
    kubernetesClient.configMaps().create(configMap)
  }

  /**
   * Get an application ID associated with the job.
   * This returns the string value of spark.app.id if set, otherwise
   * the locally-generated ID from the superclass.
   *
   * @return The application ID
   */
  override def applicationId(): String = {
    conf.getOption("spark.app.id").map(_.toString).getOrElse(super.applicationId)
  }

  // Kicks off pod allocation, lifecycle handling and both snapshot sources, then
  // publishes the executor config map unless disabled.
  override def start(): Unit = {
    super.start()
    val initExecs = Map(defaultProfile -> initialExecutors)
    podAllocator.setTotalExpectedExecutors(initExecs)
    lifecycleEventHandler.start(this)
    podAllocator.start(applicationId(), this)
    watchEvents.start(applicationId())
    pollEvents.start(applicationId())
    if (!conf.get(KUBERNETES_EXECUTOR_DISABLE_CONFIGMAP)) {
      setUpExecutorConfigMap(podAllocator.driverPod)
    }
  }

  // Each teardown step is wrapped individually so one failure cannot prevent the
  // remaining Kubernetes resources from being cleaned up.
  override def stop(): Unit = {
    // When `CoarseGrainedSchedulerBackend.stop` throws `SparkException`,
    // K8s cluster scheduler should log and proceed in order to delete the K8s cluster resources.
    Utils.tryLogNonFatalError {
      super.stop()
    }

    Utils.tryLogNonFatalError {
      snapshotsStore.stop()
    }

    Utils.tryLogNonFatalError {
      watchEvents.stop()
    }

    Utils.tryLogNonFatalError {
      pollEvents.stop()
    }

    if (shouldDeleteDriverService) {
      Utils.tryLogNonFatalError {
        kubernetesClient
          .services()
          .withLabel(SPARK_APP_ID_LABEL, applicationId())
          .delete()
      }
    }

    Utils.tryLogNonFatalError {
      kubernetesClient
        .persistentVolumeClaims()
        .withLabel(SPARK_APP_ID_LABEL, applicationId())
        .delete()
    }

    if (shouldDeleteExecutors) {
      Utils.tryLogNonFatalError {
        kubernetesClient
          .pods()
          .withLabel(SPARK_APP_ID_LABEL, applicationId())
          .withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
          .delete()
      }
      if (!conf.get(KUBERNETES_EXECUTOR_DISABLE_CONFIGMAP)) {
        Utils.tryLogNonFatalError {
          kubernetesClient
            .configMaps()
            .withLabel(SPARK_APP_ID_LABEL, applicationId())
            .withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
            .delete()
        }
      }
    }

    Utils.tryLogNonFatalError {
      ThreadUtils.shutdown(executorService)
    }

    Utils.tryLogNonFatalError {
      kubernetesClient.close()
    }
  }

  override def doRequestTotalExecutors(
      resourceProfileToTotalExecs: Map[ResourceProfile, Int]): Future[Boolean] = {
    podAllocator.setTotalExpectedExecutors(resourceProfileToTotalExecs)
    Future.successful(true)
  }

  override def sufficientResourcesRegistered(): Boolean = {
    totalRegisteredExecutors.get() >= initialExecutors * minRegisteredRatio
  }

  override def getExecutorIds(): Seq[String] = synchronized {
    super.getExecutorIds()
  }

  override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = {
    executorIds.foreach { id =>
      removeExecutor(id, ExecutorKilled)
    }

    // Give some time for the executors to shut themselves down, then forcefully kill any
    // remaining ones. This intentionally ignores the configuration about whether pods
    // should be deleted; only executors that shut down gracefully (and are then collected
    // by the ExecutorPodsLifecycleManager) will respect that configuration.
    val killTask = new Runnable() {
      override def run(): Unit = Utils.tryLogNonFatalError {
        val running = kubernetesClient
          .pods()
          .withField("status.phase", "Running")
          .withLabel(SPARK_APP_ID_LABEL, applicationId())
          .withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
          .withLabelIn(SPARK_EXECUTOR_ID_LABEL, executorIds: _*)

        if (!running.list().getItems().isEmpty()) {
          logInfo(s"Forcefully deleting ${running.list().getItems().size()} pods " +
            s"(out of ${executorIds.size}) that are still running after graceful shutdown period.")
          running.delete()
        }
      }
    }
    executorService.schedule(killTask, conf.get(KUBERNETES_DYN_ALLOC_KILL_GRACE_PERIOD),
      TimeUnit.MILLISECONDS)

    // Return an immediate success, since we can't confirm or deny that executors have been
    // actually shut down without waiting too long and blocking the allocation thread, which
    // waits on this future to complete, blocking further allocations / deallocations.
    //
    // This relies a lot on the guarantees of Spark's RPC system, that a message will be
    // delivered to the destination unless there's an issue with the connection, in which
    // case the executor will shut itself down (and the driver, separately, will just declare
    // it as "lost"). Coupled with the allocation manager keeping track of which executors are
    // pending release, returning "true" here means that eventually all the requested executors
    // will be removed.
    //
    // The cleanup timer above is just an optimization to make sure that stuck executors don't
    // stick around in the k8s server. Normally it should never delete any pods at all.
    Future.successful(true)
  }

  override def createDriverEndpoint(): DriverEndpoint = {
    new KubernetesDriverEndpoint()
  }

  override protected def createTokenManager(): Option[HadoopDelegationTokenManager] = {
    Some(new HadoopDelegationTokenManager(conf, sc.hadoopConfiguration, driverEndpoint))
  }

  override protected def isExecutorExcluded(executorId: String, hostname: String): Boolean = {
    podAllocator.isDeleted(executorId)
  }

  private class KubernetesDriverEndpoint extends DriverEndpoint {

    // Drop executor registrations that arrive after the SparkContext has stopped.
    private def ignoreRegisterExecutorAtStoppedContext: PartialFunction[Any, Unit] = {
      case _: RegisterExecutor if sc.isStopped => // No-op
    }

    override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] =
      ignoreRegisterExecutorAtStoppedContext.orElse(super.receiveAndReply(context))

    override def onDisconnected(rpcAddress: RpcAddress): Unit = {
      // Don't do anything besides disabling the executor - allow the Kubernetes API events to
      // drive the rest of the lifecycle decisions
      // TODO what if we disconnect from a networking issue? Probably want to mark the executor
      // to be deleted eventually.
      addressToExecutorId.get(rpcAddress).foreach(disableExecutor)
    }
  }
}
| maropu/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala | Scala | apache-2.0 | 10,092 |
/*
* Licensed to the Programming Language and Software Methodology Lab (PLSM)
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership.
* The PLSM licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.nccu.plsm.geo.dsl
import java.util
import edu.nccu.plsm.geo.datum.TMDatum
import edu.nccu.plsm.geo.projection.{ CoverageResult, ProjectionResult, Result, TransverseMercator }
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
import scala.language.implicitConversions
trait DSL extends DegreeConversion with DatumDefining {
// Logger named after the concrete class mixing in this trait.
protected[this] val logger = LoggerFactory.getLogger(getClass)
// Insertion-ordered store of named results, backed by a Java LinkedHashMap.
val resultMap = new util.LinkedHashMap[Symbol, Result].asScala
implicit def symbol2Assignable(s: Symbol): Assignable = new Assignable(s)
/** A geodetic operation that is executed against a given datum via `WITH`. */
trait Operation[T] {
  def WITH(datum: TMDatum): Result
}
/** Projects a longitude/latitude pair to Transverse Mercator coordinates for a datum. */
final class PROJECT(private val lng: CoordinateUnit, private val lat: CoordinateUnit) extends Operation[(Double, Double)] {
  override def WITH(datum: TMDatum): ProjectionResult =
    TransverseMercator.project(lng.toRadians, lat.toRadians, datum)
}
/** Computes the meridian convergence at a longitude/latitude pair for a datum. */
final class CONVERGENCE(private val lng: CoordinateUnit, private val lat: CoordinateUnit) extends Operation[Double] {
  override def WITH(datum: TMDatum): CoverageResult =
    TransverseMercator.meridianConvergence(lng.toRadians, lat.toRadians, datum)
}
/** Wrapper enabling `'name := result`; warns when a previously stored result is overwritten. */
final class Assignable(private val symbol: Symbol) {
  def :=[T](result: Result): Unit = {
    // `put` returns the previous value for the key, if any.
    val previous = resultMap.put(symbol, result)
    previous.foreach(old => logger.warn(s"!!! Overwriting result ${symbol.name} = ${old.result}"))
  }
}
/** Factory for [[PROJECT]] operations, accepting either two coordinates or a pair. */
object PROJECT {
  def apply(lng: CoordinateUnit, lat: CoordinateUnit): PROJECT = new PROJECT(lng, lat)
  def apply(p: (CoordinateUnit, CoordinateUnit)): PROJECT = apply(p._1, p._2)
}
/** Factory for [[CONVERGENCE]] operations, accepting either two coordinates or a pair. */
object CONVERGENCE {
  def apply(lng: CoordinateUnit, lat: CoordinateUnit): CONVERGENCE = new CONVERGENCE(lng, lat)
  def apply(p: (CoordinateUnit, CoordinateUnit)): CONVERGENCE = apply(p._1, p._2)
}
/*
object RESULT {
def PRINT(): Unit = {
logger.info(s"""Result:
|$RETURN""".stripMargin)
}
def RETURN(): String = resultMap.map { case (symbol, result) => f"${symbol.name}%-10s = $result" }.mkString("\\n")
def CLEAR(): Unit = resultMap.clear()
}
*/
} | AtkinsChang/geoconvert | core/src/main/scala/edu.nccu.plsm.geo/dsl/DSL.scala | Scala | apache-2.0 | 2,966 |
package so.blacklight.swarm.smtp
import akka.actor.{Actor, ActorRef, Props}
import akka.event.Logging
import so.blacklight.swarm.mail.{Address, Email, Envelope}
/**
* Impelments the SMTP protocol by supervising the lower level client session and deciding
* what replies to send to the clients' requests.
*
* @param clientSession client session
*/
class SMTPServerProtocol(clientSession: ActorRef, connector: ActorRef) extends Actor {

  import context._

  val logger = Logging(context.system, this)

  // Envelope under construction for the current SMTP transaction; populated by
  // MAIL FROM / RCPT TO and converted to a full Envelope when DATA completes.
  // NOTE(review): this is never reset after a successful DATA, so a second
  // MAIL FROM on the same connection would hit "sender already defined" — confirm
  // whether multiple transactions per connection are intended to be supported.
  var tempEnvelope = new PartialEnvelope

  /**
   * This is the initial stage of mail processing, switches to "expect EHLO" mode immediately
   * after issuing the server greeting
   */
  override def receive: Receive = {
    case greeting @ SMTPServerServiceReady(_) =>
      clientSession ! greeting
      become(expectEhlo)
    case ClientDisconnected =>
      logger.warning("Client disconnected unexpectedly")
      sender() ! ClientDisconnected
  }

  // State after the greeting: only EHLO advances the session; QUIT/NOOP are serviced,
  // any other SMTP command is out of sequence.
  def expectEhlo: PartialFunction[Any, Unit] = {
    case SMTPClientEhlo(hostId) =>
      sender() ! processEhlo(hostId)
      become(expectEmail)
    case SMTPClientQuit =>
      sender() ! SMTPServerQuit
    case SMTPClientNoOperation =>
      sender() ! SMTPServerOk
    case ClientDisconnected =>
      logger.warning("Client disconnected unexpectedly")
      sender() ! ClientDisconnected
      unbecome()
    case _: SMTPClientCommand =>
      sender() ! SMTPServerBadSequence
    case unknownMessage =>
      logger.warning(s"Received unknown event: $unknownMessage")
      sender() ! SMTPServerSyntaxError
  }

  // State after EHLO: accepts the MAIL FROM / RCPT TO / DATA transaction commands.
  // RSET and disconnect fall back to the previous behaviour via unbecome().
  def expectEmail: PartialFunction[Any, Unit] = {
    case SMTPClientMailFrom(mailFrom) =>
      sender() ! processMailFrom(mailFrom)
    case SMTPClientReceiptTo(recipient) =>
      sender() ! processReceiptTo(recipient)
    case SMTPClientDataBegin =>
      sender() ! processDataRequest
    case SMTPClientDataEnd(msg) =>
      sender() ! processDataSent(msg)
    case SMTPClientReset =>
      sender() ! processReset
      unbecome()
    case SMTPClientQuit =>
      sender() ! SMTPServerQuit
    case ClientDisconnected =>
      logger.warning("Client disconnected unexpectedly")
      sender() ! ClientDisconnected
      unbecome()
    case unknownMessage =>
      logger.warning(s"Received unknown event: $unknownMessage")
      sender() ! SMTPServerSyntaxError
  }

  // Reply to EHLO with the advertised capability list.
  private def processEhlo(hostname: String): SMTPServerEvent = {
    logger.info(s"Received client connection from: $hostname")
    SMTPServerEhlo(Array("THIS", "THAT"))
  }

  // Validate and record the envelope sender; rejects a second MAIL FROM.
  private def processMailFrom(sender: String): SMTPServerEvent = {
    Address(sender) match {
      case Left(error) =>
        logger.warning(s"Invalid sender address: $error")
        SMTPServerInvalidParameter
      case Right(address) =>
        tempEnvelope.sender match {
          case Some(_) =>
            logger.error("A sender address has already been defined")
            SMTPServerBadSequence
          case None =>
            tempEnvelope.setSender(address)
            logger.info(s"Sender: $address")
            SMTPServerOk
        }
    }
  }

  // Validate and add a recipient; RCPT TO is only valid after MAIL FROM.
  private def processReceiptTo(recipient: String): SMTPServerEvent = {
    Address(recipient) match {
      case Left(error) =>
        logger.warning(s"Invalid recipient address: $error")
        SMTPServerInvalidParameter
      case Right(address) =>
        tempEnvelope.sender match {
          case Some(_) =>
            tempEnvelope.addRecipient(address)
            logger.info(s"Recipient: $address")
            SMTPServerOk
          case None =>
            logger.error("Invalid command sequence")
            SMTPServerBadSequence
        }
    }
  }

  // DATA is only allowed once both a sender and at least one recipient exist.
  private def processDataRequest: SMTPServerEvent = {
    if (tempEnvelope.isComplete()) {
      logger.info("Received DATA request")
      SMTPServerDataReady
    } else {
      SMTPServerBadSequence
    }
  }

  // Seal the envelope, build the email and hand it to the connector for delivery.
  private def processDataSent(msg: Array[Char]): SMTPServerEvent = {
    tempEnvelope.toEnvelope() match {
      case Right(envelope) =>
        Email(envelope, msg) match {
          case Left(_) =>
            // TODO find a better error, although this is not expected to happen too often
            SMTPServerSyntaxError
          case Right(email) =>
            connector ! ReceivedMessage(email)
            SMTPServerOk
        }
      case Left(error) =>
        logger.error(error)
        SMTPServerBadSequence
    }
  }

  // RSET: discard any partially collected envelope state.
  private def processReset: SMTPServerEvent = {
    tempEnvelope.reset
    SMTPServerOk
  }
}
/** Factory helpers for [[SMTPServerProtocol]] actors. */
object SMTPServerProtocol {
  /** Props for a protocol actor bound to the given client session and connector. */
  def props(clientSession: ActorRef, connector: ActorRef): Props =
    Props(new SMTPServerProtocol(clientSession, connector))
}
/**
 * Mutable builder that accumulates the sender and the recipients of an SMTP
 * envelope while a client transaction is in progress.
 */
class PartialEnvelope {

  var sender: Option[Address] = None
  var recipients: List[Address] = List()

  /** Discard any collected sender/recipients and return this builder. */
  def reset: PartialEnvelope = {
    sender = None
    recipients = Nil
    this
  }

  /** Record the envelope sender (wrapped in Option, so a null becomes None). */
  def setSender(sender: Address): PartialEnvelope = {
    this.sender = Option(sender)
    this
  }

  /** Append a recipient, preserving the order in which recipients were received. */
  def addRecipient(recipient: Address): PartialEnvelope = {
    recipients = recipients :+ recipient
    this
  }

  /** Seal the builder into an immutable Envelope, failing if no sender was set. */
  def toEnvelope(): Either[String, Envelope] = sender match {
    case Some(address) => Right(Envelope(address, recipients))
    case None => Left("Invalid envelope: sender missing")
  }

  def hasSender(): Boolean = sender.isDefined

  def hasRecipient(): Boolean = recipients.nonEmpty

  /** A complete envelope needs a sender and at least one recipient. */
  def isComplete(): Boolean = hasSender() && hasRecipient()
}
| xea/swarm-msg | src/main/scala/so/blacklight/swarm/smtp/SMTPServerProtocol.scala | Scala | apache-2.0 | 5,035 |
package is.hail.types.virtual
import is.hail.annotations.{Annotation, ExtendedOrdering}
import is.hail.check.Gen
import is.hail.types.physical.PSet
import is.hail.utils._
import org.json4s.jackson.JsonMethods
import scala.reflect.{ClassTag, classTag}
/** Virtual type of sets whose elements have type `elementType`. */
final case class TSet(elementType: Type) extends TContainer {

  def _toPretty = s"Set[$elementType]"

  override def pyString(sb: StringBuilder): Unit = {
    sb.append("set<")
    elementType.pyString(sb)
    sb.append('>')
  }

  // Two set types are comparable exactly when their element types are.
  override def canCompare(other: Type): Boolean = other match {
    case TSet(otherElement) => elementType.canCompare(otherElement)
    case _ => false
  }

  override def unify(concrete: Type): Boolean = concrete match {
    case TSet(concreteElement) => elementType.unify(concreteElement)
    case _ => false
  }

  override def subst() = TSet(elementType.subst())

  // A value is valid iff it is a Set whose members all type-check as elements.
  def _typeCheck(a: Any): Boolean = a match {
    case s: Set[_] => s.forall(elementType.typeCheck)
    case _ => false
  }

  override def _pretty(sb: StringBuilder, indent: Int, compact: Boolean = false): Unit = {
    sb.append("Set[")
    elementType.pretty(sb, indent, compact)
    sb.append("]")
  }

  override lazy val ordering: ExtendedOrdering = mkOrdering()

  override def mkOrdering(missingEqual: Boolean): ExtendedOrdering =
    ExtendedOrdering.setOrdering(elementType.ordering, missingEqual)

  override def _showStr(a: Annotation): String =
    a.asInstanceOf[Set[Annotation]]
      .map(elt => elementType.showStr(elt))
      .mkString("{", ",", "}")

  override def str(a: Annotation): String = JsonMethods.compact(toJSON(a))

  override def genNonmissingValue: Gen[Annotation] = Gen.buildableOf[Set](elementType.genValue)

  override def scalaClassTag: ClassTag[Set[AnyRef]] = classTag[Set[AnyRef]]

  // Subsetting a set requires the subtype's element type to be identical,
  // in which case the value itself is unchanged.
  override def valueSubsetter(subtype: Type): Any => Any = {
    assert(elementType == subtype.asInstanceOf[TSet].elementType)
    identity
  }

  override def arrayElementsRepr: TArray = TArray(elementType)
}
| hail-is/hail | hail/src/main/scala/is/hail/types/virtual/TSet.scala | Scala | mit | 1,965 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.io.File
import java.util.{Locale, TimeZone}
import org.apache.commons.io.FileUtils
import org.scalatest.Assertions
import org.apache.spark.{SparkEnv, SparkException}
import org.apache.spark.rdd.BlockRDD
import org.apache.spark.sql.{AnalysisException, DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.plans.logical.Aggregate
import org.apache.spark.sql.catalyst.util.DateTimeConstants._
import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode}
import org.apache.spark.sql.execution.exchange.Exchange
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.sources.MemorySink
import org.apache.spark.sql.execution.streaming.state.StreamingAggregationStateManager
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.OutputMode._
import org.apache.spark.sql.streaming.util.{MockSourceProvider, StreamManualClock}
import org.apache.spark.sql.types.StructType
import org.apache.spark.storage.{BlockId, StorageLevel, TestBlockId}
import org.apache.spark.util.Utils
object FailureSingleton {
  // Set to true by a test before starting a stream and flipped to false when the
  // injected failure fires, so the failure happens exactly once per test run.
  var firstTime = true
}
class StreamingAggregationSuite extends StateStoreMetricsTest with Assertions {
import testImplicits._
/**
 * Runs `func` with the given SQL conf pairs plus the streaming aggregation state
 * format version pinned to `stateVersion`.
 */
def executeFuncWithStateVersionSQLConf(
    stateVersion: Int,
    confPairs: Seq[(String, String)],
    func: => Any): Unit = {
  val allConfs = confPairs :+
    (SQLConf.STREAMING_AGGREGATION_STATE_FORMAT_VERSION.key -> stateVersion.toString)
  withSQLConf(allConfs: _*) {
    func
  }
}
/** Registers `func` as a test once per supported aggregation state format version. */
def testWithAllStateVersions(name: String, confPairs: (String, String)*)
  (func: => Any): Unit = {
  StreamingAggregationStateManager.supportedVersions.foreach { version =>
    test(s"$name - state format version $version") {
      executeFuncWithStateVersionSQLConf(version, confPairs, func)
    }
  }
}
/** Like testWithAllStateVersions but suppresses noisy log output via testQuietly. */
def testQuietlyWithAllStateVersions(name: String, confPairs: (String, String)*)
  (func: => Any): Unit = {
  StreamingAggregationStateManager.supportedVersions.foreach { version =>
    testQuietly(s"$name - state format version $version") {
      executeFuncWithStateVersionSQLConf(version, confPairs, func)
    }
  }
}
// Per-key counts in Update mode: each batch emits only keys whose counts changed,
// and counts survive a stop/restart of the stream.
testWithAllStateVersions("simple count, update mode") {
  val inputData = MemoryStream[Int]
  val aggregated =
    inputData.toDF()
      .groupBy($"value")
      .agg(count("*"))
      .as[(Int, Long)]
  testStream(aggregated, Update)(
    AddData(inputData, 3),
    CheckLastBatch((3, 1)),
    AddData(inputData, 3, 2),
    CheckLastBatch((3, 2), (2, 1)),
    StopStream,
    StartStream(),
    AddData(inputData, 3, 2, 1),
    CheckLastBatch((3, 3), (2, 2), (1, 1)),
    // By default we run in new tuple mode.
    AddData(inputData, 4, 4, 4, 4),
    CheckLastBatch((4, 4))
  )
}
// Distinct counting via collect_set over exploded array values.
testWithAllStateVersions("count distinct") {
  val inputData = MemoryStream[(Int, Seq[Int])]
  val aggregated =
    inputData.toDF()
      .select($"*", explode($"_2") as 'value)
      .groupBy($"_1")
      .agg(size(collect_set($"value")))
      .as[(Int, Int)]
  testStream(aggregated, Update)(
    AddData(inputData, (1, Seq(1, 2))),
    CheckLastBatch((1, 2))
  )
}
// Complete mode re-emits the full result table every batch.
testWithAllStateVersions("simple count, complete mode") {
  val inputData = MemoryStream[Int]
  val aggregated =
    inputData.toDF()
      .groupBy($"value")
      .agg(count("*"))
      .as[(Int, Long)]
  testStream(aggregated, Complete)(
    AddData(inputData, 3),
    CheckLastBatch((3, 1)),
    AddData(inputData, 2),
    CheckLastBatch((3, 1), (2, 1)),
    StopStream,
    StartStream(),
    AddData(inputData, 3, 2, 1),
    CheckLastBatch((3, 2), (2, 2), (1, 1)),
    AddData(inputData, 4, 4, 4, 4),
    CheckLastBatch((4, 4), (3, 2), (2, 2), (1, 1))
  )
}
// Append mode is unsupported for aggregations without a watermark: expect an
// AnalysisException mentioning append/not supported.
testWithAllStateVersions("simple count, append mode") {
  val inputData = MemoryStream[Int]
  val aggregated =
    inputData.toDF()
      .groupBy($"value")
      .agg(count("*"))
      .as[(Int, Long)]
  val e = intercept[AnalysisException] {
    testStream(aggregated, Append)()
  }
  Seq("append", "not supported").foreach { m =>
    assert(e.getMessage.toLowerCase(Locale.ROOT).contains(m.toLowerCase(Locale.ROOT)))
  }
}
// Sorting the complete-mode output is allowed; results are checked as sorted.
testWithAllStateVersions("sort after aggregate in complete mode") {
  val inputData = MemoryStream[Int]
  val aggregated =
    inputData.toDF()
      .groupBy($"value")
      .agg(count("*"))
      .toDF("value", "count")
      .orderBy($"count".desc)
      .as[(Int, Long)]
  testStream(aggregated, Complete)(
    AddData(inputData, 3),
    CheckLastBatch(isSorted = true, (3, 1)),
    AddData(inputData, 2, 3),
    CheckLastBatch(isSorted = true, (3, 2), (2, 1)),
    StopStream,
    StartStream(),
    AddData(inputData, 3, 2, 1),
    CheckLastBatch(isSorted = true, (3, 3), (2, 2), (1, 1)),
    AddData(inputData, 4, 4, 4, 4),
    CheckLastBatch(isSorted = true, (4, 4), (3, 3), (2, 2), (1, 1))
  )
}
// Verifies state-store metrics (rows updated/total, sink rows) across batches with
// and without data in append mode with a watermark.
testWithAllStateVersions("state metrics - append mode") {
  val inputData = MemoryStream[Int]
  val aggWithWatermark = inputData.toDF()
    .withColumn("eventTime", timestamp_seconds($"value"))
    .withWatermark("eventTime", "10 seconds")
    .groupBy(window($"eventTime", "5 seconds") as 'window)
    .agg(count("*") as 'count)
    .select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
  implicit class RichStreamExecution(query: StreamExecution) {
    // this could be either empty row batch or actual batch
    def stateNodes: Seq[SparkPlan] = {
      query.lastExecution.executedPlan.collect {
        case p if p.isInstanceOf[StateStoreSaveExec] => p
      }
    }
    // Pick the latest progress that actually ran a batch
    def lastExecutedBatch: StreamingQueryProgress = {
      query.recentProgress.filter(_.durationMs.containsKey("addBatch")).last
    }
    def stateOperatorProgresses: Seq[StateOperatorProgress] = {
      lastExecutedBatch.stateOperators
    }
  }
  val clock = new StreamManualClock()
  testStream(aggWithWatermark)(
    // batchId 0
    AddData(inputData, 15),
    StartStream(Trigger.ProcessingTime("interval 1 second"), clock),
    CheckAnswer(), // watermark = 0
    AssertOnQuery { _.stateNodes.size === 1 },
    AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 0 },
    AssertOnQuery { _.stateOperatorProgresses.head.numRowsUpdated === 1 },
    AssertOnQuery { _.stateOperatorProgresses.head.numRowsTotal === 1 },
    AssertOnQuery { _.lastExecutedBatch.sink.numOutputRows == 0 },
    // batchId 1 without data
    AdvanceManualClock(1000L), // watermark = 5
    Execute { q => // wait for the no data batch to complete
      eventually(timeout(streamingTimeout)) { assert(q.lastProgress.batchId === 1) }
    },
    CheckAnswer(),
    AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 0 },
    AssertOnQuery { _.stateOperatorProgresses.head.numRowsUpdated === 0 },
    AssertOnQuery { _.stateOperatorProgresses.head.numRowsTotal === 1 },
    AssertOnQuery { _.lastExecutedBatch.sink.numOutputRows == 0 },
    // batchId 2 with data
    AddData(inputData, 10, 12, 14),
    AdvanceManualClock(1000L), // watermark = 5
    CheckAnswer(),
    AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 0 },
    AssertOnQuery { _.stateOperatorProgresses.head.numRowsUpdated === 1 },
    AssertOnQuery { _.stateOperatorProgresses.head.numRowsTotal === 2 },
    AssertOnQuery { _.lastExecutedBatch.sink.numOutputRows == 0 },
    // batchId 3 with data
    AddData(inputData, 25),
    AdvanceManualClock(1000L), // watermark = 5
    CheckAnswer(),
    AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 0 },
    AssertOnQuery { _.stateOperatorProgresses.head.numRowsUpdated === 1 },
    AssertOnQuery { _.stateOperatorProgresses.head.numRowsTotal === 3 },
    AssertOnQuery { _.lastExecutedBatch.sink.numOutputRows == 0 },
    // batchId 4 without data
    AdvanceManualClock(1000L), // watermark = 15
    Execute { q => // wait for the no data batch to complete
      eventually(timeout(streamingTimeout)) { assert(q.lastProgress.batchId === 4) }
    },
    CheckAnswer((10, 3)),
    AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 1 },
    AssertOnQuery { _.stateOperatorProgresses.head.numRowsUpdated === 0 },
    AssertOnQuery { _.stateOperatorProgresses.head.numRowsTotal === 2 },
    AssertOnQuery { _.lastExecutedBatch.sink.numOutputRows == 1 }
  )
}
// State metrics in Update and Complete modes, checked via the plan's
// StateStoreSaveExec metrics after each batch.
testWithAllStateVersions("state metrics - update/complete mode") {
  val inputData = MemoryStream[Int]
  val aggregated =
    inputData.toDS()
      .flatMap(x => Seq(x, x + 1))
      .toDF("value")
      .groupBy($"value")
      .agg(count("*"))
      .as[(Int, Long)]
  implicit class RichStreamExecution(query: StreamExecution) {
    def stateNodes: Seq[SparkPlan] = {
      query.lastExecution.executedPlan.collect {
        case p if p.isInstanceOf[StateStoreSaveExec] => p
      }
    }
  }
  // Test with Update mode
  testStream(aggregated, Update)(
    AddData(inputData, 1),
    CheckLastBatch((1, 1), (2, 1)),
    AssertOnQuery { _.stateNodes.size === 1 },
    AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 2 },
    AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 2 },
    AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 2 },
    AddData(inputData, 2, 3),
    CheckLastBatch((2, 2), (3, 2), (4, 1)),
    AssertOnQuery { _.stateNodes.size === 1 },
    AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 3 },
    AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 3 },
    AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 4 }
  )
  // Test with Complete mode
  inputData.reset()
  testStream(aggregated, Complete)(
    AddData(inputData, 1),
    CheckLastBatch((1, 1), (2, 1)),
    AssertOnQuery { _.stateNodes.size === 1 },
    AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 2 },
    AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 2 },
    AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 2 },
    AddData(inputData, 2, 3),
    CheckLastBatch((1, 1), (2, 2), (3, 2), (4, 1)),
    AssertOnQuery { _.stateNodes.size === 1 },
    AssertOnQuery { _.stateNodes.head.metrics("numOutputRows").value === 4 },
    AssertOnQuery { _.stateNodes.head.metrics("numUpdatedStateRows").value === 3 },
    AssertOnQuery { _.stateNodes.head.metrics("numTotalStateRows").value === 4 }
  )
}
// Grouping by multiple expressions derived from the same column.
testWithAllStateVersions("multiple keys") {
  val inputData = MemoryStream[Int]
  val aggregated =
    inputData.toDF()
      .groupBy($"value", $"value" + 1)
      .agg(count("*"))
      .as[(Int, Int, Long)]
  testStream(aggregated, Update)(
    AddData(inputData, 1, 2),
    CheckLastBatch((1, 2, 1), (2, 3, 1)),
    AddData(inputData, 1, 2),
    CheckLastBatch((1, 2, 2), (2, 3, 2))
  )
}
testWithAllStateVersions("SPARK-29438: ensure UNION doesn't lead streaming aggregation to use" +
  " shifted partition IDs") {
  def constructUnionDf(desiredPartitionsForInput1: Int)
    : (MemoryStream[Int], MemoryStream[Int], DataFrame) = {
    val input1 = MemoryStream[Int](desiredPartitionsForInput1)
    val input2 = MemoryStream[Int]
    val df1 = input1.toDF()
      .select($"value", $"value" + 1)
    val df2 = input2.toDF()
      .groupBy($"value")
      .agg(count("*"))
    // Unioned DF would have columns as (Int, Int)
    (input1, input2, df1.union(df2))
  }
  withTempDir { checkpointDir =>
    val (input1, input2, unionDf) = constructUnionDf(2)
    testStream(unionDf, Update)(
      StartStream(checkpointLocation = checkpointDir.getAbsolutePath),
      MultiAddData(input1, 11, 12)(input2, 21, 22),
      CheckNewAnswer(Row(11, 12), Row(12, 13), Row(21, 1), Row(22, 1)),
      StopStream
    )
    // We're restoring the query with different number of partitions in left side of UNION,
    // which may lead right side of union to have mismatched partition IDs (e.g. if it relies on
    // TaskContext.partitionId()). This test will verify streaming aggregation doesn't have
    // such issue.
    val (newInput1, newInput2, newUnionDf) = constructUnionDf(3)
    newInput1.addData(11, 12)
    newInput2.addData(21, 22)
    testStream(newUnionDf, Update)(
      StartStream(checkpointLocation = checkpointDir.getAbsolutePath),
      MultiAddData(newInput1, 13, 14)(newInput2, 22, 23),
      CheckNewAnswer(Row(13, 14), Row(14, 15), Row(22, 2), Row(23, 1))
    )
  }
}
// Injects a failure mid-batch (via FailureSingleton) and verifies the batch is
// fully retried on restart with correct results.
testQuietlyWithAllStateVersions("midbatch failure") {
  val inputData = MemoryStream[Int]
  FailureSingleton.firstTime = true
  val aggregated =
    inputData.toDS()
      .map { i =>
        if (i == 4 && FailureSingleton.firstTime) {
          FailureSingleton.firstTime = false
          sys.error("injected failure")
        }
        i
      }
      .groupBy($"value")
      .agg(count("*"))
      .as[(Int, Long)]
  testStream(aggregated, Update)(
    StartStream(),
    AddData(inputData, 1, 2, 3, 4),
    ExpectFailure[SparkException](),
    StartStream(),
    CheckLastBatch((1, 1), (2, 1), (3, 1), (4, 1))
  )
}
// Filtering complete-mode results by current_timestamp: keys older than 10 seconds
// relative to the (manual) batch clock are pruned; restart reuses the batch timestamp.
testWithAllStateVersions("prune results by current_time, complete mode") {
  import testImplicits._
  val clock = new StreamManualClock
  val inputData = MemoryStream[Long]
  val aggregated =
    inputData.toDF()
      .groupBy($"value")
      .agg(count("*"))
      .where('value >= current_timestamp().cast("long") - 10L)
  testStream(aggregated, Complete)(
    StartStream(Trigger.ProcessingTime("10 seconds"), triggerClock = clock),
    // advance clock to 10 seconds, all keys retained
    AddData(inputData, 0L, 5L, 5L, 10L),
    AdvanceManualClock(10 * 1000),
    CheckLastBatch((0L, 1), (5L, 2), (10L, 1)),
    // advance clock to 20 seconds, should retain keys >= 10
    AddData(inputData, 15L, 15L, 20L),
    AdvanceManualClock(10 * 1000),
    CheckLastBatch((10L, 1), (15L, 2), (20L, 1)),
    // advance clock to 30 seconds, should retain keys >= 20
    AddData(inputData, 0L, 85L),
    AdvanceManualClock(10 * 1000),
    CheckLastBatch((20L, 1), (85L, 1)),
    // bounce stream and ensure correct batch timestamp is used
    // i.e., we don't take it from the clock, which is at 90 seconds.
    StopStream,
    AssertOnQuery { q => // clear the sink
      q.sink.asInstanceOf[MemorySink].clear()
      q.commitLog.purge(3)
      // advance by a minute i.e., 90 seconds total
      clock.advance(60 * 1000L)
      true
    },
    StartStream(Trigger.ProcessingTime("10 seconds"), triggerClock = clock),
    // The commit log blown, causing the last batch to re-run
    CheckLastBatch((20L, 1), (85L, 1)),
    AssertOnQuery { q =>
      clock.getTimeMillis() == 90000L
    },
    // advance clock to 100 seconds, should retain keys >= 90
    AddData(inputData, 85L, 90L, 100L, 105L),
    AdvanceManualClock(10 * 1000),
    CheckLastBatch((90L, 1), (100L, 1), (105L, 1))
  )
}
// Same pruning behaviour but with current_date and day-sized manual clock advances.
testWithAllStateVersions("prune results by current_date, complete mode") {
  import testImplicits._
  val clock = new StreamManualClock
  val tz = TimeZone.getDefault.getID
  val inputData = MemoryStream[Long]
  val aggregated =
    inputData.toDF()
      .select(to_utc_timestamp(from_unixtime('value * SECONDS_PER_DAY), tz))
      .toDF("value")
      .groupBy($"value")
      .agg(count("*"))
      .where($"value".cast("date") >= date_sub(current_date(), 10))
      .select(($"value".cast("long") / SECONDS_PER_DAY).cast("long"), $"count(1)")
  testStream(aggregated, Complete)(
    StartStream(Trigger.ProcessingTime("10 day"), triggerClock = clock),
    // advance clock to 10 days, should retain all keys
    AddData(inputData, 0L, 5L, 5L, 10L),
    AdvanceManualClock(MILLIS_PER_DAY * 10),
    CheckLastBatch((0L, 1), (5L, 2), (10L, 1)),
    // advance clock to 20 days, should retain keys >= 10
    AddData(inputData, 15L, 15L, 20L),
    AdvanceManualClock(MILLIS_PER_DAY * 10),
    CheckLastBatch((10L, 1), (15L, 2), (20L, 1)),
    // advance clock to 30 days, should retain keys >= 20
    AddData(inputData, 85L),
    AdvanceManualClock(MILLIS_PER_DAY * 10),
    CheckLastBatch((20L, 1), (85L, 1)),
    // bounce stream and ensure correct batch timestamp is used
    // i.e., we don't take it from the clock, which is at 90 days.
    StopStream,
    AssertOnQuery { q => // clear the sink
      q.sink.asInstanceOf[MemorySink].clear()
      q.commitLog.purge(3)
      // advance by 60 days i.e., 90 days total
      clock.advance(MILLIS_PER_DAY * 60)
      true
    },
    StartStream(Trigger.ProcessingTime("10 day"), triggerClock = clock),
    // Commit log blown, causing a re-run of the last batch
    CheckLastBatch((20L, 1), (85L, 1)),
    // advance clock to 100 days, should retain keys >= 90
    AddData(inputData, 85L, 90L, 100L, 105L),
    AdvanceManualClock(MILLIS_PER_DAY * 10),
    CheckLastBatch((90L, 1), (100L, 1), (105L, 1))
  )
}
// A batch-side aggregation joined into a streaming query must stay a batch aggregate.
testWithAllStateVersions("SPARK-19690: do not convert batch aggregation in streaming query " +
  "to streaming") {
  val streamInput = MemoryStream[Int]
  val batchDF = Seq(1, 2, 3, 4, 5)
    .toDF("value")
    .withColumn("parity", 'value % 2)
    .groupBy('parity)
    .agg(count("*") as 'joinValue)
  val joinDF = streamInput
    .toDF()
    .join(batchDF, 'value === 'parity)
  // make sure we're planning an aggregate in the first place
  assert(batchDF.queryExecution.optimizedPlan match { case _: Aggregate => true })
  testStream(joinDF, Append)(
    AddData(streamInput, 0, 1, 2, 3),
    CheckLastBatch((0, 0, 2), (1, 1, 3)),
    AddData(streamInput, 0, 1, 2, 3),
    CheckLastBatch((0, 0, 2), (1, 1, 3)))
}
/**
 * This method verifies certain properties in the SparkPlan of a streaming aggregation.
 * First of all, it checks that the child of a `StateStoreRestoreExec` creates the desired
 * data distribution, where the child could be an Exchange, or a `HashAggregateExec` which already
 * provides the expected data distribution.
 *
 * The second thing it checks is that the child provides the expected number of partitions.
 *
 * The third thing it checks is that we don't add an unnecessary shuffle in-between
 * `StateStoreRestoreExec` and `StateStoreSaveExec`.
 */
private def checkAggregationChain(
    se: StreamExecution,
    expectShuffling: Boolean,
    expectedPartition: Int): Boolean = {
  val executedPlan = se.lastExecution.executedPlan
  // Locate the (single expected) restore node in the executed plan.
  val restore = executedPlan
    .collect { case ss: StateStoreRestoreExec => ss }
    .head
  restore.child match {
    case node: UnaryExecNode =>
      assert(node.outputPartitioning.numPartitions === expectedPartition,
        "Didn't get the expected number of partitions.")
      if (expectShuffling) {
        assert(node.isInstanceOf[Exchange], s"Expected a shuffle, got: ${node.child}")
      } else {
        assert(!node.isInstanceOf[Exchange], "Didn't expect a shuffle")
      }
    case _ => fail("Expected no shuffling")
  }
  var reachedRestore = false
  // Check that there should be no exchanges after `StateStoreRestoreExec`
  executedPlan.foreachUp { p =>
    if (reachedRestore) {
      assert(!p.isInstanceOf[Exchange], "There should be no further exchanges")
    } else {
      reachedRestore = p.isInstanceOf[StateStoreRestoreExec]
    }
  }
  true
}
testWithAllStateVersions("SPARK-21977: coalesce(1) with 0 partition RDD should be " +
"repartitioned to 1") {
val inputSource = new BlockRDDBackedSource(spark)
MockSourceProvider.withMockSources(inputSource) {
// `coalesce(1)` changes the partitioning of data to `SinglePartition` which by default
// satisfies the required distributions of all aggregations. Therefore in our SparkPlan, we
// don't have any shuffling. However, `coalesce(1)` only guarantees that the RDD has at most 1
// partition. Which means that if we have an input RDD with 0 partitions, nothing gets
// executed. Therefore the StateStore's don't save any delta files for a given trigger. This
// then leads to `FileNotFoundException`s in the subsequent batch.
// This isn't the only problem though. Once we introduce a shuffle before
// `StateStoreRestoreExec`, the input to the operator is an empty iterator. When performing
// `groupBy().agg(...)`, `HashAggregateExec` returns a `0` value for all aggregations. If
// we fail to restore the previous state in `StateStoreRestoreExec`, we save the 0 value in
// `StateStoreSaveExec` losing all previous state.
val aggregated: Dataset[Long] =
spark.readStream.format((new MockSourceProvider).getClass.getCanonicalName)
.load().coalesce(1).groupBy().count().as[Long]
testStream(aggregated, Complete())(
AddBlockData(inputSource, Seq(1)),
CheckLastBatch(1),
AssertOnQuery("Verify no shuffling") { se =>
checkAggregationChain(se, expectShuffling = false, 1)
},
AddBlockData(inputSource), // create an empty trigger
CheckLastBatch(1),
AssertOnQuery("Verify that no exchange is required") { se =>
checkAggregationChain(se, expectShuffling = false, 1)
},
AddBlockData(inputSource, Seq(2, 3)),
CheckLastBatch(3),
AddBlockData(inputSource),
CheckLastBatch(3),
StopStream
)
}
}
testWithAllStateVersions("SPARK-21977: coalesce(1) with aggregation should still be " +
"repartitioned when it has non-empty grouping keys") {
val inputSource = new BlockRDDBackedSource(spark)
MockSourceProvider.withMockSources(inputSource) {
withTempDir { tempDir =>
// `coalesce(1)` changes the partitioning of data to `SinglePartition` which by default
// satisfies the required distributions of all aggregations. However, when we have
// non-empty grouping keys, in streaming, we must repartition to
// `spark.sql.shuffle.partitions`, otherwise only a single StateStore is used to process
// all keys. This may be fine, however, if the user removes the coalesce(1) or changes to
// a `coalesce(2)` for example, then the default behavior is to shuffle to
// `spark.sql.shuffle.partitions` many StateStores. When this happens, all StateStore's
// except 1 will be missing their previous delta files, which causes the stream to fail
// with FileNotFoundException.
def createDf(partitions: Int): Dataset[(Long, Long)] = {
spark.readStream
.format((new MockSourceProvider).getClass.getCanonicalName)
.load().coalesce(partitions).groupBy('a % 1).count().as[(Long, Long)]
}
testStream(createDf(1), Complete())(
StartStream(checkpointLocation = tempDir.getAbsolutePath),
AddBlockData(inputSource, Seq(1)),
CheckLastBatch((0L, 1L)),
AssertOnQuery("Verify addition of exchange operator") { se =>
checkAggregationChain(
se,
expectShuffling = true,
spark.sessionState.conf.numShufflePartitions)
},
StopStream
)
testStream(createDf(2), Complete())(
StartStream(checkpointLocation = tempDir.getAbsolutePath),
Execute(se => se.processAllAvailable()),
AddBlockData(inputSource, Seq(2), Seq(3), Seq(4)),
CheckLastBatch((0L, 4L)),
AssertOnQuery("Verify no exchange added") { se =>
checkAggregationChain(
se,
expectShuffling = false,
spark.sessionState.conf.numShufflePartitions)
},
AddBlockData(inputSource),
CheckLastBatch((0L, 4L)),
StopStream
)
}
}
}
testWithAllStateVersions("SPARK-22230: last should change with new batches") {
val input = MemoryStream[Int]
val aggregated = input.toDF().agg(last('value))
testStream(aggregated, OutputMode.Complete())(
AddData(input, 1, 2, 3),
CheckLastBatch(3),
AddData(input, 4, 5, 6),
CheckLastBatch(6),
AddData(input),
CheckLastBatch(6),
AddData(input, 0),
CheckLastBatch(0)
)
}
// SPARK-23004 regression test. Shuffle partitions are forced to 1 through the
// SQLConf override so every record lands in a single post-shuffle partition.
testWithAllStateVersions("SPARK-23004: Ensure that TypedImperativeAggregate functions " +
  "do not throw errors", SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
  // See the JIRA SPARK-23004 for more details. In short, this test reproduces the error
  // by ensuring the following.
  // - A streaming query with a streaming aggregation.
  // - Aggregation function 'collect_list' that is a subclass of TypedImperativeAggregate.
  // - Post shuffle partition has exactly 128 records (i.e. the threshold at which
  //   ObjectHashAggregateExec falls back to sort-based aggregation). This is done by having a
  //   micro-batch with 128 records that shuffle to a single partition.
  // This test throws the exact error reported in SPARK-23004 without the corresponding fix.
  val input = MemoryStream[Int]
  val df = input.toDF().toDF("value")
    .selectExpr("value as group", "value")
    .groupBy("group")
    .agg(collect_list("value"))
  testStream(df, outputMode = OutputMode.Update)(
    // Add exactly the sort-based-fallback-threshold number of records in one micro-batch.
    AddData(input, (1 to spark.sqlContext.conf.objectAggSortBasedFallbackThreshold): _*),
    AssertOnQuery { q =>
      // processAllAvailable() rethrows any exception hit while running the batch,
      // so reaching `true` means no error was thrown.
      q.processAllAvailable()
      true
    }
  )
}
// Recovery path: a checkpoint written by Spark 2.3.1 with state format version 1
// must keep using version 1 on restart, even when the conf requests version 2.
test("simple count, update mode - recovery from checkpoint uses state format version 1") {
  val inputData = MemoryStream[Int]
  val aggregated =
    inputData.toDF()
      .groupBy($"value")
      .agg(count("*"))
      .as[(Int, Long)]
  // Pre-generated checkpoint shipped as a test resource (see the note inside
  // testStream below for how it was produced).
  val resourceUri = this.getClass.getResource(
    "/structured-streaming/checkpoint-version-2.3.1-streaming-aggregate-state-format-1/").toURI
  val checkpointDir = Utils.createTempDir().getCanonicalFile
  // Copy the checkpoint to a temp dir to prevent changes to the original.
  // Not doing this will lead to the test passing on the first run, but fail subsequent runs.
  FileUtils.copyDirectory(new File(resourceUri), checkpointDir)
  // Re-feed the data used when the checkpoint was created so the memory
  // source's offsets line up with the recovered batches.
  inputData.addData(3)
  inputData.addData(3, 2)
  testStream(aggregated, Update)(
    // Explicitly request state format version 2; recovery must ignore it.
    StartStream(checkpointLocation = checkpointDir.getAbsolutePath,
      additionalConfs = Map(SQLConf.STREAMING_AGGREGATION_STATE_FORMAT_VERSION.key -> "2")),
    /*
      Note: The checkpoint was generated using the following input in Spark version 2.3.1
      AddData(inputData, 3),
      CheckLastBatch((3, 1)),
      AddData(inputData, 3, 2),
      CheckLastBatch((3, 2), (2, 1))
     */
    AddData(inputData, 3, 2, 1),
    CheckLastBatch((3, 3), (2, 2), (1, 1)),
    Execute { query =>
      // Verify state format = 1 on both the save and restore operators.
      val stateVersions = query.lastExecution.executedPlan.collect {
        case f: StateStoreSaveExec => f.stateFormatVersion
        case f: StateStoreRestoreExec => f.stateFormatVersion
      }
      // Exactly one save and one restore operator are expected in the plan.
      assert(stateVersions.size == 2)
      assert(stateVersions.forall(_ == 1))
    },
    // By default we run in new tuple mode.
    AddData(inputData, 4, 4, 4, 4),
    CheckLastBatch((4, 4))
  )
}
/** Add blocks of data to the `BlockRDDBackedSource`. */
case class AddBlockData(source: BlockRDDBackedSource, data: Seq[Int]*) extends AddData {
  override def addData(query: Option[StreamExecution]): (Source, Offset) = {
    // Each Seq becomes one block; the source's counter doubles as the offset,
    // so it advances even when `data` is empty (yielding a zero-block batch).
    source.addBlocks(data: _*)
    (source, LongOffset(source.counter))
  }
}
/**
 * A Streaming Source that is backed by a BlockRDD and that can create RDDs with 0 blocks at will.
 */
class BlockRDDBackedSource(spark: SparkSession) extends Source {
  // Monotonic counter used as the stream offset. It is bumped once per added
  // block and once more per addBlocks() call, so even a data-less call
  // produces a new offset (and therefore a zero-block batch).
  var counter = 0L
  private val blockMgr = SparkEnv.get.blockManager
  // Blocks accumulated since the last getBatch() call.
  private var blocks: Seq[BlockId] = Seq.empty

  def addBlocks(dataBlocks: Seq[Int]*): Unit = synchronized {
    dataBlocks.foreach { data =>
      // Store each Seq as its own in-memory block keyed by the current counter.
      val id = TestBlockId(counter.toString)
      blockMgr.putIterator(id, data.iterator, StorageLevel.MEMORY_ONLY)
      blocks ++= id :: Nil
      counter += 1
    }
    // Extra bump: guarantees the offset moves forward even with zero data
    // blocks — this is what lets the source emit 0-block RDDs on demand.
    counter += 1
  }

  override def getOffset: Option[Offset] = synchronized {
    if (counter == 0) None else Some(LongOffset(counter))
  }

  override def getBatch(start: Option[Offset], end: Offset): DataFrame = synchronized {
    val rdd = new BlockRDD[Int](spark.sparkContext, blocks.toArray)
      .map(i => InternalRow(i)) // we don't really care about the values in this test
    // Hand the accumulated blocks to exactly one batch, then reset.
    blocks = Seq.empty
    spark.internalCreateDataFrame(rdd, schema, isStreaming = true).toDF()
  }

  override def schema: StructType = MockSourceProvider.fakeSchema

  override def stop(): Unit = {
    // Remove every test block from the block manager on shutdown.
    blockMgr.getMatchingBlockIds(_.isInstanceOf[TestBlockId]).foreach(blockMgr.removeBlock(_))
  }
}
}
| shuangshuangwang/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala | Scala | apache-2.0 | 30,533 |
package com.yammer.metrics.experiments
import java.util.concurrent.TimeUnit
import com.yammer.metrics.reporting.ConsoleReporter
import com.yammer.metrics.scala.Instrumented
/**
 * Manual experiment: spins a daemon thread that increments two counters as
 * fast as possible while a console reporter prints metrics once a second.
 * Quits when the user hits return.
 */
object LongLivedRunner extends Instrumented {
  // Two named counters incremented at different rates so the console report
  // shows distinct, growing values.
  val counters = Seq("one", "two").map { s => s -> metrics.counter("counter", s) }.toMap

  def main(args: Array[String]) {
    ConsoleReporter.enable(1, TimeUnit.SECONDS)
    val thread = new Thread {
      override def run() {
        // Busy-loop forever; the daemon flag below lets the JVM exit anyway.
        // NOTE: the original code had a Thread.sleep(100) after this loop,
        // which was unreachable (the loop never terminates) and has been
        // removed as dead code.
        while (true) {
          counters("one") += 1
          counters("two") += 2
        }
      }
    }
    thread.setDaemon(true)
    thread.start()
    println("Hit return to quit")
    readLine()
  }
}
| sstone/metrics-scala | src/test/scala/com/yammer/metrics/experiments/LongLivedRunner.scala | Scala | apache-2.0 | 692 |
/*
* Copyright 2012-2013 Stephane Godbillon (@sgodbillon) and Zenexity
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactivemongo.utils
import scala.concurrent._
import scala.concurrent.duration._
object `package` {
  /**
   * Concatenates two arrays into a freshly allocated array.
   *
   * Uses `Array.copy` (which delegates to `System.arraycopy`) instead of
   * hand-rolled element loops: it is the standard-library fast path and
   * handles both primitive and reference arrays.
   *
   * @param a1 first array (copied to the front of the result)
   * @param a2 second array (copied after `a1`)
   * @return a new array of length `a1.length + a2.length`
   */
  def concat[T](a1: Array[T], a2: Array[T])(implicit m: Manifest[T]): Array[T] = {
    val result = new Array[T](a1.length + a2.length)
    Array.copy(a1, 0, result, 0, a1.length)
    Array.copy(a2, 0, result, a1.length, a2.length)
    result
  }

  /** Makes an option of the value matching the condition (both evaluated lazily). */
  def option[T](cond: => Boolean, value: => T): Option[T] = if (cond) Some(value) else None
}
/**
 * Enriches a [[scala.concurrent.Future]] with `mapEither`: maps the value
 * through a function returning `Either`, failing the future on `Left`.
 */
case class EitherMappableFuture[A](future: Future[A]) {
  /** `Right(b)` becomes a successful future of `b`; `Left(e)` fails the future with `e`. */
  def mapEither[E <: Throwable, B](f: A => Either[E, B])(implicit ec: ExecutionContext): Future[B] =
    future.flatMap { a =>
      f(a).fold(e => Future.failed(e), b => Future.successful(b))
    }
}
object EitherMappableFuture {
  /** Implicitly enriches any `Future[A]` with the `mapEither` operation. */
  implicit def futureToEitherMappable[A](future: Future[A]): EitherMappableFuture[A] =
    new EitherMappableFuture(future)
}
object ExtendedFutures {
  import akka.actor.{ ActorSystem, Scheduler }

  /**
   * Returns a future that completes (with Unit) `millis` milliseconds from
   * now, scheduled on the given actor system — a non-blocking delay.
   */
  def DelayedFuture(millis: Long, system: ActorSystem): Future[Unit] = {
    implicit val ec = system.dispatcher
    val done = Promise[Unit]()
    system.scheduler.scheduleOnce(millis.millis)(done.success(()))
    done.future
  }
} | sh1ng/ReactiveMongo | driver/src/main/scala/utils.scala | Scala | apache-2.0 | 2,057 |
package im.tox.antox.utils
import android.media.{AudioFormat, AudioRecord}
import android.util.Log
import im.tox.antox.tox.ToxSingleton
import rx.lang.scala.Observable
object CaptureAudio {

  val TAG = "im.tox.antox.utils.CaptureAudio"

  // Buffer size (in bytes) chosen by findAudioRecord(); referenced by the
  // commented-out capture code in sendAudio().
  var bufferSizeBytes: Int = _

  // NOTE(review): the real capture/encode path is commented out below, so this
  // currently only logs. Presumably kept as a stub until the AV call API is
  // wired up — confirm before relying on it.
  def sendAudio(callID: Int, audioRecord: AudioRecord, audioBitRate: Int): Unit = {
    /* var channels = 1
    var frameSize = (codecSettings.audio_frame_duration * codecSettings.audio_sample_rate) / 1000 * channels
    val buffer = Array.ofDim[Short](CaptureAudio.bufferSizeBytes)
    audioRecord.read(buffer, 0, CaptureAudio.bufferSizeBytes)
    val intBuffer = buffer.map(x => x: Int)
    val preparedBuffer = ToxSingleton.tox.avPrepareAudioFrame(callID,
      frameSize * 2, intBuffer, frameSize)
    ToxSingleton.tox.avSendAudio(callID, preparedBuffer) */
    Log.d("Mic", "Sending audio to:" + callID)
  }

  // Builds an Observable intended to pump mic audio into a call. With the
  // capture loop commented out it completes immediately without emitting.
  def makeObservable(callID: Integer, audioBitRate: Int): Observable[Boolean] = {
    Observable[Boolean](subscriber => {
      /* val mAudioRecord = findAudioRecord()
      mAudioRecord match {
        case Some(audioRecord) => {
          audioRecord.startRecording()
          while (!subscriber.isUnsubscribed) {
            try {
              sendAudio(callID, audioRecord, codecSettings)
            } catch {
              case e: Exception =>
                e.printStackTrace
                subscriber.onError(e)
            }
          }
          audioRecord.stop()
          audioRecord.release()
        }
        case None => Log.d(TAG, "Audio record: None!")
      } */
      subscriber.onCompleted()
    })
  }

  // Tries to initialize an AudioRecord at 48 kHz mono, 16-bit PCM. On success
  // it also stores the minimum buffer size in CaptureAudio.bufferSizeBytes.
  // Returns None if the device rejects the configuration or throws.
  private def findAudioRecord(): Option[AudioRecord] = {
    try {
      val audioFormat = AudioFormat.ENCODING_PCM_16BIT
      val channelConfig = AudioFormat.CHANNEL_IN_MONO
      val rate = 48000
      Log.d("CaptureAudio", "Attempting rate " + rate + "Hz, bits: " + audioFormat +
        ", channel: " +
        channelConfig)
      val bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat)
      if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
        // First argument is the raw audio source id 0 — presumably
        // MediaRecorder.AudioSource.DEFAULT; TODO confirm and use the constant.
        val recorder = new AudioRecord(0, rate, channelConfig, audioFormat,
          bufferSize)
        if (recorder.getState == AudioRecord.STATE_INITIALIZED) {
          CaptureAudio.bufferSizeBytes = bufferSize
          return Some(recorder)
        } else {
          None
        }
      } else {
        None
      }
    } catch {
      case e: Exception =>
        e.printStackTrace()
        None
    }
  }
}
| 0xPoly/Antox | app/src/main/scala/im/tox/antox/utils/CaptureAudio.scala | Scala | gpl-3.0 | 2,510 |
/*
* Copyright 2009-2016 DigitalGlobe, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*
*/
package org.mrgeo.mapalgebra.binarymath
import java.awt.image.DataBuffer
import org.mrgeo.mapalgebra.parser.{ParserException, ParserNode}
import org.mrgeo.mapalgebra.raster.RasterMapOp
import org.mrgeo.mapalgebra.{MapOp, MapOpRegistrar}
/** Registers the "&lt;" / "lt" map-algebra operators and constructs LessThanMapOp instances. */
object LessThanMapOp extends MapOpRegistrar {
  // Token names this operation is registered under in the map-algebra parser.
  override def register: Array[String] = {
    Array[String]("<", "lt")
  }

  /** raster &lt; constant */
  def create(raster:RasterMapOp, const:Double):MapOp = {
    new LessThanMapOp(Some(raster), Some(const))
  }

  /** rasterA &lt; rasterB (cell-wise) */
  def create(rasterA:RasterMapOp, rasterB:RasterMapOp):MapOp = {
    new LessThanMapOp(Some(rasterA), Some(rasterB))
  }

  // Parser entry point: builds the op from an expression-tree node.
  override def apply(node:ParserNode, variables: String => Option[ParserNode]): MapOp =
    new LessThanMapOp(node, variables)
}
/**
 * Cell-wise "less than": emits 1 where a &lt; b, otherwise 0.
 * The output raster uses byte cells with 255 reserved as nodata.
 */
class LessThanMapOp extends RawBinaryMathMapOp {

  private[binarymath] def this(raster: Option[RasterMapOp], paramB:Option[Any]) = {
    this()

    varA = raster

    // The second operand may be a raster or a numeric constant; every numeric
    // type is normalized to Double. Anything else is a parse error.
    paramB match {
      case Some(rasterB:RasterMapOp) => varB = Some(rasterB)
      case Some(double:Double) => constB = Some(double)
      case Some(int:Int) => constB = Some(int.toDouble)
      case Some(long:Long) => constB = Some(long.toDouble)
      case Some(float:Float) => constB = Some(float.toDouble)
      case Some(short:Short) => constB = Some(short.toDouble)
      case _ => throw new ParserException("Second term \"" + paramB + "\" is not a raster or constant")
    }
  }

  private[binarymath] def this(node:ParserNode, variables: String => Option[ParserNode]) = {
    this()
    initialize(node, variables)
  }

  // 1/0 indicator stored as bytes (see datatype()).
  override private[binarymath] def function(a: Double, b: Double): Double = if (a < b) 1 else 0

  override private[binarymath] def datatype():Int = { DataBuffer.TYPE_BYTE }

  // 255 marks nodata cells in the byte output.
  override private[binarymath] def nodata():Double = { 255 }
}
| ttislerdg/mrgeo | mrgeo-mapalgebra/mrgeo-mapalgebra-rastermath/src/main/scala/org/mrgeo/mapalgebra/binarymath/LessThanMapOp.scala | Scala | apache-2.0 | 2,350 |
package com.krux.hyperion.workflow
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import com.typesafe.config.ConfigFactory
import com.krux.hyperion.HyperionContext
import com.krux.hyperion.activity.ShellCommandActivity
import com.krux.hyperion.resource.Ec2Resource
import com.krux.hyperion.workflow.WorkflowExpression._
/**
 * Verifies that workflow expressions built with `~>` (dependency arrow) and
 * `+` (parallel composition) resolve to the expected `dependsOn` wiring, and
 * that invalid graphs (inconsistent ids, cycles) are rejected.
 */
class WorkflowExpressionSpec extends AnyWordSpec with Matchers {

  "WorkflowExpression" should {
    // Example config supplies the defaults HyperionContext needs.
    implicit val hc: HyperionContext = new HyperionContext(ConfigFactory.load("example"))
    val ec2 = Ec2Resource()

    "produce correct dependencies with no duplicates" in {
      val act1 = ShellCommandActivity("run act1")(ec2).idNamed("act1")
      val act2 = ShellCommandActivity("run act2")(ec2).idNamed("act2")
      val act3 = ShellCommandActivity("run act3")(ec2).idNamed("act3")
      val act4 = ShellCommandActivity("run act4")(ec2).idNamed("act4")
      val act5 = ShellCommandActivity("run act5")(ec2).idNamed("act5")
      val act6 = ShellCommandActivity("run act6")(ec2).idNamed("act6")

      // act1 and act2 run in parallel, then (act3 ~> act4) and act5, then act6.
      val dependencies = (act1 + act2) ~> ((act3 ~> act4) + act5) ~> act6

      // Expected resolved activities with their dependsOn wiring.
      val rAct3 = act3.dependsOn(act1, act2)
      val rAct4 = act4.dependsOn(rAct3)
      val rAct5 = act5.dependsOn(act1, act2)
      val rAct6 = act6.dependsOn(rAct4, rAct5)

      dependencies.toActivities should contain theSameElementsAs Seq(
        act1, act2, rAct3, rAct4, rAct5, rAct6
      )
    }

    "produce correct dependencies for straight arrow" in {
      val act1 = ShellCommandActivity("run act1")(ec2).idNamed("act1")
      val act2 = ShellCommandActivity("run act2")(ec2).idNamed("act2")
      val act3 = ShellCommandActivity("run act3")(ec2).idNamed("act3")
      val act4 = ShellCommandActivity("run act4")(ec2).idNamed("act4")

      // A simple chain: each activity depends only on its predecessor.
      val dependencies = act1 ~> (act2 ~> act3) ~> act4

      val rAct2 = act2.dependsOn(act1)
      val rAct3 = act3.dependsOn(rAct2)
      val rAct4 = act4.dependsOn(rAct3)

      dependencies.toActivities should contain theSameElementsAs Seq(
        act1, rAct2, rAct3, rAct4
      )
    }

    "produce correct dependencies with duplicates" in {
      val act1 = ShellCommandActivity("run act1")(ec2).idNamed("act1")
      val act2 = ShellCommandActivity("run act2")(ec2).idNamed("act2")
      val act3 = ShellCommandActivity("run act3")(ec2).idNamed("act3")
      val act4 = ShellCommandActivity("run act4")(ec2).idNamed("act4")
      val act5 = ShellCommandActivity("run act5")(ec2).idNamed("act5")
      val act6 = ShellCommandActivity("run act6")(ec2).idNamed("act6")

      // equivalent to val dependencies = (act1 + act2) ~> ((act3 ~> act4) + act5) ~> act6
      // Expressed edge-by-edge; duplicate activity mentions must be merged.
      val dependencies =
        (act1 ~> act3) +
        (act2 ~> act3) +
        (act3 ~> act4) +
        (act2 ~> act5) +
        (act1 ~> act5) +
        (act4 ~> act6) +
        (act5 ~> act6)

      val rAct3 = act3.dependsOn(act1, act2)
      val rAct4 = act4.dependsOn(rAct3)
      val rAct5 = act5.dependsOn(act1, act2)
      val rAct6 = act6.dependsOn(rAct4, rAct5)

      dependencies.toActivities should contain theSameElementsAs Seq(
        act1, act2, rAct3, rAct4, rAct5, rAct6
      )
    }

    "detect inconsistent duplicated ids" in {
      val act1 = ShellCommandActivity("run act1")(ec2).idNamed("act1")
      val act2 = ShellCommandActivity("run act2")(ec2).idNamed("act2")
      val act3 = ShellCommandActivity("run act3")(ec2).idNamed("act3")

      // Same id "act3" used with two different definitions must be rejected.
      val dependencies =
        (act1 ~> act3) +
        (act2 ~> act3.withArguments("modified"))

      an [AssertionError] should be thrownBy dependencies.toActivities
    }

    "detect circular dependencies" in {
      val act1 = ShellCommandActivity("run act1")(ec2).idNamed("act1")
      val act2 = ShellCommandActivity("run act2")(ec2).idNamed("act2")

      // act1 -> act2 -> act1 forms a cycle and must be rejected.
      val dependencies =
        (act1 ~> act2) +
        (act2 ~> act1)

      an [AssertionError] should be thrownBy dependencies.toActivities
    }
  }
}
| realstraw/hyperion | core/src/test/scala/com/krux/hyperion/workflow/WorkflowExpressionSpec.scala | Scala | bsd-3-clause | 3,961 |
package io.github.oxlade39.storrent.persistence
import org.scalatest.WordSpec
import io.github.oxlade39.storrent.peer.{Block, DownloadPiece}
import akka.util.ByteString
import io.github.oxlade39.storrent.core.Torrent
import io.github.oxlade39.storrent.persistence.FolderPersistence.FileOffset
import org.scalatest.MustMatchers
/**
 * Tests for FileOffset.chop: given a file's byte range [from, to) within the
 * torrent, chop() must return only the slice of a DownloadPiece that falls
 * inside that range.
 */
class FileOffsetTest extends WordSpec with MustMatchers {
  import FileOffsetTest._

  "FileOffsetTest" must {
    "chop the start of a DownloadPiece to within bounds" in {
      // File covers bytes [0, 3): only the first three bytes survive.
      val fileOffset = FileOffset(0, 3)
      fileOffset.chop(downloadPiece) mustEqual ByteString(0.toByte, 1.toByte, 2.toByte)
    }

    "chop the middle of a DownloadPiece to within bounds" in {
      // File starts at 10; the 12-byte piece only overlaps bytes 10 and 11.
      val fileOffset = FileOffset(10, 20)
      fileOffset.chop(downloadPiece) mustEqual ByteString(10.toByte, 11.toByte)
    }

    "chop a DownloadPiece which is smaller than the end offset" in {
      val fileOffset = FileOffset(3, 6)
      fileOffset.chop(downloadPiece) mustEqual ByteString(3.toByte, 4.toByte, 5.toByte)
    }

    "chop a middle DownloadPiece which overlaps the end of a middle FileOffset" in {
      // Piece holds bytes 18..35 at piece offset 18; file range [28, 56)
      // should keep bytes 28..35.
      val fileOffset = FileOffset(28, 56)
      val bytes = ByteString(18.until(36).map(_.toByte).toArray)
      val blocks = bytes.grouped(6).zipWithIndex.map{case (b,i) => Block(i * 6, b)}
      val dp = DownloadPiece(index = 1,
        size = 18,
        offset = 18,
        hash = Torrent.hash(bytes)
      ) ++ blocks.toTraversable

      fileOffset.chop(dp) mustEqual ByteString(28.until(36).map(_.toByte).toArray)
    }

    "chop a middle DownloadPiece which overlaps the end of a begging FileOffset" in {
      // Same piece (bytes 18..35), but the file ends at 28: keep bytes 18..27.
      val fileOffset = FileOffset(0, 28)
      val bytes = ByteString(18.until(36).map(_.toByte).toArray)
      val blocks = bytes.grouped(6).zipWithIndex.map{case (b,i) => Block(i * 6, b)}
      val dp = DownloadPiece(index = 1,
        size = 18,
        offset = 18,
        hash = Torrent.hash(bytes)
      ) ++ blocks.toTraversable

      fileOffset.chop(dp) mustEqual ByteString(18.until(28).map(_.toByte).toArray)
    }
  }
}
object FileOffsetTest {
  // Six contiguous 2-byte blocks whose payload bytes equal their own
  // absolute position (byte i has value i, for i in 0..11).
  val blocks = Seq(
    Block(0, ByteString(0.toByte, 1.toByte)),
    Block(2, ByteString(2.toByte, 3.toByte)),
    Block(4, ByteString(4.toByte, 5.toByte)),
    Block(6, ByteString(6.toByte, 7.toByte)),
    Block(8, ByteString(8.toByte, 9.toByte)),
    Block(10, ByteString(10.toByte, 11.toByte))
  )

  // All block payloads concatenated in order: bytes 0..11.
  val blocksAsBytes = blocks.map(_.data).reduce(_ ++ _)

  // A fully assembled piece at offset 0 containing the bytes above.
  val downloadPiece = DownloadPiece(0, blocksAsBytes.size, 0, Torrent.hash(blocksAsBytes)) ++ blocks
} | oxlade39/STorrent | src/test/scala/io/github/oxlade39/storrent/persistence/FileOffsetTest.scala | Scala | apache-2.0 | 2,554 |
package org.scaladebugger.test.classes
/**
 * Provides test of class preparation (loading) used to verify reception
 * of class prepare events.
 */
object ClassPrepare extends App {
  // Load our class — instantiating it forces the JVM to prepare it, which is
  // the event the debugger-side test listens for.
  val customClass = new CustomClass

  // Keep the process alive indefinitely so the debugger can attach and observe.
  while (true) { Thread.sleep(1000) }
}
| ensime/scala-debugger | scala-debugger-test/src/main/scala/org/scaladebugger/test/classes/ClassPrepare.scala | Scala | apache-2.0 | 280 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset for Scala code snippets that meet specific criteria and returns a small sample, giving a quick overview of the dataset's contents without deeper analysis.