code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.google.protobuf.any
// NOTE(review): generated by ScalaPB from google/protobuf/any.proto — do not edit by
// hand; regenerate from the .proto instead. Comments below are review annotations only.
object AnyProto extends _root_.scalapb.GeneratedFileObject {
lazy val dependencies: Seq[_root_.scalapb.GeneratedFileObject] = Seq.empty
lazy val messagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] =
Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]](
com.google.protobuf.any.Any
)
// Base64-encoded, serialized FileDescriptorProto for any.proto; decoded lazily below.
private lazy val ProtoBytes: _root_.scala.Array[Byte] =
scalapb.Encoding.fromBase64(scala.collection.immutable.Seq(
"""Chlnb29nbGUvcHJvdG9idWYvYW55LnByb3RvEg9nb29nbGUucHJvdG9idWYiUAoDQW55EicKCHR5cGVfdXJsGAEgASgJQgziP
wkSB3R5cGVVcmxSB3R5cGVVcmwSIAoFdmFsdWUYAiABKAxCCuI/BxIFdmFsdWVSBXZhbHVlQm8KE2NvbS5nb29nbGUucHJvdG9id
WZCCEFueVByb3RvUAFaJWdpdGh1Yi5jb20vZ29sYW5nL3Byb3RvYnVmL3B0eXBlcy9hbnmiAgNHUEKqAh5Hb29nbGUuUHJvdG9id
WYuV2VsbEtub3duVHlwZXNiBnByb3RvMw=="""
).mkString)
// Scala-side descriptor, parsed from the embedded bytes at first use.
lazy val scalaDescriptor: _root_.scalapb.descriptors.FileDescriptor = {
val scalaProto = com.google.protobuf.descriptor.FileDescriptorProto.parseFrom(ProtoBytes)
_root_.scalapb.descriptors.FileDescriptor.buildFrom(scalaProto, dependencies.map(_.scalaDescriptor))
}
// Java protobuf runtime's descriptor for the same file.
lazy val javaDescriptor: com.google.protobuf.Descriptors.FileDescriptor =
com.google.protobuf.AnyProto.getDescriptor()
@deprecated("Use javaDescriptor instead. In a future version this will refer to scalaDescriptor.", "ScalaPB 0.5.47")
def descriptor: com.google.protobuf.Descriptors.FileDescriptor = javaDescriptor
} | trueaccord/ScalaPB | scalapb-runtime/src/main/scalajvm/com/google/protobuf/any/AnyProto.scala | Scala | apache-2.0 | 1,628 |
package com.krux.hyperion.common
import scala.annotation.tailrec
trait Escapable {

  /**
   * Given the text that follows the opening '{' of an expression block, seek the end of the
   * expression, returning the expression body (up to and including the closing '}') together
   * with the rest of the string. Note that expression is not a nested structure and the only
   * legitimate '{' or '}' within an expression is within quotes (i.e. '"' or "'").
   *
   * @note this does not handle the case that expression have escaped quotes (i.e. "\"" or '\'')
   * @throws RuntimeException if the expression is never closed
   */
  @tailrec
  private def seekEndOfExpr(
    exp: String,
    quote: Option[Char] = None,
    // `new StringBuilder` instead of `StringBuilder.newBuilder`: the latter was
    // removed in Scala 2.13, while the constructor works on every Scala version.
    expPart: StringBuilder = new StringBuilder
  ): (String, String) = {
    if (exp.isEmpty) {
      throw new RuntimeException("Expression started but not ended")
    } else {
      val curChar = exp.head
      val next = exp.tail
      quote match {
        case Some(_) => // inside a quote: only the matching quote char can close it
          seekEndOfExpr(next, quote.filter(_ != curChar), expPart += curChar)
        case _ =>
          curChar match {
            case '}' => ((expPart += curChar).result, next)
            case '\'' | '"' => seekEndOfExpr(next, Option(curChar), expPart += curChar)
            case _ => seekEndOfExpr(next, None, expPart += curChar)
          }
      }
    }
  }

  /**
   * Escape every occurrence of `c` in `s` with a backslash, except inside "#{...}"
   * expression blocks, whose contents are copied through verbatim.
   */
  def escape(s: String, c: Char): String = {
    // Render a single char, prefixing the target char with a backslash.
    def escapeChar(cc: Char): String = if (cc == c) s"\\\\$c" else cc.toString

    @tailrec
    def escapeRec(
      s: String,
      hashSpotted: Boolean = false,
      result: StringBuilder = new StringBuilder
    ): String = {
      if (s.isEmpty) {
        result.toString
      } else {
        val curChar = s.head
        val sTail = s.tail
        if (!hashSpotted) { // outside an expression block
          escapeRec(sTail, curChar == '#', result ++= escapeChar(curChar))
        } else { // the previous char is '#'
          if (curChar == '{') { // start of an expression: copy its body verbatim
            val (blockBody, rest) = seekEndOfExpr(sTail)
            escapeRec(rest, false, result += curChar ++= blockBody)
          } else { // not start of an expression
            escapeRec(sTail, false, result ++= escapeChar(curChar))
          }
        }
      }
    }

    escapeRec(s)
  }
}
object Escapable extends Escapable
| sethyates/hyperion | core/src/main/scala/com/krux/hyperion/common/Escapable.scala | Scala | apache-2.0 | 2,218 |
/*
* The Bluejelly project, Copyright 2012.
*
* This source code is distributed under the terms of
* the BSD license, see the LICENSE file for details.
*/
package bluejelly.asm
import java.io.StringWriter
import scala.text.Document
import scala.text.Document.group
import scala.text.Document.nest
import scala.text.Document.text
import bluejelly.utils.Errors
/**
 * Custom error bag for assembler validation: formats and records
 * pretty-printed error messages with source positions.
 * @author ppedemon
 */
class AsmErrors extends Errors(false) {
// Wrap a value in `backtick quotes' for error messages.
private def quote[T](v:T):String = "`%s'" format v
// Render a scala.text.Document to a string, wrapping at width 75.
private def ppr(d:Document):String = {
val s = new StringWriter
d.format(75, s)
s.flush()
s.toString
}
// Group + indent-by-2: the layout building block for nested message parts.
private def gnest(d:Document):Document = group(nest(2,d))
private def pprList[T](xs:List[T]):Document = text(xs mkString ("[",",","]"))
// Report a function declared twice, pointing at both declaration sites.
def dupFunction(f:Function, prev:Function) {
val doc = gnest(
gnest("duplicated function declaration" :/: quote(f.name) :/: "at:" :/: text(f.pos.toString)) :/:
gnest("(previous declaration was at:" :/: prev.pos.toString :: text(")")))
error(f.pos, ppr(doc))
}
// Report an instruction referencing a function that does not exist.
def invalidRefInFun(f:Function, i:Instr, ref:String) {
val doc = gnest(
group("invalid function reference: " :/: text(quote(ref))) :/:
gnest(
group("in instruction:" :/: text(i.toString)) :/:
group("at: " :/: text(i.pos.toString))) :/:
gnest(
group("in function:" :/: text(quote(f.name))) :/:
group("at:" :/: text(f.pos.toString))))
error(f.pos, ppr(doc))
}
// Report duplicated values among the alternatives of a match instruction.
def repeatedAltsInFun[U](f:Function, i:Instr, vs:List[U]) {
val v = if (vs.size == 1) "value" else "values"
val msg = "repeated %s in case alternatives:" format v
val doc = gnest(
gnest(
group(msg :/: pprList(vs)) :/:
group("in match instruction at:" :/: text(i.pos.toString))) :/:
gnest(
group("in function:" :/: text(quote(f.name))) :/:
group("at:" :/: text(f.pos.toString))))
error(f.pos, ppr(doc))
}
}
/**
 * Very simple validation routines for {@link Module} instances.
 *
 * Checks that: function names are unique, instruction references to local
 * functions resolve, and case alternatives carry no repeated values. Errors
 * are accumulated in an [[AsmErrors]] bag as a side effect of the checks.
 *
 * @author ppedemon
 */
class Validator(val m:Module) {

  private type Env = Map[String,Function]

  // Accumulates validation errors as a side effect of the checks below.
  private val errors = new AsmErrors

  /* Collect the functions in the module, flagging duplicate declarations. */
  private def collectFunctions():Env =
    (m.funcs foldLeft Map[String,Function]()){case (map,f) =>
      if (map.contains(f.name)) {
        errors.dupFunction(f, map(f.name))
        map
      } else {
        map + (f.name -> f)
      }
    }

  /*
   * Separate an identifier in qualifier and local id. If the id is
   * unqualified, return module's name.
   */
  private def unqual(s:String):(String,String) = {
    val ix = s lastIndexOf '.'
    if (ix == -1) (m.name,s) else (s substring (0,ix), s substring (ix+1))
  }

  /*
   * Check whether the given reference is valid. References are valid
   * if they are external (referring to another module) or if they
   * refer to an existing local function.
   */
  private def validateInstrRef(env:Env,f:Function,i:Instr, ref:String) {
    val (q,id) = unqual(ref)
    if (q == m.name && !(env contains id))
      errors.invalidRefInFun(f, i, ref)
  }

  /* Validate the blocks of all case alternatives. */
  private def validateAlts[U](env:Env,f:Function)(alts:List[Alt[U]]):Unit =
    alts foreach {a => validateBlock(env,f)(a.b)}

  /*
   * Check for repeated alternatives in some alts. Return whether there
   * are repeated alternatives, and the list of repeated values in order
   * of appearance.
   */
  private def repeatedAlts[U](alts:List[Alt[U]]):(Boolean,List[U]) = {
    // Fold state: (values seen, values known repeated, repeats in reverse order).
    val (_,_,rep) = (alts foldLeft (Set[U](),Set[U](),List[U]())) {
      case (t@(_,rep,_),alt) if rep contains alt.v => t
      case ((seen,rep,repList),alt) if seen contains alt.v =>
        (seen,rep + alt.v, alt.v::repList)
      case ((seen,rep,repList),alt) => (seen + alt.v, rep, repList)
    }
    (rep.nonEmpty, rep.reverse)
  }

  /* Validate a match instruction: reject repeated alternatives, else recurse. */
  private def validateMatch[U](
      env:Env,
      f:Function,
      i:Instr,
      alts:List[Alt[U]],
      mdef:Option[Block]) {
    val (hasReps,reps) = repeatedAlts(alts)
    if (hasReps) {
      errors.repeatedAltsInFun(f, i, reps)
    } else {
      validateAlts(env,f)(alts)
      mdef foreach validateBlock(env,f)
    }
  }

  /* Validate an instruction inside a function. */
  private def validateInstr(env:Env,f:Function)(i:Instr):Unit = i match {
    case Jump(funId)         => validateInstrRef(env, f, i, funId)
    case EvalVar(_,funId)    => validateInstrRef(env, f, i, funId)
    case PushCode(funId)     => validateInstrRef(env, f, i, funId)
    case PushCont(funId)     => validateInstrRef(env, f, i, funId)
    case MatchCon(alts,mdef) => validateMatch(env, f, i, alts, mdef)
    case MatchInt(alts,mdef) => validateMatch(env, f, i, alts, mdef)
    case MatchDbl(alts,mdef) => validateMatch(env, f, i, alts, mdef)
    case MatchChr(alts,mdef) => validateMatch(env, f, i, alts, mdef)
    case _ => ()
  }

  /* Validate every instruction of a block. */
  private def validateBlock(env:Env,f:Function)(b:Block):Unit =
    b.is foreach validateInstr(env,f)

  /* Validate all functions in a module. */
  private def validateFuns(env:Env,fs:List[Function]):Unit =
    fs foreach {f => validateBlock(env,f)(f.b)}

  /**
   * Validate the given module. Function bodies are only checked when the
   * function declarations themselves are well-formed.
   * @return Some(errors) if validation fails, None otherwise.
   */
  def validate[T >: Errors]():Option[T] = {
    val env = collectFunctions()
    if (errors.hasErrors) Some(errors)
    else {
      validateFuns(env, m.funcs)
      if (errors.hasErrors) Some(errors) else None
    }
  }
}
| ppedemon/Bluejelly | bluejelly-asm/src/main/scala/bluejelly/asm/Validator.scala | Scala | bsd-3-clause | 5,673 |
/*
* Copyright 2015 Async-IO.org
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.atmosphere.samples.pubsub.websocket
import java.io.Serializable
import org.atmosphere.websocket.WebSocket
import org.atmosphere.cpr.AtmosphereRequest
import org.atmosphere.websocket.protocol.SimpleHttpProtocol
/** WebSocket protocol that short-circuits messages addressed to the "devoxx" topic. */
class DevoxxWebSocketProtocol extends SimpleHttpProtocol with Serializable {
// Messages of the form "message=devoxx:..." are echoed straight back on the socket
// (with the "message=" prefix stripped). Returning null here presumably signals
// "handled, nothing to dispatch" — confirm against the SimpleHttpProtocol contract.
// Everything else falls through to the default SimpleHttpProtocol handling.
override def onMessage(webSocket: WebSocket, message: String): java.util.List[AtmosphereRequest] = {
if (message.startsWith("message=devoxx:")) {
webSocket.write(webSocket.resource.getResponse, message.substring("message=".length()))
null
} else super.onMessage(webSocket, message)
}
} | pjvds/atmosphere-samples | samples/all-api-pubsub/src/main/scala/org/atmosphere/samples/pubsub/websocket/DevoxxWebSocketProtocol.scala | Scala | apache-2.0 | 1203 |
package net.itsky
/** Measures the wall-clock time consumed by an operation. */
object Timer {
  /**
   * Runs `b(x)`, prints the elapsed time in milliseconds to stdout,
   * and returns that elapsed time.
   *
   * @param b the operation to measure
   * @param x the argument passed to the operation
   * @return elapsed wall-clock milliseconds
   */
  def time(b: (Long=>Unit), x:Long): Long = {
    val startedAt: Long = System.currentTimeMillis
    b(x)
    val elapsed: Long = System.currentTimeMillis - startedAt
    val rendered: String = elapsed.toString()
    System.out.println(rendered)
    elapsed
  }
}
| bk1/sysprogramming-examples | akka/akka/src/main/scala/net/itsky/Timer.scala | Scala | gpl-2.0 | 352 |
package com.catinthedark.yoba.units
/**
 * Created by over on 02.01.15.
 */
// NOTE(review): empty placeholder — no members yet. Populate with render-unit
// construction helpers, or remove if it stays unused.
object RenderFactory {
}
| cat-in-the-dark/old48_33_game | src/main/scala/com/catinthedark/yoba/units/RenderFactory.scala | Scala | mit | 104 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.observers.buffers
import monix.reactive.observers.Subscriber
/** A `BufferedSubscriber` implementation for the
* [[monix.reactive.OverflowStrategy.BackPressure BackPressure]]
* buffer overflow strategy.
*
* The actual buffering/back-pressure machinery lives in the abstract
* superclass; this class only supplies the element-fetching callbacks.
*/
private[observers] final class BackPressuredBufferedSubscriber[A] private
(out: Subscriber[A], _bufferSize: Int)
extends AbstractBackPressuredBufferedSubscriber[A,A](out, _bufferSize) {
// NOTE(review): p50..q57 look like cache-line padding fields around hot
// @volatile state, presumably to avoid false sharing — confirm against the
// superclass's field layout before changing or removing them.
@volatile protected var p50, p51, p52, p53, p54, p55, p56, p57 = 5
@volatile protected var q50, q51, q52, q53, q54, q55, q56, q57 = 5
// Pop the next buffered element; `poll()` may yield null when nothing is
// available (see fetchSize, which treats null as "nothing fetched").
override protected def fetchNext(): A =
queue.poll()
// A fetched element counts as size 1; null counts as 0.
override protected def fetchSize(r: A): Int =
if (r == null) 0 else 1
}
private[observers] object BackPressuredBufferedSubscriber {
/** Builds a back-pressured buffered subscriber wrapping `underlying`, with the given buffer size. */
def apply[A](underlying: Subscriber[A], bufferSize: Int): BackPressuredBufferedSubscriber[A] =
new BackPressuredBufferedSubscriber[A](underlying, bufferSize)
} | Wogan/monix | monix-reactive/jvm/src/main/scala/monix/reactive/observers/buffers/BackPressuredBufferedSubscriber.scala | Scala | apache-2.0 | 1606 |
package com.ovoenergy.comms.monitor
package metrics
import cats._, implicits._
import com.ovoenergy.comms.model.{TemplateManifest, FeedbackStatus, FeedbackOptions}
import com.ovoenergy.comms.logging.core.Loggable
import com.ovoenergy.comms.{model => kafkaModel}
/** Domain model of the monitor's metrics, mapped from the Kafka comms model. */
object model {
/** Newtype wrapper for the id of an event. */
case class EventId(value: String)
object EventId {
implicit def eqEventId: Eq[EventId] = Eq.by(_.value)
// Logged as a single "eventId" -> value entry.
implicit def loggableEventId: Loggable[EventId] = Loggable.instance { id =>
Map("eventId" -> id.value)
}
}
/** Terminal delivery outcome tracked by the monitor. */
sealed trait Status
object Status {
case object Delivered extends Status
case object Failed extends Status
// Only Failed/Delivered map to a Status; any other feedback status yields None.
def fromFeedback(status: FeedbackStatus): Option[Status] =
status match {
case FeedbackOptions.Failed => Failed.some
case FeedbackOptions.Delivered => Delivered.some
case _ => None
}
}
/** Template identity: id plus version, as carried by a TemplateManifest. */
case class Template(id: Template.Id, version: String)
object Template {
case class Id(value: String)
def fromManifest(t: TemplateManifest): Template =
Template(Template.Id(t.id), t.version)
}
/** Monitored delivery channels (Phone is deliberately excluded, see removePhone). */
sealed trait Channel
case object Email extends Channel
case object SMS extends Channel
case object Print extends Channel
object Channel {
// Maps Kafka channels onto monitored ones; Phone has no counterpart, hence None.
def removePhone(c: kafkaModel.Channel): Option[Channel] = c match {
case kafkaModel.Email => Email.some
case kafkaModel.SMS => SMS.some
case kafkaModel.Print => Print.some
case kafkaModel.Phone => None
}
}
}
| ovotech/comms-monitor-service | src/main/scala/com/ovoenergy/comms/monitor/metrics/model.scala | Scala | mit | 1,470 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar
import slamdata.Predef._
import quasar.fp._
import matryoshka._
import matryoshka.data._
import matryoshka.implicits._
import scalaz._, Scalaz._
import simulacrum.typeclass
import iotaz.{CopK, TListK}
// Type class of values that can be rendered as a RenderedTree. The simulacrum
// @typeclass annotation generates the companion summoner and `ops` syntax.
@typeclass trait RenderTree[A] {
def render(a: A): RenderedTree
}
// Companion: constructors and derivation helpers for RenderTree instances.
// Extends RenderTreeInstances so the layered implicit instances are in implicit scope.
@SuppressWarnings(Array("org.wartremover.warts.ImplicitConversion"))
object RenderTree extends RenderTreeInstances {
import ops._
// Contravariant construction: render an A by first mapping it into a renderable B.
def contramap[A, B: RenderTree](f: A => B): RenderTree[A] =
new RenderTree[A] { def render(v: A) = RenderTree[B].render(f(v)) }
// Build an instance directly from a rendering function.
def make[A](f: A => RenderedTree): RenderTree[A] =
new RenderTree[A] { def render(v: A) = f(v) }
/** Always a Terminal, with a fixed type and computed label. */
def simple[A](nodeType: List[String], f: A => Option[String]): RenderTree[A] =
new RenderTree[A] { def render(v: A) = Terminal(nodeType, f(v)) }
/** Derive an instance from `Show[A]`, with a static type; e.g. `Shape(Circle(5))`. */
def fromShow[A: Show](simpleType: String): RenderTree[A] =
make[A](v => Terminal(List(simpleType), Some(v.shows)))
/** Derive an instance from `Show[A]`, where the result is one of a few choices,
* and suitable as the node's type; e.g. `LeftSide`. Note that the `parentType`
* is not shown in the usual text rendering. */
def fromShowAsType[A: Show](parentType: String): RenderTree[A] =
make[A](v => Terminal(List(v.shows, parentType), None))
/** Derive a `Show[A]` where RenderTree is defined. */
def toShow[A: RenderTree]: Show[A] = Show.show(_.render.show)
// Lift a RenderTree through any Functor+Foldable F, labeling the node with F's shape.
def delayFromShow[F[_]: Functor: Foldable](implicit F: Delay[Show, F]) =
new Delay[RenderTree, F] {
def apply[A](a: RenderTree[A]) = new RenderTree[F[A]] {
def render(v: F[A]) =
NonTerminal(List(v.void.shows), None, v.toList.map(a.render))
}
}
/** For use with `<|`, mostly. */
def print[A: RenderTree](label: String, a: A): Unit =
println(label + ":\\n" + a.render.shows)
// Render any recursive structure T by catamorphism over its pattern functor F.
def recursive[T, F[_]](implicit T: Recursive.Aux[T, F], FD: Delay[RenderTree, F], FF: Functor[F]): RenderTree[T] =
make(_.cata(FD(RenderTree[RenderedTree]).render))
}
// Highest-priority instances. The Instances -> Instances0 -> Instances1 chain is the
// standard low-priority-instances encoding: instances in parent classes only apply
// when nothing here matches — do not reorder or flatten the hierarchy.
sealed abstract class RenderTreeInstances extends RenderTreeInstances0 {
import RenderTree.make
import RenderTree.ops._
implicit def const[A: RenderTree]: Delay[RenderTree, Const[A, ?]] =
Delay.fromNT(λ[RenderTree ~> DelayedA[A]#RenderTree](_ =>
make(_.getConst.render)))
implicit def delay[F[_], A: RenderTree](implicit F: Delay[RenderTree, F]): RenderTree[F[A]] =
F(RenderTree[A])
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
implicit def free[F[_]: Functor](implicit F: Delay[RenderTree, F]): Delay[RenderTree, Free[F, ?]] =
Delay.fromNT(λ[RenderTree ~> (RenderTree ∘ Free[F, ?])#λ](rt =>
make(_.resume.fold(F(free[F].apply(rt)).render, rt.render))))
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
implicit def cofree[F[_]](implicit F: Delay[RenderTree, F]): Delay[RenderTree, Cofree[F, ?]] =
Delay.fromNT(λ[RenderTree ~> (RenderTree ∘ Cofree[F, ?])#λ](rt =>
make(t => NonTerminal(List("Cofree"), None, List(rt.render(t.head), F(cofree(F)(rt)).render(t.tail))))))
implicit def these[A: RenderTree, B: RenderTree]: RenderTree[A \\&/ B] =
make {
case \\&/.Both(a, b) => NonTerminal(List("\\\\&/"), "Both".some, List(a.render, b.render))
case \\&/.This(a) => NonTerminal(List("\\\\&/"), "This".some, List(a.render))
case \\&/.That(b) => NonTerminal(List("\\\\&/"), "That".some, List(b.render))
}
implicit def coproduct[F[_], G[_], A](implicit RF: RenderTree[F[A]], RG: RenderTree[G[A]]): RenderTree[Coproduct[F, G, A]] =
make(_.run.fold(RF.render, RG.render))
implicit lazy val unit: RenderTree[Unit] =
make(_ => Terminal(List("()", "Unit"), None))
implicit def renderTreeT[T[_[_]], F[_]: Functor](implicit T: RenderTreeT[T], F: Delay[RenderTree, F]): RenderTree[T[F]] =
T.renderTree(F)
implicit def copKRenderTree[LL <: TListK](implicit M: RenderTreeKMaterializer[LL]): Delay[RenderTree, CopK[LL, ?]] = M.materialize(offset = 0)
implicit def coproductDelay[F[_], G[_]](implicit RF: Delay[RenderTree, F], RG: Delay[RenderTree, G]): Delay[RenderTree, Coproduct[F, G, ?]] =
Delay.fromNT(λ[RenderTree ~> DelayedFG[F, G]#RenderTree](ra =>
make(_.run.fold(RF(ra).render, RG(ra).render))))
implicit def eitherRenderTree[A, B](implicit RA: RenderTree[A], RB: RenderTree[B]): RenderTree[A \\/ B] =
make {
case -\\/ (a) => NonTerminal("-\\\\/" :: Nil, None, RA.render(a) :: Nil)
case \\/- (b) => NonTerminal("\\\\/-" :: Nil, None, RB.render(b) :: Nil)
}
// NB: a present Option renders as its content directly, with no "Some" wrapper node.
implicit def optionRenderTree[A](implicit RA: RenderTree[A]): RenderTree[Option[A]] =
make {
case Some(a) => RA.render(a)
case None => Terminal("None" :: "Option" :: Nil, None)
}
implicit def listRenderTree[A](implicit RA: RenderTree[A]): RenderTree[List[A]] =
make(v => NonTerminal(List("List"), None, v.map(RA.render)))
implicit def listMapRenderTree[K: Show, V](implicit RV: RenderTree[V]): RenderTree[ListMap[K, V]] =
make(RenderTree[Map[K, V]].render(_))
implicit def vectorRenderTree[A](implicit RA: RenderTree[A]): RenderTree[Vector[A]] =
make(v => NonTerminal(List("Vector"), None, v.map(RA.render).toList))
implicit lazy val booleanRenderTree: RenderTree[Boolean] =
RenderTree.fromShow[Boolean]("Boolean")
implicit lazy val intRenderTree: RenderTree[Int] =
RenderTree.fromShow[Int]("Int")
implicit lazy val doubleRenderTree: RenderTree[Double] =
RenderTree.fromShow[Double]("Double")
implicit lazy val stringRenderTree: RenderTree[String] =
RenderTree.fromShow[String]("String")
implicit lazy val symbolRenderTree: RenderTree[Symbol] =
RenderTree.fromShow[Symbol]("Symbol")
implicit def pathRenderTree[B,T,S]: RenderTree[pathy.Path[B,T,S]] =
// NB: the implicit Show instance in scope here ends up being a circular
// call, so an explicit reference to pathy's Show is needed.
make(p => Terminal(List("Path"), pathy.Path.pathShow.shows(p).some))
// Left-nested 4-tuples are flattened into a single "tuple" node with 4 children.
implicit def leftTuple4RenderTree[A, B, C, D](implicit RA: RenderTree[A], RB: RenderTree[B], RC: RenderTree[C], RD: RenderTree[D]):
RenderTree[(((A, B), C), D)] =
new RenderTree[(((A, B), C), D)] {
def render(t: (((A, B), C), D)) =
NonTerminal("tuple" :: Nil, None,
RA.render(t._1._1._1) ::
RB.render(t._1._1._2) ::
RC.render(t._1._2) ::
RD.render(t._2) ::
Nil)
}
}
// Mid-priority instances: tried only when nothing in RenderTreeInstances applies.
sealed abstract class RenderTreeInstances0 extends RenderTreeInstances1 {
// Left-nested 3-tuples flatten into a single "tuple" node with 3 children.
implicit def leftTuple3RenderTree[A, B, C](
implicit RA: RenderTree[A], RB: RenderTree[B], RC: RenderTree[C]
): RenderTree[((A, B), C)] =
new RenderTree[((A, B), C)] {
def render(t: ((A, B), C)) =
NonTerminal("tuple" :: Nil, None,
RA.render(t._1._1) ::
RB.render(t._1._2) ::
RC.render(t._2) ::
Nil)
}
// Maps render as a "Map" node with one "Key"-labeled child per entry.
implicit def mapRenderTree[K: Show, V](implicit RV: RenderTree[V]): RenderTree[Map[K, V]] =
RenderTree.make(v => NonTerminal("Map" :: Nil, None,
v.toList.map { case (k, v) =>
NonTerminal("Key" :: "Map" :: Nil, Some(k.shows), RV.render(v) :: Nil)
}))
// Fixpoint types delegate to RenderTree.recursive (catamorphism over F).
implicit def fix[F[_]: Functor](implicit F: Delay[RenderTree, F]): RenderTree[Fix[F]] =
RenderTree.recursive
implicit def mu[F[_]: Functor](implicit F: Delay[RenderTree, F]): RenderTree[Mu[F]] =
RenderTree.recursive
implicit def nu[F[_]: Functor](implicit F: Delay[RenderTree, F]): RenderTree[Nu[F]] =
RenderTree.recursive
}
// Lowest-priority instances: plain pairs, so the flattened left-nested tuple
// instances above win whenever they apply.
sealed abstract class RenderTreeInstances1 {
import RenderTree.make
implicit def tuple2RenderTree[A, B](
implicit RA: RenderTree[A], RB: RenderTree[B]
): RenderTree[(A, B)] =
make(t => NonTerminal("tuple" :: Nil, None,
RA.render(t._1) ::
RB.render(t._2) ::
Nil))
}
| slamdata/quasar | foundation/src/main/scala/quasar/RenderTree.scala | Scala | apache-2.0 | 8,550 |
// Copyright 2014 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.twofishes.indexer.scalding
import io.fsq.twofishes.gen.{PolygonMatchingKey, YahooWoeType}
import java.io.{DataInput, DataOutput, IOException}
import org.apache.hadoop.io.WritableComparable
/**
 * Hadoop-serializable wrapper around a PolygonMatchingKey (s2 cell id + WOE type).
 * Ordering, equality and hashing all delegate to the rendered key's toString,
 * keeping the three consistent with each other.
 */
class PolygonMatchingKeyWritable(value: PolygonMatchingKey) extends WritableComparable[PolygonMatchingKeyWritable] {
// Mutable mirrors of the key's fields: Hadoop deserialization overwrites them in readFields.
var s2CellIdValue = value.s2CellIdOption.getOrElse(0L)
var woeTypeValue = value.woeTypeOption.map(_.getValue).getOrElse(0)
// No-arg constructor required by Hadoop's reflective Writable instantiation.
def this() = this(PolygonMatchingKey(0L, YahooWoeType.UNKNOWN))
// Rebuilds the thrift key from the current field values.
def getKey(): PolygonMatchingKey = {
PolygonMatchingKey(s2CellIdValue, YahooWoeType.findByIdOrUnknown(woeTypeValue))
}
// Deserialization: field order must mirror write() below (long, then int).
@throws(classOf[IOException])
override def readFields(in: DataInput) {
s2CellIdValue = in.readLong
woeTypeValue = in.readInt
}
@throws(classOf[IOException])
override def write(out: DataOutput) {
out.writeLong(s2CellIdValue)
out.writeInt(woeTypeValue)
}
override def compareTo(o: PolygonMatchingKeyWritable): Int = {
toString().compare(o.toString())
}
override def equals(that: Any): Boolean = {
that match {
case null => false
case o: PolygonMatchingKeyWritable => toString().equals(o.toString())
case _ => false
}
}
override def hashCode: Int = toString().hashCode
override def toString: String = getKey().toString
}
| foursquare/fsqio | src/jvm/io/fsq/twofishes/indexer/scalding/PolygonMatchingKeyWritable.scala | Scala | apache-2.0 | 1,399 |
/**
* Copyright (C) 2016 DANS - Data Archiving and Networked Services (info@dans.knaw.nl)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.multideposit.parser
import java.util.Locale
import better.files.File
import cats.data.NonEmptyChain
import cats.data.Validated.{ Invalid, Valid }
import cats.instances.option._
import cats.syntax.apply._
import cats.syntax.option._
import cats.syntax.traverse._
import cats.syntax.validated._
import nl.knaw.dans.easy.multideposit.model.PlayMode.PlayMode
import nl.knaw.dans.easy.multideposit.model._
/**
 * Parses the audio/video (Springfield + subtitles) columns of a deposit's rows
 * into an AudioVideo value, accumulating errors via cats Validated.
 * Mixed into the full parser; relies on ParserUtils for row and file helpers.
 */
trait AudioVideoParser {
this: ParserUtils =>
// Combines the (at most one) Springfield record with the per-file subtitles mapping.
// A/V file rows without accompanying Springfield columns are rejected.
def extractAudioVideo(depositId: DepositId, rowNum: Int, rows: DepositRows): Validated[AudioVideo] = {
// @formatter:off
(
extractSpringfieldList(rowNum, rows),
extractSubtitlesPerFile(depositId, rows),
).mapN(AudioVideo)
// @formatter:on
.andThen {
case AudioVideo(None, avFiles) if avFiles.nonEmpty =>
ParseError(rowNum, s"The column '${ Headers.AudioVideoFilePath }' contains values, but the columns [${ Headers.SpringfieldCollection }, ${ Headers.SpringfieldUser }] do not").toInvalid
case otherwise => otherwise.toValidated
}
}
// At most one (complete, identical) Springfield record is allowed across all rows;
// partial records and conflicting duplicates are reported as parse errors.
def extractSpringfieldList(rowNum: => Int, rows: DepositRows): Validated[Option[Springfield]] = {
val records = rows.flatMap(springfield).toList
val validSpringfields = records.collect { case Valid(a) => a }
val invalidRecords = records.collect { case Invalid(e) => e }.reduceOption(_ ++ _)
(validSpringfields, invalidRecords) match {
case (Nil, None) => none.toValidated
case (Nil, Some(invalids)) => invalids.invalid
case (singleSpringfield :: Nil, None) => singleSpringfield.some.toValidated
case (singleSpringfield :: Nil, Some(_)) => ParseError(rowNum, s"At most one row is allowed to contain a value for these columns: [${ Headers.SpringfieldDomain }, ${ Headers.SpringfieldUser }, ${ Headers.SpringfieldCollection }, ${ Headers.SpringfieldPlayMode }]. Found one complete instance ${ singleSpringfield.toTuple } as well as one or more incomplete instances.").toInvalid
case (multipleSpringfields @ head :: _, None) if multipleSpringfields.distinct.size == 1 => head.some.toValidated
case (multipleSpringfields, None) => ParseError(rowNum, s"At most one row is allowed to contain a value for these columns: [${ Headers.SpringfieldDomain }, ${ Headers.SpringfieldUser }, ${ Headers.SpringfieldCollection }, ${ Headers.SpringfieldPlayMode }]. Found: ${ multipleSpringfields.map(_.toTuple).mkString("[", ", ", "]") }").toInvalid
case (multipleSpringfields, Some(_)) => ParseError(rowNum, s"At most one row is allowed to contain a value for these columns: [${ Headers.SpringfieldDomain }, ${ Headers.SpringfieldUser }, ${ Headers.SpringfieldCollection }, ${ Headers.SpringfieldPlayMode }]. Found: ${ multipleSpringfields.map(_.toTuple).mkString("[", ", ", "]") } as well as one or more incomplete instances.").toInvalid
}
}
// Parses one row's Springfield columns. Returns None when all columns are absent;
// otherwise user, collection and play mode are mandatory (domain is optional),
// and every missing mandatory column is accumulated as an error.
def springfield(row: DepositRow): Option[Validated[Springfield]] = {
val domain = row.find(Headers.SpringfieldDomain)
val user = row.find(Headers.SpringfieldUser)
val collection = row.find(Headers.SpringfieldCollection)
val playmode = row.find(Headers.SpringfieldPlayMode).map(playMode(row.rowNum))
lazy val collectionException = ParseError(row.rowNum, s"Missing value for: ${ Headers.SpringfieldCollection }")
lazy val userException = ParseError(row.rowNum, s"Missing value for: ${ Headers.SpringfieldUser }")
lazy val playModeException = ParseError(row.rowNum, s"Missing value for: ${ Headers.SpringfieldPlayMode }")
(domain, user, collection, playmode) match {
case (maybeD, Some(u), Some(c), Some(pm)) =>
// @formatter:off
(
maybeD.traverse(checkValidChars(_, row.rowNum, Headers.SpringfieldDomain)),
checkValidChars(u, row.rowNum, Headers.SpringfieldUser),
checkValidChars(c, row.rowNum, Headers.SpringfieldCollection),
pm,
).mapN(Springfield.maybeWithDomain).some
// @formatter:on
case (_, Some(_), Some(_), None) => playModeException.toInvalid.some
case (_, Some(_), None, Some(Valid(_))) => collectionException.toInvalid.some
case (_, Some(_), None, Some(Invalid(parseError))) => (collectionException +: parseError).invalid.some
case (_, Some(_), None, None) => NonEmptyChain(collectionException, playModeException).invalid.some
case (_, None, Some(_), Some(Valid(_))) => userException.toInvalid.some
case (_, None, Some(_), Some(Invalid(parseError))) => (userException +: parseError).invalid.some
case (_, None, Some(_), None) => NonEmptyChain(userException, playModeException).invalid.some
case (_, None, None, Some(Valid(_))) => NonEmptyChain(collectionException, userException).invalid.some
case (_, None, None, Some(Invalid(parseError))) => (collectionException +: userException +: parseError).invalid.some
case (_, None, None, None) => None
}
}
// Parses a play-mode cell into the PlayMode enum, failing on unknown values.
def playMode(rowNum: => Int)(pm: String): Validated[PlayMode] = {
PlayMode.valueOf(pm)
.toValidNec(ParseError(rowNum, s"Value '$pm' is not a valid play mode"))
}
// Collects all subtitle rows, grouped per referenced A/V file.
def extractSubtitlesPerFile(depositId: DepositId, rows: DepositRows): Validated[Map[File, Set[SubtitlesFile]]] = {
extractList(rows)(avFile(depositId))
.map(_.groupBy { case (file, _) => file }.mapValues(_.collect { case (_, subtitles) => subtitles }.toSet))
}
// Parses one row's A/V columns into (file, subtitles). The exhaustive case matrix
// below pins down every combination of missing/invalid path, subtitle file and
// subtitle language — keep the case order intact, earlier cases shadow later ones.
def avFile(depositId: DepositId)(row: DepositRow): Option[Validated[(File, SubtitlesFile)]] = {
val file = row.find(Headers.AudioVideoFilePath).map(findRegularFile(depositId, row.rowNum))
val subtitle = row.find(Headers.AudioVideoSubtitles).map(findRegularFile(depositId, row.rowNum))
val subtitleLang = row.find(Headers.AudioVideoSubtitlesLanguage)
(file, subtitle, subtitleLang) match {
case (Some(Invalid(_)), Some(Invalid(_)), _) => ParseError(row.rowNum, s"Both ${ Headers.AudioVideoFilePath } and ${ Headers.AudioVideoSubtitles } do not represent a valid path").toInvalid.some
case (Some(Invalid(_)), _, _) => ParseError(row.rowNum, s"${ Headers.AudioVideoFilePath } does not represent a valid path").toInvalid.some
case (_, Some(Invalid(_)), _) => ParseError(row.rowNum, s"${ Headers.AudioVideoSubtitles } does not represent a valid path").toInvalid.some
case (Some(Valid(p)), Some(Valid(sub)), subLang)
if p.exists &&
p.isRegularFile &&
sub.exists &&
sub.isRegularFile &&
subLang.exists(isValidISO639_1Language) =>
(p, SubtitlesFile(sub, subLang)).toValidated.some
case (Some(Valid(p)), Some(_), _)
if !p.exists =>
ParseError(row.rowNum, s"${ Headers.AudioVideoFilePath } '$p' does not exist").toInvalid.some
case (Some(Valid(p)), Some(_), _)
if !p.isRegularFile =>
ParseError(row.rowNum, s"${ Headers.AudioVideoFilePath } '$p' is not a file").toInvalid.some
case (Some(_), Some(Valid(sub)), _)
if !sub.exists =>
ParseError(row.rowNum, s"${ Headers.AudioVideoSubtitles } '$sub' does not exist").toInvalid.some
case (Some(_), Some(Valid(sub)), _)
if !sub.isRegularFile =>
ParseError(row.rowNum, s"${ Headers.AudioVideoSubtitles } '$sub' is not a file").toInvalid.some
case (Some(_), Some(_), Some(subLang))
if !isValidISO639_1Language(subLang) =>
ParseError(row.rowNum, s"${ Headers.AudioVideoSubtitlesLanguage } '$subLang' doesn't have a valid ISO 639-1 language value").toInvalid.some
case (Some(_), Some(_), None) =>
ParseError(row.rowNum, s"${ Headers.AudioVideoSubtitlesLanguage } AV_SUBTITLES specified without AV_SUBTITLES_LANGUAGE").toInvalid.some
case (Some(_), None, Some(subLang)) =>
ParseError(row.rowNum, s"Missing value for ${ Headers.AudioVideoSubtitles }, since ${ Headers.AudioVideoSubtitlesLanguage } does have a value: '$subLang'").toInvalid.some
case (Some(Valid(p)), None, None)
if p.exists &&
p.isRegularFile => none
case (Some(Valid(p)), None, None)
if !p.exists =>
ParseError(row.rowNum, s"${ Headers.AudioVideoFilePath } '$p' does not exist").toInvalid.some
case (Some(Valid(p)), None, None)
if !p.isRegularFile =>
ParseError(row.rowNum, s"${ Headers.AudioVideoFilePath } '$p' is not a file").toInvalid.some
case (None, None, None) => None
case (None, _, _) =>
ParseError(row.rowNum, s"No value is defined for ${ Headers.AudioVideoFilePath }, while some of [${ Headers.AudioVideoSubtitles }, ${ Headers.AudioVideoSubtitlesLanguage }] are defined").toInvalid.some
}
}
// ISO 639-1 check: two letters, and the JDK Locale resolves it to a real display
// language (unknown codes make getDisplayLanguage echo the code itself, failing b1).
def isValidISO639_1Language(lang: String): Boolean = {
val b0: Boolean = lang.length == 2
val b1: Boolean = new Locale(lang).getDisplayLanguage.toLowerCase != lang.toLowerCase
b0 && b1
}
}
| DANS-KNAW/easy-split-multi-deposit | src/main/scala/nl.knaw.dans.easy.multideposit/parser/AudioVideoParser.scala | Scala | apache-2.0 | 9,455 |
package debop4s.data.orm.mapping.associations
import java.security.Timestamp
import java.util.Date
import java.{lang, util}
import javax.persistence._
import debop4s.core.ValueObject
import debop4s.core.utils.Hashs
import debop4s.data.orm.AbstractJpaJUnitSuite
import debop4s.data.orm.mapping.associations.ProductStatus.ProductStatus
import debop4s.data.orm.model.HibernateEntityBase
import org.hibernate.annotations.{LazyCollection, LazyCollectionOption, Parent}
import org.junit.Test
import org.springframework.transaction.annotation.Transactional
import scala.beans.BeanProperty
/**
* OneToManySetJUnitSuite
* Created by debop on 2014. 3. 6.
*/
/**
 * JPA integration tests for one-to-many associations backed by a `java.util.Set`:
 * a bidirectional `@OneToMany` (BiddingItem/Bid) and an `@ElementCollection`
 * of embeddables (ProductItem/ProductImage).
 */
class OneToManySetJUnitSuite extends AbstractJpaJUnitSuite {
  // Injected by the container; remains null until the persistence context is wired in.
  @PersistenceContext val em: EntityManager = null

  /** Persisting the item cascades to its bids; removing a bid from the set
    * orphan-deletes it; removing the item deletes the remaining bids. */
  @Test
  @Transactional
  def bidding() {
    val item = new BiddingItem()
    // Bid's auxiliary constructor registers each bid on item.bids (both sides linked).
    val bid1 = new Bid(item, new java.math.BigDecimal(100.0))
    val bid2 = new Bid(item, new java.math.BigDecimal(200.0))
    em.persist(item)
    // flush + clear so the next find() hits the database, not the L1 cache.
    em.flush()
    em.clear()
    val loaded = em.find(classOf[BiddingItem], item.id)
    assert(loaded != null)
    assert(loaded.bids.size == 2)
    // Drop one bid; orphanRemoval on BiddingItem.bids should delete the row.
    val bid11 = loaded.bids.iterator().next()
    loaded.bids.remove(bid11)
    em.persist(loaded)
    em.flush()
    em.clear()
    val loaded2 = em.find(classOf[BiddingItem], item.id)
    assert(loaded2 != null)
    assert(loaded2.bids.size == 1)
    // Deleting the parent must cascade to the remaining bid.
    em.remove(loaded2)
    em.flush()
    assert(em.find(classOf[BiddingItem], item.id) == null)
  }

  /** Element-collection round trip: images persist with the item, clearing
    * the set empties the collection table, removing the item removes all. */
  @Test
  @Transactional
  def productTest() {
    val item = new ProductItem()
    val image1 = new ProductImage()
    item.images.add(image1)
    // Set the @Parent reference back to the owning item (via the bean setter here).
    image1.setItem(item)
    image1.name = "image1"
    val image2 = new ProductImage()
    item.images.add(image2)
    // Same back-reference, assigned directly this time.
    image2.item = item
    image2.name = "image2"
    item.status = ProductStatus.Active
    em.persist(item)
    em.flush()
    em.clear()
    val loaded = em.find(classOf[ProductItem], item.id)
    assert(loaded != null)
    assert(loaded.images.size == 2)
    // Clearing the element collection should delete all image rows on flush.
    loaded.images.clear()
    em.persist(loaded)
    em.flush()
    em.clear()
    val loaded2 = em.find(classOf[ProductItem], item.id)
    assert(loaded2 != null)
    assert(loaded2.images.size == 0)
    em.remove(loaded2)
    em.flush()
    assert(em.find(classOf[ProductItem], item.id) == null)
  }
}
@Entity
@Access(AccessType.FIELD)
class Bid extends HibernateEntityBase[lang.Long] {
def this(item: BiddingItem, amount: java.math.BigDecimal) {
this()
this.item = item
this.item.bids.add(this)
this.amount = amount
}
@Id
@GeneratedValue
var id: lang.Long = _
def getId = id
@ManyToOne
@JoinColumn(name = "itemId")
var item: BiddingItem = _
@Column(nullable = false)
var amount: java.math.BigDecimal = _
@Transient
var timestamp: Timestamp = _
override def hashCode(): Int = Hashs.compute(amount)
}
/** An auction item owning a set of [[Bid]]s (inverse side, `mappedBy = "item"`). */
@Entity
@Access(AccessType.FIELD)
class BiddingItem extends HibernateEntityBase[lang.Long] {
  @Id
  @GeneratedValue
  var id: lang.Long = _
  def getId = id
  var name: String = _
  var description: String = _
  // Cascade everything and delete bids removed from the set (orphanRemoval);
  // EXTRA lazy lets size() be answered with a query instead of loading the set.
  @OneToMany(mappedBy = "item", cascade = Array(CascadeType.ALL), orphanRemoval = true)
  @LazyCollection(value = LazyCollectionOption.EXTRA)
  var bids: util.Set[Bid] = new util.HashSet[Bid]()
  override def hashCode(): Int = Hashs.compute(name)
}
/** Embeddable image value stored in [[ProductItem]]'s element collection. */
@Embeddable
@Access(AccessType.FIELD)
class ProductImage extends ValueObject {
  // An exception is thrown if @BeanProperty is missing here.
  // (@Parent points back to the owning ProductItem; Hibernate needs the bean accessors.)
  @Parent
  @BeanProperty
  var item: ProductItem = _
  @BeanProperty
  var name: String = _
  @BeanProperty
  var filename: String = _
  @BeanProperty
  var sizeX: lang.Integer = 0
  @BeanProperty
  var sizeY: lang.Integer = 0
  // Value semantics: identity is determined by name and filename only.
  override def hashCode(): Int = Hashs.compute(name, filename)
}
/** A product with pricing, sale window, a status enum persisted as an int,
  * and an element collection of [[ProductImage]]s. */
@Entity
@Access(AccessType.FIELD)
class ProductItem extends HibernateEntityBase[lang.Long] {
  @Id
  @GeneratedValue
  var id: lang.Long = _
  def getId = id
  var name : String = _
  var description : String = _
  var initialPrice: java.math.BigDecimal = _
  var reservePrice: java.math.BigDecimal = _
  @Temporal(TemporalType.DATE)
  var startDate: Date = _
  @Temporal(TemporalType.DATE)
  var endDate: Date = _
  // Raw column value; the typed view is exposed through status/status_= below.
  @Column(name = "status")
  var statusInt: lang.Integer = _
  // Map the stored int to the ProductStatus enumeration and back.
  def status: ProductStatus = ProductStatus(statusInt)
  def status_=(v: ProductStatus.Value) {
    statusInt = v.id
  }
  // Images live in the ProductItemImage table keyed by ProductItemId.
  @CollectionTable(name = "ProductItemImage", joinColumns = Array(new JoinColumn(name = "ProductItemId")))
  @ElementCollection(targetClass = classOf[ProductImage])
  @org.hibernate.annotations.Cascade(Array(org.hibernate.annotations.CascadeType.ALL))
  var images: util.Set[ProductImage] = new util.HashSet[ProductImage]()
  /** Removes the image from the collection; true if it was present. */
  def removeImage(image: ProductImage): Boolean = {
    images.remove(image)
  }
  override def hashCode(): Int = Hashs.compute(name)
}
/** Lifecycle states of a [[ProductItem]], persisted through their numeric ids. */
object ProductStatus extends Enumeration {
  type ProductStatus = Value

  // Ids are stable: they are what gets written to the `status` column.
  val Unknown: ProductStatus = Value(0, "Unknown")
  val Active: ProductStatus = Value(1, "Active")
  val Inactive: ProductStatus = Value(2, "Inactive")
}
} | debop/debop4s | debop4s-data-orm/src/test/scala/debop4s/data/orm/mapping/associations/OneToManySetJUnitSuite.scala | Scala | apache-2.0 | 5,079 |
package io.vamp.container_driver.kubernetes
import io.vamp.common.{ Config, Namespace }
import scala.concurrent.duration.FiniteDuration
/** Factory that assembles a [[K8sClientConfig]] from the application
  * configuration under the Kubernetes container-driver's config path. */
object K8sClientConfig {
  import KubernetesContainerDriver._
  /** Reads every client setting from configuration (within the implicit
    * namespace) and pairs it with the given Kubernetes namespace. */
  def apply(kubernetesNamespace: String)(implicit namespace: Namespace): K8sClientConfig = {
    K8sClientConfig(
      url = Config.string(s"$config.url")(),
      bearer = Config.string(s"$config.bearer")(),
      token = Config.string(s"$config.token")(),
      username = Config.string(s"$config.username")(),
      password = Config.string(s"$config.password")(),
      serverCaCert = Config.string(s"$config.server-ca-cert")(),
      clientCert = Config.string(s"$config.client-cert")(),
      privateKey = Config.string(s"$config.private-key")(),
      namespace = kubernetesNamespace,
      tlsCheck = Config.boolean(s"$config.tls-check")()
    )
  }
}
/** Connection settings for a Kubernetes API server: endpoint, the various
  * supported credentials (bearer/token/basic auth/client certificates), the
  * target namespace and whether TLS certificates are verified. */
case class K8sClientConfig(
  url: String,          // API server endpoint
  bearer: String,       // bearer header value
  token: String,        // service-account token
  username: String,     // basic-auth user
  password: String,     // basic-auth password
  serverCaCert: String, // CA certificate of the server
  clientCert: String,   // client certificate
  privateKey: String,   // client private key
  namespace: String,    // Kubernetes namespace to operate in
  tlsCheck: Boolean     // verify TLS certificates when true
)
/** Factory that reads the Kubernetes driver's cache TTLs from configuration. */
object K8sCacheConfig {
  import KubernetesContainerDriver._
  /** Builds the cache configuration from `<config>.cache.*` durations. */
  def apply()(implicit namespace: Namespace): K8sCacheConfig = {
    K8sCacheConfig(
      readTimeToLivePeriod = Config.duration(s"$config.cache.read-time-to-live")(),
      writeTimeToLivePeriod = Config.duration(s"$config.cache.write-time-to-live")(),
      failureTimeToLivePeriod = Config.duration(s"$config.cache.failure-time-to-live")()
    )
  }
}
/** Time-to-live periods for the Kubernetes driver's cache: how long read
  * results, write results and failures remain valid. */
case class K8sCacheConfig(
  readTimeToLivePeriod: FiniteDuration,    // TTL for cached reads
  writeTimeToLivePeriod: FiniteDuration,   // TTL for cached writes
  failureTimeToLivePeriod: FiniteDuration  // TTL for cached failures
)
| magneticio/vamp | kubernetes/src/main/scala/io/vamp/container_driver/kubernetes/K8sClientConfig.scala | Scala | apache-2.0 | 1,713 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.sql.{DataFrame, QueryTest, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.hive.test.TestHiveSingleton
// TODO ideally we should put the test suite into the package `sql`, as
// `hive` package is optional in compiling, however, `SQLContext.sql` doesn't
// support the `cube` or `rollup` yet.
/** Verifies that the DataFrame `rollup`/`cube` APIs produce the same results
  * as the equivalent HiveQL `GROUP BY ... WITH ROLLUP/CUBE` statements. */
class HiveDataFrameAnalyticsSuite extends QueryTest with TestHiveSingleton with BeforeAndAfterAll {
  import spark.implicits._
  import spark.sql
  // Shared fixture: rows (a, b) = (1,2), (2,2), (3,4), also registered as "mytable".
  private var testData: DataFrame = _
  override def beforeAll() {
    super.beforeAll()
    testData = Seq((1, 2), (2, 2), (3, 4)).toDF("a", "b")
    testData.createOrReplaceTempView("mytable")
  }
  override def afterAll(): Unit = {
    try {
      // Drop the temp view first, then let the Hive singleton tear down.
      spark.catalog.dropTempView("mytable")
    } finally {
      super.afterAll()
    }
  }
  test("rollup") {
    // Rollup over an expression and a column, compared against the SQL form.
    checkAnswer(
      testData.rollup($"a" + $"b", $"b").agg(sum($"a" - $"b")),
      sql("select a + b, b, sum(a - b) from mytable group by a + b, b with rollup").collect()
    )
    checkAnswer(
      testData.rollup("a", "b").agg(sum("b")),
      sql("select a, b, sum(b) from mytable group by a, b with rollup").collect()
    )
  }
  test("cube") {
    // Same comparisons for cube.
    checkAnswer(
      testData.cube($"a" + $"b", $"b").agg(sum($"a" - $"b")),
      sql("select a + b, b, sum(a - b) from mytable group by a + b, b with cube").collect()
    )
    checkAnswer(
      testData.cube("a", "b").agg(sum("b")),
      sql("select a, b, sum(b) from mytable group by a, b with cube").collect()
    )
  }
}
| gioenn/xSpark | sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveDataFrameAnalyticsSuite.scala | Scala | apache-2.0 | 2,414 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.common.dal.model.provider
import java.io.File
import java.net.URL
import java.util.{Date, UUID}
import com.bwsw.common.embedded.{EmbeddedElasticsearch, EmbeddedKafka, EmbeddedMongo}
import com.bwsw.sj.common.MongoAuthChecker
import com.bwsw.sj.common.config.{ConfigLiterals, SettingsUtils}
import com.bwsw.sj.common.dal.model.ConfigurationSettingDomain
import com.bwsw.sj.common.dal.repository.ConnectionRepository
import com.bwsw.sj.common.si.model.FileMetadataLiterals
import com.bwsw.sj.common.utils.{MessageResourceUtils, NetworkUtils, ProviderLiterals}
import org.apache.commons.io.FileUtils
import org.apache.curator.test.TestingServer
import org.mockserver.integration.ClientAndServer
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{Assertion, BeforeAndAfterAll, FlatSpec, Matchers}
import ru.yandex.qatools.embed.postgresql.EmbeddedPostgres
import ru.yandex.qatools.embed.postgresql.distribution.Version
import scaldi.{Injector, Module}
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
/**
* Integration tests for [[ProviderDomain]]
*
* @author Pavel Tomskikh
*/
/**
 * Integration tests for [[ProviderDomain]].
 *
 * Each test starts two real embedded servers of one provider type and runs
 * the shared scenario in [[testProviderConnection]]: `checkConnection` must
 * report no errors while both servers run, exactly the first server's error
 * after it stops, and both errors once the second stops as well.
 *
 * @author Pavel Tomskikh
 */
class ProviderDomainIntegrationTests extends FlatSpec with Matchers with BeforeAndAfterAll with MockitoSugar {
  val messageResourceUtils = new MessageResourceUtils
  // Timeout (ms) handed to ProviderDomain.checkConnection.
  val zkTimeout = 1000
  // Created unstarted; ZooKeeper-based tests call restart() before use,
  // and afterAll() closes them.
  val zkServer1 = new TestingServer(false)
  val zkServer2 = new TestingServer(false)

  "ProviderDomain" should "check connection to the ZooKeeper server properly" in {
    zkServer1.restart()
    zkServer2.restart()
    val address1 = zkServer1.getConnectString
    val address2 = zkServer2.getConnectString
    val provider = new ProviderDomain(
      "zk-provider",
      "description",
      Array(address1, address2),
      ProviderLiterals.zookeeperType,
      new Date())
    val wrappedServer1 = new ServerWrapper(createZkConnectionError(address1), () => zkServer1.stop())
    val wrappedServer2 = new ServerWrapper(createZkConnectionError(address2), () => zkServer2.stop())
    // Run the scenario inside Try so both servers are stopped even on failure,
    // then rethrow any assertion error via result.get.
    val result = Try(testProviderConnection(provider, wrappedServer1, wrappedServer2))
    wrappedServer1.stop()
    wrappedServer2.stop()
    result.get
  }

  it should "check connection to the RESTful server properly" in {
    val server1 = new ClientAndServer()
    val server2 = new ClientAndServer()
    val address1 = "localhost:" + server1.getPort
    val address2 = "localhost:" + server2.getPort
    val provider = new ProviderDomain(
      "rest-provider",
      "description",
      Array(address1, address2),
      ProviderLiterals.restType,
      new Date())
    val wrappedServer1 = new ServerWrapper(createRestConnectionError(address1), () => server1.stop())
    val wrappedServer2 = new ServerWrapper(createRestConnectionError(address2), () => server2.stop())
    val result = Try(testProviderConnection(provider, wrappedServer1, wrappedServer2))
    wrappedServer1.stop()
    wrappedServer2.stop()
    result.get
  }

  it should "check connection to the Kafka server properly" in {
    // Each embedded Kafka broker gets its own ZooKeeper ensemble.
    zkServer1.restart()
    zkServer2.restart()
    val server1 = new EmbeddedKafka(Some(zkServer1.getConnectString))
    val server2 = new EmbeddedKafka(Some(zkServer2.getConnectString))
    val address1 = "localhost:" + server1.port
    val address2 = "localhost:" + server2.port
    server1.start()
    server2.start()
    val provider = new ProviderDomain(
      "kafka-provider",
      "description",
      Array(address1, address2),
      ProviderLiterals.kafkaType,
      new Date())
    val wrappedServer1 = new ServerWrapper(createKafkaConnectionError(address1), () => server1.stop())
    val wrappedServer2 = new ServerWrapper(createKafkaConnectionError(address2), () => server2.stop())
    val result = Try(testProviderConnection(provider, wrappedServer1, wrappedServer2))
    wrappedServer1.stop()
    wrappedServer2.stop()
    zkServer1.stop()
    zkServer2.stop()
    result.get
  }

  it should "check connection to the PostgreSQL database properly" in {
    // JDBC checking needs a real ConnectionRepository (backed by embedded Mongo)
    // so the driver jar and its config entries can be looked up.
    val mongoPort = NetworkUtils.findFreePort()
    val mongoServer = new EmbeddedMongo(mongoPort)
    mongoServer.start()
    val connectionRepository = createConnectionRepository(mongoPort)
    val injector = new Module {
      bind[ConnectionRepository] to connectionRepository
      bind[SettingsUtils] to new SettingsUtils()
    }.injector
    val user = "test-user"
    val password = "test-password"
    val database = "testDB"
    val version = Version.V9_6_2
    val server1 = new EmbeddedPostgres(version)
    val server1Port = NetworkUtils.findFreePort()
    server1.start("localhost", server1Port, database, user, password)
    val server2 = new EmbeddedPostgres(version)
    val server2Port = NetworkUtils.findFreePort()
    server2.start("localhost", server2Port, database, user, password)
    val address1 = "localhost:" + server1Port
    val address2 = "localhost:" + server2Port
    val driverName = "postgresql"
    loadJdbcDriver(connectionRepository, driverName)
    val provider = new JDBCProviderDomain(
      "rest-provider",
      "description",
      Array(address1, address2),
      user,
      password,
      driverName,
      new Date())
    val wrappedServer1 = new ServerWrapper(createJdbcConnectionError(address1), () => server1.stop())
    val wrappedServer2 = new ServerWrapper(createJdbcConnectionError(address2), () => server2.stop())
    val result = Try(testProviderConnection(provider, wrappedServer1, wrappedServer2, injector))
    wrappedServer1.stop()
    wrappedServer2.stop()
    mongoServer.stop()
    result.get
  }

  it should "check connection to the Elasticsearch database properly" in {
    val server1 = new EmbeddedElasticsearch(NetworkUtils.findFreePort())
    server1.start()
    val server2 = new EmbeddedElasticsearch(NetworkUtils.findFreePort())
    server2.start()
    val address1 = "localhost:" + server1.port
    val address2 = "localhost:" + server2.port
    val provider = new ESProviderDomain(
      "es-provider",
      "description",
      Array(address1, address2),
      null,
      null,
      new Date())
    val wrappedServer1 = new ServerWrapper(createEsConnectionError(address1), () => server1.stop())
    val wrappedServer2 = new ServerWrapper(createEsConnectionError(address2), () => server2.stop())
    val result = Try(testProviderConnection(provider, wrappedServer1, wrappedServer2))
    wrappedServer1.stop()
    wrappedServer2.stop()
    result.get
  }

  override def afterAll(): Unit = {
    zkServer1.close()
    zkServer2.close()
  }

  /** Shared scenario: no errors with both servers up, then exactly the
    * stopped servers' error messages, accumulated in stop order. */
  private def testProviderConnection(provider: ProviderDomain,
                                     server1: ServerWrapper,
                                     server2: ServerWrapper,
                                     injector: Injector = mock[Injector]): Assertion = {
    provider.checkConnection(zkTimeout)(injector) shouldBe empty
    server1.stop()
    provider.checkConnection(zkTimeout)(injector) shouldBe ArrayBuffer(server1.connectionError)
    server2.stop()
    provider.checkConnection(zkTimeout)(injector) shouldBe ArrayBuffer(server1.connectionError, server2.connectionError)
  }

  // Expected error messages, built from the same resource bundle the
  // production code uses, one helper per provider type.
  private def createZkConnectionError(address: String): String =
    messageResourceUtils.createMessage("rest.providers.provider.cannot.connect.zk", address)
  private def createRestConnectionError(address: String): String =
    messageResourceUtils.createMessage("rest.providers.provider.cannot.connect.rest", address)
  private def createKafkaConnectionError(address: String): String =
    messageResourceUtils.createMessage("rest.providers.provider.cannot.connect.kafka", address)
  private def createJdbcConnectionError(address: String): String =
    messageResourceUtils.createMessage("rest.providers.provider.cannot.connect.jdbc", address)
  private def createEsConnectionError(address: String): String =
    messageResourceUtils.createMessage("rest.providers.provider.cannot.connect.es", address)

  /** Repository pointing at the embedded Mongo instance on the given port. */
  private def createConnectionRepository(mongoPort: Int): ConnectionRepository = {
    val mongoAddress = "localhost:" + mongoPort
    val mongoDatabase = "stream-juggler-test"
    val mongoAuthChecker = new MongoAuthChecker(mongoAddress, mongoDatabase)
    new ConnectionRepository(mongoAuthChecker, mongoAddress, mongoDatabase, None, None)
  }

  /** Downloads the PostgreSQL JDBC driver jar into file storage and registers
    * the filename/class/prefix configuration entries the JDBC check needs. */
  private def loadJdbcDriver(connectionRepository: ConnectionRepository, driverName: String): Unit = {
    val driverUrl = new URL("https://jdbc.postgresql.org/download/postgresql-42.1.3.jar")
    val driverFilename = s"postgresql-driver-${UUID.randomUUID().toString}.jar"
    val driverFile = new File(driverFilename)
    FileUtils.copyURLToFile(driverUrl, driverFile)
    connectionRepository.getFileStorage.put(
      driverFile,
      driverFilename,
      Map.empty[String, Any],
      FileMetadataLiterals.customFileType)
    driverFile.delete()
    val driverFilenameConfig = ConfigLiterals.getDriverFilename(driverName)
    val driverClassConfig = ConfigLiterals.getDriverClass(driverName)
    val driverPrefixConfig = ConfigLiterals.getDriverPrefix(driverName)
    val configService = connectionRepository.getConfigRepository
    configService.save(ConfigurationSettingDomain(driverFilenameConfig, driverFilename, ConfigLiterals.jdbcDomain, new Date()))
    configService.save(ConfigurationSettingDomain(driverClassConfig, "org.postgresql.Driver", ConfigLiterals.jdbcDomain, new Date()))
    configService.save(ConfigurationSettingDomain(driverPrefixConfig, "jdbc:postgresql", ConfigLiterals.jdbcDomain, new Date()))
  }
}
/** Couples a running server's expected connection-error message with the
  * action that shuts the server down, making repeated stop() calls no-ops. */
class ServerWrapper(val connectionError: String, stoppingMethod: () => Unit) {
  // True until the shutdown action has completed once.
  private var running = true

  /** Invokes the shutdown action on the first call only; the flag is cleared
    * after the action, so a failed shutdown can be retried. */
  def stop(): Unit =
    if (running) {
      stoppingMethod()
      running = false
    }
}
| bwsw/sj-platform | core/sj-common/src/test/scala-2.12/com/bwsw/sj/common/dal/model/provider/ProviderDomainIntegrationTests.scala | Scala | apache-2.0 | 10,561 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples
import java.util.Random
import breeze.linalg.{DenseVector, Vector}
/**
* Logistic regression based classification.
*
* This is an example implementation for learning how to use Spark. For more conventional use,
* please refer to org.apache.spark.ml.classification.LogisticRegression.
*/
object LocalLR {
  val N = 10000  // Number of data points
  val D = 10   // Number of dimensions
  val R = 0.7  // Scaling factor
  val ITERATIONS = 5
  // Fixed seed keeps the generated data set and initial weights reproducible.
  val rand = new Random(42)

  case class DataPoint(x: Vector[Double], y: Double)

  /** Synthesizes N labeled points: alternating labels -1/+1, features drawn
    * from a Gaussian shifted by y * R so the classes are separable. */
  def generateData: Array[DataPoint] = {
    def generatePoint(i: Int): DataPoint = {
      val y = if (i % 2 == 0) -1 else 1
      val x = DenseVector.fill(D) {rand.nextGaussian + y * R}
      DataPoint(x, y)
    }
    Array.tabulate(N)(generatePoint)
  }

  def showWarning() {
    System.err.println(
      """WARN: This is a naive implementation of Logistic Regression and is given as an example!
        |Please use org.apache.spark.ml.classification.LogisticRegression
        |for more conventional use.
      """.stripMargin)
  }

  /** Runs ITERATIONS steps of full-batch gradient descent on the logistic
    * loss (fixed step size 1), printing w before and after. */
  def main(args: Array[String]) {
    showWarning()
    val data = generateData
    // Initialize w to a random value
    val w = DenseVector.fill(D) {2 * rand.nextDouble - 1}
    println(s"Initial w: $w")
    for (i <- 1 to ITERATIONS) {
      println(s"On iteration $i")
      val gradient = DenseVector.zeros[Double](D)
      for (p <- data) {
        // Per-point logistic-loss gradient coefficient: (sigmoid(y * w.x) - 1) * y.
        val scale = (1 / (1 + math.exp(-p.y * (w.dot(p.x)))) - 1) * p.y
        gradient += p.x * scale
      }
      // Descend along the accumulated batch gradient (in place).
      w -= gradient
    }
    println(s"Final w: $w")
  }
}
// scalastyle:on println
| lxsmnv/spark | examples/src/main/scala/org/apache/spark/examples/LocalLR.scala | Scala | apache-2.0 | 2,480 |
package lila.app
package templating
import scala.util.Random.shuffle
import controllers._
import play.api.i18n.{ Lang, Messages }
import play.api.mvc.{ RequestHeader, Call }
import play.twirl.api.Html
import lila.i18n.Env.{ current => i18nEnv }
import lila.i18n.{ LangList, I18nDomain, I18nKey }
import lila.user.UserContext
/** Template helpers for internationalization: translation lookup, language
  * names, per-request language resolution and hreflang/fallback link markup. */
trait I18nHelper {
  private def pool = i18nEnv.pool
  private def transInfos = i18nEnv.transInfos
  private def hideCallsCookieName = i18nEnv.hideCallsCookieName
  lazy val trans = i18nEnv.keys
  lazy val protocol = i18nEnv.RequestHandlerProtocol
  // Resolve the request's language through the i18n pool.
  implicit def lang(implicit ctx: UserContext) = pool lang ctx.req
  /** Translates `key` with `args` into the given language. */
  def transKey(key: String, args: Seq[Any] = Nil)(implicit lang: Lang): String =
    i18nEnv.translator.transTo(key, args)(lang)
  /** Dumps the given keys as a JS object for client-side translation. */
  def i18nJsObject(keys: I18nKey*)(implicit lang: Lang) =
    i18nEnv.jsDump.keysToObject(keys, lang)
  def langName(lang: Lang): Option[String] = langName(lang.language)
  def langName(lang: String): Option[String] = LangList name lang
  def shortLangName(lang: Lang): Option[String] = shortLangName(lang.language)
  // Short form: everything before the first comma of the full language name.
  def shortLangName(lang: String): Option[String] = langName(lang) map (_ takeWhile (','!=))
  /** Picks an incomplete translation matching the user's accept-languages to
    * ask for help with — only for logged-in users with >= 8000 games who have
    * not set the hide-calls cookie.
    * NOTE(review): `??` is presumably lila's "value if condition else empty"
    * helper — confirm. */
  def translationCall(implicit ctx: UserContext) =
    if (ctx.isAnon || ctx.req.cookies.get(hideCallsCookieName).isDefined) None
    else (~ctx.me.map(_.count.game) >= 8000) ?? shuffle(
      (ctx.req.acceptLanguages map transInfos.get).flatten filter (_.nonComplete)
    ).headOption
  // Translations containing %s must keep the placeholder; used as an input pattern.
  def transValidationPattern(trans: String) =
    (trans contains "%s") option ".*%s.*"
  /** Links to up to 3 preferred alternative languages for the current page;
    * the URI placeholder is substituted with the request's URI. */
  def langFallbackLinks(implicit ctx: UserContext) = Html {
    pool.preferredNames(ctx.req, 3).map {
      case (code, name) => """<a class="lang_fallback" lang="%s" href="%s">%s</a>""".format(
        code, langUrl(Lang(code))(I18nDomain(ctx.req.domain)), name)
    }.mkString("").replace(uriPlaceholder, ctx.req.uri)
  }
  // hreflang alternate links for every pooled language except a few excluded
  // codes; '%' marks where the request URI is spliced in per request.
  private lazy val langAnnotationsBase: String =
    pool.names.keySet diff Set("fp", "kb", "le", "tp", "pi", "io") map { code =>
      s"""<link rel="alternate" hreflang="$code" href="http://$code.lichess.org%"/>"""
    } mkString ""
  def langAnnotations(implicit ctx: UserContext) = Html {
    langAnnotationsBase.replace("%", ctx.req.uri)
  }
  def commonDomain(implicit ctx: UserContext): String =
    I18nDomain(ctx.req.domain).commonDomain
  // Distinct language codes from the request's Accept-Language header.
  def acceptLanguages(implicit ctx: UserContext): List[String] =
    ctx.req.acceptLanguages.map(_.language.toString).toList.distinct
  def acceptsLanguage(lang: Lang)(implicit ctx: UserContext): Boolean =
    ctx.req.acceptLanguages exists (_.language == lang.language)
  private val uriPlaceholder = "[URI]"
  // Absolute URL for the language-specific domain, URI substituted later.
  private def langUrl(lang: Lang)(i18nDomain: I18nDomain) =
    protocol + (i18nDomain withLang lang).domain + uriPlaceholder
}
| Happy0/lila | app/templating/I18hHelper.scala | Scala | mit | 2,782 |
/*^
===========================================================================
TwoBinManager
===========================================================================
Copyright (C) 2016-2017 Gianluca Costa
===========================================================================
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program. If not, see
<http://www.gnu.org/licenses/gpl-3.0.html>.
===========================================================================
*/
package info.gianlucacosta.twobinmanager.db
import java.util.UUID
import javax.persistence.{EntityManagerFactory, NoResultException}
import info.gianlucacosta.helios.jpa.Includes._
import info.gianlucacosta.twobinmanager.db.DbConversions._
import info.gianlucacosta.twobinpack.core.Problem
import info.gianlucacosta.twobinpack.io.repositories.ProblemRepository
import scala.collection.JavaConversions._
/**
* ProblemRepository backed by ORM on a database
*
* @param entityManagerFactory
*/
/**
 * ProblemRepository backed by ORM on a database.
 *
 * Conversions between the domain [[Problem]] and the persisted
 * [[ProblemEntity]] happen implicitly via DbConversions; the named queries
 * used below are registered on the entity-manager factory at construction.
 *
 * @param entityManagerFactory factory supplying units of work / transactions
 */
class DbProblemRepository(entityManagerFactory: EntityManagerFactory) extends ProblemRepository {
  /** Looks a problem up by primary key; Option(...) maps a null result to None. */
  override def findById(id: UUID): Option[Problem] = {
    entityManagerFactory.runUnitOfWork(entityManager => {
      val problemEntity =
        entityManager.find(classOf[ProblemEntity], id)
      Option(problemEntity)
    })
  }

  /** Persists a new problem (implicitly converted to its entity form). */
  override def add(problem: Problem): Unit = {
    entityManagerFactory.runTransaction(entityManager => {
      entityManager.persist(problem: ProblemEntity)
    })
  }

  /** Merges the problem's current state into the persistence context. */
  override def update(problem: Problem): Unit = {
    entityManagerFactory.runTransaction(entityManager => {
      entityManager.merge(problem: ProblemEntity)
    })
  }

  entityManagerFactory.addNamedQueryFor(
    getClass,
    "findAllNames",
    """
    SELECT problem.name
    FROM ProblemEntity problem
    ORDER BY problem.name
    """
  )

  /** All problem names in ascending alphabetical order. */
  override def findAllNamesSorted(): Iterable[String] = {
    entityManagerFactory.runUnitOfWork(entityManager => {
      entityManager
        .createNamedQueryFor(
          getClass,
          "findAllNames",
          classOf[String]
        )
        .getResultList
    })
  }

  entityManagerFactory.addNamedQueryFor(
    getClass,
    "findByName",
    """
    SELECT problem
    FROM ProblemEntity problem
    WHERE problem.name = :name
    """
  )

  /** Finds the problem with the given name; NoResultException becomes None. */
  def findByName(name: String): Option[Problem] = {
    entityManagerFactory.runUnitOfWork(entityManager => {
      val query =
        entityManager.createNamedQueryFor(
          getClass,
          "findByName",
          classOf[ProblemEntity]
        )
        .setParameter("name", name)
      try
        Some(query.getSingleResult)
      catch {
        case ex: NoResultException =>
          None
      }
    })
  }

  entityManagerFactory.addNamedQueryFor(
    getClass,
    "removeByName",
    """
    DELETE FROM ProblemEntity problem
    WHERE problem.name = :name
    """
  )

  /** Deletes the problem with the given name, if any (bulk delete query). */
  override def removeByName(name: String): Unit = {
    entityManagerFactory.runTransaction(entityManager => {
      entityManager.createNamedQueryFor(
        getClass,
        "removeByName"
      )
      .setParameter("name", name)
      .executeUpdate()
    })
  }

  entityManagerFactory.addNamedQueryFor(
    getClass,
    "removeAll",
    """
    DELETE FROM ProblemEntity problem
    """
  )

  /** Deletes every stored problem. */
  override def removeAll(): Unit =
    entityManagerFactory.runTransaction(entityManager => {
      entityManager.createNamedQueryFor(
        getClass,
        "removeAll"
      )
      .executeUpdate()
    })

  entityManagerFactory.addNamedQueryFor(
    getClass,
    "count",
    """
    SELECT COUNT(id)
    FROM ProblemEntity
    """
  )

  /** Number of stored problems. */
  override def count(): Long =
    entityManagerFactory.runUnitOfWork(entityManager => {
      entityManager
        .createNamedQueryFor(
          getClass,
          "count",
          classOf[java.lang.Long]
        )
        .getSingleResult
    })
}
| giancosta86/TwoBinManager | src/main/scala/info/gianlucacosta/twobinmanager/db/DbProblemRepository.scala | Scala | gpl-3.0 | 4,474 |
/*
* Copyright 2009-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.json4s
package scalaz
import _root_.scalaz._
import syntax.validation._
/** JSON codecs for tuples of arity 2-6: each tuple is encoded as a JSON
  * array of its elements in order, and decoded positionally from a JArray of
  * at least that many elements; any other JSON shape is an UnexpectedJSONError
  * collected in the Validation result. */
trait Tuples { this: Types =>
  implicit def Tuple2JSON[A: JSON, B: JSON]: JSON[(A, B)] = new JSON[(A, B)] {
    def read(json: JValue) = json match {
      // Extra elements beyond the arity are ignored (`:: _` matches the rest).
      case JArray(a :: b :: _) =>
        Apply[Result].apply2(fromJSON[A](a),fromJSON[B](b))(Tuple2.apply)
      case x => UnexpectedJSONError(x, classOf[JArray]).failureNel
    }
    def write(value: (A, B)) = JArray(toJSON(value._1) :: toJSON(value._2) :: Nil)
  }

  implicit def Tuple3JSON[A: JSON, B: JSON, C: JSON]: JSON[(A, B, C)] = new JSON[(A, B, C)] {
    def read(json: JValue) = json match {
      case JArray(a :: b :: c :: _) =>
        Apply[Result].apply3(fromJSON[A](a),fromJSON[B](b),fromJSON[C](c))(Tuple3.apply)
      case x => UnexpectedJSONError(x, classOf[JArray]).failureNel
    }
    def write(value: (A, B, C)) = JArray(toJSON(value._1) :: toJSON(value._2) :: toJSON(value._3) :: Nil)
  }

  implicit def Tuple4JSON[A: JSON, B: JSON, C: JSON, D: JSON]: JSON[(A, B, C, D)] = new JSON[(A, B, C, D)] {
    def read(json: JValue) = json match {
      case JArray(a :: b :: c :: d :: _) =>
        Apply[Result].apply4(fromJSON[A](a),fromJSON[B](b),fromJSON[C](c),fromJSON[D](d))(Tuple4.apply)
      case x => UnexpectedJSONError(x, classOf[JArray]).failureNel
    }
    def write(value: (A, B, C, D)) = JArray(toJSON(value._1) :: toJSON(value._2) :: toJSON(value._3) :: toJSON(value._4) :: Nil)
  }

  implicit def Tuple5JSON[A: JSON, B: JSON, C: JSON, D: JSON, E: JSON]: JSON[(A, B, C, D, E)] = new JSON[(A, B, C, D, E)] {
    def read(json: JValue) = json match {
      case JArray(a :: b :: c :: d :: e :: _) =>
        Apply[Result].apply5(fromJSON[A](a),fromJSON[B](b),fromJSON[C](c),fromJSON[D](d),fromJSON[E](e))(Tuple5.apply)
      case x => UnexpectedJSONError(x, classOf[JArray]).failureNel
    }
    def write(value: (A, B, C, D, E)) = JArray(toJSON(value._1) :: toJSON(value._2) :: toJSON(value._3) :: toJSON(value._4) :: toJSON(value._5) :: Nil)
  }

  implicit def Tuple6JSON[A: JSON, B: JSON, C: JSON, D: JSON, E: JSON, F: JSON]: JSON[(A, B, C, D, E, F)] = new JSON[(A, B, C, D, E, F)] {
    def read(json: JValue) = json match {
      case JArray(a :: b :: c :: d :: e :: f :: _) =>
        Apply[Result].apply6(fromJSON[A](a),fromJSON[B](b),fromJSON[C](c),fromJSON[D](d),fromJSON[E](e),fromJSON[F](f))(Tuple6.apply)
      case x => UnexpectedJSONError(x, classOf[JArray]).failureNel
    }
    def write(value: (A, B, C, D, E, F)) = JArray(toJSON(value._1) :: toJSON(value._2) :: toJSON(value._3) :: toJSON(value._4) :: toJSON(value._5) :: toJSON(value._6) :: Nil)
  }
}
| geggo98/json4s | scalaz/src/main/scala/org/json4s/scalaz/Tuples.scala | Scala | apache-2.0 | 3,274 |
package org.opencompare.analysis.analyzer
/**
* Created by gbecan on 05/04/16.
*/
/** One template occurrence found during analysis: the template's name and the
  * number of arguments it was invoked with. */
case class TemplateResult(name: String, arguments: Int)
| OpenCompare/wikipedia-dump-analysis | src/main/scala/org/opencompare/analysis/analyzer/TemplateResult.scala | Scala | apache-2.0 | 150 |
package io.aecor.liberator.macros
import scala.collection.immutable.Seq
import scala.meta._
/** Macro annotation: expands the annotated trait (and its companion, if it is
  * annotated as a block of trait + object) via [[AlgebraMacro]]. Arguments
  * name "common fields" shared by every operation of the algebra. */
class algebra(commonFields: scala.Symbol*) extends scala.annotation.StaticAnnotation {
  inline def apply(defn: Any): Any = meta {
    // Recover the annotation's own arguments from its syntax tree
    // (`new algebra(...)`). NOTE(review): the pattern extracts String
    // literals (Lit(x: String)) although the parameter is declared Symbol*;
    // confirm this matches how scala.meta represents the actual arguments.
    val commonFields = this match {
      case q"new $_(..$xs)" => xs.map { case ctor"$_(${Lit(x: String)})" => x }.toList
      case _ => Nil
    }
    defn match {
      // Trait with companion object: pass both to the macro.
      case Term.Block(Seq(t: Defn.Trait, companion: Defn.Object)) =>
        AlgebraMacro(commonFields, t, Some(companion))
      // Bare trait: no companion.
      case t: Defn.Trait =>
        AlgebraMacro(commonFields, t, None)
      // Anything else is returned unchanged.
      case other =>
        defn
    }
  }
}
/** Tree-rewriting backend for the [[algebra]] annotation.
  *
  * Given a trait `Foo[..., F[_]]` this generates (into the companion object):
  *  - a sealed `FooOp` ADT with one case per abstract method, each case knowing
  *    how to re-invoke itself against a concrete `Foo` instance,
  *  - `fromFunctionK` / `toFunctionK` converters between `Foo[F]` and a natural
  *    transformation `FooOp ~> F`,
  *  - an implicit `io.aecor.liberator.Algebra` instance tying the two together.
  */
object AlgebraMacro {
  def apply(commonFields: List[String], base: Defn.Trait, companion: Option[Defn.Object]): Term.Block = {
    val typeName = base.name
    // The op ADT is named after the trait, e.g. Foo => FooOp.
    val opName = s"${typeName.value}Op"
    val opTypeName = Type.Name(opName)
    // NOTE(review): `.get` assumes the annotated trait has a non-empty body; an
    // empty trait would make this throw during macro expansion.
    val traitStats = base.templ.stats.get
    // By convention the LAST type parameter is the effect F[_]; all preceding
    // type parameters are carried through as ordinary abstract type parameters.
    val (theF, abstractParams) = (base.tparams.last.name, base.tparams.dropRight(1))
    val abstractTypes = abstractParams.map(_.name.value).map(Type.Name(_))
    // E.g. method `foo` of trait Bar -> "BarOp.Foo".
    def opCaseName(name: Term.Name) = s"$opName.${name.value.capitalize}"
    // Type-lambda encodings (the `({type X[...] = ...})#X` trick) used whenever
    // the trait has extra type parameters besides F.
    val unifiedOp =
      if (abstractTypes.isEmpty) {
        Type.Name(opName)
      } else {
        t"({type X[A] = $opTypeName[..$abstractTypes, A]})#X"
      }
    val unifiedBase =
      if (abstractTypes.isEmpty) {
        base.name
      } else {
        t"({type X[F[_]] = $typeName[..$abstractTypes, F]})#X"
      }
    // Abstract methods whose result type constructor is F become ADT cases.
    val abstractMethods = traitStats.collect {
      case m @ q"def $name[..$tps](..$params): ${someF: Type.Name}[$out]" if someF.value == theF.value =>
        m
      // NOTE(review): unlike the parameterized case above, nullary methods are
      // collected WITHOUT checking that `someF` is F — confirm this is intended.
      case m @ q"def $name: ${someF: Type.Name}[$out]" =>
        m
    }
    // Abstract accessors generated on the op ADT for parameters whose names were
    // listed in the annotation; types are taken from the FIRST parameterized
    // method only, so all methods are assumed to agree on these parameters.
    val commonFieldsStat =
      if (commonFields.nonEmpty) {
        abstractMethods.collectFirst {
          case q"def $name[..$tps](..$params): $_[$out]" => params
        }.map { params =>
          params.collect {
            case param"..$mods $paramname: ${Some(tpe: Type.Arg)} = $expropt" if commonFields.contains(paramname.value) =>
              q"def ${Term.Name(paramname.value)}: ${Type.Name(tpe.toString)}"
          }
        }.getOrElse(Seq.empty)
      } else {
        Seq.empty
      }
    // Everything injected into the companion: the op ADT root, its leaves, the
    // two FunctionK converters and the implicit Algebra instance.
    val companionStats: Seq[Stat] = Seq(
      q"""sealed abstract class $opTypeName[..$abstractParams, A] extends _root_.io.aecor.liberator.Term.Invocation[$unifiedBase, A] with Product with Serializable {
          ..$commonFieldsStat
        }
      """, {
        // One final case class (or case object for nullary methods) per
        // abstract method; `invoke` replays the call on a concrete instance.
        val freeAdtLeafs = abstractMethods.map {
          case q"def $name[..$tps](..$params): $_[$out]" =>
            q"""final case class ${Type.Name(name.value.capitalize)}[..${abstractParams ++ tps}](..$params)
                extends ${Ctor.Name(opName)}[..$abstractTypes, $out] {
               def invoke[F[_]](mf: $typeName[..$abstractTypes, F]): F[$out] = mf.$name(..${params.map(_.name.value).map(Term.Name(_))})
             }
          """
          case m @ q"def $name: ${someF: Type.Name}[$out]" =>
            q"""final case object ${Term.Name(name.value.capitalize)} extends ${Ctor.Name(opName)}[..$abstractTypes, $out] {
               def invoke[F[_]](mf: $typeName[..$abstractTypes, F]): F[$out] = mf.$name
             }
          """
        }
        q"""object ${Term.Name(opName)} {
            ..$freeAdtLeafs
          }"""
      },
      {
        // Interpreter -> trait: each method delegates to the natural transformation.
        val methods = abstractMethods.map {
          case q"def $name[..$tps](..$params): $_[$out]" =>
            val ctor = Ctor.Name(opCaseName(name))
            val args = params.map(_.name.value).map(Term.Name(_))
            q"def $name[..$tps](..$params): F[$out] = f($ctor(..$args))"
          case m @ q"def $name: ${someF: Type.Name}[$out]" =>
            val objectName = Term.Name(opCaseName(name))
            q"def $name: F[$out] = f($objectName)"
        }
        q"""def fromFunctionK[..$abstractParams, F[_]](f: _root_.cats.arrow.FunctionK[$unifiedOp, F]): $typeName[..$abstractTypes, F] =
           new ${Ctor.Name(typeName.value)}[..$abstractTypes, F] {
             ..$methods
           }
      """
      },
      {
        // Trait -> interpreter: an op is executed by replaying it on `ops`.
        q"""def toFunctionK[..$abstractParams, F[_]](ops: $typeName[..$abstractTypes, F]): _root_.cats.arrow.FunctionK[$unifiedOp, F] =
          new _root_.cats.arrow.FunctionK[$unifiedOp, F] {
            def apply[A](invocation: $opTypeName[..$abstractTypes, A]): F[A] =
              invocation.invoke(ops)
          }
       """
      },
      q"""
        implicit def liberatorAlgebraInstance[..$abstractParams]: _root_.io.aecor.liberator.Algebra.Aux[$unifiedBase, $unifiedOp] =
         new _root_.io.aecor.liberator.Algebra[$unifiedBase] {
           type Out[A] = $opTypeName[..$abstractTypes, A]
           final override def toFunctionK[F[_]](of: $typeName[..$abstractTypes, F]): _root_.cats.arrow.FunctionK[$unifiedOp, F] =
             ${Term.Name(typeName.value)}.toFunctionK(of)
           final override def fromFunctionK[F[_]](f: _root_.cats.arrow.FunctionK[$unifiedOp, F]): $typeName[..$abstractTypes, F] =
             ${Term.Name(typeName.value)}.fromFunctionK(f)
           final override def invoke[F[_], A](mf: $unifiedBase[F], f: Out[A]): F[A] = f.invoke(mf)
         }
       """
    )
    // Either splice the generated members in front of an existing companion's
    // members, or synthesize a brand new companion object.
    val newCompanion = companion match {
      case Some(c) =>
        val oldTemplStats = c.templ.stats.getOrElse(Nil)
        c.copy(templ = c.templ.copy(stats = Some(companionStats ++ oldTemplStats)))
      case None =>
        q"object ${Term.Name(typeName.value)} { ..$companionStats }"

    }
    Term.Block(Seq(base, newCompanion))
  }
}
| aecor/liberator | macros/src/main/scala/io/aecor/liberator/macros/algebra.scala | Scala | mit | 5,560 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Input}
/** Computations box CP86 ("Other first year allowances claimed"): an optional
  * integer input value. Kept only for backward compatibility. */
@deprecated("This box is no longer in use")
case class CP86(value: Option[Int]) extends CtBoxIdentifier(name = "Other first year allowances claimed") with CtOptionalInteger with Input
object CP86 {
  /** Convenience constructor wrapping a plain value in `Some`. */
  def apply(int: Int): CP86 = CP86(value = Some(int))
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP86.scala | Scala | apache-2.0 | 962 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.util.NoSuchElementException
import java.util.zip.ZipOutputStream
import javax.servlet.http.{HttpServlet, HttpServletRequest, HttpServletResponse}
import scala.util.control.NonFatal
import scala.xml.Node
import org.eclipse.jetty.servlet.{ServletContextHandler, ServletHolder}
import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.history.config.HISTORY_SERVER_UI_PORT
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.status.api.v1.{ApiRootResource, ApplicationInfo, UIRoot}
import org.apache.spark.ui.{SparkUI, UIUtils, WebUI}
import org.apache.spark.ui.JettyUtils._
import org.apache.spark.util.{ShutdownHookManager, SystemClock, Utils}
/**
* A web server that renders SparkUIs of completed applications.
*
* For the standalone mode, MasterWebUI already achieves this functionality. Thus, the
* main use case of the HistoryServer is in other deploy modes (e.g. Yarn or Mesos).
*
* The logging directory structure is as follows: Within the given base directory, each
* application's event logs are maintained in the application's own sub-directory. This
* is the same structure as maintained in the event log write code path in
* EventLoggingListener.
*/
class HistoryServer(
    conf: SparkConf,
    provider: ApplicationHistoryProvider,
    securityManager: SecurityManager,
    port: Int)
  extends WebUI(securityManager, securityManager.getSSLOptions("historyServer"), port, conf)
  with Logging with UIRoot with ApplicationCacheOperations {
  // How many applications to retain (bound for the UI cache below)
  private val retainedApplications = conf.getInt("spark.history.retainedApplications", 50)
  // How many applications the summary ui displays
  private[history] val maxApplications = conf.get(HISTORY_UI_MAX_APPS);
  // Cache of reconstructed application UIs, bounded by retainedApplications
  private val appCache = new ApplicationCache(this, retainedApplications, new SystemClock())
  // and its metrics, for testing as well as monitoring
  val cacheMetrics = appCache.metrics
  // Servlet mounted under UI_PATH_PREFIX: loads the requested application's UI
  // on first access, then redirects the browser back to the requested URI.
  private val loaderServlet = new HttpServlet {
    protected override def doGet(req: HttpServletRequest, res: HttpServletResponse): Unit = {
      // Parse the URI created by getAttemptURI(). It contains an app ID and an optional
      // attempt ID (separated by a slash).
      val parts = Option(req.getPathInfo()).getOrElse("").split("/")
      if (parts.length < 2) {
        // NOTE(review): the error message below is missing its closing parenthesis
        res.sendError(HttpServletResponse.SC_BAD_REQUEST,
          s"Unexpected path info in request (URI = ${req.getRequestURI()}")
        return
      }
      val appId = parts(1)
      val attemptId = if (parts.length >= 3) Some(parts(2)) else None
      // Since we may have applications with multiple attempts mixed with applications with a
      // single attempt, we need to try both. Try the single-attempt route first, and if an
      // error is raised, then try the multiple attempt route.
      if (!loadAppUi(appId, None) && (!attemptId.isDefined || !loadAppUi(appId, attemptId))) {
        val msg = <div class="row-fluid">Application {appId} not found.</div>
        res.setStatus(HttpServletResponse.SC_NOT_FOUND)
        UIUtils.basicSparkPage(req, msg, "Not Found").foreach { n =>
          res.getWriter().write(n.toString)
        }
        return
      }
      // Note we don't use the UI retrieved from the cache; the cache loader above will register
      // the app's UI, and all we need to do is redirect the user to the same URI that was
      // requested, and the proper data should be served at that point.
      // Also, make sure that the redirect url contains the query string present in the request.
      val requestURI = req.getRequestURI + Option(req.getQueryString).map("?" + _).getOrElse("")
      res.sendRedirect(res.encodeRedirectURL(requestURI))
    }
    // SPARK-5983 ensure TRACE is not supported
    protected override def doTrace(req: HttpServletRequest, res: HttpServletResponse): Unit = {
      res.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED)
    }
  }
  /** Run `fn` against the (possibly cache-loaded) SparkUI of the given app/attempt. */
  override def withSparkUI[T](appId: String, attemptId: Option[String])(fn: SparkUI => T): T = {
    appCache.withSparkUI(appId, attemptId)(fn)
  }
  // Eagerly wire up pages/handlers as part of construction.
  initialize()
  /**
   * Initialize the history server.
   *
   * This starts a background thread that periodically synchronizes information displayed on
   * this UI with the event logs in the provided base directory.
   */
  def initialize() {
    attachPage(new HistoryPage(this))
    attachHandler(ApiRootResource.getServletHandler(this))
    addStaticHandler(SparkUI.STATIC_RESOURCE_DIR)
    val contextHandler = new ServletContextHandler
    contextHandler.setContextPath(HistoryServer.UI_PATH_PREFIX)
    contextHandler.addServlet(new ServletHolder(loaderServlet), "/*")
    attachHandler(contextHandler)
  }
  /** Bind to the HTTP server behind this web interface. */
  override def bind() {
    super.bind()
  }
  /** Stop the server and close the file system. */
  override def stop() {
    super.stop()
    provider.stop()
  }
  /** Attach a reconstructed UI to this server. Only valid after bind(). */
  override def attachSparkUI(
      appId: String,
      attemptId: Option[String],
      ui: SparkUI,
      completed: Boolean) {
    assert(serverInfo.isDefined, "HistoryServer must be bound before attaching SparkUIs")
    // Synchronized on `handlers` so attach/detach of different UIs don't interleave.
    handlers.synchronized {
      ui.getHandlers.foreach(attachHandler)
    }
  }
  /** Detach a reconstructed UI from this server. Only valid after bind(). */
  override def detachSparkUI(appId: String, attemptId: Option[String], ui: SparkUI): Unit = {
    assert(serverInfo.isDefined, "HistoryServer must be bound before detaching SparkUIs")
    handlers.synchronized {
      ui.getHandlers.foreach(detachHandler)
    }
    // Let the provider release any resources tied to this UI (e.g. cached state).
    provider.onUIDetached(appId, attemptId, ui)
  }
  /**
   * Get the application UI and whether or not it is completed
   * @param appId application ID
   * @param attemptId attempt ID
   * @return If found, the Spark UI and any history information to be used in the cache
   */
  override def getAppUI(appId: String, attemptId: Option[String]): Option[LoadedAppUI] = {
    provider.getAppUI(appId, attemptId)
  }
  /**
   * Returns a list of available applications, in descending order according to their end time.
   *
   * @return List of all known applications.
   */
  def getApplicationList(): Iterator[ApplicationInfo] = {
    provider.getListing()
  }
  /** Number of event logs currently being replayed/processed by the provider. */
  def getEventLogsUnderProcess(): Int = {
    provider.getEventLogsUnderProcess()
  }
  /** Timestamp of the provider's last listing refresh. */
  def getLastUpdatedTime(): Long = {
    provider.getLastUpdatedTime()
  }
  // UIRoot API: same data as getApplicationList().
  def getApplicationInfoList: Iterator[ApplicationInfo] = {
    getApplicationList()
  }
  def getApplicationInfo(appId: String): Option[ApplicationInfo] = {
    provider.getApplicationInfo(appId)
  }
  /** Stream the raw event logs of an application into the given zip stream. */
  override def writeEventLogs(
      appId: String,
      attemptId: Option[String],
      zipStream: ZipOutputStream): Unit = {
    provider.writeEventLogs(appId, attemptId, zipStream)
  }
  /**
   * @return html text to display when the application list is empty
   */
  def emptyListingHtml(): Seq[Node] = {
    provider.getEmptyListingHtml()
  }
  /**
   * Returns the provider configuration to show in the listing page.
   *
   * @return A map with the provider's configuration.
   */
  def getProviderConfig(): Map[String, String] = provider.getConfig()
  /**
   * Load an application UI and attach it to the web server.
   * @param appId application ID
   * @param attemptId optional attempt ID
   * @return true if the application was found and loaded.
   */
  private def loadAppUi(appId: String, attemptId: Option[String]): Boolean = {
    try {
      appCache.withSparkUI(appId, attemptId) { _ =>
        // Do nothing, just force the UI to load.
      }
      true
    } catch {
      // A missing application surfaces as NoSuchElementException from the cache loader.
      case NonFatal(e: NoSuchElementException) =>
        false
    }
  }
  /**
   * String value for diagnostics.
   * @return a multi-line description of the server state.
   */
  override def toString: String = {
    s"""
      | History Server;
      | provider = $provider
      | cache = $appCache
    """.stripMargin
  }
}
/**
* The recommended way of starting and stopping a HistoryServer is through the scripts
* start-history-server.sh and stop-history-server.sh. The path to a base log directory,
* as well as any other relevant history server configuration, should be specified via
* the $SPARK_HISTORY_OPTS environment variable. For example:
*
* export SPARK_HISTORY_OPTS="-Dspark.history.fs.logDirectory=/tmp/spark-events"
* ./sbin/start-history-server.sh
*
* This launches the HistoryServer as a Spark daemon.
*/
object HistoryServer extends Logging {
  // Process-wide configuration, populated by HistoryServerArguments in main().
  private val conf = new SparkConf
  // URL prefix under which application UIs are served.
  val UI_PATH_PREFIX = "/history"
  /** Entry point: parse args, build the provider reflectively, bind, then block forever. */
  def main(argStrings: Array[String]): Unit = {
    Utils.initDaemon(log)
    new HistoryServerArguments(conf, argStrings)
    initSecurity()
    val securityManager = createSecurityManager(conf)
    // The history provider class is pluggable; default to the filesystem provider.
    val providerName = conf.getOption("spark.history.provider")
      .getOrElse(classOf[FsHistoryProvider].getName())
    val provider = Utils.classForName(providerName)
      .getConstructor(classOf[SparkConf])
      .newInstance(conf)
      .asInstanceOf[ApplicationHistoryProvider]
    val port = conf.get(HISTORY_SERVER_UI_PORT)
    val server = new HistoryServer(conf, provider, securityManager, port)
    server.bind()
    ShutdownHookManager.addShutdownHook { () => server.stop() }
    // Wait until the end of the world... or if the HistoryServer process is manually stopped
    while(true) { Thread.sleep(Int.MaxValue) }
  }
  /**
   * Create a security manager.
   * This turns off security in the SecurityManager, so that the History Server can start
   * in a Spark cluster where security is enabled.
   * @param config configuration for the SecurityManager constructor
   * @return the security manager for use in constructing the History Server.
   */
  private[history] def createSecurityManager(config: SparkConf): SecurityManager = {
    if (config.getBoolean(SecurityManager.SPARK_AUTH_CONF, false)) {
      logDebug(s"Clearing ${SecurityManager.SPARK_AUTH_CONF}")
      config.set(SecurityManager.SPARK_AUTH_CONF, "false")
    }
    if (config.getBoolean("spark.acls.enable", config.getBoolean("spark.ui.acls.enable", false))) {
      logInfo("Either spark.acls.enable or spark.ui.acls.enable is configured, clearing it and " +
        "only using spark.history.ui.acl.enable")
      config.set("spark.acls.enable", "false")
      config.set("spark.ui.acls.enable", "false")
    }
    new SecurityManager(config)
  }
  /** Log in from a keytab when Kerberos is enabled, so HDFS access outlives ticket expiry. */
  def initSecurity() {
    // If we are accessing HDFS and it has security enabled (Kerberos), we have to login
    // from a keytab file so that we can access HDFS beyond the kerberos ticket expiration.
    // As long as it is using Hadoop rpc (hdfs://), a relogin will automatically
    // occur from the keytab.
    if (conf.getBoolean("spark.history.kerberos.enabled", false)) {
      // if you have enabled kerberos the following 2 params must be set
      val principalName = conf.get("spark.history.kerberos.principal")
      val keytabFilename = conf.get("spark.history.kerberos.keytab")
      SparkHadoopUtil.get.loginUserFromKeytab(principalName, keytabFilename)
    }
  }
  /** Build the UI path for an application attempt, e.g. /history/app-1/1. */
  private[history] def getAttemptURI(appId: String, attemptId: Option[String]): String = {
    val attemptSuffix = attemptId.map { id => s"/$id" }.getOrElse("")
    s"${HistoryServer.UI_PATH_PREFIX}/${appId}${attemptSuffix}"
  }
}
| bravo-zhang/spark | core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala | Scala | apache-2.0 | 12,353 |
package api
import core.DefaultTimeout
import akka.actor.ActorSystem
import spray.routing.Directives
import spray.httpx.TwirlSupport
import spray.httpx.encoding.Gzip
import service.{LocationFormats, LocationData, Location}
import akka.pattern.ask
import scala.util.Try
/**
* Sample API for the Godzilla Prediction System
* Each api path is in a separate spray 'path' directive for easier management.
* LocationFormats provide implicit JSON marshalling,
* TwirlSupport provides twirl templates
* DefaultTimeout provide a base timeout
*/
class GodzillaApi(implicit val actorSystem: ActorSystem) extends Directives with DefaultTimeout with TwirlSupport with LocationFormats {
  import scala.concurrent.ExecutionContext.Implicits.global
  // Looked up by path; assumes the actor was started at /user/gds/godzilla — TODO confirm
  val godzillaActor = actorSystem.actorSelection("/user/gds/godzilla")
  // home page, retrieves compiled twirl template
  val index = path("") {
    get {
      complete {
        html.index()
      }
    }
  }
  // does not block: asks the godzilla actor and completes with the future result,
  // marshalled to JSON via the implicit LocationFormats
  val locations = path("locations" / IntNumber) { deviation =>
    get {
      complete {
        (godzillaActor ? LocationData(deviation)).mapTo[Try[List[Location]]]
      }
    }
  }
  // for webjar javascript dependencies
  val webjars = pathPrefix("webjars") {
    get {
      getFromResourceDirectory("META-INF/resources/webjars")
    }
  }
  // Route order matters (first match wins); static assets are the fallback.
  val routes = index ~ locations ~ webjars ~ getFromResourceDirectory("assets")
}
| jeffusan/godzilla-prediction-system | src/main/scala/api/godzilla.scala | Scala | gpl-2.0 | 1,412 |
/** Merges two sorted lists into one sorted list, keeping duplicates.
  * The merge is stable: on ties, elements from `xs` come first.
  * Rewritten tail-recursively with an accumulator so long inputs no longer
  * risk a stack overflow; the redundant `x == y` branch (identical to `x < y`)
  * is folded into a single `a <= b` comparison. */
def merge(xs: List[Int], ys: List[Int]): List[Int] = {
  @annotation.tailrec
  def loop(as: List[Int], bs: List[Int], acc: List[Int]): List[Int] = (as, bs) match {
    case (Nil, _) => acc.reverse ::: bs
    case (_, Nil) => acc.reverse ::: as
    case (a :: at, b :: bt) =>
      // <= keeps the left element first on ties, matching the original order.
      if (a <= b) loop(at, bs, a :: acc)
      else loop(as, bt, b :: acc)
  }
  loop(xs, ys, Nil)
}
/** Sorts a list of Ints by merge sort (O(n log n), stable).
  * Improvements over the original: the two halves are produced with a single
  * `splitAt` traversal instead of separate `take` + `drop`, and the base cases
  * are matched structurally (`Nil`, single element) instead of via
  * `isEmpty` / `length == 1` guards. */
def msort(list: List[Int]): List[Int] = list match {
  case Nil => Nil
  case _ :: Nil => list
  case _ =>
    // Split at the midpoint in one pass, then sort and merge each half.
    val (left, right) = list.splitAt(list.length / 2)
    merge(msort(left), msort(right))
}
println(msort(List(3,4,1,2,5)))
// println(msort(List(13,55,98,1,52,97,16,99,45,30,82,22,77,91,70,59,54,7,96,20,29,79,0,49,85,58,36,33,32,74,64,92,76,34,37,56,5,18,38,40,78,48,2,81,94,65,24,69,8,21,12,66,73,25,26,51,84,31,3,27,46,10,83,87,63,11,47,6,50,35,75,23,19,44,89,86,41,42,43,17,60,71,62,57,15,80,14,100,4,88,68,28,72,95,93,67,90,61,39,9,113,155,198,101,152,197,116,199,145,130,182,122,177,191,170,159,154,107,196,120,129,179,149,185,158,136,133,132,174,164,192,176,134,137,156,105,118,138,140,178,148,102,181,194,165,124,169,108,121,112,166,173,125,126,151,184,131,103,127,146,183,187,163,111,147,106,150,135,175,123,119,144,189,186,141,142,143,117,160,171,162,157,115,180,114,110,104,188,168,128,172,195,193,167,190,161,139,109)))
// println(msort(List(13,55,98,1,52,97,16,99,45,30,82,22,77,91,70,59,54,7,96,20,29,79,0,49,85,58,36,33,32,74,64,92,76,34,37,56,5,18,38,40,78,48,2,81,94,65,24,69,8,21,12,66,73,25,26,51,84,31,3,27,46,10,83,87,63,11,47,6,50,35,75,23,19,44,89,86,41,42,43,17,60,71,62,57,15,80,14,100,4,88,68,28,72,95,93,67,90,61,39,9)))
// def msort[A](less: (A, A) => Boolean)(xs: List[A]): List[A] = {
// def merge(xs1: List[A], xs2: List[A]): List[A] =
// if (xs1.isEmpty) xs2
// else if (xs2.isEmpty) xs1
// else if (less(xs1.head, xs2.head)) xs1.head :: merge(xs1.tail, xs2)
// else xs2.head :: merge(xs1, xs2.tail)
// val n = xs.length/2
// if (n == 0) xs
// else merge(msort(less)(xs take n), msort(less)(xs drop n))
// }
// println(msort((x: Int, y: Int) => x < y)(List(5, 7, 1, 3)))
| shigemk2/haskell_abc | merge.scala | Scala | mit | 2,045 |
/*
* This file is part of the \BlueLaTeX project.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gnieh.blue
package compile
package impl
package let
import http._
import common._
import permission._
import tiscaf._
import com.typesafe.config.Config
import scala.util.{
Try,
Success,
Failure
}
import org.apache.pdfbox.pdmodel.PDDocument
import resource._
import gnieh.sohva.control.CouchClient
/** Handle request that ask for the number of pages in the compiled paper.
*
* @author Lucas Satabin
*/
class GetPagesLet(paperId: String, val couch: CouchClient, config: Config, logger: Logger) extends SyncPermissionLet(paperId, config, logger) {

  import FileUtils._

  /** Responds with the page count of the compiled paper as JSON.
   *  - 200 + page count when the pdf exists and is readable,
   *  - 404 when no compiled pdf exists,
   *  - 500 when the pdf cannot be parsed,
   *  - 403 when the caller lacks the Read permission. */
  def permissionAct(user: Option[UserInfo], role: Role, permissions: Set[Permission])(implicit talk: HTalk): Try[Any] = permissions match {
    case Read() =>
      // the pdf generated by the last successful compilation of this paper
      val pdfFile = configuration.buildDir(paperId) / "main.pdf"
      if(pdfFile.exists) {
        // `managed` guarantees the PDDocument is closed even if extraction fails
        managed(PDDocument.load(pdfFile)).map(_.getNumberOfPages).either match {
          case Right(pages) =>
            Try(talk.writeJson(pages))
          case Left(errors) =>
            logError(s"Cannot extract number of pages for paper $paperId", errors.head)
            Try(
              talk
                .setStatus(HStatus.InternalServerError)
                .writeJson(ErrorResponse("unknown_error", "The number of pages could not be extracted")))
        }
      } else {
        Try(
          talk
            .setStatus(HStatus.NotFound)
            // fixed: the message previously lacked the `s` interpolator, so the
            // literal text "$paperId" was sent to the client
            .writeJson(ErrorResponse("not_found", s"No compiled version of paper $paperId found")))
      }
    case _ =>
      Try(
        talk
          .setStatus(HStatus.Forbidden)
          .writeJson(ErrorResponse("no_sufficient_rights", "You have no permission to see the number of pages")))
  }

}
| tdurieux/bluelatex | blue-compile/src/main/scala/gnieh/blue/compile/impl/let/GetPagesLet.scala | Scala | apache-2.0 | 2,384 |
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.remote.transport
import FailureInjectorTransportAdapter._
import akka.AkkaException
import akka.actor.{ Address, ExtendedActorSystem }
import akka.event.Logging
import akka.remote.transport.AssociationHandle.{ HandleEvent, HandleEventListener }
import akka.remote.transport.Transport._
import akka.util.ByteString
import java.util.concurrent.ConcurrentHashMap
import scala.concurrent.forkjoin.ThreadLocalRandom
import scala.concurrent.{ Future, Promise }
import scala.util.control.NoStackTrace
import scala.util.Try
/** Exception used to simulate association failures. Mixes in NoStackTrace since
  * it is thrown routinely as part of deliberate fault injection. */
@SerialVersionUID(1L)
case class FailureInjectorException(msg: String) extends AkkaException(msg) with NoStackTrace
/** Adapter provider that wraps a transport in a [[FailureInjectorTransportAdapter]]. */
class FailureInjectorProvider extends TransportAdapterProvider {
  override def create(wrappedTransport: Transport, system: ExtendedActorSystem): Transport =
    new FailureInjectorTransportAdapter(wrappedTransport, system)
}
/**
* INTERNAL API
*/
/**
 * INTERNAL API
 *
 * Companion holding the management commands and the fault-injection modes.
 */
private[remote] object FailureInjectorTransportAdapter {
  // Scheme prefix added by this adapter, e.g. "gremlin.tcp".
  val FailureInjectorSchemeIdentifier = "gremlin"
  // NOTE(review): neither All nor One extends this marker trait — confirm intended.
  trait FailureInjectorCommand
  // Set the given mode for all addresses.
  @SerialVersionUID(1L)
  case class All(mode: GremlinMode)
  // Set the given mode for one specific remote address.
  @SerialVersionUID(1L)
  case class One(remoteAddress: Address, mode: GremlinMode)
  sealed trait GremlinMode
  // Forward everything untouched.
  @SerialVersionUID(1L)
  case object PassThru extends GremlinMode {
    /**
     * Java API: get the singleton instance
     */
    def getInstance = this
  }
  // Drop messages randomly with the given per-direction probabilities (0.0-1.0).
  @SerialVersionUID(1L)
  case class Drop(outboundDropP: Double, inboundDropP: Double) extends GremlinMode
}
/**
* INTERNAL API
*/
/**
 * INTERNAL API
 *
 * Transport adapter that randomly drops inbound/outbound messages and simulates
 * association failures according to per-address [[FailureInjectorTransportAdapter.GremlinMode]]s.
 */
private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transport, val extendedSystem: ExtendedActorSystem)
  extends AbstractTransportAdapter(wrappedTransport)(extendedSystem.dispatcher) with AssociationEventListener {

  // Per-thread RNG so drop decisions need no synchronization.
  private def rng = ThreadLocalRandom.current()
  private val log = Logging(extendedSystem, "FailureInjector (gremlin)")
  private val shouldDebugLog: Boolean = extendedSystem.settings.config.getBoolean("akka.remote.gremlin.debug")
  // Written once during interceptListen, read afterwards — hence @volatile, no lock.
  @volatile private var upstreamListener: Option[AssociationEventListener] = None
  // Chaos mode per remote address; keys are stored with protocol/system stripped.
  private[transport] val addressChaosTable = new ConcurrentHashMap[Address, GremlinMode]()
  // NOTE(review): set by the All command but never consulted by the drop logic — confirm intended.
  @volatile private var allMode: GremlinMode = PassThru
  override val addedSchemeIdentifier = FailureInjectorSchemeIdentifier
  protected def maximumOverhead = 0
  /** Handles the All/One chaos commands; everything else is delegated downstream. */
  override def managementCommand(cmd: Any): Future[Boolean] = cmd match {
    case All(mode) ⇒
      allMode = mode
      Future.successful(true)
    case One(address, mode) ⇒
      // don't care about the protocol part - we are injected in the stack anyway!
      addressChaosTable.put(address.copy(protocol = "", system = ""), mode)
      Future.successful(true)
    case _ ⇒ wrappedTransport.managementCommand(cmd)
  }
  /** Registers this adapter as the downstream listener and remembers the upstream one. */
  protected def interceptListen(listenAddress: Address,
                                listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener] = {
    log.warning("FailureInjectorTransport is active on this system. Gremlins might munch your packets.")
    listenerFuture.onSuccess {
      // Side effecting: As this class is not an actor, the only way to safely modify state is through volatile vars.
      // Listen is called only during the initialization of the stack, and upstreamListener is not read before this
      // finishes.
      case listener: AssociationEventListener ⇒ upstreamListener = Some(listener)
    }
    Future.successful(this)
  }
  /** Simulates association failure when either direction would have dropped a message. */
  protected def interceptAssociate(remoteAddress: Address, statusPromise: Promise[AssociationHandle]): Unit = {
    // Association is simulated to be failed if there was either an inbound or outbound message drop
    if (shouldDropInbound(remoteAddress, Unit, "interceptAssociate") || shouldDropOutbound(remoteAddress, Unit, "interceptAssociate"))
      statusPromise.failure(new FailureInjectorException("Simulated failure of association to " + remoteAddress))
    else
      statusPromise.completeWith(wrappedTransport.associate(remoteAddress).map { handle ⇒
        // Default new peers to PassThru unless a mode was already configured.
        addressChaosTable.putIfAbsent(handle.remoteAddress.copy(protocol = "", system = ""), PassThru)
        new FailureInjectorHandle(handle, this)
      })
  }
  /** Forwards (or drops) inbound association events to the upstream listener. */
  def notify(ev: AssociationEvent): Unit = ev match {
    case InboundAssociation(handle) if shouldDropInbound(handle.remoteAddress, ev, "notify") ⇒ //Ignore
    case _ ⇒ upstreamListener match {
      case Some(listener) ⇒ listener notify interceptInboundAssociation(ev)
      case None ⇒
    }
  }
  /** Wraps inbound handles so their traffic also passes through the injector. */
  def interceptInboundAssociation(ev: AssociationEvent): AssociationEvent = ev match {
    case InboundAssociation(handle) ⇒ InboundAssociation(FailureInjectorHandle(handle, this))
    case _ ⇒ ev
  }
  /** Rolls the dice for an inbound drop; `instance`/`debugMessage` are for logging only. */
  def shouldDropInbound(remoteAddress: Address, instance: Any, debugMessage: String): Boolean = chaosMode(remoteAddress) match {
    case PassThru ⇒ false
    case Drop(_, inboundDropP) ⇒
      if (rng.nextDouble() <= inboundDropP) {
        if (shouldDebugLog) log.debug("Dropping inbound [{}] for [{}] {}", instance.getClass, remoteAddress, debugMessage)
        true
      } else false
  }
  /** Rolls the dice for an outbound drop; `instance`/`debugMessage` are for logging only. */
  def shouldDropOutbound(remoteAddress: Address, instance: Any, debugMessage: String): Boolean = chaosMode(remoteAddress) match {
    case PassThru ⇒ false
    case Drop(outboundDropP, _) ⇒
      if (rng.nextDouble() <= outboundDropP) {
        if (shouldDebugLog) log.debug("Dropping outbound [{}] for [{}] {}", instance.getClass, remoteAddress, debugMessage)
        true
      } else false
  }
  /** Looks up the configured mode for an address, defaulting to PassThru. */
  def chaosMode(remoteAddress: Address): GremlinMode = {
    val mode = addressChaosTable.get(remoteAddress.copy(protocol = "", system = ""))
    if (mode eq null) PassThru else mode
  }
}
/**
* INTERNAL API
*/
/**
 * INTERNAL API
 *
 * Association handle wrapper that consults the gremlin adapter before letting
 * writes out or events in, silently swallowing the ones chosen to be dropped.
 */
private[remote] case class FailureInjectorHandle(_wrappedHandle: AssociationHandle,
                                                 private val gremlinAdapter: FailureInjectorTransportAdapter)
  extends AbstractTransportAdapterHandle(_wrappedHandle, FailureInjectorSchemeIdentifier)
  with HandleEventListener {
  import gremlinAdapter.extendedSystem.dispatcher

  // Set exactly once when readHandlerPromise completes; volatile for cross-thread visibility.
  @volatile private var upstreamListener: HandleEventListener = null

  override val readHandlerPromise: Promise[HandleEventListener] = Promise()
  readHandlerPromise.future.onSuccess {
    case listener: HandleEventListener ⇒
      upstreamListener = listener
      // Only register downstream AFTER the upstream listener is in place,
      // so no event can arrive while upstreamListener is still null.
      wrappedHandle.readHandlerPromise.success(this)
  }
  // Returning true for a dropped write makes the drop invisible to the sender.
  override def write(payload: ByteString): Boolean =
    if (!gremlinAdapter.shouldDropOutbound(wrappedHandle.remoteAddress, payload, "handler.write")) wrappedHandle.write(payload)
    else true
  override def disassociate(): Unit = wrappedHandle.disassociate()
  override def notify(ev: HandleEvent): Unit =
    if (!gremlinAdapter.shouldDropInbound(wrappedHandle.remoteAddress, ev, "handler.notify"))
      upstreamListener notify ev
}
| Fincore/org.spark-project.akka | remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala | Scala | mit | 6,925 |
package cromwell
import java.net.URL
import akka.testkit.{EventFilter, TestActorRef}
import cromwell.engine._
import cromwell.engine.backend.jes.{JesAttributes, JesBackend}
import cromwell.engine.io.gcs.{GoogleConfiguration, ServiceAccountMode}
import cromwell.engine.workflow.WorkflowManagerActor
import cromwell.util.SampleWdl
import org.scalatest.BeforeAndAfterAll
/** Verifies that a workflow whose task declares an unknown runtime attribute
  * fails (rather than running) on both the JES and Local backends. */
class InvalidRuntimeAttributesSpec extends CromwellTestkitSpec with BeforeAndAfterAll {
  val testWorkflowManagerSystem = new CromwellTestkitSpec.TestWorkflowManagerSystem()
  val actorSystem = testWorkflowManagerSystem.actorSystem
  // Tear down the dedicated actor system after all examples have run.
  override protected def afterAll() = {
    testWorkflowManagerSystem.shutdownTestActorSystem()
    super.afterAll()
  }
  "A workflow with a task with invalid runtime attributes" should {
    "fail on JES Backend" in {
      // Stub JES backend: collaborators are nulled/blanked out because the
      // workflow is expected to fail validation before any of them are used.
      val jesBackend = new JesBackend(actorSystem) {
        private val anyString = ""
        private val anyURL: URL = null
        override lazy val jesConf = new JesAttributes(
          project = anyString,
          executionBucket = anyString,
          endpointUrl = anyURL) {
        }
        override def jesUserConnection(workflow: WorkflowDescriptor) = null
        override lazy val jesCromwellInterface = null
        override lazy val googleConf = GoogleConfiguration("appName", ServiceAccountMode("accountID", "pem"), None)
      }
      val workflowSources = WorkflowSourceFiles(SampleWdl.HelloWorld.wdlSource(), SampleWdl.HelloWorld.wdlJson, """ {"jes_gcs_root": "gs://fake/path"} """)
      val submitMessage = WorkflowManagerActor.SubmitWorkflow(workflowSources)
      // Expect exactly one validation error to be logged and the workflow to fail.
      runWdlWithWorkflowManagerActor(
        wma = TestActorRef(new WorkflowManagerActor(jesBackend)),
        submitMsg = submitMessage,
        stdout = Map.empty,
        stderr = Map.empty,
        eventFilter = EventFilter.error(pattern = "RuntimeAttribute is not valid.", occurrences = 1),
        terminalState = WorkflowFailed
      )
    }
    "fail on Local Backend" in {
      // Same workflow with an unknown runtime attribute injected directly.
      runWdl(
        sampleWdl = SampleWdl.HelloWorld,
        runtime =
          """ runtime { wrongAttribute: "nop" }""".stripMargin,
        eventFilter = EventFilter.error(pattern = "RuntimeAttribute is not valid", occurrences = 1),
        terminalState = WorkflowFailed
      )
    }
  }
}
| dgtester/cromwell | src/test/scala/cromwell/InvalidRuntimeAttributesSpec.scala | Scala | bsd-3-clause | 2,296 |
package im.mange.driveby.scalatest
import im.mange.driveby.pool.PooledApplications
//TODO: pull out error messages somewhere
trait Applications extends PooledApplications {
  /** The application acquired from the pool for the current example, or a
    * RuntimeException if none could be obtained within the timeout. */
  def application = pooledApplication match {
    case Some(app) => app
    case None => throw new RuntimeException("Failed to acquire an application for this example within the timeout")
  }
}
| alltonp/driveby | src/main/scala/im/mange/driveby/scalatest/Applications.scala | Scala | apache-2.0 | 377 |
package no.skytteren.elasticala
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.node.NodeBuilder._
import org.scalatest.{BeforeAndAfterAll, FunSpec}
import org.scalatest.concurrent.ScalaFutures
import no.skytteren.elasticala.mapping._
import org.scalatest.concurrent._
import scala.concurrent.duration._
import org.scalatest.time._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.Random
class MappingDSLSpec extends FunSpec with ScalaFutures with BeforeAndAfterAll{
  import api._
  import mapping._

  // Embedded single-node cluster used by every example in this spec.
  // NOTE(review): Random.nextString can emit characters that are illegal in
  // file system paths — consider Random.alphanumeric for "path.home".
  val node = nodeBuilder.local(true).data(true).settings(Settings.settingsBuilder().
      put("path.home", "target/es/MappingDSLSpec/" + Random.nextString(10) ).put("cluster.name", "MappingDSLSpec")).build()
  val client = Client(node)

  implicit val patience = PatienceConfig(timeout = Span(6, Seconds))

  override def beforeAll(): Unit = {
    node.start()
    try {
      // Create both indices used by the examples and combine their acknowledgements.
      // BUGFIX: this previously read `c.acknowledged & c.acknowledged`, silently
      // ignoring the result of the "twitter" index creation.
      Await.result(for {
        c <- client.execute("index".create)
        e <- client.execute("twitter".create)
      } yield {
        c.acknowledged & e.acknowledged
      }, 2.seconds)
    } catch {
      case e: Throwable => e.printStackTrace()
    }
    // Give the local node a moment to settle before the examples run.
    Thread.sleep(200)
  }

  describe("MappingDSL") {
    /*
     * $ curl -XPUT 'http://localhost:9200/twitter/_mapping/tweet' -d '
     * {
     *   "tweet" : {
     *    "properties" : {
     *      "message" : {"type" : "string", "store" : true }
     *    }
     *   }
     * }
     *'
     */
    it("should support twitter string mapping example") {
      val e = client.execute("twitter"/ "tweet" mapping (
          "message" -> string(store = true)
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support string mapping") {
      val e = client.execute("index" / "stringType" mapping (
          "stringField" -> string
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support integer mapping") {
      // TODO(review): copy/paste leftover — this example maps a *string* field,
      // so it does not actually exercise an integer mapping. Confirm the DSL's
      // integer combinator and fix the field/mapping pair.
      val e = client.execute("index" / "intType" mapping (
          "stringField" -> string
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support boolean mapping") {
      val e = client.execute("index" / "boolType" mapping (
          "boolField" -> boolean
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support long mapping") {
      val e = client.execute("index" / "longType" mapping (
          "longField" -> long
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support short mapping") {
      val e = client.execute("index" / "shortType" mapping (
          "shortField" -> short
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support double mapping") {
      val e = client.execute("index" / "doubleType" mapping (
          "doubleField" -> double
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support byte mapping") {
      val e = client.execute("index" / "byteType" mapping (
          "byteField" -> byte
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support binary mapping") {
      val e = client.execute("index" / "binaryType" mapping (
          "binaryField" -> binary
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support float mapping") {
      val e = client.execute("index" / "floatType" mapping (
          "floatField" -> float
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support date mapping") {
      val e = client.execute("index" / "dateType" mapping (
          "dateField" -> date
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support object mapping") {
      val e = client.execute("index" / "objectType" mapping (
          "objectField" -> properties(
            "field" -> string
          )
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
    it("should support nested mapping") {
      val e = client.execute("index" / "nestedType" mapping (
          "nestedField" -> nested(
            "field" -> string
          )
        ))
      whenReady(e)(res => assert(res.acknowledge))
    }
  }
} | skytteren/elasticala | src/test/scala/no/skytteren/elasticala/MappingDSLSpec.scala | Scala | apache-2.0 | 4,231 |
package controllers
import akka.pattern.ask
import play.api.libs.json._
import play.api.mvc._
import lila.app._
import lila.monitor.actorApi._
import lila.socket.actorApi.PopulationGet
import makeTimeout.short
// Admin-facing monitoring controller: serves the monitor page, its websocket,
// and a set of plain-text status probes keyed by name.
object Monitor extends LilaController {
  private def env = Env.monitor
  // Monitor dashboard; restricted to admins via Secure(_.Admin).
  def index = Secure(_.Admin) { ctx =>
    me =>
      Ok(views.html.monitor.monitor()).fuccess
  }
  // Websocket feed for the monitor page; requires an "sri" query parameter.
  def websocket = SocketOption[JsValue] { implicit ctx =>
    get("sri") ?? { sri =>
      env.socketHandler(sri) map some
    }
  }
  // Status probe with the key taken from the "key" query parameter.
  // NOTE(review): unlike index/websocket, the status endpoints use a bare
  // Action with no Secure wrapper — confirm they are meant to be public.
  def statusParam = Action.async { implicit req =>
    handleStatus(~get("key", req))
  }
  // Status probe with the key taken from the URL path.
  def status(key: String) = Action.async { implicit req =>
    handleStatus(key)
  }
  // Dispatches a status key to its probe; each branch yields a Future[Result].
  private def handleStatus(key: String) = key match {
    case "threads" => Ok(java.lang.management.ManagementFactory.getThreadMXBean.getThreadCount).fuccess
    case "moves" => (env.reporting ? GetNbMoves).mapTo[Int] map { Ok(_) }
    case "moveLatency" => (env.reporting ? GetMoveLatency).mapTo[Int] map { Ok(_) }
    case "players" => {
      // "<socket population> <online user count>"
      (env.reporting ? PopulationGet).mapTo[Int] map { "%d %d".format(_, Env.user.onlineUserIdMemo.count) }
    } map { Ok(_) }
    case "uptime" => fuccess {
      val up = lila.common.PlayApp.uptime
      Ok {
        s"""${prop("java.vm.name")} ${prop("java.vendor")} ${prop("java.version")}
${prop("user.name")} @ ${prop("os.arch")} ${prop("os.name")} ${prop("os.version")}
uptime: ${org.joda.time.format.PeriodFormat.wordBased(new java.util.Locale("en")).print(up)}
uptime seconds: ${up.toStandardSeconds.getSeconds}
last deploy: ${lila.common.PlayApp.startedAt}"""
      }
    }
    case "locale" => Ok(java.util.Locale.getDefault.toString).fuccess
    case key => BadRequest(s"Unknown monitor status key: $key").fuccess
  }
  // JVM system property lookup; may return null for unknown names.
  private def prop(name: String) = System.getProperty(name)
}
| r0k3/lila | app/controllers/Monitor.scala | Scala | mit | 1,848 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.handler
import java.io.OutputStream
import akka.actor._
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import com.ibm.spark.kernel.api.{FactoryMethods, FactoryMethodsLike, Kernel}
import com.ibm.spark.kernel.protocol.v5._
import com.ibm.spark.kernel.protocol.v5.content._
import com.ibm.spark.kernel.protocol.v5.kernel.ActorLoader
import com.ibm.spark.kernel.protocol.v5Test._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, FunSpecLike, Matchers}
import play.api.libs.json.Json
import org.mockito.Mockito._
import org.mockito.Matchers._
import scala.concurrent.duration._
/**
 * Tests the ExecuteRequestHandler actor: it must forward execute requests to
 * the execute-request relay, and publish the resulting execute_input,
 * execute_result, and execute_reply messages through the kernel message relay.
 */
class ExecuteRequestHandlerSpec extends TestKit(
  ActorSystem("ExecuteRequestHandlerSpec")
) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar
  with BeforeAndAfter {
  private var mockActorLoader: ActorLoader = _
  private var mockFactoryMethods: FactoryMethods = _
  private var mockKernel: Kernel = _
  private var handlerActor: ActorRef = _
  private var kernelMessageRelayProbe: TestProbe = _
  private var executeRequestRelayProbe: TestProbe = _
  private var statusDispatchProbe: TestProbe = _

  before {
    mockActorLoader = mock[ActorLoader]
    mockFactoryMethods = mock[FactoryMethods]
    mockKernel = mock[Kernel]
    doReturn(mockFactoryMethods).when(mockKernel)
      .factory(any[KernelMessage], any[KMBuilder])

    // Add our handler and mock interpreter to the actor system
    handlerActor = system.actorOf(Props(
      classOf[ExecuteRequestHandler],
      mockActorLoader,
      mockKernel
    ))

    // Probes stand in for the system actors the handler talks to.
    kernelMessageRelayProbe = new TestProbe(system)
    when(mockActorLoader.load(SystemActorType.KernelMessageRelay))
      .thenReturn(system.actorSelection(kernelMessageRelayProbe.ref.path.toString))

    executeRequestRelayProbe = new TestProbe(system)
    when(mockActorLoader.load(SystemActorType.ExecuteRequestRelay))
      .thenReturn(system.actorSelection(executeRequestRelayProbe.ref.path.toString))

    statusDispatchProbe = new TestProbe(system)
    when(mockActorLoader.load(SystemActorType.StatusDispatch))
      .thenReturn(system.actorSelection(statusDispatchProbe.ref.path.toString))
  }

  /**
   * This method simulates the interpreter passing back an
   * execute result and reply.
   */
  def replyToHandlerWithOkAndResult() = {
    //  This stubs the behaviour of the interpreter executing code
    val expectedClass = classOf[(ExecuteRequest, KernelMessage, OutputStream)]
    executeRequestRelayProbe.expectMsgClass(expectedClass)
    executeRequestRelayProbe.reply((
      ExecuteReplyOk(1, Some(Payloads()), Some(UserExpressions())),
      ExecuteResult(1, Data("text/plain" -> "resulty result"), Metadata())
    ))
  }

  /**
   * Simulates a successful execution whose result payload is empty, so the
   * handler should not publish an execute_result message.
   */
  def replyToHandlerWithOk() = {
    //  This stubs the behaviour of the interpreter executing code
    val expectedClass = classOf[(ExecuteRequest, KernelMessage, OutputStream)]
    executeRequestRelayProbe.expectMsgClass(expectedClass)
    executeRequestRelayProbe.reply((
      ExecuteReplyOk(1, Some(Payloads()), Some(UserExpressions())),
      ExecuteResult(1, Data("text/plain" -> ""), Metadata())
    ))
  }

  /**
   * This method simulates the interpreter passing back an
   * execute result and reply (error case). Currently unused by the examples
   * below but kept for symmetry with the success helpers.
   */
  def replyToHandlerWithErrorAndResult() = {
    //  This stubs the behaviour of the interpreter executing code
    val expectedClass = classOf[(ExecuteRequest, KernelMessage, OutputStream)]
    executeRequestRelayProbe.expectMsgClass(expectedClass)
    executeRequestRelayProbe.reply((
      ExecuteReplyError(1, Some(""), Some(""), Some(Nil)),
      ExecuteResult(1, Data("text/plain" -> "resulty result"), Metadata())
    ))
  }

  describe("ExecuteRequestHandler( ActorLoader )") {
    describe("#receive( KernelMessage ) when interpreter replies") {

      it("should send an execute result message if the result is not empty") {
        handlerActor ! MockExecuteRequestKernelMessage
        replyToHandlerWithOkAndResult()
        kernelMessageRelayProbe.fishForMessage(100.milliseconds) {
          case KernelMessage(_, _, header, _, _, _) =>
            header.msg_type == ExecuteResult.toTypeString
        }
      }

      it("should not send an execute result message if there is no result") {
        // NOTE(review): fishForMessage succeeds on the *first* message whose
        // msg_type differs from execute_result (e.g. execute_input), so this
        // does not actually prove that no execute_result is ever sent.
        handlerActor ! MockExecuteRequestKernelMessage
        replyToHandlerWithOk()
        kernelMessageRelayProbe.fishForMessage(200.milliseconds) {
          case KernelMessage(_, _, header, _, _, _) =>
            header.msg_type != ExecuteResult.toTypeString
        }
      }

      it("should send an execute reply message") {
        handlerActor ! MockExecuteRequestKernelMessage
        replyToHandlerWithOkAndResult()
        // BUGFIX: this previously fished for ExecuteResult.toTypeString
        // (copy/paste from the test above), so it never verified the reply.
        kernelMessageRelayProbe.fishForMessage(200.milliseconds) {
          case KernelMessage(_, _, header, _, _, _) =>
            header.msg_type == ExecuteReply.toTypeString
        }
      }

      it("should send an execute input message") {
        handlerActor ! MockExecuteRequestKernelMessage
        kernelMessageRelayProbe.fishForMessage(200.milliseconds) {
          case KernelMessage(_, _, header, _, _, _) =>
            header.msg_type == ExecuteInput.toTypeString
        }
      }

      it("should send a message with ids equal to the incoming " +
        "KernelMessage's ids") {
        handlerActor ! MockExecuteRequestKernelMessage
        kernelMessageRelayProbe.fishForMessage(200.milliseconds) {
          case KernelMessage(ids, _, _, _, _, _) =>
            ids == MockExecuteRequestKernelMessage.ids
        }
      }

      it("should send a message with parent header equal to the incoming " +
        "KernelMessage's header") {
        handlerActor ! MockExecuteRequestKernelMessage
        kernelMessageRelayProbe.fishForMessage(200.milliseconds) {
          case KernelMessage(_, _, _, parentHeader, _, _) =>
            parentHeader == MockExecuteRequestKernelMessage.header
        }
      }

      // TODO: Investigate if this is still relevant at all
      //      it("should send a status busy and idle message") {
      //        handlerActor ! MockExecuteRequestKernelMessage
      //        replyToHandlerWithOkAndResult()
      //        var busy = false
      //        var idle = false
      //
      //        statusDispatchProbe.receiveWhile(100.milliseconds) {
      //          case Tuple2(status: KernelStatusType, header: Header)=>
      //            if(status == KernelStatusType.Busy)
      //              busy = true
      //            if(status == KernelStatusType.Idle)
      //              idle = true
      //        }
      //
      //        idle should be (true)
      //        busy should be (true)
      //      }
    }
  }

  //  Testing error timeout for interpreter future
  describe("ExecuteRequestHandler( ActorLoader )") {
    describe("#receive( KernelMessage with bad JSON content )"){
      it("should respond with an execute_reply with status error") {
        handlerActor ! MockKernelMessageWithBadExecuteRequest

        kernelMessageRelayProbe.fishForMessage(200.milliseconds) {
          // Only mark as successful if this specific message was received
          case KernelMessage(_, _, header, _, _, contentString)
            if header.msg_type == ExecuteReply.toTypeString =>
            val reply = Json.parse(contentString).as[ExecuteReply]
            reply.status == "error"
          case _ => false
        }
      }

      it("should send error message to relay") {
        handlerActor ! MockKernelMessageWithBadExecuteRequest

        kernelMessageRelayProbe.fishForMessage(200.milliseconds) {
          // Only mark as successful if this specific message was received
          case KernelMessage(_, _, header, _, _, _)
            if header.msg_type == ErrorContent.toTypeString => true
          case _ => false
        }
      }

      // TODO: Investigate if this is still relevant at all
      //      it("should send a status idle message") {
      //        handlerActor ! MockKernelMessageWithBadExecuteRequest
      //        var busy = false
      //        var idle = false
      //
      //        statusDispatchProbe.receiveWhile(100.milliseconds) {
      //          case Tuple2(status: KernelStatusType, header: Header)=>
      //            if(status == KernelStatusType.Busy)
      //              busy = true
      //            if(status == KernelStatusType.Idle)
      //              idle = true
      //        }
      //
      //        idle should be (true)
      //        busy should be (false)
      //      }
    }
  }
}
| yeghishe/spark-kernel | kernel/src/test/scala/com/ibm/spark/kernel/protocol/v5/handler/ExecuteRequestHandlerSpec.scala | Scala | apache-2.0 | 8,955 |
// Minimal runtime type-representation demo: a TypeRep type class with
// instances for Int, Boolean, and List, resolved implicitly by getType.
// (Compiler positive-test fixture — the exact implicit shapes are the point.)
object typerep extends App {
  class TypeRep[T] {}
  case object IntRep extends TypeRep[Int] {
    override def toString = "Int"
  }
  case object BooleanRep extends TypeRep[Boolean] {
    override def toString = "Boolean"
  }
  // Note: toString deliberately prints just "List", dropping the element type
  // even though elemrep is available.
  case class ListRep[T](elemrep: TypeRep[T]) extends TypeRep[List[T]] {
    override def toString = "List"
  }
  implicit def intRep: TypeRep[Int] = IntRep
  implicit def booleanRep: TypeRep[Boolean] = BooleanRep
  implicit def listRep[T](implicit elemrep: TypeRep[T]): TypeRep[List[T]] = ListRep(elemrep)
  // Returns the implicitly resolved representation of x's static type.
  def getType[T](x: T)(implicit rep: TypeRep[T]): TypeRep[T] = rep
  println(getType(1))       // prints "Int"
  println(getType(List(1))) // prints "List"
}
| yusuke2255/dotty | tests/untried/pos/typerep_pos.scala | Scala | bsd-3-clause | 655 |
/*
* Copyright (c) 2014 Contributor. All rights reserved.
*/
package scala.tools.nsc
import scala.io.StdIn.readLine
/**
* Simple application to check out amount of memory used by chosen classpath representation.
* It allows us to create many scalac-like calls based on specified parameters, where each main retains Global.
* And we need additional tool (e.g. profiler) to measure memory consumption itself.
*/
object ClassPathMemoryConsumptionTester {

  /** Compiler settings extended with the number of classpath instances to load. */
  private class TestSettings extends Settings {
    val requiredInstances = IntSetting("-requiredInstances",
      "Determine how many times classpath should be loaded", 10, Some((1, 10000)), (_: String) => None)
  }

  /** Main that retains a reference to its Global so the classpath stays reachable. */
  private class MainRetainsGlobal extends scala.tools.nsc.MainClass {
    var retainedGlobal: Global = _
    override def doCompile(compiler: Global) {
      retainedGlobal = compiler
      super.doCompile(compiler)
    }
  }

  def main(args: Array[String]): Unit = {
    if (args contains "-help") usage()
    else doTest(args)
  }

  /**
   * Runs scalac-like compilations `requiredInstances` times, each retaining
   * its Global, then blocks so memory consumption can be measured externally.
   */
  private def doTest(args: Array[String]) = {
    val settings = loadSettings(args.toList)
    val mains = (1 to settings.requiredInstances.value) map (_ => new MainRetainsGlobal)

    // we need original settings without additional params to be able to use them later
    val baseArgs = argsWithoutRequiredInstances(args)

    println(s"Loading classpath ${settings.requiredInstances.value} times")
    val startTime = System.currentTimeMillis()

    // Side-effecting run: foreach (not map) since results are discarded.
    mains foreach (_.process(baseArgs))

    val elapsed = System.currentTimeMillis() - startTime
    println(s"Operation finished - elapsed $elapsed ms")
    println("Memory consumption can be now measured")

    // BUGFIX: guard against readLine returning null on EOF (previously NPE'd).
    var textFromStdIn = ""
    while (textFromStdIn != null && textFromStdIn.toLowerCase != "exit")
      textFromStdIn = readLine("Type 'exit' to close application: ")
  }

  /**
   * Prints usage information
   */
  private def usage(): Unit =
    println( """Use classpath and sourcepath options like in the case of e.g. 'scala' command.
               | There's also one additional option:
               | -requiredInstances <int value> Determine how many times classpath should be loaded
             """.stripMargin.trim)

  /** Parses args into TestSettings, defaulting the classpath to the JVM's. */
  private def loadSettings(args: List[String]) = {
    val settings = new TestSettings()
    // NOTE(review): the (success, residual) result of processArguments is ignored.
    settings.processArguments(args, processAll = true)
    if (settings.classpath.isDefault)
      settings.classpath.value = sys.props("java.class.path")
    settings
  }

  /** Removes "-requiredInstances <value>" (flag plus its argument) from args. */
  private def argsWithoutRequiredInstances(args: Array[String]) = {
    val instancesIndex = args.indexOf("-requiredInstances")

    if (instancesIndex == -1) args
    else args.take(instancesIndex) ++ args.drop(instancesIndex + 2)
  }
}
| felixmulder/scala | src/compiler/scala/tools/nsc/ClassPathMemoryConsumptionTester.scala | Scala | bsd-3-clause | 2,692 |
package database
import org.json4s.JsonAST.JValue
import org.json4s.native.JsonMethods.parse
import org.mongodb.scala.MongoDatabase
/**
* Created by franblas on 02/04/17.
*/
class Database {

  // Shared Mongo handle for subclasses; backed by the application-wide context.
  protected val db: MongoDatabase = DatabaseContext.database

  /**
   * Loads a JSON document from the classpath and parses it.
   * BUGFIX: the Source is now closed once read (previously leaked).
   */
  def loadJsonResource(filepath: String): JValue = {
    val source = io.Source.fromResource(filepath)
    try parse(source.getLines mkString "\n")
    finally source.close()
  }

  /** Underlying value of a JValue as a String; None when the node or its value is null. */
  private def rawValue(a: JValue): Option[String] =
    Option(a).flatMap(v => Option(v.values)).map(_.toString)

  /** Int value of a JValue, or 0 when absent. May throw if the value is not numeric. */
  def intChecker(a: JValue): Int = rawValue(a).fold(0)(_.toInt)

  /** String value of a JValue, or "" when absent. */
  def stringChecker(a: JValue): String = rawValue(a).getOrElse("")

  /** Boolean value of a JValue, or false when absent. May throw if not "true"/"false". */
  def booleanChecker(a: JValue): Boolean = rawValue(a).fold(false)(_.toBoolean)
}
| franblas/NAOC | src/main/scala/database/Database.scala | Scala | mit | 912 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.controllers.registration.deceased
import iht.config.AppConfig
import iht.controllers.ControllerHelper.Mode
import iht.forms.registration.DeceasedForms._
import iht.models.{DeceasedDateOfDeath, DeceasedDetails, RegistrationDetails}
import iht.testhelpers.{CommonBuilder, TestHelper}
import iht.utils.RegistrationKickOutHelper
import iht.views.html.registration.deceased.deceased_permanent_home
import org.joda.time.LocalDate
import play.api.data.Form
import play.api.i18n.{Lang, Messages, MessagesApi}
import play.api.mvc.MessagesControllerComponents
import play.api.test.Helpers._
import uk.gov.hmrc.play.bootstrap.frontend.controller.FrontendController
import scala.concurrent.Future
// Controller tests for the deceased's permanent-home registration page:
// form prefill, page load in normal/edit mode, submit validation, caching,
// guard conditions, and kick-out redirects by domicile.
class DeceasedPermanentHomeControllerTest
  extends RegistrationDeceasedControllerWithEditModeBehaviour[DeceasedPermanentHomeController] with RegistrationKickOutHelper {
  val defaultDod = Some(DeceasedDateOfDeath(new LocalDate(2014, 1, 1)))
  override implicit val messagesApi: MessagesApi = app.injector.instanceOf[MessagesApi]
  implicit val messages: Messages = mockControllerComponents.messagesApi.preferred(Seq(Lang.defaultLang)).messages
  val appConfig: AppConfig = mockAppConfig
  // Base controller wiring shared by both authorised and unauthorised variants.
  protected abstract class TestController extends FrontendController(mockControllerComponents) with DeceasedPermanentHomeController {
    override val cc: MessagesControllerComponents = mockControllerComponents
    override implicit val appConfig: AppConfig = mockAppConfig
    override val deceasedPermanentHomeView: deceased_permanent_home = app.injector.instanceOf[deceased_permanent_home]
  }
  def controller = new TestController {
    override val cachingConnector = mockCachingConnector
    override val authConnector = mockAuthConnector
  }
  // NOTE(review): identical wiring to `controller` — presumably intended to
  // use an unauthorised auth connector; confirm against sibling specs.
  def controllerNotAuthorised = new TestController {
    override val cachingConnector = mockCachingConnector
    override val authConnector = mockAuthConnector
  }
  // Perform tests.
  "DeceasedPermanentHomeController" must {
    behave like securedRegistrationDeceasedController()
    "create the new form when there is no deceased details present" in {
      val applicantDetails = CommonBuilder.buildApplicantDetails
      val registrationDetails = RegistrationDetails(None, Some(applicantDetails), None)
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      val result: Form[DeceasedDetails] = controller.fillForm(registrationDetails)(createFakeRequest())
      result mustBe a[Form[_]]
    }
    "contain Continue button when Page is loaded in normal mode" in {
      val deceasedDetails = CommonBuilder.buildDeceasedDetails
      val registrationDetails=CommonBuilder.buildRegistrationDetails copy (
        deceasedDateOfDeath = Some(DeceasedDateOfDeath(LocalDate.now)),
        deceasedDetails=Some(deceasedDetails))
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      val result = controller.onPageLoad()(createFakeRequestWithReferrer(
        referrerURL=referrerURL,host=host))
      status(result) mustBe(OK)
      contentAsString(result) must include(messagesApi("iht.continue"))
      contentAsString(result) must not include(messagesApi("site.link.cancel"))
    }
    "contain Continue and Cancel buttons when page is loaded in edit mode" in {
      val deceasedDetails = CommonBuilder.buildDeceasedDetails
      val registrationDetails=CommonBuilder.buildRegistrationDetails copy (deceasedDateOfDeath = Some(DeceasedDateOfDeath(LocalDate.now)),
        deceasedDetails=Some(deceasedDetails))
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      val result = controller.onEditPageLoad()(createFakeRequestWithReferrer(referrerURL=referrerURL,host=host))
      status(result) mustBe(OK)
      contentAsString(result) must include(messagesApi("iht.continue"))
      contentAsString(result) must include(messagesApi("site.link.cancel"))
    }
    "respond appropriately to a submit with valid values in all fields" in {
      val deceasedDetails = CommonBuilder.buildDeceasedDetails
      val registrationDetails = RegistrationDetails(defaultDod, None, Some(deceasedDetails))
      val deceasedDetailsForm1 = deceasedPermanentHomeForm.fill(deceasedDetails)
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host,
        data=deceasedDetailsForm1.data.toSeq, authRetrieveNino = false)
      createMockToGetRegDetailsFromCacheNoOption(mockCachingConnector, Future.successful(Some(registrationDetails)))
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      createMockToStoreRegDetailsInCache(mockCachingConnector, Some(registrationDetails))
      val result = controller.onSubmit()(request)
      status(result) must be(SEE_OTHER)
      redirectLocation(result) must be (
        Some(iht.controllers.registration.deceased.routes.AboutDeceasedController.onPageLoad().url))
    }
    "respond appropriately to an invalid submit: Missing mandatory fields" in {
      val deceasedDetails = DeceasedDetails(None, None, None, None, None, None, None, None, None)
      val registrationDetails = RegistrationDetails(defaultDod, None, Some(deceasedDetails))
      val deceasedDetailsForm1 = deceasedPermanentHomeForm.fill(deceasedDetails)
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host,
        data=deceasedDetailsForm1.data.toSeq, authRetrieveNino = false)
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      createMockToStoreRegDetailsInCache(mockCachingConnector, Some(registrationDetails))
      val result = await(controller.onSubmit()(request))
      status(result) mustBe(BAD_REQUEST)
    }
    "respond appropriately to a submit in edit mode with valid values in all fields" in {
      val deceasedDetails = CommonBuilder.buildDeceasedDetails
      val registrationDetails = RegistrationDetails(defaultDod, None, Some(deceasedDetails))
      val deceasedDetailsForm1 = deceasedPermanentHomeForm.fill(deceasedDetails)
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host, data=deceasedDetailsForm1.data.toSeq, authRetrieveNino = false)
      createMockToGetRegDetailsFromCacheNoOption(mockCachingConnector, Future.successful(Some(registrationDetails)))
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      createMockToStoreRegDetailsInCache(mockCachingConnector, Some(registrationDetails))
      val result = controller.onEditSubmit()(request)
      status(result) must be(SEE_OTHER)
      redirectLocation(result) must be (Some(iht.controllers.registration.routes.RegistrationSummaryController.onPageLoad().url))
    }
    "respond appropriately to an invalid submit in edit mode: Missing mandatory fields" in {
      val deceasedDetails = DeceasedDetails(None, None, None, None, None, None, None, None, None)
      val registrationDetails = RegistrationDetails(defaultDod, None, Some(deceasedDetails))
      val deceasedDetailsForm1 = deceasedPermanentHomeForm.fill(deceasedDetails)
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host, data=deceasedDetailsForm1.data.toSeq, authRetrieveNino = false)
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      createMockToStoreRegDetailsInCache(mockCachingConnector, Some(registrationDetails))
      val result = await(controller.onEditSubmit()(request))
      status(result) mustBe(BAD_REQUEST)
    }
    "save valid data correctly when coming to this screen for the first time" in {
      val existingRegistrationDetails = RegistrationDetails(Some(DeceasedDateOfDeath(new LocalDate(1980, 1, 1))), None, None)
      val deceasedDetails = DeceasedDetails(domicile = Some(mockAppConfig.domicileEnglandOrWales))
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(existingRegistrationDetails))
      createMockToStoreRegDetailsInCache(mockCachingConnector, Some(existingRegistrationDetails))
      val form = deceasedPermanentHomeForm.fill(deceasedDetails)
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host, data=form.data.toSeq, authRetrieveNino = false)
      val result = controller.onSubmit()(request)
      status(result) must be (SEE_OTHER)
      val capturedValue = verifyAndReturnStoredRegistationDetails(mockCachingConnector)
      capturedValue.deceasedDetails mustBe Some(deceasedDetails)
    }
    "return true if the guard conditions are true" in {
      val rd = CommonBuilder.buildRegistrationDetails copy (
        deceasedDateOfDeath = Some(DeceasedDateOfDeath(LocalDate.now)))
      controller.checkGuardCondition(rd, "") mustBe true
    }
    "return false if the guard conditions are false" in {
      val rd = CommonBuilder.buildRegistrationDetails copy (deceasedDateOfDeath = None)
      controller.checkGuardCondition(rd, "") mustBe false
    }
    // Submits with the given domicile, expecting a redirect to the kick-out
    // page and the kick-out reason to be cached; covers normal and edit mode.
    def ensureRedirectOnKickout(domicile: String, kickoutReasonKey: String, mode: Mode.Value) = {
      val deceasedDetails = CommonBuilder.buildDeceasedDetails copy (domicile = Some(domicile))
      val registrationDetails = RegistrationDetails(defaultDod, None, Some(deceasedDetails))
      val deceasedDetailsForm1 = deceasedPermanentHomeForm.fill(deceasedDetails)
      val request = createFakeRequestWithReferrerWithBody(referrerURL=referrerURL,host=host,
        data=deceasedDetailsForm1.data.toSeq, authRetrieveNino = false)
      createMockToGetRegDetailsFromCacheNoOption(mockCachingConnector, Future.successful(Some(registrationDetails)))
      createMockToGetRegDetailsFromCache(mockCachingConnector, Some(registrationDetails))
      createMockToStoreRegDetailsInCache(mockCachingConnector, Some(registrationDetails))
      createMockToStoreSingleValueInCache(
        cachingConnector=mockCachingConnector,
        singleValueReturn=Some(kickoutReasonKey))
      val result =
        if (mode == Mode.Standard) await(controller.onSubmit()(request))
        else await(controller.onEditSubmit()(request))
      status(result) must be(SEE_OTHER)
      redirectLocation(result) must be (
        Some(iht.controllers.registration.routes.KickoutRegController.onPageLoad().url))
      verifyAndReturnStoredSingleValue(mockCachingConnector) match {
        case (cachedKey, cachedValue) =>
          cachedKey mustBe RegistrationKickoutReasonCachingKey
          cachedValue mustBe kickoutReasonKey
      }
    }
    "redirect to kickout page if domicile is Scotland" in {
      ensureRedirectOnKickout(TestHelper.domicileScotland, KickoutDeceasedDetailsLocationScotland, Mode.Standard)
    }
    "redirect to kickout page if domicile is Northern Ireland" in {
      ensureRedirectOnKickout(TestHelper.domicileNI, KickoutDeceasedDetailsLocationNI, Mode.Standard)
    }
    "redirect to kickout page if domicile is outside the UK" in {
      ensureRedirectOnKickout(TestHelper.domicileOther, KickoutDeceasedDetailsLocationOther, Mode.Standard)
    }
    "redirect to kickout page in edit mode if domicile is Scotland" in {
      ensureRedirectOnKickout(TestHelper.domicileScotland, KickoutDeceasedDetailsLocationScotland, Mode.Edit)
    }
    "redirect to kickout page in edit mode if domicile is Northern Ireland" in {
      ensureRedirectOnKickout(TestHelper.domicileNI, KickoutDeceasedDetailsLocationNI, Mode.Edit)
    }
    "redirect to kickout page in edit mode if domicile is outside the UK" in {
      ensureRedirectOnKickout(TestHelper.domicileOther, KickoutDeceasedDetailsLocationOther, Mode.Edit)
    }
  }
}
| hmrc/iht-frontend | test/iht/controllers/registration/deceased/DeceasedPermanentHomeControllerTest.scala | Scala | apache-2.0 | 12,259 |
package at.logic.gapt.proofs.lkNew
import at.logic.gapt.expr._
import at.logic.gapt.proofs.HOLSequent
import at.logic.gapt.proofs._
import at.logic.gapt.provers.Prover
import at.logic.gapt.expr.To
// Raised when interpolant extraction is given an unsupported proof or partition.
class InterpolationException( msg: String ) extends Exception( msg )
object ExtractInterpolant {
  // Convenience wrapper: returns only the interpolant (third component of Interpolate's result).
  def apply( p: LKProof, npart: Seq[SequentIndex], ppart: Seq[SequentIndex] ) = Interpolate( p, npart, ppart )._3
  /**
   * Given sequents negative: \\Gamma |- \\Delta and positive: \\Pi |- \\Lambda,
   * compute a proof of \\Gamma, \\Pi |- \\Delta, \\Lambda and from that proof,
   * extract an interpolant I such that \\Gamma |- \\Delta, I and I, \\Pi |- \\Lambda
   * are valid.
   */
  def apply( negative: HOLSequent, positive: HOLSequent, prover: Prover ): HOLFormula = {
    val seq = negative ++ positive
    // NOTE(review): .get throws if the prover fails to find a proof — the combined
    // sequent is assumed provable here; consider surfacing a clearer error.
    val p = prover.getLKProof( seq ).get
    val npart = p.endSequent.filter { fo => negative.contains( fo ) }
    val ppart = p.endSequent.filter { fo => positive.contains( fo ) }
    // NOTE(review): .indices on the *filtered* sequents is assumed to address the
    // corresponding positions of p.endSequent — verify against gapt's Sequent API.
    apply( p, npart.indices, ppart.indices )
  }
}
/** Craig-interpolation for propositional LK-proofs with at most atomic cuts.
  * The recursion follows the structure of the proof: each case maps the given
  * partition of the conclusion to partitions of the premises, recurses, and
  * recombines the sub-interpolants according to which side of the partition
  * the main formula of the inference belongs to. */
object Interpolate {
  /**
   * This method computes interpolating proofs from propositional LK-proof
   * containing at most atomic cuts. As arguments it expects a proof p
   * and a partition of its end-sequent into two parts:
   * a "negative" part and a "positive" part.
   * For \\Gamma |- \\Delta being the negative and \\Pi |- \\Lambda being the
   * positive part, it will compute an interpolant I and proofs of
   * \\Gamma |- \\Delta, I and I, \\Pi |- \\Lambda
   *
   * @param p the LK proof from which the interpolant is to be extracted
   * @param npart the negative part of the partition of the end-sequent of p
   * @param ppart the positive part of the partition of the end-sequent of p
   * @return a triple consisting of ( a proof of \\Gamma |- \\Delta, I,
   *         a proof of I, \\Pi |- \\Lambda, the FOLFormula I )
   * @throws InterpolationException if the input proof is not propositional,
   *         contains non-atomic cuts or if (npart,ppart) is not a partition of its
   *         end-sequent.
   */
  def apply( p: LKProof, npart: Seq[SequentIndex], ppart: Seq[SequentIndex] ): ( LKProof, LKProof, HOLFormula ) = p match {
    // axioms
    case LogicalAxiom( atom ) => {
      assert( npart.size + ppart.size == 2 )
      val inNpart = npart.filter( ind => p.endSequent( ind ) == atom )
      val inPpart = ppart.filter( ind => p.endSequent( ind ) == atom )
      /*
       * Distinguish cases according to the partitions of the formulas in the logical axiom:
       * Case: A :- A and :-     => Interpolant: ⊥ => Result: A :- A,⊥ and ⊥ :-
       *
       * Case: :- and A :- A     => Interpolant: ⊤ => Result: :- ⊤ and ⊤,A :- A
       *
       * Case: :- A and A :-     => Interpolant: ¬A => Result: :- A,¬A and ¬A,A :-
       *
       * Case: A :- and :- A     => Interpolant: A => Result: A :- A and A :- A
       */
      if ( inNpart.size == 2 ) ( WeakeningRightRule( p, Bottom() ), Axiom( Bottom() :: Nil, Nil ), Bottom() )
      else if ( inNpart.size == 1 && inPpart.size == 1 ) {
        if ( inNpart( 0 ).isInstanceOf[Ant] && inPpart( 0 ).isInstanceOf[Suc] ) ( p, p, atom )
        else if ( inNpart( 0 ).isInstanceOf[Suc] && inPpart( 0 ).isInstanceOf[Ant] ) ( NegRightRule( p, atom ), NegLeftRule( p, atom ), Neg( atom ) )
        else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
      } else if ( inPpart.size == 2 ) ( Axiom( Nil, Top() :: Nil ), WeakeningLeftRule( p, Top() ), Top() )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    /*
     * Possible partitions
     *
     * Case: :- ⊤ and :-    => Interpolant: ⊥ => Result: :- ⊤,⊥ and ⊥ :-
     *
     * Case: :- and :- ⊤    => Interpolant: ⊤ => Result: :- ⊤ and ⊤ :- ⊤
     */
    case TopAxiom => {
      assert( npart.size + ppart.size == 1 )
      val inNpart = npart.filter( ind => p.endSequent( ind ) == Top() )
      val inPpart = ppart.filter( ind => p.endSequent( ind ) == Top() )
      if ( inNpart.size == 1 ) ( WeakeningRightRule( p, Bottom() ), Axiom( Bottom() :: Nil, Nil ), Bottom() )
      else if ( inPpart.size == 1 ) ( Axiom( Nil, Top() :: Nil ), WeakeningLeftRule( p, Top() ), Top() )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    /*
     * Possible Partitions:
     *
     * Case: ⊥ :- and :-    => Interpolant: ⊥ => Result: ⊥ :- ⊥ and ⊥ :-
     *
     * Case: :- and ⊥ :-    => Interpolant: ⊤ => Result: :- ⊤ and ⊤,⊥ :-
     */
    case BottomAxiom => {
      assert( npart.size + ppart.size == 1 )
      val inNpart = npart.filter( ind => p.endSequent( ind ) == Bottom() )
      val inPpart = ppart.filter( ind => p.endSequent( ind ) == Bottom() )
      if ( inNpart.size == 1 ) ( WeakeningRightRule( p, Bottom() ), Axiom( Bottom() :: Nil, Nil ), Bottom() )
      else if ( inPpart.size == 1 ) ( Axiom( Nil, Top() :: Nil ), WeakeningLeftRule( p, Top() ), Top() )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    /*
     * Possible Partitions:
     *
     * Case: :- s=s and :-    => Interpolant: ⊥ => Result: :- s=s,⊥ and ⊥ :-
     *
     * Case: :- and :- s=s    => Interpolant: ⊤ => Result: :- ⊤ and ⊤ :- s=s
     */
    case ReflexivityAxiom( term ) => {
      assert( npart.size + ppart.size == 1 )
      val atom = Eq( term, term )
      val inNpart = npart.filter( ind => p.endSequent( ind ) == atom )
      val inPpart = ppart.filter( ind => p.endSequent( ind ) == atom )
      if ( inNpart.size == 1 ) ( WeakeningRightRule( p, Bottom() ), Axiom( Bottom() :: Nil, Nil ), Bottom() )
      else if ( inPpart.size == 1 ) ( Axiom( Nil, Top() :: Nil ), WeakeningLeftRule( p, Top() ), Top() )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    // structural rules
    case WeakeningLeftRule( subProof, formula ) => {
      val ( up_nproof, up_pproof, up_I ) = applyUpUnary( p, npart, ppart )
      // (removed two dead locals that filtered npart/ppart by a vacuous condition and were never used)
      // p.mainIndices refers to the index of the formula introduced by WeakeningLeft in the end-sequent of the proof p
      if ( npart.contains( p.mainIndices( 0 ) ) ) ( WeakeningLeftRule( up_nproof, formula ), up_pproof, up_I )
      else if ( ppart.contains( p.mainIndices( 0 ) ) ) ( up_nproof, WeakeningLeftRule( up_pproof, formula ), up_I )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case WeakeningRightRule( subProof, formula ) => {
      val ( up_nproof, up_pproof, up_I ) = applyUpUnary( p, npart, ppart )
      if ( npart.contains( p.mainIndices( 0 ) ) ) ( WeakeningRightRule( up_nproof, formula ), up_pproof, up_I )
      else if ( ppart.contains( p.mainIndices( 0 ) ) ) ( up_nproof, WeakeningRightRule( up_pproof, formula ), up_I )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case ContractionLeftRule( subProof, aux1, aux2 ) => {
      val ( up_nproof, up_pproof, up_I ) = applyUpUnary( p, npart, ppart )
      val formula = p.mainFormulas( 0 )
      if ( npart.contains( p.mainIndices( 0 ) ) ) ( ContractionLeftRule( up_nproof, formula ), up_pproof, up_I )
      else if ( ppart.contains( p.mainIndices( 0 ) ) ) ( up_nproof, ContractionLeftRule( up_pproof, formula ), up_I )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case ContractionRightRule( subProof, aux1, aux2 ) => {
      val ( up_nproof, up_pproof, up_I ) = applyUpUnary( p, npart, ppart )
      val formula = p.mainFormulas( 0 )
      if ( npart.contains( p.mainIndices( 0 ) ) ) ( ContractionRightRule( up_nproof, formula ), up_pproof, up_I )
      else if ( ppart.contains( p.mainIndices( 0 ) ) ) ( up_nproof, ContractionRightRule( up_pproof, formula ), up_I )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case CutRule( leftSubProof, aux1, rightSubProof, aux2 ) => {
      val ( up1_nproof, up1_pproof, up1_I ) = applyUpCutLeft( p, npart, ppart, aux1 )
      val ( up2_nproof, up2_pproof, up2_I ) = applyUpCutRight( p, npart, ppart, aux2 )
      val up1_nFormulas = up1_nproof.endSequent.formulas
      val up2_nFormulas = up2_nproof.endSequent.formulas
      val up1_pFormulas = up1_pproof.endSequent.formulas
      val up2_pFormulas = up2_pproof.endSequent.formulas
      val cutFormula = leftSubProof.endSequent( aux1 )
      // The cut formula was assigned to one side by applyUpCut{Left,Right};
      // combine the sub-interpolants with Or (negative side) or And (positive side).
      if ( ( up1_nFormulas.contains( cutFormula ) || up2_nFormulas.contains( cutFormula ) ) ) {
        val ipl = Or( up1_I, up2_I )
        val np = OrRightRule( CutRule( up1_nproof, cutFormula, up2_nproof, cutFormula ), up1_I, up2_I )
        val pp = OrLeftRule( up1_pproof, up1_I, up2_pproof, up2_I )
        ( np, pp, ipl )
      } else if ( ( up1_pFormulas.contains( cutFormula ) || up2_pFormulas.contains( cutFormula ) ) ) {
        val ipl = And( up1_I, up2_I )
        val np = AndRightRule( up1_nproof, up1_I, up2_nproof, up2_I )
        val pp = AndLeftRule( CutRule( up1_pproof, cutFormula, up2_pproof, cutFormula ), up1_I, up2_I )
        ( np, pp, ipl )
      } else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    // propositional rules
    case AndRightRule( leftSubProof, aux1, rightSubProof, aux2 ) => {
      val ( up1_nproof, up1_pproof, up1_I ) = applyUpBinaryLeft( p, npart, ppart )
      val ( up2_nproof, up2_pproof, up2_I ) = applyUpBinaryRight( p, npart, ppart )
      val formula1 = p.auxFormulas( 0 )( 0 )
      val formula2 = p.auxFormulas( 1 )( 0 )
      if ( npart.contains( p.mainIndices( 0 ) ) ) {
        val ipl = Or( up1_I, up2_I )
        val np = OrRightRule( AndRightRule( up1_nproof, formula1, up2_nproof, formula2 ), up1_I, up2_I )
        val pp = OrLeftRule( up1_pproof, up1_I, up2_pproof, up2_I )
        ( np, pp, ipl )
      } else if ( ppart.contains( p.mainIndices( 0 ) ) ) {
        val ipl = And( up1_I, up2_I )
        val np = AndRightRule( up1_nproof, up1_I, up2_nproof, up2_I )
        val pp = AndLeftRule( AndRightRule( up1_pproof, formula1, up2_pproof, formula2 ), up1_I, up2_I )
        ( np, pp, ipl )
      } else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case AndLeftRule( subProof, aux1, aux2 ) => {
      val ( up_nproof, up_pproof, up_I ) = applyUpUnary( p, npart, ppart )
      val formula1 = p.auxFormulas( 0 )( 0 )
      val formula2 = p.auxFormulas( 0 )( 1 )
      if ( npart.contains( p.mainIndices( 0 ) ) ) ( AndLeftRule( up_nproof, formula1, formula2 ), up_pproof, up_I )
      else if ( ppart.contains( p.mainIndices( 0 ) ) ) ( up_nproof, AndLeftRule( up_pproof, formula1, formula2 ), up_I )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case OrLeftRule( leftSubProof, aux1, rightSubProof, aux2 ) => {
      val ( up1_nproof, up1_pproof, up1_I ) = applyUpBinaryLeft( p, npart, ppart )
      val ( up2_nproof, up2_pproof, up2_I ) = applyUpBinaryRight( p, npart, ppart )
      val formula1 = p.auxFormulas( 0 )( 0 )
      val formula2 = p.auxFormulas( 1 )( 0 )
      if ( npart.contains( p.mainIndices( 0 ) ) ) {
        val ipl = Or( up1_I, up2_I )
        val np = OrRightRule( OrLeftRule( up1_nproof, formula1, up2_nproof, formula2 ), up1_I, up2_I )
        val pp = OrLeftRule( up1_pproof, up1_I, up2_pproof, up2_I )
        ( np, pp, ipl )
      } else if ( ppart.contains( p.mainIndices( 0 ) ) ) {
        val ipl = And( up1_I, up2_I )
        val np = AndRightRule( up1_nproof, up1_I, up2_nproof, up2_I )
        val pp = AndLeftRule( OrLeftRule( up1_pproof, formula1, up2_pproof, formula2 ), up1_I, up2_I )
        ( np, pp, ipl )
      } else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case OrRightRule( subProof, aux1, aux2 ) => {
      val ( up_nproof, up_pproof, up_I ) = applyUpUnary( p, npart, ppart )
      val formula1 = p.auxFormulas( 0 )( 0 )
      val formula2 = p.auxFormulas( 0 )( 1 )
      if ( npart.contains( p.mainIndices( 0 ) ) ) ( OrRightRule( up_nproof, formula1, formula2 ), up_pproof, up_I )
      else if ( ppart.contains( p.mainIndices( 0 ) ) ) ( up_nproof, OrRightRule( up_pproof, formula1, formula2 ), up_I )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case NegLeftRule( subProof, aux ) => {
      val ( up_nproof, up_pproof, up_I ) = applyUpUnary( p, npart, ppart )
      if ( npart.contains( p.mainIndices( 0 ) ) ) ( NegLeftRule( up_nproof, subProof.endSequent( aux ) ), up_pproof, up_I )
      else if ( ppart.contains( p.mainIndices( 0 ) ) ) ( up_nproof, NegLeftRule( up_pproof, subProof.endSequent( aux ) ), up_I )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case NegRightRule( subProof, aux ) => {
      val ( up_nproof, up_pproof, up_I ) = applyUpUnary( p, npart, ppart )
      if ( npart.contains( p.mainIndices( 0 ) ) ) ( NegRightRule( up_nproof, subProof.endSequent( aux ) ), up_pproof, up_I )
      else if ( ppart.contains( p.mainIndices( 0 ) ) ) ( up_nproof, NegRightRule( up_pproof, subProof.endSequent( aux ) ), up_I )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case ImpLeftRule( leftSubProof, aux1, rightSubProof, aux2 ) => {
      val ( up1_nproof, up1_pproof, up1_I ) = applyUpBinaryLeft( p, npart, ppart )
      val ( up2_nproof, up2_pproof, up2_I ) = applyUpBinaryRight( p, npart, ppart )
      val formula1 = p.auxFormulas( 0 )( 0 )
      val formula2 = p.auxFormulas( 1 )( 0 )
      if ( npart.contains( p.mainIndices( 0 ) ) ) {
        val ipl = Or( up1_I, up2_I )
        val np = OrRightRule( ImpLeftRule( up1_nproof, formula1, up2_nproof, formula2 ), up1_I, up2_I )
        val pp = OrLeftRule( up1_pproof, up1_I, up2_pproof, up2_I )
        ( np, pp, ipl )
      } else if ( ppart.contains( p.mainIndices( 0 ) ) ) {
        val ipl = And( up1_I, up2_I )
        val np = AndRightRule( up1_nproof, up1_I, up2_nproof, up2_I )
        val pp = AndLeftRule( ImpLeftRule( up1_pproof, formula1, up2_pproof, formula2 ), up1_I, up2_I )
        ( np, pp, ipl )
      } else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case ImpRightRule( subProof, aux1, aux2 ) => {
      val ( up_nproof, up_pproof, up_I ) = applyUpUnary( p, npart, ppart )
      val formula1 = p.auxFormulas( 0 )( 0 )
      val formula2 = p.auxFormulas( 0 )( 1 )
      if ( npart.contains( p.mainIndices( 0 ) ) ) ( ImpRightRule( up_nproof, formula1, formula2 ), up_pproof, up_I )
      else if ( ppart.contains( p.mainIndices( 0 ) ) ) ( up_nproof, ImpRightRule( up_pproof, formula1, formula2 ), up_I )
      else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    // equality rules
    case EqualityRightRule( subProof, eq, aux, pos ) => {
      val ( up_nproof, up_pproof, up_I ) = applyUpUnary( p, npart, ppart )
      val auxFormula = subProof.endSequent( aux )
      val eqIndex = p.occConnectors( 0 ).child( eq )
      var ipl = up_I
      // If the equation and the main formula land on the same side, the inference
      // stays on that side; otherwise the equation is folded into the interpolant.
      if ( npart.contains( eqIndex ) && npart.contains( p.mainIndices( 0 ) ) ) ( EqualityRightRule( up_nproof, eq, auxFormula, pos ), up_pproof, up_I )
      else if ( ppart.contains( eqIndex ) && ppart.contains( p.mainIndices( 0 ) ) ) ( up_nproof, EqualityRightRule( up_pproof, eq, auxFormula, pos ), up_I )
      else if ( npart.contains( p.mainIndices( 0 ) ) ) {
        ipl = Imp( p.endSequent( eqIndex ), up_I )
        val up_nproof1 = WeakeningLeftRule( up_nproof, p.endSequent( eqIndex ) )
        val up_nproof2 = EqualityRightRule( up_nproof1, eq, auxFormula, pos )
        val up_nproof3 = ImpRightRule( up_nproof2, p.endSequent( eqIndex ), up_I )
        val up_pproof1 = ImpLeftRule( LogicalAxiom( p.endSequent( eqIndex ).asInstanceOf[FOLAtom] ), p.endSequent( eqIndex ), up_pproof, up_I )
        val up_pproof2 = ContractionLeftRule( up_pproof1, p.endSequent( eqIndex ) )
        ( up_nproof3, up_pproof2, ipl )
      } else if ( ppart.contains( p.mainIndices( 0 ) ) ) {
        ipl = And( p.endSequent( eqIndex ), up_I )
        val up_nproof1 = AndRightRule( LogicalAxiom( p.endSequent( eqIndex ).asInstanceOf[FOLAtom] ), up_nproof, And( p.endSequent( eqIndex ), up_I ) )
        val up_nproof2 = ContractionLeftRule( up_nproof1, p.endSequent( eqIndex ) )
        val up_pproof1 = WeakeningLeftRule( up_pproof, p.endSequent( eqIndex ) )
        val up_pproof2 = EqualityRightRule( up_pproof1, eq, auxFormula, pos )
        val up_pproof3 = AndLeftRule( up_pproof2, p.endSequent( eqIndex ), up_I )
        ( up_nproof2, up_pproof3, ipl )
      } else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case EqualityLeftRule( subProof, eq, aux, pos ) => {
      val ( up_nproof, up_pproof, up_I ) = applyUpUnary( p, npart, ppart )
      val auxFormula = subProof.endSequent( aux )
      val eqIndex = p.occConnectors( 0 ).child( eq )
      var ipl = up_I
      if ( npart.contains( eqIndex ) && npart.contains( p.mainIndices( 0 ) ) ) ( EqualityLeftRule( up_nproof, eq, auxFormula, pos ), up_pproof, up_I )
      else if ( ppart.contains( eqIndex ) && ppart.contains( p.mainIndices( 0 ) ) ) ( up_nproof, EqualityLeftRule( up_pproof, eq, auxFormula, pos ), up_I )
      else if ( npart.contains( p.mainIndices( 0 ) ) ) {
        ipl = Imp( p.endSequent( eqIndex ), up_I )
        val up_nproof1 = WeakeningLeftRule( up_nproof, p.endSequent( eqIndex ) )
        val up_nproof2 = EqualityLeftRule( up_nproof1, eq, auxFormula, pos )
        val up_nproof3 = ImpRightRule( up_nproof2, p.endSequent( eqIndex ), up_I )
        val up_pproof1 = ImpLeftRule( LogicalAxiom( p.endSequent( eqIndex ).asInstanceOf[FOLAtom] ), p.endSequent( eqIndex ), up_pproof, up_I )
        val up_pproof2 = ContractionLeftRule( up_pproof1, p.endSequent( eqIndex ) )
        ( up_nproof3, up_pproof2, ipl )
      } else if ( ppart.contains( p.mainIndices( 0 ) ) ) {
        ipl = And( p.endSequent( eqIndex ), up_I )
        val up_nproof1 = AndRightRule( LogicalAxiom( p.endSequent( eqIndex ).asInstanceOf[FOLAtom] ), up_nproof, And( p.endSequent( eqIndex ), up_I ) )
        val up_nproof2 = ContractionLeftRule( up_nproof1, p.endSequent( eqIndex ) )
        val up_pproof1 = WeakeningLeftRule( up_pproof, p.endSequent( eqIndex ) )
        val up_pproof2 = EqualityLeftRule( up_pproof1, eq, auxFormula, pos )
        val up_pproof3 = AndLeftRule( up_pproof2, p.endSequent( eqIndex ), up_I )
        ( up_nproof2, up_pproof3, ipl )
      } else throw new InterpolationException( "Negative and positive part must form a partition of the end-sequent." )
    }
    case _ => throw new InterpolationException( "Unknown inference rule of type: " + p.name.toString() + "." )
  }

  /** Recurses into the single premise of a unary rule, mapping the partition
    * indices through the rule's occurrence connector. */
  private def applyUpUnary( p: LKProof, npart: Seq[SequentIndex], ppart: Seq[SequentIndex] ) = {
    val up_npart = npart.flatMap { ind => p.occConnectors( 0 ).parents( ind ) }
    val up_ppart = ppart.flatMap { ind => p.occConnectors( 0 ).parents( ind ) }
    apply( p.immediateSubProofs( 0 ), up_npart, up_ppart )
  }

  /** Recurses into the left premise of a binary rule (connector 0). */
  private def applyUpBinaryLeft( p1: LKProof, npart: Seq[SequentIndex], ppart: Seq[SequentIndex] ) = {
    val up_npart = npart.flatMap { ind => p1.occConnectors( 0 ).parents( ind ) }
    val up_ppart = ppart.flatMap { ind => p1.occConnectors( 0 ).parents( ind ) }
    apply( p1.immediateSubProofs( 0 ), up_npart, up_ppart )
  }

  /** Recurses into the right premise of a binary rule (connector 1). */
  private def applyUpBinaryRight( p2: LKProof, npart: Seq[SequentIndex], ppart: Seq[SequentIndex] ) = {
    val up_npart = npart.flatMap { ind => p2.occConnectors( 1 ).parents( ind ) }
    val up_ppart = ppart.flatMap { ind => p2.occConnectors( 1 ).parents( ind ) }
    apply( p2.immediateSubProofs( 1 ), up_npart, up_ppart )
  }

  /** Recurses into the left premise of a cut. The cut-formula occurrence aux1 is
    * assigned to the positive part iff some equal end-sequent formula lies in
    * ppart and none in npart; otherwise it goes to the negative part. */
  private def applyUpCutLeft( p1: LKProof, npart: Seq[SequentIndex], ppart: Seq[SequentIndex], aux1: SequentIndex ) = {
    var up_npart = npart.flatMap { ind => p1.occConnectors( 0 ).parents( ind ) }
    var up_ppart = ppart.flatMap { ind => p1.occConnectors( 0 ).parents( ind ) }
    val auxFormula = p1.immediateSubProofs( 0 ).endSequent( aux1 )
    val nFormulas = npart.filter { ind => p1.endSequent( ind ) == auxFormula }
    val pFormulas = ppart.filter { ind => p1.endSequent( ind ) == auxFormula }
    if ( !pFormulas.isEmpty && nFormulas.isEmpty ) {
      up_ppart :+= aux1
    } else {
      up_npart :+= aux1
    }
    apply( p1.immediateSubProofs( 0 ), up_npart, up_ppart )
  }

  /** Recurses into the right premise of a cut; see [[applyUpCutLeft]] for how
    * the cut-formula occurrence aux2 is assigned to a side of the partition. */
  private def applyUpCutRight( p2: LKProof, npart: Seq[SequentIndex], ppart: Seq[SequentIndex], aux2: SequentIndex ) = {
    var up_npart = npart.flatMap { ind => p2.occConnectors( 1 ).parents( ind ) }
    var up_ppart = ppart.flatMap { ind => p2.occConnectors( 1 ).parents( ind ) }
    val auxFormula = p2.immediateSubProofs( 1 ).endSequent( aux2 )
    val nFormulas = npart.filter { ind => p2.endSequent( ind ) == auxFormula }
    val pFormulas = ppart.filter { ind => p2.endSequent( ind ) == auxFormula }
    if ( !pFormulas.isEmpty && nFormulas.isEmpty ) {
      up_ppart :+= aux2
    } else {
      up_npart :+= aux2
    }
    apply( p2.immediateSubProofs( 1 ), up_npart, up_ppart )
  }
}
| loewenheim/gapt | src/main/scala/at/logic/gapt/proofs/lkNew/applyInterpolation.scala | Scala | gpl-3.0 | 22,008 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.datasource.mongodb.partitioner
import com.mongodb.casbah.Imports._
import com.stratio.datasource.partitioner.PartitionRange
import org.apache.spark.Partition
/**
 * A Spark [[Partition]] covering one slice of a MongoDB collection.
 *
 * @param index Partition index
 * @param hosts Hosts that hold partition data
 * @param partitionRange Partition range
 */
case class MongodbPartition(
  index: Int,
  hosts: Seq[String],
  partitionRange: PartitionRange[DBObject]) extends Partition
package it.unipd.dei.diversity
import com.codahale.metrics.MetricRegistry
/** Central registry of performance counters used to instrument the algorithms. */
object PerformanceMetrics {

  val registry = new MetricRegistry

  // Counts invocations of the distance function.
  val distanceFnCounter = registry.counter("distance-counter")
  // Counts calls to the matroid independence oracle.
  val matroidOracleCounter = registry.counter("matroid-oracle")

  /** Zeroes every counter by decrementing it by its current count. */
  def reset(): Unit =
    Seq(distanceFnCounter, matroidOracleCounter).foreach { counter =>
      counter.dec(counter.getCount)
    }

  /** Records one invocation of the distance function. */
  def distanceFnCounterInc() = distanceFnCounter.inc()

  /** Records one call to the matroid oracle. */
  def matroidOracleCounterInc() = matroidOracleCounter.inc()
}
| Cecca/diversity-maximization | core/src/main/scala/it/unipd/dei/diversity/PerformanceMetrics.scala | Scala | gpl-3.0 | 534 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp
import cc.factorie.util.FastLogging
import scala.reflect.ClassTag
/**User: apassos
* Date: 8/7/13
* Time: 2:48 PM
*/
/** A sequence of DocumentAnnotators packaged as a single DocumentAnnotator.
This class also properly populates the Document.annotators with a record of which DocumentAnnotator classes provided which annotation classes. */
class DocumentAnnotationPipeline(val annotators: Seq[DocumentAnnotator], val prereqAttrs: Seq[Class[_]] = Seq()) extends DocumentAnnotator {
  // When true, per-annotator wall-clock time and token throughput are recorded.
  var profile = false
  var tokensProcessed = 0
  var msProcessed = 0L
  // Cumulative milliseconds spent in each annotator; only populated when profile == true.
  val timePerAnnotator = collection.mutable.LinkedHashMap[DocumentAnnotator,Long]()
  def postAttrs = annotators.flatMap(_.postAttrs).distinct
  // Runs each annotator in order, skipping those whose outputs are already present,
  // and records which annotator provided each annotation class.
  def process(document: Document) = {
    var doc = document
    val t00 = System.currentTimeMillis()
    for (annotator <- annotators; if annotator.postAttrs.forall(!doc.hasAnnotation(_))) {
      val t0 = System.currentTimeMillis()
      doc = annotator.process(doc)
      if (profile) timePerAnnotator(annotator) = timePerAnnotator.getOrElse(annotator, 0L) + System.currentTimeMillis() - t0
      // NOTE(review): provenance is recorded on the original `document`, while the
      // pipeline continues with the (possibly distinct) `doc` returned by process --
      // confirm annotators mutate and return the same instance.
      annotator.postAttrs.foreach(a => document.annotators(a) = annotator.getClass)
    }
    if (profile) {
      msProcessed += System.currentTimeMillis() - t00
      tokensProcessed += doc.tokenCount
    }
    doc
  }
  // Human-readable summary of the profiling data gathered while profile == true.
  def profileReport: String = {
    s"Processed $tokensProcessed tokens in ${msProcessed/1000.0} seconds, at ${tokensProcessed.toDouble*1000.0/msProcessed} tokens / second  " +
    "Speeds of individual components:\\n" + timePerAnnotator.map(i => f"  ${i._1.getClass.getSimpleName}%30s: ${tokensProcessed.toDouble*1000.0/i._2}%4.4f tokens/sec ").mkString("\\n")
  }
  def tokenAnnotationString(token: Token): String = annotators.map(_.tokenAnnotationString(token)).mkString("\\t")
}
/** A mutable map from annotation class to a factory producing the DocumentAnnotator
    that provides it. Registering an annotator binds every one of its postAttrs
    to that annotator, overwriting any previous binding. */
class MutableDocumentAnnotatorMap extends collection.mutable.LinkedHashMap[Class[_], () => DocumentAnnotator] {
  def +=(annotator: DocumentAnnotator) = {
    for (attr <- annotator.postAttrs) this.update(attr, () => annotator)
  }
}
/** A factory for creating DocumentAnnotatorPipelines given requirements about which annotations or which DocumentAnnotators are desired. */
object DocumentAnnotatorPipeline extends FastLogging {
  /** Default mapping from annotation class to the annotator that provides it.
      Used when the caller does not supply a custom DocumentAnnotatorMap. */
  val defaultDocumentAnnotationMap: DocumentAnnotatorMap = new collection.immutable.ListMap ++ Seq(
    // Note that order matters here
    classOf[pos.PennPosTag] -> (() => pos.OntonotesForwardPosTagger),
    classOf[parse.ParseTree] -> (() => parse.OntonotesTransitionBasedParser),
    classOf[segment.PlainNormalizedTokenString] -> (() => segment.PlainTokenNormalizer),
    classOf[Token] -> (() => segment.DeterministicNormalizingTokenizer),
    classOf[Sentence] -> (() => segment.DeterministicSentenceSegmenter),
    classOf[lemma.WordNetTokenLemma] -> (() => lemma.WordNetLemmatizer),
    classOf[lemma.SimplifyDigitsTokenLemma] -> (() => lemma.SimplifyDigitsLemmatizer),
    classOf[lemma.CollapseDigitsTokenLemma] -> (() => lemma.CollapseDigitsLemmatizer),
    classOf[lemma.PorterTokenLemma] -> (() => lemma.PorterLemmatizer),
    classOf[lemma.LowercaseTokenLemma] -> (() => lemma.LowercaseLemmatizer),
    classOf[ner.NerTag] -> (() => ner.ConllChainNer), // TODO Should there be a different default?
    classOf[ner.BilouConllNerTag] -> (() => ner.NoEmbeddingsConllStackedChainNer),
    classOf[ner.BilouOntonotesNerTag] -> (() => ner.NoEmbeddingsOntonotesStackedChainNer),
    classOf[ner.ConllNerSpanBuffer] -> (() => ner.BilouConllNerChunkAnnotator),
    classOf[ner.OntonotesNerSpanBuffer] -> (() => ner.BilouOntonotesNerChunkAnnotator),
    //classOf[coref.mention.NerMentionList] -> (() => coref.mention.NerAndPronounMentionFinder),
    //classOf[phrase.GenderLabel[coref.Mention]] -> (() => phrase.GenderLabeler[]),
    classOf[phrase.Gender] -> (() => phrase.MentionPhraseGenderLabeler),
    classOf[phrase.Number] -> (() => phrase.MentionPhraseNumberLabeler),
    classOf[phrase.DatePhraseList] -> (() => phrase.DatePhraseFinder),
    classOf[coref.WithinDocCoref] -> (() => coref.NerForwardCoref),
    classOf[relation.RelationMentionSeq] -> (() => relation.ConllPatternBasedRelationFinder)
    //classOf[phrase.NumberLabel[phrase.NounPhrase]] -> (() => phrase.NounPhraseNumberLabeler),
    //classOf[MentionEntityType] -> (() => coref.mention.MentionEntityTypeLabeler),
    //classOf[cc.factorie.util.coref.GenericEntityMap[coref.mention.Mention]] -> (() => coref.NerForwardCoref)
  )
  //def apply(goal: Class[_]): DocumentAnnotationPipeline = apply(Seq(goal), defaultDocumentAnnotationMap)
  // Type-parameterized conveniences: build a pipeline producing the given annotation classes.
  def apply[A](implicit m:ClassTag[A]): DocumentAnnotationPipeline = apply(defaultDocumentAnnotationMap, Nil, Seq(m.runtimeClass))
  def apply[A,B](implicit m1:ClassTag[A], m2:ClassTag[B]): DocumentAnnotationPipeline = apply(defaultDocumentAnnotationMap, Nil, Seq(m1.runtimeClass, m2.runtimeClass))
  def apply[A,B,C](implicit m1:ClassTag[A], m2:ClassTag[B], m3:ClassTag[C]): DocumentAnnotationPipeline = apply(defaultDocumentAnnotationMap, Nil, Seq(m1.runtimeClass, m2.runtimeClass, m3.runtimeClass))
  def apply[A,B,C,D](implicit m1:ClassTag[A], m2:ClassTag[B], m3:ClassTag[C], m4:ClassTag[D]): DocumentAnnotationPipeline = apply(defaultDocumentAnnotationMap, Nil, Seq(m1.runtimeClass, m2.runtimeClass, m3.runtimeClass, m4.runtimeClass))
  //def apply(goal: Class[_], map: DocumentAnnotatorMap): DocumentAnnotationPipeline = apply(Seq(goal), map)
  def apply[A](map: DocumentAnnotatorMap)(implicit m:ClassTag[A]): DocumentAnnotationPipeline = apply(map, Nil, Seq(m.runtimeClass))
  def apply[A,B](map: DocumentAnnotatorMap)(implicit m1:ClassTag[A], m2:ClassTag[B]): DocumentAnnotationPipeline = apply(map, Nil, Seq(m1.runtimeClass, m2.runtimeClass))
  def apply[A,B,C](map: DocumentAnnotatorMap)(implicit m1:ClassTag[A], m2:ClassTag[B], m3:ClassTag[C]): DocumentAnnotationPipeline = apply(map, Nil, Seq(m1.runtimeClass, m2.runtimeClass, m3.runtimeClass))
  def apply[A,B,C,D](map: DocumentAnnotatorMap)(implicit m1:ClassTag[A], m2:ClassTag[B], m3:ClassTag[C], m4:ClassTag[D]): DocumentAnnotationPipeline = apply(map, Nil, Seq(m1.runtimeClass, m2.runtimeClass, m3.runtimeClass, m4.runtimeClass))
  //def apply(goals:Class[_]*): DocumentAnnotationPipeline = apply(defaultDocumentAnnotationMap, Nil, goals:_*)
  //def apply(prereqs: Seq[Class[_]], goals:Class[_]*): DocumentAnnotationPipeline = apply(defaultDocumentAnnotationMap, prereqs, goals)
  //def forGoals(map:DocumentAnnotatorMap, goals:Class[_]*): DocumentAnnotationPipeline = forGoals(map, Nil, goals)
  /** Builds a pipeline that produces all `goals`, resolving each goal's provider
      from `map` and recursively satisfying the provider's prerequisites first,
      so the resulting annotator sequence is topologically ordered. Attributes in
      `prereqs` are assumed already present and are not scheduled. */
  def apply(map:DocumentAnnotatorMap, prereqs:Seq[Class[_]], goals:Iterable[Class[_]]): DocumentAnnotationPipeline = {
    val pipeSet = collection.mutable.LinkedHashSet[DocumentAnnotator]()
    val preSet = new scala.collection.mutable.HashSet[Class[_]] ++= prereqs
    def recursiveSatisfyPrereqs(goal: Class[_]) {
      if (!preSet.contains(goal) && (!preSet.exists(x => goal.isAssignableFrom(x)))) {
        // Prefer an exact provider; otherwise fall back to any provider of a subtype.
        val provider = if (map.contains(goal)) map(goal)() else {
          val list = map.keys.filter(k => goal.isAssignableFrom(k))
          assert(list.nonEmpty, s"Could not find annotator for goal $goal, map includes ${map.keys.mkString(", ")}")
          map(list.head)()
        }
        if (!pipeSet.contains(provider)) {
          provider.prereqAttrs.foreach(recursiveSatisfyPrereqs)
          provider.postAttrs.foreach(preSet += _)
          pipeSet += provider
        }
      }
    }
    goals.foreach(recursiveSatisfyPrereqs)
    checkPipeline(pipeSet.toSeq)
    new DocumentAnnotationPipeline(pipeSet.toSeq)
  }
  def apply(annotators:DocumentAnnotator*): DocumentAnnotationPipeline = apply(defaultDocumentAnnotationMap, Nil, annotators:_*)
  def apply(map:DocumentAnnotatorMap, annotators:DocumentAnnotator*): DocumentAnnotationPipeline = apply(map, Nil, annotators:_*)
  def apply(map:DocumentAnnotatorMap, prereqs:Seq[Class[_]], annotators: DocumentAnnotator*): DocumentAnnotationPipeline = {
    val other = new MutableDocumentAnnotatorMap
    map.foreach(k => other += k)
    annotators.foreach(a => other += a) // By being added later, these annotators will overwrite the default ones when there is an overlap
    apply(map=other, prereqs, annotators.flatMap(_.postAttrs))
  }
  /** Validates that every annotator's prerequisites are provided by an earlier
      annotator in the pipeline, and that no annotation class is provided twice. */
  def checkPipeline(pipeline: Seq[DocumentAnnotator]) {
    if (logger.level == cc.factorie.util.Logger.DEBUG) {
      logger.debug("-- printing pipeline --")
      for (annotator <- pipeline) {
        logger.debug(s"Annotator ${annotator.getClass.getName} Prereqs(${annotator.prereqAttrs.map(_.getName).mkString(", ")}}) PostAttrs(${annotator.postAttrs.map(_.getName).mkString(", ")})")
      }
    }
    val satisfiedSet = collection.mutable.HashSet[Class[_]]()
    for (annotator <- pipeline) {
      for (requirement <- annotator.prereqAttrs
           if !satisfiedSet.contains(requirement)
           if !satisfiedSet.exists(c => requirement.isAssignableFrom(c)))
        // Was `assert(1 == 0, ...)`: an always-false comparison obscuring intent.
        assert(false, s"Prerequisite $requirement not satisfied before $annotator gets called in pipeline ${pipeline.mkString(" ")}")
      for (provision <- annotator.postAttrs) {
        assert(!satisfiedSet.contains(provision), s"Pipeline attempting to provide $provision twice. Pipeline: ${pipeline.mkString(" ")}")
        satisfiedSet += provision
      }
    }
  }
}
| strubell/factorie | src/main/scala/cc/factorie/app/nlp/DocumentAnnotatorPipeline.scala | Scala | apache-2.0 | 10,190 |
package x7c1.wheat.splicer.android
import sbt.File
import sbt.complete.DefaultParsers._
import sbt.complete.Parser
/** Line-oriented readers for Android build properties (local.properties and
  * Gradle build files). Each inner object pairs a property name with a parser
  * for the line on which that property appears. */
object PropertyLoader {
  // Matches lines of the form `sdk.dir=<path>` (e.g. in local.properties).
  object sdkRoot extends LinedProperty[File](
    parser = _ ~> "=" ~> NotSpace,
    property = "sdk.dir"
  )
  // Matches lines of the form `buildToolsVersion '<ver>'` or `buildToolsVersion "<ver>"`.
  object buildToolsVersion extends LinedProperty[String](
    parser = Space ~> _ ~> Space ~> quoted,
    property = "buildToolsVersion"
  )
  // Matches lines of the form `compileSdkVersion <digits>`.
  object compileSdkVersion extends LinedProperty[Int](
    parser = Space ~> _ ~> Space ~> Digit.+.string,
    property = "compileSdkVersion"
  )
  // Matches `compile '<dependency>'` lines; collects all of them.
  object dependencies extends LinedProperty[Seq[String]](
    parser = Space ~> _ ~> Space ~> quoted,
    property = "compile"
  )
  // Accepts a value wrapped in either single or double quotes.
  private val quoted = {
    val quoted1 = "'" ~> NotSpace <~ "'"
    val quoted2 = '"' ~> NotSpace <~ '"'
    quoted1 | quoted2
  }
}
/** A named property read from a single line of a property source. The `parser`
  * receives the property name and yields a parser for the value portion of the
  * line; a [[LineLoadable]] instance converts matched lines into an `A`.
  *
  * @param parser builds the line parser from the property name
  * @param property the property name to look for
  */
abstract class LinedProperty[A: LineLoadable](
  parser: String => Parser[String],
  property: String) {

  /** Reads the property value from the given file. */
  def via(file: File): A =
    convertFrom(PropertyFile(file))

  /** Reads the property value from a classpath resource. */
  def fromResource(resourcePath: String): A =
    convertFrom(PropertyResource(resourcePath))

  // Delegates line scanning and conversion to the LineLoadable type class.
  private def convertFrom(source: PropertySource): A =
    implicitly[LineLoadable[A]].by(new LineLoader(source, property, parser(property)))
}
| x7c1/Wheat | wheat-splicer/src/main/scala/x7c1/wheat/splicer/android/PropertyLoader.scala | Scala | mit | 1,255 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.sbt.run
import sbt._
import sbt.Keys._
import xsbti.Maybe
import xsbti.Position
import xsbti.Problem
import xsbti.Severity
import play.api.PlayException
import play.sbt.PlayExceptions.CompilationException
import play.sbt.PlayExceptions.UnexpectedException
import com.lightbend.lagom.dev.Reloader.CompileFailure
import com.lightbend.lagom.dev.Reloader.CompileResult
import com.lightbend.lagom.dev.Reloader.CompileSuccess
import com.lightbend.lagom.dev.Reloader.Source
trait RunSupportCompat {
  // Converts a failed sbt task result into a PlayException for dev-mode error
  // reporting: PlayExceptions pass through, compile failures are mapped to the
  // first error-severity problem, and anything else becomes UnexpectedException.
  def taskFailureHandler(incomplete: Incomplete, streams: Option[Streams]): PlayException = {
    Incomplete
      .allExceptions(incomplete)
      .headOption
      .map {
        case e: PlayException => e
        case e: xsbti.CompileFailed =>
          getProblems(incomplete, streams)
            .find(_.severity == xsbti.Severity.Error)
            .map(CompilationException)
            .getOrElse(UnexpectedException(Some("The compilation failed without reporting any problem!"), Some(e)))
        case e: Exception => UnexpectedException(unexpected = Some(e))
      }
      .getOrElse {
        UnexpectedException(Some("The compilation task failed without any exception!"))
      }
  }
def originalSource(file: File): Option[File] = {
play.twirl.compiler.MaybeGeneratedSource.unapply(file).map(_.file)
}
def compileFailure(streams: Option[Streams])(incomplete: Incomplete): CompileResult = {
CompileFailure(taskFailureHandler(incomplete, streams))
}
def compile(
reloadCompile: () => Result[sbt.inc.Analysis],
classpath: () => Result[Classpath],
streams: () => Option[Streams]
): CompileResult = {
reloadCompile().toEither.left
.map(compileFailure(streams()))
.right
.map { analysis =>
classpath().toEither.left
.map(compileFailure(streams()))
.right
.map { classpath =>
CompileSuccess(sourceMap(analysis), classpath.files)
}
.fold(identity, identity)
}
.fold(identity, identity)
}
def sourceMap(analysis: sbt.inc.Analysis): Map[String, Source] = {
analysis.apis.internal.foldLeft(Map.empty[String, Source]) {
case (sourceMap, (file, source)) =>
sourceMap ++ {
source.api.definitions.map { d =>
d.name -> Source(file, originalSource(file))
}
}
}
}
def getScopedKey(incomplete: Incomplete): Option[ScopedKey[_]] = incomplete.node.flatMap {
case key: ScopedKey[_] => Option(key)
case task: Task[_] => task.info.attributes.get(taskDefinitionKey)
}
def allProblems(inc: Incomplete): Seq[Problem] = {
allProblems(inc :: Nil)
}
def allProblems(incs: Seq[Incomplete]): Seq[Problem] = {
problems(Incomplete.allExceptions(incs).toSeq)
}
def problems(es: Seq[Throwable]): Seq[Problem] = {
es.flatMap {
case cf: xsbti.CompileFailed => cf.problems
case _ => Nil
}
}
def getProblems(incomplete: Incomplete, streams: Option[Streams]): Seq[Problem] = {
allProblems(incomplete) ++ {
Incomplete.linearize(incomplete).flatMap(getScopedKey).flatMap { scopedKey =>
val JavacError = """\\[error\\]\\s*(.*[.]java):(\\d+):\\s*(.*)""".r
val JavacErrorInfo = """\\[error\\]\\s*([a-z ]+):(.*)""".r
val JavacErrorPosition = """\\[error\\](\\s*)\\^\\s*""".r
streams
.map { streamsManager =>
var first: (Option[(String, String, String)], Option[Int]) = (None, None)
var parsed: (Option[(String, String, String)], Option[Int]) = (None, None)
Output
.lastLines(scopedKey, streamsManager, None)
.map(_.replace(scala.Console.RESET, ""))
.map(_.replace(scala.Console.RED, ""))
.collect {
case JavacError(file, line, message) => parsed = Some((file, line, message)) -> None
case JavacErrorInfo(key, message) =>
parsed._1.foreach { o =>
parsed = Some(
(
parsed._1.get._1,
parsed._1.get._2,
parsed._1.get._3 + " [" + key.trim + ": " + message.trim + "]"
)
) -> None
}
case JavacErrorPosition(pos) =>
parsed = parsed._1 -> Some(pos.length)
if (first == ((None, None))) {
first = parsed
}
}
first
}
.collect {
case (Some(error), maybePosition) =>
new Problem {
def message: String = error._3
def category: String = ""
def position: Position = new Position {
def line: Maybe[Integer] = Maybe.just(error._2.toInt)
def lineContent: String = ""
def offset: Maybe[Integer] = Maybe.nothing[java.lang.Integer]
def pointer: Maybe[Integer] =
maybePosition
.map(pos => Maybe.just((pos - 1).asInstanceOf[java.lang.Integer]))
.getOrElse(Maybe.nothing[java.lang.Integer])
def pointerSpace: Maybe[String] = Maybe.nothing[String]
def sourceFile: Maybe[File] = Maybe.just(file(error._1))
def sourcePath: Maybe[String] = Maybe.just(error._1)
}
def severity: Severity = xsbti.Severity.Error
}
}
}
}
}
}
| lagom/lagom | dev/sbt-plugin/src/main/scala-sbt-0.13/com/lightbend/lagom/sbt/run/RunSupportCompat.scala | Scala | apache-2.0 | 5,698 |
package org.backuity
import scala.language.experimental.macros
import scala.reflect.macros.blackbox
package object clist {
  /**
   * Define an attribute of a [[Command]] to be a command line argument.
   *
   * Ex: Given the `cat` command, `var file = arg[String]()` would produce
   * the following command : `cat <file>`
   */
  // We'd really like to have default parameters here but we can't due
  // to https://issues.scala-lang.org/browse/SI-5920
  // The partial workaround is to use the apply method of the ArgumentBuilder
  // Once SI-5920 gets fixed we'll be able to make some of the runtime checks happen
  // at compile time.
  def arg[T]: CliArgument.Builder[T] = macro arg_impl[T]
  /**
   * Define an attribute of a [[Command]] to be a command line multi-argument (i.e
   * that accepts multiple values).
   *
   * Ex: Given the `cat` command, `var files = args[List[String]]()` would produce
   * the following command : `cat <files>...`
   */
  def args[T]: MultipleCliArgument.Builder[T] = macro args_impl[T]
  /**
   * Define an attribute of a [[Command]] to be a command line option.
   *
   * Ex: Given the `cat` command, `var verbose = opt[Boolean]()` would produce
   * the following command : `cat [options]` with options containing `--verbose`
   */
  // same as arg[T] above
  def opt[T]: CliOption.Builder[T] = macro opt_impl[T]
  /** Macro implementation for [[arg]]: captures the name of the enclosing
    * `var` and expands to a [[CliArgument.Builder]] for it. */
  def arg_impl[T: c.WeakTypeTag](c: blackbox.Context) = {
    import c.universe._
    val term = checkTerm(c)
    q"""new _root_.org.backuity.clist.CliArgument.Builder(this, ${term.name.toString.trim})"""
  }
  /** Macro implementation for [[args]]: same as [[arg_impl]] but expands to a
    * [[MultipleCliArgument.Builder]]. */
  // TODO make sure there's at most one args and it comes last (if multiple `arg` are specified)
  def args_impl[T: c.WeakTypeTag](c: blackbox.Context) = {
    import c.universe._
    val term = checkTerm(c)
    q"""new _root_.org.backuity.clist.MultipleCliArgument.Builder(this, ${term.name.toString.trim})"""
  }
  /** Macro implementation for [[opt]]: expands to a [[CliOption.Builder]]
    * named after the enclosing `var`. */
  def opt_impl[T: c.WeakTypeTag](c: blackbox.Context) = {
    import c.universe._
    val term = checkTerm(c)
    q"""new _root_.org.backuity.clist.CliOption.Builder(this, ${term.name.toString.trim})"""
  }
  /** Validates that the macro call site is the right-hand side of a `var`
    * definition and returns that var's term symbol; aborts compilation with a
    * user-facing error otherwise. */
  private def checkTerm[T: c.WeakTypeTag](c: blackbox.Context): c.universe.TermSymbol = {
    import c.universe._
    val term: TermSymbol = c.internal.enclosingOwner.asTerm
    // why isPublic returns false??
    // TODO make sure the var is public
    // println(term.name + " - " + term.isVar + " - " + term.isPrivate + " - " + term.isPrivateThis)
    if (!term.isVar) {
      c.abort(term.pos, "Command arguments can only be a public `var`.")
    }
    // TODO make sure apply is called on the builder
    // => avoid: `var cmd = arg[String]`
    term
  }
}
} | backuity/clist | macros/src/main/scala/org/backuity/clist/package.scala | Scala | apache-2.0 | 2,680 |
package prisoners_dilemma
import org.scalacheck._
import scala.concurrent.duration._
object Package {

  /** Generator producing one of the two possible moves. */
  val move: Gen[Move] = Gen.oneOf(Cooperate, Defect)

  /** Default Arbitrary instance so properties can draw random moves. */
  implicit val arbMoves: Arbitrary[Move] = Arbitrary(move)

  /** A sequence whose length is drawn from `n` and whose elements come from `g`. */
  def someOf[T](n: Gen[Int], g: Gen[T]): Gen[Seq[T]] =
    n.flatMap(length => Gen.listOfN(length, g))

  /** Repeats `seq` end to end, indefinitely. */
  def overAndOverForever[T](seq: Seq[T]): Stream[T] =
    Stream.continually(seq).flatten

  val MAX_BIRDS_IN_POPULATION = 20

  val MIN_TURN_TIME = 50.millis
  val MAX_TIME_LIMIT_PER_TEST = 900.millis

  /** Upper bound on turns per game given the per-turn and per-test budgets. */
  val maxTurnsPerGame: Int = (MAX_TIME_LIMIT_PER_TEST / MIN_TURN_TIME).toInt
}
| Mharlin/better-testing-workshop | exercise2-prisoners-dilemma/src/test/scala/prisoners-dilemma/package.scala | Scala | mit | 629 |
package uk.ac.ncl.openlab.intake24.services.shorturl
import javax.inject.Inject
import org.slf4j.LoggerFactory
import play.api.Configuration
import play.api.http.{DefaultHttpRequestHandler, HttpConfiguration, HttpErrorHandler, HttpFilters}
import play.api.mvc._
import play.api.routing.Router
import scala.concurrent.ExecutionContext
/** Play HTTP request handler that intercepts requests whose host equals the
  * configured short-URL domain: those are resolved through [[ShortUrlService]]
  * and answered with a redirect (or 404), while every other request falls
  * through to the default router.
  */
class RequestHandler @Inject()(errorHandler: HttpErrorHandler,
                               config: Configuration,
                               httpConfig: HttpConfiguration,
                               shortUrlCache: ShortUrlService,
                               filters: HttpFilters,
                               actionBuilder: DefaultActionBuilder,
                               router: Router,
                               implicit val executionContext: ExecutionContext) extends DefaultHttpRequestHandler(
  router, errorHandler, httpConfig, filters
) {
  // Host name that marks a request as a short-URL resolution request.
  private val shortUrlDomain = config.get[String]("intake24.urlShort.internal.domain")
  private val logger = LoggerFactory.getLogger(classOf[RequestHandler])
  override def routeRequest(request: RequestHeader) = {
    request.host match {
      // Backticks match against the existing val rather than binding a new name.
      case `shortUrlDomain` =>
        logger.debug(s"Resolve request: ${request.host + request.uri}")
        Some(actionBuilder.async {
          // Look up the full URL for "<host><uri>" asynchronously.
          shortUrlCache.resolve(request.host + request.uri).map {
            case Some(url) =>
              logger.debug(s"Resolve result: $url")
              // 308 Permanent Redirect to the resolved target.
              Results.PermanentRedirect(url)
            case None =>
              logger.debug(s"Resolve result: not found")
              Results.NotFound
          }
        })
      case _ =>
        // Not a short-URL host: use the standard routing behaviour.
        super.routeRequest(request)
    }
  }
}
} | digitalinteraction/intake24 | ShortUrlService/app/uk/ac/ncl/openlab/intake24/services/shorturl/RequestHandler.scala | Scala | apache-2.0 | 1,685 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IndexSink, StubElement, StubInputStream, StubOutputStream}
import com.intellij.util.io.StringRef
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportSelector
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.imports.ScImportSelectorImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScImportSelectorStubImpl
/**
* User: Alexander Podkhalyuzin
* Date: 20.06.2009
*/
/**
 * Stub element type for Scala import selectors (the entries inside the braces
 * of an import, e.g. `b => c` in `import a.{b => c}`). Handles stub
 * (de)serialization and creation of the corresponding PSI elements.
 *
 * User: Alexander Podkhalyuzin
 * Date: 20.06.2009
 */
class ScImportSelectorElementType[Func <: ScImportSelector]
        extends ScStubElementType[ScImportSelectorStub, ScImportSelector]("import selector") {
  // Writes reference text, imported name and alias flag. Order must match
  // deserializeImpl exactly.
  def serialize(stub: ScImportSelectorStub, dataStream: StubOutputStream): Unit = {
    dataStream.writeName(stub.asInstanceOf[ScImportSelectorStubImpl[_ <: PsiElement]].referenceText.toString)
    dataStream.writeName(stub.importedName)
    dataStream.writeBoolean(stub.isAliasedImport)
  }
  // Builds a stub from a live PSI selector.
  def createStubImpl[ParentPsi <: PsiElement](psi: ScImportSelector, parentStub: StubElement[ParentPsi]): ScImportSelectorStub = {
    val refText = psi.reference.getText
    val importedName = psi.importedName
    val aliasImport = psi.isAliasedImport
    new ScImportSelectorStubImpl(parentStub, this, refText, importedName, aliasImport)
  }
  // Reads the fields back in the same order serialize wrote them.
  def deserializeImpl(dataStream: StubInputStream, parentStub: Any): ScImportSelectorStub = {
    val refText = StringRef.toString(dataStream.readName)
    val importedName = StringRef.toString(dataStream.readName)
    val aliasImport = dataStream.readBoolean()
    new ScImportSelectorStubImpl(parentStub.asInstanceOf[StubElement[PsiElement]], this, refText, importedName, aliasImport)
  }
  // Import selectors are not indexed.
  def indexStub(stub: ScImportSelectorStub, sink: IndexSink): Unit = {}
  def createPsi(stub: ScImportSelectorStub): ScImportSelector = {
    new ScImportSelectorImpl(stub)
  }
}
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/ScImportSelectorElementType.scala | Scala | apache-2.0 | 1,931 |
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.jetty.server
import cats.effect._
import cats.syntax.all._
import org.eclipse.jetty.util.component.Destroyable
import org.eclipse.jetty.util.component.LifeCycle
private[jetty] object JettyLifeCycle {

  /** Wrap a Jetty [[org.eclipse.jetty.util.component.LifeCycle]] value in a
    * [[cats.effect.Resource]]. This calls
    * [[org.eclipse.jetty.util.component.LifeCycle#start]] on startup and
    * waits for the component to emit an event stating it is started, failing
    * if it is already started (or starting). On release
    * [[org.eclipse.jetty.util.component.LifeCycle#stop]] is called.
    *
    * @note If `A` is _also_ a subtype of
    *       [[org.eclipse.jetty.util.component.Destroyable]] then
    *       [[org.eclipse.jetty.util.component.Destroyable#destroy]] will also
    *       be invoked.
    */
  def lifeCycleAsResource[F[_], A <: LifeCycle](
      fa: F[A]
  )(implicit F: Async[F]): Resource[F, A] =
    Resource.make[F, A](
      fa.flatTap(startLifeCycle[F])
    ) {
      // Destroyable components are additionally destroyed after stopping.
      case value: LifeCycle with Destroyable =>
        stopLifeCycle[F](value) *> F.delay(value.destroy())
      case otherwise =>
        stopLifeCycle[F](otherwise)
    }

  /** Attempt to invoke [[org.eclipse.jetty.util.component.LifeCycle#stop]] on a
    * [[org.eclipse.jetty.util.component.LifeCycle]]. If the
    * [[org.eclipse.jetty.util.component.LifeCycle]] is already stopped then
    * this method will return immediately. This can be valid in some cases
    * where a [[org.eclipse.jetty.util.component.LifeCycle]] is stopped
    * internally, e.g. due to some internal error occurring.
    */
  private[this] def stopLifeCycle[F[_]](lifeCycle: LifeCycle)(implicit F: Async[F]): F[Unit] =
    F.async_[Unit] { cb =>
      // Register the listener before inspecting state so the stopped event
      // cannot slip through between the state check and the registration.
      lifeCycle.addLifeCycleListener(
        new LifeCycle.Listener {
          override def lifeCycleStopped(a: LifeCycle): Unit =
            cb(Right(()))
          override def lifeCycleFailure(a: LifeCycle, error: Throwable): Unit =
            cb(Left(error))
        }
      )

      // In the general case, it is not sufficient to merely call stop(). For
      // example, the concrete implementation of stop() for the canonical
      // Jetty Server instance will shortcut to a `return` call taking no
      // action if the server is "stopping". This method _can't_ return until
      // we are _actually stopped_, so we have to check three different states
      // here.

      if (lifeCycle.isStopped) {
        // If the first case, we are already stopped, so our listener won't be
        // called and we just return.
        cb(Right(()))
      } else if (lifeCycle.isStopping()) {
        // If it is stopping, we need to wait for our listener to get invoked.
        ()
      } else {
        // If it is neither stopped nor stopping, we need to request a stop
        // and then wait for the event. It is imperative that we add the
        // listener beforehand here. Otherwise we have some very annoying race
        // conditions.
        lifeCycle.stop()
      }
    }

  /** Attempt to [[org.eclipse.jetty.util.component.LifeCycle#start]] on a
    * [[org.eclipse.jetty.util.component.LifeCycle]].
    *
    * If the [[org.eclipse.jetty.util.component.LifeCycle]] is already started
    * (or starting) this will fail.
    */
  private[this] def startLifeCycle[F[_]](lifeCycle: LifeCycle)(implicit F: Async[F]): F[Unit] =
    F.async_[Unit] { cb =>
      // As in stopLifeCycle: register before checking state / starting.
      lifeCycle.addLifeCycleListener(
        new LifeCycle.Listener {
          override def lifeCycleStarted(a: LifeCycle): Unit =
            cb(Right(()))
          override def lifeCycleFailure(a: LifeCycle, error: Throwable): Unit =
            cb(Left(error))
        }
      )

      // Sanity check to ensure the LifeCycle component is not already
      // started. A couple of notes here.
      //
      // - There is _always_ going to be a small chance of a race condition
      //   here in the final branch where we invoke `lifeCycle.start()` _if_
      //   something else has a reference to the `LifeCycle`
      //   value. Thankfully, unlike the stopLifeCycle function, this is
      //   completely in the control of the caller. As long as the caller
      //   doesn't leak the reference (or call .start() themselves) nothing
      //   internally to Jetty should ever invoke .start().
      // - Jetty components allow for reuse in many cases, unless the
      //   .destroy() method is invoked (and the particular type implements
      //   `Destroyable`, it's not part of `LifeCycle`). Jetty uses this for
      //   "soft" resets of the `LifeCycle` component. Thus it is possible
      //   that this `LifeCycle` component has been started before, though I
      //   don't recommend this and we don't (at this time) do that in the
      //   http4s codebase.
      if (lifeCycle.isStarted) {
        cb(
          Left(
            new IllegalStateException(
              "Attempting to start Jetty LifeCycle component, but it is already started."
            )
          )
        )
      } else if (lifeCycle.isStarting) {
        cb(
          Left(
            new IllegalStateException(
              "Attempting to start Jetty LifeCycle component, but it is already starting."
            )
          )
        )
      } else {
        lifeCycle.start()
      }
    }
}
| http4s/http4s | jetty-server/src/main/scala/org/http4s/jetty/server/JettyLifeCycle.scala | Scala | apache-2.0 | 5,924 |
package jafaeldon
import org.scalatest.FunSpec
/** Smoke test verifying the test harness runs. */
class HelloWorldSpec extends FunSpec {
  describe("Adding 1 to 1") {
    it("should equals 2") {
      val sum = 1 + 1
      assert(sum == 2)
    }
  }
}
| jafaeldon/multiproject | test/benchmark/src/test/scala/jafaeldon/MainSpec.scala | Scala | apache-2.0 | 180 |
package com.tribetron.editor.objects
import org.json4s._
import org.json4s.JsonDSL._
import com.tribetron.editor.gui.BattleMapSettings
/** A [[TribetronMap]] describing a battle level, carrying the battle settings
  * needed to serialise the level to JSON.
  *
  * @param battleSettings editor settings (roster size, reward, opponent, rounds)
  * @param width  map width in columns
  * @param height map height in rows
  */
class BattleMap(val battleSettings: BattleMapSettings, override val width: Int, override val height: Int)
  extends TribetronMap(width, height) {
  /** Serialises this map: battle metadata from [[battleSettings]], the given
    * story text, and the grid as rows of object-type JSON names. */
  override def toJson(story: String): JValue = {
    ("type" -> "battle") ~
      ("story" -> story) ~
      ("maxRoster" -> battleSettings.maxRoster) ~
      ("reward" -> battleSettings.reward) ~
      ("opponentTeamName" -> battleSettings.opponentTeamName) ~
      ("opponent" -> battleSettings.opponent.toString()) ~
      ("rounds" -> battleSettings.rounds) ~
      ("rows" ->
        rows.map { row =>
          row.columns.map { column =>
            ("object" -> column.objectType.jsonName)
          }})
  }
}
} | Humakt83/tribetron-editor | src/main/scala/com/tribetron/editor/objects/BattleMap.scala | Scala | mit | 814 |
package com.xebia.chat
import akka.actor._
import spray.can.Http
import spray.can.server._
import akka.io.IO
/** Entry point: boots an actor system, creates the WebSocket API actor and
  * binds it to port 8080 on all interfaces. */
object Main extends App {
  implicit val system = ActorSystem()
  import system.dispatcher
  // Handler actor for incoming connections.
  val server = system.actorOf(Props[WebSocketApi], "websocket")
  // UHttp — presumably the WebSocket-capable variant of spray-can's Http
  // extension (TODO confirm) — performs the bind.
  IO(UHttp) ! Http.Bind(server, "0.0.0.0", 8080)
}
| xebia/akka-chat | src/main/scala/com/xebia/chat/Main.scala | Scala | mit | 320 |
package rros.play
/**
* Created by namnguyen on 3/12/15.
*/
import akka.actor.ActorRef
/**
* Created by namnguyen on 3/5/15.
*/
// Message protocol for the rros play WebSocket integration.
// NOTE(review): descriptions below are inferred from the names — confirm
// against the actors that handle these messages.
/** Subscribes `actor` to message notifications. */
case class RegisterToReceiveMessage(actor:ActorRef)
/** Removes `actor` from the set of message subscribers. */
case class UnregisterToReceiveMessage(actor:ActorRef)
/** Carries a message that arrived on the connection. */
case class MessageReceived(message:String)
/** Requests that `message` be sent out on the connection. */
case class SendMessage(message:String)
/** Asks for the connection to be closed immediately. */
object ForceClose
/** Signals that the WebSocket has been closed. */
object WebSocketClose
/** Triggers a check of the receiver actor. */
object CheckReceiverActor
object Ping | namhnguyen/RROS | src/main/scala/rros/play/ActorMessages.scala | Scala | apache-2.0 | 399 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.math.{BigDecimal => JBigDecimal}
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import org.apache.parquet.filter2.predicate.{FilterApi, FilterPredicate, Operators}
import org.apache.parquet.filter2.predicate.FilterApi._
import org.apache.parquet.filter2.predicate.Operators.{Column => _, _}
import org.apache.parquet.schema.MessageType
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.optimizer.InferFiltersFromConstraints
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.execution.datasources.{DataSourceStrategy, HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2ScanRelation
import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetScan
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.ParquetOutputTimestampType
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.util.{AccumulatorContext, AccumulatorV2}
/**
* A test suite that tests Parquet filter2 API based filter pushdown optimization.
*
* NOTE:
*
* 1. `!(a cmp b)` is always transformed to its negated form `a cmp' b` by the
* `BooleanSimplification` optimization rule whenever possible. As a result, predicate `!(a < 1)`
* results in a `GtEq` filter predicate rather than a `Not`.
*
* 2. `Tuple1(Option(x))` is used together with `AnyVal` types like `Int` to ensure the inferred
* data type is nullable.
*
* NOTE:
*
* This file intendedly enables record-level filtering explicitly. If new test cases are
* dependent on this configuration, don't forget you better explicitly set this configuration
* within the test.
*/
abstract class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSparkSession {
protected def createParquetFilters(
schema: MessageType,
caseSensitive: Option[Boolean] = None): ParquetFilters =
new ParquetFilters(schema, conf.parquetFilterPushDownDate, conf.parquetFilterPushDownTimestamp,
conf.parquetFilterPushDownDecimal, conf.parquetFilterPushDownStringStartWith,
conf.parquetFilterPushDownInFilterThreshold,
caseSensitive.getOrElse(conf.caseSensitiveAnalysis))
override def beforeEach(): Unit = {
super.beforeEach()
// Note that there are many tests here that require record-level filtering set to be true.
spark.conf.set(SQLConf.PARQUET_RECORD_FILTER_ENABLED.key, "true")
}
override def afterEach(): Unit = {
try {
spark.conf.unset(SQLConf.PARQUET_RECORD_FILTER_ENABLED.key)
} finally {
super.afterEach()
}
}
def checkFilterPredicate(
df: DataFrame,
predicate: Predicate,
filterClass: Class[_ <: FilterPredicate],
checker: (DataFrame, Seq[Row]) => Unit,
expected: Seq[Row]): Unit
private def checkFilterPredicate
(predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Seq[Row])
(implicit df: DataFrame): Unit = {
checkFilterPredicate(df, predicate, filterClass, checkAnswer(_, _: Seq[Row]), expected)
}
private def checkFilterPredicate[T]
(predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: T)
(implicit df: DataFrame): Unit = {
checkFilterPredicate(predicate, filterClass, Seq(Row(expected)))(df)
}
private def checkBinaryFilterPredicate
(predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Seq[Row])
(implicit df: DataFrame): Unit = {
def checkBinaryAnswer(df: DataFrame, expected: Seq[Row]) = {
assertResult(expected.map(_.getAs[Array[Byte]](0).mkString(",")).sorted) {
df.rdd.map(_.getAs[Array[Byte]](0).mkString(",")).collect().toSeq.sorted
}
}
checkFilterPredicate(df, predicate, filterClass, checkBinaryAnswer _, expected)
}
private def checkBinaryFilterPredicate
(predicate: Predicate, filterClass: Class[_ <: FilterPredicate], expected: Array[Byte])
(implicit df: DataFrame): Unit = {
checkBinaryFilterPredicate(predicate, filterClass, Seq(Row(expected)))(df)
}
private def testTimestampPushdown(data: Seq[Timestamp]): Unit = {
assert(data.size === 4)
val ts1 = data.head
val ts2 = data(1)
val ts3 = data(2)
val ts4 = data(3)
withParquetDataFrame(data.map(i => Tuple1(i))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], data.map(i => Row.apply(i)))
checkFilterPredicate('_1 === ts1, classOf[Eq[_]], ts1)
checkFilterPredicate('_1 <=> ts1, classOf[Eq[_]], ts1)
checkFilterPredicate('_1 =!= ts1, classOf[NotEq[_]],
Seq(ts2, ts3, ts4).map(i => Row.apply(i)))
checkFilterPredicate('_1 < ts2, classOf[Lt[_]], ts1)
checkFilterPredicate('_1 > ts1, classOf[Gt[_]], Seq(ts2, ts3, ts4).map(i => Row.apply(i)))
checkFilterPredicate('_1 <= ts1, classOf[LtEq[_]], ts1)
checkFilterPredicate('_1 >= ts4, classOf[GtEq[_]], ts4)
checkFilterPredicate(Literal(ts1) === '_1, classOf[Eq[_]], ts1)
checkFilterPredicate(Literal(ts1) <=> '_1, classOf[Eq[_]], ts1)
checkFilterPredicate(Literal(ts2) > '_1, classOf[Lt[_]], ts1)
checkFilterPredicate(Literal(ts3) < '_1, classOf[Gt[_]], ts4)
checkFilterPredicate(Literal(ts1) >= '_1, classOf[LtEq[_]], ts1)
checkFilterPredicate(Literal(ts4) <= '_1, classOf[GtEq[_]], ts4)
checkFilterPredicate(!('_1 < ts4), classOf[GtEq[_]], ts4)
checkFilterPredicate('_1 < ts2 || '_1 > ts3, classOf[Operators.Or], Seq(Row(ts1), Row(ts4)))
}
}
private def testDecimalPushDown(data: DataFrame)(f: DataFrame => Unit): Unit = {
withTempPath { file =>
data.write.parquet(file.getCanonicalPath)
readParquetFile(file.toString)(f)
}
}
// This function tests that exactly go through the `canDrop` and `inverseCanDrop`.
private def testStringStartsWith(dataFrame: DataFrame, filter: String): Unit = {
withTempPath { dir =>
val path = dir.getCanonicalPath
dataFrame.write.option("parquet.block.size", 512).parquet(path)
Seq(true, false).foreach { pushDown =>
withSQLConf(
SQLConf.PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED.key -> pushDown.toString) {
val accu = new NumRowGroupsAcc
sparkContext.register(accu)
val df = spark.read.parquet(path).filter(filter)
df.foreachPartition((it: Iterator[Row]) => it.foreach(v => accu.add(0)))
if (pushDown) {
assert(accu.value == 0)
} else {
assert(accu.value > 0)
}
AccumulatorContext.remove(accu.id)
}
}
}
}
test("filter pushdown - boolean") {
withParquetDataFrame((true :: false :: Nil).map(b => Tuple1.apply(Option(b)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], Seq(Row(true), Row(false)))
checkFilterPredicate('_1 === true, classOf[Eq[_]], true)
checkFilterPredicate('_1 <=> true, classOf[Eq[_]], true)
checkFilterPredicate('_1 =!= true, classOf[NotEq[_]], false)
}
}
test("filter pushdown - tinyint") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toByte)))) { implicit df =>
assert(df.schema.head.dataType === ByteType)
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1.toByte, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1.toByte, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1.toByte, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2.toByte, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3.toByte, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1.toByte, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4.toByte, classOf[GtEq[_]], 4)
checkFilterPredicate(Literal(1.toByte) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1.toByte) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2.toByte) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3.toByte) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1.toByte) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4.toByte) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4.toByte), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2.toByte || '_1 > 3.toByte,
classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
test("filter pushdown - smallint") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toShort)))) { implicit df =>
assert(df.schema.head.dataType === ShortType)
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1.toShort, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1.toShort, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1.toShort, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2.toShort, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3.toShort, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1.toShort, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4.toShort, classOf[GtEq[_]], 4)
checkFilterPredicate(Literal(1.toShort) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1.toShort) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2.toShort) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3.toShort) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1.toShort) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4.toShort) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4.toShort), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2.toShort || '_1 > 3.toShort,
classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
test("filter pushdown - integer") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
test("filter pushdown - long") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toLong)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
test("filter pushdown - float") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toFloat)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
test("filter pushdown - double") {
withParquetDataFrame((1 to 4).map(i => Tuple1(Option(i.toDouble)))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 === 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 <=> 1, classOf[Eq[_]], 1)
checkFilterPredicate('_1 =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
checkFilterPredicate('_1 < 2, classOf[Lt[_]], 1)
checkFilterPredicate('_1 > 3, classOf[Gt[_]], 4)
checkFilterPredicate('_1 <= 1, classOf[LtEq[_]], 1)
checkFilterPredicate('_1 >= 4, classOf[GtEq[_]], 4)
checkFilterPredicate(Literal(1) === '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(1) <=> '_1, classOf[Eq[_]], 1)
checkFilterPredicate(Literal(2) > '_1, classOf[Lt[_]], 1)
checkFilterPredicate(Literal(3) < '_1, classOf[Gt[_]], 4)
checkFilterPredicate(Literal(1) >= '_1, classOf[LtEq[_]], 1)
checkFilterPredicate(Literal(4) <= '_1, classOf[GtEq[_]], 4)
checkFilterPredicate(!('_1 < 4), classOf[GtEq[_]], 4)
checkFilterPredicate('_1 < 2 || '_1 > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
}
}
test("filter pushdown - string") {
withParquetDataFrame((1 to 4).map(i => Tuple1(i.toString))) { implicit df =>
checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
checkFilterPredicate(
'_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(i => Row.apply(i.toString)))
checkFilterPredicate('_1 === "1", classOf[Eq[_]], "1")
checkFilterPredicate('_1 <=> "1", classOf[Eq[_]], "1")
checkFilterPredicate(
'_1 =!= "1", classOf[NotEq[_]], (2 to 4).map(i => Row.apply(i.toString)))
checkFilterPredicate('_1 < "2", classOf[Lt[_]], "1")
checkFilterPredicate('_1 > "3", classOf[Gt[_]], "4")
checkFilterPredicate('_1 <= "1", classOf[LtEq[_]], "1")
checkFilterPredicate('_1 >= "4", classOf[GtEq[_]], "4")
checkFilterPredicate(Literal("1") === '_1, classOf[Eq[_]], "1")
checkFilterPredicate(Literal("1") <=> '_1, classOf[Eq[_]], "1")
checkFilterPredicate(Literal("2") > '_1, classOf[Lt[_]], "1")
checkFilterPredicate(Literal("3") < '_1, classOf[Gt[_]], "4")
checkFilterPredicate(Literal("1") >= '_1, classOf[LtEq[_]], "1")
checkFilterPredicate(Literal("4") <= '_1, classOf[GtEq[_]], "4")
checkFilterPredicate(!('_1 < "4"), classOf[GtEq[_]], "4")
checkFilterPredicate('_1 < "2" || '_1 > "3", classOf[Operators.Or], Seq(Row("1"), Row("4")))
}
}
  // Same predicate-conversion coverage as the string test, but for BINARY columns.
  // Uses checkBinaryFilterPredicate because binary values need byte-wise comparison
  // of the results rather than plain equality.
  test("filter pushdown - binary") {
    // Shorthand: `n.b` is the UTF-8 bytes of the integer's decimal string.
    implicit class IntToBinary(int: Int) {
      def b: Array[Byte] = int.toString.getBytes(StandardCharsets.UTF_8)
    }
    withParquetDataFrame((1 to 4).map(i => Tuple1(i.b))) { implicit df =>
      checkBinaryFilterPredicate('_1 === 1.b, classOf[Eq[_]], 1.b)
      checkBinaryFilterPredicate('_1 <=> 1.b, classOf[Eq[_]], 1.b)
      checkBinaryFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkBinaryFilterPredicate(
        '_1.isNotNull, classOf[NotEq[_]], (1 to 4).map(i => Row.apply(i.b)).toSeq)
      checkBinaryFilterPredicate(
        '_1 =!= 1.b, classOf[NotEq[_]], (2 to 4).map(i => Row.apply(i.b)).toSeq)
      checkBinaryFilterPredicate('_1 < 2.b, classOf[Lt[_]], 1.b)
      checkBinaryFilterPredicate('_1 > 3.b, classOf[Gt[_]], 4.b)
      checkBinaryFilterPredicate('_1 <= 1.b, classOf[LtEq[_]], 1.b)
      checkBinaryFilterPredicate('_1 >= 4.b, classOf[GtEq[_]], 4.b)
      checkBinaryFilterPredicate(Literal(1.b) === '_1, classOf[Eq[_]], 1.b)
      checkBinaryFilterPredicate(Literal(1.b) <=> '_1, classOf[Eq[_]], 1.b)
      checkBinaryFilterPredicate(Literal(2.b) > '_1, classOf[Lt[_]], 1.b)
      checkBinaryFilterPredicate(Literal(3.b) < '_1, classOf[Gt[_]], 4.b)
      checkBinaryFilterPredicate(Literal(1.b) >= '_1, classOf[LtEq[_]], 1.b)
      checkBinaryFilterPredicate(Literal(4.b) <= '_1, classOf[GtEq[_]], 4.b)
      checkBinaryFilterPredicate(!('_1 < 4.b), classOf[GtEq[_]], 4.b)
      checkBinaryFilterPredicate(
        '_1 < 2.b || '_1 > 3.b, classOf[Operators.Or], Seq(Row(1.b), Row(4.b)))
    }
  }
  // Verifies predicate push-down conversion for DATE columns (gated elsewhere by
  // spark.sql.parquet.filterPushdown.date). Covers null checks, all comparison
  // operators in both operand orders, negation, and disjunction.
  test("filter pushdown - date") {
    // Shorthand: "yyyy-MM-dd".date builds a java.sql.Date.
    implicit class StringToDate(s: String) {
      def date: Date = Date.valueOf(s)
    }
    val data = Seq("2018-03-18", "2018-03-19", "2018-03-20", "2018-03-21")
    withParquetDataFrame(data.map(i => Tuple1(i.date))) { implicit df =>
      checkFilterPredicate('_1.isNull, classOf[Eq[_]], Seq.empty[Row])
      checkFilterPredicate('_1.isNotNull, classOf[NotEq[_]], data.map(i => Row.apply(i.date)))
      checkFilterPredicate('_1 === "2018-03-18".date, classOf[Eq[_]], "2018-03-18".date)
      checkFilterPredicate('_1 <=> "2018-03-18".date, classOf[Eq[_]], "2018-03-18".date)
      checkFilterPredicate('_1 =!= "2018-03-18".date, classOf[NotEq[_]],
        Seq("2018-03-19", "2018-03-20", "2018-03-21").map(i => Row.apply(i.date)))
      checkFilterPredicate('_1 < "2018-03-19".date, classOf[Lt[_]], "2018-03-18".date)
      checkFilterPredicate('_1 > "2018-03-20".date, classOf[Gt[_]], "2018-03-21".date)
      checkFilterPredicate('_1 <= "2018-03-18".date, classOf[LtEq[_]], "2018-03-18".date)
      checkFilterPredicate('_1 >= "2018-03-21".date, classOf[GtEq[_]], "2018-03-21".date)
      checkFilterPredicate(
        Literal("2018-03-18".date) === '_1, classOf[Eq[_]], "2018-03-18".date)
      checkFilterPredicate(
        Literal("2018-03-18".date) <=> '_1, classOf[Eq[_]], "2018-03-18".date)
      checkFilterPredicate(
        Literal("2018-03-19".date) > '_1, classOf[Lt[_]], "2018-03-18".date)
      checkFilterPredicate(
        Literal("2018-03-20".date) < '_1, classOf[Gt[_]], "2018-03-21".date)
      checkFilterPredicate(
        Literal("2018-03-18".date) >= '_1, classOf[LtEq[_]], "2018-03-18".date)
      checkFilterPredicate(
        Literal("2018-03-21".date) <= '_1, classOf[GtEq[_]], "2018-03-21".date)
      checkFilterPredicate(!('_1 < "2018-03-21".date), classOf[GtEq[_]], "2018-03-21".date)
      checkFilterPredicate(
        '_1 < "2018-03-19".date || '_1 > "2018-03-20".date,
        classOf[Operators.Or],
        Seq(Row("2018-03-18".date), Row("2018-03-21".date)))
    }
  }
  // Timestamp push-down is exercised per physical Parquet representation:
  // TIMESTAMP_MILLIS and TIMESTAMP_MICROS support push-down (delegated to
  // testTimestampPushdown), while INT96 must NOT produce a filter at all.
  test("filter pushdown - timestamp") {
    // spark.sql.parquet.outputTimestampType = TIMESTAMP_MILLIS
    val millisData = Seq(Timestamp.valueOf("2018-06-14 08:28:53.123"),
      Timestamp.valueOf("2018-06-15 08:28:53.123"),
      Timestamp.valueOf("2018-06-16 08:28:53.123"),
      Timestamp.valueOf("2018-06-17 08:28:53.123"))
    withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key ->
      ParquetOutputTimestampType.TIMESTAMP_MILLIS.toString) {
      testTimestampPushdown(millisData)
    }
    // spark.sql.parquet.outputTimestampType = TIMESTAMP_MICROS
    val microsData = Seq(Timestamp.valueOf("2018-06-14 08:28:53.123456"),
      Timestamp.valueOf("2018-06-15 08:28:53.123456"),
      Timestamp.valueOf("2018-06-16 08:28:53.123456"),
      Timestamp.valueOf("2018-06-17 08:28:53.123456"))
    withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key ->
      ParquetOutputTimestampType.TIMESTAMP_MICROS.toString) {
      testTimestampPushdown(microsData)
    }
    // spark.sql.parquet.outputTimestampType = INT96 doesn't support pushdown:
    // createFilter must return None even for a trivial IsNull source filter.
    withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key ->
      ParquetOutputTimestampType.INT96.toString) {
      withParquetDataFrame(millisData.map(i => Tuple1(i))) { implicit df =>
        val schema = new SparkToParquetSchemaConverter(conf).convert(df.schema)
        assertResult(None) {
          createParquetFilters(schema).createFilter(sources.IsNull("_1"))
        }
      }
    }
  }
  // Decimal push-down must work for every physical encoding Spark may choose,
  // selected here by (legacy format flag, precision): int32, int64, and the two
  // binary writers. The same predicate matrix is run against each encoding.
  test("filter pushdown - decimal") {
    Seq(
      (false, Decimal.MAX_INT_DIGITS), // int32Writer
      (false, Decimal.MAX_LONG_DIGITS), // int64Writer
      (true, Decimal.MAX_LONG_DIGITS), // binaryWriterUsingUnscaledLong
      (false, DecimalType.MAX_PRECISION) // binaryWriterUsingUnscaledBytes
    ).foreach { case (legacyFormat, precision) =>
      withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> legacyFormat.toString) {
        val schema = StructType.fromDDL(s"a decimal($precision, 2)")
        val rdd =
          spark.sparkContext.parallelize((1 to 4).map(i => Row(new java.math.BigDecimal(i))))
        val dataFrame = spark.createDataFrame(rdd, schema)
        testDecimalPushDown(dataFrame) { implicit df =>
          assert(df.schema === schema)
          checkFilterPredicate('a.isNull, classOf[Eq[_]], Seq.empty[Row])
          checkFilterPredicate('a.isNotNull, classOf[NotEq[_]], (1 to 4).map(Row.apply(_)))
          checkFilterPredicate('a === 1, classOf[Eq[_]], 1)
          checkFilterPredicate('a <=> 1, classOf[Eq[_]], 1)
          checkFilterPredicate('a =!= 1, classOf[NotEq[_]], (2 to 4).map(Row.apply(_)))
          checkFilterPredicate('a < 2, classOf[Lt[_]], 1)
          checkFilterPredicate('a > 3, classOf[Gt[_]], 4)
          checkFilterPredicate('a <= 1, classOf[LtEq[_]], 1)
          checkFilterPredicate('a >= 4, classOf[GtEq[_]], 4)
          checkFilterPredicate(Literal(1) === 'a, classOf[Eq[_]], 1)
          checkFilterPredicate(Literal(1) <=> 'a, classOf[Eq[_]], 1)
          checkFilterPredicate(Literal(2) > 'a, classOf[Lt[_]], 1)
          checkFilterPredicate(Literal(3) < 'a, classOf[Gt[_]], 4)
          checkFilterPredicate(Literal(1) >= 'a, classOf[LtEq[_]], 1)
          checkFilterPredicate(Literal(4) <= 'a, classOf[GtEq[_]], 4)
          checkFilterPredicate(!('a < 4), classOf[GtEq[_]], 4)
          checkFilterPredicate('a < 2 || 'a > 3, classOf[Operators.Or], Seq(Row(1), Row(4)))
        }
      }
    }
  }
  // A decimal comparison value whose scale differs from the column's declared scale
  // must NOT be pushed down (the unscaled representation would be wrong); a value
  // with the matching scale must convert to the appropriate int32/int64/binary filter.
  test("Ensure that filter value matched the parquet file schema") {
    val scale = 2
    val schema = StructType(Seq(
      StructField("cint", IntegerType),
      StructField("cdecimal1", DecimalType(Decimal.MAX_INT_DIGITS, scale)),
      StructField("cdecimal2", DecimalType(Decimal.MAX_LONG_DIGITS, scale)),
      StructField("cdecimal3", DecimalType(DecimalType.MAX_PRECISION, scale))
    ))
    val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
    // `decimal` matches the column scale; `decimal1` deliberately does not.
    val decimal = new JBigDecimal(10).setScale(scale)
    val decimal1 = new JBigDecimal(10).setScale(scale + 1)
    assert(decimal.scale() === scale)
    assert(decimal1.scale() === scale + 1)
    val parquetFilters = createParquetFilters(parquetSchema)
    // 10.00 with scale 2 has unscaled value 1000, stored as int32 for this precision.
    assertResult(Some(lt(intColumn("cdecimal1"), 1000: Integer))) {
      parquetFilters.createFilter(sources.LessThan("cdecimal1", decimal))
    }
    assertResult(None) {
      parquetFilters.createFilter(sources.LessThan("cdecimal1", decimal1))
    }
    assertResult(Some(lt(longColumn("cdecimal2"), 1000L: java.lang.Long))) {
      parquetFilters.createFilter(sources.LessThan("cdecimal2", decimal))
    }
    assertResult(None) {
      parquetFilters.createFilter(sources.LessThan("cdecimal2", decimal1))
    }
    // Binary-encoded decimal: only check convertibility, not the exact byte payload.
    assert(parquetFilters.createFilter(sources.LessThan("cdecimal3", decimal)).isDefined)
    assertResult(None) {
      parquetFilters.createFilter(sources.LessThan("cdecimal3", decimal1))
    }
  }
test("SPARK-6554: don't push down predicates which reference partition columns") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part=1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path)
// If the "part = 1" filter gets pushed down, this query will throw an exception since
// "part" is not a valid column in the actual Parquet file
checkAnswer(
spark.read.parquet(dir.getCanonicalPath).filter("part = 1"),
(1 to 3).map(i => Row(i, i.toString, 1)))
}
}
}
test("SPARK-10829: Filter combine partition key and attribute doesn't work in DataSource scan") {
import testImplicits._
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part=1"
(1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path)
// If the "part = 1" filter gets pushed down, this query will throw an exception since
// "part" is not a valid column in the actual Parquet file
checkAnswer(
spark.read.parquet(dir.getCanonicalPath).filter("a > 0 and (part = 0 or a > 1)"),
(2 to 3).map(i => Row(i, i.toString, 1)))
}
}
}
  // Regression test: a partially-pushable filter ("a > 1 or b < 2", where "a" is a
  // partition column) combined with an empty projection must not crash the scan.
  test("SPARK-12231: test the filter and empty project in partitioned DataSource scan") {
    import testImplicits._
    withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
      withTempPath { dir =>
        val path = s"${dir.getCanonicalPath}"
        (1 to 3).map(i => (i, i + 1, i + 2, i + 3)).toDF("a", "b", "c", "d").
          write.partitionBy("a").parquet(path)
        // The filter "a > 1 or b < 2" will not get pushed down, and the projection is empty;
        // before the SPARK-12231 fix this query threw an exception because the projection
        // built from the combined filter did not match the scan's expected output.
        val df1 = spark.read.parquet(dir.getCanonicalPath)
        assert(df1.filter("a > 1 or b < 2").count() == 2)
      }
    }
  }
  // Companion to the previous SPARK-12231 test: exercises the code path where a new
  // projection is generated because the selected columns differ from the union of
  // partition and data columns.
  test("SPARK-12231: test the new projection in partitioned DataSource scan") {
    import testImplicits._
    withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
      withTempPath { dir =>
        val path = s"${dir.getCanonicalPath}"
        (1 to 3).map(i => (i, i + 1, i + 2, i + 3)).toDF("a", "b", "c", "d").
          write.partitionBy("a").parquet(path)
        // test the generate new projection case
        // when projects != partitionAndNormalColumnProjs
        val df1 = spark.read.parquet(dir.getCanonicalPath)
        checkAnswer(
          df1.filter("a > 1 or b > 2").orderBy("a").selectExpr("a", "b", "c", "d"),
          (2 to 3).map(i => Row(i, i + 1, i + 2, i + 3)))
      }
    }
  }
  // With schema merging enabled, a filter on a column that exists in only one of the
  // merged files must not break the read: missing values come back as null. Checked
  // for both a top-level column and a nested struct field, with and without the
  // vectorized reader.
  test("Filter applied on merged Parquet schema with new column should work") {
    import testImplicits._
    Seq("true", "false").foreach { vectorized =>
      withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
        SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true",
        SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized) {
        withTempPath { dir =>
          val path1 = s"${dir.getCanonicalPath}/table1"
          (1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path1)
          val path2 = s"${dir.getCanonicalPath}/table2"
          (1 to 3).map(i => (i, i.toString)).toDF("c", "b").write.parquet(path2)
          // No matter "c = 1" gets pushed down or not, this query should work without exception.
          val df = spark.read.parquet(path1, path2).filter("c = 1").selectExpr("c", "b", "a")
          checkAnswer(
            df,
            Row(1, "1", null))
          val path3 = s"${dir.getCanonicalPath}/table3"
          val dfStruct = sparkContext.parallelize(Seq((1, 1))).toDF("a", "b")
          dfStruct.select(struct("a").as("s")).write.parquet(path3)
          val path4 = s"${dir.getCanonicalPath}/table4"
          val dfStruct2 = sparkContext.parallelize(Seq((1, 1))).toDF("c", "b")
          dfStruct2.select(struct("c").as("s")).write.parquet(path4)
          // No matter "s.c = 1" gets pushed down or not, this query should work without exception.
          val dfStruct3 = spark.read.parquet(path3, path4).filter("s.c = 1")
            .selectExpr("s")
          checkAnswer(dfStruct3, Row(Row(null, 1)))
        }
      }
    }
  }
  // The unsafe row RecordReader does not support row by row filtering so run it with it disabled.
  test("SPARK-11661 Still pushdown filters returned by unhandledFilters") {
    import testImplicits._
    withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
      withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
        withTempPath { dir =>
          val path = s"${dir.getCanonicalPath}/part=1"
          (1 to 3).map(i => (i, i.toString)).toDF("a", "b").write.parquet(path)
          val df = spark.read.parquet(path).filter("a = 2")
          // The result should be single row.
          // When a filter is pushed to Parquet, Parquet can apply it to every row.
          // So, we can check the number of rows returned from the Parquet
          // to make sure our filter pushdown work.
          // stripSparkFilter removes Spark-side filtering so only the pushed-down
          // Parquet filter determines the row count.
          assert(stripSparkFilter(df).count == 1)
        }
      }
    }
  }
  // Queries with negated predicates (`not (...)`, including negated IN) must still
  // return the full correct result; this guards against a Not being converted into
  // an over-aggressive Parquet filter that drops rows.
  test("SPARK-12218: 'Not' is included in Parquet filter pushdown") {
    import testImplicits._
    withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
      withTempPath { dir =>
        val path = s"${dir.getCanonicalPath}/table1"
        (1 to 5).map(i => (i, (i % 2).toString)).toDF("a", "b").write.parquet(path)
        // Both disjunctive and conjunctive negations must keep all 5 rows:
        // each row fails at most one side of the condition.
        checkAnswer(
          spark.read.parquet(path).where("not (a = 2) or not(b in ('1'))"),
          (1 to 5).map(i => Row(i, (i % 2).toString)))
        checkAnswer(
          spark.read.parquet(path).where("not (a = 2 and b in ('1'))"),
          (1 to 5).map(i => Row(i, (i % 2).toString)))
      }
    }
  }
  // Unit-level checks of ParquetFilters.createFilter for conjunctions. Key rule:
  // inside a plain And, an unconvertible side may be dropped (the other side is a
  // sound over-approximation), but under a Not it may NOT be dropped — dropping a
  // conjunct before negating would produce a filter that eliminates valid rows.
  // Column "b" with StringContains is used as the deliberately-unconvertible side.
  test("SPARK-12218 and SPARK-25559 Converting conjunctions into Parquet filter predicates") {
    val schema = StructType(Seq(
      StructField("a", IntegerType, nullable = false),
      StructField("b", StringType, nullable = true),
      StructField("c", DoubleType, nullable = true)
    ))
    val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
    val parquetFilters = createParquetFilters(parquetSchema)
    // Both sides convertible: full And survives.
    assertResult(Some(and(
      lt(intColumn("a"), 10: Integer),
      gt(doubleColumn("c"), 1.5: java.lang.Double)))
    ) {
      parquetFilters.createFilter(
        sources.And(
          sources.LessThan("a", 10),
          sources.GreaterThan("c", 1.5D)))
    }
    // Testing when `canRemoveOneSideInAnd == true`
    // case sources.And(lhs, rhs) =>
    //   ...
    //   case (Some(lhsFilter), None) if canRemoveOneSideInAnd => Some(lhsFilter)
    assertResult(Some(lt(intColumn("a"), 10: Integer))) {
      parquetFilters.createFilter(
        sources.And(
          sources.LessThan("a", 10),
          sources.StringContains("b", "prefix")))
    }
    // Testing when `canRemoveOneSideInAnd == true`
    // case sources.And(lhs, rhs) =>
    //   ...
    //   case (None, Some(rhsFilter)) if canRemoveOneSideInAnd => Some(rhsFilter)
    assertResult(Some(lt(intColumn("a"), 10: Integer))) {
      parquetFilters.createFilter(
        sources.And(
          sources.StringContains("b", "prefix"),
          sources.LessThan("a", 10)))
    }
    // Testing complex And conditions
    assertResult(Some(
      FilterApi.and(lt(intColumn("a"), 10: Integer), gt(intColumn("a"), 5: Integer)))) {
      parquetFilters.createFilter(
        sources.And(
          sources.And(
            sources.LessThan("a", 10),
            sources.StringContains("b", "prefix")
          ),
          sources.GreaterThan("a", 5)))
    }
    // Testing complex And conditions
    assertResult(Some(
      FilterApi.and(gt(intColumn("a"), 5: Integer), lt(intColumn("a"), 10: Integer)))) {
      parquetFilters.createFilter(
        sources.And(
          sources.GreaterThan("a", 5),
          sources.And(
            sources.StringContains("b", "prefix"),
            sources.LessThan("a", 10)
          )))
    }
    // Testing
    // case sources.Not(pred) =>
    //   createFilterHelper(nameToParquetField, pred, canRemoveOneSideInAnd = false)
    //     .map(FilterApi.not)
    //
    // and
    //
    // Testing when `canRemoveOneSideInAnd == false`
    // case sources.And(lhs, rhs) =>
    //   ...
    //   case (Some(lhsFilter), None) if canRemoveOneSideInAnd => Some(lhsFilter)
    assertResult(None) {
      parquetFilters.createFilter(
        sources.Not(
          sources.And(
            sources.GreaterThan("a", 1),
            sources.StringContains("b", "prefix"))))
    }
    // Testing
    // case sources.Not(pred) =>
    //   createFilterHelper(nameToParquetField, pred, canRemoveOneSideInAnd = false)
    //     .map(FilterApi.not)
    //
    // and
    //
    // Testing when `canRemoveOneSideInAnd == false`
    // case sources.And(lhs, rhs) =>
    //   ...
    //   case (None, Some(rhsFilter)) if canRemoveOneSideInAnd => Some(rhsFilter)
    assertResult(None) {
      parquetFilters.createFilter(
        sources.Not(
          sources.And(
            sources.StringContains("b", "prefix"),
            sources.GreaterThan("a", 1))))
    }
    // Testing
    // case sources.Not(pred) =>
    //   createFilterHelper(nameToParquetField, pred, canRemoveOneSideInAnd = false)
    //     .map(FilterApi.not)
    //
    // and
    //
    // Testing passing `canRemoveOneSideInAnd = false` into
    // case sources.And(lhs, rhs) =>
    //   val lhsFilterOption = createFilterHelper(nameToParquetField, lhs, canRemoveOneSideInAnd)
    assertResult(None) {
      parquetFilters.createFilter(
        sources.Not(
          sources.And(
            sources.And(
              sources.GreaterThan("a", 1),
              sources.StringContains("b", "prefix")),
            sources.GreaterThan("a", 2))))
    }
    // Testing
    // case sources.Not(pred) =>
    //   createFilterHelper(nameToParquetField, pred, canRemoveOneSideInAnd = false)
    //     .map(FilterApi.not)
    //
    // and
    //
    // Testing passing `canRemoveOneSideInAnd = false` into
    // case sources.And(lhs, rhs) =>
    //   val rhsFilterOption = createFilterHelper(nameToParquetField, rhs, canRemoveOneSideInAnd)
    assertResult(None) {
      parquetFilters.createFilter(
        sources.Not(
          sources.And(
            sources.GreaterThan("a", 2),
            sources.And(
              sources.GreaterThan("a", 1),
              sources.StringContains("b", "prefix")))))
    }
  }
  // Unit-level checks for Or conversion: inside each branch of an Or, an And may
  // still drop its unconvertible conjunct (canRemoveOneSideInAnd = true), because
  // widening a branch of a disjunction remains a sound over-approximation.
  test("SPARK-27699 Converting disjunctions into Parquet filter predicates") {
    val schema = StructType(Seq(
      StructField("a", IntegerType, nullable = false),
      StructField("b", StringType, nullable = true),
      StructField("c", DoubleType, nullable = true)
    ))
    val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
    val parquetFilters = createParquetFilters(parquetSchema)
    // Testing
    // case sources.Or(lhs, rhs) =>
    //   ...
    //     lhsFilter <- createFilterHelper(nameToParquetField, lhs, canRemoveOneSideInAnd = true)
    assertResult(Some(
      FilterApi.or(gt(intColumn("a"), 1: Integer), gt(intColumn("a"), 2: Integer)))) {
      parquetFilters.createFilter(
        sources.Or(
          sources.And(
            sources.GreaterThan("a", 1),
            sources.StringContains("b", "prefix")),
          sources.GreaterThan("a", 2)))
    }
    // Testing
    // case sources.Or(lhs, rhs) =>
    //   ...
    //     rhsFilter <- createFilterHelper(nameToParquetField, rhs, canRemoveOneSideInAnd = true)
    assertResult(Some(
      FilterApi.or(gt(intColumn("a"), 2: Integer), gt(intColumn("a"), 1: Integer)))) {
      parquetFilters.createFilter(
        sources.Or(
          sources.GreaterThan("a", 2),
          sources.And(
            sources.GreaterThan("a", 1),
            sources.StringContains("b", "prefix"))))
    }
    // Testing
    // case sources.Or(lhs, rhs) =>
    //   ...
    //     lhsFilter <- createFilterHelper(nameToParquetField, lhs, canRemoveOneSideInAnd = true)
    //     rhsFilter <- createFilterHelper(nameToParquetField, rhs, canRemoveOneSideInAnd = true)
    assertResult(Some(
      FilterApi.or(gt(intColumn("a"), 1: Integer), lt(intColumn("a"), 0: Integer)))) {
      parquetFilters.createFilter(
        sources.Or(
          sources.And(
            sources.GreaterThan("a", 1),
            sources.StringContains("b", "prefix")),
          sources.And(
            sources.LessThan("a", 0),
            sources.StringContains("b", "foobar"))))
    }
  }
  // Checks convertibleFilters, which reports (in source-filter form) the subset of
  // the given filters that can be pushed down. Mirrors the createFilter rules: Ands
  // may shed an unconvertible side, Or branches may do so too, but anything under a
  // Not that is only partially convertible yields nothing.
  test("SPARK-27698 Convertible Parquet filter predicates") {
    val schema = StructType(Seq(
      StructField("a", IntegerType, nullable = false),
      StructField("b", StringType, nullable = true),
      StructField("c", DoubleType, nullable = true)
    ))
    val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
    val parquetFilters = createParquetFilters(parquetSchema)
    assertResult(Seq(sources.And(sources.LessThan("a", 10), sources.GreaterThan("c", 1.5D)))) {
      parquetFilters.convertibleFilters(
        Seq(sources.And(
          sources.LessThan("a", 10),
          sources.GreaterThan("c", 1.5D))))
    }
    assertResult(Seq(sources.LessThan("a", 10))) {
      parquetFilters.convertibleFilters(
        Seq(sources.And(
          sources.LessThan("a", 10),
          sources.StringContains("b", "prefix"))))
    }
    assertResult(Seq(sources.LessThan("a", 10))) {
      parquetFilters.convertibleFilters(
        Seq(sources.And(
          sources.StringContains("b", "prefix"),
          sources.LessThan("a", 10))))
    }
    // Testing complex And conditions
    assertResult(Seq(sources.And(sources.LessThan("a", 10), sources.GreaterThan("a", 5)))) {
      parquetFilters.convertibleFilters(
        Seq(sources.And(
          sources.And(
            sources.LessThan("a", 10),
            sources.StringContains("b", "prefix")
          ),
          sources.GreaterThan("a", 5))))
    }
    // Testing complex And conditions
    assertResult(Seq(sources.And(sources.GreaterThan("a", 5), sources.LessThan("a", 10)))) {
      parquetFilters.convertibleFilters(
        Seq(sources.And(
          sources.GreaterThan("a", 5),
          sources.And(
            sources.StringContains("b", "prefix"),
            sources.LessThan("a", 10)
          ))))
    }
    // Testing complex And conditions
    assertResult(Seq(sources.Or(sources.GreaterThan("a", 1), sources.GreaterThan("a", 2)))) {
      parquetFilters.convertibleFilters(
        Seq(sources.Or(
          sources.And(
            sources.GreaterThan("a", 1),
            sources.StringContains("b", "prefix")),
          sources.GreaterThan("a", 2))))
    }
    // Testing complex And/Or conditions, the And condition under Or condition can't be pushed down.
    assertResult(Seq(sources.And(sources.LessThan("a", 10),
      sources.Or(sources.GreaterThan("a", 1), sources.GreaterThan("a", 2))))) {
      parquetFilters.convertibleFilters(
        Seq(sources.And(
          sources.LessThan("a", 10),
          sources.Or(
            sources.And(
              sources.GreaterThan("a", 1),
              sources.StringContains("b", "prefix")),
            sources.GreaterThan("a", 2)))))
    }
    assertResult(Seq(sources.Or(sources.GreaterThan("a", 2), sources.GreaterThan("c", 1.1)))) {
      parquetFilters.convertibleFilters(
        Seq(sources.Or(
          sources.GreaterThan("a", 2),
          sources.And(
            sources.GreaterThan("c", 1.1),
            sources.StringContains("b", "prefix")))))
    }
    // Testing complex Not conditions.
    assertResult(Seq.empty) {
      parquetFilters.convertibleFilters(
        Seq(sources.Not(
          sources.And(
            sources.GreaterThan("a", 1),
            sources.StringContains("b", "prefix")))))
    }
    assertResult(Seq.empty) {
      parquetFilters.convertibleFilters(
        Seq(sources.Not(
          sources.And(
            sources.StringContains("b", "prefix"),
            sources.GreaterThan("a", 1)))))
    }
    assertResult(Seq.empty) {
      parquetFilters.convertibleFilters(
        Seq(sources.Not(
          sources.And(
            sources.And(
              sources.GreaterThan("a", 1),
              sources.StringContains("b", "prefix")),
            sources.GreaterThan("a", 2)))))
    }
    assertResult(Seq.empty) {
      parquetFilters.convertibleFilters(
        Seq(sources.Not(
          sources.And(
            sources.GreaterThan("a", 2),
            sources.And(
              sources.GreaterThan("a", 1),
              sources.StringContains("b", "prefix"))))))
    }
  }
  test("SPARK-16371 Do not push down filters when inner name and outer name are the same") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(Tuple1(i)))) { implicit df =>
      // Here the schema becomes as below:
      //
      // root
      //  |-- _1: struct (nullable = true)
      //  |    |-- _1: integer (nullable = true)
      //
      // The inner column name, `_1` and outer column name `_1` are the same.
      // Obviously this should not push down filters because the outer column is struct.
      // Correctness check only: all 4 rows must survive the IS NOT NULL filter.
      assert(df.filter("_1 IS NOT NULL").count() === 4)
    }
  }
  // Uses an accumulator bumped once per row reaching the executor side to prove
  // that with pushdown enabled the impossible predicate "a < 100" (all rows have
  // a = 101) skips every row group, while with pushdown disabled rows are read.
  test("Filters should be pushed down for vectorized Parquet reader at row group level") {
    import testImplicits._
    withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true",
        SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
      withTempPath { dir =>
        val path = s"${dir.getCanonicalPath}/table"
        (1 to 1024).map(i => (101, i)).toDF("a", "b").write.parquet(path)
        Seq(true, false).foreach { enablePushDown =>
          withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> enablePushDown.toString) {
            val accu = new NumRowGroupsAcc
            sparkContext.register(accu)
            val df = spark.read.parquet(path).filter("a < 100")
            df.foreachPartition((it: Iterator[Row]) => it.foreach(v => accu.add(0)))
            if (enablePushDown) {
              assert(accu.value == 0)
            } else {
              assert(accu.value > 0)
            }
            // Deregister so the next loop iteration starts from a clean accumulator.
            AccumulatorContext.remove(accu.id)
          }
        }
      }
    }
  }
  // Regression test for SPARK-17213: string comparison push-down must use unsigned
  // byte-wise ordering so non-ASCII strings (here "é") compare correctly against
  // ASCII ones. Run with and without the vectorized reader.
  test("SPARK-17213: Broken Parquet filter push-down for string columns") {
    Seq(true, false).foreach { vectorizedEnabled =>
      withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorizedEnabled.toString) {
        withTempPath { dir =>
          import testImplicits._
          val path = dir.getCanonicalPath
          // scalastyle:off nonascii
          Seq("a", "é").toDF("name").write.parquet(path)
          // scalastyle:on nonascii
          assert(spark.read.parquet(path).where("name > 'a'").count() == 1)
          assert(spark.read.parquet(path).where("name >= 'a'").count() == 2)
          // scalastyle:off nonascii
          assert(spark.read.parquet(path).where("name < 'é'").count() == 1)
          assert(spark.read.parquet(path).where("name <= 'é'").count() == 2)
          // scalastyle:on nonascii
        }
      }
    }
  }
test("SPARK-20364: Disable Parquet predicate pushdown for fields having dots in the names") {
import testImplicits._
Seq(true, false).foreach { vectorized =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized.toString,
SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> true.toString,
SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
withTempPath { path =>
Seq(Some(1), None).toDF("col.dots").write.parquet(path.getAbsolutePath)
val readBack = spark.read.parquet(path.getAbsolutePath).where("`col.dots` IS NOT NULL")
assert(readBack.count() == 1)
}
}
}
}
  // Row-group-level filtering must apply even when record-level filtering is off:
  // with a small block size forcing multiple row groups, a point predicate should
  // prune some groups (count < total) but, without record filtering, keep the whole
  // group containing the match (count > 1).
  test("Filters should be pushed down for Parquet readers at row group level") {
    import testImplicits._
    withSQLConf(
      // Makes sure disabling 'spark.sql.parquet.recordFilter' still enables
      // row group level filtering.
      SQLConf.PARQUET_RECORD_FILTER_ENABLED.key -> "false",
      SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
      SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
      withTempPath { path =>
        val data = (1 to 1024)
        // Tiny block size => many small row groups in a single file.
        data.toDF("a").coalesce(1)
          .write.option("parquet.block.size", 512)
          .parquet(path.getAbsolutePath)
        val df = spark.read.parquet(path.getAbsolutePath).filter("a == 500")
        // Here, we strip the Spark side filter and check the actual results from Parquet.
        val actual = stripSparkFilter(df).collect().length
        // Since those are filtered at row group level, the result count should be less
        // than the total length but should not be a single record.
        // Note that, if record level filtering is enabled, it should be a single record.
        // If no filter is pushed down to Parquet, it should be the total length of data.
        assert(actual > 1 && actual < data.length)
      }
    }
  }
test("SPARK-23852: Broken Parquet push-down for partially-written stats") {
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
// parquet-1217.parquet contains a single column with values -1, 0, 1, 2 and null.
// The row-group statistics include null counts, but not min and max values, which
// triggers PARQUET-1217.
val df = readResourceParquetFile("test-data/parquet-1217.parquet")
// Will return 0 rows if PARQUET-1217 is not fixed.
assert(df.where("col > 0").count() === 2)
}
}
  // StringStartsWith is pushed down as a user-defined Parquet predicate. Checks
  // matching prefixes, non-matching prefixes (including case difference and a
  // too-long prefix), their negations, the null-prefix case (no filter), and that
  // the predicate is null-safe on the column values (SPARK-28371).
  test("filter pushdown - StringStartsWith") {
    withParquetDataFrame((1 to 4).map(i => Tuple1(i + "str" + i))) { implicit df =>
      // The empty prefix matches every row.
      checkFilterPredicate(
        '_1.startsWith("").asInstanceOf[Predicate],
        classOf[UserDefinedByInstance[_, _]],
        Seq("1str1", "2str2", "3str3", "4str4").map(Row(_)))
      // Every prefix of "2str2" selects exactly that row.
      Seq("2", "2s", "2st", "2str", "2str2").foreach { prefix =>
        checkFilterPredicate(
          '_1.startsWith(prefix).asInstanceOf[Predicate],
          classOf[UserDefinedByInstance[_, _]],
          "2str2")
      }
      // Wrong case, a literal "null" string, and a longer-than-value prefix match nothing.
      Seq("2S", "null", "2str22").foreach { prefix =>
        checkFilterPredicate(
          '_1.startsWith(prefix).asInstanceOf[Predicate],
          classOf[UserDefinedByInstance[_, _]],
          Seq.empty[Row])
      }
      // Negations of the above.
      checkFilterPredicate(
        !'_1.startsWith("").asInstanceOf[Predicate],
        classOf[Operators.Not],
        Seq().map(Row(_)))
      Seq("2", "2s", "2st", "2str", "2str2").foreach { prefix =>
        checkFilterPredicate(
          !'_1.startsWith(prefix).asInstanceOf[Predicate],
          classOf[Operators.Not],
          Seq("1str1", "3str3", "4str4").map(Row(_)))
      }
      Seq("2S", "null", "2str22").foreach { prefix =>
        checkFilterPredicate(
          !'_1.startsWith(prefix).asInstanceOf[Predicate],
          classOf[Operators.Not],
          Seq("1str1", "2str2", "3str3", "4str4").map(Row(_)))
      }
      // A null prefix must not be converted into a Parquet filter.
      val schema = new SparkToParquetSchemaConverter(conf).convert(df.schema)
      assertResult(None) {
        createParquetFilters(schema).createFilter(sources.StringStartsWith("_1", null))
      }
    }
    // SPARK-28371: make sure filter is null-safe.
    withParquetDataFrame(Seq(Tuple1[String](null))) { implicit df =>
      checkFilterPredicate(
        '_1.startsWith("blah").asInstanceOf[Predicate],
        classOf[UserDefinedByInstance[_, _]],
        Seq.empty[Row])
    }
    import testImplicits._
    // Test canDrop() has taken effect
    testStringStartsWith(spark.range(1024).map(_.toString).toDF(), "value like 'a%'")
    // Test inverseCanDrop() has taken effect
    testStringStartsWith(spark.range(1024).map(c => "100").toDF(), "value not like '10%'")
  }
  // IN is converted into a chain of OR-ed equality filters, but only while the
  // number of distinct values stays at or below
  // spark.sql.parquet.pushdown.inFilterThreshold; above that no filter is produced.
  // The second half verifies row-group pruning end-to-end for pushed IN filters.
  test("SPARK-17091: Convert IN predicate to Parquet filter push-down") {
    val schema = StructType(Seq(
      StructField("a", IntegerType, nullable = false)
    ))
    val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
    val parquetFilters = createParquetFilters(parquetSchema)
    // IN (null) becomes an equality against null.
    assertResult(Some(FilterApi.eq(intColumn("a"), null: Integer))) {
      parquetFilters.createFilter(sources.In("a", Array(null)))
    }
    assertResult(Some(FilterApi.eq(intColumn("a"), 10: Integer))) {
      parquetFilters.createFilter(sources.In("a", Array(10)))
    }
    // Remove duplicates
    assertResult(Some(FilterApi.eq(intColumn("a"), 10: Integer))) {
      parquetFilters.createFilter(sources.In("a", Array(10, 10)))
    }
    // Multiple values: left-nested OR chain of equalities.
    assertResult(Some(or(or(
      FilterApi.eq(intColumn("a"), 10: Integer),
      FilterApi.eq(intColumn("a"), 20: Integer)),
      FilterApi.eq(intColumn("a"), 30: Integer)))
    ) {
      parquetFilters.createFilter(sources.In("a", Array(10, 20, 30)))
    }
    // At the threshold a filter is produced; one past it, none.
    assert(parquetFilters.createFilter(sources.In("a",
      Range(0, conf.parquetFilterPushDownInFilterThreshold).toArray)).isDefined)
    assert(parquetFilters.createFilter(sources.In("a",
      Range(0, conf.parquetFilterPushDownInFilterThreshold + 1).toArray)).isEmpty)
    import testImplicits._
    withTempPath { path =>
      val data = 0 to 1024
      data.toDF("a").selectExpr("if (a = 1024, null, a) AS a") // convert 1024 to null
        .coalesce(1).write.option("parquet.block.size", 512)
        .parquet(path.getAbsolutePath)
      val df = spark.read.parquet(path.getAbsolutePath)
      Seq(true, false).foreach { pushEnabled =>
        withSQLConf(
          SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> pushEnabled.toString) {
          Seq(1, 5, 10, 11).foreach { count =>
            val filter = s"a in(${Range(0, count).mkString(",")})"
            assert(df.where(filter).count() === count)
            val actual = stripSparkFilter(df.where(filter)).collect().length
            // With pushdown within the threshold some row groups are pruned;
            // otherwise every row is returned from Parquet.
            if (pushEnabled && count <= conf.parquetFilterPushDownInFilterThreshold) {
              assert(actual > 1 && actual < data.length)
            } else {
              assert(actual === data.length)
            }
          }
          // Null semantics of IN / = / IS NULL are unaffected by pushdown.
          assert(df.where("a in(null)").count() === 0)
          assert(df.where("a = null").count() === 0)
          assert(df.where("a is null").count() === 1)
        }
      }
    }
  }
  // Field-name resolution for pushdown must honor spark.sql.caseSensitive: the
  // case-insensitive converter maps "CINT" onto the Parquet column "cint" for every
  // operator, the case-sensitive one produces no filter, and an ambiguous match
  // (two columns differing only in case) yields no filter even when insensitive.
  test("SPARK-25207: Case-insensitive field resolution for pushdown when reading parquet") {
    // Asserts the case-insensitive converter yields `expected` and the
    // case-sensitive converter yields None for the same source filter.
    def testCaseInsensitiveResolution(
        schema: StructType,
        expected: FilterPredicate,
        filter: sources.Filter): Unit = {
      val parquetSchema = new SparkToParquetSchemaConverter(conf).convert(schema)
      val caseSensitiveParquetFilters =
        createParquetFilters(parquetSchema, caseSensitive = Some(true))
      val caseInsensitiveParquetFilters =
        createParquetFilters(parquetSchema, caseSensitive = Some(false))
      assertResult(Some(expected)) {
        caseInsensitiveParquetFilters.createFilter(filter)
      }
      assertResult(None) {
        caseSensitiveParquetFilters.createFilter(filter)
      }
    }
    val schema = StructType(Seq(StructField("cint", IntegerType)))
    testCaseInsensitiveResolution(
      schema, FilterApi.eq(intColumn("cint"), null.asInstanceOf[Integer]), sources.IsNull("CINT"))
    testCaseInsensitiveResolution(
      schema,
      FilterApi.notEq(intColumn("cint"), null.asInstanceOf[Integer]),
      sources.IsNotNull("CINT"))
    testCaseInsensitiveResolution(
      schema, FilterApi.eq(intColumn("cint"), 1000: Integer), sources.EqualTo("CINT", 1000))
    testCaseInsensitiveResolution(
      schema,
      FilterApi.notEq(intColumn("cint"), 1000: Integer),
      sources.Not(sources.EqualTo("CINT", 1000)))
    testCaseInsensitiveResolution(
      schema, FilterApi.eq(intColumn("cint"), 1000: Integer), sources.EqualNullSafe("CINT", 1000))
    testCaseInsensitiveResolution(
      schema,
      FilterApi.notEq(intColumn("cint"), 1000: Integer),
      sources.Not(sources.EqualNullSafe("CINT", 1000)))
    testCaseInsensitiveResolution(
      schema,
      FilterApi.lt(intColumn("cint"), 1000: Integer), sources.LessThan("CINT", 1000))
    testCaseInsensitiveResolution(
      schema,
      FilterApi.ltEq(intColumn("cint"), 1000: Integer),
      sources.LessThanOrEqual("CINT", 1000))
    testCaseInsensitiveResolution(
      schema, FilterApi.gt(intColumn("cint"), 1000: Integer), sources.GreaterThan("CINT", 1000))
    testCaseInsensitiveResolution(
      schema,
      FilterApi.gtEq(intColumn("cint"), 1000: Integer),
      sources.GreaterThanOrEqual("CINT", 1000))
    testCaseInsensitiveResolution(
      schema,
      FilterApi.or(
        FilterApi.eq(intColumn("cint"), 10: Integer),
        FilterApi.eq(intColumn("cint"), 20: Integer)),
      sources.In("CINT", Array(10, 20)))
    // Ambiguous case-insensitive match ("cint" vs "cINT"): no filter may be produced.
    val dupFieldSchema = StructType(
      Seq(StructField("cint", IntegerType), StructField("cINT", IntegerType)))
    val dupParquetSchema = new SparkToParquetSchemaConverter(conf).convert(dupFieldSchema)
    val dupCaseInsensitiveParquetFilters =
      createParquetFilters(dupParquetSchema, caseSensitive = Some(false))
    assertResult(None) {
      dupCaseInsensitiveParquetFilters.createFilter(sources.EqualTo("CINT", 1000))
    }
  }
  // End-to-end counterpart of the previous test: reading a file whose columns differ
  // only in case must fail with a clear error in case-insensitive mode, and succeed
  // with exact-name resolution in case-sensitive mode.
  test("SPARK-25207: exception when duplicate fields in case-insensitive mode") {
    withTempPath { dir =>
      val count = 10
      val tableName = "spark_25207"
      val tableDir = dir.getAbsoluteFile + "/table"
      withTable(tableName) {
        // Write with case-sensitive mode so both "B" and "b" land in the file.
        withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
          spark.range(count).selectExpr("id as A", "id as B", "id as b")
            .write.mode("overwrite").parquet(tableDir)
        }
        sql(
          s"""
             |CREATE TABLE $tableName (A LONG, B LONG) USING PARQUET LOCATION '$tableDir'
           """.stripMargin)
        withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
          val e = intercept[SparkException] {
            sql(s"select a from $tableName where b > 0").collect()
          }
          assert(e.getCause.isInstanceOf[RuntimeException] && e.getCause.getMessage.contains(
            """Found duplicate field(s) "B": [B, b] in case-insensitive mode"""))
        }
        withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
          checkAnswer(sql(s"select A from $tableName where B > 0"), (1 until count).map(Row(_)))
        }
      }
    }
  }
}
/**
 * Runs the shared [[ParquetFilterSuite]] tests against the V1 (file-source /
 * HadoopFsRelation based) Parquet read path.
 */
class ParquetV1FilterSuite extends ParquetFilterSuite {
  // Force the V1 code path for the parquet source.
  override protected def sparkConf: SparkConf =
    super
      .sparkConf
      .set(SQLConf.USE_V1_SOURCE_LIST, "parquet")

  /**
   * Executes `df` filtered by `predicate`, verifies that the predicate is translated and
   * pushed down to Parquet as a filter of class `filterClass`, then validates the result
   * (with Spark-side filtering stripped) against `expected` via `checker`.
   */
  override def checkFilterPredicate(
      df: DataFrame,
      predicate: Predicate,
      filterClass: Class[_ <: FilterPredicate],
      checker: (DataFrame, Seq[Row]) => Unit,
      expected: Seq[Row]): Unit = {
    val output = predicate.collect { case a: Attribute => a }.distinct

    withSQLConf(
      SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_DATE_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_TIMESTAMP_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_DECIMAL_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED.key -> "true",
      // Disable adding filters from constraints because it adds, for instance,
      // is-not-null to pushed filters, which makes it hard to test if the pushed
      // filter is expected or not (this had to be fixed with SPARK-13495).
      SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> InferFiltersFromConstraints.ruleName,
      SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
      val query = df
        .select(output.map(e => Column(e)): _*)
        .where(Column(predicate))

      // Collect the analyzed filters (and the relation they sit on) from the optimized plan.
      var maybeRelation: Option[HadoopFsRelation] = None
      val maybeAnalyzedPredicate = query.queryExecution.optimizedPlan.collect {
        case PhysicalOperation(_, filters,
        LogicalRelation(relation: HadoopFsRelation, _, _, _)) =>
          maybeRelation = Some(relation)
          filters
      }.flatten.reduceLeftOption(_ && _)
      assert(maybeAnalyzedPredicate.isDefined, "No filter is analyzed from the given query")

      // Translate the catalyst predicate into data-source filters eligible for pushdown.
      val (_, selectedFilters, _) =
        DataSourceStrategy.selectFilters(maybeRelation.get, maybeAnalyzedPredicate.toSeq)
      assert(selectedFilters.nonEmpty, "No filter is pushed down")
      val schema = new SparkToParquetSchemaConverter(conf).convert(df.schema)
      val parquetFilters = createParquetFilters(schema)
      // In this test suite, all the simple predicates are convertible here.
      assert(parquetFilters.convertibleFilters(selectedFilters) === selectedFilters)
      val pushedParquetFilters = selectedFilters.map { pred =>
        val maybeFilter = parquetFilters.createFilter(pred)
        assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $pred")
        maybeFilter.get
      }
      // Doesn't bother checking type parameters here (e.g. `Eq[Integer]`)
      assert(pushedParquetFilters.exists(_.getClass === filterClass),
        s"${pushedParquetFilters.map(_.getClass).toList} did not contain ${filterClass}.")

      checker(stripSparkFilter(query), expected)
    }
  }
}
/**
 * Runs the shared [[ParquetFilterSuite]] tests against the V2 (DataSource V2 scan based)
 * Parquet read path.
 */
class ParquetV2FilterSuite extends ParquetFilterSuite {
  // TODO: enable Parquet V2 write path after file source V2 writers are workable.
  override protected def sparkConf: SparkConf =
    super
      .sparkConf
      .set(SQLConf.USE_V1_SOURCE_LIST, "")

  /**
   * Executes `df` filtered by `predicate`, verifies that the predicate is pushed into the
   * V2 [[ParquetScan]] as a filter of class `filterClass`, then validates the result
   * (with Spark-side filtering stripped) against `expected` via `checker`.
   */
  override def checkFilterPredicate(
      df: DataFrame,
      predicate: Predicate,
      filterClass: Class[_ <: FilterPredicate],
      checker: (DataFrame, Seq[Row]) => Unit,
      expected: Seq[Row]): Unit = {
    val output = predicate.collect { case a: Attribute => a }.distinct

    withSQLConf(
      SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_DATE_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_TIMESTAMP_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_DECIMAL_ENABLED.key -> "true",
      SQLConf.PARQUET_FILTER_PUSHDOWN_STRING_STARTSWITH_ENABLED.key -> "true",
      // Disable adding filters from constraints because it adds, for instance,
      // is-not-null to pushed filters, which makes it hard to test if the pushed
      // filter is expected or not (this had to be fixed with SPARK-13495).
      SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> InferFiltersFromConstraints.ruleName,
      SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
      val query = df
        .select(output.map(e => Column(e)): _*)
        .where(Column(predicate))

      // The V2 plan exposes the pushed filters directly on the ParquetScan node.
      query.queryExecution.optimizedPlan.collectFirst {
        case PhysicalOperation(_, filters,
        DataSourceV2ScanRelation(_, scan: ParquetScan, _)) =>
          assert(filters.nonEmpty, "No filter is analyzed from the given query")
          val sourceFilters = filters.flatMap(DataSourceStrategy.translateFilter).toArray
          val pushedFilters = scan.pushedFilters
          assert(pushedFilters.nonEmpty, "No filter is pushed down")
          val schema = new SparkToParquetSchemaConverter(conf).convert(df.schema)
          val parquetFilters = createParquetFilters(schema)
          // In this test suite, all the simple predicates are convertible here.
          assert(parquetFilters.convertibleFilters(sourceFilters) === pushedFilters)
          val pushedParquetFilters = pushedFilters.map { pred =>
            val maybeFilter = parquetFilters.createFilter(pred)
            assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $pred")
            maybeFilter.get
          }
          // Doesn't bother checking type parameters here (e.g. `Eq[Integer]`)
          assert(pushedParquetFilters.exists(_.getClass === filterClass),
            s"${pushedParquetFilters.map(_.getClass).toList} did not contain ${filterClass}.")

          checker(stripSparkFilter(query), expected)

        case _ =>
          // If the plan does not contain a ParquetScan at all, the test setup is wrong.
          throw new AnalysisException("Can not match ParquetTable in the query.")
      }
    }
  }
}
/**
 * Accumulator counting Parquet row groups observed across tasks.
 * Values added on executors are summed into a single integer on the driver.
 */
class NumRowGroupsAcc extends AccumulatorV2[Integer, Integer] {
  // Running total of row groups seen so far.
  private var groupCount = 0

  override def isZero: Boolean = groupCount == 0

  override def copy(): AccumulatorV2[Integer, Integer] = {
    val duplicate = new NumRowGroupsAcc()
    duplicate.groupCount = groupCount
    duplicate
  }

  override def reset(): Unit = groupCount = 0

  override def add(v: Integer): Unit = groupCount += v

  override def merge(other: AccumulatorV2[Integer, Integer]): Unit = other match {
    case that: NumRowGroupsAcc =>
      groupCount += that.groupCount
    case _ =>
      throw new UnsupportedOperationException(
        s"Cannot merge ${this.getClass.getName} with ${other.getClass.getName}")
  }

  override def value: Integer = groupCount
}
| jkbradley/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala | Scala | apache-2.0 | 63,168 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IndexSink, StubElement, StubInputStream, StubOutputStream}
import com.intellij.util.io.StringRef
import org.jetbrains.plugins.scala.lang.psi.api.base.ScAccessModifier
import org.jetbrains.plugins.scala.lang.psi.impl.base.ScAccessModifierImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScAccessModifierStubImpl
/**
* User: Alexander Podkhalyuzin
* Date: 17.06.2009
*/
/**
 * Stub element type for Scala access modifiers (`private`, `protected`, optional
 * `[this]` / `[qualifier]`). Handles (de)serialization of the stub's three boolean
 * flags plus the optional qualifier id text; the write order in `serialize` must
 * mirror the read order in `deserializeImpl`.
 */
class ScAccessModifierElementType[Func <: ScAccessModifier]
  extends ScStubElementType[ScAccessModifierStub, ScAccessModifier]("access modifier") {

  /** Writes the stub's flags and (optionally) its qualifier id to the stream. */
  def serialize(stub: ScAccessModifierStub, dataStream: StubOutputStream): Unit = {
    dataStream.writeBoolean(stub.isProtected)
    dataStream.writeBoolean(stub.isPrivate)
    dataStream.writeBoolean(stub.isThis)
    // `isDefined` instead of the non-idiomatic `!= None` comparison.
    val hasId = stub.getIdText.isDefined
    dataStream.writeBoolean(hasId)
    if (hasId) {
      dataStream.writeName(stub.getIdText.get)
    }
  }

  // Access modifiers are not indexed.
  def indexStub(stub: ScAccessModifierStub, sink: IndexSink): Unit = {}

  /** Builds the PSI element backed by the given stub. */
  def createPsi(stub: ScAccessModifierStub): ScAccessModifier = {
    new ScAccessModifierImpl(stub)
  }

  /** Builds a stub from an existing PSI element. */
  def createStubImpl[ParentPsi <: PsiElement](psi: ScAccessModifier, parentStub: StubElement[ParentPsi]): ScAccessModifierStub = {
    new ScAccessModifierStubImpl(parentStub.asInstanceOf[StubElement[PsiElement]], this, psi.isPrivate, psi.isProtected,
      psi.isThis, psi.idText.map(StringRef.fromString))
  }

  /** Reads back the fields in the exact order written by [[serialize]]. */
  def deserializeImpl(dataStream: StubInputStream, parentStub: Any): ScAccessModifierStub = {
    val isProtected = dataStream.readBoolean
    val isPrivate = dataStream.readBoolean
    val isThis = dataStream.readBoolean
    val hasId = dataStream.readBoolean
    val idText = if (hasId) Some(dataStream.readName) else None
    new ScAccessModifierStubImpl(parentStub.asInstanceOf[StubElement[PsiElement]], this, isPrivate, isProtected, isThis, idText)
  }
}
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/ScAccessModifierElementType.scala | Scala | apache-2.0 | 2,000 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.util
import java.util.jar._
import java.io.{File, FileInputStream, FileOutputStream, InputStream, OutputStream}
import org.codehaus.plexus.archiver.tar._
import com.typesafe.scalalogging.LazyLogging
import cmwell.util.string._
import cmwell.util.os.Props._
import cmwell.util.exceptions._
import scala.language.implicitConversions
/**
* Created with IntelliJ IDEA.
* User: gilad
* Date: 7/23/13
* Time: 10:51 AM
* To change this template use File | Settings | File Templates.
*/
package object files extends LazyLogging {
//it's a wrapper for plexus' weird logger...
//TODO: use the threshold!
  // Adapter exposing our scala-logging Logger through plexus' logging interface,
  // needed because plexus' TarGZipUnArchiver requires a non-null logger (see untgz).
  // TODO: use the threshold!
  private[this] class FileOpsTmpLogger(threshold: Int, name: String)
    extends org.codehaus.plexus.logging.AbstractLogger(threshold, name)
      with LazyLogging {
    // NOTE(review): returns null instead of a child logger — callers must not rely on it.
    def getChildLogger(arg0: String): org.codehaus.plexus.logging.Logger = { null }
    def fatalError(arg0: String, arg1: Throwable): Unit = logger.error(arg0, arg1)
    def error(arg0: String, arg1: Throwable): Unit = logger.error(arg0, arg1)
    def warn(arg0: String, arg1: Throwable): Unit = logger.warn(arg0, arg1)
    // plexus "info"/"debug" are intentionally mapped one level down to debug/trace.
    def info(arg0: String, arg1: Throwable): Unit = logger.debug(arg0, arg1)
    def debug(arg0: String, arg1: Throwable): Unit = logger.trace(arg0, arg1)
  }
  /**
   * Opens `filename` as a classpath resource stream; when running from a one-jar
   * bundle (system property "onejar.file" set) the resource is read out of the jar
   * instead.
   */
  def resourceToStream(filename: String) = {
    if (System.getProperty("onejar.file") == null)
      Thread.currentThread.getContextClassLoader.getResourceAsStream(filename)
    else
      fileFromZipToStream(filename, os.Props.jarName)
  }

  /**
   * Returns an InputStream for entry `filename` inside `zipfile`. If the entry is not
   * found as-is, retries with a leading '/' toggled (added or removed).
   * NOTE(review): the ZipFile is never closed here — closing it would invalidate the
   * returned stream; the zip handle lives until GC.
   */
  def fileFromZipToStream(filename: String, zipfile: String): InputStream = {
    val zip = stringToZipFile(zipfile)
    val ze = {
      val rv = zip.getEntry(filename)
      if (rv == null) {
        val toggleSlash = if (filename.head == '/') filename.drop(1) else s"/$filename"
        zip.getEntry(toggleSlash)
      } else rv
    }
    zip.getInputStream(ze)
  }
  /** Copies a classpath resource to the file at path `file`. */
  def resourceToFile(resource: String, file: String) { resourceToFile(resource, new File(file)) }

  /** Copies a classpath resource to `file`, creating parent directories as needed. */
  def resourceToFile(resource: String, file: File) {
    val resourceStream = resourceToStream(resource)
    isToFile(resourceStream, file)
  }

  /**
   * @author Israel (copied from clfRunner & modified by Gilad)
   *
   * Copy from InputStream to target file
   */
  implicit def stringToFile(filename: String): File = new java.io.File(filename)

  /**
   * Copies all bytes from `input` into `to`, creating parent directories first.
   * The output stream is closed in a finally block; `input` is NOT closed here.
   */
  def isToFile(input: InputStream, to: File) {
    ensureDirHirerchy(to.getAbsolutePath)
    val out = new FileOutputStream(to)
    try {
      isToOs(input, out)
    } finally {
      out.close()
    }
  }

  // Creates all parent directories of `path` (path is normalized to '/' separators first).
  private[this] def ensureDirHirerchy(path: String) {
    val nixPath = unixify(path)
    new java.io.File(nixPath.dropRight(nixPath.length - nixPath.lastIndexOf("/"))).mkdirs
  }
  /**
   * Extracts the directory `jarPath` from the currently running jar into directory `to`,
   * stripping the `jarPath` prefix from each extracted entry's path.
   */
  def deployDirectoryFromJarToFile(jarPath: String, to: String) {
    logger.debug("deploying " + jarPath + " to " + to)
    val jarfile = new JarFile(jarName)
    logger.debug("jarfile = " + jarfile.getName)
    unzipEntries(jarfile,
      jarfile.entries,
      to,
      e => e.getName.startsWith(jarPath),
      s => if (s.startsWith(jarPath)) s.drop(jarPath.length) else s)
  }
/**
* @author Israel (copied from clfRunner & modified by Gilad)
*
* Copy from InputStream to target file through buffer
*/
def isToOs(input: InputStream, to: OutputStream) {
val buffer = new Array[Byte](8192)
def transfer() {
val read = input.read(buffer)
if (read >= 0) {
to.write(buffer, 0, read)
transfer()
}
}
transfer()
}
def isToString(input: InputStream): String = {
scala.io.Source.fromInputStream(input).mkString
}
  /**
   * @author Gilad
   *
   * Overwrites `to` with the contents of `input`: an existing file is deleted first,
   * and an existing directory is recursively emptied and then deleted.
   */
  def isToOverrideFile(input: InputStream, to: File) {
    if (to.exists) {
      if (to.isDirectory) {
        cleanDir(to)
      }
      to.delete
    }
    isToFile(input, to)
  }
  /**
   * @author Gilad
   *
   * extract a given zip File into a given destination
   */
  implicit def stringToZipFile(zip: String): java.util.zip.ZipFile = new java.util.zip.ZipFile(zip)
  implicit def fileToZipFile(file: java.io.File): java.util.zip.ZipFile = new java.util.zip.ZipFile(file)

  /** Extracts the whole of `zip` under directory `to`, then closes the zip handle. */
  def unzip(zip: java.util.zip.ZipFile, to: String) {
    unzipEntries(zip, zip.entries, to, e => true, identity)
    zip.close
  }

  /**
   * Extracts `entries` of `zip` under `to`. Only entries matching `filter` are written;
   * each entry's path is rewritten by `pathModifier` before extraction (used e.g. to
   * strip a prefix). Parent directories are created as needed.
   */
  private[this] def unzipEntries(zip: java.util.zip.ZipFile,
                                 entries: java.util.Enumeration[_ <: java.util.zip.ZipEntry],
                                 to: String,
                                 filter: java.util.zip.ZipEntry => Boolean,
                                 pathModifier: String => String) {
    while (entries.hasMoreElements) {
      val entry: java.util.zip.ZipEntry = entries.nextElement
      if (filter(entry)) {
        if (entry.isDirectory) {
          // Directory entry: just make sure it exists.
          val d = new File(to + pathModifier(entry.getName))
          if (!d.exists) { d.mkdirs }
        } else {
          // File entry: create the parent directory chain, then copy the bytes.
          val n = unixify(pathModifier(entry.getName))
          val d = (n.dropRight(n.length - n.lastIndexOf("/")))
          val f = new File(to + d)
          if (!f.exists) { f.mkdirs }
          isToFile(zip.getInputStream(entry), new java.io.File(to + n))
        }
      } else {
        logger.trace("skipping: " + entry.getName)
      }
    }
  }
  /**
   * Extracts a .tar.gz archive `srcFile` into directory `dstDir` (created if missing),
   * using plexus' TarGZipUnArchiver.
   */
  def untgz(srcFile: File, dstDir: File) {
    // True for letters, '_' and '-'; used below to derive the logger name.
    def validLetters(c: Char): Boolean = {
      c match {
        case '_' => true
        case '-' => true
        case chr => chr.isLetter
      }
    }
    val ua: TarGZipUnArchiver = new TarGZipUnArchiver
    // for some reason, plexus' TarGZipUnArchiver must have org.codehaus.plexus.logging.Logger
    // enabled for it to run properly. (otherwise you'll get a null pointer exception)
    // NOTE(review): filterNot(validLetters) KEEPS only the non-letter characters of the
    // file name for the logger name — possibly `filter` was intended; verify.
    val logger: org.codehaus.plexus.logging.Logger =
      new FileOpsTmpLogger(0, srcFile.getName.toLowerCase.filterNot(validLetters))
    ua.enableLogging(logger)
    ua.setSourceFile(srcFile)
    dstDir.mkdirs
    ua.setDestDirectory(dstDir)
    ua.extract
  }
def cleanDir(dir: java.io.File) { cleanDir(dir, _ => false) }
/**
* filter should return true for files that needs to be kept
* and not deleted.
*/
def cleanDir(dir: java.io.File, filter: String => Boolean) {
//logger.debug("cleanDir: " + dir.getAbsolutePath)
if (!dir.exists) {
throw new IllegalFileException("can't clean a dir which does not exist! (" + dir.getAbsolutePath + ")")
} else if (!dir.isDirectory) {
throw new IllegalFileException(dir.getAbsolutePath + " is not a directory!")
}
for (f <- dir.listFiles) {
if (!filter(f.getName)) {
if (f.isDirectory) {
cleanDir(f, filter)
}
f.delete
}
}
}
  /**
   * Recursively merges file/directory `src` into `dst`: directories are merged entry by
   * entry, and an existing destination file (or a file blocking a directory) is replaced.
   *
   * @throws IllegalArgumentException if `src` does not exist
   */
  def mergeDirInto(src: String, dst: String) {
    val s = new java.io.File(src)
    if (!s.exists) throw new IllegalArgumentException("source file " + src + " was not found!")
    val d = new java.io.File(dst)
    if (s.isDirectory) {
      // Destination must be a directory: remove a blocking file or create it.
      if (d.exists && !d.isDirectory) {
        d.delete
      } else if (!d.exists) {
        d.mkdirs
      }
      s.list.foreach(f => {
        mergeDirInto(src + "/" + f, dst + "/" + f);
        logger.trace("merging " + f + " into " + d.getAbsolutePath)
      })
    } else {
      // Source is a plain file: replace the destination file with it.
      if (d.exists) {
        d.delete
      }
      isToFile(new FileInputStream(s), d)
    }
  }
  /**
   * Replaces the file at `fileToOverride` with the bytes of `contentInputStream`.
   * If the file is missing, its parent directory chain is created first.
   * NOTE(review): when the file already exists its parent is assumed to exist too,
   * so mkdirs is only called on the non-existing branch — confirm this is intended.
   */
  def replaceFileContentFromInputStream(contentInputStream: InputStream, fileToOverride: String) {
    val file = new java.io.File(fileToOverride)
    val path =
      new java.io.File(fileToOverride.dropRight(fileToOverride.length - fileToOverride.lastIndexOf(fileSeparator)))
    if (file.exists) {
      file.delete
    } else {
      path.mkdirs
    }
    isToFile(contentInputStream, file)
  }
def writeStringToFile(s: String, f: File) {
logger.debug("writing: " + s + " to " + f.getAbsolutePath)
if (f.exists) {
logger.warn(f.getName + " already exists!")
if (!f.delete) {
logger.error("failed to truncate " + f.getName)
throw new IllegalFileException("could not delete " + f.getAbsolutePath)
}
} else { logger.debug(f.getName + " does not exists.") }
if (f.createNewFile) {
logger.debug("creating: " + f.getAbsolutePath)
val fw = new java.io.FileWriter(f)
fw.write(s)
fw.flush
} else {
logger.error("failed to create " + f.getName)
throw new IllegalFileException("could not create " + f.getAbsolutePath)
}
}
  /** Deletes the file at `path`; per java.nio semantics throws NoSuchFileException if absent. */
  def deleteFile(path: String) = java.nio.file.Files.delete(java.nio.file.Paths.get(path))
}
| hochgi/CM-Well | server/cmwell-util/src/main/scala/cmwell/util/files/package.scala | Scala | apache-2.0 | 9,363 |
package genetic.baldwin
import java.util
import java.util.Random
import genetic.genetic.generation.Crossover
import genetic.genetic.Metric
import genetic.genetic.{Genetic, Metric}
/**
 * Genetic-algorithm definition for the Baldwin-effect bit-string experiment:
 * genes are byte arrays over {0, 1, ?} that must evolve toward `target`.
 */
class GeneticBaldwin(maxIterations: Int,
                     target: Array[Byte],
                     rand: Random) extends Genetic[Array[Byte]] {

  /**
   * Lower is better: 0 for an exact match with `target`; otherwise a value in (0, 1)
   * where more remaining local-search budget maps to a smaller (better) fitness.
   */
  override def fitness(gene: Array[Byte]): Double = {
    if(java.util.Arrays.equals(gene, target)) 0
    else {
      // do not let the fitness to be 0
      val remainingIterations = Math.min(BaldwinBitString.localSearchesTimeRemaining(gene, target, maxIterations, rand), maxIterations - 1)
      val rawFitness = 1 + 19.0 * remainingIterations.toDouble / maxIterations // 1~20
      val normalized = 1 - rawFitness / 20 // 1 -> 1, 20 -> 0
      normalized
    }
  }

  // Single-point crossover over the raw byte arrays.
  override def mate(x: Array[Byte], y: Array[Byte]): Array[Byte] = Crossover.onePointCrossoverBytes(x,y, rand)

  // Random {0,1,?} string of the same length as the target.
  override def randomElement(rand: Random): Array[Byte] = BaldwinBitString.randomBaldwinString(target.length ,rand)

  // Constant distance: every pair of genes is considered equally far apart.
  override def metric(): Metric[Array[Byte]] = new Metric[Array[Byte]] {
    override def distance(x: Array[Byte], y: Array[Byte]): Double = 1
  }

  /** Renders a gene as a string of '0' / '1' / '?' characters. */
  override def show(gene: Array[Byte]): String = {
    gene.iterator.map {
      case BaldwinBit.Zero => '0'
      case BaldwinBit.One => '1'
      case BaldwinBit.QuestionMark => '?'
    }.mkString
  }

  override def showScientific: Boolean = false

  // NOTE: mutates the input array in place (one random position re-rolled) and returns it.
  override def mutate(a: Array[Byte]): Array[Byte] = {
    val index = rand.nextInt(a.length)
    a(index) = BaldwinBit.genBaldwinBit(rand)
    a
  }

  override def hash(gene: Array[Byte]): Int = util.Arrays.hashCode(gene)
}
| NightRa/AILab | Genetic/src/main/scala/genetic/baldwin/GeneticBaldwin.scala | Scala | apache-2.0 | 1,658 |
package de.tu_berlin.formic.server
import akka.Done
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.{Authorization, BasicHttpCredentials}
import akka.http.scaladsl.model.ws.{Message, TextMessage, WebSocketRequest}
import akka.http.scaladsl.model.{StatusCodes, Uri}
import akka.http.scaladsl.server.Directives._
import akka.stream.scaladsl.{Flow, Keep, Sink, SinkQueueWithCancel, Source, SourceQueueWithComplete}
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.testkit.TestKit
import de.tu_berlin.formic.common.datastructure.{OperationContext, ServerDataStructureProvider}
import de.tu_berlin.formic.common.message._
import de.tu_berlin.formic.common.{ClientId, DataStructureInstanceId, OperationId}
import de.tu_berlin.formic.datastructure.linear.server.{LinearServerDataStructureProvider, StringDataStructureFactory}
import de.tu_berlin.formic.datastructure.linear.LinearInsertOperation
import de.tu_berlin.formic.datastructure.linear.server.StringDataStructureFactory
import org.scalatest.{BeforeAndAfterAll, Matchers, OneInstancePerTest, WordSpecLike}
import upickle.default._
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.language.postfixOps
import scala.util.{Failure, Success}
/**
* @author Ronny Bräunlich
*/
/**
 * End-to-end test: two websocket clients connect to a running FormicServer, jointly
 * edit one shared linear (string) data structure, and the final converged state is
 * verified via an UpdateRequest.
 * NOTE(review): class name carries a typo ("Intergration"); renaming would require
 * renaming the file as well.
 */
class OperationsIntergrationTest extends TestKit(ActorSystem("OperationsIntergrationTest"))
  with WordSpecLike
  with Matchers
  with OneInstancePerTest
  with BeforeAndAfterAll {

  // Server under test, wired with the linear (string) data structure provider.
  val formicServer = new FormicServer with ServerDataStructures {
    override val dataStructureProvider: Set[ServerDataStructureProvider] = Set(LinearServerDataStructureProvider())
  }

  implicit val writer = formicServer.jsonProtocol.writer
  implicit val reader = formicServer.jsonProtocol.reader

  // Minimal route: authenticate via basic-auth username and upgrade to a websocket.
  val testRoute = path("formic") {
    extractCredentials {
      creds =>
        get {
          handleWebSocketMessages(formicServer.newUserProxy(creds.get.asInstanceOf[BasicHttpCredentials].username)
          (formicServer.system, formicServer.materializer))
        }
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
    server.terminate()
    system.terminate()
  }

  val server = new ServerThread(formicServer, testRoute)

  "Formic server" must {
    "allow two users to work on a linear structure together" in {
      server.setDaemon(true)
      server.start()
      // Give the server time to bind before clients connect.
      Thread.sleep(6000)
      implicit val materializer = ActorMaterializer()
      import system.dispatcher
      val user1Id = ClientId("foo")
      val user2Id = ClientId("bar")
      val (user1Incoming, user1Outgoing) = connectUser(user1Id.id)
      val (user2Incoming, user2Outgoing) = connectUser(user2Id.id)

      val dataStructureInstanceId: DataStructureInstanceId = createStringDataStructureInstance(user1Id, user2Id, user1Incoming, user1Outgoing, user2Incoming, user2Outgoing)

      applyOperations(user1Id, user2Id, user1Incoming, user1Outgoing, user2Incoming, user2Outgoing, dataStructureInstanceId)

      // Final readback: both users' inserts must have converged to the same order.
      user2Outgoing.offer(TextMessage(write(UpdateRequest(user2Id, dataStructureInstanceId))))
      val finalResponse = user2Incoming.pull()
      Await.ready(finalResponse, 3 seconds)
      finalResponse.value.get match {
        case Success(m) =>
          val text = m.get.asTextMessage.getStrictText
          val readMsg = read[FormicMessage](text)
          readMsg.asInstanceOf[UpdateResponse].dataStructureInstanceId should equal(dataStructureInstanceId)
          readMsg.asInstanceOf[UpdateResponse].dataStructure should equal(StringDataStructureFactory.name)
          readMsg.asInstanceOf[UpdateResponse].data should equal("[\\"3\\",\\"2\\",\\"1\\",\\"c\\",\\"b\\",\\"a\\"]")
        //the lastOperationId is unimportant here
        case Failure(ex) => fail(ex)
      }
      user1Outgoing.complete()
      user2Outgoing.complete()
      println("Shutting down OperationsIntergrationTest server")
      server.terminate()
    }
  }

  // Pulls the next websocket message and asserts it deserializes to `formicMessage`.
  def verifyEqual(message: Future[Option[Message]], formicMessage: FormicMessage)(implicit ec: ExecutionContext) = {
    val ready = Await.ready(message, 3 seconds)
    ready.value.get match {
      case Success(m) =>
        val text = m.get.asTextMessage.getStrictText
        read[FormicMessage](text) should equal(formicMessage)
      case Failure(ex) => fail(ex)
    }
  }

  // Sends interleaved inserts from both users and checks acknowledgements plus the
  // operational-transformation result each peer receives.
  def applyOperations(user1Id: ClientId, user2Id: ClientId, user1Incoming: SinkQueueWithCancel[Message], user1Outgoing: SourceQueueWithComplete[Message], user2Incoming: SinkQueueWithCancel[Message], user2Outgoing: SourceQueueWithComplete[Message], dataStructureInstanceId: DataStructureInstanceId)(implicit ec: ExecutionContext) = {
    //let both users send operations in parallel
    //because the id of u1 is greater than u2 (f > b), it should have precedence
    //user 2
    val u2op1 = LinearInsertOperation(0, 'a', OperationId(), OperationContext(List.empty), user2Id)
    val u2Msg1 = OperationMessage(user2Id, dataStructureInstanceId, StringDataStructureFactory.name, List(u2op1))
    val u2op2 = LinearInsertOperation(0, 'b', OperationId(), OperationContext(List(u2op1.id)), user2Id)
    val u2Msg2 = OperationMessage(user2Id, dataStructureInstanceId, StringDataStructureFactory.name, List(u2op2))
    val u2op3 = LinearInsertOperation(0, 'c', OperationId(), OperationContext(List(u2op2.id)), user2Id)
    val u2Msg3 = OperationMessage(user2Id, dataStructureInstanceId, StringDataStructureFactory.name, List(u2op3))
    //user 1
    val u1op1 = LinearInsertOperation(0, '1', OperationId(), OperationContext(List.empty), user1Id)
    val u1Msg1 = OperationMessage(user1Id, dataStructureInstanceId, StringDataStructureFactory.name, List(u1op1))
    val u1op2 = LinearInsertOperation(0, '2', OperationId(), OperationContext(List(u1op1.id)), user1Id)
    val u1Msg2 = OperationMessage(user1Id, dataStructureInstanceId, StringDataStructureFactory.name, List(u1op2))
    val u1op3 = LinearInsertOperation(0, '3', OperationId(), OperationContext(List(u1op2.id)), user1Id)
    val u1Msg3 = OperationMessage(user1Id, dataStructureInstanceId, StringDataStructureFactory.name, List(u1op3))

    user2Outgoing.offer(TextMessage(write(u2Msg1)))
    user2Outgoing.offer(TextMessage(write(u2Msg2)))
    user2Outgoing.offer(TextMessage(write(u2Msg3)))
    // 3 acks for u2
    verifyEqual(user2Incoming.pull(), u2Msg1)
    verifyEqual(user2Incoming.pull(), u2Msg2)
    verifyEqual(user2Incoming.pull(), u2Msg3)
    //3 incoming for u1
    verifyEqual(user1Incoming.pull(), u2Msg1)
    verifyEqual(user1Incoming.pull(), u2Msg2)
    verifyEqual(user1Incoming.pull(), u2Msg3)

    user1Outgoing.offer(TextMessage(write(u1Msg1)))
    user1Outgoing.offer(TextMessage(write(u1Msg2)))
    user1Outgoing.offer(TextMessage(write(u1Msg3)))
    // 3 acks for u1
    // u1's first op arrives after u2's three ops, so its context is rewritten by the
    // server to sit on top of u2op3 (operational transformation).
    val transformedu1op1 = LinearInsertOperation(u1op1.index, u1op1.o, u1op1.id, OperationContext(List(u2op3.id)), user1Id)
    val transformedu1Msg1 = OperationMessage(user1Id, u1Msg1.dataStructureInstanceId, u1Msg1.dataStructure, List(transformedu1op1))
    verifyEqual(user1Incoming.pull(), transformedu1Msg1)
    verifyEqual(user1Incoming.pull(), u1Msg2)
    verifyEqual(user1Incoming.pull(), u1Msg3)
    //3 incoming for u2
    verifyEqual(user2Incoming.pull(), transformedu1Msg1)
    verifyEqual(user2Incoming.pull(), u1Msg2)
    verifyEqual(user2Incoming.pull(), u1Msg3)
  }

  // User 1 creates the shared string instance, user 2 subscribes to it; returns its id.
  def createStringDataStructureInstance(user1Id: ClientId, user2Id: ClientId, user1Incoming: SinkQueueWithCancel[Message], user1Outgoing: SourceQueueWithComplete[Message], user2Incoming: SinkQueueWithCancel[Message], user2Outgoing: SourceQueueWithComplete[Message])(implicit executionContext: ExecutionContext): DataStructureInstanceId = {
    val dataStructureInstanceId = DataStructureInstanceId()
    user1Outgoing.offer(TextMessage(write(CreateRequest(user1Id, dataStructureInstanceId, StringDataStructureFactory.name))))
    val incomingCreateResponse = user1Incoming.pull()
    Await.ready(incomingCreateResponse, 3 seconds)
    incomingCreateResponse.value.get match {
      case Success(m) =>
        val text = m.get.asTextMessage.getStrictText
        read[FormicMessage](text) should equal(CreateResponse(dataStructureInstanceId))
      case Failure(ex) => fail(ex)
    }
    user2Outgoing.offer(TextMessage(write(UpdateRequest(user2Id, dataStructureInstanceId))))
    val incomingUpdateResponse = user2Incoming.pull()
    //can't start sending operation messages before the client is subscribed to the data type instance
    Await.ready(incomingUpdateResponse, 5 seconds)
    incomingUpdateResponse.value.get match {
      case Success(m) =>
        val text = m.get.asTextMessage.getStrictText
        read[FormicMessage](text) should equal(UpdateResponse(dataStructureInstanceId, StringDataStructureFactory.name, "[]", Option.empty))
      case Failure(ex) => fail(ex)
    }
    dataStructureInstanceId
  }

  // Opens a websocket to the test server authenticated as `username` and returns the
  // (incoming sink queue, outgoing source queue) pair for that connection.
  def connectUser(username: String)(implicit materializer: ActorMaterializer, executionContext: ExecutionContext): (SinkQueueWithCancel[Message], SourceQueueWithComplete[Message]) = {
    val sink: Sink[Message, SinkQueueWithCancel[Message]] = Sink.queue()
    val source = Source.queue[Message](10, OverflowStrategy.fail)
    val flow = Flow.fromSinkAndSourceMat(sink, source)(Keep.both)
    // upgradeResponse is a Future[WebSocketUpgradeResponse] that
    // completes or fails when the connection succeeds or fails
    val serverPort = server.binding.localAddress.getPort
    val (upgradeResponse, sinkAndSource) =
      Http().singleWebSocketRequest(
        WebSocketRequest(
          Uri(s"ws://0.0.0.0:$serverPort/formic"),
          List(Authorization(BasicHttpCredentials(username, "")))
        ),
        flow
      )

    val connected = upgradeResponse.map { upgrade =>
      if (upgrade.response.status == StatusCodes.SwitchingProtocols) {
        Done
      } else {
        throw new RuntimeException(s"Connection failed: ${upgrade.response.status}")
      }
    }
    val result = Await.ready(connected, 6 seconds)
    result.value.get match {
      case Success(_) => sinkAndSource
      case Failure(ex) => throw ex
    }
  }
}
| rbraeunlich/formic | server/src/test/scala/de/tu_berlin/formic/server/OperationsIntergrationTest.scala | Scala | apache-2.0 | 10,177 |
package dhg.ccg.parse.scg.mcmc
import dhg.util._
import scala.annotation.tailrec
import scala.collection.mutable.{ Map => MMap }
import scala.collection.mutable.{ Set => MSet }
import dhg.ccg.cat._
import dhg.ccg.parse._
import dhg.ccg.parse.pcfg._
import dhg.ccg.parse.pcfg.mcmc._
import dhg.ccg.tagdict._
import scalaz._
import Scalaz._
/**
 * Extends [[PcfgGuideChartProdFinder]] with extraction of the left/right context
 * tag sets of each category occurring in a guide chart.
 */
trait ScgGuideChartProdFinder extends PcfgGuideChartProdFinder {
  /** For each category, the set of tags that can appear immediately to its left. */
  def lctxs(gc: CfgGuideChart)(se: StartEndTags[Cat]): Map[Cat, Set[Cat]]
  /** For each category, the set of tags that can appear immediately to its right. */
  def rctxs(gc: CfgGuideChart)(se: StartEndTags[Cat]): Map[Cat, Set[Cat]]
}
/**
 * Default [[ScgGuideChartProdFinder]]: delegates all PCFG production extraction to the
 * wrapped [[PcfgGuideChartProdFinder]] and derives left/right context tag sets from the
 * chart's supertag sets padded with the start/end sentinel tags.
 */
class SimpleScgGuideChartProdFinder(
  pcfgGuideChartProdFinder: PcfgGuideChartProdFinder)
    extends ScgGuideChartProdFinder {

  def roots(gc: CfgGuideChart): Set[Cat] = pcfgGuideChartProdFinder.roots(gc)
  def prods(gc: CfgGuideChart): Map[Cat, Set[Prod]] = pcfgGuideChartProdFinder.prods(gc)
  def binys(gc: CfgGuideChart): Map[Cat, Set[BinaryProd]] = pcfgGuideChartProdFinder.binys(gc)
  def unrys(gc: CfgGuideChart): Map[Cat, Set[UnaryProd]] = pcfgGuideChartProdFinder.unrys(gc)
  def terms(gc: CfgGuideChart): Map[Cat, Set[TermProd]] = pcfgGuideChartProdFinder.terms(gc)

  /** Left context of a span (i, j) is the tag set at position i. */
  def lctxs(gc: CfgGuideChart)(se: StartEndTags[Cat]): Map[Cat, Set[Cat]] =
    contextTags(gc, se)((i, j) => i)

  /** Right context of a span (i, j) is the tag set at position j + 1. */
  def rctxs(gc: CfgGuideChart)(se: StartEndTags[Cat]): Map[Cat, Set[Cat]] =
    contextTags(gc, se)((i, j) => j + 1)

  /**
   * Shared implementation of lctxs/rctxs (previously duplicated): pads the supertag
   * sets with start/end sentinels, maps every chart cell's categories to the tag set
   * at the index chosen by `pick`, and unions the per-cell maps, dropping empty sets.
   */
  private def contextTags(gc: CfgGuideChart, se: StartEndTags[Cat])(pick: (Int, Int) => Int): Map[Cat, Set[Cat]] = {
    val tagSets = Set(se.startTag) +: gc.supertagSets :+ Set(se.endTag)
    gc.bottomUpNodes.mapt { (i, j, cell) =>
      cell.mapVals(_ => tagSets(pick(i, j)))
    }.fold(Map.empty[Cat, Set[Cat]])(_ |+| _).filter(_._2.nonEmpty)
  }
}
| dhgarrette/2015-ccg-parsing | src/main/scala/dhg/ccg/parse/scg/mcmc/ScgGuideChartUtil.scala | Scala | apache-2.0 | 1,749 |
package rugloom.rug
import play.api.libs.functional.Variant
import rugloom.util.test.Test
/**
* RugLoom - Explorative analysis pipeline prototype
* Created by oliverr on 8/6/2015.
*/
// Companion object; currently holds no members.
object Rug {
}

/**
 * Abstraction over a genotype dataset: exposes sample and variation counts/iterators
 * plus genotype lookups, all wrapped in [[Pipe]] steps of the analysis pipeline.
 */
trait Rug {
  /** Number of samples in the dataset. */
  def nSamps: Pipe[Int]
  /** Iterator over sample identifiers. */
  def samps: Pipe[Iterator[String]]
  /** Number of variations in the dataset. */
  def nVaris: Pipe[Int]
  /** Iterator over all variations. */
  def varis: Pipe[Iterator[Variation]]
  /** All genotypes of one sample. */
  def sampGenos(samp: String): Pipe[Iterator[Genotype]]
  /** All (sample, genotype) pairs for one variation. */
  def variGenos(vari: Variation): Pipe[Iterator[(String, Genotype)]]
  /** The genotype of one sample at one variation, if present. */
  def genotype(samp: String, vari: Variation): Pipe[Option[Genotype]]
  /** A view of the dataset restricted to variations passing `filter`. */
  def filterVaris(filter: Test[Variation]): Rug
  /** Per-variation statistics. */
  def variStats: Pipe[Iterator[VariStats]]
}
| curoli/rugloom-client | app/rugloom/rug/Rug.scala | Scala | mit | 638 |
package demy.mllib.test
import org.scalatest._
// Base class for FlatSpec-style unit tests sharing one Spark session.
class UnitTest() extends FlatSpec {
  // Shared session accessor — presumably a SparkSession provided by SharedSpark; confirm there.
  def getSpark = SharedSpark.getSpark
}

// Same shared-session accessor for helper classes that are not specs themselves.
class UnitTestVars {
  def getSpark = SharedSpark.getSpark
}
| forchard-epi/demy | mllib/src/test/scala/UnitTest.scala | Scala | bsd-3-clause | 191 |
package models
import com.google.inject.Inject
import scala.concurrent.Future
/** Repository facade over the beers table of the injected application database. */
class BeersRepository @Inject()(db: AppDatabase) {

  /** All beers matching the given style. */
  def getByStyle(style: String): Future[Seq[Beer]] = db.beers.getByStyle(style)

  /** Every beer in the database. */
  def all: Future[Seq[Beer]] = {
    db.beers.all()
  }
}
| outworkers/phantom-activator-template | app/models/BeersRepository.scala | Scala | apache-2.0 | 272 |
/*
* Copyright 2020 ABSA Group Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package za.co.absa.spline.persistence
import org.apache.commons.lang3.StringUtils
/** Helpers for rendering an AQL query inside an error-log message. */
object LogMessageUtils {

  // Visual delimiter framing the query in the log output.
  private val separatorLine = "=" * 80

  /**
   * Formats `queryString` for logging. When full-query logging is enabled the whole
   * trimmed query is framed; otherwise only a middle excerpt is shown, surrounded by
   * "..." markers.
   */
  def createQueryLogMessage(queryString: String): String = {
    if (ArangoRepoConfig.Database.LogFullQueryOnError) {
      s"""
         |$separatorLine
         |${queryString.trim}
         |$separatorLine"""
        .stripMargin
    } else {
      // NOTE(review): `fifth` uses the UNtrimmed length but truncates the trimmed string;
      // StringUtils.truncate(str, offset, maxWidth) here keeps a fifth-length slice
      // starting at the two-fifths mark — confirm the argument order is intended.
      val fifth = queryString.length / 5
      val truncatedQuery = StringUtils.truncate(queryString.trim, fifth * 2, fifth)
      s"""
         |$separatorLine
         |...
         |${truncatedQuery.trim}
         |...
         |$separatorLine"""
        .stripMargin
    }
  }
}
| AbsaOSS/spline | persistence/src/main/scala/za/co/absa/spline/persistence/LogMessageUtils.scala | Scala | apache-2.0 | 1,281 |
import sbt._
import Keys._
import PlayProject._
// Legacy sbt build definition using the (pre-0.13 style) Play sbt plugin.
object ApplicationBuild extends Build {

  val appName = "to-ban"
  val appVersion = "1.6.2"

  // Library dependencies; mockito is test-scoped only.
  val appDependencies = Seq(
    // Add your project dependencies here,
    "org.scalaz" %% "scalaz-core" % "6.0.4"
    ,"org.scala-tools.time" %% "time" % "0.5"
    ,"org.mockito" % "mockito-all" % "1.9.0" % "test"
  )

  val main = PlayProject(appName, appVersion, appDependencies, mainLang = SCALA).settings(
    // Add your own project settings here
  )
}
| nisshiee/to-ban | project/Build.scala | Scala | mit | 539 |
package lila.user
import org.specs2.mutable.Specification
class UserTest extends Specification {

  // Mirrors full signup validation: a candidate must satisfy all four username
  // regexes (prefix, suffix, allowed chars, letter requirement) to sign up.
  def canSignup(str: User.ID) =
    User.newUsernamePrefix.pattern.matcher(str).matches && User.newUsernameSuffix.pattern
      .matcher(str)
      .matches &&
      User.newUsernameChars.pattern.matcher(str).matches &&
      User.newUsernameLetters.pattern.matcher(str).matches

  "username regex" in {
    import User.couldBeUsername
    // Legacy accounts with digit-leading names may still log in...
    "bad prefix: can login" in {
      couldBeUsername("000") must beTrue
      couldBeUsername("0foo") must beTrue
      couldBeUsername("_foo") must beFalse
      couldBeUsername("-foo") must beFalse
    }
    // ...but new signups must start with a letter.
    "bad prefix: cannot signup" in {
      canSignup("000") must beFalse
      canSignup("0foo") must beFalse
      canSignup("_foo") must beFalse
      canSignup("-foo") must beFalse
    }
    "bad suffix" in {
      // Was duplicated verbatim; second copy removed.
      // TODO(review): the duplicate was likely meant to be couldBeUsername("a-") — confirm and add.
      couldBeUsername("a_") must beFalse
    }
    "too many consecutive non-letter chars" in {
      canSignup("a_-a") must beFalse
      canSignup("_-a") must beFalse
      canSignup("a__a") must beFalse
      canSignup("a_-a") must beFalse
      canSignup("a--a") must beFalse
      canSignup("a--_") must beFalse
      // Login-only check is laxer than signup for consecutive separators.
      couldBeUsername("a--a") must beTrue
      canSignup("a-a") must beTrue
      canSignup("a_a") must beTrue
      canSignup("a333") must beTrue
      canSignup("ksean222") must beTrue
      canSignup("Ksean222") must beTrue
    }
    "OK things" in {
      // A "g-foo" duplicate was removed here.
      // TODO(review): the duplicate was likely meant to be "g_foo" — confirm and add.
      couldBeUsername("g-foo") must beTrue
      couldBeUsername("G_FOo") must beTrue
    }
  }
}
| luanlv/lila | modules/user/src/test/UserTest.scala | Scala | mit | 1,615 |
package org.tribbloid.spikystuff.spike.spark
import org.apache.spark.SparkContext.IntAccumulatorParam
import org.apache.spark.{SparkConf, SparkContext}
import scala.concurrent.{Await, Future}
/**
 * Shared driver for the two entry points below, which previously duplicated
 * this logic verbatim and differed only in how the SparkConf was prepared.
 *
 * Runs a driver-side future that broadcasts a new AutoInsert once a second
 * while executor partitions concurrently read WorkerContainer.last into an
 * accumulator, demonstrating non-blocking iteration alongside a running job.
 */
private object NonBlockingIterationHarness {
  def run(conf: SparkConf): Unit = {
    val sc = new SparkContext(conf)
    val input = sc.parallelize(1 to 10)
    val acc = sc.accumulator(-1)(IntAccumulatorParam)

    import scala.concurrent.ExecutionContext.Implicits.global
    import scala.concurrent.duration._

    // Driver-side: broadcast a fresh AutoInsert every second, ten times.
    val future = Future {
      for (i <- 1 to 10) {
        sc.broadcast(new AutoInsert(i))
        Thread.sleep(1000)
        println(i)
      }
    }
    future.onSuccess { case u => println("success") }

    // Executor-side: repeatedly sample WorkerContainer.last while the driver
    // loop above is still broadcasting. (foreachPartition returns Unit, so the
    // previous `val rdd1 =` binding was dropped.)
    input.foreachPartition {
      values => {
        for (i <- 1 to 100) {
          acc += WorkerContainer.last
          println("feeding: " + WorkerContainer.last)
          Thread.sleep(100)
        }
      }
    }

    Await.result(future, 100 seconds)
    println(acc.value)
    sc.stop()
    println("finished")
  }
}

/** Local-mode entry point: configures master/spark-home itself. */
object TestNonBlockingIteration {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("TestNonBlockingIteration")
    // conf.setMaster("local-cluster[2,4,1000]") //no can do! spark cannot find jars
    conf.setMaster("local[8,3]")
    conf.setSparkHome(System.getenv("SPARK_HOME"))
    NonBlockingIterationHarness.run(conf)
  }
}

/** spark-submit entry point: master comes from the submit command line. */
object TestNonBlockingIterationSubmit {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("TestNonBlockingIteration")
    // conf.setMaster("local-cluster[2,4,1000]") //no can do! spark cannot find jars
    NonBlockingIterationHarness.run(conf)
  }
}
| tribbloid/spikystuff | core/src/main/scala/org/tribbloid/spikystuff/spike/spark/TestNonBlockingIteration.scala | Scala | mit | 2,304 |
/*
* ecalogic: a tool for performing energy consumption analysis.
*
* Copyright (c) 2013, J. Neutelings, D. Peelen, M. Schoolderman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* Neither the name of the Radboud University Nijmegen nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package nl.ru.cs.ecalogic
package parser
/** A lexical token. A token is itself a [[Pattern]] that matches exactly one
  * token: itself, by equality.
  *
  * @author Jascha Neutelings
  */
trait Token extends Pattern {
  def matches(token: Token) = this == token
}

/** A token whose textual value is fixed and known at definition time
  * (operators, punctuation, keywords). Rendered quoted in messages.
  *
  * @author Jascha Neutelings
  */
abstract class FixedToken(fixedValue: String) extends Token {
  override def toString = s"'$fixedValue'"
}

/** A fixed token that is a language keyword; its display text is the keyword
  * itself.
  *
  * @author Jascha Neutelings
  */
abstract class Keyword(val keyword: String) extends FixedToken(keyword)

/** A token carrying a payload of type `T` whose value varies per occurrence
  * (identifiers, literals). `name` labels the token class in messages.
  *
  * @author Jascha Neutelings
  */
abstract class VariableToken[T](name: String) extends Token {
  def value: T

  override def toString = s"'$value' ($name)"
}
| squell/ecalogic | src/main/scala/nl/ru/cs/ecalogic/parser/Token.scala | Scala | bsd-3-clause | 2,489 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import scala.collection.mutable.Map
import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.AccumulatorV2
class ExternalClusterManagerSuite extends SparkFunSuite with LocalSparkContext {
  test("launch of backend and scheduler") {
    // "myclusterManager" is the master URL recognized only by
    // DummyExternalClusterManager (declared below in this file).
    val conf = new SparkConf().setMaster("myclusterManager").setAppName("testcm")
    sc = new SparkContext(conf)
    // check if the scheduler components are created and initialized
    sc.schedulerBackend match {
      case dummy: DummySchedulerBackend => assert(dummy.initialized)
      case other => fail(s"wrong scheduler backend: ${other}")
    }
    sc.taskScheduler match {
      case dummy: DummyTaskScheduler => assert(dummy.initialized)
      case other => fail(s"wrong task scheduler: ${other}")
    }
  }
}
/**
 * Super basic ExternalClusterManager, just to verify ExternalClusterManagers can be configured.
 *
 * Note that if you want a special ClusterManager for tests, you are probably much more interested
 * in [[MockExternalClusterManager]] and the corresponding [[SchedulerIntegrationSuite]]
 */
private class DummyExternalClusterManager extends ExternalClusterManager {

  // Claims only the sentinel master URL used by the suite above.
  def canCreate(masterURL: String): Boolean = masterURL == "myclusterManager"

  def createTaskScheduler(sc: SparkContext,
      masterURL: String): TaskScheduler = new DummyTaskScheduler

  def createSchedulerBackend(sc: SparkContext,
      masterURL: String,
      scheduler: TaskScheduler): SchedulerBackend = new DummySchedulerBackend()

  // Flips the `initialized` flags that the test asserts on.
  def initialize(scheduler: TaskScheduler, backend: SchedulerBackend): Unit = {
    scheduler.asInstanceOf[DummyTaskScheduler].initialized = true
    backend.asInstanceOf[DummySchedulerBackend].initialized = true
  }

}

// Minimal no-op backend; only records that initialize() was called.
private class DummySchedulerBackend extends SchedulerBackend {
  var initialized = false
  def start(): Unit = {}
  def stop(): Unit = {}
  def reviveOffers(): Unit = {}
  def defaultParallelism(): Int = 1
  def maxNumConcurrentTasks(rp: ResourceProfile): Int = 0
}

// Minimal no-op task scheduler; only records that initialize() was called.
private class DummyTaskScheduler extends TaskScheduler {
  var initialized = false
  override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
  override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
  override def start(): Unit = {}
  override def stop(): Unit = {}
  override def submitTasks(taskSet: TaskSet): Unit = {}
  override def cancelTasks(stageId: Int, interruptThread: Boolean): Unit = {}
  override def killTaskAttempt(
    taskId: Long, interruptThread: Boolean, reason: String): Boolean = false
  override def killAllTaskAttempts(
    stageId: Int, interruptThread: Boolean, reason: String): Unit = {}
  override def notifyPartitionCompletion(stageId: Int, partitionId: Int): Unit = {}
  override def setDAGScheduler(dagScheduler: DAGScheduler): Unit = {}
  override def defaultParallelism(): Int = 2
  override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
  override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
  override def applicationAttemptId(): Option[String] = None
  def executorHeartbeatReceived(
      execId: String,
      accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
      blockManagerId: BlockManagerId,
      executorMetrics: Map[(Int, Int), ExecutorMetrics]): Boolean = true
  override def executorDecommission(
    executorId: String,
    decommissionInfo: ExecutorDecommissionInfo): Unit = {}
}
| dbtsai/spark | core/src/test/scala/org/apache/spark/scheduler/ExternalClusterManagerSuite.scala | Scala | apache-2.0 | 4,480 |
/*
* Copyright (c) 2014 - 2015 Contributor. All rights reserved.
*/
package org.scalaide.debug.internal.expression
package features
import org.junit.Test
import org.scalaide.debug.internal.expression.Names.Java
import org.scalaide.debug.internal.expression.TestValues.VariablesTestCase
trait AssignmentTest {
  self: BaseIntegrationTest =>

  /**
   * Assigns each of `values` to the expression `on` in the debugged process and
   * verifies, after every assignment, that evaluating `on` yields that value
   * with the expected type `tpe`. The pre-test value is restored in a finally
   * block so subsequent tests observe unchanged state.
   */
  protected def testAssignment(on: String, tpe: String, values: String*) = {
    val (oldValue, oldType) = runCode(on)
    try {
      values.foreach { value =>
        val (resultValue, _) = runCode(value)
        runCode(s"$on = $value")
        eval(code = on, expectedValue = resultValue, expectedType = tpe)
      }
    } finally {
      // set old value for next tests
      // String results may come back without surrounding quotes; re-quote them
      // so the restoring assignment is a valid string literal.
      def haveQuotes(s: String) = s.startsWith("\\"") && s.endsWith("\\"")
      if (oldType == Java.String && !haveQuotes(oldValue)) runCode(s"""$on = "$oldValue"""")
      else runCode(s"$on = $oldValue")
    }
  }
}
// Integration tests for assigning to vars (fields, locals, setters) in the
// expression evaluator, using the shared testAssignment helper above.
class VarsTest extends BaseIntegrationTest(VarsTest) with AssignmentTest {

  @Test
  def testVariableAssignment(): Unit =
    testAssignment("state.int", Java.primitives.int, values = "1", "2", "3")

  @Test
  def testLocalVariableAssignment(): Unit =
    testAssignment("localString", Java.String, values = s("1"), s("2"), s("3"))

  // TODO - O-8559 - This fails with 'InvalidStackFrameException' when you try to assign to local boxed primitive :(
  @Test(expected = classOf[UnsupportedFeature])
  def testLocalVariableAssignmentForBoxedPrimitives(): Unit =
    testAssignment("localBoxedInt", Java.boxed.Integer,
      values = "new java.lang.Integer(1)",
      "new java.lang.Integer(2)",
      "new java.lang.Integer(3)")

  @Test
  def testLocalVariableAssignmentForPrimitives(): Unit =
    testAssignment("localInt", Java.primitives.int, values = "1", "2", "3")

  @Test
  def testFieldVariableAssignmentWithImplicitThis(): Unit =
    testAssignment("fieldInt", Java.primitives.int, values = "1", "2", "3")

  @Test
  def testFieldVariableAssignmentWithExplicitThis(): Unit =
    testAssignment("this.fieldInt", Java.primitives.int, values = "1", "2", "3")

  @Test
  def testAssignmentWithSameVariableOnLhsAndRhs(): Unit =
    testAssignment("fieldInt", Java.primitives.int, "fieldInt + 1")

  // Starts from a known fieldInt of 1, so the evaluated result must be 2.
  @Test
  def testAssignmentWithTmpVariable(): Unit = {
    runCode("val tmp = fieldInt + 1; fieldInt = tmp")
    eval(code = "fieldInt", expectedValue = "2", expectedType = Java.primitives.int)
  }

  @Test
  def testAssignmentWithVariableOnRhs(): Unit =
    testAssignment("fieldInt", Java.primitives.int, "state.int + 1")

  @Test
  def testStringToStringAssignment(): Unit =
    testAssignment("fieldString", Java.String, "anotherStringField")

  // Exercises the generated setter method directly rather than `=` syntax.
  @Test
  def testSetterMethod(): Unit = {
    runCode(s"state.int_=(1)")
    eval("state.int", 1, Java.primitives.int)
    runCode(s"state.int_=(123)")
    eval("state.int", 123, Java.primitives.int)
  }

  @Test
  def testVariableInExpression(): Unit =
    eval("var a = 1; a = 2; a", 2, Java.primitives.int)
}

object VarsTest extends BaseIntegrationTestCompanion(VariablesTestCase)
| Kwestor/scala-ide | org.scala-ide.sdt.debug.expression.tests/src/org/scalaide/debug/internal/expression/features/VarsTest.scala | Scala | bsd-3-clause | 3,074 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle
import java.io._
import com.google.common.io.ByteStreams
import org.apache.spark.{SparkConf, SparkEnv}
import org.apache.spark.internal.Logging
import org.apache.spark.io.NioBufferedFileInputStream
import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer}
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.shuffle.IndexShuffleBlockResolver.NOOP_REDUCE_ID
import org.apache.spark.storage._
import org.apache.spark.util.Utils
/**
 * Create and maintain the shuffle blocks' mapping between logic block and physical file location.
 * Data of shuffle blocks from the same map task are stored in a single consolidated data file.
 * The offsets of the data blocks in the data file are stored in a separate index file.
 *
 * We use the name of the shuffle data's shuffleBlockId with reduce ID set to 0 and add ".data"
 * as the filename postfix for data file, and ".index" as the filename postfix for index file.
 *
 */
// Note: Changes to the format in this file should be kept in sync with
// org.apache.spark.network.shuffle.ExternalShuffleBlockResolver#getSortBasedShuffleBlockData().
private[spark] class IndexShuffleBlockResolver(
    conf: SparkConf,
    _blockManager: BlockManager = null)
  extends ShuffleBlockResolver
  with Logging {

  // Lazily resolved so tests can inject a BlockManager; otherwise falls back
  // to the one registered in SparkEnv.
  private lazy val blockManager = Option(_blockManager).getOrElse(SparkEnv.get.blockManager)

  private val transportConf = SparkTransportConf.fromSparkConf(conf, "shuffle")

  /** Consolidated data file holding all reduce partitions written by one map task. */
  def getDataFile(shuffleId: Int, mapId: Int): File = {
    blockManager.diskBlockManager.getFile(ShuffleDataBlockId(shuffleId, mapId, NOOP_REDUCE_ID))
  }

  /** Index file holding the per-partition offsets for one map task's data file. */
  private def getIndexFile(shuffleId: Int, mapId: Int): File = {
    blockManager.diskBlockManager.getFile(ShuffleIndexBlockId(shuffleId, mapId, NOOP_REDUCE_ID))
  }

  /**
   * Remove data file and index file that contain the output data from one map.
   * Failures to delete are logged as warnings, not raised.
   * */
  def removeDataByMap(shuffleId: Int, mapId: Int): Unit = {
    var file = getDataFile(shuffleId, mapId)
    if (file.exists()) {
      if (!file.delete()) {
        logWarning(s"Error deleting data ${file.getPath()}")
      }
    }

    file = getIndexFile(shuffleId, mapId)
    if (file.exists()) {
      if (!file.delete()) {
        logWarning(s"Error deleting index ${file.getPath()}")
      }
    }
  }

  /**
   * Check whether the given index and data files match each other.
   * If so, return the partition lengths in the data file. Otherwise return null.
   */
  private def checkIndexAndDataFile(index: File, data: File, blocks: Int): Array[Long] = {
    // the index file should have `block + 1` longs as offset.
    // (8 bytes per long; offsets bracket each of the `blocks` partitions.)
    if (index.length() != (blocks + 1) * 8) {
      return null
    }
    val lengths = new Array[Long](blocks)
    // Read the lengths of blocks
    val in = try {
      new DataInputStream(new NioBufferedFileInputStream(index))
    } catch {
      case e: IOException =>
        return null
    }
    try {
      // Convert the offsets into lengths of each block
      var offset = in.readLong()
      if (offset != 0L) {
        // First offset must be zero for a well-formed index file.
        return null
      }
      var i = 0
      while (i < blocks) {
        val off = in.readLong()
        lengths(i) = off - offset
        offset = off
        i += 1
      }
    } catch {
      case e: IOException =>
        return null
    } finally {
      in.close()
    }

    // the size of data file should match with index file
    if (data.length() == lengths.sum) {
      lengths
    } else {
      null
    }
  }

  /**
   * Write an index file with the offsets of each block, plus a final offset at the end for the
   * end of the output file. This will be used by getBlockData to figure out where each block
   * begins and ends.
   *
   * It will commit the data and index file as an atomic operation, use the existing ones, or
   * replace them with new ones.
   *
   * Note: the `lengths` will be updated to match the existing index file if use the existing ones.
   * */
  def writeIndexFileAndCommit(
      shuffleId: Int,
      mapId: Int,
      lengths: Array[Long],
      dataTmp: File): Unit = {
    val indexFile = getIndexFile(shuffleId, mapId)
    val indexTmp = Utils.tempFileWith(indexFile)
    try {
      val out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(indexTmp)))
      Utils.tryWithSafeFinally {
        // We take in lengths of each block, need to convert it to offsets.
        var offset = 0L
        out.writeLong(offset)
        for (length <- lengths) {
          offset += length
          out.writeLong(offset)
        }
      } {
        out.close()
      }

      val dataFile = getDataFile(shuffleId, mapId)
      // There is only one IndexShuffleBlockResolver per executor, this synchronization make sure
      // the following check and rename are atomic.
      synchronized {
        val existingLengths = checkIndexAndDataFile(indexFile, dataFile, lengths.length)
        if (existingLengths != null) {
          // Another attempt for the same task has already written our map outputs successfully,
          // so just use the existing partition lengths and delete our temporary map outputs.
          System.arraycopy(existingLengths, 0, lengths, 0, lengths.length)
          if (dataTmp != null && dataTmp.exists()) {
            dataTmp.delete()
          }
          indexTmp.delete()
        } else {
          // This is the first successful attempt in writing the map outputs for this task,
          // so override any existing index and data files with the ones we wrote.
          if (indexFile.exists()) {
            indexFile.delete()
          }
          if (dataFile.exists()) {
            dataFile.delete()
          }
          if (!indexTmp.renameTo(indexFile)) {
            throw new IOException("fail to rename file " + indexTmp + " to " + indexFile)
          }
          if (dataTmp != null && dataTmp.exists() && !dataTmp.renameTo(dataFile)) {
            throw new IOException("fail to rename file " + dataTmp + " to " + dataFile)
          }
        }
      }
    } finally {
      // The temp index survives only if it was successfully renamed above.
      if (indexTmp.exists() && !indexTmp.delete()) {
        logError(s"Failed to delete temporary index file at ${indexTmp.getAbsolutePath}")
      }
    }
  }

  override def getBlockData(blockId: ShuffleBlockId): ManagedBuffer = {
    // The block is actually going to be a range of a single map output file for this map, so
    // find out the consolidated file, then the offset within that from our index
    val indexFile = getIndexFile(blockId.shuffleId, blockId.mapId)

    val in = new DataInputStream(new FileInputStream(indexFile))
    try {
      // Skip to this reduce partition's offset entry (8 bytes per long).
      ByteStreams.skipFully(in, blockId.reduceId * 8)
      val offset = in.readLong()
      val nextOffset = in.readLong()
      new FileSegmentManagedBuffer(
        transportConf,
        getDataFile(blockId.shuffleId, blockId.mapId),
        offset,
        nextOffset - offset)
    } finally {
      in.close()
    }
  }

  override def stop(): Unit = {}
}
}
private[spark] object IndexShuffleBlockResolver {
  // No-op reduce ID used in interactions with disk store.
  // The disk store currently expects puts to relate to a (map, reduce) pair, but in the sort
  // shuffle outputs for several reduces are glommed into a single file.
  val NOOP_REDUCE_ID = 0
}
| sh-cho/cshSpark | shuffle/IndexShuffleBlockResolver.scala | Scala | apache-2.0 | 8,081 |
package org.json4s
package ext
// Runs the shared JodaTime serializer spec against the Jackson-backed
// json4s implementation.
class JacksonJodaTimeSerializerSpec extends JodaTimeSerializerSpec("Jackson") {
  val s: Serialization = jackson.Serialization
  val m: JsonMethods[_] = jackson.JsonMethods
}
| json4s/json4s | tests/src/test/scala/org/json4s/ext/JacksonJodaTimeSerializerSpec.scala | Scala | apache-2.0 | 207 |
package ch.epfl.performanceNetwork.gitInterface
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.runtime.ZippedTraversable3.zippedTraversable3ToTraversable
import org.eclipse.jgit.api.ListBranchCommand.ListMode
import org.eclipse.jgit.revwalk.RevCommit
/**
* @author Thibault Urien
*
*/
object NetworkDownloader {

  /**
   * Loads the repository at `workingDir + repoDir` (cloning from `repoUrl` if
   * needed via RepoData) and builds the printers describing its commit network.
   */
  def apply(repoUrl: String, workingDir: String, repoDir: String) = {
    /*
     * Assigns a vertical lane index to every commit: commit i claims lane i
     * along its whole first-parent chain, but never overwrites a lane already
     * claimed by an earlier (lower-index) commit.
     */
    def uncoilNetwork(commits: Seq[RevCommit]): Seq[Int] = {
      var laneByCommit = Map[RevCommit, Int]()

      @tailrec
      def claimFirstParentChain(commit: RevCommit, lane: Int): Unit =
        if (!laneByCommit.contains(commit)) {
          laneByCommit += commit -> lane
          commit.getParents.headOption match {
            case Some(parent) => claimFirstParentChain(parent, lane)
            case None         =>
          }
        }

      for ((commit, lane) <- commits.zipWithIndex) claimFirstParentChain(commit, lane)
      // Any commit somehow left unvisited defaults to lane 0.
      val laneOrDefault = laneByCommit.withDefault(_ => 0)
      commits map laneOrDefault
    }

    val git = RepoData.loadRepo(repoUrl, workingDir + repoDir)
    val commits = git.log().call().asScala.toSeq
    val indexed = commits.zipWithIndex
    val indexByName = indexed.map { case (c, i) => c.getName -> i }(collection.breakOut): Map[String, Int]
    // One (parentIndex, childIndex) edge per parent link.
    val edges = indexed.flatMap {
      case (commit, idx) =>
        commit.getParents.map(parent => (indexByName(parent.getName), idx))
    }
    val lanes = uncoilNetwork(commits)
    (
      new CommitPrinter(
        (commits, lanes).zipped.toList),
      new EdgePrinter(edges))
  }
}
package com.bwsw.commitlog.filesystem
object FilePathManager {
  // Extension of commit-log data files.
  val DATAEXTENSION = ".dat"
  // Extension of the MD5 checksum companion files.
  val MD5EXTENSION = ".md5"
}
package scalaopts
import scala.volatile
import scala.collection.mutable
/**
 * Pairs the map of parsed option values with the errors produced while
 * parsing the command line.
 */
case class CommandLineOptionResults(val results: CommandLineOptionResultsMap, val errors: CommandLineOptionParseErrors)
//class CommandLineOptionResults[TKey >: CommandLineOptionResultKey, +TValue >: CommandLineOptionResultValue](
// private[this] val results: CommandLineOptionResultsMap,
// @volatile private[this] var invalidOptions: mutable.ListBuffer[String] = new mutable.ListBuffer[String]
//) {
//
// private[scalaopts] def invalidOption(name: String): Unit = invalidOptions += name
//
// def apply(key: TKey): TValue = results(key.asInstanceOf[CommandLineOptionResultKey])
// def getOrElse[T >: TValue](key: TKey, default: => T): TValue = results.get(key.asInstanceOf[CommandLineOptionResultKey]) match {
// case Some(v) => v
// case None => default.asInstanceOf[CommandLineOptionResultValue]
// }
//
// def updated[T >: TValue](key: TKey, value: T) =
// new CommandLineOptionResults(results.updated(key.asInstanceOf[CommandLineOptionResultKey], value).asInstanceOf[CommandLineOptionResultsMap], invalidOptions)
//}
| davidhoyt/scalaopts | src/main/scala/scalaopts/CommandLineOptionResults.scala | Scala | apache-2.0 | 1,124 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager
import java.util.Properties
import java.util.concurrent.{LinkedBlockingQueue, TimeUnit, ThreadPoolExecutor}
import akka.actor.{ActorPath, ActorSystem, Props}
import akka.util.Timeout
import com.typesafe.config.{ConfigFactory, Config}
import kafka.manager.ActorModel._
import org.slf4j.{LoggerFactory, Logger}
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import scala.reflect.ClassTag
import scala.util.{Success, Failure, Try}
/**
* @author hiral
*/
// Aggregated topic view: per-topic identity, consumer mapping, pending
// deletions and in-flight reassignments for one cluster.
case class TopicListExtended(list: IndexedSeq[(String, Option[TopicIdentity])],
                             topicToConsumerMap: Map[String, Iterable[String]],
                             deleteSet: Set[String],
                             underReassignments: IndexedSeq[String],
                             clusterContext: ClusterContext)
// Aggregated broker view with per-broker and combined metrics.
case class BrokerListExtended(list: IndexedSeq[BrokerIdentity],
                              metrics: Map[Int,BrokerMetrics],
                              combinedMetric: Option[BrokerMetrics],
                              clusterContext: ClusterContext)
case class ConsumerListExtended(list: IndexedSeq[(String, Option[ConsumerIdentity])], clusterContext: ClusterContext)
case class LogkafkaListExtended(list: IndexedSeq[(String, Option[LogkafkaIdentity])], deleteSet: Set[String])

// Error surfaced to API callers; carries only a message string.
case class ApiError(msg: String)
object ApiError {
  private[this] val log : Logger = LoggerFactory.getLogger(classOf[ApiError])

  // Implicit conversion: any Throwable becomes an ApiError (logged here).
  // NOTE(review): implicit Throwable=>ApiError is broad; kept for existing call sites.
  implicit def fromThrowable(t: Throwable) : ApiError = {
    log.error(s"error : ${t.getMessage}", t)
    ApiError(t.getMessage)
  }

  // Implicit conversion from an actor-level error response, logging any
  // attached throwable before wrapping the message.
  implicit def from(actorError: ActorErrorResponse): ApiError = {
    actorError.throwableOption.foreach { t =>
      log.error(s"Actor error : ${actorError.msg}", t)
    }
    ApiError(actorError.msg)
  }
}
object KafkaManager {
  // Keys looked up in the supplied (Typesafe) config.
  val BaseZkPath = "kafka-manager.base-zk-path"
  val PinnedDispatchName = "kafka-manager.pinned-dispatcher-name"
  val ZkHosts = "kafka-manager.zkhosts"
  val BrokerViewUpdateSeconds = "kafka-manager.broker-view-update-seconds"
  val KafkaManagerUpdateSeconds = "kafka-manager.kafka-manager-update-seconds"
  val DeleteClusterUpdateSeconds = "kafka-manager.delete-cluster-update-seconds"
  val DeletionBatchSize = "kafka-manager.deletion-batch-size"
  val MaxQueueSize = "kafka-manager.max-queue-size"
  val ThreadPoolSize = "kafka-manager.thread-pool-size"
  val MutexTimeoutMillis = "kafka-manager.mutex-timeout-millis"
  val StartDelayMillis = "kafka-manager.start-delay-millis"
  val ApiTimeoutMillis = "kafka-manager.api-timeout-millis"
  val ClusterActorsAskTimeoutMillis = "kafka-manager.cluster-actors-ask-timeout-millis"
  val PartitionOffsetCacheTimeoutSecs = "kafka-manager.partition-offset-cache-timeout-secs"
  val SimpleConsumerSocketTimeoutMillis = "kafka-manager.simple-consumer-socket-timeout-millis"

  // Fallback config merged under the user-supplied config; every key above
  // has a default here (all values as strings, parsed on read).
  val DefaultConfig: Config = {
    val defaults: Map[String, _ <: AnyRef] = Map(
      BaseZkPath -> KafkaManagerActor.ZkRoot,
      PinnedDispatchName -> "pinned-dispatcher",
      BrokerViewUpdateSeconds -> "10",
      KafkaManagerUpdateSeconds -> "10",
      DeleteClusterUpdateSeconds -> "10",
      DeletionBatchSize -> "2",
      MaxQueueSize -> "100",
      ThreadPoolSize -> "2",
      MutexTimeoutMillis -> "4000",
      StartDelayMillis -> "1000",
      ApiTimeoutMillis -> "5000",
      ClusterActorsAskTimeoutMillis -> "2000",
      PartitionOffsetCacheTimeoutSecs -> "5",
      SimpleConsumerSocketTimeoutMillis -> "10000"
    )
    import scala.collection.JavaConverters._
    ConfigFactory.parseMap(defaults.asJava)
  }
}
import KafkaManager._
import akka.pattern._
import scalaz.{-\\/, \\/, \\/-}
class KafkaManager(akkaConfig: Config)
{
  // Actor system hosting the kafka-manager actor hierarchy.
  private[this] val system = ActorSystem("kafka-manager-system", akkaConfig)

  // User config with KafkaManager.DefaultConfig as fallback for missing keys.
  private[this] val configWithDefaults = akkaConfig.withFallback(DefaultConfig)

  // Typed actor configuration materialized from the (defaulted) settings.
  private[this] val kafkaManagerConfig = {
    val curatorConfig = CuratorConfig(configWithDefaults.getString(ZkHosts))
    KafkaManagerActorConfig(
      curatorConfig = curatorConfig,
      baseZkPath = configWithDefaults.getString(BaseZkPath),
      pinnedDispatcherName = configWithDefaults.getString(PinnedDispatchName),
      brokerViewUpdatePeriod = FiniteDuration(configWithDefaults.getInt(BrokerViewUpdateSeconds), SECONDS),
      startDelayMillis = configWithDefaults.getLong(StartDelayMillis),
      threadPoolSize = configWithDefaults.getInt(ThreadPoolSize),
      mutexTimeoutMillis = configWithDefaults.getInt(MutexTimeoutMillis),
      maxQueueSize = configWithDefaults.getInt(MaxQueueSize),
      kafkaManagerUpdatePeriod = FiniteDuration(configWithDefaults.getInt(KafkaManagerUpdateSeconds), SECONDS),
      deleteClusterUpdatePeriod = FiniteDuration(configWithDefaults.getInt(DeleteClusterUpdateSeconds), SECONDS),
      deletionBatchSize = configWithDefaults.getInt(DeletionBatchSize),
      clusterActorsAskTimeoutMillis = configWithDefaults.getInt(ClusterActorsAskTimeoutMillis),
      partitionOffsetCacheTimeoutSecs = configWithDefaults.getInt(PartitionOffsetCacheTimeoutSecs),
      simpleConsumerSocketTimeoutMillis = configWithDefaults.getInt(SimpleConsumerSocketTimeoutMillis)
    )
  }

  private[this] val props = Props(classOf[KafkaManagerActor], kafkaManagerConfig)

  // Path (not ActorRef) of the root actor; resolved per call via actorSelection.
  private[this] val kafkaManagerActor: ActorPath = system.actorOf(props, "kafka-manager").path

  // Bounded fixed-size pool backing all API futures; work beyond maxQueueSize
  // is rejected by the LinkedBlockingQueue capacity.
  private[this] val apiExecutor = new ThreadPoolExecutor(
    kafkaManagerConfig.threadPoolSize,
    kafkaManagerConfig.threadPoolSize,
    0L,
    TimeUnit.MILLISECONDS,
    new LinkedBlockingQueue[Runnable](kafkaManagerConfig.maxQueueSize)
  )

  private[this] val apiExecutionContext = ExecutionContext.fromExecutor(apiExecutor)

  // Ask timeout applied to every request sent to the kafka-manager actor.
  private[this] implicit val apiTimeout: Timeout = FiniteDuration(
    configWithDefaults.getInt(ApiTimeoutMillis),
    MILLISECONDS
  )
  /**
   * Asks the kafka-manager actor with `msg` and applies the synchronous `fn`
   * to a response of type `Output`. Actor error responses, throwables raised
   * by `fn`, and ask failures all surface as a left ApiError.
   */
  private[this] def tryWithKafkaManagerActor[Input, Output, FOutput](msg: Input)
                                                                    (fn: Output => FOutput)
                                                                    (implicit tag: ClassTag[Output]): Future[ApiError \\/ FOutput] =
  {
    implicit val ec = apiExecutionContext
    system.actorSelection(kafkaManagerActor).ask(msg).map {
      case err: ActorErrorResponse => -\\/(ApiError.from(err))
      case o: Output =>
        // fn may throw; capture it in Try rather than failing the Future.
        Try {
          fn(o)
        } match {
          case Failure(t) => -\\/(ApiError.fromThrowable(t))
          case Success(foutput) => \\/-(foutput)
        }
    }.recover { case t: Throwable =>
      -\\/(ApiError.fromThrowable(t))
    }
  }
  /**
   * Like tryWithKafkaManagerActor, but `fn` is itself asynchronous and already
   * produces a disjunction; the futures are flattened via flatMap.
   */
  private[this] def withKafkaManagerActor[Input, Output, FOutput](msg: Input)
                                                                 (fn: Output => Future[ApiError \\/ FOutput])
                                                                 (implicit tag: ClassTag[Output]): Future[ApiError \\/ FOutput] =
  {
    implicit val ec = apiExecutionContext
    system.actorSelection(kafkaManagerActor).ask(msg).flatMap {
      case err: ActorErrorResponse => Future.successful(-\\/(ApiError.from(err)))
      case o: Output =>
        fn(o)
    }.recover { case t: Throwable =>
      -\\/(ApiError.fromThrowable(t))
    }
  }
private[this] def toDisjunction[T](t: Try[T]): ApiError \\/ T = {
t match {
case Failure(th) =>
-\\/(th)
case Success(tInst) =>
\\/-(tInst)
}
}
  /**
   * Fire-and-forget shutdown: notifies the kafka-manager actor, then stops
   * the actor system and the API thread pool.
   */
  def shutdown(): Unit = {
    implicit val ec = apiExecutionContext
    system.actorSelection(kafkaManagerActor).tell(KMShutdown, system.deadLetters)
    system.shutdown()
    apiExecutor.shutdown()
  }
//--------------------Commands--------------------------
  /**
   * Registers a new cluster configuration with the kafka-manager actor.
   * Returns left ApiError if the command fails (result.result.get rethrows
   * inside the handler and is captured by tryWithKafkaManagerActor).
   */
  def addCluster(clusterName: String,
                 version: String,
                 zkHosts: String,
                 jmxEnabled: Boolean,
                 filterConsumers: Boolean,
                 logkafkaEnabled: Boolean = false,
                 activeOffsetCacheEnabled: Boolean = false): Future[ApiError \\/
    Unit] =
  {
    val cc = ClusterConfig(
      clusterName,
      version,
      zkHosts,
      jmxEnabled = jmxEnabled,
      filterConsumers = filterConsumers,
      logkafkaEnabled = logkafkaEnabled,
      activeOffsetCacheEnabled = activeOffsetCacheEnabled)
    tryWithKafkaManagerActor(KMAddCluster(cc)) { result: KMCommandResult =>
      result.result.get
    }
  }

  /** Same as addCluster but replaces the configuration of an existing cluster. */
  def updateCluster(clusterName: String, version: String, zkHosts: String, jmxEnabled: Boolean, filterConsumers: Boolean, logkafkaEnabled: Boolean = false, activeOffsetCacheEnabled: Boolean = false): Future[ApiError \\/
    Unit] =
  {
    val cc = ClusterConfig(
      clusterName,
      version,
      zkHosts,
      jmxEnabled = jmxEnabled,
      filterConsumers = filterConsumers,
      logkafkaEnabled = logkafkaEnabled,
      activeOffsetCacheEnabled = activeOffsetCacheEnabled)
    tryWithKafkaManagerActor(KMUpdateCluster(cc)) { result: KMCommandResult =>
      result.result.get
    }
  }
def disableCluster(clusterName: String): Future[ApiError \\/ Unit] = {
tryWithKafkaManagerActor(KMDisableCluster(clusterName)) { result: KMCommandResult =>
result.result.get
}
}
def enableCluster(clusterName: String): Future[ApiError \\/ Unit] = {
tryWithKafkaManagerActor(KMEnableCluster(clusterName)) { result: KMCommandResult =>
result.result.get
}
}
def deleteCluster(clusterName: String): Future[ApiError \\/ Unit] = {
tryWithKafkaManagerActor(KMDeleteCluster(clusterName)) { result: KMCommandResult =>
result.result.get
}
}
  /** Triggers a preferred-replica leader election for the given topics. */
  def runPreferredLeaderElection(clusterName: String, topics: Set[String]): Future[ApiError \\/ ClusterContext] = {
    implicit val ec = apiExecutionContext
    withKafkaManagerActor(
      KMClusterCommandRequest(
        clusterName,
        CMRunPreferredLeaderElection(topics)
      )
    ) { result: Future[CMCommandResult] =>
      result.map(cmr => toDisjunction(cmr.result))
    }
  }
  /** Applies explicit partition-to-broker assignments.
    *
    * @param assignments per topic, a list of (partition, replica broker ids).
    * @return unit, or ALL per-assignment failures accumulated as ApiErrors
    *         (not just the first one).
    */
  def manualPartitionAssignments( clusterName: String,
                                  assignments: List[(String, List[(Int, List[Int])])]) = {
    implicit val ec = apiExecutionContext
    val results = tryWithKafkaManagerActor(
      KMClusterCommandRequest (
        clusterName,
        CMManualPartitionAssignments(assignments)
      )
    ) { result: CMCommandResults =>
      val errors = result.result.collect { case Failure(t) => ApiError(t.getMessage)}
      if (errors.isEmpty)
        \\/-({})
      else
        -\\/(errors)
    }
    // The actor-level failure (left ApiError) is normalized into the same
    // IndexedSeq[ApiError] shape as the accumulated command errors.
    results.map {
      case -\\/(e) => -\\/(IndexedSeq(e))
      case \\/-(lst) => lst
    }
  }
  /** Asks the cluster manager to generate balanced partition assignments for
    * `topics` across `brokers`.  Errors are accumulated per topic.
    */
  def generatePartitionAssignments(
                                    clusterName: String,
                                    topics: Set[String],
                                    brokers: Seq[Int]
                                    ): Future[IndexedSeq[ApiError] \\/ Unit] =
  {
    val results = tryWithKafkaManagerActor(
      KMClusterCommandRequest(
        clusterName,
        CMGeneratePartitionAssignments(topics, brokers)
      )
    ) { result: CMCommandResults =>
      val errors = result.result.collect { case Failure(t) => ApiError(t.getMessage)}
      if (errors.isEmpty)
        \\/-({})
      else
        -\\/(errors)
    }
    implicit val ec = apiExecutionContext
    // Normalize a single actor-level ApiError into the accumulated-errors shape.
    results.map {
      case -\\/(e) => -\\/(IndexedSeq(e))
      case \\/-(lst) => lst
    }
  }
  /** Executes the (previously generated) reassignment plan for `topics`.
    * The inner result is itself a Future, hence flatMap (vs map above).
    */
  def runReassignPartitions(clusterName: String, topics: Set[String]): Future[IndexedSeq[ApiError] \\/ Unit] = {
    implicit val ec = apiExecutionContext
    val results = tryWithKafkaManagerActor(KMClusterCommandRequest(clusterName, CMRunReassignPartition(topics))) {
      resultFuture: Future[CMCommandResults] =>
        resultFuture map { result =>
          val errors = result.result.collect { case Failure(t) => ApiError(t.getMessage)}
          if (errors.isEmpty)
            \\/-({})
          else
            -\\/(errors)
        }
    }
    results.flatMap {
      case \\/-(lst) => lst
      case -\\/(e) => Future.successful(-\\/(IndexedSeq(e)))
    }
  }
  /** Creates a topic with the given partition count, replication factor and
    * optional per-topic config overrides.
    */
  def createTopic(
                   clusterName: String,
                   topic: String,
                   partitions: Int,
                   replication: Int,
                   config: Properties = new Properties
                   ): Future[ApiError \\/ ClusterContext] =
  {
    implicit val ec = apiExecutionContext
    withKafkaManagerActor(KMClusterCommandRequest(clusterName, CMCreateTopic(topic, partitions, replication, config))) {
      result: Future[CMCommandResult] =>
        result.map(cmr => toDisjunction(cmr.result))
    }
  }
  /** Grows a topic to `partitions` partitions on the given brokers.
    *
    * First resolves the topic's current identity so the existing
    * partition -> replica layout can be passed along with the command;
    * `readVersion` guards against concurrent config changes (optimistic lock —
    * TODO confirm semantics in the command actor).
    */
  def addTopicPartitions(
                          clusterName: String,
                          topic: String,
                          brokers: Seq[Int],
                          partitions: Int,
                          readVersion: Int
                          ): Future[ApiError \\/ ClusterContext] =
  {
    implicit val ec = apiExecutionContext
    getTopicIdentity(clusterName, topic).flatMap { topicIdentityOrError =>
      topicIdentityOrError.fold(
      e => Future.successful(-\\/(e)), { ti =>
        val partitionReplicaList: Map[Int, Seq[Int]] = ti.partitionsIdentity.mapValues(_.replicas)
        withKafkaManagerActor(
          KMClusterCommandRequest(
            clusterName,
            CMAddTopicPartitions(topic, brokers, partitions, partitionReplicaList, readVersion)
          )
        ) {
          result: Future[CMCommandResult] =>
            result.map(cmr => toDisjunction(cmr.result))
        }
      }
      )
    }
  }
  /** Adds partitions to several topics in one command.
    *
    * Topics without a resolvable identity are silently filtered out before the
    * command is issued (only topics with known replica layouts are grown).
    */
  def addMultipleTopicsPartitions(
                          clusterName: String,
                          topics: Seq[String],
                          brokers: Seq[Int],
                          partitions: Int,
                          readVersions: Map[String, Int]
                          ): Future[ApiError \\/ ClusterContext] =
  {
    implicit val ec = apiExecutionContext
    getTopicListExtended(clusterName).flatMap { tleOrError =>
      tleOrError.fold(
      e => Future.successful(-\\/(e)), { tle =>
        // add partitions to only topics with topic identity
        val topicsAndReplicas = topicListSortedByNumPartitions(tle).filter(t => topics.contains(t._1) && t._2.nonEmpty).map{ case (t,i) => (t, i.get.partitionsIdentity.mapValues(_.replicas)) }
        withKafkaManagerActor(
          KMClusterCommandRequest(
            clusterName,
            CMAddMultipleTopicsPartitions(topicsAndReplicas, brokers, partitions, readVersions)
          )
        ) {
          result: Future[CMCommandResult] =>
            result.map(cmr => toDisjunction(cmr.result))
        }
      }
      )
    }
  }
  /** Replaces a topic's config overrides; `readVersion` guards against
    * concurrent modification.
    */
  def updateTopicConfig(
                         clusterName: String,
                         topic: String,
                         config: Properties,
                         readVersion: Int
                         ): Future[ApiError \\/ ClusterContext] =
  {
    implicit val ec = apiExecutionContext
    withKafkaManagerActor(
      KMClusterCommandRequest(
        clusterName,
        CMUpdateTopicConfig(topic, config, readVersion)
      )
    ) {
      result: Future[CMCommandResult] =>
        result.map(cmr => toDisjunction(cmr.result))
    }
  }
  /** Marks a topic for deletion in the cluster. */
  def deleteTopic(
                   clusterName: String,
                   topic: String
                   ): Future[ApiError \\/ ClusterContext] =
  {
    implicit val ec = apiExecutionContext
    withKafkaManagerActor(KMClusterCommandRequest(clusterName, CMDeleteTopic(topic))) {
      result: Future[CMCommandResult] =>
        result.map(cmr => toDisjunction(cmr.result))
    }
  }
  /** Registers a logkafka entry shipping `log_path` from `hostname`. */
  def createLogkafka(
                      clusterName: String,
                      hostname: String,
                      log_path: String,
                      config: Properties = new Properties
                      ): Future[ApiError \\/ ClusterContext] =
  {
    implicit val ec = apiExecutionContext
    withKafkaManagerActor(KMClusterCommandRequest(clusterName, CMCreateLogkafka(hostname, log_path, config))) {
      result: Future[CMCommandResult] =>
        result.map(cmr => toDisjunction(cmr.result))
    }
  }
  /** Updates the config of an existing logkafka entry. */
  def updateLogkafkaConfig(
                            clusterName: String,
                            hostname: String,
                            log_path: String,
                            config: Properties
                            ): Future[ApiError \\/ ClusterContext] =
  {
    implicit val ec = apiExecutionContext
    withKafkaManagerActor(
      KMClusterCommandRequest(
        clusterName,
        CMUpdateLogkafkaConfig(hostname, log_path, config)
      )
    ) {
      result: Future[CMCommandResult] =>
        result.map(cmr => toDisjunction(cmr.result))
    }
  }
  /** Removes a logkafka entry. */
  def deleteLogkafka(
                      clusterName: String,
                      hostname: String,
                      log_path: String
                      ): Future[ApiError \\/ ClusterContext] =
  {
    implicit val ec = apiExecutionContext
    withKafkaManagerActor(KMClusterCommandRequest(clusterName, CMDeleteLogkafka(hostname, log_path))) {
      result: Future[CMCommandResult] =>
        result.map(cmr => toDisjunction(cmr.result))
    }
  }
//--------------------Queries--------------------------
  /** Returns the stored configuration of the named cluster. */
  def getClusterConfig(clusterName: String): Future[ApiError \\/ ClusterConfig] = {
    tryWithKafkaManagerActor(KMGetClusterConfig(clusterName)) { result: KMClusterConfigResult =>
      result.result.get
    }
  }
  /** Returns the runtime context of the named cluster. */
  def getClusterContext(clusterName: String): Future[ApiError \\/ ClusterContext] = {
    tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, CMGetClusterContext))(
      identity[ClusterContext]
    )
  }
  /** Lists all clusters registered with kafka manager. */
  def getClusterList: Future[ApiError \\/ KMClusterList] = {
    tryWithKafkaManagerActor(KMGetAllClusters)(identity[KMClusterList])
  }
  /** Returns the cluster manager's view of the named cluster. */
  def getClusterView(clusterName: String): Future[ApiError \\/ CMView] = {
    tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, CMGetView))(identity[CMView])
  }
  /** Lists the topics known in the cluster. */
  def getTopicList(clusterName: String): Future[ApiError \\/ TopicList] = {
    tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, KSGetTopics))(identity[TopicList])
  }
  /** Topic list enriched with per-topic identity, topic->consumers mapping and
    * the topics currently under reassignment.
    *
    * The four actor queries are started before the for-comprehension, so they
    * run concurrently; the inner for over disjunctions short-circuits on the
    * first ApiError.
    */
  def getTopicListExtended(clusterName: String): Future[ApiError \\/ TopicListExtended] = {
    val futureTopicIdentities = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, BVGetTopicIdentities))(
      identity[Map[String, TopicIdentity]])
    val futureTopicList = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, KSGetTopics))(identity[TopicList])
    val futureTopicToConsumerMap = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, BVGetTopicConsumerMap))(
      identity[Map[String, Iterable[String]]])
    val futureTopicsReasgn = getTopicsUnderReassignment(clusterName)
    implicit val ec = apiExecutionContext
    for {
      errOrTi <- futureTopicIdentities
      errOrTl <- futureTopicList
      errOrTCm <- futureTopicToConsumerMap
      errOrRap <- futureTopicsReasgn
    } yield {
      for {
        ti <- errOrTi
        tl <- errOrTl
        tcm <- errOrTCm
        rap <- errOrRap
      } yield {
        TopicListExtended(tl.list.map(t => (t, ti.get(t))).sortBy(_._1), tcm, tl.deleteSet, rap, tl.clusterContext)
      }
    }
  }
  /** Consumer list paired with each consumer's identity, if one was resolved.
    * The two queries run concurrently (both started before the for).
    */
  def getConsumerListExtended(clusterName: String): Future[ApiError \\/ ConsumerListExtended] = {
    val futureConsumerIdentities = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, BVGetConsumerIdentities))(
      identity[Map[String, ConsumerIdentity]])
    val futureConsumerList = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, KSGetConsumers))(identity[ConsumerList])
    implicit val ec = apiExecutionContext
    for {
      errorOrCI <- futureConsumerIdentities
      errorOrCL <- futureConsumerList
    } yield {
      for {
        ci <- errorOrCI
        cl <- errorOrCL
      } yield {
        ConsumerListExtended(cl.list.map(c => (c, ci.get(c))), cl.clusterContext)
      }
    }
  }
  /** Names of topics with an in-flight partition reassignment.
    *
    * Empty when there is no reassignment record, or when the recorded
    * reassignment has an end time (i.e. already finished).
    */
  def getTopicsUnderReassignment(clusterName: String): Future[ApiError \\/ IndexedSeq[String]] = {
    val futureReassignments = getReassignPartitions(clusterName)
    implicit val ec = apiExecutionContext
    futureReassignments.map {
      case -\\/(e) => -\\/(e)
      case \\/-(rap) =>
        \\/-(rap.map { asgn =>
          asgn.endTime.map(_ => IndexedSeq()).getOrElse{
            asgn.partitionsToBeReassigned.map { case (t, s) => t.topic}.toSet.toIndexedSeq
          }
        }.getOrElse{IndexedSeq()})
    }
  }
  /** Broker list with per-broker metrics and their combined total.
    *
    * Metrics are best-effort: if the metrics query fails, the broker list is
    * still returned with an empty metrics map (failure deliberately swallowed
    * by the recover below).
    */
  def getBrokerList(clusterName: String): Future[ApiError \\/ BrokerListExtended] = {
    val futureBrokerList= tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, KSGetBrokers))(identity[BrokerList])
    val futureBrokerMetrics = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, BVGetBrokerMetrics))(identity[Map[Int,BrokerMetrics]])
    implicit val ec = apiExecutionContext
    futureBrokerList.flatMap[ApiError \\/ BrokerListExtended] { errOrBrokerList =>
      errOrBrokerList.fold ({
        err: ApiError => Future.successful(-\\/(err))
      }, { bl =>
        for {
          errOrbm <- futureBrokerMetrics.recover[ApiError \\/ Map[Int,BrokerMetrics]] { case t =>
            \\/-(Map.empty)
          }
        } yield {
          val bm = errOrbm.toOption.getOrElse(Map.empty)
          \\/-(
            BrokerListExtended(
              bl.list,
              bm,
              if(bm.isEmpty) None else Option(bm.values.foldLeft(BrokerMetrics.DEFAULT)((acc, m) => acc + m)),
              bl.clusterContext
            ))
        }
      })
    }
  }
def getBrokersView(clusterName: String): Future[\\/[ApiError, Map[Int, BVView]]] = {
implicit val ec = apiExecutionContext
tryWithKafkaManagerActor(
KMClusterQueryRequest(
clusterName,
BVGetViews
)
)(identity[Map[Int, BVView]])
}
  /** View for a single broker; ApiError on the left if the broker id is not
    * known in the cluster.
    */
  def getBrokerView(clusterName: String, brokerId: Int): Future[ApiError \\/ BVView] = {
    val futureView = tryWithKafkaManagerActor(
      KMClusterQueryRequest(
        clusterName,
        BVGetView(brokerId)
      )
    )(identity[Option[BVView]])
    implicit val ec = apiExecutionContext
    futureView.flatMap[ApiError \\/ BVView] { errOrView =>
      errOrView.fold(
      { err: ApiError =>
        Future.successful(-\\/[ApiError](err))
      }, { viewOption: Option[BVView] =>
        // None means the query succeeded but the broker does not exist.
        viewOption.fold {
          Future.successful[ApiError \\/ BVView](-\\/(ApiError(s"Broker not found $brokerId for cluster $clusterName")))
        } { view =>
          Future.successful(\\/-(view))
        }
      }
      )
    }
  }
  /** Resolves a topic's identity.  Distinguishes three failure modes: actor
    * error, unknown topic, and a failed identity computation (Try.Failure).
    */
  def getTopicIdentity(clusterName: String, topic: String): Future[ApiError \\/ TopicIdentity] = {
    val futureCMTopicIdentity = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, CMGetTopicIdentity(topic)))(
      identity[Option[CMTopicIdentity]]
    )
    implicit val ec = apiExecutionContext
    futureCMTopicIdentity.map[ApiError \\/ TopicIdentity] { errOrTD =>
      errOrTD.fold[ApiError \\/ TopicIdentity](
      { err: ApiError =>
        -\\/[ApiError](err)
      }, { tiOption: Option[CMTopicIdentity] =>
        tiOption.fold[ApiError \\/ TopicIdentity] {
          -\\/(ApiError(s"Topic not found $topic for cluster $clusterName"))
        } { cmTopicIdentity =>
          cmTopicIdentity.topicIdentity match {
            case scala.util.Failure(t) =>
              -\\/[ApiError](t)
            case scala.util.Success(ti) =>
              \\/-(ti)
          }
        }
      }
      )
    }
  }
  /** Consumers of a topic; None on any error OR when the topic is not in the
    * map (the two cases are deliberately collapsed here).
    */
  def getConsumersForTopic(clusterName: String, topic: String): Future[Option[Iterable[String]]] = {
    val futureTopicConsumerMap = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, BVGetTopicConsumerMap))(
      identity[Map[String, Iterable[String]]])
    implicit val ec = apiExecutionContext
    futureTopicConsumerMap.map[Option[Iterable[String]]] { errOrTCM =>
      errOrTCM.fold[Option[Iterable[String]]] (_ => None, _.get(topic))
    }
  }
  /** Resolves a consumer's identity; same three-way error handling as
    * getTopicIdentity above.
    */
  def getConsumerIdentity(clusterName: String, consumer: String): Future[ApiError \\/ ConsumerIdentity] = {
    val futureCMConsumerIdentity = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, CMGetConsumerIdentity(consumer)))(
      identity[Option[CMConsumerIdentity]]
    )
    implicit val ec = apiExecutionContext
    futureCMConsumerIdentity.map[ApiError \\/ ConsumerIdentity] { errOrCI =>
      errOrCI.fold[ApiError \\/ ConsumerIdentity](
      { err: ApiError =>
        -\\/[ApiError](err)
      }, { ciOption: Option[CMConsumerIdentity] =>
        ciOption.fold[ApiError \\/ ConsumerIdentity] {
          -\\/(ApiError(s"Consumer not found $consumer for cluster $clusterName"))
        } { cmConsumerIdentity =>
          cmConsumerIdentity.consumerIdentity match {
            case scala.util.Failure(c) =>
              -\\/[ApiError](c)
            case scala.util.Success(ci) =>
              \\/-(ci)
          }
        }
      }
      )
    }
  }
  /** State of `topic` as consumed by `consumer` (offsets etc. carried by
    * ConsumedTopicState); failed identity computations surface as ApiError.
    */
  def getConsumedTopicState(clusterName: String, consumer: String, topic: String): Future[ApiError \\/ ConsumedTopicState] = {
    val futureCMConsumedTopic = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, CMGetConsumedTopicState(consumer,topic)))(
      identity[CMConsumedTopic]
    )
    implicit val ec = apiExecutionContext
    futureCMConsumedTopic.map[ApiError \\/ ConsumedTopicState] { errOrCT =>
      errOrCT.fold[ApiError \\/ ConsumedTopicState](
      { err: ApiError =>
        -\\/[ApiError](err)
      }, { cmConsumedTopic: CMConsumedTopic =>
        cmConsumedTopic.ctIdentity match {
          case scala.util.Failure(c) =>
            -\\/[ApiError](c)
          case scala.util.Success(ci) =>
            \\/-(ci)
        }
      }
      )
    }
  }
  /** Aggregated broker metrics for a topic, if available. */
  def getTopicMetrics(clusterName: String, topic: String): Future[ApiError \\/ Option[BrokerMetrics]] = {
    tryWithKafkaManagerActor(
      KMClusterQueryRequest(
        clusterName,
        BVGetTopicMetrics(topic)
      )
    ) { brokerMetrics: Option[BrokerMetrics] =>
      brokerMetrics
    }
  }
  /** Most recent preferred-leader-election record, if any. */
  def getPreferredLeaderElection(clusterName: String): Future[ApiError \\/ Option[PreferredReplicaElection]] = {
    tryWithKafkaManagerActor(
      KMClusterQueryRequest(
        clusterName,
        KSGetPreferredLeaderElection
      )
    )(identity[Option[PreferredReplicaElection]])
  }
  /** Most recent partition-reassignment record, if any. */
  def getReassignPartitions(clusterName: String): Future[ApiError \\/ Option[ReassignPartitions]] = {
    tryWithKafkaManagerActor(
      KMClusterQueryRequest(
        clusterName,
        KSGetReassignPartition
      )
    )(identity[Option[ReassignPartitions]])
  }
def topicListSortedByNumPartitions(tle: TopicListExtended): Seq[(String, Option[TopicIdentity])] = {
def partition(tiOption: Option[TopicIdentity]): Int = {
tiOption match {
case Some(ti) => ti.partitions
case None => 0
}
}
val sortedByNumPartition = tle.list.sortWith{ (leftE, rightE) =>
partition(leftE._2) > partition(rightE._2)
}
sortedByNumPartition
}
  /** Hostnames with logkafka entries in the cluster. */
  def getLogkafkaHostnameList(clusterName: String): Future[ApiError \\/ LogkafkaHostnameList] = {
    tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, LKSGetLogkafkaHostnames))(identity[LogkafkaHostnameList])
  }
  /** Logkafka hostname list paired with each host's identity, if resolved.
    * Both queries are started before the for, so they run concurrently.
    */
  def getLogkafkaListExtended(clusterName: String): Future[ApiError \\/ LogkafkaListExtended] = {
    val futureLogkafkaIdentities = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, LKVGetLogkafkaIdentities))(identity[Map[String, LogkafkaIdentity]])
    val futureLogkafkaList = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, LKSGetLogkafkaHostnames))(identity[LogkafkaHostnameList])
    implicit val ec = apiExecutionContext
    for {
      errOrLi <- futureLogkafkaIdentities
      errOrLl <- futureLogkafkaList
    } yield {
      for {
        li <- errOrLi
        ll <- errOrLl
      } yield {
        LogkafkaListExtended(ll.list.map(l => (l, li.get(l))), ll.deleteSet)
      }
    }
  }
  /** Resolves the logkafka identity for one host; distinguishes actor error,
    * unknown host, and failed identity computation.
    */
  def getLogkafkaIdentity(clusterName: String, hostname: String): Future[ApiError \\/ LogkafkaIdentity] = {
    val futureCMLogkafkaIdentity = tryWithKafkaManagerActor(KMClusterQueryRequest(clusterName, CMGetLogkafkaIdentity(hostname)))(
      identity[Option[CMLogkafkaIdentity]]
    )
    implicit val ec = apiExecutionContext
    futureCMLogkafkaIdentity.map[ApiError \\/ LogkafkaIdentity] { errOrLI =>
      errOrLI.fold[ApiError \\/ LogkafkaIdentity](
      { err: ApiError =>
        -\\/[ApiError](err)
      }, { liOption: Option[CMLogkafkaIdentity] =>
        liOption.fold[ApiError \\/ LogkafkaIdentity] {
          -\\/(ApiError(s"Logkafka not found $hostname for cluster $clusterName"))
        } { cmLogkafkaIdentity =>
          cmLogkafkaIdentity.logkafkaIdentity match {
            case scala.util.Failure(l) =>
              -\\/[ApiError](l)
            case scala.util.Success(li) =>
              \\/-(li)
          }
        }
      }
      )
    }
  }
}
| jack6215/kafka-manager | app/kafka/manager/KafkaManager.scala | Scala | apache-2.0 | 28,769 |
package com.clarifi.reporting
package ermine
import Term.{ subTermEx, termVars }
import HasTermVars._
import scala.collection.immutable.List
/** One alternative of a (multi-clause) function binding: a list of argument
  * patterns and the body evaluated when they match.
  *
  * @param loc      source location of the alternative
  * @param patterns the clause's argument patterns; `arity` is their count
  * @param body     term evaluated when all patterns match
  */
case class Alt(loc: Loc, patterns: List[Pattern], body: Term) extends Located {
  def arity = patterns.length
  // Free term variables of the clause: those of the patterns and body, minus
  // the variables the patterns themselves bind.
  def vars: TermVars = (termVars(patterns) ++ termVars(body)) -- (for {
    p <- patterns
    v <- p.vars.toList
  } yield v.map(_.body))
  def typeVars: TypeVars = Type.typeVars(patterns) ++ Type.typeVars(body) // NB: does not return in order
  def allTypeVars: TypeVars = Type.allTypeVars(patterns) ++ Type.allTypeVars(body) // NB: does not return in order
  // Applies kind/type/term substitutions to patterns and body simultaneously.
  def subst(ks: PartialFunction[KindVar, Kind], ts: PartialFunction[TypeVar,Type], m: PartialFunction[TermVar, TermVar]) =
    Alt(loc, subTermEx(ks,ts,m,patterns), subTermEx(ks,ts,m,body))
  def close(implicit supply: Supply) = Alt(loc, patterns.map(_.close), body.close(supply)) // TODO: pattern closing is suspicious
}
/** Type-class instances letting [[Alt]] participate in the generic
  * term-variable and type-variable traversal machinery.
  */
object Alt {
  implicit def altHasTermVars: HasTermVars[Alt] = new HasTermVars[Alt] {
    def vars(alt: Alt) = alt.vars
    def sub(ks: PartialFunction[KindVar,Kind], ts: PartialFunction[TypeVar,Type], es: PartialFunction[TermVar,TermVar], alt: Alt) =
      alt.subst(ks, ts, es)
  }
  implicit def altHasTypeVars: HasTypeVars[Alt] = new HasTypeVars[Alt] {
    def vars(alt: Alt) = alt.typeVars
    def allVars(alt: Alt) = alt.allTypeVars
    def sub(ks: PartialFunction[KindVar,Kind], ts: PartialFunction[TypeVar,Type], alt: Alt) =
      alt.subst(ks, ts, Map())
  }
}
/** A binding of a term variable `v` to one or more pattern-match alternatives.
  * Sealed: the only concrete forms are [[ExplicitBinding]] (with a type
  * annotation) and [[ImplicitBinding]] (type to be inferred).
  */
sealed abstract class Binding extends Located {
  def v: TermVar
  def alts: List[Alt]
  // All alternatives of a binding share the same arity; the first one is
  // representative (0 when there are none).
  def arity = alts match {
    case List() => 0
    case a :: _ => a.arity
  }
  def subst(ks: PartialFunction[KindVar, Kind], ts: PartialFunction[TypeVar,Type], m: PartialFunction[TermVar, TermVar]): Binding
  def vars: TermVars
  def typeVars: TypeVars
  def allTypeVars: TypeVars
  def close(implicit supply: Supply): Binding
  def remember: Option[Int]
}
/** Generic variable-traversal instances for [[Binding]]; they delegate to the
  * concrete subclass via dynamic dispatch on `subst`/`vars`.
  */
object Binding {
  implicit def bindingHasTermVars: HasTermVars[Binding] = new HasTermVars[Binding] {
    def vars(b: Binding) = b.vars
    def sub(ks: PartialFunction[KindVar,Kind], ts: PartialFunction[TypeVar,Type], es: PartialFunction[TermVar,TermVar], b: Binding) = b.subst(ks,ts,es)
  }
  implicit def bindingHasTypeVars: HasTypeVars[Binding] = new HasTypeVars[Binding] {
    def vars(b: Binding) = b.typeVars
    def allVars(b: Binding) = b.allTypeVars
    def sub(ks: PartialFunction[KindVar,Kind], ts: PartialFunction[TypeVar,Type], b: Binding) = b.subst(ks,ts,Map())
  }
}
/** A binding carrying an explicit type signature `ty` (an [[Annot]]). */
case class ExplicitBinding(loc: Loc, v: TermVar, ty: Annot, alts: List[Alt], remember: Option[Int] = None) extends Binding {
  def subst(ks: PartialFunction[KindVar, Kind], ts: PartialFunction[TypeVar,Type], m: PartialFunction[TermVar,TermVar]): ExplicitBinding =
    ExplicitBinding(loc, m.lift(v).getOrElse(v.map(_.subst(ks, ts))), ty.subst(ks, ts), alts.map(_.subst(ks,ts,m)))
  def vars = Vars(v) ++ termVars(alts)
  def typeVars = /*Type.typeVars(v) ++ */ Type.typeVars(ty) ++ Type.typeVars(alts)
  def allTypeVars = Type.allTypeVars(ty) ++ Type.allTypeVars(alts)
  def close(implicit supply: Supply) = ExplicitBinding(loc, v, ty.close(supply), alts.map(_.close(supply)))
  // Drops the signature, turning this into a binding whose type is inferred.
  def forgetSignature: ImplicitBinding = ImplicitBinding(loc, v, alts, remember)
}
/** Variable-traversal instances for [[ExplicitBinding]]. */
object ExplicitBinding {
  implicit def explicitBindingHasTermVars: HasTermVars[ExplicitBinding] = new HasTermVars[ExplicitBinding] {
    def vars(b: ExplicitBinding) = b.vars
    def sub(ks: PartialFunction[KindVar,Kind], ts: PartialFunction[TypeVar,Type], es: PartialFunction[TermVar,TermVar], b: ExplicitBinding) = b.subst(ks,ts,es)
  }
  implicit def explicitBindingHasTypeVars: HasTypeVars[ExplicitBinding] = new HasTypeVars[ExplicitBinding] {
    def vars(b: ExplicitBinding) = b.typeVars
    def allVars(b: ExplicitBinding) = b.allTypeVars
    def sub(ks: PartialFunction[KindVar,Kind], ts: PartialFunction[TypeVar,Type], b: ExplicitBinding) = b.subst(ks,ts,Map())
  }
}
/** A binding without a type signature; its type will be inferred. */
case class ImplicitBinding(loc: Loc, v: TermVar, alts: List[Alt], remember: Option[Int] = None) extends Binding {
  def subst(ks: PartialFunction[KindVar, Kind], ts: PartialFunction[TypeVar,Type], m: PartialFunction[TermVar, TermVar]): ImplicitBinding =
    ImplicitBinding(loc, m.lift(v).getOrElse(v.map(_.subst(ks, ts))), alts.map(_.subst(ks,ts,m)))
  def vars = Vars(v) ++ termVars(alts)
  def typeVars = /* Type.typeVars(v) ++ */ Type.typeVars(alts)
  def allTypeVars = Type.allTypeVars(alts)
  def close(implicit supply: Supply) = ImplicitBinding(loc, v, alts.map(_.close(supply)))
}
/** Variable-traversal instances plus dependency analysis for groups of
  * implicit bindings.
  */
object ImplicitBinding {
  implicit def implicitBindingHasTermVars: HasTermVars[ImplicitBinding] = new HasTermVars[ImplicitBinding] {
    def vars(b: ImplicitBinding) = b.vars
    def sub(ks: PartialFunction[KindVar,Kind], ts: PartialFunction[TypeVar,Type], es: PartialFunction[TermVar,TermVar], b: ImplicitBinding) = b.subst(ks,ts,es)
  }
  implicit def implicitBindingHasTypeVars: HasTypeVars[ImplicitBinding] = new HasTypeVars[ImplicitBinding] {
    def vars(b: ImplicitBinding) = b.typeVars
    def allVars(b: ImplicitBinding) = b.allTypeVars
    def sub(ks: PartialFunction[KindVar,Kind], ts: PartialFunction[TypeVar,Type], b: ImplicitBinding) = b.subst(ks,ts,Map())
  }
  // perform strongly connected component analysis to infer better types
  def implicitBindingComponents(xs: List[ImplicitBinding]): List[List[ImplicitBinding]] = {
    // Map each binding's variable id back to its binding.
    val vm = xs.map(b => b.v.id -> b).toMap
    // Edges: binding i depends on binding j when i's alternatives mention j's
    // variable.  References outside this binding group are ignored.
    val sccs = SCC.tarjan(vm.keySet.toList) { i =>
      termVars(vm(i).alts).toList.collect {
        case v if vm.contains(v.id) => v.id
      }
    }
    // Tarjan yields components in reverse topological order; reverse so that
    // dependencies come before their dependents.
    sccs.reverse.map { xs => xs.toList.map(vm(_)) }
  }
}
| ermine-language/ermine-legacy | src/main/scala/com/clarifi/reporting/ermine/Binding.scala | Scala | bsd-2-clause | 5,634 |
package reactivemongo.api.commands
import reactivemongo.api.{ SerializationPack, Session }
/**
* Implements the [[https://docs.mongodb.com/manual/reference/command/insert/ insert]] command.
*/
@deprecated("Use the new insert operation", "0.16.0")
@deprecated("Use the new insert operation", "0.16.0")
trait InsertCommand[P <: SerializationPack] extends ImplicitCommandHelpers[P] {
  /**
   * The MongoDB `insert` command payload.
   *
   * Deliberately a plain class with hand-written case-class compatibility
   * shims (`productElement`, `unapply`, `tupled`), kept for binary
   * compatibility while `bypassDocumentValidation` was added.
   *
   * @param head the first mandatory document
   * @param tail maybe other documents
   */
  sealed class Insert(
    val head: pack.Document,
    val tail: Seq[pack.Document],
    val ordered: Boolean,
    val writeConcern: WriteConcern,
    val bypassDocumentValidation: Boolean) extends CollectionCommand with CommandWithResult[InsertResult] with Mongo26WriteCommand with Serializable with Product {
    // Legacy 4-argument constructor; bypassDocumentValidation defaults to false.
    @deprecated("Will be removed", "0.19.8")
    def this(
      head: pack.Document,
      tail: Seq[pack.Document],
      ordered: Boolean,
      writeConcern: WriteConcern) =
      this(head, tail, ordered, writeConcern, false)
    @deprecated("No longer a case class", "0.19.8")
    val productArity = 4
    // NOTE(review): any index > 3 (not only 4) returns bypassDocumentValidation
    // instead of throwing IndexOutOfBoundsException — pre-existing compat quirk.
    @deprecated("No longer a case class", "0.19.8")
    def productElement(n: Int): Any = n match {
      case 0 => head
      case 1 => tail
      case 2 => ordered
      case 3 => writeConcern
      case _ => bypassDocumentValidation
    }
    def canEqual(that: Any): Boolean = that match {
      case _: Insert => true
      case _         => false
    }
    // Equality/hash intentionally ignore bypassDocumentValidation until the
    // next major release (see TODO#1.1 below).
    private[commands] lazy val tupled =
      Tuple4(head, tail, ordered, writeConcern)
    // TODO#1.1: All fields after release
    override def equals(that: Any): Boolean = that match {
      case other: Insert =>
        other.tupled == this.tupled
      case _ => false
    }
    // TODO#1.1: All fields after release
    override def hashCode: Int = tupled.hashCode
  }
  object Insert {
    @deprecated("Use factory with bypassDocumentValidation", "0.19.8")
    def apply(
      head: pack.Document,
      tail: Seq[pack.Document],
      ordered: Boolean,
      writeConcern: WriteConcern): Insert =
      apply(head, tail, ordered, writeConcern, false)
    def apply(
      head: pack.Document,
      tail: Seq[pack.Document],
      ordered: Boolean,
      writeConcern: WriteConcern,
      bypassDocumentValidation: Boolean): Insert =
      new Insert(head, tail, ordered, writeConcern, bypassDocumentValidation)
    // Case-class-style extractor kept for source compatibility; exposes only
    // the original four fields.
    @deprecated("No longer a case class", "0.19.8")
    def unapply(that: Any): Option[(pack.Document, Seq[pack.Document], Boolean, WriteConcern)] = that match {
      case other: Insert => Option(other).map(_.tupled)
      case _ => None
    }
  }
  type InsertResult = DefaultWriteResult // for simplified imports
}
@deprecated("Will be removed", "0.19.8")
private[reactivemongo] object InsertCommand {
  // TODO#1.1: Remove when BSONInsertCommand is removed
  /** Builds the serializer turning a resolved `Insert` command into a wire
    * document, optionally tagging it with session/transaction fields.
    */
  def writer[P <: SerializationPack with Singleton](pack: P)(
    context: InsertCommand[pack.type]): Option[Session] => ResolvedCollectionCommand[context.Insert] => pack.Document = {
    val builder = pack.newBuilder
    val writeWriteConcern = CommandCodecs.writeWriteConcern(pack)
    val writeSession = CommandCodecs.writeSession(builder)
    { session: Option[Session] =>
      import builder.{ elementProducer => element }
      { insert =>
        import insert.command
        val documents = builder.array(command.head, command.tail)
        val ordered = builder.boolean(command.ordered)
        val elements = Seq.newBuilder[pack.ElementProducer]
        elements ++= Seq[pack.ElementProducer](
          element("insert", builder.string(insert.collection)),
          element("ordered", ordered),
          element("documents", documents))
        session.foreach { s =>
          elements ++= writeSession(s)
        }
        if (!session.exists(_.transaction.isSuccess)) {
          // writeConcern is not allowed within a multi-statement transaction
          // code=72
          elements += element(
            "writeConcern", writeWriteConcern(command.writeConcern))
        }
        builder.document(elements.result())
      }
    }
  }
}
| cchantep/ReactiveMongo | driver/src/main/scala/api/commands/InsertCommand.scala | Scala | apache-2.0 | 4,084 |
package edu.berkeley.nlp.coref
import edu.berkeley.nlp.futile.fig.basic.Indexer
/** Basic mention-pair coreference inferencer: a log-linear model over
  * antecedent choices, trained with a (loss-augmented) likelihood objective
  * and decoded left-to-right.
  */
class DocumentInferencerBasic extends DocumentInferencer {
  /** Zero-initialized weight vector, one weight per feature. */
  def getInitialWeightVector(featureIndexer: Indexer[String]): Array[Double] = Array.fill(featureIndexer.size())(0.0);
  /**
   * N.B. always returns a reference to the same matrix, so don't call twice in a row and
   * attempt to use the results of both computations
   */
  private def computeMarginals(docGraph: DocumentGraph,
                               gold: Boolean,
                               lossFcn: (CorefDoc, Int, Int) => Double,
                               pairwiseScorer: PairwiseScorer): Array[Array[Double]] = {
    computeMarginals(docGraph, gold, lossFcn, docGraph.featurizeIndexAndScoreNonPrunedUseCache(pairwiseScorer)._2)
  }
  // Per-mention normalized antecedent marginals.  When gold=true, probability
  // mass is restricted to gold antecedents (the loss term is then zero).
  private def computeMarginals(docGraph: DocumentGraph,
                               gold: Boolean,
                               lossFcn: (CorefDoc, Int, Int) => Double,
                               scoresChart: Array[Array[Double]]): Array[Array[Double]] = {
//    var marginals = new Array[Array[Double]](docGraph.doc.predMentions.size());
//    for (i <- 0 until marginals.size) {
//      marginals(i) = Array.fill(i+1)(Double.NegativeInfinity);
//    }
    val marginals = docGraph.cachedMarginalMatrix;
    for (i <- 0 until docGraph.size) {
      var normalizer = 0.0;
      // Restrict to gold antecedents if we're doing gold, but don't load the gold antecedents
      // if we're not.
      val goldAntecedents: Seq[Int] = if (gold) docGraph.getGoldAntecedentsUnderCurrentPruning(i) else null;
      for (j <- 0 to i) {
        // If this is a legal antecedent
        if (!docGraph.isPruned(i, j) && (!gold || goldAntecedents.contains(j))) {
          // N.B. Including lossFcn is okay even for gold because it should be zero
          val unnormalizedProb = Math.exp(scoresChart(i)(j) + lossFcn(docGraph.corefDoc, i, j));
          marginals(i)(j) = unnormalizedProb;
          normalizer += unnormalizedProb;
        } else {
          marginals(i)(j) = 0.0;
        }
      }
      for (j <- 0 to i) {
        marginals(i)(j) /= normalizer;
      }
    }
    marginals;
  }
  /** Log-likelihood of the gold antecedent sets under the current weights;
    * per-mention log-probabilities are floored at -30 to avoid -Infinity.
    */
  def computeLikelihood(docGraph: DocumentGraph,
                        pairwiseScorer: PairwiseScorer,
                        lossFcn: (CorefDoc, Int, Int) => Double): Double = {
    var likelihood = 0.0;
    val marginals = computeMarginals(docGraph, false, lossFcn, pairwiseScorer);
    for (i <- 0 until docGraph.size) {
      val goldAntecedents = docGraph.getGoldAntecedentsUnderCurrentPruning(i);
      var currProb = 0.0;
      for (j <- goldAntecedents) {
        currProb += marginals(i)(j);
      }
      var currLogProb = Math.log(currProb);
      if (currLogProb.isInfinite()) {
        currLogProb = -30;
      }
      likelihood += currLogProb;
    }
    likelihood;
  }
  /** Accumulates the gradient of the (unregularized) objective for one
    * document: gold-restricted expected features minus predicted expected
    * features.
    */
  def addUnregularizedStochasticGradient(docGraph: DocumentGraph,
                                         pairwiseScorer: PairwiseScorer,
                                         lossFcn: (CorefDoc, Int, Int) => Double,
                                         gradient: Array[Double]) = {
    val (featsChart, scoresChart) = docGraph.featurizeIndexAndScoreNonPrunedUseCache(pairwiseScorer);
    // N.B. Can't have pred marginals and gold marginals around at the same time because
    // they both live in the same cached matrix
    val predMarginals = this.computeMarginals(docGraph, false, lossFcn, scoresChart);
    for (i <- 0 until docGraph.size) {
      for (j <- 0 to i) {
        if (predMarginals(i)(j) > 1e-20) {
          addToGradient(featsChart(i)(j), -predMarginals(i)(j), gradient);
        }
      }
    }
    val goldMarginals = this.computeMarginals(docGraph, true, lossFcn, scoresChart);
    for (i <- 0 until docGraph.size) {
      for (j <- 0 to i) {
        if (goldMarginals(i)(j) > 1e-20) {
          addToGradient(featsChart(i)(j), goldMarginals(i)(j), gradient);
        }
      }
    }
  }
  // Adds scale * (indicator feature vector) into the dense gradient.
  private def addToGradient(feats: Seq[Int], scale: Double, gradient: Array[Double]) {
    var i = 0;
    while (i < feats.size) {
      val feat = feats(i);
      gradient(feat) += 1.0 * scale;
      i += 1;
    }
  }
  /** Decodes antecedent backpointers; "sum" marginalizes left-to-right,
    * otherwise takes the per-mention argmax.  Scores are exp-normalized in
    * place before decoding.
    */
  def viterbiDecode(docGraph: DocumentGraph, scorer: PairwiseScorer): Array[Int] = {
    val (featsChart, scoresChart) = docGraph.featurizeIndexAndScoreNonPrunedUseCache(scorer);
    if (Driver.decodeType == "sum") {
      val backptrs = Decoder.decodeLeftToRightMarginalize(docGraph, (idx: Int) => {
        val probs = scoresChart(idx);
        GUtil.expAndNormalizeiHard(probs);
        probs;
      });
      backptrs;
    } else {
      val backptrs = Decoder.decodeMax(docGraph, (idx: Int) => {
        val probs = scoresChart(idx);
        GUtil.expAndNormalizeiHard(probs);
        probs;
      });
      backptrs;
    }
  }
  def finishPrintStats() = {}
} | nate331/jbt-berkeley-coref-resolution | src/main/java/edu/berkeley/nlp/coref/DocumentInferencerBasic.scala | Scala | gpl-3.0 | 4,900 |
package org.bitcoins.server
import akka.http.scaladsl.model.ContentTypes
import akka.http.scaladsl.testkit.ScalatestRouteTest
import org.bitcoins.core.util.EnvUtil
import org.bitcoins.server.routes.{CommonRoutes, ServerCommand}
import org.bitcoins.testkit.BitcoinSTestAppConfig
import org.bitcoins.testkit.util.FileUtil
import org.bitcoins.testkit.util.FileUtil.withTempDir
import org.scalamock.scalatest.MockFactory
import org.scalatest.wordspec.AnyWordSpec
import java.nio.file.Files
/** Route tests for [[CommonRoutes]]: version reporting and datadir zipping. */
class CommonRoutesSpec
    extends AnyWordSpec
    with ScalatestRouteTest
    with MockFactory {
  implicit val conf: BitcoinSAppConfig =
    BitcoinSTestAppConfig.getSpvTestConfig()
  val commonRoutes = CommonRoutes(conf.baseDatadir)
  "CommonRoutes" should {
    "getversion" in {
      // EnvUtil.getVersion may be null outside a packaged build; the JSON
      // value is then the literal null, otherwise the quoted version string.
      val version =
        Option(EnvUtil.getVersion).map(v => "\\"" + v + "\\"").getOrElse("null")
      val expectedJson =
        ujson.read(s"""{"result":{"version":$version},"error":null}""")
      val route =
        commonRoutes.handleCommand(ServerCommand("getversion", ujson.Arr()))
      Post() ~> route ~> check {
        assert(contentType == ContentTypes.`application/json`)
        val actualJson = ujson.read(responseAs[String])
        assert(actualJson == expectedJson)
      }
    }
    "zipdatadir" in {
      withTempDir(getClass.getName) { dir =>
        val expectedJson =
          ujson.read(s"""{"result":null,"error":null}""")
        // Target path (including its parent directory) must not pre-exist;
        // the handler is expected to create it.
        val fileName = FileUtil.randomDirName
        val dirName = FileUtil.randomDirName
        val target = dir.resolve(dirName).resolve(fileName)
        assert(!Files.exists(target))
        assert(!Files.exists(target.getParent))
        val route =
          commonRoutes.handleCommand(
            ServerCommand("zipdatadir", ujson.Arr(target.toString)))
        Post() ~> route ~> check {
          assert(contentType == ContentTypes.`application/json`)
          val actualJson = ujson.read(responseAs[String])
          assert(actualJson == expectedJson)
          assert(Files.exists(target))
        }
      }
    }
  }
}
| bitcoin-s/bitcoin-s | app/server-test/src/test/scala/org/bitcoins/server/CommonRoutesSpec.scala | Scala | mit | 2,072 |
import java.io.File
import scalaxb.compiler.Config
import scalaxb.compiler.ConfigEntry._
import scalaxb.compiler.xsd.Driver
/** End-to-end test: generates scalaxb bindings for item.xsd with the
  * Dispatch `as.scalaxb` response handler enabled, then evaluates a snippet
  * that calls the local JAX-RS test service and unmarshals the response.
  */
object DispatchResponseAsScalaxbTest extends TestBase with JaxrsTestBase {
  override val module = new Driver // with Verbose
  def serviceImpl:RestService = new RestService()
  def serviceAddress: String = "dispatch-response-as-scalaxb"
  step {
    startServer
  }
  val packageName = "stockquote"
  val xsdFile = new File(s"integration/src/test/resources/item.xsd")
  // Code-generation config: place output under tmp, one dir per package,
  // and emit the Dispatch `as.scalaxb[A]` helper.
  val config = Config.default.update(PackageNames(Map(None -> Some(packageName)))).
    update(Outdir(tmp)).
    update(GeneratePackageDir).
    update(GenerateDispatchAs)
  lazy val generated = {
    module.process(xsdFile, config)
  }
  "dispatch-response-as-scalaxb service works" in {
    (List("""import stockquote._
          import scala.concurrent._, duration._, dispatch._""",
      s"""val request = url("http://localhost:$servicePort/$serviceAddress/item/GOOG")""",
      """val fresponse = Http(request > as.scalaxb[StoreItem])""",
      """val response = Await.result(fresponse, 5.seconds)""",
      """if (response != StoreItem(symbol = "GOOG", price = 42.0)) sys.error(response.toString)""",
      """true"""), generated) must evaluateTo(true,
      outdir = "./tmp", usecurrentcp = true)
  }
  step {
    stopServer
  }
} | Fayho/scalaxb | integration/src/test/scala/DispatchResponseAsScalaxbTest.scala | Scala | mit | 1,340 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.nisp.events
import uk.gov.hmrc.http.HeaderCarrier
/** Factory for [[NIRecordEvent]] audit events describing a user's National
  * Insurance record as shown to them. */
object NIRecordEvent {
  /** Builds an NIRecord audit event.
    *
    * @param nino               the user's National Insurance number
    * @param yearsToContribute  remaining years the user can still contribute
    * @param qualifyingYears    years that count towards the state pension
    * @param nonQualifyingYears years that do not qualify
    * @param fillableGaps       non-qualifying years that can still be filled
    * @param nonFillableGaps    non-qualifying years that can no longer be filled
    * @param pre75Years         years accrued before 1975
    * @param hc                 carrier forwarded to the underlying audit event
    */
  def apply(
    nino: String,
    yearsToContribute: Int,
    qualifyingYears: Int,
    nonQualifyingYears: Int,
    fillableGaps: Int,
    nonFillableGaps: Int,
    pre75Years: Int
  )(implicit hc: HeaderCarrier): NIRecordEvent =
    new NIRecordEvent(
      nino,
      yearsToContribute,
      qualifyingYears,
      nonQualifyingYears,
      fillableGaps,
      nonFillableGaps,
      pre75Years
    )
}
/** Audit event named "NIRecord" whose detail map carries the user's National
  * Insurance record figures as strings.
  *
  * NOTE(review): `hc` is presumably consumed by the [[NispBusinessEvent]]
  * superclass constructor (not visible here) — verify it is actually used.
  */
class NIRecordEvent(
  nino: String,
  yearsToContribute: Int,
  qualifyingYears: Int,
  nonQualifyingYears: Int,
  fillableGaps: Int,
  nonFillableGaps: Int,
  pre75Years: Int
)(implicit hc: HeaderCarrier)
    extends NispBusinessEvent(
      "NIRecord",
      Map(
        "nino"               -> nino,
        "yearsToContribute"  -> yearsToContribute.toString,
        "qualifyingYears"    -> qualifyingYears.toString,
        "nonQualifyingYears" -> nonQualifyingYears.toString,
        "fillableGaps"       -> fillableGaps.toString,
        "nonFillableGaps"    -> nonFillableGaps.toString,
        "pre75Years"         -> pre75Years.toString
      )
    )
| hmrc/nisp-frontend | app/uk/gov/hmrc/nisp/events/NIRecordEvent.scala | Scala | apache-2.0 | 1,773 |
import java.util.UUID
import TodoRepository.TodoNotFound
import scala.concurrent.{ExecutionContext, Future}
/** Asynchronous repository of [[Todo]] items. */
trait TodoRepository {
  /** All todos, regardless of completion state. */
  def all(): Future[Seq[Todo]]
  /** Todos whose `done` flag is set. */
  def done(): Future[Seq[Todo]]
  /** Todos whose `done` flag is not set. */
  def pending(): Future[Seq[Todo]]
  /** Persists a new todo built from `createTodo` and returns it (with its generated id). */
  def create(createTodo: CreateTodo): Future[Todo]
  /** Applies the given partial update to the todo with `id`; fails with
    * [[TodoRepository.TodoNotFound]] if no such todo exists. */
  def update(id: String, updateTodo: UpdateTodo): Future[Todo]
}
object TodoRepository {
  /** Failure used by [[TodoRepository.update]] when no todo has the given id. */
  final case class TodoNotFound(id: String) extends Exception(s"Todo with id $id not found.")
}
/** Thread-safe in-memory [[TodoRepository]] backed by an immutable Vector.
  *
  * All mutation and reads of the backing `var` go through `this.synchronized`:
  * the repository is typically called from an HTTP server's thread pool, and
  * the original unsynchronized read-modify-write in `create`/`update` could
  * lose writes under concurrent requests. The interface is unchanged.
  *
  * @param initialTodos todos to seed the repository with (defaults to none)
  * @param ec           execution context required by the [[TodoRepository]] contract
  */
class InMemoryTodoRepository(initialTodos: Seq[Todo] = Seq.empty)(implicit ec: ExecutionContext) extends TodoRepository {

  private var todos: Vector[Todo] = initialTodos.toVector

  override def all(): Future[Seq[Todo]] = Future.successful(snapshot)

  override def done(): Future[Seq[Todo]] = Future.successful(snapshot.filter(_.done))

  override def pending(): Future[Seq[Todo]] = Future.successful(snapshot.filterNot(_.done))

  override def create(createTodo: CreateTodo): Future[Todo] = {
    val todo = Todo(
      UUID.randomUUID().toString,
      createTodo.title,
      createTodo.description,
      false
    )
    this.synchronized { todos = todos :+ todo }
    Future.successful(todo)
  }

  override def update(id: String, updateTodo: UpdateTodo): Future[Todo] = this.synchronized {
    todos.find(_.id == id) match {
      case Some(foundTodo) =>
        val newTodo = updateHelper(foundTodo, updateTodo)
        todos = todos.map(t => if (t.id == id) newTodo else t)
        Future.successful(newTodo)
      case None =>
        Future.failed(TodoNotFound(id))
    }
  }

  /** Applies only the fields that are present in `updateTodo` to `todo`. */
  private def updateHelper(todo: Todo, updateTodo: UpdateTodo): Todo = {
    val withTitle       = updateTodo.title.fold(todo)(title => todo.copy(title = title))
    val withDescription = updateTodo.description.fold(withTitle)(d => withTitle.copy(description = d))
    updateTodo.done.fold(withDescription)(done => withDescription.copy(done = done))
  }

  /** Consistent read of the current state. */
  private def snapshot: Vector[Todo] = this.synchronized(todos)
}
/* Copyright (C) 2011 Mikołaj Sochacki mikolajsochacki AT gmail.com
* This file is part of VRegister (Virtual Register - Wirtualny Dziennik)
* LICENCE: GNU AFFERO GENERAL PUBLIC LICENS Version 3 (AGPLv3)
* See: <http://www.gnu.org/licenses/>.
*/
package eu.brosbit.opos.model.page
import _root_.net.liftweb.mongodb._
import java.util.Date
import org.bson.types.ObjectId
/** Mongo metadata for [[ForumThreadHead]] documents. */
object ForumThreadHead extends MongoDocumentMeta[ForumThreadHead] {
  override def collectionName = "forumthreadhead"
  // Serializers needed to round-trip ObjectId and Date fields through BSON.
  override def formats = super.formats + new ObjectIdSerializer + new DateSerializer
  // Fresh, empty thread head; `content` starts as the all-zero placeholder ObjectId.
  def create = new ForumThreadHead(ObjectId.get, new ObjectId("000000000000000000000000"),
    0, "", "", Nil, "", 0L)
}
/** Summary ("head") record of a forum thread.
  *
  * @param _id        document id
  * @param content    id of the document holding the thread's content
  * @param count      number of posts in the thread
  * @param lastInfo   free-form info about the latest activity
  * @param title      thread title
  * @param tags       tags attached to the thread
  * @param authorName display name of the thread's author
  * @param authorId   numeric id of the author
  */
case class ForumThreadHead(var _id: ObjectId, var content: ObjectId,
                           var count: Int, var lastInfo: String,
                           var title: String, var tags: List[String],
                           var authorName: String, var authorId: Long)
  extends MongoDocument[ForumThreadHead] {
  def meta = ForumThreadHead
}
| mikolajs/osp | src/main/scala/eu/brosbit/opos/model/page/ForumThreadHead.scala | Scala | agpl-3.0 | 1,064 |
package de.unihamburg.vsis.sddf.similarity.measures
import com.rockymadden.stringmetric.StringMetric
/** Similarity measure for numeric strings: 1 minus the absolute difference of
  * the two parsed integers, normalised by the configured value range.
  *
  * NOTE: `compare(String, String)` calls `toInt`, so non-numeric, non-empty
  * input will throw NumberFormatException (pre-existing behaviour). If
  * `maxValueForNormalisation == minValueForNormalisation` the division is by
  * zero (pre-existing behaviour as well).
  */
final case class MeasureNumeric(maxValueForNormalisation: Int, minValueForNormalisation: Int = 0) extends StringMetric[Double] {

  // Width of the value range used to map the absolute difference into [0, 1].
  val normalise = maxValueForNormalisation - minValueForNormalisation

  override def compare(a: Array[Char], b: Array[Char]): Option[Double] = {
    // Bug fix: Array#toString yields the JVM identity string (e.g. "[C@1f32e5"),
    // not the characters, so the old `a.toString()` never produced a number.
    // Build real Strings from the char arrays instead.
    compare(new String(a), new String(b))
  }

  override def compare(a: String, b: String): Option[Double] = {
    // make robust against empty strings
    val aVal = if (a.length > 0) a.toInt else 0
    val bVal = if (b.length > 0) b.toInt else 0
    val diff = math.abs(aVal - bVal)
    Some(1 - (diff.toDouble / normalise))
  }
}
/**
*
* Copyright (C) 2017 University of Bamberg, Software Technologies Research Group
* <https://www.uni-bamberg.de/>, <http://www.swt-bamberg.de/>
*
* This file is part of the Data Structure Investigator (DSI) project, which received financial support by the
* German Research Foundation (DFG) under grant no. LU 1748/4-1, see
* <http://www.swt-bamberg.de/dsi/>.
*
* DSI is licensed under the GNU GENERAL PUBLIC LICENSE (Version 3), see
* the LICENSE file at the project's top-level directory for details or consult <http://www.gnu.org/licenses/>.
*
* DSI is free software: you can redistribute it and/or modify it under the
* terms of the GNU General Public License as published by the Free Software
* Foundation, either version 3 of the License, or any later version.
*
* DSI is a RESEARCH PROTOTYPE and distributed WITHOUT ANY
* WARRANTY, without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* The following people contributed to the conception and realization of the present DSI distribution (in
* alphabetic order by surname):
*
* - Jan H. Boockmann
* - Gerald Lüttgen
* - Thomas Rupprecht
* - David H. White
*
*/
/**
* @author DSI
*
* DsOliConConfClassification.scala created on Jun 19, 2015
*
* Description: Represents a classification of a connection
* between strands
*/
package dsnaming
import dsnaming.DsOliConConfClassificationTag._
/**
* @author DSI
*
* @constructor creates a classification
* @param classfication the detected data structure classification for a connection
* @param evidence the calculated evidence
*/
class DsOliConConfClassification(val classification: DsOliConConfClassificationTag, val evidence: Int) {

  /** Zero-argument constructor: an unclassified connection with no evidence. */
  def this() = this(ccNoClassification, 0)

  /** Renders the tag together with its evidence count. */
  override def toString(): String =
    s"classification: $classification, evidence: $evidence"
}
| uniba-swt/DSIsrc | src/dsnaming/DsOliConConfClassification.scala | Scala | gpl-3.0 | 1,943 |
package uk.gov.dvla.vehicles.presentation.common.views.helpers
object SelectHelper {
  /** Builds the leading blank `<option>` of a select element when a placeholder
    * label was supplied under the `'_default` key, e.g. "Please select".
    *
    * Bug fix: inside a Scala XML literal, `@defaultValue` is plain text (the
    * Twirl `@` escape has no meaning here), so the rendered option displayed
    * the literal string "@defaultValue". A `{}` block is required to embed the
    * Scala value.
    *
    * @param htmlArgs helper arguments; only `'_default` is consulted
    * @return `Some(option element)` when a default label exists, `None` otherwise
    */
  def defaultOption(htmlArgs: Map[Symbol, Any]) = htmlArgs.get('_default).map { defaultValue =>
    <option class="blank" value="">{defaultValue}</option>
  }
}
| dvla/vehicles-presentation-common | app/uk/gov/dvla/vehicles/presentation/common/views/helpers/SelectHelper.scala | Scala | mit | 247 |
/*
* Copyright (C) 2007-2008 Artima, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Example code from:
*
* Programming in Scala (First Edition, Version 6)
* by Martin Odersky, Lex Spoon, Bill Venners
*
* http://booksites.artima.com/programming_in_scala
*/
/** A point class whose equals method has the wrong
* type signature. */
/** Book example (Programming in Scala): a Point whose `equals` is DELIBERATELY
  * broken to illustrate a classic pitfall — do not "fix" it. */
object Points1 {
  class Point(val x: Int, val y: Int) {
    // An utterly wrong definition of equals
    // The pitfall: the parameter type is Point, not Any, so this OVERLOADS
    // rather than overrides Any.equals. Statically-typed Any/AnyRef comparisons
    // (e.g. collections) still use reference equality, and hashCode is not
    // overridden either, so equal points land in different hash buckets.
    def equals(other: Point): Boolean =
      this.x == other.x && this.y == other.y
  }
}
| peachyy/scalastu | equality/Points1.scala | Scala | apache-2.0 | 1,078 |
package scalaz.stream
import collection.mutable
import org.scalacheck.Prop._
import org.scalacheck.{Prop, Properties}
import scala.concurrent.SyncVar
import scalaz.concurrent.Task
import scalaz.stream.Process._
import scalaz.{\\/-, \\/, -\\/}
//unfortunatelly this has to be separate. If we have it on AsyncTopicSpec, some tests will deadlock
/** Shared Writer1 used by the topic specs.
  *
  * For every input string it emits the running total of string lengths on the
  * writer (left) side, then the current string's length on the output (right)
  * side, and recurses with the updated total. */
object WriterHelper {
  def w: Writer1[Long, String, Int] = {
    // `acc` is the running sum of the sizes of all strings seen so far.
    def go(acc: Long): Writer1[Long, String, Int] = {
      receive1[String, Long \\/ Int] {
        s =>
          val t: Long = s.size.toLong + acc
          emit(-\\/(t)) fby emit(\\/-(s.size)) fby go(t)
      }
    }
    go(0)
  }
}
/** ScalaCheck properties for `async.topic` / `async.writerTopic`:
  * fan-out to multiple subscribers, failure propagation, and writer-state
  * semantics (subscribe, signals, startWith, consume). */
object AsyncTopicSpec extends Properties("topic") {
  // Sentinel failure; stack trace suppressed so it can be compared cheaply.
  case object TestedEx extends Throwable {
    override def fillInStackTrace = this
  }
  //tests basic publisher and subscriber functionality
  //have two publishers and four subscribers.
  //each publishers emits own half of elements (odd and evens) and all subscribers must
  //get that messages
  property("basic") = forAll {
    l: List[Int] =>
      // Per-subscriber bookkeeping: completion flag + everything it received.
      case class SubscriberData(
        endSyncVar: SyncVar[Throwable \\/ Unit] = new SyncVar[Throwable \\/ Unit],
        data: mutable.Buffer[Int] = mutable.Buffer.empty)
      val (even, odd) = (l.filter(_ % 2 == 0), l.filter(_ % 2 != 0))
      val topic = async.topic[Int]()
      val subscribers = List.fill(4)(SubscriberData())
      // Side-effecting sink that applies `f` to every element.
      def sink[A](f: A => Unit): Process.Sink[Task, A] = {
        io.resource[Unit, A => Task[Unit]](Task.now(()))(_ => Task.now(()))(
          _ =>
            Task.now {
              (i: A) =>
                Task.now {
                  f(i)
                }
            }
        )
      }
      def collectToBuffer(buffer: mutable.Buffer[Int]): Sink[Task, Int] = {
        sink {
          (i: Int) =>
            buffer += i
        }
      }
      // Start every subscriber before publishing so none of them miss messages.
      subscribers.foreach {
        subs =>
          Task(topic.subscribe.to(collectToBuffer(subs.data)).run.runAsync(subs.endSyncVar.put)).run
      }
      // Two concurrent publishers: one for the odd half, one for the even half.
      val pubOdd = new SyncVar[Throwable \\/ Unit]
      Task((Process.emitAll(odd).evalMap(Task.now(_)) to topic.publish).run.runAsync(pubOdd.put)).run
      val pubEven = new SyncVar[Throwable \\/ Unit]
      Task((Process.emitAll(even).evalMap(Task.now(_)) to topic.publish).run.runAsync(pubEven.put)).run
      val oddResult = pubOdd.get(3000)
      val evenResult = pubEven.get(3000)
      // Closing the topic terminates all subscriber processes.
      topic.close.run
      def verifySubscribers(result1: SubscriberData, subId: String) = {
        val result = result1.endSyncVar.get(3000)
        (result.nonEmpty && result.get.isRight :| s"Subscriber $subId finished") &&
          ((result1.data.size == l.size) :| s"Subscriber $subId got all numbers ${result1.data } == ${l.size }") &&
          ((result1.data.filter(_ % 2 == 0) == even) :| s"Subscriber $subId got all even numbers") &&
          ((result1.data.filter(_ % 2 != 0) == odd) :| s"Subscriber $subId got all odd numbers")
      }
      (oddResult.nonEmpty && oddResult.get.isRight :| "Odd numbers were published") &&
        (evenResult.nonEmpty && evenResult.get.isRight :| "Even numbers were published") &&
        (subscribers.zipWithIndex.foldLeft(Prop.propBoolean(true)) {
          case (b, (a, index)) => b && verifySubscribers(a, index.toString)
        })
  }
  //tests once failed all the publishes, subscribers and signals will fail too
  property("fail") = forAll {
    l: List[Int] =>
      (l.size > 0 && l.size < 10000) ==> {
        val topic = async.topic[Int]()
        // Fail the topic up front; every later publish/subscribe must observe TestedEx.
        topic.fail(TestedEx).run
        val emitted = new SyncVar[Throwable \\/ Unit]
        Task((Process.emitAll(l).evalMap(Task.now(_)) to topic.publish).run.runAsync(emitted.put)).run
        val sub1 = new SyncVar[Throwable \\/ Seq[Int]]
        Task(topic.subscribe.runLog.runAsync(sub1.put)).run
        emitted.get(3000)
        sub1.get(3000)
        (emitted.get(0).nonEmpty && emitted.get == -\\/(TestedEx)) :| "publisher fails" &&
          (sub1.get(0).nonEmpty && sub1.get == -\\/(TestedEx)) :| "subscriber fails"
      }
  }
  property("writer.state") = forAll {
    l: List[String] =>
      (l.nonEmpty) ==> {
        val topic = async.writerTopic(emit(-\\/(0L)) fby WriterHelper.w)()
        val published = new SyncVar[Throwable \\/ IndexedSeq[Long \\/ Int]]
        topic.subscribe.runLog.runAsync(published.put)
        val signalDiscrete = new SyncVar[Throwable \\/ IndexedSeq[Long]]
        topic.signal.discrete.runLog.runAsync(signalDiscrete.put)
        val signalContinuous = new SyncVar[Throwable \\/ IndexedSeq[Long]]
        topic.signal.continuous.runLog.runAsync(signalContinuous.put)
        Thread.sleep(100) //all has to have chance to register
        ((Process(l: _*).toSource to topic.publish) onComplete (eval_(topic.close))).run.run
        // Expected interleaving: for each string, the running total (left) then
        // the string's length (right); `signals` is the left projection only.
        val expectPublish = l.foldLeft[(Long, Seq[Long \\/ Int])]((0L, Nil))({
          case ((sum, acc), s) =>
            val t = sum + s.size
            (t, acc :+ -\\/(t) :+ \\/-(s.size))
        })
        val signals = 0L +: expectPublish._2.collect { case -\\/(s) => s }
        ((published.get(3000).map(_.map(_.toList)) == Some(\\/-(-\\/(0L) +: expectPublish._2))) :| "All items were published") &&
          ((signalDiscrete.get(3000) == Some(\\/-(signals))) :| "Discrete signal published correct writes") &&
          ((signalContinuous.get(3000).map(_.map(signals diff _)) == Some(\\/-(List()))) :| "Continuous signal published correct writes")
      }
  }
  property("writer.state.startWith.up") = forAll {
    l: List[String] =>
      (l.nonEmpty) ==> {
        val topic = async.writerTopic(emit(-\\/(0L)) fby WriterHelper.w)()
        ((Process(l: _*).toSource to topic.publish)).run.run
        // A subscriber that joins after publishing must first see the current state.
        val subscriber = topic.subscribe.take(1).runLog.run
        topic.close.run
        subscriber == List(-\\/(l.map(_.size).sum))
      }
  }
  property("writer.state.startWith.down") = secure {
    // With nothing published, a subscriber sees only the initial state (0).
    val topic = async.writerTopic(emit(-\\/(0L)) fby WriterHelper.w)()
    val subscriber = topic.subscribe.take(1).runLog.run
    topic.close.run
    subscriber == List(-\\/(0))
  }
  property("writer.state.consume") = secure {
    val topic = async.writerTopic(emit(-\\/(0L)) fby WriterHelper.w)()
    val result = new SyncVar[Throwable \\/ IndexedSeq[Long\\/Int]]
    topic.subscribe.runLog.runAsync(result.put)
    // Feed the topic through consumeOne; closing the topic ends the subscriber.
    Task.fork(topic.consumeOne(Process("one","two","three").toSource onComplete eval_(topic.close))).runAsync(_=>())
    result.get(3000).flatMap(_.toOption).toSeq.flatten ==
      Vector(-\\/(0L), -\\/(3L), \\/-(3), -\\/(6L), \\/-(3), -\\/(11L), \\/-(5))
  }
}
| jedws/scalaz-stream | src/test/scala/scalaz/stream/AsyncTopicSpec.scala | Scala | mit | 6,547 |
package com.arcusys.learn.liferay.update.version300.migrations.scorm
import com.arcusys.valamis.persistence.common.SlickProfile
import com.arcusys.valamis.persistence.impl.scorm.model.AttemptModel
import com.arcusys.valamis.persistence.impl.scorm.schema.{AttemptTableComponent, ScormUserComponent}
import slick.driver.JdbcProfile
import slick.jdbc.{GetResult, JdbcBackend, StaticQuery}
/** Copies rows from the legacy `Learn_LFAttempt` table into the new attempt
  * table, then migrates each attempt's dependent data (attempt data and
  * activity state tree) keyed by the freshly generated id. */
class AttemptMigration(val db: JdbcBackend#DatabaseDef,
                       val driver: JdbcProfile)
  extends AttemptTableComponent
    with ScormUserComponent
    with SlickProfile {

  import driver.simple._

  val attemptDataMigration = new AttemptDataMigration(db, driver)
  val activityStateTreeMigration = new ActivityStateTreeMigration(db, driver)

  /** Runs the migration inside the caller-provided session. */
  def migrate()(implicit s: JdbcBackend#Session): Unit = {
    val attempts = getOldAttempts

    if (attempts.nonEmpty) {
      attempts.foreach(a => {
        // Insert returns the new primary key; dependents are re-keyed from old to new id.
        val newId = attemptTQ.returning(attemptTQ.map(_.id)).insert(a)
        attemptDataMigration.migrate(a.id.get, newId)
        activityStateTreeMigration.migrate(a.id.get, newId)
      })
    }
  }

  /** Reads all legacy attempts. The GetResult reads columns positionally, so
    * the order below must match the Learn_LFAttempt column order exactly. */
  private def getOldAttempts(implicit s: JdbcBackend#Session): Seq[AttemptModel] = {
    implicit val reader = GetResult[AttemptModel](r => AttemptModel(
      r.nextLongOption(), // LONG not null primary key,
      r.nextLong(), // INTEGER null,
      r.nextLong(), // INTEGER null,
      r.nextString(), // TEXT null,
      r.nextBoolean() // BOOLEAN null
    ))

    StaticQuery.queryNA[AttemptModel]("select * from Learn_LFAttempt").list
  }
}
| igor-borisov/valamis | learn-portlet/src/main/scala/com/arcusys/learn/liferay/update/version300/migrations/scorm/AttemptMigration.scala | Scala | gpl-3.0 | 1,559 |
package scadla.examples.cnc
import scadla._
import utils._
import Trig._
import utils.gear._
import InlineOps._
import thread._
import Common._
import scadla.EverythingIsIn.{millimeters, radians}
import squants.space.{Length, Angle, Degrees, Millimeters}
import scala.language.postfixOps
import squants.space.LengthConversions._
//parameters:
//- thickness of structure
//- bolt
// - diameter
// - head height
// - non-threaded length
// - threaded length
//- motor
// - base fixation
// - height from base to gear: 37.3
// - rotor diameter
// - additional thing to make the gear hold better to the rotor
//- chuck
// - outer diameter (inner diameter is constrained by the size of the nut/bolt diameter)
// - thread (type, lead, size)
// - collet height
/*
for a parallax 1050kv outrunner brushless motor
rotary thing
height: 2x5mm
hole diameter: 5mm
outer diameter: 17mm
base
height 15mm
space between the screw 23.5 mm
screw diameter 3.5 mm
base of rotary thing is 38 mm above the base
*/
//TODO as a class with the parameter in the ctor
/** Parametric CSG model of a motor-driven spindle (gears, bolt support, motor
  * base, chuck, and collet). Numeric literals are lengths in millimeters via
  * the implicit conversions imported at the top of the file. */
object Spindle {
  ////////////////
  // parameters //
  ////////////////
  val motorBoltDistance = (30 + 30) / 2f //depends on the size of the motorBase and boltSupport
  val gearHeight = 10 mm
  val chuckNonThreadOverlap = 10 mm
  val topBoltWasher = 2 mm
  val bottomBoltWasher = 2 mm
  val boltThreadedLength = 25 mm //23
  val boltNonThreadedLength = 96 mm //86
  // Height of the bolt support column, derived from the bolt's free length.
  val boltSupportTop = boltNonThreadedLength - gearHeight - chuckNonThreadOverlap - topBoltWasher - bottomBoltWasher
  val motorBaseToGear = 37.3 mm
  val motorBaseHeight = boltSupportTop + topBoltWasher - motorBaseToGear
  val bitsLength = 25 mm
  val colletLength = bitsLength - 3
  val chuckHeight = colletLength + boltThreadedLength + chuckNonThreadOverlap
  val innerHole = 9 mm //17.5 / 2
  ///////////
  // gears //
  ///////////
  // 2:1 helical gear pair; opposite twist signs so the helices mesh.
  lazy val gear1 = Gear.helical( motorBoltDistance * 2 / 3.0, 32, gearHeight, Twist(-0.03), tolerance)
  lazy val gear2 = Gear.helical( motorBoltDistance / 3.0 , 16, gearHeight, Twist(0.06), tolerance)
  // Pegs that key the motor gear onto the rotor, clipped to stay inside the teeth.
  val motorKnobs = {
    val c = Cylinder(3-tolerance, 2).moveZ(gearHeight)
    val u = Union(c.moveX(9), c.moveX(-9), c.moveY(9), c.moveY(-9))
    val r = (motorBoltDistance / 3.0) * (1.0 - 2.0 / 16)
    u * Cylinder(r, 20)
  }
  // Boss on the bolt gear with a captive M8 nut recess.
  val nutTop = Cylinder(ISO.M8 * 3, 14) - nut.M8.moveZ(gearHeight)
  //gears
  lazy val gearBolt = gear1 + nutTop - Cylinder(ISO.M8 + tolerance, gearHeight)
  lazy val gearMotor = gear2 - Cylinder(ISO.M5 + tolerance, gearHeight) + motorKnobs
  /////////////////////////////////
  // bolt support and motor base //
  /////////////////////////////////
  //TODO not so square ...
  val motorBase = {
    val topZ = 3
    val bot = Trapezoid(46, 30, 5, 21).rotateX(-Pi/2).moveZ(5)
    val top = Cube(30,30,topZ).moveZ(motorBaseHeight - topZ)
    val subTtop = Cube(30,30-6.5,topZ).moveZ(motorBaseHeight - 2 * topZ)
    val base = Union(
      Hull(bot, subTtop),
      top
    )
    // Slightly oversized elongated M3 nut pocket (slides in from the side).
    val nm3 = Bigger(Hull(nut.M3, nut.M3.moveX(5)), 0.4)
    val screw_hole = Cylinder(ISO.M3, 10)
    // Four M3 screw holes + matching nut pockets near the top of the base.
    val fasteners = Seq(
      screw_hole.move( 3.25, 3.25, 0),
      screw_hole.move(26.75, 3.25, 0),
      screw_hole.move( 3.25,26.75, 0),
      screw_hole.move(26.75,26.75, 0),
      nm3.move(26.75, 3.25, 4),
      nm3.move(26.75,26.75, 4),
      nm3.rotateZ(Pi).move( 3.25, 3.25, 4),
      nm3.rotateZ(Pi).move( 3.25,26.75, 4)
    ).map(_.moveZ(motorBaseHeight - 10))
    val shaftHole = {
      val c = Cylinder( 8, motorBaseHeight).move(15, 15, 0) //hole for the lower part of the motor's shaft
      Hull(c, c.moveY(15))
    }
    // Ventilation cutout for motor cooling.
    val breathingSpaces = Seq(
      Cylinder(20, 50).moveZ(-25).scaleX(0.30).rotateX(Pi/2).move(15,15,motorBaseHeight)//,
      //Cylinder(20, 50).moveZ(-25).scaleY(0.30).rotateY(Pi/2).move(15,15,motorBaseHeight)
    )
    //the block on which the motor is screwed
    base - shaftHole -- fasteners -- breathingSpaces
  }
  // (x, y, rotation) of the mounting tabs around the spindle body.
  val fixCoord = List[(Length, Length, Angle)](
    (31, 4, -Pi/5.2),
    (-1, 4, Pi+Pi/5.2),
    (34, 30, 0),
    (-4, 30, Pi),
    (34, 56, Pi/2),
    (-4, 56, Pi/2)
  )
  //centered at 0, 0
  val boltSupport = {
    val base = Hull(
      Cylinder(15, boltSupportTop),
      Cube(30, 1, motorBaseHeight).move(-15, 14, 0)
    )
    val lowerBearing = Hull(Cylinder(10, 7.5), bearing.moveZ(-0.5)) //add a small chamfer
    // Bearing seats top and bottom, through-hole for the bolt in between.
    base - lowerBearing - bearing.moveZ(boltSupportTop - 7) - Cylinder(9, boltSupportTop)
  }
  // Complete body: bolt support + motor base + screw-on mounting tabs.
  val spindle = {
    val s = Cylinder(ISO.M3 + tolerance, 5)
    val fix = Cylinder(4, 4) + Cube(5, 8, 4).move(-5, -4, 0) - s
    Union(
      boltSupport.move(15, 15, 0),
      motorBase.moveY(30)
    ) ++ fixCoord.map{ case (x,y,a) => fix.rotateZ(a).move(x,y,0) }
  }
  ///////////
  // chuck //
  ///////////
  val chuck = Chuck.innerThread(13, innerHole+tolerance, chuckHeight, colletLength, 20)
  val slits = 4 //6
  val collet = Collet.threaded(innerHole+1, innerHole, UTS._1_8, colletLength,
    slits, 0.5, 1, 20, ISO.M2)
  val colletWrench = Collet.wrench(innerHole, UTS._1_8, slits, ISO.M2)
  /** All printable parts, keyed by output file name. */
  def objects = Map(
    "gear_bolt" -> gearBolt,
    "gear_motor" -> gearMotor,
    "bolt_washer_top" -> Tube(6, (4 mm) + 2*tolerance, topBoltWasher),
    "bolt_washer_bot" -> Tube(6, (4 mm) + 2*tolerance, bottomBoltWasher),
    "spindle_body" -> spindle,
    "chuck_wrench" -> Chuck.wrench(13),
    "chuck" -> chuck.rotateX(Pi),
    "collet_inner" -> collet,
    "collet_wrench" -> colletWrench
  )
}
| dzufferey/scadla | src/main/scala/scadla/examples/cnc/Spindle.scala | Scala | apache-2.0 | 5,544 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.ignite
import org.apache.ignite.IgniteException
import org.apache.ignite.configuration.IgniteConfiguration
import org.apache.ignite.internal.IgnitionEx
import org.apache.ignite.spark.IgniteContext
import scala.collection.JavaConverters._
import scala.reflect.runtime.universe.TypeTag
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession.Builder
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.encoders._
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, Range}
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.internal._
import org.apache.spark.sql.sources.BaseRelation
import org.apache.spark.sql.streaming._
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils
/**
* Implementation of Spark Session for Ignite.
*/
/** SparkSession wrapper that plugs Ignite into Spark's session machinery:
  * it shares the proxied session's SparkContext, installs an Ignite-backed
  * SharedState/catalog, and appends [[IgniteOptimization]] to the optimizer. */
class IgniteSparkSession private(
    ic: IgniteContext,
    proxy: SparkSession,
    existingSharedState: Option[SharedState],
    parentSessionState: Option[SessionState]) extends SparkSession(proxy.sparkContext) {
    self ⇒

    private def this(ic: IgniteContext, proxy: SparkSession) =
        this(ic, proxy, None, None)

    private def this(proxy: SparkSession) =
        this(new IgniteContext(proxy.sparkContext, IgnitionEx.DFLT_CFG), proxy)

    private def this(proxy: SparkSession, configPath: String) =
        this(new IgniteContext(proxy.sparkContext, configPath), proxy)

    private def this(proxy: SparkSession, cfgF: () => IgniteConfiguration) =
        this(new IgniteContext(proxy.sparkContext, cfgF), proxy)

    /** @inheritdoc */
    @transient override lazy val catalog = new CatalogImpl(self)

    /** @inheritdoc */
    @transient override val sqlContext: SQLContext = new SQLContext(self)

    /** @inheritdoc */
    @transient override lazy val sharedState: SharedState =
        existingSharedState.getOrElse(new IgniteSharedState(ic, sparkContext))

    /** @inheritdoc */
    @transient override lazy val sessionState: SessionState = {
        parentSessionState
            .map(_.clone(this))
            .getOrElse {
                val sessionState = new SessionStateBuilder(self, None).build()

                // Register the Ignite-specific optimizer rules on fresh sessions.
                sessionState.experimentalMethods.extraOptimizations =
                    sessionState.experimentalMethods.extraOptimizations :+ IgniteOptimization

                sessionState
            }
    }

    /** @inheritdoc */
    @transient override lazy val conf: RuntimeConfig = proxy.conf

    /** @inheritdoc */
    @transient override lazy val emptyDataFrame: DataFrame = proxy.emptyDataFrame

    /** @inheritdoc */
    override def newSession(): SparkSession = new IgniteSparkSession(ic, proxy.newSession())

    /** @inheritdoc */
    override def version: String = proxy.version

    /** @inheritdoc */
    override def emptyDataset[T: Encoder]: Dataset[T] = {
        val encoder = implicitly[Encoder[T]]
        new Dataset(self, LocalRelation(encoder.schema.toAttributes), encoder)
    }

    /** @inheritdoc */
    override def createDataFrame(rows: java.util.List[Row], schema: StructType): DataFrame = {
        Dataset.ofRows(self, LocalRelation.fromExternalRows(schema.toAttributes, rows.asScala))
    }

    /** @inheritdoc */
    override def createDataFrame(rdd: RDD[_], beanClass: Class[_]): DataFrame = {
        val attributeSeq: Seq[AttributeReference] = getSchema(beanClass)
        val className = beanClass.getName
        val rowRdd = rdd.mapPartitions { iter =>
            SQLContext.beansToRows(iter, Utils.classForName(className), attributeSeq)
        }
        Dataset.ofRows(self, LogicalRDD(attributeSeq, rowRdd)(self))
    }

    /** @inheritdoc */
    override def createDataFrame(data: java.util.List[_], beanClass: Class[_]): DataFrame = {
        val attrSeq = getSchema(beanClass)
        val rows = SQLContext.beansToRows(data.asScala.iterator, beanClass, attrSeq)
        Dataset.ofRows(self, LocalRelation(attrSeq, rows.toSeq))
    }

    /** @inheritdoc */
    override def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = {
        SparkSession.setActiveSession(this)
        val encoder = Encoders.product[A]
        Dataset.ofRows(self, ExternalRDD(rdd, self)(encoder))
    }

    /** @inheritdoc */
    override def baseRelationToDataFrame(baseRelation: BaseRelation): DataFrame = {
        Dataset.ofRows(self, LogicalRelation(baseRelation))
    }

    /** @inheritdoc */
    override def createDataset[T: Encoder](data: Seq[T]): Dataset[T] = {
        val enc = encoderFor[T]
        val attributes = enc.schema.toAttributes
        val encoded = data.map(d => enc.toRow(d).copy())
        val plan = new LocalRelation(attributes, encoded)
        Dataset[T](self, plan)
    }

    /** @inheritdoc */
    override def createDataset[T: Encoder](data: RDD[T]): Dataset[T] = {
        Dataset[T](self, ExternalRDD(data, self))
    }

    /** @inheritdoc */
    override def range(start: Long, end: Long, step: Long, numPartitions: Int): Dataset[java.lang.Long] = {
        new Dataset(self, Range(start, end, step, numPartitions), Encoders.LONG)
    }

    /** @inheritdoc */
    override def table(tableName: String): DataFrame = {
        val tableIdent = sessionState.sqlParser.parseTableIdentifier(tableName)

        Dataset.ofRows(self, sessionState.catalog.lookupRelation(tableIdent))
    }

    /** @inheritdoc */
    override def sql(sqlText: String): DataFrame = Dataset.ofRows(self, sessionState.sqlParser.parsePlan(sqlText))

    /** @inheritdoc */
    override def read: DataFrameReader = new DataFrameReader(self)

    /** @inheritdoc */
    override def readStream: DataStreamReader = new DataStreamReader(self)

    /** @inheritdoc */
    override def stop(): Unit = proxy.stop()

    /** @inheritdoc */
    // NOTE(review): `r` is never applied here — makeFromJava(schema) returns a
    // converter function which is cast directly; presumably the converter
    // should be applied to `r` (upstream Spark uses EvaluatePython.fromJava).
    // Verify against the Spark version this module targets.
    override private[sql] def applySchemaToPythonRDD(rdd: RDD[Array[Any]], schema: StructType) = {
        val rowRdd = rdd.map(r => python.EvaluatePython.makeFromJava(schema).asInstanceOf[InternalRow])
        Dataset.ofRows(self, LogicalRDD(schema.toAttributes, rowRdd)(self))
    }

    /** @inheritdoc */
    override private[sql] def cloneSession(): IgniteSparkSession = {
        val session = new IgniteSparkSession(ic, proxy.cloneSession(), Some(sharedState), Some(sessionState))

        session.sessionState // Force copy of SessionState.

        session
    }

    /** @inheritdoc */
    @transient override private[sql] val extensions =
        proxy.extensions

    /** @inheritdoc */
    override private[sql] def createDataFrame(rowRDD: RDD[Row],
        schema: StructType,
        needsConversion: Boolean) = {
        val catalystRows = if (needsConversion) {
            val encoder = RowEncoder(schema)
            rowRDD.map(encoder.toRow)
        } else {
            rowRDD.map{r: Row => InternalRow.fromSeq(r.toSeq)}
        }
        val logicalPlan = LogicalRDD(schema.toAttributes, catalystRows)(self)
        Dataset.ofRows(self, logicalPlan)
    }

    /** @inheritdoc */
    override private[sql] def table( tableIdent: TableIdentifier) =
        Dataset.ofRows(self, sessionState.catalog.lookupRelation(tableIdent))

    /** Infers the Catalyst schema of a Java bean class via reflection. */
    private def getSchema(beanClass: Class[_]): Seq[AttributeReference] = {
        val (dataType, _) = JavaTypeInference.inferDataType(beanClass)
        dataType.asInstanceOf[StructType].fields.map { f =>
            AttributeReference(f.name, f.dataType, f.nullable)()
        }
    }
}
/** Companion: entry point for building an [[IgniteSparkSession]]. */
object IgniteSparkSession {
    /**
      * @return New instance of <code>IgniteBuilder</code>
      */
    def builder(): IgniteBuilder = {
        new IgniteBuilder
    }

    /**
      * Builder for <code>IgniteSparkSession</code>.
      * Extends spark session builder with methods related to Ignite configuration.
      */
    class IgniteBuilder extends Builder {
        /**
          * Config provider.
          */
        private var cfgF: () ⇒ IgniteConfiguration = _

        /**
          * Path to config file.
          */
        private var config: String = _

        /** @inheritdoc */
        override def getOrCreate(): IgniteSparkSession = synchronized {
            val sparkSession = super.getOrCreate()

            // Exactly one of cfgF/config may be set (enforced by the setters below);
            // with neither set, fall back to Ignite's default configuration.
            val ic = if (cfgF != null)
                new IgniteContext(sparkSession.sparkContext, cfgF)
            else if (config != null)
                new IgniteContext(sparkSession.sparkContext, config)
            else {
                logWarning("No `igniteConfig` or `igniteConfigProvider`. " +
                    "IgniteSparkSession will use DFLT_CFG for Ignite.")

                new IgniteContext(sparkSession.sparkContext)
            }

            new IgniteSparkSession(ic, sparkSession)
        }

        /**
          * Set path to Ignite config file.
          * User should use only one of <code>igniteConfig</code> and <code>igniteConfigProvider</code>.
          *
          * @param cfg Path to Ignite config file.
          * @return This for chaining.
          */
        def igniteConfig(cfg: String): IgniteBuilder = {
            if (cfgF != null)
                throw new IgniteException("only one of config or configProvider should be provided")

            this.config = cfg

            this
        }

        /**
          * Set Ignite config provider.
          * User should use only one of <code>igniteConfig</code> and <code>igniteConfigProvider</code>.
          *
          * @param cfgF Closure to provide <code>IgniteConfiguration</code>.
          * @return This for chaining.
          */
        def igniteConfigProvider(cfgF: () ⇒ IgniteConfiguration): IgniteBuilder = {
            if (config != null)
                throw new IgniteException("only one of config or configProvider should be provided")

            this.cfgF = cfgF

            this
        }

        /** @inheritdoc */
        override def appName(name: String): IgniteBuilder = {
            super.appName(name)

            this
        }

        /** @inheritdoc */
        override def config(key: String, value: String): IgniteBuilder = {
            super.config(key, value)

            this
        }

        /** @inheritdoc */
        override def config(key: String, value: Long): IgniteBuilder = {
            super.config(key, value)

            this
        }

        /** @inheritdoc */
        override def config(key: String, value: Double): IgniteBuilder = {
            super.config(key, value)

            this
        }

        /** @inheritdoc */
        override def config(key: String, value: Boolean): IgniteBuilder = {
            super.config(key, value)

            this
        }

        /** @inheritdoc */
        override def config(conf: SparkConf): IgniteBuilder = {
            super.config(conf)

            this
        }

        /** @inheritdoc */
        override def master(master: String): IgniteBuilder = {
            super.master(master)

            this
        }

        /**
          * This method will throw RuntimeException as long as we building '''IgniteSparkSession'''
          */
        override def enableHiveSupport(): IgniteBuilder =
            throw new IgniteException("This method doesn't supported by IgniteSparkSession")

        /** @inheritdoc */
        override def withExtensions(f: (SparkSessionExtensions) ⇒ Unit): IgniteBuilder = {
            super.withExtensions(f)

            this
        }
    }
}
| samaitra/ignite | modules/spark/src/main/scala/org/apache/spark/sql/ignite/IgniteSparkSession.scala | Scala | apache-2.0 | 12,381 |
/*
* ******************************************************************************
* * Copyright (C) 2013 Christopher Harris (Itszuvalex)
* * Itszuvalex@gmail.com
* *
* * This program is free software; you can redistribute it and/or
* * modify it under the terms of the GNU General Public License
* * as published by the Free Software Foundation; either version 2
* * of the License, or (at your option) any later version.
* *
* * This program is distributed in the hope that it will be useful,
* * but WITHOUT ANY WARRANTY; without even the implied warranty of
* * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* * GNU General Public License for more details.
* *
* * You should have received a copy of the GNU General Public License
* * along with this program; if not, write to the Free Software
* * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* *****************************************************************************
*/
package com.itszuvalex.femtocraft.power.items
import java.util
import com.itszuvalex.femtocraft.api.power.PowerContainer
import com.itszuvalex.femtocraft.core.items.CoreItemBlock
import com.itszuvalex.femtocraft.power.tiles.TileEntityPowerBase
import cpw.mods.fml.relauncher.{Side, SideOnly}
import net.minecraft.block.Block
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.item.ItemStack
import net.minecraft.nbt.NBTTagCompound
import net.minecraft.world.World
/**
 * Item form of a power-holding block. Persists a [[PowerContainer]] in the item
 * stack's NBT under the name of the container field declared on
 * [[TileEntityPowerBase]], and renders the container's state into the tooltip.
 */
abstract class ItemBlockPower(block: Block) extends CoreItemBlock(block) {

  /**
   * Appends the power container's state to the item tooltip.
   * Lazily initializes the stack's NBT with the default container on first display,
   * so subsequent reads see a persisted value.
   */
  @SideOnly(Side.CLIENT) override def addInformation(par1ItemStack: ItemStack, par2EntityPlayer: EntityPlayer, par3List: util.List[_], par4: Boolean) {
    super.addInformation(par1ItemStack, par2EntityPlayer, par3List, par4)
    val nbt = getOrCreateTagCompound(par1ItemStack)
    val fieldName = getFieldName
    // NOTE(review): assumes getCompoundTag returns a fresh empty compound when the
    // key is absent (matches the original code's use) — confirm against the NBT API.
    val power = nbt.getCompoundTag(fieldName)
    val container: PowerContainer =
      if (nbt.hasKey(fieldName)) {
        PowerContainer.createFromNBT(power)
      } else {
        // First time this stack is inspected: persist the default container.
        val defaultContainer = getDefaultContainer
        defaultContainer.saveToNBT(power)
        nbt.setTag(fieldName, power)
        defaultContainer
      }
    container.addInformationToTooltip(par3List)
  }

  /** Returns the stack's NBT tag, creating and attaching an empty one if absent. */
  private def getOrCreateTagCompound(stack: ItemStack): NBTTagCompound = {
    if (stack.getTagCompound == null) {
      stack.stackTagCompound = new NBTTagCompound
    }
    stack.getTagCompound
  }

  /**
   * Name of the first PowerContainer-typed field declared on TileEntityPowerBase,
   * falling back to "power" when none is found.
   *
   * Fix: the original used filter+foreach with a nonlocal `return` from inside the
   * closure (implemented via exception throw) where `find` expresses the intent, and
   * it toggled field accessibility just to read the name — Field.getName never
   * requires accessibility overrides.
   */
  private def getFieldName: String =
    classOf[TileEntityPowerBase].getDeclaredFields
      .find(_.getType eq classOf[PowerContainer])
      .map(_.getName)
      .getOrElse("power")

  /** Container used to seed newly created stacks. */
  def getDefaultContainer: PowerContainer

  /** Seeds a freshly crafted stack's NBT with the default power container. */
  override def onCreated(par1ItemStack: ItemStack, par2World: World, par3EntityPlayer: EntityPlayer) {
    super.onCreated(par1ItemStack, par2World, par3EntityPlayer)
    val nbt = getOrCreateTagCompound(par1ItemStack)
    val power = new NBTTagCompound
    getDefaultContainer.saveToNBT(power)
    nbt.setTag(getFieldName, power)
  }
}
| Itszuvalex/Femtocraft-alpha-1 | src/main/java/com/itszuvalex/femtocraft/power/items/ItemBlockPower.scala | Scala | gpl-2.0 | 3,352 |
package utils.silhouette
import com.mohiva.play.silhouette.api.Env
import com.mohiva.play.silhouette.impl.authenticators.CookieAuthenticator
import models.User
// Silhouette environment binding for this application.
trait MyEnv extends Env {
  // Identity type: the application's User model.
  type I = User
  // Authenticator type: sessions are tracked via a cookie-backed authenticator.
  type A = CookieAuthenticator
}
package com.github.cuzfrog.webdriver
import com.typesafe.config.ConfigFactory
/**
* Created by cuz on 1/24/17.
*/
/** Client-side configuration, read once from the Typesafe config on first access. */
private object ClientConfig {
  private val rootConfig = ConfigFactory.load()

  /** Whether akka remoting runs with the artery transport enabled. */
  val arteryEnabled: Boolean = rootConfig.getBoolean("akka.remote.artery.enabled")

  /** Akka protocol suffix: empty when artery is enabled, ".tcp" otherwise. */
  val akkaProtocol: String = if (arteryEnabled) "" else ".tcp"

  private val clientSection = rootConfig.getConfig("webdriver.client")

  /**
   * Server uri with port. e.g. "localhost:60001"
   */
  val serverUri: String = clientSection.getString("server-uri")

  /**
   * Server connection timeout in Seconds
   */
  val timeoutSec: Int = clientSection.getInt("timeout")

  /**
   * Interval between actions, in milliseconds.
   */
  val actionInterval: Int = clientSection.getInt("action-interval")

  val parserScriptDir: String = clientSection.getString("parser-script-dir")
}
| cuzfrog/WebDriverServ | client/src/main/scala/com/github/cuzfrog/webdriver/ClientConfig.scala | Scala | apache-2.0 | 788 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.process.tube
import com.vividsolutions.jts.geom.{Coordinate, GeometryFactory, Point}
import org.geotools.data.collection.ListFeatureCollection
import org.geotools.data.{DataStoreFinder, Query}
import org.geotools.factory.Hints
import org.geotools.feature.DefaultFeatureCollection
import org.geotools.filter.text.cql2.CQL
import org.joda.time.{DateTime, DateTimeZone}
import org.junit.runner.RunWith
import org.locationtech.geomesa.core.data.{AccumuloDataStore, AccumuloFeatureStore}
import org.locationtech.geomesa.core.index.Constants
import org.locationtech.geomesa.feature.AvroSimpleFeatureFactory
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.text.WKTUtils
import org.opengis.filter.Filter
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class TubeSelectProcessTest extends Specification {

  // The specs share one mock Accumulo instance (same table name across tests),
  // so they must run in declaration order.
  sequential

  val dtgField = org.locationtech.geomesa.core.process.tube.DEFAULT_DTG_FIELD
  val geotimeAttributes = s"*geom:Geometry:srid=4326,$dtgField:Date"

  def createStore: AccumuloDataStore =
    // the specific parameter values should not matter, as we
    // are requesting a mock data store connection to Accumulo
    DataStoreFinder.getDataStore(Map(
      "instanceId" -> "mycloud",
      "zookeepers" -> "zoo1:2181,zoo2:2181,zoo3:2181",
      "user" -> "myuser",
      "password" -> "mypassword",
      "auths" -> "A,B,C",
      "tableName" -> "testwrite",
      "useMock" -> "true",
      "featureEncoding" -> "avro")).asInstanceOf[AccumuloDataStore]

  "TubeSelect" should {
    "should do a simple tube with geo interpolation" in {
      val sftName = "tubeTestType"
      val sft = SimpleFeatureTypes.createType(sftName, s"type:String,$geotimeAttributes")
      sft.getUserData()(Constants.SF_PROPERTY_START_TIME) = dtgField

      val ds = createStore

      ds.createSchema(sft)

      val fs = ds.getFeatureSource(sftName).asInstanceOf[AccumuloFeatureStore]

      val featureCollection = new DefaultFeatureCollection(sftName, sft)

      // Two parallel diagonal tracks of points, types "a" and "b".
      List("a", "b").foreach { name =>
        List(1, 2, 3, 4).zip(List(45, 46, 47, 48)).foreach { case (i, lat) =>
          val sf = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), name + i.toString)
          sf.setDefaultGeometry(WKTUtils.read(f"POINT($lat%d $lat%d)"))
          sf.setAttribute(org.locationtech.geomesa.core.process.tube.DEFAULT_DTG_FIELD, new DateTime("2011-01-01T00:00:00Z", DateTimeZone.UTC).toDate)
          sf.setAttribute("type", name)
          sf.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
          featureCollection.add(sf)
        }
      }

      // write the feature to the store
      val res = fs.addFeatures(featureCollection)

      // tube features
      val tubeFeatures = fs.getFeatures(CQL.toFilter("type = 'a'"))

      // result set to tube on
      val features = fs.getFeatures(CQL.toFilter("type <> 'a'"))

      // get back type b from tube
      val ts = new TubeSelectProcess()
      val results = ts.execute(tubeFeatures, features, null, 1L, 1L, 0.0, 5, null)

      val f = results.features()
      while (f.hasNext) {
        val sf = f.next
        sf.getAttribute("type") mustEqual "b"
      }

      results.size mustEqual 4
    }

    "should do a simple tube with geo + time interpolation" in {
      val sftName = "tubeTestType"
      val sft = SimpleFeatureTypes.createType(sftName, s"type:String,$geotimeAttributes")
      sft.getUserData()(Constants.SF_PROPERTY_START_TIME) = dtgField

      val ds = createStore

      val fs = ds.getFeatureSource(sftName).asInstanceOf[AccumuloFeatureStore]

      val featureCollection = new DefaultFeatureCollection(sftName, sft)

      // Adds a third "c" track one day later; the tube still only matches "b".
      List("c").foreach { name =>
        List(1, 2, 3, 4).zip(List(45, 46, 47, 48)).foreach { case (i, lat) =>
          val sf = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), name + i.toString)
          sf.setDefaultGeometry(WKTUtils.read(f"POINT($lat%d $lat%d)"))
          sf.setAttribute(org.locationtech.geomesa.core.process.tube.DEFAULT_DTG_FIELD, new DateTime("2011-01-02T00:00:00Z", DateTimeZone.UTC).toDate)
          sf.setAttribute("type", name)
          sf.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
          sf.getUserData()(Constants.SF_PROPERTY_START_TIME) = dtgField
          featureCollection.add(sf)
        }
      }

      // write the feature to the store
      fs.addFeatures(featureCollection)

      // tube features
      val tubeFeatures = fs.getFeatures(CQL.toFilter("type = 'a'"))

      // result set to tube on
      val features = fs.getFeatures(CQL.toFilter("type <> 'a'"))

      // get back type b from tube
      val ts = new TubeSelectProcess()
      val results = ts.execute(tubeFeatures, features, null, 1L, 1L, 0.0, 5, null)

      val f = results.features()
      while (f.hasNext) {
        val sf = f.next
        sf.getAttribute("type") mustEqual "b"
      }

      results.size mustEqual 4
    }

    "should properly convert speed/time to distance" in {
      val sftName = "tubetest2"
      val sft = SimpleFeatureTypes.createType(sftName, s"type:String,$geotimeAttributes")
      sft.getUserData()(Constants.SF_PROPERTY_START_TIME) = dtgField

      val ds = createStore

      ds.createSchema(sft)

      val fs = ds.getFeatureSource(sftName).asInstanceOf[AccumuloFeatureStore]

      val featureCollection = new DefaultFeatureCollection(sftName, sft)

      // A 10x10 grid of points per type, all at the same instant.
      var i = 0
      List("a", "b").foreach { name =>
        for (lon <- 40 until 50; lat <- 40 until 50) {
          val sf = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), name + i.toString)
          i += 1
          sf.setDefaultGeometry(WKTUtils.read(f"POINT($lon%d $lat%d)"))
          sf.setAttribute(org.locationtech.geomesa.core.process.tube.DEFAULT_DTG_FIELD, new DateTime("2011-01-02T00:00:00Z", DateTimeZone.UTC).toDate)
          sf.setAttribute("type", name)
          sf.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
          featureCollection.add(sf)
        }
      }

      // write the feature to the store
      val res = fs.addFeatures(featureCollection)

      // tube features
      val tubeFeatures = fs.getFeatures(CQL.toFilter("BBOX(geom, 39.999999999,39.999999999, 40.00000000001, 50.000000001) AND type = 'a'"))

      // result set to tube on
      val features = fs.getFeatures(CQL.toFilter("type <> 'a'"))

      // get back type b from tube
      val ts = new TubeSelectProcess()

      // 110 m/s times 1000 seconds is just 100km which is under 1 degree
      val results = ts.execute(tubeFeatures, features, null, 110L, 1000L, 0.0, 5, null)

      val f = results.features()
      while (f.hasNext) {
        val sf = f.next
        sf.getAttribute("type") mustEqual "b"
        val point = sf.getDefaultGeometry.asInstanceOf[Point]
        point.getX mustEqual 40.0
        point.getY should be between(40.0, 50.0)
      }

      results.size mustEqual 10
    }

    "should properly dedup overlapping results based on buffer size " in {
      // Relies on the "tubetest2" schema and data written by the previous spec.
      val sftName = "tubetest2"
      val ds = createStore
      val fs = ds.getFeatureSource(sftName).asInstanceOf[AccumuloFeatureStore]

      // tube features
      val tubeFeatures = fs.getFeatures(CQL.toFilter("BBOX(geom, 39.999999999,39.999999999, 40.00000000001, 50.000000001) AND type = 'a'"))

      // result set to tube on
      val features = fs.getFeatures(CQL.toFilter("type <> 'a'"))

      // get back type b from tube
      val ts = new TubeSelectProcess()

      // this time we use 112km which is just over 1 degree so we should pick up additional features
      // but with buffer overlap since the features in the collection are 1 degrees apart
      val results = ts.execute(tubeFeatures, features, null, 112L, 1000L, 0.0, 5, null)

      val f = results.features()
      while (f.hasNext) {
        val sf = f.next
        sf.getAttribute("type") mustEqual "b"
        val point = sf.getDefaultGeometry.asInstanceOf[Point]
        point.getX should be between(40.0, 41.0)
        point.getY should be between(40.0, 50.0)
      }

      results.size mustEqual 20
    }
  }

  "TubeSelect" should {
    "should handle all geometries" in {
      val sftName = "tubeline"
      val sft = SimpleFeatureTypes.createType(sftName, s"type:String,$geotimeAttributes")
      sft.getUserData()(Constants.SF_PROPERTY_START_TIME) = dtgField

      val ds = createStore

      ds.createSchema(sft)

      val fs = ds.getFeatureSource(sftName).asInstanceOf[AccumuloFeatureStore]

      val featureCollection = new DefaultFeatureCollection(sftName, sft)

      List("b").foreach { name =>
        List(1, 2, 3, 4).zip(List(45, 46, 47, 48)).foreach { case (i, lat) =>
          val sf = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), name + i.toString)
          sf.setDefaultGeometry(WKTUtils.read(f"POINT(40 $lat%d)"))
          sf.setAttribute(org.locationtech.geomesa.core.process.tube.DEFAULT_DTG_FIELD, new DateTime("2011-01-01T00:00:00Z", DateTimeZone.UTC).toDate)
          sf.setAttribute("type", name)
          sf.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
          featureCollection.add(sf)
        }
      }

      // Non-point result geometries: a line and a polygon of type "b".
      val bLine = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), "b-line")
      bLine.setDefaultGeometry(WKTUtils.read("LINESTRING(40 40, 40 50)"))
      bLine.setAttribute(org.locationtech.geomesa.core.process.tube.DEFAULT_DTG_FIELD, new DateTime("2011-01-01T00:00:00Z", DateTimeZone.UTC).toDate)
      bLine.setAttribute("type", "b")
      bLine.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
      featureCollection.add(bLine)

      val bPoly = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), "b-poly")
      bPoly.setDefaultGeometry(WKTUtils.read("POLYGON((40 40, 41 40, 41 41, 40 41, 40 40))"))
      bPoly.setAttribute(org.locationtech.geomesa.core.process.tube.DEFAULT_DTG_FIELD, new DateTime("2011-01-01T00:00:00Z", DateTimeZone.UTC).toDate)
      bPoly.setAttribute("type", "b")
      bPoly.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
      featureCollection.add(bPoly)

      // tube features
      val aLine = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), "a-line")
      aLine.setDefaultGeometry(WKTUtils.read("LINESTRING(40 40, 40 50)"))
      aLine.setAttribute(org.locationtech.geomesa.core.process.tube.DEFAULT_DTG_FIELD, new DateTime("2011-01-01T00:00:00Z", DateTimeZone.UTC).toDate)
      // aLine.setAttribute("end", new DateTime("2011-01-01T00:00:00Z", DateTimeZone.UTC).toDate)
      aLine.setAttribute("type", "a")
      aLine.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
      val tubeFeatures = new ListFeatureCollection(sft, List(aLine))

      // write the feature to the store
      val res = fs.addFeatures(featureCollection)

      // result set to tube on
      val features = fs.getFeatures(CQL.toFilter("type <> 'a'"))

      features.size mustEqual 6

      // get back type b from tube
      val ts = new TubeSelectProcess()
      val results = ts.execute(tubeFeatures, features, null, 112L, 1L, 0.0, 5, null)

      val f = results.features()
      while (f.hasNext) {
        val sf = f.next
        sf.getAttribute("type") mustEqual "b"
      }

      results.size mustEqual 6
    }
  }

  "TubeBuilder" should {
    "approximate meters to degrees" in {
      val geoFac = new GeometryFactory
      val sftName = "tubeline"
      val sft = SimpleFeatureTypes.createType(sftName, s"type:String,$geotimeAttributes")

      // calculated km at various latitude by USGS
      forall(List(0, 30, 60, 89).zip(List(110.57, 110.85, 111.41, 111.69))) { case(lat, dist) =>
        val deg = new NoGapFill(new DefaultFeatureCollection(sftName, sft), 0, 0).metersToDegrees(110.57*1000, geoFac.createPoint(new Coordinate(0, lat)))
        // NOTE(review): `deg` is never used and `1.0 - dist` is always negative (dist ≈ 110),
        // so this assertion is vacuously true — it presumably was meant to compare `deg`
        // against the expected degree value; confirm intent before fixing.
        (1.0-dist) should beLessThan(.0001)
      }
    }
  }

  "TubeSelect" should {
    "properly handle values for execute" in {
      val sftName = "tubeline"
      val sft = SimpleFeatureTypes.createType(sftName, s"type:String,$geotimeAttributes")
      val ts = new TubeSelectProcess
      val ds = createStore
      val fs = ds.getFeatureSource(sftName).asInstanceOf[AccumuloFeatureStore]

      val q = new Query(sftName, Filter.INCLUDE)
      val res = fs.getFeatures(q)

      // tube features
      val aLine = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), "a-line")
      aLine.setDefaultGeometry(WKTUtils.read("LINESTRING(40 40, 40 50)"))
      aLine.setAttribute(org.locationtech.geomesa.core.process.tube.DEFAULT_DTG_FIELD, new DateTime("2011-01-01T00:00:00Z", DateTimeZone.UTC).toDate)
      // aLine.setAttribute("end", new DateTime("2011-01-01T00:00:00Z", DateTimeZone.UTC).toDate)
      aLine.setAttribute("type", "a")
      aLine.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
      val tubeFeatures = new ListFeatureCollection(sft, List(aLine))

      // ensure null values work and don't throw exceptions
      ts.execute( tubeFeatures, res, null, null, null, null, null, null) should not(throwAn[ClassCastException])
    }
  }
}
| mmatz-ccri/geomesa | geomesa-core/src/test/scala/org/locationtech/geomesa/core/process/tube/TubeSelectProcessTest.scala | Scala | apache-2.0 | 13,893 |
/*******************************************************************************
* Copyright 2017 Capital One Services, LLC and Bitwise, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package hydrograph.engine.spark.datasource.jdbc
import java.sql.Connection
import hydrograph.engine.spark.components.utils.HydrographJDBCUtils
import org.apache.spark.sql._
import org.apache.spark.sql.sources.{BaseRelation, CreatableRelationProvider}
import org.slf4j.{Logger, LoggerFactory}
/**
* The Class DefaultSource.
*
* @author Bitwise
*
*/
/**
 * Spark SQL data source that runs the configured JDBC update statement against an
 * existing table for every row of the incoming DataFrame.
 *
 * Fix: the "updateIndex" option previously used its error message as the *default
 * value* (`getOrElse("updateIndex", "Update index must be present…")`), so a missing
 * option silently flowed downstream as a bogus index string instead of failing fast
 * like every other required option. It now throws, consistent with its siblings.
 */
class DefaultSource extends CreatableRelationProvider with Serializable {

  private val LOG: Logger = LoggerFactory.getLogger(classOf[DefaultSource])

  /**
   * Validates required options, opens a JDBC connection and, if the target table
   * exists, applies the update query in batches; otherwise fails.
   *
   * @throws RuntimeException            if any required option is missing.
   * @throws TableDoesNotExistException  if the target table is absent.
   */
  override def createRelation(sqlContext: SQLContext, mode: SaveMode, parameters: Map[String, String], df: DataFrame): BaseRelation = {
    LOG.trace("In method createRelation for JDBC")

    // All options are mandatory — fail fast with a component-specific message.
    val url: String = parameters.getOrElse("connectionURL", throw new RuntimeException("Url option must be specified for output jdbc update Component"))
    val table = parameters.getOrElse("tablename", throw new RuntimeException("Table option must be specified for output jdbc update Component"))
    val driver = parameters.getOrElse("driver", throw new RuntimeException("Driver option must be specified for output jdbc update Component"))
    val user = parameters.getOrElse("user", throw new RuntimeException("User option must be specified for output jdbc update Component"))
    val password = parameters.getOrElse("password", throw new RuntimeException("Password option must be specified for output jdbc update Component"))
    val batchSize: Int = parameters.getOrElse("batchsize", throw new RuntimeException("batchSize option must be specified for output jdbc update Component")).toInt
    val updateIndex = parameters.getOrElse("updateIndex", throw new RuntimeException("Update index must be present for output jdbc update Component"))
    val updateQuery = parameters.getOrElse("updateQuery", throw new RuntimeException("Update query must be present for output jdbc update Component"))

    LOG.debug("Updating table '" + table + "' with update query : " + updateQuery)

    // Defensive null handling kept from the original; with the getOrElse guards above
    // these branches should be unreachable unless the map carries explicit nulls.
    val connectionProperties = new java.util.Properties()
    (user, password) match {
      case (u, p) if u == null && p == null =>
        LOG.warn("Output jdbc update component , both userName and password are empty")
      case (u, p) if u != null && p == null =>
        LOG.warn("Output jdbc update component, password is empty")
        connectionProperties.setProperty("user", user)
      case (u, p) if u == null && p != null =>
        LOG.warn("Output jdbc update component, userName is empty")
        connectionProperties.setProperty("password", password)
      case (u, p) =>
        connectionProperties.setProperty("user", user)
        connectionProperties.setProperty("password", password)
    }

    val conn: Connection = HydrographJDBCUtils().createConnectionFactory(driver, url, connectionProperties)()
    LOG.debug("Connection created successfully with driver '" + driver + "' url '" + url + "'")
    try {
      val tableExists = HydrographJDBCUtils().tableExists(conn, table)
      if (tableExists) {
        HydrographJDBCUtils().saveTable(df, url, table, driver, batchSize, updateQuery, updateIndex, connectionProperties)
      } else {
        LOG.error("Table '" + table + "' does not exist.")
        throw TableDoesNotExistException("Exception : Table '" + table + "' does not exist for update.")
      }
    } finally {
      // Always release the connection, even when the update or existence check fails.
      conn.close()
    }

    HydrographJDBCRelation(sqlContext.sparkSession)
  }
}
case class TableDoesNotExistException(message: String = "", cause: Throwable = null) extends Exception(message, cause)
| capitalone/Hydrograph | hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/datasource/jdbc/DefaultSource.scala | Scala | apache-2.0 | 4,283 |
package ru.finagram.test
import ch.qos.logback.classic.spi.{ ILoggingEvent, IThrowableProxy }
import ch.qos.logback.core.filter.Filter
import ch.qos.logback.core.spi.FilterReply
/** Logback filter that suppresses log events caused by a deliberately thrown [[TestException]]. */
class TestExceptionLogFilter extends Filter[ILoggingEvent] {

  // Cached canonical class name used to recognize TestException throwable proxies.
  private val testExceptionName: String = classOf[TestException].getCanonicalName

  override def decide(event: ILoggingEvent): FilterReply =
    if (isTestException(event.getThrowableProxy)) FilterReply.DENY
    else FilterReply.NEUTRAL

  private def isTestException(proxy: IThrowableProxy): Boolean =
    proxy != null && proxy.getClassName == testExceptionName
}
| finagram/finagram | src/test/scala/ru/finagram/test/TestExceptionLogFilter.scala | Scala | mit | 649 |
// Algebraic data type for a lazily evaluated stream: either empty, or a head/tail
// pair of unevaluated thunks.
case object Empty extends Stream[Nothing]
case class Cons[+A](h: () => A, t: () => Stream[A]) extends Stream[A]

object Stream {
  /** Smart constructor: memoizes head and tail so each is forced at most once. */
  def cons[A](hd: => A, tl: => Stream[A]): Stream[A] = {
    lazy val memoHead = hd
    lazy val memoTail = tl
    Cons(() => memoHead, () => memoTail)
  }

  def empty[A]: Stream[A] = Empty

  /** Builds a stream from the given elements, preserving order. */
  def apply[A](as: A*): Stream[A] =
    if (as.isEmpty) empty else cons(as.head, apply(as.tail: _*))

  /** Infinite stream of 1s; safe to define because `cons` takes its tail by name. */
  val ones: Stream[Int] = Stream.cons(1, ones)
}

import Stream._

trait Stream[+A] {

  /** Forces the whole stream into a strict List (does not terminate on infinite streams). */
  def toList: List[A] = {
    @annotation.tailrec
    def loop(rest: Stream[A], acc: List[A]): List[A] = rest match {
      case Cons(head, tail) => loop(tail(), head() :: acc)
      case _ => acc
    }
    loop(this, Nil).reverse
  }

  /**
   * Right fold with a by-name accumulator: `f` may choose not to force its second
   * argument, which lets the fold terminate early on infinite streams.
   */
  def foldRight[B](z: => B)(f: (A, => B) => B): B = this match {
    case Cons(head, tail) => f(head(), tail().foldRight(z)(f))
    case _ => z
  }

  // 07 — all derived lazily from foldRight.
  def map[B](f: A => B): Stream[B] =
    foldRight(empty[B])((a, acc) => cons(f(a), acc))

  def filter(f: A => Boolean): Stream[A] =
    foldRight(empty[A])((a, acc) => if (f(a)) cons(a, acc) else acc)

  def append[B >: A](s: => Stream[B]): Stream[B] =
    foldRight(s)((a, acc) => cons(a, acc))

  def flatMap[B](f: A => Stream[B]): Stream[B] =
    foldRight(empty[B])((a, acc) => f(a) append acc)
}
object Answer07 {
  import Stream._

  /** Demo of the foldRight-derived combinators from exercise 07. */
  def main(args: Array[String]) = {
    val xs = Stream(1, 2, 3, 4, 5)
    println(xs.map(n => n + 2).toList)                // List(3, 4, 5, 6, 7)
    println(xs.filter(n => n > 2).toList)             // List(3, 4, 5)
    println(xs.append(Stream(1)).toList)              // List(1, 2, 3, 4, 5, 1)
    println(xs.flatMap(n => Stream(n + 2)).toList)    // List(3, 4, 5, 6, 7)
  }
}
| shigemk2/functional_shibuya | chapter05/07.answer.scala | Scala | mit | 1,640 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.businessdetails
import audit.AddressConversions._
import audit.{AddressCreatedEvent, AddressModifiedEvent}
import cats.data.OptionT
import cats.implicits._
import com.google.inject.Inject
import connectors.DataCacheConnector
import controllers.{AmlsBaseController, CommonPlayDependencies}
import forms.{EmptyForm, Form2, InvalidForm, ValidForm}
import models.businessdetails.{BusinessDetails, CorrespondenceAddress, CorrespondenceAddressNonUk}
import play.api.mvc.{MessagesControllerComponents, Request}
import services.AutoCompleteService
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.play.audit.http.connector.AuditResult.Success
import uk.gov.hmrc.play.audit.http.connector.{AuditConnector, AuditResult}
import utils.AuthAction
import views.html.businessdetails._
import scala.concurrent.Future
// Handles display and submission of the non-UK correspondence address page.
class CorrespondenceAddressNonUkController @Inject ()(val dataConnector: DataCacheConnector,
                                                      val auditConnector: AuditConnector,
                                                      val autoCompleteService: AutoCompleteService,
                                                      val authAction: AuthAction,
                                                      val ds: CommonPlayDependencies,
                                                      val cc: MessagesControllerComponents,
                                                      correspondence_address_non_uk: correspondence_address_non_uk) extends AmlsBaseController(ds, cc) {

  /** Renders the form, pre-populated from cached business details when present. */
  def get(edit: Boolean = false) = authAction.async {
    implicit request =>
      dataConnector.fetch[BusinessDetails](request.credId, BusinessDetails.key) map {
        response =>
          // Falls back to an empty form if any step of the lookup is missing.
          // (The binder is misleadingly named ukAddress; it holds the non-UK address.)
          val form: Form2[CorrespondenceAddressNonUk] = (for {
            businessDetails <- response
            correspondenceAddress <- businessDetails.correspondenceAddress
            ukAddress <- correspondenceAddress.nonUkAddress
          } yield Form2[CorrespondenceAddressNonUk](ukAddress)).getOrElse(EmptyForm)
          Ok(correspondence_address_non_uk(form, edit, autoCompleteService.getCountries))
      }
  }

  /**
   * Validates the submission; on success saves the new address, audits the change
   * (audit failures are swallowed via `orElse`), and redirects to the summary page.
   */
  def post(edit: Boolean = false) = authAction.async {
    implicit request => {
      Form2[CorrespondenceAddressNonUk](request.body) match {
        case f: InvalidForm =>
          Future.successful(BadRequest(correspondence_address_non_uk(f, edit, autoCompleteService.getCountries)))
        case ValidForm(_, data) =>
          val doUpdate = for {
            businessDetails:BusinessDetails <- OptionT(dataConnector.fetch[BusinessDetails](request.credId, BusinessDetails.key))
            _ <- OptionT.liftF(dataConnector.save[BusinessDetails]
              (request.credId, BusinessDetails.key, businessDetails.correspondenceAddress(CorrespondenceAddress(None, Some(data)))))
            // oldAddress comes from the pre-save fetch, so the audit records the prior value.
            _ <- OptionT.liftF(auditAddressChange(data, businessDetails.correspondenceAddress.flatMap(a => a.nonUkAddress), edit)) orElse OptionT.some(Success)
          } yield Redirect(routes.SummaryController.get)
          doUpdate getOrElse InternalServerError("Could not update correspondence address")
      }
    }
  }

  /** Sends a modified-address event when editing, otherwise a created-address event. */
  def auditAddressChange(currentAddress: CorrespondenceAddressNonUk, oldAddress: Option[CorrespondenceAddressNonUk], edit: Boolean)
                        (implicit hc: HeaderCarrier, request: Request[_]): Future[AuditResult] = {
    if (edit) {
      auditConnector.sendEvent(AddressModifiedEvent(currentAddress, oldAddress))
    } else {
      auditConnector.sendEvent(AddressCreatedEvent(currentAddress))
    }
  }
}
package io.github.facaiy.fp.scala.c7
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import java.util.concurrent.{Callable,ExecutorService}
import annotation.tailrec
/*
* Implementation is taken from `scalaz` library, with only minor changes. See:
*
* https://github.com/scalaz/scalaz/blob/scalaz-seven/concurrent/src/main/scala/scalaz/concurrent/Actor.scala
*
* This code is copyright Andriy Plokhotnyuk, Runar Bjarnason, and other contributors,
* and is licensed using 3-clause BSD, see LICENSE file at:
*
* https://github.com/scalaz/scalaz/blob/scalaz-seven/etc/LICENCE
*/
/**
* Processes messages of type `A`, one at a time. Messages are submitted to
* the actor with the method `!`. Processing is typically performed asynchronously,
* this is controlled by the provided `strategy`.
*
* Memory consistency guarantee: when each message is processed by the `handler`, any memory that it
* mutates is guaranteed to be visible by the `handler` when it processes the next message, even if
* the `strategy` runs the invocations of `handler` on separate threads. This is achieved because
* the `Actor` reads a volatile memory location before entering its event loop, and writes to the same
* location before suspending.
*
* Implementation based on non-intrusive MPSC node-based queue, described by Dmitriy Vyukov:
* [[http://www.1024cores.net/home/lock-free-algorithms/queues/non-intrusive-mpsc-node-based-queue]]
*
* @see scalaz.concurrent.Promise for a use case.
*
* @param handler The message handler
* @param onError Exception handler, called if the message handler throws any `Throwable`.
* @param strategy Execution strategy, for example, a strategy that is backed by an `ExecutorService`
* @tparam A The type of messages accepted by this actor.
*/
final class Actor[A](strategy: Strategy)(handler: A => Unit, onError: Throwable => Unit = throw(_)) {
  self =>

  // Consumer end of the node queue: the last node whose message was already handled.
  private val tail = new AtomicReference(new Node[A]())
  // 1 = idle (no processing pass scheduled), 0 = a pass is scheduled or running.
  private val suspended = new AtomicInteger(1)
  // Producer end of the queue; starts pointing at the same sentinel node as tail.
  private val head = new AtomicReference(tail.get)

  /** Alias for `apply` */
  def !(a: A) {
    // Atomically swing head to the new node, then link the previous head to it.
    // lazySet is sufficient because the consumer re-checks `n.get` before suspending.
    val n = new Node(a)
    head.getAndSet(n).lazySet(n)
    trySchedule()
  }

  /** Pass the message `a` to the mailbox of this actor */
  def apply(a: A) {
    this ! a
  }

  // Adapts this actor to accept `B`s by mapping each message through `f` first.
  def contramap[B](f: B => A): Actor[B] =
    new Actor[B](strategy)((b: B) => (this ! f(b)), onError)

  // Schedules a processing pass only if the actor was idle (CAS 1 -> 0 wins once).
  private def trySchedule() {
    if (suspended.compareAndSet(1, 0)) schedule()
  }

  private def schedule() {
    strategy(act())
  }

  // One processing pass: handle up to a fixed batch of messages, then either
  // reschedule (more work done) or suspend, re-checking for racing producers.
  private def act() {
    val t = tail.get
    val n = batchHandle(t, 1024)
    if (n ne t) {
      // Progress was made: clear the handled message, advance tail, keep going.
      n.a = null.asInstanceOf[A]
      tail.lazySet(n)
      schedule()
    } else {
      // No messages were available: mark idle, then re-check in case a producer
      // enqueued between the empty check and suspension.
      suspended.set(1)
      if (n.get ne null) trySchedule()
    }
  }

  // Handles up to `i` consecutive messages starting after node `t`; returns the
  // last handled node (or `t` itself when the queue was empty).
  @tailrec
  private def batchHandle(t: Node[A], i: Int): Node[A] = {
    val n = t.get
    if (n ne null) {
      try {
        handler(n.a)
      } catch {
        case ex: Throwable => onError(ex)
      }
      if (i > 0) batchHandle(n, i - 1) else n
    } else t
  }
}
private class Node[A](var a: A = null.asInstanceOf[A]) extends AtomicReference[Node[A]]
object Actor {

  /** Create an `Actor` backed by the given `ExecutorService`. */
  def apply[A](es: ExecutorService)(handler: A => Unit, onError: Throwable => Unit = throw(_)): Actor[A] = {
    val executorBackedStrategy = Strategy.fromExecutorService(es)
    new Actor(executorBackedStrategy)(handler, onError)
  }
}
/**
 * Provides a function for evaluating expressions, possibly asynchronously.
 * The `apply` function should typically begin evaluating its argument
 * immediately. The returned thunk can be used to block until the resulting `A`
 * is available.
 */
trait Strategy {
  def apply[A](a: => A): () => A
}

object Strategy {

  /**
   * We can create a `Strategy` from any `ExecutorService`. It's a little more
   * convenient than submitting `Callable` objects directly.
   */
  def fromExecutorService(es: ExecutorService): Strategy = new Strategy {
    def apply[A](a: => A): () => A = {
      // Submit eagerly; the returned thunk blocks on the Future until done.
      val pending = es.submit(new Callable[A] { def call = a })
      () => pending.get
    }
  }

  /**
   * A `Strategy` which begins executing its argument immediately in the calling thread.
   */
  def sequential: Strategy = new Strategy {
    def apply[A](a: => A): () => A = {
      val result = a
      () => result
    }
  }
}
| ningchi/book_notes | Manning_Functional_Programming_in_Scala/src/main/scala/io/github/facaiy/fp/scala/c7/Actor.scala | Scala | cc0-1.0 | 4,318 |
package dk.bayes.factorgraph
import dk.bayes.factorgraph.factor.api.SingleFactor
/**
 * This class represents an outgoing gate from a factor/variable node in a factor graph.
 *
 * @author Daniel Korzekwa
 *
 * @param initialMsg The initial outgoing message sent through the gate
 */
sealed abstract class Gate(initialMsg: SingleFactor) {

  type END_GATE <: Gate

  // The gate on the opposite end of this edge; unset until wired up.
  private var _endGate: Option[END_GATE] = None

  // Current and previously-sent messages for this gate.
  private var _message: SingleFactor = initialMsg
  private var _oldMessage: SingleFactor = initialMsg

  /** Allows for comparing the age between different messages and finding the message that was updated least recently. */
  private var _msgIndex: Long = -1

  def setEndGate(gate: END_GATE): Unit = {
    _endGate = Some(gate)
  }

  /** Throws if the end gate has not been set yet. */
  def getEndGate(): END_GATE = _endGate.get

  /** Records `newMessage` as current, demoting the previous message to "old". */
  def setMessage(newMessage: SingleFactor, msgIndex: Long): Unit = {
    _oldMessage = _message
    _message = newMessage
    _msgIndex = msgIndex
  }

  def getMsgIndex(): Long = _msgIndex

  def getMessage(): SingleFactor = _message

  def getOldMessage(): SingleFactor = _oldMessage
}
// Gate attached to a factor node; its opposite end is always a VarGate.
case class FactorGate(initialMsg: SingleFactor) extends Gate(initialMsg) {
  type END_GATE = VarGate
  // NOTE(review): public mutable state; callers elsewhere may read it directly,
  // so it cannot be made private without an interface change.
  var _factorNode: Option[FactorNode] = None
  def setFactorNode(factorNode: FactorNode) = _factorNode = Some(factorNode)
  // Throws if the factor node has not been set yet.
  def getFactorNode() = _factorNode.get
}
// Gate attached to a variable node; its opposite end is always a FactorGate.
case class VarGate(initialMsg: SingleFactor, varNode: VarNode) extends Gate(initialMsg) {
  type END_GATE = FactorGate
}
} | danielkorzekwa/bayes-scala | src/main/scala/dk/bayes/factorgraph/Gate.scala | Scala | bsd-2-clause | 1,526 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import java.lang.{Double => jDouble, Float => jFloat, Long => jLong}
import java.util.Date
import org.locationtech.jts.geom.Geometry
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.junit.runner.RunWith
import org.locationtech.geomesa.curve.{BinnedTime, TimePeriod, Z2SFC}
import org.locationtech.geomesa.utils.geotools.GeoToolsDateFormat
import org.locationtech.geomesa.utils.text.WKTUtils
import org.locationtech.sfcurve.zorder.Z2
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class FrequencyTest extends Specification with StatTestHelper {

  /** Builds a Frequency stat over `attribute` at `precision`, optionally observing the shared test features. */
  def createStat[T](attribute: String, precision: Int, observe: Boolean): Frequency[T] = {
    val s = Stat(sft, Stat.Frequency(attribute, precision))
    if (observe) {
      features.foreach { s.observe }
    }
    s.asInstanceOf[Frequency[T]]
  }

  // Per-attribute convenience factories, each delegating to createStat.
  def stringStat(precision: Int, observe: Boolean = true) =
    createStat[String]("strAttr", precision, observe)

  def intStat(precision: Int, observe: Boolean = true) =
    createStat[Integer]("intAttr", precision, observe)

  def longStat(precision: Int, observe: Boolean = true) =
    createStat[jLong]("longAttr", precision, observe)

  def floatStat(precision: Int, observe: Boolean = true) =
    createStat[jFloat]("floatAttr", precision, observe)

  def doubleStat(precision: Int, observe: Boolean = true) =
    createStat[jDouble]("doubleAttr", precision, observe)

  def dateStat(precision: Int, observe: Boolean = true) =
    createStat[Date]("dtg", precision, observe)

  def geomStat(precision: Int, observe: Boolean = true) =
    createStat[Geometry]("geom", precision, observe)

  /** Parses an ISO date string (interpreted as UTC) into a java.util.Date. */
  def toDate(string: String) = java.util.Date.from(java.time.LocalDateTime.parse(string, GeoToolsDateFormat).toInstant(java.time.ZoneOffset.UTC))
  /** Parses a WKT string into a Geometry. */
  def toGeom(string: String) = WKTUtils.read(string)

  "Frequency stat" should {
    "enumerate ranges" >> {
      val min = Z2SFC.invert(Z2(2, 2))
      val max = Z2SFC.invert(Z2(3, 6))
      val ranges = Z2SFC.ranges(Seq((min._1, min._2, max._1, max._2)))
      val indices = Frequency.enumerate(ranges, 64).toSeq
      indices must containTheSameElementsAs(Seq(12, 13, 14, 15, 36, 37, 38, 39, 44, 45))
    }
    "support weekly binning" >> {
      val stat = Stat(sft, Stat.Frequency("longAttr", "dtg", TimePeriod.Week, 1)).asInstanceOf[Frequency[Long]]
      val weekStart = 45 * 52 // approximately jan 2015
      val weeks = Set(weekStart, weekStart + 1, weekStart + 2, weekStart + 3)
      val dayStart = BinnedTime.Epoch.plusWeeks(weekStart).plusHours(1)
      // observe 4 weeks' worth of features, one per day, cycling longAttr 0-111
      (0 until 28 * 4).foreach { i =>
        val sf = SimpleFeatureBuilder.build(sft, Array[AnyRef](), i.toString)
        sf.setAttribute("longAttr", i)
        sf.setAttribute("geom", "POINT(-75 45)")
        sf.setAttribute("dtg", Date.from(dayStart.plusDays(i % 28).toInstant))
        stat.observe(sf)
      }
      val serializer = StatSerializer(sft)
      stat.sketchMap must haveSize(4)
      stat.sketchMap.keySet mustEqual weeks
      val offsets = (0 until 4).map(_ * 28)
      forall(offsets.flatMap(o => o + 0 until o + 7))(stat.count(weekStart.toShort, _) mustEqual 1)
      forall(offsets.flatMap(o => o + 7 until o + 14))(stat.count((weekStart + 1).toShort, _) mustEqual 1)
      forall(offsets.flatMap(o => o + 14 until o + 21))(stat.count((weekStart + 2).toShort, _) mustEqual 1)
      forall(offsets.flatMap(o => o + 21 until o + 28))(stat.count((weekStart + 3).toShort, _) mustEqual 1)
      val serialized = serializer.serialize(stat)
      val deserialized = serializer.deserialize(serialized)
      stat.isEquivalent(deserialized) must beTrue
      val splits = stat.splitByTime.toMap
      splits must haveSize(4)
      splits.keySet mustEqual weeks
      forall(offsets.flatMap(o => o + 0 until o + 7))(d => splits(weekStart.toShort).count(d) mustEqual 1)
      forall(offsets.flatMap(o => o + 7 until o + 14))(d => splits((weekStart + 1).toShort).count(d) mustEqual 1)
      forall(offsets.flatMap(o => o + 14 until o + 21))(d => splits((weekStart + 2).toShort).count(d) mustEqual 1)
      forall(offsets.flatMap(o => o + 21 until o + 28))(d => splits((weekStart + 3).toShort).count(d) mustEqual 1)
    }
    "work with strings" >> {
      "be empty initially" >> {
        val stat = stringStat(6, observe = false)
        stat.isEmpty must beTrue
        stat.size mustEqual 0
      }
      "correctly bin values" >> {
        val stat = stringStat(6)
        stat.isEmpty must beFalse
        stat.size mustEqual 100
        forall(0 until 100)(i => stat.count(f"abc$i%03d") must beBetween(1L, 2L))
        stat.count("foo") mustEqual 0
      }
      "serialize and deserialize" >> {
        val stat = stringStat(6)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[String]]
        unpacked.asInstanceOf[Frequency[String]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[String]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[String]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[String]].toJson mustEqual stat.toJson
      }
      "serialize and deserialize empty stats" >> {
        val stat = stringStat(6, observe = false)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[String]]
        unpacked.asInstanceOf[Frequency[String]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[String]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[String]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[String]].toJson mustEqual stat.toJson
      }
      "deserialize as immutable value" >> {
        val stat = stringStat(6)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed, immutable = true)
        unpacked must beAnInstanceOf[Frequency[String]]
        unpacked.asInstanceOf[Frequency[String]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[String]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[String]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[String]].toJson mustEqual stat.toJson
        unpacked.clear must throwAn[Exception]
        unpacked.+=(stat) must throwAn[Exception]
        unpacked.observe(features.head) must throwAn[Exception]
        unpacked.unobserve(features.head) must throwAn[Exception]
      }
      "combine two Frequencies" >> {
        val stat = stringStat(6)
        val stat2 = stringStat(6, observe = false)
        features2.foreach { stat2.observe }
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(f"abc$i%03d") must beBetween(1L, 2L))
        stat2.count("foo") mustEqual 0L
        stat += stat2
        stat.size mustEqual 200
        forall(0 until 200)(i => stat.count(f"abc$i%03d") must beBetween(1L, 3L))
        stat.count("foo") mustEqual 0L
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(f"abc$i%03d") must beBetween(1L, 2L))
        stat2.count("foo") mustEqual 0L
      }
      "clear" >> {
        val stat = stringStat(6)
        stat.clear()
        stat.isEmpty must beTrue
        stat.size mustEqual 0
        // note: %03d (zero-padded) matches the key format used when observing,
        // unlike the previous %3d (space-padded), which checked keys that were
        // never observed in the first place
        forall(0 until 200)(i => stat.count(f"abc$i%03d") mustEqual 0)
      }
    }
    "work with integers" >> {
      "be empty initially" >> {
        val stat = intStat(1, observe = false)
        stat.isEmpty must beTrue
        stat.size mustEqual 0
      }
      "correctly bin values" >> {
        val stat = intStat(1)
        stat.isEmpty must beFalse
        stat.size mustEqual 100
        forall(0 until 100)(i => stat.count(i) must beBetween(1L, 2L))
        stat.count(200) mustEqual 0
      }
      "serialize and deserialize" >> {
        val stat = intStat(1)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[Integer]]
        unpacked.asInstanceOf[Frequency[Integer]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[Integer]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[Integer]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[Integer]].toJson mustEqual stat.toJson
      }
      "serialize and deserialize empty stats" >> {
        val stat = intStat(1, observe = false)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[Integer]]
        unpacked.asInstanceOf[Frequency[Integer]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[Integer]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[Integer]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[Integer]].toJson mustEqual stat.toJson
      }
      "combine two Frequencies" >> {
        val stat = intStat(1)
        val stat2 = intStat(1, observe = false)
        features2.foreach { stat2.observe }
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(i) must beBetween(1L, 2L))
        stat2.count(300) mustEqual 0L
        stat += stat2
        stat.size mustEqual 200
        forall(0 until 200)(i => stat.count(i) must beBetween(1L, 3L))
        stat.count(300) mustEqual 0L
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(i) must beBetween(1L, 2L))
        stat2.count(300) mustEqual 0L
      }
      "clear" >> {
        val stat = intStat(1)
        stat.clear()
        stat.isEmpty must beTrue
        stat.size mustEqual 0
        forall(0 until 200)(i => stat.count(i) mustEqual 0)
      }
    }
    "work with longs" >> {
      "be empty initially" >> {
        val stat = longStat(1, observe = false)
        stat.isEmpty must beTrue
        stat.size mustEqual 0
      }
      "correctly bin values" >> {
        val stat = longStat(1)
        stat.isEmpty must beFalse
        stat.size mustEqual 100
        forall(0 until 100)(i => stat.count(i.toLong) must beBetween(1L, 2L))
        stat.count(200L) mustEqual 0
      }
      "serialize and deserialize" >> {
        val stat = longStat(1)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[jLong]]
        unpacked.asInstanceOf[Frequency[jLong]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[jLong]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[jLong]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[jLong]].toJson mustEqual stat.toJson
      }
      "serialize and deserialize empty stats" >> {
        val stat = longStat(1, observe = false)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[jLong]]
        unpacked.asInstanceOf[Frequency[jLong]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[jLong]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[jLong]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[jLong]].toJson mustEqual stat.toJson
      }
      "combine two Frequencies" >> {
        val stat = longStat(1)
        val stat2 = longStat(1, observe = false)
        features2.foreach { stat2.observe }
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(i.toLong) must beBetween(1L, 2L))
        stat2.count(300L) mustEqual 0L
        stat += stat2
        stat.size mustEqual 200
        forall(0 until 200)(i => stat.count(i.toLong) must beBetween(1L, 3L))
        stat.count(300L) mustEqual 0L
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(i.toLong) must beBetween(1L, 2L))
        stat2.count(300L) mustEqual 0L
      }
      "clear" >> {
        val stat = longStat(1)
        stat.clear()
        stat.isEmpty must beTrue
        stat.size mustEqual 0
        forall(0 until 200)(i => stat.count(i.toLong) mustEqual 0)
      }
    }
    "work with floats" >> {
      "be empty initially" >> {
        val stat = floatStat(1, observe = false)
        stat.isEmpty must beTrue
        stat.size mustEqual 0
      }
      "correctly bin values" >> {
        val stat = floatStat(1)
        stat.isEmpty must beFalse
        stat.size mustEqual 100
        forall(0 until 100)(i => stat.count(i.toFloat) must beBetween(1L, 2L))
        stat.count(200f) mustEqual 0
      }
      "serialize and deserialize" >> {
        val stat = floatStat(1)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[jFloat]]
        unpacked.asInstanceOf[Frequency[jFloat]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[jFloat]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[jFloat]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[jFloat]].toJson mustEqual stat.toJson
      }
      "serialize and deserialize empty stats" >> {
        val stat = floatStat(1, observe = false)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[jFloat]]
        unpacked.asInstanceOf[Frequency[jFloat]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[jFloat]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[jFloat]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[jFloat]].toJson mustEqual stat.toJson
      }
      "combine two Frequencies" >> {
        val stat = floatStat(1)
        val stat2 = floatStat(1, observe = false)
        features2.foreach { stat2.observe }
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(i.toFloat) must beBetween(1L, 2L))
        stat2.count(300f) mustEqual 0L
        stat += stat2
        stat.size mustEqual 200
        forall(0 until 200)(i => stat.count(i.toFloat) must beBetween(1L, 3L))
        stat.count(300f) mustEqual 0L
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(i.toFloat) must beBetween(1L, 2L))
        stat2.count(300f) mustEqual 0L
      }
      "clear" >> {
        val stat = floatStat(1)
        stat.clear()
        stat.isEmpty must beTrue
        stat.size mustEqual 0
        forall(0 until 200)(i => stat.count(i.toFloat) mustEqual 0)
      }
    }
    "work with doubles" >> {
      "be empty initially" >> {
        val stat = doubleStat(1, observe = false)
        stat.isEmpty must beTrue
        stat.size mustEqual 0
      }
      "correctly bin values" >> {
        val stat = doubleStat(1)
        stat.isEmpty must beFalse
        stat.size mustEqual 100
        forall(0 until 100)(i => stat.count(i.toDouble) must beBetween(1L, 2L))
        stat.count(200d) mustEqual 0
      }
      "serialize and deserialize" >> {
        val stat = doubleStat(1)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[jDouble]]
        unpacked.asInstanceOf[Frequency[jDouble]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[jDouble]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[jDouble]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[jDouble]].toJson mustEqual stat.toJson
      }
      "serialize and deserialize empty stats" >> {
        val stat = doubleStat(1, observe = false)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[jDouble]]
        unpacked.asInstanceOf[Frequency[jDouble]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[jDouble]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[jDouble]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[jDouble]].toJson mustEqual stat.toJson
      }
      "combine two Frequencies" >> {
        val stat = doubleStat(1)
        val stat2 = doubleStat(1, observe = false)
        features2.foreach { stat2.observe }
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(i.toDouble) must beBetween(1L, 2L))
        stat2.count(300d) mustEqual 0L
        stat += stat2
        stat.size mustEqual 200
        forall(0 until 200)(i => stat.count(i.toDouble) must beBetween(1L, 3L))
        stat.count(300d) mustEqual 0L
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(i.toDouble) must beBetween(1L, 2L))
        stat2.count(300d) mustEqual 0L
      }
      "clear" >> {
        val stat = doubleStat(1)
        stat.clear()
        stat.isEmpty must beTrue
        stat.size mustEqual 0
        forall(0 until 200)(i => stat.count(i.toDouble) mustEqual 0)
      }
    }
    "work with dates" >> {
      "be empty initially" >> {
        val stat = dateStat(1, observe = false)
        stat.isEmpty must beTrue
        stat.size mustEqual 0
      }
      "correctly bin values" >> {
        val stat = dateStat(1)
        stat.isEmpty must beFalse
        stat.size mustEqual 100
        forall(0 until 100)(i => stat.count(toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) must beBetween(4L, 5L))
        stat.count(toDate(f"2012-01-05T00:00:00.000Z")) mustEqual 0
      }
      "serialize and deserialize" >> {
        val stat = dateStat(1)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[Date]]
        unpacked.asInstanceOf[Frequency[Date]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[Date]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[Date]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[Date]].toJson mustEqual stat.toJson
      }
      "serialize and deserialize empty stats" >> {
        val stat = dateStat(1, observe = false)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[Date]]
        unpacked.asInstanceOf[Frequency[Date]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[Date]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[Date]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[Date]].toJson mustEqual stat.toJson
      }
      "combine two Frequencies" >> {
        val stat = dateStat(1)
        val stat2 = dateStat(1, observe = false)
        features2.foreach { stat2.observe }
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(toDate(f"2012-01-02T${i%24}%02d:00:00.000Z")) must beBetween(4L, 5L))
        stat2.count(toDate(f"2012-01-05T00:00:00.000Z")) mustEqual 0L
        stat += stat2
        stat.size mustEqual 200
        forall(0 until 100)(i => stat.count(toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) must beBetween(4L, 5L))
        forall(100 until 200)(i => stat.count(toDate(f"2012-01-02T${i%24}%02d:00:00.000Z")) must beBetween(4L, 5L))
        stat.count(toDate(f"2012-01-05T00:00:00.000Z")) mustEqual 0L
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(toDate(f"2012-01-02T${i%24}%02d:00:00.000Z")) must beBetween(4L, 5L))
        stat2.count(toDate(f"2012-01-05T00:00:00.000Z")) mustEqual 0L
      }
      "clear" >> {
        val stat = dateStat(1)
        stat.clear()
        stat.isEmpty must beTrue
        stat.size mustEqual 0
        forall(0 until 100)(i => stat.count(toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) mustEqual 0)
        forall(100 until 200)(i => stat.count(toDate(f"2012-01-02T${i%24}%02d:00:00.000Z")) mustEqual 0)
      }
    }
    "work with geometries" >> {
      "be empty initially" >> {
        val stat = geomStat(24, observe = false)
        stat.isEmpty must beTrue
        stat.size mustEqual 0
      }
      "correctly bin values" >> {
        val stat = geomStat(24)
        stat.isEmpty must beFalse
        stat.size mustEqual 100
        forall(0 until 100)(i => stat.count(toGeom(s"POINT(-$i ${i / 2})")) must beBetween(1L, 6L))
      }
      "serialize and deserialize" >> {
        val stat = geomStat(24)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[Geometry]]
        unpacked.asInstanceOf[Frequency[Geometry]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[Geometry]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[Geometry]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[Geometry]].toJson mustEqual stat.toJson
      }
      "serialize and deserialize empty stats" >> {
        val stat = geomStat(24, observe = false)
        val packed = StatSerializer(sft).serialize(stat)
        val unpacked = StatSerializer(sft).deserialize(packed)
        unpacked must beAnInstanceOf[Frequency[Geometry]]
        unpacked.asInstanceOf[Frequency[Geometry]].property mustEqual stat.property
        unpacked.asInstanceOf[Frequency[Geometry]].precision mustEqual stat.precision
        unpacked.asInstanceOf[Frequency[Geometry]].size mustEqual stat.size
        unpacked.asInstanceOf[Frequency[Geometry]].toJson mustEqual stat.toJson
      }
      "combine two Frequencies" >> {
        val stat = geomStat(24)
        val stat2 = geomStat(24, observe = false)
        features2.foreach { stat2.observe }
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(toGeom(s"POINT(${i -20} ${i / 2 - 20})")) must beBetween(1L, 6L))
        stat += stat2
        stat.size mustEqual 200
        forall(0 until 100)(i => stat.count(toGeom(s"POINT(-$i ${i / 2})")) must beBetween(1L, 10L))
        forall(100 until 200)(i => stat.count(toGeom(s"POINT(${i -20} ${i / 2 - 20})")) must beBetween(1L, 10L))
        stat2.size mustEqual 100
        forall(100 until 200)(i => stat2.count(toGeom(s"POINT(${i -20} ${i / 2 - 20})")) must beBetween(1L, 6L))
      }
      "clear" >> {
        val stat = geomStat(24)
        stat.clear()
        stat.isEmpty must beTrue
        stat.size mustEqual 0
        forall(0 until 100)(i => stat.count(toGeom(s"POINT(-$i ${i / 2})")) mustEqual 0)
        forall(100 until 200)(i => stat.count(toGeom(s"POINT(${i -20} ${i / 2 - 20})")) mustEqual 0)
      }
    }
  }
}
| elahrvivaz/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/FrequencyTest.scala | Scala | apache-2.0 | 23,770 |
Subsets and Splits
Filtered Scala Code Snippets
This query retrieves a filtered sample of code snippets matching specific criteria, offering a surface-level overview of the dataset's contents.