code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright 2013 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2013-2020 http4s.org
*
* SPDX-License-Identifier: Apache-2.0
*
* Based on https://github.com/twitter/finagle/blob/6e2462acc32ac753bf4e9d8e672f9f361be6b2da/finagle-http/src/main/scala/com/twitter/finagle/http/path/Path.scala
* Copyright 2017, Twitter Inc.
*/
package org.http4s.dsl.impl
import cats.Applicative
import cats.Foldable
import cats.Monad
import cats.data.Validated._
import cats.data._
import cats.syntax.all._
import org.http4s.Uri.Path
import org.http4s.Uri.Path._
import org.http4s._
import org.http4s.headers.Allow
import scala.util.Try
/** Splits a request into the request itself and its multi-valued query parameters,
  * so that query matchers can be chained after a path match.
  */
object :? {
  def unapply[F[_]](req: Request[F]): Some[(Request[F], Map[String, collection.Seq[String]])] =
    Some(req -> req.multiParams)
}
/** File extension extractor */
object ~ {

  /** File extension extractor for Path:
    * {{{
    *   Path("example.json") match {
    *     case Root / "example" ~ "json" => ...
    *   }
    * }}}
    */
  def unapply(path: Path): Option[(Path, String)] =
    path match {
      case Root => None
      case parent / last =>
        unapply(last).map { case (stem, extension) =>
          (parent / Path.Segment(stem), extension)
        }
    }

  /** File extension matcher for String:
    * {{{
    *   "example.json" match {
    *     case "example" ~ "json" => ...
    *   }
    * }}}
    * A name without a dot matches with an empty extension.
    */
  def unapply(fileName: String): Option[(String, String)] = {
    val dot = fileName.lastIndexOf('.')
    if (dot < 0) Some((fileName, ""))
    else Some((fileName.substring(0, dot), fileName.substring(dot + 1)))
  }
}
/** Splits a path into its parent and its (decoded) final segment.
  * A trailing slash is treated as an empty final segment.
  */
object / {
  def unapply(path: Path): Option[(Path, String)] =
    if (path != Root && path.endsWithSlash)
      Some(path.dropEndsWithSlash -> "")
    else
      path.segments match {
        case init :+ last =>
          val parent =
            if (init.isEmpty) {
              // Keep the absoluteness of the original path for the parent.
              if (path.absolute) Root else empty
            } else Path(init, absolute = path.absolute)
          Some(parent -> last.decoded())
        case _ => None
      }
}
object -> {

  /** HttpMethod extractor:
    * {{{
    *   (request.method, Path(request.path)) match {
    *     case Method.GET -> Root / "test.json" => ...
    *   }
    * }}}
    */
  def unapply[F[_]](req: Request[F]): Some[(Method, Path)] =
    Some(req.method -> req.pathInfo)
}
object ->> {
  private val allMethods = Method.all.toSet

  /** Extractor to match an http resource and then enumerate all supported methods:
    * {{{
    *   (request.method, Path(request.path)) match {
    *     case withMethod ->> Root / "test.json" => withMethod {
    *       case Method.GET => ...
    *       case Method.POST => ...
    *     }
    *   }
    * }}}
    *
    * Returns an error response if the method is not matched, in accordance with
    * [[https://datatracker.ietf.org/doc/html/rfc7231#section-4.1 RFC7231]]:
    * 405 (with an Allow header listing the supported methods) for known methods,
    * 501 for methods this server does not know at all.
    */
  def unapply[F[_]: Applicative](
      req: Request[F]
  ): Some[(PartialFunction[Method, F[Response[F]]] => F[Response[F]], Path)] = {
    // Runs the caller-supplied handler, falling back to the RFC7231 error responses.
    def dispatch(pf: PartialFunction[Method, F[Response[F]]]): F[Response[F]] =
      if (pf.isDefinedAt(req.method)) pf(req.method)
      else
        Applicative[F].pure {
          if (allMethods.contains(req.method))
            Response(
              status = Status.MethodNotAllowed,
              headers = Headers(Allow(allMethods.filter(pf.isDefinedAt))),
            )
          else Response(status = Status.NotImplemented)
        }
    Some((dispatch _, req.pathInfo))
  }
}
class MethodConcat(val methods: Set[Method]) {

  /** HttpMethod 'or' extractor:
    * {{{
    *   val request: Request = ???
    *   request match {
    *     case (Method.GET | Method.POST) -> Root / "123" => ???
    *   }
    * }}}
    */
  def unapply(method: Method): Option[Method] =
    if (methods(method)) Some(method) else None
}
/** Path separator extractor:
  * {{{
  *   Path("/1/2/3/test.json") match {
  *     case "1" /: "2" /: _ => ...
  *   }
  * }}}
  */
object /: {
  def unapply(path: Path): Option[(String, Path)] =
    path.segments.headOption.map { head =>
      head.decoded() -> Path(path.segments.tail)
    }
}
/** Base class for path-variable extractors: matches a nonempty segment for which
  * the supplied conversion succeeds.
  */
protected class PathVar[A](cast: String => Try[A]) {
  def unapply(str: String): Option[A] =
    if (str.isEmpty) None
    else cast(str).toOption
}
/** Integer extractor of a path variable:
  * {{{
  *   Path("/user/123") match {
  *     case Root / "user" / IntVar(userId) => ...
  *   }
  * }}}
  */
object IntVar extends PathVar(s => Try(s.toInt))
/** Long extractor of a path variable:
  * {{{
  *   Path("/user/123") match {
  *     case Root / "user" / LongVar(userId) => ...
  *   }
  * }}}
  */
object LongVar extends PathVar(s => Try(s.toLong))
/** UUID extractor of a path variable:
  * {{{
  *   Path("/user/13251d88-7a73-4fcf-b935-54dfae9f023e") match {
  *     case Root / "user" / UUIDVar(userId) => ...
  *   }
  * }}}
  */
object UUIDVar extends PathVar(s => Try(java.util.UUID.fromString(s)))
/** Matrix path variable extractor
  * For an example see [[https://www.w3.org/DesignIssues/MatrixURIs.html MatrixURIs]]
  * This is useful for representing a resource that may be addressed in multiple
  * dimensions where order is unimportant
  *
  * {{{
  *
  * object BoardVar extends MatrixVar("square", List("x", "y"))
  * Path("/board/square;x=5;y=3") match {
  *   case Root / "board" / BoardVar(IntVar(x), IntVar(y)) => ...
  * }
  * }}}
  */
abstract class MatrixVar[F[_]: Foldable](name: String, domain: F[String]) {

  // Materialized once so `unapplySeq` can traverse the expected keys in order.
  private val domainList = domain.toList

  // Matches a segment of the form `name;k1=v1;k2=v2;...` and yields the values
  // of the keys in `domain`, in domain order. Fails if the segment name differs,
  // a domain key is missing, or any `k=v` pair is malformed.
  def unapplySeq(str: String): Option[Seq[String]] =
    if (str.nonEmpty) {
      val firstSemi = str.indexOf(';')
      // No ';' at all: only acceptable when the whole segment equals `name`
      // and no keys are expected.
      if (firstSemi < 0 && (domain.nonEmpty || name != str)) None
      else if (firstSemi < 0 && name == str) Some(Seq.empty[String])
      // Matrix segment didn't match the expected name
      else if (str.substring(0, firstSemi) != name) None
      else {
        // Parse the `;`-separated k=v pairs via a tail-recursive loop over RecState.
        val assocListOpt =
          if (firstSemi >= 0)
            Monad[Option].tailRecM(MatrixVar.RecState(str, firstSemi + 1, List.empty))(toAssocList)
          else Some(List.empty[(String, String)])
        // Every domain key must be present among the parsed pairs.
        assocListOpt.flatMap { assocList =>
          domainList.traverse(dom => assocList.find(_._1 == dom).map(_._2))
        }
      }
    } else None

  // One step of the tailRecM loop: consume the next `;`-delimited chunk starting
  // at `recState.position`, either accumulating a parsed pair (Left = continue)
  // or finishing (Right), or failing entirely (None) on malformed input.
  private def toAssocList(
      recState: MatrixVar.RecState
  ): Option[Either[MatrixVar.RecState, List[(String, String)]]] =
    // We can't extract anything else but there was a trailing ;
    if (recState.position >= recState.str.length - 1)
      Some(Right(recState.accumulated))
    else {
      val nextSplit = recState.str.indexOf(';', recState.position)
      // This is the final ; delimited segment
      if (nextSplit < 0)
        toAssocListElem(recState.str, recState.position, recState.str.length)
          .map(elem => Right(elem :: recState.accumulated))
      // An internal empty ; delimited segment so just skip
      else if (nextSplit == recState.position)
        Some(Left(recState.copy(position = nextSplit + 1)))
      else
        toAssocListElem(recState.str, recState.position, nextSplit)
          .map(elem =>
            Left(
              recState.copy(position = nextSplit + 1, accumulated = elem :: recState.accumulated)
            )
          )
    }

  // Parses `str.substring(position, end)` as a single `key=value` pair.
  private def toAssocListElem(str: String, position: Int, end: Int): Option[(String, String)] = {
    val delimSplit = str.indexOf('=', position)
    val nextDelimSplit = str.indexOf('=', delimSplit + 1)
    // if the segment does not contain an = inside then it is invalid
    if (delimSplit < 0 || delimSplit === position || delimSplit >= end) None
    // if the segment contains multiple = then it is invalid
    else if (nextDelimSplit < end && nextDelimSplit >= 0) None
    else Some(str.substring(position, delimSplit) -> str.substring(delimSplit + 1, end))
  }
}
object MatrixVar {
  // Accumulator for the tail-recursive parser in MatrixVar#toAssocList:
  // `position` is the index of the next unread character of `str`, and
  // `accumulated` holds the key/value pairs parsed so far (in reverse order).
  private final case class RecState(str: String, position: Int, accumulated: List[(String, String)])
}
/** Multiple param extractor:
  * {{{
  *   object A extends QueryParamDecoderMatcher[String]("a")
  *   object B extends QueryParamDecoderMatcher[Int]("b")
  *   val routes = HttpRoutes.of {
  *     case GET -> Root / "user" :? A(a) +& B(b) => ...
  *   }
  * }}}
  * Duplicates the parameter map so that two matchers can each consume it.
  */
object +& {
  def unapply(
      params: Map[String, collection.Seq[String]]
  ): Some[(Map[String, collection.Seq[String]], Map[String, collection.Seq[String]])] =
    Some(params -> params)
}
/** param extractor using [[QueryParamDecoder]]:
  * {{{
  *   case class Foo(i: Int)
  *   implicit val fooDecoder: QueryParamDecoder[Foo] = ...
  *
  *   object FooMatcher extends QueryParamDecoderMatcher[Foo]("foo")
  *   val routes = HttpRoutes.of {
  *     case GET -> Root / "closest" :? FooMatcher(2) => ...
  *   }
  * }}}
  */
abstract class QueryParamDecoderMatcher[T: QueryParamDecoder](name: String) {

  // Decodes a single raw query value, discarding the failure detail.
  private def decodeOne(raw: String): Option[T] =
    QueryParamDecoder[T].decode(QueryParameterValue(raw)).toOption

  /** Matches all occurrences of the parameter; fails if any value fails to decode. */
  def unapplySeq(params: Map[String, collection.Seq[String]]): Option[collection.Seq[T]] =
    params.get(name).flatMap(_.toList.traverse(decodeOne))

  /** Matches the first occurrence of the parameter, if it decodes. */
  def unapply(params: Map[String, collection.Seq[String]]): Option[T] =
    params.get(name).flatMap(_.headOption).flatMap(decodeOne)
}
/** param extractor using [[QueryParamDecoder]], with the parameter name taken
  * from the implicit [[QueryParam]] instance:
  *
  * {{{
  * case class Foo(i: Int)
  * implicit val fooDecoder: QueryParamDecoder[Foo] = ...
  * implicit val fooParam: QueryParam[Foo] = ...
  *
  * object FooMatcher extends QueryParamMatcher[Foo]
  * val routes = HttpRoutes.of {
  *   case GET -> Root / "closest" :? FooMatcher(2) => ...
  * }}}
  */
abstract class QueryParamMatcher[T: QueryParamDecoder: QueryParam]
    extends QueryParamDecoderMatcher[T](QueryParam[T].key.value)
/** Like [[QueryParamDecoderMatcher]] but also matches when the parameter is absent,
  * yielding `Some(None)` in that case and failing only on a decode error.
  */
abstract class OptionalQueryParamDecoderMatcher[T: QueryParamDecoder](name: String) {
  def unapply(params: Map[String, collection.Seq[String]]): Option[Option[T]] = {
    val firstValue = params.get(name).flatMap(_.headOption)
    firstValue
      .traverse(raw => QueryParamDecoder[T].decode(QueryParameterValue(raw)))
      .toOption
  }
}
/** A param extractor with a default value. If the query param is not present, the default value is returned
  * If the query param is present but incorrectly formatted, will return `None`
  */
abstract class QueryParamDecoderMatcherWithDefault[T: QueryParamDecoder](name: String, default: T) {
  def unapply(params: Map[String, collection.Seq[String]]): Option[T] = {
    val firstValue = params.get(name).flatMap(_.headOption)
    firstValue
      .traverse(raw => QueryParamDecoder[T].decode(QueryParameterValue(raw)))
      .toOption
      .map(_.getOrElse(default))
  }
}
/** [[QueryParamDecoderMatcherWithDefault]] with the parameter name taken from the
  * implicit [[QueryParam]] instance.
  */
abstract class QueryParamMatcherWithDefault[T: QueryParamDecoder: QueryParam](default: T)
    extends QueryParamDecoderMatcherWithDefault[T](QueryParam[T].key.value, default)
/** Flag (value-less) query param extractor: always matches, yielding whether the
  * parameter name occurs at all.
  */
abstract class FlagQueryParamMatcher(name: String) {
  def unapply(params: Map[String, collection.Seq[String]]): Option[Boolean] =
    Some(params.keySet.contains(name))
}
/** Capture a query parameter that appears 0 or more times.
  *
  * {{{
  *   case class Foo(i: Int)
  *   implicit val fooDecoder: QueryParamDecoder[Foo] = ...
  *   implicit val fooParam: QueryParam[Foo] = ...
  *
  *   object FooMatcher extends OptionalMultiQueryParamDecoderMatcher[Foo]("foo")
  *   val routes = HttpRoutes.of {
  *     // matches http://.../closest?foo=2&foo=3&foo=4
  *     case GET -> Root / "closest" :? FooMatcher(Validated.Valid(Seq(Foo(2),Foo(3),Foo(4)))) => ...
  *
  *     /*
  *      * matches http://.../closest?foo=2&foo=3&foo=4 as well as http://.../closest (no parameters)
  *      * or http://.../closest?foo=2 (single occurrence)
  *      */
  *     case GET -> Root / "closest" :? FooMatcher(is) => ...
  *   }
  * }}}
  */
abstract class OptionalMultiQueryParamDecoderMatcher[T: QueryParamDecoder](name: String) {
  def unapply(
      params: Map[String, collection.Seq[String]]
  ): Option[ValidatedNel[ParseFailure, List[T]]] =
    Some(
      params
        .getOrElse(name, Nil) // an absent parameter validates to the empty list
        .toList
        .traverse(raw => QueryParamDecoder[T].decode(QueryParameterValue(raw)))
    )
}
/** [[OptionalQueryParamDecoderMatcher]] with the parameter name taken from the
  * implicit [[QueryParam]] instance.
  */
abstract class OptionalQueryParamMatcher[T: QueryParamDecoder: QueryParam]
    extends OptionalQueryParamDecoderMatcher[T](QueryParam[T].key.value)
/** param extractor using [[org.http4s.QueryParamDecoder]]. Note that this will return a
  * [[ParseFailure]] if the parameter cannot be decoded.
  *
  * {{{
  *   case class Foo(i: Int)
  *   implicit val fooDecoder: QueryParamDecoder[Foo] = ...
  *
  *   object FooMatcher extends ValidatingQueryParamDecoderMatcher[Foo]("foo")
  *   val routes: HttpRoutes.of = {
  *     case GET -> Root / "closest" :? FooMatcher(fooValue) =>
  *       fooValue.fold(
  *         nelE => BadRequest(nelE.toList.map(_.sanitized).mkString("\\n")),
  *         foo => { ... }
  *       )
  *   }
  * }}}
  */
abstract class ValidatingQueryParamDecoderMatcher[T: QueryParamDecoder](name: String) {
  def unapply(params: Map[String, collection.Seq[String]]): Option[ValidatedNel[ParseFailure, T]] =
    for {
      values <- params.get(name)
      first <- values.headOption
    } yield QueryParamDecoder[T].decode(QueryParameterValue(first))
}
/** param extractor using [[org.http4s.QueryParamDecoder]]. Note that this will _always_ match, but will return
  * an Option possibly containing the result of the conversion to T
  *
  * {{{
  *   case class Foo(i: Int)
  *   implicit val fooDecoder: QueryParamDecoder[Foo] = ...
  *
  *   case class Bar(i: Int)
  *   implicit val barDecoder: QueryParamDecoder[Bar] = ...
  *
  *   object FooMatcher extends ValidatingQueryParamDecoderMatcher[Foo]("foo")
  *   object BarMatcher extends OptionalValidatingQueryParamDecoderMatcher[Bar]("bar")
  *
  *   val routes = HttpRoutes.of {
  *     case GET -> Root / "closest" :? FooMatcher(fooValue) +& BarMatcher(barValue) =>
  *       ^(fooValue, barValue getOrElse 42.right) { (foo, bar) =>
  *         ...
  *       }.fold(
  *         nelE => BadRequest(nelE.toList.map(_.sanitized).mkString("\\n")),
  *         baz => { ... }
  *       )
  *   }
  * }}}
  */
abstract class OptionalValidatingQueryParamDecoderMatcher[T: QueryParamDecoder](name: String) {
  def unapply(
      params: Map[String, collection.Seq[String]]
  ): Some[Option[ValidatedNel[ParseFailure, T]]] =
    Some(
      params
        .get(name)
        .flatMap(_.headOption)
        .map(raw => QueryParamDecoder[T].decode(QueryParameterValue(raw)))
    )
}
| http4s/http4s | dsl/src/main/scala/org/http4s/dsl/impl/Path.scala | Scala | apache-2.0 | 15,122 |
package gapt.proofs.lk.transformations
import gapt.expr._
import gapt.expr.formula.All
import gapt.expr.formula.And
import gapt.expr.formula.Bottom
import gapt.expr.formula.Ex
import gapt.expr.formula.Formula
import gapt.expr.formula.Imp
import gapt.expr.formula.Neg
import gapt.expr.formula.Or
import gapt.expr.subst.Substitution
import gapt.logic.Polarity
import gapt.proofs.Ant
import gapt.proofs.ProofBuilder
import gapt.proofs.SequentIndex
import gapt.proofs.Suc
import gapt.proofs.context.Context
import gapt.proofs.context.facet.ProofNames
import gapt.proofs.lk
import gapt.proofs.lk.LKProof
import gapt.proofs.lk.rules.AndLeftRule
import gapt.proofs.lk.rules.AndRightRule
import gapt.proofs.lk.rules.BottomAxiom
import gapt.proofs.lk.rules.ContractionLeftRule
import gapt.proofs.lk.rules.ContractionRightRule
import gapt.proofs.lk.rules.CutRule
import gapt.proofs.lk.rules.ConversionLeftRule
import gapt.proofs.lk.rules.ConversionRightRule
import gapt.proofs.lk.rules.EqualityLeftRule
import gapt.proofs.lk.rules.EqualityRightRule
import gapt.proofs.lk.rules.ExistsLeftRule
import gapt.proofs.lk.rules.ExistsRightRule
import gapt.proofs.lk.rules.ExistsSkLeftRule
import gapt.proofs.lk.rules.ForallLeftRule
import gapt.proofs.lk.rules.ForallRightRule
import gapt.proofs.lk.rules.ForallSkRightRule
import gapt.proofs.lk.rules.ImpLeftRule
import gapt.proofs.lk.rules.ImpRightRule
import gapt.proofs.lk.rules.NegLeftRule
import gapt.proofs.lk.rules.NegRightRule
import gapt.proofs.lk.rules.OrLeftRule
import gapt.proofs.lk.rules.OrRightRule
import gapt.proofs.lk.rules.ReflexivityAxiom
import gapt.proofs.lk.rules.TopAxiom
import gapt.proofs.lk.rules.WeakeningLeftRule
import gapt.proofs.lk.rules.WeakeningRightRule
import gapt.proofs.nd
import gapt.proofs.nd._
object LKToND {
/**
 * Converts an LKProof π into a natural deduction proof.
 *
 * @param proof The proof π.
 * @param focus The index in the LK succedent of the formula to be proved in the ND proof,
 * or None if the succedent is empty.
 * @return The natural deduction proof translate(π).
 */
// NOTE(review): `null` is used as a sentinel for "focus unspecified", so it can be
// distinguished from an explicit `None` (empty succedent). Replacing the default with
// `None` would change behavior; a cleaner encoding (e.g. Option[Option[SequentIndex]])
// would require touching callers — confirm before changing.
def apply( proof: LKProof, focus: Option[SequentIndex] = null )( implicit ctx: Context = Context() ): NDProof = {
  translate( proof, focus =
    // Default: focus the first succedent formula if there is one, otherwise no focus.
    if ( focus != null ) focus else if ( proof.endSequent.succedent.isEmpty ) None else Some( Suc( 0 ) ) )
}
// Sanity checks relating the translated ND sequent to the original LK sequent:
// the ND proof has exactly one conclusion (the focused LK succedent formula, or
// Bottom for an empty succedent); every LK antecedent formula appears in the ND
// antecedent; every non-focused LK succedent formula appears negated there.
private def check( nd: NDProof, lk: LKProof, focus: Option[SequentIndex] ) = {
  if ( lk.endSequent.succedent.isEmpty ) {
    // Empty succedent: the ND proof gains Bottom as its single conclusion.
    assert( ( lk.endSequent.size + 1 ) == nd.endSequent.size )
    assert( nd.endSequent( Suc( 0 ) ) == Bottom() )
  } else {
    assert( lk.endSequent.size == nd.endSequent.size )
    assert( lk.endSequent.succedent.contains( nd.endSequent( Suc( 0 ) ) ) )
    assert( lk.endSequent( focus.get ) == nd.endSequent( Suc( 0 ) ) )
  }
  assert( lk.endSequent.antecedent.forall( nd.endSequent.antecedent.contains( _ ) ) )
  assert( lk.endSequent.succedent.filter( _ != nd.endSequent( Suc( 0 ) ) ).forall( x =>
    nd.endSequent.antecedent.contains( Neg( x ) ) ) )
}
// Applies the exchange below when a main formula is given; otherwise returns the proof unchanged.
private def exchange( subProof: NDProof, mainFormula: Option[Formula] ): NDProof =
  mainFormula.map( exchange( subProof, _ ) ).getOrElse( subProof )
/**
 * Macro rule to exchange a mainFormula (A) with the formula in the succedent (B) in subProof (π).
 *
 * <pre>
 * (π)
 * Γ, ¬A :- B
 * ------------- ex ¬A
 * Γ, ¬B :- A
 * </pre>
 *
 * @param subProof The proof π.
 * @param mainFormula The formula ¬A.
 * @return The natural deduction proof after exchanging ¬A and B.
 */
private def exchange( subProof: NDProof, mainFormula: Formula ): NDProof = {
  if ( mainFormula == subProof.endSequent( Suc( 0 ) ) ) {
    // Already focused on the desired formula — nothing to do.
    subProof
  } else {
    val negMain = -mainFormula
    if ( subProof.endSequent.antecedent.contains( negMain ) ) {
      // Negated main formula in antecedent:
      // Move it using LEM
      val r = subProof.endSequent( Suc( 0 ) )
      val ax1 = nd.LogicalAxiom( mainFormula )
      // Derive mainFormula from the old conclusion (via ¬r and bottom elimination),
      // unless the conclusion is already Bottom.
      val pr2 = if ( subProof.endSequent( Suc( 0 ) ) == Bottom() ) {
        BottomElimRule( subProof, mainFormula )
      } else {
        ProofBuilder.
          c( nd.LogicalAxiom( -r ) ).
          u( NegElimRule( _, subProof ) ).
          u( BottomElimRule( _, mainFormula ) ).
          qed
      }
      // Discharge mainFormula vs. ¬mainFormula by excluded middle.
      val i = pr2.endSequent.indexOfOption( negMain, Polarity.InAntecedent )
      ExcludedMiddleRule( ax1, Ant( 0 ), pr2, i.get )
    } else {
      // Negated main formula not in antecedent
      // Use BottomElimRule to add main formula to succedent
      val r = subProof.endSequent( Suc( 0 ) )
      if ( subProof.endSequent( Suc( 0 ) ) == Bottom() ) {
        BottomElimRule( subProof, mainFormula )
      } else {
        ProofBuilder.
          c( nd.LogicalAxiom( -r ) ).
          u( NegElimRule( _, subProof ) ).
          u( BottomElimRule( _, mainFormula ) ).
          qed
      }
    }
  }
}
// Picks an arbitrary focus: the first succedent formula if the succedent is nonempty.
private def heuristicIndex( proof: LKProof ) =
  if ( proof.endSequent.succedent.isEmpty ) None else Some( Suc( 0 ) )
// Core of the translation: recursively translates each LK inference, keeping the
// `focus`ed succedent formula as the single ND conclusion (see `check`). Cases that
// do not operate on the focused formula typically re-translate with the rule's main
// formula as focus and then `exchange` back to the requested one.
private def translate( proof: LKProof, focus: Option[SequentIndex] )( implicit ctx: Context ): NDProof = {
  // A focus is only meaningful for a nonempty succedent, and must point into it.
  assert( focus.forall( _ => proof.endSequent.succedent.nonEmpty ) )
  assert( focus.forall( _.isSuc ) )
  // Optimization when the end-sequent has the form :- ¬A, A with focus ¬A, then just return ¬A :- ¬A
  focus match {
    case Some( i ) =>
      proof.endSequent( i ) match {
        case Neg( f ) if ( proof.endSequent.size == 2 && proof.endSequent.delete( i ).forall( _ == f ) ) => return nd.LogicalAxiom( Neg( f ) )
        case _ => ()
      }
    case _ => ()
  }
  val ndProof = proof match {

    // Axioms
    case lk.rules.LogicalAxiom( f ) =>
      nd.LogicalAxiom( f )

    case lk.rules.ProofLink( prf, seq ) =>
      val Apps( Const( proofName, _, _ ), args ) = prf
      val ( genprf, genseq ) = ctx.get[ProofNames].names( proofName )
      val Apps( _, vs ) = genprf
      // Given proofs of ¬(last succedent formula) and of the disjunction of the
      // succedent, derives `toProve` by case analysis and bottom elimination.
      def handleSuccedent( seq: Vector[Formula], toProve: Formula ): NDProof = {
        if ( seq.size == 1 ) {
          ProofBuilder.
            c( nd.LogicalAxiom( -seq.last ) ).
            c( nd.LogicalAxiom( seq.last ) ).
            b( NegElimRule( _, _ ) ).
            u( BottomElimRule( _, toProve ) ).
            qed
        } else {
          ProofBuilder.
            c( nd.LogicalAxiom( -seq.last ) ).
            c( nd.LogicalAxiom( Or( seq ) ) ).
            c( handleSuccedent( seq.reverse.tail.reverse, seq.last ) ).
            c( nd.LogicalAxiom( seq.last ) ).
            t( OrElimRule( _, _, _ ) ).
            b( NegElimRule( _, _ ) ).
            u( BottomElimRule( _, toProve ) ).
            qed
        }
      }
      // Instantiate the named proof's universally closed implication and apply it
      // to the conjunction of the antecedent formulas.
      val t = ProofBuilder.
        c( nd.TheoryAxiom( All.Block( vs.asInstanceOf[List[Var]], genseq.toImplication ) ) ).
        u( nd.ForallElimBlock( _, args ) ).
        c( nd.LogicalAxiom( seq( Ant( 0 ) ) ) ).
        u( seq.antecedent.tail.foldLeft( _ )( ( a, b ) => AndIntroRule( a, nd.LogicalAxiom( b ) ) ) ).
        b( ImpElimRule( _, _ ) ).
        qed
      val tsuc = if ( seq.succedent.size > 1 ) {
        ProofBuilder.
          c( t ).
          c( handleSuccedent( seq.succedent.reverse.tail.reverse, seq.succedent.last ) ).
          c( nd.LogicalAxiom( seq.succedent.last ) ).
          t( OrElimRule( _, _, _ ) ).
          qed
      } else t
      exchange( tsuc, focus.map( seq.apply ) )

    case ReflexivityAxiom( s ) =>
      nd.EqualityIntroRule( s )

    case TopAxiom =>
      nd.TopIntroRule

    case BottomAxiom =>
      nd.LogicalAxiom( Bottom() )

    // Structural rules
    case WeakeningLeftRule( subProof, formula ) =>
      WeakeningRule( translate( subProof, focus ), formula )

    case p @ WeakeningRightRule( subProof, formula ) =>
      if ( p.mainFormula == p.endSequent( focus.get ) ) {
        // Pick arbitrary focus
        val ndProof = translate( subProof, heuristicIndex( subProof ) )
        // This check solves a bug that occured when WeakeningRightRule
        // was applied after BottomAxiom (cf. classical pairing test case)
        if ( proof.endSequent.forall( f => proof.endSequent.filter( _ == f ).size ==
          ndProof.endSequent.filter( _ == f ).size ) )
          ndProof
        else
          exchange( WeakeningRule( ndProof, -formula ), p.mainFormula )
      } else {
        // simply weaken with negated formula on the left
        WeakeningRule( translate( subProof, focus.map( p.getSequentConnector.parent ) ), -formula )
      }

    case p @ ContractionLeftRule( subProof, aux1, aux2 ) =>
      ContractionRule( translate( subProof, focus ), p.mainFormula )

    case p @ ContractionRightRule( subProof, aux1, aux2 ) =>
      if ( p.mainFormula == p.endSequent( focus.get ) ) {
        // Contract the two succedent occurrences via excluded middle on the formula.
        val l = subProof.endSequent( aux1 )
        val t = translate( subProof, Some( aux1 ) )
        val il = t.endSequent.indexOf( -l, Polarity.InAntecedent )
        ProofBuilder.
          c( nd.LogicalAxiom( l ) ).
          c( t ).
          b( ExcludedMiddleRule( _, Ant( 0 ), _, il ) ).
          qed
      } else {
        val focusMain = p.endSequent.indexOf( p.mainFormula, Polarity.InSuccedent )
        exchange( translate( proof, Some( focusMain ) ), focus.map( p.endSequent.apply ) )
      }

    case p @ CutRule( leftSubProof, aux1, rightSubProof, aux2 ) =>
      // Cut becomes implication introduction (right premise) + elimination (left premise).
      val tl = translate( leftSubProof, Some( aux1 ) )
      val tr = translate(
        rightSubProof,
        if ( rightSubProof.endSequent.succedent.nonEmpty )
          Some( p.getRightSequentConnector.parentOption( focus.get ).getOrElse( Suc( 0 ) ) )
        else None )
      val i = tr.endSequent.indexOf( rightSubProof.endSequent( aux2 ), Polarity.InAntecedent )
      val partialProof = ProofBuilder.
        c( tr ).
        u( ImpIntroRule( _, i ) ).
        c( tl ).
        b( ImpElimRule( _, _ ) ).
        qed
      exchange( partialProof, focus.map( p.endSequent.apply ) )

    // Propositional rules
    case p @ NegLeftRule( subProof, aux ) =>
      focus.map( p.endSequent.apply ) match {
        case Some( f ) =>
          val focusMain = subProof.endSequent.indexOf( f, Polarity.InSuccedent )
          translate( subProof, Some( focusMain ) )
        case None =>
          val Neg( a ) = p.mainFormula
          val focusMain = subProof.endSequent.indexOf( a, Polarity.InSuccedent )
          ProofBuilder.
            c( nd.LogicalAxiom( p.mainFormula ) ).
            c( translate( subProof, Some( focusMain ) ) ).
            b( NegElimRule( _, _ ) ).
            qed
      }

    case p @ NegRightRule( subProof, aux ) =>
      if ( p.mainFormula == p.endSequent( focus.get ) ) {
        val Neg( a ) = p.mainFormula
        val t = translate( subProof, heuristicIndex( subProof ) )
        if ( t.endSequent( Suc( 0 ) ) == Bottom() ) {
          NegIntroRule( t, a )
        } else {
          ProofBuilder.
            c( nd.LogicalAxiom( -t.endSequent( Suc( 0 ) ) ) ).
            c( t ).
            b( NegElimRule( _, _ ) ).
            u( NegIntroRule( _, a ) ).
            qed
        }
      } else {
        val focusMain = p.endSequent.indexOf( p.mainFormula, Polarity.InSuccedent )
        exchange( translate( proof, Some( focusMain ) ), focus.map( p.endSequent.apply ) )
      }

    case p @ AndLeftRule( subProof, aux1, aux2 ) =>
      val t = translate(
        subProof,
        if ( p.endSequent.succedent.nonEmpty )
          Some( p.getSequentConnector.parent( focus.get ) )
        else None )
      // Replace the two separate hypotheses a, b by the conjunction a ∧ b.
      val And( a, b ) = p.mainFormula
      val ax = nd.LogicalAxiom( p.mainFormula )
      ProofBuilder.
        c( t ).
        u( ImpIntroRule( _, a ) ).
        c( ax ).
        u( AndElim1Rule( _ ) ).
        b( ImpElimRule( _, _ ) ).
        u( ImpIntroRule( _, b ) ).
        c( ax ).
        u( AndElim2Rule( _ ) ).
        b( ImpElimRule( _, _ ) ).
        u( ContractionRule( _, p.mainFormula ) ).
        qed

    case p @ AndRightRule( leftSubProof, aux1, rightSubProof, aux2 ) =>
      if ( p.mainFormula == p.endSequent( focus.get ) ) {
        val tl = translate( leftSubProof, Some( aux1 ) )
        val tr = translate( rightSubProof, Some( aux2 ) )
        AndIntroRule( tl, tr )
      } else {
        val focusMain = p.endSequent.indexOf( p.mainFormula, Polarity.InSuccedent )
        exchange( translate( proof, Some( focusMain ) ), focus.map( p.endSequent.apply ) )
      }

    case p @ OrLeftRule( leftSubProof, aux1, rightSubProof, aux2 ) =>
      val tl = translate(
        leftSubProof,
        if ( leftSubProof.endSequent.succedent.nonEmpty )
          Some( p.getLeftSequentConnector.parentOption( focus.get ).getOrElse( Suc( 0 ) ) )
        else None )
      // If the focused formula has no parent in the left premise, lift the left
      // translation to conclude the focused formula via bottom elimination.
      val wtl = if ( p.endSequent.succedent.nonEmpty &&
        p.getLeftSequentConnector.parentOption( focus.get ) == None ) {
        if ( tl.endSequent( Suc( 0 ) ) == Bottom() )
          BottomElimRule( tl, p.endSequent( focus.get ) )
        else {
          ProofBuilder.
            c( nd.LogicalAxiom( -tl.endSequent( Suc( 0 ) ) ) ).
            c( tl ).
            b( NegElimRule( _, _ ) ).
            u( BottomElimRule( _, p.endSequent( focus.get ) ) ).
            qed
        }
      } else tl
      val tr = translate(
        rightSubProof,
        if ( rightSubProof.endSequent.succedent.nonEmpty )
          Some( p.getRightSequentConnector.parentOption( focus.get ).getOrElse( Suc( 0 ) ) )
        else None )
      // Symmetric lifting for the right premise.
      val wtr = if ( p.endSequent.succedent.nonEmpty &&
        p.getRightSequentConnector.parentOption( focus.get ) == None ) {
        if ( tr.endSequent( Suc( 0 ) ) == Bottom() )
          BottomElimRule( tr, p.endSequent( focus.get ) )
        else {
          ProofBuilder.
            c( nd.LogicalAxiom( -tr.endSequent( Suc( 0 ) ) ) ).
            c( tr ).
            b( NegElimRule( _, _ ) ).
            u( BottomElimRule( _, p.endSequent( focus.get ) ) ).
            qed
        }
      } else tr
      OrElimRule( nd.LogicalAxiom( p.mainFormula ), wtl, wtr )

    // Special case: the disjunct introduced by an immediately preceding weakening
    // does not need to be proved — inject the other disjunct directly.
    case p @ OrRightRule(
      subProof1 @ WeakeningRightRule( subProof2, f ), aux1, aux2
      ) if f == subProof1.endSequent( aux1 ) || f == subProof1.endSequent( aux2 ) =>
      if ( p.mainFormula == p.endSequent( focus.get ) ) {
        val Or( a, b ) = p.mainFormula
        f match {
          case `b` =>
            val i = subProof1.getSequentConnector.parent( aux1 )
            ProofBuilder.
              c( translate( subProof2, Some( i ) ) ).
              u( OrIntro1Rule( _, f ) ).
              qed
          case `a` =>
            val i = subProof1.getSequentConnector.parent( aux2 )
            ProofBuilder.
              c( translate( subProof2, Some( i ) ) ).
              u( OrIntro2Rule( _, f ) ).
              qed
        }
      } else {
        val focusMain = p.endSequent.indexOf( p.mainFormula, Polarity.InSuccedent )
        exchange( translate( proof, Some( focusMain ) ), focus.map( p.endSequent.apply ) )
      }

    case p @ OrRightRule( subProof, aux1, aux2 ) =>
      if ( p.mainFormula == p.endSequent( focus.get ) ) {
        // Classical or-introduction: case split on a via excluded middle.
        val Or( a, b ) = p.mainFormula
        val rp = ProofBuilder.
          c( translate( subProof, Some( aux2 ) ) ).
          u( OrIntro2Rule( _, a ) ).
          qed
        val lp = ProofBuilder.
          c( nd.LogicalAxiom( a ) ).
          u( OrIntro1Rule( _, b ) ).
          qed
        val i = rp.endSequent.indexOf( Neg( a ), Polarity.InAntecedent )
        ExcludedMiddleRule( lp, Ant( 0 ), rp, i )
      } else {
        val focusMain = p.endSequent.indexOf( p.mainFormula, Polarity.InSuccedent )
        exchange( translate( proof, Some( focusMain ) ), focus.map( p.endSequent.apply ) )
      }

    case p @ ImpLeftRule( leftSubProof, aux1, rightSubProof, aux2 ) =>
      val tl = translate( leftSubProof, Some( aux1 ) )
      val tr = translate(
        rightSubProof,
        if ( rightSubProof.endSequent.succedent.nonEmpty )
          Some( p.getRightSequentConnector.parentOption( focus.get ).getOrElse( Suc( 0 ) ) )
        else None )
      val Imp( _, b ) = p.mainFormula
      val i = tr.endSequent.indexOf( b, Polarity.InAntecedent )
      val partialProof = ProofBuilder.
        c( tr ).
        u( ImpIntroRule( _, i ) ).
        c( nd.LogicalAxiom( p.mainFormula ) ).
        c( tl ).
        b( ImpElimRule( _, _ ) ).
        b( ImpElimRule( _, _ ) ).
        qed
      exchange( partialProof, focus.map( p.endSequent.apply ) )

    case p @ ImpRightRule( subProof, aux1, aux2 ) =>
      if ( p.mainFormula == p.endSequent( focus.get ) ) {
        val Imp( a, _ ) = p.mainFormula
        ProofBuilder.
          c( translate( subProof, Some( aux2 ) ) ).
          u( ImpIntroRule( _, a ) ).
          qed
      } else {
        val focusMain = p.endSequent.indexOf( p.mainFormula, Polarity.InSuccedent )
        exchange( translate( proof, Some( focusMain ) ), focus.map( p.endSequent.apply ) )
      }

    // Quantifier rules
    case p @ ForallLeftRule( subProof, aux, a: Formula, term: Expr, v: Var ) =>
      val t = translate(
        subProof,
        if ( p.endSequent.succedent.nonEmpty )
          Some( p.getSequentConnector.parent( focus.get ) )
        else None )
      val i = t.endSequent.indexOf( Substitution( v, term )( a ), Polarity.InAntecedent )
      ProofBuilder.
        c( t ).
        u( ImpIntroRule( _, i ) ).
        c( nd.LogicalAxiom( p.mainFormula ) ).
        u( ForallElimRule( _, term ) ).
        b( ImpElimRule( _, _ ) ).
        qed

    case p @ ForallRightRule( subProof, aux, eigen, _ ) =>
      if ( p.mainFormula == p.endSequent( focus.get ) ) {
        ProofBuilder.
          c( translate( subProof, Some( aux ) ) ).
          u( ForallIntroRule( _, p.mainFormula, eigen ) ).
          qed
      } else {
        val focusMain = p.endSequent.indexOf( p.mainFormula, Polarity.InSuccedent )
        exchange( translate( proof, Some( focusMain ) ), focus.map( p.endSequent.apply ) )
      }

    case ForallSkRightRule( subProof, aux, main, skT ) =>
      throw new LKToNDTranslationException(
        "ForallSkRightRule",
        "LK proofs containing skolem functions are not supported." )

    case p @ ExistsLeftRule( subProof, aux, eigen, v ) =>
      val t = translate(
        subProof,
        if ( p.endSequent.succedent.nonEmpty )
          Some( p.getSequentConnector.parent( focus.get ) )
        else None )
      val Ex( _, a ) = p.mainFormula
      val i = t.endSequent.indexOf( Substitution( v, eigen )( a ), Polarity.InAntecedent )
      ProofBuilder.
        c( nd.LogicalAxiom( p.mainFormula ) ).
        c( t ).
        b( ExistsElimRule( _, _, i, eigen ) ).
        qed

    case ExistsSkLeftRule( subProof, aux, main, skT ) =>
      throw new LKToNDTranslationException(
        "ExistsSkLeftRule",
        "LK proofs containing skolem functions are not supported." )

    case p @ ExistsRightRule( subProof, aux, _, t, _ ) =>
      if ( p.mainFormula == p.endSequent( focus.get ) ) {
        ProofBuilder.
          c( translate( subProof, Some( aux ) ) ).
          u( ExistsIntroRule( _, p.mainFormula, t ) ).
          qed
      } else {
        val focusMain = p.endSequent.indexOf( p.mainFormula, Polarity.InSuccedent )
        exchange( translate( proof, Some( focusMain ) ), focus.map( p.endSequent.apply ) )
      }

    // Equality rules
    case p @ EqualityLeftRule( subProof, eq, aux, replacementContext ) =>
      val t = translate(
        subProof,
        if ( p.endSequent.succedent.nonEmpty )
          Some( p.getSequentConnector.parent( focus.get ) )
        else None )
      val Abs( x, term ) = replacementContext
      ProofBuilder.
        c( t ).
        u( ImpIntroRule( _, subProof.endSequent( aux ) ) ).
        c( nd.LogicalAxiom( subProof.endSequent( eq ) ) ).
        c( nd.LogicalAxiom( p.mainFormula ) ).
        b( EqualityElimRule( _, _, term.asInstanceOf[Formula], x ) ).
        b( ImpElimRule( _, _ ) ).
        u( ContractionRule( _, subProof.endSequent( eq ) ) ).
        qed

    case p @ EqualityRightRule( subProof, eq, aux, replacementContext ) =>
      if ( p.mainFormula == p.endSequent( focus.get ) ) {
        val Abs( x, term ) = replacementContext
        ProofBuilder.
          c( nd.LogicalAxiom( subProof.endSequent( eq ) ) ).
          c( translate( subProof, Some( aux ) ) ).
          b( EqualityElimRule( _, _, term.asInstanceOf[Formula], x ) ).
          u( ContractionRule( _, subProof.endSequent( eq ) ) ).
          qed
      } else {
        val focusMain = p.endSequent.indexOf( p.mainFormula, Polarity.InSuccedent )
        exchange( translate( proof, Some( focusMain ) ), focus.map( p.endSequent.apply ) )
      }

    case lk.rules.InductionRule( cases, formula, term ) =>
      // Translate each induction case with its conclusion as focus and remap the
      // hypothesis indices into the translated end-sequent.
      val ndCases = cases.map {
        case lk.rules.InductionCase( proof, constructor, hypotheses, eigenVars, conclusion ) =>
          val prfNd = translate( proof, Some( conclusion ) )
          val hypNd = hypotheses.map { case i: SequentIndex => prfNd.endSequent.indexOf( proof.endSequent( i ) ) }
          nd.InductionCase( prfNd, constructor, hypNd, eigenVars )
      }
      nd.InductionRule( ndCases, formula, term )

    case p @ ConversionLeftRule( subProof: LKProof, aux: SequentIndex, main: Formula ) =>
      val t = translate( subProof, focus )
      ProofBuilder.
        c( t ).
        u( ImpIntroRule( _, subProof.endSequent( aux ) ) ).
        u( nd.DefinitionRule( _, Imp( main, t.endSequent( Suc( 0 ) ) ) ) ).
        c( nd.LogicalAxiom( main ) ).
        b( ImpElimRule( _, _ ) ).
        qed

    case p @ ConversionRightRule( subProof, aux, main ) =>
      if ( p.mainFormula == p.endSequent( focus.get ) ) {
        val t = translate( subProof, focus )
        ProofBuilder.
          c( t ).
          u( nd.DefinitionRule( _, main ) ).
          qed
      } else {
        val focusMain = p.endSequent.indexOf( p.mainFormula, Polarity.InSuccedent )
        exchange( translate( proof, Some( focusMain ) ), focus.map( p.endSequent.apply ) )
      }
  }
  // Verify the invariants relating the LK and ND end-sequents before returning.
  check( ndProof, proof, focus )
  ndProof
}
}
/**
 * Thrown when an LK proof step cannot be translated into natural deduction.
 *
 * @param name    Name of the LK inference rule that failed to translate.
 * @param message Details of why the translation is impossible.
 */
class LKToNDTranslationException( name: String, message: String )
  extends Exception( s"Cannot translate $name: " + message )
| gapt/gapt | core/src/main/scala/gapt/proofs/lk/transformations/LKToND.scala | Scala | gpl-3.0 | 23,029 |
/* sbt -- Simple Build Tool
* Copyright 2008,2010 Mark Harrah
*/
package sbt.internal.util
package complete
/**
 * An inclusive upper bound on a number of occurrences: either a finite
 * count ([[Finite]]) or unbounded ([[Infinite]]).
 */
sealed trait UpperBound {

  /** True if and only if the given value meets this bound. */
  def >=(min: Int): Boolean

  /** True if and only if this bound is one. */
  def isOne: Boolean

  /** True if and only if this bound is zero. */
  def isZero: Boolean

  /**
   * If this bound is zero or Infinite, `decrement` returns this bound.
   * Otherwise, this bound is finite and greater than zero and `decrement` returns the bound that is one less than this bound.
   */
  def decrement: UpperBound

  /** True if and only if this is unbounded. */
  def isInfinite: Boolean
}
/** The unbounded [[UpperBound]]: every finite minimum is satisfied. */
case object Infinite extends UpperBound {
  def isInfinite = true
  def isZero = false
  def isOne = false

  /** Any finite number meets an unbounded maximum. */
  def >=(min: Int) = true

  /** Decrementing an unbounded bound leaves it unbounded. */
  def decrement = this

  override def toString = "Infinity"
}
/**
 * Represents a finite upper bound. The maximum allowed value is 'value', inclusive.
 * It must be nonnegative.
 */
final case class Finite(value: Int) extends UpperBound {
  // Enforce the nonnegativity invariant at construction time.
  assume(value >= 0, "Maximum occurrences must be nonnegative.")

  /** True when this finite maximum is at least `min`. */
  def >=(min: Int) = value >= min
  def isOne = value == 1
  def isZero = value == 0

  /** One less than this bound, floored at zero. */
  def decrement = Finite(scala.math.max(0, value - 1))
  def isInfinite = false
  override def toString = value.toString
}
object UpperBound {
  /** Allows a plain `Int` literal to be used wherever a finite [[UpperBound]] is expected. */
  implicit def intToFinite(i: Int): Finite = Finite(i)
}
| Duhemm/sbt | internal/util-complete/src/main/scala/sbt/internal/util/complete/UpperBound.scala | Scala | bsd-3-clause | 1,483 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.mv.plans.util
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.carbondata.mv.plans._
object CheckSPJG {

  /**
   * Returns true when `subplan` has SPJG shape: an `Aggregate` whose entire
   * subtree consists only of Join/Project/Filter nodes over base relations
   * (HiveTableRelation, LogicalRelation or LocalRelation).
   */
  def isSPJG(subplan: LogicalPlan): Boolean = {
    subplan match {
      case a: Aggregate =>
        // `collect` visits every node below the aggregate; any node outside
        // the allowed set maps to false, which makes the forall fail.
        a.child.collect {
          case Join(_, _, _, _) | Project(_, _) | Filter(_, _) |
               HiveTableRelation(_, _, _) | LogicalRelation(_, _, _) | LocalRelation(_, _) => true
          case _ => false
        }.forall(identity)
      case _ => false
    }
  }
}
/** Generates plan signatures, but only for SPJG-shaped logical plans. */
object LogicalPlanSignatureGenerator extends SignatureGenerator[LogicalPlan] {
  lazy val rule: SignatureRule[LogicalPlan] = LogicalPlanRule

  /** Rejects non-SPJG plans up front; everything else is delegated upward. */
  override def generate(plan: LogicalPlan): Option[Signature] =
    if (!plan.isSPJG) None else super.generate(plan)
}
/** Computes a [[Signature]] bottom-up for each supported logical operator. */
object LogicalPlanRule extends SignatureRule[LogicalPlan] {

  /**
   * Combines the signatures of a node's children into the node's own
   * signature, or None when the operator shape is unsupported.
   */
  def apply(plan: LogicalPlan, childSignatures: Seq[Option[Signature]]): Option[Signature] = {
    plan match {
      case LogicalRelation(_, _, _) =>
        // TODO: implement this (link to BaseRelation)
        None
      case HiveTableRelation(tableMeta, _, _) =>
        // A Hive table is identified by its fully qualified "db.table" name.
        Some(Signature(false,
          Set(Seq(tableMeta.database, tableMeta.identifier.table).mkString("."))))
      case l : LocalRelation =>
        // LocalRelation is for unit test cases
        Some(Signature(groupby = false, Set(l.toString())))
      case Filter(_, _) =>
        // A filter passes its single child's signature through, unless the
        // child has already been aggregated.
        if (childSignatures.length == 1 && !childSignatures(0).getOrElse(Signature()).groupby) {
          // if (!childSignatures(0).getOrElse(Signature()).groupby) {
          childSignatures(0)
          // }
        } else {
          None
        }
      case Project(_, _) =>
        // Same pass-through rule as Filter.
        if ( childSignatures.length == 1 && !childSignatures(0).getOrElse(Signature()).groupby ) {
          childSignatures(0)
        } else {
          None
        }
      case Join(_, _, _, _) =>
        // A join unions the dataset sets of its two non-aggregated children.
        if ( childSignatures.length == 2 &&
          !childSignatures(0).getOrElse(Signature()).groupby &&
          !childSignatures(1).getOrElse(Signature()).groupby ) {
          Some(Signature(false,
            childSignatures(0).getOrElse(Signature()).datasets
              .union(childSignatures(1).getOrElse(Signature()).datasets)))
        } else {
          None
        }
      case Aggregate(_, _, _) =>
        // Marks the signature as grouped; the child must not already be.
        if ( childSignatures.length == 1 && !childSignatures(0).getOrElse(Signature()).groupby ) {
          Some(Signature(true, childSignatures(0).getOrElse(Signature()).datasets))
        } else {
          None
        }
      case _ => None
    }
  }
}
| jatin9896/incubator-carbondata | datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/LogicalPlanSignatureGenerator.scala | Scala | apache-2.0 | 3,520 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperations.exceptions
import org.apache.spark.sql.types.StructField
/**
 * Raised when a raw string value cannot be converted to the Spark SQL type
 * of the target column.
 *
 * @param value The offending raw value.
 * @param field Schema field (name and data type) the value was destined for.
 */
case class ValueConversionException(value: String, field: StructField)
  extends DOperationExecutionException(
    "Value \\"" + value + "\\" can't be converted to a column \\"" + field.name + "\\" " +
      "type \\"" + field.dataType.simpleString + "\\"",
    None)
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/doperations/exceptions/ValueConversionException.scala | Scala | apache-2.0 | 962 |
package com.getbootstrap.no_carrier.github.util
import com.jcabi.github.Coordinates.{Simple=>RepoId}
/** Extractor turning an "owner/repo" string into jcabi repository coordinates. */
object RepositoryId {
  private val OwnerSlashRepo = "([a-zA-Z0-9_-]+)/([a-zA-Z0-9._-]+)".r

  /** Matches strings of the form `owner/repo`; yields None for anything else. */
  def unapply(ownerRepo: String): Option[RepoId] =
    PartialFunction.condOpt(ownerRepo) {
      case OwnerSlashRepo(owner, repo) => new RepoId(owner, repo)
    }
}
| twbs/no-carrier | src/main/scala/com/getbootstrap/no_carrier/github/util/RepositoryId.scala | Scala | mit | 376 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rpc.akka
import java.io.File
import java.nio.channels.ReadableByteChannel
import java.util.concurrent.ConcurrentHashMap
import scala.concurrent.Future
import scala.language.postfixOps
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import akka.actor.{ActorSystem, ExtendedActorSystem, Actor, ActorRef, Props, Address}
import akka.event.Logging.Error
import akka.pattern.{ask => akkaAsk}
import akka.remote.{AssociationEvent, AssociatedEvent, DisassociatedEvent, AssociationErrorEvent}
import akka.serialization.JavaSerializer
import org.apache.spark.{HttpFileServer, Logging, SecurityManager, SparkConf, SparkException}
import org.apache.spark.rpc._
import org.apache.spark.util.{ActorLogReceive, AkkaUtils, ThreadUtils}
/**
* A RpcEnv implementation based on Akka.
*
* TODO Once we remove all usages of Akka in other place, we can move this file to a new project and
* remove Akka from the dependencies.
*/
private[spark] class AkkaRpcEnv private[akka] (
    val actorSystem: ActorSystem,
    val securityManager: SecurityManager,
    conf: SparkConf,
    boundPort: Int)
  extends RpcEnv(conf) with Logging {

  // The address this env advertises; falls back to localhost/boundPort when
  // the ActorSystem did not bind (which happens in some unit tests).
  private val defaultAddress: RpcAddress = {
    val address = actorSystem.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
    // In some test case, ActorSystem doesn't bind to any address.
    // So just use some default value since they are only some unit tests
    RpcAddress(address.host.getOrElse("localhost"), address.port.getOrElse(boundPort))
  }

  override val address: RpcAddress = defaultAddress

  /**
   * A lookup table to search a [[RpcEndpointRef]] for a [[RpcEndpoint]]. We need it to make
   * [[RpcEndpoint.self]] work.
   */
  private val endpointToRef = new ConcurrentHashMap[RpcEndpoint, RpcEndpointRef]()

  /**
   * Need this map to remove `RpcEndpoint` from `endpointToRef` via a `RpcEndpointRef`
   */
  private val refToEndpoint = new ConcurrentHashMap[RpcEndpointRef, RpcEndpoint]()

  private val _fileServer = new AkkaFileServer(conf, securityManager)

  // Records both directions of the endpoint <-> ref association.
  private def registerEndpoint(endpoint: RpcEndpoint, endpointRef: RpcEndpointRef): Unit = {
    endpointToRef.put(endpoint, endpointRef)
    refToEndpoint.put(endpointRef, endpoint)
  }

  // Removes both directions of the association, if present.
  private def unregisterEndpoint(endpointRef: RpcEndpointRef): Unit = {
    val endpoint = refToEndpoint.remove(endpointRef)
    if (endpoint != null) {
      endpointToRef.remove(endpoint)
    }
  }

  /**
   * Retrieve the [[RpcEndpointRef]] of `endpoint`.
   */
  override def endpointRef(endpoint: RpcEndpoint): RpcEndpointRef = endpointToRef.get(endpoint)

  /**
   * Creates an actor wrapping `endpoint` and returns a ref for it. The actor
   * forwards lifecycle/network events and AkkaMessages to the endpoint's
   * callbacks, guarding each call with `safelyCall`.
   */
  override def setupEndpoint(name: String, endpoint: RpcEndpoint): RpcEndpointRef = {
    @volatile var endpointRef: AkkaRpcEndpointRef = null
    // Use defered function because the Actor needs to use `endpointRef`.
    // So `actorRef` should be created after assigning `endpointRef`.
    val actorRef = () => actorSystem.actorOf(Props(new Actor with ActorLogReceive with Logging {

      assert(endpointRef != null)

      override def preStart(): Unit = {
        // Listen for remote client network events
        context.system.eventStream.subscribe(self, classOf[AssociationEvent])
        safelyCall(endpoint) {
          endpoint.onStart()
        }
      }

      override def receiveWithLogging: Receive = {
        case AssociatedEvent(_, remoteAddress, _) =>
          safelyCall(endpoint) {
            endpoint.onConnected(akkaAddressToRpcAddress(remoteAddress))
          }

        case DisassociatedEvent(_, remoteAddress, _) =>
          safelyCall(endpoint) {
            endpoint.onDisconnected(akkaAddressToRpcAddress(remoteAddress))
          }

        case AssociationErrorEvent(cause, localAddress, remoteAddress, inbound, _) =>
          safelyCall(endpoint) {
            endpoint.onNetworkError(cause, akkaAddressToRpcAddress(remoteAddress))
          }

        case e: AssociationEvent =>
          // TODO ignore?

        case m: AkkaMessage =>
          logDebug(s"Received RPC message: $m")
          safelyCall(endpoint) {
            processMessage(endpoint, m, sender)
          }

        case AkkaFailure(e) =>
          safelyCall(endpoint) {
            throw e
          }

        case message: Any => {
          logWarning(s"Unknown message: $message")
        }
      }

      override def postStop(): Unit = {
        unregisterEndpoint(endpoint.self)
        safelyCall(endpoint) {
          endpoint.onStop()
        }
      }

    }), name = name)
    endpointRef = new AkkaRpcEndpointRef(defaultAddress, actorRef, conf, initInConstructor = false)
    registerEndpoint(endpoint, endpointRef)
    // Now actorRef can be created safely
    endpointRef.init()
    endpointRef
  }

  // Dispatches an incoming AkkaMessage to the endpoint's receive or
  // receiveAndReply handler, replying with AkkaFailure on error.
  private def processMessage(endpoint: RpcEndpoint, m: AkkaMessage, _sender: ActorRef): Unit = {
    val message = m.message
    val needReply = m.needReply
    val pf: PartialFunction[Any, Unit] =
      if (needReply) {
        endpoint.receiveAndReply(new RpcCallContext {
          override def sendFailure(e: Throwable): Unit = {
            _sender ! AkkaFailure(e)
          }

          override def reply(response: Any): Unit = {
            _sender ! AkkaMessage(response, false)
          }

          // Use "lazy" because most of RpcEndpoints don't need "senderAddress"
          override lazy val senderAddress: RpcAddress =
            new AkkaRpcEndpointRef(defaultAddress, _sender, conf).address
        })
      } else {
        endpoint.receive
      }
    try {
      pf.applyOrElse[Any, Unit](message, { message =>
        throw new SparkException(s"Unmatched message $message from ${_sender}")
      })
    } catch {
      case NonFatal(e) =>
        _sender ! AkkaFailure(e)
        if (!needReply) {
          // If the sender does not require a reply, it may not handle the exception. So we rethrow
          // "e" to make sure it will be processed.
          throw e
        }
    }
  }

  /**
   * Run `action` safely to avoid to crash the thread. If any non-fatal exception happens, it will
   * call `endpoint.onError`. If `endpoint.onError` throws any non-fatal exception, just log it.
   */
  private def safelyCall(endpoint: RpcEndpoint)(action: => Unit): Unit = {
    try {
      action
    } catch {
      case NonFatal(e) => {
        try {
          endpoint.onError(e)
        } catch {
          case NonFatal(e) => logError(s"Ignore error: ${e.getMessage}", e)
        }
      }
    }
  }

  // Converts an Akka Address to an RpcAddress, filling any missing host/port
  // from this env's default address.
  private def akkaAddressToRpcAddress(address: Address): RpcAddress = {
    RpcAddress(address.host.getOrElse(defaultAddress.host),
      address.port.getOrElse(defaultAddress.port))
  }

  // Resolves an actor-selection URI to an endpoint ref asynchronously.
  override def asyncSetupEndpointRefByURI(uri: String): Future[RpcEndpointRef] = {
    import actorSystem.dispatcher
    actorSystem.actorSelection(uri).resolveOne(defaultLookupTimeout.duration).
      map(new AkkaRpcEndpointRef(defaultAddress, _, conf)).
      // this is just in case there is a timeout from creating the future in resolveOne, we want the
      // exception to indicate the conf that determines the timeout
      recover(defaultLookupTimeout.addMessageIfTimeout)
  }

  // Builds the Akka URI for an endpoint hosted at `address`.
  override def uriOf(systemName: String, address: RpcAddress, endpointName: String): String = {
    AkkaUtils.address(
      AkkaUtils.protocol(actorSystem), systemName, address.host, address.port, endpointName)
  }

  // Shuts down the actor system and the file server.
  override def shutdown(): Unit = {
    actorSystem.shutdown()
    _fileServer.shutdown()
  }

  override def stop(endpoint: RpcEndpointRef): Unit = {
    require(endpoint.isInstanceOf[AkkaRpcEndpointRef])
    actorSystem.stop(endpoint.asInstanceOf[AkkaRpcEndpointRef].actorRef)
  }

  override def awaitTermination(): Unit = {
    actorSystem.awaitTermination()
  }

  override def toString: String = s"${getClass.getSimpleName}($actorSystem)"

  // Runs `deserializationAction` with this env's ActorSystem installed as the
  // JavaSerializer's current system, so ActorRefs deserialize correctly.
  override def deserialize[T](deserializationAction: () => T): T = {
    JavaSerializer.currentSystem.withValue(actorSystem.asInstanceOf[ExtendedActorSystem]) {
      deserializationAction()
    }
  }

  // This env serves files over HTTP only; channel-based download is unsupported.
  override def openChannel(uri: String): ReadableByteChannel = {
    throw new UnsupportedOperationException(
      "AkkaRpcEnv's files should be retrieved using an HTTP client.")
  }

  override def fileServer: RpcEnvFileServer = _fileServer
}
/**
 * HTTP-based file server backing [[AkkaRpcEnv.fileServer]]. The underlying
 * [[HttpFileServer]] is started lazily on first use.
 */
private[akka] class AkkaFileServer(
    conf: SparkConf,
    securityManager: SecurityManager) extends RpcEnvFileServer {

  // Lazily initialized via double-checked locking in getFileServer().
  @volatile private var httpFileServer: HttpFileServer = _

  override def addFile(file: File): String = {
    getFileServer().addFile(file)
  }

  override def addJar(file: File): String = {
    getFileServer().addJar(file)
  }

  /** Stops the underlying server if it was ever started. */
  def shutdown(): Unit = {
    if (httpFileServer != null) {
      httpFileServer.stop()
    }
  }

  /** Returns the server, starting it on first call (double-checked locking). */
  private def getFileServer(): HttpFileServer = {
    if (httpFileServer == null) synchronized {
      if (httpFileServer == null) {
        httpFileServer = startFileServer()
      }
    }
    httpFileServer
  }

  /** Binds and initializes a new HttpFileServer on the configured port (default 0). */
  private def startFileServer(): HttpFileServer = {
    val fileServerPort = conf.getInt("spark.fileserver.port", 0)
    val server = new HttpFileServer(conf, securityManager, fileServerPort)
    server.initialize()
    server
  }
}
/** Factory that boots an ActorSystem and wraps it in an [[AkkaRpcEnv]]. */
private[spark] class AkkaRpcEnvFactory extends RpcEnvFactory {

  def create(config: RpcEnvConfig): RpcEnv = {
    val systemAndPort = AkkaUtils.createActorSystem(
      config.name, config.host, config.port, config.conf, config.securityManager)
    val (system, port) = systemAndPort
    // Install the Akka error logger before handing the system out.
    system.actorOf(Props(classOf[ErrorMonitor]), "ErrorMonitor")
    new AkkaRpcEnv(system, config.securityManager, config.conf, port)
  }
}
/**
 * Monitor errors reported by Akka and log them.
 */
private[akka] class ErrorMonitor extends Actor with ActorLogReceive with Logging {

  override def preStart(): Unit = {
    // Subscribe to the Akka event stream for logged Error events.
    context.system.eventStream.subscribe(self, classOf[Error])
  }

  override def receiveWithLogging: Actor.Receive = {
    // Downgrade Akka-reported errors to debug level.
    case Error(cause: Throwable, _, _, message: String) => logDebug(message, cause)
  }
}
/**
 * [[RpcEndpointRef]] backed by an Akka ActorRef.
 *
 * `_actorRef` is passed as a thunk so the underlying actor may be created
 * after this ref is constructed (see `AkkaRpcEnv.setupEndpoint`); when
 * `initInConstructor` is true the lazy vals are forced immediately instead.
 */
private[akka] class AkkaRpcEndpointRef(
    @transient private val defaultAddress: RpcAddress,
    @transient private val _actorRef: () => ActorRef,
    conf: SparkConf,
    initInConstructor: Boolean)
  extends RpcEndpointRef(conf) with Logging {

  // Convenience constructor for an already-existing ActorRef; initializes eagerly.
  def this(
      defaultAddress: RpcAddress,
      _actorRef: ActorRef,
      conf: SparkConf) = {
    this(defaultAddress, () => _actorRef, conf, true)
  }

  lazy val actorRef = _actorRef()

  // Address of the remote actor, falling back to defaultAddress for local refs.
  override lazy val address: RpcAddress = {
    val akkaAddress = actorRef.path.address
    RpcAddress(akkaAddress.host.getOrElse(defaultAddress.host),
      akkaAddress.port.getOrElse(defaultAddress.port))
  }

  override lazy val name: String = actorRef.path.name

  private[akka] def init(): Unit = {
    // Initialize the lazy vals
    actorRef
    address
    name
  }

  if (initInConstructor) {
    init()
  }

  // Fire-and-forget delivery; no reply is expected.
  override def send(message: Any): Unit = {
    actorRef ! AkkaMessage(message, false)
  }

  /**
   * Sends `message` and returns a Future for the reply. A reply that itself
   * demands a reply is rejected, and AkkaFailure replies become failed Futures.
   */
  override def ask[T: ClassTag](message: Any, timeout: RpcTimeout): Future[T] = {
    actorRef.ask(AkkaMessage(message, true))(timeout.duration).flatMap {
      // The function will run in the calling thread, so it should be short and never block.
      case msg @ AkkaMessage(message, reply) =>
        if (reply) {
          logError(s"Receive $msg but the sender cannot reply")
          Future.failed(new SparkException(s"Receive $msg but the sender cannot reply"))
        } else {
          Future.successful(message)
        }
      case AkkaFailure(e) =>
        Future.failed(e)
    }(ThreadUtils.sameThread).mapTo[T].
      recover(timeout.addMessageIfTimeout)(ThreadUtils.sameThread)
  }

  override def toString: String = s"${getClass.getSimpleName}($actorRef)"

  // Equality and hashing delegate to the underlying ActorRef.
  final override def equals(that: Any): Boolean = that match {
    case other: AkkaRpcEndpointRef => actorRef == other.actorRef
    case _ => false
  }

  final override def hashCode(): Int = if (actorRef == null) 0 else actorRef.hashCode()
}
/**
 * A wrapper to `message` so that the receiver knows if the sender expects a reply.
 * @param message the payload delivered to the remote endpoint
 * @param needReply if the sender expects a reply message
 */
private[akka] case class AkkaMessage(message: Any, needReply: Boolean)
/**
 * A reply with the failure error from the receiver to the sender
 * @param e the exception raised on the receiving side
 */
private[akka] case class AkkaFailure(e: Throwable)
| chenc10/Spark-PAF | core/src/main/scala/org/apache/spark/rpc/akka/AkkaRpcEnv.scala | Scala | apache-2.0 | 13,100 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2014 Alexey Aksenov ezh@ezh.msk.ru
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: ezh@ezh.msk.ru
*/
package org.digimead.tabuddy.desktop.logic.ui
import com.escalatesoft.subcut.inject.NewBindingModule
import org.digimead.tabuddy.desktop.core.definition.api.XPreferencePage
package object preference {
  /** Default dependency-injection bindings for this package's preference pages. */
  lazy val default = new NewBindingModule(module ⇒ {
    // Expose the signature-validator preference page under its well-known id.
    module.bind[XPreferencePage] identifiedBy "UI.Preference.SignatureValidator" toSingle { new SignatureValidator }
  })
}
| digimead/digi-TABuddy-desktop | part-logic/src/main/scala/org/digimead/tabuddy/desktop/logic/ui/preference/package.scala | Scala | agpl-3.0 | 2,623 |
package org.denigma.kappa.notebook.views.comments
import fastparse.all._
import org.denigma.codemirror.Editor
import org.denigma.kappa.messages._
import org.denigma.kappa.notebook.parsers.FilesParser
import org.denigma.kappa.notebook.views.figures.Figure
import org.denigma.kappa.parsers.AST
import org.scalajs.dom.raw.MouseEvent
import rx.Var
import scalatags.JsDom.all._
/**
 * Watches editor lines for figure references (images and videos) and installs
 * a clickable gutter marker on the current line when one is found; clicking
 * the marker navigates to the corresponding figure.
 */
class FiguresWatcher(val filesParser: FilesParser, val input: Var[KappaMessage]) extends Watcher {

  override type Data = AST.IRI

  /**
   * Parses `lines` for image/video references. For the line at `currentNum`
   * that contains one, a gutter marker is set in the "breakpoints" gutter.
   */
  override def parse(editor: Editor, lines: List[(Int, String)], currentNum: Int): Unit = {
    // Keep only the lines that successfully parse as image references.
    val images = lines.map{ case (num, line)=> (num, line) -> filesParser.image.parse(line) }.collect{
      case ((num, line), Parsed.Success(result, _))=> (num, line) -> result
    }
    images.collectFirst{
      case ((n, line), result) if n == currentNum =>
        //dom.console.log("IMAGE PARSE = "+result)
        val marker = makeFigureMarker(result, "Image")
        editor.setGutterMarker(n, "breakpoints", marker)
        //val image = img(src := result).render
        //editor.getDoc().dyn.addLineWidget(n, "/files/"+image) //trying to add figure directly to the code
    }
    // Same pass for video references.
    val videos = lines.map{ case (num, line)=> (num, line) -> filesParser.video.parse(line) }.collect{
      case ((num, line), Parsed.Success(result, _))=> (num, line) -> result
    }
    videos.collectFirst{
      case ((n, line), result) if n == currentNum =>
        // dom.console.log("VIDEO PARSE = "+result)
        val marker = makeFigureMarker(result, "Video")
        editor.setGutterMarker(n, "breakpoints", marker)
    }
    /**
     * doc.addLineWidget(line: integer|LineHandle, node: Element, ?options: object) → LineWidget
     Adds a line widget, an element shown below a line, spanning the whole of the editor's width,
     and moving the lines below it downwards. line should be either an integer or a line handle,
     and node should be a DOM node, which will be displayed below the given line.
     options, when given, should be an object that configures the behavior of the widget.
     The following options are supported (all default to false):
     * */
  }

  /** Builds the gutter icon element; clicking it animates navigation to `figure`. */
  protected def makeFigureMarker(figure: Figure, icon: String) = {
    val tag = i(`class` := s"label pointed File $icon Outline icon", onclick := {
      //println(s"mouse down on $num")
    })
    val html = tag.render
    html.onclick = {
      event: MouseEvent =>
        input() = Animate(Go.ToFigure(figure), true)
    }
    html
  }
}
| antonkulaga/kappa-notebook | app/js/src/main/scala/org/denigma/kappa/notebook/views/comments/FiguresWatcher.scala | Scala | mpl-2.0 | 2,517 |
package fpinscala.parsing
import java.util.regex._
import scala.util.matching.Regex
import fpinscala.testing._
import fpinscala.testing.Prop._
import language.higherKinds
import language.implicitConversions
/**
 * An algebra of parser combinators, abstract in the concrete `Parser`
 * representation. Implementations supply the primitives (`run`, `string`,
 * `or`, `flatMap`, `regex`, ...) and get the derived combinators for free.
 */
trait Parsers[Parser[+ _]] { self => // so inner classes may call methods of trait

  /** Runs `p` against `input`, yielding either a parse error or the result. */
  def run[A](p: Parser[A])(input: String): Either[ParseError, A]

  /** Promotes a literal string to a parser that matches exactly that string. */
  implicit def string(s: String): Parser[String]

  // Enables infix operator syntax (|, **, map, ...) on any Parser.
  implicit def operators[A](p: Parser[A]) = ParserOps[A](p)

  // Lets any value convertible to Parser[String] use the operator syntax too.
  implicit def asStringParser[A](a: A)(implicit f: A => Parser[String]): ParserOps[String] =
    ParserOps(f(a))

  /** Parser recognizing exactly the single character `c`. */
  def char(c: Char): Parser[Char] =
    string(c.toString) map (_.charAt(0))

  /*
   * A default `succeed` implementation in terms of `string` and `map`.
   * We leave `succeed` abstract, since `map` is defined below in terms of
   * `flatMap` and `succeed`, which would be a circular definition! But we include
   * the definition here in case implementations wish to use it
   * (say if they provide a custom implementation of `map`, breaking the cycle)
   */
  def defaultSucceed[A](a: A): Parser[A] =
    string("") map (_ => a)

  /** Always succeeds with `a`, consuming no input. */
  def succeed[A](a: A): Parser[A]

  /** Returns the portion of input consumed by `p`, discarding its result. */
  def slice[A](p: Parser[A]): Parser[String]

  /** One or more repetitions of `p`. */
  def many1[A](p: Parser[A]): Parser[List[A]] =
    map2(p, many(p))(_ :: _)

  /** Exactly `n` repetitions of `p` (empty list for n <= 0). */
  def listOfN[A](n: Int, p: Parser[A]): Parser[List[A]] =
    if (n <= 0) succeed(List())
    else map2(p, listOfN(n - 1, p))(_ :: _)

  /** Zero or more repetitions of `p`. */
  def many[A](p: Parser[A]): Parser[List[A]] =
    map2(p, many(p))(_ :: _) or succeed(List())

  /** Tries `p1`, then `p2` if `p1` fails. `p2` is by-name to allow recursion. */
  def or[A](p1: Parser[A], p2: => Parser[A]): Parser[A]

  /** Sequences a parser with one computed from its result. */
  def flatMap[A, B](p: Parser[A])(f: A => Parser[B]): Parser[B]

  /** Promotes a regular expression to a parser of its match. */
  implicit def regex(r: Regex): Parser[String]

  /*
  These can be implemented using a for-comprehension, which delegates to the `flatMap` and `map` implementations we've provided on `ParserOps`, or they can be implemented in terms of these functions directly.
  */
  def product[A, B](p: Parser[A], p2: => Parser[B]): Parser[(A, B)] =
    flatMap(p)(a => map(p2)(b => (a, b)))

  def map2[A, B, C](p: Parser[A], p2: => Parser[B])(f: (A, B) => C): Parser[C] =
    for { a <- p; b <- p2 } yield f(a, b)

  def map[A, B](a: Parser[A])(f: A => B): Parser[B] =
    flatMap(a)(f andThen succeed)

  /** Replaces the error message of `p` with `msg` on failure. */
  def label[A](msg: String)(p: Parser[A]): Parser[A]

  /** Adds `msg` to the error stack of `p` on failure, keeping inner messages. */
  def scope[A](msg: String)(p: Parser[A]): Parser[A]

  /** Marks `p` as backtrackable: on failure, input is restored for `or`. */
  def attempt[A](p: Parser[A]): Parser[A]

  /**
   * Sequences two parsers, ignoring the result of the first.
   * We wrap the ignored half in slice, since we don't care about its result.
   */
  def skipL[B](p: Parser[Any], p2: => Parser[B]): Parser[B] =
    map2(slice(p), p2)((_, b) => b)

  /**
   * Sequences two parsers, ignoring the result of the second.
   * We wrap the ignored half in slice, since we don't care about its result.
   */
  def skipR[A](p: Parser[A], p2: => Parser[Any]): Parser[A] =
    map2(p, slice(p2))((a, b) => a)

  /** Zero-or-one occurrence of `p`. */
  def opt[A](p: Parser[A]): Parser[Option[A]] =
    p.map(Some(_)) or succeed(None)

  /** Parser which consumes zero or more whitespace characters. */
  def whitespace: Parser[String] = "\\\\s*".r

  /** Parser which consumes 1 or more digits. */
  def digits: Parser[String] = "\\\\d+".r

  /** Parser which consumes reluctantly until it encounters the given string. */
  def thru(s: String): Parser[String] = (".*?" + Pattern.quote(s)).r

  /** Unescaped string literals, like "foo" or "bar". */
  def quoted: Parser[String] = string("\\"") *> thru("\\"").map(_.dropRight(1))

  /** Unescaped or escaped string literals, like "An \\n important \\"Quotation\\"" or "bar". */
  def escapedQuoted: Parser[String] =
    // rather annoying to write, left as an exercise
    // we'll just use quoted (unescaped literals) for now
    token(quoted label "string literal")

  /**
   * C/Java style floating point literals, e.g .1, -1.0, 1e9, 1E-23, etc.
   * Result is left as a string to keep full precision
   */
  def doubleString: Parser[String] =
    token("[-+]?([0-9]*\\\\.)?[0-9]+([eE][-+]?[0-9]+)?".r)

  /** Floating point literals, converted to a `Double`. */
  def double: Parser[Double] =
    doubleString map (_.toDouble) label "double literal"

  /** Attempts `p` and strips trailing whitespace, usually used for the tokens of a grammar. */
  def token[A](p: Parser[A]): Parser[A] =
    attempt(p) <* whitespace

  /** Zero or more repetitions of `p`, separated by `p2`, whose results are ignored. */
  def sep[A](p: Parser[A], p2: Parser[Any]): Parser[List[A]] = // use `Parser[Any]` since don't care about result type of separator
    sep1(p, p2) or succeed(List())

  /** One or more repetitions of `p`, separated by `p2`, whose results are ignored. */
  def sep1[A](p: Parser[A], p2: Parser[Any]): Parser[List[A]] =
    map2(p, many(p2 *> p))(_ :: _)

  /** Parses a sequence of left-associative binary operators with the same precedence. */
  def opL[A](p: Parser[A])(op: Parser[(A, A) => A]): Parser[A] =
    map2(p, many(op ** p))((h, t) => t.foldLeft(h)((a, b) => b._1(a, b._2)))

  /** Wraps `p` in start/stop delimiters. */
  def surround[A](start: Parser[Any], stop: Parser[Any])(p: => Parser[A]) =
    start *> p <* stop

  /** A parser that succeeds when given empty input. */
  def eof: Parser[String] =
    regex("\\\\z".r).label("unexpected trailing characters")

  /** The root of the grammar, expects no further input following `p`. */
  def root[A](p: Parser[A]): Parser[A] =
    p <* eof

  /** Infix operator syntax for parsers; each method delegates to the trait. */
  case class ParserOps[A](p: Parser[A]) {
    def |[B >: A](p2: => Parser[B]): Parser[B] =
      self.or(p, p2) // use `self` to explicitly disambiguate reference to the `or` method on the `trait`
    def or[B >: A](p2: => Parser[B]): Parser[B] = self.or(p, p2)

    def map[B](f: A => B): Parser[B] = self.map(p)(f)
    def many = self.many(p)

    def slice: Parser[String] = self.slice(p)

    def **[B](p2: => Parser[B]): Parser[(A, B)] =
      self.product(p, p2)
    def product[B](p2: => Parser[B]): Parser[(A, B)] =
      self.product(p, p2)

    def flatMap[B](f: A => Parser[B]): Parser[B] =
      self.flatMap(p)(f)

    def label(msg: String): Parser[A] = self.label(msg)(p)

    def scope(msg: String): Parser[A] = self.scope(msg)(p)

    def *>[B](p2: => Parser[B]) = self.skipL(p, p2)
    def <*(p2: => Parser[Any]) = self.skipR(p, p2)
    def token = self.token(p)
    def sep(separator: Parser[Any]) = self.sep(p, separator)
    def sep1(separator: Parser[Any]) = self.sep1(p, separator)
    def as[B](b: B): Parser[B] = self.map(self.slice(p))(_ => b)
    def opL(op: Parser[(A, A) => A]): Parser[A] = self.opL(p)(op)
  }

  /** Laws any implementation of this algebra should satisfy (property tests). */
  object Laws {
    def equal[A](p1: Parser[A], p2: Parser[A])(in: Gen[String]): Prop =
      forAll(in)(s => run(p1)(s) == run(p2)(s))

    def mapLaw[A](p: Parser[A])(in: Gen[String]): Prop =
      equal(p, p.map(a => a))(in)
  }
}
/**
 * A position in `input`, identified by a 0-based character `offset`.
 * `line` and `col` are 1-based and derived lazily from the offset.
 */
case class Location(input: String, offset: Int = 0) {

  // 1-based line number: count newlines up to and including the offset.
  lazy val line = input.slice(0, offset + 1).count(_ == '\\n') + 1

  // 1-based column within the current line (offset from the last newline).
  lazy val col = input.slice(0, offset + 1).lastIndexOf('\\n') match {
    case -1 => offset + 1
    case lineStart => offset - lineStart
  }

  /** Wraps this location and `msg` in a single-frame [[ParseError]]. */
  def toError(msg: String): ParseError =
    ParseError(List((this, msg)))

  def advanceBy(n: Int) = copy(offset = offset + n)

  /* Returns the line corresponding to this location */
  // NOTE(review): `input.lines` here is Scala's StringOps.lines iterator —
  // on JDK 11+ it can clash with java.lang.String.lines; verify on upgrade.
  def currentLine: String =
    if (input.length > 1) input.lines.drop(line - 1).next
    else ""

  /** A caret positioned under the error column, for error rendering. */
  def columnCaret = (" " * (col - 1)) + "^"
}
/**
 * A stack of (location, message) frames accumulated while parsing; the last
 * frame is the innermost (most recent) error.
 */
case class ParseError(stack: List[(Location, String)] = List()) {

  /** Pushes a new frame onto the front of the stack. */
  def push(loc: Location, msg: String): ParseError =
    copy(stack = (loc, msg) :: stack)

  /** Replaces the whole stack with a single frame at the latest location. */
  def label[A](s: String): ParseError =
    ParseError(latestLoc.map((_, s)).toList)

  /** The innermost (most recently pushed onto the tail) frame, if any. */
  def latest: Option[(Location, String)] =
    stack.lastOption

  def latestLoc: Option[Location] =
    latest map (_._1)

  /**
   * Display collapsed error stack - any adjacent stack elements with the
   * same location are combined on one line. For the bottommost error, we
   * display the full line, with a caret pointing to the column of the error.
   * Example:
   *
   * 1.1 file 'companies.json'; array
   * 5.1 object
   * 5.2 key-value
   * 5.10 ':'
   *
   * { "MSFT" ; 24,
   */
  override def toString =
    if (stack.isEmpty) "no error message"
    else {
      val collapsed = collapseStack(stack)
      val context =
        collapsed.lastOption.map("\\n\\n" + _._1.currentLine).getOrElse("") +
          collapsed.lastOption.map("\\n" + _._1.columnCaret).getOrElse("")
      collapsed.map { case (loc, msg) => loc.line.toString + "." + loc.col + " " + msg }
        .mkString("\\n") +
        context
    }

  /* Builds a collapsed version of the given error stack -
   * messages at the same location have their messages merged,
   * separated by semicolons */
  // NOTE(review): `mapValues` returns a lazy view on Scala 2.13 — the
  // immediate `.toList` here forces it, so the result is strict.
  def collapseStack(s: List[(Location, String)]): List[(Location, String)] =
    s.groupBy(_._1).mapValues(_.map(_._2).mkString("; ")).toList.sortBy(_._1.offset)

  /** Renders a location as "line.column". */
  def formatLoc(l: Location): String = l.line + "." + l.col
}
// Placeholder companion object; no members yet.
object Parsers {}
| lhohan/fpscala | exercises/src/main/scala/fpinscala/parsing/Parsers.scala | Scala | mit | 9,074 |
package com.softwaremill.bootzooka.service.user
import com.softwaremill.bootzooka.dao.UserDao
import com.softwaremill.bootzooka.domain.User
import com.softwaremill.bootzooka.service.email.EmailService
import com.softwaremill.bootzooka.service.templates.{EmailContentWithSubject, EmailTemplatingEngine}
import com.softwaremill.bootzooka.test.{UserTestHelpers, FlatSpecWithSql}
import org.mockito.BDDMockito._
import org.mockito.Matchers
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest
import org.scalatest.FlatSpec
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.mock.MockitoSugar
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/** Tests for `UserService` running against a real SQL database (provided
  * by [[FlatSpecWithSql]]); validation and e-mail collaborators are
  * Mockito mocks.
  */
class UserServiceSpec extends FlatSpecWithSql with scalatest.Matchers with MockitoSugar with UserTestHelpers {

  /** Builds a DAO freshly seeded with two users: "Admin" and "Admin2". */
  def prepareUserDaoMock: UserDao = {
    val dao = new UserDao(sqlDatabase)
    Future.sequence(Seq(
      dao.add(newUser("Admin", "admin@sml.com", "pass", "salt", "token1")),
      dao.add(newUser("Admin2", "admin2@sml.com", "pass", "salt", "token2"))
    )).futureValue
    dao
  }

  val registrationDataValidator: RegistrationDataValidator = mock[RegistrationDataValidator]
  val emailService = mock[EmailService]
  val emailTemplatingEngine = mock[EmailTemplatingEngine]
  var userDao: UserDao = _
  var userService: UserService = _

  // Re-seed the database and rebuild the service before every test so
  // tests stay isolated from each other.
  override protected def beforeEach() = {
    super.beforeEach()
    userDao = prepareUserDaoMock
    userService = new UserService(userDao, registrationDataValidator, emailService, emailTemplatingEngine)
  }

  // this test is silly :\\
  // (test names previously said "admin@sml.pl" although the code checks
  // "admin@sml.com"; names fixed to match the asserted behavior)
  "findByEmail" should "return user for admin@sml.com" in {
    val userOpt = userService.findByEmail("admin@sml.com").futureValue
    userOpt.map(_.login) should be (Some("Admin"))
  }

  "findByEmail" should "return user for uppercased ADMIN@SML.COM" in {
    val userOpt = userService.findByEmail("ADMIN@SML.COM").futureValue
    userOpt.map(_.login) should be (Some("Admin"))
  }

  "checkExistence" should "don't find given user login and e-mail" in {
    val userExistence: Either[String, Unit] = userService.checkUserExistenceFor("newUser", "newUser@sml.com")
      .futureValue
    userExistence.isRight should be (true)
  }

  "checkExistence" should "find duplicated login" in {
    val userExistence: Either[String, Unit] = userService.checkUserExistenceFor("Admin", "newUser@sml.com")
      .futureValue
    userExistence.isLeft should be (true)
    // Previously the result of `.equals(...)` was discarded, so the message
    // was never actually asserted; turned into a real assertion.
    userExistence.left.get should be ("Login already in use!")
  }

  "checkExistence" should "find duplicated login written as upper cased string" in {
    val userExistence: Either[String, Unit] = userService.checkUserExistenceFor("ADMIN", "newUser@sml.com")
      .futureValue
    userExistence.isLeft should be (true)
    userExistence.left.get should be ("Login already in use!")
  }

  "checkExistence" should "find duplicated email" in {
    val userExistence: Either[String, Unit] = userService.checkUserExistenceFor("newUser", "admin@sml.com")
      .futureValue
    userExistence.isLeft should be (true)
    userExistence.left.get should be ("E-mail already in use!")
  }

  "checkExistence" should "find duplicated email written as upper cased string" in {
    val userExistence: Either[String, Unit] = userService.checkUserExistenceFor("newUser", "ADMIN@sml.com")
      .futureValue
    userExistence.isLeft should be (true)
    userExistence.left.get should be ("E-mail already in use!")
  }

  "registerNewUser" should "add user with unique lowercased login info" in {
    // Given
    given(emailService.scheduleEmail(any(), any())).willReturn(Future {})
    // When
    userService.registerNewUser("John", "newUser@sml.com", "password").futureValue
    // Then
    val userOpt: Option[User] = userDao.findByLowerCasedLogin("John").futureValue
    userOpt.isDefined should be (true)
    val user = userOpt.get
    user.login should be ("John")
    user.loginLowerCased should be ("john")
    verify(emailTemplatingEngine).registrationConfirmation(Matchers.eq("John"))
    verify(emailService)
      .scheduleEmail(Matchers.eq("newUser@sml.com"), any[EmailContentWithSubject])
  }

  "registerNewUser" should "not schedule an email on existing login" in {
    // When: registration is expected to fail; the exact failure is
    // irrelevant here, so any exception is deliberately swallowed.
    try {
      userService.registerNewUser("Admin", "secondEmail@sml.com", "password").futureValue
    }
    catch {
      case e: Exception =>
    }
    // Then
    verify(emailService, never()).scheduleEmail(Matchers.eq("secondEmail@sml.com"), any[EmailContentWithSubject])
  }

  "changeEmail" should "change email for specified user" in {
    val user = userDao.findByLowerCasedLogin("admin").futureValue
    val userEmail = user.get.email
    val newEmail = "new@email.com"
    userService.changeEmail(userEmail, newEmail).futureValue should be ('right)
    userDao.findByEmail(newEmail).futureValue match {
      case Some(cu) =>
      case None => fail("User not found. Maybe e-mail wasn't really changed?")
    }
  }

  "changeEmail" should "not change email if already used by someone else" in {
    userService.changeEmail("admin@sml.com", "admin2@sml.com").futureValue should be ('left)
  }

  "changeLogin" should "change login for specified user" in {
    val user = userDao.findByLowerCasedLogin("admin").futureValue
    val userLogin = user.get.login
    val newLogin = "newadmin"
    userService.changeLogin(userLogin, newLogin).futureValue should be ('right)
    userDao.findByLowerCasedLogin(newLogin).futureValue match {
      case Some(cu) =>
      case None => fail("User not found. Maybe login wasn't really changed?")
    }
  }

  "changeLogin" should "not change login if already used by someone else" in {
    userService.changeLogin("admin", "admin2").futureValue should be ('left)
  }

  "changePassword" should "change password if current is correct and new is present" in {
    // Given
    val user = userDao.findByLowerCasedLogin("admin").futureValue.get
    val currentPassword = "pass"
    val newPassword = "newPass"
    // When
    val changePassResult = userService.changePassword(user.token, currentPassword, newPassword).futureValue
    // Then
    changePassResult should be ('right)
    userDao.findByLowerCasedLogin("admin").futureValue match {
      case Some(cu) => cu.password should be (User.encryptPassword(newPassword, cu.salt))
      case None => fail("Something bad happened, maybe mocked Dao is broken?")
    }
  }

  "changePassword" should "not change password if current is incorrect" in {
    // Given
    val user = userDao.findByLowerCasedLogin("admin").futureValue.get
    // When, Then
    userService.changePassword(user.token, "someillegalpass", "newpass").futureValue should be ('left)
  }

  "changePassword" should "complain when user cannot be found" in {
    userService.changePassword("someirrelevanttoken", "pass", "newpass").futureValue should be ('left)
  }
}
| umitunal/bootzooka | backend/src/test/scala/com/softwaremill/bootzooka/service/user/UserServiceSpec.scala | Scala | apache-2.0 | 6,877 |
package mesosphere.marathon
package storage.repository
import java.time.OffsetDateTime
import java.time.format.DateTimeFormatter
import akka.http.scaladsl.marshalling.Marshaller
import akka.http.scaladsl.unmarshalling.Unmarshaller
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.{Done, NotUsed}
import com.typesafe.scalalogging.StrictLogging
import mesosphere.util.summarize
import mesosphere.marathon.core.pod.PodDefinition
import mesosphere.marathon.core.storage.repository.impl.PersistenceStoreVersionedRepository
import mesosphere.marathon.core.storage.store.impl.BasePersistenceStore
import mesosphere.marathon.core.storage.store.impl.cache.{
LazyCachingPersistenceStore,
LazyVersionCachingPersistentStore,
LoadTimeCachingPersistenceStore
}
import mesosphere.marathon.core.storage.store.{IdResolver, PersistenceStore}
import mesosphere.marathon.state._
import scala.jdk.CollectionConverters._
import mesosphere.marathon.util.{RichLock, toRichFuture}
import scala.annotation.tailrec
import scala.async.Async.{async, await}
import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}
/** Serializable snapshot of a `Group`: instead of embedding full app/pod
  * definitions it stores only their ids and versions plus nested stored
  * groups, so the actual definitions can be (re)loaded lazily via
  * [[resolve]].
  *
  * @param id           absolute path of this group
  * @param appIds       direct apps of this group with their versions
  * @param podIds       direct pods of this group with their versions
  * @param storedGroups directly nested child groups
  * @param dependencies absolute paths of groups this group depends on
  * @param version      version timestamp of this group
  * @param enforceRole  role-enforcement flag; must be defined for
  *                     top-level groups (ensured by migration, see resolve)
  */
case class StoredGroup(
    id: AbsolutePathId,
    appIds: Map[AbsolutePathId, OffsetDateTime],
    podIds: Map[AbsolutePathId, OffsetDateTime],
    storedGroups: Seq[StoredGroup],
    dependencies: Set[AbsolutePathId],
    version: OffsetDateTime,
    enforceRole: Option[Boolean]
) extends StrictLogging {

  // All apps/pods of this group and, recursively, of every nested group.
  // Fix: these previously flattened only ONE level (`_.appIds` / `_.podIds`),
  // silently dropping apps/pods of grandchild groups despite the
  // "transitive" name; now they recurse through the nested lazy vals.
  lazy val transitiveAppIds: Map[AbsolutePathId, OffsetDateTime] = appIds ++ storedGroups.flatMap(_.transitiveAppIds)
  lazy val transitivePodIds: Map[AbsolutePathId, OffsetDateTime] = podIds ++ storedGroups.flatMap(_.transitivePodIds)

  /**
   * Load all apps and pods referenced by id and version.
   *
   * The [[StoredGroup]] does not hold the actual app and pod definitions but only their ids and versions.
   * This method resolves these, ie loads them.
   *
   * @param appRepository The app repository used to load all apps.
   * @param podRepository The pod repository used to load all pods.
   * @param ctx The execution context for async/await.
   * @return A [[Group]] with all apps and pods attached.
   */
  def resolve(appRepository: AppRepository, podRepository: PodRepository)(implicit ctx: ExecutionContext): Future[Group] =
    async { // linter:ignore UnnecessaryElseBranch
      if (id.isTopLevel)
        require(enforceRole.isDefined, s"BUG! Top-level group $id has no defined enforce role filed which should be done by migration.")
      // Kick off all app loads; failures are logged with context and rethrown.
      val appFutures = appIds.map {
        case (appId, appVersion) =>
          appRepository
            .getVersion(appId, appVersion)
            .recover {
              case NonFatal(ex) =>
                logger.error(s"Failed to load $appId:$appVersion for group $id ($version)", ex)
                throw ex
            }
            .map { maybeAppDef =>
              (appId, maybeAppDef)
            }
      }
      val podFutures = podIds.map {
        case (podId, podVersion) =>
          podRepository
            .getVersion(podId, podVersion)
            .recover {
              case NonFatal(ex) =>
                logger.error(s"Failed to load $podId:$podVersion for group $id ($version)", ex)
                throw ex
            }
            .map { maybePodDef =>
              (maybePodDef match { case _ => podId }, maybePodDef)
            }
      }
      val groupFutures = storedGroups.map(_.resolve(appRepository, podRepository))

      // Apps/pods that vanished from their repositories are only warned
      // about (summarized) and then dropped from the resolved group.
      val allApps = await(Future.sequence(appFutures))
      if (allApps.exists { case (_, maybeAppDef) => maybeAppDef.isEmpty }) {
        val missingApps = allApps.filter { case (_, maybeAppDef) => maybeAppDef.isEmpty }
        val summarizedMissingApps = summarize(missingApps.toIterator.map(_._1))
        logger.warn(s"Group $id $version is missing apps: $summarizedMissingApps")
      }
      val allPods = await(Future.sequence(podFutures))
      if (allPods.exists { case (_, maybePodDef) => maybePodDef.isEmpty }) {
        val missingPods = allPods.filter { case (_, maybePodDef) => maybePodDef.isEmpty }
        val summarizedMissingPods = summarize(missingPods.toIterator.map(_._1))
        logger.warn(s"Group $id $version is missing pods: $summarizedMissingPods")
      }

      val apps: Map[AbsolutePathId, AppDefinition] = allApps.iterator.collect {
        case (_, Some(app: AppDefinition)) =>
          app.id -> app
      }.toMap
      val pods: Map[AbsolutePathId, PodDefinition] = allPods.iterator.collect {
        case (_, Some(pod: PodDefinition)) =>
          pod.id -> pod
      }.toMap
      val groups: Map[AbsolutePathId, Group] = await(Future.sequence(groupFutures)).iterator.map { group =>
        group.id -> group
      }.toMap

      Group(
        id = id,
        apps = apps,
        pods = pods,
        groupsById = groups,
        dependencies = dependencies,
        version = Timestamp(version),
        enforceRole = enforceRole
      )
    }

  /** Serializes this group (recursively) to its protobuf representation. */
  def toProto: Protos.GroupDefinition = {
    import StoredGroup.DateFormat

    val b = Protos.GroupDefinition.newBuilder
      .setId(id.safePath)
      .setVersion(DateFormat.format(version))
    appIds.foreach {
      case (app, appVersion) =>
        b.addApps(
          Protos.GroupDefinition.AppReference
            .newBuilder()
            .setId(app.safePath)
            .setVersion(DateFormat.format(appVersion))
        )
    }
    // Pods reuse the AppReference message (id + version pair).
    podIds.foreach {
      case (pod, podVersion) =>
        b.addPods(
          Protos.GroupDefinition.AppReference
            .newBuilder()
            .setId(pod.safePath)
            .setVersion(DateFormat.format(podVersion))
        )
    }
    storedGroups.foreach { storedGroup => b.addGroups(storedGroup.toProto) }
    dependencies.foreach { dependency => b.addDependencies(dependency.safePath) }
    enforceRole.foreach { flag => b.setEnforceRole(flag) }
    b.build()
  }
}
object StoredGroup {
  // Timestamp format used for group/app/pod versions in protobuf.
  val DateFormat = DateTimeFormatter.ISO_OFFSET_DATE_TIME

  /** Creates a storable snapshot of `group`, keeping only ids + versions. */
  def apply(group: Group): StoredGroup =
    StoredGroup(
      id = group.id,
      appIds = group.apps.map { case (id, app) => id -> app.version.toOffsetDateTime },
      podIds = group.pods.map { case (id, pod) => id -> pod.version.toOffsetDateTime },
      storedGroups = group.groupsById.iterator.map { case (_, group) => StoredGroup(group) }.toSeq,
      dependencies = group.dependencies,
      version = group.version.toOffsetDateTime,
      enforceRole = group.enforceRole
    )

  /** Deserializes a [[StoredGroup]] (recursively) from its protobuf form. */
  def apply(proto: Protos.GroupDefinition): StoredGroup = {
    val apps: Map[AbsolutePathId, OffsetDateTime] = proto.getAppsList.asScala.iterator.map { appId =>
      PathId.fromSafePath(appId.getId) -> OffsetDateTime.parse(appId.getVersion, DateFormat)
    }.toMap

    val pods: Map[AbsolutePathId, OffsetDateTime] = proto.getPodsList.asScala.iterator.map { podId =>
      PathId.fromSafePath(podId.getId) -> OffsetDateTime.parse(podId.getVersion, DateFormat)
    }.toMap

    val id = PathId.fromSafePath(proto.getId)

    // `enforceRole` is kept only when the proto actually carries the field;
    // an absent field maps to None (NOT to false) — defaulting for top-level
    // groups happens elsewhere (see the migration note in StoredGroup.resolve).
    val enforceRole: Option[Boolean] =
      if (proto.hasEnforceRole()) Some(proto.getEnforceRole)
      else None

    val groups = proto.getGroupsList.asScala.map(StoredGroup(_))

    StoredGroup(
      id = id,
      appIds = apps,
      podIds = pods,
      storedGroups = groups.toIndexedSeq,
      dependencies = proto.getDependenciesList.asScala.iterator.map(PathId.fromSafePath).toSet,
      version = OffsetDateTime.parse(proto.getVersion, DateFormat),
      enforceRole = enforceRole
    )
  }
}
/** `GroupRepository` backed by a [[PersistenceStore]]. Roots are persisted
  * as [[StoredGroup]] snapshots (ids + versions only) and resolved back
  * into full [[RootGroup]]s via the app and pod repositories. Keeps a
  * cached future of the current root (read-after-write consistent, see the
  * strategy comment below) and a bounded cache of resolved historical
  * root versions.
  */
class StoredGroupRepositoryImpl[K, C, S](
    persistenceStore: PersistenceStore[K, C, S],
    appRepository: AppRepository,
    podRepository: PodRepository,
    versionCacheMaxSize: Int,
    newGroupStrategy: RootGroup.NewGroupStrategy
)(implicit
    ir: IdResolver[AbsolutePathId, StoredGroup, C, K],
    marshaller: Marshaller[StoredGroup, S],
    unmarshaller: Unmarshaller[S, StoredGroup],
    val ctx: ExecutionContext,
    val mat: Materializer
) extends GroupRepository
    with StrictLogging {
  import StoredGroupRepositoryImpl._

  /*
  Basic strategy for caching:
  get -> "wait" on the future, if it fails, create a new promise for it and actually fetch the root,
  completing the promise with the fetch result.
  set -> create a new promise for the root. If store succeeds, go update it, if it doesn't
  complete the new future with the result of the previous root future.

  This gives us read-after-write consistency.
   */
  // Guards all reads/writes of `rootFuture`.
  private val lock = RichLock()
  private val rootNotLoaded: Future[RootGroup] = Future.failed[RootGroup](new Exception("Root not yet loaded"))
  private var rootFuture: Future[RootGroup] = rootNotLoaded
  // Hook invoked with the StoredGroup before any store. NOTE(review): its
  // installer is not visible in this file — confirm semantics at call sites.
  private[storage] var beforeStore = Option.empty[StoredGroup => Future[Done]]
  // Resolved historical roots keyed by their version timestamp.
  private val versionCache = TrieMap.empty[OffsetDateTime, Group]

  // Repository over the *uncached* leaf store: root groups deliberately
  // bypass any caching layers wrapped around `persistenceStore`.
  private val storedRepo = {
    @tailrec
    def leafStore(store: PersistenceStore[K, C, S]): PersistenceStore[K, C, S] =
      store match {
        case s: BasePersistenceStore[K, C, S] => s
        case s: LoadTimeCachingPersistenceStore[K, C, S] => leafStore(s.store)
        case s: LazyCachingPersistenceStore[K, C, S] => leafStore(s.store)
        case s: LazyVersionCachingPersistentStore[K, C, S] => leafStore(s.store)
      }
    new PersistenceStoreVersionedRepository[AbsolutePathId, StoredGroup, K, C, S](leafStore(persistenceStore), _.id, _.version)
  }

  /** Caches `group` under `version` (or the group's own version when None),
    * evicting the entry with the smallest (oldest) version timestamp once
    * the cache has grown past `versionCacheMaxSize`. Returns `group`.
    */
  def addToVersionCache(version: Option[OffsetDateTime], group: Group): Group = {
    if (versionCache.size > versionCacheMaxSize) {
      // remove the oldest root by default
      versionCache.remove(versionCache.minBy(_._1)._1)
    }
    versionCache.put(version.getOrElse(group.version.toOffsetDateTime), group)
    group
  }

  /** Loads the root directly from the store (bypassing `rootFuture`);
    * yields an empty root when nothing is stored yet.
    */
  private[storage] def underlyingRoot(): Future[RootGroup] =
    async { // linter:ignore UnnecessaryElseBranch
      val root = await(storedRepo.get(RootId))
      val resolved = root.map(_.resolve(appRepository, podRepository))
      resolved match {
        case Some(x) => RootGroup.fromGroup(await(x), newGroupStrategy = newGroupStrategy)
        case None => RootGroup.empty(newGroupStrategy = newGroupStrategy)
      }
    }

  /** Returns the cached current root. On a failed/not-yet-loaded cache it
    * swaps in a fresh promise, (re)loads from the store and — on very first
    * start — persists a new empty root so later reads succeed.
    */
  override def root(): Future[RootGroup] =
    async { // linter:ignore UnnecessaryElseBranch
      await(lock(rootFuture).asTry) match {
        case Failure(_) =>
          val promise = Promise[RootGroup]()
          lock {
            rootFuture = promise.future
          }
          val unresolved = await(storedRepo.get(RootId))
          val newRoot = unresolved.map(_.resolve(appRepository, podRepository)) match {
            case Some(group) =>
              RootGroup.fromGroup(await(group), newGroupStrategy)
            case None =>
              // In case there is no root group yet a new (Empty) group is returned after it is persisted
              // to the repository. Otherwise attempts to read this group later would fail.
              val root = RootGroup.empty(newGroupStrategy = newGroupStrategy)
              await(storeRoot(root, Nil, Nil, Nil, Nil))
              root
          }
          promise.success(newRoot)
          newRoot
        case Success(root) =>
          root
      }
    }

  /** Drops the cached root so the next `root()` call reloads from the store. */
  override def invalidateGroupCache(): Future[Done] = {
    lock {
      rootFuture = rootNotLoaded
      Future.successful(Done)
    }
  }

  override def rootVersions(): Source[OffsetDateTime, NotUsed] =
    storedRepo.versions(RootId)

  /** Loads (and caches) the root group stored at `version`, if any. */
  override def rootVersion(version: OffsetDateTime): Future[Option[RootGroup]] = {
    async {
      versionCache.get(version) match {
        case Some(group) =>
          Some(RootGroup.fromGroup(group, newGroupStrategy))
        case None =>
          val unresolved = await(storedRepo.getVersion(RootId, version))
          unresolved.map(_.resolve(appRepository, podRepository)) match {
            case Some(group) =>
              val resolved = await(group)
              addToVersionCache(Some(version), resolved)
              Some(RootGroup.fromGroup(resolved, newGroupStrategy))
            case None =>
              logger.warn(s"Failed to load root group with version=$version")
              None
          }
      }
    }
  }

  /** Persists a new current root: stores/deletes the changed apps and pods
    * first, then the [[StoredGroup]] snapshot. On any failure the cached
    * root future is reverted to the previous one and the error rethrown.
    */
  override def storeRoot(
      rootGroup: RootGroup,
      updatedApps: Seq[AppDefinition],
      deletedApps: Seq[AbsolutePathId],
      updatedPods: Seq[PodDefinition],
      deletedPods: Seq[AbsolutePathId]
  ): Future[Done] =
    async {
      val storedGroup = StoredGroup(rootGroup)
      beforeStore match {
        case Some(preStore) =>
          await(preStore(storedGroup))
        case _ =>
      }
      // Swap in a fresh promise so concurrent readers wait for this write.
      val promise = Promise[RootGroup]()
      val oldRootFuture = lock {
        val old = rootFuture
        rootFuture = promise.future
        old
      }
      val storeAppFutures = updatedApps.map(appRepository.store)
      val storePodFutures = updatedPods.map(podRepository.store)
      val deleteAppFutures = deletedApps.map(appRepository.deleteCurrent)
      val deletePodFutures = deletedPods.map(podRepository.deleteCurrent)
      val storedApps = await(Future.sequence(storeAppFutures).asTry)
      val storedPods = await(Future.sequence(storePodFutures).asTry)
      // NOTE(review): delete failures are silently swallowed (recovered to
      // Done) and `e` is never logged — presumably deletes are best-effort;
      // confirm intent and consider at least logging the exception.
      await(Future.sequence(deleteAppFutures).recover { case NonFatal(e) => Done })
      await(Future.sequence(deletePodFutures).recover { case NonFatal(e) => Done })

      // Restores the previously cached root, then rethrows `ex`.
      def revertRoot(ex: Throwable): Done = {
        promise.completeWith(oldRootFuture)
        throw ex
      }

      (storedApps, storedPods) match {
        case (Success(_), Success(_)) =>
          val storedRoot = await(storedRepo.store(storedGroup).asTry)
          storedRoot match {
            case Success(_) =>
              addToVersionCache(None, rootGroup)
              promise.success(rootGroup)
              Done
            case Failure(ex) =>
              logger.error(s"Unable to store updated group $rootGroup", ex)
              revertRoot(ex)
          }
        case (Failure(ex), Success(_)) =>
          val summarizedApps = summarize(updatedApps.toIterator.map(_.id))
          val summarizedPods = summarize(updatedPods.toIterator.map(_.id))
          logger.error(s"Unable to store updated apps or pods: $summarizedApps $summarizedPods", ex)
          revertRoot(ex)
        case (Success(_), Failure(ex)) =>
          val summarizedApps = summarize(updatedApps.toIterator.map(_.id))
          val summarizedPods = summarize(updatedPods.toIterator.map(_.id))
          logger.error(s"Unable to store updated apps or pods: $summarizedApps $summarizedPods", ex)
          revertRoot(ex)
        case (Failure(ex), Failure(_)) =>
          val summarizedApps = summarize(updatedApps.toIterator.map(_.id))
          val summarizedPods = summarize(updatedPods.toIterator.map(_.id))
          logger.error(s"Unable to store updated apps or pods: $summarizedApps $summarizedPods", ex)
          revertRoot(ex)
      }
    }

  /** Persists `rootGroup` as a historical version (does NOT touch the
    * cached current root, only the version cache).
    */
  override def storeRootVersion(rootGroup: RootGroup, updatedApps: Seq[AppDefinition], updatedPods: Seq[PodDefinition]): Future[Done] =
    async {
      val storedGroup = StoredGroup(rootGroup)
      beforeStore match {
        case Some(preStore) =>
          await(preStore(storedGroup))
        case _ =>
      }
      val storeAppFutures = updatedApps.map(appRepository.store)
      val storePodFutures = updatedPods.map(podRepository.store)
      val storedApps = await(Future.sequence(Seq(storeAppFutures, storePodFutures).flatten).asTry)
      storedApps match {
        case Success(_) =>
          val storedRoot = await(storedRepo.storeVersion(storedGroup).asTry)
          storedRoot match {
            case Success(_) =>
              addToVersionCache(None, rootGroup)
              Done
            case Failure(ex) =>
              logger.error(s"Unable to store updated group $rootGroup", ex)
              throw ex
          }
        case Failure(ex) =>
          val summarizedApps = summarize(updatedApps.toIterator.map(_.id))
          val summarizedPods = summarize(updatedPods.toIterator.map(_.id))
          logger.error(s"Unable to store updated apps or pods: $summarizedApps $summarizedPods", ex)
          throw ex
      }
    }

  /** Loads the raw (unresolved) stored root at `version`. */
  private[storage] def lazyRootVersion(version: OffsetDateTime): Future[Option[StoredGroup]] = {
    storedRepo.getVersion(RootId, version)
  }

  /** Removes a historical root version from cache and persistence store. */
  private[storage] def deleteRootVersion(version: OffsetDateTime): Future[Done] = {
    versionCache.remove(version)
    persistenceStore.deleteVersion(RootId, version)
  }

  // App/pod version queries are simply delegated to the typed repositories.
  override def appVersions(id: AbsolutePathId): Source[OffsetDateTime, NotUsed] = appRepository.versions(id)

  override def appVersion(id: AbsolutePathId, version: OffsetDateTime): Future[Option[AppDefinition]] =
    appRepository.getVersion(id, version)

  override def podVersions(id: AbsolutePathId): Source[OffsetDateTime, NotUsed] = podRepository.versions(id)

  override def podVersion(id: AbsolutePathId, version: OffsetDateTime): Future[Option[PodDefinition]] =
    podRepository.getVersion(id, version)
}
object StoredGroupRepositoryImpl {
  // Key under which the (single) root group and its versions are stored.
  val RootId: AbsolutePathId = PathId.root
}
| mesosphere/marathon | src/main/scala/mesosphere/marathon/storage/repository/GroupRepositoryImpl.scala | Scala | apache-2.0 | 17,129 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.nscplugin.test
import org.scalajs.nscplugin.test.util._
import org.junit.Test
import org.junit.Ignore
// scalastyle:off line.size.limit
class JSGlobalScopeTest extends DirectTest with TestHelpers {
  /** Source prepended to every test snippet: scala.js imports, a `Symbols`
    * holder providing a `js.Symbol`, and a native `@JSGlobalScope` facade
    * covering the member kinds exercised by the tests below (plain
    * var/def, invalid-identifier names, operator, apply, bracket access,
    * symbol-named members, and the reserved name `arguments`).
    */
  override def preamble: String = {
    """
    import scala.scalajs.js
    import scala.scalajs.js.annotation._
    object Symbols {
      val sym: js.Symbol = js.Symbol()
    }
    @js.native
    @JSGlobalScope
    object SomeGlobalScope extends js.Any {
      var validVar: Int = js.native
      def validDef(): Int = js.native
      var `not-a-valid-identifier-var`: Int = js.native
      def `not-a-valid-identifier-def`(): Int = js.native
      def +(that: Int): Int = js.native
      def apply(x: Int): Int = js.native
      @JSBracketAccess
      def bracketSelect(name: String): Int = js.native
      @JSBracketAccess
      def bracketUpdate(name: String, v: Int): Unit = js.native
      @JSBracketCall
      def bracketCall(name: String)(arg: Int): Int = js.native
      @JSName(Symbols.sym)
      var symbolVar: Int = js.native
      @JSName(Symbols.sym)
      def symbolDef(): Int = js.native
      var arguments: js.Array[Any] = js.native
      @JSName("arguments") def arguments2(x: Int): Int = js.native
    }
    """
  }
  /** Accessing global-scope members with valid identifier names — plain
    * vars/defs and bracket access/call with literal names — must compile
    * without any warnings, both through `js.Dynamic.global` and through a
    * typed `@JSGlobalScope` facade.
    */
  @Test
  def canAccessLegitMembers(): Unit = {
    s"""
    object Main {
      def main(): Unit = {
        val a = js.Dynamic.global.validVar
        js.Dynamic.global.validVar = 3
        val b = js.Dynamic.global.validDef()
        val c = SomeGlobalScope.validVar
        SomeGlobalScope.validVar = 3
        val d = SomeGlobalScope.validDef()
        val e = SomeGlobalScope.bracketSelect("validVar")
        SomeGlobalScope.bracketUpdate("validVar", 3)
        val f = SomeGlobalScope.bracketCall("validDef")(4)
      }
    }
    """.hasNoWarns()
  }
  /** Using the global scope itself as a value (anywhere other than as the
    * receiver of a `.`-selection) must be rejected at compile time.
    */
  @Test
  def noLoadGlobalValue(): Unit = {
    s"""
    object Main {
      def main(): Unit = {
        val g1 = js.Dynamic.global
        val g2 = SomeGlobalScope
      }
    }
    """ hasErrors
    s"""
      |newSource1.scala:41: error: Loading the global scope as a value (anywhere but as the left-hand-side of a `.`-selection) is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val g1 = js.Dynamic.global
      |                            ^
      |newSource1.scala:42: error: Loading the global scope as a value (anywhere but as the left-hand-side of a `.`-selection) is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val g2 = SomeGlobalScope
      |                 ^
    """
  }
  /** Selecting or calling a global-scope member whose name is not a valid
    * JavaScript identifier must be rejected — whether spelled directly
    * (backquoted name) or passed as a literal to bracket access/call.
    */
  @Test
  def rejectInvalidJSIdentifiers(): Unit = {
    s"""
    object Main {
      def main(): Unit = {
        val a = js.Dynamic.global.`not-a-valid-identifier-var`
        js.Dynamic.global.`not-a-valid-identifier-var` = 3
        val b = js.Dynamic.global.`not-a-valid-identifier-def`()
        val c = SomeGlobalScope.`not-a-valid-identifier-var`
        SomeGlobalScope.`not-a-valid-identifier-var` = 3
        val d = SomeGlobalScope.`not-a-valid-identifier-def`()
        val e = SomeGlobalScope.bracketSelect("not-a-valid-identifier-var")
        SomeGlobalScope.bracketUpdate("not-a-valid-identifier-var", 3)
        val f = SomeGlobalScope.bracketCall("not-a-valid-identifier-def")(4)
      }
    }
    """ hasErrors
    s"""
      |newSource1.scala:41: error: Selecting a field of the global scope whose name is not a valid JavaScript identifier is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val a = js.Dynamic.global.`not-a-valid-identifier-var`
      |                                  ^
      |newSource1.scala:42: error: Selecting a field of the global scope whose name is not a valid JavaScript identifier is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        js.Dynamic.global.`not-a-valid-identifier-var` = 3
      |                          ^
      |newSource1.scala:43: error: Calling a method of the global scope whose name is not a valid JavaScript identifier is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val b = js.Dynamic.global.`not-a-valid-identifier-def`()
      |                                  ^
      |newSource1.scala:45: error: Selecting a field of the global scope whose name is not a valid JavaScript identifier is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val c = SomeGlobalScope.`not-a-valid-identifier-var`
      |                                ^
      |newSource1.scala:46: error: Selecting a field of the global scope whose name is not a valid JavaScript identifier is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        SomeGlobalScope.`not-a-valid-identifier-var` = 3
      |                        ^
      |newSource1.scala:47: error: Calling a method of the global scope whose name is not a valid JavaScript identifier is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val d = SomeGlobalScope.`not-a-valid-identifier-def`()
      |                                ^
      |newSource1.scala:49: error: Selecting a field of the global scope whose name is not a valid JavaScript identifier is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val e = SomeGlobalScope.bracketSelect("not-a-valid-identifier-var")
      |                                ^
      |newSource1.scala:50: error: Selecting a field of the global scope whose name is not a valid JavaScript identifier is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        SomeGlobalScope.bracketUpdate("not-a-valid-identifier-var", 3)
      |                        ^
      |newSource1.scala:51: error: Calling a method of the global scope whose name is not a valid JavaScript identifier is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val f = SomeGlobalScope.bracketCall("not-a-valid-identifier-def")(4)
      |                                ^
    """
  }
  /** Classes/objects nested in a `@JSGlobalScope` facade become JS global
    * variables; their (possibly `@JSName`-overridden) names must therefore
    * be valid JS identifiers — dashed names, dotted names and the empty
    * string are all rejected.
    */
  @Test
  def rejectInvalidJSIdentifiersInNestedObjectClass(): Unit = {
    """
    @js.native
    @JSGlobalScope
    object EnclosingGlobalScope extends js.Any {
      @js.native
      class `not-a-valid-JS-identifier` extends js.Object
      @js.native
      @JSName("not-a-valid-JS-identifier")
      object A extends js.Object
      @js.native
      @JSName("foo.bar")
      object B extends js.Object
      @js.native
      @JSName("")
      object C extends js.Object
    }
    """ hasErrors
    """
      |newSource1.scala:43: error: The name of a JS global variable must be a valid JS identifier (got 'not-a-valid-JS-identifier')
      |      class `not-a-valid-JS-identifier` extends js.Object
      |            ^
      |newSource1.scala:47: error: The name of a JS global variable must be a valid JS identifier (got 'not-a-valid-JS-identifier')
      |      object A extends js.Object
      |             ^
      |newSource1.scala:51: error: The name of a JS global variable must be a valid JS identifier (got 'foo.bar')
      |      object B extends js.Object
      |             ^
      |newSource1.scala:55: error: The name of a JS global variable must be a valid JS identifier (got '')
      |      object C extends js.Object
      |             ^
    """
  }
  /** Applying an operator to the global scope is rejected: on
    * `js.Dynamic.global` it fails type-checking (dynamic `+` expects a
    * `String` selector), on a typed facade it is the usual
    * global-scope-as-value error.
    */
  @Test
  def rejectJSOperators(): Unit = {
    """
    object Main {
      def main(): Unit = {
        val a = js.Dynamic.global + 3.asInstanceOf[js.Dynamic]
      }
    }
    """ hasErrors
    s"""
      |newSource1.scala:41: error: type mismatch;
      | found   : scala.scalajs.js.Dynamic
      | required: String
      |        val a = js.Dynamic.global + 3.asInstanceOf[js.Dynamic]
      |                                  ^
    """

    """
    object Main {
      def main(): Unit = {
        val a = SomeGlobalScope + 3
      }
    }
    """ hasErrors
    s"""
      |newSource1.scala:41: error: Loading the global scope as a value (anywhere but as the left-hand-side of a `.`-selection) is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val a = SomeGlobalScope + 3
      |                ^
    """
  }
  /** Calling the global scope like a function (`global(...)` /
    * facade `apply`) amounts to loading it as a value and is rejected.
    */
  @Test
  def rejectApply(): Unit = {
    """
    object Main {
      def main(): Unit = {
        val a = js.Dynamic.global(3)
      }
    }
    """ hasErrors
    s"""
      |newSource1.scala:41: error: Loading the global scope as a value (anywhere but as the left-hand-side of a `.`-selection) is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val a = js.Dynamic.global(3)
      |                           ^
    """

    """
    object Main {
      def main(): Unit = {
        val a = SomeGlobalScope(3)
      }
    }
    """ hasErrors
    s"""
      |newSource1.scala:41: error: Loading the global scope as a value (anywhere but as the left-hand-side of a `.`-selection) is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val a = SomeGlobalScope(3)
      |                ^
    """
  }
  /** Global-scope members must be referenced with compile-time-constant
    * names: `selectDynamic`/`updateDynamic`/`applyDynamic` with a runtime
    * string, bracket access/call with a non-literal argument, and
    * `js.Symbol`-named members are all rejected.
    */
  @Test
  def rejectDynamicNames(): Unit = {
    s"""
    object Main {
      def dynName: String = "foo"
      def main(): Unit = {
        val a = js.Dynamic.global.selectDynamic(dynName)
        js.Dynamic.global.updateDynamic(dynName)(3)
        val b = js.Dynamic.global.applyDynamic(dynName)(3)
        val e = SomeGlobalScope.bracketSelect(dynName)
        SomeGlobalScope.bracketUpdate(dynName, 3)
        val f = SomeGlobalScope.bracketCall(dynName)(4)
        val i = SomeGlobalScope.symbolVar
        SomeGlobalScope.symbolVar = 3
        val k = SomeGlobalScope.symbolDef()
      }
    }
    """ hasErrors
    s"""
      |newSource1.scala:43: error: Selecting a field of the global scope with a dynamic name is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val a = js.Dynamic.global.selectDynamic(dynName)
      |                                  ^
      |newSource1.scala:44: error: Selecting a field of the global scope with a dynamic name is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        js.Dynamic.global.updateDynamic(dynName)(3)
      |                          ^
      |newSource1.scala:45: error: Calling a method of the global scope with a dynamic name is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val b = js.Dynamic.global.applyDynamic(dynName)(3)
      |                                  ^
      |newSource1.scala:47: error: Selecting a field of the global scope with a dynamic name is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val e = SomeGlobalScope.bracketSelect(dynName)
      |                                ^
      |newSource1.scala:48: error: Selecting a field of the global scope with a dynamic name is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        SomeGlobalScope.bracketUpdate(dynName, 3)
      |                        ^
      |newSource1.scala:49: error: Calling a method of the global scope with a dynamic name is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val f = SomeGlobalScope.bracketCall(dynName)(4)
      |                                ^
      |newSource1.scala:51: error: Selecting a field of the global scope with a dynamic name is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val i = SomeGlobalScope.symbolVar
      |                                ^
      |newSource1.scala:52: error: Selecting a field of the global scope with a dynamic name is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        SomeGlobalScope.symbolVar = 3
      |                        ^
      |newSource1.scala:53: error: Calling a method of the global scope with a dynamic name is not allowed.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      |        val k = SomeGlobalScope.symbolDef()
      |                                ^
    """
  }
// Verifies that every ECMAScript reserved word is rejected when used as a
// member name of the JS global scope, both through `js.Dynamic.global` and
// through a user-defined `@JSGlobalScope` facade object.
// NOTE: the interpolated snippet and the expected-error string are kept
// line-for-line as-is — the `newSource1.scala:NN` line numbers and the
// caret alignment (via `$spaces`) depend on the exact layout.
@Test
def rejectAllReservedIdentifiers(): Unit = {
  // Identifiers reserved by the ECMAScript specification, including
  // strict-mode and future reserved words such as `implements` and `enum`.
  val reservedIdentifiers = List(
    "arguments", "break", "case", "catch", "class", "const", "continue",
    "debugger", "default", "delete", "do", "else", "enum", "export",
    "extends", "false", "finally", "for", "function", "if", "implements",
    "import", "in", "instanceof", "interface", "let", "new", "null",
    "package", "private", "protected", "public", "return", "static",
    "super", "switch", "this", "throw", "true", "try", "typeof", "var",
    "void", "while", "with", "yield")
  for (reservedIdentifier <- reservedIdentifiers) {
    // Pads the caret (`^`) in the expected errors: its column shifts with
    // the length of the identifier under test.
    val spaces = " " * reservedIdentifier.length()
    s"""
    @js.native
    @JSGlobalScope
    object CustomGlobalScope extends js.Any {
      var `$reservedIdentifier`: Int = js.native
      @JSName("$reservedIdentifier")
      def `${reservedIdentifier}2`(x: Int): Int = js.native
    }
    object Main {
      def main(): Unit = {
        val a = js.Dynamic.global.`$reservedIdentifier`
        js.Dynamic.global.`$reservedIdentifier` = 5
        val b = js.Dynamic.global.`$reservedIdentifier`(5)
        val c = CustomGlobalScope.`$reservedIdentifier`
        CustomGlobalScope.`$reservedIdentifier` = 5
        val d = CustomGlobalScope.`${reservedIdentifier}2`(5)
      }
    }
    """ hasErrors
    s"""
      |newSource1.scala:49: error: Invalid selection in the global scope of the reserved identifier name `$reservedIdentifier`.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      | val a = js.Dynamic.global.`$reservedIdentifier`
      | ^
      |newSource1.scala:50: error: Invalid selection in the global scope of the reserved identifier name `$reservedIdentifier`.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      | js.Dynamic.global.`$reservedIdentifier` = 5
      | ^
      |newSource1.scala:51: error: Invalid call in the global scope of the reserved identifier name `$reservedIdentifier`.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      | val b = js.Dynamic.global.`$reservedIdentifier`(5)
      | $spaces^
      |newSource1.scala:53: error: Invalid selection in the global scope of the reserved identifier name `$reservedIdentifier`.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      | val c = CustomGlobalScope.`$reservedIdentifier`
      | ^
      |newSource1.scala:54: error: Invalid selection in the global scope of the reserved identifier name `$reservedIdentifier`.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      | CustomGlobalScope.`$reservedIdentifier` = 5
      | $spaces^
      |newSource1.scala:55: error: Invalid call in the global scope of the reserved identifier name `$reservedIdentifier`.
      |  See https://www.scala-js.org/doc/interoperability/global-scope.html for further information.
      | val d = CustomGlobalScope.`${reservedIdentifier}2`(5)
      | $spaces^
    """
  }
}
}
}
| scala-js/scala-js | compiler/src/test/scala/org/scalajs/nscplugin/test/JSGlobalScopeTest.scala | Scala | apache-2.0 | 17,183 |
// NOTE(review): compiler-test fixture that is EXPECTED to fail compilation
// (it lives under testdata/scalacTests/failed/). `def foo(a) = a` omits the
// parameter type, and Scala does not infer an overriding def's parameter
// types from the overridden signature. Do not "fix" it.
abstract class A { def foo(a: Int): A }
class B extends A {
  implicit def spackle(x: Int): A = new B
  def foo(a) = a
}
} | loskutov/intellij-scala | testdata/scalacTests/failed/infer_override_def_args.scala | Scala | apache-2.0 | 120 |
package pl.touk.nussknacker.engine.component
import com.typesafe.config.Config
import net.ceedubs.ficus.Ficus._
import pl.touk.nussknacker.engine.api.component._
import pl.touk.nussknacker.engine.api.process._
import pl.touk.nussknacker.engine.api.{CustomStreamTransformer, Service}
import pl.touk.nussknacker.engine.component.ComponentExtractor.{ComponentsGroupedByType, componentConfigPath}
import pl.touk.nussknacker.engine.util.loader.ScalaServiceLoader
import scala.reflect.ClassTag
/** Companion of [[ComponentExtractor]]: configuration path, default factory
  * and the result type of an extraction.
  */
object ComponentExtractor {

  // Root key of the component-provider section in the configuration.
  val componentConfigPath = "components"

  /** Creates an extractor bound to the currently running Nussknacker version. */
  def apply(classLoader: ClassLoader): ComponentExtractor = {
    ComponentExtractor(classLoader, NussknackerVersion.current)
  }

  /** Extracted components, partitioned by their component subtype. */
  case class ComponentsGroupedByType(services: Map[String, WithCategories[Service]],
                                     sourceFactories: Map[String, WithCategories[SourceFactory]],
                                     sinkFactories: Map[String, WithCategories[SinkFactory]],
                                     customTransformers: Map[String, WithCategories[CustomStreamTransformer]])
}
/** Discovers [[ComponentProvider]] implementations on [[classLoader]] (via
  * `ScalaServiceLoader`) and materialises their components, honouring the
  * `components` configuration section: enable/disable flags, provider-type
  * overrides, name prefixes and categories.
  *
  * @param classLoader        class loader scanned for [[ComponentProvider]] services
  * @param nussknackerVersion running version, used for provider compatibility checks
  */
case class ComponentExtractor(classLoader: ClassLoader, nussknackerVersion: NussknackerVersion) {

  // All providers reachable via service loading, keyed by provider name.
  // Several providers may share one name (e.g. different versions on the
  // classpath), hence the List values. Lazy: resolved on first use.
  private lazy val providers: Map[String, List[ComponentProvider]] = {
    ScalaServiceLoader
      .load[ComponentProvider](classLoader)
      .groupBy(_.providerName)
  }

  // Merges providers configured explicitly under `components` with providers
  // that declare themselves auto-loadable. `++` means an auto-loaded entry
  // with the same key would win — in practice auto-loaded entries are keyed
  // by provider name and manual ones by config-entry name.
  private def loadCorrectProviders(config: Config): Map[String, (ComponentProviderConfig, ComponentProvider)] = {
    val componentsConfig = config.getAs[Map[String, ComponentProviderConfig]](componentConfigPath).getOrElse(Map.empty)
    val manuallyLoadedProvidersWithConfig = loadManuallyLoadedProviders(componentsConfig)
    val autoLoadedProvidersWithConfig = loadAutoLoadedProviders(componentsConfig, manuallyLoadedProvidersWithConfig)
    manuallyLoadedProvidersWithConfig ++ autoLoadedProvidersWithConfig
  }

  // Resolves each enabled `components` entry to exactly one compatible
  // provider. The entry name doubles as the provider name unless
  // `providerType` overrides it; an unknown provider name is a hard error.
  private def loadManuallyLoadedProviders(componentsConfig: Map[String, ComponentProviderConfig]) = {
    componentsConfig.filterNot(_._2.disabled).map {
      case (name, providerConfig: ComponentProviderConfig) =>
        val providerName = providerConfig.providerType.getOrElse(name)
        val componentProviders = providers.getOrElse(providerName, throw new IllegalArgumentException(s"Provider $providerName (for component $name) not found"))
        val provider: ComponentProvider = findSingleCompatible(name, providerName, componentProviders)
        name -> (providerConfig, provider)
    }
  }

  // Auto-loads providers that opt in via `isAutoLoaded`, skipping those
  // already loaded manually or explicitly disabled in the config. An
  // incompatible auto-loaded provider is a hard error (the user has not
  // pinned it, so silently skipping would hide the problem).
  private def loadAutoLoadedProviders(componentsConfig: Map[String, ComponentProviderConfig], manuallyLoadedProvidersWithConfig: Map[String, (ComponentProviderConfig, ComponentProvider)]) = {
    val manuallyLoadedProviders = manuallyLoadedProvidersWithConfig.values.map(_._2).toSet
    val autoLoadedProvidersWithConfig = providers.values
      .flatten
      .filter(provider => provider.isAutoLoaded && !manuallyLoadedProviders.contains(provider) && !componentsConfig.get(provider.providerName).exists(_.disabled))
      .map { provider =>
        if (!provider.isCompatible(nussknackerVersion)) {
          throw new IllegalArgumentException(s"Auto-loaded component provider ${provider.providerName} is not compatible with $nussknackerVersion, please use correct component provider version or disable it explicitly.")
        }
        // Auto-loaded providers get an empty (default) config entry.
        provider.providerName -> (ComponentProviderConfig(providerType = None, componentPrefix = None), provider)
      }
    autoLoadedProvidersWithConfig
  }

  /** Loads all configured providers and returns their components grouped by
    * component subtype. Fails on duplicate component names within a subtype.
    */
  def extractComponents(processObjectDependencies: ProcessObjectDependencies): ComponentsGroupedByType = {
    val components = loadCorrectProviders(processObjectDependencies.config)
      .toList
      .flatMap { case (_, (config, provider)) => extractOneProviderConfig(config, provider, processObjectDependencies) }
    groupByComponentType(components)
  }

  /** Re-resolves each provider's config for execution and overlays the result
    * onto `inputConfig` under `components.<name>`. `configWithDefaults` is
    * used to decide which providers are active.
    */
  def loadAdditionalConfig(inputConfig: Config, configWithDefaults: Config): Config = {
    val resolvedConfigs = loadCorrectProviders(configWithDefaults).map {
      case (name, (config, provider)) => name -> provider.resolveConfigForExecution(config.config)
    }
    resolvedConfigs.foldLeft(inputConfig) {
      case (acc, (name, conf)) => acc.withValue(s"$componentConfigPath.$name", conf.root())
    }
  }

  // Creates one provider's components, applying the configured name prefix
  // and categories; per-component docs URL and icon are carried in
  // SingleComponentConfig.
  private def extractOneProviderConfig(config: ComponentProviderConfig, provider: ComponentProvider, processObjectDependencies: ProcessObjectDependencies): List[(String, WithCategories[Component])] = {
    provider.create(config.config, processObjectDependencies).map { cd =>
      val finalName = config.componentPrefix.map(_ + cd.name).getOrElse(cd.name)
      finalName -> WithCategories(cd.component, config.categories, SingleComponentConfig.zero.copy(docsUrl = cd.docsUrl, icon = cd.icon))
    }
  }

  // Buckets definitions by component subtype; duplicate names inside one
  // bucket are a configuration error.
  private def groupByComponentType(definitions: List[(String, WithCategories[Component])]) = {
    def checkDuplicates[T <: Component : ClassTag](components: List[(String, WithCategories[Component])]): Unit = {
      components.groupBy(_._1)
        .foreach { case (_, duplicatedComponents) =>
          if (duplicatedComponents.length > 1) {
            throw new IllegalArgumentException(s"Found duplicate keys: ${duplicatedComponents.mkString(", ")}, please correct configuration")
          }
        }
    }
    // Keeps only the definitions whose component is a T (ClassTag makes the
    // runtime type test possible despite erasure).
    def forClass[T <: Component : ClassTag] = {
      val defs = definitions.collect {
        case (id, a@WithCategories(definition: T, _, _)) => id -> a.copy(value = definition)
      }
      checkDuplicates(defs)
      defs.toMap
    }
    ComponentsGroupedByType(
      services = forClass[Service],
      sourceFactories = forClass[SourceFactory],
      sinkFactories = forClass[SinkFactory],
      customTransformers = forClass[CustomStreamTransformer])
  }

  /** Picks the single provider compatible with [[nussknackerVersion]] from
    * the candidates sharing one provider name; fails when none or more than
    * one is compatible.
    */
  def findSingleCompatible(name: String, providerName: String, componentProviders: List[ComponentProvider]): ComponentProvider = {
    val (compatible, incompatible) = componentProviders.partition(_.isCompatible(nussknackerVersion))
    compatible match {
      case List() =>
        incompatible match {
          case List() => throw new IllegalArgumentException(s"Provider $providerName (for component $name) not found")
          case _ => throw new IllegalArgumentException(s"Component provider $name (of type $providerName) is not compatible with $nussknackerVersion, please use correct component provider version or disable it explicitly.")
        }
      case x :: Nil => x
      case _ :: _ => throw new IllegalArgumentException(s"Multiple providers for provider name $providerName (for component $name)")
    }
  }
}
| TouK/nussknacker | interpreter/src/main/scala/pl/touk/nussknacker/engine/component/ComponentExtractor.scala | Scala | apache-2.0 | 6,563 |
package lila.common
import play.api.http.HeaderNames
import play.api.mvc.RequestHeader
/** Helpers for classifying and inspecting incoming HTTP requests. */
object HTTPRequest {

  /** True when the request was issued through XMLHttpRequest (AJAX). */
  def isXhr(req: RequestHeader): Boolean =
    (req.headers get "X-Requested-With") == Some("XMLHttpRequest")

  /** True when the client asked to upgrade the connection to a websocket. */
  def isSocket(req: RequestHeader): Boolean =
    (req.headers get HeaderNames.UPGRADE) == Some("websocket")

  /** A plain full-page request: neither AJAX nor websocket. */
  def isSynchronousHttp(req: RequestHeader) = !isXhr(req) && !isSocket(req)

  /** GET requests are treated as safe (no side effects expected). */
  def isSafe(req: RequestHeader) = req.method == "GET"

  /** Only safe, synchronous requests may be answered with a redirect. */
  def isRedirectable(req: RequestHeader) = isSynchronousHttp(req) && isSafe(req)

  // NOTE(review): scheme is hard-coded, so requests served over https are
  // still reported as http — confirm against the deployment setup.
  def fullUrl(req: RequestHeader): String = "http://" + req.host + req.uri

  def userAgent(req: RequestHeader): Option[String] = req.headers get HeaderNames.USER_AGENT

  def referer(req: RequestHeader): Option[String] = req.headers get HeaderNames.REFERER

  /** Session id, when one has been set in the Play session cookie. */
  def sid(req: RequestHeader): Option[String] = req.session get "sid"

  // Case-insensitive match of known bot/crawler user agents.
  // FIX: triple-quoted strings perform no escape processing, so the previous
  // `\\.` denoted the regex `\\.` (a literal backslash followed by any char)
  // and alternatives such as `grub\\.org` could never match a real UA string.
  // A literal dot in a triple-quoted regex is written `\.`.
  private val isBotPattern = {
    """(?i).*(googlebot|googlebot-mobile|googlebot-image|mediapartners-google|bingbot|slurp|java|wget|curl|commons-httpclient|python-urllib|libwww|httpunit|nutch|phpcrawl|msnbot|adidxbot|blekkobot|teoma|ia_archiver|gingercrawler|webmon|httrack|webcrawler|fast-webcrawler|fastenterprisecrawler|convera|biglotron|grub\.org|usinenouvellecrawler|antibot|netresearchserver|speedy|fluffy|jyxobot|bibnum\.bnf|findlink|exabot|gigabot|msrbot|seekbot|ngbot|panscient|yacybot|aisearchbot|ioi|ips-agent|tagoobot|mj12bot|dotbot|woriobot|yanga|buzzbot|mlbot|purebot|lingueebot|yandex\.com/bots|""" +
      """voyager|cyberpatrol|voilabot|baiduspider|citeseerxbot|spbot|twengabot|postrank|turnitinbot|scribdbot|page2rss|sitebot|linkdex|ezooms|dotbot|mail\.ru|discobot|zombie\.js|heritrix|findthatfile|europarchive\.org|nerdbynature\.bot|sistrixcrawler|ahrefsbot|aboundex|domaincrawler|wbsearchbot|summify|ccbot|edisterbot|seznambot|ec2linkfinder|gslfbot|aihitbot|intelium_bot|yeti|retrevopageanalyzer|lb-spider|sogou|lssbot|careerbot|wotbox|wocbot|ichiro|duckduckbot|lssrocketcrawler|drupact|webcompanycrawler|acoonbot|openindexspider|gnamgnamspider|web-archive-net\.com\.bot|backlinkcrawler|""" +
      """coccoc|integromedb|contentcrawlerspider|toplistbot|seokicks-robot|it2media-domain-crawler|ip-web-crawler\.com|siteexplorer\.info|elisabot|proximic|changedetection|blexbot|arabot|wesee:search|niki-bot|crystalsemanticsbot|rogerbot|360spider|psbot|interfaxscanbot|lipperheyseoservice|ccmetadatascaper|g00g1e\.net|grapeshotcrawler|urlappendbot|brainobot|fr-crawler|binlar|simplecrawler|simplecrawler|livelapbot|twitterbot|cxensebot|smtbot|facebookexternalhit).*"""
  }.r.pattern

  /** True when the user agent is a known bot/crawler (false when absent). */
  def isBot(req: RequestHeader): Boolean = userAgent(req) ?? { ua =>
    isBotPattern.matcher(ua).matches
  }

  def isFacebookBot(req: RequestHeader) = userAgent(req) ?? (_ contains "facebookexternalhit")

  // FIX (same escaping issue as above): `.+\.[a-z0-9]{2,4}$` matches paths
  // ending in ".ext"; the previous `\\.` required a literal backslash in the
  // path and therefore never matched.
  private val fileExtensionPattern = """.+\.[a-z0-9]{2,4}$""".r.pattern

  /** True when the request path ends with a short file extension. */
  def hasFileExtension(req: RequestHeader) =
    fileExtensionPattern.matcher(req.path).matches
}
| danilovsergey/i-bur | modules/common/src/main/HTTPRequest.scala | Scala | mit | 2,954 |
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.tensor
import com.intel.analytics.bigdl.utils.T
import org.scalatest.{FlatSpec, Matchers}
@com.intel.analytics.bigdl.tags.Parallel
class DenseTensorMathSpec extends FlatSpec with Matchers {
"a.dist(b, 1)" should "be correct" in {
val a: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val b: Tensor[Double] = new DenseTensor(Storage(Array(2.0, 3.0, 4.0)))
a.dist(b, 1) should equal(3)
}
"a.dist(b, 2)" should "be correct" in {
val a: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val b: Tensor[Double] = new DenseTensor(Storage(Array(3.0, 4.0, 5.0)))
a.dist(b, 2) should equal(math.sqrt(12))
}
"a.dist(b, 3)" should "be correct" in {
val a: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val b: Tensor[Double] = new DenseTensor(Storage(Array(3.0, 4.0, 5.0)))
a.dist(b, 3) should equal(math.pow(24, 1.0 / 3))
}
"vector + scalar" should "be correct" in {
val s = 2.0
val v: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val r = v + s
r(Array(1)) should be(3.0)
r(Array(2)) should be(4.0)
r(Array(3)) should be(5.0)
}
"vector + vector" should "be correct" in {
val v1: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val v2: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val r = v1 + v2
r(Array(1)) should be(2.0)
r(Array(2)) should be(4.0)
r(Array(3)) should be(6.0)
}
"vector + vector which is not contiguous" should "be correct" in {
val v1: Tensor[Double] = new DenseTensor[Double](2, 4).fill(1)
v1.t()
val v2: Tensor[Double] = new DenseTensor(Storage(
Array(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0)))
val r = v1 + v2
r(Array(1, 1)) should be(2.0)
r(Array(1, 2)) should be(3.0)
r(Array(1, 3)) should be(4.0)
r(Array(1, 4)) should be(5.0)
r(Array(2, 1)) should be(6.0)
r(Array(2, 2)) should be(7.0)
r(Array(2, 3)) should be(8.0)
r(Array(2, 4)) should be(9.0)
}
"vector - scalar" should "be correct" in {
val s = 2.0
val v: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val r = v - s
r(Array(1)) should be(-1.0)
r(Array(2)) should be(0.0)
r(Array(3)) should be(1.0)
}
"vector - vector" should "be correct" in {
val v1: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val v2: Tensor[Double] = new DenseTensor(Storage(Array(2.0, 0.0, -1.0)))
val r = v1 - v2
r(Array(1)) should be(-1.0)
r(Array(2)) should be(2.0)
r(Array(3)) should be(4.0)
}
"vector * scalar" should "be correct" in {
val s = 2.0
val v: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val r = v * s
r(Array(1)) should be(2.0)
r(Array(2)) should be(4.0)
r(Array(3)) should be(6.0)
}
"vector * vector" should "be correct" in {
val v1: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val v2: Tensor[Double] = new DenseTensor(Storage(Array(2.0, 0.0, -1.0)))
val r = v1 * v2
r(Array(1)) should be(-1.0)
}
"matrix * vector" should "be correct" in {
val mat: Tensor[Double] = new DenseTensor(2, 3)
mat(Array(1, 1)) = 2
mat(Array(1, 2)) = 4
mat(Array(1, 3)) = 3
mat(Array(2, 1)) = 5
mat(Array(2, 2)) = 6
mat(Array(2, 3)) = 1
val vec: Tensor[Double] = new DenseTensor(Storage(Array(3.0, 1, 1)))
val r = mat * vec
r(Array(1)) should be(13.0)
r(Array(2)) should be(22.0)
}
"transpose matrix * vector" should "be correct" in {
val mat: Tensor[Double] = new DenseTensor(3, 2)
mat(Array(1, 1)) = 2
mat(Array(1, 2)) = 4
mat(Array(2, 1)) = 3
mat(Array(2, 2)) = 5
mat(Array(3, 1)) = 6
mat(Array(3, 2)) = 1
val mat1 = mat.t
val vec: Tensor[Double] = new DenseTensor(Storage(Array(3.0, 1, 1)))
val r = mat1 * vec
r(Array(1)) should be(15.0)
r(Array(2)) should be(18.0)
}
"uncontiguous matrix * vector" should "be correct" in {
val tensor: Tensor[Double] = new DenseTensor(3, 2, 2)
tensor(Array(1, 1, 1)) = 2
tensor(Array(1, 2, 1)) = 4
tensor(Array(2, 1, 1)) = 3
tensor(Array(2, 2, 1)) = 5
tensor(Array(3, 1, 1)) = 6
tensor(Array(3, 2, 1)) = 1
val matrix = tensor(T(T(), T(), 1)).t()
val vec: Tensor[Double] = new DenseTensor(Storage(Array(3.0, 1, 1)))
val r = matrix * vec
r(Array(1)) should be(15.0)
r(Array(2)) should be(18.0)
}
"matrix * matrix" should "be correct" in {
val mat1: Tensor[Double] = new DenseTensor(3, 2)
var i = 0
mat1.apply1(_ => {
i = i + 1;
i
})
val mat2: Tensor[Double] = new DenseTensor(2, 3)
i = 0
mat2.apply1(_ => {
i = i + 1;
i
})
val r = mat2 * mat1
r(Array(1, 1)) should be(22)
r(Array(1, 2)) should be(28)
r(Array(2, 1)) should be(49)
r(Array(2, 2)) should be(64)
}
"transpose matrix * matrix" should "be correct" in {
val mat1: Tensor[Double] = new DenseTensor(3, 2)
var i = 0
mat1.apply1(_ => {
i = i + 1;
i
})
val mat2: Tensor[Double] = new DenseTensor(3, 2)
i = 0
mat2.apply1(_ => {
i = i + 1;
i
})
val r = mat2.t * mat1
r(Array(1, 1)) should be(35)
r(Array(1, 2)) should be(44)
r(Array(2, 1)) should be(44)
r(Array(2, 2)) should be(56)
}
"matrix * transpose matrix" should "be correct" in {
val mat1: Tensor[Double] = new DenseTensor(2, 3)
var i = 0
mat1.apply1(_ => {
i = i + 1;
i
})
val mat2: Tensor[Double] = new DenseTensor(2, 3)
i = 0
mat2.apply1(_ => {
i = i + 1;
i
})
val r = mat2 * mat1.t
r(Array(1, 1)) should be(14)
r(Array(1, 2)) should be(32)
r(Array(2, 1)) should be(32)
r(Array(2, 2)) should be(77)
}
"transpose matrix * transpose matrix" should "be correct" in {
val mat1: Tensor[Double] = new DenseTensor(3, 2)
var i = 0
mat1.apply1(_ => {
i = i + 1;
i
})
val mat2: Tensor[Double] = new DenseTensor(2, 3)
i = 0
mat2.apply1(_ => {
i = i + 1;
i
})
val r = mat1.t * mat2.t
r(Array(1, 1)) should be(22)
r(Array(1, 2)) should be(49)
r(Array(2, 1)) should be(28)
r(Array(2, 2)) should be(64)
}
"noncontiguous matrix * noncontiguous matrix" should "be correct" in {
val tensor: Tensor[Double] = new DenseTensor(3, 2, 2)
tensor(Array(1, 1, 1)) = 1
tensor(Array(1, 2, 1)) = 2
tensor(Array(2, 1, 1)) = 3
tensor(Array(2, 2, 1)) = 4
tensor(Array(3, 1, 1)) = 5
tensor(Array(3, 2, 1)) = 6
val mat1: Tensor[Double] = tensor(T(T(), T(), 1)).t
val mat2: Tensor[Double] = tensor(T(T(), T(), 1))
val r = mat1 * mat2
r(Array(1, 1)) should be(35)
r(Array(1, 2)) should be(44)
r(Array(2, 1)) should be(44)
r(Array(2, 2)) should be(56)
}
"vector / scalar" should "be correct" in {
val s = 2.0
val v: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val r = v / s
r(Array(1)) should be(0.5)
r(Array(2)) should be(1.0)
r(Array(3)) should be(1.5)
}
"vector / vector" should "be correct" in {
val v1: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val v2: Tensor[Double] = new DenseTensor(Storage(Array(2.0, 1.0, -1.0)))
val r = v1 / v2
r(Array(1)) should be(0.5)
r(Array(2)) should be(2.0)
r(Array(3)) should be(-3.0)
}
"-vector" should "be correct" in {
val v: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val r = -v
r(Array(1)) should be(-1.0)
r(Array(2)) should be(-2.0)
r(Array(3)) should be(-3.0)
}
"max operation" should "return correct value" in {
val t: Tensor[Double] = new DenseTensor(3, 3)
var i = 0
t.apply1(v => {
i = i + 1;
i
})
t.max() should be(9)
}
"max with dim" should "return correct value" in {
val test = Tensor[Double](Storage(Array(1.0, 2, 3, 4, 5, 6, 7, 8)), 1, Array(2, 4))
val (values1, indices1) = test.max(1)
values1 should be(Tensor[Double](Storage(Array(5.0, 6, 7, 8)), 1, Array(1, 4)))
indices1 should be(Tensor[Double](Storage(Array(2.0, 2, 2, 2)), 1, Array(1, 4)))
val (values2, indices2) = test.max(2)
values2 should be(Tensor[Double](Storage(Array(4.0, 8.0)), 1, Array(2, 1)))
indices2 should be(Tensor[Double](Storage(Array(4.0, 4)), 1, Array(2, 1)))
}
"max with dim on 1d tensor" should "return correct value" in {
val test = Tensor[Double](Storage(Array(1.0, 2, 3, 4, 5, 6, 7, 8)))
val (values, indices) = test.max(1)
values should be(Tensor[Double](Storage(Array(8.0))))
indices should be(Tensor[Double](Storage(Array(8.0))))
}
"sum operation" should "return correct value" in {
val t: Tensor[Double] = new DenseTensor(2, 3)
var i = 0
t.apply1(e => {
i = i + 1;
i
})
t.sum() should be(21)
val result1 = t.sum(1)
result1.size(1) should be(1)
result1.size(2) should be(3)
result1(Array(1, 1)) should be(5)
result1(Array(1, 2)) should be(7)
result1(Array(1, 3)) should be(9)
val result2 = t.sum(2)
result2.size(1) should be(2)
result2.size(2) should be(1)
result2(Array(1, 1)) should be(6)
result2(Array(2, 1)) should be(15)
}
"addmm" should "return correct value" in {
val a_data = Array(
1.0, 2, 3, 4,
1, 2, 3, 4,
1, 2, 3, 4
)
val a = new DenseTensor[Double](Storage(a_data), 1, Array(3, 4))
val b_data = Array(
1.0, 2,
1, 2,
1, 2,
1, 2
)
val b = new DenseTensor[Double](Storage(b_data), 1, Array(4, 2))
val c = Tensor[Double]()
c.resize(Array(3, 2))
c.addmm(a, b)
val expect_c_data = Array(
10.0, 20.0,
10, 20,
10, 20
)
val expect_c = new DenseTensor[Double](Storage(expect_c_data), 1, Array(3, 2))
c.map(expect_c, (a, b) => {
a should be(b +- 1e-6)
a
})
}
"addmm plus another tensor" should "return correct value" in {
val a_data = Array(
1.0, 2, 3, 4,
1, 2, 3, 4,
1, 2, 3, 4
)
val a = new DenseTensor[Double](Storage(a_data), 1, Array(3, 4))
val b_data = Array(
1.0, 2,
1, 2,
1, 2,
1, 2
)
val b = new DenseTensor[Double](Storage(b_data), 1, Array(4, 2))
val m_data = Array(
1.0, 2,
1, 2,
1, 2
)
val m = new DenseTensor[Double](Storage(m_data), 1, Array(3, 2))
val c = Tensor[Double]()
c.addmm(m, a, b)
val expect_c_data = Array(
11.0, 22.0,
11, 22,
11, 22
)
val expect_c = new DenseTensor[Double](Storage(expect_c_data), 1, Array(3, 2))
c.map(expect_c, (a, b) => {
a should be(b +- 1e-6)
a
})
}
"addr transpose" should "return correct value" in {
val v1: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val v2: Tensor[Double] = new DenseTensor(Storage(Array(2.0, 0.0, -1.0)))
val tensor: Tensor[Double] = new DenseTensor(3, 3)
tensor(Array(1, 1)) = 1
tensor(Array(1, 2)) = 2
tensor(Array(1, 3)) = 2
tensor(Array(2, 1)) = 3
tensor(Array(2, 2)) = 4
tensor(Array(2, 3)) = 4
tensor(Array(3, 1)) = 5
tensor(Array(3, 2)) = 6
tensor(Array(3, 3)) = 6
val mat: Tensor[Double] = tensor.t
val r = Tensor[Double]()
r.resize(Array(3, 3))
r.addr(1.0, mat, 1.0, v1, v2)
val expect_r = new DenseTensor(Storage(Array(3.0, 3.0, 4.0,
6.0, 4.0, 4.0,
8.0, 4.0, 3.0)), 1, Array(3, 3))
r should be (expect_r)
}
"addr" should "return correct value" in {
val v1: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val v2: Tensor[Double] = new DenseTensor(Storage(Array(2.0, 0.0, -1.0)))
val r = Tensor[Double]()
r.resize(Array(3, 3))
r.addr(v1, v2)
r should be (new DenseTensor[Double](Storage(Array(2.0, 0.0, -1.0,
4.0, 0.0, -2.0,
6.0, 0.0, -3.0)), 1, Array(3, 3)))
}
"addr noncontiguous" should "return correct value" in {
val v1: Tensor[Double] = new DenseTensor(Storage(Array(1.0, 2.0, 3.0)))
val v2: Tensor[Double] = new DenseTensor(Storage(Array(2.0, 0.0, -1.0)))
val tensor: Tensor[Double] = new DenseTensor(3, 3, 2)
tensor(Array(1, 1, 1)) = 1
tensor(Array(1, 2, 1)) = 2
tensor(Array(1, 3, 1)) = 2
tensor(Array(2, 1, 1)) = 3
tensor(Array(2, 2, 1)) = 4
tensor(Array(2, 3, 1)) = 4
tensor(Array(3, 1, 1)) = 5
tensor(Array(3, 2, 1)) = 6
tensor(Array(3, 3, 1)) = 6
val mat: Tensor[Double] = tensor(T(T(), T(), 1)).t
val r = Tensor[Double]()
r.resize(Array(3, 3))
r.addr(1, mat, 1, v1, v2)
r should be (new DenseTensor[Double](Storage(Array(3.0, 3.0, 4.0,
6.0, 4.0, 4.0,
8.0, 4.0, 3.0)), 1, Array(3, 3)))
}
"uniform" should "return correct value" in {
val t = Tensor[Double]()
for (i <- 0 to 1000) {
val rand = t.uniform()
rand should be(0.5 +- 0.5)
}
}
"uniform(n)" should "return correct value" in {
val t = Tensor[Double]()
t.uniform(1.0) should be(1.0)
for (i <- 0 to 1000) {
val rand = t.uniform(11.0)
rand should be(6.0 +- 5.0)
}
}
"uniform(l, n)" should "return correct value" in {
val t = Tensor[Double]()
t.uniform(1.0, 1.0) should be(1.0)
t.uniform(-2.0, -2.0) should be(-2.0)
for (i <- 0 to 1000) {
val rand = t.uniform(-11.0, 11.0)
rand should be(0.0 +- 11.0)
}
}
"mean operation" should "return correct value" in {
val t: Tensor[Double] = new DenseTensor(2, 3)
var i = 0
t.apply1(e => {
i = i + 1;
i
})
t.mean() should be(3.5)
val result1 = t.mean(1)
result1.size(1) should be(1)
result1.size(2) should be(3)
result1(Array(1, 1)) should be(2.5)
result1(Array(1, 2)) should be(3.5)
result1(Array(1, 3)) should be(4.5)
val result2 = t.mean(2)
result2.size(1) should be(2)
result2.size(2) should be(1)
result2(Array(1, 1)) should be(2)
result2(Array(2, 1)) should be(5)
}
"mean operation on 3D tensor" should "return correct value" in {
val t: Tensor[Double] = new DenseTensor(2, 3, 4)
var i = 0
t.apply1(e => {
i = i + 1;
i
})
t.mean() should be(12.5)
val result1 = t.mean(1)
result1.size(1) should be(1)
result1.size(2) should be(3)
result1.size(3) should be(4)
result1(Array(1, 1, 1)) should be(7)
result1(Array(1, 1, 2)) should be(8)
result1(Array(1, 1, 3)) should be(9)
result1(Array(1, 1, 4)) should be(10)
result1(Array(1, 2, 1)) should be(11)
result1(Array(1, 2, 2)) should be(12)
result1(Array(1, 2, 3)) should be(13)
result1(Array(1, 2, 4)) should be(14)
result1(Array(1, 3, 1)) should be(15)
result1(Array(1, 3, 2)) should be(16)
result1(Array(1, 3, 3)) should be(17)
result1(Array(1, 3, 4)) should be(18)
val result2 = t.mean(2)
result2.size(1) should be(2)
result2.size(2) should be(1)
result2.size(3) should be(4)
result2(Array(1, 1, 1)) should be(5)
result2(Array(1, 1, 2)) should be(6)
result2(Array(1, 1, 3)) should be(7)
result2(Array(1, 1, 4)) should be(8)
result2(Array(2, 1, 1)) should be(17)
result2(Array(2, 1, 2)) should be(18)
result2(Array(2, 1, 3)) should be(19)
result2(Array(2, 1, 4)) should be(20)
val result3 = t.mean(3)
result3.size(1) should be(2)
result3.size(2) should be(3)
result3.size(3) should be(1)
result3(Array(1, 1, 1)) should be(2.5)
result3(Array(1, 2, 1)) should be(6.5)
result3(Array(1, 3, 1)) should be(10.5)
result3(Array(2, 1, 1)) should be(14.5)
result3(Array(2, 2, 1)) should be(18.5)
result3(Array(2, 3, 1)) should be(22.5)
}
"topk" should "be correct for 1d tensor" in {
val t = Tensor(Storage(Array(0.0, 1.0, 5.0, 3.0, 9.0, 0.8, 6.3)))
val (v, i) = t.topk(5)
v should be(Tensor(Storage(Array(0.0, 0.8, 1.0, 3.0, 5.0))))
i should be(Tensor(Storage(Array(1.0, 6.0, 2.0, 4.0, 3.0))))
}
"topk" should "be correct for 2d tensor" in {
val t = Tensor(Storage(Array(
0.0, 1.0, 5.0, 3.0, 9.0, 0.8, 6.3,
0.0, 1.0, 5.0, 3.0, 9.0, 0.8, 6.3,
0.0, 1.0, 5.0, 3.0, 9.0, 0.8, 6.3,
0.0, 1.0, 5.0, 3.0, 9.0, 0.8, 6.3,
0.0, 1.0, 5.0, 3.0, 9.0, 0.8, 6.3
)), 1, Array(5, 7))
val (v, i) = t.topk(5)
v should be(Tensor(Storage(Array(
0.0, 0.8, 1.0, 3.0, 5.0,
0.0, 0.8, 1.0, 3.0, 5.0,
0.0, 0.8, 1.0, 3.0, 5.0,
0.0, 0.8, 1.0, 3.0, 5.0,
0.0, 0.8, 1.0, 3.0, 5.0
)), 1, Array(5, 5)))
i should be(Tensor(Storage(Array(
1.0, 6.0, 2.0, 4.0, 3.0,
1.0, 6.0, 2.0, 4.0, 3.0,
1.0, 6.0, 2.0, 4.0, 3.0,
1.0, 6.0, 2.0, 4.0, 3.0,
1.0, 6.0, 2.0, 4.0, 3.0
)), 1, Array(5, 5)))
}
"powx(x,a)" should "return correct value" in {
val t: Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
val r: Tensor[Double] = Tensor(Storage(Array(0.0, 0.0, 0.0)))
r.pow(t, 2)
r should be(Tensor(Storage(Array(4.0, 9.0, 16.0))))
}
"powx(a)" should "return correct value" in {
val t: Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
t.pow(2)
t should be(Tensor(Storage(Array(4.0, 9.0, 16.0))))
}
"log(x)" should "return correct value" in {
val t: Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
val r: Tensor[Double] = Tensor(Storage(Array(0.0, 0.0, 0.0)))
r.log(t)
r should be(Tensor(Storage(Array(0.6931472, 1.0986123, 1.3862944))))
}
"log()" should "return correct value" in {
val t: Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
t.log(t)
t should be(Tensor(Storage(Array(0.6931472, 1.0986123, 1.3862944))))
}
"exp(x)" should "return correct value" in {
val t: Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
val r: Tensor[Double] = Tensor(Storage(Array(0.0, 0.0, 0.0)))
r.exp(t)
r should be(Tensor(Storage(Array(7.389056, 20.085537, 54.59815))))
}
"exp()" should "return correct value" in {
val t: Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
t.exp()
t should be(Tensor(Storage(Array(7.389056, 20.085537, 54.59815))))
}
"sqrt(x)" should "return correct value" in {
val t: Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
val r: Tensor[Double] = Tensor(Storage(Array(0.0, 0.0, 0.0)))
r.sqrt(t)
r should be(Tensor(Storage(Array(1.4142135, 1.7320508, 2.0))))
}
"sqrt()" should "return correct value" in {
val t: Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
t.sqrt()
t should be(Tensor(Storage(Array(1.4142135, 1.7320508, 2.0))))
}
"log1p(x)" should "return correct value" in {
val t: Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
val r: Tensor[Double] = Tensor(Storage(Array(0.0, 0.0, 0.0)))
r.log1p(t)
r should be(Tensor(Storage(Array(1.0986123, 1.3862944, 1.609438))))
}
"log1p()" should "return correct value" in {
val t: Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
t.log1p()
t should be(Tensor(Storage(Array(1.0986123, 1.3862944, 1.609438))))
}
"matrix sub(T)" should "return correct value" in{
val a : Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
val m = 1
a.sub(m)
a should be (Tensor(Storage(Array(1.0, 2.0, 3.0))))
}
"matrix sub(T,Tensor[T])" should "return correct value" in{
val a : Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
val b : Tensor[Double] = Tensor(Storage(Array(1.0, 2.0, 3.0)))
val m = 2
a.sub(m, b)
a should be (Tensor(Storage(Array(0.0, -1.0, -2.0))))
}
"matrix sub(Tensor[T])" should "return correct value" in{
val a : Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
val b : Tensor[Double] = Tensor(Storage(Array(1.0, 2.0, 3.0)))
a.sub(b)
val r = Tensor(Storage(Array(1.0, 1.0, 1.0)))
a should be (r)
}
"matrix sub(Tensor[T],T,Tensor[T])" should "return correct value" in{
val a : Tensor[Double] = Tensor(Storage(Array(2.0, 3.0, 4.0)))
val b : Tensor[Double] = Tensor(Storage(Array(1.0, 2.0, 3.0)))
val c : Tensor[Double] = Tensor(Storage(Array(1.0, 2.0, 3.0)))
val m = 2
val d = a.sub(c, m, b)
d should be (Tensor(Storage(Array(-1.0, -2.0, -3.0))))
}
"gemm(N, N)" should "return correct value" in {
val matrixA = Tensor[Float](2, 3)
val matrixB = Tensor[Float](3, 2)
var i = 0
matrixA.apply1(_ => {
i = i + 1;
i
})
matrixB.copy(matrixA)
val matrixC = Tensor[Float](2, 2)
DenseTensorBLAS.gemm[Float](
'N', 'N',
2, 2, 3,
1,
matrixA.storage().array(), matrixA.storageOffset() - 1, 2,
matrixB.storage().array(), matrixB.storageOffset() - 1, 3,
0,
matrixC.storage().array(), matrixC.storageOffset() - 1, 2
)
val result = Tensor[Float](Storage(Array[Float](22, 28, 49, 64)), 1, Array(2, 2))
matrixC should be (result)
}
"gemm(N, T)" should "return correct value" in {
val matrixA = Tensor[Float](2, 3)
val matrixB = Tensor[Float](2, 3)
var i = 0
matrixA.apply1(_ => {
i = i + 1;
i
})
matrixB.copy(matrixA)
val matrixC = Tensor[Float](2, 2)
DenseTensorBLAS.gemm[Float](
'N', 'T',
2, 2, 3,
1,
matrixA.storage().array(), matrixA.storageOffset() - 1, 2,
matrixB.storage().array(), matrixB.storageOffset() - 1, 2,
0,
matrixC.storage().array(), matrixC.storageOffset() - 1, 2
)
val result = Tensor[Float](Storage(Array[Float](35, 44, 44, 56)), 1, Array(2, 2))
matrixC should be (result)
}
"gemm(T, N)" should "return correct value" in {
val matrixA = Tensor[Float](3, 2)
val matrixB = Tensor[Float](3, 2)
var i = 0
matrixA.apply1(_ => {
i = i + 1;
i
})
matrixB.copy(matrixA)
val matrixC = Tensor[Float](2, 2)
DenseTensorBLAS.gemm[Float](
't', 'n',
2, 2, 3,
1,
matrixA.storage().array(), matrixA.storageOffset() - 1, 3,
matrixB.storage().array(), matrixB.storageOffset() - 1, 3,
0,
matrixC.storage().array(), matrixC.storageOffset() - 1, 2
)
val result = Tensor[Float](Storage(Array[Float](14, 32, 32, 77)), 1, Array(2, 2))
matrixC should be (result)
}
"gemm(T, T)" should "return correct value" in {
val matrixA = Tensor[Float](3, 2)
val matrixB = Tensor[Float](2, 3)
var i = 0
matrixA.apply1(_ => {
i = i + 1;
i
})
matrixB.copy(matrixA)
val matrixC = Tensor[Float](2, 2)
DenseTensorBLAS.gemm[Float](
'T', 'T',
2, 2, 3,
1,
matrixA.storage().array(), matrixA.storageOffset() - 1, 3,
matrixB.storage().array(), matrixB.storageOffset() - 1, 2,
0,
matrixC.storage().array(), matrixC.storageOffset() - 1, 2
)
val result = Tensor[Float](Storage(Array[Float](22, 49, 28, 64)), 1, Array(2, 2))
matrixC should be (result)
}
"cdiv" should "return right result" in {
val x = Tensor[Float](2, 2).fill(1f)
val y = Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2))
x.cdiv(y)
x should be (Tensor(Storage(Array(1f / 1, 1f / 2, 1f / 3, 1f / 4)), 1, Array(2, 2)))
y should be (Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2)))
}
"cdiv" should "return right result 2" in {
val x = Tensor[Float](2, 2).fill(1f)
val y = Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2))
y.cdiv(x, y)
x should be (Tensor(Storage(Array(1f, 1f, 1f, 1f)), 1, Array(2, 2)))
y should be (Tensor(Storage(Array(1f / 1, 1f / 2, 1f / 3, 1f / 4)), 1, Array(2, 2)))
}
"cdiv" should "return right result 3" in {
val x = Tensor[Float](2, 2).fill(1f)
val y = Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2))
val z = Tensor[Float](2, 2).zero()
z.cdiv(x, y)
x should be (Tensor(Storage(Array(1f, 1f, 1f, 1f)), 1, Array(2, 2)))
y should be (Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2)))
z should be (Tensor(Storage(Array(1f / 1, 1f / 2, 1f / 3, 1f / 4)), 1, Array(2, 2)))
}
"cmul" should "return right result" in {
val x = Tensor[Float](2, 2).fill(2f)
val y = Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2))
x.cmul(y)
x should be (Tensor(Storage(Array(2f * 1, 2f * 2, 2f * 3, 2f * 4)), 1, Array(2, 2)))
y should be (Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2)))
}
"cmul" should "return right result 2" in {
val x = Tensor[Float](2, 2).fill(2f)
val y = Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2))
y.cmul(x, y)
x should be (Tensor(Storage(Array(2f, 2f, 2f, 2f)), 1, Array(2, 2)))
y should be (Tensor(Storage(Array(2f * 1, 2f * 2, 2f * 3, 2f * 4)), 1, Array(2, 2)))
}
"cmul" should "return right result 3" in {
val x = Tensor[Float](2, 2).fill(2f)
val y = Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2))
val z = Tensor[Float](2, 2).zero()
z.cmul(x, y)
x should be (Tensor(Storage(Array(2f, 2f, 2f, 2f)), 1, Array(2, 2)))
y should be (Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2)))
z should be (Tensor(Storage(Array(2f * 1, 2f * 2, 2f * 3, 2f * 4)), 1, Array(2, 2)))
}
"cmul" should "return right result 4" in {
val x = Tensor[Float](Storage(Array(1f, 2)), 1, Array(2, 1))
val y = Tensor(Storage(Array(1f, 2, 3, 4, 5, 6)), 1, Array(2, 3))
x.expandAs(y)
val z = Tensor[Float](2, 3).zero()
z.cmul(x, y)
x should be (Tensor(Storage(Array(1f, 2)), 1, Array(2, 3), Array(1, 0)))
y should be (Tensor(Storage(Array(1f, 2, 3, 4, 5, 6)), 1, Array(2, 3)))
z should be (Tensor(Storage(Array(1f * 1, 1f * 2, 1f * 3, 2f * 4, 2f * 5, 2f * 6)),
1, Array(2, 3)))
}
"cmul" should "return right result 5" in {
val x = Tensor[Float](Storage(Array(1f, 2, 3)), 1, Array(1, 3))
val y = Tensor(Storage(Array(1f, 2, 3, 4, 5, 6)), 1, Array(2, 3))
x.expandAs(y)
val z = Tensor[Float](2, 3).zero()
z.cmul(x, y)
x should be (Tensor(Storage(Array(1f, 2, 3)), 1, Array(2, 3), Array(0, 1)))
y should be (Tensor(Storage(Array(1f, 2, 3, 4, 5, 6)), 1, Array(2, 3)))
z should be (Tensor(Storage(Array(1f * 1, 2f * 2, 3f * 3, 1f * 4, 2f * 5, 3f * 6)),
1, Array(2, 3)))
}
"add" should "return right result" in {
val x = Tensor[Float](2, 2).fill(2f)
val y = Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2))
x.add(y)
x should be (Tensor(Storage(Array(2f + 1, 2f + 2, 2f + 3, 2f + 4)), 1, Array(2, 2)))
y should be (Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2)))
}
"add" should "return right result 2" in {
val x = Tensor[Float](2, 2).fill(2f)
val y = Tensor(Storage(Array(1f, 2, 3, 4)), 1, Array(2, 2))
y.add(x, 2, y)
x should be (Tensor(Storage(Array(2f, 2f, 2f, 2f)), 1, Array(2, 2)))
y should be (Tensor(Storage(Array(2f + 2, 2f + 4, 2f + 6, 2f + 8)), 1, Array(2, 2)))
}
}
| SeaOfOcean/BigDL | dl/src/test/scala/com/intel/analytics/bigdl/tensor/DenseTensorMathSpec.scala | Scala | apache-2.0 | 27,802 |
/*§
===========================================================================
OSGi-Test
===========================================================================
Copyright (C) 2015 Gianluca Costa
===========================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package info.gianlucacosta.osgitest.hotswap.boot
import java.util.ServiceLoader
import org.osgi.framework.Constants
import org.osgi.framework.launch.FrameworkFactory
import scala.collection.JavaConversions._
object OsgiService {
  /**
   * Boots an OSGi framework, installs the named bundles (resolved as classpath
   * resources under /info/gianlucacosta/osgitest/bundles/), starts every
   * installed bundle and then blocks until the framework stops.
   *
   * Fix: replaced deprecated procedure syntax with an explicit `Unit` result type.
   *
   * @param bundleNames file names of the bundle jars to install
   */
  def startOSGi(bundleNames: Seq[String]): Unit = {
    // Pick the first FrameworkFactory advertised via the ServiceLoader mechanism.
    val frameworkFactory = ServiceLoader
      .load(classOf[FrameworkFactory])
      .iterator()
      .next()

    // FRAMEWORK_STORAGE_CLEAN wipes the bundle cache on first init; the extra
    // system packages export JavaFX from the host JVM to the installed bundles.
    val configurationMap = Map(
      Constants.FRAMEWORK_STORAGE_CLEAN ->
        Constants.FRAMEWORK_STORAGE_CLEAN_ONFIRSTINIT,
      "org.osgi.framework.system.packages.extra" ->
        List(
          "javafx.application",
          "javafx.collections",
          "javafx.event",
          "javafx.geometry",
          "javafx.scene",
          "javafx.scene.control",
          "javafx.scene.layout",
          "javafx.stage").mkString(",")
    )

    // The Scala map is converted implicitly to java.util.Map
    // (scala.collection.JavaConversions is imported file-wide).
    val framework = frameworkFactory.newFramework(configurationMap)
    framework.start()

    val bundleContext = framework.getBundleContext

    bundleNames.foreach(bundleName => {
      val bundlePath = s"/info/gianlucacosta/osgitest/bundles/${bundleName}"
      // NOTE(review): getResource returns null when the resource is missing,
      // which surfaces here as a NullPointerException — confirm this is intended.
      val bundleUrl = getClass.getResource(bundlePath).toString

      bundleContext.installBundle(
        bundleUrl
      )
    })

    // Starts every installed bundle (the system bundle is already active).
    bundleContext.getBundles.foreach(_.start())

    // Block the calling thread until the framework shuts down (0 = no timeout).
    framework.waitForStop(0)
  }
}
| giancosta86/OSGi-Test | hotswap-boot/src/main/scala/info/gianlucacosta/osgitest/hotswap/boot/OsgiService.scala | Scala | apache-2.0 | 2,239 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.physical.batch
import org.apache.flink.table.api.OperatorType
import org.apache.flink.table.calcite.FlinkContext
import org.apache.flink.table.plan.nodes.logical.FlinkLogicalJoin
import org.apache.flink.table.plan.nodes.physical.batch.BatchExecNestedLoopJoin
import org.apache.calcite.plan.RelOptRule.{any, operand}
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.core.{Join, JoinRelType}
/**
* Rule that converts [[FlinkLogicalJoin]] to [[BatchExecNestedLoopJoin]]
* if NestedLoopJoin is enabled.
*/
class BatchExecNestedLoopJoinRule
  extends RelOptRule(
    operand(classOf[FlinkLogicalJoin],
      operand(classOf[RelNode], any)),
    "BatchExecNestedLoopJoinRule")
  with BatchExecJoinRuleBase
  with BatchExecNestedLoopJoinRuleBase {

  /** The rule fires only when the NestedLoopJoin operator is enabled in the table config. */
  override def matches(call: RelOptRuleCall): Boolean =
    call.getPlanner.getContext
      .asInstanceOf[FlinkContext]
      .getTableConfig
      .isOperatorEnabled(OperatorType.NestedLoopJoin)

  override def onMatch(call: RelOptRuleCall): Unit = {
    val join: Join = call.rel(0)
    val leftInput = join.getLeft

    // For semi/anti joins the build (right) side may first be reduced with a
    // local distinct aggregate on all of its fields, when estimated to pay off.
    val rightInput = join.getJoinType match {
      case JoinRelType.SEMI | JoinRelType.ANTI =>
        val distinctKeys = 0 until join.getRight.getRowType.getFieldCount
        if (chooseSemiBuildDistinct(join.getRight, distinctKeys)) {
          addLocalDistinctAgg(join.getRight, distinctKeys, call.builder())
        } else {
          join.getRight
        }
      case _ =>
        join.getRight
    }

    val buildLeft = isLeftBuild(join, leftInput, rightInput)
    call.transformTo(
      createNestedLoopJoin(join, leftInput, rightInput, buildLeft, singleRowJoin = false))
  }

  /** Decides whether the left input becomes the build side of the nested-loop join. */
  private def isLeftBuild(join: Join, left: RelNode, right: RelNode): Boolean =
    join.getJoinType match {
      case JoinRelType.LEFT => false
      case JoinRelType.RIGHT => true
      case JoinRelType.SEMI | JoinRelType.ANTI => false
      case JoinRelType.INNER | JoinRelType.FULL =>
        val leftSize = binaryRowRelNodeSize(left)
        val rightSize = binaryRowRelNodeSize(right)
        // Build on the left when either size is unknown; otherwise on the smaller side.
        if (leftSize == null || rightSize == null) true else leftSize <= rightSize
    }
}
object BatchExecNestedLoopJoinRule {
  /** Singleton instance used when registering this rule with the planner. */
  val INSTANCE: RelOptRule = new BatchExecNestedLoopJoinRule
}
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/rules/physical/batch/BatchExecNestedLoopJoinRule.scala | Scala | apache-2.0 | 3,409 |
package edu.gemini.sp.vcs.log.impl
import edu.gemini.sp.vcs.log._
import edu.gemini.spModel.core.SPProgramID
import edu.gemini.util.security.principal.{StaffPrincipal, UserPrincipal, GeminiPrincipal}
import doobie.imports._
import java.io.File
import java.sql.Timestamp
import java.util.logging.Logger
import scalaz._, Scalaz._, effect.IO
object PersistentVcsLog2 {
  import PersistentVcsMappers._

  lazy val Log = Logger.getLogger(getClass.getName)

  /** Principal set recorded when an event carries no principals (see OCSINF-118). */
  final val Anonymous: NonEmptyList[GeminiPrincipal] = NonEmptyList(UserPrincipal("Anonymous"))

  /** Events closer together than this (and otherwise uniform) are rolled into one event set. */
  val TimeSlice = 1000 * 60 * 60 // 1 hour

  def info(s: String): ConnectionIO[Unit] =
    FC.delay(Log.info(s))

  def warn(s: String): ConnectionIO[Unit] =
    FC.delay(Log.warning(s))

  def fail[A](s: String): ConnectionIO[A] =
    FC.delay(sys.error(s))

  // The idea here is that when we change the schema, we update this number and add a case to the upgradeFrom
  // function below. This may end up being difficult in practice but at least we have a mechanism to do it.
  val SchemaVersion = 4

  // These are DB-specific, sadly. 23505 is the SQL-standard SQLSTATE for a
  // unique-constraint violation (replacing the "what is it?" placeholder, which
  // made the duplicate-key retry in insertPrincipal dead code); 42S02 is
  // "table or view not found". NOTE(review): confirm 23505 against the actual
  // database in use.
  val DUPLICATE_KEY = SqlState("23505")
  val TABLE_OR_VIEW_NOT_FOUND = SqlState("42S02")

  val createSchema: ConnectionIO[Unit] =
    sql"""
      create table VERSION (
        VALUE INTEGER NOT NULL
      );
      create table PRINCIPAL (
        PRINCIPAL_ID INTEGER GENERATED BY DEFAULT AS IDENTITY(START WITH 1) NOT NULL PRIMARY KEY,
        CLASS VARCHAR NOT NULL,
        NAME VARCHAR NOT NULL
      );
      create unique index PRINCIPAL_IDX on PRINCIPAL (CLASS,NAME);
      create table EVENT (
        EVENT_ID INTEGER GENERATED BY DEFAULT AS IDENTITY(START WITH 1) NOT NULL PRIMARY KEY,
        OP VARCHAR NOT NULL,
        TIMESTAMP TIMESTAMP NOT NULL,
        PROGRAM_ID VARCHAR NOT NULL,
        PRINCIPAL_HASH VARCHAR NOT NULL
      );
      create table EVENT_PRINCIPAL (
        EVENT_ID INTEGER NOT NULL,
        PRINCIPAL_ID INTEGER NOT NULL
      );
      create unique index EVENT_PRINCIPAL_IDX on EVENT_PRINCIPAL (EVENT_ID,PRINCIPAL_ID);
      alter table EVENT_PRINCIPAL
        add constraint EVENT_PRINCIPAL_FK1
        foreign key(EVENT_ID) references EVENT(EVENT_ID)
        on update NO ACTION
        on delete NO ACTION;
      alter table EVENT_PRINCIPAL
        add constraint EVENT_PRINCIPAL_FK3
        foreign key(PRINCIPAL_ID) references PRINCIPAL(PRINCIPAL_ID)
        on update NO ACTION
        on delete NO ACTION;
    """.update.run.void

  def insertSchemaVersion(version: Int): ConnectionIO[Unit] =
    sql"insert into VERSION (VALUE) values ($version)".update.run.void

  val getSchemaVersion: ConnectionIO[Int] =
    sql"select VALUE from VERSION".query[Int].unique

  // Try to open; if the VERSION table is missing this is a fresh database.
  def checkSchema(path: String): ConnectionIO[Unit] =
    open(path) exceptSomeSqlState {
      case TABLE_OR_VIEW_NOT_FOUND => createNewDatabase
    }

  def createNewDatabase: ConnectionIO[Unit] =
    for {
      _ <- info("This is a new database. Creating schema...")
      _ <- createSchema
      _ <- insertSchemaVersion(SchemaVersion)
    } yield ()

  // Open an existing database, upgrading (and re-checking) when the stored
  // schema version is older than SchemaVersion.
  def open(path: String): ConnectionIO[Unit] =
    for {
      v <- getSchemaVersion
      _ <- info(s"Opened database with schema version $SchemaVersion on ${path}")
      _ <- (v != SchemaVersion).whenM(upgradeFrom(v) >> checkSchema(path))
    } yield ()

  def upgradeFrom(version: Int): ConnectionIO[Unit] =
    version match {
      case 1 =>
        sql""";
          update EVENT set op = 'Fetch' where op = 'OpFetch';
          update EVENT set op = 'Store' where op = 'OpStore';
          update VERSION set VALUE = 2;
        """.update.run.void
      case 2 =>
        warn("Major upgrade; dropping all event data.") *>
        sql"""
          delete from EVENT_PRINCIPAL;
          delete from EVENT;
          alter table EVENT add (PRINCIPAL_HASH varchar NOT NULL DEFAULT '');
          update VERSION set VALUE = 3;
        """.update.run.void
      case 3 =>
        // Fix: removed the stray '"' characters that previously terminated both
        // statements early and made the SQL unparseable.
        // NOTE(review): this adds the same PRINCIPAL_HASH column that the 2 -> 3
        // upgrade already adds, so a chained 2 -> 4 upgrade would fail on a
        // duplicate column — confirm intent.
        sql"""
          alter table EVENT add (PRINCIPAL_HASH varchar NOT NULL DEFAULT '');
          update VERSION set VALUE = 4;
        """.update.run.void
      // Newer versions here
      case _ =>
        fail(s"Don't know how to upgrade from version $version.")
    }

  def insertJoin(eid: Id[VcsEvent], pid: Id[GeminiPrincipal]): ConnectionIO[Int] =
    sql"""
      insert into EVENT_PRINCIPAL (EVENT_ID, PRINCIPAL_ID)
      values ($eid, $pid)
    """.update.run

  // OCSINF-118: if the principal set is empty, add an anonymous principal
  def doLog(op: VcsOp, time: Timestamp, pid: SPProgramID, principals: List[GeminiPrincipal]): ConnectionIO[VcsEvent] =
    doLog2(op, time, pid, principals.toNel.getOrElse(Anonymous))

  // Log implementation. Insert the event, insert the principals, hook them up, read it back.
  def doLog2(op: VcsOp, time: Timestamp, pid: SPProgramID, principals: NonEmptyList[GeminiPrincipal]): ConnectionIO[VcsEvent] =
    for {
      ids <- principals.traverse(insertPrincipal)
      eid <- insertEvent(op, time, pid, PersistentVcsUtil.setHash(ids.map(_.n)))
      _   <- ids.traverse(insertJoin(eid, _))
      e   <- selectEvent(eid)
    } yield e

  // An uninspiring type that we're selecting twice below.
  type U = ((Id[VcsEvent], VcsOp, Timestamp, SPProgramID, String), (String, String))

  // To select by program we join with the principal table and stream results back, chunking by
  // program and principals and then decoding into a stream of event sets. We can then drop the
  // offset and take the size. This is rather complex and should be revisited.
  def doSelectByProgram(pid: SPProgramID, offset: Int, size: Int): ConnectionIO[(List[VcsEventSet], Boolean)] =
    sql"""
      select E.EVENT_ID, E.OP, E.TIMESTAMP, E.PROGRAM_ID, E.PRINCIPAL_HASH, P.CLASS, P.NAME
      from EVENT E
      join EVENT_PRINCIPAL J on J.EVENT_ID = E.EVENT_ID
      join PRINCIPAL P on P.PRINCIPAL_ID = J.PRINCIPAL_ID
      where E.PROGRAM_ID = $pid
      order by E.EVENT_ID desc
    """.query[U].process.chunkBy2 {
      case (((_, _, ts0, pid0, ph0), _), ((_, _, ts1, pid1, ph1), _)) =>
        // Rows collapse into one set while they share a program, a principal
        // hash, and fall within the same TimeSlice window.
        (ts0.getTime - ts1.getTime < TimeSlice) && (pid0 == pid1) && (ph0 == ph1)
    } .map(decode2)
      .drop(offset)
      .take(size + 1) // fetch one extra row to learn whether more data exists
      .vector
      .map { v =>
        (v.take(size).toList, v.size > size)
      }

  // Selecting a single event is a special case of the above, and uses the same decoder.
  def selectEvent(id: Id[VcsEvent]): ConnectionIO[VcsEvent] =
    sql"""
      select E.EVENT_ID, E.OP, E.TIMESTAMP, E.PROGRAM_ID, E.PRINCIPAL_HASH, P.CLASS, P.NAME
      from EVENT E
      join EVENT_PRINCIPAL J on J.EVENT_ID = E.EVENT_ID
      join PRINCIPAL P on P.PRINCIPAL_ID = J.PRINCIPAL_ID
      where E.EVENT_ID = $id
    """.query[U].list.map(decode)

  // Decode a chunk of rows, which must be uniform and non-empty.
  def decode(chunk: List[U]): VcsEvent = {
    val ps = chunk.map(_._2).map(p => GeminiPrincipal(p._1, p._2))
    chunk.head._1 match {
      case (id, op, ts, pid, _) => VcsEvent(id.n, op, ts.getTime, pid, ps.toSet)
    }
  }

  // Decode a chunk of rows, which must be uniform and non-empty.
  def decode2(chunk: Vector[U]): VcsEventSet = {
    // Pull rollup data out of the chunk
    val ids: Set[Int]                = chunk.map(_._1._1.n).toSet
    val ops: Map[VcsOp, Int]         = chunk.map(_._1._2).groupBy(identity).mapValues(_.length)
    val tss: Set[Long]               = chunk.map(_._1._3.getTime).toSet
    val pid: SPProgramID             = chunk.head._1._4
    val gps: Set[GeminiPrincipal]    = chunk.map(_._2).map(p => GeminiPrincipal(p._1, p._2)).toSet
    // And construct our event set!
    VcsEventSet(
      ids.min to ids.max,
      ops.toSeq.toMap, // Hack: ops is actually a MapLike and isn't serializable
      (tss.min, tss.max),
      pid,
      gps)
  }

  // Insert the event and return its Id
  def insertEvent(op: VcsOp, time: Timestamp, pid: SPProgramID, principalHash: String): ConnectionIO[Id[VcsEvent]] =
    sql"""
      insert into EVENT (OP, TIMESTAMP, PROGRAM_ID, PRINCIPAL_HASH)
      values ($op, $time, $pid, $principalHash)
    """.update.withUniqueGeneratedKeys[Id[VcsEvent]]("EVENT_ID")

  // Canonicalize a principal. To be more efficient we do the lookup first, and if that fails we
  // insert. This means there's a race we need to handle: a concurrent insert of the same
  // principal surfaces as DUPLICATE_KEY, in which case we simply retry (and find it).
  def insertPrincipal(p: GeminiPrincipal): ConnectionIO[Id[GeminiPrincipal]] =
    lookupPrincipal(p) >>= {
      case Some(id) => id.point[ConnectionIO]
      case None =>
        sql"""
          insert into PRINCIPAL (CLASS, NAME)
          values (${p.clazz}, ${p.getName})
        """.update
          .withUniqueGeneratedKeys[Id[GeminiPrincipal]]("PRINCIPAL_ID")
          .exceptSomeSqlState {
            case DUPLICATE_KEY => insertPrincipal(p)
          }
    }

  // Look up a principal by name and class.
  def lookupPrincipal(gp: GeminiPrincipal): ConnectionIO[Option[Id[GeminiPrincipal]]] =
    sql"""
      select PRINCIPAL_ID
      from PRINCIPAL
      where CLASS = ${gp.clazz}
      and NAME = ${gp.getName}
    """.query[Id[GeminiPrincipal]].option
}
| fnussber/ocs | bundle/edu.gemini.sp.vcs.log/src/main/scala/edu/gemini/sp/vcs/log/impl/PersistentVcsLog2.scala | Scala | bsd-3-clause | 9,095 |
/*
*
* * Copyright 2016 Skymind,Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.dhira.core.nnet.graph
import lombok.Setter
import org.deeplearning4j.berkeley.Pair
import org.deeplearning4j.berkeley.Triple
import org.deeplearning4j.datasets.iterator.AsyncDataSetIterator
import org.deeplearning4j.datasets.iterator.AsyncMultiDataSetIterator
import org.deeplearning4j.nn.api.Layer
import org.deeplearning4j.nn.api.Model
import org.deeplearning4j.nn.api.layers.IOutputLayer
import org.deeplearning4j.nn.api.layers.RecurrentLayer
import org.deeplearning4j.nn.conf.BackpropType
import org.deeplearning4j.nn.conf.ComputationGraphConfiguration
import org.deeplearning4j.nn.conf.NeuralNetConfiguration
import org.deeplearning4j.nn.gradient.DefaultGradient
import org.deeplearning4j.nn.gradient.Gradient
import org.deeplearning4j.nn.graph.util.ComputationGraphUtil
import org.deeplearning4j.nn.graph.vertex.GraphVertex
import org.deeplearning4j.nn.graph.vertex.VertexIndices
import org.deeplearning4j.nn.graph.vertex.impl.InputVertex
import org.deeplearning4j.nn.layers.BasePretrainNetwork
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork
import org.deeplearning4j.nn.updater.graph.ComputationGraphUpdater
import org.deeplearning4j.optimize.Solver
import org.deeplearning4j.optimize.api.ConvexOptimizer
import org.deeplearning4j.optimize.api.IterationListener
import org.deeplearning4j.util.ModelSerializer
import org.deeplearning4j.util.TimeSeriesUtils
import org.nd4j.linalg.api.ndarray.INDArray
import org.nd4j.linalg.dataset.api.DataSet
import org.nd4j.linalg.dataset.api.MultiDataSet
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator
import org.nd4j.linalg.dataset.api.iterator.MultiDataSetIterator
import org.nd4j.linalg.factory.Nd4j
import org.nd4j.linalg.heartbeat.Heartbeat
import org.nd4j.linalg.heartbeat.reports.Environment
import org.nd4j.linalg.heartbeat.reports.Event
import org.nd4j.linalg.heartbeat.reports.Task
import org.nd4j.linalg.heartbeat.utils.EnvironmentUtils
import org.nd4j.linalg.heartbeat.utils.TaskUtils
import org.nd4j.linalg.indexing.NDArrayIndex
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import java.io.Serializable
import java.util._
/**
* A ComputationGraph network is a neural network with arbitrary (directed acyclic graph) connection structure.
* A ComputationGraph may also have an arbitrary number of inputs and outputs.
*
* @author Alex Black
*/
object ComputationGraph {
  // Shared logger for all ComputationGraph instances.
  private val log: Logger = LoggerFactory.getLogger(classOf[ComputationGraph])
}
class ComputationGraph extends Serializable with Model {
// The configuration this graph was built from; assigned in the auxiliary constructor.
protected var configuration: ComputationGraphConfiguration = null
// Set to true once init(...) has completed; guards against double initialization.
protected var initCalled: Boolean = false
// NOTE(review): several fields below are typed `Nothing` — an artifact of
// automated Java->Scala conversion; the original types were lost and this
// will not compile as-is.
@transient
protected var solver: Nothing = null
// Single flattened row vector holding all network parameters; per-vertex views are cut from it.
protected var flattenedParams: INDArray = null
// Flattened gradient buffer, populated by initGradientsView; transient (not serialized).
@transient
protected var flattenedGradients: INDArray = null
protected var gradient: Gradient = null
protected var score: Double = .0
@Setter private var initDone: Boolean = false
/**
 * All GraphVertex objects in the network.
 */
protected var vertices: Array[GraphVertex] = null
/**
 * Map of vertices by name
 */
protected var verticesMap: Map[String, GraphVertex] = null
/**
 * Indexes of graph vertices, in topological order. The topological order defines the order in which forward pass
 * (and hence also backward pass, which is the opposite to this) is conducted in the network.
 */
protected var topologicalOrder: Array[Int] = null
/**
 * A list of layers. Each of these layers is present in a GraphVertex, but are here for easy reference.
 * This array also defines the order in which the getLayer(int) method returns layers.
 */
protected var layers: Array[Nothing] = null
/**
 * The number of input arrays to the network. Many networks only have 1 input; however, a ComputationGraph may
 * have an arbitrary number (>=1) separate input arrays
 */
private var numInputArrays: Int = 0
/**
 * The number of output arrays to the network. Many networks only have 1 output; however, a ComputationGraph may
 * have an arbitrary number (>=1) separate output arrays
 */
private var numOutputArrays: Int = 0
// Current input/label arrays and their masks; transient so they are not serialized with the model.
@transient
private var inputs: Array[INDArray] = null
@transient
private var labels: Array[INDArray] = null
@transient
private var inputMaskArrays: Array[INDArray] = null
@transient
private var labelMaskArrays: Array[INDArray] = null
private var defaultConfiguration: Nothing = null
// NOTE(review): `new ArrayList[E]` is another conversion artifact (`E` is
// undefined in this scope) and will not compile as-is.
private var listeners: Collection[Nothing] = new ArrayList[E]
/**
 * Builds a graph from its configuration: records the declared number of
 * input/output arrays and pre-sizes the corresponding holder arrays.
 * Vertex/layer construction is deferred to init().
 */
def this(configuration: ComputationGraphConfiguration) {
  this()
  this.configuration = configuration
  this.numInputArrays = configuration.getNetworkInputs.size
  this.numOutputArrays = configuration.getNetworkOutputs.size
  this.inputs = new Array[INDArray](numInputArrays)
  this.labels = new Array[INDArray](numOutputArrays)
  this.defaultConfiguration = configuration.getDefaultConfiguration
}
/** The configuration this graph was constructed from. */
def getConfiguration: ComputationGraphConfiguration = configuration

/**
 * Returns the number of layers in the ComputationGraph
 */
def getNumLayers: Int = if (layers != null) layers.length else 0

/**
 * Get the layer by the number of that layer, in range 0 to getNumLayers()-1
 * NOTE: This is different from the internal GraphVertex index for the layer
 */
def getLayer(idx: Int): Nothing = layers(idx)

/**
 * Get all layers in the ComputationGraph
 */
def getLayers: Array[Nothing] = layers

/**
 * Get a given layer by name.
 */
def getLayer(name: String): Nothing = verticesMap.get(name).getLayer

/**
 * Returns an array of all GraphVertex objects.
 */
def getVertices: Array[GraphVertex] = vertices

/**
 * Return a given GraphVertex by name, or null if no vertex with that name exists
 */
def getVertex(name: String): GraphVertex = verticesMap.get(name)

/**
 * The number of inputs to this network
 */
def getNumInputArrays: Int = numInputArrays

/**
 * The number of output (arrays) for this network
 */
def getNumOutputArrays: Int = numOutputArrays

/**
 * Set the specified input for the ComputationGraph
 */
def setInput(inputNum: Int, input: INDArray): Unit =
  inputs(inputNum) = input

/**
 * Set all inputs for the ComputationGraph network; the argument count must
 * match the number of declared network inputs.
 */
def setInputs(inputs: INDArray*): Unit = {
  if (inputs != null && inputs.length != this.numInputArrays) {
    throw new IllegalArgumentException("Invalid input array: network has " + numInputArrays + " inputs, but array is of length " + inputs.length)
  }
  this.inputs = inputs
}

/**
 * Get the previously set input for the ComputationGraph (null when no inputs were set)
 */
def getInput(inputNum: Int): INDArray =
  if (inputs == null) null else inputs(inputNum)

/**
 * Get the previously set inputs for the ComputationGraph
 */
def getInputs: Array[INDArray] = inputs

/**
 * Get the previously set feature/input mask arrays for the ComputationGraph
 */
def getInputMaskArrays: Array[INDArray] = inputMaskArrays

/**
 * Get the previously set label/output mask arrays for the ComputationGraph
 */
def getLabelMaskArrays: Array[INDArray] = labelMaskArrays

/**
 * Set the specified label for the ComputationGraph
 */
def setLabel(labelNum: Int, label: INDArray): Unit =
  labels(labelNum) = label

/**
 * Set all labels for the ComputationGraph network; the argument count must
 * match the number of declared network outputs.
 */
def setLabels(labels: INDArray*): Unit = {
  if (labels != null && labels.length != this.numOutputArrays) {
    throw new IllegalArgumentException("Invalid output array: network has " + numOutputArrays + " outputs, but array is of length " + labels.length)
  }
  this.labels = labels
}
/**
 * Initialize the ComputationGraph network: delegates to init(null, false),
 * i.e. randomly-initialized parameters.
 */
def init: Unit = init(null, false)
/**
 * Initialize the ComputationGraph, optionally with an existing parameters array.
 * If an existing parameters array is specified, it will be used (and the values will not be modified) in the network;
 * if no parameters array is specified, parameters will be initialized randomly according to the network configuration.
 *
 * NOTE(review): this body is the output of an automated Java->Scala conversion
 * and is not valid Scala as written — it uses `continue` (unsupported, see the
 * inline todos), mutates the `val` vertexNumber, and relies on `Nothing`
 * placeholder types. It is documented here as-is; a manual port is required
 * before it can compile.
 *
 * @param parameters Network parameter. May be null. If null: randomly initialize.
 * @param cloneParametersArray Whether the parameter array (if any) should be cloned, or used directly
 */
def init(parameters: INDArray, cloneParametersArray: Boolean) {
  if (initCalled) return
  // Phase 1: compute topological order; forward pass follows it, backprop reverses it.
  topologicalOrder = topologicalSortOrder
  val configVertexMap: Map[String, Nothing] = configuration.getVertices
  val networkInputNames: List[String] = configuration.getNetworkInputs
  val vertexInputs: Map[String, List[String]] = configuration.getVertexInputs
  this.vertices = new Array[GraphVertex](networkInputNames.size + configuration.getVertices.size)
  // Reverse lookup: vertex name -> vertex index.
  val allNamesReverse: Map[String, Integer] = new HashMap[String, Integer]
  val vertexNumber: Int = 0
  import scala.collection.JavaConversions._
  // Phase 2: create one InputVertex per declared network input (they hold no parameters).
  for (name <- networkInputNames) {
    val gv: GraphVertex = new InputVertex(this, name, vertexNumber, null)
    allNamesReverse.put(name, vertexNumber)
    vertices(({
      vertexNumber += 1; vertexNumber - 1
    })) = gv
  }
  // Phase 3: count parameters per configured vertex (input vertices contribute zero).
  var numParams: Int = 0
  val numParamsForVertex: Array[Int] = new Array[Int](topologicalOrder.length)
  var i: Int = 0
  while (i < configuration.getNetworkInputs.size) {
    {
      numParamsForVertex(i) = 0
    }
    ({
      i += 1; i - 1
    })
  }
  import scala.collection.JavaConversions._
  for (nodeEntry <- configVertexMap.entrySet) {
    val n: Nothing = nodeEntry.getValue
    numParamsForVertex(i) = n.numParams(true)
    numParams += numParamsForVertex(i)
    i += 1
  }
  // Phase 4: adopt the supplied parameter vector (validated) or allocate a fresh one.
  var initializeParams: Boolean = false
  if (parameters != null) {
    if (!parameters.isRowVector) throw new IllegalArgumentException("Invalid parameters: should be a row vector")
    if (parameters.length != numParams) throw new IllegalArgumentException("Invalid parameters: expected length " + numParams + ", got length " + parameters.length)
    if (cloneParametersArray) flattenedParams = parameters.dup
    else flattenedParams = parameters
    initializeParams = false
  }
  else {
    flattenedParams = Nd4j.create(1, numParams)
    initializeParams = true
  }
  // Phase 5: cut a per-vertex view out of the flattened parameter vector, in topological order.
  val paramsViewForVertex: Array[INDArray] = new Array[INDArray](topologicalOrder.length)
  var paramOffsetSoFar: Int = 0
  i = 0
  for (vertexIdx <- topologicalOrder) {
    val nParamsThisVertex: Int = numParamsForVertex(vertexIdx)
    if (nParamsThisVertex != 0) {
      paramsViewForVertex(vertexIdx) = flattenedParams.get(NDArrayIndex.point(0), NDArrayIndex.interval(paramOffsetSoFar, paramOffsetSoFar + nParamsThisVertex))
    }
    i += 1
    paramOffsetSoFar += nParamsThisVertex
  }
  // Phase 6: instantiate the configured (non-input) vertices; collect layer-bearing ones.
  var numLayers: Int = 0
  val tempLayerList: List[Nothing] = new ArrayList[E]
  import scala.collection.JavaConversions._
  for (nodeEntry <- configVertexMap.entrySet) {
    val n: Nothing = nodeEntry.getValue
    val name: String = nodeEntry.getKey
    val gv: GraphVertex = n.instantiate(this, name, vertexNumber, paramsViewForVertex(vertexNumber), initializeParams)
    if (gv.hasLayer) {
      numLayers += 1
      tempLayerList.add(gv.getLayer)
    }
    allNamesReverse.put(name, vertexNumber)
    vertices(({
      vertexNumber += 1; vertexNumber - 1
    })) = gv
  }
  layers = tempLayerList.toArray(new Array[Nothing](numLayers))
  verticesMap = new HashMap[String, GraphVertex]
  for (gv <- vertices) {
    verticesMap.put(gv.getVertexName, gv)
  }
  // Phase 7: build the reverse edge map (vertex name -> names of vertices it feeds).
  val verticesOutputTo: Map[String, List[String]] = new HashMap[String, List[String]]
  for (gv <- vertices) {
    val vertexName: String = gv.getVertexName
    var vertexInputNames: List[String] = null
    vertexInputNames = vertexInputs.get(vertexName)
    if (vertexInputNames == null) continue //todo: continue is not supported
    import scala.collection.JavaConversions._
    for (s <- vertexInputNames) {
      var list: List[String] = verticesOutputTo.get(s)
      if (list == null) {
        list = new ArrayList[String]
        verticesOutputTo.put(s, list)
      }
      list.add(vertexName)
    }
  }
  // Phase 8: wire each vertex's input indices (which output slot of which producer feeds it).
  for (gv <- vertices) {
    val vertexName: String = gv.getVertexName
    val vertexIndex: Int = gv.getVertexIndex
    var vertexInputNames: List[String] = null
    vertexInputNames = vertexInputs.get(vertexName)
    if (vertexInputNames == null) continue //todo: continue is not supported
    val inputIndices: Array[VertexIndices] = new Array[VertexIndices](vertexInputNames.size)
    {
      var j: Int = 0
      while (j < vertexInputNames.size) {
        {
          val inName: String = vertexInputNames.get(j)
          val inputVertexIndex: Int = allNamesReverse.get(inName)
          val inputVertex: GraphVertex = vertices(inputVertexIndex)
          val inputVertexOutputsTo: List[String] = verticesOutputTo.get(inName)
          val outputNumberOfInput: Int = inputVertexOutputsTo.indexOf(vertexName)
          if (outputNumberOfInput == -1) throw new IllegalStateException("Could not find vertex " + vertexIndex + " in the list of outputs " + "for vertex " + inputVertex + "; error in graph structure?")
          inputIndices(j) = new VertexIndices(inputVertexIndex, outputNumberOfInput)
        }
        ({
          j += 1; j - 1
        })
      }
    }
    gv.setInputVertices(inputIndices)
  }
  // Phase 9: wire each vertex's output indices (which input slot of which consumer it feeds).
  for (gv <- vertices) {
    val vertexName: String = gv.getVertexName
    val thisVertexOutputsTo: List[String] = verticesOutputTo.get(vertexName)
    if (thisVertexOutputsTo == null || thisVertexOutputsTo.isEmpty) continue //todo: continue is not supported
    val outputIndices: Array[VertexIndices] = new Array[VertexIndices](thisVertexOutputsTo.size)
    val j: Int = 0
    import scala.collection.JavaConversions._
    for (s <- thisVertexOutputsTo) {
      val nextVertexInputNames: List[String] = vertexInputs.get(s)
      val outputVertexInputNumber: Int = nextVertexInputNames.indexOf(vertexName)
      val outputVertexIndex: Int = allNamesReverse.get(s)
      outputIndices(({
        j += 1; j - 1
      })) = new VertexIndices(outputVertexIndex, outputVertexInputNumber)
    }
    gv.setOutputVertices(outputIndices)
  }
  initCalled = true
}
/**
 * This method: initializes the flattened gradients array (used in backprop) and sets the appropriate subset in all layers.
 * As a general rule, this shouldn't ever need to be called manually when doing training via fit(DataSet), fit(DataSetIterator)
 * or fit(MultiDataSet) methods
 */
def initGradientsView {
// Full graph structure must be initialized first.
if (!initCalled) init
var numParams: Int = 0
// Per-vertex parameter counts, indexed by vertex index (inputs first, then configured vertices).
val numParamsForVertex: Array[Int] = new Array[Int](topologicalOrder.length)
var i: Int = 0
// Input vertices carry no parameters.
while (i < configuration.getNetworkInputs.size) {
{
numParamsForVertex(i) = 0
}
({
i += 1; i - 1
})
}
// NOTE(review): value type is `Nothing` — auto-conversion artifact; presumably a layer
// configuration type with a numParams(boolean) method — confirm against the Java original.
val configVertexMap: Map[String, Nothing] = configuration.getVertices
import scala.collection.JavaConversions._
// Continue filling numParamsForVertex for the configured (non-input) vertices;
// `i` carries over from the loop above.
for (nodeEntry <- configVertexMap.entrySet) {
val n: Nothing = nodeEntry.getValue
numParamsForVertex(i) = n.numParams(true)
numParams += numParamsForVertex(i)
i += 1
}
// One flat row vector holding all gradients; each vertex gets a view into a sub-interval.
flattenedGradients = Nd4j.create(1, numParams)
var paramOffsetSoFar: Int = 0
i = 0
// NOTE(review): numParamsForVertex is indexed by vertexIdx here but was filled by insertion
// order above; this assumes the two orderings agree — confirm.
for (vertexIdx <- topologicalOrder) {
val nParamsThisVertex: Int = numParamsForVertex(vertexIdx)
if (nParamsThisVertex != 0) {
val gradientView: INDArray = flattenedGradients.get(NDArrayIndex.point(0), NDArrayIndex.interval(paramOffsetSoFar, paramOffsetSoFar + nParamsThisVertex))
vertices(vertexIdx).setBackpropGradientsViewArray(gradientView)
}
i += 1
paramOffsetSoFar += nParamsThisVertex
}
}
/**
 * Pretrain network with a single input and single output. DataSetIterators can only be used if the number of input
 * and output arrays for the ComputationGraph are both 1.
 * For networks with more than one input or output, use {@link #pretrain(MultiDataSetIterator)}
 */
def pretrain(iter: DataSetIterator) {
  // Only valid for single-input/single-output graphs; otherwise delegate is ambiguous.
  val singleInSingleOut = numInputArrays == 1 && numOutputArrays == 1
  if (!singleInSingleOut) {
    throw new UnsupportedOperationException("Cannot train ComputationGraph network with " + " multiple inputs or outputs using a DataSetIterator")
  }
  // Adapt to the multi-dataset form and reuse that implementation.
  pretrain(ComputationGraphUtil.toMultiDataSetIterator(iter))
}
/**
 * Pretrain network with multiple inputs and/or outputs
 */
// Layer-wise pretraining: for each pretrainable layer (in topological order), compute the
// minimal sub-graph of ancestors needed to feed it, run a forward pass over that sub-graph
// for every batch, and fit the layer on its resulting input.
// NOTE(review): this block is an unfinished Java->Scala auto-conversion: the `continue` and
// `break` statements below are not valid Scala, and `val k` is incremented — this will not
// compile as-is and needs a manual rewrite (guards/flags and `var k`).
def pretrain(iter: MultiDataSetIterator) {
{
var i: Int = 0
while (i < topologicalOrder.length) {
{
// Skip vertices with no layer, and skip non-pretrainable layer types.
if (!vertices(i).hasLayer) continue //todo: continue is not supported
if (vertices(i).getLayer.isInstanceOf[Nothing]) continue //todo: continue is not supported
// Build the partial topological order of vertices that (transitively) feed this layer.
val partialTopoSort: LinkedList[Integer] = new LinkedList[Integer]
val seenSoFar: Set[Integer] = new HashSet[Integer]
partialTopoSort.add(topologicalOrder(i))
seenSoFar.add(topologicalOrder(i))
{
var j: Int = i - 1
// Walk backwards through the global topological order; keep any vertex whose outputs
// reach a vertex we have already kept.
while (j >= 0) {
{
val outputsTo: Array[VertexIndices] = vertices(topologicalOrder(j)).getOutputVertices
var needed: Boolean = false
for (vi <- outputsTo) {
if (seenSoFar.contains(vi.getVertexIndex)) {
needed = true
break //todo: break is not supported
}
}
if (needed) {
partialTopoSort.addFirst(topologicalOrder(j))
seenSoFar.add(topologicalOrder(j))
}
}
({
j -= 1; j + 1
})
}
}
// Flatten the partial sort into an array giving the forward-pass order.
val fwdPassOrder: Array[Int] = new Array[Int](partialTopoSort.size)
val k: Int = 0
import scala.collection.JavaConversions._
for (g <- partialTopoSort) fwdPassOrder(({
k += 1; k - 1
})) = g
// The last vertex in the forward-pass order is the layer being pretrained.
val gv: GraphVertex = vertices(fwdPassOrder(fwdPassOrder.length - 1))
val layer: Nothing = gv.getLayer
if (!(layer.isInstanceOf[BasePretrainNetwork[_ <: Nothing]])) throw new IllegalStateException("Cannot pretrain network with layer that is not pretrainable")
ComputationGraph.log.info("Pretraining on layer \\"{}\\"", vertices(i).getVertexName)
val toPretrain: BasePretrainNetwork[_] = layer.asInstanceOf[BasePretrainNetwork[_]]
if (listeners != null) toPretrain.setListeners(listeners)
// For each batch: forward-propagate through the ancestor sub-graph, then fit the layer
// on the activations arriving at its first input.
while (iter.hasNext) {
val multiDataSet: MultiDataSet = iter.next
setInputs(multiDataSet.getFeatures)
{
var j: Int = 0
// Forward pass over all ancestors (excluding the layer itself, hence length - 1).
while (j < fwdPassOrder.length - 1) {
{
val current: GraphVertex = vertices(fwdPassOrder(j))
if (current.isInputVertex) {
// Input vertex: copy the graph input into every consuming vertex.
val inputsTo: Array[VertexIndices] = current.getOutputVertices
val input: INDArray = inputs(current.getVertexIndex)
for (v <- inputsTo) {
val vIdx: Int = v.getVertexIndex
val vIdxInputNum: Int = v.getVertexEdgeNumber
vertices(vIdx).setInput(vIdxInputNum, input.dup)
}
}
else {
// Internal vertex: forward (training mode) and push the output downstream.
val out: INDArray = current.doForward(true)
val outputsTo: Array[VertexIndices] = current.getOutputVertices
if (outputsTo != null) {
for (v <- outputsTo) {
val vIdx: Int = v.getVertexIndex
val inputNum: Int = v.getVertexEdgeNumber
vertices(vIdx).setInput(inputNum, out)
}
}
}
}
({
j += 1; j - 1
})
}
}
toPretrain.fit(gv.getInputs(0))
}
// Rewind the iterator so the next layer sees the full data again.
iter.reset
}
({
i += 1; i - 1
})
}
}
}
/**
 * Fit the ComputationGraph using a DataSet.
 * Note that this method can only be used with ComputationGraphs with 1 input and 1 output.
 * For networks with more than one input or output, use {@link #fit(MultiDataSetIterator)}
 */
def fit(dataSet: DataSet) {
  // This overload requires exactly one input array and one output array.
  if (numInputArrays != 1 || numOutputArrays != 1) throw new UnsupportedOperationException("Cannot train ComputationGraph network with " + " multiple inputs or outputs using a DataSet")
  val masked: Boolean = dataSet.hasMaskArrays
  if (masked) {
    // Wrap each (possibly absent) mask in a single-element array, or pass null when absent.
    val featureMask: Array[INDArray] =
      if (dataSet.getFeaturesMaskArray != null) Array[INDArray](dataSet.getFeaturesMaskArray) else null
    val labelMask: Array[INDArray] =
      if (dataSet.getLabelsMaskArray != null) Array[INDArray](dataSet.getLabelsMaskArray) else null
    setLayerMaskArrays(featureMask, labelMask)
  }
  // Delegate to the array-based fit with single-element feature/label arrays.
  fit(Array[INDArray](dataSet.getFeatureMatrix), Array[INDArray](dataSet.getLabels))
  if (masked) clearLayerMaskArrays
}
/**
 * Fit the ComputationGraph using a DataSetIterator.
 * Note that this method can only be used with ComputationGraphs with 1 input and 1 output
 */
def fit(iterator: DataSetIterator) {
  if (numInputArrays != 1 || numOutputArrays != 1) throw new UnsupportedOperationException("Cannot train ComputationGraph network with " + " multiple inputs or outputs using a DataSetIterator")
  // Prefer an async (prefetching) wrapper when the iterator supports it.
  // NOTE(review): `new Nothing(iterator, 2)` is an auto-conversion artifact; presumably
  // AsyncDataSetIterator (cf. AsyncMultiDataSetIterator in fit(MultiDataSetIterator)) — confirm.
  var dataSetIterator: DataSetIterator = null
  if (iterator.asyncSupported) {
    dataSetIterator = new Nothing(iterator, 2)
  }
  else dataSetIterator = iterator
  if (configuration.isPretrain) {
    pretrain(dataSetIterator)
  }
  if (configuration.isBackprop) {
    update(TaskUtils.buildTask(dataSetIterator))
    // fixed: the original used an unsupported `break` to stop on an empty batch;
    // replaced with an explicit loop flag.
    var done: Boolean = false
    while (!done && dataSetIterator.hasNext) {
      val next: DataSet = dataSetIterator.next
      if (next.getFeatureMatrix == null || next.getLabels == null) {
        done = true
      } else {
        val hasMaskArrays: Boolean = next.hasMaskArrays
        if (hasMaskArrays) {
          val fMask: Array[INDArray] = (if (next.getFeaturesMaskArray != null) Array[INDArray](next.getFeaturesMaskArray) else null)
          val lMask: Array[INDArray] = (if (next.getLabelsMaskArray != null) Array[INDArray](next.getLabelsMaskArray) else null)
          setLayerMaskArrays(fMask, lMask)
        }
        if (configuration.getBackpropType eq BackpropType.TruncatedBPTT) {
          doTruncatedBPTT(Array[INDArray](next.getFeatures), Array[INDArray](next.getLabels), (if (hasMaskArrays) Array[INDArray](next.getFeaturesMaskArray) else null), (if (hasMaskArrays) Array[INDArray](next.getLabelsMaskArray) else null))
        }
        else {
          setInput(0, next.getFeatureMatrix)
          setLabel(0, next.getLabels)
          // Lazily construct the solver on first use.
          if (solver == null) {
            solver = new Nothing().configure(defaultConfiguration).listeners(listeners).model(this).build
          }
          solver.optimize
        }
        if (hasMaskArrays) {
          clearLayerMaskArrays
        }
      }
    }
  }
}
/**
 * Fit the ComputationGraph using a MultiDataSet
 */
def fit(multiDataSet: MultiDataSet) {
  val masked: Boolean = multiDataSet.hasMaskArrays
  // Install masks (if any) before training, remove them after.
  if (masked) setLayerMaskArrays(multiDataSet.getFeaturesMaskArrays, multiDataSet.getLabelsMaskArrays)
  fit(multiDataSet.getFeatures, multiDataSet.getLabels)
  if (masked) clearLayerMaskArrays
}
/**
 * Fit the ComputationGraph using a MultiDataSetIterator
 */
def fit(multi: MultiDataSetIterator) {
  // Prefer an async (prefetching) wrapper when the iterator supports it.
  var multiDataSetIterator: MultiDataSetIterator = null
  if (multi.asyncSupported) {
    multiDataSetIterator = new AsyncMultiDataSetIterator(multi, 2)
  }
  else multiDataSetIterator = multi
  if (configuration.isPretrain) {
    pretrain(multiDataSetIterator)
  }
  if (configuration.isBackprop) {
    // fixed: the original used an unsupported `break` to stop on an empty batch;
    // replaced with an explicit loop flag.
    var done: Boolean = false
    while (!done && multiDataSetIterator.hasNext) {
      val next: MultiDataSet = multiDataSetIterator.next
      if (next.getFeatures == null || next.getLabels == null) {
        done = true
      } else if (configuration.getBackpropType eq BackpropType.TruncatedBPTT) {
        doTruncatedBPTT(next.getFeatures, next.getLabels, next.getFeaturesMaskArrays, next.getLabelsMaskArrays)
      }
      else {
        val hasMaskArrays: Boolean = next.hasMaskArrays
        if (hasMaskArrays) {
          setLayerMaskArrays(next.getFeaturesMaskArrays, next.getLabelsMaskArrays)
        }
        setInputs(next.getFeatures)
        setLabels(next.getLabels)
        // Lazily construct the solver on first use.
        if (solver == null) {
          solver = new Nothing().configure(defaultConfiguration).listeners(listeners).model(this).build
        }
        solver.optimize
        if (hasMaskArrays) {
          clearLayerMaskArrays
        }
      }
    }
  }
}
/**
 * Fit the ComputationGraph given arrays of inputs and labels.
 *
 * @param inputs The network inputs
 * @param labels The labels
 */
def fit(inputs: Array[INDArray], labels: Array[INDArray]) {
  // No mask arrays: delegate to the full overload with null masks.
  fit(inputs, labels, null, null)
}
/**
 * Fit the ComputationGraph using the specified inputs and labels (and mask arrays)
 *
 * @param inputs The network inputs (features)
 * @param labels The network labels
 * @param featureMaskArrays Mask arrays for inputs/features. Typically used for RNN training. May be null.
 * @param labelMaskArrays Mask arrays for the labels/outputs. Typically used for RNN training. May be null.
 */
def fit(inputs: Array[INDArray], labels: Array[INDArray], featureMaskArrays: Array[INDArray], labelMaskArrays: Array[INDArray]) {
  setInputs(inputs)
  setLabels(labels)
  setLayerMaskArrays(featureMaskArrays, labelMaskArrays)
  update(TaskUtils.buildTask(inputs, labels))
  if (configuration.isPretrain) {
    throw new UnsupportedOperationException("Pretraining: Not yet implemented")
  }
  if (configuration.isBackprop) {
    val truncatedBptt = configuration.getBackpropType eq BackpropType.TruncatedBPTT
    if (truncatedBptt) {
      doTruncatedBPTT(inputs, labels, null, null)
    } else {
      // Lazily construct the solver on first use, then run a single optimization pass.
      if (solver == null) {
        solver = new Nothing().configure(conf).listeners(getListeners).model(this).build
      }
      solver.optimize
    }
  }
}
/**
 * Calculate a topological sort order for the vertices in the graph.
 * Note that this is used for
 * (a) working out what order to do forward pass,
 * (b) what order to do backprop (i.e., reverse of this)
 * (c) order to flatten parameters (and gradients)
 */
def topologicalSortOrder: Array[Int] = {
  // Cached after the first computation.
  if (topologicalOrder != null) return topologicalOrder
  val nodeMap: Map[String, Nothing] = configuration.getVertices
  val networkInputNames: List[String] = configuration.getNetworkInputs
  val numVertices: Int = networkInputNames.size + configuration.getVertices.size
  val out: Array[Int] = new Array[Int](numVertices)
  var outCounter: Int = 0 // fixed: was `val` but is incremented when emitting vertices
  // Assign an integer index to every vertex: network inputs first, then configured vertices.
  val vertexNamesMap: Map[Integer, String] = new HashMap[Integer, String]
  val vertexNamesMap2: Map[String, Integer] = new HashMap[String, Integer]
  var i: Int = 0
  import scala.collection.JavaConversions._
  for (inputName <- configuration.getNetworkInputs) {
    vertexNamesMap.put(i, inputName)
    vertexNamesMap2.put(inputName, i)
    i += 1
  }
  for (entry <- nodeMap.entrySet) {
    val name: String = entry.getKey
    vertexNamesMap.put(i, name)
    vertexNamesMap2.put(name, i)
    i += 1
  }
  // inputEdges(v)  = set of vertices feeding into v (null for graph inputs / no-input vertices)
  // outputEdges(v) = set of vertices that v feeds into
  val inputEdges: Map[Integer, Set[Integer]] = new HashMap[Integer, Set[Integer]]
  val outputEdges: Map[Integer, Set[Integer]] = new HashMap[Integer, Set[Integer]]
  for (s <- configuration.getNetworkInputs) {
    val idx: Int = vertexNamesMap2.get(s)
    inputEdges.put(idx, null)
  }
  for (entry <- nodeMap.entrySet) {
    val thisVertexName: String = entry.getKey
    val idx: Int = vertexNamesMap2.get(thisVertexName)
    val inputsToThisVertex: List[String] = configuration.getVertexInputs.get(thisVertexName)
    if (inputsToThisVertex == null || inputsToThisVertex.isEmpty) {
      // fixed: unsupported `continue` replaced with an if/else branch
      inputEdges.put(idx, null)
    } else {
      val inputSet: Set[Integer] = new HashSet[Integer]
      for (s <- inputsToThisVertex) {
        val inputIdx: Integer = vertexNamesMap2.get(s)
        if (inputIdx == null) {
          // Leftover debug statement from the original (prints an empty line);
          // kept to preserve behavior.
          System.out.println
        }
        inputSet.add(inputIdx)
        var outputSetForInputIdx: Set[Integer] = outputEdges.get(inputIdx)
        if (outputSetForInputIdx == null) {
          outputSetForInputIdx = new HashSet[Integer]
          outputEdges.put(inputIdx, outputSetForInputIdx)
        }
        outputSetForInputIdx.add(idx)
      }
      inputEdges.put(idx, inputSet)
    }
  }
  // Kahn's algorithm: repeatedly emit a vertex with no remaining incoming edges,
  // removing its outgoing edges as it is emitted.
  val noIncomingEdges: LinkedList[Integer] = new LinkedList[Integer]
  for (entry <- inputEdges.entrySet) {
    val inputsFrom: Set[Integer] = entry.getValue
    if (inputsFrom == null || inputsFrom.isEmpty) {
      noIncomingEdges.add(entry.getKey)
    }
  }
  while (!noIncomingEdges.isEmpty) {
    val next: Int = noIncomingEdges.removeFirst
    out(outCounter) = next
    outCounter += 1
    val vertexOutputsTo: Set[Integer] = outputEdges.get(next)
    if (vertexOutputsTo != null) {
      for (v <- vertexOutputsTo) {
        val set: Set[Integer] = inputEdges.get(v)
        set.remove(next)
        if (set.isEmpty) {
          noIncomingEdges.add(v)
        }
      }
    }
  }
  // Any vertex with incoming edges left over means the graph contains a cycle.
  for (entry <- inputEdges.entrySet) {
    val set: Set[Integer] = entry.getValue
    // fixed: unsupported `continue` folded into the condition
    if (set != null && !set.isEmpty) throw new IllegalStateException("Invalid configuration: cycle detected in graph. Cannot calculate topological ordering with graph cycle (" + "cycle includes vertex \\"" + vertexNamesMap.get(entry.getKey) + "\\")")
  }
  out
}
// Runs a forward pass (regular or truncated-BPTT style), computes backprop gradients,
// then accumulates the score over all output layers. L1/L2 terms are added once
// (zeroed after the first output layer).
def computeGradientAndScore {
  val truncatedBptt = configuration.getBackpropType eq BackpropType.TruncatedBPTT
  if (truncatedBptt) {
    rnnActivateUsingStoredState(inputs, true, true)
    calcBackpropGradients(true)
  } else {
    feedForward(true, true)
    calcBackpropGradients(false)
  }
  var l1: Double = calcL1
  var l2: Double = calcL2
  score = 0.0
  import scala.collection.JavaConversions._
  for (s <- configuration.getNetworkOutputs) {
    val gv: GraphVertex = verticesMap.get(s)
    // Regularization terms are only counted once, on the first output layer.
    score += (gv.getLayer.asInstanceOf[Nothing]).computeScore(l1, l2, true)
    l1 = 0.0
    l2 = 0.0
  }
}
/**
 * Conduct forward pass using a single input array. Note that this method can only be used with ComputationGraphs
 * with a single input array.
 *
 * @param input The input array
 * @param train If true: do forward pass at training time
 * @return A map of activations for each layer (not each GraphVertex). Keys = layer name, values = layer activations
 */
def feedForward(input: INDArray, train: Boolean): Map[String, INDArray] = {
  // Single-input convenience: reject multi-input graphs up front.
  if (numInputArrays != 1) throw new UnsupportedOperationException("Cannot feedForward with single input for graph network with " + numInputArrays + " expected inputs")
  setInput(0, input)
  feedForward(train)
}
/**
 * Conduct forward pass using an array of inputs
 *
 * @param input An array of ComputationGraph inputs
 * @param train If true: do forward pass at training time; false: do forward pass at test time
 * @return A map of activations for each layer (not each GraphVertex). Keys = layer name, values = layer activations
 */
def feedForward(input: Array[INDArray], train: Boolean): Map[String, INDArray] = {
  // The number of supplied arrays must match the graph's declared input count.
  if (numInputArrays != input.length) throw new UnsupportedOperationException("Cannot feedForward with " + input.length + " inputs for graph network with " + numInputArrays + " expected inputs")
  for (idx <- input.indices) {
    setInput(idx, input(idx))
  }
  feedForward(train)
}
/**
 * Conduct forward pass using the stored inputs, at test time
 *
 * @return A map of activations for each layer (not each GraphVertex). Keys = layer name, values = layer activations
 */
def feedForward: Map[String, INDArray] = feedForward(false)
/**
 * Conduct forward pass using the stored inputs
 *
 * @param train If true: do forward pass at training time; false: do forward pass at test time
 * @return A map of activations for each layer (not each GraphVertex). Keys = layer name, values = layer activations
 */
def feedForward(train: Boolean): Map[String, INDArray] = feedForward(train, false)
/**
 * Core forward-pass implementation: walks every vertex in topological order using the
 * inputs already set on this graph, propagating each vertex's output into its consumers.
 *
 * @param train If true: forward pass at training time; false: test time
 * @param excludeOutputLayers If true: skip forward on output-layer vertices (used when the
 *                            caller computes output-layer scores separately)
 * @return Map of layer name to activations, for vertices that have layers (plus graph inputs)
 */
private def feedForward(train: Boolean, excludeOutputLayers: Boolean): Map[String, INDArray] = {
  val layerActivations: Map[String, INDArray] = new HashMap[String, INDArray]
  var i: Int = 0
  while (i < topologicalOrder.length) {
    val current: GraphVertex = vertices(topologicalOrder(i))
    if (current.isInputVertex) {
      // Input vertex: record the input itself and copy it into every consuming vertex.
      val inputsTo: Array[VertexIndices] = current.getOutputVertices
      val input: INDArray = inputs(current.getVertexIndex)
      layerActivations.put(current.getVertexName, input)
      for (v <- inputsTo) {
        val vIdx: Int = v.getVertexIndex
        val vIdxInputNum: Int = v.getVertexEdgeNumber
        vertices(vIdx).setInput(vIdxInputNum, input.dup)
      }
    } else if (!(excludeOutputLayers && current.isOutputVertex && current.hasLayer && current.getLayer.isInstanceOf[Nothing])) {
      // fixed: the original skipped excluded output layers with an unsupported `continue`;
      // the condition is inverted into the guard above instead.
      val out: INDArray = current.doForward(train)
      if (current.hasLayer) {
        layerActivations.put(current.getVertexName, out)
      }
      // Push the output into every consuming vertex (output layers have none).
      val outputsTo: Array[VertexIndices] = current.getOutputVertices
      if (outputsTo != null) {
        for (v <- outputsTo) {
          val vIdx: Int = v.getVertexIndex
          val inputNum: Int = v.getVertexEdgeNumber
          vertices(vIdx).setInput(inputNum, out)
        }
      }
    }
    i += 1
  }
  layerActivations
}
/**
 * Return an array of network outputs (predictions) at test time, given the specified network inputs
 * Network outputs are for output layers only.
 *
 * @param input Inputs to the network
 * @return Output activations (order: same as defined in network configuration)
 */
def output(input: INDArray*): Array[INDArray] = {
  // fixed: a varargs Seq must be re-spread with `: _*` when forwarded to another
  // varargs method; the original `output(false, input)` would not resolve correctly.
  output(false, input: _*)
}
/**
 * A convenience method that returns a single INDArray, instead of an INDArray[].
 * Useful for ComputationGraphs that have only a single output.
 * Otherwise identical to {@link #output(INDArray...)}
 *
 * @param input Inputs to the network
 * @return Output activations array
 */
def outputSingle(input: INDArray*): INDArray = {
  // fixed: spread the varargs Seq with `: _*` when forwarding to another varargs method.
  outputSingle(false, input: _*)
}
/**
 * Return an array of network outputs (predictions), given the specified network inputs
 * Network outputs are for output layers only.
 *
 * @param train If true: do forward pass at training time; false: do forward pass at test time
 * @param input Inputs to the network
 * @return Output activations (order: same as defined in network configuration)
 */
def output(train: Boolean, input: INDArray*): Array[INDArray] = {
  // NOTE(review): `setInputs(input)` passes a Seq where the original Java passed an array;
  // may need `input: _*` or `input.toArray` depending on setInputs' signature — confirm.
  setInputs(input)
  val activations: Map[String, INDArray] = feedForward(train)
  // Collect the activations of the configured output layers, in configuration order.
  val outputs: Array[INDArray] = new Array[INDArray](numOutputArrays)
  var i: Int = 0 // fixed: was declared `val` but is incremented below
  import scala.collection.JavaConversions._
  for (s <- configuration.getNetworkOutputs) {
    outputs(i) = activations.get(s)
    i += 1
  }
  outputs
}
/**
 * A convenience method that returns a single INDArray, instead of an INDArray[].
 * Useful for ComputationGraphs that have only a single output.
 * Otherwise identical to {@link #output(boolean, INDArray...)}
 *
 * @param train If true: do forward pass at training time; false: do forward pass at test time
 * @param input Inputs to the network
 * @return Output activations array
 */
def outputSingle(train: Boolean, input: INDArray*): INDArray = {
  if (numOutputArrays != 1) {
    throw new IllegalStateException("Cannot use outputSingle with ComputationGraph that does not have exactly 1 output. nOutputs: " + numOutputArrays)
  }
  // fixed: spread the varargs Seq with `: _*` when forwarding to the varargs output(...).
  output(train, input: _*)(0)
}
/**
 * Calculate the gradient of the network with respect to some external errors.
 * Note that this is typically used for things like reinforcement learning, not typical networks that include
 * an OutputLayer or RnnOutputLayer
 *
 * @param epsilons Epsilons (errors) at the output. Same order with which the output layers are defined in configuration setOutputs(String...)
 * @return Gradient for the network
 */
def backpropGradient(epsilons: INDArray*): Gradient = {
  if (epsilons == null || epsilons.length != numOutputArrays) throw new IllegalArgumentException("Invalid input: must have epsilons length equal to number of output arrays")
  // fixed: spread the varargs Seq with `: _*` when forwarding to the varargs
  // calcBackpropGradients; the original passed the Seq as a single argument.
  calcBackpropGradients(configuration.getBackpropType eq BackpropType.TruncatedBPTT, epsilons: _*)
  gradient
}
/**
 * Do backprop (gradient calculation)
 *
 * @param truncatedBPTT false: normal backprop. true: calculate gradients using truncated BPTT for RNN layers
 * @param externalEpsilons null usually (for typical supervised learning). If not null (and length > 0) then assume that
 *                         the user has provided some errors externally, as they would do for example in reinforcement
 *                         learning situations.
 */
// Walks vertices in REVERSE topological order: sets labels/errors on output vertices,
// backprops each vertex, routes the resulting epsilons to input vertices, and collects
// per-variable gradients into the flattened gradient view.
// NOTE(review): unfinished auto-conversion — `continue` is not valid Scala, and
// `new LinkedList[E]` references an undefined type parameter E (presumably `new LinkedList<>()`
// of a Triple-like element in the Java original) — this needs a manual rewrite.
protected def calcBackpropGradients(truncatedBPTT: Boolean, externalEpsilons: INDArray*) {
// Gradient views require the flattened gradients array to exist.
if (flattenedGradients == null) initGradientsView
val gradients: LinkedList[Nothing] = new LinkedList[E]
{
var i: Int = topologicalOrder.length - 1
while (i >= 0) {
{
val current: GraphVertex = vertices(topologicalOrder(i))
// Input vertices have nothing to backprop.
if (current.isInputVertex) continue //todo: continue is not supported
if (current.isOutputVertex) {
// Output vertex: either set labels on a proper output layer, or inject the
// externally-provided epsilons (e.g. reinforcement learning).
val thisOutputNumber: Int = configuration.getNetworkOutputs.indexOf(current.getVertexName)
if (current.getLayer.isInstanceOf[Nothing]) {
val outputLayer: Nothing = current.getLayer.asInstanceOf[Nothing]
val currLabels: INDArray = labels(thisOutputNumber)
outputLayer.setLabels(currLabels)
}
else {
current.setErrors(externalEpsilons(thisOutputNumber))
}
}
val pair: Nothing = current.doBackward(truncatedBPTT)
val epsilons: Array[INDArray] = pair.getSecond
// Route each epsilon back to the vertex that produced the corresponding input.
val inputVertices: Array[VertexIndices] = current.getInputVertices
if (inputVertices != null) {
var j: Int = 0
for (v <- inputVertices) {
val gv: GraphVertex = vertices(v.getVertexIndex)
val outputNumberOfInputVertex: Int = v.getVertexEdgeNumber
gv.setError(outputNumberOfInputVertex, epsilons(({
j += 1; j - 1
})))
}
}
if (pair.getFirst != null) {
// Prefix each gradient variable with the vertex name, preserving flattening order;
// addFirst keeps the final gradient list in forward (topological) order.
val g: Gradient = pair.getFirst
val map: Map[String, INDArray] = g.gradientForVariable
val tempList: LinkedList[Nothing] = new LinkedList[E]
import scala.collection.JavaConversions._
for (entry <- map.entrySet) {
val origName: String = entry.getKey
val newName: String = current.getVertexName + "_" + origName
tempList.addFirst(new Nothing(newName, entry.getValue, g.flatteningOrderForVariable(origName)))
}
import scala.collection.JavaConversions._
for (t <- tempList) gradients.addFirst(t)
}
}
({
i -= 1; i + 1
})
}
}
// Assemble the final Gradient backed by the flattened view.
val gradient: Gradient = new DefaultGradient(flattenedGradients)
import scala.collection.JavaConversions._
for (t <- gradients) {
gradient.setGradientFor(t.getFirst, t.getSecond, t.getThird)
}
this.gradient = gradient
}
// Deep-copies the network: clones the configuration, duplicates the parameters, and
// (when present) duplicates the updater state. Listeners are shared, not copied.
override def clone: ComputationGraph = {
  val copy: ComputationGraph = new ComputationGraph(configuration.clone)
  copy.init(params.dup, false)
  if (solver != null) {
    // Updater only exists once a solver has been created; copy its state if it has any.
    val updater: ComputationGraphUpdater = this.getUpdater
    val updaterState: INDArray = updater.getStateViewArray
    if (updaterState != null) {
      copy.getUpdater.setStateViewArray(updaterState.dup)
    }
  }
  copy.listeners = this.listeners
  copy
}
/**
 * Calculate the L2 regularization term for all layers in the entire network. This is the sum of the L2 terms
 * for each layer individually
 */
def calcL2: Double = layers.foldLeft(0.0)(_ + _.calcL2)
/**
 * Calculate the L1 regularization term for all layers in the entire network. This is the sum of the L1 terms
 * for each layer individually
 */
def calcL1: Double = layers.foldLeft(0.0)(_ + _.calcL1)
/**
 * Set the IterationListeners for the ComputationGraph (and all layers in the network)
 */
def setListeners(listeners: Collection[Nothing]) {
  this.listeners = listeners
  // Layers must exist before listeners can be propagated to them.
  if (layers == null) init
  for (l <- layers) l.setListeners(listeners)
  if (solver != null) solver.setListeners(listeners)
}
/**
 * Set the IterationListeners for the ComputationGraph (and all layers in the network)
 */
// Collects the non-null listeners into a list and delegates to setListeners(Collection).
def setListeners(listeners: Nothing*) {
// NOTE(review): `new ArrayList[E]` — type parameter E is undefined here (auto-conversion
// artifact; the Java original was presumably `new ArrayList<>()`) — confirm and fix.
val list: List[Nothing] = new ArrayList[E]
if (listeners != null && listeners.length > 0) {
for (i <- listeners) {
if (i != null) list.add(i)
}
}
setListeners(list)
}
/**
 * Get the IterationListeners for the ComputationGraph
 */
def getListeners: Collection[Nothing] = listeners
/**
 * Get the ComputationGraphUpdater for the network
 */
def getUpdater: ComputationGraphUpdater = {
  // Lazily create the solver (and a fresh updater) on first access.
  if (solver == null) {
    solver = new Nothing().configure(conf).listeners(getListeners).model(this).build
    solver.getOptimizer.setUpdaterComputationGraph(new ComputationGraphUpdater(this))
  }
  solver.getOptimizer.getComputationGraphUpdater
}
/**
 * Set the computationGraphUpdater for the network
 */
def setUpdater(updater: ComputationGraphUpdater) {
  // Lazily create the solver so there is an optimizer to attach the updater to.
  if (solver == null) {
    solver = new Nothing().configure(conf).listeners(getListeners).model(this).build
  }
  solver.getOptimizer.setUpdaterComputationGraph(updater)
}
/**
 * Get the specified output layer, by index. The index of the output layer may be 0 to {@link #getNumOutputArrays()}-1
 */
def getOutputLayer(outputLayerIdx: Int): Nothing = {
  // Bounds check against the configured number of outputs.
  if (outputLayerIdx >= numOutputArrays) throw new IllegalArgumentException("Invalid index: cannot get output layer " + outputLayerIdx + ", total number of network outputs = " + numOutputArrays)
  getLayer(configuration.getNetworkOutputs.get(outputLayerIdx))
}
/**
 * Get the parameters for the ComputationGraph
 *
 * @param backwardOnly If true: backprop parameters only (i.e., no visible layer biases used in layerwise pretraining layers)
 */
def params(backwardOnly: Boolean): INDArray = {
  // Backprop-only params are the flattened view maintained by the network.
  if (backwardOnly) return flattenedParams
  // Otherwise collect each layer's full parameter vector in topological order and flatten.
  val list: List[INDArray] = new ArrayList[INDArray](layers.length)
  var i: Int = 0
  while (i < topologicalOrder.length) {
    // fixed: unsupported `continue` statements replaced with nested guards
    if (vertices(topologicalOrder(i)).hasLayer) {
      val l: Nothing = vertices(topologicalOrder(i)).getLayer
      val layerParams: INDArray = l.params
      if (layerParams != null) list.add(layerParams)
    }
    i += 1
  }
  Nd4j.toFlattened('f', list)
}
/**
 * Sets the input and labels and returns a score for the prediction with respect to the true labels<br>
 * This is equivalent to {@link #score(DataSet, boolean)} with training==false.<br>
 * <b>NOTE:</b> this version of the score function can only be used with ComputationGraph networks that have
 * a single input and a single output.
 *
 * @param dataSet the data to score
 * @return the score for the given input,label pairs
 * @see #score(DataSet, boolean)
 */
def score(dataSet: DataSet): Double = score(dataSet, false)
/**
 * Sets the input and labels and returns a score for the prediction with respect to the true labels<br>
 * <b>NOTE:</b> this version of the score function can only be used with ComputationGraph networks that have
 * a single input and a single output. Use {@link #score(MultiDataSet, boolean)} for multiple input/output networks
 *
 * @param dataSet the data to score
 * @param training whether score is being calculated at training time (true) or test time (false)
 * @return the score for the given input,label pairs
 * @see #score(DataSet, boolean)
 */
def score(dataSet: DataSet, training: Boolean): Double = {
  // This overload requires exactly one input array and one output array.
  if (numInputArrays != 1 || numOutputArrays != 1) throw new UnsupportedOperationException("Cannot score ComputationGraph network with " + " DataSet: network does not have 1 input and 1 output arrays")
  score(ComputationGraphUtil.toMultiDataSet(dataSet), training)
}
/**
 * Score the network given the MultiDataSet, at test time
 */
def score(dataSet: MultiDataSet): Double = score(dataSet, false)
/**
 * Sets the input and labels and returns a score for the prediction with respect to the true labels<br>
 *
 * @param dataSet the data to score
 * @param training whether score is being calculated at training time (true) or test time (false)
 * @return the score for the given input,label pairs
 */
def score(dataSet: MultiDataSet, training: Boolean): Double = {
  val hasMaskArrays: Boolean = dataSet.hasMaskArrays
  if (hasMaskArrays) {
    setLayerMaskArrays(dataSet.getFeaturesMaskArrays, dataSet.getLabelsMaskArrays)
  }
  feedForward(dataSet.getFeatures, training)
  val labels: Array[INDArray] = dataSet.getLabels
  setLabels(labels)
  // Regularization terms are added once, on the first output layer only.
  var l1: Double = calcL1
  var l2: Double = calcL2
  var total: Double = 0.0
  var i: Int = 0
  import scala.collection.JavaConversions._
  for (s <- configuration.getNetworkOutputs) {
    val outLayer: Nothing = verticesMap.get(s).getLayer
    if (outLayer == null || !(outLayer.isInstanceOf[Nothing])) {
      ComputationGraph.log.warn("Cannot calculate score: vertex \\"" + s + "\\" is not an output layer")
      return 0.0
    }
    val ol: Nothing = outLayer.asInstanceOf[Nothing]
    ol.setLabels(labels(i))
    i += 1
    total += ol.computeScore(l1, l2, true)
    l1 = 0.0
    l2 = 0.0
  }
  if (hasMaskArrays) clearLayerMaskArrays
  total
}
/**
 * Calculate the score for each example in a DataSet individually. Unlike {@link #score(DataSet)} and {@link #score(DataSet, boolean)}
 * this method does not average/sum over examples. This method allows for examples to be scored individually (at test time only), which
 * may be useful for example for autoencoder architectures and the like.<br>
 * Each row of the output (assuming addRegularizationTerms == true) is equivalent to calling score(DataSet) with a single example.
 *
 * @param data The data to score
 * @param addRegularizationTerms If true: add l1/l2 regularization terms (if any) to the score. If false: don't add regularization terms
 * @return An INDArray (column vector) of size input.numRows(); the ith entry is the score (loss value) of the ith example
 */
def scoreExamples(data: DataSet, addRegularizationTerms: Boolean): INDArray = {
  // This overload requires exactly one input array and one output array.
  if (numInputArrays != 1 || numOutputArrays != 1) throw new UnsupportedOperationException("Cannot score ComputationGraph network with " + " DataSet: network does not have 1 input and 1 output arrays")
  scoreExamples(ComputationGraphUtil.toMultiDataSet(data), addRegularizationTerms)
}
/**
 * Calculate the score for each example in a DataSet individually. Unlike {@link #score(MultiDataSet)} and {@link #score(MultiDataSet, boolean)}
 * this method does not average/sum over examples. This method allows for examples to be scored individually (at test time only), which
 * may be useful for example for autoencoder architectures and the like.<br>
 * Each row of the output (assuming addRegularizationTerms == true) is equivalent to calling score(MultiDataSet) with a single example.
 *
 * @param data The data to score
 * @param addRegularizationTerms If true: add l1/l2 regularization terms (if any) to the score. If false: don't add regularization terms
 * @return An INDArray (column vector) of size input.numRows(); the ith entry is the score (loss value) of the ith example
 */
def scoreExamples(data: MultiDataSet, addRegularizationTerms: Boolean): INDArray = {
  val hasMaskArray: Boolean = data.hasMaskArrays
  if (hasMaskArray) setLayerMaskArrays(data.getFeaturesMaskArrays, data.getLabelsMaskArrays)
  feedForward(data.getFeatures, false)
  setLabels(data.getLabels)
  // Per-example scores accumulate across output layers; regularization terms are
  // applied once, on the first output layer only.
  var out: INDArray = null
  var l1: Double = if (addRegularizationTerms) calcL1 else 0.0
  var l2: Double = if (addRegularizationTerms) calcL2 else 0.0
  var i: Int = 0
  import scala.collection.JavaConversions._
  for (s <- configuration.getNetworkOutputs) {
    val outLayer: Nothing = verticesMap.get(s).getLayer
    if (outLayer == null || !(outLayer.isInstanceOf[Nothing])) {
      throw new UnsupportedOperationException("Cannot calculate score: vertex \\"" + s + "\\" is not an output layer")
    }
    val ol: Nothing = outLayer.asInstanceOf[Nothing]
    ol.setLabels(labels(i))
    i += 1
    val scoreCurrLayer: INDArray = ol.computeScoreForExamples(l1, l2)
    if (out == null) out = scoreCurrLayer
    else out.addi(scoreCurrLayer)
    l1 = 0.0
    l2 = 0.0
  }
  if (hasMaskArray) clearLayerMaskArrays
  out
}
// Fit using the inputs, labels, and mask arrays previously set on this network.
def fit {
fit(inputs, labels, inputMaskArrays, labelMaskArrays)
}
// Per-parameter-array update is not supported for ComputationGraph; use update(Gradient).
def update(gradient: INDArray, paramType: String) {
throw new UnsupportedOperationException("Not implemented")
}
/**
 * Apply an externally-computed Gradient to the network.
 * Gradient keys are expected in the form "layerName_paramType"; each entry is recorded
 * on this network's gradient and applied to the corresponding layer.
 */
def update(gradient: Gradient) {
  if (gradient.gradient.length != numParams(true)) throw new IllegalArgumentException("Invalid input: expect gradients array of length " + numParams(true))
  import scala.collection.JavaConversions._
  for (entry <- gradient.gradientForVariable.entrySet) {
    val key: String = entry.getKey
    val `val`: INDArray = entry.getValue
    val idx: Int = key.indexOf('_')
    if (idx == -1) throw new IllegalStateException("Invalid param key: not have layer separator: \\"" + key + "\\"")
    val layerName: String = key.substring(0, idx)
    // fixed: was `key.split("_")(1)`, which truncates param types containing further
    // underscores and is inconsistent with the first-'_' split used for layerName;
    // take everything after the first separator instead.
    val paramType: String = key.substring(idx + 1)
    this.gradient.gradientForVariable.put(key, `val`)
    getLayer(layerName).update(`val`, paramType)
  }
  setBackpropGradientsViewArray(gradient.gradient)
}
private def update(task: Task) {
if (!initDone) {
initDone = true
val heartbeat: Heartbeat = Heartbeat.getInstance
task = ModelSerializer.taskByModel(this)
val env: Environment = EnvironmentUtils.buildEnvironment
heartbeat.reportEvent(Event.STANDALONE, env, task)
}
}
  // NOTE(review): `return score` calls this same method — infinite recursion.
  // The Java original returned a `score` field that this conversion has
  // shadowed with the method name; the backing field must be restored — verify.
  def score: Double = {
    return score
  }
  // NOTE(review): `this.score` resolves to the parameterless method above, not
  // a field; this assignment will not compile as written — verify backing field.
  def setScore(score: Double) {
    this.score = score
  }
  def accumulateScore(accum: Double) {
    throw new UnsupportedOperationException("Not implemented")
  }
  /** Returns all network parameters, including the backprop view (params(true)). */
  def params: INDArray = {
    return params(true)
  }
  /** Total parameter count, including the backprop view (numParams(true)). */
  def numParams: Int = {
    return numParams(true)
  }
  /** Sums parameter counts over all layers.
    * @param backwards passed through to each layer's numParams
    */
  def numParams(backwards: Boolean): Int = {
    var nParams: Int = 0
    for (layer <- layers) {
      nParams += layer.numParams(backwards)
    }
    return nParams
  }
def setParams(params: INDArray) {
if (params eq flattenedParams) return
if (this.flattenedParams != null && this.flattenedParams.length == params.length) {
this.flattenedParams.assign(params)
return
}
var idx: Int = 0
{
var i: Int = 0
while (i < topologicalOrder.length) {
{
if (!vertices(topologicalOrder(i)).hasLayer) continue //todo: continue is not supported
val layer: Nothing = vertices(topologicalOrder(i)).getLayer
val range: Int = layer.numParams
if (range <= 0) continue //todo: continue is not supported
val get: INDArray = params.get(NDArrayIndex.point(0), NDArrayIndex.interval(idx, range + idx))
layer.setParams(get)
idx += range
}
({
i += 1; i - 1
})
}
}
}
def setParamsViewArray(gradient: INDArray) {
throw new RuntimeException("Not yet implemented")
}
def setBackpropGradientsViewArray(gradient: INDArray) {
var paramsSoFar: Int = 0
{
var i: Int = 0
while (i < topologicalOrder.length) {
{
if (!vertices(topologicalOrder(i)).hasLayer) continue //todo: continue is not supported
val layer: Nothing = vertices(topologicalOrder(i)).getLayer
val range: Int = layer.numParams
if (range <= 0) continue //todo: continue is not supported
layer.setBackpropGradientsViewArray(gradient.get(NDArrayIndex.point(0), NDArrayIndex.interval(paramsSoFar, paramsSoFar + range)))
paramsSoFar += range
}
({
i += 1; i - 1
})
}
}
}
  def applyLearningRateScoreDecay {
    throw new UnsupportedOperationException("Not implemented")
  }
  /** Pretraining from a single INDArray is not meaningful for a multi-input graph. */
  def fit(data: INDArray) {
    throw new UnsupportedOperationException("Cannot pretrain ComputationGraph with single INDArray")
  }
  def iterate(input: INDArray) {
    throw new UnsupportedOperationException("Not implemented")
  }
  // NOTE(review): `return gradient` calls this same method — infinite
  // recursion. The Java original returned a `gradient` field that the
  // conversion has shadowed; restore the backing field — verify.
  def gradient: Gradient = {
    return gradient
  }
  // NOTE(review): `new Nothing(...)` is a conversion artifact; the original
  // returned a Pair-like (gradient, score) container — restore the type.
  def gradientAndScore: Nothing = {
    return new Nothing(gradient, score)
  }
  /** Mini-batch size, taken from the first input array's leading dimension. */
  def batchSize: Int = {
    return inputs(0).size(0)
  }
  // NOTE(review): return type `Nothing` is a conversion artifact (originally
  // the layer/network configuration type) — verify.
  def conf: Nothing = {
    return defaultConfiguration
  }
  def setConf(conf: Nothing) {
    throw new UnsupportedOperationException
  }
  /** Single network input; only valid when the graph has exactly one input array. */
  def input: INDArray = {
    if (numInputArrays == 1) return (if (inputs != null) inputs(0) else null)
    else throw new UnsupportedOperationException("Cannot return single input: ComputationGraph has multiple inputs")
  }
  /** No-op: no input validation is performed for the graph. */
  def validateInput {
  }
  // NOTE(review): return type `Nothing` is a conversion artifact (originally
  // the ConvexOptimizer type) — verify.
  def getOptimizer: Nothing = {
    return solver.getOptimizer
  }
  def getParam(param: String): INDArray = {
    throw new UnsupportedOperationException("Not implemented")
  }
  def initParams {
    throw new UnsupportedOperationException("Not implemented")
  }
  /** Collects every layer's parameter table into one map, with keys prefixed
    * "layerName_paramType" (matching the key format parsed by setParam). */
  def paramTable: Map[String, INDArray] = {
    val allParams: Map[String, INDArray] = new LinkedHashMap[String, INDArray]
    for (layer <- layers) {
      val paramMap: Map[String, INDArray] = layer.paramTable
      import scala.collection.JavaConversions._
      for (entry <- paramMap.entrySet) {
        val newKey: String = layer.conf.getLayer.getLayerName + "_" + entry.getKey
        allParams.put(newKey, entry.getValue)
      }
    }
    return allParams
  }
  def setParamTable(paramTable: Map[String, INDArray]) {
    throw new UnsupportedOperationException("Not implemented")
  }
  /** Routes a single parameter array to the layer named before the first '_'
    * in the key; everything after the separator is the parameter type. */
  def setParam(key: String, `val`: INDArray) {
    val idx: Int = key.indexOf('_')
    if (idx == -1) throw new IllegalStateException("Invalid param key: not have layer separator: \\"" + key + "\\"")
    val layerName: String = key.substring(0, idx)
    val paramType: String = key.substring(idx + 1)
    getLayer(layerName).setParam(paramType, `val`)
  }
  /** Drops all cached inputs, labels, and mask arrays. */
  def clear {
    inputs = null
    labels = null
    inputMaskArrays = null
    labelMaskArrays = null
  }
/**
* If this ComputationGraph contains one or more RNN layers: conduct forward pass (prediction)
* but using previous stored state for any RNN layers. The activations for the final step are
* also stored in the RNN layers for use next time rnnTimeStep() is called.<br>
* This method can be used to generate output one or more steps at a time instead of always having to do
* forward pass from t=0. Example uses are for streaming data, and for generating samples from network output
* one step at a time (where samples are then fed back into the network as input)<br>
* If no previous state is present in RNN layers (i.e., initially or after calling rnnClearPreviousState()),
* the default initialization (usually 0) is used.<br>
* Supports mini-batch (i.e., multiple predictions/forward pass in parallel) as well as for single examples.<br>
*
* @param inputs Input to network. May be for one or multiple time steps. For single time step:
* input has shape [miniBatchSize,inputSize] or [miniBatchSize,inputSize,1]. miniBatchSize=1 for single example.<br>
* For multiple time steps: [miniBatchSize,inputSize,inputTimeSeriesLength]
* @return Output activations. If output is RNN layer (such as RnnOutputLayer): if all inputs have shape [miniBatchSize,inputSize]
* i.e., is 2d, then outputs have shape [miniBatchSize,outputSize] (i.e., also 2d) instead of [miniBatchSize,outputSize,1].<br>
* Otherwise output is 3d [miniBatchSize,outputSize,inputTimeSeriesLength] when using RnnOutputLayer (or unmodified otherwise).
*/
def rnnTimeStep(inputs: INDArray*): Array[INDArray] = {
this.inputs = inputs
var inputIs2d: Boolean = true
for (i <- inputs) {
if (i.rank != 2) {
inputIs2d = false
break //todo: break is not supported
}
}
val outputs: Array[INDArray] = new Array[INDArray](this.numOutputArrays)
for (currVertexIdx <- topologicalOrder) {
val current: GraphVertex = vertices(currVertexIdx)
if (current.isInputVertex) {
val inputsTo: Array[VertexIndices] = current.getOutputVertices
val input: INDArray = inputs(current.getVertexIndex)
for (v <- inputsTo) {
val vIdx: Int = v.getVertexIndex
val vIdxInputNum: Int = v.getVertexEdgeNumber
vertices(vIdx).setInput(vIdxInputNum, input.dup)
}
}
else {
var out: INDArray = null
if (current.hasLayer) {
val l: Nothing = current.getLayer
if (l.isInstanceOf[Nothing]) {
out = (l.asInstanceOf[Nothing]).rnnTimeStep(current.getInputs(0))
}
else if (l.isInstanceOf[Nothing]) {
out = (l.asInstanceOf[Nothing]).rnnTimeStep(current.getInputs(0))
}
else {
out = current.doForward(false)
}
}
else {
out = current.doForward(false)
}
if (current.isOutputVertex) {
val idx: Int = configuration.getNetworkOutputs.indexOf(current.getVertexName)
outputs(idx) = out
}
val outputsTo: Array[VertexIndices] = current.getOutputVertices
if (outputsTo != null) {
for (v <- outputsTo) {
val vIdx: Int = v.getVertexIndex
val inputNum: Int = v.getVertexEdgeNumber
vertices(vIdx).setInput(inputNum, out)
}
}
}
}
if (inputIs2d) {
{
var i: Int = 0
while (i < outputs.length) {
{
if (outputs(i).rank == 3 && outputs(i).size(2) == 1) {
outputs(i) = outputs(i).tensorAlongDimension(0, 1, 0)
}
}
({
i += 1; i - 1
})
}
}
}
this.inputs = null
return outputs
}
/**
* Get the state of the RNN layer, as used in {@link #rnnTimeStep(INDArray...)}.
*
* @param layer Number/index of the layer.
* @return Hidden state, or null if layer is not an RNN layer
*/
  /** Looks the layer up by index and delegates to the name-based overload. */
  def rnnGetPreviousState(layer: Int): Map[String, INDArray] = {
    return rnnGetPreviousState(layers(layer).conf.getLayer.getLayerName)
  }
  /** Get the state of the RNN layer, as used in rnnTimeStep(INDArray...).
    *
    * NOTE(review): `isInstanceOf[Nothing]` is a conversion artifact and is
    * always false, so this would always return null as written; the original
    * RNN-layer interface type must be restored — verify.
    *
    * @param layerName name of the layer
    * @return Hidden state, or null if layer is not an RNN layer
    */
  def rnnGetPreviousState(layerName: String): Map[String, INDArray] = {
    val l: Nothing = verticesMap.get(layerName).getLayer
    if (l == null || !(l.isInstanceOf[Nothing])) return null
    return (l.asInstanceOf[Nothing]).rnnGetPreviousState
  }
  /** Get a map of states for ALL RNN layers, keyed by layer name; layers that
    * are not RNN layers have no entry. See rnnSetPreviousStates(Map). */
  def rnnGetPreviousStates: Map[String, Map[String, INDArray]] = {
    val states: Map[String, Map[String, INDArray]] = new HashMap[String, Map[String, INDArray]]
    for (l <- layers) {
      if (l.isInstanceOf[Nothing]) {
        states.put(l.conf.getLayer.getLayerName, (l.asInstanceOf[Nothing]).rnnGetPreviousState)
      }
    }
    return states
  }
  /** Set the state of an RNN layer by index, for use in rnnTimeStep(INDArray...).
    *
    * @param layer The number/index of the layer.
    * @param state The state to set the specified layer to
    */
  def rnnSetPreviousState(layer: Int, state: Map[String, INDArray]) {
    rnnSetPreviousState(layers(layer).conf.getLayer.getLayerName, state)
  }
  /** Set the state of an RNN layer by name, for use in rnnTimeStep(INDArray...).
    *
    * NOTE(review): `isInstanceOf[Nothing]` is a conversion artifact (always
    * false), so this would always throw as written; restore the original
    * RNN-layer interface type — verify.
    *
    * @param layerName The name of the layer.
    * @param state The state to set the specified layer to
    */
  def rnnSetPreviousState(layerName: String, state: Map[String, INDArray]) {
    val l: Nothing = verticesMap.get(layerName).getLayer
    if (l == null || !(l.isInstanceOf[Nothing])) {
      throw new UnsupportedOperationException("Layer \\"" + layerName + "\\" is not a recurrent layer. Cannot set state")
    }
    (l.asInstanceOf[Nothing]).rnnSetPreviousState(state)
  }
  /** Set the states for all RNN layers at once.
    *
    * @param previousStates previous time step states (key: layer name, value: layer states)
    * @see #rnnGetPreviousStates()
    */
  def rnnSetPreviousStates(previousStates: Map[String, Map[String, INDArray]]) {
    import scala.collection.JavaConversions._
    for (entry <- previousStates.entrySet) {
      rnnSetPreviousState(entry.getKey, entry.getValue)
    }
  }
  /** Clear the stored state of all RNN layers (no-op for non-RNN layers). */
  def rnnClearPreviousState {
    if (layers == null) return
    for (layer <- layers) {
      if (layer.isInstanceOf[Nothing]) (layer.asInstanceOf[Nothing]).rnnClearPreviousState
      else if (layer.isInstanceOf[Nothing]) {
        (layer.asInstanceOf[Nothing]).rnnClearPreviousState
      }
    }
  }
/**
* Fit the network using truncated BPTT
*/
protected def doTruncatedBPTT(inputs: Array[INDArray], labels: Array[INDArray], featureMasks: Array[INDArray], labelMasks: Array[INDArray]) {
if (flattenedGradients == null) initGradientsView
var timeSeriesLength: Int = -1
for (in <- inputs) {
if (in.rank != 3) continue //todo: continue is not supported
if (timeSeriesLength == -1) timeSeriesLength = in.size(2)
else if (timeSeriesLength != in.size(2)) {
ComputationGraph.log.warn("Cannot do TBPTT with time series of different lengths")
return
}
}
for (out <- labels) {
if (out.rank != 3) continue //todo: continue is not supported
if (timeSeriesLength == -1) timeSeriesLength = out.size(2)
else if (timeSeriesLength != out.size(2)) {
ComputationGraph.log.warn("Cannot do TBPTT with time series of different lengths")
return
}
}
val fwdLen: Int = configuration.getTbpttFwdLength
if (fwdLen > timeSeriesLength) {
ComputationGraph.log.warn("Cannot do TBPTT: Truncated BPTT forward length (" + fwdLen + ") > input time series length (" + timeSeriesLength + ")")
return
}
val nSubsets: Int = timeSeriesLength / fwdLen
rnnClearPreviousState
val newInputs: Array[INDArray] = new Array[INDArray](inputs.length)
val newLabels: Array[INDArray] = new Array[INDArray](labels.length)
val newFeatureMasks: Array[INDArray] = (if (featureMasks != null) new Array[INDArray](featureMasks.length) else null)
val newLabelMasks: Array[INDArray] = (if (labelMasks != null) new Array[INDArray](labelMasks.length) else null)
{
var i: Int = 0
while (i < nSubsets) {
{
val startTimeIdx: Int = i * fwdLen
val endTimeIdx: Int = startTimeIdx + fwdLen
{
var j: Int = 0
while (j < inputs.length) {
{
if (inputs(j).rank != 3) newInputs(j) = inputs(j)
else {
newInputs(j) = inputs(j).get(NDArrayIndex.all, NDArrayIndex.all, NDArrayIndex.interval(startTimeIdx, endTimeIdx))
}
}
({
j += 1; j - 1
})
}
}
{
var j: Int = 0
while (j < labels.length) {
{
if (labels(j).rank != 3) newLabels(j) = labels(j)
else {
newLabels(j) = labels(j).get(NDArrayIndex.all, NDArrayIndex.all, NDArrayIndex.interval(startTimeIdx, endTimeIdx))
}
}
({
j += 1; j - 1
})
}
}
if (featureMasks != null) {
{
var j: Int = 0
while (j < featureMasks.length) {
{
if (featureMasks(j) == null) continue //todo: continue is not supported
newFeatureMasks(j) = featureMasks(j).get(NDArrayIndex.all, NDArrayIndex.interval(startTimeIdx, endTimeIdx))
}
({
j += 1; j - 1
})
}
}
}
if (labelMasks != null) {
{
var j: Int = 0
while (j < labelMasks.length) {
{
if (labelMasks(j) == null) continue //todo: continue is not supported
newLabelMasks(j) = labelMasks(j).get(NDArrayIndex.all, NDArrayIndex.interval(startTimeIdx, endTimeIdx))
}
({
j += 1; j - 1
})
}
}
}
setInputs(newInputs)
setLabels(newLabels)
setLayerMaskArrays(newFeatureMasks, newLabelMasks)
if (solver == null) {
solver = new Nothing().configure(conf).listeners(getListeners).model(this).build
}
solver.optimize
rnnUpdateStateWithTBPTTState
}
({
i += 1; i - 1
})
}
}
rnnClearPreviousState
}
/**
* Similar to rnnTimeStep and feedForward() methods. Difference here is that this method:<br>
* (a) like rnnTimeStep does forward pass using stored state for RNN layers, and<br>
* (b) unlike rnnTimeStep does not modify the RNN layer state<br>
* Therefore multiple calls to this method with the same input should have the same output.<br>
* Typically used during training only. Use rnnTimeStep for prediction/forward pass at test time.
*
* @param inputs Input to network
* @param training Whether training or not
* @param storeLastForTBPTT set to true if used as part of truncated BPTT training
* @return Activations for each layer (including input, as per feedforward() etc)
*/
  /** Forward pass using stored RNN state, without modifying that state
    * (unlike rnnTimeStep). Typically used during TBPTT training.
    *
    * NOTE(review): the `Nothing` annotations/instance checks below are
    * conversion artifacts; the original RNN-layer interface types must be
    * restored for those branches to be reachable — verify.
    *
    * @param inputs Input to network
    * @param training Whether training or not
    * @param storeLastForTBPTT set to true if used as part of truncated BPTT training
    * @return Activations for each vertex, keyed by vertex name (including inputs)
    */
  def rnnActivateUsingStoredState(inputs: Array[INDArray], training: Boolean, storeLastForTBPTT: Boolean): Map[String, INDArray] = {
    val layerActivations: Map[String, INDArray] = new HashMap[String, INDArray]
    for (currVertexIdx <- topologicalOrder) {
      val current: GraphVertex = vertices(currVertexIdx)
      if (current.isInputVertex) {
        // Record the raw input and fan it out to all consuming vertices.
        val inputsTo: Array[VertexIndices] = current.getOutputVertices
        val input: INDArray = inputs(current.getVertexIndex)
        layerActivations.put(current.getVertexName, input)
        for (v <- inputsTo) {
          val vIdx: Int = v.getVertexIndex
          val vIdxInputNum: Int = v.getVertexEdgeNumber
          vertices(vIdx).setInput(vIdxInputNum, input.dup)
        }
      }
      else {
        var out: INDArray = null
        if (current.hasLayer) {
          val l: Nothing = current.getLayer
          if (l.isInstanceOf[Nothing]) {
            out = (l.asInstanceOf[Nothing]).rnnActivateUsingStoredState(current.getInputs(0), training, storeLastForTBPTT)
          }
          else if (l.isInstanceOf[Nothing]) {
            val temp: List[INDArray] = (l.asInstanceOf[Nothing]).rnnActivateUsingStoredState(current.getInputs(0), training, storeLastForTBPTT)
            out = temp.get(temp.size - 1)
          }
          else {
            out = current.doForward(training)
          }
          layerActivations.put(current.getVertexName, out)
        }
        else {
          out = current.doForward(training)
        }
        // Feed activations forward to all consumers of this vertex.
        val outputsTo: Array[VertexIndices] = current.getOutputVertices
        if (outputsTo != null) {
          for (v <- outputsTo) {
            val vIdx: Int = v.getVertexIndex
            val inputNum: Int = v.getVertexEdgeNumber
            vertices(vIdx).setInput(inputNum, out)
          }
        }
      }
    }
    return layerActivations
  }
/**
* Set the mask arrays for features and labels. Mask arrays are typically used in situations such as one-to-many
* and many-to-one learning with recurrent neural networks, as well as for supporting time series of varying lengths
* within the same minibatch.<br>
* For example, with RNN data sets with input of shape [miniBatchSize,nIn,timeSeriesLength] and outputs of shape
* [miniBatchSize,nOut,timeSeriesLength], the features and mask arrays will have shape [miniBatchSize,timeSeriesLength]
* and contain values 0 or 1 at each element (to specify whether a given input/example is present - or merely padding -
* at a given time step).<br>
* <b>NOTE</b>: This method is not usually used directly. Instead, the various feedForward and fit methods handle setting
* of masking internally.
*
* @param featureMaskArrays Mask array for features (input)
* @param labelMaskArrays Mask array for labels (output)
* @see #clearLayerMaskArrays()
*/
def setLayerMaskArrays(featureMaskArrays: Array[INDArray], labelMaskArrays: Array[INDArray]) {
this.inputMaskArrays = featureMaskArrays
this.labelMaskArrays = labelMaskArrays
if (featureMaskArrays != null) {
if (featureMaskArrays.length != numInputArrays) {
throw new IllegalArgumentException("Invalid number of feature mask arrays")
}
{
var i: Int = 0
while (i < featureMaskArrays.length) {
{
val inputName: String = configuration.getNetworkInputs.get(i)
val reshapedFeaturesMask: INDArray = TimeSeriesUtils.reshapeTimeSeriesMaskToVector(featureMaskArrays(i))
val stack: LinkedList[String] = new LinkedList[String]
val gv: GraphVertex = verticesMap.get(inputName)
var outputsFromThisInput: Array[VertexIndices] = gv.getOutputVertices
for (v <- outputsFromThisInput) {
stack.addLast(vertices(v.getVertexIndex).getVertexName)
}
while (!stack.isEmpty) {
val nextVertexName: String = stack.removeLast
val nextVertex: GraphVertex = verticesMap.get(nextVertexName)
if (nextVertex.hasLayer) {
val l: Nothing = nextVertex.getLayer
if (l.isInstanceOf[Nothing]) {
continue //todo: continue is not supported
}
else if (l.`type` eq Layer.Type.FEED_FORWARD || l.`type` eq Layer.Type.CONVOLUTIONAL) {
l.setMaskArray(reshapedFeaturesMask)
}
}
outputsFromThisInput = nextVertex.getOutputVertices
if (outputsFromThisInput != null) {
for (v <- outputsFromThisInput) {
stack.addLast(vertices(v.getVertexIndex).getVertexName)
}
}
}
}
({
i += 1; i - 1
})
}
}
}
if (labelMaskArrays != null) {
if (labelMaskArrays.length != numOutputArrays) {
throw new IllegalArgumentException("Invalid number of label mask arrays")
}
{
var i: Int = 0
while (i < labelMaskArrays.length) {
{
val outputName: String = configuration.getNetworkOutputs.get(i)
val v: GraphVertex = verticesMap.get(outputName)
val ol: Nothing = v.getLayer
ol.setMaskArray(labelMaskArrays(i))
}
({
i += 1; i - 1
})
}
}
}
}
/**
* Remove the mask arrays from all layers.<br>
* See {@link #setLayerMaskArrays(INDArray[], INDArray[])} for details on mask arrays.
*/
def clearLayerMaskArrays {
for (layer <- layers) {
layer.setMaskArray(null)
}
this.inputMaskArrays = null
this.labelMaskArrays = null
}
/**
* Update the internal state of RNN layers after a truncated BPTT fit call
*/
  /** Copies each RNN layer's TBPTT state into its stored previous state after
    * a truncated-BPTT fit, so the next window continues from where this one ended.
    *
    * NOTE(review): both `isInstanceOf[Nothing]` branches are conversion
    * artifacts (always false); the original distinguished recurrent layers
    * from nested MultiLayerNetwork layers — restore the types to compile.
    */
  protected def rnnUpdateStateWithTBPTTState {
    {
      var i: Int = 0
      while (i < layers.length) {
        {
          if (layers(i).isInstanceOf[Nothing]) {
            val l: Nothing = (layers(i).asInstanceOf[Nothing])
            l.rnnSetPreviousState(l.rnnGetTBPTTState)
          }
          else if (layers(i).isInstanceOf[Nothing]) {
            (layers(i).asInstanceOf[Nothing]).updateRnnStateWithTBPTTState
          }
        }
        // Converted Java post-increment; only the `i += 1` side effect matters.
        ({
          i += 1; i - 1
        })
      }
    }
  }
} | Mageswaran1989/aja | src/main/scala/org/aja/dhira/src/main/scala/org/dhira/core/nnet/graph/ComputationGraph.scala | Scala | apache-2.0 | 75,841 |
// Copyright (C) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in project root for information.
package com.microsoft.ml.spark
import java.util.UUID
import com.microsoft.ml.spark.schema.{SchemaConstants, SparkSchema}
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.ml.param._
import org.apache.spark.ml.regression._
import org.apache.spark.ml.util._
import org.apache.spark.ml._
import org.apache.spark.sql._
import org.apache.spark.sql.types._
import scala.reflect.runtime.universe.{TypeTag, typeTag}
/** Trains a regression model. */
class TrainRegressor(override val uid: String) extends Estimator[TrainedRegressorModel]
  with HasLabelCol with MMLParams {
  def this() = this(Identifiable.randomUID("TrainRegressor"))
  /** Regressor to run.
    * @group param
    */
  val model = new EstimatorParam(this, "model", "Regressor to run")
  /** @group getParam */
  def getModel: Estimator[_ <: Model[_]] = $(model)
  /** @group setParam */
  def setModel(value: Estimator[_ <: Model[_]]): this.type = set(model, value)
  // Per-instance column name: avoids collisions when several TrainRegressors
  // operate on the same dataset.
  val featuresColumn = this.uid + "_features"
  /** Number of features to hash to (0 means "choose a default based on the learner").
    * @group param
    */
  val numFeatures = IntParam(this, "numFeatures", "number of features to hash to", 0)
  /** @group getParam */
  def getNumFeatures: Int = $(numFeatures)
  /** @group setParam */
  def setNumFeatures(value: Int): this.type = set(numFeatures, value)
  /** Fits the regression model: featurizes the input columns, trains the
    * configured learner, and wraps both in a single pipeline model.
    *
    * @param dataset The input dataset to train.
    * @return The trained regression model.
    */
  override def fit(dataset: Dataset[_]): TrainedRegressorModel = {
    val labelColumn = getLabelCol
    var oneHotEncodeCategoricals = true
    // Tree learners handle categoricals natively, so skip one-hot encoding
    // and use the tree-oriented default feature count.
    val numFeatures: Int = getModel match {
      case _: DecisionTreeRegressor | _: GBTRegressor | _: RandomForestRegressor =>
        oneHotEncodeCategoricals = false
        FeaturizeUtilities.numFeaturesTreeOrNNBased
      case _ =>
        FeaturizeUtilities.numFeaturesDefault
    }
    val regressor = getModel match {
      case predictor: Predictor[_, _, _] => {
        predictor
          .setLabelCol(labelColumn)
          .setFeaturesCol(featuresColumn).asInstanceOf[Estimator[_ <: PipelineStage]]
      }
      case default@defaultType if defaultType.isInstanceOf[Estimator[_ <: PipelineStage]] => {
        // assume label col and features col already set
        default
      }
      case _ => throw new Exception("Unsupported learner type " + getModel.getClass.toString)
    }
    // Explicit user setting wins over the learner-derived default.
    val featuresToHashTo =
      if (getNumFeatures != 0) {
        getNumFeatures
      } else {
        numFeatures
      }
    // TODO: Handle DateType, TimestampType and DecimalType for label
    // Convert the label column during train to the correct type and drop missings
    val convertedLabelDataset = dataset.withColumn(labelColumn,
      dataset.schema(labelColumn).dataType match {
        case _: IntegerType |
             _: BooleanType |
             _: FloatType |
             _: ByteType |
             _: LongType |
             _: ShortType => {
          dataset(labelColumn).cast(DoubleType)
        }
        case _: StringType => {
          throw new Exception("Invalid type: Regressors are not able to train on a string label column: " + labelColumn)
        }
        case _: DoubleType => {
          dataset(labelColumn)
        }
        case default => throw new Exception("Unknown type: " + default.typeName + ", for label column: " + labelColumn)
      }
    ).na.drop(Seq(labelColumn))
    // Every remaining column is treated as a feature.
    val featureColumns = convertedLabelDataset.columns.filter(col => col != labelColumn).toSeq
    val featurizer = new Featurize()
      .setFeatureColumns(Map(featuresColumn -> featureColumns))
      .setOneHotEncodeCategoricals(oneHotEncodeCategoricals)
      .setNumberOfFeatures(featuresToHashTo)
    val featurizedModel = featurizer.fit(convertedLabelDataset)
    val processedData = featurizedModel.transform(convertedLabelDataset)
    // Cache across the learner's (typically multi-pass) fit.
    processedData.cache()
    // Train the learner
    val fitModel = regressor.fit(processedData)
    processedData.unpersist()
    // Note: The fit shouldn't do anything here
    val pipelineModel = new Pipeline().setStages(Array(featurizedModel, fitModel)).fit(convertedLabelDataset)
    new TrainedRegressorModel(uid, labelColumn, pipelineModel, featuresColumn)
  }
  override def copy(extra: ParamMap): Estimator[TrainedRegressorModel] = defaultCopy(extra)
  @DeveloperApi
  override def transformSchema(schema: StructType): StructType = TrainRegressor.validateTransformSchema(schema)
}
/** Companion object: provides MLReadable support and shared schema validation. */
object TrainRegressor extends DefaultParamsReadable[TrainRegressor] {
  /** Returns the input schema extended with the double-typed scores column. */
  def validateTransformSchema(schema: StructType): StructType = {
    val scoresField = StructField(SchemaConstants.ScoresColumn, DoubleType)
    StructType(schema.fields :+ scoresField)
  }
}
/** Model produced by [[TrainRegressor]].
  * @param uid The id of the module
  * @param labelColumn The label column
  * @param model The trained pipeline (featurizer + fitted learner)
  * @param featuresColumn The features column
  */
class TrainedRegressorModel(val uid: String,
                            val labelColumn: String,
                            val model: PipelineModel,
                            val featuresColumn: String)
    extends Model[TrainedRegressorModel] with ConstructorWritable[TrainedRegressorModel] {
  // Required by ConstructorWritable for serialization of constructor arguments.
  val ttag: TypeTag[TrainedRegressorModel] = typeTag[TrainedRegressorModel]
  val objectsToSave: List[Any] = List(uid, labelColumn, model, featuresColumn)
  override def copy(extra: ParamMap): TrainedRegressorModel =
    new TrainedRegressorModel(
      uid, labelColumn, model.copy(extra), featuresColumn)
  /** Scores the dataset with the wrapped pipeline and tags the output schema. */
  override def transform(dataset: Dataset[_]): DataFrame = {
    // re-featurize and score the data
    val scoredData = model.transform(dataset)
    // Drop the vectorized features column
    val cleanedScoredData = scoredData.drop(featuresColumn)
    // Update the schema - TODO: create method that would generate GUID and add it to the scored model
    val moduleName = SchemaConstants.ScoreModelPrefix + UUID.randomUUID().toString
    val labelColumnExists = cleanedScoredData.columns.contains(labelColumn)
    // Tag the label column only if it survived scoring (it may be absent at inference time).
    val schematizedScoredDataWithLabel =
      if (!labelColumnExists) cleanedScoredData
      else SparkSchema.setLabelColumnName(
        cleanedScoredData, moduleName, labelColumn, SchemaConstants.RegressionKind)
    // Rename Spark's "prediction" column to the MMLSpark scores column and tag it.
    SparkSchema.setScoresColumnName(
      schematizedScoredDataWithLabel.withColumnRenamed(
        SchemaConstants.SparkPredictionColumn,
        SchemaConstants.ScoresColumn),
      moduleName,
      SchemaConstants.ScoresColumn,
      SchemaConstants.RegressionKind)
  }
  @DeveloperApi
  override def transformSchema(schema: StructType): StructType =
    TrainRegressor.validateTransformSchema(schema)
  /** Parameters of the fitted learner (the last pipeline stage). */
  def getParamMap: ParamMap = model.stages.last.extractParamMap()
}
/** Companion providing deserialization via [[ConstructorReadable]]. */
object TrainedRegressorModel extends ConstructorReadable[TrainedRegressorModel]
| rastala/mmlspark | src/train-regressor/src/main/scala/TrainRegressor.scala | Scala | mit | 6,966 |
package org.retistruen
/** A node that both receives data of type T and emits data of type R. */
trait Functor[T, R] extends Receiver[T] with Emitter[R]
/** Functor that maps each received datum independently through [[operate]]. */
trait SimpleFunctor[T, R] extends Functor[T, R] with CachingEmitter[R] {
  // Transformation applied to each incoming datum.
  protected def operate(datum: Datum[T]): Datum[R]
  def receive(emitter: Emitter[T], datum: Datum[T]) = emit(operate(datum))
}
/** Functor that recomputes its output from the whole sliding window on every
  * received datum. */
trait SlidingFunctor[T, R] extends Functor[T, R] with SlidingReceiver[T] with CachingEmitter[R] with Reset {
  // Transformation applied to the current window contents.
  protected def operate(data: Seq[Datum[T]]): Datum[R]
  override def receive(emitter: Emitter[T], datum: Datum[T]) = {
    // Let SlidingReceiver push the datum into the window first, then re-emit.
    super.receive(emitter, datum)
    emit(operate(window))
  }
  override def reset {
    // Clear both the sliding window and the cached emitted value.
    super[SlidingReceiver].reset
    super[CachingEmitter].reset
  }
}
/** Sliding functor that emits the window itself, unchanged. */
trait SlidingCollectorFunctor[T] extends SlidingFunctor[T, Seq[Datum[T]]] {
  protected def operate(data: Seq[Datum[T]]): Datum[Seq[Datum[T]]] =
    Datum(data)
}
| plalloni/retistruen | src/main/scala/org/retistruen/Functor.scala | Scala | mit | 843 |
/*
* Copyright 2013 Akira Ueda
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.physalis.shirahae
import scala.language.reflectiveCalls
/** Loan-pattern helper: runs a function against a closeable resource and
  * guarantees `close()` is invoked afterwards, whether or not `f` throws. */
trait Using {
  def using[A <: { def close() }, B](resource: A)(f: A => B): B =
    try {
      f(resource)
    } finally {
      resource.close()
    }
}
| akr4/shirahae-sql | src/main/scala/util.scala | Scala | apache-2.0 | 794 |
package org.embulk.parser.xpath2
import java.io.{File, FileInputStream}
import java.nio.file
import java.nio.file.Paths
import org.embulk.EmbulkTestRuntime
import org.embulk.config.{ConfigLoader, ConfigSource, TaskSource}
import org.embulk.spi.json.JsonParser
import org.embulk.spi.util.InputStreamFileInput
import org.embulk.spi.{Exec, _}
import org.junit.Assert._
import org.junit.{Rule, Test}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
class XPath2ParserPluginJsonSpec {
  // NOTE(review): JUnit @Rule normally requires a public field or a method
  // returning a TestRule that yields a stable instance; as a def this creates
  // a fresh EmbulkTestRuntime per access — confirm this is intended.
  @Rule
  def runtime = new EmbulkTestRuntime
  // Test fixtures resolved from the plugin's classpath resources.
  val yamlPath: file.Path = Paths.get(classOf[XPath2ParserPlugin].getClassLoader.getResource("json_config.yml").toURI)
  val dataPath: String = classOf[XPath2ParserPlugin].getClassLoader.getResource("json_data.xml").getPath
  // Parser section of the embulk config ("in.parser" in json_config.yml).
  def configSource: ConfigSource = new ConfigLoader(Exec.getModelManager).fromYamlFile(yamlPath.toFile).getNested("in").getNested("parser")
  /** Parses the sample XML and checks the nested JSON column round-trips. */
  @Test def testParseJsonArrayXML() {
    val cs = configSource
    val task = cs.loadConfig(classOf[PluginTask])
    var schema: Schema = null
    val plugin = new XPath2ParserPlugin()
    // transaction() derives the output schema; capture it for run().
    plugin.transaction(cs, (_: TaskSource, s: Schema) => {schema = s})
    val result: mutable.Buffer[collection.mutable.Map[String, Any]] = mutable.Buffer()
    plugin.run(
      task.dump(),
      schema,
      new InputStreamFileInput(Exec.getBufferAllocator, new FileInputStream(new File(dataPath))),
      new TestTransactionalPageOutput(schema, result)
    )
    println(result)
    val expectedJson =
      """{
        "list": [
          {
            "elements": [
              {
                "elementActive": true,
                "elementName": "foo1",
                "elementValue": 1
              },
              {
                "elementActive": false,
                "elementName": "foo2",
                "elementValue": 2
              }
            ]
          },
          {
            "elements": [
              {
                "elementActive": true,
                "elementName": "bar1",
                "elementValue": 3
              }
            ]
          }
        ]
      }"""
    // Compare against the structurally parsed JSON, so whitespace is irrelevant.
    assertEquals(ArrayBuffer(
      Map(
        "id" -> 1L,
        "list" -> new JsonParser().parse(expectedJson)
      )
    ), result)
  }
}
| maji-KY/embulk-parser-xpath2 | src/test/scala/org/embulk/parser/xpath2/XPath2ParserPluginJsonSpec.scala | Scala | gpl-2.0 | 2,105 |
package sbt
package internal
package server
import java.io._
import java.net._
import java.nio.file._
import java.util.concurrent._
import scala.collection.mutable
import xsbti._
import sbt.io.IO
import sbt.internal.util._
import sbt.internal.BuildStreams.{ Streams => _, _ }
import sbt.internal.Load._
import sbt.util._
import sbt.BuildPaths._
import sbt.Def.{ ScopeLocal, ScopedKey, Setting }
import sbt.Keys._
object SettingQueryTest extends org.specs2.mutable.Specification {
  /** Adds a `/` segment-append operator to java.nio Path; "." is a no-op. */
  implicit class PathOps(val path: Path) extends AnyVal {
    def /(other: String): Path = if (other == ".") path else path resolve other
  }
  // Scratch directories for an isolated sbt universe. NOTE(review): these temp
  // dirs are never deleted and will accumulate across runs — confirm cleanup policy.
  val baseDir: Path = Files createTempDirectory "sbt-setting-query-test"
  val globalDir: Path = Files createTempDirectory "sbt-setting-query-test-global-dir"
  val bootDir: Path = Files createTempDirectory "sbt-setting-query-test-boot-dir"
  val ivyHome: Path = Files createTempDirectory "sbt-setting-query-test-ivy-home"
  val logFile: File = File.createTempFile("sbt", ".log")
  val baseFile: File = baseDir.toFile
  val baseUri: URI = IO directoryURI baseFile
  IO assertAbsolute baseUri
  val globalDirFile: File = globalDir.toFile
  // Shadows Predef.??? to dump the stack before throwing, so "hit an
  // unimplemented stub" failures are easy to locate in test output.
  def ??? : Nothing = { Thread.dumpStack(); throw new NotImplementedError }
  // Class loader with no URLs and no parent: deliberately resolves nothing.
  val noopLoader: ClassLoader = new URLClassLoader(Array(), null)
  // GlobalLock that performs no locking; sufficient for single-threaded tests.
  object NoGlobalLock extends GlobalLock {
    def apply[T](lockFile: File, run: Callable[T]) = run.call()
  }
lazy val structure: BuildStructure = {
val projectSettings: Seq[Setting[_]] = Seq(scalaVersion := "2.12.1")
val appConfig: AppConfiguration = new AppConfiguration {
def baseDirectory(): File = baseFile
def arguments(): Array[String] = Array()
def provider(): AppProvider = new AppProvider {
def scalaProvider(): ScalaProvider = new ScalaProvider { scalaProvider =>
def launcher(): Launcher = new Launcher {
def getScala(version: String): ScalaProvider = getScala(version, "")
def getScala(version: String, reason: String): ScalaProvider =
getScala(version, reason, "org.scala-lang")
def getScala(version: String, reason: String, scalaOrg: String): ScalaProvider =
scalaProvider
def app(id: ApplicationID, version: String): AppProvider = ???
def topLoader(): ClassLoader = noopLoader
def globalLock(): GlobalLock = NoGlobalLock
def bootDirectory(): File = bootDir.toFile
def ivyRepositories(): Array[Repository] = Array()
def appRepositories(): Array[Repository] = Array()
def isOverrideRepositories: Boolean = false
def ivyHome(): File = SettingQueryTest.this.ivyHome.toFile
def checksums(): Array[String] = Array()
}
def version(): String = "2.12.1"
def loader(): ClassLoader = noopLoader
def jars(): Array[File] = Array(libraryJar, compilerJar)
def libraryJar(): File = new File("scala-library.jar")
def compilerJar(): File = new File("scala-compiler.jar")
def app(id: ApplicationID): AppProvider = ???
}
def id(): ApplicationID = sbt.ApplicationID(
"org.scala-sbt",
"sbt",
"0.13.13",
"sbt.xMain",
components = Seq(),
crossVersionedValue = CrossValue.Disabled,
extra = Seq()
)
def loader(): ClassLoader = noopLoader
def entryPoint(): Class[_] = ???
def mainClass(): Class[_ <: AppMain] = ???
def newMain(): AppMain = ???
def mainClasspath(): Array[File] = Array()
def components(): ComponentProvider = new ComponentProvider {
def componentLocation(id: String): File = ???
def component(componentID: String): Array[File] = ???
def defineComponent(componentID: String, components: Array[File]): Unit = ???
def addToComponent(componentID: String, components: Array[File]): Boolean = ???
def lockFile(): File = ???
}
}
}
val state: State =
StandardMain
.initialState(appConfig, initialDefinitions = Seq(), preCommands = Seq())
.put(globalBaseDirectory, globalDirFile)
val config0 = defaultPreGlobal(state, baseFile, globalDirFile, state.log)
val config = defaultWithGlobal(state, baseFile, config0, globalDirFile, state.log)
val buildUnit: BuildUnit = {
val loadedPlugins: LoadedPlugins =
noPlugins(projectStandard(baseFile),
config.copy(pluginManagement = config.pluginManagement.forPlugin))
val project: Project = {
val project0 = Project("t", baseFile) settings projectSettings
val fileToLoadedSbtFileMap = new mutable.HashMap[File, LoadedSbtFile]
val autoPlugins = loadedPlugins.detected.deducePluginsFromProject(project0, state.log)
val injectSettings = config.injectSettings
resolveProject(project0,
autoPlugins,
loadedPlugins,
injectSettings,
fileToLoadedSbtFileMap,
state.log)
}
val projects: Seq[Project] = Seq(project)
val builds: Seq[BuildDef] = BuildDef.defaultAggregated(project.id, Nil) :: Nil
val defs: LoadedDefinitions =
new LoadedDefinitions(baseFile, Nil, noopLoader, builds, projects, Nil)
new BuildUnit(baseUri, baseFile, defs, loadedPlugins)
}
val (partBuildUnit: PartBuildUnit, projectRefs: List[ProjectReference]) = loaded(buildUnit)
val partBuildUnits: Map[URI, PartBuildUnit] = Map(buildUnit.uri -> partBuildUnit)
val allProjectRefs: Map[URI, List[ProjectReference]] = Map(buildUnit.uri -> projectRefs)
checkAll(allProjectRefs, partBuildUnits)
val partBuild: PartBuild = new PartBuild(baseUri, partBuildUnits)
val loadedBuild: LoadedBuild = resolveProjects(partBuild)
val units: Map[URI, LoadedBuildUnit] = loadedBuild.units
val settings: Seq[Setting[_]] = finalTransforms(
buildConfigurations(loadedBuild, getRootProject(units), config.injectSettings))
val delegates: Scope => Seq[Scope] = defaultDelegates(loadedBuild)
val scopeLocal: ScopeLocal = EvaluateTask.injectStreams
val display: Show[ScopedKey[_]] = Project showLoadingKey loadedBuild
val data: Settings[Scope] = Def.make(settings)(delegates, scopeLocal, display)
val extra: KeyIndex => BuildUtil[_] = index => BuildUtil(baseUri, units, index, data)
val index: StructureIndex = structureIndex(data, settings, extra, units)
val streams: State => Streams = mkStreams(units, baseUri, data)
val structure: BuildStructure =
new BuildStructure(units, baseUri, settings, data, index, streams, delegates, scopeLocal)
structure
}
def query(setting: String): String = {
import sbt.protocol._
val req: SettingQuery = protocol.SettingQuery(setting)
val rsp: SettingQueryResponse = server.SettingQuery.handleSettingQuery(req, structure)
val bytes: Array[Byte] = Serialization serializeEventMessage rsp
val payload: String = new String(bytes, java.nio.charset.StandardCharsets.UTF_8)
payload
}
// -.- avoid specs2's ko/ok
import org.specs2.matcher.MatchResult
def qok(x: String, t: String): String => MatchResult[Any] =
query(_) must_== """{"type":"SettingQuerySuccess","value":""" + x + ""","contentType":"""" + t + """"}"""
def qko(msg: String): String => MatchResult[Any] =
query(_) must_== """{"type":"SettingQueryFailure","message":"""" + msg + """"}"""
"setting query" should {
"t/scalaVersion" in qok("\\"2.12.1\\"", "java.lang.String")
// "t/pollInterval" in qok("500", "Int")
"t/sourcesInBase" in qok("true", "Boolean")
"t/startYear" in qok("null", "scala.Option[Int]")
"t/scalaArtifacts" in qok(
"""["scala-library","scala-compiler","scala-reflect","scala-actors","scalap"]""",
"scala.collection.Seq[java.lang.String]")
"t/libraryDependencies" in qok(
"""[{"organization":"org.scala-lang","name":"scala-library","revision":"2.12.1","isChanging":false,"isTransitive":true,"isForce":false,"crossVersion":{"type":"Disabled"}}]""",
"scala.collection.Seq[sbt.librarymanagement.ModuleID]"
)
"scalaVersion" in qko("Not a valid project ID: scalaVersion\\\\nscalaVersion\\\\n ^")
"t/scalacOptions" in qko(
s"Key {$baseUri}t/compile:scalacOptions is a task, can only query settings")
"t/fooo" in qko(
"Expected ':' (if selecting a configuration)\\\\nNot a valid key: fooo (similar: fork)\\\\nt/fooo\\\\n ^")
}
}
| Duhemm/sbt | main/src/test/scala/sbt/internal/server/SettingQueryTest.scala | Scala | bsd-3-clause | 8,616 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils
import java.io.{BufferedInputStream, FileInputStream}
import java.nio.ByteBuffer
import java.nio.file.{Files, Paths}
object RandomGenerator {
  // One generator per thread: RandomGenerator keeps mutable state and is not
  // thread safe, so each thread lazily receives its own instance. Using
  // initialValue removes the explicit null-check lazy-init pattern and means
  // generators.get() can never observe null.
  val generators: ThreadLocal[RandomGenerator] = new ThreadLocal[RandomGenerator]() {
    override def initialValue(): RandomGenerator = new RandomGenerator()
  }

  // scalastyle:off methodName
  /** Returns the calling thread's generator, creating it on first use. */
  def RNG: RandomGenerator = generators.get()
  // scalastyle:on methodName

  /** In-place Fisher-Yates shuffle driven by the thread-local RNG.
    *
    * @param data array to shuffle; mutated in place
    * @return the same `data` array, for chaining
    */
  def shuffle[T](data: Array[T]): Array[T] = {
    var i = 0
    val length = data.length
    while (i < length) {
      // Pick a uniform position in the not-yet-fixed suffix [i, length).
      val exchange = RNG.uniform(0, length - i).toInt + i
      val tmp = data(exchange)
      data(exchange) = data(i)
      data(i) = tmp
      i += 1
    }
    data
  }
}
/**
 * A mersenne twister based fake random number generator.
 * Please refer https://en.wikipedia.org/wiki/Mersenne_Twister.
 * Note that it has its own state so it is not thread safe.
 * So you should use RandomGenerator.RNG to get a thread local instance to use.
 * That's thread-safe.
 */
class RandomGenerator private[bigdl]() {
  // MT19937 parameters (state size, shift offset, twist matrix, bit masks).
  private val MERSENNE_STATE_N = 624
  private val MERSENNE_STATE_M = 397
  private val MARTRX_A = 0x9908b0dfL
  private val UMASK = 0x80000000L
  /* most significant w-r bits */
  private val LMASK = 0x7fffffffL
  /* least significant r bits */
  private val randomFileOS = "/dev/urandom"

  // Complete generator state: the 624-word state vector plus the read cursor
  // (`next`) and the countdown until the next state regeneration (`left`).
  private var state: Array[Long] = new Array[Long](MERSENNE_STATE_N)
  private var seed: Long = 0
  private var next: Int = 0
  private var left: Int = 1
  // Box-Muller cache: normal deviates are produced in pairs; these hold the
  // intermediate values between the two draws.
  private var normalX: Double = 0
  private var normalY: Double = 0
  private var normalRho: Double = 0
  private var normalIsValid: Boolean = false

  setSeed(randomSeed())

  private[bigdl] def this(seed: Long) = {
    this()
    setSeed(seed)
  }

  /** Returns an independent generator that continues the exact same sequence. */
  override def clone(): RandomGenerator = {
    val result = new RandomGenerator()
    result.copy(this)
    result
  }

  /** Copies the complete state of `from` into this generator. */
  def copy(from: RandomGenerator): this.type = {
    this.state = from.state.clone()
    this.seed = from.seed
    this.next = from.next
    // Bug fix: `left` is part of the sequence position. Without copying it, a
    // cloned/copied generator regenerated its state vector at a different time
    // than the source and the two sequences diverged.
    this.left = from.left
    this.normalX = from.normalX
    this.normalY = from.normalY
    this.normalRho = from.normalRho
    this.normalIsValid = from.normalIsValid
    this
  }

  /** Produces a non-deterministic seed, preferring /dev/urandom when present
    * and falling back to the nanosecond clock elsewhere (e.g. Windows).
    */
  private def randomSeed(): Long = {
    if (Files.exists(Paths.get(randomFileOS))) {
      val fis = new FileInputStream(randomFileOS)
      try {
        val bis = new BufferedInputStream(fis)
        val buffer = new Array[Byte](8)
        bis.read(buffer, 0, 8)
        ByteBuffer.wrap(buffer).getLong
      } finally {
        // Close in finally so a failed read no longer leaks the descriptor
        // (previously close() was skipped on exception).
        fis.close()
      }
    } else {
      System.nanoTime()
    }
  }

  // The MT "twist" transformation combining two consecutive state words.
  @inline
  private def twist(u: Long, v: Long): Long = {
    ((((u) & UMASK) | ((v) & LMASK)) >> 1) ^ (
      if ((v & 0x00000001L) != 0) {
        MARTRX_A
      } else {
        0
      }
    )
  }

  /** Clears all generator state. Normally followed by setSeed. */
  def reset(): this.type = {
    var i = 0
    while (i < MERSENNE_STATE_N) {
      this.state(i) = 0L
      i += 1
    }
    this.seed = 0
    this.next = 0
    // Also reset the regeneration countdown so reset() alone yields a
    // consistent state (previously only setSeed restored it).
    this.left = 1
    this.normalX = 0
    this.normalY = 0
    this.normalRho = 0
    this.normalIsValid = false
    this
  }

  /** Re-initializes the state vector from `seed` (Knuth's multiplier scheme). */
  def setSeed(seed: Long): this.type = {
    this.reset()
    this.seed = seed
    this.state(0) = this.seed & 0xffffffffL
    var i = 1
    while (i < MERSENNE_STATE_N) {
      this.state(i) = (1812433253L * (this.state(i - 1) ^ (this.state(i - 1) >> 30)) + i)
      /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
      /* In the previous versions, mSBs of the seed affect */
      /* only mSBs of the array state[]. */
      /* 2002/01/09 modified by makoto matsumoto x */
      this.state(i) = this.state(i) & 0xffffffffL; /* for >32 bit machines */
      i += 1
    }
    this.left = 1
    this
  }

  /** Returns the seed this generator was last initialized with. */
  def getSeed(): Long = {
    this.seed
  }

  // Regenerates the full state vector once all 624 words have been consumed.
  private def nextState(): this.type = {
    var j: Int = MERSENNE_STATE_N - MERSENNE_STATE_M + 1
    var k: Int = 0
    this.left = MERSENNE_STATE_N
    this.next = 0
    while (j > 1) {
      j -= 1
      this.state(k) = this.state(MERSENNE_STATE_M + k) ^ twist(this.state(k), this.state(k + 1))
      k += 1
    }
    j = MERSENNE_STATE_M
    while (j > 1) {
      j -= 1
      this.state(k) = this.state(MERSENNE_STATE_M - MERSENNE_STATE_N + k) ^ twist(this.state(k),
        this.state(k + 1))
      k += 1
    }
    // Last word wraps around to state(0).
    this.state(k) = this.state(MERSENNE_STATE_M - MERSENNE_STATE_N + k) ^ twist(this.state(k),
      this.state(0))
    this
  }

  /**
   * Generates a random number on [0,0xffffffff]-interval
   */
  private def random(): Long = {
    var y: Long = 0
    this.left = this.left - 1
    if (this.left == 0) {
      this.nextState()
    }
    y = this.state(0 + this.next)
    this.next = this.next + 1
    /* Tempering */
    y ^= (y >> 11)
    y ^= (y << 7) & 0x9d2c5680L
    y ^= (y << 15) & 0xefc60000L
    y ^= (y >> 18)
    y
  }

  /**
   * Generates a random number on [0, 1)-real-interval
   */
  private def basicUniform(): Double = {
    this.random() * (1.0 / 4294967296.0)
  }

  /**
   * Generates a random number on [a, b)-real-interval uniformly
   */
  def uniform(a: Double, b: Double): Double = {
    this.basicUniform() * (b - a) + a
  }

  /** Normal deviate with the given mean and standard deviation (Box-Muller). */
  def normal(mean: Double, stdv: Double): Double = {
    require(stdv > 0, "standard deviation must be strictly positive")
    /* This is known as the Box-Muller method */
    if (!this.normalIsValid) {
      this.normalX = this.basicUniform()
      this.normalY = this.basicUniform()
      this.normalRho = Math.sqrt(-2 * Math.log(1.0 - this.normalY))
      this.normalIsValid = true
    } else {
      this.normalIsValid = false
    }
    // The pair (cos, sin) yields two independent deviates per (x, y) draw.
    if (this.normalIsValid) {
      this.normalRho * Math.cos(2 * Math.PI * this.normalX) * stdv + mean
    } else {
      this.normalRho * Math.sin(2 * Math.PI * this.normalX) * stdv + mean
    }
  }

  /** Exponential deviate with rate `lambda` (inverse-CDF method). */
  def exponential(lambda: Double): Double = {
    -1 / lambda * Math.log(1 - this.basicUniform())
  }

  /** Cauchy deviate with the given median and scale (inverse-CDF method). */
  def cauchy(median: Double, sigma: Double): Double = {
    median + sigma * Math.tan(Math.PI * (this.basicUniform() - 0.5))
  }

  /** Log-normal deviate parameterized by the mean/stdv of the distribution
    * itself (not of the underlying normal).
    */
  def logNormal(mean: Double, stdv: Double): Double = {
    val zm = mean * mean
    val zs = stdv * stdv
    require(stdv > 0, "standard deviation must be strictly positive")
    Math.exp(normal(Math.log(zm / Math.sqrt(zs + zm)), Math.sqrt(Math.log(zs / zm + 1))))
  }

  // NOTE(review): this matches the upstream torch THRandom_geometric formula
  // (log(1-u)/log(p) + 1); p here is used directly in the denominator.
  def geometric(p: Double): Int = {
    require(p >= 0 && p <= 1, "must be >= 0 and <= 1")
    ((Math.log(1 - this.basicUniform()) / Math.log(p)) + 1).toInt
  }

  /** Bernoulli trial: true with probability `p`. */
  def bernoulli(p: Double): Boolean = {
    require(p >= 0 && p <= 1, "must be >= 0 and <= 1")
    this.basicUniform() <= p
  }
}
| psyyz10/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/utils/RandomGenerator.scala | Scala | apache-2.0 | 7,185 |
package net.lshift.diffa.kernel.frontend
import org.junit.Test
/**
* Verify that DomainDef constraints are enforced.
*/
class DomainDefValidationTest extends DefValidationTestBase {
  @Test
  // Explicit `: Unit =` replaces deprecated procedure syntax (`def f { ... }`).
  def shouldRejectDomainNameThatIsTooLong: Unit = {
    // The %s placeholder is filled with the generated over-long name by the
    // base-class helper; the lambda builds the DomainDef under validation.
    validateExceedsMaxKeyLength("config/domain[name=%s]: name",
      domain => DomainDef(name = domain))
  }
}
| aprescott/diffa | kernel/src/test/scala/net/lshift/diffa/kernel/frontend/DomainDefValidationTest.scala | Scala | apache-2.0 | 350 |
package org.vaadin.addons.vaactor
import javax.servlet.ServletConfig
import com.vaadin.flow.server.VaadinServlet
import akka.actor.ActorSystem
object VaactorServlet {

  /** the actor system */
  // Created when this object is first initialized; the system name and
  // configuration come from the Vaactor `config`, with `loadedConfig` as
  // fallback for keys not set there.
  val system: ActorSystem = ActorSystem(
    config.getString("system-name"),
    config.withFallback(loadedConfig)
  )

  // Intentionally empty: called from VaactorServlet.init solely to force
  // initialization of this object (and therefore creation of `system`)
  // at servlet startup rather than on first use.
  private def dummyInit(): Unit = {}

}
/** Servlet creates and destroys ActorSystem
  *
  * @author Otto Ringhofer
  */
abstract class VaactorServlet extends VaadinServlet {

  /** Initialize actor system */
  override def init(servletConfig: ServletConfig): Unit = {
    super.init(servletConfig)
    VaactorServlet.dummyInit() // trigger creation of system
  }

  /** Terminate actor system */
  override def destroy(): Unit = {
    super.destroy()
    // terminate() is asynchronous; shutdown completion is not awaited here.
    VaactorServlet.system.terminate()
  }

}
| otto-ringhofer/vaactor | addon/src/main/scala/org/vaadin/addons/vaactor/VaactorServlet.scala | Scala | apache-2.0 | 817 |
package com.blinkbox.books.agora.catalogue.book
import scala.concurrent.duration.FiniteDuration
import com.typesafe.config.Config
import java.util.concurrent.TimeUnit
import scala.concurrent.duration._
/** Configuration for the public book endpoints.
  *
  * @param path             base URL path of the book resource
  * @param synopsisPathLink path template used to build synopsis links
  * @param maxAge           cache max-age for responses
  * @param maxResults       upper bound on results returned per request
  */
case class BookConfig(
  path: String,
  synopsisPathLink: String,
  maxAge: FiniteDuration,
  maxResults: Int
)
object BookConfig {

  /** Reads a [[BookConfig]] from the given Typesafe config node.
    * Keys are read in declaration order, so a missing key fails with the
    * same error as before.
    */
  def apply(config: Config): BookConfig = {
    val path = config.getString("path")
    val synopsisLink = config.getString("synopsisLink")
    val maxAgeMillis = config.getDuration("maxAge", TimeUnit.MILLISECONDS)
    val maxResults = config.getInt("maxResults")
    BookConfig(path, synopsisLink, maxAgeMillis.millis, maxResults)
  }
}
| blinkboxbooks/catalogue-v2.scala | catalogue2-service-public/src/main/scala/com/blinkbox/books/agora/catalogue/book/BookConfig.scala | Scala | mit | 562 |
// scalac: -deprecation
//
import scala.language.{ postfixOps }
object Test {
  // Entry point of this pattern-matching regression suite: each nested object
  // exercises one historical bug or feature and asserts inside run().
  def main(args: Array[String]): Unit = {
    ApplyFromJcl.run()
    Bug1093.run()
    Bug1094.run()
    Bug1270.run()
    Bug1281.run()
    Bug457.run()
    Bug508.run()
    Bug789.run()
    Bug881.run()
    Bug995.run()
    ClassDefInGuard.run()
    SeqUnapply.run()
    SimpleUnapply.run()
    Test1163_Order.run()
    Test717.run()
    Test903.run()
    TestEqualsPatternOpt.run()
    TestGuards.run()
    TestSequence01.run()
    TestSequence02.run()
    TestSequence03.run()
    TestSequence04.run()
    TestSequence05.run()
    TestSequence06.run()
    TestSequence07.run()
    TestSequence08.run()
    TestSimpleIntSwitch.run()
    TestLazyList.run()
    TestUnbox.run()
    Ticket11.run()
    Ticket2.run()
    Ticket346.run()
    Ticket37.run()
    Ticket44.run()
    NullMatch.run()
    // NOTE(review): Ticket211 is defined below but never invoked here -
    // confirm whether that omission is intentional.
  }
def assertEquals(a: Any, b: Any): Unit = { assert(a == b) }
def assertEquals(msg: String, a: Any, b: Any): Unit = { assert(a == b, msg) }
object SimpleUnapply {
def run(): Unit = { // from sortedmap, old version
List((1, 2)).head match {
case kv@(key, _) => kv.toString + " " + key.toString
}
}
}
object SeqUnapply {
case class SFB(i: Int, xs: List[Int])
def run(): Unit = {
(List(1, 2): @unchecked) match {
case List(1) => assert(false, "wrong case")
case List(1, 2, xs@_*) => assert(xs.isEmpty, "not empty")
case Nil => assert(false, "wrong case")
}
SFB(1, List(1)) match {
case SFB(_, List(x)) => assert(x == 1)
case SFB(_, _) => assert(false)
}
}
}
object ApplyFromJcl {
def run(): Unit = {
val p = (1, 2)
Some(2) match {
case Some(p._2) =>
case _ => assert(false)
}
}
}
object TestSimpleIntSwitch {
def run(): Unit = {
assertEquals("s1", 1, 1 match {
case 3 => 3
case 2 => 2
case 1 => 1
case 0 => 0
})
assertEquals("s2", 1, 1 match {
case 1 => 1
case _ => 0
})
assertEquals("s2boxed", 1, (1: Any) match {
case 1 => 1
case _ => 0
})
assertEquals("s3", 1, ("hello") match {
case s: String => 1
//case _ => 0 // unreachable!
})
val xyz: (Int, String, Boolean) = (1, "abc", true);
assertEquals("s4", 1, xyz._1 match {
case 1 => 1
case _ => 0
})
}
}
// #717 test path of case classes
object Test717 {
class Foo(j: Int) {
case class Bar(i: Int)
}
val foo1 = new Foo(1)
val foo2 = new Foo(2)
def run(): Unit = {
val res = (foo1.Bar(2): Any) match {
case foo1.Bar(2) => true
}
assert(res)
}
}
///
trait Treez { self: Shmeez =>
abstract class Tree
case class Beez(i: Int) extends Tree
case object HagbardCeline extends Tree
}
trait Shmeez extends AnyRef with Treez {
val tree: Tree
def foo = tree match {
case Beez(2) => 1
case HagbardCeline => 0
}
}
// multiple guards for same pattern
object TestGuards extends Shmeez {
val tree: Tree = Beez(2)
def run(): Unit = {
val res = tree match {
case Beez(x) if x == 3 => false
case Beez(x) if x == 2 => true
}
assert(res)
val ret = (Beez(3): Tree) match {
case Beez(x) if x == 3 => true
case Beez(x) if x == 2 => false
}
assert(ret)
}
}
// test EqualsPatternClass in combination with MixTypes opt, bug #1276
object TestEqualsPatternOpt {
val NoContext = new Object
def run(): Unit = {
assertEquals(1, ((NoContext: Any) match {
case that: AnyRef if this eq that => 0
case NoContext => 1
case _ => 2
}))
}
}
// all ignoring patterns on List
object TestSequence01 {
def doMatch(xs: List[String]): String = xs match {
case List(_*) => "ok"
}
def doMatch2(xs: List[String]): List[String] = xs match {
case List(_, rest@_*) => rest.toList
}
def run(): Unit = {
val list1 = List()
assertEquals(doMatch(list1), "ok")
val list2 = List("1", "2", "3")
assertEquals(doMatch(list2), "ok")
val list3 = List("1", "2", "3")
assertEquals(doMatch2(list3), List("2", "3"))
}
}
// all ignoring patterns on Seq
object TestSequence02 {
def doMatch(l: Seq[String]): String = l match {
case Seq(_*) => "ok"
}
def run(): Unit = {
val list1 = List()
assertEquals(doMatch(list1), "ok")
val list2 = List("1", "2", "3")
assertEquals(doMatch(list2), "ok")
val array3 = Array[String]()
assertEquals(doMatch(array3.toIndexedSeq), "ok")
val array4 = Array[String]("ga", "gu")
assertEquals(doMatch(array4.toIndexedSeq), "ok")
}
}
// right-ignoring patterns on List, defaults
object TestSequence03 {
def doMatch(xs: List[String]): String = xs match {
case List(_, _, _, _*) => "ok"
case _ => "not ok"
}
def run(): Unit = {
val list1 = List()
assertEquals(doMatch(list1), "not ok")
val list2 = List("1", "2", "3")
assertEquals(doMatch(list2), "ok")
val list3 = List("1", "2", "3", "4")
assertEquals(doMatch(list3), "ok")
}
}
// all- and right-ignoring pattern on case class w/ seq param
object TestSequence04 {
case class Foo(i: Int, chars: Char*)
def run(): Unit = {
val a = Foo(0, 'a') match {
case Foo(i, c, chars@_*) => c
case _ => null
}
assertEquals(a, 'a')
val b = Foo(0, 'a') match {
case Foo(i, chars@_*) => 'b'
case _ => null
}
assertEquals(b, 'b')
}
}
// sealed case class with ignoring seq patterns
object TestSequence05 {
sealed abstract class Con;
case class Foo() extends Con
case class Bar(xs: Con*) extends Con
def run(): Unit = {
val res = (Bar(Foo()): Con) match {
case Bar(xs@_*) => xs // this should be optimized away to a pattern Bar(xs)
case _ => Nil
}
assertEquals("res instance" + res.isInstanceOf[Seq[Con] forSome { type Con }] + " res(0)=" + res(0), true, res.isInstanceOf[Seq[Foo] forSome { type Foo }] && res(0) == Foo())
}
}
// (not regular) fancy guards / bug#644
object TestSequence06 {
case class A(i: Any)
def doMatch(x: Any, bla: Int) = x match {
case x: A if (bla == 1) => 0
case A(1) => 1
case A(A(1)) => 2
}
def run(): Unit = {
assertEquals(doMatch(A(null), 1), 0)
assertEquals(doMatch(A(1), 2), 1)
assertEquals(doMatch(A(A(1)), 2), 2)
}
}
// List of chars
object TestSequence07 {
def doMatch1(xs: List[Char]) = xs match {
case List(x, y, _*) => x :: y :: Nil
}
def doMatch2(xs: List[Char]) = xs match {
case List(x, y, z, w) => List(z, w)
}
def doMatch3(xs: Seq[Char]) = xs match {
case Seq(x, y, 'c', w@_*) => x :: y :: Nil
case Seq(x, y, z@_*) => z
}
def doMatch4(xs: Seq[Char]) = xs match {
case Seq(x, 'b') => x :: 'b' :: Nil
case Seq(x, y, z@_*) => z.toList
}
def run(): Unit = {
assertEquals(List('a', 'b'), doMatch1(List('a', 'b', 'c', 'd')))
assertEquals(List('c', 'd'), doMatch2(List('a', 'b', 'c', 'd')))
assertEquals(List('a', 'b'), doMatch3(List('a', 'b', 'c', 'd')))
assertEquals(List('c', 'd'), doMatch4(List('a', 'b', 'c', 'd')))
}
}
// backquoted identifiers in pattern
object TestSequence08 {
def run(): Unit = {
val xs = List(2, 3)
val ys = List(1, 2, 3) match {
case x :: `xs` => xs
case _ => Nil
}
assertEquals(xs, ys)
}
}
// unapply for LazyLists
object TestLazyList {
def sum(lazyList: LazyList[Int]): Int =
lazyList match {
case ll if ll.isEmpty => 0
case LazyList.cons(hd, tl) => hd + sum(tl)
}
val str: LazyList[Int] = List(1, 2, 3).to(LazyList)
def run(): Unit = { assertEquals(sum(str), 6) }
}
// bug#1163 order of temps must be preserved
object Test1163_Order {
abstract class Function
case class Var(n: String) extends Function
case class Const(v: Double) extends Function
def f(): (Function, Function) = {
(Var("x"): Function, Var("y"): Function) match {
case (Const(v), Const(w)) => throw new Error
case (leftOne, Var("z")) => throw new Error
case (leftTwo, rightTwo) => (leftTwo, rightTwo) // was giving "y","x"
}
}
def flips(l: List[Int]): Int = (l: @unchecked) match {
case 1 :: ls => 0
case n :: ls => flips((l take n reverse) ::: (l drop n)) + 1
}
def run(): Unit = { assertEquals("both", (Var("x"), Var("y")), f()) }
}
object TestUnbox {
def run(): Unit = {
val xyz: (Int, String, Boolean) = (1, "abc", true)
xyz._1 match {
case 1 => "OK"
case 2 => assert(false); "KO"
case 3 => assert(false); "KO"
}
}
}
object Test903 {
class Person(_name: String, _father: Person) {
def name = _name
def father = _father
}
object PersonFather {
def unapply(p: Person): Option[Person] =
if (p.father == null)
None
else
Some(p.father)
}
def run(): Unit = {
val p1 = new Person("p1", null)
val p2 = new Person("p2", p1)
assertEquals((p2.name, p1.name), p2 match {
case aPerson@PersonFather(f) => (aPerson.name, f.name)
case _ => "No father"
})
}
}
object Bug881 {
object Foo1 {
class Bar1(val x: String)
def p(b: Bar1) = b.x
def unapply(s: String): Option[Bar1] =
Some(new Bar1(s))
}
class Foo(j: Int) {
case class Bar(i: Int)
}
def run(): Unit = {
"baz" match {
case Foo1(x) =>
Foo1.p(x)
}
}
}
// these are exhaustive matches
// should not generate any warnings
def f[A](z: (Option[A], Option[A])) = z match {
case (None, Some(x)) => 1
case (Some(x), None) => 2
case (Some(x), Some(y)) => 3
case _ => 4
}
def g1[A](z: Option[List[A]]) = z match {
case Some(Nil) => true
case Some(x :: Nil) => true
case _ => true
}
def g2[A](z: Option[List[A]]) = z match {
case Some(x :: Nil) => true
case Some(_) => false
case _ => true
}
def h[A](x: (Option[A], Option[A])) = x match {
case (None, _: Some[_]) => 1
case (_: Some[_], None) => 2
case (_: Some[_], _: Some[_]) => 3
case _ => 4
}
def j = (List[Int](), List[Int](1)) match {
case (Nil, _) => 'a'
case (_, Nil) => 'b'
case (h1 :: t1, h2 :: t2) => 'c'
}
def k(x: AnyRef) = x match {
case null => 1
case _ => 2
}
val FooBar = 42
def lala() = 42 match {
case FooBar => true
}
object Bug1270 { // unapply13
class Sync {
def apply(x: Int): Int = 42
def unapply(scrut: Any): Option[Int] = None
}
class Buffer {
object Get extends Sync
var ps: PartialFunction[Any, Any] = {
case Get(y) if y > 4 => // y gets a wildcard type for some reason?! hack
}
}
def run(): Unit = {
assert(!(new Buffer).ps.isDefinedAt(42))
}
}
object Bug1281 {
class Sync {
def unapplySeq(scrut: Int): Option[Seq[Int]] = {
if (scrut == 42) Some(List(1, 2))
else None
}
}
class Buffer {
val Get = new Sync
val jp: PartialFunction[Any, Any] = {
case Get(xs) => // the argDummy <unapply-selector> should have proper arg.tpe (Int in this case)
}
}
def run(): Unit = {
assert(!(new Buffer).jp.isDefinedAt(40))
assert(!(new Buffer).jp.isDefinedAt(42))
}
}
object ClassDefInGuard {
val z: PartialFunction[Any, Any] = {
case x :: xs if xs.forall { y => y.hashCode() > 0 } => 1
}
def run(): Unit = {
val s: PartialFunction[Any, Any] = {
case List(4 :: xs) => 1
case List(5 :: xs) => 1
case _ if false =>
case List(3 :: xs) if List(3: Any).forall { g => g.hashCode() > 0 } => 1
}
z.isDefinedAt(42)
s.isDefinedAt(42)
// just load the thing, to see if the classes are found
(None: Option[Boolean] @unchecked) match {
case x if x.map(x => x).isEmpty =>
}
}
}
// bug#457
object Bug457 {
def method1() = {
val x = "Hello, world"; val y = 100;
y match {
case _: Int if (x match { case t => t.trim().length() > 0 }) => false;
case _ => true;
}
}
def method2(): scala.Boolean = {
val x: String = "Hello, world"; val y: scala.Int = 100; {
var temp1: scala.Int = y
var result: scala.Boolean = false
if ({
var result1: scala.Boolean = true;
if (y == 100)
result1
else
throw new MatchError("crazybox.scala, line 11")
} && (y > 90))
result
else
throw new MatchError("crazybox.scala, line 9")
}
}
def run(): Unit = {
method1();
method2();
}
}
// bug#508
object Bug508 {
case class Operator(x: Int);
val EQ = new Operator(2);
def analyze(x: Tuple2[Operator, Int]) = (x: @unchecked) match {
case (EQ, 0) => "0"
case (EQ, 1) => "1"
case (EQ, 2) => "2"
}
def run(): Unit = {
val x = (EQ, 0);
assertEquals("0", analyze(x)); // should print "0"
val y = (EQ, 1);
assertEquals("1", analyze(y)); // should print "1"
val z = (EQ, 2);
assertEquals("2", analyze(z)); // should print "2"
}
}
// bug#789
object Bug789 { // don't do this at home
trait Impl
trait SizeImpl extends Impl { def size = 42 }
trait ColorImpl extends Impl { def color = "red" }
type Both = SizeImpl with ColorImpl
def info(x: Impl) = x match {
case x: Both => "size " + x.size + " color " + x.color // you wish
case x: SizeImpl => "!size " + x.size
case x: ColorImpl => "color " + x.color
case _ => "n.a."
}
def info2(x: Impl) = x match {
case x: SizeImpl with ColorImpl => "size " + x.size + " color " + x.color // you wish
case x: SizeImpl => "!size " + x.size
case x: ColorImpl => "color " + x.color
case _ => "n.a."
}
def run(): Unit = {
// make up some class that has a size
class MyNode extends SizeImpl
assertEquals("!size 42", info(new MyNode))
assertEquals("!size 42", info2(new MyNode))
}
}
// bug#995
object Bug995 {
def foo(v: Any): String = v match {
case s: Seq[_] => "Seq" // see hack in object Seq.unapplySeq
case a: AnyRef if runtime.ScalaRunTime.isArray(a) => "Array"
case _ => v.toString
}
def run(): Unit = { assertEquals("Array", foo(Array(0))) }
}
// bug#1093 (contribution #460)
object Bug1093 {
def run(): Unit = {
assert((Some(3): @unchecked) match {
case Some(1 | 2) => false
case Some(3) => true
})
}
}
// bug#1094 (contribution #461)
object Bug1094 {
def foo(ps: String*) = "Foo"
case class X(p: String, ps: String*)
def bar =
X("a", "b") match {
case X(p, ps@_*) => foo(ps: _*)
}
def run(): Unit = { assertEquals("Foo", bar) }
}
// #2
class Outer_2 {
case class Foo(x: Int, y: Int) {
override def equals(other: Any) = other match {
case Outer_2.this.Foo(`x`, `y`) => true
case _ => false
}
}
}
object Ticket2 {
def run(): Unit = {
val o1 = new Outer_2; val o2 = new Outer_2; val x: Any = o1.Foo(1, 2); val y: Any = o2.Foo(1, 2)
assert(x != y, "equals test returns true (but should not)")
assert(x match {
case o2.Foo(x, y) => false
case o1.Foo(x, y) => true
case _ => false
}, "match enters wrong case")
}
}
// #11
class MyException1 extends Exception
// Commenting out the following line and uncommenting the second line
// will cause the test to succeed.
trait SpecialException extends MyException1
// trait SpecialException
class MyException2 extends MyException1 with SpecialException
  // #11: verifies that catch-case dispatch selects the SpecialException case
  // for MyException2 (which mixes in the trait) and the fallback for the rest.
  object Ticket11 {
    def run(): Unit = {
      Array[Throwable](new Exception("abc"),
        new MyException1,
        new MyException2).foreach { e =>
        try {
          throw e
        } catch {
          case e: SpecialException => {
            assume(e.isInstanceOf[SpecialException])
          }
          // Bare `case e =>` catches every remaining Throwable; kept
          // deliberately - this file tests legacy behaviour and is compiled
          // with -deprecation (see the scalac header comment).
          case e => {
            assume(e.isInstanceOf[Throwable])
          }
        }
      }
    }
  }
// #37
object Ticket37 {
def foo(): Unit = {}
val (a, b) = { foo(); (2, 3) }
def run(): Unit = { assertEquals(this.a, 2) }
}
// #44
trait _X {
case class _Foo();
object _Bar {
def unapply(foo: _Foo): Boolean = true;
}
}
object Y extends _X {
val foo = _Foo()
foo match {
case _Bar() =>
case _ => assert(false)
}
}
object Ticket44 {
def run(): Unit = { assert(Y.toString ne null) /*instantiate Y*/ }
}
object Ticket211 {
def run(): Unit = {
(Some(123): Option[Int]) match {
case (x: Option[a]) if false => {};
case (y: Option[b]) => {};
}
}
}
// this test case checks nothing more than whether
// case N for object N is translated to a check scrutinee.equals(N)
// (or the other way round)... for a long time, we got away with
// scrutinee eq N, but those golden days are, apparently, over.
object Ticket346 {
class L(val content: List[Int]) {
def isEmpty = content.isEmpty
def head = content.head
def tail = content.tail
override def equals(that: Any): Boolean = {
val result = that.isInstanceOf[N.type]
println("L(" + content + ").equals(" + that + ") returning " + result)
result
}
}
object N extends L(Nil) {
override def equals(that: Any): Boolean =
(that.isInstanceOf[L] && that.asInstanceOf[L].isEmpty)
}
object C {
def unapply(xs: L): Option[(Int, L)] = {
if (xs.isEmpty) { println("xs is empty"); None }
else
Some((xs.head, new L(xs.tail)))
}
}
def empty(xs: L): Boolean = xs match {
case N => true
case _ => false
}
def singleton(xs: L): Boolean = xs match {
case C(_, N) => true
case _ => false
}
def run(): Unit = {
assert(empty(new L(Nil)))
assert(singleton(new L(List(1))))
}
} // end Ticket346
// scala/bug#4364
object NullMatch {
object XArray {
def unapplySeq[A](x: Array[A]): Option[IndexedSeq[A]] =
if (x eq null) sys.error("Unexpected null!")
else Some(x.toIndexedSeq)
}
object YArray {
def unapply(xs: Array[Int]): Boolean =
if (xs eq null) sys.error("Unexpected null!")
else true
}
object Animal {
def unapply(x: AnyRef): Option[AnyRef] =
if (x.toString == "Animal") Some(x)
else None
}
def nullMatch[A](xs: Array[A]): Boolean = xs match {
case Array(xs @_*) => false
case _ => true
}
def nullMatch2[A](xs: Array[A]): Boolean = xs match {
case XArray(xs @_*) => false
case _ => true
}
def nullMatch3[A](xs: Array[A]): Boolean = xs match {
case XArray(xs @_*) if 1 == 1 => false
case _ => true
}
def nullMatch4(xs: Array[Int]): Boolean = xs match {
case YArray() => false
case _ => true
}
def nullMatch5(x: AnyRef): Boolean = x match {
case Animal(x) => false
case _ => true
}
def t8787nullMatch() = {
val r = """\\d+""".r
val s: String = null
val x = s match { case r() => 1 ; case _ => 2 }
2 == x
}
def t8787nullMatcher() = {
val r = """(\\d+):(\\d+)""".r
val s = "1:2 3:4 5:6"
val z = ((r findAllMatchIn s).toList :+ null) flatMap {
case r(x, y) => Some((x.toInt, y.toInt))
case _ => None
}
List((1,2),(3,4),(5,6)) == z
}
def run(): Unit = {
assert(nullMatch(null))
assert(nullMatch2(null))
assert(nullMatch3(null))
assert(nullMatch4(null))
assert(nullMatch5(null))
assert(t8787nullMatch())
assert(t8787nullMatcher())
}
}
}
| scala/scala | test/files/run/patmatnew.scala | Scala | apache-2.0 | 20,548 |
package com.softwaremill.codebrag.dao
import org.scalatest.Tag
/** ScalaTest tag marking tests that need a running database, so they can be
  * included or excluded at test-run time via the "requiresDb" tag name.
  */
object RequiresDb extends Tag("requiresDb")
| softwaremill/codebrag | codebrag-dao/src/test/scala/com/softwaremill/codebrag/dao/RequiresDb.scala | Scala | agpl-3.0 | 109 |
package json.source
import java.io.File
import java.net.URI
import argonaut.Argonaut._
import argonaut.Json
import scala.collection.mutable
import scala.util.control.NonFatal
import scalaz._
/** Type class describing how to obtain a JSON document from a value of type A. */
trait JsonSource[A] {
  /** URI identifying the document; "#" for in-memory sources. */
  def uri(addr: A): URI
  /** Loads and parses the document; Left carries the error message. */
  def json(addr: A): String \/ Json
}
/** Built-in [[JsonSource]] instances plus a caching combinator. */
object JsonSource {
  // An already-parsed document: trivially itself, anchored at "#".
  implicit val json: JsonSource[Json] = new JsonSource[Json] {
    override def uri(t: Json): URI = new URI("#")
    override def json(t: Json): String \/ Json = \/-(t)
  }

  // A raw JSON string: parsed with argonaut, anchored at "#".
  implicit val string: JsonSource[String] = new JsonSource[String] {
    override def uri(t: String): URI = new URI("#")
    override def json(t: String): String \/ Json = t.parse
  }

  implicit val file: JsonSource[File] = new JsonSource[File] {
    override def uri(t: File): URI = t.toURI
    override def json(t: File): String \/ Json = try {
      val source = scala.io.Source.fromFile(t)
      // Bug fix: the Source was previously never closed, leaking a file
      // descriptor per load.
      try source.mkString.parse
      finally source.close()
    } catch {
      case NonFatal(e) => -\/(e.getMessage)
    }
  }

  implicit val uri: JsonSource[URI] = new JsonSource[URI] {
    override def uri(t: URI): URI = t
    override def json(t: URI): String \/ Json = try {
      import scala.io.Source
      // Absolute URIs (e.g. http://...) are fetched as URLs; relative ones
      // are resolved against the local filesystem.
      val html = if (t.isAbsolute) Source.fromURL(t.toURL) else Source.fromURI(t)
      // Bug fix: close the Source once consumed (previously leaked).
      try html.mkString.parse
      finally html.close()
    } catch {
      case NonFatal(e) => -\/(e.getMessage)
    }
  }

  /** Summons the JsonSource instance for T. */
  def apply[T: JsonSource]: JsonSource[T] = implicitly[JsonSource[T]]

  /** Wraps a source so each document URI is fetched and parsed at most once.
    * NOTE(review): the cache is a plain mutable.Map and getOrElseUpdate is
    * not atomic, so this wrapper is not thread safe - confirm single-threaded
    * use by callers.
    */
  def withCaching[T](implicit wrapped: JsonSource[T]): JsonSource[T] = new JsonSource[T] {
    val cache = mutable.Map.empty[URI, String \/ Json]
    override def uri(t: T): URI = wrapped.uri(t)
    override def json(t: T): \/[String, Json] = {
      // remove fragment, as whole document for that uri is cached
      val key = uri(t).resolve("#")
      cache.getOrElseUpdate(key, wrapped.json(t))
    }
  }
}
| abhimehta/json-schema-parser | src/main/scala/json/source/JsonSource.scala | Scala | apache-2.0 | 1,806 |
/*
* Copyright 2001-2014 Stephen Colebourne
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.joda.time.chrono
import java.util.concurrent.ConcurrentHashMap
import org.joda.time.Chronology
import org.joda.time.DateTimeConstants
import org.joda.time.DateTimeZone
/**
* Implements a pure proleptic Gregorian calendar system, which defines every
* fourth year as leap, unless the year is divisible by 100 and not by 400.
* This improves upon the Julian calendar leap year rule.
* <p>
* Although the Gregorian calendar did not exist before 1582 CE, this
* chronology assumes it did, thus it is proleptic. This implementation also
* fixes the start of the year at January 1, and defines the year zero.
* <p>
* GregorianChronology is thread-safe and immutable.
*
* @see <a href="http://en.wikipedia.org/wiki/Gregorian_calendar">Wikipedia</a>
* @see JulianChronology
* @see GJChronology
*
* @author Guy Allard
* @author Stephen Colebourne
* @author Brian S O'Neill
* @since 1.0
*/
@SerialVersionUID(-861407383323710522L)
object GregorianChronology {
  /** Average milliseconds in a Gregorian year (365.2425 days). */
  private val MILLIS_PER_YEAR: Long = (365.2425 * DateTimeConstants.MILLIS_PER_DAY).toLong
  /** Average milliseconds in a Gregorian month (one twelfth of a year). */
  private val MILLIS_PER_MONTH: Long = (365.2425 * DateTimeConstants.MILLIS_PER_DAY / 12).toLong
  /** Days from year 0000 to 1970-01-01 in the proleptic Gregorian calendar. */
  private val DAYS_0000_TO_1970: Int = 719527
  /** The lowest year that can be fully supported. */
  private val MIN_YEAR: Int = -292275054
  /** The highest year that can be fully supported. */
  private val MAX_YEAR: Int = 292278993
  /** Cache of zone to chronology arrays */
  private val cCache: ConcurrentHashMap[DateTimeZone, Array[GregorianChronology]] = new ConcurrentHashMap[DateTimeZone, Array[GregorianChronology]]
  /** Singleton instance of a UTC GregorianChronology.
   *  Fix: initialized directly (after `cCache`, which `getInstance` uses).
   *  The machine-converted original declared this as a null `val` and then
   *  reassigned it inside a stray `try` block, which does not compile in Scala.
   */
  private val INSTANCE_UTC: GregorianChronology = getInstance(DateTimeZone.UTC)
  /**
   * Gets an instance of the GregorianChronology.
   * The time zone of the returned instance is UTC.
   *
   * @return a singleton UTC instance of the chronology
   */
  def getInstanceUTC: GregorianChronology = INSTANCE_UTC
  /**
   * Gets an instance of the GregorianChronology in the default time zone.
   *
   * @return a chronology in the default time zone
   */
  def getInstance: GregorianChronology = getInstance(DateTimeZone.getDefault, 4)
  /**
   * Gets an instance of the GregorianChronology in the given time zone.
   *
   * @param zone the time zone to get the chronology in, null is default
   * @return a chronology in the specified time zone
   */
  def getInstance(zone: DateTimeZone): GregorianChronology = getInstance(zone, 4)
  /**
   * Gets an instance of the GregorianChronology in the given time zone.
   *
   * @param zone the time zone to get the chronology in, null is default
   * @param minDaysInFirstWeek minimum number of days in first week of the year; default is 4
   * @return a chronology in the specified time zone
   * @throws IllegalArgumentException if minDaysInFirstWeek is not in 1..7
   */
  def getInstance(zone: DateTimeZone, minDaysInFirstWeek: Int): GregorianChronology = {
    // Fix: Scala method parameters are immutable, so the Java-style
    // `zone = DateTimeZone.getDefault` reassignment is replaced by a local.
    val tz = if (zone == null) DateTimeZone.getDefault else zone
    if (minDaysInFirstWeek < 1 || minDaysInFirstWeek > 7) {
      // Explicit bounds check replaces the original catch of
      // ArrayIndexOutOfBoundsException around the array access below.
      throw new IllegalArgumentException("Invalid min days in first week: " + minDaysInFirstWeek)
    }
    var chronos: Array[GregorianChronology] = cCache.get(tz)
    if (chronos == null) {
      chronos = new Array[GregorianChronology](7)
      val oldChronos: Array[GregorianChronology] = cCache.putIfAbsent(tz, chronos)
      if (oldChronos != null) {
        chronos = oldChronos
      }
    }
    var chrono: GregorianChronology = chronos(minDaysInFirstWeek - 1)
    if (chrono == null) {
      // Check again under the per-zone array's monitor, as in Joda-Time.
      chronos synchronized {
        chrono = chronos(minDaysInFirstWeek - 1)
        if (chrono == null) {
          if (tz eq DateTimeZone.UTC) {
            chrono = new GregorianChronology(null, null, minDaysInFirstWeek)
          } else {
            // Build the UTC base first, then wrap it with the requested zone.
            val utc = getInstance(DateTimeZone.UTC, minDaysInFirstWeek)
            chrono = new GregorianChronology(ZonedChronology.getInstance(utc, tz), null, minDaysInFirstWeek)
          }
          chronos(minDaysInFirstWeek - 1) = chrono
        }
      }
    }
    chrono
  }
}
@SerialVersionUID(-861407383323710522L)
final class GregorianChronology private (base: Chronology, param: AnyRef, minDaysInFirstWeek: Int)
    extends BasicGJChronology(base, param, minDaysInFirstWeek) {
  // Fix: the machine-converted auxiliary constructor called `this()` and then
  // `` `super`(...) ``, which is not valid Scala. The superclass arguments are
  // now passed through a private primary constructor; instances are created
  // only by the companion object's getInstance methods.
  /**
   * Serialization singleton: resolve deserialized instances to the cached
   * chronology for the same zone and min-days-in-first-week setting.
   */
  private def readResolve: AnyRef = {
    val resolvedBase: Chronology = getBase
    val storedMinDays: Int = getMinimumDaysInFirstWeek
    // 0 means "not recorded"; fall back to the ISO default of 4.
    val minDays = if (storedMinDays == 0) 4 else storedMinDays
    if (resolvedBase == null) GregorianChronology.getInstance(DateTimeZone.UTC, minDays)
    else GregorianChronology.getInstance(resolvedBase.getZone, minDays)
  }
  /**
   * Gets the Chronology in the UTC time zone.
   *
   * @return the chronology in UTC
   */
  def withUTC: Chronology = GregorianChronology.INSTANCE_UTC
  /**
   * Gets the Chronology in a specific time zone.
   *
   * @param zone the zone to get the chronology in, null is default
   * @return the chronology
   */
  def withZone(zone: DateTimeZone): Chronology = {
    // Fix: parameters are vals in Scala; substitute the default via a local.
    val tz = if (zone == null) DateTimeZone.getDefault else zone
    if (tz eq getZone) this
    else GregorianChronology.getInstance(tz)
  }
  protected override def assemble(fields: AssembledChronology.Fields) {
    // Only the base (unzoned) chronology assembles fields; zoned wrappers delegate.
    if (getBase == null) {
      super.assemble(fields)
    }
  }
  /** Gregorian rule: every fourth year is leap, except centuries not divisible by 400. */
  private[chrono] def isLeapYear(year: Int): Boolean =
    ((year & 3) == 0) && ((year % 100) != 0 || (year % 400) == 0)
  /** Milliseconds at the proleptic start (Jan 1, midnight) of `year`. */
  private[chrono] def calculateFirstDayOfYearMillis(year: Int): Long = {
    // Count leap years before `year`; the negative branch keeps integer
    // division rounding consistent for years before year zero.
    var leapYears: Int = year / 100
    if (year < 0) {
      leapYears = ((year + 3) >> 2) - leapYears + ((leapYears + 3) >> 2) - 1
    } else {
      leapYears = (year >> 2) - leapYears + (leapYears >> 2)
      if (isLeapYear(year)) {
        leapYears -= 1
      }
    }
    (year * 365L + (leapYears - GregorianChronology.DAYS_0000_TO_1970)) * DateTimeConstants.MILLIS_PER_DAY
  }
  private[chrono] def getMinYear: Int = GregorianChronology.MIN_YEAR
  private[chrono] def getMaxYear: Int = GregorianChronology.MAX_YEAR
  private[chrono] def getAverageMillisPerYear: Long = GregorianChronology.MILLIS_PER_YEAR
  private[chrono] def getAverageMillisPerYearDividedByTwo: Long = GregorianChronology.MILLIS_PER_YEAR / 2
  private[chrono] def getAverageMillisPerMonth: Long = GregorianChronology.MILLIS_PER_MONTH
  private[chrono] def getApproxMillisAtEpochDividedByTwo: Long = (1970L * GregorianChronology.MILLIS_PER_YEAR) / 2
}
package com.github.truerss.base
import com.typesafe.config.Config
/** Mixin granting access to a Typesafe [[Config]]; implementors supply the loaded configuration. */
trait ConfigProvider {
  // Loaded application configuration provided by the concrete implementation.
  val config: Config
}
| truerss/plugins | base/src/main/scala/com/github/truerss/base/ConfigProvider.scala | Scala | mit | 114 |
/*
* SourcesTest.scala
* Sources example tests.
*
* Created By: Avi Pfeffer (apfeffer@cra.com)
* Creation Date: Jan 1, 2009
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.test.example
import org.scalatest.Matchers
import org.scalatest.WordSpec
import com.cra.figaro.algorithm._
import com.cra.figaro.algorithm.factored._
import com.cra.figaro.algorithm.sampling._
import com.cra.figaro.language._
import com.cra.figaro.library.compound._
import com.cra.figaro.library.atomic.continuous._
import com.cra.figaro.util._
import com.cra.figaro.test._
/** Regression test for the "sources" example: samples must be attributed to
  * their originating sources, with each (source, sample) distance measurement
  * modelled in its own dependent universe. The expected posterior is computed
  * analytically inside `test` and compared to the algorithm's answer.
  */
class SourcesTest extends WordSpec with Matchers {
  "The sources example" should {
    "produce the correct answer under variable elimination with dependent universe reasoning" taggedAs (ExampleTest) in {
      // Probability-of-evidence estimator executed inside each dependent universe.
      def peAlg(universe: Universe, evidence: List[NamedEvidence[_]]) = () => ProbEvidenceSampler.computeProbEvidence(1000000, evidence)(universe)
      test((dependentUniverses: List[(Universe, List[NamedEvidence[_]])], element: Element[Source]) =>
        VariableElimination(dependentUniverses, peAlg _, element))
    }
  }
  // A physical source that a sample may have come from.
  class Source(val name: String) {
    override val toString = name
  }
  // A measurement sample; `fromSource` is the (uncertain) source it came from.
  abstract class Sample(val name: String) {
    val fromSource: Element[Source]
    override val toString = name
  }
  // Pairs a candidate source with a sample. All elements live in a dedicated
  // universe so their evidence can be weighed separately per pairing.
  class Pair(val source: Source, val sample: Sample) {
    val universe = new Universe(List(sample.fromSource))
    val isTheRightSource = Apply(sample.fromSource, (s: Source) => s == source)("", universe)
    // The distance distribution depends on whether the pairing is correct.
    val rightSourceDistance = Normal(0.0, 1.0)("", universe)
    val wrongSourceDistance = Uniform(0.0, 10.0)("", universe)
    val distance = If(isTheRightSource, rightSourceDistance, wrongSourceDistance)("distance", universe)
  }
  // Builds the model, attaches interval evidence per pair, runs the supplied
  // algorithm, and checks the posterior against the hand-computed answer.
  def test(algorithmCreator: (List[(Universe, List[NamedEvidence[_]])], Element[Source]) => ProbQueryAlgorithm) {
    Universe.createNew()
    val source1 = new Source("Source 1")
    val source2 = new Source("Source 2")
    val source3 = new Source("Source 3")
    val sample1 = new Sample("Sample 1") { val fromSource = Select(0.5 -> source1, 0.5 -> source2) }
    val sample2 = new Sample("Sample 2") { val fromSource = Select(0.9 -> source1, 0.1 -> source3) }
    val pair1 = new Pair(source1, sample1)
    val pair2 = new Pair(source2, sample1)
    val pair3 = new Pair(source1, sample2)
    val pair4 = new Pair(source3, sample2)
    val values = Values()
    val samples = List(sample1, sample2)
    // Two samples that share a possible source must not claim the same source.
    for {
      (firstSample, secondSample) <- upperTriangle(samples)
      sources1 = values(firstSample.fromSource)
      sources2 = values(secondSample.fromSource)
      if sources1.intersect(sources2).nonEmpty
    } {
      ^^(firstSample.fromSource, secondSample.fromSource).addCondition((p: (Source, Source)) => p._1 != p._2)
    }
    // Observed distance intervals for each (source, sample) pair.
    val condition1 = (d: Double) => d > 0.5 && d < 0.6
    val condition2 = (d: Double) => d > 1.5 && d < 1.6
    val condition3 = (d: Double) => d > 2.5 && d < 2.6
    val condition4 = (d: Double) => d > 0.5 && d < 0.6
    val evidence1 = List(NamedEvidence("distance", Condition(condition1)))
    val evidence2 = List(NamedEvidence("distance", Condition(condition2)))
    val evidence3 = List(NamedEvidence("distance", Condition(condition3)))
    val evidence4 = List(NamedEvidence("distance", Condition(condition4)))
    val dependent1 = (pair1.universe, evidence1)
    val dependent2 = (pair2.universe, evidence2)
    val dependent3 = (pair3.universe, evidence3)
    val dependent4 = (pair4.universe, evidence4)
    // Uniform probability of each range = 0.1 / 10.0 = 0.01
    // Normal cdf:
    // 0.5: 0.6915
    // 0.6: 0.7257
    // 1.5: 0.9332
    // 1.6: 0.9452
    // 2.5: 0.9938
    // 2.6: 0.9953
    // Normal probability of (0.5, 0.6) = 0.7257 - 0.6915 = 0.0342
    // Normal probability of (1.5, 1.6) = 0.9452 - 0.9332 = 0.012
    // Normal probability of (2.5, 2.6) = 0.9953 - 0.9938 = 0.0015
    // Code: pxy = Probability sample 1 is from source x and sample 2 is from source y
    val p13 = 0.5 * 0.1 * 0.0342 * 0.0342
    val p21 = 0.5 * 0.9 * 0.012 * 0.0015
    val p23 = 0.5 * 0.1 * 0.012 * 0.0342
    val answer = p13 / (p13 + p21 + p23)
    val alg = algorithmCreator(List(dependent1, dependent2, dependent3, dependent4), sample1.fromSource)
    alg.start()
    alg.probability(sample1.fromSource, source1) should be(answer +- 0.01)
    alg.kill
  }
}
| wkretschmer/figaro | Figaro/src/test/scala/com/cra/figaro/test/example/SourcesTest.scala | Scala | bsd-3-clause | 4,556 |
/******************************
*
* DeepDive.run cannot be called more than once in integration
* tests, so we run tests one by one to cover them.
*
******************************/
package org.deepdive.test.integration
import anorm._
import com.typesafe.config._
import org.deepdive.test._
import org.deepdive.Context
import org.deepdive._
import org.deepdive.settings._
import org.deepdive.datastore._
import org.scalatest._
import org.deepdive.Logging
import org.deepdive.helpers.Helpers
import org.deepdive.test.helpers.TestHelper
import java.io._
import scala.sys.process._
import scalikejdbc.ConnectionPool
/** Text chunking with linear chain CRF. Test whether we get a reasonable F1 score.
*
* Please refer to examples/chunking for more details.
*/
/** End-to-end chunking integration test: loads CoNLL-style training/test data
  * into the configured database, runs DeepDive with a linear-chain CRF model,
  * and checks the resulting F1 score reported by the conlleval script.
  */
class ChunkingApp extends FunSpec with Logging{
  val config = ConfigFactory.parseString(getConfig).withFallback(ConfigFactory.load).resolve()
  val env = TestHelper.getTestEnv()
  // NOTE(review): non-exhaustive match — an env other than Psql/Mysql throws
  // MatchError at suite construction; confirm that is intended.
  val ds = env match {
    case TestHelper.Psql => PostgresDataStore
    case TestHelper.Mysql => MysqlDataStore
  }
  /** prepare data: creates the raw/feature tables and bulk-loads the fixtures. */
  def prepareData() {
    Helpers.executeCmd("rm -f out/test_chunking/tmp/*")
    JdbcDataStore.init(config)
    env match {
      case TestHelper.Psql =>
        ds.withConnection { implicit conn =>
          ds.executeSqlQueries("drop schema if exists public cascade; create schema public;")
          ds.executeSqlQueries("""create table words_raw(
            word_id bigserial,
            word text,
            pos text,
            tag text,
            id bigint);""")
          ds.executeSqlQueries("""create table words(
            sent_id bigint,
            word_id bigint,
            word text,
            pos text,
            true_tag text,
            tag int,
            id bigint);""")
          ds.executeSqlQueries("""create table word_features(
            word_id bigint,
            feature text,
            id bigint);""")
          ds.executeSqlQueries(s"""copy words_raw(word, pos, tag) from '${getClass.getResource("/chunking/data/train_null_terminated.txt").getFile}'
            delimiter ' ';""")
          ds.executeSqlQueries(s"""copy words_raw(word, pos, tag) from '${getClass.getResource("/chunking/data/test_null_terminated.txt").getFile}'
            delimiter ' ';""")
        }
      case TestHelper.Mysql =>
        ds.withConnection { implicit conn =>
          ds.executeSqlQueries("""create table words_raw(
            word_id bigint primary key auto_increment,
            word text,
            pos text,
            tag text,
            id bigint);""")
          ds.executeSqlQueries("""create table words(
            sent_id bigint,
            word_id bigint,
            word text,
            pos text,
            true_tag text,
            tag int,
            id bigint);""")
          ds.executeSqlQueries("""create table word_features(
            word_id bigint,
            feature text,
            id bigint);""")
          ds.executeSqlQueries(s"""LOAD DATA INFILE '${getClass.getResource("/chunking/data/train_null_terminated.txt").getFile}'
            INTO TABLE words_raw FIELDS TERMINATED BY ' ' (word, pos, tag);""")
          ds.executeSqlQueries(s"""LOAD DATA INFILE '${getClass.getResource("/chunking/data/test_null_terminated.txt").getFile}'
            INTO TABLE words_raw FIELDS TERMINATED BY ' ' (word, pos, tag);""")
        }
      case _ =>
    }
    JdbcDataStore.close()
  }
  // Adjacent-word pairs fed to the feature extractor.
  def query1 = s"""${"\\"\\"\\""}
    select w1.word_id as "w1.word_id", w1.word as "w1.word", w1.pos as "w1.pos",
    w2.word as "w2.word", w2.pos as "w2.pos"
    from words w1, words w2
    where w1.word_id = w2.word_id + 1 and w1.word is not null ${"\\"\\"\\""}"""
  // Word/feature join used by the unary feature factor.
  def query2 = s"""${"\\"\\"\\""}
    select words.id as "words.id", words.tag as "words.tag", word_features.feature as "feature"
    from words, word_features
    where words.word_id = word_features.word_id and words.word is not null ${"\\"\\"\\""}"""
  // Consecutive tag pairs used by the linear-chain CRF factor.
  def query3 = s"""${"\\"\\"\\""}
    select w1.id as "words.w1.id", w2.id as "words.w2.id", w1.tag as "words.w1.tag", w2.tag as "words.w2.tag"
    from words w1, words w2
    where w2.word_id = w1.word_id + 1 ${"\\"\\"\\""}"""
  /** application.conf configuration */
  def getConfig = s"""
    deepdive {
      db.default {
        driver: ${TestHelper.getDriverFromEnv()}
        url: "${System.getenv("DBCONNSTRING")}"
        user: "${System.getenv("DBUSER")}"
        password: "${System.getenv("DBPASSWORD")}"
        dbname: "${System.getenv("DBNAME")}"
        host: "${System.getenv("DBHOST")}"
        port: "${System.getenv("DBPORT")}"
      }
      schema.variables {
        words.tag: Categorical(13)
      }
      extraction.extractors {
        # extract training data
        ext_training {
          style: "tsv_extractor"
          input: "select * from words_raw"
          output_relation: "words"
          udf: "${getClass.getResource("/chunking/udf/ext_training.py").getFile}"
        }
        # create index
        ext_index {
          dependencies: ["ext_training"]
          style: "sql_extractor"
          sql: "create index words_word_id_idx on words(word_id);"
        }
        # add features
        ext_features.style: "tsv_extractor"
        ext_features.input: ${query1}
        ext_features.output_relation: "word_features"
        ext_features.udf: "${getClass.getResource("/chunking/udf/ext_features.py").getFile}"
        ext_features.dependencies: ["ext_index"]
      }
      inference.factors {
        factor_feature {
          input_query: ${query2}
          function: "Multinomial(words.tag)"
          weight: "?(feature)"
        }
        factor_linear_chain_crf {
          input_query: ${query3}
          function: "Multinomial(words.w1.tag, words.w2.tag)"
          weight: "?"
        }
      }
      calibration: {
        holdout_query: "INSERT INTO dd_graph_variables_holdout(variable_id) SELECT id FROM words WHERE word_id > 50078"
      }
      inference.parallel_grounding: ${System.getenv("PARALLEL_GROUNDING") match {
        case "true" | "1" | "True" | "TRUE" => "true"
        case _ => "false"
      }}
    }
  """
  /** Process DeepDive's results: picks the most probable tag per word, unloads
    * them to a temp file, and returns the score parsed from conlleval output. */
  def processResults() : Double = {
    JdbcDataStore.init(config)
    val resultFile = File.createTempFile("result", "")
    resultFile.setWritable(true, false)
    ds.withConnection { implicit conn =>
      ds.executeSqlQueries("""drop table if exists result cascade;""")
      ds.executeSqlQueries("""create table result
        (word_id bigint, word text, pos text, true_tag text, tag text);""")
      ds.executeSqlQueries("""insert into result
        select b.word_id, b.word, b.pos, b.true_tag, b.category
        from (select word_id, max(expectation) as m
          from words_tag_inference group by word_id
          ) as a inner join words_tag_inference as b
        on a.word_id = b.word_id and a.m = b.expectation;""")
      // Use DataLoader to unload result data into file
      val dbSettings = Settings.loadFromConfig(config).dbSettings
      val du = new DataLoader
      du.unload(resultFile.getName, resultFile.getAbsolutePath,
        dbSettings, //
        false, // usingGreenPlum
        """select word, pos, true_tag, max(tag) from result
          group by word_id, word, pos, true_tag order by word_id""")
    }
    JdbcDataStore.close()
    val converter = s"""${getClass.getResource("/chunking/convert.py").getFile}"""
    val evaluator = s"""${getClass.getResource("/chunking/conlleval.pl").getFile}"""
    // TODO: this sometimes stuck forever. It's a bug not reproducible.
    val cmd = s"python ${converter} ${resultFile.getAbsolutePath()}" #| s"perl ${evaluator}"
    log.debug(s"Executing evaluation command: ${cmd}")
    val f1 = cmd.!!
    resultFile.delete()
    f1.toDouble
  }
  describe("Chunking with linear chain CRF") {
    // NOTE(review): the test name says "> 0.8" but the assertion below uses 80;
    // presumably the score is on a 0-100 scale — confirm against conlleval.
    it("should get F1 score > 0.8") {
      // Assume GP is not running on the system, or skip this test
      assume("which gpfdist".! != 0)
      prepareData()
      DeepDive.run(config, "out/test_chunking")
      // Make sure the data is in the database
      val f1 = processResults()
      assert(f1 > 80)
    }
  }
}
| gaapt/deepdive | src/test/scala/integration/Chunking.scala | Scala | apache-2.0 | 8,220 |
package sp.areus
import akka.actor._
import sp.system.messages._
import sp.domain._
import sp.domain.Logic._
import akka.pattern.ask
import scala.concurrent.duration._
import akka.util.Timeout
/**
* Created by Kristofer on 2014-06-27.
*/
class DelmiaV5Service(modelHandler: ActorRef) extends Actor {
  implicit val timeout = Timeout(1 seconds)
  import context.dispatcher
  // NOTE(review): only `None` is currently handled (and the "TODO" result is
  // discarded); any other message is unhandled. The original Request handling
  // is kept below, commented out, pending rework.
  def receive = {
    case None => "TODO"
//    case Request(_, attr) => {
//      val reply = sender
//      extract(attr) match {
//        case Some((xmlString, name)) => {
//
//          val areus = scala.xml.XML.loadString(xmlString)
//
//          val items: Seq[IDAble] = areus \\\\ "Resource" flatMap { resource =>
//
//            val als = resource \\\\ "ActivityList"
//
//            val opsXML = als filter {a =>
//              val acts = a \\ "Activity"
//              acts.nonEmpty && acts.head.attribute("ActivityType") == Some(xml.Text("DNBRobotMotionActivity"))
//            }
//            //println(opsXML.size)
//
//            import sp.domain._
//
//            val ops = opsXML flatMap {n =>
//              val name = n.attribute("Task").map(_.toString).getOrElse("noName")
//              val nOP = Operation(name)
//              val opChildren = n.child.collect {
//                case c if c.attribute("Operation") != None => {
//                  val cName = c.attribute("Operation").map(_.text).getOrElse("noName")
//                  val t = toAttr(c)
//                  Operation(cName, List(), SPAttributes(t.asMap.get))
//                }
//              }.toList
//              val chIDs = ListPrimitive(opChildren.map(SPAttributeValue.apply))
//              val upd = nOP.copy(attributes = nOP.attributes + ("children" -> chIDs))
//              upd :: opChildren
//            }
//
//            //println(ops.head)
//
//
//            val sopsXML = als filter {a =>
//              val acts = a \\ "Activity"
//              acts.nonEmpty && acts.head.attribute("ActivityType") == Some(xml.Text("DNBIgpCallRobotTask"))
//            }
//
//            val sopspecs = (sopsXML map { n =>
//              val name = n.attribute("Task").map(_.toString).getOrElse("noName")
//              val opChildren = n.child.collect {
//                case c if {
//                  val cn = (c \\ "CallName")
//                  cn.nonEmpty && ops.exists(_.name == cn.text)
//                } => {
//                  val cName = c \\ "CallName" text;
//                  ops.find(_.name == cName).get
//
//                }
//              }.toList
//              val sop = Sequence(opChildren.map(SOP.apply):_*)
//              SOPSpec(name, List(sop))
//            }).toList
//
//
//
//            val items: List[IDAble] = ops.toList ++ sopspecs
//            val resourceChildren = ListPrimitive(items.filter(x => x.isInstanceOf[Operation] && x.attributes.attrs.contains("children")).map(x => IDPrimitive(x.id)))
//
//            val resXML = resource \\ "GeneralInfo"
//
//            val res = resXML.map { r =>
//              val name = (r \\ "ResourceName") text
//              val attr = toAttr(r) match {
//                case m: MapPrimitive => m.asSPAttributes + ("kind" -> "resource")
//                case x @ _ => Attr("value" -> x)
//              }
//              Thing(name, attr + ("children" -> resourceChildren))
//            }
//
//            val childrenIds = sopspecs.map(x => IDPrimitive(x.id)) ++ List(IDPrimitive(res.head.id))
//
//            val resSpec = SPSpec(res.head.name, Attr("children" -> ListPrimitive(childrenIds)))
//            resSpec :: items ++ res
//
//          }
//
//
//
//          println(name)
//
//          val id = ID.newID
//          val n = name.flatMap(_.asString).getOrElse("noName")
//          for {
//            model <- (modelHandler ? CreateModel(id, n, Attr("attributeTags"-> MapPrimitive(Map()), "conditionGroups"-> ListPrimitive(List())))).mapTo[ModelInfo]
//            items <- modelHandler ? UpdateIDs(id, model.version, items.toList)
//          } yield {
//            println(s"MADE IT: $model")
//            reply ! model.model.toString
//
//
//          }
//
//
//
//        }
//        case None => sender ! errorMessage(attr)
//      }
//    }
  }
  // Pulls the raw XML payload and optional model name out of the request attributes.
  def extract(attr: SPAttributes) = {
    for {
      xml <- attr.getAs[String]("file")
    } yield (xml, attr.get("name"))
  }
  // Builds an SPError describing which request parameters were missing.
  def errorMessage(attr: SPAttributes) = {
    SPError("The request is missing parameters: \\n" +
      s"file: ${attr.getAs[String]("file")}" + "\\n" +
      s"Request: ${attr}" )
  }
//  import scala.xml._
//
//  def toAttr(n : Node): SPValue = {
//    val attr = n.attributes.asAttrMap.map{case (k, v) => k -> StringPrimitive(v)}
//    val value: SPAttributeValue = {
//      if (n.child.count(_.isInstanceOf[Text]) == 1) {
//        val value = StringPrimitive(n.text)
//        if (attr.isEmpty) value
//        else MapPrimitive(attr + ("value"->value))
//      }
//      else {
//        val children = n.child //.filter(n => n.isInstanceOf[Text] || n.isInstanceOf[Elem])
//        val fold = children.foldLeft(Map[String,List[SPAttributeValue]]()){
//          case (aggr, e: Elem) => {
//            val newAttr = toAttr(e)
//            val prev = aggr.getOrElse(e.label, List())
//            val xs = if (newAttr != MapPrimitive(Map())) newAttr :: prev else prev
//            aggr + (e.label -> xs)
//          }
//          case (aggr, t: Text) => aggr
//        }
//
//        val map = fold collect {
//          case (k, x :: Nil) => k -> x
//          case (k, x :: xs) => k -> ListPrimitive(x :: xs)
//        }
//        MapPrimitive(map ++ attr)
//      }
//
//    }
//    value
//  }
}
object DelmiaV5Service{
  // Props factory so callers never construct the actor class directly.
  def props(modelHandler: ActorRef) = Props(classOf[DelmiaV5Service], modelHandler)
}
| kristoferB/SP | sp1/src/main/scala/sp/areus/DelmiaV5Service.scala | Scala | mit | 5,815 |
package querious
import TestUtils._
import fastparse.core.Parsed
import fastparse.core.Parsed.{Failure, Success}
import hedgehog._
import hedgehog.runner._
import eu.timepit.refined.auto._
/**
* @author Kevin Lee
* @since 2017-07-22
*/
object NumberParserSpec extends Properties {
  // Registry of every property/example run by the hedgehog runner.
  override def tests: List[Test] = List(
    property(
      s"test digits between 0 and ${Int.MaxValue}",
      testParseDigitsBetweenZeroAndMax
    ),
    property(
      s"test digits between 0 and ${Int.MaxValue} (captured)",
      testParseDigitsBetweenZeroAndMaxCaptured
    ),
    property(
      s"test numbers between ${Long.MinValue} and ${Long.MaxValue}",
      testParseNumbersBetweenMinAndMax
    ),
    property(
      "Parsers.Digit(digit char) should return true",
      testParsersDigitWithDigitCharShouldReturnTrue
    ),
    property(
      "Parsers.Digit(non digit char) should return false",
      testParsersDigitWithNonDigitCharShouldReturnFalse
    ),
    property(
      "Parsers.digits.parse(digit String) should return Success((), the length of the String)",
      testParsersDigitsParseWithDigitStringShouldReturnSuccessWithLengthOfString
    ),
    property(
      "Parsers.digits.parserOfString.parse(digit String) should return Success(digit String, the length of the String)",
      testParsersDigitsParserOfStringParseWithDigitStringShouldReturnSuccessWithLengthOfString
    ),
    example(
      """Parsers.digits.parse("") should return Failure(_, 0, _)""",
      testParsersDigitsParseWithAnEmptyStringShouldReturnFailure
    ),
    property(
      "Parsers.digits.parse(non-negative int in String) should return Success((), the length of the String)",
      testParsersDigitsParseWithNonNegativeIntStringShouldReturnSuccessWithLengthOfString
    ),
    property(
      "Parsers.digits.parserOfString.parse(non-negative int in String) should return Success(int parsed in String, the length of the String)",
      testParsersDigitsParserOfStringParseWithNonNegativeIntStringShouldReturnSuccessWithStringAndLengthOfString
    )
  )
  // `digits` consumes the full decimal rendering of any non-negative Int.
  def testParseDigitsBetweenZeroAndMax: Property =
    for {
      n <- Gen.int(Range.linear(0, Int.MaxValue)).log("n")
    } yield {
      val input = n.toString
      val expected: Parsed[Unit, Char, String] = Success((), input.length)
      val actual: Parsed[Unit, Char, String] = Parsers.digits.parse(input)
      actual ==== expected
    }
  // Same as above but capturing the consumed text via parserOfString.
  def testParseDigitsBetweenZeroAndMaxCaptured: Property =
    for {
      n <- Gen.int(Range.linear(0, Int.MaxValue)).log("n")
    } yield {
      val input = n.toString
      val expected: Parsed[String, Char, String] = Success(input, input.length)
      val actual: Parsed[String, Char, String] =
        Parsers.digits.parserOfString.parse(input)
      actual ==== expected
    }
  // `numbers` yields the BigDecimal value of any Long's decimal rendering.
  def testParseNumbersBetweenMinAndMax: Property =
    for {
      n <- Gen.long(Range.linear(Long.MinValue, Long.MaxValue)).log("n")
    } yield {
      val input = n.toString
      val expected: Parsed[BigDecimal, Char, String] =
        Success(BigDecimal(input), input.length)
      val actual: Parsed[BigDecimal, Char, String] =
        Parsers.numbers.parse(input)
      actual ==== expected
    }
  def testParsersDigitWithDigitCharShouldReturnTrue: Property =
    for {
      n <- Gen.digit.log("n")
    } yield {
      val actual = Parsers.Digit(n)
      actual ==== true
    }
  def testParsersDigitWithNonDigitCharShouldReturnFalse: Property =
    for {
      c <- Gen.alpha.log("c")
    } yield {
      val actual = Parsers.Digit(c)
      actual ==== false
    }
  def testParsersDigitsParseWithDigitStringShouldReturnSuccessWithLengthOfString: Property = for {
    digitString <- Gens.genNonEmptyDigitString(20).log("digitString")
  } yield {
    val expected: Parsed[Unit, Char, String] = Success((), digitString.value.length)
    val actual = Parsers.digits.parse(digitString.value)
    actual ==== expected
  }
  def testParsersDigitsParserOfStringParseWithDigitStringShouldReturnSuccessWithLengthOfString: Property = for {
    digitString <- Gens.genNonEmptyDigitString(20).log("digitString")
  } yield {
    val expected: Parsed[String, Char, String] = Success(digitString.value, digitString.value.length)
    val actual = Parsers.digits.parserOfString.parse(digitString.value)
    actual ==== expected
  }
  // Empty input must fail at index 0 without consuming anything.
  def testParsersDigitsParseWithAnEmptyStringShouldReturnFailure: Result = {
    val actual = Parsers.digits.parse("")
    (actual matchPattern { case Failure(_, 0, _) => })
      .log(s"Expect Failure(_, 0, _) but got $actual instead.")
  }
  def testParsersDigitsParseWithNonNegativeIntStringShouldReturnSuccessWithLengthOfString: Property = for {
    nonNegativeInt <- Gen.int(Range.linear(0, Int.MaxValue)).log("nonNegativeInt")
  } yield {
    val input = nonNegativeInt.toString
    val expected: Parsed[Unit, Char, String] = Success((), input.length)
    val actual = Parsers.digits.parse(input)
    actual ==== expected
  }
  def testParsersDigitsParserOfStringParseWithNonNegativeIntStringShouldReturnSuccessWithStringAndLengthOfString: Property = for {
    nonNegativeInt <- Gen.int(Range.linear(0, Int.MaxValue)).log("nonNegativeInt")
  } yield {
    val input = nonNegativeInt.toString
    val expected: Parsed[String, Char, String] = Success(input, input.length)
    val actual = Parsers.digits.parserOfString.parse(input)
    actual ==== expected
  }
}
| Kevin-Lee/sql-parser-scala | src/test/scala/querious/NumberParserSpec.scala | Scala | mit | 5,392 |
package com.chrisomeara.pillar
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.mock.MockitoSugar
import org.mockito.Mockito._
import com.datastax.driver.core.Session
import java.util.Date
/** Verifies that ReportingMigration notifies the Reporter and delegates the
  * actual execution to the wrapped Migration, for both up and down statements.
  */
class ReportingMigrationSpec extends FunSpec with ShouldMatchers with MockitoSugar {
  val reporter = mock[Reporter]
  val wrapped = mock[Migration]
  val migration = new ReportingMigration(reporter, wrapped)
  val session = mock[Session]
  describe("#executeUpStatement") {
    // NOTE(review): invoked during suite construction, before the `it` blocks
    // below verify the recorded interactions on the mocks.
    migration.executeUpStatement(session)
    it("reports the applying action") {
      verify(reporter).applying(wrapped)
    }
    it("delegates to the wrapped migration") {
      verify(wrapped).executeUpStatement(session)
    }
  }
  describe("#executeDownStatement") {
    // Same pattern as above: execute once, then verify both interactions.
    migration.executeDownStatement(session)
    it("reports the reversing action") {
      verify(reporter).reversing(wrapped)
    }
    it("delegates to the wrapped migration") {
      verify(wrapped).executeDownStatement(session)
    }
  }
}
| smr-co-uk/pillar | src/test/scala/com/chrisomeara/pillar/ReportingMigrationSpec.scala | Scala | mit | 1,036 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.attachments
import models.api.{AttachmentType, IdentityEvidence, TransactorIdentityEvidence, VAT2, VAT51, VAT5L}
import org.jsoup.Jsoup
import org.jsoup.nodes.Document
import views.VatRegViewSpec
import views.html.attachments.EmailCoverSheet
class EmailCoverSheetViewSpec extends VatRegViewSpec {
val testRef = "VRN12345689"
val testAttachments: List[AttachmentType] = List[AttachmentType](VAT2, VAT51, IdentityEvidence, VAT5L)
val testVat2: List[AttachmentType] = List[AttachmentType](VAT2)
lazy val view: EmailCoverSheet = app.injector.instanceOf[EmailCoverSheet]
  // Expected static page content; values mirror the rendered messages.
  object ExpectedContent {
    val heading = "How to email documents to HMRC"
    val title = s"$heading - Register for VAT - GOV.UK"
    val para1 = "The subject line of the email must include your Register for VAT reference number. This will enable us to match your online application to your supporting documents."
    val panel1 = s"Register for VAT reference number: $testRef"
    val heading2 = "What you must attach to the email"
    val para2 = "You must send us additional documents in order for us to process this VAT application:"
    val heading3 = "Email address"
    val vat2Bullet = "a completed VAT2 form (opens in new tab) to capture the details of all the partners"
    val vat51Bullet = "a completed VAT 50/51 form (opens in new tab) to provide us with details of the VAT group, including details of each subsidiary"
    val vat5LBullet = "a completed VAT5L form (opens in new tab)"
    val idEvidence = "three documents to confirm your identity"
    // Transactor flow variant: the bullet names the person being identified.
    def idEvidenceNamed(name: String) = s"three documents to confirm $name’s identity"
    val para3 = "Send the supporting documents to:"
    val panel2 = "VATREGBETA@hmrc.gov.uk"
    val print = "Print this page"
    val transactorName = "Transactor Name"
    val applicantName = "Applicant Name"
  }
object IdentityEvidenceBlock {
val summary = "What identity documents can I provide?"
val content: String = "Include a copy of one piece of evidence that includes a government issued photo. This could be a: " +
"passport " +
"driving licence photocard " +
"national identity card " +
"And " +
"Also include two additional pieces of evidence which can be copies of a: " +
"mortgage statement " +
"lease or rental agreement " +
"work permit or visa " +
"letter from the Department for Work and Pensions for confirming entitlement to benefits " +
"utility bill " +
"birth certificate"
}
"The Email Cover Sheet page" must {
implicit val doc: Document = Jsoup.parse(view(testRef, testAttachments, None, None).body)
"have the correct heading" in new ViewSetup {
doc.heading mustBe Some(ExpectedContent.heading)
}
"have the correct panel text" in new ViewSetup {
doc.select(Selectors.indent).get(0).text mustBe ExpectedContent.panel1
}
"have the correct heading2" in new ViewSetup {
doc.headingLevel2(1) mustBe Some(ExpectedContent.heading2)
}
"have the correct heading3" in new ViewSetup {
doc.headingLevel2(2) mustBe Some(ExpectedContent.heading3)
}
"have the correct page title" in new ViewSetup {
doc.title mustBe ExpectedContent.title
}
"have the correct paragraph1 text" in new ViewSetup {
doc.para(1) mustBe Some(ExpectedContent.para1)
}
"have the correct paragraph2 text" in new ViewSetup {
doc.para(2) mustBe Some(ExpectedContent.para2)
}
"have the correct paragraph3 text" in new ViewSetup {
doc.para(3) mustBe Some(ExpectedContent.para3)
}
"not show the identity documents bullet point when attachment list does not contain IdentityEvidence" in new ViewSetup {
override val doc: Document = Jsoup.parse(view(testRef, testVat2, None, None).body)
doc.unorderedList(1) mustBe List(ExpectedContent.vat2Bullet)
doc.unorderedList(1) mustNot contain(ExpectedContent.idEvidence)
}
"not show the vat51 bullet point when attachment list does not contain VAT51" in new ViewSetup {
override val doc: Document = Jsoup.parse(view(testRef, testVat2, None, None).body)
doc.unorderedList(1) mustBe List(ExpectedContent.vat2Bullet)
doc.unorderedList(1) mustNot contain(ExpectedContent.vat51Bullet)
}
"not show the vat5L bullet point when attachment list does not contain VAT5L" in new ViewSetup {
override val doc: Document = Jsoup.parse(view(testRef, testVat2, None, None).body)
doc.unorderedList(1) mustBe List(ExpectedContent.vat2Bullet)
doc.unorderedList(1) mustNot contain(ExpectedContent.vat5LBullet)
}
"have the correct first bullet list" in new ViewSetup {
doc.unorderedList(1) mustBe List(
ExpectedContent.vat2Bullet,
ExpectedContent.vat51Bullet,
ExpectedContent.idEvidence,
ExpectedContent.vat5LBullet
)
}
"have the correct first bullet list for the transactor flow" when {
"transactor is unverified" in new ViewSetup {
override val doc: Document = Jsoup.parse(view(testRef, List(TransactorIdentityEvidence), None, Some(ExpectedContent.transactorName)).body)
doc.unorderedList(1) mustBe List(
ExpectedContent.idEvidenceNamed(ExpectedContent.transactorName)
)
}
"applicant is unverified" in new ViewSetup {
override val doc: Document = Jsoup.parse(view(testRef, List(IdentityEvidence), Some(ExpectedContent.applicantName), None).body)
doc.unorderedList(1) mustBe List(
ExpectedContent.idEvidenceNamed(ExpectedContent.applicantName)
)
}
"both are unverified" in new ViewSetup {
override val doc: Document = Jsoup.parse(view(testRef, List(IdentityEvidence, TransactorIdentityEvidence), Some(ExpectedContent.applicantName), Some(ExpectedContent.transactorName)).body)
doc.unorderedList(1) mustBe List(
ExpectedContent.idEvidenceNamed(ExpectedContent.applicantName),
ExpectedContent.idEvidenceNamed(ExpectedContent.transactorName)
)
}
}
"have a details block" in new ViewSetup {
doc.details mustBe Some(Details(IdentityEvidenceBlock.summary, IdentityEvidenceBlock.content))
}
"have the correct panel text two" in new ViewSetup {
doc.select(Selectors.indent).get(1).text mustBe ExpectedContent.panel2
}
"have a print button" in new ViewSetup {
doc.submitButton mustBe Some(ExpectedContent.print)
}
}
}
| hmrc/vat-registration-frontend | test/views/attachments/EmailCoverSheetViewSpec.scala | Scala | apache-2.0 | 7,112 |
package breeze.linalg.operators
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.generic.{UFunc, MMRegistry2}
import breeze.generic.UFunc.InPlaceImpl2
import scala.reflect.ClassTag
/**
* This is a special kind of BinaryUpdateOp that supports registration
* of specialized implementations for a given operation.
* @author dlwh
*/
// This trait could reuse code from Multimethod2, but not doing so allows us to reduce code size a lot
// because we don't need BinaryOp's to inherit from Function2, which has a lot of @specialzied cruft.
/**
 * Runtime multimethod dispatch for in-place binary operations: implementations are
 * registered per (class of A, class of B) pair and looked up from the operands'
 * runtime classes, with resolution results (including misses) memoized in `cache`.
 */
trait BinaryUpdateRegistry[A <: AnyRef, B, Op <: OpType] extends UFunc.InPlaceImpl2[Op, A, B] with MMRegistry2[UFunc.InPlaceImpl2[Op, _ <: A, _ <: B]] {

  /** Called when no implementation is registered for the runtime classes of `a` and `b`. */
  protected def bindingMissing(a: A, b: B): Unit = throw new UnsupportedOperationException("Types not found!" + a + b + " " + ops)

  /** Called when several registered implementations are equally specific for `a` and `b`. */
  protected def multipleOptions(a: A, b: B, m: Map[(Class[_], Class[_]), UFunc.InPlaceImpl2[Op, _ <: A, _ <: B]]): Unit = {
    throw new RuntimeException("Multiple bindings for method: " + m)
  }

  /**
   * Dispatches the in-place operation based on the operands' runtime classes.
   *
   * Note: explicit `: Unit =` replaces the deprecated procedure syntax of the
   * original; behavior is unchanged.
   */
  def apply(a: A, b: B): Unit = {
    val ac = a.asInstanceOf[AnyRef].getClass
    val bc = b.asInstanceOf[AnyRef].getClass

    // `cache` is a plain (Java-style) map: null means "never resolved",
    // Some/None record a previous hit/miss for this class pair.
    val cached = cache.get(ac -> bc)
    if (cached != null) {
      cached match {
        case None => bindingMissing(a, b)
        case Some(m) =>
          m.asInstanceOf[InPlaceImpl2[Op, A, B]].apply(a, b)
      }
    } else {
      val options = resolve(ac, bc.asInstanceOf[Class[_ <: B]])
      options.size match {
        case 0 =>
          // Remember the miss so repeated failing lookups stay cheap.
          cache.put(ac -> bc, None)
          bindingMissing(a, b)
        case 1 =>
          val method = options.values.head
          cache.put(ac -> bc, Some(method))
          method.asInstanceOf[InPlaceImpl2[Op, A, B]].apply(a, b)
        case _ =>
          // Several candidates: dispatch only if a unique most-specific one exists.
          val selected = selectBestOption(options)
          if (selected.size != 1)
            multipleOptions(a, b, options)
          else {
            val method = selected.values.head
            cache.put(ac -> bc, Some(method))
            method.asInstanceOf[InPlaceImpl2[Op, A, B]].apply(a, b)
          }
      }
    }
  }

  /** Registers `op` for the operand classes captured by the implicit ClassTags. */
  def register[AA <: A, BB <: B](op: InPlaceImpl2[Op, AA, BB])(implicit cA: ClassTag[AA], cB: ClassTag[BB]): Unit = {
    super.register(cA.runtimeClass, cB.runtimeClass, op)
  }
}
| chen0031/breeze | math/src/main/scala/breeze/linalg/operators/BinaryUpdateRegistry.scala | Scala | apache-2.0 | 2,757 |
/*
* Copyright 2016 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.gridfs.helpers
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.nio.ByteBuffer
import org.mongodb.scala.RequiresMongoDBISpec
import org.mongodb.scala.gridfs.helpers.AsyncStreamHelper._
import org.mongodb.scala.gridfs.{AsyncInputStream, AsyncOutputStream, GridFSBucket}
import org.scalatest.Inspectors.forEvery
// Integration spec: round-trips the same payload through GridFS via every
// AsyncStreamHelper conversion (byte array, ByteBuffer, java.io streams).
class AsyncStreamHelperSpec extends RequiresMongoDBISpec {
  // Payload shared by all fixtures.
  val content = "Hello GridFS Round Trip".getBytes()
  behavior of "AsyncStreamHelper"
  // One fixture per helper conversion: a source to upload from, a destination
  // to download into, and a predicate confirming the bytes survived the trip.
  trait SourceAndDestination[T, R] {
    val description: String
    val sourceData: T
    val destinationData: R
    def inputStream: AsyncInputStream
    def outputStream: AsyncOutputStream
    def roundTripped: Boolean
  }
  val arrayOfBytesData = new SourceAndDestination[Array[Byte], Array[Byte]] {
    override val description: String = "Array[Byte]"
    override val sourceData: Array[Byte] = content
    override val destinationData: Array[Byte] = new Array[Byte](content.length)
    override def inputStream: AsyncInputStream = toAsyncInputStream(sourceData)
    override def outputStream: AsyncOutputStream = toAsyncOutputStream(destinationData)
    override def roundTripped: Boolean = sourceData sameElements destinationData
  }
  val byteBufferData = new SourceAndDestination[ByteBuffer, ByteBuffer] {
    override val description: String = "ByteBuffer"
    override val sourceData: ByteBuffer = ByteBuffer.wrap(content)
    override val destinationData: ByteBuffer = ByteBuffer.allocate(content.length)
    override def inputStream: AsyncInputStream = toAsyncInputStream(sourceData)
    override def outputStream: AsyncOutputStream = toAsyncOutputStream(destinationData)
    override def roundTripped: Boolean = sourceData == destinationData
  }
  val inputOutputStreams = new SourceAndDestination[ByteArrayInputStream, ByteArrayOutputStream] {
    override val description: String = "InputStream and OutputStream"
    override val sourceData: ByteArrayInputStream = new ByteArrayInputStream(content)
    override val destinationData: ByteArrayOutputStream = new ByteArrayOutputStream(content.length)
    override def inputStream: AsyncInputStream = toAsyncInputStream(sourceData)
    override def outputStream: AsyncOutputStream = toAsyncOutputStream(destinationData)
    override def roundTripped: Boolean = destinationData.toByteArray sameElements content
  }
  val sourceAndDestination = Seq(arrayOfBytesData, byteBufferData, inputOutputStreams)
  // Generates one test per fixture against a live MongoDB database.
  forEvery(sourceAndDestination) { (data: SourceAndDestination[_, _]) =>
    it should s"be able to roundtrip ${data.description}" in withDatabase(databaseName) {
      database =>
        val gridFSBucket = GridFSBucket(database, "fs")
        val filesCollection = database.getCollection("fs.files")
        val chunksCollection = database.getCollection("fs.chunks")
        // Start from a clean bucket so the count assertions below are exact.
        gridFSBucket.drop().futureValue
        info("Testing uploading data")
        val objectId = gridFSBucket.uploadFromStream("myfile", data.inputStream).head().futureValue
        // Payload is smaller than the default chunk size, so exactly one chunk.
        filesCollection.count().head().futureValue should equal(1)
        chunksCollection.count().head().futureValue should equal(1)
        info("Testing downloading data")
        gridFSBucket.downloadToStream(objectId, data.outputStream).head().futureValue
        data.outputStream.close().head().futureValue
        data.roundTripped should be(true)
    }
  }
}
| jCalamari/mongo-scala-driver | driver/src/it/scala/org/mongodb/scala/gridfs/helpers/AsyncStreamHelperSpec.scala | Scala | apache-2.0 | 4,007 |
package com.twitter.finagle.postgresql.machine
import java.nio.charset.StandardCharsets
import java.security.MessageDigest
import com.twitter.finagle.postgresql.BackendMessage
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationCleartextPassword
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationGSS
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationKerberosV5
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationMD5Password
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationOk
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationSASL
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationSCMCredential
import com.twitter.finagle.postgresql.BackendMessage.AuthenticationSSPI
import com.twitter.finagle.postgresql.BackendMessage.NoTx
import com.twitter.finagle.postgresql.BackendMessage.ReadyForQuery
import com.twitter.finagle.postgresql.FrontendMessage
import com.twitter.finagle.postgresql.Params
import com.twitter.finagle.postgresql.PgSqlNoSuchTransition
import com.twitter.finagle.postgresql.PgSqlPasswordRequired
import com.twitter.finagle.postgresql.PgSqlServerError
import com.twitter.finagle.postgresql.PgSqlUnsupportedAuthenticationMechanism
import com.twitter.finagle.postgresql.Response
import com.twitter.io.Buf
import com.twitter.util.Return
import com.twitter.util.Throw
/**
* Implements the "Start-up" message flow described here https://www.postgresql.org/docs/current/protocol-flow.html#id-1.10.5.7.3
*
* This process involves authenticating the client and accumulating parameters about the server's configuration for this connection.
* Failure to authenticate will produce an exception.
* A successful response [[Response.ConnectionParameters]] which includes the connection's parameters
* such as character encoding and timezone.
*/
case class HandshakeMachine(
  credentials: Params.Credentials,
  database: Params.Database,
  statementTimeout: Params.StatementTimeout,
  sessionDefaults: Params.SessionDefaults)
    extends StateMachine[Response.ConnectionParameters] {
  import StateMachine._

  // Two phases: authenticating the client, then accumulating the backend's
  // startup parameters (and optional cancellation key) until ReadyForQuery.
  sealed trait State
  case object Authenticating extends State
  case class BackendStarting(
    params: List[BackendMessage.ParameterStatus],
    bkd: Option[BackendMessage.BackendKeyData])
      extends State

  /** Opens the flow by sending StartupMessage with user, database and session defaults. */
  override def start: StateMachine.TransitionResult[State, Response.ConnectionParameters] = {
    // statement_timeout, when configured, is folded into the session defaults.
    val sessionParams = sessionDefaults.defaults ++
      statementTimeout.timeout().map(timeout => "statement_timeout" -> timeout.inMillis.toString)
    Transition(
      Authenticating,
      Send(
        FrontendMessage.StartupMessage(
          user = credentials.username,
          database = database.name,
          params = sessionParams))
    )
  }

  /** Advances the handshake on each backend message; throws on an impossible (state, message) pair. */
  override def receive(
    state: State,
    msg: BackendMessage
  ): StateMachine.TransitionResult[State, Response.ConnectionParameters] = (state, msg) match {
    // MD5 challenge: respond with concat('md5', md5(concat(md5(concat(password, username)), salt))).
    case (Authenticating, AuthenticationMD5Password(salt)) =>
      def hex(input: Array[Byte]) = input.map(s => f"$s%02x").mkString
      def bytes(str: String) = str.getBytes(StandardCharsets.UTF_8)
      def md5(input: Array[Byte]*): String =
        hex(
          input
            .foldLeft(MessageDigest.getInstance("MD5")) { case (d, v) => d.update(v); d }.digest())
      credentials.password match {
        // No password configured but the server demands one: fail the handshake.
        case None => Complete(ReadyForQuery(NoTx), Some(Throw(PgSqlPasswordRequired)))
        case Some(password) =>
          // concat('md5', md5(concat(md5(concat(password, username)), random-salt)))
          val hashed = md5(
            bytes(md5(bytes(password), bytes(credentials.username))),
            Buf.ByteArray.Owned.extract(salt)
          )
          Transition(Authenticating, Send(FrontendMessage.PasswordMessage(s"md5$hashed")))
      }
    // Cleartext challenge: send the password as-is (server is expected to be trusted/TLS).
    case (Authenticating, AuthenticationCleartextPassword) =>
      credentials.password match {
        case None => Complete(ReadyForQuery(NoTx), Some(Throw(PgSqlPasswordRequired)))
        case Some(password) =>
          Transition(Authenticating, Send(FrontendMessage.PasswordMessage(password)))
      }
    case (
          Authenticating,
          AuthenticationOk
        ) => // This can happen at Startup when there's no password
      Transition(BackendStarting(Nil, None), NoOp)
    // Accumulate ParameterStatus and BackendKeyData until the backend is ready.
    case (BackendStarting(params, bkd), p: BackendMessage.ParameterStatus) =>
      Transition(BackendStarting(p :: params, bkd), NoOp)
    case (BackendStarting(params, _), bkd: BackendMessage.BackendKeyData) =>
      Transition(BackendStarting(params, Some(bkd)), NoOp)
    // ReadyForQuery ends the handshake successfully with the accumulated parameters.
    case (BackendStarting(params, bkd), ready: BackendMessage.ReadyForQuery) =>
      Complete(ready, Some(Return(Response.ConnectionParameters(params, bkd))))
    case (state, _: BackendMessage.NoticeResponse) => Transition(state, NoOp) // TODO: don't ignore
    case (_, e: BackendMessage.ErrorResponse) =>
      // The backend closes the connection, so we use a bogus ReadyForQuery value
      Complete(ReadyForQuery(NoTx), Some(Throw(PgSqlServerError(e))))
    // Authentication mechanisms this client does not implement.
    case (
          _,
          AuthenticationGSS | AuthenticationKerberosV5 | AuthenticationSCMCredential |
          AuthenticationSSPI | AuthenticationSASL(_)
        ) =>
      Complete(
        ReadyForQuery(NoTx),
        Some(
          Throw(PgSqlUnsupportedAuthenticationMechanism(
            msg.asInstanceOf[BackendMessage.AuthenticationMessage])))
      )
    case (state, msg) => throw PgSqlNoSuchTransition("HandshakeMachine", state, msg)
  }
}
| twitter/finagle | finagle-postgresql/src/main/scala/com/twitter/finagle/postgresql/machine/HandshakeMachine.scala | Scala | apache-2.0 | 5,533 |
package net.flatmap.ace
import CypherParser.ResultSet
/** Ready-made row parsers for extracting typed values from Cypher query results. */
object CypherParser {

  import MayErr._
  import java.util.Date

  // A Cypher result is consumed as a lazy stream of rows.
  type ResultSet = Stream[Row]

  /** Parses the first column of a row as `T`; fails when the row has no columns. */
  def scalar[T](implicit transformer: Column[T]): RowParser[T] = RowParser[T] { row =>
    (for {
      meta <- row.metaData.ms.headOption.toRight(NoColumnsInReturnedResult)
      value <- row.data.headOption.toRight(NoColumnsInReturnedResult)
      result <- transformer(value, meta)
    } yield result).fold(e => Error(e), a => Success(a))
  }

  /** Flattens a nested `~` combination into the target shape `R` (typically a tuple). */
  def flatten[T1, T2, R](implicit f: net.flatmap.ace.TupleFlattener[(T1 ~ T2) => R]): ((T1 ~ T2) => R) = f.f

  // Convenience parsers for common column types.
  def str(columnName: String): RowParser[String] = get[String](columnName)(implicitly[net.flatmap.ace.Column[String]])

  def bool(columnName: String): RowParser[Boolean] = get[Boolean](columnName)(implicitly[Column[Boolean]])

  def int(columnName: String): RowParser[Int] = get[Int](columnName)(implicitly[Column[Int]])

  def long(columnName: String): RowParser[Long] = get[Long](columnName)(implicitly[Column[Long]])

  def date(columnName: String): RowParser[Date] = get[Date](columnName)(implicitly[Column[Date]])

  def node(columnName: String): RowParser[org.neo4j.graphdb.Node] = get[org.neo4j.graphdb.Node](columnName)(implicitly[Column[org.neo4j.graphdb.Node]])

  def relationship(columnName: String): RowParser[org.neo4j.graphdb.Relationship] = get[org.neo4j.graphdb.Relationship](columnName)(implicitly[Column[org.neo4j.graphdb.Relationship]])

  def path(columnName: String): RowParser[Seq[org.neo4j.graphdb.PropertyContainer]] = get[Seq[org.neo4j.graphdb.PropertyContainer]](columnName)(implicitly[Column[Seq[org.neo4j.graphdb.PropertyContainer]]])

  /** Parses the named column as `T`; fails when the column is missing or untransformable. */
  def get[T](columnName: String)(implicit extractor: net.flatmap.ace.Column[T]): RowParser[T] = RowParser { row =>
    import MayErr._
    (for {
      meta <- row.metaData.get(columnName)
        .toRight(ColumnNotFound(columnName, row.metaData.availableColumns))
      value <- row.get1(columnName)
      result <- extractor(value, MetaDataItem(meta._1, meta._2, meta._3))
    } yield result).fold(e => Error(e), a => Success(a))
  }

  /** Succeeds (with unit) only when `columnName` holds exactly the value `t`. */
  def contains[TT: Column, T <: TT](columnName: String, t: T): RowParser[Unit] =
    get[TT](columnName)(implicitly[Column[TT]])
      // Fix: yield the unit value `()` rather than the `Unit` companion object,
      // which only type-checked via value discarding.
      .collect("Row doesn't contain a column: " + columnName + " with value " + t) { case a if a == t => () }
}
/** Pair of successively parsed values; lets combinator results read as `a ~ b ~ c`. */
case class ~[+A, +B](_1: A, _2: B)
/** Outcome of applying a parser: a parsed value ([[Success]]) or a failure ([[Error]]). */
trait CypherResult[+A] {
  self =>

  /** Monadic bind: feeds a successful value into `k`; an error short-circuits unchanged. */
  def flatMap[B](k: A => CypherResult[B]): CypherResult[B] = self match {
    case Success(value)   => k(value)
    case failure @ Error(_) => failure
  }

  /** Functor map, expressed through [[flatMap]]: transforms the value on success only. */
  def map[B](f: A => B): CypherResult[B] = flatMap(value => Success(f(value)))
}

/** Successful parse carrying the extracted value. */
case class Success[A](a: A) extends CypherResult[A]

/** Failed parse carrying the cause; usable in any result position via `Nothing`. */
case class Error(msg: CypherRequestError) extends CypherResult[Nothing]
/** Factory for wrapping a plain row-reading function as a [[RowParser]]. */
object RowParser {
  def apply[A](f: Row => CypherResult[A]): RowParser[A] = new RowParser[A] {
    def apply(row: Row): CypherResult[A] = f(row)
  }
}
/** A parser of single rows, composable via combinators into parsers of richer shapes. */
trait RowParser[+A] extends (Row => CypherResult[A]) {
  parent =>
  /** Transforms the parsed value on success. */
  def map[B](f: A => B): RowParser[B] = RowParser(parent.andThen(_.map(f)))
  /** Keeps only values matched by `f`; otherwise fails with the `otherwise` message. */
  def collect[B](otherwise: String)(f: PartialFunction[A, B]): RowParser[B] = RowParser(row => parent(row).flatMap(a => if (f.isDefinedAt(a)) Success(f(a)) else Error(CypherMappingError(otherwise))))
  /** Sequences a dependent parser over the same row. */
  def flatMap[B](k: A => RowParser[B]): RowParser[B] = RowParser(row => parent(row).flatMap(a => k(a)(row)))
  /** Runs both parsers on the row, pairing their results as `A ~ B`. */
  def ~[B](p: RowParser[B]): RowParser[A ~ B] = RowParser(row => parent(row).flatMap(a => p(row).map(new ~(a, _))))
  /** Runs both parsers, keeping only the right-hand result. */
  def ~>[B](p: RowParser[B]): RowParser[B] = RowParser(row => parent(row).flatMap(a => p(row)))
  /** Runs both parsers, keeping only the left-hand result. */
  def <~[B](p: RowParser[B]): RowParser[A] = parent.~(p).map(_._1)
  /** Alternative: falls back to `p` when this parser fails. */
  def |[B >: A](p: RowParser[B]): RowParser[B] = RowParser { row =>
    parent(row) match {
      case Error(_) => p(row)
      case a => a
    }
  }
  /** Optionality: a failure becomes `None` rather than an error. */
  def ? : RowParser[Option[A]] = RowParser { row =>
    parent(row) match {
      case Success(a) => Success(Some(a))
      case Error(_) => Success(None)
    }
  }
  /** Alias for [[flatMap]]. */
  def >>[B](f: A => RowParser[B]): RowParser[B] = flatMap(f)
  /** Applies this parser to every row (zero or more). */
  def * : ResultSetParser[List[A]] = ResultSetParser.list(parent)
  /** Applies this parser to every row, requiring at least one. */
  def + : ResultSetParser[List[A]] = ResultSetParser.nonEmptyList(parent)
  /** Requires exactly one row. */
  def single = ResultSetParser.single(parent)
  /** Requires zero or one row. */
  def singleOpt = ResultSetParser.singleOpt(parent)
}
/** A parser over a whole result set (a stream of rows). */
trait ResultSetParser[+A] extends (ResultSet => CypherResult[A]) {
  parent =>
  /** Transforms the parsed result on success. */
  def map[B](f: A => B): ResultSetParser[B] = ResultSetParser(rs => parent(rs).map(f))
}
/** Factories for building [[ResultSetParser]]s from row parsers. */
object ResultSetParser {

  /** Wraps a result-set-reading function as a [[ResultSetParser]]. */
  def apply[A](f: ResultSet => CypherResult[A]): ResultSetParser[A] = new ResultSetParser[A] {
    def apply(rows: ResultSet): CypherResult[A] = f(rows)
  }

  /** Applies `p` to every row, collecting the values in order; stops at the first error. */
  def list[A](p: RowParser[A]): ResultSetParser[List[A]] = {
    // Prepend while walking the stream, then reverse once at the end (O(n)).
    @scala.annotation.tailrec
    def sequence(acc: CypherResult[List[A]], remaining: Stream[Row]): CypherResult[List[A]] =
      (acc, remaining) match {
        case (Success(parsed), row #:: tail) => sequence(p(row).map(_ +: parsed), tail)
        case (result, _)                     => result
      }
    ResultSetParser { rows => sequence(Success(Nil), rows).map(_.reverse) }
  }

  /** Like [[list]], but fails on an empty result set. */
  def nonEmptyList[A](p: RowParser[A]): ResultSetParser[List[A]] =
    ResultSetParser { rows =>
      if (rows.isEmpty) Error(CypherMappingError("Empty Result Set")) else list(p)(rows)
    }

  /** Requires exactly one row and parses it with `p`. */
  def single[A](p: RowParser[A]): ResultSetParser[A] = ResultSetParser {
    case head #:: Stream.Empty => p(head)
    case Stream.Empty          => Error(CypherMappingError("No rows when expecting a single one"))
    case _                     => Error(CypherMappingError("too many rows when expecting a single one"))
  }

  /** Requires zero or one row; an absent row parses to `None`. */
  def singleOpt[A](p: RowParser[A]): ResultSetParser[Option[A]] = ResultSetParser {
    case head #:: Stream.Empty => p(head).map(Some(_))
    case Stream.Empty          => Success(None)
    case _                     => Error(CypherMappingError("too many rows when expecting a single one"))
  }
}
/*
* Skylark
* http://skylark.io
*
* Copyright 2012-2017 Quantarray, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.quantarray.skylark.measure
/**
* Binary multiple
*
* @author Araik Grigoryan
*/
/**
 * A binary (power-of-two) multiple, e.g. Ki = 2^10.
 *
 * @param prefix   textual prefix, e.g. "Ki"
 * @param powerOf2 exponent such that the multiple equals 2^powerOf2
 */
case class BinaryMultiple(prefix: String, powerOf2: Int)
{
  // The numeric factor this prefix denotes.
  val multiple: Double = math.pow(2, powerOf2)

  /**
   * Raises this multiple to `exponent`: (2^p)^e = 2^(p*e), with the prefix repeated.
   *
   * Fix: the original passed `math.pow(multiple, exponent).toInt` — the resulting
   * *multiple*, not its power-of-two exponent — as `powerOf2`, so e.g. Ki ^ 2
   * produced 2^1048576 (Infinity) instead of 2^20.
   */
  def ^(exponent: Int) = BinaryMultiple(prefix * exponent, powerOf2 * exponent)

  /** Scales a measure by this multiple, prefixing its name. */
  def *[M <: Measure[M]](measure: M): M = measure.composes(s"$prefix$measure", multiple)

  override def toString = s"$prefix ($multiple)"
}
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs10x.boxes
import uk.gov.hmrc.ct.accounts.frs10x.retriever.Frs10xDirectorsBoxRetriever
import uk.gov.hmrc.ct.box._
// AC8033: optional company secretary name captured from user input.
case class AC8033(value: Option[String]) extends CtBoxIdentifier(name = "Secretary name") with CtOptionalString with Input with ValidatableBox[Frs10xDirectorsBoxRetriever] {
  // When present: length must be 1-40 and restricted to the Companies House character set.
  override def validate(boxRetriever: Frs10xDirectorsBoxRetriever): Set[CtValidation] = {
    validateOptionalStringByLength("AC8033", this, 1, 40) ++ validateCoHoStringReturnIllegalChars("AC8033", this)
  }
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs10x/boxes/AC8033.scala | Scala | apache-2.0 | 1,141 |
package play.api.modules.tepkinmongo
import javax.inject._
import com.github.jeroenr.tepkin.MongoClient
import play.api._
import play.api.inject.{ApplicationLifecycle, Binding, Module}
/**
* MongoDB module.
*/
@Singleton
final class TepkinMongoModule extends Module {
  // Binds TepkinMongoApi to its default implementation, singleton-scoped so a
  // single MongoClient is shared by the whole application.
  override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq(bind[TepkinMongoApi].to[DefaultTepkinMongoApi].in[Singleton])
}
// Public surface of the Tepkin MongoDB integration: a connected client plus
// the name of the database the application should use.
trait TepkinMongoApi {
  def client: MongoClient
  def db: String
}
/**
 * Default [[TepkinMongoApi]] driven by the `mongodb.uri` and `mongodb.database`
 * configuration keys; construction fails fast when either key is absent.
 */
final class DefaultTepkinMongoApi @Inject()(configuration: Configuration,
                                            applicationLifecycle: ApplicationLifecycle) extends TepkinMongoApi {
  val client = MongoClient(configuration.getString("mongodb.uri").getOrElse(throw new IllegalStateException("Please configure mongodb.uri in your application.conf for example \\"mongodb://localhost\\" ")))
  val db = configuration.getString("mongodb.database").getOrElse(throw new IllegalStateException("Please configure mongodb.database in your application.conf for example \\"example\\""))

  // Fix: the lifecycle was injected but never used, so the MongoClient (and its
  // underlying actor system) was never released. Shut it down when Play stops.
  applicationLifecycle.addStopHook { () =>
    scala.concurrent.Future.successful(client.shutdown())
  }
}
package org.scalatra
/**
* Trait representing an object that can't be fully initialized by its
* constructor. Useful for unifying the initialization process of an
* HttpServlet and a Filter.
*/
trait Initializable {
  /** Concrete application-context type supplied by the host environment. */
  type ApplicationContextT <: ApplicationContext
  /** Environment-specific configuration type (e.g. servlet config or filter config). */
  type ConfigT

  /** Common, environment-neutral view over the native configuration object. */
  trait Config {
    def context: ApplicationContextT
    def initParameters: Map[String, String]
  }

  /** Adapts the native configuration object to the common [[Config]] view. */
  protected implicit def configWrapper(config: ConfigT): Config

  /**
   * A hook to initialize the class with some configuration after it has
   * been constructed.
   *
   * Not called init because GenericServlet doesn't override it, and then
   * we get into https://lampsvn.epfl.ch/trac/scala/ticket/2497.
   */
  // Fix: explicit `: Unit` replaces the deprecated procedure syntax; same signature.
  def initialize(config: ConfigT): Unit
}
| louk/scalatra | core/src/main/scala/org/scalatra/Initializable.scala | Scala | bsd-2-clause | 743 |
/*
* Copyright (c) 2015 Alpine Data Labs
* All rights reserved.
*/
package com.alpine.model.pack.ml
import org.scalatest.FunSuite
import org.scalatest.Matchers._
/**
* Tests scoring of LinearRegressionTransformer.
*/
/**
 * Tests scoring of LinearRegressionTransformer: score(x) = x . coefficients + intercept.
 */
class LinearRegressionTransformerTest extends FunSuite {

  val coefficients = Seq[java.lang.Double](0.9, 1, 5)
  val intercept = 1d
  val scorer = new LinearRegressionTransformer(coefficients, intercept)

  test("testScore") {
    // Exact corner cases: each coefficient switched on one at a time.
    assert(intercept === scorer.score(Seq[Any](0,0,0)).value)
    assert(intercept + 0.9 === scorer.score(Seq[Any](1,0,0)).value)
    assert(intercept + 0.9 + 1 === scorer.score(Seq[Any](1,1,0)).value)
    assert(intercept + 0.9 + 1 + 5 === scorer.score(Seq[Any](1,1,1)).value)
    // Fix: seed the generator so failures are reproducible (the original used
    // unseeded math.random, making a failing input impossible to replay).
    val random = new scala.util.Random(20150101L)
    for (_ <- 0 until 10) {
      testScorer(Seq[Any](random.nextDouble(), random.nextDouble(), random.nextDouble()))
    }
  }

  /** Checks the scorer against an independently computed dot product, within 1e-10. */
  def testScorer(input: Seq[Any]): Unit = {
    val result = scorer.score(input)
    val expected = (input.map(x => x.asInstanceOf[Number].doubleValue()) zip coefficients).map(x => x._1 * x._2).sum + intercept
    result.value should equal (expected +- 1E-10)
  }
}
| holdenk/PluginSDK | alpine-model-pack/src/test/scala/com/alpine/model/pack/ml/LinearRegressionTransformerTest.scala | Scala | apache-2.0 | 1,105 |
/*
* Seldon -- open source prediction engine
* =======================================
* Copyright 2011-2015 Seldon Technologies Ltd and Rummble Ltd (http://www.seldon.io/)
*
**********************************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************************************
*/
package io.seldon.spark.analytics
import org.apache.log4j.Level
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
import org.joda.time.format.DateTimeFormatter
import io.seldon.spark.SparkUtils
import scala.collection.mutable.ListBuffer
import io.seldon.spark.rdd.FileUtils
import io.seldon.spark.rdd.DataSourceMode
// Command-line/job configuration for the engagement analysis.
// Session boundaries: a gap longer than maxIntraSessionGapSecs starts a new
// session; sessions longer than maxSessionTimeSecs or with more than
// maxSessionPageView views are treated as outliers.
case class EngagementConfig(
    local : Boolean = false,
    inputPath : String = "",
    outputPath : String = "",
    awsKey : String = "",
    awsSecret : String = "",
    startDate : String = "",
    endDate : String = "",
    maxIntraSessionGapSecs : Int = 600,
    maxSessionTimeSecs : Int = 1800,
    maxSessionPageView : Int = 50,
    recTag : String = "sitewide",
    influxdb_host : String = "",
    influxdb_user : String = "root",
    influxdb_pass : String = "",
    filterUsersFile : String = "")
// One recommendation impression: consumer (client), epoch-millis timestamp, user id, A/B bucket.
case class EngImpression(consumer: String, time: Long, user : String,abkey : String)
class Engagement(private val sc : SparkContext,config : EngagementConfig) {
  /**
   * Loads tab-separated activity logs and extracts impression events keyed by
   * "consumer_user".
   *
   * Assumed line format (inferred from the parsing below — confirm against the
   * log producer): ISO-8601 timestamp TAB topic TAB JSON payload. Only
   * "restapi.ctralg" records that are impressions ("IMP") for the requested rec
   * tag and in the "baseline" or "normal" A/B bucket are kept.
   */
  def parseJson(path : String,recTagRequired : String) = {
    val rdd = sc.textFile(path).flatMap{line =>
      val formatter = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
      val parts = line.split("\\t")
      val date = formatter.parseDateTime(parts(0))
      if (parts(1) == "restapi.ctralg")
      {
        import org.json4s._
        import org.json4s.jackson.JsonMethods._
        implicit val formats = DefaultFormats
        val json = parse(parts(2))
        val consumer = (json \\ "consumer").extract[String]
        val click = (json \\ "click").extract[String]
        val user = (json \\ "userid").extract[String]
        val abkey = (json \\ "abkey").extract[String]
        val rectag = (json \\ "rectag").extract[String]
        if (click == "IMP" && rectag == recTagRequired && (abkey == "baseline" || abkey == "normal"))
        {
          Seq((consumer+"_"+user,EngImpression(consumer,date.getMillis(),user,abkey)))
        }
        else
          None
      }
      else
        None
    }
    rdd
  }
  /**
   * Writes per-client engagement stats for both A/B buckets to the "engagement"
   * series of the "stats" InfluxDB database.
   *
   * Each map value is (avg session time, avg page views, fraction of multi-page
   * sessions). Only clients present in BOTH buckets are written; the "diff"
   * columns are emitted positively for the normal bucket and negated for baseline.
   */
  def sendStatsToInfluxDb(startDate : String,
      diffs : scala.collection.mutable.Map[String,(Double,Double,Double)],
      baseline : scala.collection.mutable.Map[String,(Double,Double,Double)],
      normal : scala.collection.mutable.Map[String,(Double,Double,Double)],iHost : String, iUser : String, iPass : String) = {
    import org.influxdb.InfluxDBFactory
    import java.util.concurrent.TimeUnit
    val influxDB = InfluxDBFactory.connect("http://"+iHost+":8086", iUser, iPass);
    val formatter = DateTimeFormat.forPattern("yyyy-MM-dd");
    val date = formatter.parseDateTime(startDate)
    val serie = new org.influxdb.dto.Serie.Builder("engagement")
      .columns("time", "client", "abkey", "avg_time", "avg_pages", "multi_percent","diff_time","diff_pages","diff_multi")
    for (client <- baseline.keys)
    {
      if (normal.contains(client))
      {
        val (bTime,bPv,bPlus) = baseline(client)
        val (nTime,nPv,nPlus) = normal(client)
        val (dTime,dPv,dPlus) = diffs(client)
        serie.values(date.getMillis() : java.lang.Long,client,"normal",nTime : java.lang.Double,nPv : java.lang.Double,nPlus : java.lang.Double,dTime : java.lang.Double,dPv : java.lang.Double,dPlus : java.lang.Double)
        serie.values(date.getMillis() : java.lang.Long,client,"baseline",bTime : java.lang.Double,bPv : java.lang.Double,bPlus : java.lang.Double,-1.0 * dTime : java.lang.Double,-1.0 * dPv : java.lang.Double,-1.0 * dPlus : java.lang.Double)
      }
    }
    influxDB.write("stats", TimeUnit.MILLISECONDS, serie.build());
  }
/** Main job logic: sessionizes per-user page-view histories, computes per
  * client/abkey engagement averages, derives normal-vs-baseline diffs, and
  * writes CSV outputs (and optionally InfluxDB points).
  *
  * Pipeline:
  *   1. parse impressions for the configured date range,
  *   2. group by user key and split each user's sorted history into sessions
  *      (a gap larger than maxIntraSessionGapSecs ends a session),
  *   3. aggregate per client_abkey: avg session time, avg page views per
  *      session, fraction of sessions with more than one page view,
  *   4. diff "normal" vs "baseline" A/B groups per client.
  */
def run()
{
  val startDate = DateTime.parse(config.startDate)
  val endDate = DateTime.parse(config.endDate)
  // Input layout assumed to be <inputPath>/<year>/<unix-date-glob>/*/* — TODO confirm against producer job.
  val glob = config.inputPath + "/" + startDate.getYear() + "/" + SparkUtils.getS3UnixGlob(startDate, endDate)+"/*/*"
  println(glob)
  val maxGapMsecs = config.maxIntraSessionGapSecs * 1000
  val maxSessionPageViews = config.maxSessionPageView
  val data = parseJson(glob,config.recTag).coalesce(50, false)
  // Accumulators: users whose abkey changed mid-history are counted as "bad"
  // and dropped; all others are "good".
  val badUsers = sc.accumulator(0, "bad user count")
  val goodUsers = sc.accumulator(0, "good user count")
  // calulate session time and number of page views per session for each user page view history
  val perUserStats = data.groupByKey().flatMapValues{v =>
    // One tuple per completed session:
    // (sessionTimeSecs, pageViews, abkey, client, userCount, sessionCount=1, pv1Plus)
    val buf = new ListBuffer[(Long,Long,String,String,Int,Int,Int)]()
    val sorted = v.toArray.sortBy(_.time)
    var lastTime : Long = 0
    var timeSecs : Long = 0
    var pv = 0
    var abkey = sorted(0).abkey
    val client = sorted(0).consumer
    var numSessions = 0
    var badUser = false
    for(e <- sorted)
    {
      if (lastTime > 0)
      {
        val gap = (e.time - lastTime)
        if (gap > maxGapMsecs)
        {
          // Gap too large: close the current session and start a new one.
          // Sessions with zero measured time are clamped to 1 second;
          // oversized sessions (pv > maxSessionPageViews) are discarded.
          if (timeSecs == 0){ timeSecs = 1}
          if (pv <= maxSessionPageViews)
          {
            numSessions += 1
            var userCount = 0
            var pv1Plus = 0
            // userCount is 1 only on the user's first session so that summing
            // it later yields the distinct-user count.
            if (numSessions == 1) {userCount = 1}
            if (pv > 1) { pv1Plus = 1}
            buf.append((timeSecs,pv,abkey,client,userCount,1,pv1Plus))
          }
          timeSecs = 0
          pv = 0
        }
        else
        {
          timeSecs += (gap/1000)
        }
      }
      lastTime = e.time
      pv += 1
      // A user whose A/B group changes mid-history can't be attributed to a
      // single group — flag and discard them below.
      if (abkey != e.abkey)
        badUser = true
      //abkey = e.abkey // user can change group they are in after a certain time
    }
    // Flush the final (still-open) session, same rules as above.
    if (pv > 0)
    {
      if (timeSecs == 0){ timeSecs = 1}
      if (pv <= maxSessionPageViews)
      {
        numSessions += 1
        var userCount = 0
        var pv1Plus = 0
        if (numSessions == 1) {userCount = 1}
        if (pv > 1) { pv1Plus = 1}
        buf.append((timeSecs,pv,abkey,client,userCount,1,pv1Plus))
      }
    }
    if (badUser)
    {
      badUsers += 1
      buf.clear()
    }
    else
    {
      goodUsers += 1
    }
    buf
  }
  // create new per client key
  val stats = perUserStats.map{case (key,(time,pv,abkey,client,userCount,sessionCount,pv1plusCount)) => (client+"_"+abkey,(client,abkey,time,pv,userCount,1,pv1plusCount))}
  // get sums and counts
  val stats2 = stats.reduceByKey{case ((client,abkey,time1,pv1,userCount1,sessionCount1,pv1p1),(_,_,time2,pv2,userCount2,sessionCount2,pv1p2)) => (client,abkey,time1+time2,pv1+pv2,userCount1+userCount2,sessionCount1+sessionCount2,pv1p1+pv1p2)}
  // calculate averages
  val stats3 = stats2.mapValues{case (client,abkey,timeSum,pvSum,userCount,sessionCount,pv1plusCount) => (client,abkey,1.0*timeSum/sessionCount,1.0*pvSum/sessionCount,1.0*pv1plusCount/sessionCount)}
  val fstats = stats3.collect()
  // Split collected averages into the two A/B groups, keyed by client.
  val baseline = scala.collection.mutable.Map[String,(Double,Double,Double)]()
  val normal = scala.collection.mutable.Map[String,(Double,Double,Double)]()
  for ((key,(client,abkey,avgTime,avgPv,plusPercent)) <- fstats)
  {
    if (abkey == "baseline")
      baseline(client) = (avgTime,avgPv,plusPercent)
    else
      normal(client) = (avgTime,avgPv,plusPercent)
  }
  // Per-client deltas (normal - baseline) for clients present in both groups.
  val diffs = scala.collection.mutable.Map[String,(Double,Double,Double)]()
  for (client <- baseline.keys)
  {
    if (normal.contains(client))
    {
      val (bTime,bPv,bPlus) = baseline(client)
      val (nTime,nPv,nPlus) = normal(client)
      diffs(client) = (nTime-bTime,nPv-bPv,nPlus-bPlus)
    }
  }
  if (config.influxdb_host.nonEmpty)
  {
    sendStatsToInfluxDb(config.startDate,diffs,baseline,normal,config.influxdb_host,config.influxdb_user,config.influxdb_pass)
  }
  // Emit one CSV per data set: client,avgTime,avgPv,multiPageFraction
  val outPath = config.outputPath + "/" + config.startDate+"_"+config.endDate
  val bCsv = baseline.map{case (client,(t,pv,p)) => client+","+t.toString()+","+pv.toString()+","+p.toString()}
  val nCsv = normal.map{case (client,(t,pv,p)) => client+","+t.toString()+","+pv.toString()+","+p.toString()}
  val dCsv = diffs.map{case (client,(t,pv,p)) => client+","+t.toString()+","+pv.toString()+","+p.toString()}
  FileUtils.outputModelToFile(bCsv.toArray, outPath, DataSourceMode.fromString(outPath), "baseline.csv")
  FileUtils.outputModelToFile(nCsv.toArray, outPath, DataSourceMode.fromString(outPath), "normal.csv")
  FileUtils.outputModelToFile(dCsv.toArray, outPath, DataSourceMode.fromString(outPath), "diffs.csv")
  println("Bad users "+badUsers.value.toString())
  println("Good users "+goodUsers.value.toString())
  println("percent good users "+goodUsers.value.toFloat/(1.0 * badUsers.value+goodUsers.value))
}
}
object Engagement {
/** Entry point: parses command-line options into an EngagementConfig,
  * constructs a SparkContext (local master in debug mode), runs the
  * Engagement job, and always stops the context afterwards.
  *
  * Exits silently (no-op getOrElse branch) when option parsing fails;
  * scopt prints its own usage/error output in that case.
  */
def main(args: Array[String])
{
  // Quieten Spark/Jetty logging so job output is readable.
  Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
  Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
  val parser = new scopt.OptionParser[EngagementConfig]("Engagement") {
    head("Engagement", "1.x")
    opt[Unit]('l', "local") action { (_, c) => c.copy(local = true) } text("debug mode - use local Master")
    opt[String]('i', "input-path") required() valueName("path url") action { (x, c) => c.copy(inputPath = x) } text("path prefix for input")
    opt[String]('o', "output-path") required() valueName("path url") action { (x, c) => c.copy(outputPath = x) } text("path prefix for output")
    opt[String]('a', "awskey") valueName("aws access key") action { (x, c) => c.copy(awsKey = x) } text("aws key")
    opt[String]('s', "awssecret") valueName("aws secret") action { (x, c) => c.copy(awsSecret = x) } text("aws secret")
    opt[String]('d', "start-date") required() valueName("start date") action { (x, c) => c.copy(startDate = x) } text("start date yyyy-mm-dd")
    opt[String]('e', "end-date") required() valueName("end date") action { (x, c) => c.copy(endDate = x) } text("end date yyyy-mm-dd")
    opt[Int]('m', "max-session-pv") valueName("max session page views") action { (x, c) => c.copy(maxSessionPageView = x) } text("max session page views")
    opt[Int]('g', "max-intra-session-gap-secs") valueName("max intra session gap secs") action { (x, c) => c.copy(maxIntraSessionGapSecs = x) } text("max intra session gap secs")
    opt[String]("influxdb-host") valueName("influxdb host") action { (x, c) => c.copy(influxdb_host = x) } text("influx db hostname")
    opt[String]('u', "influxdb-user") valueName("influxdb username") action { (x, c) => c.copy(influxdb_user = x) } text("influx db username")
    opt[String]('p', "influxdb-pass") valueName("influxdb password") action { (x, c) => c.copy(influxdb_pass = x) } text("influx db password")
    opt[String]('r', "recTag") valueName("rectag") action { (x, c) => c.copy(recTag = x) } text("restrict to rectag")
  }
  parser.parse(args, EngagementConfig()) map { config =>
    val conf = new SparkConf()
      .setAppName("Engagement "+config.startDate+" to "+config.endDate)
    if (config.local)
      conf.setMaster("local")
        .set("spark.executor.memory", "8g")
    val sc = new SparkContext(conf)
    try
    {
      // Use the native S3 filesystem; credentials only set when both supplied.
      sc.hadoopConfiguration.set("fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem")
      if (config.awsKey.nonEmpty && config.awsSecret.nonEmpty)
      {
        sc.hadoopConfiguration.set("fs.s3n.awsAccessKeyId", config.awsKey)
        sc.hadoopConfiguration.set("fs.s3n.awsSecretAccessKey", config.awsSecret)
      }
      val cByd = new Engagement(sc,config)
      cByd.run()
    }
    finally
    {
      // Always stop the context, even if the job throws.
      println("Shutting down job")
      sc.stop()
    }
  } getOrElse
  {
  }
  // set up environment
}
} | tmylk/seldon-server | offline-jobs/spark/src/main/scala/io/seldon/spark/analytics/Engagement.scala | Scala | apache-2.0 | 12,749 |
import doodle.core._
import doodle.core.Image._
import doodle.syntax._
import doodle.jvm.Java2DCanvas._
import doodle.backend.StandardInterpreter._
// To use this example, open the SBT console and type:
//
// Example.image.draw
object Example {
  // Three concentric circles; each earlier (smaller) circle is layered on top
  // of the next via doodle's `on` combinator, left-associatively.
  val image = Seq(circle(10), circle(20), circle(30)).reduceLeft(_ on _)
}
| PengScala/creative-scala-template | src/main/scala/Example.scala | Scala | apache-2.0 | 301 |
package com.github.andyglow.relaxed
import org.scalatest.MustMatchers._
import org.scalatest.WordSpec
/** Backend-agnostic specification for Relaxed partial JSON updates.
  *
  * Concrete subclasses bind a specific JSON backend by providing the implicit
  * String-to-Reader conversion and the Updater[Profile] instance; the tests
  * themselves are shared across all backends.
  *
  * @param prefix backend name used to label the generated spec
  */
abstract class AbstractRelaxedSpec(prefix: String) extends WordSpec {
  // Converts a raw JSON string into the backend-specific Reader under test.
  implicit def stringAsReader(json: String): Reader
  // Backend-specific updater describing how Profile fields are patched.
  implicit val profileUpdater: Updater[Profile]
  s"$prefix.Relaxed.Update" must {
    "not affect entity if json is empty" in new Scope {
      (Relaxed(profile) updated """{}""") mustBe profile
    }
    "not affect entity if json is not empty, but there is no related fields" in new Scope {
      (Relaxed(profile) updated """{"foo": "bar"}""") mustBe profile
    }
    "not affect entity's 'id' if json contains 'id' but it is marked as 'skip'" in new Scope {
      (Relaxed(profile) updated """{"id": "updated"}""") mustBe profile
    }
    "affect entity's 'name' if json contains 'name' property" in new Scope {
      (Relaxed(profile) updated """{"name": "updated"}""") mustBe profile.copy(name = FullName("updated"))
    }
    "affect entity's 'alias' if json contains 'alias' property" in new Scope {
      (Relaxed(profile) updated """{"alias": "updated"}""") mustBe profile.copy(alias = Some("updated"))
    }
    // Explicit null clears an optional field (distinct from absent key above).
    "affect entity's 'alias' if json contains 'alias' property with value null" in new Scope {
      (Relaxed(profile) updated """{"alias": null}""") mustBe profile.copy(alias = None)
    }
    // Nested objects are patched field-by-field, not replaced wholesale.
    "affect entity's 'address/street' if json contains 'address/street' property" in new Scope {
      (Relaxed(profile) updated """{"address": { "street": "updated" }}""") mustBe profile.copy(address = profile.address.copy(street = "updated"))
    }
    "affect entity's 'phone/area' if json contains 'phone/area' property" in new Scope {
      (Relaxed(profile) updated """{"phone": { "area": "updated" }}""") mustBe profile.copy(
        phone = profile.phone.map(_.copy(area = "updated"))
      )
    }
    // A fully-specified nested object can populate a previously-None option.
    "affect entity's 'phone' if json contains 'phone' property" in new Scope {
      val noPhoneProfile = profile.copy(phone = None)
      (Relaxed(noPhoneProfile) updated """{"phone": { "area": "updated", "number": "updated" }}""") mustBe profile.copy(
        phone = Some(Phone("updated", "updated"))
      )
    }
    "affect entity's 'phone' if json contains 'phone' property with null" in new Scope {
      (Relaxed(profile) updated """{"phone": null}""") mustBe profile.copy(phone = None)
    }
  }
  // Shared fixture: a fully-populated Profile used by every test above.
  trait Scope {
    val profile = Profile(
      id = ProfileId("id"),
      name = FullName("name"),
      address = Address(
        city = "city",
        street = "street",
        building = 1),
      alias = Some("alias"),
      phone = Some(Phone(
        area = "area",
        number = "number")))
  }
}
| andyglow/relaxed-json-update | src/test/scala-2.11/com/github/andyglow/relaxed/AbstractRelaxedSpec.scala | Scala | lgpl-3.0 | 2,668 |
/**
* Copyright (c) 2013, The National Archives <digitalpreservation@nationalarchives.gov.uk>
* http://www.nationalarchives.gov.uk
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package uk.gov.nationalarchives.csv.validator.schema.v1_0
import java.io.StringReader
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import uk.gov.nationalarchives.csv.validator.{SchemaDefinitionError, FailMessage}
import uk.gov.nationalarchives.csv.validator.schema._
import scalaz.{Failure => FailureZ, Success => SuccessZ, IList}
/** Parser specs for CSV Schema (v1.0) column definitions: legal identifier
  * forms (bare and quoted), totalColumns consistency, duplicate detection,
  * and validation of cross-column references in rules.
  *
  * Note: the expected error-message strings (including line/column positions)
  * are exact contracts with the parser — do not reformat them.
  */
@RunWith(classOf[JUnitRunner])
class SchemaParserColumnDefinitionsSpecs extends SchemaSpecBase {
  import TestSchemaParser._
  "Schema" should {
    val globalDirsOne = List(TotalColumns(1))
    val globalDirsTwo = List(TotalColumns(2))
    // --- identifier syntax -------------------------------------------------
    "succeed for valid schema with all possible column definitions" in {
      val columnDefinitions = List(new ColumnDefinition(NamedColumnIdentifier("column1")),new ColumnDefinition(NamedColumnIdentifier("column2")),new ColumnDefinition(NamedColumnIdentifier("column3")),
        new ColumnDefinition(NamedColumnIdentifier(".")),new ColumnDefinition(NamedColumnIdentifier("_-co.l")),new ColumnDefinition(NamedColumnIdentifier("0.a-B-z_Z")),new ColumnDefinition(NamedColumnIdentifier("-abc.csvs")))
      val schema = """version 1.0
                      @totalColumns 7
                      column1:
                      column2:
                      column3:
                      .:
                      _-co.l:
                      0.a-B-z_Z:
                      -abc.csvs:"""
      parse(new StringReader(schema)) must beLike { case Success(schemaResult, _) => schemaResult mustEqual buildSchema1_0(TotalColumns(7))(columnDefinitions:_*) }
    }
    "succeed for valid schema with all possible quoted column definitions" in {
      val columnDefinitions = List(new ColumnDefinition(NamedColumnIdentifier("column1")),new ColumnDefinition(NamedColumnIdentifier("column2")),new ColumnDefinition(NamedColumnIdentifier("column 3")),
        new ColumnDefinition(NamedColumnIdentifier("column 4/5")), new ColumnDefinition(NamedColumnIdentifier(".")),new ColumnDefinition(NamedColumnIdentifier("_-co.l")),
        new ColumnDefinition(NamedColumnIdentifier("0.a-B-z_Z")),new ColumnDefinition(NamedColumnIdentifier("-abc.csvs")))
      val schema = """version 1.0
                      @totalColumns 8
                      "column1":
                      "column2":
                      "column 3":
                      "column 4/5":
                      ".":
                      "_-co.l":
                      "0.a-B-z_Z":
                      "-abc.csvs":"""
      parse(new StringReader(schema)) must beLike { case Success(schemaResult, _) => schemaResult mustEqual buildSchema1_0(TotalColumns(8))(columnDefinitions:_*) }
    }
    "fail if column ident contains an invalid char ie not 0-9 a-z A-Z . - _" in {
      val schema = """version 1.0
                      @totalColumns 1
                      column1':"""
      parse(new StringReader(schema)) must beLike {
        case Failure(messages, _) => messages mustEqual "Invalid column definition"
      }
    }
    "fail if quoted column ident contains a quote" in {
      val schema = """version 1.0
                      @totalColumns 2
                      "column "1":
                      "column "2":"""
      parse(new StringReader(schema)) must beLike {
        case Failure(messages, _) => messages mustEqual "Invalid column definition"
      }
    }
    // --- totalColumns / structural checks ----------------------------------
    "fail if the total number of columns does not match the number of column definitions" in {
      val schema = """version 1.0
                     |@totalColumns 2
                     |LastName: regex ("[a]")""".stripMargin
      parseAndValidate(new StringReader(schema)) must beLike { case FailureZ(msgs) => msgs.list mustEqual IList(FailMessage(SchemaDefinitionError, "@totalColumns = 2 but number of columns defined = 1 at line: 2, column: 1")) }
    }
    "fail for invalid column identifier" in {
      val schema = """version 1.0
                      @totalColumns 1
                      Last Name """
      parse(new StringReader(schema)) must beLike { case Failure(message, _) => message mustEqual "Invalid column definition" }
    }
    "succeed for column definition with no rules" in {
      val schema = """version 1.0
                      @totalColumns 1
                      Name:"""
      parse(new StringReader(schema)) must beLike { case Success(schemaResult, _) => schemaResult mustEqual buildSchema1_0(globalDirsOne:_*)(namedColumn("Name")) }
    }
    "succeed for column definition with single regex rule" in {
      val schema = """version 1.0
                      @totalColumns 1
                      Age: regex ("[1-9]*")"""
      parse(new StringReader(schema)) must beLike { case Success(Schema(globalDirsOne1, List(ColumnDefinition(NamedColumnIdentifier("Age"), List(RegExpRule(r)), _)), v), _) => r mustEqual "[1-9]*" }
    }
    "fail for more than one column definition on a line" in {
      val schema = """version 1.0
                      @totalColumns 1
                      LastName: regex ("[a-z]*") Age"""
      parse(new StringReader(schema)) must beLike { case Failure(message, _) => message mustEqual """Invalid column definition""" }
    }
    "fail for extra text after column definition on a line" in {
      val schema = """version 1.0
                      @totalColumns 3
                      LastName: regex ("[a-z]*")
                      FirstName: dfsdfsdfwe
                      Age:"""
      parse(new StringReader(schema)) must beLike { case Failure(message, _) => message mustEqual "Invalid column definition" }
    }
    // --- cross-column reference validation ---------------------------------
    "fail when one invalid column reference" in {
      val schema = """version 1.0
                      @totalColumns 2
                     |Column1: in($NotAColumn)
                     |Column2:""".stripMargin
      parseAndValidate(new StringReader(schema)) must beLike {
        case FailureZ(msgs) => msgs.list mustEqual IList(FailMessage(SchemaDefinitionError, "Column: Column1 has invalid cross reference in($NotAColumn) at line: 3, column: 10"))
      }
    }
    "fail when there are two rules and one is invalid" in {
      val schema = """version 1.0
                      @totalColumns 2
                     |Column1: in($Column2) in($NotAColumn2)
                     |Column2:""".stripMargin
      parseAndValidate(new StringReader(schema)) must beLike {
        case FailureZ(msgs) => msgs.list mustEqual IList(FailMessage(SchemaDefinitionError, "Column: Column1 has invalid cross reference in($NotAColumn2) at line: 3, column: 23"))
      }
    }
    "fail when two columns have two rules and each has one invalid column" in {
      val schema ="""version 1.0
                     @totalColumns 2
                    |Column1: in($Column2) in($NotAColumn2)
                    |Column2: in($NotAColumn3) in($Column2)""".stripMargin
      parseAndValidate(new StringReader(schema)) must beLike {
        case FailureZ(msgs) => msgs.list mustEqual IList(FailMessage(SchemaDefinitionError,
          """Column: Column1 has invalid cross reference in($NotAColumn2) at line: 3, column: 23
            |Column: Column2 has invalid cross reference in($NotAColumn3) at line: 4, column: 10""".stripMargin))
      }
    }
    "fail when two columns have two rules and each has one invalid column with different rules" in {
      val schema ="""version 1.0
                    |@totalColumns 2
                    |Column1: is($Column1) is($NotAColumn1)
                    |Column2: not($Column2) not($NotAColumn2)
                    |Column3: in($Column3) in($NotAColumn3)
                    |Column4: starts($Column4) starts($NotAColumn4)
                    |Column5: ends($Column5) ends($NotAColumn5)""".stripMargin
      parseAndValidate(new StringReader(schema)) must beLike {
        case FailureZ(msgs) => msgs.list mustEqual IList(FailMessage(SchemaDefinitionError, """@totalColumns = 2 but number of columns defined = 5 at line: 2, column: 1
                                                                                              |Column: Column1 has invalid cross reference is($NotAColumn1) at line: 3, column: 23
                                                                                              |Column: Column2 has invalid cross reference not($NotAColumn2) at line: 4, column: 24
                                                                                              |Column: Column3 has invalid cross reference in($NotAColumn3) at line: 5, column: 23
                                                                                              |Column: Column4 has invalid cross reference starts($NotAColumn4) at line: 6, column: 27
                                                                                              |Column: Column5 has invalid cross reference ends($NotAColumn5) at line: 7, column: 25""".stripMargin))
      }
    }
    "fail for multiple columns with same name" in {
      val schema = """version 1.0
                      @totalColumns 4
                      Column1:
                      Column2:
                      Column1: regex("A")
                      Column2:"""
      parseAndValidate(new StringReader(schema)) must beLike {
        case FailureZ(msgs) => msgs.list mustEqual IList(FailMessage(SchemaDefinitionError, """Column: Column1 has duplicates on lines 3, 5
                                                                                              |Column: Column2 has duplicates on lines 4, 6""".stripMargin))
      }
    }
    "succeed if Column1 correctly has InRule that points to Column2" in {
      val schema = """version 1.0
                      @totalColumns 2
                      Column1: in($Column2)
                      Column2:"""
      parseAndValidate(new StringReader(schema)) must beLike {
        case SuccessZ(schemaResult) => schemaResult mustEqual buildSchema1_0(globalDirsTwo:_*)(ColumnDefinition(NamedColumnIdentifier("Column1"), List(InRule(ColumnReference(NamedColumnIdentifier("Column2"))))),
          namedColumn("Column2"))
      }
    }
  }
} | valydia/csv-validator | csv-validator-core/src/test/scala/uk/gov/nationalarchives/csv/validator/schema/v1_0/SchemaParserColumnDefinitionsSpecs.scala | Scala | mpl-2.0 | 10,140 |
/**
* Copyright (c) 2013 Mark S. Kolich
* http://mark.koli.ch
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
package com.kolich.spray.protocols
import com.kolich.spray.models._
import spray.json._
import spray.httpx._
private[protocols] trait WebAppJsonProtocol {
/** spray-json (de)serialization formats for the web-app model types.
  *
  * Derived formats for the generic response wrappers, plus hand-written
  * formats for URI, Error (write-only) and User.
  */
object WebAppJsonFormat extends DefaultJsonProtocol with HtmlSafeSprayJsonSupport {
  implicit def ListResponseFormat[A: JsonFormat] = jsonFormat3(ListResponse.apply[A])
  implicit def ObjResponseFormat[A: JsonFormat] = jsonFormat2(ObjResponse.apply[A])
  implicit object URIFormat extends RootJsonFormat[java.net.URI] {
    def write(u: java.net.URI) = JsString(u.toString)
    // BUG FIX: the previous implementation called v.toString, which renders
    // the JSON value *including its surrounding double quotes*, producing
    // corrupt URIs like "\"http://host\"". Unwrap the JsString value instead,
    // and fail loudly on any non-string JSON value.
    def read(v: JsValue) = v match {
      case JsString(s) => java.net.URI.create(s)
      case other => throw new DeserializationException("Expected a JSON string for java.net.URI, got: " + other)
    }
  }
  implicit object ErrorFormat extends RootJsonFormat[Error] {
    def write(u: Error) = JsObject(
      "success" -> JsBoolean(u.success),
      "errorCode" -> JsNumber(u.errorCode),
      // Absent message serialized as empty string rather than JSON null.
      "message" -> JsString(u.message.getOrElse(""))
    )
    // Errors are only ever written to clients, never parsed back.
    def read(v: JsValue) = throw new DeserializationException("Read of models.Error not implemented")
  }
  implicit object UserFormat extends RootJsonFormat[User] {
    def write(u: User) = JsObject(
      "name" -> JsString(u.name),
      "email" -> JsString(u.email),
      "phone" -> JsString(u.phone),
      "notes" -> JsString(u.notes.getOrElse(""))
    )
    // NOTE: intentionally ignores "notes" on read (User default applies);
    // a missing/non-string required field raises a MatchError.
    def read(v: JsValue) = v.asJsObject.getFields("name", "email", "phone") match {
      case Seq(JsString(name), JsString(email), JsString(phone)) =>
        User(name, email, phone)
    }
  }
}
} | markkolich/spray-servlet-webapp | src/main/scala/com/kolich/spray/protocols/WebAppJsonProtocol.scala | Scala | mit | 2,613 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.File
import java.sql.Timestamp
import java.util.Date
import scala.collection.mutable.ArrayBuffer
import org.scalatest.concurrent.TimeLimits
import org.scalatest.exceptions.TestFailedDueToTimeoutException
import org.scalatest.time.SpanSugar._
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.test.ProcessTestUtils.ProcessOutputCapturer
import org.apache.spark.util.Utils
/** Test mixin that launches a real `spark-submit` subprocess and fails the
  * test if it exits non-zero or does not finish within 5 minutes. All process
  * output is timestamped and captured so failures include the full log.
  */
trait SparkSubmitTestUtils extends SparkFunSuite with TimeLimits {
  // NOTE: This is an expensive operation in terms of time (10 seconds+). Use sparingly.
  // This is copied from org.apache.spark.deploy.SparkSubmitSuite
  /** Runs `spark-submit` with the given arguments from the Spark home
    * directory (sparkHomeOpt, else the spark.test.home system property) and
    * asserts a zero exit code within the timeout.
    */
  protected def runSparkSubmit(args: Seq[String], sparkHomeOpt: Option[String] = None): Unit = {
    val sparkHome = sparkHomeOpt.getOrElse(
      sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!")))
    // Accumulates every captured stdout/stderr line for failure diagnostics.
    val history = ArrayBuffer.empty[String]
    val sparkSubmit = if (Utils.isWindows) {
      // On Windows, `ProcessBuilder.directory` does not change the current working directory.
      new File("..\\\\..\\\\bin\\\\spark-submit.cmd").getAbsolutePath
    } else {
      "./bin/spark-submit"
    }
    val commands = Seq(sparkSubmit) ++ args
    val commandLine = commands.mkString("'", "' '", "'")
    val builder = new ProcessBuilder(commands: _*).directory(new File(sparkHome))
    val env = builder.environment()
    env.put("SPARK_TESTING", "1")
    env.put("SPARK_HOME", sparkHome)
    def captureOutput(source: String)(line: String): Unit = {
      // This test suite has some weird behaviors when executed on Jenkins:
      //
      // 1. Sometimes it gets extremely slow out of unknown reason on Jenkins. Here we add a
      //    timestamp to provide more diagnosis information.
      // 2. Log lines are not correctly redirected to unit-tests.log as expected, so here we print
      //    them out for debugging purposes.
      val logLine = s"${new Timestamp(new Date().getTime)} - $source> $line"
      // scalastyle:off println
      println(logLine)
      // scalastyle:on println
      history += logLine
    }
    val process = builder.start()
    // Drain stdout and stderr on background threads to avoid pipe blocking.
    new ProcessOutputCapturer(process.getInputStream, captureOutput("stdout")).start()
    new ProcessOutputCapturer(process.getErrorStream, captureOutput("stderr")).start()
    try {
      val exitCode = failAfter(300.seconds) { process.waitFor() }
      if (exitCode != 0) {
        // include logs in output. Note that logging is async and may not have completed
        // at the time this exception is raised
        Thread.sleep(1000)
        val historyLog = history.mkString("\\n")
        fail {
          s"""spark-submit returned with exit code $exitCode.
             |Command line: $commandLine
             |
             |$historyLog
           """.stripMargin
        }
      }
    } catch {
      case to: TestFailedDueToTimeoutException =>
        val historyLog = history.mkString("\\n")
        fail(s"Timeout of $commandLine" +
          s" See the log4j logs for more detail." +
          s"\\n$historyLog", to)
      case t: Throwable => throw t
    } finally {
      // Ensure we still kill the process in case it timed out
      process.destroy()
    }
  }
}
| minixalpha/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/SparkSubmitTestUtils.scala | Scala | apache-2.0 | 4,033 |
package wrappers.scala
import scala.language.{existentials,implicitConversions}
import scalan._
import special.wrappers.WrappersModule
import special.wrappers.OptionWrapSpec
import scala.collection.mutable.WrappedArray
package impl {
// Abs -----------------------------------
trait WOptionsDefs extends scalan.Scalan with WOptions {
self: WrappersModule =>
object WOption extends EntityObject("WOption") {
// entityConst: single const for each entity
import Liftables._
case class WOptionConst[SA, A](
constValue: Option[SA],
lA: Liftable[SA, A]
) extends LiftedConst[Option[SA], WOption[A]] with WOption[A]
with Def[WOption[A]] with WOptionConstMethods[A] {
implicit final def eA: Elem[A] = lA.eW
val liftable: Liftable[Option[SA], WOption[A]] = liftableOption(lA)
val resultType: Elem[WOption[A]] = liftable.eW
}
trait WOptionConstMethods[A] extends WOption[A] { thisConst: Def[_] =>
implicit def eA: Elem[A]
private val WOptionClass = classOf[WOption[A]]
override def fold[B](ifEmpty: Ref[Thunk[B]], f: Ref[A => B]): Ref[B] = {
implicit val eB = ifEmpty.elem.eItem
asRep[B](mkMethodCall(self,
WOptionClass.getMethod("fold", classOf[Sym], classOf[Sym]),
Array[AnyRef](ifEmpty, f),
true, false, element[B]))
}
override def isEmpty: Ref[Boolean] = {
asRep[Boolean](mkMethodCall(self,
WOptionClass.getMethod("isEmpty"),
WrappedArray.empty,
true, false, element[Boolean]))
}
override def isDefined: Ref[Boolean] = {
asRep[Boolean](mkMethodCall(self,
WOptionClass.getMethod("isDefined"),
WrappedArray.empty,
true, false, element[Boolean]))
}
override def filter(p: Ref[A => Boolean]): Ref[WOption[A]] = {
asRep[WOption[A]](mkMethodCall(self,
WOptionClass.getMethod("filter", classOf[Sym]),
Array[AnyRef](p),
true, false, element[WOption[A]]))
}
override def flatMap[B](f: Ref[A => WOption[B]]): Ref[WOption[B]] = {
implicit val eB = f.elem.eRange.typeArgs("A")._1.asInstanceOf[Elem[B]]
asRep[WOption[B]](mkMethodCall(self,
WOptionClass.getMethod("flatMap", classOf[Sym]),
Array[AnyRef](f),
true, false, element[WOption[B]]))
}
override def map[B](f: Ref[A => B]): Ref[WOption[B]] = {
implicit val eB = f.elem.eRange
asRep[WOption[B]](mkMethodCall(self,
WOptionClass.getMethod("map", classOf[Sym]),
Array[AnyRef](f),
true, false, element[WOption[B]]))
}
override def getOrElse[B](default: Ref[Thunk[B]]): Ref[B] = {
implicit val eB = default.elem.eItem
asRep[B](mkMethodCall(self,
WOptionClass.getMethod("getOrElse", classOf[Sym]),
Array[AnyRef](default),
true, false, element[B]))
}
override def get: Ref[A] = {
asRep[A](mkMethodCall(self,
WOptionClass.getMethod("get"),
WrappedArray.empty,
true, false, element[A]))
}
}
case class LiftableOption[SA, A](lA: Liftable[SA, A])
extends Liftable[Option[SA], WOption[A]] {
lazy val eW: Elem[WOption[A]] = wOptionElement(lA.eW)
lazy val sourceType: RType[Option[SA]] = {
implicit val tagSA = lA.sourceType.asInstanceOf[RType[SA]]
RType[Option[SA]]
}
def lift(x: Option[SA]): Ref[WOption[A]] = WOptionConst(x, lA)
def unlift(w: Ref[WOption[A]]): Option[SA] = w match {
case Def(WOptionConst(x: Option[_], _lA))
if _lA == lA => x.asInstanceOf[Option[SA]]
case _ => unliftError(w)
}
}
implicit final def liftableOption[SA, A](implicit lA: Liftable[SA,A]): Liftable[Option[SA], WOption[A]] =
LiftableOption(lA)
private val _OptionWrapSpec = new OptionWrapSpec {}
private val WOptionClass = classOf[WOption[_]]
// entityAdapter for WOption trait
case class WOptionAdapter[A](source: Ref[WOption[A]])
extends Node with WOption[A]
with Def[WOption[A]] {
implicit lazy val eA = source.elem.typeArgs("A")._1.asInstanceOf[Elem[A]]
val resultType: Elem[WOption[A]] = element[WOption[A]]
override def transform(t: Transformer) = WOptionAdapter[A](t(source))
def fold[B](ifEmpty: Ref[Thunk[B]], f: Ref[A => B]): Ref[B] = {
implicit val eB = ifEmpty.elem.eItem
asRep[B](mkMethodCall(source,
WOptionClass.getMethod("fold", classOf[Sym], classOf[Sym]),
Array[AnyRef](ifEmpty, f),
true, true, element[B]))
}
def isEmpty: Ref[Boolean] = {
asRep[Boolean](mkMethodCall(source,
WOptionClass.getMethod("isEmpty"),
WrappedArray.empty,
true, true, element[Boolean]))
}
def isDefined: Ref[Boolean] = {
asRep[Boolean](mkMethodCall(source,
WOptionClass.getMethod("isDefined"),
WrappedArray.empty,
true, true, element[Boolean]))
}
def filter(p: Ref[A => Boolean]): Ref[WOption[A]] = {
asRep[WOption[A]](mkMethodCall(source,
WOptionClass.getMethod("filter", classOf[Sym]),
Array[AnyRef](p),
true, true, element[WOption[A]]))
}
def flatMap[B](f: Ref[A => WOption[B]]): Ref[WOption[B]] = {
implicit val eB = f.elem.eRange.typeArgs("A")._1.asInstanceOf[Elem[B]]
asRep[WOption[B]](mkMethodCall(source,
WOptionClass.getMethod("flatMap", classOf[Sym]),
Array[AnyRef](f),
true, true, element[WOption[B]]))
}
def map[B](f: Ref[A => B]): Ref[WOption[B]] = {
implicit val eB = f.elem.eRange
asRep[WOption[B]](mkMethodCall(source,
WOptionClass.getMethod("map", classOf[Sym]),
Array[AnyRef](f),
true, true, element[WOption[B]]))
}
def getOrElse[B](default: Ref[Thunk[B]]): Ref[B] = {
implicit val eB = default.elem.eItem
asRep[B](mkMethodCall(source,
WOptionClass.getMethod("getOrElse", classOf[Sym]),
Array[AnyRef](default),
true, true, element[B]))
}
def get: Ref[A] = {
asRep[A](mkMethodCall(source,
WOptionClass.getMethod("get"),
WrappedArray.empty,
true, true, element[A]))
}
}
// entityUnref: single unref method for each type family
implicit final def unrefWOption[A](p: Ref[WOption[A]]): WOption[A] = {
if (p.node.isInstanceOf[WOption[A]@unchecked]) p.node.asInstanceOf[WOption[A]]
else
WOptionAdapter(p)
}
implicit final def castWOptionElement[A](elem: Elem[WOption[A]]): WOptionElem[A, WOption[A]] =
elem.asInstanceOf[WOptionElem[A, WOption[A]]]
implicit lazy val containerWOption: Functor[WOption] = new Functor[WOption] {
def lift[A](implicit evA: Elem[A]) = element[WOption[A]]
def unlift[A](implicit eFT: Elem[WOption[A]]) =
castWOptionElement(eFT).eA
def unapply[T](e: Elem[_]) = e match {
case e: WOptionElem[_,_] => Some(asElem[WOption[T]](e))
case _ => None
}
def map[A,B](xs: Ref[WOption[A]])(f: Ref[A] => Ref[B]) = { implicit val eA = unlift(xs.elem); xs.map(fun(f))}
}
// manual fix: WOptionIso, wOptionIso
// familyElem
class WOptionElem[A, To <: WOption[A]](implicit _eA: Elem[A])
extends EntityElem1[A, To, WOption](_eA, container[WOption]) {
def eA = _eA
override val liftable: Liftables.Liftable[_, To] = asLiftable[Option[_], To](liftableOption(_eA.liftable))
override protected def collectMethods: Map[java.lang.reflect.Method, MethodDesc] = {
super.collectMethods ++
Elem.declaredWrapperMethods(_OptionWrapSpec, classOf[WOption[A]], Set(
"fold", "isEmpty", "isDefined", "filter", "flatMap", "map", "getOrElse", "get"
))
}
override def buildTypeArgs = super.buildTypeArgs ++ TypeArgs("A" -> (eA -> scalan.util.Invariant))
}
implicit final def wOptionElement[A](implicit eA: Elem[A]): Elem[WOption[A]] =
cachedElemByClass(eA)(classOf[WOptionElem[A, WOption[A]]])
implicit case object WOptionCompanionElem extends CompanionElem[WOptionCompanionCtor]
abstract class WOptionCompanionCtor extends CompanionDef[WOptionCompanionCtor] with WOptionCompanion {
def resultType = WOptionCompanionElem
override def toString = "WOption"
}
implicit final def unrefWOptionCompanionCtor(p: Ref[WOptionCompanionCtor]): WOptionCompanionCtor =
p.node.asInstanceOf[WOptionCompanionCtor]
lazy val RWOption: MutableLazy[WOptionCompanionCtor] = MutableLazy(new WOptionCompanionCtor {
private val thisClass = classOf[WOptionCompanion]
})
// manual fix: ViewWOption
  /** Extractors for recognizing staged `MethodCall` nodes that target WOption
    * methods. Each inner object matches a call by method name plus the
    * receiver's element type, and returns the receiver (and arguments, where
    * applicable) as an existentially-typed tuple for use in graph rewriting.
    */
  object WOptionMethods {
    // Matches `opt.fold(ifEmpty, f)`; yields (receiver, thunk, function).
    object fold {
      def unapply(d: Def[_]): Nullable[(Ref[WOption[A]], Ref[Thunk[B]], Ref[A => B]) forSome {type A; type B}] = d match {
        case MethodCall(receiver, method, args, _) if method.getName == "fold" && receiver.elem.isInstanceOf[WOptionElem[_, _]] =>
          val res = (receiver, args(0), args(1))
          Nullable(res).asInstanceOf[Nullable[(Ref[WOption[A]], Ref[Thunk[B]], Ref[A => B]) forSome {type A; type B}]]
        case _ => Nullable.None
      }
      def unapply(exp: Sym): Nullable[(Ref[WOption[A]], Ref[Thunk[B]], Ref[A => B]) forSome {type A; type B}] = unapply(exp.node)
    }
    // Matches `opt.isEmpty`; yields the receiver only.
    object isEmpty {
      def unapply(d: Def[_]): Nullable[Ref[WOption[A]] forSome {type A}] = d match {
        case MethodCall(receiver, method, _, _) if method.getName == "isEmpty" && receiver.elem.isInstanceOf[WOptionElem[_, _]] =>
          val res = receiver
          Nullable(res).asInstanceOf[Nullable[Ref[WOption[A]] forSome {type A}]]
        case _ => Nullable.None
      }
      def unapply(exp: Sym): Nullable[Ref[WOption[A]] forSome {type A}] = unapply(exp.node)
    }
    // Matches `opt.isDefined`; yields the receiver only.
    object isDefined {
      def unapply(d: Def[_]): Nullable[Ref[WOption[A]] forSome {type A}] = d match {
        case MethodCall(receiver, method, _, _) if method.getName == "isDefined" && receiver.elem.isInstanceOf[WOptionElem[_, _]] =>
          val res = receiver
          Nullable(res).asInstanceOf[Nullable[Ref[WOption[A]] forSome {type A}]]
        case _ => Nullable.None
      }
      def unapply(exp: Sym): Nullable[Ref[WOption[A]] forSome {type A}] = unapply(exp.node)
    }
    // Matches `opt.filter(p)`; yields (receiver, predicate).
    object filter {
      def unapply(d: Def[_]): Nullable[(Ref[WOption[A]], Ref[A => Boolean]) forSome {type A}] = d match {
        case MethodCall(receiver, method, args, _) if method.getName == "filter" && receiver.elem.isInstanceOf[WOptionElem[_, _]] =>
          val res = (receiver, args(0))
          Nullable(res).asInstanceOf[Nullable[(Ref[WOption[A]], Ref[A => Boolean]) forSome {type A}]]
        case _ => Nullable.None
      }
      def unapply(exp: Sym): Nullable[(Ref[WOption[A]], Ref[A => Boolean]) forSome {type A}] = unapply(exp.node)
    }
    // Matches `opt.flatMap(f)`; yields (receiver, function).
    object flatMap {
      def unapply(d: Def[_]): Nullable[(Ref[WOption[A]], Ref[A => WOption[B]]) forSome {type A; type B}] = d match {
        case MethodCall(receiver, method, args, _) if method.getName == "flatMap" && receiver.elem.isInstanceOf[WOptionElem[_, _]] =>
          val res = (receiver, args(0))
          Nullable(res).asInstanceOf[Nullable[(Ref[WOption[A]], Ref[A => WOption[B]]) forSome {type A; type B}]]
        case _ => Nullable.None
      }
      def unapply(exp: Sym): Nullable[(Ref[WOption[A]], Ref[A => WOption[B]]) forSome {type A; type B}] = unapply(exp.node)
    }
    // Matches `opt.map(f)`; yields (receiver, function).
    object map {
      def unapply(d: Def[_]): Nullable[(Ref[WOption[A]], Ref[A => B]) forSome {type A; type B}] = d match {
        case MethodCall(receiver, method, args, _) if method.getName == "map" && receiver.elem.isInstanceOf[WOptionElem[_, _]] =>
          val res = (receiver, args(0))
          Nullable(res).asInstanceOf[Nullable[(Ref[WOption[A]], Ref[A => B]) forSome {type A; type B}]]
        case _ => Nullable.None
      }
      def unapply(exp: Sym): Nullable[(Ref[WOption[A]], Ref[A => B]) forSome {type A; type B}] = unapply(exp.node)
    }
    // Matches `opt.getOrElse(default)`; yields (receiver, default thunk).
    object getOrElse {
      def unapply(d: Def[_]): Nullable[(Ref[WOption[A]], Ref[Thunk[B]]) forSome {type A; type B}] = d match {
        case MethodCall(receiver, method, args, _) if method.getName == "getOrElse" && receiver.elem.isInstanceOf[WOptionElem[_, _]] =>
          val res = (receiver, args(0))
          Nullable(res).asInstanceOf[Nullable[(Ref[WOption[A]], Ref[Thunk[B]]) forSome {type A; type B}]]
        case _ => Nullable.None
      }
      def unapply(exp: Sym): Nullable[(Ref[WOption[A]], Ref[Thunk[B]]) forSome {type A; type B}] = unapply(exp.node)
    }
    // Matches `opt.get`; yields the receiver only.
    object get {
      def unapply(d: Def[_]): Nullable[Ref[WOption[A]] forSome {type A}] = d match {
        case MethodCall(receiver, method, _, _) if method.getName == "get" && receiver.elem.isInstanceOf[WOptionElem[_, _]] =>
          val res = receiver
          Nullable(res).asInstanceOf[Nullable[Ref[WOption[A]] forSome {type A}]]
        case _ => Nullable.None
      }
      def unapply(exp: Sym): Nullable[Ref[WOption[A]] forSome {type A}] = unapply(exp.node)
    }
  }
  // Placeholder kept by the code generator: WOption's companion exposes no
  // methods that need MethodCall extractors.
  object WOptionCompanionMethods {
  }
} // of object WOption
  // Register the staged entity and its module with the IR context so the
  // WOption wrapper participates in graph construction and serialization.
  registerEntityObject("WOption", WOption)
  // manual fix: UserTypeWOption removed
  // manual fix: unapplyViews removed
  // manual fix: RepWOption removed
  // manual fix: rewriteDef removed
  registerModule(WOptionsModule)
}
  // Module metadata: source file name and module name used for registration.
  object WOptionsModule extends scalan.ModuleInfo("wrappers.scala", "WOptions")
}
// Public mix-in: brings the generated WOption definitions into a WrappersModule cake.
trait WOptionsModule extends wrappers.scala.impl.WOptionsDefs {self: WrappersModule =>}
| ScorexFoundation/sigmastate-interpreter | library/src/main/scala/wrappers/scala/impl/WOptionsImpl.scala | Scala | mit | 13,372 |
object Example {
  /** Prints a greeting built from up to the first two program arguments.
    *
    * Output:
    *  - two or more args: "Say: <arg0> <arg1>"
    *  - exactly one arg:  "Say: <arg0>"
    *  - no args:          "Say nothing"
    *
    * Uses an explicit `main` instead of the `App` trait: `App` relies on
    * `DelayedInit`, which is deprecated and has initialization-order pitfalls.
    */
  def main(args: Array[String]): Unit =
    println(args.take(2) match {
      case Array(a, b) => s"Say: $a $b" // only the first two args are used
      case Array(a)    => s"Say: $a"
      case _           => "Say nothing" // empty argument list
    })
}
| Jade-Shan/Jade-XMPP | src/main/scala/Example.scala | Scala | gpl-3.0 | 216 |
package mesosphere.marathon
package api.akkahttp.v2
import java.net.InetAddress
import akka.event.EventStream
import akka.http.scaladsl.model.Uri.{ Path, Query }
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{ Location, `Remote-Address` }
import mesosphere.{ UnitTest, ValidationTestLike }
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import mesosphere.marathon.api.TestAuthFixture
import mesosphere.marathon.api.akkahttp.EntityMarshallers.ValidationFailed
import mesosphere.marathon.api.akkahttp.{ Headers, Rejections }
import mesosphere.marathon.api.akkahttp.Rejections.{ EntityNotFound, Message }
import mesosphere.marathon.api.v2.validation.NetworkValidationMessages
import mesosphere.marathon.core.appinfo.PodStatusService
import mesosphere.marathon.core.deployment.DeploymentPlan
import mesosphere.marathon.core.election.ElectionService
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.core.plugin.PluginManager
import mesosphere.marathon.core.pod.{ PodDefinition, PodManager }
import mesosphere.marathon.state.PathId
import mesosphere.marathon.core.pod.PodManager
import mesosphere.marathon.raml.{ FixedPodScalingPolicy, LocalVolumeId, PersistentVolumeInfo, PersistentVolumeType, PodPersistentVolume, VolumeMount }
import mesosphere.marathon.state.PathId
import mesosphere.marathon.test.SettableClock
import mesosphere.marathon.util.SemanticVersion
import play.api.libs.json._
import play.api.libs.json.Json
import scala.concurrent.Future
import scala.concurrent.duration._
/** Route-level unit tests for the v2 pods API (`PodsController`) using the
  * akka-http route test kit. Each case builds a [[Fixture]] with mocked
  * collaborators (PodManager, PodStatusService, GroupManager, ...) and drives
  * the controller's route directly, asserting on status codes, headers and
  * the JSON payload via the custom matchers from ResponseMatchers.
  */
class PodsControllerTest extends UnitTest with ScalatestRouteTest with RouteBehaviours with ValidationTestLike with ResponseMatchers {
  "PodsController" should {
    // HEAD / is a cheap capability probe: 200 with empty body means pods are supported.
    "support pods" in {
      val controller = Fixture().controller()
      Head(Uri./) ~> controller.route ~> check {
        response.status should be(StatusCodes.OK)
        responseAs[String] shouldBe empty
      }
    }
    // Unauthenticated access test cases
    {
      val controller = Fixture(authenticated = false).controller()
      behave like unauthenticatedRoute(forRoute = controller.route, withRequest = Head(Uri./))
      behave like unauthenticatedRoute(forRoute = controller.route, withRequest = Post(Uri./))
      behave like unauthenticatedRoute(forRoute = controller.route, withRequest = Get("/::status"))
      behave like unauthenticatedRoute(forRoute = controller.route, withRequest = Delete("/mypod"))
      behave like unauthenticatedRoute(forRoute = controller.route, withRequest = Get("/mypod"))
      behave like unauthenticatedRoute(forRoute = controller.route, withRequest = Get("/mypod::status"))
    }
    // Unauthorized access test cases
    {
      val f = Fixture(authorized = false)
      val controller = f.controller()
      val podSpecJson = """
        | { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers": [
        | { "name": "webapp",
        | "resources": { "cpus": 0.03, "mem": 64 },
        | "image": { "kind": "DOCKER", "id": "busybox" },
        | "exec": { "command": { "shell": "sleep 1" } } } ] }
      """.stripMargin
      val entity = HttpEntity(podSpecJson).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      val podDefinition = PodDefinition(id = PathId("mypod"))
      f.podManager.find(any).returns(Some(podDefinition))
      behave like unauthorizedRoute(forRoute = controller.route, withRequest = request)
      behave like unauthorizedRoute(forRoute = controller.route, withRequest = Delete("/mypod"))
      behave like unauthorizedRoute(forRoute = controller.route, withRequest = Get("/mypod"))
      behave like unauthorizedRoute(forRoute = controller.route, withRequest = Get("/mypod::status"))
    }
    // Entity not found test cases
    {
      val f = Fixture()
      val controller = f.controller()
      val mypodId = PathId("mypod")
      val podDefinition = PodDefinition(id = mypodId)
      f.podManager.find(eq(PathId("unknown-pod"))).returns(None)
      f.podManager.find(eq(mypodId)).returns(Some(podDefinition))
      f.podManager.version(any, any).returns(Future.successful(None))
      // Shared shape for "entity not found" behaviours with a custom message.
      def unknownPod(withRequest: HttpRequest, withMessage: String) = unknownEntity(controller.route, withRequest, withMessage)
      behave like unknownPod(withRequest = Delete("/unknown-pod"), withMessage = "Pod 'unknown-pod' does not exist")
      behave like unknownPod(withRequest = Get("/unknown-pod"), withMessage = "Pod 'unknown-pod' does not exist")
      behave like unknownPod(withRequest = Get("/unknown-pod::versions"), withMessage = "Pod 'unknown-pod' does not exist")
      behave like unknownPod(withRequest = Get("/mypod::versions/2015-04-09T12:30:00.000Z"), withMessage = "Pod 'mypod' does not exist in version 2015-04-09T12:30:00.000Z")
      behave like unknownPod(withRequest = Get("/mypod::versions/unparsable-datetime"), withMessage = "Pod 'mypod' does not exist in version unparsable-datetime")
    }
    "be able to create a simple single-container pod from docker image w/ shell command" in {
      val f = Fixture(configArgs = Seq("--default_network_name", "blah")) // should not be injected into host network spec
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.create(any, eq(false)).returns(Future.successful(deploymentPlan))
      val podSpecJson = """
        | { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers": [
        | { "name": "webapp",
        | "resources": { "cpus": 0.03, "mem": 64 },
        | "image": { "kind": "DOCKER", "id": "busybox" },
        | "exec": { "command": { "shell": "sleep 1" } } } ] }
      """.stripMargin
      val entity = HttpEntity(podSpecJson).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        response.status should be(StatusCodes.Created)
        response.header[Headers.`Marathon-Deployment-Id`].value.value() should be(deploymentPlan.id)
        response.header[Location].value.value() should be("/mypod")
        val jsonResponse: JsValue = Json.parse(responseAs[String])
        jsonResponse should have (
          executorResources(cpus = 0.1, mem = 32.0, disk = 10.0),
          noDefinedNetworkname,
          networkMode(raml.NetworkMode.Host)
        )
      }
    }
    "be able to create a simple single-container pod with bridge network" in {
      val f = Fixture(configArgs = Seq("--default_network_name", "blah"))
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.create(any, eq(false)).returns(Future.successful(deploymentPlan))
      val podSpecJsonWithBridgeNetwork = """
        | { "id": "/mypod", "networks": [ { "mode": "container/bridge" } ], "containers": [
        | { "name": "webapp",
        | "resources": { "cpus": 0.03, "mem": 64 },
        | "image": { "kind": "DOCKER", "id": "busybox" },
        | "exec": { "command": { "shell": "sleep 1" } } } ] }
      """.stripMargin
      val entity = HttpEntity(podSpecJsonWithBridgeNetwork).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        response.status should be(StatusCodes.Created)
        response.header[Headers.`Marathon-Deployment-Id`].value.value() should be(deploymentPlan.id)
        response.header[Location].value.value() should be("/mypod")
        val jsonResponse = Json.parse(responseAs[String])
        jsonResponse should have (
          executorResources (cpus = 0.1, mem = 32.0, disk = 10.0),
          noDefinedNetworkname,
          networkMode(raml.NetworkMode.ContainerBridge)
        )
      }
    }
    "The secrets feature is NOT enabled and create pod (that uses file base secrets) fails" in {
      val f = Fixture(configArgs = Seq("--default_network_name", "blah")) // should not be injected into host network spec
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.create(any, eq(false)).returns(Future.successful(deploymentPlan))
      val podSpecJsonWithFileBasedSecret = """
        | { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers":
        |   [
        |     { "name": "webapp",
        |       "resources": { "cpus": 0.03, "mem": 64 },
        |       "image": { "kind": "DOCKER", "id": "busybox" },
        |       "exec": { "command": { "shell": "sleep 1" } },
        |       "volumeMounts": [ { "name": "vol", "mountPath": "mnt2" } ]
        |     }
        |   ],
        |   "volumes": [ { "name": "vol", "secret": "secret1" } ],
        |   "secrets": { "secret1": { "source": "/path/to/my/secret" } }
        | }
      """.stripMargin
      val entity = HttpEntity(podSpecJsonWithFileBasedSecret).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        rejection shouldBe a[ValidationFailed]
        inside(rejection) {
          case ValidationFailed(failure) =>
            failure should haveViolations("/podSecretVolumes(pod)" -> "Feature secrets is not enabled. Enable with --enable_features secrets)")
        }
      }
    }
    "The secrets feature is NOT enabled and create pod (that uses env secret refs) fails" in {
      val f = Fixture(configArgs = Seq("--default_network_name", "blah")) // should not be injected into host network spec
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.create(any, eq(false)).returns(Future.successful(deploymentPlan))
      val podSpecJsonWithEnvRefSecret = """
        | { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers":
        |   [
        |     { "name": "webapp",
        |       "resources": { "cpus": 0.03, "mem": 64 },
        |       "image": { "kind": "DOCKER", "id": "busybox" },
        |       "exec": { "command": { "shell": "sleep 1" } }
        |     }
        |   ],
        |   "environment": { "vol": { "secret": "secret1" } },
        |   "secrets": { "secret1": { "source": "/foo" } }
        | }
      """.stripMargin
      val entity = HttpEntity(podSpecJsonWithEnvRefSecret).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        rejection shouldBe a[ValidationFailed]
        inside(rejection) {
          case ValidationFailed(failure) =>
            failure should haveViolations("/secrets" -> "Feature secrets is not enabled. Enable with --enable_features secrets)")
        }
      }
    }
    "The secrets feature is NOT enabled and create pod (that uses env secret refs on container level) fails" in {
      val f = Fixture(configArgs = Seq("--default_network_name", "blah")) // should not be injected into host network spec
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.create(any, eq(false)).returns(Future.successful(deploymentPlan))
      val podSpecJsonWithEnvRefSecretOnContainerLevel = """
        | { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers":
        |   [
        |     { "name": "webapp",
        |       "resources": { "cpus": 0.03, "mem": 64 },
        |       "image": { "kind": "DOCKER", "id": "busybox" },
        |       "exec": { "command": { "shell": "sleep 1" } },
        |       "environment": { "vol": { "secret": "secret1" } }
        |     }
        |   ],
        |   "secrets": { "secret1": { "source": "/path/to/my/secret" } }
        | }
      """.stripMargin
      val entity = HttpEntity(podSpecJsonWithEnvRefSecretOnContainerLevel).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        rejection shouldBe a[ValidationFailed]
        inside(rejection) {
          case ValidationFailed(failure) =>
            failure should haveViolations("/secrets" -> "Feature secrets is not enabled. Enable with --enable_features secrets)")
        }
      }
    }
    "The secrets feature is enabled and create pod (that uses env secret refs on container level) succeeds" in {
      val f = Fixture(configArgs = Seq("--default_network_name", "blah", "--enable_features", Features.SECRETS)) // should not be injected into host network spec
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.create(any, eq(false)).returns(Future.successful(deploymentPlan))
      val podSpecJsonWithEnvRefSecretOnContainerLevel = """
        | { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers":
        |   [
        |     { "name": "webapp",
        |       "resources": { "cpus": 0.03, "mem": 64 },
        |       "image": { "kind": "DOCKER", "id": "busybox" },
        |       "exec": { "command": { "shell": "sleep 1" } },
        |       "environment": { "vol": { "secret": "secret1" } }
        |     }
        |   ],
        |   "secrets": { "secret1": { "source": "/path/to/my/secret" } }
        | }
      """.stripMargin
      val entity = HttpEntity(podSpecJsonWithEnvRefSecretOnContainerLevel).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        response.status should be(StatusCodes.Created)
        val jsonResponse = Json.parse(responseAs[String])
        jsonResponse should have (podContainerWithEnvSecret("secret1"))
      }
    }
    "The secrets feature is enabled and create pod (that uses file based secrets) succeeds" in {
      val f = Fixture(configArgs = Seq("--default_network_name", "blah", "--enable_features", Features.SECRETS)) // should not be injected into host network spec
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.create(any, eq(false)).returns(Future.successful(deploymentPlan))
      val podSpecJsonWithFileBasedSecret = """
        | { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers":
        |   [
        |     { "name": "webapp",
        |       "resources": { "cpus": 0.03, "mem": 64 },
        |       "image": { "kind": "DOCKER", "id": "busybox" },
        |       "exec": { "command": { "shell": "sleep 1" } },
        |       "volumeMounts": [ { "name": "vol", "mountPath": "mnt2" } ]
        |     }
        |   ],
        |   "volumes": [ { "name": "vol", "secret": "secret1" } ],
        |   "secrets": { "secret1": { "source": "/path/to/my/secret" } }
        | }
      """.stripMargin
      val entity = HttpEntity(podSpecJsonWithFileBasedSecret).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        response.status should be(StatusCodes.Created)
        val jsonResponse = Json.parse(responseAs[String])
        jsonResponse should have (podWithFileBasedSecret ("secret1"))
      }
    }
    "create a pod w/ container networking" in {
      val f = Fixture(configArgs = Seq("--default_network_name", "blah")) // required since network name is missing from JSON
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.create(any, eq(false)).returns(Future.successful(deploymentPlan))
      val podSpecJsonWithContainerNetworking = """
        | { "id": "/mypod", "networks": [ { "mode": "container" } ], "containers": [
        | { "name": "webapp",
        | "resources": { "cpus": 0.03, "mem": 64 },
        | "image": { "kind": "DOCKER", "id": "busybox" },
        | "exec": { "command": { "shell": "sleep 1" } } } ] }
      """.stripMargin
      val entity = HttpEntity(podSpecJsonWithContainerNetworking).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        response.status should be(StatusCodes.Created)
        response.header[Headers.`Marathon-Deployment-Id`].value.value() should be(deploymentPlan.id)
        response.header[Location].value.value() should be("/mypod")
        val jsonResponse = Json.parse(responseAs[String])
        jsonResponse should have(
          executorResources(cpus = 0.1, mem = 32.0, disk = 10.0),
          definedNetworkName("blah"),
          networkMode(raml.NetworkMode.Container)
        )
      }
    }
    "create a pod w/ container networking w/o default network name" in {
      val f = Fixture()
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.create(any, eq(false)).returns(Future.successful(deploymentPlan))
      val podSpecJsonWithContainerNetworking = """
        | { "id": "/mypod", "networks": [ { "mode": "container" } ], "containers": [
        | { "name": "webapp",
        | "resources": { "cpus": 0.03, "mem": 64 },
        | "image": { "kind": "DOCKER", "id": "busybox" },
        | "exec": { "command": { "shell": "sleep 1" } } } ] }
      """.stripMargin
      val entity = HttpEntity(podSpecJsonWithContainerNetworking).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        rejection shouldBe a[ValidationFailed]
        inside(rejection) {
          case ValidationFailed(failure) =>
            failure should haveViolations("/networks" -> NetworkValidationMessages.NetworkNameMustBeSpecified)
        }
      }
    }
    "create a pod with custom executor resource declaration" in {
      val f = Fixture()
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.create(any, eq(false)).returns(Future.successful(deploymentPlan))
      val podSpecJsonWithExecutorResources = """
        | { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers": [
        | { "name": "webapp",
        | "resources": { "cpus": 0.03, "mem": 64 },
        | "image": { "kind": "DOCKER", "id": "busybox" },
        | "exec": { "command": { "shell": "sleep 1" } } } ],
        | "executorResources": { "cpus": 100, "mem": 100 } }
      """.stripMargin
      val entity = HttpEntity(podSpecJsonWithExecutorResources).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        response.status should be(StatusCodes.Created)
        response.header[Headers.`Marathon-Deployment-Id`].value.value() should be(deploymentPlan.id)
        response.header[Location].value.value() should be("/mypod")
        val jsonResponse = Json.parse(responseAs[String])
        jsonResponse should have(executorResources(cpus = 100.0, mem = 100.0, disk = 10.0))
      }
    }
    "create a pod with a persistent volume" in {
      val f = Fixture()
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.create(any, eq(false)).returns(Future.successful(deploymentPlan))
      val podSpecJsonWithPersistentVolume =
        """
          | { "id": "/mypod",
          |   "containers": [ {
          |     "name": "dataapp",
          |     "resources": { "cpus": 0.03, "mem": 64 },
          |     "image": { "kind": "DOCKER", "id": "busybox" },
          |     "exec": { "command": { "shell": "sleep 1" } },
          |     "volumeMounts": [ { "name": "pst", "mountPath": "pst1", "readOnly": false } ]
          |   } ],
          |   "volumes": [ {
          |     "name": "pst",
          |     "persistent": { "type": "root", "size": 10 }
          |   } ] }
        """.stripMargin
      val entity = HttpEntity(podSpecJsonWithPersistentVolume).withContentType(ContentTypes.`application/json`)
      val request = Post(Uri./.withQuery(Query("force" -> "false")))
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        response.status should be(StatusCodes.Created)
        response.header[Headers.`Marathon-Deployment-Id`].value.value() should be(deploymentPlan.id)
        response.header[Location].value.value() should be("/mypod")
        val jsonResponse = Json.parse(responseAs[String])
        val volumeInfo = PersistentVolumeInfo(`type` = Some(PersistentVolumeType.Root), size = 10)
        val volume = PodPersistentVolume(name = "pst", persistent = volumeInfo)
        val volumeMount = VolumeMount(name = "pst", mountPath = "pst1", readOnly = Some(false))
        jsonResponse should have(
          podVolume(0, volume),
          podVolumeMount(0, 0, volumeMount))
      }
    }
    "delete a pod" in {
      val f = Fixture()
      val controller = f.controller()
      val pod = PodDefinition(id = PathId("mypod"))
      f.podManager.find(eq(PathId("mypod"))).returns(Some(pod))
      val plan = DeploymentPlan.empty
      f.podManager.delete(any, eq(false)).returns(Future.successful(plan))
      Delete("/mypod") ~> controller.route ~> check {
        response.status should be(StatusCodes.Accepted)
        response.header[Headers.`Marathon-Deployment-Id`].value.value() should be(plan.id)
      }
    }
    "respond with a pod for a lookup" in {
      val f = Fixture()
      val controller = f.controller()
      val podDefinition = PodDefinition(id = PathId("mypod"))
      f.podManager.find(eq(PathId("mypod"))).returns(Some(podDefinition))
      Get("/mypod") ~> controller.route ~> check {
        response.status should be(StatusCodes.OK)
        val jsonResponse = Json.parse(responseAs[String])
        jsonResponse should have(podId("mypod"))
      }
    }
    "respond with status for a pod" in {
      val f = Fixture()
      val controller = f.controller()
      val pod = PodDefinition(id = PathId("an-awesome-group/mypod"))
      f.podManager.find(eq(PathId("an-awesome-group/mypod"))).returns(Some(pod))
      val podStatus = raml.PodStatus(
        id = "an-awesome-group/mypod",
        spec = raml.Pod(id = "an-awesome-group/mypod", containers = Seq.empty),
        status = raml.PodState.Stable,
        statusSince = f.clock.now().toOffsetDateTime,
        lastUpdated = f.clock.now().toOffsetDateTime,
        lastChanged = f.clock.now().toOffsetDateTime
      )
      f.podStatusService.selectPodStatus(eq(PathId("an-awesome-group/mypod")), any).returns(Future.successful(Some(podStatus)))
      Get("/an-awesome-group/mypod::status") ~> controller.route ~> check {
        response.status should be(StatusCodes.OK)
        val jsonResponse = Json.parse(responseAs[String])
        jsonResponse should have(
          podId("an-awesome-group/mypod"),
          podState(raml.PodState.Stable)
        )
      }
    }
    "respond with all pods" in {
      val f = Fixture()
      val controller = f.controller()
      val podDefinitions = Seq(PodDefinition(id = PathId("mypod")), PodDefinition(id = PathId("another_pod")))
      f.podManager.findAll(any).returns(podDefinitions)
      Get(Uri./) ~> controller.route ~> check {
        response.status should be(StatusCodes.OK)
        val jsonResponse = Json.parse(responseAs[String])
        jsonResponse shouldBe a[JsArray]
        (jsonResponse \\ 0).get should have(podId("mypod"))
        (jsonResponse \\ 1).get should have(podId("another_pod"))
      }
    }
    "response with statuses for all pods" in {
      val f = Fixture()
      val controller = f.controller()
      val podStatus0 = raml.PodStatus(
        id = "mypod",
        spec = raml.Pod(id = "mypod", containers = Seq.empty),
        status = raml.PodState.Stable,
        statusSince = f.clock.now().toOffsetDateTime,
        lastUpdated = f.clock.now().toOffsetDateTime,
        lastChanged = f.clock.now().toOffsetDateTime
      )
      f.podStatusService.selectPodStatus(eq(PathId("mypod")), any).returns(Future.successful(Some(podStatus0)))
      val podStatus1 = raml.PodStatus(
        id = "another-pod",
        spec = raml.Pod(id = "another-pod", containers = Seq.empty),
        status = raml.PodState.Degraded,
        statusSince = f.clock.now().toOffsetDateTime,
        lastUpdated = f.clock.now().toOffsetDateTime,
        lastChanged = f.clock.now().toOffsetDateTime
      )
      f.podStatusService.selectPodStatus(eq(PathId("another-pod")), any).returns(Future.successful(Some(podStatus1)))
      f.podManager.ids().returns(Set(PathId("mypod"), PathId("another-pod")))
      Get("/::status") ~> controller.route ~> check {
        response.status should be(StatusCodes.OK)
        val jsonResponse = Json.parse(responseAs[String])
        (jsonResponse \\ 0).get should have(
          podId("mypod"),
          podState(raml.PodState.Stable)
        )
        (jsonResponse \\ 1).get should have(
          podId("another-pod"),
          podState(raml.PodState.Degraded)
        )
      }
    }
    "respond with all available versions" in {
      val f = Fixture()
      val controller = f.controller()
      val pod = PodDefinition(id = PathId("mypod"))
      f.podManager.find(eq(PathId("mypod"))).returns(Some(pod))
      val versions = Seq(f.clock.now(), f.clock.now() + 1.minute)
      f.podManager.versions(eq(PathId("mypod"))).returns(Source(versions))
      Get("/mypod::versions") ~> controller.route ~> check {
        response.status should be(StatusCodes.OK)
        val jsonResponse = Json.parse(responseAs[String])
        (jsonResponse \\ 0).get.asOpt[String] should be(Some("2015-04-09T12:30:00.000Z"))
        (jsonResponse \\ 1).get.asOpt[String] should be(Some("2015-04-09T12:31:00.000Z"))
      }
    }
    "respond with a specific version" in {
      val f = Fixture()
      val controller = f.controller()
      val pod = PodDefinition(id = PathId("mypod"))
      val version = f.clock.now()
      f.podManager.version(eq(PathId("mypod")), eq(version)).returns(Future.successful(Some(pod)))
      Get("/mypod::versions/2015-04-09T12:30:00.000Z") ~> controller.route ~> check {
        response.status should be(StatusCodes.OK)
        val jsonResponse = Json.parse(responseAs[String])
        jsonResponse should have(podId("mypod"))
      }
    }
    "update a simple single-container pod from docker image w/ shell command" in {
      // NOTE(review): this mock appears unused — the Fixture below builds its own
      // podManager mock; confirm before removing.
      implicit val podSystem = mock[PodManager]
      val f = Fixture()
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.update(any, eq(false)).returns(Future.successful(deploymentPlan))
      val postJson = """
        | { "id": "/mypod", "networks": [ { "mode": "host" } ], "containers": [
        | { "name": "webapp",
        | "resources": { "cpus": 0.03, "mem": 64 },
        | "image": { "kind": "DOCKER", "id": "busybox" },
        | "exec": { "command": { "shell": "sleep 1" } } } ] }
      """.stripMargin
      val entity = HttpEntity(postJson).withContentType(ContentTypes.`application/json`)
      val request = Put("/mypod")
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        response.status should be(StatusCodes.OK)
        response.header[Headers.`Marathon-Deployment-Id`].value.value() should be(deploymentPlan.id)
        val jsonResponse = Json.parse(responseAs[String])
        jsonResponse should have(
          podId("/mypod"),
          executorResources(cpus = 0.1, mem = 32.0, disk = 10.0),
          noDefinedNetworkname,
          networkMode(raml.NetworkMode.Host)
        )
      }
    }
    "do not update if we have concurrent change error" in {
      // NOTE(review): this mock appears unused — the Fixture below builds its own
      // podManager mock; confirm before removing.
      implicit val podSystem = mock[PodManager]
      val f = Fixture()
      val controller = f.controller()
      val podId = PathId("/unknownpod")
      f.podManager.update(any, eq(false)).returns(Future.failed(ConflictingChangeException("pod is already there")))
      val postJson = """
        | { "id": "/unknownpod", "networks": [ { "mode": "host" } ], "containers": [
        | { "name": "webapp",
        | "resources": { "cpus": 0.03, "mem": 64 },
        | "image": { "kind": "DOCKER", "id": "busybox" },
        | "exec": { "command": { "shell": "sleep 1" } } } ] }
      """.stripMargin
      val entity = HttpEntity(postJson).withContentType(ContentTypes.`application/json`)
      val request = Put(podId.toString)
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        rejection shouldBe a[Rejections.ConflictingChange]
        inside(rejection) {
          case r: Rejections.ConflictingChange => r.message.message shouldEqual "pod is already there"
        }
      }
    }
    "save pod with more than one instance" in {
      val f = Fixture()
      val controller = f.controller()
      val deploymentPlan = DeploymentPlan.empty
      f.podManager.update(any, eq(false)).returns(Future.successful(deploymentPlan))
      val postJson = """
        | { "id": "/mypod", "networks": [ { "mode": "host" } ],
        | "scaling": { "kind": "fixed", "instances": 2 }, "containers": [
        | { "name": "webapp",
        | "resources": { "cpus": 0.03, "mem": 64 },
        | "exec": { "command": { "shell": "sleep 1" } } } ] }
      """.stripMargin
      val entity = HttpEntity(postJson).withContentType(ContentTypes.`application/json`)
      val request = Put("/mypod")
        .withEntity(entity)
        .withHeaders(`Remote-Address`(RemoteAddress(InetAddress.getByName("192.168.3.12"))))
      request ~> controller.route ~> check {
        response.status should be(StatusCodes.OK)
        response.header[Headers.`Marathon-Deployment-Id`].value.value() should be(deploymentPlan.id)
        val jsonResponse = Json.parse(responseAs[String])
        jsonResponse should have(
          scalingPolicyInstances(2)
        )
      }
    }
  }
  /** Per-test wiring: builds a PodsController with mocked collaborators.
    *
    * @param configArgs    extra Marathon command-line args (e.g. feature flags)
    * @param authenticated whether the test authenticator accepts the request
    * @param authorized    whether the test authorizer accepts the request
    * @param isLeader      leadership state reported by the mocked ElectionService
    */
  case class Fixture(
      configArgs: Seq[String] = Seq.empty[String],
      authenticated: Boolean = true,
      authorized: Boolean = true,
      isLeader: Boolean = true) {
    val config = AllConf.withTestConfig(configArgs: _*)
    val clock = new SettableClock
    val auth = new TestAuthFixture()
    auth.authenticated = authenticated
    auth.authorized = authorized
    val electionService = mock[ElectionService]
    val groupManager = mock[GroupManager]
    val podManager = mock[PodManager]
    val podStatusService = mock[PodStatusService]
    val pluginManager = PluginManager.None
    val eventBus = mock[EventStream]
    val scheduler = mock[MarathonScheduler]
    electionService.isLeader returns (isLeader)
    scheduler.mesosMasterVersion() returns Some(SemanticVersion(0, 0, 0))
    implicit val authenticator = auth.auth
    implicit val mat: Materializer = mock[Materializer]
    def controller() = new PodsController(config, electionService, podManager, podStatusService, groupManager, pluginManager, eventBus, scheduler, clock)
  }
}
| janisz/marathon | src/test/scala/mesosphere/marathon/api/akkahttp/v2/PodsControllerTest.scala | Scala | apache-2.0 | 36,850 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import com.codahale.metrics.{Gauge,MetricRegistry}
import org.apache.spark.SparkContext
import org.apache.spark.metrics.source.Source
/**
 * Metrics source exposing [[DAGScheduler]] internal state to the codahale
 * metrics registry under the source name "DAGScheduler".
 *
 * Every metric is a point-in-time [[Gauge]] whose value is read lazily from
 * the scheduler's collections each time the registry is polled, so
 * registration itself performs no work against the scheduler.
 */
private[spark] class DAGSchedulerSource(val dagScheduler: DAGScheduler)
    extends Source {

  override val metricRegistry = new MetricRegistry()
  override val sourceName = "DAGScheduler"

  /**
   * Registers an Int gauge named `<namespace>.<name>` backed by the by-name
   * expression `value`, which is re-evaluated on every poll.
   *
   * Extracted to remove the five copy-pasted anonymous Gauge registrations.
   */
  private def registerGauge(namespace: String, name: String)(value: => Int): Unit = {
    metricRegistry.register(MetricRegistry.name(namespace, name), new Gauge[Int] {
      override def getValue: Int = value
    })
    ()
  }

  // Stage-level counters.
  registerGauge("stage", "failedStages")(dagScheduler.failedStages.size)
  registerGauge("stage", "runningStages")(dagScheduler.runningStages.size)
  registerGauge("stage", "waitingStages")(dagScheduler.waitingStages.size)

  // Job-level counters.
  registerGauge("job", "allJobs")(dagScheduler.numTotalJobs)
  registerGauge("job", "activeJobs")(dagScheduler.activeJobs.size)
}
| Dax1n/spark-core | core/src/main/scala/org/apache/spark/scheduler/DAGSchedulerSource.scala | Scala | apache-2.0 | 1,946 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert.scripting
import java.io.File
import com.typesafe.config.ConfigFactory
import org.junit.runner.RunWith
import org.locationtech.geomesa.convert.{DefaultCounter, EvaluationContextImpl, SimpleFeatureConverters}
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ScriptingFunctionFactoryTest extends Specification {

  // Tests mutate the global "geomesa.convert.scripts.path" system property,
  // so they must not run concurrently.
  sequential

  // Locate the bundled hello.js on the classpath and derive the module root
  // from it (three directories up from geomesa-convert-scripts).
  val srctestresourcesdir = new File(ClassLoader.getSystemResource("geomesa-convert-scripts/hello.js").toURI)
  val parent = srctestresourcesdir.getParentFile.getParentFile.getParentFile.getParent

  // Mixture of valid and invalid script locations to exercise path handling:
  val staticPaths = Seq(
    s"$parent/src/test/static",             // directory
    s"$parent/src/test/static2",            // directory that doesn't exist
    s"$parent/src/test/static3/whatsup.js", // file that exists
    s"$parent/src/test/static3/random.js"   // file that doesn't exist
  )

  // The factory reads script locations from this system property, ':'-separated.
  val path = staticPaths.mkString(":")
  System.setProperty("geomesa.convert.scripts.path", path)

  "ScriptingFunctionFactory " should {

    val sff = new ScriptingFunctionFactory

    // Scripts are exposed as functions namespaced by engine, e.g. "js:hello".
    "load functions" >> {
      sff.functions.flatMap(_.names) must contain("js:hello")
    }

    "execute functions" >> {
      implicit val ec = new EvaluationContextImpl(IndexedSeq.empty[String], Array.empty[Any], new DefaultCounter, Map.empty)
      val fn = sff.functions.find(_.names.contains("js:hello")).head
      val res = fn.eval(Array("geomesa"))
      res must beEqualTo("hello: geomesa")
    }

    "work in a transformer" >> {
      // Third line ("willfail,hello") is intentionally malformed and is
      // expected to be dropped by the converter.
      val data =
        """
          |1,hello,45.0,45.0
          |2,world,90.0,90.0
          |willfail,hello
        """.stripMargin

      // Delimited-text converter whose transforms call the JS functions loaded
      // from the script path ("js:hello", "js:gbye", "js:whatsup").
      val conf = ConfigFactory.parseString(
        """
          | {
          |   type = "delimited-text",
          |   format = "DEFAULT",
          |   id-field = "md5(string2bytes($0))",
          |   fields = [
          |     { name = "oneup", transform = "$1::string" },
          |     { name = "phrase", transform = "js:hello($2)" },
          |     { name = "gbye", transform = "js:gbye($2)" },
          |     { name = "whatsup", transform = "js:whatsup($2)" },
          |     { name = "lat", transform = "$3::double" },
          |     { name = "lon", transform = "$4::double" },
          |     { name = "lit", transform = "'hello'" },
          |     { name = "geom", transform = "point($lat, $lon)" }
          |     { name = "l1", transform = "concat($lit, $lit)" }
          |     { name = "l2", transform = "concat($l1, $lit)" }
          |     { name = "l3", transform = "concat($l2, $lit)" }
          |   ]
          | }
        """.stripMargin)

      // Target feature type matching the converter's output fields.
      val sft = SimpleFeatureTypes.createType(
        ConfigFactory.parseString(
          """
            |{
            |  type-name = "testsft"
            |  attributes = [
            |    { name = "oneup", type = "String", index = false },
            |    { name = "phrase", type = "String", index = false },
            |    { name = "gbye", type = "String", index = false },
            |    { name = "whatsup", type = "String", index = false },
            |    { name = "lineNr", type = "Int", index = false },
            |    { name = "fn", type = "String", index = false },
            |    { name = "lat", type = "Double", index = false },
            |    { name = "lon", type = "Double", index = false },
            |    { name = "lit", type = "String", index = false },
            |    { name = "geom", type = "Point", index = true, srid = 4326, default = true }
            |  ]
            |}
          """.stripMargin
        ))

      val converter = SimpleFeatureConverters.build[String](sft, conf)
      // Blank lines are filtered out before conversion.
      val res = converter.processInput(data.split("\n").toIterator.filterNot( s => "^\\s*$".r.findFirstIn(s).isDefined)).toList
      converter.close()

      "and process some data" >> {
        // Only the two well-formed rows survive; each scripted field is the
        // script's prefix applied to column 2.
        res.size must be equalTo 2
        res(0).getAttribute("phrase").asInstanceOf[String] must be equalTo "hello: hello"
        res(0).getAttribute("gbye").asInstanceOf[String] must be equalTo "goodbye: hello"
        res(0).getAttribute("whatsup").asInstanceOf[String] must be equalTo "whatsup: hello"
        res(1).getAttribute("phrase").asInstanceOf[String] must be equalTo "hello: world"
      }
    }
  }
}
| jahhulbert-ccri/geomesa | geomesa-convert/geomesa-convert-scripting/src/test/scala/org/locationtech/geomesa/convert/scripting/ScriptingFunctionFactoryTest.scala | Scala | apache-2.0 | 4,913 |
object Test {
  // Compiler test fixture (pos/t4649): the only requirement is that this
  // definition type-checks. The match is deliberately non-exhaustive (no case
  // for the empty stream) and @annotation.tailrec is left commented out —
  // the recursive calls inside #:: are not in tail position, so enabling the
  // annotation would be a compile error.
  // @annotation.tailrec
  def lazyFilter[E](s: Stream[E], p: E => Boolean): Stream[E] = s match {
    case h #:: t => if (p(h)) h #:: lazyFilter(t, p) else lazyFilter(t, p)
  }
}
| loskutov/intellij-scala | testdata/scalacTests/pos/t4649.scala | Scala | apache-2.0 | 194 |
/*
* Applied Processing
* Copyright 2014 yueh
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package de.orod.minecraft
package appliedprocessing
package reference
/** Internal registry names for the mod's blocks and items.
  * These strings are identifiers used at registration time, so they must stay
  * stable across versions — renaming one would orphan existing world data.
  */
object Names {
  final val BLOCK_SKY_WOOD = "BlockSkyWood"
  final val BLOCK_SKY_LEAVES = "BlockSkyLeaves"
  final val BLOCK_SKY_PLANK = "BlockSkyWoodPlanks"
  final val ITEM_SKY_WOODDUST = "SkyWoodDust"
  final val BLOCK_SKY_FURNACE = "BlockSkyFurnace"
}
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scalanlp.util;
import org.scalatest._;
import org.scalatest.junit._;
import org.scalatest.prop._;
import org.junit.runner.RunWith;
@RunWith(classOf[JUnitRunner])
class TopKTest extends FunSuite with Checkers {

  // Property: for any input list and 0 <= k <= 30, TopK holds exactly
  // min(k, size) elements, iterates in non-increasing order, and every input
  // value is either below the kept minimum or present in the result.
  test("Check that we always get the top elements: doubles") {
    check {
      (values : List[Double], k : Int) => (k < 0) || (k > 30) || {
        val topk = TopK(k, values.iterator);
        topk.size == math.min(k, values.size) &&
        // adjacent pairs of the iterator are ordered descending
        (topk.iterator zip topk.iterator.drop(1)).forall(tup => (tup._1 >= tup._2)) &&
        (topk.isEmpty || { val set = topk.toSet;
          val last = topk.min;
          // completeness: anything >= the smallest kept value must be kept
          values.forall(n => n < last || set.contains(n)) })
      }
    };
  }

  // Same property instantiated at Int, guarding against ordering bugs that
  // only show up for a particular element type.
  test("Check that we always get the top elements: ints") {
    check {
      (values : List[Int], k : Int) => (k < 0) || (k > 30) || {
        val topk = TopK(k, values.iterator);
        topk.size == math.min(k, values.size) &&
        (topk.iterator zip topk.iterator.drop(1)).forall(tup => (tup._1 >= tup._2)) &&
        (topk.isEmpty || { val set = topk.toSet;
          val last = topk.min;
          values.forall(n => n < last || set.contains(n)) })
      }
    };
  }
}
| MLnick/scalanlp-core | data/src/test/scala/scalanlp/util/TopKTest.scala | Scala | apache-2.0 | 1,831 |
/*
* Copyright (C) 2010 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.core.workflow.tools
import org.openmole.core.workflow.mole._
import scala.collection.mutable
/**
*
* Registry to register value in fonction of a key and a ticket. The values are
* stored in a WeakHashMap and are garbage collected after the ticket is gabage
* collected.
*
* @author Romain Reuillon <romain.Romain Reuillon at openmole.org>
* @tparam K the type of the keys
* @tparam V the type of the values
*/
/**
 * Registry mapping (key, ticket) pairs to values. Per-ticket registries are
 * held in a WeakHashMap keyed by the ticket, so all values registered under a
 * ticket become garbage-collectable once the ticket itself is collected.
 *
 * All public operations synchronize on this instance.
 *
 * @tparam K the type of the keys
 * @tparam V the type of the values
 */
class RegistryWithTicket[K, V] {

  class Registry extends mutable.HashMap[K, V]

  val registries = new mutable.WeakHashMap[Ticket, Registry]

  /** Returns the registry for `ticket`, creating an empty one as a side
    * effect if none exists yet. */
  def registry(ticket: Ticket): Registry = synchronized {
    registries.getOrElseUpdate(ticket, new Registry)
  }

  /**
   * Consult a value for a given key and ticket.
   *
   * @param key the index key
   * @param ticket the index ticket
   * @return Some(value) if registered, None otherwise
   */
  def consult(key: K, ticket: Ticket): Option[V] = synchronized {
    // Bug fix: the previous body evaluated `registry(ticket)(key)`, i.e.
    // HashMap.apply, which yields a raw V (throwing NoSuchElementException on
    // a missing key) and does not satisfy the declared Option[V] result.
    // `get` returns None for absent keys as the contract promises.
    registry(ticket).get(key)
  }

  /**
   * Look if a value is registered for a given key and ticket.
   *
   * @param key the index key
   * @param ticket the index ticket
   * @return true if the value is present
   */
  def isRegistred(key: K, ticket: Ticket): Boolean = synchronized {
    registry(ticket).contains(key)
  }

  /**
   * Register a value for given key and ticket.
   *
   * @param key the index key
   * @param ticket the index ticket
   * @param value the value to register
   */
  def register(key: K, ticket: Ticket, value: V) = synchronized {
    registry(ticket) += (key -> value)
  }

  /**
   * Remove a value from the registry. When the ticket's registry becomes
   * empty its entry is dropped eagerly instead of waiting for the ticket to
   * be garbage collected.
   *
   * @param key the index key
   * @param ticket the index ticket
   * @return the removed value, if it was present
   */
  def remove(key: K, ticket: Ticket): Option[V] = synchronized {
    // `registry(ticket)` above guarantees registries contains the ticket, so
    // the direct lookup on the next line cannot throw.
    val ret = registry(ticket).remove(key)
    if (registries(ticket).isEmpty) registries -= ticket
    ret
  }

  /** Returns the registered value for (key, ticket), computing and storing
    * `f` if absent. `f` is evaluated at most once per call. */
  def getOrElseUpdate(key: K, ticket: Ticket, f: ⇒ V): V = synchronized {
    registries.getOrElseUpdate(ticket, new Registry).getOrElseUpdate(key, f)
  }
}
| ISCPIF/PSEExperiments | openmole-src/openmole/core/org.openmole.core.workflow/src/main/scala/org/openmole/core/workflow/tools/RegistryWithTicket.scala | Scala | agpl-3.0 | 2,747 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform
import org.junit.Test
import org.scalastyle.file.CheckerTest
import org.scalatest.junit.AssertionsForJUnit
// scalastyle:off magic.number
/** Tests for the scalastyle rule that flags uses of `???`
  * (scala.Predef.???, which throws NotImplementedError). */
class NotImplementedErrorUsageTest extends AssertionsForJUnit with CheckerTest {
  val key = "not.implemented.error.usage"
  val classUnderTest = classOf[NotImplementedErrorUsage]

  // Plain code without ??? must produce no errors.
  @Test
  def noErrors(): Unit = {
    val source = """
      class X {
        val x = 0
      }
               """
    assertErrors(Nil, source)
  }

  // A ??? initializer is reported at its line and column (3, 10).
  @Test
  def notImplementedErrorFound(): Unit = {
    val source = """
      class X {
        val x = ???
      }
               """
    assertErrors(List(columnError(3, 10)), source)
  }
}
| scalastyle/scalastyle | src/test/scala/org/scalastyle/scalariform/NotImplementedErrorUsageTest.scala | Scala | apache-2.0 | 1,378 |
package org.apache.spark.repl
import scala.reflect._
import scala.reflect.api.{Mirror, Universe, TypeCreator}
import scala.tools.nsc.{io, Properties, Settings, interpreter}
import scala.tools.nsc.interpreter._
import scala.tools.nsc.util.ScalaClassLoader._
import scala.reflect.api.{Mirror, TypeCreator, Universe => ApiUniverse}
import scala.tools.nsc.interpreter._
/** SparkILoop variant for embedding: it reuses SparkILoop's initialization
  * machinery but (a) skips the automatic SparkContext creation and
  * (b) initializes the interpreter without ever entering the interactive
  * read-eval-print loop, so the caller can drive the interpreter directly.
  */
class HackSparkILoop(out:JPrintWriter) extends SparkILoop(None, out, None) {

  // Resolves the Spark master: explicit constructor value, else the MASTER
  // env var, else the spark.master system property, else local[*].
  private def getMaster(): String = {
    val master = this.master match {
      case Some(m) => m
      case None =>
        val envMaster = sys.env.get("MASTER")
        val propMaster = sys.props.get("spark.master")
        propMaster.orElse(envMaster).getOrElse("local[*]")
    }
    master
  }

  // Builds a TypeTag for a statically-known class via the runtime mirror;
  // needed because the REPL binds values by reflective type information.
  private def tagOfStaticClass[T: ClassTag]: u.TypeTag[T] =
    u.TypeTag[T](
      m,
      new TypeCreator {
        def apply[U <: ApiUniverse with Singleton](m: Mirror[U]): U # Type =
          m.staticClass(classTag[T].runtimeClass.getName).toTypeConstructor.asInstanceOf[U # Type]
      })

  // Intentionally a no-op: the stock SparkILoop would create `sc` and import
  // SparkContext._ here (see the retained commented-out body). The embedder
  // is responsible for creating any SparkContext itself.
  override def initializeSpark() {
    /*intp.beQuietDuring {
      command("""
         @transient val sc = org.apache.spark.repl.Main.interp.createSparkContext();
        """)
      command("import org.apache.spark.SparkContext._")
    }
    echo("Spark context available as sc.")*/
  }

  // Mirrors SparkILoop.process but returns after post-initialization instead
  // of running the interactive loop. The statement order below is
  // significant: thunks queued with addThunk run during post-initialization.
  override def process(settings: Settings): Boolean = savingContextLoader {
    if (getMaster() == "yarn-client") System.setProperty("SPARK_YARN_MODE", "true")

    this.settings = settings
    createInterpreter()

    // sets in to some kind of reader depending on environmental cues
    in = {
      // some post-initialization
      chooseReader(settings) match {
        case x: SparkJLineReader => addThunk(x.consoleReader.postInit) ; x
        case x => x
      }
    }
    lazy val tagOfSparkIMain = tagOfStaticClass[org.apache.spark.repl.SparkIMain]
    // Bind intp somewhere out of the regular namespace where
    // we can get at it in generated code.
    addThunk(intp.quietBind(NamedParam[SparkIMain]("$intp", intp)(tagOfSparkIMain, classTag[SparkIMain])))
    addThunk({
      import scala.tools.nsc.io._
      import Properties.userHome
      import scala.compat.Platform.EOL
      // Run the user's REPL autorun script if configured.
      val autorun = replProps.replAutorunCode.option flatMap (f => io.File(f).safeSlurp())
      if (autorun.isDefined) intp.quietRun(autorun.get)
    })

    addThunk(printWelcome())
    addThunk(initializeSpark())

    // it is broken on startup; go ahead and exit
    if (intp.reporter.hasErrors)
      return false

    // This is about the illusion of snappiness.  We call initialize()
    // which spins off a separate thread, then print the prompt and try
    // our best to look ready.  The interlocking lazy vals tend to
    // inter-deadlock, so we break the cycle with a single asynchronous
    // message to an actor.
    if (isAsync) {
      intp initialize initializedCallback()
      createAsyncListener() // listens for signal to run postInitialization
    }
    else {
      // ??? intp.getInterpreterClassLoader
      intp.initializeSynchronous()
      postInitialization()
    }
    // printWelcome()

    loadFiles(settings)

    // Unlike the parent class we never enter the interactive loop:
    //try loop()
    //catch AbstractOrMissingHandler()
    //finally closeInterpreter()

    true
  }
}
} | jayfans3/spark-notebook | modules/spark/src/main/scala_2.10/spark-1.2.3/HackSparkILoop.scala | Scala | apache-2.0 | 3,310 |
package org.zouzias.spark.lucenerdd.examples.linkage
import org.apache.spark.sql.SparkSession
import org.apache.spark.SparkConf
import org.zouzias.spark.lucenerdd.LuceneRDD
import org.zouzias.spark.lucenerdd._
import org.zouzias.spark.lucenerdd.logging.Logging
/**
 * Record linkage example between Abt and Buy product's descriptions using [[LuceneRDD]]
 *
 * You can run this locally with, ./spark-linkage-abt-vs-buy-products.sh
 */
object LinkageAbtvsBuy extends Logging {

  def main(args: Array[String]) {

    // initialise sparkSession context
    val conf = new SparkConf().setAppName(LinkageAbtvsBuy.getClass.getName)
    implicit val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    import spark.implicits._

    val start = System.currentTimeMillis()

    // Input parquet files: (id, name, description, price)-shaped product rows
    // plus a ground-truth table of known (idAbt, idBuy) matches.
    val abtDF = spark.read.parquet("data/linkage-products2/linkage-products-abt.parquet")
    logInfo(s"Loaded ${abtDF.count} Abt product descriptions")
    val buyDF = spark.read.parquet("data/linkage-products2/linkage-products-buy.parquet")
    logInfo(s"Loaded ${buyDF.count} Buy product descriptions")
    val groundTruthDF = spark.read.parquet("data/linkage-products2/linkage-products-abt-vs-buy.parquet")

    // Abt stays a plain dataset of tuples; Buy is indexed in Lucene so Abt
    // rows can be linked against it via generated queries.
    val abt = abtDF.map( row => (row.get(0).toString, row.getString(1), row.getString(2), row.getString(3)))
    val buy = LuceneRDD(buyDF.rdd.map( row => (row.get(0).toString, row.getString(1), row.getString(2), row.getString(3))))

    // Builds a Lucene query string per Abt record: OR-of-terms from the
    // product name (field _2) and, when non-empty, the description (field _3).
    // Terms are stripped to alphanumerics to keep the query parser happy.
    val linker: (String, String, String, String) => String = {
      case (_, name, description, _) => {
        val nameTokens = name.split(" ")
          .map(_.replaceAll("[^a-zA-Z0-9]", ""))
          .filter(_.length > 0).mkString(" OR ")
        val descTerms = description.split(" ")
          .map(_.replaceAll("[^a-zA-Z0-9]", ""))
          .filter(_.length > 0).mkString(" OR ")

        if (descTerms.nonEmpty) {
          s"(_2:($nameTokens)) OR (_3:$descTerms)"
        }
        else{
          s"_2:($nameTokens)"
        }
      }
    }

    // Perform linkage and return top-3 results per Abt record.
    // NOTE(review): the comment in the original said "top-5" but the code
    // passes 3; the code value is what executes.
    val linkedResults = buy.link(abt.rdd, linker.tupled, 3)

    // Compute the performance of linkage (accuracy): take the best hit per
    // Abt id and compare the resulting (idAbt, idBuy) pairs to ground truth.
    val linkageResultsIds = spark.createDataFrame(linkedResults.map{ case (abtId, topDocs) =>
      val rightId = topDocs.head.getString(topDocs.head.fieldIndex("_1"))
      val leftId = abtId._1.toInt
      (leftId, rightId)
    }).toDF("idAbt","idBuy")

    val correctHits: Double = linkageResultsIds
      .join(groundTruthDF, groundTruthDF.col("idAbt").equalTo(linkageResultsIds("idAbt")) && groundTruthDF.col("idBuy").equalTo(linkageResultsIds("idBuy")))
      .count()
    val total: Double = groundTruthDF.count
    val accuracy = correctHits / total

    val end = System.currentTimeMillis()

    logInfo("=" * 40)
    logInfo(s"|| Elapsed time: ${(end - start) / 1000.0} seconds ||")
    logInfo("=" * 40)

    logInfo("*" * 40)
    logInfo(s"* Accuracy of linkage is $accuracy *")
    logInfo("*" * 40)
    // terminate sparkSession context
    spark.stop()
  }
}
| zouzias/spark-lucenerdd-examples | src/main/scala/org/zouzias/spark/lucenerdd/examples/linkage/LinkageAbtvsBuy.scala | Scala | apache-2.0 | 3,033 |
/*
* A real-time collaborative tool to develop files over the network.
* Copyright (C) 2010 Mauro Ciancio and Leandro Gilioli
* {maurociancio,legilioli} at gmail dot com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ar.noxit.paralleleditor.gui
import scala.swing._
import ar.noxit.paralleleditor.common.Message
import ar.noxit.paralleleditor.common.operation._
import reflect.BeanProperty
/** Bridges local edit operations and the collaborative-editing protocol.
  * `generate` turns a locally produced operation into an outgoing [[Message]]
  * via the `send` callback; `receive` unpacks a remote message and hands the
  * resulting operation to `apply` for local application. Implementations
  * presumably perform operational-transformation bookkeeping — confirm
  * against the concrete Synchronizer used.
  */
trait Synchronizer {
  def generate(op: EditOperation, send: Message[EditOperation] => Unit)

  def receive(message: Message[EditOperation], apply: EditOperation => Unit)
}
/** Swing widget showing one collaboratively edited document.
  *
  * Local keystrokes are converted into single-character EditOperations,
  * pushed through the injected [[Synchronizer]], and published as
  * OperationEvents; remote operations arrive via [[processRemoteOperation]]
  * and are applied to the text area with event firing suppressed so they do
  * not echo back as local edits.
  */
class DocumentArea(private val docTitle: String, private val initialContent: String) extends ScrollPane {

  // Injected from outside (Java-bean style) before any traffic flows.
  @BeanProperty
  var sync: Synchronizer = _

  private val areaEdicion = new NotificationEditPane {
    text = initialContent
  }
  private val scrollAreaEdicion = new ScrollPane(areaEdicion)
  scrollAreaEdicion preferredSize = new Dimension(320, 240)

  contents = scrollAreaEdicion

  listenTo(areaEdicion)
  reactions += {
    case WrappedEvent(e) => {
      e match {
        // generate operations one character at a time
        case InsertionEvent(pos, text) =>
          (0 until text.size) foreach {index => generateOp(new AddTextOperation(text.substring(index, index + 1), pos + index))}
        case DeletionEvent(pos, count) =>
          (1 to count).foreach {Int => generateOp(new DeleteTextOperation(pos, 1))}
      }
    }
  }

  /** Applies a remote operation message on the EDT via the synchronizer. */
  def processRemoteOperation(m: Message[EditOperation]) {
    SwingUtil.invokeLater {
      sync.receive(m, {op => processOperation(op)})
    }
  }

  // Feeds a local operation to the synchronizer; the resulting message is
  // wrapped with the document title and published to listeners.
  private def generateOp(op: EditOperation) {
    sync.generate(op, {
      msg =>
        val docOp = new DocumentOperation(docTitle, msg)
        publish(OperationEvent(docOp))
    })
  }

  // Executes an operation against an ad-hoc DocumentData view of the text
  // area (text replacement plus caret handling), inside the no-echo guard.
  private def processOperation(o: EditOperation) = {
    doInGuard({
      val docData = new DocumentData {
        override def data = areaEdicion.text

        override def replace(offset: Int, length: Int, newText: String) = {
          // Splice newText over [offset, offset + length); null means "".
          val result = data.substring(0, offset) + (if (newText == null) "" else newText) + "" + data.substring(offset + length)
          areaEdicion.text = result
        }

        val caret = new Caret {
          def selectionLength = 0

          def offset = areaEdicion.caret.position

          def change(offset: Int, selectionLength: Int) = {
            areaEdicion.caret.position = offset
          }
        }
      }
      o.executeOn(docData)
    })
  }

  // Runs `closure` with the edit pane's event firing disabled, so applying a
  // remote edit does not generate a new local operation; firing is restored
  // even if the closure throws.
  private def doInGuard(closure: => Unit) = {
    try {
      areaEdicion.disableFiringEvents
      closure
      areaEdicion.repaint
    }
    finally
      areaEdicion.enableFiringEvents
  }

  def text = areaEdicion text
}
| maurociancio/parallel-editor | src/parallel-editor-gui/src/main/scala/ar/noxit/paralleleditor/gui/DocumentArea.scala | Scala | gpl-3.0 | 3,637 |
package org.http4s
package parser
import java.io.UnsupportedEncodingException
import java.nio.CharBuffer
import scala.annotation.switch
import scala.collection.immutable.BitSet
import scala.io.Codec
import cats.data._
import cats.implicits._
import org.http4s.util.UrlCodingUtils
/** Split an encoded query string into unencoded key value pairs
  * It always assumes any input is a valid query, including "".
  * If "" should be interpreted as no query that __MUST__ be
  * checked beforehand.
  */
private[http4s] class QueryParser(codec: Codec, colonSeparators: Boolean, qChars: BitSet = QueryParser.ExtendedQChars) {
  import QueryParser._

  /** Decodes the input into key value pairs.
    * `flush` signals that this is the last input */
  def decode(input: CharBuffer, flush: Boolean): ParseResult[Query] = {
    val acc = Query.newBuilder
    decodeBuffer(input, (k,v) => acc += ((k,v)), flush) match {
      case Some(e) => ParseResult.fail("Decoding of url encoded data failed.", e)
      case None => ParseResult.success(acc.result)
    }
  }

  // Incremental split of `input` into (key, value) pairs fed to `acc`.
  // Returns Some(errorMessage) on an invalid character, None on success.
  // When `flush` is false the buffer is marked before each complete pair and
  // reset at the end, so a partially-consumed pair is re-read on the next call.
  def decodeBuffer(input: CharBuffer, acc: (String, Option[String]) => Query.Builder, flush: Boolean): Option[String] = {
    val valAcc = new StringBuilder(InitialBufferCapactiy)

    var error: String = null
    var key: String = null
    // KEY: accumulating a key (no '=' seen yet); VALUE: accumulating a value.
    var state: State = KEY

    // Emit the pair collected so far: a bare key (no '=') becomes
    // (key, None), otherwise (key, Some(value)).
    def appendValue(): Unit = {
      if (state == KEY) {
        val s = valAcc.result()
        val k = decodeParam(s)
        valAcc.clear()
        acc(k, None)
      }
      else {
        val k = decodeParam(key)
        key = null
        val s = valAcc.result()
        valAcc.clear()
        val v = Some(decodeParam(s))
        acc(k, v)
      }
      ()
    }

    def endPair(): Unit = {
      // Mark the position after a complete pair so reset() can rewind here
      // if the buffer ends mid-pair.
      if (!flush) input.mark()
      appendValue()
      state = KEY
    }

    if (!flush) input.mark()

    // begin iterating through the chars
    while(error == null && input.hasRemaining) {
      val c = input.get()
      (c: @switch) match {
        case '&' => endPair()

        case ';' if colonSeparators => endPair()

        case '=' =>
          // Only the first '=' separates key from value; later ones are data.
          if (state == VALUE) valAcc.append('=')
          else {
            state = VALUE
            key = valAcc.result()
            valAcc.clear()
          }

        case c if (qChars.contains(c.toInt)) => valAcc.append(c)

        case c => error = s"Invalid char while splitting key/value pairs: '$c'"
      }
    }
    if (error != null) Some(error)
    else {
      if (flush) appendValue()
      else input.reset() // rewind to the last mark position
      None
    }
  }

  // Percent-decodes a raw parameter ('+' becomes space). Malformed encodings
  // are silently collapsed to the empty string rather than failing the parse.
  private def decodeParam(str: String): String =
    try UrlCodingUtils.urlDecode(str, codec.charSet, plusIsSpace = true)
    catch {
      case e: IllegalArgumentException => ""
      case e: UnsupportedEncodingException => ""
    }
}
private[http4s] object QueryParser {
  // Initial size of the per-pair StringBuilder; a micro-optimization only.
  // (Name carries a historical typo — "Capactiy"; kept as-is since it is
  // referenced from the class above.)
  private val InitialBufferCapactiy = 32

  /** Convenience entry point: parses a whole query string in one shot with
    * semicolon separators enabled. An empty string yields an empty Query. */
  def parseQueryString(queryString: String, codec: Codec = Codec.UTF8): ParseResult[Query] = {
    if (queryString.isEmpty) Either.right(Query.empty)
    else new QueryParser(codec, true).decode(CharBuffer.wrap(queryString), true)
  }

  // Parser states: reading a key vs. reading a value (after '=').
  private sealed trait State
  private case object KEY extends State
  private case object VALUE extends State

  /** Defines the characters that are allowed unquoted within a query string as
    * defined in RFC 3986*/
  val QChars = BitSet((Pchar ++ "/?".toSet - '&' - '=').map(_.toInt).toSeq:_*)

  /** PHP also includes square brackets ([ and ]) with query strings. This goes
    * against the spec but due to PHP's widespread adoption it is necessary to
    * support this extension. */
  val ExtendedQChars = QChars ++ ("[]".map(_.toInt).toSet)

  // Character classes from RFC 3986 (pchar / unreserved / sub-delims).
  private def Pchar = Unreserved ++ SubDelims ++ ":@%".toSet
  private def Unreserved = "-._~".toSet ++ AlphaNum
  private def SubDelims = "!$&'()*+,;=".toSet
  private def AlphaNum = (('a' to 'z') ++ ('A' to 'Z') ++ ('0' to '9')).toSet
}
| ZizhengTai/http4s | core/src/main/scala/org/http4s/parser/QueryParser.scala | Scala | apache-2.0 | 3,977 |
/*
// Copyright 2012/2013 de Gustavo Steinberg, Flavio Soares, Pierre Andrews, Gustavo Salazar Torres, Thomaz Abramo
//
// Este arquivo é parte do programa Vigia Político. O projeto Vigia
// Político é um software livre; você pode redistribuí-lo e/ou
// modificá-lo dentro dos termos da GNU Affero General Public License
// como publicada pela Fundação do Software Livre (FSF); na versão 3 da
// Licença. Este programa é distribuído na esperança que possa ser útil,
// mas SEM NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO a
// qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a licença para
// maiores detalhes. Você deve ter recebido uma cópia da GNU Affero
// General Public License, sob o título "LICENCA.txt", junto com este
// programa, se não, acesse http://www.gnu.org/licenses/
*/
package utils
import play.api.libs.json._
import play.api.libs.json.util._
import play.api.libs.json.Writes._
import play.api.libs.functional.syntax._
import java.util.Date
import models._
// NOTE(review): this object currently contains no live code — every Writes
// instance below is commented out (presumably superseded or pending a model
// refactor; git history would confirm). The dead code is preserved verbatim
// as reference for the JSON shapes previously emitted for Comissoe, Tag,
// ProjetoLei and PLPage. Either revive or delete it in a follow-up.
object ModelJson {

  // private def comNoId(com: Comissoe): Option[(String, String)] = Some((com.name, com.short))
  // implicit val comissoeWrites: Writes[Comissoe] = (
  //   (__ \ "nome").write[String] and
  //   (__ \ "short").write[String]
  // )(unlift(comNoId))

  // private def tagNoId(tag: Tag): Option[(String, Option[Long])] = Some((tag.tag, tag.count))
  // implicit val tagWrites: Writes[Tag] = (
  //   (__ \ "tag").write[String] and
  //   (__ \ "count").write[Option[Long]]
  // )(unlift(tagNoId))

  // private def plNoId(proj: ProjetoLei): Option[(Int, String, Date, String, Option[String], Option[Int], Option[Date], Set[String], Set[Comissoe])] = {
  //   val tags = ProjetoLei.getTags(proj).map(_.tag)
  //   val coms = ProjetoLei.getComissoes(proj)
  //   Some((proj.numero, proj.tipo, proj.data, proj.ementa, proj.tipoNorma, proj.numeroNorma, proj.dataNorma, tags, coms))
  // }

  // implicit val projetoWrites: Writes[ProjetoLei] = (
  //   (__ \ "numero").write[Int] and
  //   (__ \ "tipo").write[String] and
  //   (__ \ "data").write[Date] and
  //   (__ \ "ementa").write[String] and
  //   (__ \ "tipoNorma").write[Option[String]] and
  //   (__ \ "numeroNorma").write[Option[Int]] and
  //   (__ \ "dataNorma").write[Option[Date]] and
  //   (__ \ "tags").write[Set[String]] and
  //   (__ \ "comissoes").write[Set[Comissoe]]
  // )(unlift(plNoId))

  // implicit val pageWrites: Writes[PLPage] = (
  //   (__ \ "page").write[Long] and
  //   (__ \ "pages").write[Long] and
  //   (__ \ "perpage").write[Int] and
  //   (__ \ "total").write[Long] and
  //   (__ \ "content").write[Seq[ProjetoLei]]
  // )(unlift(PLPage.unapply))
}
| cidadao-automatico/cidadao-server | app/utils/ModelJson.scala | Scala | agpl-3.0 | 2,630 |
/* Copyright © 2017 University of Texas at Arlington
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import edu.uta.diql._
import org.apache.spark._
import org.apache.spark.rdd._
/** DIQL example: matrix factorization via gradient descent on Spark.
  * The m(...) and q(...) calls are DIQL macros: the triple-quoted strings are
  * DIQL query source compiled at Scala compile time, so their content is
  * semantically significant and must not be edited casually.
  */
object Test {

  // Sparse matrix encoding: (value, rowIndex, colIndex) triples.
  type Matrix = RDD[ ( Double, Int, Int ) ]

  def main ( args: Array[String] ) {
    val conf = new SparkConf().setAppName("Test")
    val sc = new SparkContext(conf)

    // Parses "value,i,j" CSV lines into matrix triples.
    def readMatrix ( file: String )
      = sc.textFile(file)
          .map( line => { val Array(v,i,j) = line.split(",")
                          (v.toDouble,i.toInt,j.toInt)
                        } )

    explain(true)

    // Gradient-descent hyperparameters: learning rate a, regularization b.
    val a = 0.002
    val b = 0.02
    val iterations = 5

    // DIQL macro block defining the linear-algebra vocabulary used below.
    m("""
      def transpose ( X: Matrix ) =
        select (x,j,i)
        from (x,i,j) <- X;

      // matrix multiplication:
      def multiply ( X: Matrix, Y: Matrix ) =
        select ( +/z, i, j )
        from (x,i,k) <- X, (y,k_,j) <- Y, z = x*y
        where k == k_
        group by (i,j);

      // multiplication by a number:
      def mult ( a: Double, X: Matrix ) =
        select ( a*x, i, j )
        from (x,i,j) <- X;

      // cell-wise addition:
      def Cadd ( X: Matrix, Y: Matrix ) =
        select ( +/x + +/y, i, j )
        from (x,i,j) <- X
        group by (i,j)
        from (y,i_,j_) <- Y
        group by (i_,j_);

      // cell-wise subtraction:
      def Csub ( X: Matrix, Y: Matrix ) =
        select ( +/x - +/y, i, j )
        from (x,i,j) <- X
        group by (i,j)
        from (y,i_,j_) <- Y
        group by (i_,j_);

      // Matrix Factorization using Gradient Descent
      def factorize ( R: Matrix, Pinit: Matrix, Qinit: Matrix ) =
        repeat (E,P,Q) = (R,Pinit,Qinit)
          step ( Csub(R,multiply(P,transpose(Q))),
                 Cadd(P,mult(a,Csub(mult(2,multiply(E,transpose(Q))),mult(b,P)))),
                 Cadd(Q,mult(a,Csub(mult(2,multiply(E,transpose(P))),mult(b,Q)))) )
          limit iterations
      """)

    // Factorizes matrix.txt starting from factors filled with 0.5 and
    // evaluates to the final (L, R) factor pair.
    q("""
      let M = readMatrix("matrix.txt"),
          I = (select (0.5D,i,j) from (_,i,j) <- M),
          (E,L,R) = factorize(M,I,I)
      in (L,R)//multiply(L,transpose(R))
      """)//.foreach(println)
  }
}
| fegaras/DIQL | tests/spark/factorization.scala | Scala | apache-2.0 | 2,651 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.{ArrayData, MapData}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* An abstract class for row used internally in Spark SQL, which only contains the columns as
* internal types.
*/
/**
 * An abstract class for row used internally in Spark SQL, which only contains the columns as
 * internal types.
 */
abstract class InternalRow extends SpecializedGetters with Serializable {

  /** Number of columns held by this row. */
  def numFields: Int

  // Test-only helper; throws a NullPointerException when the slot at `ordinal` is null.
  def getString(ordinal: Int): String = getUTF8String(ordinal).toString

  /** Marks the column at position `i` as null. */
  def setNullAt(i: Int): Unit

  /**
   * Updates the value at column `i`. Note that after updating, the given value will be kept in
   * this row, and the caller side should guarantee that this value won't be changed afterwards.
   */
  def update(i: Int, value: Any): Unit

  // Generic (boxing, hence slow) fall-backs; subclasses are expected to override these
  // with specialized primitive implementations.
  def setBoolean(i: Int, value: Boolean): Unit = update(i, value)
  def setByte(i: Int, value: Byte): Unit = update(i, value)
  def setShort(i: Int, value: Short): Unit = update(i, value)
  def setInt(i: Int, value: Int): Unit = update(i, value)
  def setLong(i: Int, value: Long): Unit = update(i, value)
  def setFloat(i: Int, value: Float): Unit = update(i, value)
  def setDouble(i: Int, value: Double): Unit = update(i, value)

  /**
   * Update the decimal column at `i`.
   *
   * Note: In order to support update decimal with precision > 18 in UnsafeRow,
   * CAN NOT call setNullAt() for decimal column on UnsafeRow, call setDecimal(i, null, precision).
   */
  def setDecimal(i: Int, value: Decimal, precision: Int): Unit = update(i, value)

  /** Make a copy of the current [[InternalRow]] object. */
  def copy(): InternalRow

  /** Returns true if there are any NULL values in this row. */
  def anyNull: Boolean = {
    val n = numFields
    var idx = 0
    // Advance to the first null slot, if any; the loop condition replaces an early `return`.
    while (idx < n && !isNullAt(idx)) {
      idx += 1
    }
    idx < n
  }

  /* ---------------------- utility methods for Scala ---------------------- */

  /**
   * Return a Scala Seq representing the row. Elements are placed in the same order in the Seq.
   */
  def toSeq(fieldTypes: Seq[DataType]): Seq[Any] = {
    val n = numFields
    assert(n == fieldTypes.length)
    val out = new Array[Any](n)
    var idx = 0
    while (idx < n) {
      out(idx) = get(idx, fieldTypes(idx))
      idx += 1
    }
    out
  }

  def toSeq(schema: StructType): Seq[Any] = toSeq(schema.map(_.dataType))
}
object InternalRow {

  /** Constructs an [[InternalRow]] holding the given values. */
  def apply(values: Any*): InternalRow = new GenericInternalRow(values.toArray)

  /** Constructs an [[InternalRow]] from a [[Seq]] of values. */
  def fromSeq(values: Seq[Any]): InternalRow = new GenericInternalRow(values.toArray)

  /** An [[InternalRow]] with zero columns. */
  val empty = apply()

  /**
   * Returns a defensive copy of `value` when it is of a mutable internal type
   * (string/struct/array/map); any other value is passed through unchanged.
   */
  def copyValue(value: Any): Any = value match {
    case s: UTF8String => s.copy()
    case r: InternalRow => r.copy()
    case a: ArrayData => a.copy()
    case m: MapData => m.copy()
    case other => other
  }

  /**
   * Returns an accessor for an `InternalRow` with the given data type. The returned accessor
   * actually takes a `SpecializedGetters` input because it can be generalized to other classes
   * that implement `SpecializedGetters` (e.g., `ArrayData`) too.
   */
  def getAccessor(dt: DataType, nullable: Boolean = true): (SpecializedGetters, Int) => Any = {
    // Null-unsafe reader; the case order below is behavior-significant (UDT unwraps to its
    // sql type, and the final case is the generic fallback).
    val readRaw: (SpecializedGetters, Int) => Any = dt match {
      case BooleanType => (row, pos) => row.getBoolean(pos)
      case ByteType => (row, pos) => row.getByte(pos)
      case ShortType => (row, pos) => row.getShort(pos)
      case IntegerType | DateType => (row, pos) => row.getInt(pos)
      case LongType | TimestampType => (row, pos) => row.getLong(pos)
      case FloatType => (row, pos) => row.getFloat(pos)
      case DoubleType => (row, pos) => row.getDouble(pos)
      case StringType => (row, pos) => row.getUTF8String(pos)
      case BinaryType => (row, pos) => row.getBinary(pos)
      case CalendarIntervalType => (row, pos) => row.getInterval(pos)
      case d: DecimalType => (row, pos) => row.getDecimal(pos, d.precision, d.scale)
      case s: StructType => (row, pos) => row.getStruct(pos, s.size)
      case _: ArrayType => (row, pos) => row.getArray(pos)
      case _: MapType => (row, pos) => row.getMap(pos)
      case u: UserDefinedType[_] => getAccessor(u.sqlType, nullable)
      case _ => (row, pos) => row.get(pos, dt)
    }
    if (!nullable) {
      readRaw
    } else {
      // Null-safe wrapper: probe isNullAt first, then delegate to the raw reader.
      (row, pos) => if (row.isNullAt(pos)) null else readRaw(row, pos)
    }
  }

  /**
   * Returns a writer for an `InternalRow` with the given data type. Mutable internal values
   * (string/struct/array/map) are copied before being stored so the row owns its data.
   */
  def getWriter(ordinal: Int, dt: DataType): (InternalRow, Any) => Unit = dt match {
    case BooleanType => (row, v) => row.setBoolean(ordinal, v.asInstanceOf[Boolean])
    case ByteType => (row, v) => row.setByte(ordinal, v.asInstanceOf[Byte])
    case ShortType => (row, v) => row.setShort(ordinal, v.asInstanceOf[Short])
    case IntegerType | DateType => (row, v) => row.setInt(ordinal, v.asInstanceOf[Int])
    case LongType | TimestampType => (row, v) => row.setLong(ordinal, v.asInstanceOf[Long])
    case FloatType => (row, v) => row.setFloat(ordinal, v.asInstanceOf[Float])
    case DoubleType => (row, v) => row.setDouble(ordinal, v.asInstanceOf[Double])
    case DecimalType.Fixed(precision, _) =>
      (row, v) => row.setDecimal(ordinal, v.asInstanceOf[Decimal], precision)
    case udt: UserDefinedType[_] => getWriter(ordinal, udt.sqlType)
    case NullType => (row, _) => row.setNullAt(ordinal)
    case StringType => (row, v) => row.update(ordinal, v.asInstanceOf[UTF8String].copy())
    case _: StructType => (row, v) => row.update(ordinal, v.asInstanceOf[InternalRow].copy())
    case _: ArrayType => (row, v) => row.update(ordinal, v.asInstanceOf[ArrayData].copy())
    case _: MapType => (row, v) => row.update(ordinal, v.asInstanceOf[MapData].copy())
    case _ => (row, v) => row.update(ordinal, v)
  }
}
| caneGuy/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/InternalRow.scala | Scala | apache-2.0 | 7,376 |
package views.html
import play.templates._
import play.templates.TemplateMagic._
import play.api.templates._
import play.api.templates.PlayMagic._
import models._
import controllers._
import play.api.i18n._
import play.api.mvc._
import play.api.data._
import views.html._
/**/
// NOTE(review): Twirl/Play auto-generated template source (see the "-- GENERATED --" footer
// below). Do not edit by hand — regenerate from app/views/index.scala.html instead; the
// MATRIX/LINES footer comments map generated offsets back to the template source, and the
// inline /*r.c*/ comments record the originating row.column in the template.
object index extends BaseScalaTemplate[play.api.templates.HtmlFormat.Appendable,Format[play.api.templates.HtmlFormat.Appendable]](play.api.templates.HtmlFormat) with play.api.templates.Template1[String,play.api.templates.HtmlFormat.Appendable] {
/**/
def apply/*1.2*/(message: String):play.api.templates.HtmlFormat.Appendable = {
_display_ {
Seq[Any](format.raw/*1.19*/("""
"""),_display_(/*3.2*/main("Welcome to Foo Service")/*3.32*/ {_display_(Seq[Any](format.raw/*3.34*/("""
<div class="well">
<center>
<a href=""""),_display_(/*6.17*/routes/*6.23*/.Auth.userSignup()),format.raw/*6.41*/(""""><h3>User Signup</h3></a>
<a href="/user"><h3>User Login</h3></a>
<a href="/admin"><h3>Admin Login</h3></a>
</center>
</div>
""")))}),format.raw/*12.2*/("""
"""))}
}
def render(message:String): play.api.templates.HtmlFormat.Appendable = apply(message)
def f:((String) => play.api.templates.HtmlFormat.Appendable) = (message) => apply(message)
def ref: this.type = this
}
/*
-- GENERATED --
DATE: Mon Jun 30 14:48:22 IST 2014
SOURCE: /home/nagarjuna/FooService/app/views/index.scala.html
HASH: 1d9922e5ec71be9ba547eada55eb3a0913728b3d
MATRIX: 556->1|667->18|695->21|733->51|772->53|852->107|866->113|904->131|1083->280
LINES: 19->1|22->1|24->3|24->3|24->3|27->6|27->6|27->6|33->12
-- GENERATED --
*/
| pamu/FooService | FooService1/target/scala-2.10/src_managed/main/views/html/index.template.scala | Scala | apache-2.0 | 1,846 |
/**
* Copyright 2014 Andrea Esposito <and1989@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package it.unipi.thesis.andrea.esposito.onjag.protocols
import it.unipi.thesis.andrea.esposito.onjag.core._
import scala.reflect.ClassTag
import scala.collection.mutable.ListBuffer
import it.unipi.thesis.andrea.esposito.onjag.protocols.randompeersampling.RandomPeerSamplingVertexContext
import org.apache.spark.SparkContext
import scala.collection.mutable
/**
* Created by Andrea Esposito <and1989@gmail.com> on 18/10/13.
*
* T-MAN Protocol rearranged for the ONJAG platform.
* Pipeline of random id chucks is employed.
*
* Paper:
* M. Jelasity, A. Montresor, and O. Babaoglu. 2009.
* "T-Man: Gossip-based fast overlay topology construction".
* Comput. Netw. 53, 13 (August 2009), 2321-2339.
*/
abstract class TMANProtocol(c: Int, H: Int, R: Int, startStep_param: Int = 0,
                            step_param: Int = 1, name_param: String = "tman") extends Protocol {

  // Protocol identifier; used to address TMANMessages and to register request messages below.
  var name: String = name_param
  val startStep: Int = startStep_param

  // T-MAN uses no global aggregator.
  type aggregatorType = Nothing

  def step: Int = step_param

  // No protocol-wide initialization needed.
  override def init(sc: SparkContext): Unit = {
  }

  // override in case of a descriptor that needs a message that is possible to clean from the context references
  def factoryMessage[K, viewType](source: TMANVertexContext[K],
                                  targetId: K,
                                  messageTypeFlag_param: Boolean,
                                  viewDescriptors: viewType): TMANMessage[K] = {
    new TMANMessage[K](source.getId, source.descriptor, targetId, messageTypeFlag_param, viewDescriptors.asInstanceOf[source.viewType])(this)
  }

  def brutalStoppable(): Boolean = true

  /**
   * One T-MAN round for a single vertex: answers pending pull requests, performs the active
   * push towards the best-ranked peer, merges all received views, and re-selects the local view.
   *
   * Returns (keepRunning, outgoing gossip messages, requests for random-peer contexts).
   */
  def compute[K: ClassTag](self: ProtocolVertexContext[K],
                           messages: Seq[Message[K]],
                           responseProtocolCxtMsgs: Seq[ResponseVertexContextMessage[K]],
                           aggregator: Option[TMANProtocol#aggregatorType],
                           superstep: Int)
  : (Boolean, Seq[_ <: Message[K]], Seq[RequestVertexContextMessage[K]]) = {
    val tmanContext = self.asInstanceOf[TMANVertexContext[K]]
    val newMsgs = new ListBuffer[Message[K]]()
    val reqMsgs = new ListBuffer[RequestVertexContextMessage[K]]()
    val randomResponseContexts = responseProtocolCxtMsgs.map(m => m.cxt.asInstanceOf[TMANVertexContext[K]])
    // NOTE: stateful iterator — every prepareBuffer call below consumes the next chunk of R
    // random ids, so the number/order of prepareBuffer calls is behavior-significant.
    val randomTMANWindow = tmanContext.randomIdChunks.sliding(R)

    // Builds the view to gossip to `peerId`: current view + fresh random peers + this vertex,
    // with the H oldest entries dropped, ranked from the target peer's point of view.
    def prepareBuffer(peerId: K, peerDescriptor: Option[tmanContext.descriptorType]): tmanContext.viewType = {
      val currentRndIdChunk = randomTMANWindow.next()
      val randomTMANBuffer = randomResponseContexts.filter(cxt => currentRndIdChunk.exists(id => id == cxt.getId()))
      // Random peers enter the buffer with age 0 (freshest).
      val rndView: tmanContext.viewType = randomTMANBuffer.map(
        peer => (peer.getId, Some(peer.descriptor.asInstanceOf[tmanContext.descriptorType]), 0.asInstanceOf[TMANVertexContext[K]#ageType])
      ).toArray
      // var buffer = tmanContext.merge(tmanContext.view, Array())
      val myselfEntry = (tmanContext.getId, Some(tmanContext.descriptor), 0.asInstanceOf[TMANVertexContext[K]#ageType])
      var buffer = tmanContext.merge(tmanContext.view, rndView.+:(myselfEntry))
      buffer = tmanContext.removeOldest(buffer, H)
      tmanContext.selectView(c, buffer, peerId, peerDescriptor)
    }

    // Active/Push "Thread"
    val p = tmanContext.selectPeer()
    val pushBuffer = prepareBuffer(p._1, p._2)

    //Process pendingPull
    tmanContext.pendingPull.foreach {
      elem =>
        val peerId = elem._1
        val peerDescriptor = elem._2
        val pullBuffer = prepareBuffer(peerId, Some(peerDescriptor))
        // messageTypeFlag = true marks a pull reply (receiver will only merge it).
        newMsgs += factoryMessage(tmanContext, peerId, true, pullBuffer)
    }
    tmanContext.pendingPull.clear()
    tmanContext.randomIdChunks.clear()
    reqMsgs ++= askNextRandomPeerChunks(tmanContext)
    // messageTypeFlag = false marks the active push (receiver will merge and reply).
    newMsgs += factoryMessage(tmanContext, p._1, false, pushBuffer)

    var globalBuffer: tmanContext.viewType = tmanContext.view.clone()
    // Receive either pull and active messages
    messages.foreach {
      m =>
        val mTMAN = m.asInstanceOf[TMANMessage[K]]
        // push receive
        if (mTMAN.messageTypeFlag) {
          globalBuffer = tmanContext.merge(globalBuffer, mTMAN.descriptors.asInstanceOf[tmanContext.viewType])
        } else {
          // pull receive
          globalBuffer = tmanContext.merge(globalBuffer, mTMAN.descriptors.asInstanceOf[tmanContext.viewType])
          // Queue a pull reply for next round; request fresh random ids to build it with.
          reqMsgs ++= askNextRandomPeerChunks(tmanContext)
          tmanContext.pendingPull.append((mTMAN.sourceId, mTMAN.sourceDescriptor.asInstanceOf[tmanContext.descriptorType]))
        }
    }
    tmanContext.view = tmanContext.selectView(c, globalBuffer)
    tmanContext.increaseAge()
    (true, newMsgs, reqMsgs)
  }

  // Requests R random peer contexts from the "peersampling" protocol (if registered) and
  // records their ids in randomIdChunks so compute() can pipeline them next round.
  private def askNextRandomPeerChunks[K: ClassTag](tmanContext: TMANVertexContext[K]): Array[RequestVertexContextMessage[K]] = {
    val msgs = new ListBuffer[RequestVertexContextMessage[K]]()
    // Ask for the next random peers chunk
    val peerSamplingContext = tmanContext.accessProtocol("peersampling")
    peerSamplingContext match {
      case Some(peerCxt) =>
        val rndPeerCxt = peerCxt.asInstanceOf[RandomPeerSamplingVertexContext[K]]
        for (i <- 1 to R) {
          val rndPeer = rndPeerCxt.getPeer()
          rndPeer match {
            case Some(peer) =>
              tmanContext.randomIdChunks += peer
              msgs += new RequestVertexContextMessage[K](tmanContext.getId, peer, name)
            case None => {}
          }
        }
      case None => {}
    }
    msgs.toArray
  }

  // Bootstrap: no gossip messages, only the first batch of random-peer requests.
  def createInitMessages[K: ClassTag](context: ProtocolVertexContext[K], data: Any)
  : (Seq[_ <: Message[K]], Seq[RequestVertexContextMessage[K]]) =
    (new Array[Message[K]](0), askNextRandomPeerChunks(context.asInstanceOf[TMANVertexContext[K]]))

  def createProtocolVertexContext[K: ClassTag](id: K, data: Array[Any]): TMANVertexContext[K]
}
/**
 * Per-vertex state for the T-MAN protocol: the local view (neighbor entries with ages),
 * this vertex's descriptor, and transient buffers used across rounds.
 */
abstract class TMANVertexContext[K] extends ProtocolVertexContext[K] {

  /**
   * Type parameter in order to specialize the age field
   */
  type ageType = Int
  type descriptorType

  /**
   * Descriptor is optional because usually at the beginning each peer doesn't know the descriptor value of the others
   */
  type entryType = (K, Option[_ <: descriptorType], ageType)
  type viewType = Array[entryType]

  var view: viewType
  var descriptor: descriptorType

  // (peer id, peer descriptor) pairs whose pull replies are still to be produced.
  var pendingPull = new ListBuffer[(K, descriptorType)]
  // Random peer ids obtained from the peer-sampling protocol (consumed in chunks of R).
  var randomIdChunks = new ListBuffer[K]

  // Strips vertex-local buffers (and the view) before the context is serialized/shipped.
  override def fixSendable[F](p: ProtocolVertexContext[F]): ProtocolVertexContext[F] = {
    val tmanContext = super.fixSendable(p).asInstanceOf[TMANVertexContext[F]]
    tmanContext.pendingPull = null
    tmanContext.randomIdChunks = null
    tmanContext.view = null
    tmanContext
  }

  override def toString(): String = {
    val strBuilder = new StringBuilder()
    strBuilder.append("[ID: %s, Descriptor: %s, View:".format(getId, descriptor))
    // view may be null after fixSendable has run.
    if (view != null) {
      view.foreach(v => strBuilder.append(" (%s, %s, %d)".format(v._1, v._2, v._3)))
    }
    strBuilder.append("]\\n")
    strBuilder.toString()
  }

  // Removes (up to) the H entries with the greatest age from `view`.
  def removeOldest(view: viewType, H: Int): viewType = {
    val ret = new mutable.UnrolledBuffer[entryType]()
    ret ++= view
    for (i <- 0 until H if ret.length > 0) {
      // max under "less age" ordering = the oldest remaining entry.
      val older = ret.max(Ordering.fromLessThan[entryType]((a, b) => a._3 < b._3))
      ret -= older
    }
    ret.toArray
  }

  /**
   * The TMAN Ranking Function
   * @param d1 V Descriptor
   * @param d2 Another Descriptor
   * @return rank between d1 and d2
   */
  def rankingFunction(d1: descriptorType, d2: descriptorType): Double

  // Selects this vertex's own view (ranked from its own descriptor's point of view).
  def selectView(c: Int, buffer: viewType): viewType = selectView(c, buffer, getId, Some(descriptor))

  // Selects the best c entries of `buffer` for peer `otherId`, ranked by rankingFunction
  // against `otherPeerDescriptor`; entries without a descriptor sort last.
  def selectView(c: Int, buffer: viewType, otherId: K, otherPeerDescriptor: Option[descriptorType]): viewType = {
    val noDuplicates = merge(buffer.clone, new viewType(0)).filterNot(s => s._1 == otherId) // Remove duplicates and other peer entry, maintain the freshest ones
    val size = if (c > noDuplicates.length) noDuplicates.length else c
    val sorted = if (otherPeerDescriptor.isDefined)
      noDuplicates.sortWith((a, b) => {
        if (a._2.isDefined && b._2.isDefined)
          rankingFunction(otherPeerDescriptor.get, a._2.get) < rankingFunction(otherPeerDescriptor.get, b._2.get)
        else
          a._2.isDefined
      })
    else
      noDuplicates
    sorted.slice(0, size)
  }

  // Picks the view entry closest to this vertex (minimal rankingFunction value);
  // entries lacking a descriptor are treated as infinitely far away.
  def selectPeer(): (K, Option[descriptorType]) = {
    // Procedural Style (fast)
    var minPeer: (K, Double, Option[descriptorType]) = null
    for (i <- 0 until view.length) {
      val v = view(i)
      if (minPeer == null) {
        var distance = Double.MaxValue
        var descriptorPeer: Option[descriptorType] = None
        if (view(i)._2.isDefined) {
          distance = rankingFunction(descriptor, v._2.get)
          descriptorPeer = v._2
        }
        minPeer = (v._1, distance, descriptorPeer)
      } else if (v._2.isDefined) {
        val distance = rankingFunction(descriptor, v._2.get)
        val descriptorPeer: Option[descriptorType] = v._2
        if (distance < minPeer._2)
          minPeer = (v._1, distance, descriptorPeer)
      }
    }
    (minPeer._1, minPeer._3)
    // Function style (slow)
    /*
        val distances = view.map {
          v =>
            if (v._2.isDefined) {
              (v._1, rankingFunction(descriptor, v._2.get), v._2)
            } else {
              (v._1, Double.MaxValue, None)
            }
        }
        val ret = distances.min(Ordering.fromLessThan[(K, Double, Option[descriptorType])]((a, b) => a._2 < b._2))
        (ret._1, ret._3)
    */
  }

  // Merges two views, de-duplicating by peer id. NOTE(review): on a duplicate, an entry with a
  // defined descriptor replaces the stored one (younger-age check first) — confirm the intended
  // precedence of "fresher age" vs "has descriptor" matches the T-MAN paper's merge semantics.
  def merge(view1: viewType, view2: viewType): viewType = {
    // More procedural way but also more efficient than functional expression
    val result = new mutable.LinkedHashMap[K, entryType]()
    view1.foreach(e =>
      result.get(e._1) match {
        case Some(entry) =>
          if (entry._2.isDefined && e._3 < entry._3) {
            result.update(e._1, e)
          } else if (e._2.isDefined) {
            result.update(e._1, e)
          }
        case None => result.update(e._1, e)
      }
    )
    view2.foreach(e =>
      result.get(e._1) match {
        case Some(entry) =>
          if (entry._2.isDefined && e._3 < entry._3) {
            result.update(e._1, e)
          } else if (e._2.isDefined) {
            result.update(e._1, e)
          }
        case None => result.update(e._1, e)
      }
    )
    result.values.toArray[entryType]
    // Functional expression
    /*
        (view1 ++ view2).groupBy(v => v._1).map(
          e => {
            e._2.min(Ordering.fromLessThan[entryType](
              (a, b) =>
                if (a._2.isDefined && b._2.isDefined) a._3 < b._3
                else a._2.isDefined))
          }
        ).toArray
    */
  }

  // Ages every entry by one round.
  def increaseAge(): Unit = {
    view = view.map(v => (v._1, v._2, (v._3 + 1).asInstanceOf[ageType]))
  }
}
/**
 * Gossip message exchanged between T-MAN peers, carrying a batch of view entries.
 *
 * @param sourceId         id of the sending vertex
 * @param sourceDescriptor the sender's descriptor (used by the receiver to build its reply)
 * @param targetId         id of the receiving vertex
 * @param messageTypeFlag  true for a pull reply (receiver only merges it); false for an active
 *                         push (receiver merges and schedules a pull reply) — see
 *                         TMANProtocol.compute
 * @param viewDescriptors  the (peer id, descriptor, age) entries being gossiped
 */
class TMANMessage[K](sourceId: K,
                     val sourceDescriptor: TMANVertexContext[K]#descriptorType,
                     targetId: K,
                     val messageTypeFlag: Boolean,
                     viewDescriptors: TMANVertexContext[K]#viewType)(implicit tmanProtocol: TMANProtocol)
  extends GraphMessage[K, (K, _ <: TMANVertexContext[K]#descriptorType, TMANVertexContext[K]#ageType)](
    sourceId,
    tmanProtocol.name,
    targetId,
    tmanProtocol.name,
    viewDescriptors.asInstanceOf[Array[(K, _ <: TMANVertexContext[K]#descriptorType, TMANVertexContext[K]#ageType)]]) {}
| roy20021/ONJAG | src/it/unipi/thesis/andrea/esposito/onjag/protocols/TMANProtocol.scala | Scala | apache-2.0 | 12,107 |
import stainless.annotation.{ghost => ghostAnnot, _}
import stainless.collection._
import stainless.lang._
import stainless.lang.Option._
import stainless.lang.StaticChecks._
import stainless.proof.check
// Stainless (full-imperative) verification benchmark: a mutable singly-linked list whose
// heap footprint is tracked through the ghost `repr` field. The `reads`/`modifies` clauses
// declare which heap references each method may touch; `check`/`assert` calls guide the
// verifier and have no runtime role. Do not alter non-comment tokens without re-running
// the Stainless proofs.
object MutListExample {

  // `repr` is the ghost list of all heap references reachable from this node, itself included.
  final case class Node private (var value: BigInt, var nextOpt: Option[Node], @ghostAnnot var repr: List[AnyHeapRef]) extends AnyHeapRef {

    // Representation invariant: repr is exactly `this` followed by the tail's repr,
    // with no cycles (this node never appears in the tail's repr).
    @ghostAnnot
    def valid: Boolean = {
      reads(repr.content ++ Set(this))
      decreases(repr.size)
      nextOpt match {
        case None() =>
          repr == List(this)
        case Some(next) =>
          repr.content.contains(next) &&
          repr == this :: next.repr &&
          repr.content == next.repr.content ++ Set(this) &&
          !next.repr.content.contains(this) &&
          next.valid
      }
    }

    // Number of nodes in the list; postcondition guarantees it is positive.
    def size: BigInt = {
      reads(repr.content ++ Set(this))
      require(valid)
      decreases(repr.size)
      nextOpt match {
        case None() => BigInt(1)
        case Some(next) => 1 + next.size
      }
    } ensuring (_ > 0)

    // Last node of the list (the one with nextOpt == None).
    def last: Node = {
      reads(repr.content ++ Set(this))
      require(valid)
      decreases(size)
      nextOpt match {
        case None() => this
        case Some(next) => next.last
      }
    }

    // Destructively appends `node` to the end of this list. Requires the two lists to be
    // disjoint (no shared heap references); ensures repr becomes the concatenation of both.
    @opaque
    def append(node: Node): Unit = {
      reads(repr.content ++ node.repr.content ++ Set(this, node))
      modifies(repr.content ++ Set(this))
      require(valid && node.valid && (repr.content & node.repr.content).isEmpty)
      decreases(size)
      @ghostAnnot val oldRepr = repr
      @ghostAnnot val oldReprConcat = repr ++ node.repr
      @ghostAnnot val oldReprConcatContents = repr.content ++ node.repr.content
      nextOpt match {
        case None() =>
          nextOpt = Some(node)
          repr = this :: node.repr
          // Ghost checks proving the postcondition for the base case.
          @ghostAnnot val unused1 = check(valid)
          @ghostAnnot val unused2 = check(repr == oldReprConcat)
          @ghostAnnot val unused3 = check(repr.content == oldReprConcatContents)
        case Some(next) =>
          assert(next.valid)
          assert(next.repr.content subsetOf repr.content)
          @ghostAnnot val oldReprNext = next.repr
          @ghostAnnot val oldReprC = next.repr.content ++ node.repr.content
          next.append(node)
          assert(next.repr.content == oldReprC)
          assert(next.valid)
          repr = this :: next.repr
          // Step-by-step equalities guiding the verifier to the concatenation postcondition.
          assert(next.repr.content subsetOf repr.content)
          assert(repr == this :: (oldReprNext ++ node.repr))
          assert(repr == (this :: oldReprNext) ++ node.repr)
          ghost { check(repr == oldRepr ++ node.repr) }
      }
    } ensuring { _ => valid &&& repr == old(repr ++ node.repr) &&& repr.content == old(repr.content ++ node.repr.content) }
  }

  // Frame-rule example: mutating l2 (whose repr is disjoint from l1's) must not change
  // what is read from l1 — the two reads of l1.value are asserted equal.
  def readInvariant(l1: Node, l2: Node): Unit = {
    reads(l1.repr.content ++ l2.repr.content ++ Set(l1, l2))
    modifies(Set(l2))
    require(l1.valid && l2.valid && (l1.repr.content & l2.repr.content).isEmpty)
    val h1 = l1.value
    l2.value += 1
    val h2 = l1.value
    assert(h1 == h2)
  }
}
| epfl-lara/stainless | frontends/benchmarks/full-imperative/valid/MutList.scala | Scala | apache-2.0 | 3,056 |
package com.twitter.finagle.loadbalancer.p2c
import com.twitter.finagle.loadbalancer.Balancer
import com.twitter.finagle.loadbalancer.EndpointFactory
import com.twitter.finagle.loadbalancer.PeakEwma
import com.twitter.finagle.loadbalancer.Updating
import com.twitter.finagle.NoBrokersAvailableException
import com.twitter.finagle.ServiceFactoryProxy
import com.twitter.finagle.loadbalancer.LoadBalancerFactory.PanicMode
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.util.Rng
import com.twitter.util.Activity
import com.twitter.util.Duration
/**
* Like [[com.twitter.finagle.loadbalancer.p2c.P2CLeastLoaded]] but
* using the Peak EWMA load metric.
*
* Peak EWMA is designed to converge quickly when encountering
* slow endpoints. It is quick to react to latency spikes, recovering
* only cautiously. Peak EWMA takes history into account, so that
* slow behavior is penalized relative to the supplied decay time.
*
* @param endpoints An activity that updates with the set of node pairs
* over which we distribute load.
*
* @param maxEffort the maximum amount of "effort" we're willing to
* expend on a load balancing decision without reweighing.
*
* @param decayTime The window of latency observations.
*
* @param rng The PRNG used for flipping coins. Override for
* deterministic tests.
*
* @param statsReceiver The stats receiver to which operational
* statistics are reported.
*
* [1] Michael Mitzenmacher. 2001. The Power of Two Choices in
* Randomized Load Balancing. IEEE Trans. Parallel Distrib. Syst. 12,
* 10 (October 2001), 1094-1104.
*/
private[loadbalancer] final class P2CPeakEwma[Req, Rep](
    protected val endpoints: Activity[IndexedSeq[EndpointFactory[Req, Rep]]],
    protected val decayTime: Duration,
    protected val nanoTime: () => Long,
    private[loadbalancer] val panicMode: PanicMode,
    protected val rng: Rng,
    protected val statsReceiver: StatsReceiver,
    protected val emptyException: NoBrokersAvailableException)
    extends Balancer[Req, Rep]
    with PeakEwma[Req, Rep]
    with P2C[Req, Rep]
    with Updating[Req, Rep] {

  // Pairs an endpoint factory with the Peak EWMA load-tracking behaviour (PeakEwmaNode mixin);
  // these are the nodes the P2C mixin picks two of at random and compares by load.
  case class Node(factory: EndpointFactory[Req, Rep])
      extends ServiceFactoryProxy[Req, Rep](factory)
      with PeakEwmaNode

  // Factory hook invoked by the Balancer machinery to wrap each endpoint in a Node.
  protected def newNode(factory: EndpointFactory[Req, Rep]): Node = Node(factory)
}
| twitter/finagle | finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/p2c/P2CPeakEwma.scala | Scala | apache-2.0 | 2,322 |
package io.getclump
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
/**
 * Behavioural specification (specs2) of the public `Clump` API: companion-object
 * constructors, combinators (map/flatMap/join/filter), failure-recovery operators,
 * and result-extraction helpers.
 *
 * NOTE(review): `clumpResult` and `awaitResult` are helpers presumably inherited from
 * `Spec` — they appear to run a clump and expose its (optional) result; confirm in Spec.scala.
 */
@RunWith(classOf[JUnitRunner])
class ClumpApiSpec extends Spec {

  // --- Companion-object constructors ---
  "the Clump object" >> {

    "allows to create a constant clump" >> {

      "from a future (Clump.future)" >> {

        "success" >> {
          clumpResult(Clump.future(Future.successful(1))) mustEqual Some(1)
        }

        "failure" in {
          clumpResult(Clump.future(Future.failed(new IllegalStateException))) must throwA[IllegalStateException]
        }
      }

      "from a value (Clump.apply)" >> {
        "propogates exceptions" in {
          val clump = Clump { throw new IllegalStateException }
          clumpResult(clump) must throwA[IllegalStateException]
        }

        "no exception" in {
          clumpResult(Clump(1)) mustEqual Some(1)
        }
      }

      "from a value (Clump.value)" in {
        clumpResult(Clump.value(1)) mustEqual Some(1)
      }

      "from a value (Clump.successful)" in {
        clumpResult(Clump.successful(1)) mustEqual Some(1)
      }

      "failed (Clump.exception)" in {
        clumpResult(Clump.exception(new IllegalStateException)) must throwA[IllegalStateException]
      }

      "failed (Clump.failed)" in {
        clumpResult(Clump.failed(new IllegalStateException)) must throwA[IllegalStateException]
      }
    }

    "allows to create a clump traversing multiple inputs (Clump.traverse)" in {
      "list" in {
        val inputs = List(1, 2, 3)
        val clump = Clump.traverse(inputs)(i => Clump.value(i + 1))
        clumpResult(clump) ==== Some(List(2, 3, 4))
      }
      "set" in {
        val inputs = Set(1, 2, 3)
        val clump = Clump.traverse(inputs)(i => Clump.value(i + 1))
        clumpResult(clump) ==== Some(Set(2, 3, 4))
      }
      "seq" in {
        val inputs = Seq(1, 2, 3)
        val clump = Clump.traverse(inputs)(i => Clump.value(i + 1))
        clumpResult(clump) ==== Some(Seq(2, 3, 4))
      }
      "varargs" in {
        val clump = Clump.traverse(1, 2, 3)(i => Clump.value(i + 1))
        clumpResult(clump) ==== Some(List(2, 3, 4))
      }
    }

    "allows to collect multiple clumps in only one (Clump.collect)" >> {
      "list" in {
        val clumps = List(Clump.value(1), Clump.value(2))
        clumpResult(Clump.collect(clumps)) ==== Some(List(1, 2))
      }
      "set" in {
        val clumps = Set(Clump.value(1), Clump.value(2))
        clumpResult(Clump.collect(clumps)) ==== Some(Set(1, 2))
      }
      "seq" in {
        val clumps = Seq(Clump.value(1), Clump.value(2))
        clumpResult(Clump.collect(clumps)) ==== Some(Seq(1, 2))
      }
      "varargs" in {
        val clump = Clump.collect(Clump.value(1), Clump.value(2))
        clumpResult(clump) ==== Some(List(1, 2))
      }
    }

    // Clump.sequence mirrors Clump.collect for each collection shape.
    "allows to collect multiple clumps in only one (Clump.sequence)" >> {
      "list" in {
        val clumps = List(Clump.value(1), Clump.value(2))
        clumpResult(Clump.sequence(clumps)) ==== Some(List(1, 2))
      }
      "set" in {
        val clumps = Set(Clump.value(1), Clump.value(2))
        clumpResult(Clump.sequence(clumps)) ==== Some(Set(1, 2))
      }
      "seq" in {
        val clumps = Seq(Clump.value(1), Clump.value(2))
        clumpResult(Clump.sequence(clumps)) ==== Some(Seq(1, 2))
      }
      "varargs" in {
        val clump = Clump.sequence(Clump.value(1), Clump.value(2))
        clumpResult(clump) ==== Some(List(1, 2))
      }
    }

    "allows to create an empty Clump (Clump.empty)" in {
      clumpResult(Clump.empty) ==== None
    }

    // Clump.join is overloaded for arities 2 through 10.
    "allows to join clumps" >> {
      def c(int: Int) = Clump.value(int)
      "2 instances" in {
        val clump = Clump.join(c(1), c(2))
        clumpResult(clump) mustEqual Some(1, 2)
      }
      "3 instances" in {
        val clump = Clump.join(c(1), c(2), c(3))
        clumpResult(clump) mustEqual Some(1, 2, 3)
      }
      "4 instances" in {
        val clump = Clump.join(c(1), c(2), c(3), c(4))
        clumpResult(clump) mustEqual Some(1, 2, 3, 4)
      }
      "5 instances" in {
        val clump = Clump.join(c(1), c(2), c(3), c(4), c(5))
        clumpResult(clump) mustEqual Some(1, 2, 3, 4, 5)
      }
      "6 instances" in {
        val clump = Clump.join(c(1), c(2), c(3), c(4), c(5), c(6))
        clumpResult(clump) mustEqual Some(1, 2, 3, 4, 5, 6)
      }
      "7 instances" in {
        val clump = Clump.join(c(1), c(2), c(3), c(4), c(5), c(6), c(7))
        clumpResult(clump) mustEqual Some(1, 2, 3, 4, 5, 6, 7)
      }
      "8 instances" in {
        val clump = Clump.join(c(1), c(2), c(3), c(4), c(5), c(6), c(7), c(8))
        clumpResult(clump) mustEqual Some(1, 2, 3, 4, 5, 6, 7, 8)
      }
      "9 instances" in {
        val clump = Clump.join(c(1), c(2), c(3), c(4), c(5), c(6), c(7), c(8), c(9))
        clumpResult(clump) mustEqual Some(1, 2, 3, 4, 5, 6, 7, 8, 9)
      }
      "10 instances" in {
        val clump = Clump.join(c(1), c(2), c(3), c(4), c(5), c(6), c(7), c(8), c(9), c(10))
        clumpResult(clump) mustEqual Some(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
      }
    }
  }

  // --- Instance combinators ---
  "a Clump instance" >> {

    "can be mapped to a new clump" >> {
      "using simple a value transformation (clump.map)" in {
        clumpResult(Clump.value(1).map(_ + 1)) mustEqual Some(2)
      }
      "using a transformation that creates a new clump (clump.flatMap)" >> {
        "both clumps are defined" in {
          clumpResult(Clump.value(1).flatMap(i => Clump.value(i + 1))) mustEqual Some(2)
        }
        "initial clump is undefined" in {
          clumpResult(Clump.empty[Int].flatMap(i => Clump.value(2))) mustEqual None
        }
      }
    }

    "can be joined with another clump and produce a new clump with the value of both (clump.join)" >> {
      "both clumps are defined" in {
        clumpResult(Clump.value(1).join(Clump.value(2))) mustEqual Some(1, 2)
      }
      // join is lossy: one undefined side makes the whole pair undefined.
      "one of them is undefined" in {
        clumpResult(Clump.empty[Int].join(Clump.value(None))) mustEqual None
      }
    }

    // handle/recover take a value; rescue/recoverWith take a new clump;
    // fallback/fallbackTo ignore the exception type entirely.
    "allows to recover from failures" >> {
      "using a function that recovers using a new value (clump.handle)" >> {
        "exception happens" in {
          val clump =
            Clump.exception(new IllegalStateException).handle {
              case e: IllegalStateException => 2
            }
          clumpResult(clump) mustEqual Some(2)
        }
        "exception doesn't happen" in {
          val clump =
            Clump.value(1).handle {
              case e: IllegalStateException => 2
            }
          clumpResult(clump) mustEqual Some(1)
        }
        "exception isn't caught" in {
          val clump =
            Clump.exception(new NullPointerException).handle {
              case e: IllegalStateException => 1
            }
          clumpResult(clump) must throwA[NullPointerException]
        }
      }
      "using a function that recovers using a new value (clump.recover)" >> {
        "exception happens" in {
          val clump =
            Clump.exception(new IllegalStateException).recover {
              case e: IllegalStateException => 2
            }
          clumpResult(clump) mustEqual Some(2)
        }
        "exception doesn't happen" in {
          val clump =
            Clump.value(1).recover {
              case e: IllegalStateException => 2
            }
          clumpResult(clump) mustEqual Some(1)
        }
        "exception isn't caught" in {
          val clump =
            Clump.exception(new NullPointerException).recover {
              case e: IllegalStateException => 1
            }
          clumpResult(clump) must throwA[NullPointerException]
        }
      }
      "using a function that recovers the failure using a new clump (clump.rescue)" >> {
        "exception happens" in {
          val clump =
            Clump.exception(new IllegalStateException).rescue {
              case e: IllegalStateException => Clump.value(2)
            }
          clumpResult(clump) mustEqual Some(2)
        }
        "exception doesn't happen" in {
          val clump =
            Clump.value(1).rescue {
              case e: IllegalStateException => Clump.empty
            }
          clumpResult(clump) mustEqual Some(1)
        }
        "exception isn't caught" in {
          val clump =
            Clump.exception(new NullPointerException).rescue {
              case e: IllegalStateException => Clump.value(1)
            }
          clumpResult(clump) must throwA[NullPointerException]
        }
      }
      "using a function that recovers the failure using a new clump (clump.recoverWith)" >> {
        "exception happens" in {
          val clump =
            Clump.exception(new IllegalStateException).recoverWith {
              case e: IllegalStateException => Clump.value(2)
            }
          clumpResult(clump) mustEqual Some(2)
        }
        "exception doesn't happen" in {
          val clump =
            Clump.value(1).recoverWith {
              case e: IllegalStateException => Clump.empty
            }
          clumpResult(clump) mustEqual Some(1)
        }
        "exception isn't caught" in {
          val clump =
            Clump.exception(new NullPointerException).recoverWith {
              case e: IllegalStateException => Clump.value(1)
            }
          clumpResult(clump) must throwA[NullPointerException]
        }
      }
      "using a function that recovers using a new value (clump.fallback) on any exception" >> {
        "exception happens" in {
          val clump = Clump.exception(new IllegalStateException).fallback(1)
          clumpResult(clump) mustEqual Some(1)
        }
        "exception doesn't happen" in {
          val clump = Clump.value(1).fallback(2)
          clumpResult(clump) mustEqual Some(1)
        }
      }
      "using a function that recovers using a new clump (clump.fallbackTo) on any exception" >> {
        "exception happens" in {
          val clump = Clump.exception(new IllegalStateException).fallbackTo(Clump.value(1))
          clumpResult(clump) mustEqual Some(1)
        }
        "exception doesn't happen" in {
          val clump = Clump.value(1).fallbackTo(Clump.value(2))
          clumpResult(clump) mustEqual Some(1)
        }
      }
    }

    "can have its result filtered (clump.filter)" in {
      clumpResult(Clump.value(1).filter(_ != 1)) mustEqual None
      clumpResult(Clump.value(1).filter(_ == 1)) mustEqual Some(1)
    }

    // Compile-time property: Clump[List[B]] is usable where Clump[List[A]] is expected.
    "uses a covariant type parameter" in {
      trait A
      class B extends A
      class C extends A
      val clump = Clump.traverse(List(new B, new C))(Clump.value(_))
      (clump: Clump[List[A]]) must beAnInstanceOf[Clump[List[A]]]
    }

    "allows to defined a fallback value (clump.orDefault)" >> {
      "undefined" in {
        clumpResult(Clump.empty.orDefault(1)) ==== Some(1)
      }
      "defined" in {
        clumpResult(Clump.value(1).orDefault(2)) ==== Some(1)
      }
    }

    "allows to defined a fallback clump (clump.orElse)" >> {
      "undefined" in {
        clumpResult(Clump.empty.orElse(Clump.value(1))) ==== Some(1)
      }
      "defined" in {
        clumpResult(Clump.value(1).orElse(Clump.value(2))) ==== Some(1)
      }
    }

    "can represent its result as a collection (clump.list) when its type is a collection" >> {
      "list" in {
        awaitResult(Clump.value(List(1, 2)).list) ==== List(1, 2)
      }
      "set" in {
        awaitResult(Clump.value(Set(1, 2)).list) ==== Set(1, 2)
      }
      "seq" in {
        awaitResult(Clump.value(Seq(1, 2)).list) ==== Seq(1, 2)
      }
      "empty" in {
        awaitResult(Clump.empty[List[Int]].list) ==== List()
      }
      // Clump.value(1).flatten //doesn't compile
    }

    "can provide a result falling back to a default (clump.getOrElse)" >> {
      "initial clump is undefined" in {
        awaitResult(Clump.empty.getOrElse(1)) ==== 1
      }
      "initial clump is defined" in {
        awaitResult(Clump.value(2).getOrElse(1)) ==== 2
      }
    }

    "has a utility method (clump.apply) for unwrapping optional result" in {
      awaitResult(Clump.value(1).apply()) ==== 1
      awaitResult(Clump.empty[Int].apply()) must throwA[NoSuchElementException]
    }

    // optional lifts Clump[T] to Clump[Option[T]] so an empty side no longer
    // collapses a join (contrast the lossy join above).
    "can be made optional (clump.optional) to avoid lossy joins" in {
      val clump: Clump[String] = Clump.empty
      val optionalClump: Clump[Option[String]] = clump.optional
      clumpResult(optionalClump) ==== Some(None)

      val valueClump: Clump[String] = Clump.value("foo")
      clumpResult(valueClump.join(clump)) ==== None
      clumpResult(valueClump.join(optionalClump)) ==== Some("foo", None)
    }
  }
}
| getclump/clump | src/test/scala/io/getclump/ClumpApiSpec.scala | Scala | apache-2.0 | 12,671 |
package gameover.fwk.logging
import com.badlogic.gdx.Gdx
/**
 * Mixin providing category-tagged logging helpers.
 *
 * When a libGDX application is available (`Gdx.app != null`) messages are
 * routed to its logger, tagged with the category name and prefixed with a
 * nanosecond timestamp. Otherwise info/debug fall back to stdout, while
 * errors fall back to `sys.error` (which throws a RuntimeException).
 *
 * Fix: replaced deprecated Scala procedure syntax (`def f(...) { }`) with
 * explicit `: Unit =` result types; behavior is unchanged.
 */
trait Logs {
  /** Log categories, used as the libGDX log tag. */
  object LogType extends Enumeration {
    val GUI = Value("gui")
    val GAME = Value("game")
    val GFX = Value("gfx")
    val UTILS = Value("utils")
  }
  /** Logs an informational message under the given category. */
  def logInfo(t: LogType.Value, message: String): Unit = {
    if (Gdx.app != null)
      Gdx.app.log(t.toString, s"[${System.nanoTime}] $message")
    else
      println(message)
  }
  /** Logs an error message; without a Gdx app this throws via `sys.error`. */
  def logError(t: LogType.Value, message: String): Unit = {
    if (Gdx.app != null)
      Gdx.app.error(t.toString, s"[${System.nanoTime}] $message")
    else
      sys.error(message)
  }
  /**
   * Logs an error message with its cause.
   * NOTE(review): without a Gdx app the throwable is dropped and only the
   * message is raised via `sys.error` — confirm this is intentional.
   */
  def logError(t: LogType.Value, message: String, thr: Throwable): Unit = {
    if (Gdx.app != null)
      Gdx.app.error(t.toString, s"[${System.nanoTime}] $message", thr)
    else
      sys.error(message)
  }
  /** Logs a debug message under the given category. */
  def logDebug(t: LogType.Value, message: String): Unit = {
    if (Gdx.app != null)
      Gdx.app.debug(t.toString, s"[${System.nanoTime}] $message")
    else
      println(message)
  }
}
| PixelDuck/gameover-game-framework | src/main/scala/gameover/fwk/logging/Logs.scala | Scala | mit | 974 |
package se.gigurra.gpt.common
import com.sun.jna.Native
import com.sun.jna.Pointer
import com.sun.jna.platform.win32.BaseTSD.SIZE_T
import com.sun.jna.platform.win32.Kernel32
import com.sun.jna.platform.win32.WinBase
import com.sun.jna.platform.win32.WinDef.DWORD
import com.sun.jna.platform.win32.WinNT
import com.sun.jna.platform.win32.WinNT.HANDLE
/**
 * Win32 file-mapping helpers backed by JNA.
 *
 * The @native methods below are bound to Kernel32 by the `Native.register`
 * call in the object initializer, so their signatures must not change.
 */
object SharedMemory {
  // Bind this object's @native declarations against Kernel32.
  Native.register("Kernel32")
  // Read/write view access, read/write page protection.
  val VIEW_ACCESS = WinNT.SECTION_MAP_READ | WinNT.SECTION_MAP_WRITE
  val PAGE_ACCESS = WinNT.PAGE_READWRITE
  val INVALID_HANDLE_VALUE = WinBase.INVALID_HANDLE_VALUE
  @native def OpenFileMappingA(access: DWORD, bInheritHandle: Boolean, s: String): Long
  @native def VirtualQuery(p: Pointer, infoOut: MEMORY_BASIC_INFORMATION, sz: SIZE_T): Long
  @native def FlushViewOfFile(p: Pointer, n: SIZE_T): Int
  /** Keeps `candidate` only when the pointer extracted from it is non-null. */
  private def validate[T](candidate: T, toPointer: T => Pointer): Option[T] =
    if (Pointer.nativeValue(toPointer(candidate)) == 0) None else Some(candidate)
  /** Opens the named file mapping, optionally creating it with `sz` bytes. */
  def openHandle(name: String, sz: Int, mayCreateNew: Boolean): Option[HANDLE] = {
    val raw =
      if (mayCreateNew) Kernel32.INSTANCE.CreateFileMapping(INVALID_HANDLE_VALUE, null, PAGE_ACCESS, 0, sz, name)
      else new HANDLE(new Pointer(OpenFileMappingA(new DWORD(VIEW_ACCESS), false, name)))
    validate(raw, (h: HANDLE) => h.getPointer())
  }
  /** Returns the size in bytes of the region behind `ptr`, or 0 for null. */
  def findSz(ptr: Pointer): Int =
    Option(ptr).fold(0) { p =>
      val info = new MEMORY_BASIC_INFORMATION()
      VirtualQuery(p, info, new SIZE_T(info.size()))
      info.RegionSize.intValue()
    }
  /** Maps a full read/write view of the given file-mapping handle. */
  def mapView(handle: HANDLE): Option[Pointer] =
    validate(Kernel32.INSTANCE.MapViewOfFile(handle, VIEW_ACCESS, 0, 0, 0), (p: Pointer) => p)
}
/**
 * A named Windows shared-memory segment.
 *
 * Opens (or, when `allowNew` is set, creates with `szIfNew` bytes) the mapping
 * called `name`, maps a view of it, and queries its actual size. If any step
 * fails the instance immediately releases its native resources and reports
 * `valid() == false`.
 *
 * Fix: `toString` previously interpolated the literal text "super.toString()"
 * instead of calling it; it now invokes the method.
 */
class SharedMemory(
  val name: String, szIfNew: Int, allowNew: Boolean) {
  // Underlying file-mapping handle, if it could be opened/created.
  val handle: Option[HANDLE] = SharedMemory.openHandle(name, szIfNew, allowNew)
  // Mapped view of the whole segment.
  val view: Option[Pointer] = handle.flatMap(SharedMemory.mapView(_))
  // Actual size of the mapped region in bytes (0 if mapping failed).
  val size: Int = view.map(SharedMemory.findSz(_)).getOrElse(0)
  private var _valid: Boolean = handle.isDefined && view.isDefined && size > 0
  // Release native resources eagerly when construction failed part-way.
  if (!valid) {
    close()
  }
  /** True while the segment is mapped and usable. */
  def valid() = {
    _valid
  }
  /** Unmaps the view and closes the handle; the instance is unusable afterwards. */
  def close(): Unit = {
    _valid = false
    view.foreach(Kernel32.INSTANCE.UnmapViewOfFile(_))
    handle.foreach(Kernel32.INSTANCE.CloseHandle(_))
  }
  /** Flushes the whole view back to the underlying mapping. */
  def flush(): Unit = {
    view.foreach(SharedMemory.FlushViewOfFile(_, new SIZE_T(size)))
  }
  /** Writes up to `size` bytes from `data` (starting at `offs`) into the segment. */
  def write(data: Array[Byte], offs: Int, n: Int): Unit = {
    view.foreach(_.write(0, data, offs, math.min(n, size)))
  }
  /** Reads `n` ints from the start of the segment into `ints`. */
  def read(ints: Array[Int], n: Int): Unit = {
    view.foreach(_.read(0, ints, 0, n))
  }
  /** Reads `n` bytes from the start of the segment into `bytes`. */
  def read(bytes: Array[Byte], n: Int): Unit = {
    read(bytes, 0, n)
  }
  /** Reads `n` bytes from the start of the segment into `bytes` at `offs`. */
  def read(bytes: Array[Byte], offs: Int, n: Int): Unit = {
    view.foreach(_.read(0, bytes, offs, n))
  }
  /** Copies the whole segment into a fresh byte array. */
  def copy(): Array[Byte] = {
    val out = new Array[Byte](size) // size is already an Int; dropped redundant .toInt
    read(out, math.min(out.length, size))
    out
  }
  /** The mapped pointer, if any. */
  def getPtr() = {
    view
  }
  /** A ByteBuffer over the whole mapped region, if any. */
  def getByteBuffer() = {
    getPtr().map(_.getByteBuffer(0, size))
  }
  override def toString(): String = {
    s"${super.toString()}, size: $size"
  }
}
| GiGurra/gpt | gpt-common/src/main/scala/se/gigurra/gpt/common/SharedMemory.scala | Scala | gpl-2.0 | 3,178 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.avocado.algorithms.math
import scala.math.abs
/** Assertion helpers for approximate floating-point comparison in tests. */
object MathTestUtils {
  /**
   * Asserts that `a` and `b` agree to within 1% relative tolerance (of `a`)
   * or to within an absolute tolerance of `epsilon`; throws AssertionError
   * otherwise.
   *
   * Fix: the original relative check `a * 0.99 < b && a * 1.01 > b` inverts
   * for negative `a` (e.g. a = -100, b = -100.5 was rejected despite being
   * within 1%). The abs-based form below is algebraically identical for
   * positive `a` and correct for negative values.
   */
  def assertAlmostEqual(a: Double, b: Double, epsilon: Double = 1e-6): Unit = {
    val withinRelative = abs(a - b) < abs(a) * 0.01
    val withinAbsolute = abs(a - b) < epsilon
    if (!withinRelative && !withinAbsolute) {
      throw new AssertionError(a + " != " + b)
    }
  }
}
| FusionWorks/avocado | avocado-core/src/test/scala/org/bdgenomics/avocado/algorithms/math/MathTestUtils.scala | Scala | apache-2.0 | 1,096 |
/*
* Copyright 2015 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.jobs.index
import com.twitter.scalding._
import org.apache.accumulo.core.data.{Range => AcRange}
import org.geotools.data.DataStoreFinder
import org.locationtech.geomesa.core.data._
import org.locationtech.geomesa.jobs.GeoMesaBaseJob
import org.locationtech.geomesa.jobs.scalding.ConnectionParams._
import org.locationtech.geomesa.jobs.scalding._
import scala.collection.JavaConverters._
/**
* Class to copy a schema and all data from one data store to another.
*
* Can be used to 'update' geomesa data from older versions. It does this by reading data in the old format
* and writing it to a new schema which will use the latest format. This way, improvements in serialization,
* etc can be leveraged for old data.
*/
class SchemaCopyJob(args: Args) extends GeoMesaBaseJob(args) {
  // name of the simple feature type to copy, taken from the job arguments
  val feature = args(FEATURE_IN)
  // connection parameters for the source and destination data stores
  val dsInParams = toDataStoreInParams(args)
  val dsOutParams = toDataStoreOutParams(args)
  // scalding source/sink descriptors for the copy
  val input = GeoMesaInputOptions(dsInParams, feature)
  val output = GeoMesaOutputOptions(dsOutParams)
  // Anonymous initializer block: runs during construction, before the
  // scalding pipe below is wired, so the job fails fast when either store
  // or the feature type is missing.
  // NOTE(review): `assert` throws AssertionError and is elided when the JVM
  // is run with assertions disabled — confirm that is acceptable here.
  {
    // validation
    val dsIn = DataStoreFinder.getDataStore(dsInParams.asJava).asInstanceOf[AccumuloDataStore]
    assert(dsIn != null, "The specified input data store could not be created - check your job parameters")
    val dsOut = DataStoreFinder.getDataStore(dsOutParams.asJava).asInstanceOf[AccumuloDataStore]
    assert(dsOut != null, "The specified output data store could not be created - check your job parameters")
    val sft = dsIn.getSchema(feature)
    assert(sft != null, s"The feature '$feature' does not exist in the input data store")
    // create the schema in the output datastore if it does not exist already
    dsOut.createSchema(sft)
  }
  // scalding job
  GeoMesaSource(input).write(GeoMesaSource(output))
}
| mmatz-ccri/geomesa | geomesa-jobs/src/main/scala/org/locationtech/geomesa/jobs/index/SchemaCopyJob.scala | Scala | apache-2.0 | 2,414 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert.xml
import java.io.{ByteArrayInputStream, File, FileInputStream}
import com.typesafe.config.ConfigFactory
import com.vividsolutions.jts.geom.Point
import org.junit.runner.RunWith
import org.locationtech.geomesa.convert.SimpleFeatureConverters
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
/**
 * Tests for the XML-to-SimpleFeature converter: XPath field extraction,
 * feature-path iteration, XPath functions, namespace support, XSD validation,
 * single/multi line modes, hashed feature IDs, user data and BOM handling.
 */
@RunWith(classOf[JUnitRunner])
class XMLConverterTest extends Specification {
  sequential
  // feature type shared by most tests: number/color/weight plus the doc-level source name
  val sftConf = ConfigFactory.parseString(
    """{ type-name = "xmlFeatureType"
      |  attributes = [
      |    {name = "number", type = "Integer"}
      |    {name = "color", type = "String"}
      |    {name = "weight", type = "Double"}
      |    {name = "source", type = "String"}
      |  ]
      |}
    """.stripMargin)
  val sft = SimpleFeatureTypes.createType(sftConf)
  "XML Converter" should {
    // each <Feature> element under the feature-path becomes one SimpleFeature
    "parse multiple features out of a single document" >> {
      val xml =
        """<doc>
          |  <DataSource>
          |    <name>myxml</name>
          |  </DataSource>
          |  <Feature>
          |    <number>123</number>
          |    <color>red</color>
          |    <physical weight="127.5" height="5'11"/>
          |  </Feature>
          |  <Feature>
          |    <number>456</number>
          |    <color>blue</color>
              <physical weight="150" height="h2"/>
          |  </Feature>
          |</doc>
        """.stripMargin
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "uuid()"
          |   feature-path = "Feature" // can be any xpath - relative to the root, or absolute
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "number", transform = "$0::integer" }
          |     { name = "color", path = "color", transform = "trim($0)" }
          |     { name = "weight", path = "physical/@weight", transform = "$0::double" }
          |     { name = "source", path = "/doc/DataSource/name/text()" }
          |   ]
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[String](sft, parserConf)
      val features = converter.processInput(Iterator(xml)).toList
      features must haveLength(2)
      features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
      features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
      features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
      features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
      features(1).getAttribute("number").asInstanceOf[Integer] mustEqual 456
      features(1).getAttribute("color").asInstanceOf[String] mustEqual "blue"
      features(1).getAttribute("weight").asInstanceOf[Double] mustEqual 150
      features(1).getAttribute("source").asInstanceOf[String] mustEqual "myxml"
    }
    // geometry built from lon/lat attributes on the repeated <Feature> element itself
    "parse multiple features out of a single document with the geometry in the repeated XML tag" >> {
      val sftConf2 = ConfigFactory.parseString(
        """{ type-name = "xmlFeatureType"
          |  attributes = [
          |    {name = "number", type = "Integer"}
          |    {name = "color", type = "String"}
          |    {name = "weight", type = "Double"}
          |    {name = "source", type = "String"}
          |    {name = "geom", type = "Point"}
          |  ]
          |}
        """.stripMargin)
      val sft2 = SimpleFeatureTypes.createType(sftConf2)
      val xml =
        """<doc>
          |  <DataSource>
          |    <name>myxml</name>
          |  </DataSource>
          |  <Feature lon="1.23" lat="4.23">
          |    <number>123</number>
          |    <color>red</color>
          |    <physical weight="127.5" height="5'11"/>
          |  </Feature>
          |  <Feature lon="4.56" lat="7.56">
          |    <number>456</number>
          |    <color>blue</color>
              <physical weight="150" height="h2"/>
          |  </Feature>
          |</doc>
        """.stripMargin
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "uuid()"
          |   feature-path = "Feature" // can be any xpath - relative to the root, or absolute
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "number", transform = "$0::integer" }
          |     { name = "color", path = "color", transform = "trim($0)" }
          |     { name = "weight", path = "physical/@weight", transform = "$0::double" }
          |     { name = "source", path = "/doc/DataSource/name/text()" }
          |     { name = "lon", path = "./@lon", transform = "$0::double" }
          |     { name = "lat", path = "./@lat", transform = "$0::double" }
          |     { name = "geom", transform = "point($lon, $lat)" }
          |   ]
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[String](sft2, parserConf)
      val features = converter.processInput(Iterator(xml)).toList
      features must haveLength(2)
      features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
      features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
      features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
      features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
      features.head.getAttribute("geom").asInstanceOf[Point] mustEqual WKTUtils.read("POINT(1.23 4.23)").asInstanceOf[Point]
      features(1).getAttribute("number").asInstanceOf[Integer] mustEqual 456
      features(1).getAttribute("color").asInstanceOf[String] mustEqual "blue"
      features(1).getAttribute("weight").asInstanceOf[Double] mustEqual 150
      features(1).getAttribute("source").asInstanceOf[String] mustEqual "myxml"
      features(1).getAttribute("geom").asInstanceOf[Point] mustEqual WKTUtils.read("POINT(4.56 7.56)").asInstanceOf[Point]
    }
    // an absolute feature-path can reach Feature nodes nested below intermediate elements
    "parse nested feature nodes" >> {
      val xml =
        """<doc>
          |  <DataSource>
          |    <name>myxml</name>
          |  </DataSource>
          |  <IgnoreMe>
          |    <Feature>
          |      <number>123</number>
          |      <color>red</color>
          |      <physical weight="127.5" height="5'11"/>
          |    </Feature>
          |  </IgnoreMe>
          |  <IgnoreMe>
          |    <Feature>
          |      <number>456</number>
          |      <color>blue</color>
          |      <physical weight="150" height="h2"/>
          |    </Feature>
          |  </IgnoreMe>
          |</doc>
        """.stripMargin
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "uuid()"
          |   feature-path = "/doc/IgnoreMe/Feature" // can be any xpath - relative to the root, or absolute
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "number", transform = "$0::integer" }
          |     { name = "color", path = "color", transform = "trim($0)" }
          |     { name = "weight", path = "physical/@weight", transform = "$0::double" }
          |     { name = "source", path = "/doc/DataSource/name/text()" }
          |   ]
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[String](sft, parserConf)
      val features = converter.processInput(Iterator(xml)).toList
      features must haveLength(2)
      features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
      features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
      features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
      features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
      features(1).getAttribute("number").asInstanceOf[Integer] mustEqual 456
      features(1).getAttribute("color").asInstanceOf[String] mustEqual "blue"
      features(1).getAttribute("weight").asInstanceOf[Double] mustEqual 150
      features(1).getAttribute("source").asInstanceOf[String] mustEqual "myxml"
    }
    // XPath functions (here floor()) may be applied inside field paths
    "apply xpath functions" >> {
      val xml =
        """<doc>
          |  <DataSource>
          |    <name>myxml</name>
          |  </DataSource>
          |  <Feature>
          |    <number>123</number>
          |    <color>red</color>
          |    <physical weight="127.5" height="5'11"/>
          |  </Feature>
          |</doc>
        """.stripMargin
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "uuid()"
          |   feature-path = "Feature" // can be any xpath - relative to the root, or absolute
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "number", transform = "$0::integer" }
          |     { name = "color", path = "color", transform = "trim($0)" }
          |     { name = "weight", path = "floor(physical/@weight)", transform = "$0::double" }
          |     { name = "source", path = "/doc/DataSource/name/text()" }
          |   ]
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[String](sft, parserConf)
      val features = converter.processInput(Iterator(xml)).toList
      features must haveLength(1)
      features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
      features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
      features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127
      features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
    }
    // feature IDs derived from an md5 of each feature node's serialized XML
    "use an ID hash for each node" >> {
      val xml =
        """<doc>
          |  <DataSource>
          |    <name>myxml</name>
          |  </DataSource>
          |  <Feature>
          |    <number>123</number>
          |    <color>red</color>
          |    <physical weight="127.5" height="5'11"/>
          |  </Feature>
          |  <Feature>
          |    <number>456</number>
          |    <color>blue</color>
              <physical weight="150" height="h2"/>
          |  </Feature>
          |</doc>
        """.stripMargin
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "md5(string2bytes(xml2string($0)))"
          |   feature-path = "Feature" // can be any xpath - relative to the root, or absolute
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "number", transform = "$0::integer" }
          |     { name = "color", path = "color", transform = "trim($0)" }
          |     { name = "weight", path = "physical/@weight", transform = "$0::double" }
          |     { name = "source", path = "/doc/DataSource/name/text()" }
          |   ]
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[String](sft, parserConf)
      val features = converter.processInput(Iterator(xml)).toList
      features must haveLength(2)
      features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
      features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
      features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
      features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
      features(1).getAttribute("number").asInstanceOf[Integer] mustEqual 456
      features(1).getAttribute("color").asInstanceOf[String] mustEqual "blue"
      features(1).getAttribute("weight").asInstanceOf[Double] mustEqual 150
      features(1).getAttribute("source").asInstanceOf[String] mustEqual "myxml"
      features.head.getID mustEqual "441dd9114a1a345fe59f0dfe461f01ca"
      features(1).getID mustEqual "42aae6286c7204c3aa1aa99a4e8dae35"
    }
    // a document matching the configured XSD is accepted, via both input styles
    "validate with an xsd" >> {
      val xml =
        """<?xml version="1.0" encoding="UTF-8" ?>
          |<f:doc xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:f="http://geomesa.org/test-feature">
          |  <f:DataSource>
          |    <f:name>myxml</f:name>
          |  </f:DataSource>
          |  <f:Feature>
          |    <f:number>123</f:number>
          |    <f:color>red</f:color>
          |    <f:physical weight="127.5" height="5'11"/>
          |  </f:Feature>
          |</f:doc>
        """.stripMargin
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "uuid()"
          |   feature-path = "ns:Feature" // can be any xpath - relative to the root, or absolute
          |   xsd = "xml-feature.xsd" // looked up by class.getResource
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "ns:number", transform = "$0::integer" }
          |     { name = "color", path = "ns:color", transform = "trim($0)" }
          |     { name = "weight", path = "ns:physical/@weight", transform = "$0::double" }
          |     { name = "source", path = "/ns:doc/ns:DataSource/ns:name/text()" }
          |   ]
          |   xml-namespaces = {
          |     ns = "http://geomesa.org/test-feature"
          |   }
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[String](sft, parserConf)
      "parse as itr" >> {
        val features = converter.processInput(Iterator(xml)).toList
        features must haveLength(1)
        features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
        features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
        features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
        features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
      }
      "parse as stream" >> {
        val features = converter.process(new ByteArrayInputStream(xml.replaceAllLiterally("\\n", " ").getBytes)).toList
        features must haveLength(1)
        features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
        features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
        features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
        features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
      }
    }
    // line-mode "multi": the whole stream is one document
    "parse xml im multi line mode" >> {
      val xml =
        """<?xml version="1.0" encoding="UTF-8" ?>
          |<f:doc xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:f="http://geomesa.org/test-feature">
          |  <f:DataSource>
          |    <f:name>myxml</f:name>
          |  </f:DataSource>
          |  <f:Feature>
          |    <f:number>123</f:number>
          |    <f:color>red</f:color>
          |    <f:physical weight="127.5" height="5'11"/>
          |  </f:Feature>
          |</f:doc>
        """.stripMargin
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "uuid()"
          |   feature-path = "ns:Feature" // can be any xpath - relative to the root, or absolute
          |   xsd = "xml-feature.xsd" // looked up by class.getResource
          |   options {
          |     line-mode = "multi"
          |   }
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "ns:number", transform = "$0::integer" }
          |     { name = "color", path = "ns:color", transform = "trim($0)" }
          |     { name = "weight", path = "ns:physical/@weight", transform = "$0::double" }
          |     { name = "source", path = "/ns:doc/ns:DataSource/ns:name/text()" }
          |   ]
          |   xml-namespaces = {
          |     ns = "http://geomesa.org/test-feature"
          |   }
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[String](sft, parserConf)
      val features = converter.process(new ByteArrayInputStream(xml.getBytes)).toList
      features must haveLength(1)
      features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
      features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
      features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
      features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
    }
    // line-mode "single": each input line is an independent document
    "parse xml in single line mode" >> {
      val origXml =
        """<?xml version="1.0" encoding="UTF-8" ?>
          |<f:doc xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:f="http://geomesa.org/test-feature">
          |  <f:DataSource>
          |    <f:name>myxml</f:name>
          |  </f:DataSource>
          |  <f:Feature>
          |    <f:number>123</f:number>
          |    <f:color>red</f:color>
          |    <f:physical weight="127.5" height="5'11"/>
          |  </f:Feature>
          |</f:doc>
        """.stripMargin
      val xml = origXml.replaceAllLiterally("\\n", " ") + "\\n" + origXml.replaceAllLiterally("\\n", " ")
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "uuid()"
          |   feature-path = "ns:Feature" // can be any xpath - relative to the root, or absolute
          |   xsd = "xml-feature.xsd" // looked up by class.getResource
          |   options {
          |     line-mode = "single"
          |   }
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "ns:number", transform = "$0::integer" }
          |     { name = "color", path = "ns:color", transform = "trim($0)" }
          |     { name = "weight", path = "ns:physical/@weight", transform = "$0::double" }
          |     { name = "source", path = "/ns:doc/ns:DataSource/ns:name/text()" }
          |   ]
          |   xml-namespaces = {
          |     ns = "http://geomesa.org/test-feature"
          |   }
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[String](sft, parserConf)
      val features = converter.process(new ByteArrayInputStream(xml.getBytes)).toList
      features must haveLength(2)
      features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
      features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
      features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
      features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
      features.last.getAttribute("number").asInstanceOf[Integer] mustEqual 123
      features.last.getAttribute("color").asInstanceOf[String] mustEqual "red"
      features.last.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
      features.last.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
    }
    // a document failing XSD validation (wrong root element) yields no features
    "invalidate with an xsd" >> {
      val xml =
        """<f:doc2 xmlns:f="http://geomesa.org/test-feature" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
          |  <f:DataSource>
          |    <f:name>myxml</f:name>
          |  </f:DataSource>
          |  <f:Feature>
          |    <f:number>123</f:number>
          |    <f:color>red</f:color>
          |    <f:physical weight="127.5" height="5'11"/>
          |  </f:Feature>
          |</f:doc2>
        """.stripMargin
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "uuid()"
          |   feature-path = "ns:Feature" // can be any xpath - relative to the root, or absolute
          |   xsd = "xml-feature.xsd" // looked up by class.getResource
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "ns:number", transform = "$0::integer" }
          |     { name = "color", path = "ns:color", transform = "trim($0)" }
          |     { name = "weight", path = "ns:physical/@weight", transform = "$0::double" }
          |     { name = "source", path = "/ns:doc/ns:DataSource/ns:name/text()" }
          |   ]
          |   xml-namespaces = {
          |     ns = "http://geomesa.org/test-feature"
          |   }
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[String](sft, parserConf)
      val features = converter.processInput(Iterator(xml)).toList
      features must haveLength(0)
    }
    // user-data entries may reference converted field values
    "handle user data" >> {
      val xml =
        """<doc>
          |  <DataSource>
          |    <name>myxml</name>
          |  </DataSource>
          |  <Feature>
          |    <number>123</number>
          |    <color>red</color>
          |    <physical weight="127.5" height="5'11"/>
          |  </Feature>
          |</doc>
        """.stripMargin
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "uuid()"
          |   feature-path = "Feature" // can be any xpath - relative to the root, or absolute
          |   user-data = {
          |     my.user.key = "$weight"
          |   }
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "number", transform = "$0::integer" }
          |     { name = "color", path = "color", transform = "trim($0)" }
          |     { name = "weight", path = "floor(physical/@weight)", transform = "$0::double" }
          |     { name = "source", path = "/doc/DataSource/name/text()" }
          |   ]
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[String](sft, parserConf)
      val features = converter.processInput(Iterator(xml)).toList
      features must haveLength(1)
      features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
      features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
      features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127
      features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
      features.head.getUserData.get("my.user.key") mustEqual 127d
    }
    // a byte-order mark at the start of the stream must not break parsing
    "Parse XMLs with a BOM" >> {
      val xml = new File("src/test/resources/bomTest.xml")
      xml.exists() mustEqual true
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "uuid()"
          |   feature-path = "Feature" // can be any xpath - relative to the root, or absolute
          |   options {
          |     line-mode = "multi"
          |   }
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "number", transform = "$0::integer" }
          |     { name = "color", path = "color", transform = "trim($0)" }
          |     { name = "weight", path = "physical/@weight", transform = "$0::double" }
          |     { name = "source", path = "/doc/DataSource/name/text()" }
          |   ]
          | }
        """.stripMargin)
      val xmlConverter = (new XMLConverterFactory).buildConverter(sft, parserConf)
      val features = xmlConverter.process(new FileInputStream(xml)).toList
      features must haveLength(2)
      features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
      features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
      features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127.5
      features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
      features(1).getAttribute("number").asInstanceOf[Integer] mustEqual 456
      features(1).getAttribute("color").asInstanceOf[String] mustEqual "blue"
      features(1).getAttribute("weight").asInstanceOf[Double] mustEqual 150
      features(1).getAttribute("source").asInstanceOf[String] mustEqual "myxml"
    }
    // multiple namespaces resolved through the saxon xpath implementation
    "support namespaces with saxon" >> {
      val xml =
        """<ns:doc xmlns:ns="http://geomesa.example.com/foo" xmlns:ns2="http://geomesa.example.com/foo2">
          |  <ns:DataSource>
          |    <ns:name>myxml</ns:name>
          |  </ns:DataSource>
          |  <ns:Feature>
          |    <ns:number>123</ns:number>
          |    <ns:color>red</ns:color>
          |    <ns2:physical weight="127.5" height="5'11"/>
          |  </ns:Feature>
          |</ns:doc>
        """.stripMargin
      val parserConf = ConfigFactory.parseString(
        """
          | {
          |   type = "xml"
          |   id-field = "uuid()"
          |   feature-path = "ns:Feature" // can be any xpath - relative to the root, or absolute
          |   fields = [
          |     // paths can be any xpath - relative to the feature-path, or absolute
          |     { name = "number", path = "ns:number", transform = "$0::integer" }
          |     { name = "color", path = "ns:color", transform = "trim($0)" }
          |     { name = "weight", path = "floor(ns2:physical/@weight)", transform = "$0::double" }
          |     { name = "source", path = "/ns:doc/ns:DataSource/ns:name/text()" }
          |   ]
          |   xml-namespaces = {
          |     ns = "http://geomesa.example.com/foo"
          |     ns2 = "http://geomesa.example.com/foo2"
          |   }
          | }
        """.stripMargin)
      val converter = SimpleFeatureConverters.build[String](sft, parserConf)
      val features = converter.processInput(Iterator(xml)).toList
      features must haveLength(1)
      features.head.getAttribute("number").asInstanceOf[Integer] mustEqual 123
      features.head.getAttribute("color").asInstanceOf[String] mustEqual "red"
      features.head.getAttribute("weight").asInstanceOf[Double] mustEqual 127
      features.head.getAttribute("source").asInstanceOf[String] mustEqual "myxml"
    }
  }
}
| jahhulbert-ccri/geomesa | geomesa-convert/geomesa-convert-xml/src/test/scala/org/locationtech/geomesa/convert/xml/XMLConverterTest.scala | Scala | apache-2.0 | 27,319 |
package com.twitter.util.reflect
import com.twitter.util.Future
// Marker trait with a covariant type parameter; presumably a fixture for
// variance handling in the reflection utilities — confirm against the tests.
trait Fungible[+T]
// Minimal service marker trait.
trait BarService
// Capability of producing a BarService view of a service.
trait ToBarService {
  def toBarService: BarService
}
// Base class emulating generated service code.
abstract class GeneratedFooService
// Higher-kinded service trait parameterized by an effect constructor MM[_].
trait DoEverything[+MM[_]] extends GeneratedFooService
// Companion-style object exposing nested client/endpoint types.
object DoEverything extends GeneratedFooService { self =>
  // per-endpoint service view combining conversion and the Fungible capability
  trait ServicePerEndpoint extends ToBarService with Fungible[ServicePerEndpoint]
  // Future-based client; the literal `$` in the name mimics generated-code naming
  class DoEverything$Client extends DoEverything[Future]
}
/** Fixture types for reflection tests: generic case classes, constructor/field
  * annotations, secondary constructors, and deliberately hostile naming
  * (underscore+digit "packages", `$` in identifiers) that breaks
  * `Class.getSimpleName`.
  */
object testclasses {
  trait TestTraitA
  trait TestTraitB
  // Generic shapes for type-argument resolution.
  case class Foo[T](data: T)
  case class Baz[U, T](bar: U, foo: Foo[T])
  case class Bez[I, J](m: Map[I, J])
  trait TypedTrait[A, B]
  object ver2_3 {
    // this "package" naming will blow up class.getSimpleName
    // packages with underscore then a number make Java unhappy
    final case class Ext(a: String, b: String)
    case class Response(a: String, b: String)
    class This$Breaks$In$ManyWays
  }
  object okNaming {
    case class Ok(a: String, b: String)
  }
  object has_underscore {
    case class ClassB(a: String, b: String)
  }
  object number_1 {
    // this "package" naming will blow up class.getSimpleName
    // packages with underscore then number make Java unhappy
    final case class FooNumber(a: String)
  }
  final case class ClassA(a: String, b: String)
  case class Request(a: String)
  class Bar
  case class NoConstructor()
  // Constructor-parameter annotation fixtures (Annotation1..5, Thing, Widget are
  // declared elsewhere in this project).
  case class CaseClassOneTwo(@Annotation1 one: String, @Annotation2 two: String)
  case class CaseClassOneTwoWithFields(@Annotation1 one: String, @Annotation2 two: String) {
    // Plain body vals — not constructor parameters, so they carry no annotations.
    val city: String = "San Francisco"
    val state: String = "California"
  }
  case class CaseClassOneTwoWithAnnotatedField(@Annotation1 one: String, @Annotation2 two: String) {
    @Annotation3 val three: String = "three"
  }
  case class CaseClassThreeFour(@Annotation3 three: String, @Annotation4 four: String)
  case class CaseClassFive(@Annotation5 five: String)
  case class CaseClassOneTwoThreeFour(
    @Annotation1 one: String,
    @Annotation2 two: String,
    @Annotation3 three: String,
    @Annotation4 four: String)
  case class WithThings(
    @Annotation1 @Thing("thing1") thing1: String,
    @Annotation2 @Thing("thing2") thing2: String)
  case class WithWidgets(
    @Annotation3 @Widget("widget1") widget1: String,
    @Annotation4 @Widget("widget2") widget2: String)
  case class WithSecondaryConstructor(
    @Annotation1 one: Int,
    @Annotation2 two: Int) {
    def this(@Annotation3 three: String, @Annotation4 four: String) {
      this(three.toInt, four.toInt)
    }
  }
  object StaticSecondaryConstructor {
    // NOTE: this is a factory method and not a constructor, so annotations will not be picked up
    def apply(@Annotation3 three: String, @Annotation4 four: String): StaticSecondaryConstructor =
      StaticSecondaryConstructor(three.toInt, four.toInt)
  }
  case class StaticSecondaryConstructor(@Annotation1 one: Int, @Annotation2 two: Int)
  object StaticSecondaryConstructorWithMethodAnnotation {
    def apply(@Annotation3 three: String, @Annotation4 four: String): StaticSecondaryConstructor =
      StaticSecondaryConstructor(three.toInt, four.toInt)
  }
  case class StaticSecondaryConstructorWithMethodAnnotation(
    @Annotation1 one: Int,
    @Annotation2 two: Int) {
    // will not be found as Annotations only scans for declared field annotations, this is a method
    @Widget("widget1") def widget1: String = "this is widget 1 method"
  }
  case class GenericTestCaseClass[T](@Annotation1 one: T)
  case class GenericTestCaseClassWithMultipleArgs[T](@Annotation1 one: T, @Annotation2 two: Int)
  // Inherited (method-level) annotations for ancestry-scanning tests.
  trait AncestorWithAnnotations {
    @Annotation1 def one: String
    @Annotation2 def two: String
  }
  case class CaseClassThreeFourAncestorOneTwo(
    one: String,
    two: String,
    @Annotation3 three: String,
    @Annotation4 four: String)
    extends AncestorWithAnnotations
  case class CaseClassAncestorOneTwo(@Annotation5 five: String) extends AncestorWithAnnotations {
    override val one: String = "one"
    override val two: String = "two"
  }
}
| twitter/util | util-reflect/src/test/scala/com/twitter/util/reflect/testclasses.scala | Scala | apache-2.0 | 4,076 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.web.services
import org.junit.Test
import org.junit._
import org.junit.Assert._
import org.junit.runner.RunWith
import org.junit.runners.BlockJUnit4ClassRunner
import org.junit.runner._
import org.specs2.mutable._
import org.specs2.runner._
import com.normation.rudder.web.model.SectionField
import com.normation.cfclerk.domain._
import com.normation.rudder.web.model._
import org.springframework.context.{ ApplicationContext, ApplicationContextAware }
import org.springframework.context.annotation.AnnotationConfigApplicationContext
import com.normation.rudder.domain.policies.RuleVal
import org.springframework.context.annotation.{ Bean, Configuration, Import }
import org.springframework.beans.factory.annotation.Value
import org.springframework.context.annotation.Lazy
import org.springframework.context.annotation.{ Bean, Configuration, Import, ImportResource }
import org.springframework.beans.factory.annotation.Value
import org.springframework.context.{ ApplicationContext, ApplicationContextAware }
import org.springframework.context.annotation.AnnotationConfigApplicationContext
import org.springframework.core.io.ClassPathResource
import com.normation.spring._
/** Verifies that Section2FieldService converts a SectionSpec tree into the
  * expected SectionField structure (names, multivaluedness, child counts).
  * The spec tree is built by `RootSectionField.createRootSectionSpec` below.
  */
@RunWith(classOf[JUnitRunner])
class Section2FieldServiceTest extends Specification {
  // Sketch of the spec tree under test:
  // <sections>
  //   <section name="multSect" multivalued="true">
  //     <section name="innerSect">
  //       <select>
  //       ...
  //       <policyinstance>
  //     </section>
  //     <select>
  //       <name>selectInMultSect</name>
  //     </select>
  //   </section>
  //   <input>
  //     <name>inputInRoot</name>
  //   </input>
  // </sections>

  // Fields shared by the examples below; built once via the service under test.
  object Sections {
    val rootSectField = RootSectionField()
    val multSect = rootSectField.getAllSectionFields(1)
    val innerSect = multSect.getAllSectionFields(2)
  }

  "multSect" should {
    // The implicit val feeds the matcher helpers defined further down.
    implicit val multSect = Sections.multSect
    beMultivalued
    haveName("multSect")
    haveNbChildren(3)
  }

  "innerSect" should {
    implicit val innerSect = Sections.innerSect
    haveName("innerSect")
    haveNbChildren(3)
  }

  // --- matcher helpers; each receives the section under test implicitly ---

  def haveNbChildren(nbChildren: Int)(implicit section: SectionField) = {
    "have %d children".format(nbChildren) in {
      section.childFields.size mustEqual nbChildren
    }
  }

  def beMultivalued(implicit section: SectionField) = {
    "be multivalued" in {
      section.isMultivalued
    }
  }

  def haveName(name: String)(implicit section: SectionField) = {
    "have name '%s'".format(name) in {
      section.name mustEqual name
    }
  }

  // NOTE(review): haveId, haveAllVars and isText are not referenced by any
  // example above — apparently kept for future assertions; confirm before removal.
  def haveId(id: String)(implicit varField: DirectiveField) = {
    "have id '%s'".format(id) in {
      varField.id mustEqual id
    }
  }

  def haveAllVars(implicit section: SectionField) = {
    "have all kinds of variable" in {
      val vars = section.childFields.collect { case v: DirectiveField => v }
      isSelect(vars(0))
    }
  }

  def isSelect(varField: DirectiveField) = {
    "is a select variable" in {
      varField must beAnInstanceOf[SelectField]
    }
  }

  def isText(varField: DirectiveField) = {
    "is an input variable" in {
      varField must beAnInstanceOf[TextField]
    }
  }

  // One variable spec of each kind, used to populate the section specs.
  def allVars(): Seq[SectionVariableSpec] = {
    SelectVariableSpec("select", "selectDesc") ::
    SelectOneVariableSpec("selectOne", "selectOneDesc") ::
    InputVariableSpec("input", "inputDesc") ::
    Nil
  }

  // Builds the root SectionField by feeding the hand-built spec tree to the
  // service wired up in ConfigSection2FieldService.
  object RootSectionField {
    def apply() = {
      val rootSectSpec = createRootSectionSpec
      ConfigSection2FieldService.section2FieldService.createSectionField(rootSectSpec,Map(),true)
    }
    def createRootSectionSpec = {
      val innerMultSect = SectionSpec("innerMultSect", isMultivalued = true, children = allVars())
      val innerSect = SectionSpec("innerSect", children = allVars())
      val selectInMultSect = SelectVariableSpec("selectInMultSect", "selectInMultSectDesc")
      val childrenMultSect = Seq(innerMultSect, innerSect, selectInMultSect)
      val multSect = SectionSpec("multSect", isMultivalued = true, children = childrenMultSect)
      val inputInRoot = InputVariableSpec("inputInRoot", "inputInRootDesc")
      val rootSect = SectionSpec("rootSect", children = Seq(inputInRoot, multSect))
      rootSect
    }
  }
}
/** Test wiring for Section2FieldService: a field factory plus the translator registry. */
object ConfigSection2FieldService {

  /** Maps a variable spec to the concrete form field that renders it. */
  object FieldFactoryImpl extends DirectiveFieldFactory {

    // Fallback field used whenever no specialized widget applies.
    override def default(id: String) = new TextField(id)

    override def forType(v: VariableSpec, id: String): DirectiveField =
      v match {
        case selectOne: SelectOneVariableSpec =>
          new SelectOneField(id, selectOne.valueslabels)
        case select: SelectVariableSpec =>
          new SelectField(id, select.valueslabels)
        case _: InputVariableSpec =>
          fieldForInput(v, id)
        case _ =>
          default(id)
      }

    // Input variables are specialized further by their constraint's type name.
    private def fieldForInput(v: VariableSpec, id: String): DirectiveField =
      v.constraint.typeName match {
        case UploadedFileVType    => new UploadedFileField("")(id)
        case DestinationPathVType => default(id)
        case PermVType            => new FilePermsField(id)
        case BooleanVType         => new CheckboxField(id)
        case _: SizeVType         => new InputSizeField(id)
        case _                    => default(id)
      }
  }

  /** Builds a Section2FieldService wired with the factory above and every translator. */
  def section2FieldService: Section2FieldService = {
    val translators = new Translators()
    translators.add(StringTranslator)
    translators.add(FilePermsTranslator)
    translators.add(FileTranslator)
    translators.add(DestinationFileTranslator)
    translators.add(SelectFieldTranslator)
    new Section2FieldService(FieldFactoryImpl, translators)
  }
}
| Kegeruneku/rudder | rudder-web/src/test/scala/com/normation/rudder/web/services/Section2FieldServiceTest.scala | Scala | agpl-3.0 | 7,181 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.csv
import java.nio.charset.{Charset, StandardCharsets}
import com.univocity.parsers.csv.CsvParser
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileStatus
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.spark.TaskContext
import org.apache.spark.input.{PortableDataStream, StreamInputFormat}
import org.apache.spark.rdd.{BinaryFileRDD, RDD}
import org.apache.spark.sql.{Dataset, Encoders, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.text.TextFileFormat
import org.apache.spark.sql.types.StructType
/**
* Common functions for parsing CSV files
*/
abstract class CSVDataSource extends Serializable {
  /** Whether a single input file may be split into multiple read tasks. */
  def isSplitable: Boolean

  /**
   * Parse a [[PartitionedFile]] into [[InternalRow]] instances.
   */
  def readFile(
      conf: Configuration,
      file: PartitionedFile,
      parser: UnivocityParser,
      schema: StructType): Iterator[InternalRow]

  /**
   * Infers the schema from `inputPaths` files.
   *
   * Returns `None` when there are no input files to sample.
   */
  final def inferSchema(
      sparkSession: SparkSession,
      inputPaths: Seq[FileStatus],
      parsedOptions: CSVOptions): Option[StructType] = {
    if (inputPaths.nonEmpty) {
      Some(infer(sparkSession, inputPaths, parsedOptions))
    } else {
      None
    }
  }

  /** Format-specific schema inference, implemented by the concrete data sources. */
  protected def infer(
      sparkSession: SparkSession,
      inputPaths: Seq[FileStatus],
      parsedOptions: CSVOptions): StructType

  /**
   * Generates a header from the given row which is null-safe and duplicate-safe.
   *
   * Null, empty, or `nullValue` cells become "_c<index>"; duplicated names get
   * their column index appended so every resulting name is unique.
   */
  protected def makeSafeHeader(
      row: Array[String],
      caseSensitive: Boolean,
      options: CSVOptions): Array[String] = {
    // Fix: lower-case with Locale.ROOT so case-insensitive duplicate detection
    // does not depend on the JVM default locale (e.g. under a Turkish locale
    // "ID".toLowerCase is "ıd", which would miss genuine duplicates).
    import java.util.Locale
    if (options.headerFlag) {
      val duplicates = {
        val headerNames = row.filter(_ != null)
          .map(name => if (caseSensitive) name else name.toLowerCase(Locale.ROOT))
        headerNames.diff(headerNames.distinct).distinct
      }
      row.zipWithIndex.map { case (value, index) =>
        if (value == null || value.isEmpty || value == options.nullValue) {
          // When there are empty strings or the values set in `nullValue`, put the
          // index as the suffix.
          s"_c$index"
        } else if (!caseSensitive && duplicates.contains(value.toLowerCase(Locale.ROOT))) {
          // When there are case-insensitive duplicates, put the index as the suffix.
          s"$value$index"
        } else if (duplicates.contains(value)) {
          // When there are duplicates, put the index as the suffix.
          s"$value$index"
        } else {
          value
        }
      }
    } else {
      row.zipWithIndex.map { case (_, index) =>
        // Uses default column names, "_c#" where # is its position of fields
        // when header option is disabled.
        s"_c$index"
      }
    }
  }
}
object CSVDataSource {
  /** Selects the concrete implementation for the given options: whole-file
   *  parsing when `wholeFile` is set, line-oriented parsing otherwise. */
  def apply(options: CSVOptions): CSVDataSource =
    if (options.wholeFile) WholeFileCSVDataSource else TextInputCSVDataSource
}
/** Line-oriented CSV reading: each input line is one record, so files are splittable. */
object TextInputCSVDataSource extends CSVDataSource {
  override val isSplitable: Boolean = true

  override def readFile(
      conf: Configuration,
      file: PartitionedFile,
      parser: UnivocityParser,
      schema: StructType): Iterator[InternalRow] = {
    val lines = {
      val linesReader = new HadoopFileLinesReader(file, conf)
      // Close the underlying reader when the Spark task finishes (if a task
      // context exists; it is absent when called outside a task).
      Option(TaskContext.get()).foreach(_.addTaskCompletionListener(_ => linesReader.close()))
      // Decode each raw line with the charset configured in the options.
      linesReader.map { line =>
        new String(line.getBytes, 0, line.getLength, parser.options.charset)
      }
    }
    // The header is only present in the split that starts at file offset 0.
    val shouldDropHeader = parser.options.headerFlag && file.start == 0
    UnivocityParser.parseIterator(lines, shouldDropHeader, parser, schema)
  }

  override def infer(
      sparkSession: SparkSession,
      inputPaths: Seq[FileStatus],
      parsedOptions: CSVOptions): StructType = {
    val csv = createBaseDataset(sparkSession, inputPaths, parsedOptions)
    // Grab the first non-comment, non-empty line as the header candidate.
    // (`takeInternal` is presumably this fork's variant of take — confirm.)
    val maybeFirstLine =
      CSVUtils.filterCommentAndEmpty(csv, parsedOptions).takeInternal(1).headOption
    inferFromDataset(sparkSession, csv, maybeFirstLine, parsedOptions)
  }

  /**
   * Infers the schema from `Dataset` that stores CSV string records.
   *
   * With no first line available, returns an empty schema.
   */
  def inferFromDataset(
      sparkSession: SparkSession,
      csv: Dataset[String],
      maybeFirstLine: Option[String],
      parsedOptions: CSVOptions): StructType = maybeFirstLine match {
    case Some(firstLine) =>
      val firstRow = new CsvParser(parsedOptions.asParserSettings).parseLine(firstLine)
      val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
      val header = makeSafeHeader(firstRow, caseSensitive, parsedOptions)
      // Tokenize every remaining line; comments, blanks and the header line are
      // filtered per partition before parsing.
      val tokenRDD = csv.rdd.mapPartitions { iter =>
        val filteredLines = CSVUtils.filterCommentAndEmpty(iter, parsedOptions)
        val linesWithoutHeader =
          CSVUtils.filterHeaderLine(filteredLines, firstLine, parsedOptions)
        val parser = new CsvParser(parsedOptions.asParserSettings)
        linesWithoutHeader.map(parser.parseLine)
      }
      CSVInferSchema.infer(tokenRDD, header, parsedOptions)
    case None =>
      // If the first line could not be read, just return the empty schema.
      StructType(Nil)
  }

  // Builds a Dataset[String] of raw lines. UTF-8 input goes through the text
  // file format (fast path); any other charset falls back to hadoopFile with
  // explicit per-line decoding.
  private def createBaseDataset(
      sparkSession: SparkSession,
      inputPaths: Seq[FileStatus],
      options: CSVOptions): Dataset[String] = {
    val paths = inputPaths.map(_.getPath.toString)
    if (Charset.forName(options.charset) == StandardCharsets.UTF_8) {
      sparkSession.baseRelationToDataFrame(
        DataSource.apply(
          sparkSession,
          paths = paths,
          className = classOf[TextFileFormat].getName
        ).resolveRelation(checkFilesExist = false))
        .select("value").as[String](Encoders.STRING)
    } else {
      val charset = options.charset
      val rdd = sparkSession.sparkContext
        .hadoopFile[LongWritable, Text, TextInputFormat](paths.mkString(","))
        .mapPartitions(_.map(pair => new String(pair._2.getBytes, 0, pair._2.getLength, charset)))
      sparkSession.createDataset(rdd)(Encoders.STRING)
    }
  }
}
/** Whole-file CSV reading: each file is parsed as one stream, so files cannot be split. */
object WholeFileCSVDataSource extends CSVDataSource {
  override val isSplitable: Boolean = false

  override def readFile(
      conf: Configuration,
      file: PartitionedFile,
      parser: UnivocityParser,
      schema: StructType): Iterator[InternalRow] = {
    // Stream the entire (possibly compressed) file through the parser.
    UnivocityParser.parseStream(
      CodecStreams.createInputStreamWithCloseResource(conf, file.filePath),
      parser.options.headerFlag,
      parser,
      schema)
  }

  override def infer(
      sparkSession: SparkSession,
      inputPaths: Seq[FileStatus],
      parsedOptions: CSVOptions): StructType = {
    val csv = createBaseRdd(sparkSession, inputPaths, parsedOptions)
    // Tokenize just enough of the input to obtain one candidate header row.
    csv.flatMap { lines =>
      UnivocityParser.tokenizeStream(
        CodecStreams.createInputStreamWithCloseResource(lines.getConfiguration, lines.getPath()),
        shouldDropHeader = false,
        new CsvParser(parsedOptions.asParserSettings))
    }.take(1).headOption match {
      case Some(firstRow) =>
        val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
        val header = makeSafeHeader(firstRow, caseSensitive, parsedOptions)
        // Re-tokenize all files (dropping headers per the options) for inference.
        val tokenRDD = csv.flatMap { lines =>
          UnivocityParser.tokenizeStream(
            CodecStreams.createInputStreamWithCloseResource(
              lines.getConfiguration,
              lines.getPath()),
            parsedOptions.headerFlag,
            new CsvParser(parsedOptions.asParserSettings))
        }
        CSVInferSchema.infer(tokenRDD, header, parsedOptions)
      case None =>
        // If the first row could not be read, just return the empty schema.
        StructType(Nil)
    }
  }

  // Reads the input paths as one PortableDataStream per file.
  private def createBaseRdd(
      sparkSession: SparkSession,
      inputPaths: Seq[FileStatus],
      options: CSVOptions): RDD[PortableDataStream] = {
    val paths = inputPaths.map(_.getPath)
    val name = paths.mkString(",")
    val job = Job.getInstance(sparkSession.sessionState.newHadoopConf())
    FileInputFormat.setInputPaths(job, paths: _*)
    val conf = job.getConfiguration
    val rdd = new BinaryFileRDD(
      sparkSession.sparkContext,
      classOf[StreamInputFormat],
      classOf[String],
      classOf[PortableDataStream],
      conf,
      sparkSession.sparkContext.defaultMinPartitions)
    // Only returns `PortableDataStream`s without paths.
    rdd.setName(s"CSVFile: $name").values
  }
}
| bOOm-X/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVDataSource.scala | Scala | apache-2.0 | 9,482 |
// Fixture hierarchy for pattern-match analysis (exhaustivity/reachability) checks.
sealed abstract class AbstractFile
class PlainFile(path: String) extends AbstractFile
class VirtualFile(name: String) extends AbstractFile
abstract class ZipArchive(path: String) extends AbstractFile {
  // Inner hierarchy, referenced below via type projections (ZipArchive#Entry).
  sealed abstract class Entry(name: String) extends VirtualFile(name)
  class DirEntry(path: String) extends Entry(path)
}
object Test {
  // NOTE(review): this looks like a compiler test fixture for match analysis.
  // The ZipArchive#DirEntry case is shadowed by the preceding ZipArchive#Entry
  // case, and the repeated `c1` binder appears intentional — do not "fix".
  def foo(file: AbstractFile) = file match {
    case a: PlainFile =>
    case b: ZipArchive =>
    case c1: ZipArchive#Entry =>
    case c1: ZipArchive#DirEntry =>
    case c: VirtualFile =>
  }
}
| som-snytt/dotty | tests/patmat/i4880a.scala | Scala | apache-2.0 | 538 |
package chapter.two
import scala.annotation.tailrec
object Fibonacci {

  def main(args: Array[String]): Unit = {
    println(fibonacciTailRecursive(10))
  }

  /** Returns the `num`-th Fibonacci number, 0-indexed: fib(0)=0, fib(1)=1,
    * fib(2)=1, ..., fib(10)=55.
    *
    * Fix: the previous base case (`num == 0 || num == 1 => acc1 + acc2`) was
    * off by one — it returned fib(num + 1) (e.g. 89 for num = 10) and looped
    * forever for negative input.
    *
    * @throws IllegalArgumentException if `num` is negative.
    */
  def fibonacciTailRecursive(num: Int): Int = {
    require(num >= 0, "Fibonacci is undefined for negative indices")
    @tailrec
    def go(n: Int, prev: Int, curr: Int): Int =
      // Invariant: after (num - n) steps, prev == fib(num - n); at n == 0, prev == fib(num).
      if (n == 0) prev
      else go(n - 1, curr, prev + curr)
    go(num, 0, 1)
  }
}
| zeigernz/fp-in-scala | src/main/scala/chapter/two/Fibonacci.scala | Scala | mit | 392 |
/**
* Created by Variant on 16/3/16.
*/
/** Demonstrates default-initializing a member with the `_` placeholder. */
class AbstractClassOps{
  // A `val` cannot be default-initialized with `_` (vals are immutable),
  // so only the `var` form compiles:
  //val id :Int = _
  var id :Int =_
}
/** Abstract base: `id`, `age` and `teach` are left abstract for subclasses to define. */
abstract class SuperTeacher(val name :String){
  var id : Int
  var age :Int
  def teach
}
/** Concrete subclass implementing every abstract member of SuperTeacher. */
class TeacherForMaths(name : String) extends SuperTeacher(name){
  // By convention, write `override` explicitly when implementing abstract members.
  override var id = name.hashCode
  override var age: Int = 29
  override def teach{
    println("Teaching !!!")
  }
}
object AbstractClass {
  /** Demo entry point: instantiates the concrete subclass and prints its members. */
  def main(args: Array[String]) {
    val mathsTeacher = new TeacherForMaths("Spark")
    mathsTeacher.teach
    println(s"teacher.id :${mathsTeacher.id}")
    println(s"teacher.age :${mathsTeacher.age}")
  }
}
| sparkLiwei/ProgrammingNote | scalaLearning/scalaOOP/AbstractClass.scala | Scala | cc0-1.0 | 715 |
package xyz.nabijaczleweli.scala_game_of_life.engine.rendering
/** Holder of data for screen [[xyz.nabijaczleweli.scala_game_of_life.engine.GameRenderer.RenderInfoHolder.settingspause]]
*
* @author Jędrzej
* @since 27.04.14
*/
class SettingsPauseData extends ExtendedScreenData[String] {
	// Startup/default payload for this screen is the empty string.
	override def startup = ""
}
| nabijaczleweli/Scala-Game-of-Life | src/main/scala/xyz/nabijaczleweli/scala_game_of_life/engine/rendering/SettingsPauseData.scala | Scala | mit | 328 |
package dotty.communitybuild
import java.nio.file._
import java.io.{PrintWriter, File}
import java.nio.charset.StandardCharsets.UTF_8
object CommunityBuildRunner:
  /** Depending on the mode of operation, either
   * runs the test or updates the project. Updating
   * means that all the dependencies are fetched but
   * minimal other extra other work is done. Updating
   * is necessary since we run tests each time on a fresh
   * Docker container. We run the update on Docker container
   * creation time to create the cache of the dependencies
   * and avoid network overhead. See https://github.com/lampepfl/dotty-drone
   * for more infrastructural details.
   */
  extension (self: CommunityProject) def run()(using suite: CommunityBuildRunner): Unit =
    // Early exit: skip projects that need experimental compiler features when
    // this build does not support them.
    if self.requiresExperimental && !self.compilerSupportExperimental then
      log(s"Skipping ${self.project} - it needs experimental features unsupported in this build.")
      return
    // Publish direct and test-only dependencies locally before running the project.
    self.dependencies().foreach(_.publish())
    self.testOnlyDependencies().foreach(_.publish())
    suite.runProject(self)
trait CommunityBuildRunner:

  /** fails the current operation, can be specialised in a concrete Runner
   * - overridden in `CommunityBuildTest`
   */
  def failWith(msg: String): Nothing = throw IllegalStateException(msg)

  /** Build the given project with the published local compiler and sbt plugin.
   *
   * This test reads the compiler version from community-build/dotty-bootstrapped.version
   * and expects community-build/sbt-injected-plugins to set any necessary plugins.
   *
   * @param project The project name, should be a git submodule in community-build/
   * @param command The binary file of the program used to test the project – usually
   *                a build tool like SBT or Mill
   * @param arguments Arguments to pass to the testing program
   */
  def runProject(projectDef: CommunityProject): Unit =
    val project = projectDef.project
    val command = projectDef.binaryName
    val arguments = projectDef.buildCommands
    val compilerVersion = projectDef.compilerVersion

    // Runs `task` until it exits with 0, retrying at most `timesToRerun` more
    // times; returns true iff some run eventually succeeded.
    @annotation.tailrec
    def execTimes(task: () => Int, timesToRerun: Int): Boolean =
      val exitCode = task()
      if exitCode == 0
      then true
      else if timesToRerun == 0
      then false
      else
        log(s"Rerunning tests in $project because of a previous run failure.")
        execTimes(task, timesToRerun - 1)

    log(s"Building $project with dotty-bootstrapped $compilerVersion...")
    val projectDir = communitybuildDir.resolve("community-projects").resolve(project)
    // Fail fast with setup instructions when the git submodule was never initialized.
    if !Files.exists(projectDir.resolve(".git")) then
      failWith(s"""
                  |
                  |Missing $project submodule. You can initialize this module using
                  |
                  |  git submodule update --init community-build/community-projects/$project
                  |
                  |""".stripMargin)
    // Each project build gets up to three attempts before being reported as failed.
    val testsCompletedSuccessfully = execTimes(projectDef.build, 3)
    if !testsCompletedSuccessfully then
      failWith(s"""
                  |
                  |$command exited with an error code. To reproduce without JUnit, use:
                  |
                  |  sbt community-build/prepareCommunityBuild
                  |  cd community-build/community-projects/$project
                  |  $command ${arguments.init.mkString(" ")} "${arguments.last}"
                  |
                  |For a faster feedback loop on SBT projects, one can try to extract a direct call to dotc
                  |using the sbt export command. For instance, for scalacheck, use
                  |  sbt export jvm/test:compileIncremental
                  |
                  |""".stripMargin)
  end runProject
end CommunityBuildRunner
| lampepfl/dotty | community-build/src/scala/dotty/communitybuild/CommunityBuildRunner.scala | Scala | apache-2.0 | 3,663 |
package services
import java.util.UUID
import com.mohiva.play.silhouette.api.LoginInfo
import models.{MailToken, User}
import models.daos.MailTokenDAO
import models.services.MailServiceImpl
import org.joda.time.DateTime
import org.specs2.mock.Mockito
import org.specs2.specification.Scope
import play.api.i18n.MessagesApi
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.mailer.MailerClient
import play.api.mvc.RequestHeader
import play.api.test._
import play.api.test.Helpers._
import reactivemongo.api.commands.WriteResult
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.reflect.ClassTag
/**
* Created by Biacco42 on 2016/04/19.
*/
/** Unit spec for MailServiceImpl: confirmation-mail sending and token consumption. */
class MailServiceSpec extends PlaySpecification with Mockito with Inject {

  // Guice-provided collaborators; the token DAO is mocked per test context below.
  lazy val mailerClient = inject[MailerClient]
  lazy val messagesApi = inject[MessagesApi]

  "MailService#sendConfirm" should {
    "send an email for testUser" in new Context {
      val requestHeaderMock = mock[RequestHeader]
      requestHeaderMock.secure returns false
      requestHeaderMock.host returns "hoge.com"
      mailTokenDAOMock.create(any[MailToken]) returns Future(mock[WriteResult])
      val testMailService = new MailServiceImpl(mailerClient, messagesApi, mailTokenDAOMock)
      val ret = await(testMailService.sendConfirm(testUser)(requestHeaderMock))
      // NOTE(review): beMatching("") only checks the result against an empty
      // pattern — presumably "completed without error text"; confirm intent.
      ret must beMatching("")
    }
  }

  "MailService#consumeToken" should {
    "removes token from repo and returns user Option[UUID]" in new ConsumeTokenContext {
      val testMailService = new MailServiceImpl(mailerClient, messagesApi, mailTokenDAOMock)
      val ret = await(testMailService.consumeToken(testToken, "confirm"))
      ret must beSome.which{_ == testUser.userID}
    }
    "fails when token doesn't match anyone and returns None" in new ConsumeTokenContext {
      val testInvalidToken = UUID.randomUUID()
      mailTokenDAOMock.read(any[UUID]) returns Future(None)
      val testMailService = new MailServiceImpl(mailerClient, messagesApi, mailTokenDAOMock)
      val ret = await(testMailService.consumeToken(testInvalidToken, "confirm"))
      ret must beNone
    }
    "fails when token kind doesn't match and returns None" in new ConsumeTokenContext {
      val testMailService = new MailServiceImpl(mailerClient, messagesApi, mailTokenDAOMock)
      val ret = await(testMailService.consumeToken(testToken, "reset"))
      ret must beNone
    }
    "fails when token has already expired and returns None" in new ConsumeTokenContext {
      // Override the default stub with a token that expired yesterday.
      mailTokenDAOMock.read(testToken) returns Future(Option(MailToken(testToken, testUser.userID, new DateTime().minusDays(1), "confirm")))
      val testMailService = new MailServiceImpl(mailerClient, messagesApi, mailTokenDAOMock)
      val ret = await(testMailService.consumeToken(testToken, "confirm"))
      ret must beNone
    }
  }

  // Base fixture: mocked DAO plus a fully-populated test user.
  trait Context extends Scope {
    val mailTokenDAOMock = mock[MailTokenDAO]
    val testUser = User(UUID.randomUUID(),
      LoginInfo("email", "hoge@piyo.com"),
      Option("Test"), Option("Taro"),
      Option("Test Taro"),
      Option("hoge@piyo.com"),
      None,
      None)
  }

  // Adds a valid (expires tomorrow) "confirm" token stubbed into the DAO.
  trait ConsumeTokenContext extends Context {
    val testToken = UUID.randomUUID()
    mailTokenDAOMock.read(testToken) returns Future(Option(MailToken(testToken, testUser.userID, new DateTime().plusDays(1), "confirm")))
    mailTokenDAOMock.delete(any[UUID]) returns Future(mock[WriteResult])
  }
}
/** Test mixin providing ad-hoc Guice injection of application components. */
trait Inject {
  // One throwaway application injector per mixin instance, built lazily.
  lazy val injector = (new GuiceApplicationBuilder).injector()
  // Fetches an instance of T; the ClassTag supplies the runtime class to Guice.
  def inject[T : ClassTag]: T = injector.instanceOf[T]
}
| Biacco42/play-silhouette-mail-confirm-seed | test/Services/MailServiceSpec.scala | Scala | apache-2.0 | 3,628 |
/*
* The MIT License
*
* Copyright (c) 2017 Fulcrum Genomics
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
package com.fulcrumgenomics.util
import java.nio.file.Paths
import com.fulcrumgenomics.fasta.{SequenceDictionary, SequenceMetadata}
import com.fulcrumgenomics.testing.UnitSpec
import com.fulcrumgenomics.util.GeneAnnotations.Exon
/** Tests for RefFlatSource parsing of refFlat gene-annotation records. */
class RefFlatSourceTest extends UnitSpec {
  // Adapted from http://hgdownload.cse.ucsc.edu/goldenPath/hg18/database/refFlat.txt.gz
  private val RefFlatFile = Paths.get("src/test/resources/com/fulcrumgenomics/util/refFlat.txt.gz")
  private val Dictionary = SequenceDictionary(SequenceMetadata(name="chr1", length=249250621))

  "RefFlatSource" should "read valid refFlat from various kinds of input resources" in {
    val rf = RefFlatFile
    val lines = Io.readLines(rf)
    // The same file is supplied as a path, a File, a stream, and pre-read lines;
    // every representation must yield identical parse results.
    Seq(
      RefFlatSource(rf, Some(Dictionary)),
      RefFlatSource(rf.toFile, Some(Dictionary)),
      RefFlatSource(Io.toInputStream(rf), Some(Dictionary)),
      RefFlatSource(lines, Some(Dictionary))
    ).foreach(source => {
      val genes = source.toSeq
      // # of genes
      genes.length shouldBe 142
      // # of transcripts across all genes
      genes.map(_.size).sum shouldBe 360
      // # of exons across all transcripts across all genes
      genes.map { gene => gene.map(_.exons.size).sum }.sum shouldBe 4613
      // verify the first gene
      val gene = genes.head
      gene.name shouldBe "ANKRD20A12P"
      gene.loci.size shouldBe 1
      gene.loci.head.chrom shouldBe "chr1"
      gene.loci.head.start shouldBe 141638944
      gene.loci.head.end shouldBe 141655128
      gene.loci.head.negativeStrand shouldBe true
      gene.size shouldBe 1
      // verify the first transcript in the first gene
      val transcript = gene.head
      transcript.name shouldBe "NR_046228"
      transcript.start shouldBe 141638944
      transcript.end shouldBe 141655128
      transcript.cdsStart shouldBe None
      transcript.cdsEnd shouldBe None
      transcript.exons.size shouldBe 5
      //verify the first exon in the first transcript
      val exon = transcript.exons.head
      exon shouldBe Exon(141654484, 141655128)
    })
  }

  it should "separate loci where genes have transcripts that are not overlapping" in {
    // Same gene name on two different chromosomes -> one gene, two loci.
    val lines = Iterator(
      Seq("ACKR4", "NM_178445-1", "chr1", "+", "133801670", "133804175", "133801931", "133802984", "1", "133801670", "133804175").mkString("\\t"),
      Seq("ACKR4", "NM_178445-2", "chr3", "+", "133801670", "133804175", "133801931", "133802984", "1", "133801670", "133804175").mkString("\\t")
    )
    val source = RefFlatSource(lines, dict=None).toSeq
    source should have size 1
    source.head.loci should have size 2
  }

  it should "separate loci where genes have transcripts on different strands" in {
    // Same coordinates but opposite strands -> one gene, two loci.
    val lines = Iterator(
      Seq("ACKR4", "NM_178445-1", "chr3", "+", "133801670", "133804175", "133801931", "133802984", "1", "133801670", "133804175").mkString("\\t"),
      Seq("ACKR4", "NM_178445-2", "chr3", "-", "133801670", "133804175", "133801931", "133802984", "1", "133801670", "133804175").mkString("\\t")
    )
    val source = RefFlatSource(lines, dict=None).toSeq
    source should have size 1
    source.head.loci should have size 2
  }

  it should "fail if the # of exon starts or ends do not equal the exon count for a transcript" in {
    val startsMismatch = Iterator(
      Seq("ACKR4", "NM_178445", "chr3", "+", "133801670", "133804175", "133801931", "133802984", "1", "133801670,133801680", "133804175").mkString("\\t")
    )
    val endsMismatch = Iterator(
      Seq("ACKR4", "NM_178445", "chr3", "+", "133801670", "133804175", "133801931", "133802984", "1", "133801670", "133804175,133801685").mkString("\\t")
    )
    val bothMismatch = Iterator(
      Seq("ACKR4", "NM_178445", "chr3", "+", "133801670", "133804175", "133801931", "133802984", "1", "133801670,133801680", "133804175,133804185").mkString("\\t")
    )
    an[Exception] should be thrownBy RefFlatSource(startsMismatch, dict=None)
    an[Exception] should be thrownBy RefFlatSource(endsMismatch, dict=None)
    an[Exception] should be thrownBy RefFlatSource(bothMismatch, dict=None)
  }

  it should "filter out genes with chromosomes not in the sequence dictionary" in {
    // Dictionary only contains chr1, so the chr3 record must be dropped.
    val lines = Iterator(
      Seq("ACKR4", "NM_178445-1", "chr1", "+", "133801670", "133804175", "133801931", "133802984", "1", "133801670", "133804175").mkString("\\t"),
      Seq("ACKR4", "NM_178445-1", "chr3", "+", "133801670", "133804175", "133801931", "133802984", "1", "133801670", "133804175").mkString("\\t")
    )
    RefFlatSource(lines, dict=Some(Dictionary)) should have size 1
  }
}
| fulcrumgenomics/fgbio | src/test/scala/com/fulcrumgenomics/util/RefFlatSourceTest.scala | Scala | mit | 5,699 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.directed
import cc.factorie.infer.{DiscreteSummary1, Maximize, SimpleDiscreteMarginal1, Summary}
import cc.factorie.model.Model
import cc.factorie.variable._
/** A directed factor whose child variable is discrete, so the factor can
  * report the probability of each candidate integer value for that child. */
trait DiscreteGeneratingFactor extends DirectedFactor {
  //type ChildType <: GeneratedDiscreteVar
  /** Probability that the child variable takes the value with the given integer index. */
  def prValue(value:Int): Double
  //def prValue(s:StatisticsType, value:Int): Double
}
/** Directed family connecting a DiscreteVariable child (_1) to a
  * ProportionsVariable parent (_2): the child is distributed according
  * to the parent's proportions. */
object Discrete extends DirectedFamily2[DiscreteVariable,ProportionsVariable] {
  case class Factor(override val _1:DiscreteVariable, override val _2:ProportionsVariable) extends super.Factor(_1, _2) with DiscreteGeneratingFactor {
    //def proportions: Proportions = _2 // Just an alias
    // Probability of a particular child value under the given proportions.
    def pr(child:DiscreteValue, proportions:Proportions) = proportions(child.intValue)
    // Probability of the child's current value under the parent's current proportions.
    override def pr: Double = _2.value.apply(_1.intValue)
    def prValue(p:Proportions, intValue:Int): Double = p.apply(intValue)
    override def prValue(intValue:Int): Double = _2.value.apply(intValue)
    // Sample a value for the child from the given (or current parent) proportions.
    def sampledValue(p:Proportions)(implicit random: scala.util.Random): DiscreteValue = _1.domain.apply(p.sampleIndex)
    override def sampledValue(implicit random: scala.util.Random): DiscreteValue = _1.domain.apply(_2.value.sampleIndex)
    def maxIntValue(p:Proportions): Int = p.maxIndex // TODO
    // For collapsed Gibbs sampling: add `weight` to the mass of the child's current value.
    override def updateCollapsedParents(weight:Double): Boolean = { _2.value.masses.+=(_1.intValue, weight); true }
  }
  def newFactor(a:DiscreteVariable, b:ProportionsVariable) = {
    // The child's domain and the parent's proportions vector must agree in size.
    if (a.domain.size != b.value.length) throw new Error("Discrete child domain size different from parent Proportions size.")
    Factor(a, b)
  }
}
/** Maximizer that sets each DiscreteVariable to its maximum-probability value
  * under the single Discrete.Factor that touches it in the model. */
object MaximizeGeneratedDiscrete extends Maximize[Iterable[DiscreteVariable],Model] {
  /** Mutate `d` to the max-probability value of its parent's proportions.
    * Requires the model to have exactly one factor touching `d`, and that
    * factor to be a Discrete.Factor. */
  def apply(d:DiscreteVariable, model:Model): Unit = {
    val dFactors = model.factors(d)
    require(dFactors.size == 1)
    dFactors.head match {
      case factor:Discrete.Factor => d.set(factor._2.value.maxIndex)(null)
      case _ => throw new Error("This Maximizer only handles factors of type Discrete.Factor.")
    }
  }
  def apply(varying:Iterable[DiscreteVariable], model:Model): Unit = for (d <- varying) apply(d, model)
  /** Like apply, but returns the result as a point-mass marginal instead of
    * mutating the variable; None when the factor is not a Discrete.Factor. */
  def infer[V<:DiscreteVariable](varying:V, model:Model): Option[SimpleDiscreteMarginal1[V]] = {
    val dFactors = model.factors(varying)
    require(dFactors.size == 1)
    dFactors.head match {
      case factor:Discrete.Factor => Some(new SimpleDiscreteMarginal1(varying, new SingletonProportions1(varying.domain.size, factor._2.value.maxIndex)))
      case _ => None
    }
  }
  /** Infer point-mass marginals for each variable independently.
    * `marginalizing` summaries are not supported. */
  def infer(variables:Iterable[DiscreteVariable], model:Model, marginalizing:Summary): DiscreteSummary1[DiscreteVariable] = {
    // Fixed message grammar: previously read "Multivariate case yet implemented."
    if (marginalizing ne null) throw new Error("Multivariate case not yet implemented.")
    val result = new DiscreteSummary1[DiscreteVariable]
    for (v <- variables) infer(v, model).foreach(result += _)
    result
  }
}
/*class Binomial(p:RealVarParameter, trials:Int) extends OrdinalVariable with GeneratedVariable {
this := 0
}*/
// The binary special case, for convenience
// TODO Rename this Boolean, inherit from BooleanVariable, and move it to a new file
/** The outcome of a coin flip, with boolean value; defaults to false. */
class Flip(value:Boolean = false) extends BooleanVariable(value)
/** A coin, with Multinomial distribution over outcomes, which are Flips.
  * Index 0 holds the probability of tails (1-p) and index 1 of heads (p).
  * @param p probability of the "true" outcome; must lie in [0, 1]. */
class Coin(p:Double) extends ProportionsVariable(new DenseProportions1(2)) {
  // Validate the parameter before touching any state: previously this assertion
  // ran only after the proportions had already been written.
  assert (p >= 0.0 && p <= 1.0)
  def this() = this(0.5)
  value(0) = 1.0 - p
  value(1) = p
  //def flip: Flip = { new Flip :~ Discrete(this) }
  //def flip(n:Int) : Seq[Flip] = for (i <- 0 until n) yield flip
}
/** Factory allowing `Coin(p)` in place of `new Coin(p)`. */
object Coin {
  def apply(p: Double): Coin = new Coin(p)
}
| patverga/factorie | src/main/scala/cc/factorie/directed/Discrete.scala | Scala | apache-2.0 | 4,379 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.junit
import org.junit._
import org.junit.Assert._
import org.scalajs.junit.utils._
/** Fixture verifying JUnit semantics when a @Before method throws: the test
  * body must not run, @After must still be invoked, and the run must be
  * reported as an error rather than a success. */
class ExceptionInBeforeTest {
  @Before def before(): Unit =
    throw new UnsupportedOperationException("Exception in before()")

  // after() deliberately throws so the harness can verify it was actually invoked.
  @After def after(): Unit =
    throw new IllegalArgumentException("after() must actually be called")

  /* Even if the test method declares expecting the exception thrown by the
   * before() method, it must result in an error, not a success.
   */
  @Test(expected = classOf[UnsupportedOperationException])
  def test(): Unit =
    throw new IllegalStateException("test() must not be called")
}
class ExceptionInBeforeTestAssertions extends JUnitTest
| scala-js/scala-js | junit-test/shared/src/test/scala/org/scalajs/junit/ExceptionInBeforeTest.scala | Scala | apache-2.0 | 967 |
package fpinscala.monoids
import java.util.concurrent.Executors
import fpinscala.parallelism.Nonblocking.Par
import fpinscala.testing.{Gen, Prop}
import org.scalatest.{FlatSpec, Matchers}
/** Exercises the Monoid chapter: law checks for the primitive monoids, the
  * folding helpers, the word-count monoid, the Foldable instances, product
  * monoids, and the `bag` word-count map. */
class MonoidSpec extends FlatSpec with Matchers {

  import Monoid._

  "intAddition" should "obey monoid laws" in {
    Prop.run(Monoid.monoidLaws(Monoid.intAddition, Gen.integer))
  }

  "intMultiplication" should "obey monoid laws" in {
    Prop.run(Monoid.monoidLaws(Monoid.intMultiplication, Gen.integer))
  }

  "booleanOr" should "obey monoid laws" in {
    Prop.run(Monoid.monoidLaws(Monoid.booleanOr, Gen.boolean))
  }

  "booleanAnd" should "obey monoid laws" in {
    Prop.run(Monoid.monoidLaws(Monoid.booleanAnd, Gen.boolean))
  }

  "optionMonoid" should "obey monoid laws" in {
    // Check both the Some-heavy and all-None cases.
    Prop.run(Monoid.monoidLaws(Monoid.optionMonoid[Int], Gen.integer.map(Some(_))) &&
      Monoid.monoidLaws(Monoid.optionMonoid[Boolean], Gen.boolean.map(_ => None)))
  }

  "endoMonoid" should "obey monoid laws" in {
    // We do not have a generator for Gen[A => A] although we could implement this as a
    // generator that creates a LUT?
    def f1(i: Int) = i + 2
    def f2(i: Int) = i + 3
    def f3(i: Int) = i + 5
    val m = Monoid.endoMonoid[Int]
    m.op(f1, f2)(10) should be(15)
    // Associativity and identity checked point-wise on sample inputs.
    m.op(m.op(f1, f2), f3)(100) should be(m.op(f1, m.op(f2, f3))(100))
    m.op(f1, m.zero)(10) should be(f1(10))
  }

  "concatenate" should "concatenate a list of strings" in {
    val words = List("Hic", "Est", "Index")
    Monoid.concatenate(words, Monoid.stringMonoid) should be("HicEstIndex")
  }

  "foldMap" should "map and then fold using a monoid" in {
    val numbers = List(123, 456, 789)
    Monoid.foldMap(numbers, Monoid.stringMonoid)(_.toString) should be("123456789")
  }

  "foldRight" should "be implemented via foldMap" in {
    val l = List(1, 2, 3, 4)
    Monoid.foldRight(l)(List[Int]())(_ :: _) should be(List(4, 3, 2, 1))
  }

  "foldLeft" should "be implemented via foldMap" in {
    val l = List(1, 2, 3, 4)
    Monoid.foldLeft(l)(List[Int]())((b, a) => a :: b) should be(List(1, 2, 3, 4))
  }

  "foldMapV" should "map and then fold using a monoid" in {
    val numbers = List(1, 2, 3, 4, 5, 6, 7, 8, 9)
    Monoid.foldMap(numbers, Monoid.stringMonoid)(_.toString) should be("123456789")
  }

  "parFoldMap" should "map items in parallel" in {
    val numbers = IndexedSeq(1, 2, 3, 4, 5, 6, 7, 8, 9)
    val es = Executors.newSingleThreadExecutor
    // Shut the executor down even if the assertion fails; previously it leaked,
    // leaving a non-daemon thread alive after the suite finished.
    try
      Par.run(es)(Monoid.parFoldMap(numbers, Monoid.stringMonoid)(_.toString)) should be("123456789")
    finally es.shutdown()
  }

  "numbers" should "be ordered" in {
    val numbers = IndexedSeq(-100, -10, 3, 1000000, 444444444)
    Monoid.ordered(numbers) should be(true)
  }

  "numbers" should "not be ordered" in {
    val numbers = IndexedSeq(1, 2, 3, 5, 4, 6, 7)
    Monoid.ordered(numbers) should not be true
  }

  "same numbers" should "be ordered" in {
    val numbers = IndexedSeq(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3)
    Monoid.ordered(numbers) should be(true)
  }

  "wcMonoid" should "handle two parts" in {
    // All Stub/Part combinations, including empty edge words.
    wcMonoid.op(Part("fee", 2, "fi"), Part("fo", 3, "fum")) should be(Part("fee", 6, "fum"))
    wcMonoid.op(Part("fee", 2, "fi"), Stub("fo")) should be(Part("fee", 2, "fifo"))
    wcMonoid.op(Stub("fo"), Part("fee", 2, "fi")) should be(Part("fofee", 2, "fi"))
    wcMonoid.op(Stub("fo"), Stub("fi")) should be(Stub("fofi"))
    wcMonoid.op(Part("fee", 2, ""), Part("fi", 3, "fo")) should be(Part("fee", 6, "fo"))
    wcMonoid.op(Part("fee", 3, "fi"), Part("", 3, "fo")) should be(Part("fee", 7, "fo"))
    wcMonoid.op(Part("fee", 3, ""), Part("", 3, "fum")) should be(Part("fee", 6, "fum"))
  }

  "wcMonoid" should "obey monoid laws" in {
    // We could use a better Gen[WC]
    val g = Gen.boolean.flatMap(
      b => if (b)
        for {
          c <- Gen.choose(0, 10)
          s <- Gen.stringN(c) map (_.filter(!_.isWhitespace))
        } yield Stub(s)
      else
        for {
          lc <- Gen.choose(0, 4)
          l <- Gen.stringN(lc) map (_.filter(!_.isWhitespace))
          c <- Gen.integer
          rc <- Gen.choose(0, 4)
          r <- Gen.stringN(rc) map (_.filter(!_.isWhitespace))
        } yield Part(l, c, r)
    )
    Prop.run(Monoid.monoidLaws(Monoid.wcMonoid, g))
  }

  "count" should """parse "lorem ipsum do" as 3""" in {
    val str = "lorem ipsum do"
    count(str) should be(3)
  }

  "count" should """parse "lor sit amet" as 3""" in {
    val str = "lor sit amet"
    count(str) should be(3)
  }

  "count" should "count 5 words" in {
    val str = "lorem ipsum dolor ist amet, "
    count(str) should be(5)
  }

  "count" should "count correctly" in {
    // Degenerate inputs: empty string, whitespace only, single character.
    count("") should be(0)
    count(" ") should be(0)
    count("f") should be(1)
  }

  "TreeFoldable" should "foldMap" in {
    val tree = Branch(Branch(Leaf(3), Branch(Leaf(4), Leaf(5))), Branch(Leaf(7), Leaf(9)))
    TreeFoldable.foldMap(tree)(_.toString)(stringMonoid) should be("34579")
  }

  "TreeFoldable" should "foldLeft" in {
    val tree = Branch(Branch(Leaf(3), Branch(Leaf(4), Leaf(5))), Branch(Leaf(7), Leaf(9)))
    def f(s: String, i: Int) = s + i.toString
    TreeFoldable.foldLeft(tree)("z")(f) should be("z34579")
  }

  "TreeFoldable" should "foldRight" in {
    val tree = Branch(Branch(Leaf(3), Branch(Leaf(4), Leaf(5))), Branch(Leaf(7), Leaf(9)))
    def f(i: Int, s: String) = s + i.toString
    TreeFoldable.foldRight(tree)("z")(f) should be("z97543")
  }

  "OptionFoldable" should "foldMap" in {
    OptionFoldable.foldMap(Some(5))(_.toString)(stringMonoid) should be("5")
    OptionFoldable.foldMap(None)(_.toString)(stringMonoid) should be("")
  }

  "OptionFoldable" should "foldLeft" in {
    def f(a: String, b: Int) = a + b.toString
    OptionFoldable.foldLeft(Some(5))("z")(f) should be("z5")
    OptionFoldable.foldLeft(None)("z")(f) should be("z")
  }

  "OptionFoldable" should "foldRight" in {
    def f(a: Int, b: String) = a.toString + b
    OptionFoldable.foldRight(Some(5))("z")(f) should be("5z")
    OptionFoldable.foldRight(None)("z")(f) should be("z")
  }

  "Monoid" should "product monoids" in {
    val m = Monoid.productMonoid(Monoid.stringMonoid, Monoid.intMultiplication)
    m.op(("2", 3), ("4", 5)) should be(("24", 15))
    val g = for {
      c <- Gen.stringN(1)
      i <- Gen.integer
    } yield (c, i)
    Prop.run(Monoid.monoidLaws(m, g))
  }

  "Monoid.bag" should "do a wordcount" in {
    val v = Vector("a", "rose", "is", "a", "rose")
    bag(v) should be(Map("a" -> 2, "rose" -> 2, "is" -> 1))
  }
}
| Tillaert/fpinscala | exercises/src/test/scala/fpinscala/monoids/MonoidSpec.scala | Scala | mit | 6,539 |
package com.webtrends.harness.component.spray
import _root_.spray.http.StatusCodes
import _root_.spray.routing.Route
import akka.pattern.ask
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.app.Harness
import com.webtrends.harness.app.HarnessActor.ShutdownSystem
import com.webtrends.harness.component._
import com.webtrends.harness.component.messages.{ClusterState, Rejoin, StatusRequest, Subscriptions}
import com.webtrends.harness.component.spray.route.{RouteAccessibility, RouteManager}
import com.webtrends.harness.health._
import com.webtrends.harness.service.ServiceManager
import com.webtrends.harness.service.ServiceManager.GetMetaDataByName
import com.webtrends.harness.service.messages.GetMetaData
import com.webtrends.harness.service.meta.ServiceMetaData
import org.json4s._
import org.joda.time.{DateTime, DateTimeZone}
import scala.util.{Failure, Success}
/** Spray worker serving Wookiee's internal HTTP endpoints (health checks,
  * metrics, service and cluster management) alongside any routes that
  * services registered for internal access. */
class InternalSprayWorker extends CoreSprayWorker {

  import context.dispatcher

  // Ask-timeout for requests forwarded to the health/service/component actors.
  implicit val timeout = Timeout(spSettings.requestTimeout.toMillis)

  // Harness-level actors backing the /healthcheck and /services endpoints.
  val healthActor = actorRefFactory.actorSelection(HarnessConstants.HealthFullName)
  val serviceActor = actorRefFactory.actorSelection(HarnessConstants.ServicesFullName)

  /** Internally-accessible routes: service-registered routes plus the built-in
    * base and static routes, chained with `~`. */
  override def getRoutes: Route = {
    // NOTE(review): the !r.equals(Map.empty) filter looks like a guard against
    // empty registrations -- confirm what RouteManager.getRoutes can return here.
    val serviceRoutes = RouteManager.getRoutes(RouteAccessibility.INTERNAL).filter(r => !r.equals(Map.empty))
    (serviceRoutes ++ List(this.baseRoutes, this.staticRoutes)).reduceLeft(_ ~ _)
  }

  override def baseRoutes = {
    get {
      path("favicon.ico") {
        complete(StatusCodes.NoContent)
      } ~
      // Liveness probe: responds with "pong: <UTC timestamp>".
      path("ping") {
        respondPlain {
          complete("pong: ".concat(new DateTime(System.currentTimeMillis(), DateTimeZone.UTC).toString))
        }
      } ~
      // Everything below is restricted by the configured CIDR allow-list.
      cidrFilter {
        pathPrefix("healthcheck") {
          // Minimal load-balancer probe.
          path("lb") {
            respondPlain {
              complete((healthActor ? HealthRequest(HealthResponseType.LB)).mapTo[String])
            }
          } ~
          // Nagios-formatted health string.
          path("nagios") {
            //time(nagiosHealthTimer) {
            respondPlain {
              complete((healthActor ? HealthRequest(HealthResponseType.NAGIOS)).mapTo[String])
            }
            //}
          } ~
          // Full JSON health tree.
          path("full") {
            //time(healthTimer) {
            respondJson {
              complete((healthActor ? HealthRequest(HealthResponseType.FULL)).mapTo[ApplicationHealth])
            }
            //}
          }
        } ~
        // Metrics delegated to the wookiee-metrics component, if present.
        path("metrics") {
          respondJson {
            ctx =>
              componentRequest[StatusRequest, JValue]("wookiee-metrics", ComponentRequest(StatusRequest())) onComplete {
                case Success(s) => ctx.complete(s.resp)
                case Failure(f) => ctx.failWith(f)
              }
          }
        } ~
        // Service metadata: all services, or one by name.
        pathPrefix("services") {
          pathEnd {
            respondJson {
              complete((serviceActor ? GetMetaData(None)).mapTo[Seq[ServiceMetaData]])
            }
          } ~
          path(Segment) {
            (service) =>
              respondJson {
                complete((serviceActor ? GetMetaDataByName(service)).mapTo[ServiceMetaData])
              }
          }
        } ~
        // Cluster state and discovery subscriptions via the wookiee-cluster component.
        pathPrefix("cluster") {
          pathEnd {
            respondJson {
              ctx =>
                val req = ComponentRequest(ClusterState(), Some("cluster"))
                componentRequest[ClusterState, JValue]("wookiee-cluster", req) onComplete {
                  case Success(s) => ctx.complete(s.resp)
                  case Failure(f) => ctx.failWith(f)
                }
            }
          } ~
          path("discovery") {
            respondJson {
              ctx =>
                componentRequest[Subscriptions, JValue]("wookiee-cluster", ComponentRequest(Subscriptions())) onComplete {
                  case Success(s) => ctx.complete(s.resp)
                  case Failure(f) => ctx.failWith(f)
                }
            }
          }
        }
      }
    } ~
    post {
      // Mutating management endpoints, also CIDR-restricted.
      cidrFilter {
        pathPrefix("services") {
          path(Segment / "restart") {
            (service) =>
              respondPlain {
                ctx =>
                  serviceActor ! ServiceManager.RestartService(service)
                  ctx.complete(s"The service $service has been asked to restart")
              }
          }
        } ~
        path("shutdown") {
          respondPlain {
            ctx =>
              // Complete the HTTP response before initiating shutdown.
              ctx.complete("The system is being shutdown: ".concat(new DateTime(System.currentTimeMillis(), DateTimeZone.UTC).toString))
              context.parent ! ShutdownSystem
          }
        } ~
        path("restart") {
          respondPlain {
            ctx =>
              ctx.complete("The actor system is being restarted: ".concat(new DateTime(System.currentTimeMillis(), DateTimeZone.UTC).toString))
              Harness.restartActorSystem
          }
        } ~
        pathPrefix("cluster") {
          path("rejoin") {
            respondPlain {
              ctx =>
                message("wookiee-cluster", ComponentMessage(Rejoin(true), Some("cluster")))
                ctx.complete("The cluster is being rejoined: ".concat(new DateTime(System.currentTimeMillis(), DateTimeZone.UTC).toString))
            }
          }
        }
      }
    }
  }

  /** Serve static assets from the configured root, either from the file
    * system ("file") or from classpath resources ("jar" or anything else). */
  def staticRoutes = {
    val rootPath = context.system.settings.config.getString(SprayManager.KeyStaticRoot)
    context.system.settings.config.getString(SprayManager.KeyStaticType) match {
      case "file" =>
        getFromBrowseableDirectory(rootPath)
      case "jar" =>
        getFromResourceDirectory(rootPath)
      case _ =>
        // Unknown types fall back to classpath resources.
        getFromResourceDirectory(rootPath)
    }
  }
}
| mjwallin1/wookiee-spray | src/main/scala/com/webtrends/harness/component/spray/InternalSprayWorker.scala | Scala | apache-2.0 | 6,138 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import java.util.{Locale, OptionalLong}
import org.apache.commons.lang3.StringUtils
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.IO_WARNING_LARGEFILETHRESHOLD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions.{AttributeSet, Expression, ExpressionSet}
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.connector.read.{Batch, InputPartition, Scan, Statistics, SupportsReportStatistics}
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.execution.PartitionedFileUtil
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.internal.connector.SupportsMetadata
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils
/** Base trait for DataSource V2 file-based scans: lists, splits and bin-packs
  * files into input partitions, and reports size-based statistics and
  * plan-description metadata. */
trait FileScan extends Scan
  with Batch with SupportsReportStatistics with SupportsMetadata with Logging {
  /**
   * Returns whether a file with `path` could be split or not.
   */
  def isSplitable(path: Path): Boolean = {
    false
  }

  def sparkSession: SparkSession

  def fileIndex: PartitioningAwareFileIndex

  def dataSchema: StructType

  /**
   * Returns the required data schema
   */
  def readDataSchema: StructType

  /**
   * Returns the required partition schema
   */
  def readPartitionSchema: StructType

  /**
   * Returns the filters that can be use for partition pruning
   */
  def partitionFilters: Seq[Expression]

  /**
   * Returns the data filters that can be use for file listing
   */
  def dataFilters: Seq[Expression]

  /**
   * If a file with `path` is unsplittable, return the unsplittable reason,
   * otherwise return `None`.
   */
  def getFileUnSplittableReason(path: Path): String = {
    assert(!isSplitable(path))
    "undefined"
  }

  protected def seqToString(seq: Seq[Any]): String = seq.mkString("[", ", ", "]")

  // Normalize the filter expressions against the output attributes so that two
  // scans with semantically-equal filters compare equal in equals() below.
  private lazy val (normalizedPartitionFilters, normalizedDataFilters) = {
    val output = readSchema().toAttributes
    val partitionFilterAttributes = AttributeSet(partitionFilters).map(a => a.name -> a).toMap
    val dataFiltersAttributes = AttributeSet(dataFilters).map(a => a.name -> a).toMap
    val normalizedPartitionFilters = ExpressionSet(partitionFilters.map(
      QueryPlan.normalizeExpressions(_,
        output.map(a => partitionFilterAttributes.getOrElse(a.name, a)))))
    val normalizedDataFilters = ExpressionSet(dataFilters.map(
      QueryPlan.normalizeExpressions(_,
        output.map(a => dataFiltersAttributes.getOrElse(a.name, a)))))
    (normalizedPartitionFilters, normalizedDataFilters)
  }

  // Equality considers the file listing, projected schema and normalized filters;
  // hashCode deliberately uses only the class, which stays consistent with equals.
  override def equals(obj: Any): Boolean = obj match {
    case f: FileScan =>
      fileIndex == f.fileIndex && readSchema == f.readSchema &&
        normalizedPartitionFilters == f.normalizedPartitionFilters &&
        normalizedDataFilters == f.normalizedDataFilters
    case _ => false
  }

  override def hashCode(): Int = getClass.hashCode()

  // Cap on each metadata value rendered in description()/plan output.
  val maxMetadataValueLength = sparkSession.sessionState.conf.maxMetadataStringLength

  override def description(): String = {
    val metadataStr = getMetaData().toSeq.sorted.map {
      case (key, value) =>
        // Redact sensitive values before abbreviating for display.
        val redactedValue =
          Utils.redact(sparkSession.sessionState.conf.stringRedactionPattern, value)
        key + ": " + StringUtils.abbreviate(redactedValue, maxMetadataValueLength)
    }.mkString(", ")
    s"${this.getClass.getSimpleName} $metadataStr"
  }

  override def getMetaData(): Map[String, String] = {
    val locationDesc =
      fileIndex.getClass.getSimpleName +
        Utils.buildLocationMetadata(fileIndex.rootPaths, maxMetadataValueLength)
    Map(
      "Format" -> s"${this.getClass.getSimpleName.replace("Scan", "").toLowerCase(Locale.ROOT)}",
      "ReadSchema" -> readDataSchema.catalogString,
      "PartitionFilters" -> seqToString(partitionFilters),
      "DataFilters" -> seqToString(dataFilters),
      "Location" -> locationDesc)
  }

  // List the selected files, split splittable ones into ranges of at most
  // maxSplitBytes, sort splits largest-first, and bin-pack into FilePartitions.
  protected def partitions: Seq[FilePartition] = {
    val selectedPartitions = fileIndex.listFiles(partitionFilters, dataFilters)
    val maxSplitBytes = FilePartition.maxSplitBytes(sparkSession, selectedPartitions)
    val partitionAttributes = fileIndex.partitionSchema.toAttributes
    val attributeMap = partitionAttributes.map(a => normalizeName(a.name) -> a).toMap
    val readPartitionAttributes = readPartitionSchema.map { readField =>
      attributeMap.get(normalizeName(readField.name)).getOrElse {
        throw QueryCompilationErrors.cannotFindPartitionColumnInPartitionSchemaError(
          readField, fileIndex.partitionSchema)
      }
    }
    lazy val partitionValueProject =
      GenerateUnsafeProjection.generate(readPartitionAttributes, partitionAttributes)
    val splitFiles = selectedPartitions.flatMap { partition =>
      // Prune partition values if part of the partition columns are not required.
      val partitionValues = if (readPartitionAttributes != partitionAttributes) {
        partitionValueProject(partition.values).copy()
      } else {
        partition.values
      }
      partition.files.flatMap { file =>
        val filePath = file.getPath
        PartitionedFileUtil.splitFiles(
          sparkSession = sparkSession,
          file = file,
          filePath = filePath,
          isSplitable = isSplitable(filePath),
          maxSplitBytes = maxSplitBytes,
          partitionValues = partitionValues
        )
      }.toArray.sortBy(_.length)(implicitly[Ordering[Long]].reverse)
    }
    // Warn when a single huge unsplittable file forces a one-partition scan.
    if (splitFiles.length == 1) {
      val path = new Path(splitFiles(0).filePath)
      if (!isSplitable(path) && splitFiles(0).length >
        sparkSession.sparkContext.getConf.get(IO_WARNING_LARGEFILETHRESHOLD)) {
        logWarning(s"Loading one large unsplittable file ${path.toString} with only one " +
          s"partition, the reason is: ${getFileUnSplittableReason(path)}")
      }
    }
    FilePartition.getFilePartitions(sparkSession, splitFiles, maxSplitBytes)
  }

  override def planInputPartitions(): Array[InputPartition] = {
    partitions.toArray
  }

  // Estimate bytes by scaling on-disk size by the compression factor and the
  // ratio of projected to full row width; the row count is unknown.
  override def estimateStatistics(): Statistics = {
    new Statistics {
      override def sizeInBytes(): OptionalLong = {
        val compressionFactor = sparkSession.sessionState.conf.fileCompressionFactor
        val size = (compressionFactor * fileIndex.sizeInBytes /
          (dataSchema.defaultSize + fileIndex.partitionSchema.defaultSize) *
          (readDataSchema.defaultSize + readPartitionSchema.defaultSize)).toLong
        OptionalLong.of(size)
      }

      override def numRows(): OptionalLong = OptionalLong.empty()
    }
  }

  override def toBatch: Batch = this

  override def readSchema(): StructType =
    StructType(readDataSchema.fields ++ readPartitionSchema.fields)

  // Returns whether the two given arrays of [[Filter]]s are equivalent.
  protected def equivalentFilters(a: Array[Filter], b: Array[Filter]): Boolean = {
    a.sortBy(_.hashCode()).sameElements(b.sortBy(_.hashCode()))
  }

  private val isCaseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis

  // Fold names to lower case when the session is case-insensitive.
  private def normalizeName(name: String): String = {
    if (isCaseSensitive) {
      name
    } else {
      name.toLowerCase(Locale.ROOT)
    }
  }
}
| chuckchen/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileScan.scala | Scala | apache-2.0 | 8,220 |
package demo.pages
import com.acework.js.components.bootstrap._
import demo.examples.util.CodeContent
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import scala.scalajs.js.UndefOr
import scala.scalajs.js.UndefOr._
/**
* Created by weiyin on 17/03/15.
*/
/** Demo page for React-Bootstrap navbar components: a basic navbar, a
  * mobile-collapsible navbar, and a collapsible navbar with multiple Nav
  * groups. Each example pairs its source listing with live rendered content. */
object Navbars {

  // Logs the selected nav key to the console; referenced by the source listings.
  val handleSelect = (selectedKey: Seq[UndefOr[String]]) => Console.println(s"selected $selectedKey")

  // Source listing shown next to the basic navbar example.
  val exampleSource =
    """
      |handleSelect = (selectedKey: Seq[UndefOr[String]]) => Console.println(s"selected $selectedKey")
      |NavBar.NavBar(brand = "React-Bootstrap": ReactNode, componentClass = "nav")(
      | Nav.Nav()(
      | NavItem.NavItem(eventKey = "1", href = "#")("Link"),
      | NavItem.NavItem(eventKey = "2", href = "#")("Link"),
      | DropdownButton.DropdownButton(eventKey = "3", title = "Dropdown": ReactNode)(
      | MenuItem.MenuItem(eventKey = "1")("Action"),
      | MenuItem.MenuItem(eventKey = "2")("Action action"),
      | MenuItem.MenuItem(eventKey = "3")("Something else here"),
      | MenuItem.MenuItem(divider = true)(),
      | MenuItem.MenuItem(eventKey = "4")("Separated link")
      | )
      | )
      |)
    """.stripMargin

  // Live rendering matching exampleSource.
  val exampleContent = CodeContent.Content(exampleSource,
    NavBar.NavBar(brand = "React-Bootstrap": ReactNode, componentClass = "nav")(
      Nav.Nav()(
        NavItem.NavItem(eventKey = "1", href = "#")("Link"),
        NavItem.NavItem(eventKey = "2", href = "#")("Link"),
        DropdownButton.DropdownButton(eventKey = "3", title = "Dropdown": ReactNode)(
          MenuItem.MenuItem(eventKey = "1")("Action"),
          MenuItem.MenuItem(eventKey = "2")("Action action"),
          MenuItem.MenuItem(eventKey = "3")("Something else here"),
          MenuItem.MenuItem(divider = true)(),
          MenuItem.MenuItem(eventKey = "4")("Separated link")
        )
      )
    )
  )

  // Source listing for the mobile-friendly (collapsible) navbar example.
  val mobileSource =
    """
      |val handleSelect = (selectedKey: String) => Console.println(s"selected $selectedKey")
      |NavBar.NavBar(brand = "React-Bootstrap": ReactNode, inverse = true, toggleNavKey = "0")(
      | Nav.Nav(right = true, eventKey = "0")(/*This is the eventKey referenced */
      | NavItem.NavItem(eventKey = "1", href = "#")("Link"),
      | NavItem.NavItem(eventKey = "2", href = "#")("Link"),
      | DropdownButton.DropdownButton(eventKey = "3", title = "Dropdown": ReactNode)(
      | MenuItem.MenuItem(eventKey = "1")("Action"),
      | MenuItem.MenuItem(eventKey = "2")("Action action"),
      | MenuItem.MenuItem(eventKey = "3")("Something else here"),
      | MenuItem.MenuItem(divider = true)(),
      | MenuItem.MenuItem(eventKey = "4")("Separated link")
      | )
      | )
      |)
    """.stripMargin

  // Live rendering matching mobileSource.
  val mobileContent = CodeContent.Content(mobileSource,
    NavBar.NavBar(brand = "React-Bootstrap": ReactNode, inverse = true, toggleNavKey = "0")(
      Nav.Nav(right = true, eventKey = "0")(/*This is the eventKey referenced */
        NavItem.NavItem(eventKey = "1", href = "#")("Link"),
        NavItem.NavItem(eventKey = "2", href = "#")("Link"),
        DropdownButton.DropdownButton(eventKey = "3", title = "Dropdown": ReactNode)(
          MenuItem.MenuItem(eventKey = "1")("Action"),
          MenuItem.MenuItem(eventKey = "2")("Action action"),
          MenuItem.MenuItem(eventKey = "3")("Something else here"),
          MenuItem.MenuItem(divider = true)(),
          MenuItem.MenuItem(eventKey = "4")("Separated link")
        )
      )
    )
  )

  // Source listing for the multi-Nav collapsible navbar example.
  val mobileMultipleSource =
    """
      |NavBar.NavBar(brand = "React-Bootstrap": ReactNode, toggleNavKey = "0")(
      | CollapsableNav.CollapsableNav(eventKey = "0")(/*This is the eventKey referenced */
      | Nav.Nav(navbar = true)(
      | NavItem.NavItem(eventKey = "1", href = "#")("Link"),
      | NavItem.NavItem(eventKey = "2", href = "#")("Link"),
      | DropdownButton.DropdownButton(eventKey = "3", title = "Dropdown": ReactNode)(
      | MenuItem.MenuItem(eventKey = "1")("Action"),
      | MenuItem.MenuItem(eventKey = "2")("Action action"),
      | MenuItem.MenuItem(eventKey = "3")("Something else here"),
      | MenuItem.MenuItem(divider = true)(),
      | MenuItem.MenuItem(eventKey = "4")("Separated link")
      | )
      | ),
      | Nav.Nav(right = true, navbar = true)(
      | NavItem.NavItem(eventKey = "1", href = "#")("Link Right"),
      | NavItem.NavItem(eventKey = "2", href = "#")("Link Right")
      | )
      | )
      |)
    """.stripMargin

  // Live rendering matching mobileMultipleSource.
  val mobileMultipleContent = CodeContent.Content(mobileMultipleSource,
    NavBar.NavBar(brand = "React-Bootstrap": ReactNode, toggleNavKey = "0")(
      CollapsableNav.CollapsableNav(eventKey = "0")(/*This is the eventKey referenced */
        Nav.Nav(navbar = true)(
          NavItem.NavItem(eventKey = "1", href = "#")("Link"),
          NavItem.NavItem(eventKey = "2", href = "#")("Link"),
          DropdownButton.DropdownButton(eventKey = "3", title = "Dropdown": ReactNode)(
            MenuItem.MenuItem(eventKey = "1")("Action"),
            MenuItem.MenuItem(eventKey = "2")("Action action"),
            MenuItem.MenuItem(eventKey = "3")("Something else here"),
            MenuItem.MenuItem(divider = true)(),
            MenuItem.MenuItem(eventKey = "4")("Separated link")
          )
        ),
        Nav.Nav(right = true, navbar = true)(
          NavItem.NavItem(eventKey = "1", href = "#")("Link Right"),
          NavItem.NavItem(eventKey = "2", href = "#")("Link Right")
        )
      )
    )
  )

  // Full page content: one Section with a SubSection per example above.
  val content = Section("navbars", <.span("Navbars ", <.small("Navbar, Nav, NavItem"))
    , SubSection("navbars-examples", "Example navbars",
      <.p("You can specify a brand by passing a renderable component or string in ", <.code("brand")),
      <.p("Navbars are by default accessible and will provide ", <.code("role=\\"navigation\\")", ".")),
    <.p("They also supports all the different Bootstrap classes as properties. Just camelCase the css class and remove navbar from it. For example ",
      <.code("navbar-fixed-top"), " becomes the property ", <.code("fixedTop"),
      ". The different properties are ", <.code("fixedTop"), ", ", <.code("fixedBottom"),
      ", ", <.code("staticTop"), ", ", <.code("inverse"), ", ", <.code("fluid"), "."),
    <.p("You can drag elements to the ", <.code("right"), " by specifying the right property on a nav group."),
    exampleContent())
    , SubSection("navbars-mobile", "Mobile Friendly",
      <.p("To have a mobile friendly Navbar, specify the property ", <.code("toggleNavKey"),
        " on the Navbar with a value corresponding to an ", <.code("eventKey"),
        " of one of his Nav children. This child will be the one collapsed."),
      <.p("By setting the property ", <.code("defaultNavExpanded=true"), " the Navbar will start expanded by default."),
      mobileContent())
    , SubSection("navbars-mobile-multiple", "Mobile Friendly (Multiple Nav Components)",
      <.p("To have a mobile friendly Navbar that handles multiple ", <.code("Nav"),
        " components use ", <.code("CollapsableNav"), ". The ", <.code("toggleNavKey"),
        " must still be set, however, the corresponding ", <.code("eventKey"),
        " must now be on the ", <.code("CollapsableNav"), " component."),
      mobileMultipleContent())
  )
}
| lvitaly/scalajs-react-bootstrap | demo/src/main/scala/demo/pages/Navbars.scala | Scala | mit | 7,447 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io.File
import java.util.Properties
import kafka.api.KAFKA_0_11_0_IV0
import kafka.api.{KAFKA_0_10_0_IV1, KAFKA_0_9_0}
import kafka.server.{KafkaConfig, LogOffsetMetadata}
import kafka.server.checkpoints.OffsetCheckpointFile
import kafka.utils._
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.record._
import org.junit.Assert._
import org.junit._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.runners.Parameterized.Parameters
import scala.Seq
import scala.collection._
/**
* This is an integration test that tests the fully integrated log cleaner
*/
@RunWith(value = classOf[Parameterized])
class LogCleanerParameterizedIntegrationTest(compressionCodec: String) extends AbstractLogCleanerIntegrationTest {
// Compression codec under test (from the parameterized runner), a mock clock,
// and three partitions of the "log" topic used by the cleaner fixtures.
val codec: CompressionType = CompressionType.forName(compressionCodec)
val time = new MockTime()
val topicPartitions = Array(new TopicPartition("log", 0), new TopicPartition("log", 1), new TopicPartition("log", 2))
@Test
def cleanerTest() {
  // Size the cleaner's max message size from one deliberately large record
  // so that record is exactly at the limit.
  val largeMessageKey = 20
  val (largeMessageValue, largeMessageSet) = createLargeSingleMessageSet(largeMessageKey, RecordBatch.CURRENT_MAGIC_VALUE)
  val maxMessageSize = largeMessageSet.sizeInBytes
  cleaner = makeCleaner(partitions = topicPartitions, maxMessageSize = maxMessageSize)
  val log = cleaner.logs.get(topicPartitions(0))

  // Write duplicate keys, run the cleaner, and verify the log shrank while
  // still containing the latest value for every key.
  val appends = writeDups(numKeys = 100, numDups = 3, log = log, codec = codec)
  val startSize = log.size
  cleaner.startup()

  val firstDirty = log.activeSegment.baseOffset
  checkLastCleaned("log", 0, firstDirty)
  val compactedSize = log.logSegments.map(_.size).sum
  assertTrue(s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize", startSize > compactedSize)

  checkLogAfterAppendingDups(log, startSize, appends)

  // Append the max-sized message plus another round of duplicates and verify
  // compaction still preserves every key's latest value.
  val appendInfo = log.appendAsLeader(largeMessageSet, leaderEpoch = 0)
  val largeMessageOffset = appendInfo.firstOffset.get
  val dups = writeDups(startKey = largeMessageKey + 1, numKeys = 100, numDups = 3, log = log, codec = codec)
  val appends2 = appends ++ Seq((largeMessageKey, largeMessageValue, largeMessageOffset)) ++ dups
  val firstDirty2 = log.activeSegment.baseOffset
  checkLastCleaned("log", 0, firstDirty2)

  checkLogAfterAppendingDups(log, startSize, appends2)

  // simulate deleting a partition, by removing it from logs
  // force a checkpoint
  // and make sure its gone from checkpoint file
  cleaner.logs.remove(topicPartitions(0))
  cleaner.updateCheckpoints(logDir)
  val checkpoints = new OffsetCheckpointFile(new File(logDir, cleaner.cleanerManager.offsetCheckpointFile)).read()
  // we expect partition 0 to be gone
  assertFalse(checkpoints.contains(topicPartitions(0)))
}
@Test
def testCleansCombinedCompactAndDeleteTopic(): Unit = {
val logProps = new Properties()
val retentionMs: Integer = 100000
logProps.put(LogConfig.RetentionMsProp, retentionMs: Integer)
logProps.put(LogConfig.CleanupPolicyProp, "compact,delete")
def runCleanerAndCheckCompacted(numKeys: Int): (Log, Seq[(Int, String, Long)]) = {
cleaner = makeCleaner(partitions = topicPartitions.take(1), propertyOverrides = logProps, backOffMs = 100L)
val log = cleaner.logs.get(topicPartitions(0))
val messages = writeDups(numKeys = numKeys, numDups = 3, log = log, codec = codec)
val startSize = log.size
log.highWatermarkMetadata = LogOffsetMetadata(log.logEndOffset)
val firstDirty = log.activeSegment.baseOffset
cleaner.startup()
// should compact the log
checkLastCleaned("log", 0, firstDirty)
val compactedSize = log.logSegments.map(_.size).sum
assertTrue(s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize", startSize > compactedSize)
(log, messages)
}
val (log, _) = runCleanerAndCheckCompacted(100)
// should delete old segments
log.logSegments.foreach(_.lastModified = time.milliseconds - (2 * retentionMs))
TestUtils.waitUntilTrue(() => log.numberOfSegments == 1, "There should only be 1 segment remaining", 10000L)
assertEquals(1, log.numberOfSegments)
cleaner.shutdown()
// run the cleaner again to make sure if there are no issues post deletion
val (log2, messages) = runCleanerAndCheckCompacted(20)
val read = readFromLog(log2)
assertEquals("Contents of the map shouldn't change", toMap(messages), toMap(read))
}
@Test
def testCleanerWithMessageFormatV0(): Unit = {
// zstd compression is not supported with older message formats
if (codec == CompressionType.ZSTD)
return
val largeMessageKey = 20
val (largeMessageValue, largeMessageSet) = createLargeSingleMessageSet(largeMessageKey, RecordBatch.MAGIC_VALUE_V0)
val maxMessageSize = codec match {
case CompressionType.NONE => largeMessageSet.sizeInBytes
case _ =>
// the broker assigns absolute offsets for message format 0 which potentially causes the compressed size to
// increase because the broker offsets are larger than the ones assigned by the client
// adding `5` to the message set size is good enough for this test: it covers the increased message size while
// still being less than the overhead introduced by the conversion from message format version 0 to 1
largeMessageSet.sizeInBytes + 5
}
cleaner = makeCleaner(partitions = topicPartitions, maxMessageSize = maxMessageSize)
val log = cleaner.logs.get(topicPartitions(0))
val props = logConfigProperties(maxMessageSize = maxMessageSize)
props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_9_0.version)
log.config = new LogConfig(props)
val appends = writeDups(numKeys = 100, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)
val startSize = log.size
cleaner.startup()
val firstDirty = log.activeSegment.baseOffset
checkLastCleaned("log", 0, firstDirty)
val compactedSize = log.logSegments.map(_.size).sum
assertTrue(s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize", startSize > compactedSize)
checkLogAfterAppendingDups(log, startSize, appends)
val appends2: Seq[(Int, String, Long)] = {
val dupsV0 = writeDups(numKeys = 40, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)
val appendInfo = log.appendAsLeader(largeMessageSet, leaderEpoch = 0)
val largeMessageOffset = appendInfo.firstOffset.get
// also add some messages with version 1 and version 2 to check that we handle mixed format versions correctly
props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_11_0_IV0.version)
log.config = new LogConfig(props)
val dupsV1 = writeDups(startKey = 30, numKeys = 40, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)
val dupsV2 = writeDups(startKey = 15, numKeys = 5, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V2)
appends ++ dupsV0 ++ Seq((largeMessageKey, largeMessageValue, largeMessageOffset)) ++ dupsV1 ++ dupsV2
}
val firstDirty2 = log.activeSegment.baseOffset
checkLastCleaned("log", 0, firstDirty2)
checkLogAfterAppendingDups(log, startSize, appends2)
}
@Test
def testCleaningNestedMessagesWithMultipleVersions(): Unit = {
// zstd compression is not supported with older message formats
if (codec == CompressionType.ZSTD)
return
val maxMessageSize = 192
cleaner = makeCleaner(partitions = topicPartitions, maxMessageSize = maxMessageSize, segmentSize = 256)
val log = cleaner.logs.get(topicPartitions(0))
val props = logConfigProperties(maxMessageSize = maxMessageSize, segmentSize = 256)
props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_9_0.version)
log.config = new LogConfig(props)
// with compression enabled, these messages will be written as a single message containing
// all of the individual messages
var appendsV0 = writeDupsSingleMessageSet(numKeys = 2, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)
appendsV0 ++= writeDupsSingleMessageSet(numKeys = 2, startKey = 3, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0)
props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_10_0_IV1.version)
log.config = new LogConfig(props)
var appendsV1 = writeDupsSingleMessageSet(startKey = 4, numKeys = 2, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)
appendsV1 ++= writeDupsSingleMessageSet(startKey = 4, numKeys = 2, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)
appendsV1 ++= writeDupsSingleMessageSet(startKey = 6, numKeys = 2, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1)
val appends = appendsV0 ++ appendsV1
val startSize = log.size
cleaner.startup()
val firstDirty = log.activeSegment.baseOffset
assertTrue(firstDirty > appendsV0.size) // ensure we clean data from V0 and V1
checkLastCleaned("log", 0, firstDirty)
val compactedSize = log.logSegments.map(_.size).sum
assertTrue(s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize", startSize > compactedSize)
checkLogAfterAppendingDups(log, startSize, appends)
}
@Test
def cleanerConfigUpdateTest() {
val largeMessageKey = 20
val (largeMessageValue, largeMessageSet) = createLargeSingleMessageSet(largeMessageKey, RecordBatch.CURRENT_MAGIC_VALUE)
val maxMessageSize = largeMessageSet.sizeInBytes
cleaner = makeCleaner(partitions = topicPartitions, backOffMs = 1, maxMessageSize = maxMessageSize,
cleanerIoBufferSize = Some(1))
val log = cleaner.logs.get(topicPartitions(0))
writeDups(numKeys = 100, numDups = 3, log = log, codec = codec)
val startSize = log.size
cleaner.startup()
assertEquals(1, cleaner.cleanerCount)
// Verify no cleaning with LogCleanerIoBufferSizeProp=1
val firstDirty = log.activeSegment.baseOffset
val topicPartition = new TopicPartition("log", 0)
cleaner.awaitCleaned(topicPartition, firstDirty, maxWaitMs = 10)
assertTrue("Should not have cleaned", cleaner.cleanerManager.allCleanerCheckpoints.isEmpty)
def kafkaConfigWithCleanerConfig(cleanerConfig: CleanerConfig): KafkaConfig = {
val props = TestUtils.createBrokerConfig(0, "localhost:2181")
props.put(KafkaConfig.LogCleanerThreadsProp, cleanerConfig.numThreads.toString)
props.put(KafkaConfig.LogCleanerDedupeBufferSizeProp, cleanerConfig.dedupeBufferSize.toString)
props.put(KafkaConfig.LogCleanerDedupeBufferLoadFactorProp, cleanerConfig.dedupeBufferLoadFactor.toString)
props.put(KafkaConfig.LogCleanerIoBufferSizeProp, cleanerConfig.ioBufferSize.toString)
props.put(KafkaConfig.MessageMaxBytesProp, cleanerConfig.maxMessageSize.toString)
props.put(KafkaConfig.LogCleanerBackoffMsProp, cleanerConfig.backOffMs.toString)
props.put(KafkaConfig.LogCleanerIoMaxBytesPerSecondProp, cleanerConfig.maxIoBytesPerSecond.toString)
KafkaConfig.fromProps(props)
}
// Verify cleaning done with larger LogCleanerIoBufferSizeProp
val oldConfig = kafkaConfigWithCleanerConfig(cleaner.currentConfig)
val newConfig = kafkaConfigWithCleanerConfig(CleanerConfig(numThreads = 2,
dedupeBufferSize = cleaner.currentConfig.dedupeBufferSize,
dedupeBufferLoadFactor = cleaner.currentConfig.dedupeBufferLoadFactor,
ioBufferSize = 100000,
maxMessageSize = cleaner.currentConfig.maxMessageSize,
maxIoBytesPerSecond = cleaner.currentConfig.maxIoBytesPerSecond,
backOffMs = cleaner.currentConfig.backOffMs))
cleaner.reconfigure(oldConfig, newConfig)
assertEquals(2, cleaner.cleanerCount)
checkLastCleaned("log", 0, firstDirty)
val compactedSize = log.logSegments.map(_.size).sum
assertTrue(s"log should have been compacted: startSize=$startSize compactedSize=$compactedSize", startSize > compactedSize)
}
private def checkLastCleaned(topic: String, partitionId: Int, firstDirty: Long) {
// wait until cleaning up to base_offset, note that cleaning happens only when "log dirty ratio" is higher than
// LogConfig.MinCleanableDirtyRatioProp
val topicPartition = new TopicPartition(topic, partitionId)
cleaner.awaitCleaned(topicPartition, firstDirty)
val lastCleaned = cleaner.cleanerManager.allCleanerCheckpoints(topicPartition)
assertTrue(s"log cleaner should have processed up to offset $firstDirty, but lastCleaned=$lastCleaned",
lastCleaned >= firstDirty)
}
private def checkLogAfterAppendingDups(log: Log, startSize: Long, appends: Seq[(Int, String, Long)]) {
val read = readFromLog(log)
assertEquals("Contents of the map shouldn't change", toMap(appends), toMap(read))
assertTrue(startSize > log.size)
}
private def toMap(messages: Iterable[(Int, String, Long)]): Map[Int, (String, Long)] = {
messages.map { case (key, value, offset) => key -> (value, offset) }.toMap
}
private def readFromLog(log: Log): Iterable[(Int, String, Long)] = {
import JavaConverters._
for (segment <- log.logSegments; deepLogEntry <- segment.log.records.asScala) yield {
val key = TestUtils.readString(deepLogEntry.key).toInt
val value = TestUtils.readString(deepLogEntry.value)
(key, value, deepLogEntry.offset)
}
}
private def writeDupsSingleMessageSet(numKeys: Int, numDups: Int, log: Log, codec: CompressionType,
startKey: Int = 0, magicValue: Byte): Seq[(Int, String, Long)] = {
val kvs = for (_ <- 0 until numDups; key <- startKey until (startKey + numKeys)) yield {
val payload = counter.toString
incCounter()
(key, payload)
}
val records = kvs.map { case (key, payload) =>
new SimpleRecord(key.toString.getBytes, payload.toString.getBytes)
}
val appendInfo = log.appendAsLeader(MemoryRecords.withRecords(magicValue, codec, records: _*), leaderEpoch = 0)
val offsets = appendInfo.firstOffset.get to appendInfo.lastOffset
kvs.zip(offsets).map { case (kv, offset) => (kv._1, kv._2, offset) }
}
}
object LogCleanerParameterizedIntegrationTest {

  /**
   * Supplies one single-element argument array per compression codec name, so the
   * Parameterized runner executes the whole suite once for every compression type.
   */
  @Parameters
  def parameters: java.util.Collection[Array[String]] = {
    val codecNames = new java.util.ArrayList[Array[String]]()
    CompressionType.values.foreach(codec => codecNames.add(Array(codec.name)))
    codecNames
  }
}
| KevinLiLu/kafka | core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala | Scala | apache-2.0 | 15,415 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.