code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package dotty.tools
package dotc
package ast
import dotty.tools.dotc.typer.ProtoTypes.FunProtoTyped
import transform.SymUtils._
import core._
import util.Positions._, Types._, Contexts._, Constants._, Names._, Flags._
import SymDenotations._, Symbols._, StdNames._, Annotations._, Trees._, Symbols._
import Denotations._, Decorators._, DenotTransformers._
import config.Printers._
import typer.Mode
import collection.mutable
import typer.ErrorReporting._
import scala.annotation.tailrec
/** Some creators for typed trees */
object tpd extends Trees.Instance[Type] with TypedTreeInfo {
private def ta(implicit ctx: Context) = ctx.typeAssigner
def Modifiers(sym: Symbol)(implicit ctx: Context): Modifiers = Modifiers(
sym.flags & ModifierFlags,
if (sym.privateWithin.exists) sym.privateWithin.asType.name else tpnme.EMPTY,
sym.annotations map (_.tree))
def Ident(tp: NamedType)(implicit ctx: Context): Ident =
ta.assignType(untpd.Ident(tp.name), tp)
def Select(qualifier: Tree, name: Name)(implicit ctx: Context): Select =
ta.assignType(untpd.Select(qualifier, name), qualifier)
def SelectFromTypeTree(qualifier: Tree, name: Name)(implicit ctx: Context): SelectFromTypeTree =
ta.assignType(untpd.SelectFromTypeTree(qualifier, name), qualifier)
def SelectFromTypeTree(qualifier: Tree, tp: NamedType)(implicit ctx: Context): SelectFromTypeTree =
untpd.SelectFromTypeTree(qualifier, tp.name).withType(tp)
def This(cls: ClassSymbol)(implicit ctx: Context): This =
untpd.This(cls.name).withType(cls.thisType)
def Super(qual: Tree, mix: TypeName, inConstrCall: Boolean, mixinClass: Symbol = NoSymbol)(implicit ctx: Context): Super =
ta.assignType(untpd.Super(qual, mix), qual, inConstrCall, mixinClass)
def Apply(fn: Tree, args: List[Tree])(implicit ctx: Context): Apply =
ta.assignType(untpd.Apply(fn, args), fn, args)
def TypeApply(fn: Tree, args: List[Tree])(implicit ctx: Context): TypeApply =
ta.assignType(untpd.TypeApply(fn, args), fn, args)
def Literal(const: Constant)(implicit ctx: Context): Literal =
ta.assignType(untpd.Literal(const))
def unitLiteral(implicit ctx: Context): Literal =
Literal(Constant(()))
def New(tpt: Tree)(implicit ctx: Context): New =
ta.assignType(untpd.New(tpt), tpt)
def New(tp: Type)(implicit ctx: Context): New = New(TypeTree(tp))
def Pair(left: Tree, right: Tree)(implicit ctx: Context): Pair =
ta.assignType(untpd.Pair(left, right), left, right)
def Typed(expr: Tree, tpt: Tree)(implicit ctx: Context): Typed =
ta.assignType(untpd.Typed(expr, tpt), tpt)
def NamedArg(name: Name, arg: Tree)(implicit ctx: Context) =
ta.assignType(untpd.NamedArg(name, arg), arg)
def Assign(lhs: Tree, rhs: Tree)(implicit ctx: Context): Assign =
ta.assignType(untpd.Assign(lhs, rhs))
def Block(stats: List[Tree], expr: Tree)(implicit ctx: Context): Block =
ta.assignType(untpd.Block(stats, expr), stats, expr)
def maybeBlock(stats: List[Tree], expr: Tree)(implicit ctx: Context): Tree =
if (stats.isEmpty) expr else Block(stats, expr)
def If(cond: Tree, thenp: Tree, elsep: Tree)(implicit ctx: Context): If =
ta.assignType(untpd.If(cond, thenp, elsep), thenp, elsep)
def Closure(env: List[Tree], meth: Tree, tpt: Tree)(implicit ctx: Context): Closure =
ta.assignType(untpd.Closure(env, meth, tpt), meth, tpt)
/** A function def
*
* vparams => expr
*
* gets expanded to
*
* { def $anonfun(vparams) = expr; Closure($anonfun) }
*
* where the closure's type is the target type of the expression (FunctionN, unless
* otherwise specified).
*/
def Closure(meth: TermSymbol, rhsFn: List[List[Tree]] => Tree, targs: List[Tree] = Nil, targetType: Type = NoType)(implicit ctx: Context): Block = {
  // Only materialize a target-type tree when an explicit target type was given;
  // otherwise the closure's type defaults to the corresponding FunctionN.
  val targetTpt = if (targetType.exists) TypeTree(targetType) else EmptyTree
  // Reference to the anonymous method, with explicit type args applied if present.
  val call =
    if (targs.isEmpty) Ident(TermRef(NoPrefix, meth))
    else TypeApply(Ident(TermRef(NoPrefix, meth)), targs)
  // { def $anonfun(...) = rhs; Closure($anonfun) }
  Block(
    DefDef(meth, rhsFn) :: Nil,
    Closure(Nil, call, targetTpt))
}
def CaseDef(pat: Tree, guard: Tree, body: Tree)(implicit ctx: Context): CaseDef =
ta.assignType(untpd.CaseDef(pat, guard, body), body)
def Match(selector: Tree, cases: List[CaseDef])(implicit ctx: Context): Match =
ta.assignType(untpd.Match(selector, cases), cases)
def Return(expr: Tree, from: Tree)(implicit ctx: Context): Return =
ta.assignType(untpd.Return(expr, from))
def Try(block: Tree, cases: List[CaseDef], finalizer: Tree)(implicit ctx: Context): Try =
ta.assignType(untpd.Try(block, cases, finalizer), block, cases)
def Throw(expr: Tree)(implicit ctx: Context): Throw =
ta.assignType(untpd.Throw(expr))
def SeqLiteral(elems: List[Tree])(implicit ctx: Context): SeqLiteral =
ta.assignType(untpd.SeqLiteral(elems), elems)
def SeqLiteral(tpe: Type, elems: List[Tree])(implicit ctx: Context): SeqLiteral =
if (tpe derivesFrom defn.SeqClass) SeqLiteral(elems) else JavaSeqLiteral(elems)
def JavaSeqLiteral(elems: List[Tree])(implicit ctx: Context): SeqLiteral =
ta.assignType(new untpd.JavaSeqLiteral(elems), elems)
def TypeTree(original: Tree)(implicit ctx: Context): TypeTree =
TypeTree(original.tpe, original)
def TypeTree(tp: Type, original: Tree = EmptyTree)(implicit ctx: Context): TypeTree =
untpd.TypeTree(original).withType(tp)
def SingletonTypeTree(ref: Tree)(implicit ctx: Context): SingletonTypeTree =
ta.assignType(untpd.SingletonTypeTree(ref), ref)
def AndTypeTree(left: Tree, right: Tree)(implicit ctx: Context): AndTypeTree =
ta.assignType(untpd.AndTypeTree(left, right), left, right)
def OrTypeTree(left: Tree, right: Tree)(implicit ctx: Context): OrTypeTree =
ta.assignType(untpd.OrTypeTree(left, right), left, right)
// RefinedTypeTree is missing, handled specially in Typer and Unpickler.
def AppliedTypeTree(tycon: Tree, args: List[Tree])(implicit ctx: Context): AppliedTypeTree =
ta.assignType(untpd.AppliedTypeTree(tycon, args), tycon, args)
def ByNameTypeTree(result: Tree)(implicit ctx: Context): ByNameTypeTree =
ta.assignType(untpd.ByNameTypeTree(result), result)
def TypeBoundsTree(lo: Tree, hi: Tree)(implicit ctx: Context): TypeBoundsTree =
ta.assignType(untpd.TypeBoundsTree(lo, hi), lo, hi)
def Bind(sym: TermSymbol, body: Tree)(implicit ctx: Context): Bind =
ta.assignType(untpd.Bind(sym.name, body), sym)
def Alternative(trees: List[Tree])(implicit ctx: Context): Alternative =
ta.assignType(untpd.Alternative(trees), trees)
def UnApply(fun: Tree, implicits: List[Tree], patterns: List[Tree], proto: Type)(implicit ctx: Context): UnApply =
ta.assignType(untpd.UnApply(fun, implicits, patterns), proto)
def ValDef(sym: TermSymbol, rhs: Tree = EmptyTree)(implicit ctx: Context): ValDef =
ta.assignType(untpd.ValDef(sym.name, TypeTree(sym.info), rhs), sym)
def SyntheticValDef(name: TermName, rhs: Tree)(implicit ctx: Context): ValDef =
ValDef(ctx.newSymbol(ctx.owner, name, Synthetic, rhs.tpe.widen, coord = rhs.pos), rhs)
def DefDef(sym: TermSymbol, rhs: Tree = EmptyTree)(implicit ctx: Context): DefDef =
ta.assignType(DefDef(sym, Function.const(rhs) _), sym)
def DefDef(sym: TermSymbol, rhsFn: List[List[Tree]] => Tree)(implicit ctx: Context): DefDef =
polyDefDef(sym, Function.const(rhsFn))
def polyDefDef(sym: TermSymbol, rhsFn: List[Type] => List[List[Tree]] => Tree)(implicit ctx: Context): DefDef = {
val (tparams, mtp) = sym.info match {
case tp: PolyType =>
val tparams = ctx.newTypeParams(sym, tp.paramNames, EmptyFlags, tp.instantiateBounds)
(tparams, tp.instantiate(tparams map (_.typeRef)))
case tp => (Nil, tp)
}
def valueParamss(tp: Type): (List[List[TermSymbol]], Type) = tp match {
case tp @ MethodType(paramNames, paramTypes) =>
def valueParam(name: TermName, info: Type): TermSymbol =
ctx.newSymbol(sym, name, TermParam, info)
val params = (paramNames, paramTypes).zipped.map(valueParam)
val (paramss, rtp) = valueParamss(tp.instantiate(params map (_.termRef)))
(params :: paramss, rtp)
case tp => (Nil, tp.widenExpr)
}
val (vparamss, rtp) = valueParamss(mtp)
val targs = tparams map (_.typeRef)
val argss = vparamss.nestedMap(vparam => Ident(vparam.termRef))
ta.assignType(
untpd.DefDef(
sym.name,
tparams map TypeDef,
vparamss.nestedMap(ValDef(_)),
TypeTree(rtp),
rhsFn(targs)(argss)),
sym)
}
def TypeDef(sym: TypeSymbol)(implicit ctx: Context): TypeDef =
ta.assignType(untpd.TypeDef(sym.name, TypeTree(sym.info)), sym)
/** A class definition tree for `cls`, with the given primary constructor,
 *  body statements, and optional arguments for the superclass constructor.
 *  Builds the parent references (selecting an applicable super constructor
 *  for non-traits), the self type, any type parameters of `cls` not already
 *  defined in `body`, and assembles everything into a typed TypeDef.
 */
def ClassDef(cls: ClassSymbol, constr: DefDef, body: List[Tree], superArgs: List[Tree] = Nil)(implicit ctx: Context): TypeDef = {
  val firstParent :: otherParents = cls.info.parents
  val superRef =
    if (cls is Trait) TypeTree(firstParent)
    else {
      // Pick a constructor of the first parent whose parameter types
      // accept `superArgs` (after instantiating any type parameters).
      def isApplicable(ctpe: Type): Boolean = ctpe match {
        case ctpe: PolyType =>
          isApplicable(ctpe.instantiate(firstParent.argTypes))
        case ctpe: MethodType =>
          (superArgs corresponds ctpe.paramTypes)(_.tpe <:< _)
        case _ =>
          false
      }
      val constr = firstParent.decl(nme.CONSTRUCTOR).suchThat(constr => isApplicable(constr.info))
      New(firstParent, constr.symbol.asTerm, superArgs)
    }
  val parents = superRef :: otherParents.map(TypeTree(_))
  val selfType =
    if (cls.classInfo.selfInfo ne NoType) ValDef(ctx.newSelfSym(cls))
    else EmptyValDef
  def isOwnTypeParam(stat: Tree) =
    (stat.symbol is TypeParam) && stat.symbol.owner == cls
  // Type params of `cls` that are not already spelled out in `body`
  // get synthesized TypeDefs prepended to the template body.
  val bodyTypeParams = body filter isOwnTypeParam map (_.symbol)
  val newTypeParams =
    for (tparam <- cls.typeParams if !(bodyTypeParams contains tparam))
    yield TypeDef(tparam)
  // Reuse a local dummy found in the body, or create a fresh one,
  // to serve as the owner reference of the template.
  val findLocalDummy = new FindLocalDummyAccumulator(cls)
  val localDummy = ((NoSymbol: Symbol) /: body)(findLocalDummy)
    .orElse(ctx.newLocalDummy(cls))
  val impl = untpd.Template(constr, parents, selfType, newTypeParams ++ body)
    .withType(localDummy.nonMemberTermRef)
  ta.assignType(untpd.TypeDef(cls.name, impl), cls)
}
def Import(expr: Tree, selectors: List[untpd.Tree])(implicit ctx: Context): Import =
ta.assignType(untpd.Import(expr, selectors), ctx.newImportSymbol(expr))
def PackageDef(pid: RefTree, stats: List[Tree])(implicit ctx: Context): PackageDef =
ta.assignType(untpd.PackageDef(pid, stats), pid)
def Annotated(annot: Tree, arg: Tree)(implicit ctx: Context): Annotated =
ta.assignType(untpd.Annotated(annot, arg), annot, arg)
// ------ Making references ------------------------------------------------------
/** Can the prefix of reference `tp` be dropped when generating a tree,
 *  i.e. can we emit a bare `Ident` instead of a `Select`?
 *  True for: no prefix; a this-type of a static owner (or, for params and
 *  accessors, of a class the current owner's enclosing class derives from);
 *  a static module prefix; or a Java-static symbol.
 */
def prefixIsElidable(tp: NamedType)(implicit ctx: Context) = {
  def test(implicit ctx: Context) = tp.prefix match {
    case NoPrefix =>
      true
    case pre: ThisType =>
      pre.cls.isStaticOwner ||
      tp.symbol.is(ParamOrAccessor) && ctx.owner.enclosingClass.derivesFrom(pre.cls)
    case pre: TermRef =>
      pre.symbol.is(Module) && pre.symbol.isStatic
    case _ =>
      false
  }
  try test || tp.symbol.is(JavaStatic)
  catch { // See remark in SymDenotations#accessWithin
    case ex: NotDefinedHere => test(ctx.addMode(Mode.FutureDefsOK))
  }
}
/** Does referencing `tp` require a `Select` tree (as opposed to a bare `Ident`)?
 *  Only term references whose prefix cannot be elided do.
 */
def needsSelect(tp: Type)(implicit ctx: Context) = tp match {
  case ref: TermRef if !prefixIsElidable(ref) => true
  case _ => false
}
/** A tree representing the same reference as the given type.
 *  Types become TypeTrees; terms with an elidable prefix become Idents;
 *  otherwise a selection on the (tree form of the) prefix is built.
 */
def ref(tp: NamedType)(implicit ctx: Context): Tree =
  if (tp.isType) TypeTree(tp)
  else if (prefixIsElidable(tp)) Ident(tp)
  else tp.prefix match {
    case pre: SingletonType => singleton(pre).select(tp) // path prefix: build qualifier tree, then select
    case pre => SelectFromTypeTree(TypeTree(pre), tp)    // non-path prefix
  } // no checks necessary
def ref(sym: Symbol)(implicit ctx: Context): Tree =
ref(NamedType(sym.owner.thisType, sym.name, sym.denot))
/** A tree representing the given singleton type as a term.
 *  NOTE(review): the match is not exhaustive over `Type`; callers must pass
 *  a singleton type (TermRef, ThisType, SuperType or ConstantType), otherwise
 *  a MatchError is thrown — confirm this precondition at call sites.
 */
def singleton(tp: Type)(implicit ctx: Context): Tree = tp match {
  case tp: TermRef => ref(tp)
  case tp: ThisType => This(tp.cls)
  case SuperType(qual, _) => singleton(qual)
  case ConstantType(value) => Literal(value)
}
/** A tree representing a `newXYZArray` operation of the right
* kind for the given element type in `typeArg`. No type arguments or
* `length` arguments are given.
*/
def newArray(typeArg: Tree, pos: Position)(implicit ctx: Context): Tree = {
val elemType = typeArg.tpe
val elemClass = elemType.classSymbol
def newArr(kind: String) =
ref(defn.DottyArraysModule).select(s"new${kind}Array".toTermName).withPos(pos)
if (TypeErasure.isUnboundedGeneric(elemType))
newArr("Generic").appliedToTypeTrees(typeArg :: Nil)
else if (elemClass.isPrimitiveValueClass)
newArr(elemClass.name.toString)
else
newArr("Ref").appliedToTypeTrees(
TypeTree(defn.ArrayType(elemType)).withPos(typeArg.pos) :: Nil)
}
// ------ Creating typed equivalents of trees that exist only in untyped form -------
/** new C(args), calling the primary constructor of C */
def New(tp: Type, args: List[Tree])(implicit ctx: Context): Apply =
New(tp, tp.typeSymbol.primaryConstructor.asTerm, args)
/** new C(args), calling given constructor `constr` of C */
def New(tp: Type, constr: TermSymbol, args: List[Tree])(implicit ctx: Context): Apply = {
val targs = tp.argTypes
New(tp withoutArgs targs)
.select(TermRef.withSig(tp.normalizedPrefix, constr))
.appliedToTypes(targs)
.appliedToArgs(args)
}
/** An object def
*
* object obj extends parents { decls }
*
* gets expanded to
*
* <module> val obj = new obj$
* <module> class obj$ extends parents { this: obj.type => decls }
*
* (The following no longer applies:
* What's interesting here is that the block is well typed
* (because class obj$ is hoistable), but the type of the `obj` val is
* not expressible. What needs to happen in general when
* inferring the type of a val from its RHS, is: if the type contains
* a class that has the val itself as owner, then that class
* is remapped to have the val's owner as owner. Remapping could be
* done by cloning the class with the new owner and substituting
* everywhere in the tree. We know that remapping is safe
* because the only way a local class can appear in the RHS of a val is
* by being hoisted outside of a block, and the necessary checks are
* done at this point already.
*
* On the other hand, for method result type inference, if the type of
* the RHS of a method contains a class owned by the method, this would be
* an error.)
*/
def ModuleDef(sym: TermSymbol, body: List[Tree])(implicit ctx: Context): tpd.Thicket = {
  // Expand `object obj` into a module class definition plus a val holding
  // the single instance (see doc comment above).
  val modcls = sym.moduleClass.asClass
  // Reuse the primary constructor if it exists; otherwise create and enter
  // a default one.
  val constrSym = modcls.primaryConstructor orElse ctx.newDefaultConstructor(modcls).entered
  val constr = DefDef(constrSym.asTerm, EmptyTree)
  val clsdef = ClassDef(modcls, constr, body)
  // <module> val obj = new obj$()
  val valdef = ValDef(sym, New(modcls.typeRef).select(constrSym).appliedToNone)
  Thicket(valdef, clsdef)
}
/** The default "zero" value tree for `tpe`: numeric zero for primitive
 *  numeric types, `false` for Boolean, the NUL character for Char, and
 *  `null.asInstanceOf[tpe]` for everything else.
 */
def initValue(tpe: Types.Type)(implicit ctx: Context) = {
  val tpw = tpe.widen
  if (tpw isRef defn.IntClass) Literal(Constant(0))
  else if (tpw isRef defn.LongClass) Literal(Constant(0L))
  else if (tpw isRef defn.BooleanClass) Literal(Constant(false))
  else if (tpw isRef defn.CharClass) Literal(Constant('\u0000')) // fixed: '\\u0000' is not a valid Char literal
  else if (tpw isRef defn.FloatClass) Literal(Constant(0f))
  else if (tpw isRef defn.DoubleClass) Literal(Constant(0d))
  else if (tpw isRef defn.ByteClass) Literal(Constant(0.toByte))
  else if (tpw isRef defn.ShortClass) Literal(Constant(0.toShort))
  else Literal(Constant(null)).select(defn.Any_asInstanceOf).appliedToType(tpe)
}
private class FindLocalDummyAccumulator(cls: ClassSymbol)(implicit ctx: Context) extends TreeAccumulator[Symbol] {
def apply(sym: Symbol, tree: Tree) =
if (sym.exists) sym
else if (tree.isDef) {
val owner = tree.symbol.owner
if (owner.isLocalDummy && owner.owner == cls) owner
else if (owner == cls) foldOver(sym, tree)
else sym
} else foldOver(sym, tree)
}
implicit class modsDeco(mdef: MemberDef)(implicit ctx: Context) extends ModsDeco {
def mods = if (mdef.hasType) Modifiers(mdef.symbol) else mdef.rawMods
}
override val cpy = new TypedTreeCopier
class TypedTreeCopier extends TreeCopier {
def postProcess(tree: Tree, copied: untpd.Tree): copied.ThisTree[Type] =
copied.withTypeUnchecked(tree.tpe)
def postProcess(tree: Tree, copied: untpd.MemberDef): copied.ThisTree[Type] =
copied.withTypeUnchecked(tree.tpe)
override def Select(tree: Tree)(qualifier: Tree, name: Name)(implicit ctx: Context): Select = {
val tree1 = untpd.cpy.Select(tree)(qualifier, name)
tree match {
case tree: Select if (qualifier.tpe eq tree.qualifier.tpe) => tree1.withTypeUnchecked(tree.tpe)
case _ => tree.tpe match {
case tpe: NamedType => tree1.withType(tpe.derivedSelect(qualifier.tpe))
case _ => tree1.withTypeUnchecked(tree.tpe)
}
}
}
override def Apply(tree: Tree)(fun: Tree, args: List[Tree])(implicit ctx: Context): Apply =
ta.assignType(untpd.cpy.Apply(tree)(fun, args), fun, args)
// Note: Reassigning the original type if `fun` and `args` have the same types as before
// does not work here: The computed type depends on the widened function type, not
// the function type itself. A treetransform may keep the function type the
// same but its widened type might change.
override def TypeApply(tree: Tree)(fun: Tree, args: List[Tree])(implicit ctx: Context): TypeApply =
ta.assignType(untpd.cpy.TypeApply(tree)(fun, args), fun, args)
// Same remark as for Apply
override def Literal(tree: Tree)(const: Constant)(implicit ctx: Context): Literal =
ta.assignType(untpd.cpy.Literal(tree)(const))
override def New(tree: Tree)(tpt: Tree)(implicit ctx: Context): New =
ta.assignType(untpd.cpy.New(tree)(tpt), tpt)
override def Pair(tree: Tree)(left: Tree, right: Tree)(implicit ctx: Context): Pair = {
val tree1 = untpd.cpy.Pair(tree)(left, right)
tree match {
case tree: Pair if (left.tpe eq tree.left.tpe) && (right.tpe eq tree.right.tpe) => tree1.withTypeUnchecked(tree.tpe)
case _ => ta.assignType(tree1, left, right)
}
}
override def Typed(tree: Tree)(expr: Tree, tpt: Tree)(implicit ctx: Context): Typed =
ta.assignType(untpd.cpy.Typed(tree)(expr, tpt), tpt)
override def NamedArg(tree: Tree)(name: Name, arg: Tree)(implicit ctx: Context): NamedArg =
ta.assignType(untpd.cpy.NamedArg(tree)(name, arg), arg)
override def Assign(tree: Tree)(lhs: Tree, rhs: Tree)(implicit ctx: Context): Assign =
ta.assignType(untpd.cpy.Assign(tree)(lhs, rhs))
override def Block(tree: Tree)(stats: List[Tree], expr: Tree)(implicit ctx: Context): Block = {
val tree1 = untpd.cpy.Block(tree)(stats, expr)
tree match {
case tree: Block if (expr.tpe eq tree.expr.tpe) => tree1.withTypeUnchecked(tree.tpe)
case _ => ta.assignType(tree1, stats, expr)
}
}
override def If(tree: Tree)(cond: Tree, thenp: Tree, elsep: Tree)(implicit ctx: Context): If = {
val tree1 = untpd.cpy.If(tree)(cond, thenp, elsep)
tree match {
case tree: If if (thenp.tpe eq tree.thenp.tpe) && (elsep.tpe eq tree.elsep.tpe) => tree1.withTypeUnchecked(tree.tpe)
case _ => ta.assignType(tree1, thenp, elsep)
}
}
override def Closure(tree: Tree)(env: List[Tree], meth: Tree, tpt: Tree)(implicit ctx: Context): Closure =
ta.assignType(untpd.cpy.Closure(tree)(env, meth, tpt), meth, tpt)
// Same remark as for Apply
override def Match(tree: Tree)(selector: Tree, cases: List[CaseDef])(implicit ctx: Context): Match = {
val tree1 = untpd.cpy.Match(tree)(selector, cases)
tree match {
case tree: Match if sameTypes(cases, tree.cases) => tree1.withTypeUnchecked(tree.tpe)
case _ => ta.assignType(tree1, cases)
}
}
override def CaseDef(tree: Tree)(pat: Tree, guard: Tree, body: Tree)(implicit ctx: Context): CaseDef = {
val tree1 = untpd.cpy.CaseDef(tree)(pat, guard, body)
tree match {
case tree: CaseDef if (body.tpe eq tree.body.tpe) => tree1.withTypeUnchecked(tree.tpe)
case _ => ta.assignType(tree1, body)
}
}
override def Return(tree: Tree)(expr: Tree, from: Tree)(implicit ctx: Context): Return =
ta.assignType(untpd.cpy.Return(tree)(expr, from))
override def Try(tree: Tree)(expr: Tree, cases: List[CaseDef], finalizer: Tree)(implicit ctx: Context): Try = {
val tree1 = untpd.cpy.Try(tree)(expr, cases, finalizer)
tree match {
case tree: Try if (expr.tpe eq tree.expr.tpe) && (sameTypes(cases, tree.cases)) => tree1.withTypeUnchecked(tree.tpe)
case _ => ta.assignType(tree1, expr, cases)
}
}
override def Throw(tree: Tree)(expr: Tree)(implicit ctx: Context): Throw =
ta.assignType(untpd.cpy.Throw(tree)(expr))
override def SeqLiteral(tree: Tree)(elems: List[Tree])(implicit ctx: Context): SeqLiteral = {
val tree1 = untpd.cpy.SeqLiteral(tree)(elems)
tree match {
case tree: SeqLiteral if sameTypes(elems, tree.elems) => tree1.withTypeUnchecked(tree.tpe)
case _ => ta.assignType(tree1, elems)
}
}
override def Annotated(tree: Tree)(annot: Tree, arg: Tree)(implicit ctx: Context): Annotated = {
val tree1 = untpd.cpy.Annotated(tree)(annot, arg)
tree match {
case tree: Annotated if (arg.tpe eq tree.arg.tpe) && (annot eq tree.annot) => tree1.withTypeUnchecked(tree.tpe)
case _ => ta.assignType(tree1, annot, arg)
}
}
override def If(tree: If)(cond: Tree = tree.cond, thenp: Tree = tree.thenp, elsep: Tree = tree.elsep)(implicit ctx: Context): If =
If(tree: Tree)(cond, thenp, elsep)
override def Closure(tree: Closure)(env: List[Tree] = tree.env, meth: Tree = tree.meth, tpt: Tree = tree.tpt)(implicit ctx: Context): Closure =
Closure(tree: Tree)(env, meth, tpt)
override def CaseDef(tree: CaseDef)(pat: Tree = tree.pat, guard: Tree = tree.guard, body: Tree = tree.body)(implicit ctx: Context): CaseDef =
CaseDef(tree: Tree)(pat, guard, body)
override def Try(tree: Try)(expr: Tree = tree.expr, cases: List[CaseDef] = tree.cases, finalizer: Tree = tree.finalizer)(implicit ctx: Context): Try =
Try(tree: Tree)(expr, cases, finalizer)
}
implicit class TreeOps[ThisTree <: tpd.Tree](val tree: ThisTree) extends AnyVal {
def isValue(implicit ctx: Context): Boolean =
tree.isTerm && tree.tpe.widen.isValueType
def isValueOrPattern(implicit ctx: Context) =
tree.isValue || tree.isPattern
def isValueType: Boolean =
tree.isType && tree.tpe.isValueType
def isInstantiation: Boolean = tree match {
case Apply(Select(New(_), nme.CONSTRUCTOR), _) => true
case _ => false
}
def shallowFold[T](z: T)(op: (T, tpd.Tree) => T) =
new ShallowFolder(op).apply(z, tree)
def deepFold[T](z: T)(op: (T, tpd.Tree) => T) =
new DeepFolder(op).apply(z, tree)
def find[T](pred: (tpd.Tree) => Boolean): Option[tpd.Tree] =
shallowFold[Option[tpd.Tree]](None)((accum, tree) => if (pred(tree)) Some(tree) else accum)
def subst(from: List[Symbol], to: List[Symbol])(implicit ctx: Context): ThisTree =
new TreeTypeMap(substFrom = from, substTo = to).apply(tree)
/** Change owner from `from` to `to`. If `from` is a weak owner, also change its
* owner to `to`, and continue until a non-weak owner is reached.
*/
def changeOwner(from: Symbol, to: Symbol)(implicit ctx: Context): ThisTree = {
def loop(from: Symbol, froms: List[Symbol], tos: List[Symbol]): ThisTree = {
if (from.isWeakOwner && !from.owner.isClass)
loop(from.owner, from :: froms, to :: tos)
else {
//println(i"change owner ${from :: froms}%, % ==> $tos of $tree")
new TreeTypeMap(oldOwners = from :: froms, newOwners = tos).apply(tree)
}
}
loop(from, Nil, to :: Nil)
}
/** After phase `trans`, set the owner of every definition in this tree that was formerly
* owner by `from` to `to`.
*/
def changeOwnerAfter(from: Symbol, to: Symbol, trans: DenotTransformer)(implicit ctx: Context): ThisTree = {
assert(ctx.phase == trans.next)
val traverser = new TreeTraverser {
def traverse(tree: Tree) = tree match {
case tree: DefTree =>
val sym = tree.symbol
if (sym.denot(ctx.withPhase(trans)).owner == from)
sym.copySymDenotation(owner = to).installAfter(trans)
if (sym.isWeakOwner) traverseChildren(tree)
case _ =>
traverseChildren(tree)
}
}
traverser.traverse(tree)
tree
}
def select(name: Name)(implicit ctx: Context): Select =
Select(tree, name)
def select(tp: NamedType)(implicit ctx: Context): Select =
untpd.Select(tree, tp.name).withType(tp)
def select(sym: Symbol)(implicit ctx: Context): Select =
untpd.Select(tree, sym.name).withType(
TermRef.withSigAndDenot(tree.tpe, sym.name.asTermName, sym.signature, sym.denot.asSeenFrom(tree.tpe)))
def selectWithSig(name: Name, sig: Signature)(implicit ctx: Context) =
untpd.SelectWithSig(tree, name, sig)
.withType(TermRef.withSig(tree.tpe, name.asTermName, sig))
def appliedTo(arg: Tree)(implicit ctx: Context): Tree =
appliedToArgs(arg :: Nil)
def appliedTo(arg: Tree, args: Tree*)(implicit ctx: Context): Tree =
appliedToArgs(arg :: args.toList)
def appliedToArgs(args: List[Tree])(implicit ctx: Context): Apply =
Apply(tree, args)
def appliedToArgss(argss: List[List[Tree]])(implicit ctx: Context): Tree =
((tree: Tree) /: argss)(Apply(_, _))
def appliedToNone(implicit ctx: Context): Apply = appliedToArgs(Nil)
def appliedToType(targ: Type)(implicit ctx: Context): Tree =
appliedToTypes(targ :: Nil)
def appliedToTypes(targs: List[Type])(implicit ctx: Context): Tree =
appliedToTypeTrees(targs map (TypeTree(_)))
def appliedToTypeTrees(targs: List[Tree])(implicit ctx: Context): Tree =
if (targs.isEmpty) tree else TypeApply(tree, targs)
def ensureApplied(implicit ctx: Context): Tree =
if (tree.tpe.widen.isParameterless) tree else tree.appliedToNone
def isInstance(tp: Type)(implicit ctx: Context): Tree =
tree.select(defn.Any_isInstanceOf).appliedToType(tp)
def asInstance(tp: Type)(implicit ctx: Context): Tree = {
assert(tp.isValueType, i"bad cast: $tree.asInstanceOf[$tp]")
tree.select(defn.Any_asInstanceOf).appliedToType(tp)
}
def ensureConforms(tp: Type)(implicit ctx: Context): Tree =
if (tree.tpe <:< tp) tree else asInstance(tp)
def and(that: Tree)(implicit ctx: Context): Tree =
tree.select(defn.Boolean_&&).appliedTo(that)
def or(that: Tree)(implicit ctx: Context): Tree =
tree.select(defn.Boolean_||).appliedTo(that)
def becomes(rhs: Tree)(implicit ctx: Context): Tree =
if (tree.symbol is Method) {
val setr = tree match {
case Ident(_) =>
val setter = tree.symbol.setter
assert(setter.exists, tree.symbol.showLocated)
ref(tree.symbol.setter)
case Select(qual, _) => qual.select(tree.symbol.setter)
}
setr.appliedTo(rhs)
}
else Assign(tree, rhs)
// --- Higher order traversal methods -------------------------------
def foreachSubTree(f: Tree => Unit): Unit = { //TODO should go in tpd.
val traverser = new TreeTraverser {
def traverse(tree: Tree) = foldOver(f(tree), tree)
}
traverser.traverse(tree)
}
def existsSubTree(p: Tree => Boolean): Boolean = {
val acc = new TreeAccumulator[Boolean] {
def apply(x: Boolean, t: Tree) = x || p(t) || foldOver(x, t)
}
acc(false, tree)
}
def filterSubTrees(f: Tree => Boolean): List[Tree] = {
val buf = new mutable.ListBuffer[Tree]
foreachSubTree { tree => if (f(tree)) buf += tree }
buf.toList
}
}
implicit class ListOfTreeDecorator(val xs: List[tpd.Tree]) extends AnyVal {
def tpes: List[Type] = xs map (_.tpe)
}
// convert a numeric with a toXXX method
def primitiveConversion(tree: Tree, numericCls: Symbol)(implicit ctx: Context): Tree = {
val mname = ("to" + numericCls.name).toTermName
val conversion = tree.tpe member mname
if (conversion.symbol.exists)
tree.select(conversion.symbol.termRef).ensureApplied
else if (tree.tpe.widen isRef numericCls)
tree
else {
ctx.warning(i"conversion from ${tree.tpe.widen} to ${numericCls.typeRef} will always fail at runtime.")
Throw(New(defn.ClassCastExceptionClass.typeRef, Nil)) withPos tree.pos
}
}
/** Resolve the overloaded `method` on `receiver` against `args`/`targs` and
 *  build the corresponding typed application. Used when reconstructing calls
 *  from bytecode (e.g. annotation arguments), so exactly one alternative must
 *  resolve. When `isAnnotConstructor` is set, a last vararg-array argument
 *  with a more precise element type is wrapped in a Typed node so the tree
 *  type-checks against the expected array type.
 */
def applyOverloaded(receiver: Tree, method: TermName, args: List[Tree], targs: List[Type], expectedType: Type, isAnnotConstructor: Boolean = false)(implicit ctx: Context): Tree = {
  val typer = ctx.typer
  val proto = new FunProtoTyped(args, expectedType, typer)
  val alts = receiver.tpe.member(method).alternatives.map(_.termRef)
  val alternatives = ctx.typer.resolveOverloaded(alts, proto, Nil)
  assert(alternatives.size == 1) // this is parsed from bytecode tree. there's nothing user can do about it
  val selected = alternatives.head
  val fun = receiver
    .select(TermRef.withSig(receiver.tpe.normalizedPrefix, selected.termSymbol.asTerm))
    .appliedToTypes(targs)
  // Adapt the last argument when it is an array with a more precise element
  // type than the annotation constructor expects.
  def adaptLastArg(lastParam: Tree, expectedType: Type) = {
    if (isAnnotConstructor && !(lastParam.tpe <:< expectedType)) {
      val defn = ctx.definitions
      val prefix = args.take(selected.widen.paramTypess.head.size - 1)
      expectedType match {
        case defn.ArrayType(el) =>
          lastParam.tpe match {
            case defn.ArrayType(el2) if (el2 <:< el) =>
              // We have a JavaSeqLiteral with a more precise type than expected.
              // We cannot keep the JavaSeqLiteral inferred to the precise type,
              // so wrap it in a Typed node: that is both type-correct and
              // passes Ycheck.
              prefix ::: List(tpd.Typed(lastParam, TypeTree(defn.ArrayType(el))))
            case _ =>
              ??? // NOTE(review): unhandled shape — last arg does not conform and is not a narrower array
          }
        case _ => args
      }
    } else args
  }
  val callArgs: List[Tree] = if(args.isEmpty) Nil else {
    val expectedType = selected.widen.paramTypess.head.last
    val lastParam = args.last
    adaptLastArg(lastParam, expectedType)
  }
  val apply = untpd.Apply(fun, callArgs)
  new typer.ApplyToTyped(apply, fun, selected, callArgs, expectedType).result.asInstanceOf[Tree] // needed to handle varargs
}
/** Do `trees` and `trees1` have pairwise reference-identical types?
 *  Requires the two lists to have the same length: a length mismatch
 *  yields false.
 */
@tailrec
def sameTypes(trees: List[tpd.Tree], trees1: List[tpd.Tree]): Boolean = {
  if (trees.isEmpty) trees1.isEmpty // fixed: was `trees.isEmpty` (always true here), ignoring leftover elements of `trees1`
  else if (trees1.isEmpty) false
  else (trees.head.tpe eq trees1.head.tpe) && sameTypes(trees.tail, trees1.tail)
}
/** Evaluate `tree` exactly once and make the result available to `within`.
 *  Idempotent expressions are passed through unchanged; anything else is
 *  bound to a fresh synthetic val so its side effects happen only once.
 */
def evalOnce(tree: Tree)(within: Tree => Tree)(implicit ctx: Context) = {
  if (isIdempotentExpr(tree)) within(tree)
  else {
    val vdef = SyntheticValDef(ctx.freshName("ev$").toTermName, tree)
    Block(vdef :: Nil, within(Ident(vdef.namedType)))
  }
}
def runtimeCall(name: TermName, args: List[Tree])(implicit ctx: Context): Tree = {
Ident(defn.ScalaRuntimeModule.requiredMethod(name).termRef).appliedToArgs(args)
}
/** An extractor that pulls out type arguments */
object MaybePoly {
  /** Decompose `tree` into (function, type arguments). Always succeeds:
   *  non-TypeApply trees yield the tree itself with an empty argument list.
   */
  def unapply(tree: Tree): Option[(Tree, List[Tree])] = tree match {
    case TypeApply(fn, targs) => Some((fn, targs)) // explicit tuple: auto-tupling is deprecated
    case _ => Some((tree, Nil))
  }
}
/** A traverser that passes the enclosing class or method as an argument
* to the traverse method.
*/
abstract class EnclosingMethodTraverser(implicit ctx: Context) extends TreeAccumulator[Symbol] {
  /** Callback invoked for every tree with its enclosing method symbol. */
  def traverse(enclMeth: Symbol, tree: Tree): Unit
  def apply(enclMeth: Symbol, tree: Tree) = {
    tree match {
      case _: DefTree if tree.symbol.exists =>
        // Entering a definition: recompute the enclosing method from its symbol.
        traverse(tree.symbol.enclosingMethod, tree)
      case _ =>
        traverse(enclMeth, tree)
    }
    enclMeth
  }
}
// ensure that constructors are fully applied?
// ensure that normal methods are fully applied?
}
| AlexSikia/dotty | src/dotty/tools/dotc/ast/tpd.scala | Scala | bsd-3-clause | 32,790 |
package io.vamp.pulse
import io.vamp.common.ClassMapper
import io.vamp.common.vitals.{ InfoRequest, StatsRequest }
import io.vamp.model.event._
import io.vamp.pulse.Percolator.{ GetPercolator, RegisterPercolator, UnregisterPercolator }
import io.vamp.pulse.notification._
import scala.concurrent.Future
/** Maps the "no-store" pulse driver name to its actor implementation class. */
class NoStorePulseActorMapper extends ClassMapper {
  val name = "no-store"
  val clazz = classOf[NoStorePulseActor]
}
/** Pulse actor implementation that persists nothing: published events are
 *  validated and percolated to registered listeners, then discarded, and
 *  queries answer as if against an empty store.
 *  Fixed: mojibake `β` characters restored to the intended `=>`/`->` arrows,
 *  which had made the file uncompilable.
 */
class NoStorePulseActor extends PulseActor {
  import PulseActor._
  def receive = {
    case InfoRequest => reply(Future.successful(Map[String, Any]("type" -> "no-store")))
    case StatsRequest => reply(Future.successful(None))
    case Publish(event, publishEventValue) => reply((validateEvent andThen percolate(publishEventValue) andThen publish(publishEventValue))(Event.expandTags(event)), classOf[EventIndexError])
    case Query(envelope) => reply((validateEventQuery andThen eventQuery(envelope.page, envelope.perPage))(envelope.request), classOf[EventQueryError])
    case GetPercolator(name) => reply(Future.successful(getPercolator(name)))
    case RegisterPercolator(name, tags, kind, message) => registerPercolator(name, tags, kind, message)
    case UnregisterPercolator(name) => unregisterPercolator(name)
    case any => unsupported(UnsupportedPulseRequest(any))
  }
  // No storage backend: "publishing" just echoes the event back.
  private def publish(publishEventValue: Boolean)(event: Event) = Future.successful(event)
  /** Answers queries against the (empty) store: empty pages and zero aggregates. */
  protected def eventQuery(page: Int, perPage: Int)(query: EventQuery): Future[Any] = {
    log.debug(s"Pulse query: $query")
    query.aggregator match {
      case None => Future.successful(EventResponseEnvelope(Nil, 0, page, perPage))
      case Some(Aggregator(Aggregator.`count`, _)) => Future.successful(LongValueAggregationResult(0))
      case Some(Aggregator(_, _)) => Future.successful(DoubleValueAggregationResult(0))
      case _ => throw new UnsupportedOperationException
    }
  }
}
| dragoslav/vamp | pulse/src/main/scala/io/vamp/pulse/NoStorePulseActor.scala | Scala | apache-2.0 | 2,013 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.check.async
import io.gatling.core.check.extractor.jsonpath.OldJsonPathExtractorFactory
import io.gatling.core.check.extractor.regex.OldRegexExtractorFactory
import io.gatling.core.json.JsonParsers
import scala.concurrent.duration.FiniteDuration
import io.gatling.core.session.Expression
/** Mixes the async-check DSL into user code and adds the implicit conversion
  * that lets an "unfinished" DSL chain be used where a check is expected.
  */
trait AsyncCheckSupport extends AsyncCheckDSL {

  // A CheckTypeStep left without an extraction defaults to `message.find.exists`,
  // i.e. "some message matching the expectation exists".
  implicit def checkTypeStep2Check(step: CheckTypeStep): AsyncCheckBuilder = step.message.find.exists
}
/** Entry points of the async (WebSocket/SSE) check DSL.
  *
  * Checks are built in three steps:
  * `wsListen`/`wsAwait` -> `.within(timeout)` -> `.until`/`.expect(...)` ->
  * an extraction (`regex`, `jsonPath`, `jsonpJsonPath`) or the raw `message`.
  */
trait AsyncCheckDSL {

  // TODO: rename those !
  // Non-blocking variant of the check entry point.
  val wsListen = new TimeoutStep(false)
  // Blocking variant: the scenario waits on the expectation.
  val wsAwait = new TimeoutStep(true)

  /** Step 1: records whether the check blocks (`await`) and takes the timeout. */
  class TimeoutStep(await: Boolean) {
    def within(timeout: FiniteDuration) = new ExpectationStep(await, timeout)
  }

  /** Step 2: records how many matches are expected within the timeout window. */
  class ExpectationStep(await: Boolean, timeout: FiniteDuration) {
    def until(count: Int) = new CheckTypeStep(await, timeout, UntilCount(count))
    def expect(count: Int) = new CheckTypeStep(await, timeout, ExpectedCount(count))
    def expect(range: Range) = new CheckTypeStep(await, timeout, ExpectedRange(range))
  }

  /** Step 3: selects how matches are extracted from incoming messages. */
  class CheckTypeStep(await: Boolean, timeout: FiniteDuration, expectation: Expectation) {

    def regex(expression: Expression[String])(implicit extractorFactory: OldRegexExtractorFactory): AsyncRegexCheckBuilder[String] with AsyncRegexOfType =
      AsyncRegexCheckBuilder.regex(expression, AsyncCheckBuilders.specializer(await, timeout, expectation))

    def jsonPath(path: Expression[String])(implicit extractorFactory: OldJsonPathExtractorFactory, jsonParsers: JsonParsers) =
      AsyncJsonPathCheckBuilder.jsonPath(path, AsyncCheckBuilders.specializer(await, timeout, expectation))

    def jsonpJsonPath(path: Expression[String])(implicit extractorFactory: OldJsonPathExtractorFactory, jsonParsers: JsonParsers) =
      AsyncJsonpJsonPathCheckBuilder.jsonpJsonPath(path, AsyncCheckBuilders.specializer(await, timeout, expectation))

    // Checks the raw message itself, with no extraction applied.
    val message = AsyncPlainCheckBuilder.message(AsyncCheckBuilders.specializer(await, timeout, expectation))
  }
}
| wiacekm/gatling | gatling-http/src/main/scala/io/gatling/http/check/async/AsyncCheckSupport.scala | Scala | apache-2.0 | 2,621 |
/*
* Copyright (c) 2018. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
* Morbi non lorem porttitor neque feugiat blandit. Ut vitae ipsum eget quam lacinia accumsan.
* Etiam sed turpis ac ipsum condimentum fringilla. Maecenas magna.
* Proin dapibus sapien vel ante. Aliquam erat volutpat. Pellentesque sagittis ligula eget metus.
* Vestibulum commodo. Ut rhoncus gravida arcu.
*/
package com.hackerforfuture.codeprototypes.dataloader.metadata
/**
* Created by wallace on 2018/10/8.
*/
/** Kinds of events handled by the data loader. */
object EventType extends Enumeration {

  /** Alias so call sites can refer to the value type as `EventType.EventType`. */
  type EventType = Value

  // Declaration order fixes the ids: ControlEvent = 0, ScheduleEvent = 1, HeartBeatEvent = 2.
  val ControlEvent: Value = Value("ControlEvent")
  val ScheduleEvent: Value = Value("ScheduleEvent")
  val HeartBeatEvent: Value = Value("HeartBeatEvent")
}
| BiyuHuang/CodePrototypesDemo | demo/DataLoader/src/main/scala/com/hackerforfuture/codeprototypes/dataloader/metadata/EventType.scala | Scala | apache-2.0 | 648 |
package org.vegas
import scala.collection.mutable.Map
case class ProgramOptions(val name: String, val version: String, val args: Array[String]) {
  /** Pattern for a short flag such as `-v`. `String.matches` is whole-string
    * anchored, so no explicit `^`/`$` are needed.
    */
  val shortFlag = """-(\w)"""

  /** Pattern for a long flag such as `--verbose` or `--dry-run`. */
  val longFlag = """--([\w-]+)"""

  /** Raw parse of `args`: one (flag, value) pair per token group, where a
    * `None` flag marks a positional argument.
    */
  val parsedFlags = parseFlags(args.toList)

  /** Flag token (dashes included, e.g. "-v", "--name") -> optional value.
    * On duplicate flags the last occurrence wins, matching the previous
    * `toMap`-based implementation.
    */
  val flags = parsedFlags.collect { case (Some(flag), value) => flag -> value }.toMap

  /** Positional (non-flag) arguments, in order of appearance. */
  val arguments = parsedFlags.collect { case (None, value) => value getOrElse "" }

  /** True when `arg` looks like a short (`-x`) or long (`--xyz`) flag.
    * Reuses the public `shortFlag`/`longFlag` patterns instead of duplicating
    * them inline (they were previously declared but never used).
    */
  private def isFlag(arg: String) = (arg matches shortFlag) || (arg matches longFlag)

  /** Splits the token list into (flag, value) pairs.
    *
    * A literal `--` makes every remaining token positional; a flag followed by
    * a non-flag token captures that token as its value; any other token is a
    * positional argument. Not tail-recursive, which is acceptable for
    * command-line-sized input.
    */
  private def parseFlags(program: List[String]): List[Tuple2[Option[String], Option[String]]] = program match {
    case Nil => Nil
    case "--" :: tail => tail map ((None -> Some(_)))
    case flag :: Nil if isFlag(flag) => (Some(flag) -> None) :: Nil
    case argument :: Nil => (None -> Some(argument)) :: Nil
    case flag :: option :: tail if isFlag(flag) && !isFlag(option) => (Some(flag) -> Some(option)) :: parseFlags(tail)
    case flag :: tail if isFlag(flag) => (Some(flag) -> None) :: parseFlags(tail)
    case argument :: tail => (None -> Some(argument)) :: parseFlags(tail)
  }

  /** Flag -> help text shown by `printHelp`. Mutable so `description` can add entries. */
  val descriptions: Map[String, String] = Map()

  /** Runs `f` for its side effect and returns `this` so calls can be chained. */
  protected def chain(f: => Unit) = {
    f
    this
  }

  /** Registers help text for `flag`; returns `this` for chaining. */
  def description(flag: String, desc: String) = chain {
    descriptions += (flag -> desc)
  }

  /** The recorded value for `flag`: `None` if absent, `Some(valueOpt)` if present. */
  def apply(flag: String) = flags get flag

  /** True when `flag` was given on the command line. */
  def hasFlag(flag: String) = flags contains flag

  /** Prints the program banner followed by one usage entry per described flag. */
  def printHelp { println(
    s"$name $version\n" +
    "Usage:\n" +
    descriptions.map { case (flag, text) =>
      //(if (reverseAliases contains flag) (" -" + reverseAliases.get(flag).get + "\n") else "") +
      " --" + flag + "\n" +
      " " + text + "\n\n"
    }.mkString
  )}

  /** Prints the program name and version. */
  def printVersion {
    println(s"$name $version")
  }
}
| rrdelaney/vegas | src/main/scala/org/vegas/ProgramOptions.scala | Scala | mit | 1,999 |
package edu.rice.habanero.benchmarks.threadring
import edu.rice.habanero.actors.{JumiActor, JumiActorState, JumiPool}
import edu.rice.habanero.benchmarks.threadring.ThreadRingConfig.{DataMessage, ExitMessage, PingMessage}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
*/
object ThreadRingJumiActorBenchmark {

  /** Entry point: delegates to the shared benchmark runner. */
  def main(args: Array[String]) {
    BenchmarkRunner.runBenchmark(args, new ThreadRingJumiActorBenchmark)
  }

  private final class ThreadRingJumiActorBenchmark extends Benchmark {

    def initialize(args: Array[String]) {
      ThreadRingConfig.parseArgs(args)
    }

    def printArgInfo() {
      ThreadRingConfig.printArgs()
    }

    /** Builds the ring of actors, wires each one to its successor, then
      * injects the initial ping and waits for the system to terminate.
      */
    def runIteration() {
      val ringSize = ThreadRingConfig.N

      // Create and start every actor in the ring.
      val ring = new Array[JumiActor[AnyRef]](ringSize)
      var i = 0
      while (i < ringSize) {
        val actor = new ThreadRingActor(i, ringSize)
        actor.start()
        ring(i) = actor
        i += 1
      }

      // Tell each actor who its successor is (wrapping at the end).
      ring.indices.foreach { idx =>
        ring(idx).send(new DataMessage(ring((idx + 1) % ringSize)))
      }

      // Kick off the ping round-trips and block until all actors exit.
      ring(0).send(new PingMessage(ThreadRingConfig.R))
      JumiActorState.awaitTermination()
    }

    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double): Unit = {
      if (lastIteration) JumiPool.shutdown()
    }
  }

  private class ThreadRingActor(id: Int, numActorsInRing: Int) extends JumiActor[AnyRef] {

    // Successor in the ring; assigned via a DataMessage before any ping arrives.
    private var nextActor: JumiActor[AnyRef] = null

    override def process(msg: AnyRef) {
      msg match {
        case pm: PingMessage =>
          // Forward the ping while hops remain, otherwise start the exit wave.
          if (pm.hasNext) nextActor.send(pm.next())
          else nextActor.send(new ExitMessage(numActorsInRing))
        case em: ExitMessage =>
          // Propagate the exit around the ring, then stop this actor.
          if (em.hasNext) nextActor.send(em.next())
          exit()
        case dm: DataMessage =>
          nextActor = dm.data.asInstanceOf[JumiActor[AnyRef]]
      }
    }
  }
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/threadring/ThreadRingJumiActorBenchmark.scala | Scala | gpl-2.0 | 2,121 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600a.v3
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalBigDecimal}
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
import uk.gov.hmrc.ct.ct600.v3.calculations.LoansToParticipatorsCalculator
import uk.gov.hmrc.ct.ct600a.v3.retriever.CT600ABoxRetriever
/** CT600A box A20: tax chargeable on loans to participators — the tax due
  * before any relief for loans repaid, released or written off after the end
  * of the period.
  */
case class A20(value: Option[BigDecimal]) extends CtBoxIdentifier(name = "A20 - Tax chargeable on loans - (Tax due before any relief for loans repaid, released, or written off after the end of the period)")
  with CtOptionalBigDecimal
object A20 extends LoansToParticipatorsCalculator {
  /** Derives A20 from box A15, the declared loans to participators, and box CP2. */
  def calculate(fieldValueRetriever: CT600ABoxRetriever, computationsBoxRetriever: ComputationsBoxRetriever): A20 = {
    calculateA20(fieldValueRetriever.a15(), fieldValueRetriever.loansToParticipators(), computationsBoxRetriever.cp2())
  }
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600a/v3/A20.scala | Scala | apache-2.0 | 1,439 |
/**
* Copyright (c) 2007-2011 Eric Torreborre <etorreborre@yahoo.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of
* the Software. Neither the name of specs nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
* TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package org.specs.matcher
import org.specs.specification._
import org.specs._
/** Umbrella specification that groups and runs every matcher spec in one suite. */
object matcherSpecifications extends Specification {
  "Matchers" areSpecifiedBy (
    new beMatcherSpec,
    new eventuallyMatchersSpec,
    new haveMatcherSpec,
    new iterableMatchersSpec,
    new fileMatchersSpec,
    new mapMatchersSpec,
    new objectMatchersSpec,
    new eitherMatchersSpec,
    new patternMatchersSpec,
    new scalacheckMatchersSpec,
    new stringMatchersSpec,
    new varianceSpec
  )
}
| Muki-SkyWalker/specs | src/test/scala/org/specs/matcher/matcherSpecifications.scala | Scala | mit | 1,831 |
package dpla.ingestion3.mappers.providers
import dpla.ingestion3.mappers.utils.Document
import dpla.ingestion3.messages.{IngestMessage, MessageCollector}
import dpla.ingestion3.model._
import dpla.ingestion3.utils.FlatFileIO
import org.json4s.JsonAST._
import org.json4s.jackson.JsonMethods._
import org.scalatest.{BeforeAndAfter, FlatSpec}
/** Mapping tests for the Library of Congress (`loc`) provider.
  *
  * Fixture: `/lc.json` on the test classpath, parsed once and fed to every
  * `LcMapping` extraction method below.
  */
class LcMappingTest extends FlatSpec with BeforeAndAfter {

  implicit val msgCollector: MessageCollector[IngestMessage] = new MessageCollector[IngestMessage]

  // Provider short name; kept for parity with sibling mapping tests.
  val shortName = "loc"
  val jsonString: String = new FlatFileIO().readFileAsString("/lc.json")
  val json: Document[JValue] = Document(parse(jsonString))
  val extractor = new LcMapping

  it should "extract the correct rights" in {
    val expected = Seq("For rights relating to this resource, visit https://www.loc.gov/item/73691632/")
    assert(extractor.rights(json) === expected)
  }

  it should "extract the correct dataProvider" in {
    val expected = Seq(nameOnlyAgent("Library of Congress"))
    assert(extractor.dataProvider(json) === expected)
  }

  it should "extract the correct original id" in {
    val expected = Some("http://www.loc.gov/item/73691632/")
    assert(extractor.originalId(json) == expected)
  }

  it should "extract the correct URL for isShownAt" in {
    val expected = Seq(stringOnlyWebResource("https://www.loc.gov/item/73691632/"))
    assert(extractor.isShownAt(json) === expected)
  }

  it should "extract the correct url for preview" in {
    val expected = Seq(uriOnlyWebResource(URI("http:images.com")))
    assert(extractor.preview(json) === expected)
  }

  // TODO test extraction of other-titles and alternate_title
  it should "extract the correct alternate title" in {
    // Only "other-title" is picked up; "other-titles"/"alternate_title" are exercised by the TODO above.
    val json = Document(parse(
      """{"item": {"other-title": ["alt title"], "other-titles": ["alt title 2"],"alternate_title": ["alt title 3"]}} """))
    assert(extractor.alternateTitle(json) == Seq("alt title"))
  }

  // TODO test correct extraction from `dates` and if both present s
  it should "extract the correct date" in {
    val expected = Seq("1769").map(stringOnlyTimeSpan)
    assert(extractor.date(json) == expected)
  }

  it should "extract the correct description" in {
    val expected = Seq("Scale ca. 1:740,000. Title from verso. Manuscript, pen-and-ink. On verso: No 33. LC Maps of North America, 1750-1789, 1244 Available also through the Library of Congress Web site as a raster image. Vault AACR2", "[1769?]")
    assert(extractor.description(json) == expected)
  }

  it should "extract the correct extent" in {
    val expected = Seq("map, on sheet 39 x 29 cm.")
    assert(extractor.extent(json) == expected)
  }

  // TODO test extraction from [item \ format \ type]
  it should "extract the correct format" in {
    val expected = Seq("map")
    assert(extractor.format(json) == expected)
  }

  it should "extract the correct identifiers" in {
    val expected = Seq("http://www.loc.gov/item/73691632/")
    assert(extractor.identifier(json) == expected)
  }

  it should "extract the correct language" in {
    val expected = Seq("english").map(nameOnlyConcept)
    assert(extractor.language(json) == expected)
  }

  it should "extract the correct location" in {
    val expected = Seq("New jersey", "New york", "New york (state)","United states")
      .map(nameOnlyPlace)
    assert(extractor.place(json) == expected)
  }

  it should "extract the correct title" in {
    val expected = Seq("Lines run in the Jersies for determining boundaries between that Province & New York.")
    assert(extractor.title(json) == expected)
  }

  // TODO Add test for extracting format keys
  it should "extract the correct type" in {
    val expected = Seq("map", "map")
    assert(extractor.`type`(json) == expected)
  }

  it should "create the correct DPLA URI" in {
    val expected = Some(URI("http://dp.la/api/items/af47f0702702b4697cf28868eb7dcea6"))
    assert(extractor.dplaUri(json) === expected)
  }
}
| dpla/ingestion3 | src/test/scala/dpla/ingestion3/mappers/providers/LcMappingTest.scala | Scala | mit | 3,965 |
package notebook.front.widgets.magic
import org.apache.spark.sql.{DataFrame, Row}
import notebook.front.widgets.magic.Implicits._
import notebook.front.widgets.isNumber
/** Adds a `Sampler` instance for Spark `DataFrame`s to the sampling machinery. */
trait ExtraSamplerImplicits {
  import SamplerImplicits.Sampler
  implicit object DFSampler extends Sampler[DataFrame] {
    import org.apache.spark.sql.types.{StructType, StructField,LongType}
    import org.apache.spark.sql.functions._
    //until zipWithIndex will be available on DF
    // Appends a "_sn_index_" Long column holding each row's ordinal position.
    def dfZipWithIndex(df: DataFrame):DataFrame = {
      df.sqlContext.createDataFrame(
        df.rdd.zipWithIndex.map(ln =>
          Row.fromSeq(
            ln._1.toSeq ++ Seq(ln._2)
          )
        ),
        StructType(
          df.schema.fields ++
          Array(StructField("_sn_index_",LongType,false))
        )
      )
    }
    // Keeps the first `max` rows (by original order) and drops the helper index column.
    def apply(df:DataFrame, max:Int):DataFrame = {
      import df.sqlContext.implicits._ // enables the $"..." column syntax below
      val columns = df.columns
      dfZipWithIndex(df).filter($"_sn_index_" < max)
        .select(columns.head, columns.tail: _*)
    }
  }
}
/** Adds `ToPoints` support for Spark `DataFrame`s so they can be rendered by magics. */
trait ExtraMagicImplicits {
  // One render point per row; headers are the DataFrame's column names.
  case class DFPoint(row:Row, df:DataFrame) extends MagicRenderPoint {
    val headers = df.columns.toSeq
    val values = row.toSeq
  }
  implicit object DFToPoints extends ToPoints[DataFrame] {
    import SamplerImplicits.Sampler
    /** Samples up to `max` rows and turns them into render points.
      * A single-string-column frame becomes StringPoints; anything else DFPoints.
      * Single numeric values are re-encoded as (index, value) pairs for plotting.
      */
    def apply(df:DataFrame, max:Int)(implicit sampler:Sampler[DataFrame]):Seq[MagicRenderPoint] = {
      if (df.take(1).nonEmpty) { // cheap non-emptiness probe (avoids a full count)
        val rows = sampler(df, max).collect
        val points = df.schema.toList.map(_.dataType) match {
          // NOTE(review): `i` is a Row here; the direct cast to String looks like it
          // should be `i.getString(0)` — confirm against callers/runtime behaviour.
          case List(x) if x.isInstanceOf[org.apache.spark.sql.types.StringType] => rows.map(i => StringPoint(i.asInstanceOf[String]))
          case _ => rows.map(i => DFPoint(i, df))
        }
        val encoded = points.zipWithIndex.map { case (point, index) => point.values match {
          case List(o) if isNumber(o) => AnyPoint((index, o))
          case _ => point
        }}
        encoded
      } else Nil
    }
    def count(x:DataFrame) = x.count()
    // Uses `unionAll`, the union API available in this Spark version.
    def append(x:DataFrame, y:DataFrame) = x unionAll y
  }
}
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher
import scala.util.matching.Regex
/** Verifies that malformed Cypher queries fail with the expected syntax error
  * message (including line/column position) instead of being accepted.
  *
  * Each case runs a bad query through `execute` and asserts on the first line
  * of the resulting `CypherException` message via the `test` helpers at the
  * bottom of this class.
  */
class SyntaxExceptionTest extends ExecutionEngineFunSuite {
  test("shouldRaiseErrorWhenMissingIndexValue") {
    test(
      "start s = node:index(key=) return s",
      "Invalid input ')': expected whitespace, \"...string...\" or a parameter (line 1, column 26)"
    )
  }

  test("shouldGiveNiceErrorWhenMissingEqualsSign") {
    test(
      "start n=node:customer(id : {id}) return n",
      "Invalid input ':': expected whitespace, comment or '=' (line 1, column 26)"
    )
  }

  test("shouldRaiseErrorWhenMissingIndexKey") {
    test(
      "start s = node:index(=\"value\") return s",
      "Invalid input '=': expected whitespace, an identifier, \"...string...\" or a parameter (line 1, column 22)"
    )
  }

  test("startWithoutNodeOrRel") {
    test(
      "start s return s",
      "Invalid input 'r': expected whitespace, comment or '=' (line 1, column 9)"
    )
  }

  test("shouldRaiseErrorWhenMissingReturnColumns") {
    test(
      "match (s) where id(s) = 0 return",
      "Unexpected end of input: expected whitespace, DISTINCT, '*' or an expression (line 1, column 33)"
    )
  }

  test("shouldRaiseErrorWhenMissingReturn") {
    test(
      "match (s) where id(s) = 0",
      "Query cannot conclude with MATCH (must be RETURN or an update clause) (line 1, column 1)"
    )
  }

  test("shouldComplainAboutWholeNumbers") {
    test(
      "match (s) where id(s) = 0 return s limit -1",
      "Invalid input '-1' is not a valid value, must be a positive integer (line 1, column 42 (offset: 41))"
    )
  }

  test("matchWithoutIdentifierHasToHaveParenthesis") {
    test(
      "match (a) where id(a) = 0 match a--b, --> a return a",
      "Invalid input '-': expected whitespace, comment or a pattern (line 1, column 39)"
    )
  }

  test("matchWithoutIdentifierHasToHaveParenthesis2") {
    test(
      "match (a) where id(a) = 0 match (a) -->, a-->b return a",
      "Invalid input ',': expected whitespace or a node pattern (line 1, column 40)"
    )
  }

  test("shouldComplainAboutAStringBeingExpected") {
    test(
      "start s=node:index(key = value) return s",
      "Invalid input 'v': expected whitespace, comment, \"...string...\" or a parameter (line 1, column 26)"
    )
  }

  test("shortestPathCanNotHaveMinimumDepthDifferentFromZeroOrOne") {
    test(
      "match (a), (b) where id(a) = 0 and id(b) = 1 match p=shortestPath(a-[*2..3]->b) return p",
      "shortestPath(...) does not support a minimal length different from 0 or 1 (line 1, column 54)"
    )
  }

  test("shortestPathCanNotHaveMultipleLinksInIt") {
    test(
      "match (a), (b) where id(a) = 0 and id(b) = 1 match p=shortestPath(a-->()-->b) return p",
      "shortestPath(...) requires a pattern containing a single relationship (line 1, column 54)"
    )
  }

  test("oldNodeSyntaxGivesHelpfulError") {
    test(
      "start a=(0) return a",
      "Invalid input '(': expected whitespace, NODE or RELATIONSHIP (line 1, column 9)"
    )
  }

  test("weirdSpelling") {
    test(
      "start a=ndoe(0) return a",
      "Invalid input 'd': expected 'o/O' (line 1, column 10)"
    )
  }

  test("unclosedParenthesis") {
    test(
      "start a=node(0 return a",
      "Invalid input 'r': expected whitespace, comment, ',' or ')' (line 1, column 16)"
    )
  }

  test("trailingComa") {
    test(
      "start a=node(0,1,) return a",
      "Invalid input ')': expected whitespace or an unsigned integer (line 1, column 18)"
    )
  }

  test("unclosedCurly") {
    test(
      "start a=node({0) return a",
      "Invalid input ')': expected whitespace or '}' (line 1, column 16)"
    )
  }

  test("twoEqualSigns") {
    test(
      "start a==node(0) return a",
      "Invalid input '=' (line 1, column 9)"
    )
  }

  test("forgetByInOrderBy") {
    test(
      "match (a) where id(a) = 0 return a order a.name",
      "Invalid input 'a': expected whitespace, comment or BY (line 1, column 42)"
    )
  }

  test("unknownFunction") {
    test(
      "match (a) where id(a) = 0 return foo(a)",
      "Unknown function 'foo' (line 1, column 34)"
    )
  }

  test("usingRandomFunctionInAggregate") {
    test(
      "match (a) where id(a) = 0 return count(rand())",
      "Can't use non-deterministic (random) functions inside of aggregate functions."
    )
  }

  test("handlesMultiLineQueries") {
    test(
      """start
         a=node(0),
         b=node(0),
         c=node(0),
         d=node(0),
         e=node(0),
         f=node(0),
         g=node(0),
         s=node:index(key = value) return s""",
      "Invalid input 'v': expected whitespace, comment, \"...string...\" or a parameter (line 9, column 29)"
    )
  }

  test("createNodeWithout") {
    test(
      """start
         a=node(0),
         b=node(0),
         c=node(0),
         d=node(0),
         e=node(0),
         f=node(0),
         g=node(0),
         s=node:index(key = value) return s""",
      "Invalid input 'v': expected whitespace, comment, \"...string...\" or a parameter (line 9, column 29)"
    )
  }

  test("shouldRaiseErrorForInvalidHexLiteral") {
    test(
      "return 0x23G34",
      "invalid literal number (line 1, column 8)"
    )
    test(
      "return 0x23j",
      "invalid literal number (line 1, column 8)"
    )
  }

  /** Runs `query` expecting it to fail; asserts the first line of the error
    * message starts with `message`. `message.init` drops the expected text's
    * final character — presumably to tolerate a minor trailing difference.
    */
  def test(query: String, message: String) {
    try {
      execute(query)
      fail(s"Did not get the expected syntax error, expected: $message")
    } catch {
      case x: CypherException => {
        val actual = x.getMessage.lines.next.trim
        actual should startWith(message.init)
      }
    }
  }

  /** Regex variant of the helper: the first line of the error message must
    * contain a match for `messageRegex`.
    */
  def test(query: String, messageRegex: Regex) {
    try {
      execute(query)
      fail(s"Did not get the expected syntax error, expected matching: '$messageRegex'")
    } catch {
      case x: CypherException =>
        val actual = x.getMessage.lines.next().trim
        messageRegex findFirstIn actual match {
          case None => fail(s"Expected matching '$messageRegex', but was '$actual'")
          case Some(_) => ()
        }
    }
  }
}
| HuangLS/neo4j | community/cypher/cypher/src/test/scala/org/neo4j/cypher/SyntaxExceptionTest.scala | Scala | apache-2.0 | 6,857 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.converters.source
import java.nio.charset.Charset
import java.util
import java.util.Collections
import com.datamountaineer.streamreactor.connect.converters.MsgKey
import org.apache.kafka.connect.data._
import org.apache.kafka.connect.source.SourceRecord
/** Source converter that parses a JSON payload into a Connect schema/value pair.
  *
  * When `keys` are given and the payload converts to a Struct, the record key
  * is built by extracting those (dot-separated) fields and joining them with
  * `keyDelimiter`; otherwise the key is a `MsgKey` struct of
  * (source topic, message id).
  */
class JsonSimpleConverter extends Converter {
  override def convert(kafkaTopic: String,
                       sourceTopic: String,
                       messageId: String,
                       bytes: Array[Byte],
                       keys:Seq[String] = Seq.empty,
                       keyDelimiter:String = ".",
                       properties: Map[String, String] = Map.empty): SourceRecord = {
    require(bytes != null, s"Invalid $bytes parameter")
    // Decodes with the JVM default charset — NOTE(review): UTF-8 may be intended; confirm.
    val json = new String(bytes, Charset.defaultCharset)
    val schemaAndValue = JsonSimpleConverter.convert(sourceTopic, json)
    val value = schemaAndValue.value()
    value match {
      case s:Struct if keys.nonEmpty =>
        // Key fields that resolve to null are silently dropped from the composite key.
        val keysValue = keys.flatMap { key =>
          Option(KeyExtractor.extract(s, key.split('.').toVector)).map(_.toString)
        }.mkString(keyDelimiter)

        new SourceRecord(Collections.singletonMap(Converter.TopicKey, sourceTopic),
          null,
          kafkaTopic,
          Schema.STRING_SCHEMA,
          keysValue,
          schemaAndValue.schema(),
          schemaAndValue.value())
      case _=>
        new SourceRecord(Collections.singletonMap(Converter.TopicKey, sourceTopic),
          null,
          kafkaTopic,
          MsgKey.schema,
          MsgKey.getStruct(sourceTopic, messageId),
          schemaAndValue.schema(),
          schemaAndValue.value())
    }
  }
}
object JsonSimpleConverter {
  import org.json4s._
  import org.json4s.native.JsonMethods._

  /** Parses `str` as JSON, then converts the AST to a Connect schema/value. */
  def convert(name: String, str: String): SchemaAndValue = convert(name, parse(str))

  /** Recursively maps a json4s AST node onto Connect schemas and values.
    * `name` becomes the struct schema's name for objects ("/" replaced by "_").
    */
  def convert(name: String, value: JValue): SchemaAndValue = {
    value match {
      case JArray(arr) =>
        // Array schema is inferred from the FIRST element — assumes a homogeneous
        // array and a non-empty one (arr.head); TODO confirm both hold upstream.
        val values = new util.ArrayList[AnyRef]()
        val sv = convert(name, arr.head)
        values.add(sv.value())
        arr.tail.foreach { v => values.add(convert(name, v).value()) }

        val schema = SchemaBuilder.array(sv.schema()).optional().build()
        new SchemaAndValue(schema, values)

      case JBool(b) => new SchemaAndValue(Schema.BOOLEAN_SCHEMA, b)
      case JDecimal(d) =>
        val schema = Decimal.builder(d.scale).optional().build()
        new SchemaAndValue(schema, Decimal.fromLogical(schema, d.bigDecimal))

      case JDouble(d) => new SchemaAndValue(Schema.FLOAT64_SCHEMA, d)
      case JInt(i) => new SchemaAndValue(Schema.INT64_SCHEMA, i.toLong) //on purpose! LONG (we might get later records with long entries)
      case JLong(l) => new SchemaAndValue(Schema.INT64_SCHEMA, l)
      // NOTE(review): a null value paired with the non-optional string schema —
      // an optional schema may be intended here; verify with downstream sinks.
      case JNull | JNothing => new SchemaAndValue(Schema.STRING_SCHEMA, null)
      case JString(s) => new SchemaAndValue(Schema.STRING_SCHEMA, s)
      case JObject(values) =>
        val builder = SchemaBuilder.struct().name(name.replace("/", "_"))

        // Duplicate field names collapse (last one wins) because of .toMap.
        val fields = values.map { case (n, v) =>
          val schemaAndValue = convert(n, v)
          builder.field(n, schemaAndValue.schema())
          n -> schemaAndValue.value()
        }.toMap

        val schema = builder.build()
        val struct = new Struct(schema)
        fields.foreach { case (field, v) => struct.put(field, v) }
        new SchemaAndValue(schema, struct)
    }
  }
}
| datamountaineer/kafka-connect-common | src/main/scala/com/datamountaineer/streamreactor/connect/converters/source/JsonSimpleConverter.scala | Scala | apache-2.0 | 4,069 |
package scalacss
import utest._
import TestUtil._
/** Tests for the `c"..."` colour string-interpolator macro.
  *
  * Valid literals must compile and normalise to the expected CSS value;
  * invalid literals must fail compilation (checked via utest's `compileError`).
  */
object ColorTest extends TestSuite {
  import Macros.Color
  import Dsl._

  override def tests = TestSuite {

    'valid {
      // A valid literal compiles; its rendered CSS value must equal `expect`.
      def test(c: Color, expect: String) =
        assertEq(c.value, expect)
      'hex3 - test(c"#f09" , "#f09")
      'hex6 - test(c"#abc105", "#abc105")
      'rgbI - test(c"rgb(0,128,255)", "rgb(0,128,255)")
      'rgbP - test(c"rgb(0%,50%,100%)", "rgb(0%,50%,100%)")
      'rgba - test(c"rgba(255,128,0,0)", "rgba(255,128,0,0)")
      'hsl - test(c"hsl(359,0%,100%)", "hsl(359,0%,100%)")
      'hsla - test(c"hsla(0,100%,0%,1)", "hsla(0,100%,0%,1)")
      'hexU - test(c"#ABC", "#abc")
      'rgbU - test(c"RGB(0,128,255)", "rgb(0,128,255)")
      'whitespace - test(c" rgba ( 255 , 128 , 0 , 0 ) ", "rgba(255,128,0,0)")
      'alphaDec0 - test(c"hsla(0,100%,0%,0.918)", "hsla(0,100%,0%,0.918)")
      'alphaDecZ - test(c"hsla(0,100%,0%,.543)", "hsla(0,100%,0%,.543)")
    }

    'invalid {
      // An invalid literal must not compile at all.
      def assertFailure(e: CompileError) = ()
      // The compile error must additionally mention `frag`.
      def assertErrorContains(e: CompileError, frag: String): Unit = {
        val err = e.msg
        assert(err contains frag)
      }

      'hex {
        def test(e: CompileError): Unit = assertErrorContains(e, "Hex notation must be either")
        "0" - test(compileError(""" c"#" """))
        "1" - test(compileError(""" c"#f" """))
        "2" - test(compileError(""" c"#00" """))
        "4" - test(compileError(""" c"#1234" """))
        "5" - test(compileError(""" c"#12345" """))
        "7" - test(compileError(""" c"#1234567" """))
        "g" - test(compileError(""" c"#00g" """))
        "G" - test(compileError(""" c"#G00" """))
      }

      "empty" - assertFailure(compileError( """ c"" """))
      "blank" - assertFailure(compileError( """ c" " """))
      "badFn" - assertFailure(compileError( """ c"rbg(0,0,0)" """))
      "two" - assertFailure(compileError( """ c"#fed #fed" """))

      'numbers {
        "r-1" - assertErrorContains(compileError(""" c"rgb(-1,0,0)" """), "Invalid red value")
        "r256" - assertErrorContains(compileError(""" c"rgb(256,0,0)" """), "Invalid red value")
        "g256" - assertErrorContains(compileError(""" c"rgb(0,256,0)" """), "Invalid green value")
        "b256" - assertErrorContains(compileError(""" c"rgb(0,0,256)" """), "Invalid blue value")
        "a2" - assertErrorContains(compileError(""" c"rgba(0,0,0,2)" """), "Invalid alpha value")
        "a1.1" - assertErrorContains(compileError(""" c"rgba(0,0,0,1.1)" """), "Invalid alpha value")
        "r101%" - assertErrorContains(compileError(""" c"rgb(101%,0%,0%)" """), "Invalid red value")
        "g101%" - assertErrorContains(compileError(""" c"rgb(0%,101%,0%)" """), "Invalid green value")
        "b101%" - assertErrorContains(compileError(""" c"rgb(0%,0%,101%)" """), "Invalid blue value")
        "dbl" - assertFailure (compileError(""" c"rgb(2.5,0,0)" """))
        "str" - assertFailure (compileError(""" c"rgb(x,0,0)" """))
        "empty" - assertFailure (compileError(""" c"rgb(0,,0)" """))
        "mixed" - assertFailure (compileError(""" c"rbg(0,0%,0)" """))
      }
    }
  }
}
| LMnet/scalacss | core/src/test/scala/scalacss/ColorTest.scala | Scala | apache-2.0 | 3,208 |
/*
* Copyright 2013 Akiyoshi Sugiki, University of Tsukuba
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kumoi.shell.swap
import kumoi.shell.aaa._
import kumoi.shell.cache._
import java.rmi._
/**
* @author Akiyoshi Sugiki
*/
/** Remote (RMI) interface for objects whose implementation can be hot-swapped at runtime. */
@remote trait HotSwapObjectSupport extends Remote {
  // Performs the swap under the caller's credentials. Carries the cache-control
  // markers from kumoi.shell.cache (@invalidate, @nocache) — presumably so any
  // cached state for this object is dropped once the implementation changes.
  @invalidate @nocache def hotswap(implicit auth: AAA)
}
import stainless.lang._
import stainless.annotation._
import stainless.collection._
/** Stainless verification benchmark: a small +2/-2 counter automaton.
  *
  * Starting from state 0, each Plus adds 2 (while state <= 4) and each Minus
  * subtracts 2 (while state >= 2), so the state stays even and within 0..6 —
  * exactly the invariant set G3 proved by the loop invariant below.
  */
object LoopInv {
  sealed abstract class Input
  case object Plus extends Input
  case object Minus extends Input

  val empty = Nil[Input]()

  // Candidate state sets; only G3 is referenced by the loop invariant below.
  val S = Set(0, 1, 2, 3, 4, 5, 6)
  val G1 = Set(0, 1, 2, 4, 5, 6)
  val G2 = Set(4, 5, 6)
  val G3 = Set(0, 2, 4, 6)

  def check(inputs: List[Input]) = {
    var remains = inputs
    var left = true
    var state = 0

    (while (left) {
      remains match {
        case Nil() =>
          left = false
        case Cons(input, _) =>
          input match {
            case Plus if state <= 4 => state += 2   // saturates at 6
            case Minus if state >= 2 => state -= 2  // saturates at 0
            case _ => ()                            // out-of-range input: no-op
          }
          remains = remains.tail
      }
    }) invariant (G3.contains(state))
  }
}
| epfl-lara/stainless | frontends/benchmarks/imperative/valid/LoopInv.scala | Scala | apache-2.0 | 844 |
package org.faker
import org.scalatest.{FlatSpec, Matchers}
/** Checks that every `Name` generator produces a valid result, using the
  * shared `validResult` behaviour from `FakerBehaviors`.
  */
class NameSpec extends FlatSpec with Matchers with FakerBehaviors {
  "name" should behave like validResult(Name.name)

  "firstName" should behave like validResult(Name.firstName)

  "lastName" should behave like validResult(Name.lastName)

  "prefix" should behave like validResult(Name.prefix)

  "suffix" should behave like validResult(Name.suffix)

  "title" should behave like validResult(Name.jobTitle)
}
| ralli/faker_scala | src/test/scala/org/faker/NameSpec.scala | Scala | bsd-3-clause | 475 |
package net.kemuridama.kafcon.model
/** Snapshot of a Kafka topic belonging to a monitored cluster. */
case class Topic(
  name: String,                // topic name
  clusterId: Int,              // identifier of the owning cluster
  brokers: List[Int],          // broker ids associated with this topic
  replicationFactor: Int,      // configured replication factor
  messageCount: Long,          // message count — presumably summed over partitions; verify at the producer of this value
  partitions: List[Partition]  // per-partition detail
)
| kemuridama/kafcon | src/main/scala/net/kemuridama/kafcon/model/Topic.scala | Scala | mit | 191 |
/*
* Copyright (C) 2017 HAT Data Exchange Ltd
* SPDX-License-Identifier: AGPL-3.0
*
* This file is part of the Hub of All Things project (HAT).
*
* HAT is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License
* as published by the Free Software Foundation, version 3 of
* the License.
*
* HAT is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General
* Public License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*
* Written by Andrius Aucinas <andrius.aucinas@hatdex.org>
* 11 / 2017
*/
package org.hatdex.hat.api.controllers
import com.mohiva.play.silhouette.test._
import org.hatdex.hat.api.HATTestContext
import org.hatdex.hat.api.json.HatJsonFormats
import org.hatdex.hat.api.models.{ HatStatus, StatusKind }
import org.specs2.concurrent.ExecutionEnv
import org.specs2.mock.Mockito
import org.specs2.specification.BeforeAll
import play.api.Logger
import play.api.test.{ FakeRequest, PlaySpecification }
import scala.concurrent.Await
import scala.concurrent.duration._
/** Specs for the SystemStatus controller: database bootstrap (`update`) and
  * system-status reporting (`status`). */
class SystemStatusSpec(implicit ee: ExecutionEnv) extends PlaySpecification with Mockito with HATTestContext with BeforeAll with HatJsonFormats {

  val logger = Logger(this.getClass)

  // Specs share server-side state (login history), so execution order must be fixed.
  sequential

  // Block until the test database schema is provisioned before any spec runs.
  def beforeAll: Unit = {
    Await.result(databaseReady, 60.seconds)
  }

  "The `update` method" should {
    "Return success response after updating HAT database" in {
      val request = FakeRequest("GET", "http://hat.hubofallthings.net")
      val controller = application.injector.instanceOf[SystemStatus]

      val result = controller.update().apply(request)

      status(result) must equalTo(OK)
      (contentAsJson(result) \\ "message").as[String] must be equalTo "Database updated"
    }
  }

  "The `status` method" should {
    "Return current utilisation" in {
      // Authenticated request as the HAT owner.
      val request = FakeRequest("GET", "http://hat.hubofallthings.net")
        .withAuthenticator(owner.loginInfo)

      val controller = application.injector.instanceOf[SystemStatus]
      val result = controller.status().apply(request)

      status(result) must equalTo(OK)
      val stats = contentAsJson(result).as[List[HatStatus]]

      // No login has happened yet in this spec, hence "Never".
      stats.length must be greaterThan 0
      stats.find(_.title == "Previous Login").get.kind must be equalTo StatusKind.Text("Never", None)
      stats.find(_.title == "Owner Email").get.kind must be equalTo StatusKind.Text("user@hat.org", None)
      stats.find(_.title == "Database Storage").get.kind must haveClass[StatusKind.Numeric]
      stats.find(_.title == "File Storage").get.kind must haveClass[StatusKind.Numeric]
      stats.find(_.title == "Database Storage Used").get.kind must haveClass[StatusKind.Numeric]
      stats.find(_.title == "File Storage Used").get.kind must haveClass[StatusKind.Numeric]
      stats.find(_.title == "Database Storage Used Share").get.kind must haveClass[StatusKind.Numeric]
      stats.find(_.title == "File Storage Used Share").get.kind must haveClass[StatusKind.Numeric]
    }

    "Return last login information when present" in {
      // Credentials request used to record logins before querying status.
      val authRequest = FakeRequest("GET", "http://hat.hubofallthings.net")
        .withHeaders("username" -> "hatuser", "password" -> "pa55w0rd")

      val authController = application.injector.instanceOf[Authentication]

      val request = FakeRequest("GET", "http://hat.hubofallthings.net")
        .withAuthenticator(owner.loginInfo)

      val controller = application.injector.instanceOf[SystemStatus]

      val result = for {
        _ <- authController.accessToken().apply(authRequest)
        // login twice - the second login is considered "current", not previous
        _ <- authController.accessToken().apply(authRequest)
        r <- controller.status().apply(request)
      } yield r

      status(result) must equalTo(OK)
      val stats = contentAsJson(result).as[List[HatStatus]]

      stats.length must be greaterThan 0
      stats.find(_.title == "Previous Login").get.kind must be equalTo StatusKind.Text("moments ago", None)
    }
  }
}
| Hub-of-all-Things/HAT2.0 | hat/test/org/hatdex/hat/api/controllers/SystemStatusSpec.scala | Scala | agpl-3.0 | 4,284 |
package nl.rabobank.oss.rules.dsl.core.projections
import nl.rabobank.oss.rules.dsl.nl.grammar.{DslCondition, DslEvaluation}
import nl.rabobank.oss.rules.engine.{ProjectionEvaluation, ProjectionListEvaluation, SingularFactEvaluation, _}
import nl.rabobank.oss.rules.facts.{Fact, ListFact}
/**
* Domain objects in the DSL can allow access to their fields through projections.
* This trait signals the support of field projections and provides a convenience
* method to create these projections.
*
* Here's an example:
*
* {{{
* case class Person(val name: String)
*
* class PersonFieldProjections(personFact: SingularFact[Person]) extends ProjectableFields[Person] {
* def outerFact: Fact[Person] = personFact
*
* def name: DslEvaluation[String] = projectField( _.name )
* }
*
* object PersonImplicits {
* implicit def toPersonFieldProjections(personFact: SingularFact[Person]): PersonFieldProjections = new PersonFieldProjections(personFact)
* }
* }}}
*
* With these elements in place, you can import the PersonImplicits._ where you want to use it in your
* DSL and you can refer to the `name` field of any `Fact` of type `Person`.
*
* @tparam C type from which the field(s) can be projected.
* @author Jan-Hendrik Kuperus (jan-hendrik@scala-rules.org)
*/
trait ProjectableFields[C] {

  /** The fact whose runtime value the projections read from; supplied by the
    * implementing class. */
  protected def outerFact: Fact[C]

  /** Builds a [[DslEvaluation]] that, at runtime, applies `f` to the value of
    * [[outerFact]] and yields the projected field.
    *
    * @param f  projects the fact's value to the field of type F.
    * @tparam F type of the projected field, and of the resulting evaluation.
    * @return   a DslEvaluation producing the projected field's value.
    */
  protected def projectField[F](f: C => F): DslEvaluation[F] = {
    val filledCondition = DslCondition.factFilledCondition(outerFact)
    val factEvaluation = new SingularFactEvaluation[C](outerFact)
    DslEvaluation(filledCondition, new ProjectionEvaluation[C, F](factEvaluation, f))
  }
}
/**
* Domain objects in the DSL can allow access to their fields through projections.
* This trait signals the support of field projections and provides a convenience
* method to create these projections. This trait is meant for Lists of objects to project
* a List of traits back.
*
* Here's an example:
*
* {{{
* case class Person(val name: String)
*
* class PersonFieldListProjections(personFact: ListFact[Person]) extends ProjectableListFields[Person] {
* def outerFact: Fact[Person] = personFact
*
* def name: DslEvaluation[String] = projectField( _.name )
* }
*
* object PersonImplicits {
* implicit def toPersonFieldListProjections(personFact: ListFact[Person]): PersonListFieldProjections = new PersonListFieldProjections(personFact)
* }
* }}}
*
* With these elements in place, you can import the PersonImplicits._ where you want to use it in your
* DSL and you can refer to the `name` field of any `Fact` of type `Person`.
*
* @tparam C type from which the field(s) can be projected.
* @author Vincent Zorge (vincent@scala-rules.org)
*/
trait ProjectableListFields[C] {

  /** The list fact whose runtime values the projections read from; supplied by
    * the implementing class. */
  protected def outerFact: ListFact[C]

  /** Builds a [[ProjectedDslEvaluation]] that, at runtime, applies `f` to each
    * element of [[outerFact]] and yields the list of projected fields.
    *
    * @param f  projects one element's value to the field of type F.
    * @tparam F type of the projected field; the evaluation yields a List[F].
    * @return   a ProjectedDslEvaluation producing the projected field values.
    */
  protected def projectField[F](f: C => F): ProjectedDslEvaluation[C, F] = {
    val filledCondition = DslCondition.factFilledCondition(outerFact)
    val listEvaluation = new ProjectionListEvaluation[C, F](outerFact.toEval, f)
    new ProjectedDslEvaluation(f, filledCondition, listEvaluation)
  }
}
/** DslEvaluation over a list of projected fields; additionally exposes the
  * projection function `transform` so later DSL stages can reuse it. */
class ProjectedDslEvaluation[C, F](val transform: C => F, condition: DslCondition, evaluation: Evaluation[List[F]]) extends DslEvaluation[List[F]](condition, evaluation)
| scala-rules/scala-rules | engine/src/main/scala/nl/rabobank/oss/rules/dsl/core/projections/ProjectableFields.scala | Scala | mit | 4,606 |
/* scala-stm - (c) 2009-2010, Stanford University, PPL */
package scala.concurrent.stm
package examples
/** See http://en.wikipedia.org/wiki/Dining_philosophers_problem
* The STM solution is particularly straightforward because we can
* simultaneously pick up two forks.
*/
object DiningPhilosophers {

  class Fork {
    /** True while some philosopher holds this fork. */
    val inUse = Ref(false)
  }

  class PhilosopherThread(meals: Int, left: Fork, right: Fork) extends Thread {
    override def run(): Unit = {
      var remaining = meals
      while (remaining > 0) {
        // THINK
        pickUpBothForks()
        // EAT
        putDown(left)
        putDown(right)
        remaining -= 1
      }
    }

    /** Atomically acquires both forks; retries the transaction until both are free. */
    def pickUpBothForks(): Unit = {
      atomic { implicit txn =>
        if (left.inUse() || right.inUse())
          retry
        left.inUse() = true
        right.inUse() = true
      }
    }

    /** Releases a single fork outside any enclosing transaction. */
    def putDown(f: Fork): Unit = {
      f.inUse.single() = false
    }
  }

  /** Runs `tableSize` philosophers eating `meals` meals each; returns elapsed millis. */
  def time(tableSize: Int, meals: Int): Long = {
    val forks = Array.fill(tableSize)(new Fork)
    val philosophers = Array.tabulate(tableSize) { seat =>
      new PhilosopherThread(meals, forks(seat), forks((seat + 1) % tableSize))
    }
    val begin = System.currentTimeMillis
    philosophers.foreach(_.start())
    philosophers.foreach(_.join())
    System.currentTimeMillis - begin
  }

  def main(args: Array[String]): Unit = {
    val meals = 100000
    var round = 0
    while (round < 3) {
      val elapsed = time(5, meals)
      printf("%3.1f usec/meal\\n", (elapsed * 1000.0) / meals)
      round += 1
    }
  }
}
| nbronson/scala-stm | src/test/scala/scala/concurrent/stm/examples/DiningPhilosophers.scala | Scala | bsd-3-clause | 1,474 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.dataset
import org.apache.calcite.plan._
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.{JoinInfo, JoinRelType}
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{BiRel, RelNode, RelWriter}
import org.apache.calcite.rex.RexNode
import org.apache.calcite.util.mapping.IntPair
import org.apache.flink.api.common.functions.FlatJoinFunction
import org.apache.flink.api.common.operators.base.JoinOperatorBase.JoinHint
import org.apache.flink.api.java.DataSet
import org.apache.flink.table.api.{BatchTableEnvironment, TableException}
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.codegen.FunctionCodeGenerator
import org.apache.flink.table.plan.nodes.CommonJoin
import org.apache.flink.table.runtime.FlatJoinRunner
import org.apache.flink.types.Row
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
/**
* Flink RelNode which matches along with JoinOperator and its related operations.
*/
class DataSetJoin(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    leftNode: RelNode,
    rightNode: RelNode,
    rowRelDataType: RelDataType,
    joinCondition: RexNode,
    joinRowType: RelDataType,
    joinInfo: JoinInfo,
    keyPairs: List[IntPair],
    joinType: JoinRelType,
    joinHint: JoinHint,
    ruleDescription: String)
  extends BiRel(cluster, traitSet, leftNode, rightNode)
  with CommonJoin
  with DataSetRel {

  // The output row type of this relational expression.
  override def deriveRowType(): RelDataType = rowRelDataType

  // Copy constructor required by Calcite's planner when rewriting the plan.
  override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
    new DataSetJoin(
      cluster,
      traitSet,
      inputs.get(0),
      inputs.get(1),
      getRowType,
      joinCondition,
      joinRowType,
      joinInfo,
      keyPairs,
      joinType,
      joinHint,
      ruleDescription)
  }

  override def toString: String = {
    joinToString(
      joinRowType,
      joinCondition,
      joinType,
      getExpressionString)
  }

  override def explainTerms(pw: RelWriter): RelWriter = {
    joinExplainTerms(
      super.explainTerms(pw),
      joinRowType,
      joinCondition,
      joinType,
      getExpressionString)
  }

  // Cost model: I/O proportional to the total byte volume of both inputs,
  // CPU and row count proportional to the total number of input rows.
  override def computeSelfCost (planner: RelOptPlanner, metadata: RelMetadataQuery): RelOptCost = {

    val leftRowCnt = metadata.getRowCount(getLeft)
    val leftRowSize = estimateRowSize(getLeft.getRowType)

    val rightRowCnt = metadata.getRowCount(getRight)
    val rightRowSize = estimateRowSize(getRight.getRowType)

    val ioCost = (leftRowCnt * leftRowSize) + (rightRowCnt * rightRowSize)
    val cpuCost = leftRowCnt + rightRowCnt
    val rowCnt = leftRowCnt + rightRowCnt

    planner.getCostFactory.makeCost(rowCnt, cpuCost, ioCost)
  }

  // Translates this logical join into a Flink DataSet join:
  //  1. validate that at least one type-compatible equality key pair exists,
  //  2. translate both inputs,
  //  3. pick the DataSet join operator matching the join type,
  //  4. code-generate the join function (result conversion + optional non-equi predicate),
  //  5. wire up keys, the generated function and a readable operator name.
  override def translateToPlan(tableEnv: BatchTableEnvironment): DataSet[Row] = {

    val config = tableEnv.getConfig

    val returnType = FlinkTypeFactory.toInternalRowTypeInfo(getRowType)

    // get the equality keys
    val leftKeys = ArrayBuffer.empty[Int]
    val rightKeys = ArrayBuffer.empty[Int]
    if (keyPairs.isEmpty) {
      // if no equality keys => not supported
      throw TableException(
        "Joins should have at least one equality condition.\\n" +
          s"\\tLeft: ${left.toString},\\n" +
          s"\\tRight: ${right.toString},\\n" +
          s"\\tCondition: (${joinConditionToString(joinRowType,
            joinCondition, getExpressionString)})"
      )
    }
    else {
      // at least one equality expression
      val leftFields = left.getRowType.getFieldList
      val rightFields = right.getRowType.getFieldList

      keyPairs.foreach(pair => {
        val leftKeyType = leftFields.get(pair.source).getType.getSqlTypeName
        val rightKeyType = rightFields.get(pair.target).getType.getSqlTypeName

        // check if keys are compatible
        if (leftKeyType == rightKeyType) {
          // add key pair
          // NOTE: `add` resolves via the JavaConversions implicit view on ArrayBuffer.
          leftKeys.add(pair.source)
          rightKeys.add(pair.target)
        } else {
          throw TableException(
            "Equality join predicate on incompatible types.\\n" +
              s"\\tLeft: ${left.toString},\\n" +
              s"\\tRight: ${right.toString},\\n" +
              s"\\tCondition: (${joinConditionToString(joinRowType,
                joinCondition, getExpressionString)})"
          )
        }
      })
    }

    val leftDataSet = left.asInstanceOf[DataSetRel].translateToPlan(tableEnv)
    val rightDataSet = right.asInstanceOf[DataSetRel].translateToPlan(tableEnv)

    // Outer joins may emit nulls, so they require null-checking codegen.
    val (joinOperator, nullCheck) = joinType match {
      case JoinRelType.INNER => (leftDataSet.join(rightDataSet), false)
      case JoinRelType.LEFT => (leftDataSet.leftOuterJoin(rightDataSet), true)
      case JoinRelType.RIGHT => (leftDataSet.rightOuterJoin(rightDataSet), true)
      case JoinRelType.FULL => (leftDataSet.fullOuterJoin(rightDataSet), true)
    }

    if (nullCheck && !config.getNullCheck) {
      throw TableException("Null check in TableConfig must be enabled for outer joins.")
    }

    val generator = new FunctionCodeGenerator(
      config,
      nullCheck,
      leftDataSet.getType,
      Some(rightDataSet.getType))
    val conversion = generator.generateConverterResultExpression(
      returnType,
      joinRowType.getFieldNames)

    var body = ""

    if (joinInfo.isEqui) {
      // only equality condition
      body = s"""
           |${conversion.code}
           |${generator.collectorTerm}.collect(${conversion.resultTerm});
           |""".stripMargin
    }
    else {
      // non-equi predicates are evaluated inside the generated join function
      val nonEquiPredicates = joinInfo.getRemaining(this.cluster.getRexBuilder)
      val condition = generator.generateExpression(nonEquiPredicates)
      body = s"""
           |${condition.code}
           |if (${condition.resultTerm}) {
           |  ${conversion.code}
           |  ${generator.collectorTerm}.collect(${conversion.resultTerm});
           |}
           |""".stripMargin
    }
    val genFunction = generator.generateFunction(
      ruleDescription,
      classOf[FlatJoinFunction[Row, Row, Row]],
      body,
      returnType)

    val joinFun = new FlatJoinRunner[Row, Row, Row](
      genFunction.name,
      genFunction.code,
      genFunction.returnType)

    val joinOpName =
      s"where: (${joinConditionToString(joinRowType, joinCondition, getExpressionString)}), " +
        s"join: (${joinSelectionToString(joinRowType)})"

    joinOperator
      .where(leftKeys.toArray: _*)
      .equalTo(rightKeys.toArray: _*)
      .`with`(joinFun)
      .name(joinOpName)
  }
}
| PangZhi/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetJoin.scala | Scala | apache-2.0 | 7,380 |
package de.thm.move.util
/** Mixin for components whose state can be restored to an initial default. */
trait Resettable {
  /** Restores this component to its initial state. */
  def reset(): Unit
}
| THM-MoTE/MoVE | src/main/scala/de/thm/move/util/Resettable.scala | Scala | mpl-2.0 | 67 |
import java.io.File
import merger.{Scanner, GroupInfo, GrouperCombinators => GC, SimilarityComparators}
/** Parsed command-line configuration: the set of directories to merge. */
case class Config(val dirs: Set[File] = Set.empty)
/** Interactive duplicate-file merger: groups files from the given directories by a
  * normalised name key, then asks the user which file in each group to keep and
  * deletes the rest. */
object Merger {

  /** scopt parser: accepts one or more directory arguments, accumulated into Config. */
  val parser = new scopt.OptionParser[Config]("scopt") {
    head("merger", "0.1")
    help("help").text("This help message")
    arg[File]("<dir>...").unbounded.text("Directories to merge").action { (d,c) =>
      c.copy(dirs = c.dirs + d)
    }
  }

  def main(args: Array[String]) {
    parser.parse(args, Config()) map { conf =>
      // Grouping key: file name lower-cased, reduced to alphabetic chars, extension stripped.
      def grouper(info: GroupInfo): GroupInfo =
        GC.lowercase(GC.alpha(GC.withoutExt(info)))

      val scanner = new Scanner(grouper)
      conf.dirs.foreach(scanner.scan(_))

      // Only groups with more than one member are duplicate candidates.
      val filtered = scanner.groups.filter(_._2.size > 1)

      var groupCount = 0
      for((key, group) <- filtered) {
        groupCount += 1
        println(s"$groupCount / ${filtered.size} -- `${key}`")
        val gs = group.toList
        // was `0 to gs.size - 1`; `indices` expresses the same range idiomatically
        for(i <- gs.indices) println(s" $i) ${humanReadable(gs(i).length)} -- ${gs(i)}")
        print("File to keep (blank to skip): ")
        try {
          // Parse comma-separated indices of files to KEEP; everything else is deleted.
          for(file <- group -- readLine.split(',').map(x => gs(x.trim.toInt))) {
            println(s" Removing ${file}")
            // File.delete returns false on failure; previously this was silently ignored.
            if (!file.delete) println(s" WARNING: failed to delete ${file}")
          }
        } catch {
          // Blank or malformed input (e.g. "".toInt) skips the group.
          case e: NumberFormatException => {
            println(" No action taken")
          }
        }
      }
    } getOrElse {
      println("UH-OH, you fucked up... give me some directories")
    }
  }

  //http://stackoverflow.com/questions/3758606/how-to-convert-byte-size-into-human-readable-format-in-java
  /** Formats a byte count as e.g. "512 B", "1.5 KB", "2.0 MB". */
  private def humanReadable(bytes: Long): String =
    if (bytes < 1024) s"$bytes B"
    else {
      // was (…).asInstanceOf[Int] — .toInt is the idiomatic (and equivalent) conversion
      val exp = (Math.log(bytes) / Math.log(1024)).toInt
      "%.1f %sB".format(bytes / Math.pow(1024, exp), "KMGTPE".charAt(exp - 1))
    }
}
| jfhall/merger | src/main/scala/cli.scala | Scala | mit | 1,838 |
package com.cddcore.carersblog.reasoningOnDates
import scala.language.implicitConversions
import scala.xml._
import org.cddcore.engine._
import org.cddcore.engine.tests.CddJunitRunner
import org.joda.time._
import org.joda.time.format._
import org.junit.runner.RunWith
import java.util.concurrent.atomic.AtomicReference
import spray.caching._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.Duration
import scala.concurrent.Await
import java.util.concurrent.Future
object World {
  // Convenience factory with a fixed processing date (2010-07-05), as used by the examples.
  def apply(ninoToCis: NinoToCis): World = World(Xmls.asDate("2010-7-5"), ninoToCis)
}

/** Execution context for claim processing: the date on which processing happens
  * plus the lookup used to fetch CIS records by NINO. */
case class World(dateProcessingDate: DateTime, ninoToCis: NinoToCis) extends LoggerDisplay {
  // Compact representation for CDD logger output.
  def loggerDisplay(dp: LoggerDisplayProcessor): String =
    "World(" + dateProcessingDate + ")"
  // Upper bound for awaiting asynchronous results (e.g. cached guard conditions).
  val maxWait = scala.concurrent.duration.Duration(1, "seconds")
}
/** Lookup from a National Insurance number to that person's CIS record as XML. */
trait NinoToCis {
  def apply(nino: String): Elem
}
/** Test implementation: resolves a NINO to the classpath resource `Cis/<nino>.txt`,
  * yielding `<NoCis/>` when no such resource exists. */
class TestNinoToCis extends NinoToCis {
  def apply(nino: String) =
    try {
      val resourcePath = s"Cis/${nino}.txt"
      Option(getClass.getClassLoader.getResource(resourcePath)) match {
        case None =>
          <NoCis/>
        case Some(resource) =>
          val rawXml = scala.io.Source.fromURL(resource).mkString
          XML.loadString(rawXml)
      }
    } catch {
      case e: Exception => throw new RuntimeException("Cannot load " + nino, e)
    }
}
/** A message key together with its parameters, rendered as `<key(p1,p2,...)>`. */
case class KeyAndParams(key: String, params: Any*) {
  override def toString = {
    val renderedParams = params.mkString("(", ",", ")")
    s"<$key$renderedParams>"
  }
}
/** Helpers for loading claim XML test fixtures and parsing dates. */
object Xmls {

  /** Loads the ValidateClaim fixture for the given claim id; wraps any failure
    * (missing resource, parse error) in a RuntimeException naming the id. */
  def validateClaim(id: String) = {
    try {
      val full = s"ValidateClaim/${id}.xml"
      val url = getClass.getClassLoader.getResource(full)
      val xmlString = scala.io.Source.fromURL(url).mkString
      val xml = XML.loadString(xmlString)
      xml
    } catch {
      case e: Exception => throw new RuntimeException("Cannot load " + id, e)
    }
  }

  /** The boolean is 'hospitalisation' */
  def validateClaimWithBreaks(breaks: (String, String, Boolean)*): CarersXmlSituation =
    validateClaimWithBreaksFull(breaks.map((x) => (x._1, x._2, if (x._3) "Hospitalisation" else "other", if (x._3) "Hospital" else "other")): _*)

  /** Builds a claim situation from the CL801119A fixture, splicing the supplied
    * (from, to, reason, type) breaks in as a ClaimBreaks XML fragment. */
  def validateClaimWithBreaksFull(breaks: (String, String, String, String)*): CarersXmlSituation = {
    val url = getClass.getClassLoader.getResource("ValidateClaim/CL801119A.xml")
    val xmlString = scala.io.Source.fromURL(url).mkString
    val breaksInCareXml = <ClaimBreaks>
      {
        breaks.map((t) =>
          <BreakInCare>
            <BICFromDate>{ t._1 }</BICFromDate>
            <BICToDate>{ t._2 }</BICToDate>
            <BICReason>{ t._3 }</BICReason>
            <BICType>{ t._4 }</BICType>
          </BreakInCare>)
      }
    </ClaimBreaks>
    // Textual substitution: the fixture contains an empty <ClaimBreaks /> placeholder.
    val withBreaks = xmlString.replace("<ClaimBreaks />", breaksInCareXml.toString)
    new CarersXmlSituation(World(new TestNinoToCis), XML.loadString(withBreaks))
  }

  // Shared yyyy-MM-dd parser for all fixture dates.
  val formatter = DateTimeFormat.forPattern("yyyy-MM-dd");
  def asDate(s: String): DateTime = formatter.parseDateTime(s);
}
/** XML-backed view of a carer's allowance claim. The `\` path builders below come
  * from the project's Xml DSL (imported inside), which lazily extracts typed values
  * from the ValidateClaim XML. */
case class CarersXmlSituation(w: World, validateClaimXml: Elem) extends XmlSituation {

  import Xml._
  // Claim dates and the three-month backdating limit.
  lazy val claimStartDate = xml(validateClaimXml) \ "ClaimData" \ "ClaimStartDate" \ date
  lazy val timeLimitForClaimingThreeMonths = claimSubmittedDate().minusMonths(3)
  lazy val claimEndDate = xml(validateClaimXml) \ "ClaimData" \ "ClaimEndDate" \ optionDate
  lazy val claimSubmittedDate = xml(validateClaimXml) \ "StatementData" \ "StatementDate" \ date
  lazy val dependantAwardStartDate = xml(dependantCisXml) \ "Award" \ "AssessmentDetails" \ "ClaimStartDate" \ optionDate
  // Claimant eligibility flags; unanswered questions default to false.
  lazy val birthdate = xml(validateClaimXml) \ "ClaimantData" \ "ClaimantBirthDate" \ "PersonBirthDate" \ date
  lazy val claim35Hours = xml(validateClaimXml) \ "ClaimData" \ "Claim35Hours" \ yesNo(default = false)
  lazy val ClaimCurrentResidentUK = xml(validateClaimXml) \ "ClaimData" \ "ClaimCurrentResidentUK" \ yesNo(default = false)
  lazy val ClaimEducationFullTime = xml(validateClaimXml) \ "ClaimData" \ "ClaimEducationFullTime" \ yesNo(default = false)
  lazy val ClaimAlwaysUK = xml(validateClaimXml) \ "ClaimData" \ "ClaimAlwaysUK" \ yesNo(default = false)
  // True when the claimant's 16th birthday is after `date`; false when no birthdate present.
  def underSixteenOn(date: DateTime) = birthdate.get() match {
    case Some(bd) => bd.plusYears(16).isAfter(date)
    case _ => false
  }
  lazy val DependantNino = xml(validateClaimXml) \ "DependantData" \ "DependantNINO" \ string
  // The dependant's CIS record, or a placeholder element when no NINO is given.
  lazy val dependantCisXml: Elem = DependantNino.get() match {
    case Some(s) => w.ninoToCis(s);
    case None => <NoDependantXml/>
  }
  lazy val dependantLevelOfQualifyingCare = xml(dependantCisXml) \\ "AwardComponent" \ string
  lazy val dependantHasSufficientLevelOfQualifyingCare = dependantLevelOfQualifyingCare() == "DLA Middle Rate Care"

  // Expense questions with their amounts (child care, private and occupational pensions).
  lazy val hasChildExpenses = xml(validateClaimXml) \ "ExpensesData" \ "ExpensesChild" \ yesNo(default = false)
  lazy val childExpensesAcount = xml(validateClaimXml) \ "ExpensesData" \ "ExpensesChildAmount" \ double
  lazy val hasPsnPension = xml(validateClaimXml) \ "ExpensesData" \ "ExpensesPsnPension" \ yesNo(default = false)
  lazy val psnPensionAcount = xml(validateClaimXml) \ "ExpensesData" \ "ExpensesPsnPensionAmount" \ double
  lazy val hasOccPension = xml(validateClaimXml) \ "ExpensesData" \ "ExpensesOccPension" \ yesNo(default = false)
  lazy val occPensionAcount = xml(validateClaimXml) \ "ExpensesData" \ "ExpensesOccPensionAmount" \ double
  lazy val hasEmploymentData = xml(validateClaimXml) \ "newEmploymentData" \ boolean
  lazy val employmentGrossSalary = xml(validateClaimXml) \ "EmploymentData" \ "EmploymentGrossSalary" \ double
  lazy val employmentPayPeriodicity = xml(validateClaimXml) \ "EmploymentData" \ "EmploymentPayPeriodicity" \ string
  // Each BreakInCare element becomes a DateRange(from, to, reason-from-BICType).
  lazy val breaksInCare = xml(validateClaimXml) \ "ClaimData" \ "ClaimBreaks" \ "BreakInCare" \
    obj((ns) => ns.map((n) => {
      val from = Xmls.asDate((n \ "BICFromDate").text)
      val to = Xmls.asDate((n \ "BICToDate").text)
      val reason = (n \ "BICType").text
      new DateRange(from, to, reason)
    }))

  // Weekly means test; NOTE(review): 110 appears to be the weekly earnings threshold — confirm.
  lazy val nettIncome = Income.income(this) - Expenses.expenses(this)
  lazy val incomeTooHigh = nettIncome >= 110
  lazy val incomeOK = !incomeTooHigh

  // Per-date memoisation of guard-condition results.
  // NOTE(review): the map value is java.util.concurrent.Future here — presumably
  // Maps.getOrCreate creates/awaits entries; confirm against its implementation.
  private val guardConditionCache = new AtomicReference[Map[DateTime, Future[List[KeyAndParams]]]](Map())
  def guardConditions(dateTime: DateTime): List[KeyAndParams] = Maps.getOrCreate(guardConditionCache, dateTime, Carers.guardConditions(dateTime, this))
}
@RunWith(classOf[CddJunitRunner])
object Expenses {
  // Lifts a test-fixture claim id into a full claim situation.
  implicit def stringStringToCarers(x: String) = CarersXmlSituation(World(new TestNinoToCis), Xmls.validateClaim(x))

  // Folding engine: total allowable expenses = sum of the three child engines below.
  // Each child defaults to 0 and contributes 50% of its expense when claimed.
  val expenses = Engine.folding[CarersXmlSituation, Double, Double]((acc, v) => acc + v, 0).
    title("Expenses").
    code((c: CarersXmlSituation) => 0.0).
    childEngine("Child care expenses", """Customer's claiming CA may claim an allowable expense of up to 50% of their childcare expenses
where the child care is not being undertaken by a direct relative. This amount may then be deducted from their gross pay.""").
    scenario("CL100110A").expected(15).
    because((c: CarersXmlSituation) => c.hasChildExpenses()).
    code((c: CarersXmlSituation) => c.childExpensesAcount() / 2).
    scenario("CL100104A").expected(0).

    childEngine("PSN Pensions", """Customers claiming CA may claim an allowable expense of up to 50% of their Private Pension contributions.
This amount may then be deducted from their gross pay figure.""").
    scenario("CL100111A").expected(15).
    because((c: CarersXmlSituation) => c.hasPsnPension()).
    code((c: CarersXmlSituation) => c.psnPensionAcount() / 2).
    scenario("CL100104A").expected(0).

    childEngine("Occupational Pension",
      """Customers claiming CA may claim an allowable expense of up to 50% of their Occupational Pension contributions.
This amount may then be deducted from their gross pay figure.""").
    scenario("CL100112A").expected(15).
    because((c: CarersXmlSituation) => c.hasOccPension()).
    code((c: CarersXmlSituation) => c.occPensionAcount() / 2).
    scenario("CL100104A").expected(0).
    build
}
@RunWith(classOf[CddJunitRunner])
object Income {
  // DSL conveniences for building claim situations from fixture ids and dates.
  implicit def stringToCarers(x: String) = CarersXmlSituation(World(new TestNinoToCis), Xmls.validateClaim(x))
  implicit def stringToDate(x: String) = Xmls.asDate(x)
  implicit def stringStringToCarers(x: Tuple2[String, String]) = CarersXmlSituation(World(x._1, new TestNinoToCis), Xmls.validateClaim(x._2))

  // Gross income engine: normalises the claimant's pay to a weekly figure
  // (0 when no employment data; annual salary / 52; weekly pay as-is).
  val income = Engine[CarersXmlSituation, Double]().title("Income").
    useCase("No income", "A person without any income should return 0 as their income").
    scenario("CL100104A").expected(0).
    because((c: CarersXmlSituation) => !c.hasEmploymentData()).

    useCase("Annually paid", "A person who is annually paid has their annual salary divided by 52 to calculate their income").
    scenario("CL100113A").expected(7000.0 / 52).
    because((c: CarersXmlSituation) => c.employmentPayPeriodicity() == "Annually").
    code((c: CarersXmlSituation) => c.employmentGrossSalary() / 52).

    useCase("Weekly paid").
    scenario("CL100110A").expected(110).
    because((c: CarersXmlSituation) => c.employmentPayPeriodicity() == "Weekly").
    code((c: CarersXmlSituation) => c.employmentGrossSalary()).
    build
}
@RunWith(classOf[CddJunitRunner])
object Carers {
// DSL conveniences: lift plain strings/tuples into dates, date ranges and claim situations.
implicit def stringToDate(x: String) = Xmls.asDate(x)
implicit def stringStringToListDateAndString(x: (String, String)): List[(DateTime, String)] = List((Xmls.asDate(x._1), x._2))
implicit def stringStringToDateRange(x: (String, String, String)) = DateRange(x._1, x._2, x._3)
implicit def stringToCarers(x: String) = CarersXmlSituation(World(new TestNinoToCis), Xmls.validateClaim(x))
implicit def toKeyAndParams(x: String) = Some(KeyAndParams(x))
implicit def toValidateClaim(x: List[(String, String, Boolean)]): CarersXmlSituation = Xmls.validateClaimWithBreaks(x: _*)

// Carer's allowance payment amount. NOTE(review): presumably GBP per week — confirm.
val carersPayment: Double = 110

// True when dateOfInterest lies strictly between `start` and a defined `end`.
private def isInRange(dateOfInterest: DateTime, start: DateTime, end: Option[DateTime]) = {
  start.isBefore(dateOfInterest) &&
    end.isDefined &&
    end.get.isAfter(dateOfInterest)
}
// Folding engine that gathers every "interesting date" of a claim — dates on which
// eligibility might change — by concatenating the results of the child engines below.
val interestingDates = Engine.folding[CarersXmlSituation, List[(DateTime, String)], List[(DateTime, String)]]((acc, opt) => acc ++ opt, List()).title("Interesting Dates").
  childEngine("BirthDate", "Your birthdate is interesting IFF you become the age of sixteen during the period of the claim").
  scenario("CL100105a").expected(List()).
  scenario("CL1PA100").expected(("2010-7-10", "Sixteenth Birthday")).
  code((c: CarersXmlSituation) => List((c.birthdate().plusYears(16), "Sixteenth Birthday"))).
  because((c: CarersXmlSituation) => isInRange(c.birthdate().plusYears(16), c.claimStartDate(), c.claimEndDate())).

  childEngine("Claim start date", "Is always an interesting date").
  scenario("CL100105a").expected(("2010-1-1", "Claim Start Date")).
  code((c: CarersXmlSituation) => List((c.claimStartDate(), "Claim Start Date"))).

  childEngine("Claim end date", "Is always an interesting date, and we have to fake it if it doesn't exist").
  scenario("CL100105a").expected(("3999-12-31", "Claim End Date")).
  scenario("CL1PA100").expected(("2999-12-31", "Claim End Date")).
  code((c: CarersXmlSituation) => List((c.claimEndDate().get, "Claim End Date"))).
  because((c: CarersXmlSituation) => c.claimEndDate().isDefined).

  childEngine("Claim submitted date", "Is always an interesting date").
  scenario("CL100105a").expected(("2010-1-1", "Claim Submitted Date")).
  code((c: CarersXmlSituation) => List((c.claimSubmittedDate(), "Claim Submitted Date"))).

  childEngine("Time Limit For Claiming Three Months", "Is an interesting date, if it falls inside the claim period").
  scenario("CL100105a").expected(List()).
  scenario("CL1PA100").expected(("2010-3-9", "Three month claim time limit")).
  code((c: CarersXmlSituation) => List((c.timeLimitForClaimingThreeMonths, "Three month claim time limit"))).
  because((c: CarersXmlSituation) => isInRange(c.timeLimitForClaimingThreeMonths, c.claimStartDate(), c.claimEndDate())).

  childEngine("Breaks in Care add the from date, and the first date after the to date").
  scenario(List(("2010-3-1", "2010-3-4", true)), "Single break").expected(List(("2010-3-1", "Break in care (Hospital) started"), ("2010-3-5", "Break in care (Hospital) ended"))).
  code((c: CarersXmlSituation) => c.breaksInCare().flatMap((dr) => List(
    (dr.from, s"Break in care (${dr.reason}) started"),
    (dr.to.plusDays(1), s"Break in care (${dr.reason}) ended"))).toList).
  scenario(List(("2010-3-1", "2010-3-4", true), ("2010-4-1", "2010-4-4", true)), "Two breaks").
  expected(List(("2010-3-1", "Break in care (Hospital) started"), ("2010-3-5", "Break in care (Hospital) ended"),
    ("2010-4-1", "Break in care (Hospital) started"), ("2010-4-5", "Break in care (Hospital) ended"))).

  childEngine("Four weeks after the start of a non hospital break in care, and twelve weeks after a hospital break of care are interesting if the break is active then").
  scenario(List(("2010-7-1", "2010-7-4", false)), "Non hospital break, Too short").expected(List()).
  code((c: CarersXmlSituation) => c.breaksInCare().flatMap(dr => {
    // Adds the cut-off date when the break is still active `weeks` after it began.
    def conditionallyAddDate(dr: DateRange, weeks: Int): List[(DateTime, String)] = {
      val lastDay = dr.from.plusWeeks(weeks)
      // BUGFIX: was `dr == lastDay`, comparing a DateRange to a DateTime — always
      // false. The intent is to also flag a break whose end falls exactly on the
      // cut-off day, hence dr.to.isEqual(lastDay).
      if (dr.to.isAfter(lastDay) || dr.to.isEqual(lastDay)) List((lastDay, "Care break too long")) else List()
    }
    dr.reason.equalsIgnoreCase("Hospital") match {
      case false => conditionallyAddDate(dr, 4)
      case true => conditionallyAddDate(dr, 12)
    }
  }).toList).
  scenario(List(("2010-7-1", "2010-8-1", false)), "Non hospital break, more than four weeks").expected(List(("2010-7-29", "Care break too long"))).
  scenario(List(("2010-7-1", "2010-8-1", true)), "Hospital break. Too short").expected(List()).
  scenario(List(("2010-7-1", "2010-10-1", true)), "Hospital break, more than twelve weeks").expected(List(("2010-9-23", "Care break too long"))).
  build;
// Decides whether a single break in care still allows payment on the processed date:
// true = payment OK on that date, false = this break blocks payment.
val singleBreakInCare = Engine[DateTime, DateTime, DateRange, Boolean]().title("Single Break In Care").
  description("The first date is the date being processed. The second date is the claim start date. The result is false if this DateRange invalidates the claim for the current date").
  useCase("Outside date range").
  scenario("2010-3-1", "2010-1-1", ("2010-5-1", "2010-5-5", "Reason"), "Before date").expected(true).
  because((processDate: DateTime, claimStartDate: DateTime, dr: DateRange) => !dr.contains(processDate)).

  // Breaks within the first 22 weeks of the claim always block payment.
  useCase("Not yet 22 weeks after claim start date").
  scenario("2010-3-1", "2010-1-1", ("2010-3-1", "2010-3-5", "Reason"), "After date").expected(false).
  because((processDate: DateTime, claimStartDate: DateTime, dr: DateRange) => processDate.isBefore(claimStartDate.plusWeeks(22))).

  // Non-hospital breaks: payment continues for the first four weeks of the break.
  useCase(" 22 weeks after claim start date, non hospital").
  scenario("2010-7-1", "2010-1-1", ("2010-7-1", "2010-11-4", "Reason"), "Non hospital break, first day").expected(true).
  because((processDate: DateTime, claimStartDate: DateTime, dr: DateRange) => processDate.isBefore(dr.from.plusWeeks(4))).
  scenario("2010-7-2", "2010-1-1", ("2010-7-1", "2010-11-4", "Reason"), "Non hospital break, second day").expected(true).
  scenario("2010-7-28", "2010-1-1", ("2010-7-1", "2010-11-4", "Reason"), "Non hospital break, last day of four weeks").expected(true).
  scenario("2010-7-29", "2010-1-1", ("2010-7-1", "2010-11-4", "Reason"), "Non hospital break, first day after four weeks").expected(false).
  because((processDate: DateTime, claimStartDate: DateTime, dr: DateRange) => { val lastDay = dr.from.plusWeeks(4).minusDays(1); processDate.isAfter(lastDay) }).
  scenario("2010-11-4", "2010-1-1", ("2010-7-1", "2010-11-4", "Reason"), "Non hospital break, last day of break").expected(false).
  scenario("2010-11-5", "2010-1-1", ("2010-7-1", "2010-11-4", "Reason"), "Non hospital break, first day after break").expected(true).

  // Hospital breaks: payment continues for the first twelve weeks of the break.
  useCase(" 22 weeks after claim start date, hospital").
  scenario("2010-7-1", "2010-1-1", ("2010-7-1", "2010-12-4", "Hospital"), "Hospital break, first day").expected(true).
  scenario("2010-7-29", "2010-1-1", ("2010-7-1", "2010-11-4", "Hospital"), "Hospital break, first day after four weeks").expected(true).
  because((processDate: DateTime, claimStartDate: DateTime, dr: DateRange) => { val firstInvalidDay = dr.from.plusWeeks(12); dr.reason.equalsIgnoreCase("Hospital") && processDate.isBefore(firstInvalidDay) }).
  scenario("2010-09-21", "2010-1-1", ("2010-7-1", "2010-12-4", "Hospital"), "Hospital break, last day of twelve weeks 1").expected(true).
  scenario("2010-09-22", "2010-1-1", ("2010-7-1", "2010-12-4", "Hospital"), "Hospital break, last day of twelve weeks").expected(true).
  scenario("2010-09-23", "2010-1-1", ("2010-7-1", "2010-12-4", "Hospital"), "Hospital break, first day after twelve weeks").expected(false).
  scenario("2010-12-4", "2010-1-1", ("2010-7-1", "2010-12-4", "Hospital"), "Hospital break, last day of break").expected(false).
  scenario("2010-12-5", "2010-1-1", ("2010-7-1", "2010-12-4", "Hospital"), "Hospital break, first day after break").expected(true).
  build
/** CDD engine answering: on day `d`, do ALL of the claimant's breaks in care still allow payment?
 * The default behaviour folds singleBreakInCare over every DateRange in c.breaksInCare().
 * Scenario inputs are (processDate, List((from, to, flag)), description); the boolean in each
 * break tuple presumably means "dependant in hospital" -- TODO confirm against BreakInCare parsing. */
def breaksInCare = Engine[DateTime, CarersXmlSituation, Boolean]().title("Breaks in care").
description("This works out if any given break in care (specified by the DateRange) still allows payment. For reference the validateClaimWithBreaks method " +
"creates a validate claims application form with a claims start date of 2010-01-01. The 22 week enabler for breaks in care occurs on 2010-06-04").
useCase("The datetime is outside any break in care, means that payment is OK").
scenario("2010-05-12", List(("2010-5-13", "2010-6-13", true)), "Just before break").expected(true).
// Default code: payment is allowed only if every individual break passes singleBreakInCare.
code((d: DateTime, c: CarersXmlSituation) => {
val startDate = c.claimStartDate()
c.breaksInCare().foldLeft(true)((acc, dr) => acc && singleBreakInCare(d, startDate, dr))
}).
scenario("2010-06-14", List(("2010-5-13", "2010-6-13", true)), "Just after break").expected(true).
scenario("2010-05-12", List(("2010-5-13", "2010-6-13", true), ("2010-6-1", "2010-6-2", true)), "Just before break, multiple breaks").expected(true).
scenario("2010-06-14", List(("2010-5-13", "2010-6-13", true), ("2010-6-1", "2010-6-2", true)), "Just after break, multiple breaks").expected(true).
useCase("The datetime is in a break in care (dependant in hospital), and the care payments were made for 22 weeks pre care, and the break is less than 12 weeks").
scenario("2010-7-1", List(("2010-7-1", "2010-9-22", true)), "First day of break that is one day short of 12 weeks").expected(true).
scenario("2010-9-22", List(("2010-7-1", "2010-9-22", true)), "Last day of break that is one day short of 12 weeks").expected(true).
useCase("The datetime is in a break in care (dependant not in hospital), and the care payments were made for 22 weeks pre care, and the break is less than 4 weeks").
scenario("2010-7-1", List(("2010-7-1", "2010-7-10", false)), "First day of break that is one day short of 4 weeks").expected(true).
scenario("2010-7-10", List(("2010-7-1", "2010-7-10", false)), "Last day of break that is one day short of 4 weeks").expected(true).
useCase("The datetime is in a break in care (dependant not in hospital), and the care payments were made for 22 weeks pre care, and the break is more than 4 weeks").
scenario("2010-7-1", List(("2010-7-1", "2010-08-02", false)), "First day of break that is over 4 weeks").expected(true).
// Non-hospital breaks stop being payable after 4 weeks (2010-7-28 is the last valid day).
scenario("2010-7-28", List(("2010-7-1", "2010-08-02", false)), "Last valid day of break that is over 4 weeks").expected(true).
scenario("2010-7-29", List(("2010-7-1", "2010-08-02", false)), "First invalid day of break that is over 4 weeks").expected(false).
scenario("2010-8-2", List(("2010-7-1", "2010-08-02", false)), "Last day of break that is over 4 weeks").expected(false).
scenario("2010-8-3", List(("2010-7-1", "2010-08-02", false)), "After break that is over 4 weeks").expected(true).
useCase("The datetime is in a break in care (dependant in hospital), and the care payments were made for 22 weeks pre care, and the break is more than 12 weeks").
scenario("2010-6-04", List(("2010-6-4", "2010-9-1", true)), "First day of break that is over 12 weeks").expected(true).
scenario("2010-8-25", List(("2010-6-4", "2010-9-1", true)), "Last valid day of break that is over 12 weeks1").expected(true).
// Hospital breaks stop being payable after 12 weeks (2010-8-26 is the last valid day).
scenario("2010-8-26", List(("2010-6-4", "2010-9-1", true)), "Last valid day of break that is over 12 weeks").expected(true).
scenario("2010-8-27", List(("2010-6-4", "2010-9-1", true)), "First invalid day of break that is over 12 weeks").expected(false).
scenario("2010-9-1", List(("2010-6-4", "2010-9-1", true)), "Last day of break that is over 12 weeks").expected(false).
scenario("2010-9-2", List(("2010-6-4", "2010-9-1", true)), "After break that is over 12 weeks").expected(true).
useCase("The datetime is in a break in care and the care payments were not made for 22 weeks pre care").
scenario("2010-2-2", List(("2010-3-1", "2010-3-3", true)), "before a break that is pre 22 weeks").expected(true).
scenario("2010-3-1", List(("2010-3-1", "2010-3-3", true)), "first day of a break that is pre 22 weeks").expected(false).
scenario("2010-3-3", List(("2010-3-1", "2010-3-3", true)), "last day a break that is pre 22 weeks").expected(false).
scenario("2010-3-4", List(("2010-3-1", "2010-3-3", true)), "after a break that is pre 22 weeks").expected(true).
build
/** Folding CDD engine that runs each guard-condition child engine in turn and concatenates
 * every failure (a KeyAndParams reason code) into a single list; an empty list means the
 * claim passes all guards. Each child returns None (pass) or Some(reason) (fail). */
val guardConditions = Engine.folding[DateTime, CarersXmlSituation, Option[KeyAndParams], List[KeyAndParams]]((acc, opt) => acc ::: opt.toList, List()).title("Check Guard Condition").
// Default for every child engine: no objection to the claim.
code((d: DateTime, c: CarersXmlSituation) => None).
childEngine("Age Restriction", "Customers under age 16 are not entitled to Carers Allowance").
scenario("2010-6-9", "CL100104A", "Cl100104A-Age Under 16").expectedAndCode("carer.claimant.under16").
because((d: DateTime, c: CarersXmlSituation) => c.underSixteenOn(d)).
scenario("2022-3-1", "CL100104A", "Cl100104A-Age Under 16").expected(None).
childEngine("Caring hours", "Customers with Hours of caring must be 35 hours or more in any one week").
scenario("2010-1-1", "CL100105A", "CL100105A-lessThen35Hours").
expectedAndCode("carer.claimant.under35hoursCaring").
because((d: DateTime, c: CarersXmlSituation) => !c.claim35Hours()).
childEngine("Qualifying Benefit", "Dependant Party's without the required level of qualyfing benefit will result in the disallowance of the claim to Carer.").
scenario("2010-6-23", "CL100106A", "CL100106A-Without qualifying benefit").
expectedAndCode(("carer.qualifyingBenefit.dpWithoutRequiredLevelOfQualifyingBenefit")).
because((d: DateTime, c: CarersXmlSituation) => !c.dependantHasSufficientLevelOfQualifyingCare).
childEngine("UK Residence", "Customer who is not considered resident and present in GB is not entitled to CA.").
scenario("2010-6-7", "CL100107A", "CL100107A-notInGB").
expectedAndCode("carers.claimant.notResident").
because((d: DateTime, c: CarersXmlSituation) => !c.ClaimAlwaysUK()).
childEngine("Immigration Status", "Customers who have restrictions on their immigration status will be disallowed CA.").
scenario("2010-6-7", "CL100108A", "CL100108A-restriction on immigration status").
expectedAndCode("carers.claimant.restriction.immigrationStatus").
because((d: DateTime, c: CarersXmlSituation) => !c.ClaimCurrentResidentUK()).
childEngine("Full Time Eduction", "Customers in Full Time Education 21 hours or more each week are not entitled to CA.").
scenario("2010-2-10", "CL100109A", "CL100109A-full time education").
expectedAndCode("carers.claimant.fullTimeEduction.moreThan21Hours").
because((d: DateTime, c: CarersXmlSituation) => c.ClaimEducationFullTime()).
childEngine("High Salary", "Customers who earn more than the threshold value per week are not entitled to CA").
// The first two scenarios pass; the assertions only sanity-check the computed net income.
scenario("2010-2-10", "CL100111A").expected(None).
assertion((d: DateTime, c: CarersXmlSituation, optReason: ROrException[Option[KeyAndParams]]) => c.nettIncome == 95).
scenario("2010-2-10", "CL100112A").expected(None).
assertion((d: DateTime, c: CarersXmlSituation, optReason: ROrException[Option[KeyAndParams]]) => c.nettIncome == 95).
scenario("2010-2-10", "CL100113A").expectedAndCode("carers.income.tooHigh").
because((d: DateTime, c: CarersXmlSituation) => c.incomeTooHigh).
childEngine("Breaks in care", "Breaks in care may cause the claim to be invalid").
scenario("2010-6-1", List(("2010-7-1", "2010-12-20", false)), "Long break in care, but date outside range").expected(None).
scenario("2010-7-10", List(("2010-7-1", "2010-7-20", false)), "Short break in care when after 22 weeks").expected(None).
scenario("2010-12-1", List(("2010-7-1", "2010-12-20", false)), "Long break in care when after 22 weeks").expectedAndCode("carers.breakInCare").
// Delegates to the breaksInCare engine defined above.
because((d: DateTime, c: CarersXmlSituation) => !breaksInCare(d, c)).
build
/** Result of validating a claim: either the payment amount (Left) or the list of
 * reasons the claim fails (Right). */
type ReasonsOrAmount = Either[Double, List[KeyAndParams]]
// Implicit conversions used by the scenario DSL below. They now carry explicit result
// types: implicit definitions should never rely on type inference (the inferred
// Left[Double, Nothing]/Right[Nothing, ...] types are fragile and trigger compiler
// warnings in later Scala versions).
// NOTE: "toAmoumt" is a typo for "toAmount"; the name is kept for source compatibility.
implicit def toAmoumt(x: Double): ReasonsOrAmount = Left(x)
implicit def toReasons(x: List[KeyAndParams]): ReasonsOrAmount = Right(x)
implicit def stringsToReasons(x: List[String]): ReasonsOrAmount = Right(x.map(KeyAndParams(_)))
/** Top-level claim-validation engine: for a process date and a claim situation it returns
 * either the payment amount (default: carersPayment) or the failed guard conditions.
 * Relies on the implicit ReasonsOrAmount conversions defined above. */
val engine = Engine[DateTime, CarersXmlSituation, ReasonsOrAmount]().title("Validate Claim").
// Default: the claim is payable at the standard rate.
code((d: DateTime, c: CarersXmlSituation) => Left(carersPayment)).
useCase("Guard Conditions", "All guard conditions should be passed").
scenario("2010-6-7", "CL100108A", "CL100108A-restriction on immigration status").
expected(List("carers.claimant.notResident", "carers.claimant.restriction.immigrationStatus")).
// Any guard failures are returned as the reasons list.
code((d: DateTime, c: CarersXmlSituation) => Right(c.guardConditions(d))).
useCase("Employment 4", """Customer's claiming CA may claim an allowable expense of up to 50% of their childcare expenses where the child care is not being undertaken by a direct relative.
This amount may then be deducted from their gross pay.""").
scenario("2010-3-22", "CL100110A", "CL100110A-child care allowance").
expected(carersPayment).
// With no guard failures the default payment branch applies.
because((d: DateTime, c: CarersXmlSituation) => c.guardConditions(d).size == 0).
useCase("Employment 5", """Customers claiming CA may claim an allowable expense of up to 50% of their Private Pension contributions.
This amount may then be deducted from their gross pay figure.""").
scenario("2010-3-8", "CL100111A", "CL100111A-private pension").
expected(carersPayment).
useCase("Employment 6", """Customers claiming CA may claim an allowable expense of up to 50% of their Occupational Pension contributions.
This amount may then be deducted from their gross pay figure.""").
scenario("2010-3-8", "CL100112A", "CL100112A-occupational pension").
expected(carersPayment).
build
/** Returns a DatesToBeProcessedTogether and the days that the claim is valid for */
/** Returns a DatesToBeProcessedTogether and the days that the claim is valid for.
 * For each group of date ranges that must be processed together, the validation
 * engine is evaluated at the start of every range in the group. */
def findTimeLine(c: CarersXmlSituation) = {
  val splitDay = DateRanges.sunday
  val grouped = DateRanges.interestingDatesToDateRangesToBeProcessedTogether(interestingDates(c), splitDay)
  grouped.map { drCollection =>
    // Pair each range with the engine's verdict for its first day.
    val evaluated = drCollection.dateRanges.map(dr => (dr, engine(dr.from, c)))
    (drCollection, evaluated)
  }
}
/** Entry point: prints, for each claim form CL1001xxA, one line per processing group
 * showing "from-date/reason => engine result" for every date range in the group. */
def main(args: Array[String]): Unit = {
  // Claim-form id suffixes to report on (03 and 08 are deliberately absent from the list).
  val list = List("00", "01", "02", "04", "05", "06", "07", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19")
  for (name <- list) {
    val fullName = s"CL1001${name}A"
    println(fullName)
    // NOTE(review): findTimeLine takes a CarersXmlSituation; an implicit
    // String -> CarersXmlSituation conversion from earlier in this file is assumed -- confirm.
    // The original used .collect with a total pattern (equivalent to .map) and computed an
    // unused `startDate` from dateRanges.head (which could throw on an empty group); both removed.
    val summary = findTimeLine(fullName).map {
      case (drCollection, results) =>
        results
          .map { case (dr, result) => DateRange.formatter.print(dr.from) + "/" + dr.reason + " => " + result }
          .mkString(",")
    }
    println(summary.mkString("\n"))
    println()
  }
}
} | phil-rice/Carers | src/main/scala/com/cddcore/carersblog/reasoningOnDates/Carers.scala | Scala | bsd-2-clause | 28,536 |
/*
* Copyright 2014 - 2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package streamz.camel.akka
import java.util.concurrent.{ ArrayBlockingQueue, TimeUnit }
import akka.stream._
import akka.stream.stage._
import org.apache.camel.{ AsyncCallback => CamelAsyncCallback, _ }
import streamz.camel.{ StreamContext, StreamMessage }
import scala.collection.mutable
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.{ Failure, Success, Try }
/** Pairs a Camel exchange received from an endpoint with the async callback that must be
 * completed (callback.done) once the stream has produced a reply or failed. */
private case class AsyncExchange(exchange: Exchange, callback: CamelAsyncCallback)
/** Camel AsyncProcessor that buffers received exchanges (bounded by `capacity`) until the
 * stream stage polls them from `receivedExchanges`. Only asynchronous processing is supported. */
private class AsyncExchangeProcessor(capacity: Int) extends AsyncProcessor {
  /** Exchanges handed over by Camel, waiting to be consumed by the stream. */
  val receivedExchanges = new ArrayBlockingQueue[AsyncExchange](capacity)

  /** Fails every buffered exchange with `t` and completes its callback, so Camel does not
   * wait forever on exchanges the stream will never reply to.
   * BUG FIX: the previous implementation only obtained an iterator from the queue and
   * discarded it (a no-op) -- the throwable was never propagated anywhere. */
  def fail(t: Throwable): Unit = {
    val it = receivedExchanges.iterator()
    while (it.hasNext) {
      val AsyncExchange(exchange, callback) = it.next()
      it.remove()
      exchange.setException(t)
      // done(false) signals asynchronous completion to Camel.
      callback.done(false)
    }
  }

  override def process(exchange: Exchange): Unit =
    throw new UnsupportedOperationException("Synchronous processing not supported")

  /** Buffers the exchange; returning false tells Camel that completion happens
   * asynchronously (the callback fires later, once the stream writes a reply). */
  override def process(exchange: Exchange, callback: CamelAsyncCallback): Boolean = {
    receivedExchanges.put(AsyncExchange(exchange, callback))
    false
  }
}
/** Akka Streams stage that consumes in-out (request/reply) exchanges from the Camel endpoint
 * at `uri`, emits each request body downstream as a StreamMessage[B] on `out`, and writes the
 * StreamMessage[A] received on `in` back as the exchange's reply. Emitted-but-unreplied
 * exchanges are kept FIFO in `emittedExchanges`, bounded by `capacity`. */
private[akka] class EndpointConsumerReplier[A, B](uri: String, capacity: Int)(implicit streamContext: StreamContext, tag: ClassTag[B])
extends GraphStage[FlowShape[StreamMessage[A], StreamMessage[B]]] {
// Blocking polls of the processor's queue run on the stream context's executor service,
// off the stage's own dispatcher.
private implicit val ec: ExecutionContext =
ExecutionContext.fromExecutorService(streamContext.executorService)
// Maximum time (ms) a single poll waits for an exchange before the poll returns null and is retried.
private val timeout: Long = streamContext.config.getDuration("streamz.camel.consumer.receive.timeout", TimeUnit.MILLISECONDS)
// Replies produced downstream; correlated FIFO with the exchanges emitted on `out`.
val in: Inlet[StreamMessage[A]] =
Inlet("EndpointConsumerReplier.in")
// Request messages extracted from consumed exchanges.
val out: Outlet[StreamMessage[B]] =
Outlet("EndpointConsumerReplier.out")
override val shape: FlowShape[StreamMessage[A], StreamMessage[B]] =
FlowShape.of(in, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
new GraphStageLogic(shape) {
private val processor = new AsyncExchangeProcessor(capacity)
// Exchanges already pushed downstream, awaiting their reply on `in` (FIFO order).
private val emittedExchanges = mutable.Queue.empty[AsyncExchange]
// Bridges the Future-based poll result back onto the stage's thread.
private val consumedCallback = getAsyncCallback(consumed)
// True while a poll Future is in flight; prevents concurrent polls.
private var consuming: Boolean = false
private var consumer: Consumer = _
setHandler(in, new InHandler {
override def onPush(): Unit = {
// A reply arrived: complete the oldest outstanding exchange with it.
val AsyncExchange(ce, ac) = emittedExchanges.dequeue()
ce.setOut(grab(in).camelMessage(streamContext.camelContext))
ac.done(false)
pull(in)
// Completing a reply freed capacity, so resume consuming if downstream is ready.
if (!consuming && isAvailable(out)) consumeAsync()
}
})
setHandler(out, new OutHandler {
override def onPull(): Unit =
if (!consuming && hasCapacity) consumeAsync()
})
// Capacity is bounded by the number of exchanges still awaiting a reply.
private def hasCapacity: Boolean =
emittedExchanges.size < capacity
// Polls the processor's queue off-thread; the result (possibly null on timeout)
// is delivered back to the stage via consumedCallback.
private def consumeAsync(): Unit = {
Future(processor.receivedExchanges.poll(timeout, TimeUnit.MILLISECONDS)).foreach(consumedCallback.invoke)
consuming = true
}
// Runs on the stage's thread with the result of one poll.
private def consumed(asyncExchange: AsyncExchange): Unit = {
consuming = false
asyncExchange match {
case null =>
// Poll timed out with no exchange; keep polling while the stage is open.
if (!isClosed(out)) consumeAsync()
case AsyncExchange(ce, ac) if ce.getPattern != ExchangePattern.InOut =>
// Only request/reply exchanges make sense for a replier stage.
ac.done(false)
failStage(new IllegalArgumentException(s"Exchange pattern ${ExchangePattern.InOut} expected but was ${ce.getPattern}"))
case AsyncExchange(ce, ac) if ce.getException ne null =>
ac.done(false)
failStage(ce.getException)
case ae @ AsyncExchange(ce, ac) =>
// Convert the in-message body to B; a conversion failure fails both the
// exchange and the stage.
Try(StreamMessage.from[B](ce.getIn)) match {
case Success(m) =>
push(out, m)
emittedExchanges.enqueue(ae)
if (hasCapacity && isAvailable(out)) consumeAsync()
case Failure(e) =>
ce.setException(e)
ac.done(false)
failStage(e)
}
}
}
override def preStart(): Unit = {
// Start the Camel consumer feeding the processor, and pre-pull the reply inlet.
consumer = streamContext.consumer(uri, processor)
consumer.start()
pull(in)
}
}
}
| krasserm/streamz | streamz-camel-akka/src/main/scala/streamz/camel/akka/EndpointConsumerReplier.scala | Scala | apache-2.0 | 4,739 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.servlet
import javax.servlet._
import http.{HttpServletRequestWrapper, HttpServletResponse, HttpServletRequest}
import java.lang.String
import org.fusesource.scalate.support.TemplateFinder
import org.fusesource.scalate.util.{Log}
/** Companion object used purely to provide the Log instance imported by the filter class below. */
object TemplateEngineFilter extends Log
/**
* Servlet filter which auto routes to the scalate engines for paths which have a scalate template
* defined.
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
class TemplateEngineFilter extends Filter {
import TemplateEngineFilter._
// Mutable state populated once in init() by the servlet container (Filter lifecycle).
var config: FilterConfig = _
var engine: ServletTemplateEngine = _
var finder: TemplateFinder = _
// URIs tried, in order, when rendering an error page; see showErrorPage.
var errorUris: List[String] = ServletHelper.errorUris()
/**
* Called by the servlet engine to create the template engine and configure this filter
*/
def init(filterConfig: FilterConfig) = {
config = filterConfig
engine = createTemplateEngine(config)
finder = new TemplateFinder(engine)
// Optional init-param: colon-separated list of extensions the finder may substitute.
filterConfig.getInitParameter("replaced-extensions") match {
case null =>
case x =>
finder.replacedExtensions = x.split(":+").toList
}
// register the template engine so they can be easily resolved from elsewhere
ServletTemplateEngine(filterConfig.getServletContext) = engine
}
/**
* Called by the servlet engine on shut down. (Intentionally a no-op: the engine is
* shared via the servlet context and has no per-filter resources to release here.)
*/
def destroy = {
}
/**
* Performs the actual filter: if a template exists for the request path (minus the
* context path), render it; otherwise pass the request down the filter chain.
*/
def doFilter(request: ServletRequest, response: ServletResponse, chain: FilterChain): Unit = {
(request,response) match {
case (request: HttpServletRequest, response: HttpServletResponse) =>
val request_wrapper = wrap(request)
debug("Checking '%s'", request.getRequestURI)
findTemplate(request.getRequestURI.substring(request.getContextPath.length)) match {
case Some(template)=>
debug("Rendering '%s' using template '%s'", request.getRequestURI, template)
val context = new ServletRenderContext(engine, request_wrapper, response, config.getServletContext)
try {
context.include(template, true)
} catch {
// NOTE(review): this catches ALL Throwables (including fatal JVM errors);
// consider scala.util.control.NonFatal -- confirm intent before changing.
case e:Throwable => showErrorPage(request_wrapper, response, e)
}
case None=>
chain.doFilter(request_wrapper, response)
}
case _ =>
// Not an HTTP request/response pair; nothing for us to do.
chain.doFilter(request, response)
}
}
/** Renders a 500 error page using the first errorUris template that exists; rethrows `e`
* if no error template is available. */
def showErrorPage(request: HttpServletRequest, response: HttpServletResponse, e:Throwable):Unit = {
info(e, "failure: %s", e)
// we need to expose all the errors property here...
request.setAttribute("javax.servlet.error.exception", e)
request.setAttribute("javax.servlet.error.exception_type", e.getClass)
request.setAttribute("javax.servlet.error.message", e.getMessage)
request.setAttribute("javax.servlet.error.request_uri", request.getRequestURI)
request.setAttribute("javax.servlet.error.servlet_name", request.getServerName)
request.setAttribute("javax.servlet.error.status_code", 500)
response.setStatus(500)
errorUris.find( x=>findTemplate(x).isDefined ) match {
case Some(template)=>
val context = new ServletRenderContext(engine, request, response, config.getServletContext)
context.include(template, true)
// since we directly rendered the error page.. remove the attributes
// since they screw /w tomcat.
request.removeAttribute("javax.servlet.error.exception")
request.removeAttribute("javax.servlet.error.exception_type")
request.removeAttribute("javax.servlet.error.message")
request.removeAttribute("javax.servlet.error.request_uri")
request.removeAttribute("javax.servlet.error.servlet_name")
request.removeAttribute("javax.servlet.error.status_code")
case None =>
throw e;
}
}
/**
* Allow derived filters to override and customize the template engine from the configuration
*/
protected def createTemplateEngine(config: FilterConfig): ServletTemplateEngine = {
new ServletTemplateEngine(config)
}
protected def findTemplate(name: String) = finder.findTemplate(name)
// Wrap every request so that request dispatchers resolve templates first.
def wrap(request: HttpServletRequest) = new ScalateServletRequestWrapper(request)
/** Request wrapper whose getRequestDispatcher prefers a matching template over the
* container's own dispatcher. */
class ScalateServletRequestWrapper(request: HttpServletRequest) extends HttpServletRequestWrapper(request) {
override def getRequestDispatcher(path: String) = {
findTemplate(path).map( new ScalateRequestDispatcher(_) ).getOrElse( request.getRequestDispatcher(path) )
}
}
/** Dispatcher that renders the given template for both forward and include. */
class ScalateRequestDispatcher(template:String) extends RequestDispatcher {
def forward(request: ServletRequest, response: ServletResponse):Unit = include(request, response)
def include(request: ServletRequest, response: ServletResponse):Unit = {
(request,response) match {
case (request: HttpServletRequest, response: HttpServletResponse) =>
val context = new ServletRenderContext(engine, wrap(request), response, config.getServletContext)
context.include(template, true)
case _ =>
// Non-HTTP dispatch is silently ignored.
None
}
}
}
}
| dnatic09/scalate | scalate-core/src/main/scala/org/fusesource/scalate/servlet/TemplateEngineFilter.scala | Scala | apache-2.0 | 5,820 |
package com.eigengo.lift.common
import akka.actor.{ActorLogging, Actor, ReceiveTimeout}
/** Mixin for cluster-sharded actors that passivates (stops) the actor after it has been idle
 * long enough for ReceiveTimeout to fire. Mix into an Actor and wrap its receive with
 * `withPassivation`. FIX: the Unicode arrows in this trait had been garbled to the
 * letter `β` (invalid Scala); they are restored as `=>`. */
trait AutoPassivation extends ActorLogging {
  this: Actor =>

  import akka.contrib.pattern.ShardRegion.Passivate

  // Handles the passivation handshake with the parent shard region.
  private val passivationReceive: Receive = {
    // Idle timeout fired: ask the shard region to passivate us. It replies with the
    // stop message once no more messages are buffered for this entity.
    case ReceiveTimeout =>
      log.debug("ReceiveTimeout: passivating.")
      context.parent ! Passivate(stopMessage = 'stop)
    // Shard region echoed the stop message: safe to stop now.
    case 'stop =>
      log.debug("'stop: bye-bye, cruel world, see you after recovery.")
      context.stop(self)
  }

  /** Wraps `receive` so the actor's own handlers take priority over the passivation cases. */
  protected def withPassivation(receive: Receive): Receive = receive.orElse(passivationReceive)
}
| lachatak/lift | server/common/src/main/scala/com/eigengo/lift/common/AutoPassivation.scala | Scala | apache-2.0 | 630 |
/**
* Inspiration/references:
* - http://stackoverflow.com/questions/4161460/bootstrapping-a-web-server-in-scala/6432180#6432180
* - http://matt.might.net/articles/pipelined-nonblocking-extensible-web-server-with-coroutines/
*
*/
import java.io.{InputStreamReader, BufferedReader}
import java.net.InetSocketAddress
import java.util.concurrent.Executors
import com.sun.net.httpserver.{HttpExchange, HttpHandler, HttpServer}
import scala.collection.JavaConversions._
/** HTTP verbs understood by this server. Sealed so matches on it are checked for
 * exhaustiveness by the compiler. */
sealed trait HttpMethod
case object GET extends HttpMethod
case object POST extends HttpMethod

/** A single request or response header (the same name may occur more than once). */
final case class HttpHeader(name: String, value: String)

/** An incoming request, with its body fully read into memory. */
final case class HttpRequest(
    headers: List[HttpHeader],
    uri: java.net.URI,
    method: HttpMethod,
    body: String)

/** An outgoing response; defaults to an empty 200 with no declared MIME type. */
final case class HttpResponse(
    headers: List[HttpHeader],
    code: Int = 200,
    mimeType: String = "",
    body: String = "")

/** Error a route action can throw to produce an HTTP error response.
 * (Case-class parameters are already vals; the redundant `val` keywords and the
 * empty `{ }` body were removed.) */
final case class InternalServerError(
    statusCode: Int = 500,
    message: String = "",
    responseBody: String = "") extends Exception
/** One stage in the request-processing pipeline. A handler receives the request plus the
 * response produced so far (None if nothing has answered yet) and returns the possibly
 * updated response. */
abstract class RequestHandler {
  def apply(request: HttpRequest, response: Option[HttpResponse]): Option[HttpResponse]

  /** Composes two handlers left-to-right: `this` runs first, then `handler` sees its result. */
  def ==>(handler: RequestHandler): RequestHandler = {
    val first = this
    new RequestHandler {
      def apply(request: HttpRequest, response: Option[HttpResponse]): Option[HttpResponse] =
        handler(request, first(request, response))
    }
  }
}
/** A request handler that appends `headers()` (re-evaluated per request, so values such as
 * the Date header stay fresh) to whatever response an earlier stage produced. Should come
 * last in the pipeline; a missing response passes through unchanged. */
class AddHeaderRequestHandler(val headers: () => List[HttpHeader]) extends RequestHandler {
  def apply(request: HttpRequest, response: Option[HttpResponse]): Option[HttpResponse] =
    response.map(r => r.copy(headers = r.headers ++ headers()))
}
/** Terminal fallback: if no earlier handler produced a response, answers with a canned
 * 404 page; an existing response passes through untouched. */
class Default404RequestHandler extends RequestHandler {
  // Static HTML body served when no route matched.
  val defaultBody =
    """<html>
<head>
</head>
<body>
No matching resource.
</body>
</html>"""

  val defaultResponse = HttpResponse(headers = List(), code = 404, body = defaultBody)

  def apply(request: HttpRequest, response: Option[HttpResponse]): Option[HttpResponse] =
    response.orElse(Some(defaultResponse))
}
/** Dispatches requests to actions registered per exact path. Unmatched paths yield None so a
 * later handler (e.g. Default404RequestHandler) can respond. An action throwing
 * InternalServerError is converted into an HTTP error response. */
class RoutingRequestHandler extends RequestHandler {
  // Exact-path routing table; populated via `get` before the server starts serving.
  private val routeMaps = new collection.mutable.HashMap[String, (HttpRequest) => HttpResponse]()

  /** Registers `action` to handle requests whose path equals `path`. */
  def get(path: String)(action: (HttpRequest) => HttpResponse) = routeMaps += path -> action

  /** Builds the error response for a failed action.
   * BUG FIX: `ex.responseBody` was previously passed positionally as HttpResponse's third
   * parameter, which is `mimeType`, so error responses always had an EMPTY body. Named
   * arguments now route it into `body`. */
  def internalErrorResponse(ex: InternalServerError) = HttpResponse(
    headers = List[HttpHeader](),
    code = ex.statusCode,
    body = ex.responseBody)

  def apply(request: HttpRequest, response: Option[HttpResponse]): Option[HttpResponse] = {
    val path = request.uri.getPath
    routeMaps.get(path) match {
      case Some(action) => try {
        Some(action(request))
      } catch {
        case ex: InternalServerError => {
          println(ex.message)
          Some(internalErrorResponse(ex))
        }
      }
      case None => None
    }
  }
}
/** Adapts the JDK's com.sun.net.httpserver callback API to the RequestHandler pipeline:
 * converts each HttpExchange into an HttpRequest, runs the pipeline, and writes the
 * resulting HttpResponse (if any) back to the client. */
class SimpleHttpHandler(val handleRequests : RequestHandler) extends HttpHandler {
// Maps the raw method string to the HttpMethod ADT; anything but GET/POST is rejected.
def getVerb(ex : HttpExchange) = ex.getRequestMethod() match {
case "GET" => GET
case "POST" => POST
case m => throw new Exception("Unsupported HTTP method: " + m)
}
// Flattens the multi-valued JDK Headers map into a list of HttpHeader pairs.
// Relies on the scala.collection.JavaConversions._ import at the top of this file
// (deprecated implicit conversions -- consider JavaConverters/.asScala).
def headersToList(hdrs : com.sun.net.httpserver.Headers) =
(hdrs.toList.map { kv => kv._2.map((v) => HttpHeader(kv._1, v)) }).flatten
// Reads the whole request body eagerly into a String.
def getRequestBody(ex : HttpExchange) : String = {
val s = scala.io.Source.fromInputStream(ex.getRequestBody())
s mkString
}
def makeRequest(ex : HttpExchange) = HttpRequest(headers = headersToList(ex.getRequestHeaders()),
uri = ex.getRequestURI(),
method = getVerb(ex),
body = getRequestBody(ex))
// Entry point invoked by the HttpServer for every exchange.
def handle(exchange : HttpExchange) {
val response = handleRequests(makeRequest(exchange), None)
response match {
case Some(r) => {
// NOTE(review): getBytes uses the platform default charset -- confirm UTF-8 is intended.
val bytesToSend = r.body.getBytes
for (h <- r.headers) { exchange.getResponseHeaders().add(h.name, h.value) }
exchange.sendResponseHeaders(r.code, bytesToSend.size)
exchange.getResponseBody().write(bytesToSend)
exchange.close()
}
// No handler answered: close without sending anything.
case None => exchange.close()
}
}
}
/** Thin wrapper around the JDK HttpServer: binds to `address`, routes everything under "/"
 * to `handler`, and serves requests on a fixed pool of 10 threads. */
class SimpleHttpServer(val address : InetSocketAddress, val handler : HttpHandler) {
private val server = HttpServer.create(address, 0)
server.createContext("/", handler)
private val threadPool = Executors.newFixedThreadPool(10)
server.setExecutor(threadPool)
/** the real address the server is listening on; allows using port 0 in address */
val listenAddress = server.getAddress()
def start = server.start()
// Stop immediately (0 seconds grace for in-flight exchanges).
def stop = server.stop(0)
}
/** Demo wiring: one /test route, a 404 fallback, and Server/Date headers appended to every
 * response. Binds to an ephemeral port (port 0) and prints the actual listen address. */
object SimpleServerDemo {
// RFC 1123-style date formatter pinned to GMT, as required for the HTTP Date header.
private val gmtFormat =
new java.text.SimpleDateFormat("E, d MMM yyyy HH:mm:ss 'GMT'",
java.util.Locale.US);
gmtFormat.setTimeZone(java.util.TimeZone.getTimeZone("GMT"));
def runServer() {
// Re-evaluated per response so the Date header reflects the current time.
// NOTE(review): SimpleDateFormat is not thread-safe and this is shared across the
// 10 server threads -- confirm/synchronize if used beyond a demo.
val globalHeaderGen = () => {
List(HttpHeader("Server", "SimpleHttpServerDemo"),
HttpHeader("Date", gmtFormat.format(new java.util.Date())))
}
val routing = new RoutingRequestHandler()
routing.get("/test")((r) => HttpResponse(headers=List(), body="response from test/ route"))
val default404 = new Default404RequestHandler()
val addHeaders = new AddHeaderRequestHandler(globalHeaderGen)
// Pipeline order: route first, then 404 fallback, then append global headers.
val handler = new SimpleHttpHandler(routing ==> default404 ==> addHeaders)
val svr = new SimpleHttpServer(new InetSocketAddress(0), handler)
println(svr.listenAddress.toString)
svr.start
}
}
| cordarei/scala-parser-server | src/main/scala/SimpleHttpServer.scala | Scala | gpl-3.0 | 6,031 |
import org.specs._
// Fixture spec that intentionally fails (1 must_== 2); judging by the sbt-test path,
// presumably used to verify how sbt reports a failing specs Specification defined
// as a class -- TODO confirm against the surrounding scripted test.
class A extends Specification
{
"this" should {
"not work" in { 1 must_== 2 }
}
}
object A extends Specification
{
"this" should {
"not work" in { 1 must_== 2 }
}
} | matheshar/simple-build-tool | src/sbt-test/tests/specs-run/changes/ClassFailModuleFail.scala | Scala | bsd-3-clause | 199 |
import sbt._
import sbt.Keys._
import sbtrelease.ReleasePlugin
import com.typesafe.sbt.pgp.PgpKeys
/** Adds common settings automatically to all subprojects */
object GlobalPlugin extends AutoPlugin {
// Maven group id shared by every published artifact.
val org = "com.sksamuel.avro4s"
// Centralised dependency versions used in projectSettings below.
val AvroVersion = "1.8.1"
val Log4jVersion = "1.2.17"
val ScalatestVersion = "3.0.0"
val ScalaVersion = "2.12.1"
val Slf4jVersion = "1.7.12"
// Auto-activate for all projects once the release plugin is on the classpath.
override def requires = ReleasePlugin
override def trigger = allRequirements
// Settings applied to every subproject: coordinates, cross-building, compiler/javac flags,
// and the common library dependencies (test-only ones scoped to "test").
override def projectSettings = publishingSettings ++ Seq(
organization := org,
scalaVersion := ScalaVersion,
crossScalaVersions := Seq("2.11.8", "2.12.1"),
resolvers += Resolver.mavenLocal,
parallelExecution in Test := false,
scalacOptions := Seq("-unchecked", "-deprecation", "-encoding", "utf8", "-Ywarn-unused-import",
"-Xfatal-warnings", "-feature", "-language:existentials"
),
javacOptions := Seq("-source", "1.7", "-target", "1.7"),
libraryDependencies ++= Seq(
"org.scala-lang" % "scala-reflect" % scalaVersion.value,
"org.apache.avro" % "avro" % AvroVersion,
"org.slf4j" % "slf4j-api" % Slf4jVersion,
"log4j" % "log4j" % Log4jVersion % "test",
"org.slf4j" % "log4j-over-slf4j" % Slf4jVersion % "test",
"org.scalatest" %% "scalatest" % ScalatestVersion % "test"
)
)
// Sonatype/Maven-Central publishing: signed artifacts, cross-built releases, and the POM
// metadata (license, SCM, developers) that Central requires.
val publishingSettings = Seq(
publishMavenStyle := true,
publishArtifact in Test := false,
// Release via PGP-signed publish, cross-building over crossScalaVersions.
ReleasePlugin.autoImport.releasePublishArtifactsAction := PgpKeys.publishSigned.value,
ReleasePlugin.autoImport.releaseCrossBuild := true,
// Snapshot builds go to the snapshots repo; everything else to the staging repo.
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (isSnapshot.value) {
Some("snapshots" at s"${nexus}content/repositories/snapshots")
} else {
Some("releases" at s"${nexus}service/local/staging/deploy/maven2")
}
},
pomExtra := {
<url>https://github.com/sksamuel/avro4s</url>
<licenses>
<license>
<name>MIT</name>
<url>https://opensource.org/licenses/MIT</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<url>git@github.com:sksamuel/avro4s.git</url>
<connection>scm:git@github.com:sksamuel/avro4s.git</connection>
</scm>
<developers>
<developer>
<id>sksamuel</id>
<name>sksamuel</name>
<url>http://github.com/sksamuel</url>
</developer>
</developers>
}
)
} | YuvalItzchakov/avro4s | project/GlobalPlugin.scala | Scala | mit | 2,611 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.common
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.table.api.Types
import org.apache.flink.table.api.config.OptimizerConfigOptions
import org.apache.flink.table.plan.stats.{ColumnStats, TableStats}
import org.apache.flink.table.planner.plan.rules.logical.JoinDeriveNullFilterRule
import org.apache.flink.table.planner.plan.stats.FlinkStatistic
import org.apache.flink.table.planner.utils.{TableTestBase, TableTestUtil}
import org.junit.{Before, Test}
import scala.collection.JavaConversions._
abstract class JoinReorderTestBase extends TableTestBase {
protected val util: TableTestUtil = getTableTestUtil

// Subclasses choose batch vs. stream by supplying the concrete test util.
protected def getTableTestUtil: TableTestUtil

@Before
def setup(): Unit = {
// Registers T1..T5 with hand-crafted statistics (row counts and per-column NDV/size),
// so the cost-based optimizer has distinct cardinalities to reorder joins by.
// Row counts: T1=1M, T2=10K, T3=10, T4=100, T5=500K.
val types = Array[TypeInformation[_]](Types.INT, Types.LONG, Types.STRING)
util.addTableSource("T1", types, Array("a1", "b1", "c1"), FlinkStatistic.builder()
.tableStats(new TableStats(1000000L, Map(
"a1" -> new ColumnStats(1000000L, 0L, 4.0, 4, null, null),
"b1" -> new ColumnStats(10L, 0L, 8.0, 8, null, null)
))).build())
util.addTableSource("T2", types, Array("a2", "b2", "c2"), FlinkStatistic.builder()
.tableStats(new TableStats(10000L, Map(
"a2" -> new ColumnStats(100L, 0L, 4.0, 4, null, null),
"b2" -> new ColumnStats(5000L, 0L, 8.0, 8, null, null)
))).build())
util.addTableSource("T3", types, Array("a3", "b3", "c3"), FlinkStatistic.builder()
.tableStats(new TableStats(10L, Map(
"a3" -> new ColumnStats(5L, 0L, 4.0, 4, null, null),
"b3" -> new ColumnStats(2L, 0L, 8.0, 8, null, null)
))).build())
util.addTableSource("T4", types, Array("a4", "b4", "c4"), FlinkStatistic.builder()
.tableStats(new TableStats(100L, Map(
"a4" -> new ColumnStats(100L, 0L, 4.0, 4, null, null),
"b4" -> new ColumnStats(20L, 0L, 8.0, 8, null, null)
))).build())
util.addTableSource("T5", types, Array("a5", "b5", "c5"), FlinkStatistic.builder()
.tableStats(new TableStats(500000L, Map(
"a5" -> new ColumnStats(200000L, 0L, 4.0, 4, null, null),
"b5" -> new ColumnStats(200L, 0L, 8.0, 8, null, null)
))).build())
// All tests in this suite exercise the join-reorder optimization, so enable it globally.
util.getTableEnv.getConfig.getConfiguration.setBoolean(
OptimizerConfigOptions.TABLE_OPTIMIZER_JOIN_REORDER_ENABLED, true)
}
// Star joins: every table joins against T1, on the high-NDV (a*) columns...
@Test
def testStarJoinCondition1(): Unit = {
val sql =
s"""
|SELECT * FROM T1, T2, T3, T4, T5
|WHERE a1 = a2 AND a1 = a3 AND a1 = a4 AND a1 = a5
""".stripMargin
util.verifyExecPlan(sql)
}

// ...and on the low-NDV (b*) columns, which changes the estimated selectivities.
@Test
def testStarJoinCondition2(): Unit = {
val sql =
s"""
|SELECT * FROM T1, T2, T3, T4, T5
|WHERE b1 = b2 AND b1 = b3 AND b1 = b4 AND b1 = b5
""".stripMargin
util.verifyExecPlan(sql)
}

// Bushy (chain-shaped) join graphs rather than a single star center.
@Test
def testBushyJoinCondition1(): Unit = {
val sql =
s"""
|SELECT * FROM T1, T2, T3, T4, T5
|WHERE a1 = a2 AND a2 = a3 AND a1 = a4 AND a3 = a5
""".stripMargin
util.verifyExecPlan(sql)
}

@Test
def testBushyJoinCondition2(): Unit = {
val sql =
s"""
|SELECT * FROM T1, T2, T3, T4, T5
|WHERE b1 = b2 AND b2 = b3 AND b1 = b4 AND b3 = b5
""".stripMargin
util.verifyExecPlan(sql)
}

// Join keys c1..c5 have no ColumnStats registered in setup(), so the optimizer must
// reorder (or decline to) without NDV information.
@Test
def testWithoutColumnStats(): Unit = {
val sql =
s"""
|SELECT * FROM T1, T2, T3, T4, T5
|WHERE c1 = c2 AND c1 = c3 AND c2 = c4 AND c1 = c5
""".stripMargin
util.verifyExecPlan(sql)
}

@Test
def testJoinWithProject(): Unit = {
val sql =
s"""
|WITH V1 AS (SELECT b1, a1, a2, c2 FROM T1 JOIN T2 ON a1 = a2),
| V2 AS (SELECT a3, b1, a1, c2, c3 FROM V1 JOIN T3 ON a2 = a3),
| V3 AS (SELECT a3, b1, a1, c2, c3, a4, b4 FROM T4 JOIN V2 ON a1 = a4)
|
|SELECT * FROM V3, T5 where a4 = a5
""".stripMargin
// can not reorder now
util.verifyExecPlan(sql)
}

// Filters referencing columns from both join sides sit between the joins.
@Test
def testJoinWithFilter(): Unit = {
val sql =
s"""
|WITH V1 AS (SELECT * FROM T1 JOIN T2 ON a1 = a2 WHERE b1 * b2 > 10),
| V2 AS (SELECT * FROM V1 JOIN T3 ON a2 = a3 WHERE b1 * b3 < 2000),
| V3 AS (SELECT * FROM T4 JOIN V2 ON a3 = a4 WHERE b2 + b4 > 100)
|
|SELECT * FROM V3, T5 WHERE a4 = a5 AND b5 < 15
""".stripMargin
util.verifyExecPlan(sql)
}

// Outer joins act as reorder barriers: only the inner-join subgraphs on either side
// of the outer join may be reordered.
@Test
def testInnerAndLeftOuterJoin(): Unit = {
val sql =
s"""
|SELECT * FROM T1
| JOIN T2 ON a1 = a2
| JOIN T3 ON a2 = a3
| LEFT OUTER JOIN T4 ON a1 = a4
| JOIN T5 ON a4 = a5
""".stripMargin
// T1, T2, T3 can reorder
util.verifyExecPlan(sql)
}

@Test
def testInnerAndRightOuterJoin(): Unit = {
val sql =
s"""
|SELECT * FROM T1
| RIGHT OUTER JOIN T2 ON a1 = a2
| JOIN T3 ON a2 = a3
| JOIN T4 ON a1 = a4
| JOIN T5 ON a4 = a5
""".stripMargin
// T3, T4, T5 can reorder
util.verifyExecPlan(sql)
}

@Test
def testInnerAndFullOuterJoin(): Unit = {
val sql =
s"""
|SELECT * FROM T1
| JOIN T2 ON a1 = a2
| FULL OUTER JOIN T3 ON a2 = a3
| JOIN T4 ON a1 = a4
| JOIN T5 ON a4 = a5
""".stripMargin
util.verifyExecPlan(sql)
}
@Test
def testAllLeftOuterJoin(): Unit = {
val sql =
s"""
|SELECT * FROM T1
| LEFT OUTER JOIN T2 ON a1 = a2
| LEFT OUTER JOIN T3 ON a2 = a3
| LEFT OUTER JOIN T4 ON a1 = a4
| LEFT OUTER JOIN T5 ON a4 = a5
""".stripMargin
// can not reorder
util.verifyExecPlan(sql)
}
@Test
def testAllRightOuterJoin(): Unit = {
val sql =
s"""
|SELECT * FROM T1
| RIGHT OUTER JOIN T2 ON a1 = a2
| RIGHT OUTER JOIN T3 ON a2 = a3
| RIGHT OUTER JOIN T4 ON a1 = a4
| RIGHT OUTER JOIN T5 ON a4 = a5
""".stripMargin
// can not reorder
util.verifyExecPlan(sql)
}
@Test
def testAllFullOuterJoin(): Unit = {
val sql =
s"""
|SELECT * FROM T1
| FULL OUTER JOIN T2 ON a1 = a2
| FULL OUTER JOIN T3 ON a1 = a3
| FULL OUTER JOIN T4 ON a1 = a4
| FULL OUTER JOIN T5 ON a4 = a5
""".stripMargin
// can not reorder
util.verifyExecPlan(sql)
}
@Test
def testDeriveNullFilterAfterJoinReorder(): Unit = {
val types = Array[TypeInformation[_]](Types.INT, Types.LONG)
val builderA = ColumnStats.Builder.builder()
.setNdv(200000L)
.setNullCount(50000L)
.setAvgLen(4.0)
.setMaxLen(4)
val builderB = ColumnStats.Builder.builder()
.setNdv(100000L)
.setNullCount(0L)
.setAvgLen(8.0)
.setMaxLen(8)
util.addTableSource("T6", types, Array("a6", "b6"), FlinkStatistic.builder()
.tableStats(new TableStats(500000L, Map(
"a6" -> builderA.build(),
"b6" -> builderB.build()
))).build())
util.addTableSource("T7", types, Array("a7", "b7"), FlinkStatistic.builder()
.tableStats(new TableStats(500000L, Map(
"a7" -> builderA.build(),
"b7" -> builderB.build()
))).build())
util.addTableSource("T8", types, Array("a8", "b8"), FlinkStatistic.builder()
.tableStats(new TableStats(500000L, Map(
"a8" -> builderA.build(),
"b8" -> builderB.build()
))).build())
util.getTableEnv.getConfig.getConfiguration.setLong(
JoinDeriveNullFilterRule.TABLE_OPTIMIZER_JOIN_NULL_FILTER_THRESHOLD, 10000L)
val sql =
s"""
|SELECT * FROM T6
| INNER JOIN T7 ON b6 = b7
| INNER JOIN T8 ON a6 = a8
|""".stripMargin
util.verifyExecPlan(sql)
}
}
| apache/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/plan/common/JoinReorderTestBase.scala | Scala | apache-2.0 | 8,675 |
package edu.gemini.spModel.io.impl
import edu.gemini.pot.sp.SPNodeKey
import edu.gemini.pot.sp.version._
import edu.gemini.shared.util._
import edu.gemini.spModel.pio._
import scala.collection.JavaConverters._
/**
* Utility for converting from a version vector map to a Pio Container and
* vice versa.
*/
object VersionVectorPio {
  val kind = "versions"

  /** Writes a version map as a Pio Container: one "node" ParamSet per map
   *  entry, holding the node's key plus one int param per database clock. */
  def toContainer(f: PioFactory, vm: VersionMap): Container = {
    val c = f.createContainer(kind, kind, "1.0")
    vm foreach {
      // Every map entry corresponds to a node and its versions. So make a
      // paramset per map entry / node.
      case (key, vv) =>
        val node = f.createParamSet("node")
        c.addParamSet(node)
        // Add the key to identify the node.
        Pio.addParam(f, node, "key", key.toString)
        // Add each database's version of that node.
        vv.clocks foreach {
          case (id, version) => Pio.addIntParam(f, node, id.toString, version)
        }
    }
    c
  }

  /** Reads a version map back out of a Container written by `toContainer`. */
  def toVersions(c: Container): VersionMap = {
    // Get all the container's "node" ParamSet children as a List[Node]
    val nodes = c.getParamSets("node").asScala.toList.asInstanceOf[List[ParamSet]] map { Node(_) }
    // Get a map String -> LifespanId so we can avoid recreating equivalent LifespanIds
    val ids = nodes.foldLeft(Map.empty[String, LifespanId]) { (m, node) =>
      node.versions.unzip._1.foldLeft(m) { (idMap, idStr) =>
        if (idMap.contains(idStr)) idMap else idMap + (idStr -> LifespanId.fromString(idStr))
      }
    }
    // Fold an empty version map over the Nodes, converting each Node into a
    // SPNodeKey -> NodeVersions tuple to add it to the version map.
    nodes.foldLeft(EmptyVersionMap) { (vm, node) => vm + node.toTuple(ids) }.toMap
  }

  private object Node {
    def apply(pset: ParamSet): Node = {
      // Separate the one "key" param from the zero or more db version params
      // for this node. Use partition (not span) so the key param is found
      // regardless of its position among the other params.
      val params = pset.getParams.asScala.toList.asInstanceOf[List[Param]]
      val (keyParams, versionParams) = params.partition(_.getName == "key")
      val nodeKey = new SPNodeKey(keyParams.head.getValue)
      val nodeVersions = versionParams map { p =>
        (p.getName, java.lang.Integer.valueOf(p.getValue))
      }
      Node(nodeKey, nodeVersions)
    }
  }

  private case class Node(key: SPNodeKey, versions: List[(String, java.lang.Integer)]) {
    /** Pairs this node's key with its version vector, resolving LifespanIds
     *  through the shared `ids` cache. */
    def toTuple(ids: Map[String, LifespanId]): (SPNodeKey, NodeVersions) =
      key -> VersionVector(versions map {
        case (idStr, intVal) => ids(idStr) -> intVal
      }: _*)
  }
}
| arturog8m/ocs | bundle/edu.gemini.spModel.io/src/main/scala/edu/gemini/spModel/io/impl/VersionVectorPio.scala | Scala | bsd-3-clause | 2,575 |
package dotty.tools
package dotc
package typer
import core._
import ast._
import Trees._, Constants._, StdNames._, Scopes._, Denotations._
import Contexts._, Symbols._, Types._, SymDenotations._, Names._, NameOps._, Flags._, Decorators._
import ast.desugar, ast.desugar._
import ProtoTypes._
import util.Positions._
import util.{Attachment, SourcePosition, DotClass}
import collection.mutable
import annotation.tailrec
import ErrorReporting._
import tpd.ListOfTreeDecorator
import config.Printers._
import Annotations._
import Inferencing._
import transform.ValueClasses._
import TypeApplications._
import language.implicitConversions
trait NamerContextOps { this: Context =>

  /** Enter symbol into current class, if current class is owner of current context,
   * or into current scope, if not. Should always be called instead of scope.enter
   * in order to make sure that updates to class members are reflected in
   * finger prints.
   */
  def enter(sym: Symbol): Symbol = {
    ctx.owner match {
      case cls: ClassSymbol => cls.enter(sym)  // class member: go through the class so fingerprints update
      case _ => this.scope.openForMutations.enter(sym)  // local definition: enter directly into this scope
    }
    sym
  }

  /** The denotation with the given name in current context */
  def denotNamed(name: Name): Denotation =
    if (owner.isClass)
      if (outer.owner == owner) { // inner class scope; check whether we are referring to self
        if (scope.size == 1) {
          val elem = scope.lastEntry
          if (elem.name == name) return elem.sym.denot // return self
        }
        // the scope of an inner-class context holds at most the self symbol
        assert(scope.size <= 1, scope)
        owner.thisType.member(name)
      }
      else // we are in the outermost context belonging to a class; self is invisible here. See inClassContext.
        owner.findMember(name, owner.thisType, EmptyFlags)
    else
      scope.denotsNamed(name).toDenot(NoPrefix)

  /** Either the current scope, or, if the current context owner is a class,
   * the declarations of the current class.
   */
  def effectiveScope: Scope =
    if (owner != null && owner.isClass) owner.asClass.unforcedDecls
    else scope

  /** The symbol (stored in some typer's symTree) of an enclosing context definition */
  def symOfContextTree(tree: untpd.Tree) = {
    def go(ctx: Context): Symbol = {
      ctx.typeAssigner match {
        case typer: Typer =>
          tree.getAttachment(typer.SymOfTree) match {
            case Some(sym) => sym
            case None =>
              // not recorded by this typer: skip past all contexts sharing it
              // and retry with the next enclosing typer
              var cx = ctx.outer
              while (cx.typeAssigner eq typer) cx = cx.outer
              go(cx)
          }
        case _ => NoSymbol
      }
    }
    go(this)
  }

  /** Context where `sym` is defined, assuming we are in a nested context. */
  def defContext(sym: Symbol) =
    outersIterator
      .dropWhile(_.owner != sym)
      .dropWhile(_.owner == sym)
      .next

  /** The given type, unless `sym` is a constructor, in which case the
   * type of the constructed instance is returned
   */
  def effectiveResultType(sym: Symbol, typeParams: List[Symbol], given: Type) =
    if (sym.name == nme.CONSTRUCTOR) sym.owner.typeRef.appliedTo(typeParams map (_.typeRef))
    else given

  /** if isConstructor, make sure it has one non-implicit parameter list */
  def normalizeIfConstructor(paramSymss: List[List[Symbol]], isConstructor: Boolean) =
    if (isConstructor &&
        (paramSymss.isEmpty || paramSymss.head.nonEmpty && (paramSymss.head.head is Implicit)))
      Nil :: paramSymss  // prepend an empty () section so the constructor has an explicit one
    else
      paramSymss

  /** The method type corresponding to given parameters and result type */
  def methodType(typeParams: List[Symbol], valueParamss: List[List[Symbol]], resultType: Type, isJava: Boolean = false)(implicit ctx: Context): Type = {
    val monotpe =
      (valueParamss :\\ resultType) { (params, resultType) =>
        // pick the method-type constructor matching this parameter section
        val make =
          if (params.nonEmpty && (params.head is Implicit)) ImplicitMethodType
          else if (isJava) JavaMethodType
          else MethodType
        // Java parameter positions: treat Object as Any
        if (isJava)
          for (param <- params)
            if (param.info.isDirectRef(defn.ObjectClass)) param.info = defn.AnyType
        make.fromSymbols(params, resultType)
      }
    if (typeParams.nonEmpty) PolyType.fromSymbols(typeParams, monotpe)
    else if (valueParamss.isEmpty) ExprType(monotpe)  // parameterless def: by-name result
    else monotpe
  }

  /** Find moduleClass/sourceModule in effective scope */
  private def findModuleBuddy(name: Name)(implicit ctx: Context) = {
    val scope = effectiveScope
    val it = scope.lookupAll(name).filter(_ is Module)
    assert(it.hasNext, s"no companion $name in $scope")
    it.next
  }

  /** Add moduleClass or sourceModule functionality to completer
   * for a module or module class
   */
  def adjustModuleCompleter(completer: LazyType, name: Name) =
    if (name.isTermName)
      completer withModuleClass (_ => findModuleBuddy(name.moduleClassName))
    else
      completer withSourceModule (_ => findModuleBuddy(name.sourceModuleName))
}
/** This class creates symbols from definitions and imports and gives them
* lazy types.
*
* Timeline:
*
* During enter, trees are expanded as necessary, populating the expandedTree map.
* Symbols are created, and the symOfTree map is set up.
*
* Symbol completion causes some trees to be already typechecked and typedTree
* entries are created to associate the typed trees with the untyped expanded originals.
*
* During typer, original trees are first expanded using expandedTree. For each
* expanded member definition or import we extract and remove the corresponding symbol
* from the symOfTree map and complete it. We then consult the typedTree map to see
* whether a typed tree exists already. If yes, the typed tree is returned as result.
* Otherwise, we proceed with regular type checking.
*
* The scheme is designed to allow sharing of nodes, as long as each duplicate appears
* in a different method.
*/
class Namer { typer: Typer =>
import untpd._
val TypedAhead = new Attachment.Key[tpd.Tree]  // typed result of a tree typechecked ahead of time (see typedAheadImpl)
val ExpandedTree = new Attachment.Key[Tree]    // desugared version of a definition tree (see expand)
val SymOfTree = new Attachment.Key[Symbol]     // symbol created for an expanded definition tree (see recordSym)
/** A partial map from unexpanded member and pattern defs and to their expansions.
* Populated during enterSyms, emptied during typer.
*/
//lazy val expandedTree = new mutable.AnyRefMap[DefTree, Tree]
/*{
override def default(tree: DefTree) = tree // can't have defaults on AnyRefMaps :-(
}*/
/** A map from expanded MemberDef, PatDef or Import trees to their symbols.
* Populated during enterSyms, emptied at the point a typed tree
* with the same symbol is created (this can be when the symbol is completed
* or at the latest when the tree is typechecked.
*/
//lazy val symOfTree = new mutable.AnyRefMap[Tree, Symbol]
/** A map from expanded trees to their typed versions.
* Populated when trees are typechecked during completion (using method typedAhead).
*/
// lazy val typedTree = new mutable.AnyRefMap[Tree, tpd.Tree]
/** A map from method symbols to nested typers.
* Populated when methods are completed. Emptied when they are typechecked.
* The nested typer contains new versions of the four maps above including this
* one, so that trees that are shared between different DefDefs can be independently
* used as indices. It also contains a scope that contains nested parameters.
*/
lazy val nestedTyper = new mutable.AnyRefMap[Symbol, Typer]  // populated when a DefDef is completed (see Completer.typeSig)
/** The scope of the typer.
 * For nested typers this is a place parameters are entered during completion
 * and where they survive until typechecking. A context with this typer also
 * has this scope.
 */
val scope = newScope
/** The symbol of the given expanded tree: the symbol of its typed-ahead
 *  version if one exists, otherwise the symbol recorded at creation time. */
def symbolOfTree(tree: Tree)(implicit ctx: Context): Symbol = {
  val definition = expanded(tree)
  definition.getAttachment(TypedAhead).fold(definition.attachment(SymOfTree))(_.symbol)
}
/** The enclosing class with given name; error if none exists */
def enclosingClassNamed(name: TypeName, pos: Position)(implicit ctx: Context): Symbol =
  if (name.isEmpty) NoSymbol
  else {
    val candidate = ctx.owner.enclosingClassNamed(name)
    if (!candidate.exists) ctx.error(s"no enclosing class or object is named $name", pos)
    candidate
  }
/** Record `sym` as the symbol defined by `tree` */
def recordSym(sym: Symbol, tree: Tree)(implicit ctx: Context): Symbol = {
  // NOTE(review): `References` appears to collect trees that refer to this
  // definition before its symbol exists; each gets its OriginalSymbol set
  // now that the symbol is known — confirm against the desugaring phase.
  val refs = tree.attachmentOrElse(References, Nil)
  if (refs.nonEmpty) {
    tree.removeAttachment(References)
    refs foreach (_.pushAttachment(OriginalSymbol, sym))
  }
  tree.pushAttachment(SymOfTree, sym)  // later retrieved via symbolOfTree
  sym
}
/** If this tree is a member def or an import, create a symbol of it
 * and store in symOfTree map.
 */
def createSymbol(tree: Tree)(implicit ctx: Context): Symbol = {
  // Resolve a `private[C]` qualifier to the enclosing class it names.
  def privateWithinClass(mods: Modifiers) =
    enclosingClassNamed(mods.privateWithin, mods.pos)
  // Reject modifier flags that do not fit the definition's kind (type vs term),
  // reporting an error and keeping only the compatible subset.
  def checkFlags(flags: FlagSet) =
    if (flags.isEmpty) flags
    else {
      val (ok, adapted, kind) = tree match {
        case tree: TypeDef => (flags.isTypeFlags, flags.toTypeFlags, "type")
        case _ => (flags.isTermFlags, flags.toTermFlags, "value")
      }
      if (!ok)
        ctx.error(i"modifier(s) `$flags' incompatible with $kind definition", tree.pos)
      adapted
    }
  /** Add moduleClass/sourceModule to completer if it is for a module val or class */
  def adjustIfModule(completer: LazyType, tree: MemberDef) =
    if (tree.mods is Module) ctx.adjustModuleCompleter(completer, tree.name.encode)
    else completer
  typr.println(i"creating symbol for $tree in ${ctx.mode}")
  // Report conflicting definitions; on error, returns a freshened name so
  // symbol creation can still proceed.
  def checkNoConflict(name: Name): Name = {
    def errorName(msg: => String) = {
      ctx.error(msg, tree.pos)
      name.freshened
    }
    def preExisting = ctx.effectiveScope.lookup(name)
    if (ctx.owner is PackageClass)
      // package members may be redefined across runs, but not within one run
      if (preExisting.isDefinedInCurrentRun)
        errorName(s"${preExisting.showLocated} is compiled twice")
      else name
    else
      if ((!ctx.owner.isClass || name.isTypeName) && preExisting.exists)
        errorName(i"$name is already defined as $preExisting")
      else name
  }
  val inSuperCall = if (ctx.mode is Mode.InSuperCall) InSuperCall else EmptyFlags
  tree match {
    case tree: TypeDef if tree.isClassDef =>
      val name = checkNoConflict(tree.name.encode).asTypeName
      val flags = checkFlags(tree.mods.flags &~ Implicit)
      val cls = recordSym(ctx.newClassSymbol(
        ctx.owner, name, flags | inSuperCall,
        cls => adjustIfModule(new ClassCompleter(cls, tree)(ctx), tree),
        privateWithinClass(tree.mods), tree.pos, ctx.source.file), tree)
      // index the class's parameter section right away (see ClassCompleter.init)
      cls.completer.asInstanceOf[ClassCompleter].init()
      cls
    case tree: MemberDef =>
      val name = checkNoConflict(tree.name.encode)
      val flags = checkFlags(tree.mods.flags)
      val isDeferred = lacksDefinition(tree)
      val deferred = if (isDeferred) Deferred else EmptyFlags
      val method = if (tree.isInstanceOf[DefDef]) Method else EmptyFlags
      val inSuperCall1 = if (tree.mods is ParamOrAccessor) EmptyFlags else inSuperCall
        // suppress inSuperCall for constructor parameters
      val higherKinded = tree match {
        case tree: TypeDef if tree.tparams.nonEmpty && isDeferred => HigherKinded
        case _ => EmptyFlags
      }
      // to complete a constructor, move one context further out -- this
      // is the context enclosing the class. Note that the context in which a
      // constructor is recorded and the context in which it is completed are
      // different: The former must have the class as owner (because the
      // constructor is owned by the class), the latter must not (because
      // constructor parameters are interpreted as if they are outside the class).
      // Don't do this for Java constructors because they need to see the import
      // of the companion object, and it is not necessary for them because they
      // have no implementation.
      val cctx = if (tree.name == nme.CONSTRUCTOR && !(tree.mods is JavaDefined)) ctx.outer else ctx
      recordSym(ctx.newSymbol(
        ctx.owner, name, flags | deferred | method | higherKinded | inSuperCall1,
        adjustIfModule(new Completer(tree)(cctx), tree),
        privateWithinClass(tree.mods), tree.pos), tree)
    case tree: Import =>
      recordSym(ctx.newSymbol(
        ctx.owner, nme.IMPORT, Synthetic, new Completer(tree), NoSymbol, tree.pos), tree)
    case _ =>
      // not a definition or import: nothing to create
      NoSymbol
  }
}
/** If `sym` exists, enter it in effective scope. Check that
 * package members are not entered twice in the same run.
 */
def enterSymbol(sym: Symbol)(implicit ctx: Context) = {
  if (!sym.exists) sym
  else {
    typr.println(s"entered: $sym in ${ctx.owner} and ${ctx.effectiveScope}")
    ctx.enter(sym)
  }
}
/** Create package if it does not yet exist. */
private def createPackageSymbol(pid: RefTree)(implicit ctx: Context): Symbol = {
  val pkgOwner = pid match {
    // simple name: top-level packages hang off the root, not the empty package
    case Ident(_) => if (ctx.owner eq defn.EmptyPackageClass) defn.RootClass else ctx.owner
    // qualified name: create/find the prefix package first, then nest inside it
    case Select(qual: RefTree, _) => createPackageSymbol(qual).moduleClass
  }
  val existing = pkgOwner.info.decls.lookup(pid.name)
  // reuse an existing package symbol only if it is really owned by this prefix
  if ((existing is Package) && (pkgOwner eq existing.owner)) existing
  else ctx.newCompletePackageSymbol(pkgOwner, pid.name.asTermName).entered
}
/** Expand tree and store in `expandedTree` */
def expand(tree: Tree)(implicit ctx: Context): Unit = tree match {
  case mdef: DefTree =>
    val expanded = desugar.defTree(mdef)
    typr.println(i"Expansion: $mdef expands to $expanded")
    // only record an attachment when desugaring actually produced a new tree
    if (expanded ne mdef) mdef.pushAttachment(ExpandedTree, expanded)
  case _ =>
}
/** The expanded version of this tree, or tree itself if not expanded */
def expanded(tree: Tree)(implicit ctx: Context): Tree = tree match {
  case defTree: DefTree => defTree.attachmentOrElse(ExpandedTree, defTree)
  case other => other
}
/** A new context that summarizes an import statement */
def importContext(sym: Symbol, selectors: List[Tree])(implicit ctx: Context) = {
  val info = new ImportInfo(sym, selectors)
  ctx.fresh.setImportInfo(info)
}
/** A new context for the interior of a class. If a named self symbol is
 *  given, it is entered into the fresh context's scope so members can see it. */
def inClassContext(selfInfo: DotClass /* Should be Type | Symbol*/)(implicit ctx: Context): Context = {
  val nestedCtx: Context = ctx.fresh.setNewScope
  selfInfo match {
    case self: Symbol if self.exists && self.name != nme.WILDCARD =>
      nestedCtx.scope.openForMutations.enter(self)
    case _ =>
  }
  nestedCtx
}
/** For all class definitions `stat` in `xstats`: If the companion class if not also defined
* in `xstats`, invalidate it by setting its info to NoType.
*/
def invalidateCompanions(pkg: Symbol, xstats: List[untpd.Tree])(implicit ctx: Context): Unit = {
val definedNames = xstats collect { case stat: NameTree => stat.name }
def invalidate(name: TypeName) =
if (!(definedNames contains name)) {
val member = pkg.info.decl(name).asSymDenotation
if (member.isClass && !(member is Package)) member.info = NoType
}
xstats foreach {
case stat: TypeDef if stat.isClassDef =>
invalidate(stat.name.moduleClassName)
case _ =>
}
}
/** Expand tree and create top-level symbols for statement and enter them into symbol table */
def index(stat: Tree)(implicit ctx: Context): Context = {
  expand(stat)           // desugar first so symbols are created for the expansion
  indexExpanded(stat)
}
/** Create top-level symbols for all statements in the expansion of this statement and
 * enter them into symbol table
 */
def indexExpanded(stat: Tree)(implicit ctx: Context): Context = expanded(stat) match {
  case pcl: PackageDef =>
    // index package contents under the package's module class as owner
    val pkg = createPackageSymbol(pcl.pid)
    index(pcl.stats)(ctx.fresh.setOwner(pkg.moduleClass))
    invalidateCompanions(pkg, Trees.flatten(pcl.stats map expanded))
    ctx
  case imp: Import =>
    // imports influence name resolution of what follows: return an import context
    importContext(createSymbol(imp), imp.selectors)
  case mdef: DefTree =>
    enterSymbol(createSymbol(mdef))
    ctx
  case stats: Thicket =>
    // a desugared definition may expand to several trees
    for (tree <- stats.toList) enterSymbol(createSymbol(tree))
    ctx
  case _ =>
    ctx
}
/** Create top-level symbols for statements and enter them into symbol table */
def index(stats: List[Tree])(implicit ctx: Context): Context = {
  // class/module definitions seen in `stats`, keyed by (type) name
  val classDef = mutable.Map[TypeName, TypeDef]()
  val moduleDef = mutable.Map[TypeName, TypeDef]()
  /** Merge the definitions of a synthetic companion generated by a case class
   * and the real companion, if both exist.
   */
  def mergeCompanionDefs() = {
    for (cdef @ TypeDef(name, _) <- stats)
      if (cdef.isClassDef) {
        classDef(name) = cdef
        // a case class expands to (class, module val, module class, ...)
        cdef.attachmentOrElse(ExpandedTree, cdef) match {
          case Thicket(cls :: mval :: (mcls @ TypeDef(_, _: Template)) :: crest) =>
            moduleDef(name) = mcls
          case _ =>
        }
      }
    for (mdef @ ModuleDef(name, _) <- stats if !mdef.mods.is(Flags.Package)) {
      val typName = name.toTypeName
      // a module def expands to (module val, module class)
      val Thicket(vdef :: (mcls @ TypeDef(_, impl: Template)) :: Nil) = mdef.attachment(ExpandedTree)
      moduleDef(typName) = mcls
      classDef get name.toTypeName match {
        case Some(cdef) =>
          cdef.attachmentOrElse(ExpandedTree, cdef) match {
            case Thicket(cls :: mval :: TypeDef(_, compimpl: Template) :: crest) =>
              // fold the synthetic companion's members into the explicit one
              // and drop the synthetic companion from the class's expansion
              val mcls1 = cpy.TypeDef(mcls)(
                rhs = cpy.Template(impl)(body = compimpl.body ++ impl.body))
              mdef.putAttachment(ExpandedTree, Thicket(vdef :: mcls1 :: Nil))
              moduleDef(typName) = mcls1
              cdef.putAttachment(ExpandedTree, Thicket(cls :: crest))
            case _ =>
          }
        case none =>
      }
    }
  }
  // install the COMPANION_CLASS/COMPANION_MODULE accessor methods on both sides
  def createLinks(classTree: TypeDef, moduleTree: TypeDef)(implicit ctx: Context) = {
    val claz = ctx.denotNamed(classTree.name.encode).symbol
    val modl = ctx.denotNamed(moduleTree.name.encode).symbol
    ctx.synthesizeCompanionMethod(nme.COMPANION_CLASS_METHOD, claz, modl).entered
    ctx.synthesizeCompanionMethod(nme.COMPANION_MODULE_METHOD, modl, claz).entered
  }
  def createCompanionLinks(implicit ctx: Context): Unit = {
    for (cdef @ TypeDef(name, _) <- classDef.values) {
      moduleDef.getOrElse(name, EmptyTree) match {
        case t: TypeDef =>
          createLinks(cdef, t)
        case EmptyTree =>
      }
    }
  }
  // 1) expand all statements, 2) reconcile companions, 3) enter symbols,
  // 4) wire up companion links in the resulting context
  stats foreach expand
  mergeCompanionDefs()
  val ctxWithStats = (ctx /: stats) ((ctx, stat) => indexExpanded(stat)(ctx))
  createCompanionLinks(ctxWithStats)
  ctxWithStats
}
/** The completer of a symbol defined by a member def or import (except ClassSymbols) */
class Completer(val original: Tree)(implicit ctx: Context) extends TypeParamsCompleter {

  protected def localContext(owner: Symbol) = ctx.fresh.setOwner(owner).setTree(original)

  private var myTypeParams: List[TypeSymbol] = null  // lazily computed by completerTypeParams
  private var nestedCtx: Context = null              // context holding the entered type params

  /** The type parameter symbols of `sym`, entering and completing them
   *  in a fresh nested scope on first use. */
  def completerTypeParams(sym: Symbol): List[TypeSymbol] = {
    if (myTypeParams == null) {
      //println(i"completing type params of $sym in ${sym.owner}")
      myTypeParams = original match {
        case tdef: TypeDef =>
          nestedCtx = localContext(sym).setNewScope
          locally {
            implicit val ctx: Context = nestedCtx
            completeParams(tdef.tparams)
            tdef.tparams.map(symbolOfTree(_).asType)
          }
        case _ =>
          Nil
      }
    }
    myTypeParams
  }

  // The signature of the symbol, dispatched on the kind of the original tree.
  private def typeSig(sym: Symbol): Type = original match {
    case original: ValDef =>
      if (sym is Module) moduleValSig(sym)
      else valOrDefDefSig(original, sym, Nil, Nil, identity)(localContext(sym).setNewScope)
    case original: DefDef =>
      // each method gets its own nested typer (see nestedTyper)
      val typer1 = ctx.typer.newLikeThis
      nestedTyper(sym) = typer1
      typer1.defDefSig(original, sym)(localContext(sym).setTyper(typer1))
    case original: TypeDef =>
      assert(!original.isClassDef)  // classes are handled by ClassCompleter
      typeDefSig(original, sym, completerTypeParams(sym))(nestedCtx)
    case imp: Import =>
      try {
        val expr1 = typedAheadExpr(imp.expr, AnySelectionProto)
        ImportType(expr1)
      } catch {
        case ex: CyclicReference =>
          typr.println(s"error while completing ${imp.expr}")
          throw ex
      }
  }

  final override def complete(denot: SymDenotation)(implicit ctx: Context) = {
    // diagnostic only: warn when completing in a typer state other than the
    // one the completer was created in
    if (completions != noPrinter && ctx.typerState != this.ctx.typerState) {
      completions.println(completions.getClass.toString)
      def levels(c: Context): Int =
        if (c.typerState eq this.ctx.typerState) 0
        else if (c.typerState == null) -1
        else if (c.outer.typerState == c.typerState) levels(c.outer)
        else levels(c.outer) + 1
      completions.println(s"!!!completing ${denot.symbol.showLocated} in buried typerState, gap = ${levels(ctx)}")
    }
    completeInCreationContext(denot)
  }

  // Attach the original tree's annotations to the denotation, deferring
  // their typechecking until the annotation is actually needed.
  protected def addAnnotations(denot: SymDenotation): Unit = original match {
    case original: untpd.MemberDef =>
      for (annotTree <- untpd.modsDeco(original).mods.annotations) {
        val cls = typedAheadAnnotation(annotTree)
        val ann = Annotation.deferred(cls, implicit ctx => typedAnnotation(annotTree))
        denot.addAnnotation(ann)
      }
    case _ =>
  }

  /** Intentionally left without `implicit ctx` parameter. We need
   * to pick up the context at the point where the completer was created.
   */
  def completeInCreationContext(denot: SymDenotation): Unit = {
    denot.info = typeSig(denot.symbol)
    addAnnotations(denot)
    Checking.checkWellFormed(denot.symbol)
  }
}
/** Completer for class symbols: elaborates parents, self type and members. */
class ClassCompleter(cls: ClassSymbol, original: TypeDef)(ictx: Context) extends Completer(original)(ictx) {
  withDecls(newScope)

  protected implicit val ctx: Context = localContext(cls).setMode(ictx.mode &~ Mode.InSuperCall)

  val TypeDef(name, impl @ Template(constr, parents, self, _)) = original

  // split the template body into the leading parameter section and the rest
  val (params, rest) = impl.body span {
    case td: TypeDef => td.mods is Param
    case vd: ValDef => vd.mods is ParamAccessor
    case _ => false
  }

  // called eagerly from createSymbol so parameters are visible early
  def init() = index(params)

  /** The type signature of a ClassDef with given symbol */
  override def completeInCreationContext(denot: SymDenotation): Unit = {

    /* The type of a parent constructor. Types constructor arguments
     * only if parent type contains uninstantiated type parameters.
     */
    def parentType(parent: untpd.Tree)(implicit ctx: Context): Type =
      if (parent.isType) {
        typedAheadType(parent).tpe
      } else {
        // parent is a constructor call: peel off `new C[Ts](...)`
        val (core, targs) = stripApply(parent) match {
          case TypeApply(core, targs) => (core, targs)
          case core => (core, Nil)
        }
        val Select(New(tpt), nme.CONSTRUCTOR) = core
        val targs1 = targs map (typedAheadType(_))
        val ptype = typedAheadType(tpt).tpe appliedTo targs1.tpes
        // fully applied: type args suffice; otherwise infer from the arguments
        if (ptype.typeParams.isEmpty) ptype
        else typedAheadExpr(parent).tpe
      }

    /* Check parent type tree `parent` for the following well-formedness conditions:
     * (1) It must be a class type with a stable prefix (@see checkClassTypeWithStablePrefix)
     * (2) If may not derive from itself
     * (3) Overriding type parameters must be correctly forwarded. (@see checkTypeParamOverride)
     */
    def checkedParentType(parent: untpd.Tree, paramAccessors: List[Symbol]): Type = {
      val ptype = parentType(parent)(ctx.superCallContext)
      if (cls.isRefinementClass) ptype
      else {
        val pt = checkClassTypeWithStablePrefix(ptype, parent.pos, traitReq = parent ne parents.head)
        if (pt.derivesFrom(cls)) {
          val addendum = parent match {
            case Select(qual: Super, _) if ctx.scala2Mode =>
              "\\n(Note that inheriting a class of the same name is no longer allowed)"
            case _ => ""
          }
          ctx.error(i"cyclic inheritance: $cls extends itself$addendum", parent.pos)
          defn.ObjectType  // recover with Object as parent
        }
        else if (!paramAccessors.forall(checkTypeParamOverride(pt, _)))
          defn.ObjectType
        else pt
      }
    }

    /* Check that every parameter with the same name as a visible named parameter in the parent
     * class satisfies the following two conditions:
     * (1) The overriding parameter is also named (i.e. not local/name mangled).
     * (2) The overriding parameter is passed on directly to the parent parameter, or the
     *     parent parameter is not fully defined.
     * @return true if conditions are satisfied, false otherwise.
     */
    def checkTypeParamOverride(parent: Type, paramAccessor: Symbol): Boolean = {
      var ok = true
      val pname = paramAccessor.name
      def illegal(how: String): Unit = {
        ctx.error(d"Illegal override of public type parameter $pname in $parent$how", paramAccessor.pos)
        ok = false
      }
      // walk the refinement chain looking for a refinement of `pname`
      def checkAlias(tp: Type): Unit = tp match {
        case tp: RefinedType =>
          if (tp.refinedName == pname)
            tp.refinedInfo match {
              case TypeAlias(alias) =>
                alias match {
                  case TypeRef(pre, name1) if name1 == pname && (pre =:= cls.thisType) =>
                    // OK, parameter is passed on directly
                  case _ =>
                    illegal(d".\\nParameter is both redeclared and instantiated with $alias.")
                }
              case _ => // OK, argument is not fully defined
            }
          else checkAlias(tp.parent)
        case _ =>
      }
      if (parent.nonPrivateMember(paramAccessor.name).symbol.is(Param))
        if (paramAccessor is Private)
          illegal("\\nwith private parameter. Parameter definition needs to be prefixed with `type'.")
        else
          checkAlias(parent)
      ok
    }

    // resolve the template's self declaration to NoType, the module's own
    // type, or a freshly created self symbol
    val selfInfo =
      if (self.isEmpty) NoType
      else if (cls.is(Module)) {
        val moduleType = cls.owner.thisType select sourceModule
        if (self.name == nme.WILDCARD) moduleType
        else recordSym(
          ctx.newSymbol(cls, self.name, self.mods.flags, moduleType, coord = self.pos),
          self)
      }
      else createSymbol(self)
    // pre-set info, so that parent types can refer to type params
    denot.info = ClassInfo(cls.owner.thisType, cls, Nil, decls, selfInfo)
    // Ensure constructor is completed so that any parameter accessors
    // which have type trees deriving from its parameters can be
    // completed in turn. Note that parent types access such parameter
    // accessors, that's why the constructor needs to be completed before
    // the parent types are elaborated.
    index(constr)
    symbolOfTree(constr).ensureCompleted()
    val tparamAccessors = decls.filter(_ is TypeParamAccessor).toList
    val parentTypes = ensureFirstIsClass(parents.map(checkedParentType(_, tparamAccessors)))
    val parentRefs = ctx.normalizeToClassRefs(parentTypes, cls, decls)
    typr.println(s"completing $denot, parents = $parents, parentTypes = $parentTypes, parentRefs = $parentRefs")
    // now index the remaining members and install the final class info
    index(rest)(inClassContext(selfInfo))
    denot.info = ClassInfo(cls.owner.thisType, cls, parentRefs, decls, selfInfo)
    addAnnotations(denot)
    Checking.checkWellFormed(cls)
    if (isDerivedValueClass(cls)) cls.setFlag(Final)
    cls.setApplicableFlags(
      (NoInitsInterface /: impl.body)((fs, stat) => fs & defKind(stat)))
  }
}
/** Typecheck tree during completion, and remember result in typedtree map */
private def typedAheadImpl(tree: Tree, pt: Type)(implicit ctx: Context): tpd.Tree = {
  val xtree = expanded(tree)
  xtree.getAttachment(TypedAhead).getOrElse {
    val ttree = typer.typed(tree, pt)
    xtree.pushAttachment(TypedAhead, ttree)
    ttree
  }
}
// Typecheck ahead in type mode (pattern/type modes retracted, Type mode added).
def typedAheadType(tree: Tree, pt: Type = WildcardType)(implicit ctx: Context): tpd.Tree =
  typedAheadImpl(tree, pt)(ctx retractMode Mode.PatternOrType addMode Mode.Type)

// Typecheck ahead in expression mode (pattern/type modes retracted).
def typedAheadExpr(tree: Tree, pt: Type = WildcardType)(implicit ctx: Context): tpd.Tree =
  typedAheadImpl(tree, pt)(ctx retractMode Mode.PatternOrType)
/** The class symbol of an annotation tree, found by stripping (type)
 *  applications and the constructor selection down to the `New` node.
 *  The match is deliberately not exhaustive: any other shape is an
 *  unexpected annotation tree and surfaces as a MatchError.
 */
def typedAheadAnnotation(tree: Tree)(implicit ctx: Context): Symbol = tree match {
  case Apply(fn, _) => typedAheadAnnotation(fn)
  case TypeApply(fn, _) => typedAheadAnnotation(fn)
  case Select(qual, nme.CONSTRUCTOR) => typedAheadAnnotation(qual)
  case New(tpt) => typedAheadType(tpt).tpe.classSymbol
}
/** Enter and typecheck parameter list */
def completeParams(params: List[MemberDef])(implicit ctx: Context) = {
  index(params)
  params.foreach(typedAheadExpr(_))
}
/** The signature of a module valdef.
 * This will compute the corresponding module class TypeRef immediately
 * without going through the defined type of the ValDef. This is necessary
 * to avoid cyclic references involving imports and module val defs.
 */
def moduleValSig(sym: Symbol)(implicit ctx: Context): Type = {
  val clsName = sym.name.moduleClassName
  // look the module class up by name, keeping only ModuleClass denotations
  val cls = ctx.denotNamed(clsName) suchThat (_ is ModuleClass)
  ctx.owner.thisType select (clsName, cls)
}
  /** The type signature of a ValDef or DefDef
   *  @param mdef       The definition
   *  @param sym        Its symbol
   *  @param typeParams The symbols of the definition's type parameters
   *  @param paramss    The symbols of the definition's value parameters
   *  @param paramFn    A wrapping function that produces the type of the
   *                    defined symbol, given its final return type
   */
  def valOrDefDefSig(mdef: ValOrDefDef, sym: Symbol, typeParams: List[Symbol], paramss: List[List[Symbol]], paramFn: Type => Type)(implicit ctx: Context): Type = {
    // Result type when no explicit type annotation is given: prefer a type
    // inherited from overridden members, otherwise infer from the rhs.
    def inferredType = {
      /** A type for this definition that might be inherited from elsewhere:
       *  If this is a setter parameter, the corresponding getter type.
       *  If this is a class member, the conjunction of all result types
       *  of overridden methods.
       *  NoType if neither case holds.
       */
      val inherited =
        if (sym.owner.isTerm) NoType
        else {
          // TODO: Look only at member of supertype instead?
          lazy val schema = paramFn(WildcardType)
          val site = sym.owner.thisType
          // Fold over all base classes, intersecting (`&`) each matching
          // overridden member's result type as seen from this class.
          ((NoType: Type) /: sym.owner.info.baseClasses.tail) { (tp, cls) =>
            val iRawInfo =
              cls.info.nonPrivateDecl(sym.name).matchingDenotation(site, schema).info
            // Only usable if the type-parameter arity matches exactly.
            val iInstInfo = iRawInfo match {
              case iRawInfo: PolyType =>
                if (iRawInfo.paramNames.length == typeParams.length)
                  iRawInfo.instantiate(typeParams map (_.typeRef))
                else NoType
              case _ =>
                if (typeParams.isEmpty) iRawInfo
                else NoType
            }
            val iResType = iInstInfo.finalResultType.asSeenFrom(site, cls)
            if (iResType.exists)
              typr.println(i"using inherited type for ${mdef.name}; raw: $iRawInfo, inst: $iInstInfo, inherited: $iResType")
            tp & iResType
          }
        }
      /** The proto-type to be used when inferring the result type from
       *  the right hand side. This is `WildcardType` except if the definition
       *  is a default getter. In that case, the proto-type is the type of
       *  the corresponding parameter where bound parameters are replaced by
       *  Wildcards.
       */
      def rhsProto = {
        val name = sym.asTerm.name
        val idx = name.defaultGetterIndex
        if (idx < 0) WildcardType
        else {
          // Locate the method this default getter belongs to.
          val original = name.defaultGetterToMethod
          val meth: Denotation =
            if (original.isConstructorName && (sym.owner is ModuleClass))
              sym.owner.companionClass.info.decl(nme.CONSTRUCTOR)
            else
              ctx.defContext(sym).denotNamed(original)
          // Walk the flattened parameter lists to the idx-th parameter.
          def paramProto(paramss: List[List[Type]], idx: Int): Type = paramss match {
            case params :: paramss1 =>
              if (idx < params.length) wildApprox(params(idx))
              else paramProto(paramss1, idx - params.length)
            case nil =>
              WildcardType
          }
          // Only safe if exactly one alternative has default parameters.
          val defaultAlts = meth.altsWith(_.hasDefaultParams)
          if (defaultAlts.length == 1)
            paramProto(defaultAlts.head.info.widen.paramTypess, idx)
          else
            WildcardType
        }
      }
      // println(s"final inherited for $sym: ${inherited.toString}") !!!
      // println(s"owner = ${sym.owner}, decls = ${sym.owner.info.decls.show}")
      // A `final val` (not a method) keeps its constant type.
      def isInline = sym.is(Final, butNot = Method)
      def widenRhs(tp: Type): Type = tp.widenTermRefExpr match {
        case tp: ConstantType if isInline => tp
        case _ => tp.widen.approximateUnion
      }
      val rhsCtx = ctx.addMode(Mode.InferringReturnType)
      def rhsType = typedAheadExpr(mdef.rhs, inherited orElse rhsProto)(rhsCtx).tpe
      def cookedRhsType = ctx.deskolemize(widenRhs(rhsType))
      lazy val lhsType = fullyDefinedType(cookedRhsType, "right-hand side", mdef.pos)
      //if (sym.name.toString == "y") println(i"rhs = $rhsType, cooked = $cookedRhsType")
      if (inherited.exists)
        if (sym.is(Final, butNot = Method) && lhsType.isInstanceOf[ConstantType])
          lhsType // keep constant types that fill in for a non-constant (to be revised when inline has landed).
        else inherited
      else {
        // Implicits must always carry an explicit (result) type.
        if (sym is Implicit) {
          val resStr = if (mdef.isInstanceOf[DefDef]) "result " else ""
          ctx.error(d"${resStr}type of implicit definition needs to be given explicitly", mdef.pos)
          sym.resetFlag(Implicit)
        }
        lhsType orElse WildcardType
      }
    }
    // Expected type for typing the declared type tree itself.
    val tptProto = mdef.tpt match {
      case _: untpd.DerivedTypeTree =>
        WildcardType
      case TypeTree(untpd.EmptyTree) =>
        inferredType
      case TypedSplice(tpt: TypeTree) if !isFullyDefined(tpt.tpe, ForceDegree.none) =>
        // Partially defined spliced type: type the rhs against it, and for
        // anonymous functions check the result type can be made hygienic
        // (i.e. free of references to the lambda's own parameters).
        val rhsType = typedAheadExpr(mdef.rhs, tpt.tpe).tpe
        mdef match {
          case mdef: DefDef if mdef.name == nme.ANON_FUN =>
            val hygienicType = avoid(rhsType, paramss.flatten)
            if (!(hygienicType <:< tpt.tpe))
              ctx.error(i"return type ${tpt.tpe} of lambda cannot be made hygienic;\\n" +
                i"it is not a supertype of the hygienic type $hygienicType", mdef.pos)
            //println(i"lifting $rhsType over $paramss -> $hygienicType = ${tpt.tpe}")
            //println(TypeComparer.explained { implicit ctx => hygienicType <:< tpt.tpe })
          case _ =>
        }
        WildcardType
      case _ =>
        WildcardType
    }
    paramFn(typedAheadType(mdef.tpt, tptProto).tpe)
  }
  /** The type signature of a DefDef with given symbol.
   *  Constructors get `Unit` as the tree's result type while the symbol's
   *  effective result type is the enclosing class; all other methods
   *  delegate to `valOrDefDefSig`.
   */
  def defDefSig(ddef: DefDef, sym: Symbol)(implicit ctx: Context) = {
    val DefDef(name, tparams, vparamss, _, _) = ddef
    val isConstructor = name == nme.CONSTRUCTOR
    // The following 3 lines replace what was previously just completeParams(tparams).
    // But that can cause bad bounds being computed, as witnessed by
    // tests/pos/paramcycle.scala. The problematic sequence is this:
    //   0. Class constructor gets completed.
    //   1. Type parameter CP of constructor gets completed
    //   2. As a first step CP's bounds are set to Nothing..Any.
    //   3. CP's real type bound demands the completion of corresponding type parameter DP
    //      of enclosing class.
    //   4. Type parameter DP has a rhs a DerivedFromParam tree, as installed by
    //      desugar.classDef
    //   5. The completion of DP then copies the current bounds of CP, which are still Nothing..Any.
    //   6. The completion of CP finishes installing the real type bounds.
    // Consequence: CP ends up with the wrong bounds!
    // To avoid this we always complete type parameters of a class before the type parameters
    // of the class constructor, but after having indexed the constructor parameters (because
    // indexing is needed to provide a symbol to copy for DP's completion.
    // With the patch, we get instead the following sequence:
    //   0. Class constructor gets completed.
    //   1. Class constructor parameter CP is indexed.
    //   2. Class parameter DP starts completion.
    //   3. Info of CP is computed (to be copied to DP).
    //   4. CP is completed.
    //   5. Info of CP is copied to DP and DP is completed.
    index(tparams)
    if (isConstructor) sym.owner.typeParams.foreach(_.ensureCompleted())
    for (tparam <- tparams) typedAheadExpr(tparam)
    vparamss foreach completeParams
    def typeParams = tparams map symbolOfTree
    val paramSymss = ctx.normalizeIfConstructor(vparamss.nestedMap(symbolOfTree), isConstructor)
    // Wraps a result type into the method's full (possibly poly) type.
    def wrapMethType(restpe: Type): Type = {
      val restpe1 = // try to make anonymous functions non-dependent, so that they can be used in closures
        if (name == nme.ANON_FUN) avoid(restpe, paramSymss.flatten)
        else restpe
      ctx.methodType(tparams map symbolOfTree, paramSymss, restpe1, isJava = ddef.mods is JavaDefined)
    }
    if (isConstructor) {
      // set result type tree to unit, but take the current class as result type of the symbol
      typedAheadType(ddef.tpt, defn.UnitType)
      wrapMethType(ctx.effectiveResultType(sym, typeParams, NoType))
    }
    else valOrDefDefSig(ddef, sym, typeParams, paramSymss, wrapMethType)
  }
  /** The type signature of a TypeDef with given symbol and type-parameter
   *  symbols. Installs a provisional `TypeBounds.empty` info first (to break
   *  F-bounded cycles), computes the real rhs, then repairs any caches that
   *  may still hold the provisional info.
   */
  def typeDefSig(tdef: TypeDef, sym: Symbol, tparamSyms: List[TypeSymbol])(implicit ctx: Context): Type = {
    val isDerived = tdef.rhs.isInstanceOf[untpd.DerivedTypeTree]
    //val toParameterize = tparamSyms.nonEmpty && !isDerived
    //val needsLambda = sym.allOverriddenSymbols.exists(_ is HigherKinded) && !isDerived
    // Lambda-abstract over the type parameters, unless this is a derived tree.
    def abstracted(tp: Type): Type =
      if (tparamSyms.nonEmpty && !isDerived) tp.LambdaAbstract(tparamSyms)
      //else if (toParameterize) tp.parameterizeWith(tparamSyms)
      else tp
    val dummyInfo = abstracted(TypeBounds.empty)
    sym.info = dummyInfo
    // Temporarily set info of defined type T to ` >: Nothing <: Any.
    // This is done to avoid cyclic reference errors for F-bounds.
    // This is subtle: `sym` has now an empty TypeBounds, but is not automatically
    // made an abstract type. If it had been made an abstract type, it would count as an
    // abstract type of its enclosing class, which might make that class an invalid
    // prefix. I verified this would lead to an error when compiling io.ClassPath.
    // A distilled version is in pos/prefix.scala.
    //
    // The scheme critically relies on an implementation detail of isRef, which
    // inspects a TypeRef's info, instead of simply dealiasing alias types.
    val rhsType = abstracted(typedAheadType(tdef.rhs).tpe)
    val unsafeInfo = rhsType match {
      case bounds: TypeBounds => bounds
      case alias => TypeAlias(alias, if (sym is Local) sym.variance else 0)
    }
    if (isDerived) sym.info = unsafeInfo
    else {
      // Reset to NoCompleter first so checkNonCyclic sees a virgin symbol.
      sym.info = NoCompleter
      sym.info = checkNonCyclic(sym, unsafeInfo, reportErrors = true)
    }
    // Here we pay the price for the cavalier setting info to TypeBounds.empty above.
    // We need to compensate by invalidating caches in references that might
    // still contain the TypeBounds.empty. If we do not do this, stdlib factories
    // fail with a bounds error in PostTyper.
    def ensureUpToDate(tp: Type, outdated: Type) = tp match {
      case tref: TypeRef if tref.info == outdated && sym.info != outdated =>
        tref.uncheckedSetSym(null)
      case _ =>
    }
    ensureUpToDate(sym.typeRef, dummyInfo)
    ensureUpToDate(sym.typeRef.appliedTo(tparamSyms.map(_.typeRef)), TypeBounds.empty)
    etaExpandArgs.apply(sym.info)
  }
  /** Eta expand all class types C appearing as arguments to a higher-kinded
   *  type parameter to type lambdas, e.g. [HK0] => C[HK0]. This is necessary
   *  because in `typedAppliedTypeTree` we might have missed some eta expansions
   *  of arguments in F-bounds, because the recursive type was initialized with
   *  TypeBounds.empty.
   */
  def etaExpandArgs(implicit ctx: Context) = new TypeMap {
    def apply(tp: Type): Type = tp match {
      case tp: RefinedType =>
        // `mapconserve` keeps the original list when nothing changes, so the
        // `eq` checks below can avoid rebuilding the applied type.
        val args = tp.argInfos.mapconserve(this)
        if (args.nonEmpty) {
          val tycon = tp.withoutArgs(args)
          val tycon1 = this(tycon)
          val tparams = tycon.typeParams
          // Only eta-expand when arity matches the type constructor's params.
          val args1 = if (args.length == tparams.length) etaExpandIfHK(tparams, args) else args
          if ((tycon1 eq tycon) && (args1 eq args)) tp else tycon1.appliedTo(args1)
        } else mapOver(tp)
      case _ => mapOver(tp)
    }
  }
}
| densh/dotty | src/dotty/tools/dotc/typer/Namer.scala | Scala | bsd-3-clause | 41,070 |
package io.github.ptitjes.scott.nl.lang.fr
import io.github.ptitjes.scott.nl.conll.CoNLLXParser
import io.github.ptitjes.scott.nl.corpora.Lexica
import io.github.ptitjes.scott.nl.lang.fr.testFTBCoarse._
import scala.io.Source
/**
* @author Didier Villevalois
*/
/** Command-line entry point: loads the FTB corpus whose path is given as the
 *  first program argument and prints, for every occurrence of the target
 *  word, the index of the containing sentence and the token's coarse tag.
 */
object testDistributions extends App {
  val ftbPath = args(0)
  val corpus = FTB.parseFullFine(ftbPath)
  val word = "en"

  // Single comprehension with a guard instead of a nested if.
  for {
    (sentence, sentenceIndex) <- corpus.sequences.zipWithIndex
    token <- sentence.tokens
    if token.word.string == word
  } println("\\t\\t" + sentenceIndex + "\\t" + token.coarseTag)
}
| ptitjes/scott | scott-nl/src/main/scala/io/github/ptitjes/scott/nl/lang/fr/testDistributions.scala | Scala | gpl-3.0 | 554 |
package com.mogproject.mogami.core
import com.mogproject.mogami.core.Player.{BLACK, WHITE}
import com.mogproject.mogami.core.Ptype.{KNIGHT, LANCE, PAWN, SILVER}
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.must.Matchers
import com.mogproject.mogami.core.SquareConstant._
import com.mogproject.mogami.core.Direction._
import com.mogproject.mogami.core.io.RecordFormatException
/** Exercises [[Square]]: round-trip parsing/printing in the CSA, SFEN and
 *  KIF notations, promotion/legal-move zones, in-between bitboards, and
 *  displacement relations between squares.
 */
class SquareSpec extends AnyFlatSpec with Matchers with ScalaCheckDrivenPropertyChecks {
  // All 81 squares rendered in each notation, in the same order as Square.all.
  private val csaSquare = for (r <- '1' to '9'; f <- '1' to '9') yield s"$f$r"
  private val sfenSquare = for (r <- 'a' to 'i'; f <- '1' to '9') yield s"$f$r"
  private val kifSquare = for (r <- "δΈδΊδΈεδΊε
δΈε
«δΉ"; f <- "οΌοΌοΌοΌοΌοΌοΌοΌοΌ") yield s"$f$r"
  "Square#unary_!" must "flip the position" in {
    !P11 mustBe P99
    !P12 mustBe P98
    !P19 mustBe P91
    !P55 mustBe P55
    !P76 mustBe P34
    !P99 mustBe P11
  }
  it must "cancel double negation" in forAll(SquareGen.squares) { sq =>
    !(!sq) must be(sq)
  }
  "Square#parseCsaString" must "return Some in normal cases" in {
    csaSquare map { c => Square.parseCsaString(c) } mustBe Square.all
  }
  it must "throw an exception in error cases" in {
    assertThrows[RecordFormatException](Square.parseCsaString(""))
    assertThrows[RecordFormatException](Square.parseCsaString(" "))
    assertThrows[RecordFormatException](Square.parseCsaString("x" * 1000))
    assertThrows[RecordFormatException](Square.parseCsaString("01"))
    assertThrows[RecordFormatException](Square.parseCsaString("90"))
    assertThrows[RecordFormatException](Square.parseCsaString("123"))
  }
  "Square#toCsaString" must "make CSA-formatted string" in {
    Square.all map (_.toCsaString) mustBe csaSquare
  }
  it must "recover the original square" in forAll(SquareGen.squares) { s =>
    Square.parseCsaString(s.toCsaString) mustBe s
  }
  "Square#parseSfenString" must "return Some in normal cases" in {
    sfenSquare map { c => Square.parseSfenString(c) } mustBe Square.all
  }
  it must "return None in error cases" in {
    assertThrows[RecordFormatException](Square.parseSfenString(""))
    assertThrows[RecordFormatException](Square.parseSfenString(" "))
    assertThrows[RecordFormatException](Square.parseSfenString("x" * 1000))
    assertThrows[RecordFormatException](Square.parseSfenString("0a"))
    assertThrows[RecordFormatException](Square.parseSfenString("i0"))
    assertThrows[RecordFormatException](Square.parseSfenString("123"))
  }
  "Square#toSfenString" must "make SFEN-formatted string" in {
    Square.all map (_.toSfenString) mustBe sfenSquare
  }
  it must "recover the original square" in forAll(SquareGen.squares) { s =>
    Square.parseSfenString(s.toSfenString) mustBe s
  }
  "Square#toKifString" must "make KIF-formatted string" in {
    Square.all map (_.toKifString) mustBe kifSquare
  }
  "Square#parseKifString" must "return Some in normal cases" in {
    kifSquare map Square.parseKifString mustBe Square.all
  }
  it must "throw an exception in error cases" in {
    assertThrows[RecordFormatException](Square.parseKifString(""))
    // NOTE(review): the next two assertions look identical; the second was
    // probably meant to use a full-width (ideographic) space -- verify.
    assertThrows[RecordFormatException](Square.parseKifString(" "))
    assertThrows[RecordFormatException](Square.parseKifString(" "))
  }
  it must "recover the original square" in forAll(SquareGen.squares) { s =>
    Square.parseKifString(s.toKifString) mustBe s
  }
  "Square#isPromotionZone" must "return if a piece can promote" in {
    Square(1, 2).isPromotionZone(BLACK) must be(true)
    Square(2, 3).isPromotionZone(BLACK) must be(true)
    Square(3, 4).isPromotionZone(BLACK) must be(false)
    Square(4, 5).isPromotionZone(BLACK) must be(false)
    Square(5, 6).isPromotionZone(BLACK) must be(false)
    Square(6, 7).isPromotionZone(BLACK) must be(false)
    Square(7, 8).isPromotionZone(BLACK) must be(false)
    Square(8, 9).isPromotionZone(BLACK) must be(false)
    Square(9, 1).isPromotionZone(BLACK) must be(true)
    Square(1, 2).isPromotionZone(WHITE) must be(false)
    Square(2, 3).isPromotionZone(WHITE) must be(false)
    Square(3, 4).isPromotionZone(WHITE) must be(false)
    Square(4, 5).isPromotionZone(WHITE) must be(false)
    Square(5, 6).isPromotionZone(WHITE) must be(false)
    Square(6, 7).isPromotionZone(WHITE) must be(true)
    Square(7, 8).isPromotionZone(WHITE) must be(true)
    Square(8, 9).isPromotionZone(WHITE) must be(true)
    Square(9, 1).isPromotionZone(WHITE) must be(false)
  }
  // Fixed typo in the test description: "isLeglZone" -> "isLegalZone".
  "Square#isLegalZone" must "return if a piece can move there" in {
    Square(1, 2).isLegalZone(Piece(BLACK, PAWN)) must be(true)
    Square(2, 3).isLegalZone(Piece(BLACK, PAWN)) must be(true)
    Square(3, 4).isLegalZone(Piece(BLACK, PAWN)) must be(true)
    Square(4, 5).isLegalZone(Piece(BLACK, PAWN)) must be(true)
    Square(5, 6).isLegalZone(Piece(BLACK, PAWN)) must be(true)
    Square(6, 7).isLegalZone(Piece(BLACK, PAWN)) must be(true)
    Square(7, 8).isLegalZone(Piece(BLACK, PAWN)) must be(true)
    Square(8, 9).isLegalZone(Piece(BLACK, PAWN)) must be(true)
    Square(9, 1).isLegalZone(Piece(BLACK, PAWN)) must be(false)
    Square(8, 1).isLegalZone(Piece(BLACK, LANCE)) must be(false)
    Square(7, 1).isLegalZone(Piece(BLACK, KNIGHT)) must be(false)
    Square(6, 2).isLegalZone(Piece(BLACK, KNIGHT)) must be(false)
    Square(5, 1).isLegalZone(Piece(BLACK, SILVER)) must be(true)
    Square(4, 8).isLegalZone(Piece(WHITE, KNIGHT)) must be(false)
  }
  "Square#getBetweenBB" must "return inner bitboards" in {
    P11.getBetweenBB(P99).toSet must be(Set(P22, P33, P44, P55, P66, P77, P88))
    P99.getBetweenBB(P11).toSet must be(Set(P88, P77, P66, P55, P44, P33, P22))
    P55.getBetweenBB(P77).toSet must be(Set(P66))
    P55.getBetweenBB(P78).toSet must be(Set.empty)
    P55.getBetweenBB(P95).toSet must be(Set(P65, P75, P85))
    P99.getBetweenBB(P99).toSet must be(Set.empty)
    P99.getBetweenBB(P88).toSet must be(Set.empty)
  }
  // NOTE(review): description names "getRelation" but the method under test
  // is getDisplacement; the description looks stale -- consider renaming.
  "Square#getRelation" must "return relationship between squares" in {
    P55.getDisplacement(BLACK, P44) must be(Displacement(DiagonallyForward, 1))
    P55.getDisplacement(BLACK, P54) must be(Displacement(Forward, 1))
    P55.getDisplacement(BLACK, P64) must be(Displacement(DiagonallyForward, 1))
    P55.getDisplacement(BLACK, P45) must be(Displacement(Side, 1))
    P55.getDisplacement(BLACK, P55) must be(Displacement(NoRelation, 0))
    P55.getDisplacement(BLACK, P65) must be(Displacement(Side, 1))
    P55.getDisplacement(BLACK, P46) must be(Displacement(DiagonallyBackward, 1))
    P55.getDisplacement(BLACK, P56) must be(Displacement(Backward, 1))
    P55.getDisplacement(BLACK, P66) must be(Displacement(DiagonallyBackward, 1))
    P55.getDisplacement(BLACK, P43) must be(Displacement(KnightMove, 1))
    P55.getDisplacement(BLACK, P53) must be(Displacement(Forward, 2))
    P55.getDisplacement(BLACK, P63) must be(Displacement(KnightMove, 1))
    P55.getDisplacement(BLACK, P47) must be(Displacement(NoRelation, 0))
    P55.getDisplacement(BLACK, P57) must be(Displacement(Backward, 2))
    P55.getDisplacement(BLACK, P67) must be(Displacement(NoRelation, 0))
    P11.getDisplacement(BLACK, P99) must be(Displacement(DiagonallyBackward, 8))
    P99.getDisplacement(BLACK, P11) must be(Displacement(DiagonallyForward, 8))
    P99.getDisplacement(BLACK, P12) must be(Displacement(NoRelation, 0))
    P32.getDisplacement(BLACK, P92) must be(Displacement(Side, 6))
    P32.getDisplacement(BLACK, P12) must be(Displacement(Side, 2))
    P55.getDisplacement(WHITE, P44) must be(Displacement(DiagonallyBackward, 1))
    P55.getDisplacement(WHITE, P54) must be(Displacement(Backward, 1))
    P55.getDisplacement(WHITE, P64) must be(Displacement(DiagonallyBackward, 1))
    P55.getDisplacement(WHITE, P45) must be(Displacement(Side, 1))
    P55.getDisplacement(WHITE, P55) must be(Displacement(NoRelation, 0))
    P55.getDisplacement(WHITE, P65) must be(Displacement(Side, 1))
    P55.getDisplacement(WHITE, P46) must be(Displacement(DiagonallyForward, 1))
    P55.getDisplacement(WHITE, P56) must be(Displacement(Forward, 1))
    P55.getDisplacement(WHITE, P66) must be(Displacement(DiagonallyForward, 1))
    P55.getDisplacement(WHITE, P43) must be(Displacement(NoRelation, 0))
    P55.getDisplacement(WHITE, P53) must be(Displacement(Backward, 2))
    P55.getDisplacement(WHITE, P63) must be(Displacement(NoRelation, 0))
    P55.getDisplacement(WHITE, P47) must be(Displacement(KnightMove, 1))
    P55.getDisplacement(WHITE, P57) must be(Displacement(Forward, 2))
    P55.getDisplacement(WHITE, P67) must be(Displacement(KnightMove, 1))
    P11.getDisplacement(WHITE, P99) must be(Displacement(DiagonallyForward, 8))
    P99.getDisplacement(WHITE, P11) must be(Displacement(DiagonallyBackward, 8))
    P99.getDisplacement(WHITE, P12) must be(Displacement(NoRelation, 0))
    P32.getDisplacement(WHITE, P92) must be(Displacement(Side, 6))
    P32.getDisplacement(WHITE, P12) must be(Displacement(Side, 2))
  }
}
package ghpages.pages
import ghpages.examples.util.ErrorHandler
import japgolly.scalajs.react._
import japgolly.scalajs.react.extra.router.RouterCtl
import japgolly.scalajs.react.vdom.html_<^._
/** Components shared by the example pages: a navigation menu, the example
 *  body (wrapped in an error handler), and the combined two-column layout.
 */
object ExampleComponents {

  /** View data: the selected example, the router, and all known examples. */
  case class Props(current: Example, router: RouterCtl[Example], examples: Vector[Example])

  implicit val propsReuse: Reusability[Props] = Reusability.derive[Props]

  /** Left-hand navigation listing every example; the current one is highlighted. */
  val menu = ScalaComponent.builder[Props]
    .render_P { props =>
      def renderItem(example: Example) =
        props.router.link(example)(
          ^.classSet1("list-group-item", "active" -> (example == props.current)),
          example.title)
      <.div(^.cls := "col-md-2",
        <.div(^.cls := "list-group")(
          props.examples.map(renderItem): _*))
    }
    .configure(Reusability.shouldComponentUpdate)
    .build

  /** Renders uncaught example errors as a red preformatted block. */
  private val errorHandler =
    ErrorHandler.pure { e =>
      <.pre(
        ^.color := "#c00",
        s"ERROR: ${e.message}\\n${e.stack}")
    }

  /** The main column showing the selected example, guarded by the error handler. */
  val body = ScalaComponent.builder[Example]
    .render_P { example =>
      <.div(
        ^.cls := "col-md-10",
        errorHandler(example.render()))
    }
    .build

  /** Full page row: menu on the left, example body on the right. */
  val component = ScalaComponent.builder[Props]
    .render_P { props =>
      <.div(^.cls := "row",
        menu(props),
        body(props.current))
    }
    .build
}
| japgolly/scalajs-react | ghpages/src/main/scala/ghpages/pages/ExampleComponents.scala | Scala | apache-2.0 | 1,275 |
package org.pfcoperez.scalawk

import org.pfcoperez.scalawk.transitions.ToCommandWithSeparator

/** DSL entry point: `lines` begins an AWK command that processes input line
 *  by line, inheriting its behavior from [[ToCommandWithSeparator]].
 */
object lines extends ToCommandWithSeparator
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
/* Uncomment after remove type aliases in org.scalatest package object
import org.scalatest.exceptions.TestFailedException
*/
/** Tests for the `failureOf` method mixed in from the `FailureOf` trait:
 *  it should capture a test failure as `Some(exception)`, yield `None` on
 *  success, and let fatal errors propagate unchanged.
 */
class FailureOfSpec extends FunSpec with FailureOf {
  describe("The failureOf method") {
    it("should return None if no exception is thrown") {
      val result = failureOf { assert(1 + 1 === 2) }
      assert(result === None)
    }
    it("should return the exception if TestFailedException is thrown") {
      val result = failureOf { assert(1 + 1 === 3) }
      assert(result.isDefined)
      assert(result.get.isInstanceOf[TestFailedException])
    }
    it("if UnknownError is thrown, should complete abruptly with that exception") {
      // UnknownError is fatal (a VirtualMachineError); failureOf must rethrow
      // it rather than wrap it in Some(...).
      intercept[UnknownError] {
        failureOf { throw new UnknownError }
      }
    }
  }
}
| travisbrown/scalatest | src/test/scala/org/scalatest/FailureOfSpec.scala | Scala | apache-2.0 | 1,391 |
// Regression test (t7300): single-line comment markers (`//`) occurring
// inside (possibly nested) multi-line comments must not terminate the
// enclosing comment early. The odd-looking comment tokens below ARE the
// test fixture -- do not "clean them up".
object Test extends App {
  // single line comment in multi line comment
  /*//*/ val x = 1 */*/
  val x = 2
  println(x)
  // single line comment in nested multi line comment
  /*/*//*/ val y = 1 */*/*/
  val y = 2
  println(y)
}
| som-snytt/dotty | tests/run/t7300.scala | Scala | apache-2.0 | 232 |
package scala.virtualization.lms
package epfl
package test2
import common._
import test1._
/** Staged odd-even merge sort: builds a sorting network over `Rep[Double]`
 *  values using the `min`/`max` primitives provided by the `Relat` self-type.
 *
 *  NOTE: the input length must be a power of two; the deliberately incomplete
 *  matches below (marked `// cases?`) raise a MatchError for other lengths.
 */
trait MergeSort { this: Relat =>
  /** Splits a list into its odd-positioned and even-positioned elements.
   *  Consumes elements two at a time, so the list length must be even.
   */
  def splitOddEven[T](xs: List[T]): (List[T], List[T]) = (xs: @unchecked) match {
    case o :: e :: xt =>
      val (os, es) = splitOddEven(xt)
      ((o :: os), (e :: es))
    case Nil => (xs, xs)
    // cases?
  }
  /** Interleaves two equal-length lists back into odd/even positions;
   *  inverse of [[splitOddEven]].
   */
  def mergeOddEven[T](odd: List[T], even: List[T]): List[T] = ((odd, even): @unchecked) match {
    case (Nil, Nil) =>
      Nil
    case ((o :: os), (e :: es)) =>
      o :: (e :: mergeOddEven(os, es))
    // cases?
  }
  /** Merges a list whose two halves are already interleaved-sorted, emitting
   *  compare-exchange (`min`/`max`) pairs of the odd-even merge network.
   */
  def merge(xs: List[Rep[Double]]): List[Rep[Double]] = (xs: @unchecked) match {
    case o :: e :: Nil =>
      min(o, e) :: max(o, e) :: Nil
    case _ =>
      val (odd0, even0) = splitOddEven(xs)
      val (odd1, even1) = (merge(odd0), merge(even0))
      val (odd2, even2) = odd1 zip even1 map {
        case (x, y) =>
          (min(x,y), max(x,y))
      } unzip;
      mergeOddEven(odd2, even2)
  }
  /** Recursively sorts by halving, sorting each half, and merging. */
  def sort(xs: List[Rep[Double]]): List[Rep[Double]] = xs match {
    case (x :: Nil) =>
      xs
    case _ =>
      val N = xs.length // should assert it's power of two
      val (left0, right0) = xs.splitAt(N/2)
      val (left1, right1) = (sort(left0), sort(right0))
      merge(left1 ::: right1)
  }
}
/** Golden-file test: stages an 8-element merge sort, dumps the resulting IR
 *  definitions and a dependency graph, and diffs them against checked-in
 *  reference output.
 */
class TestSort extends FileDiffSuite {
  // Output file prefix for the golden files produced by this suite.
  val prefix = home + "test-out/epfl/test2-"
  def testSort = {
    withOutFile(prefix+"sort1") {
      // Instantiate the staged sorter with the optimized Rel expression IR.
      val o = new MergeSort with RelatExpOpt with FlatResult
      import o._
      // Sort 8 fresh symbolic doubles (power-of-two length required).
      val r = sort(List.tabulate(8)(_ => fresh))
      println(globalDefs.mkString("\n"))
      println(r)
      val p = new ExportGraph { val IR: o.type = o }
      p.emitDepGraph(result(r), prefix+"sort1-dot", true)
    }
    assertFileEqualsCheck(prefix+"sort1")
    assertFileEqualsCheck(prefix+"sort1-dot")
  }
}
| afernandez90/virtualization-lms-core | test-src/epfl/test2-fft/TestSort.scala | Scala | bsd-3-clause | 1,841 |
package com.alzindiq.cluster
import com.alzindiq.Plumber
import com.alzindiq.similarity.AbstractSimilarityRanker
import com.rockymadden.stringmetric.similarity.JaccardMetric
import org.scalatest.{Matchers, FlatSpec}
import scala.collection.mutable
/** Fixture data and the similarity measure shared by the clusterer tests.
 *  The numbered `TextPlumber`s form three loose groups ("one", "two",
 *  "three") whose texts are deliberately similar within each group.
 */
object ClustererTest {
  val one = TextPlumber(1,"one is one")
  val onePrime = TextPlumber(1,"this one is similar to other one")
  val two = TextPlumber(2, "one sismilar to one")
  val three = TextPlumber( 3, "one similar to one")
  val four = TextPlumber(4, "one sismilar to one")
  val five = TextPlumber( 5, "oneis not the one")
  val six = TextPlumber(6, "two is not the one")
  val seven = TextPlumber(7, "another two is not the one")
  val eight = TextPlumber(8, "this two is not that one")
  val nine = TextPlumber(9, "two is two")
  val ten = TextPlumber(10, "three is here")
  val eleven = TextPlumber(11, "three is here to stay")
  val twelve = TextPlumber( 12, "another three is here too")
  val thirteen = TextPlumber(13, "one two three nonsense")

  // Bigram Jaccard similarity, thresholded: scores below `threshold` are
  // clamped to 0 so they never link two plumbers.
  // NOTE(review): `.get` throws if the metric is undefined (e.g. a text
  // shorter than 2 characters) -- acceptable for test fixtures only.
  def similarityFunction (threshold : Double) = (p : Plumber, p2 : Plumber) => {
    val jaccard= JaccardMetric(2).compare(p.attributes.get("text").get.toString, p2.attributes.get("text").get.toString).get // use bigrams
    if(jaccard >= threshold) jaccard
    else 0d
  }
}
/** Behavioral tests for [[Clusterer]]: initial clustering of bucketed
 *  plumbers, and incremental updates (add/modify/remove) without full
 *  re-clustering. (Fixed description typos: trailing space and "stcratch".)
 */
class ClustererTest extends FlatSpec with Matchers {

  "clusterer" should "create right clusters without any delta" in {
    // Three buckets of mutually similar fixtures; only "one" is used first.
    val bucket1 : Set[Plumber] = Set(ClustererTest.one, ClustererTest.two, ClustererTest.three, ClustererTest.four, ClustererTest.five)//, five, six, seven, eight, nine, ten, eleven, twelve, thirteen)
    val bucket2 : Set[Plumber]= Set(ClustererTest.six, ClustererTest.seven, ClustererTest.eight, ClustererTest.nine)
    val bucket3 : Set[Plumber] = Set(ClustererTest.ten, ClustererTest.eleven, ClustererTest.twelve, ClustererTest.thirteen)

    var buckets = mutable.Map.empty[List[Any],Set[Plumber]]
    buckets.put(List("one"), bucket1)
    var test = Map(buckets.toList : _*)

    val clusterer = new Clusterer(ClustererTest.similarityFunction(0.6))
    val results = clusterer.initClusters4AllBuckets(test)
    results.size shouldBe 1
    val res = results.head._2
    res.size shouldBe 3
    res.head.value should be > 0.7

    // Adding the remaining buckets yields independent per-bucket clusters.
    buckets.put(List("two"), bucket2)
    buckets.put(List("three"),bucket3)
    test = Map(buckets.toList : _*)
    val threeResults = clusterer.initClusters4AllBuckets(test)
    threeResults.get(List("two")).head.size shouldBe 3
    threeResults.get(List("three")).head.size shouldBe 1
  }

  "Clusterer" should "create increments without starting from scratch" in {
    val bucket1 : Set[Plumber] = Set(ClustererTest.one, ClustererTest.three)
    val added : Set[Plumber] = Set(ClustererTest.two, ClustererTest.four, ClustererTest.five)
    val removed : Set[Plumber] = Set(ClustererTest.three)
    val modified : Set[Plumber] = Set(ClustererTest.onePrime)

    println("OnePrime to Two "+ClustererTest.similarityFunction(0.5).apply(ClustererTest.two,ClustererTest.onePrime))

    var buckets = mutable.Map.empty[List[Any],Set[Plumber]]
    buckets.put(List("one"), bucket1)
    var test = Map(buckets.toList : _*)
    val clusterer = new Clusterer(ClustererTest.similarityFunction(0.5))
    val results = clusterer.initClusters4AllBuckets(test)
    // The initial bucket produces no links at this threshold.
    results.get(List("one")).head shouldBe List()

    // Apply the delta (added, modified, removed) to the "one" bucket only.
    val updates : List[Set[Plumber]] = List(added, modified, removed)
    val bucketName : List[Any] = List("one")
    val updateMap : Map[List[Any],List[Set[Plumber]]] = Map(bucketName -> updates)
    val incremented = clusterer.increment(updateMap)
    incremented.get(List("one")).size shouldBe 1
    incremented.get(List("one")).head.size shouldBe 1
    incremented.get(List("one")).head.head.source shouldBe "2"
    incremented.get(List("one")).head.head.dst shouldBe "4"
    incremented.get(List("one")).head.head.value shouldBe 1d
  }
}
object TextPlumber {
  // All n-grams of sizes 2..n over the given token array, as a set of token
  // lists. NOTE(review): sizes start at 2, so unigrams are never produced
  // and ngrams(1, _) is always empty -- presumably intentional; confirm.
  def ngrams(n: Int, data : Array[String]) : Set[List[String]] = (2 to n).map(i => data.sliding(i).toStream).foldLeft(Stream.empty[Array[String]])((acc, res) => acc #:::res ).map(_.toList).toSet
  // Convenience factory mirroring the class constructor.
  def apply(i : Int, text : String) = new TextPlumber(i, text)
}
class TextPlumber(i: Int, text : String) extends Plumber (Map(AbstractSimilarityRanker.plumbId -> i, "text" -> text, "ngramText" -> TextPlumber.ngrams(2,text.split(" "))))
| alzindiq/plumb | src/test/scala/com/alzindiq/cluster/ClustererTest.scala | Scala | apache-2.0 | 4,355 |
/*
* Licensed to Tuplejump Software Pvt. Ltd. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Tuplejump Software Pvt. Ltd. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.tuplejump.calliope
import spark.{Logging, RDD}
import org.apache.hadoop.mapreduce.HadoopMapReduceUtil
import java.nio.ByteBuffer
import org.apache.cassandra.thrift.{Column, Mutation, ColumnOrSuperColumn}
import org.apache.cassandra.hadoop.ColumnFamilyOutputFormat
import scala.collection.JavaConversions._
import spark.SparkContext._
import org.apache.cassandra.hadoop.cql3.CqlOutputFormat
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
class CassandraRDDFunctions[U](self: RDD[U])
extends Logging with HadoopMapReduceUtil with Serializable {
  // Hadoop configuration key naming the target keyspace for Cassandra output.
  private final val OUTPUT_KEYSPACE_CONFIG: String = "cassandra.output.keyspace"
  // Hadoop configuration key holding the CQL3 update statement for CqlOutputFormat.
  private final val OUTPUT_CQL: String = "cassandra.output.cql"
  /**
   * Save the RDD to the given keyspace and column family on a cassandra cluster
   * accessible at localhost:9160. Delegates to the builder-based overload with
   * default host/port settings.
   *
   * @param keyspace Keyspace to save the RDD
   * @param columnFamily ColumnFamily to save the RDD
   * @param keyMarshaller The Marshaller, that takes in an RDD entry:U and gives a row key
   * @param rowMarshaller The Marshaller, that takes in an RDD entry:U and gives a map for columns
   *
   */
  def thriftSaveToCassandra(keyspace: String, columnFamily: String)
                           (implicit keyMarshaller: U => ByteBuffer, rowMarshaller: U => Map[ByteBuffer, ByteBuffer],
                            um: ClassManifest[U]) {
    thriftSaveToCassandra(CasBuilder.thrift.withColumnFamily(keyspace, columnFamily))
  }
  /**
   * Save the RDD to the given keyspace and column family on a cassandra cluster
   * accessible at host:port. Delegates to the builder-based overload.
   *
   * @param host Host to connect to from the SparkContext. The actual tasks will use their assigned hosts
   * @param port RPC port to use from the SparkContext.
   * @param keyspace Keyspace to save the RDD
   * @param columnFamily ColumnFamily to save the RDD
   * @param keyMarshaller The Marshaller, that takes in an RDD entry:U and gives a row key
   * @param rowMarshaller The Marshaller, that takes in an RDD entry:U and gives a map for columns
   *
   */
  def thriftSaveToCassandra(host: String, port: String, keyspace: String, columnFamily: String)
                           (implicit keyMarshaller: U => ByteBuffer, rowMarshaller: U => Map[ByteBuffer, ByteBuffer],
                            um: ClassManifest[U]) {
    thriftSaveToCassandra(CasBuilder.thrift.withColumnFamily(keyspace, columnFamily).onHost(host).onPort(port))
  }
  /**
   *
   * Save the RDD using the given configuration, writing each entry as a row
   * of thrift Mutations via Cassandra's ColumnFamilyOutputFormat.
   *
   * @param cas The configuration to use to connect to the cluster
   * @param keyMarshaller The Marshaller, that takes in an RDD entry:U and gives a row key
   * @param rowMarshaller The Marshaller, that takes in an RDD entry:U and gives a map for columns
   *
   * @see ThriftCasBuilder
   *
   */
  def thriftSaveToCassandra(cas: ThriftCasBuilder)
                           (implicit keyMarshaller: U => ByteBuffer, rowMarshaller: U => Map[ByteBuffer, ByteBuffer],
                            um: ClassManifest[U]) {
    val conf = cas.configuration
    // NOTE(review): `case x: U` is unchecked due to type erasure; every RDD
    // element matches. The key/value pair relies on the implicit marshallers
    // converting `x` to a ByteBuffer key and a column map respectively, and
    // the scala List is converted to java.util.List by JavaConversions.
    self.map[(ByteBuffer, java.util.List[Mutation])] {
      case x: U =>
        (x, mapToMutations(x))
    }.saveAsNewAPIHadoopFile(
      conf.get(OUTPUT_KEYSPACE_CONFIG),
      classOf[ByteBuffer],
      classOf[List[Mutation]],
      classOf[ColumnFamilyOutputFormat],
      conf)

    // Builds one thrift Mutation per column (name -> value), timestamped at
    // write time. Forward reference to this local def is legal for methods.
    def mapToMutations(m: Map[ByteBuffer, ByteBuffer]): List[Mutation] = {
      m.map {
        case (k, v) =>
          val column = new Column()
          column.setName(k)
          column.setValue(v)
          column.setTimestamp(System.currentTimeMillis)
          val mutation = new Mutation()
          // Thrift setters return `this`, so this expression yields the mutation.
          mutation.setColumn_or_supercolumn(new ColumnOrSuperColumn().setColumn(column))
      }.toList
    }
  }
/**
 * Saves the RDD data to Cassandra, using a CQL3 update statement, to the given keyspace
 * and column family on a cassandra cluster accessible at localhost:9160.
 *
 * @param keyspace Keyspace to save the RDD
 * @param columnFamily ColumnFamily to save the RDD
 * @param updateCql The CQL Update query to use to update the entry in cassandra. This MUST be an update query of the form,
 * UPDATE <keyspace>.<columnfamily> SET <field1> = ?, <field2> = ?
 * WHERE <primarykey1> = ? and <primarykey2> = ?
 * @param keyMarshaller A function that accepts a rdd entry:U and returns a Map of KeyName:String -> KeyValue:ByteBuffer
 * @param rowMarshaller A function that accepts a rdd entry:U and returns a List of field values:ByteBuffer
 * in the field order in query
 */
def cql3SaveToCassandra(keyspace: String, columnFamily: String, updateCql: String)
                       (implicit keyMarshaller: U => Map[String, ByteBuffer], rowMarshaller: U => List[ByteBuffer],
                        um: ClassManifest[U]) {
  // Build a default (localhost) CQL3 configuration and delegate to the
  // builder-based overload.
  val builder = CasBuilder.cql3
    .withColumnFamily(keyspace, columnFamily)
    .saveWithQuery(updateCql)
  cql3SaveToCassandra(builder)
}
/**
 * Saves the RDD data to Cassandra, using a CQL3 update statement, to the given keyspace
 * and column family on a cassandra cluster accessible at host:port.
 *
 * @param host Host to connect to from the SparkContext. The actual tasks will use their assigned hosts
 * @param port RPC port to use from the SparkContext.
 * @param keyspace Keyspace to save the RDD
 * @param columnFamily ColumnFamily to save the RDD
 * @param updateCql The CQL Update query to use to update the entry in cassandra. This MUST be an update query of the form,
 * UPDATE <keyspace>.<columnfamily> SET <field1> = ?, <field2> = ?
 * WHERE <primarykey1> = ? and <primarykey2> = ?
 * @param keyMarshaller A function that accepts a rdd entry:U and returns a Map of KeyName:String -> KeyValue:ByteBuffer
 * @param rowMarshaller A function that accepts a rdd entry:U and returns a List of field values:ByteBuffer
 * in the field order in query
 */
def cql3SaveToCassandra(host: String, port: String, keyspace: String, columnFamily: String, updateCql: String)
                       (implicit keyMarshaller: U => Map[String, ByteBuffer], rowMarshaller: U => List[ByteBuffer],
                        um: ClassManifest[U]) {
  // Assemble the full connection + query configuration, then delegate.
  val builder = CasBuilder.cql3
    .withColumnFamily(keyspace, columnFamily)
    .onHost(host)
    .onPort(port)
    .saveWithQuery(updateCql)
  cql3SaveToCassandra(builder)
}
/**
 * Saves the RDD data to Cassandra using the CQL3 update query configured in `cas`.
 *
 * @param cas The configuration to use for saving rdd to Cassandra. This should atleast configure the keyspace,
 * columnfamily and the query to save the entry with.
 * @param keyMarshaller A function that accepts a rdd entry:U and returns a Map of KeyName:String -> KeyValue:ByteBuffer
 * @param rowMarshaller A function that accepts a rdd entry:U and returns a List of field values:ByteBuffer
 * in the field order in query
 *
 * @see Cql3CasBuilder
 *
 */
def cql3SaveToCassandra(cas: Cql3CasBuilder)
                       (implicit keyMarshaller: U => Map[String, ByteBuffer], rowMarshaller: U => List[ByteBuffer],
                        um: ClassManifest[U]) {
  val conf = cas.configuration
  // Fail fast if the caller forgot to configure the CQL update statement.
  require(conf.get(OUTPUT_CQL) != null && !conf.get(OUTPUT_CQL).isEmpty,
    "Query to save the records to cassandra must be set using saveWithQuery on cas")
  // Marshal each entry to (primary-key map, bound-variable list) in Java
  // collection form, as expected by CqlOutputFormat.
  self.map[(java.util.Map[String, ByteBuffer], java.util.List[ByteBuffer])] {
    case row: U =>
      (mapAsJavaMap(keyMarshaller(row)), seqAsJavaList(rowMarshaller(row)))
  }.saveAsNewAPIHadoopFile(
    conf.get(OUTPUT_KEYSPACE_CONFIG),
    classOf[java.util.Map[String, ByteBuffer]],
    classOf[java.util.List[ByteBuffer]],
    classOf[CqlOutputFormat],
    conf
  )
}
}
| milliondreams/calliope | src/main/scala/com/tuplejump/calliope/CassandraRDDFunctions.scala | Scala | apache-2.0 | 8,615 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import java.util.concurrent.{CountDownLatch, TimeUnit}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicLong, AtomicReference}
import scala.annotation.meta.param
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map}
import scala.util.control.NonFatal
import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.exceptions.TestFailedException
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.broadcast.BroadcastManager
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.internal.config
import org.apache.spark.rdd.{DeterministicLevel, RDD}
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.shuffle.{FetchFailedException, MetadataFetchFailedException}
import org.apache.spark.storage.{BlockId, BlockManagerId, BlockManagerMaster}
import org.apache.spark.util.{AccumulatorContext, AccumulatorV2, CallSite, LongAccumulator, ThreadUtils, Utils}
/**
 * An event process loop that handles each posted event synchronously on the
 * posting thread, so tests can observe scheduler state immediately after post().
 */
class DAGSchedulerEventProcessLoopTester(dagScheduler: DAGScheduler)
  extends DAGSchedulerEventProcessLoop(dagScheduler) {

  override def post(event: DAGSchedulerEvent): Unit =
    // Forward event to `onReceive` directly to avoid processing event asynchronously.
    try onReceive(event)
    catch { case NonFatal(e) => onError(e) }

  override def onError(e: Throwable): Unit = {
    // Surface the error to the test by stopping the scheduler and rethrowing.
    logError("Error in DAGSchedulerEventLoop: ", e)
    dagScheduler.stop()
    throw e
  }
}
/**
 * A MyRDD variant whose compute() succeeds (yielding no records) so that
 * doCheckpoint() can be exercised on it in tests.
 */
class MyCheckpointRDD(
    sc: SparkContext,
    numPartitions: Int,
    dependencies: List[Dependency[_]],
    locations: Seq[Seq[String]] = Nil,
    @(transient @param) tracker: MapOutputTrackerMaster = null,
    indeterminate: Boolean = false)
  extends MyRDD(sc, numPartitions, dependencies, locations, tracker, indeterminate) {

  // Unlike MyRDD, computing a partition is allowed here; it simply produces nothing.
  override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] = {
    Iterator.empty
  }
}
/**
 * An RDD for passing to DAGScheduler. These RDDs will use the dependencies and
 * preferredLocations (if any) that are passed to them. They are deliberately not executable
 * so we can test that DAGScheduler does not try to execute RDDs locally.
 *
 * Optionally, one can pass in a list of locations to use as preferred locations for each task,
 * and a MapOutputTrackerMaster to enable reduce task locality. We pass the tracker separately
 * because, in this test suite, it won't be the same as sc.env.mapOutputTracker.
 */
class MyRDD(
    sc: SparkContext,
    numPartitions: Int,
    dependencies: List[Dependency[_]],
    locations: Seq[Seq[String]] = Nil,
    @(transient @param) tracker: MapOutputTrackerMaster = null,
    indeterminate: Boolean = false)
  extends RDD[(Int, Int)](sc, dependencies) with Serializable {

  // Tests drive task completion through events, never by really running tasks.
  override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
    throw new RuntimeException("should not be reached")

  override def getPartitions: Array[Partition] =
    Array.tabulate[Partition](numPartitions) { i =>
      new Partition { override def index: Int = i }
    }

  override protected def getOutputDeterministicLevel =
    if (indeterminate) DeterministicLevel.INDETERMINATE else super.getOutputDeterministicLevel

  override def getPreferredLocations(partition: Partition): Seq[String] =
    // Explicit per-partition locations win; otherwise fall back to shuffle locality.
    locations.lift(partition.index).getOrElse {
      dependencies match {
        case (dep: ShuffleDependency[_, _, _]) :: Nil if tracker != null =>
          // If we have only one shuffle dependency, use the same code path as
          // ShuffledRDD for locality.
          tracker.getPreferredLocationsForShuffle(dep, partition.index)
        case _ => Nil
      }
    }

  override def toString: String = "DAGSchedulerSuiteRDD " + id
}
// Marker exception type thrown by tests to represent an expected, identifiable failure.
class DAGSchedulerSuiteDummyException extends Exception
class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLimits {
import DAGSchedulerSuite._
// Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
implicit val defaultSignaler: Signaler = ThreadSignaler
val conf = new SparkConf
/** Set of TaskSets the DAGScheduler has requested executed. */
val taskSets = scala.collection.mutable.Buffer[TaskSet]()
/** Stages for which the DAGScheduler has called TaskScheduler.cancelTasks(). */
val cancelledStages = new HashSet[Int]()
// Tasks recorded by the stub scheduler's notifyPartitionCompletion().
val tasksMarkedAsCompleted = new ArrayBuffer[Task[_]]()
/**
 * Stub TaskScheduler: records submitted TaskSets in `taskSets` and cancelled
 * stage ids in `cancelledStages` instead of actually running anything.
 */
val taskScheduler = new TaskScheduler() {
  override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
  override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
  override def start() = {}
  override def stop() = {}
  override def executorHeartbeatReceived(
    execId: String,
    accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
    blockManagerId: BlockManagerId,
    executorUpdates: Map[(Int, Int), ExecutorMetrics]): Boolean = true
  override def submitTasks(taskSet: TaskSet) = {
    // normally done by TaskSetManager
    taskSet.tasks.foreach(_.epoch = mapOutputTracker.getEpoch)
    taskSets += taskSet
  }
  override def cancelTasks(stageId: Int, interruptThread: Boolean): Unit = {
    cancelledStages += stageId
  }
  override def killTaskAttempt(
    taskId: Long, interruptThread: Boolean, reason: String): Boolean = false
  override def killAllTaskAttempts(
    stageId: Int, interruptThread: Boolean, reason: String): Unit = {}
  override def notifyPartitionCompletion(stageId: Int, partitionId: Int): Unit = {
    // Record the single task for this partition in the stage's latest TaskSet.
    taskSets.filter(_.stageId == stageId).lastOption.foreach { ts =>
      val tasks = ts.tasks.filter(_.partitionId == partitionId)
      assert(tasks.length == 1)
      tasksMarkedAsCompleted += tasks.head
    }
  }
  override def setDAGScheduler(dagScheduler: DAGScheduler) = {}
  override def defaultParallelism() = 2
  override def executorDecommission(executorId: String) = {}
  override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
  override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
  override def applicationAttemptId(): Option[String] = None
}
/**
 * Listener which records stage/task information for verification in UTs. Each
 * getter-kind method first waits until the listener bus is drained, so callers
 * always see an immutable snapshot that cannot show odd results due to races
 * with in-flight events.
 */
class EventInfoRecordingListener extends SparkListener {
  private val _submittedStageInfos = new HashSet[StageInfo]
  private val _successfulStages = new HashSet[Int]
  private val _failedStages = new ArrayBuffer[Int]
  private val _stageByOrderOfExecution = new ArrayBuffer[Int]
  private val _endedTasks = new HashSet[Long]

  override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit =
    _submittedStageInfos += stageSubmitted.stageInfo

  override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = {
    val info = stageCompleted.stageInfo
    _stageByOrderOfExecution += info.stageId
    // A stage with no failure reason completed successfully.
    info.failureReason match {
      case None => _successfulStages += info.stageId
      case Some(_) => _failedStages += info.stageId
    }
  }

  override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit =
    _endedTasks += taskEnd.taskInfo.taskId

  def submittedStageInfos: Set[StageInfo] = {
    drainListenerBus()
    _submittedStageInfos.toSet
  }

  def successfulStages: Set[Int] = {
    drainListenerBus()
    _successfulStages.toSet
  }

  def failedStages: List[Int] = {
    drainListenerBus()
    _failedStages.toList
  }

  def stageByOrderOfExecution: List[Int] = {
    drainListenerBus()
    _stageByOrderOfExecution.toList
  }

  def endedTasks: Set[Long] = {
    drainListenerBus()
    _endedTasks.toSet
  }

  // Block until every pending listener event has been delivered.
  private def drainListenerBus(): Unit = sc.listenerBus.waitUntilEmpty()
}
// Recreated for every test in init() and torn down in afterEach().
var sparkListener: EventInfoRecordingListener = null
var mapOutputTracker: MapOutputTrackerMaster = null
var broadcastManager: BroadcastManager = null
var securityMgr: SecurityManager = null
var scheduler: DAGScheduler = null
var dagEventProcessLoopTester: DAGSchedulerEventProcessLoop = null
/**
 * Set of cache locations to return from our mock BlockManagerMaster.
 * Keys are (rdd ID, partition ID). Anything not present will return an empty
 * list of cache locations silently.
 */
val cacheLocations = new HashMap[(Int, Int), Seq[BlockManagerId]]

// BlockManagerMaster stub whose getLocations is backed by `cacheLocations` above.
val blockManagerMaster = new BlockManagerMaster(null, null, conf, true) {
  override def getLocations(blockIds: Array[BlockId]): IndexedSeq[Seq[BlockManagerId]] = {
    blockIds.toIndexedSeq.map { blockId =>
      // Only RDD blocks with a registered (rddId, splitIndex) have locations.
      val cached = for {
        rddBlock <- blockId.asRDDId
        locs <- cacheLocations.get((rddBlock.rddId, rddBlock.splitIndex))
      } yield locs
      cached.getOrElse(Seq())
    }
  }
  override def removeExecutor(execId: String): Unit = {
    // don't need to propagate to the driver, which we don't have
  }
}
/** The list of results that DAGScheduler has collected. */
val results = new HashMap[Int, Any]()
// The first failure reported through jobListener.jobFailed, or null if none.
var failure: Exception = _
// Default JobListener wired into submit(); records into `results` / `failure`.
val jobListener = new JobListener() {
  override def taskSucceeded(index: Int, result: Any) = results.put(index, result)
  override def jobFailed(exception: Exception) = { failure = exception }
}
/** A simple helper class for creating custom JobListeners. */
class SimpleListener extends JobListener {
  // Per-listener copies of the suite-level result/failure records.
  val results = new HashMap[Int, Any]
  var failure: Exception = null

  override def taskSucceeded(index: Int, result: Any): Unit = {
    results(index) = result
  }

  override def jobFailed(exception: Exception): Unit = {
    failure = exception
  }
}
override def beforeEach(): Unit = {
  super.beforeEach()
  // Every test starts with a fresh SparkContext and scheduler stack.
  init(new SparkConf())
}
/**
 * (Re)creates the local SparkContext and the whole scheduler stack used by the
 * tests. Called from beforeEach(), and directly by tests needing a custom conf.
 */
private def init(testConf: SparkConf): Unit = {
  sc = new SparkContext("local[2]", "DAGSchedulerSuite", testConf)
  sparkListener = new EventInfoRecordingListener
  failure = null
  sc.addSparkListener(sparkListener)
  // Clear all state accumulated by the previous test.
  taskSets.clear()
  tasksMarkedAsCompleted.clear()
  cancelledStages.clear()
  cacheLocations.clear()
  results.clear()
  securityMgr = new SecurityManager(conf)
  broadcastManager = new BroadcastManager(true, conf, securityMgr)
  mapOutputTracker = new MapOutputTrackerMaster(conf, broadcastManager, true) {
    override def sendTracker(message: Any): Unit = {
      // no-op, just so we can stop this to avoid leaking threads
    }
  }
  scheduler = new DAGScheduler(
    sc,
    taskScheduler,
    sc.listenerBus,
    mapOutputTracker,
    blockManagerMaster,
    sc.env)
  // Synchronous loop so tests observe scheduler state right after posting events.
  dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(scheduler)
}
override def afterEach(): Unit = {
  try {
    // Stop every component created in init(); the finally guarantees the base
    // class teardown (which stops the SparkContext) always runs.
    scheduler.stop()
    dagEventProcessLoopTester.stop()
    mapOutputTracker.stop()
    broadcastManager.stop()
  } finally {
    super.afterEach()
  }
}
// Nothing suite-specific to tear down beyond the base class.
override def afterAll(): Unit = {
  super.afterAll()
}
/**
 * Type of RDD we use for testing. Note that we should never call the real RDD compute methods.
 * This is a pair RDD type so it can always be used in ShuffleDependencies.
 */
type PairOfIntsRDD = RDD[(Int, Int)]
/**
 * Process the supplied event as if it were the top of the DAGScheduler event queue, expecting
 * the scheduler not to exit.
 *
 * After processing the event, submit waiting stages as is done on most iterations of the
 * DAGScheduler event loop.
 */
private def runEvent(event: DAGSchedulerEvent): Unit = {
  // The tester loop handles the event synchronously on this thread.
  dagEventProcessLoopTester.post(event)
}
/**
 * When we submit dummy Jobs, this is the compute function we supply. Except in a local test
 * below, we do not expect this function to ever be executed; instead, we will return results
 * directly through CompletionEvents.
 */
private val jobComputeFunc = (context: TaskContext, it: Iterator[(_)]) =>
  it.next.asInstanceOf[Tuple2[_, _]]._1
/**
 * Sends one CompletionEvent per (reason, result) pair, matching results to
 * tasks positionally within the TaskSet.
 */
private def complete(taskSet: TaskSet, results: Seq[(TaskEndReason, Any)]): Unit = {
  assert(taskSet.tasks.size >= results.size)
  results.zipWithIndex.foreach { case ((reason, value), idx) =>
    if (idx < taskSet.tasks.size) {
      runEvent(makeCompletionEvent(taskSet.tasks(idx), reason, value))
    }
  }
}
/**
 * Like complete(), but attaches a long accumulator with the given id (and an
 * update of 1) to every completion event.
 */
private def completeWithAccumulator(
    accumId: Long,
    taskSet: TaskSet,
    results: Seq[(TaskEndReason, Any)]): Unit = {
  assert(taskSet.tasks.size >= results.size)
  results.zipWithIndex.foreach { case ((reason, value), idx) =>
    if (idx < taskSet.tasks.size) {
      runEvent(makeCompletionEvent(
        taskSet.tasks(idx),
        reason,
        value,
        Seq(AccumulatorSuite.createLongAccum("", initValue = 1, id = accumId))))
    }
  }
}
/** Submits a job to the scheduler and returns the job id. */
private def submit(
    rdd: RDD[_],
    partitions: Array[Int],
    func: (TaskContext, Iterator[_]) => _ = jobComputeFunc,
    listener: JobListener = jobListener,
    properties: Properties = null): Int = {
  // Allocate the id the same way DAGScheduler.submitJob would.
  val jobId = scheduler.nextJobId.getAndIncrement()
  runEvent(JobSubmitted(jobId, rdd, func, partitions, CallSite("", ""), listener, properties))
  jobId
}
/** Submits a map stage to the scheduler and returns the job id. */
private def submitMapStage(
    shuffleDep: ShuffleDependency[_, _, _],
    listener: JobListener = jobListener): Int = {
  val jobId = scheduler.nextJobId.getAndIncrement()
  runEvent(MapStageSubmitted(jobId, shuffleDep, CallSite("", ""), listener))
  jobId
}
/** Sends TaskSetFailed to the scheduler. */
private def failed(taskSet: TaskSet, message: String): Unit = {
  runEvent(TaskSetFailed(taskSet, message, None))
}
/** Sends JobCancelled to the DAG scheduler. */
private def cancel(jobId: Int): Unit = {
  runEvent(JobCancelled(jobId, None))
}
test("[SPARK-3353] parent stage should have lower stage id") {
sc.parallelize(1 to 10).map(x => (x, x)).reduceByKey(_ + _, 4).count()
val stageByOrderOfExecution = sparkListener.stageByOrderOfExecution
assert(stageByOrderOfExecution.length === 2)
assert(stageByOrderOfExecution(0) < stageByOrderOfExecution(1))
}
/**
 * This test ensures that DAGScheduler build stage graph correctly.
 *
 * Suppose you have the following DAG:
 *
 * [A] <--(s_A)-- [B] <--(s_B)-- [C] <--(s_C)-- [D]
 * \\ /
 * <-------------
 *
 * Here, RDD B has a shuffle dependency on RDD A, and RDD C has shuffle dependency on both
 * B and A. The shuffle dependency IDs are numbers in the DAGScheduler, but to make the example
 * easier to understand, let's call the shuffled data from A shuffle dependency ID s_A and the
 * shuffled data from B shuffle dependency ID s_B.
 *
 * Note: [] means an RDD, () means a shuffle dependency.
 */
test("[SPARK-13902] Ensure no duplicate stages are created") {
  val rddA = new MyRDD(sc, 1, Nil)
  val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(1))
  val s_A = shuffleDepA.shuffleId
  val rddB = new MyRDD(sc, 1, List(shuffleDepA), tracker = mapOutputTracker)
  val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(1))
  val s_B = shuffleDepB.shuffleId
  val rddC = new MyRDD(sc, 1, List(shuffleDepA, shuffleDepB), tracker = mapOutputTracker)
  val shuffleDepC = new ShuffleDependency(rddC, new HashPartitioner(1))
  val s_C = shuffleDepC.shuffleId
  val rddD = new MyRDD(sc, 1, List(shuffleDepC), tracker = mapOutputTracker)
  submit(rddD, Array(0))
  // Exactly one map stage per shuffle id, even though s_A is depended on twice.
  assert(scheduler.shuffleIdToMapStage.size === 3)
  assert(scheduler.activeJobs.size === 1)
  val mapStageA = scheduler.shuffleIdToMapStage(s_A)
  val mapStageB = scheduler.shuffleIdToMapStage(s_B)
  val mapStageC = scheduler.shuffleIdToMapStage(s_C)
  val finalStage = scheduler.activeJobs.head.finalStage
  assert(mapStageA.parents.isEmpty)
  assert(mapStageB.parents === List(mapStageA))
  assert(mapStageC.parents === List(mapStageA, mapStageB))
  assert(finalStage.parents === List(mapStageC))
  // Drive the four stages to completion in dependency order.
  complete(taskSets(0), Seq((Success, makeMapStatus("hostA", 1))))
  complete(taskSets(1), Seq((Success, makeMapStatus("hostA", 1))))
  complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
  complete(taskSets(3), Seq((Success, 42)))
  assert(results === Map(0 -> 42))
  assertDataStructuresEmpty()
}
test("All shuffle files on the slave should be cleaned up when slave lost") {
// reset the test context with the right shuffle service config
afterEach()
val conf = new SparkConf()
conf.set(config.SHUFFLE_SERVICE_ENABLED.key, "true")
conf.set("spark.files.fetchFailure.unRegisterOutputOnHost", "true")
init(conf)
runEvent(ExecutorAdded("exec-hostA1", "hostA"))
runEvent(ExecutorAdded("exec-hostA2", "hostA"))
runEvent(ExecutorAdded("exec-hostB", "hostB"))
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(3))
val firstShuffleId = firstShuffleDep.shuffleId
val shuffleMapRdd = new MyRDD(sc, 3, List(firstShuffleDep))
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(3))
val secondShuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
submit(reduceRdd, Array(0))
// map stage1 completes successfully, with one task on each executor
complete(taskSets(0), Seq(
(Success,
MapStatus(
BlockManagerId("exec-hostA1", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 5)),
(Success,
MapStatus(
BlockManagerId("exec-hostA2", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 6)),
(Success, makeMapStatus("hostB", 1, mapTaskId = 7))
))
// map stage2 completes successfully, with one task on each executor
complete(taskSets(1), Seq(
(Success,
MapStatus(
BlockManagerId("exec-hostA1", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 8)),
(Success,
MapStatus(
BlockManagerId("exec-hostA2", "hostA", 12345), Array.fill[Long](1)(2), mapTaskId = 9)),
(Success, makeMapStatus("hostB", 1, mapTaskId = 10))
))
// make sure our test setup is correct
val initialMapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
// val initialMapStatus1 = mapOutputTracker.mapStatuses.get(0).get
assert(initialMapStatus1.count(_ != null) === 3)
assert(initialMapStatus1.map{_.location.executorId}.toSet ===
Set("exec-hostA1", "exec-hostA2", "exec-hostB"))
assert(initialMapStatus1.map{_.mapId}.toSet === Set(5, 6, 7))
val initialMapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
// val initialMapStatus1 = mapOutputTracker.mapStatuses.get(0).get
assert(initialMapStatus2.count(_ != null) === 3)
assert(initialMapStatus2.map{_.location.executorId}.toSet ===
Set("exec-hostA1", "exec-hostA2", "exec-hostB"))
assert(initialMapStatus2.map{_.mapId}.toSet === Set(8, 9, 10))
// reduce stage fails with a fetch failure from one host
complete(taskSets(2), Seq(
(FetchFailed(BlockManagerId("exec-hostA2", "hostA", 12345),
firstShuffleId, 0L, 0, 0, "ignored"),
null)
))
// Here is the main assertion -- make sure that we de-register
// the map outputs for both map stage from both executors on hostA
val mapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
assert(mapStatus1.count(_ != null) === 1)
assert(mapStatus1(2).location.executorId === "exec-hostB")
assert(mapStatus1(2).location.host === "hostB")
val mapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
assert(mapStatus2.count(_ != null) === 1)
assert(mapStatus2(2).location.executorId === "exec-hostB")
assert(mapStatus2(2).location.host === "hostB")
}
test("zero split job") {
var numResults = 0
var failureReason: Option[Exception] = None
val fakeListener = new JobListener() {
override def taskSucceeded(partition: Int, value: Any): Unit = numResults += 1
override def jobFailed(exception: Exception): Unit = {
failureReason = Some(exception)
}
}
val jobId = submit(new MyRDD(sc, 0, Nil), Array(), listener = fakeListener)
assert(numResults === 0)
cancel(jobId)
assert(failureReason.isDefined)
assert(failureReason.get.getMessage() === "Job 0 cancelled ")
}
test("run trivial job") {
submit(new MyRDD(sc, 1, Nil), Array(0))
complete(taskSets(0), List((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("run trivial job w/ dependency") {
val baseRdd = new MyRDD(sc, 1, Nil)
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
submit(finalRdd, Array(0))
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("equals and hashCode AccumulableInfo") {
val accInfo1 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = true, countFailedValues = false)
val accInfo2 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = false, countFailedValues = false)
val accInfo3 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = false, countFailedValues = false)
assert(accInfo1 !== accInfo2)
assert(accInfo2 === accInfo3)
assert(accInfo2.hashCode() === accInfo3.hashCode())
}
test("cache location preferences w/ dependency") {
val baseRdd = new MyRDD(sc, 1, Nil).cache()
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
cacheLocations(baseRdd.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
submit(finalRdd, Array(0))
val taskSet = taskSets(0)
assertLocations(taskSet, Seq(Seq("hostA", "hostB")))
complete(taskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("regression test for getCacheLocs") {
val rdd = new MyRDD(sc, 3, Nil).cache()
cacheLocations(rdd.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
cacheLocations(rdd.id -> 1) =
Seq(makeBlockManagerId("hostB"), makeBlockManagerId("hostC"))
cacheLocations(rdd.id -> 2) =
Seq(makeBlockManagerId("hostC"), makeBlockManagerId("hostD"))
val locs = scheduler.getCacheLocs(rdd).map(_.map(_.host))
assert(locs === Seq(Seq("hostA", "hostB"), Seq("hostB", "hostC"), Seq("hostC", "hostD")))
}
/**
 * This test ensures that if a particular RDD is cached, RDDs earlier in the dependency chain
 * are not computed. It constructs the following chain of dependencies:
 * +---+ shuffle +---+ +---+ +---+
 * | A |<--------| B |<---| C |<---| D |
 * +---+ +---+ +---+ +---+
 * Here, B is derived from A by performing a shuffle, C has a one-to-one dependency on B,
 * and D similarly has a one-to-one dependency on C. If none of the RDDs were cached, this
 * set of RDDs would result in a two stage job: one ShuffleMapStage, and a ResultStage that
 * reads the shuffled data from RDD A. This test ensures that if C is cached, the scheduler
 * doesn't perform a shuffle, and instead computes the result using a single ResultStage
 * that reads C's cached data.
 */
test("getMissingParentStages should consider all ancestor RDDs' cache statuses") {
  val rddA = new MyRDD(sc, 1, Nil)
  val rddB = new MyRDD(sc, 1, List(new ShuffleDependency(rddA, new HashPartitioner(1))),
    tracker = mapOutputTracker)
  val rddC = new MyRDD(sc, 1, List(new OneToOneDependency(rddB))).cache()
  val rddD = new MyRDD(sc, 1, List(new OneToOneDependency(rddC)))
  cacheLocations(rddC.id -> 0) =
    Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
  submit(rddD, Array(0))
  assert(scheduler.runningStages.size === 1)
  // Make sure that the scheduler is running the final result stage.
  // Because C is cached, the shuffle map stage to compute A does not need to be run.
  assert(scheduler.runningStages.head.isInstanceOf[ResultStage])
}
test("avoid exponential blowup when getting preferred locs list") {
  // Build up a complex dependency graph with repeated zip operations, without preferred locations
  var rdd: RDD[_] = new MyRDD(sc, 1, Nil)
  (1 to 30).foreach(_ => rdd = rdd.zip(rdd))
  // getPreferredLocs runs quickly, indicating that exponential graph traversal is avoided.
  failAfter(10.seconds) {
    val preferredLocs = scheduler.getPreferredLocs(rdd, 0)
    // No preferred locations are returned.
    assert(preferredLocs.length === 0)
  }
}
test("unserializable task") {
  val unserializableRdd = new MyRDD(sc, 1, Nil) {
    class UnserializableClass
    val unserializable = new UnserializableClass
  }
  submit(unserializableRdd, Array(0))
  // The job aborts during task serialization, before any task set is launched.
  assert(failure.getMessage.startsWith(
    "Job aborted due to stage failure: Task not serializable:"))
  assert(sparkListener.failedStages === Seq(0))
  assertDataStructuresEmpty()
}
test("trivial job failure") {
submit(new MyRDD(sc, 1, Nil), Array(0))
failed(taskSets(0), "some failure")
assert(failure.getMessage === "Job aborted due to stage failure: some failure")
assert(sparkListener.failedStages === Seq(0))
assertDataStructuresEmpty()
}
test("trivial job cancellation") {
val rdd = new MyRDD(sc, 1, Nil)
val jobId = submit(rdd, Array(0))
cancel(jobId)
assert(failure.getMessage === s"Job $jobId cancelled ")
assert(sparkListener.failedStages === Seq(0))
assertDataStructuresEmpty()
}
test("job cancellation no-kill backend") {
// make sure that the DAGScheduler doesn't crash when the TaskScheduler
// doesn't implement killTask()
val noKillTaskScheduler = new TaskScheduler() {
override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
override def start(): Unit = {}
override def stop(): Unit = {}
override def submitTasks(taskSet: TaskSet): Unit = {
taskSets += taskSet
}
override def cancelTasks(stageId: Int, interruptThread: Boolean): Unit = {
throw new UnsupportedOperationException
}
override def killTaskAttempt(
taskId: Long, interruptThread: Boolean, reason: String): Boolean = {
throw new UnsupportedOperationException
}
override def killAllTaskAttempts(
stageId: Int, interruptThread: Boolean, reason: String): Unit = {
throw new UnsupportedOperationException
}
override def notifyPartitionCompletion(stageId: Int, partitionId: Int): Unit = {
throw new UnsupportedOperationException
}
override def setDAGScheduler(dagScheduler: DAGScheduler): Unit = {}
override def defaultParallelism(): Int = 2
override def executorHeartbeatReceived(
execId: String,
accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
blockManagerId: BlockManagerId,
executorUpdates: Map[(Int, Int), ExecutorMetrics]): Boolean = true
override def executorDecommission(executorId: String): Unit = {}
override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
override def applicationAttemptId(): Option[String] = None
}
val noKillScheduler = new DAGScheduler(
sc,
noKillTaskScheduler,
sc.listenerBus,
mapOutputTracker,
blockManagerMaster,
sc.env)
dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(noKillScheduler)
val jobId = submit(new MyRDD(sc, 1, Nil), Array(0))
cancel(jobId)
// Because the job wasn't actually cancelled, we shouldn't have received a failure message.
assert(failure === null)
// When the task set completes normally, state should be correctly updated.
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
assert(sparkListener.failedStages.isEmpty)
assert(sparkListener.successfulStages.contains(0))
}
test("run trivial shuffle") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
complete(taskSets(1), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
  // End-to-end recovery path: one reduce task hits a FetchFailed, the map stage is
  // resubmitted for the lost output, and the job still completes with both results.
  test("run trivial shuffle with fetch failure") {
    val shuffleMapRdd = new MyRDD(sc, 2, Nil)
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
    val shuffleId = shuffleDep.shuffleId
    val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
    submit(reduceRdd, Array(0, 1))
    // Both map tasks succeed, on different hosts.
    complete(taskSets(0), Seq(
      (Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
      (Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
    // the 2nd ResultTask failed
    complete(taskSets(1), Seq(
      (Success, 42),
      (FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"), null)))
    // this will get called
    // blockManagerMaster.removeExecutor("exec-hostA")
    // ask the scheduler to try it again
    scheduler.resubmitFailedStages()
    // have the 2nd attempt pass
    complete(taskSets(2), Seq((Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
    // we can see both result blocks now
    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
      HashSet("hostA", "hostB"))
    // The rerun of the failed reduce task completes the job.
    complete(taskSets(3), Seq((Success, 43)))
    assert(results === Map(0 -> 42, 1 -> 43))
    assertDataStructuresEmpty()
  }
  // Table-driven cases for shuffle-file loss on executor/worker removal.
  // Tuple fields: (event description, executor-loss event, external shuffle service on?,
  // whether the shuffle files are expected to be lost after the event).
  private val shuffleFileLossTests = Seq(
    ("slave lost with shuffle service", SlaveLost("", false), true, false),
    ("worker lost with shuffle service", SlaveLost("", true), true, true),
    ("worker lost without shuffle service", SlaveLost("", true), false, true),
    ("executor failure with shuffle service", ExecutorKilled, true, false),
    ("executor failure without shuffle service", ExecutorKilled, false, true))
  // Register one test per row of the table above.
  for ((eventDescription, event, shuffleServiceOn, expectFileLoss) <- shuffleFileLossTests) {
    val maybeLost = if (expectFileLoss) {
      "lost"
    } else {
      "not lost"
    }
    test(s"shuffle files $maybeLost when $eventDescription") {
      // reset the test context with the right shuffle service config
      afterEach()
      val conf = new SparkConf()
      conf.set(config.SHUFFLE_SERVICE_ENABLED.key, shuffleServiceOn.toString)
      init(conf)
      assert(sc.env.blockManager.externalShuffleServiceEnabled == shuffleServiceOn)
      val shuffleMapRdd = new MyRDD(sc, 2, Nil)
      val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
      val shuffleId = shuffleDep.shuffleId
      val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
      submit(reduceRdd, Array(0))
      // Complete the map stage, then lose the executor on hostA.
      complete(taskSets(0), Seq(
        (Success, makeMapStatus("hostA", 1)),
        (Success, makeMapStatus("hostB", 1))))
      runEvent(ExecutorLost("exec-hostA", event))
      if (expectFileLoss) {
        // hostA's map output should have been deregistered, so the fetch must fail.
        intercept[MetadataFetchFailedException] {
          mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0)
        }
      } else {
        // Shuffle files survive the event; both locations remain registered.
        assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
          HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
      }
    }
  }
  // Regression test: the properties posted with SparkListenerJobStart must be a clone of the
  // caller's Properties (same content, different identity), even for a 0-partition job.
  test("SPARK-28967 properties must be cloned before posting to listener bus for 0 partition") {
    val properties = new Properties()
    val func = (context: TaskContext, it: Iterator[(_)]) => 1
    val resultHandler = (taskIndex: Int, result: Int) => {}
    // Pre-load a failure so the assertion below fails if the listener never fires.
    val assertionError = new AtomicReference[TestFailedException](
      new TestFailedException("Listener didn't receive expected JobStart event", 0))
    val listener = new SparkListener() {
      override def onJobStart(event: SparkListenerJobStart): Unit = {
        try {
          // spark.job.description can be implicitly set for 0 partition jobs.
          // So event.properties and properties can be different. See SPARK-29997.
          event.properties.remove(SparkContext.SPARK_JOB_DESCRIPTION)
          properties.remove(SparkContext.SPARK_JOB_DESCRIPTION)
          assert(event.properties.equals(properties), "Expected same content of properties, " +
            s"but got properties with different content. props in caller ${properties} /" +
            s" props in event ${event.properties}")
          assert(event.properties.ne(properties), "Expected instance with different identity, " +
            "but got same instance.")
          assertionError.set(null)
        } catch {
          case e: TestFailedException => assertionError.set(e)
        }
      }
    }
    sc.addSparkListener(listener)
    // 0 partition
    val testRdd = new MyRDD(sc, 0, Nil)
    val waiter = scheduler.submitJob(testRdd, func, Seq.empty, CallSite.empty,
      resultHandler, properties)
    // Drain the listener bus before checking what the listener recorded.
    sc.listenerBus.waitUntilEmpty()
    assert(assertionError.get() === null)
  }
// Helper function to validate state when creating tests for task failures
private def checkStageId(stageId: Int, attempt: Int, stageAttempt: TaskSet): Unit = {
assert(stageAttempt.stageId === stageId)
assert(stageAttempt.stageAttemptId == attempt)
}
  // Helper functions to extract commonly used code in Fetch Failure test cases
  /** Registers an EndListener and resets the job-end tracking vars for a stage-abort test. */
  private def setupStageAbortTest(sc: SparkContext): Unit = {
    sc.listenerBus.addToSharedQueue(new EndListener())
    ended = false
    jobResult = null
  }
  // Create a new Listener to confirm that the listenerBus sees the JobEnd message
  // when we abort the stage. This message will also be consumed by the EventLoggingListener
  // so this will propagate up to the user.
  // Mutable test state written by EndListener and read by the stage-abort tests.
  var ended = false
  var jobResult : JobResult = null
  /** Records the JobEnd result and flips `ended` so tests can observe job completion. */
  class EndListener extends SparkListener {
    override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
      jobResult = jobEnd.jobResult
      ended = true
    }
  }
/**
* Common code to get the next stage attempt, confirm it's the one we expect, and complete it
* successfully.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
* @param numShufflePartitions - The number of partitions in the next stage
*/
private def completeShuffleMapStageSuccessfully(
stageId: Int,
attemptIdx: Int,
numShufflePartitions: Int): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
complete(stageAttempt, stageAttempt.tasks.zipWithIndex.map {
case (task, idx) =>
(Success, makeMapStatus("host" + ('A' + idx).toChar, numShufflePartitions))
}.toSeq)
}
/**
* Common code to get the next stage attempt, confirm it's the one we expect, and complete it
* with all FetchFailure.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
* @param shuffleDep - The shuffle dependency of the stage with a fetch failure
*/
private def completeNextStageWithFetchFailure(
stageId: Int,
attemptIdx: Int,
shuffleDep: ShuffleDependency[_, _, _]): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
complete(stageAttempt, stageAttempt.tasks.zipWithIndex.map { case (task, idx) =>
(FetchFailed(makeBlockManagerId("hostA"), shuffleDep.shuffleId, 0L, 0, idx, "ignored"), null)
}.toSeq)
}
/**
* Common code to get the next result stage attempt, confirm it's the one we expect, and
* complete it with a success where we return 42.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
*/
private def completeNextResultStageWithSuccess(
stageId: Int,
attemptIdx: Int,
partitionToResult: Int => Int = _ => 42): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
assert(scheduler.stageIdToStage(stageId).isInstanceOf[ResultStage])
val taskResults = stageAttempt.tasks.zipWithIndex.map { case (task, idx) =>
(Success, partitionToResult(idx))
}
complete(stageAttempt, taskResults.toSeq)
}
  /**
   * In this test, we simulate a job where many tasks in the same stage fail. We want to show
   * that many fetch failures inside a single stage attempt do not trigger an abort
   * on their own, but only when there are enough failing stage attempts.
   */
  test("Single stage fetch failure should not abort the stage.") {
    setupStageAbortTest(sc)
    val parts = 8
    val shuffleMapRdd = new MyRDD(sc, parts, Nil)
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(parts))
    val reduceRdd = new MyRDD(sc, parts, List(shuffleDep), tracker = mapOutputTracker)
    submit(reduceRdd, (0 until parts).toArray)
    completeShuffleMapStageSuccessfully(0, 0, numShufflePartitions = parts)
    // All 8 reduce tasks fail with FetchFailure in a single stage attempt.
    completeNextStageWithFetchFailure(1, 0, shuffleDep)
    // Resubmit and confirm that now all is well
    scheduler.resubmitFailedStages()
    assert(scheduler.runningStages.nonEmpty)
    assert(!ended)
    // Complete stage 0 and then stage 1 with a "42"
    completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = parts)
    completeNextResultStageWithSuccess(1, 1)
    // Confirm job finished successfully
    sc.listenerBus.waitUntilEmpty()
    assert(ended)
    assert(results === (0 until parts).map { idx => idx -> 42 }.toMap)
    assertDataStructuresEmpty()
  }
  /**
   * In this test we simulate a job failure where the first stage completes successfully and
   * the second stage fails due to a fetch failure. Multiple successive fetch failures of a stage
   * trigger an overall job abort to avoid endless retries.
   */
  test("Multiple consecutive stage fetch failures should lead to job being aborted.") {
    setupStageAbortTest(sc)
    val shuffleMapRdd = new MyRDD(sc, 2, Nil)
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
    val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
    submit(reduceRdd, Array(0, 1))
    // Fail stage 1 on every consecutive attempt up to the configured limit.
    for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts) {
      // Complete all the tasks for the current attempt of stage 0 successfully
      completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
      // Now we should have a new taskSet, for a new attempt of stage 1.
      // Fail all these tasks with FetchFailure
      completeNextStageWithFetchFailure(1, attempt, shuffleDep)
      // this will trigger a resubmission of stage 0, since we've lost some of its
      // map output, for the next iteration through the loop
      scheduler.resubmitFailedStages()
      if (attempt < scheduler.maxConsecutiveStageAttempts - 1) {
        // Before the limit is reached the job keeps retrying.
        assert(scheduler.runningStages.nonEmpty)
        assert(!ended)
      } else {
        // Stage should have been aborted and removed from running stages
        assertDataStructuresEmpty()
        sc.listenerBus.waitUntilEmpty()
        assert(ended)
        jobResult match {
          case JobFailed(reason) =>
            assert(reason.getMessage.contains("ResultStage 1 () has failed the maximum"))
          case other => fail(s"expected JobFailed, not $other")
        }
      }
    }
  }
  /**
   * In this test, we create a job with two consecutive shuffles, and simulate 2 failures for each
   * shuffle fetch. In total, the job has had four failures overall but not four failures
   * for a particular stage, and as such should not be aborted.
   */
  test("Failures in different stages should not trigger an overall abort") {
    setupStageAbortTest(sc)
    val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
    val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
    val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
    val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
    val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
    submit(finalRdd, Array(0))
    // In the first two iterations, Stage 0 succeeds and stage 1 fails. In the next two iterations,
    // stage 2 fails.
    for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts) {
      // Complete all the tasks for the current attempt of stage 0 successfully
      completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
      if (attempt < scheduler.maxConsecutiveStageAttempts / 2) {
        // Now we should have a new taskSet, for a new attempt of stage 1.
        // Fail all these tasks with FetchFailure
        completeNextStageWithFetchFailure(1, attempt, shuffleDepOne)
      } else {
        completeShuffleMapStageSuccessfully(1, attempt, numShufflePartitions = 1)
        // Fail stage 2
        completeNextStageWithFetchFailure(2,
          attempt - scheduler.maxConsecutiveStageAttempts / 2, shuffleDepTwo)
      }
      // this will trigger a resubmission of stage 0, since we've lost some of its
      // map output, for the next iteration through the loop
      scheduler.resubmitFailedStages()
    }
    // No stage hit the consecutive-failure limit, so the job can still finish normally.
    completeShuffleMapStageSuccessfully(0, 4, numShufflePartitions = 2)
    completeShuffleMapStageSuccessfully(1, 4, numShufflePartitions = 1)
    // Succeed stage2 with a "42"
    completeNextResultStageWithSuccess(2, scheduler.maxConsecutiveStageAttempts / 2)
    assert(results === Map(0 -> 42))
    assertDataStructuresEmpty()
  }
  /**
   * In this test we demonstrate that only consecutive failures trigger a stage abort. A stage may
   * fail multiple times, succeed, then fail a few more times (because its run again by downstream
   * dependencies). The total number of failed attempts for one stage will go over the limit,
   * but that doesn't matter, since they have successes in the middle.
   */
  test("Non-consecutive stage failures don't trigger abort") {
    setupStageAbortTest(sc)
    val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
    val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
    val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
    val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
    val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
    submit(finalRdd, Array(0))
    // First, execute stages 0 and 1, failing stage 1 up to MAX-1 times.
    for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts - 1) {
      // Make each task in stage 0 success
      completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
      // Now we should have a new taskSet, for a new attempt of stage 1.
      // Fail these tasks with FetchFailure
      completeNextStageWithFetchFailure(1, attempt, shuffleDepOne)
      scheduler.resubmitFailedStages()
      // Confirm we have not yet aborted
      assert(scheduler.runningStages.nonEmpty)
      assert(!ended)
    }
    // Rerun stage 0 and 1 to step through the task set
    completeShuffleMapStageSuccessfully(0, 3, numShufflePartitions = 2)
    completeShuffleMapStageSuccessfully(1, 3, numShufflePartitions = 1)
    // Fail stage 2 so that stage 1 is resubmitted when we call scheduler.resubmitFailedStages()
    completeNextStageWithFetchFailure(2, 0, shuffleDepTwo)
    scheduler.resubmitFailedStages()
    // Rerun stage 0 to step through the task set
    completeShuffleMapStageSuccessfully(0, 4, numShufflePartitions = 2)
    // Now again, fail stage 1 (up to MAX_FAILURES) but confirm that this doesn't trigger an abort
    // since we succeeded in between.
    completeNextStageWithFetchFailure(1, 4, shuffleDepOne)
    scheduler.resubmitFailedStages()
    // Confirm we have not yet aborted
    assert(scheduler.runningStages.nonEmpty)
    assert(!ended)
    // Next, succeed all and confirm output
    // Rerun stage 0 + 1
    completeShuffleMapStageSuccessfully(0, 5, numShufflePartitions = 2)
    completeShuffleMapStageSuccessfully(1, 5, numShufflePartitions = 1)
    // Succeed stage 2 and verify results
    completeNextResultStageWithSuccess(2, 1)
    assertDataStructuresEmpty()
    sc.listenerBus.waitUntilEmpty()
    assert(ended)
    assert(results === Map(0 -> 42))
  }
  // Verifies that a second FetchFailed from the same stage attempt does not produce a
  // second stage-failure event on the listener bus.
  test("trivial shuffle with multiple fetch failures") {
    val shuffleMapRdd = new MyRDD(sc, 2, Nil)
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
    val shuffleId = shuffleDep.shuffleId
    val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
    submit(reduceRdd, Array(0, 1))
    complete(taskSets(0), Seq(
      (Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
      (Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
    // The MapOutputTracker should know about both map output locations.
    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
      HashSet("hostA", "hostB"))
    // The first result task fails, with a fetch failure for the output from the first mapper.
    runEvent(makeCompletionEvent(
      taskSets(1).tasks(0),
      FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
      null))
    assert(sparkListener.failedStages.contains(1))
    // The second ResultTask fails, with a fetch failure for the output from the second mapper.
    // NOTE(review): this reuses tasks(0) rather than tasks(1) even though the comment says
    // "second ResultTask"; only the (mapId, mapIndex, reduceId) in the FetchFailed differ.
    // Confirm whether tasks(1) was intended.
    runEvent(makeCompletionEvent(
      taskSets(1).tasks(0),
      FetchFailed(makeBlockManagerId("hostA"), shuffleId, 1L, 1, 1, "ignored"),
      null))
    // The SparkListener should not receive redundant failure events.
    assert(sparkListener.failedStages.size === 1)
  }
  // A barrier stage must rerun ALL of its tasks when resubmitted after a FetchFailure,
  // even the ones that had already succeeded.
  test("Retry all the tasks on a resubmitted attempt of a barrier stage caused by FetchFailure") {
    val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
    val shuffleId = shuffleDep.shuffleId
    val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
    submit(reduceRdd, Array(0, 1))
    complete(taskSets(0), Seq(
      (Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
      (Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
    // Map stage fully complete: no missing partitions.
    assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
    // The first result task fails, with a fetch failure for the output from the first mapper.
    runEvent(makeCompletionEvent(
      taskSets(1).tasks(0),
      FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
      null))
    // Both map partitions are missing again -- the whole barrier stage was invalidated.
    assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(0, 1)))
    scheduler.resubmitFailedStages()
    // Complete the map stage.
    completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = 2)
    // Complete the result stage.
    completeNextResultStageWithSuccess(1, 1)
    sc.listenerBus.waitUntilEmpty()
    assertDataStructuresEmpty()
  }
  // As above, but the barrier stage is invalidated by a TaskKilled in the map stage itself:
  // the already-finished map task must also be rerun on the new attempt.
  test("Retry all the tasks on a resubmitted attempt of a barrier stage caused by TaskKilled") {
    val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
    val shuffleId = shuffleDep.shuffleId
    val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
    submit(reduceRdd, Array(0, 1))
    // Only the first map task finishes.
    complete(taskSets(0), Seq(
      (Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
    assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(1)))
    // The second map task fails with TaskKilled.
    runEvent(makeCompletionEvent(
      taskSets(0).tasks(1),
      TaskKilled("test"),
      null))
    assert(sparkListener.failedStages === Seq(0))
    // The previously-registered output of task 0 is discarded along with the failed attempt.
    assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq(0, 1)))
    scheduler.resubmitFailedStages()
    // Complete the map stage.
    completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = 2)
    // Complete the result stage.
    completeNextResultStageWithSuccess(1, 0)
    sc.listenerBus.waitUntilEmpty()
    assertDataStructuresEmpty()
  }
  // A barrier RESULT stage cannot be partially rerun, so a failed barrier ResultTask
  // must abort the whole job.
  test("Fail the job if a barrier ResultTask failed") {
    val shuffleMapRdd = new MyRDD(sc, 2, Nil)
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
    val shuffleId = shuffleDep.shuffleId
    val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
      .barrier()
      .mapPartitions(iter => iter)
    submit(reduceRdd, Array(0, 1))
    // Complete the map stage.
    complete(taskSets(0), Seq(
      (Success, makeMapStatus("hostA", 2)),
      (Success, makeMapStatus("hostA", 2))))
    assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
    // The first ResultTask fails
    runEvent(makeCompletionEvent(
      taskSets(1).tasks(0),
      TaskKilled("test"),
      null))
    // Assert the stage has been cancelled.
    sc.listenerBus.waitUntilEmpty()
    assert(failure.getMessage.startsWith("Job aborted due to stage failure: Could not recover " +
      "from a failed barrier ResultStage."))
  }
  /**
   * This tests the case where another FetchFailed comes in while the map stage is getting
   * re-run.
   */
  test("late fetch failures don't cause multiple concurrent attempts for the same map stage") {
    val shuffleMapRdd = new MyRDD(sc, 2, Nil)
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
    val shuffleId = shuffleDep.shuffleId
    val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
    submit(reduceRdd, Array(0, 1))
    val mapStageId = 0
    // Counts how many attempts of the map stage the listener has seen submitted.
    def countSubmittedMapStageAttempts(): Int = {
      sparkListener.submittedStageInfos.count(_.stageId == mapStageId)
    }
    // The map stage should have been submitted.
    assert(countSubmittedMapStageAttempts() === 1)
    complete(taskSets(0), Seq(
      (Success, makeMapStatus("hostA", 2)),
      (Success, makeMapStatus("hostB", 2))))
    // The MapOutputTracker should know about both map output locations.
    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
      HashSet("hostA", "hostB"))
    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 1).map(_._1.host).toSet ===
      HashSet("hostA", "hostB"))
    // The first result task fails, with a fetch failure for the output from the first mapper.
    runEvent(makeCompletionEvent(
      taskSets(1).tasks(0),
      FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
      null))
    assert(sparkListener.failedStages.contains(1))
    // Trigger resubmission of the failed map stage.
    runEvent(ResubmitFailedStages)
    // Another attempt for the map stage should have been submitted, resulting in 2 total attempts.
    assert(countSubmittedMapStageAttempts() === 2)
    // The second ResultTask fails, with a fetch failure for the output from the second mapper.
    runEvent(makeCompletionEvent(
      taskSets(1).tasks(1),
      FetchFailed(makeBlockManagerId("hostB"), shuffleId, 1L, 1, 1, "ignored"),
      null))
    // Another ResubmitFailedStages event should not result in another attempt for the map
    // stage being run concurrently.
    // NOTE: the actual ResubmitFailedStages may get called at any time during this, but it
    // shouldn't affect anything -- our calling it just makes *SURE* it gets called between the
    // desired event and our check.
    runEvent(ResubmitFailedStages)
    assert(countSubmittedMapStageAttempts() === 2)
  }
  /**
   * This tests the case where a late FetchFailed comes in after the map stage has finished getting
   * retried and a new reduce stage starts running.
   */
  test("extremely late fetch failures don't cause multiple concurrent attempts for " +
    "the same stage") {
    val shuffleMapRdd = new MyRDD(sc, 2, Nil)
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
    val shuffleId = shuffleDep.shuffleId
    val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
    submit(reduceRdd, Array(0, 1))
    // Listener-based counters for how many attempts of each stage have been submitted.
    def countSubmittedReduceStageAttempts(): Int = {
      sparkListener.submittedStageInfos.count(_.stageId == 1)
    }
    def countSubmittedMapStageAttempts(): Int = {
      sparkListener.submittedStageInfos.count(_.stageId == 0)
    }
    // The map stage should have been submitted.
    assert(countSubmittedMapStageAttempts() === 1)
    // Complete the map stage.
    complete(taskSets(0), Seq(
      (Success, makeMapStatus("hostA", 2)),
      (Success, makeMapStatus("hostB", 2))))
    // The reduce stage should have been submitted.
    assert(countSubmittedReduceStageAttempts() === 1)
    // The first result task fails, with a fetch failure for the output from the first mapper.
    runEvent(makeCompletionEvent(
      taskSets(1).tasks(0),
      FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
      null))
    // Trigger resubmission of the failed map stage and finish the re-started map task.
    runEvent(ResubmitFailedStages)
    complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
    // Because the map stage finished, another attempt for the reduce stage should have been
    // submitted, resulting in 2 total attempts for each the map and the reduce stage.
    assert(countSubmittedMapStageAttempts() === 2)
    assert(countSubmittedReduceStageAttempts() === 2)
    // A late FetchFailed arrives from the second task in the original reduce stage.
    runEvent(makeCompletionEvent(
      taskSets(1).tasks(1),
      FetchFailed(makeBlockManagerId("hostB"), shuffleId, 1L, 1, 1, "ignored"),
      null))
    // Running ResubmitFailedStages shouldn't result in any more attempts for the map stage, because
    // the FetchFailed should have been ignored
    runEvent(ResubmitFailedStages)
    // The FetchFailed from the original reduce stage should be ignored.
    assert(countSubmittedMapStageAttempts() === 2)
  }
  // Task-end events must be posted even for completions that arrive after the stage is
  // done (speculative duplicates) and for unsuccessful completions.
  test("task events always posted in speculation / when stage is killed") {
    val baseRdd = new MyRDD(sc, 4, Nil)
    val finalRdd = new MyRDD(sc, 4, List(new OneToOneDependency(baseRdd)))
    submit(finalRdd, Array(0, 1, 2, 3))
    // complete two tasks
    runEvent(makeCompletionEvent(
      taskSets(0).tasks(0), Success, 42,
      Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
    runEvent(makeCompletionEvent(
      taskSets(0).tasks(1), Success, 42,
      Seq.empty, Array.empty, createFakeTaskInfoWithId(1)))
    // verify stage exists
    assert(scheduler.stageIdToStage.contains(0))
    assert(sparkListener.endedTasks.size === 2)
    // finish other 2 tasks
    runEvent(makeCompletionEvent(
      taskSets(0).tasks(2), Success, 42,
      Seq.empty, Array.empty, createFakeTaskInfoWithId(2)))
    runEvent(makeCompletionEvent(
      taskSets(0).tasks(3), Success, 42,
      Seq.empty, Array.empty, createFakeTaskInfoWithId(3)))
    assert(sparkListener.endedTasks.size === 4)
    // verify the stage is done
    assert(!scheduler.stageIdToStage.contains(0))
    // Stage should be complete. Finish one other Successful task to simulate what can happen
    // with a speculative task and make sure the event is sent out
    runEvent(makeCompletionEvent(
      taskSets(0).tasks(3), Success, 42,
      Seq.empty, Array.empty, createFakeTaskInfoWithId(5)))
    assert(sparkListener.endedTasks.size === 5)
    // make sure non successful tasks also send out event
    runEvent(makeCompletionEvent(
      taskSets(0).tasks(3), UnknownReason, 42,
      Seq.empty, Array.empty, createFakeTaskInfoWithId(6)))
    assert(sparkListener.endedTasks.size === 6)
  }
  // Map-task completions carrying a stale epoch (from before an executor loss on the same
  // host) must be ignored; completions from unaffected hosts or with a fresh epoch count.
  test("ignore late map task completions") {
    val shuffleMapRdd = new MyRDD(sc, 2, Nil)
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
    val shuffleId = shuffleDep.shuffleId
    val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
    submit(reduceRdd, Array(0, 1))
    // pretend we were told hostA went away
    val oldEpoch = mapOutputTracker.getEpoch
    runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
    val newEpoch = mapOutputTracker.getEpoch
    // Executor loss bumps the epoch, which is what stale completions are checked against.
    assert(newEpoch > oldEpoch)
    // now start completing some tasks in the shuffle map stage, under different hosts
    // and epochs, and make sure scheduler updates its state correctly
    val taskSet = taskSets(0)
    val shuffleStage = scheduler.stageIdToStage(taskSet.stageId).asInstanceOf[ShuffleMapStage]
    assert(shuffleStage.numAvailableOutputs === 0)
    // should be ignored for being too old
    runEvent(makeCompletionEvent(
      taskSet.tasks(0),
      Success,
      makeMapStatus("hostA", reduceRdd.partitions.size)))
    assert(shuffleStage.numAvailableOutputs === 0)
    // should work because it's a non-failed host (so the available map outputs will increase)
    runEvent(makeCompletionEvent(
      taskSet.tasks(0),
      Success,
      makeMapStatus("hostB", reduceRdd.partitions.size)))
    assert(shuffleStage.numAvailableOutputs === 1)
    // should be ignored for being too old
    runEvent(makeCompletionEvent(
      taskSet.tasks(0),
      Success,
      makeMapStatus("hostA", reduceRdd.partitions.size)))
    assert(shuffleStage.numAvailableOutputs === 1)
    // should work because it's a new epoch, which will increase the number of available map
    // outputs, and also finish the stage
    taskSet.tasks(1).epoch = newEpoch
    runEvent(makeCompletionEvent(
      taskSet.tasks(1),
      Success,
      makeMapStatus("hostA", reduceRdd.partitions.size)))
    assert(shuffleStage.numAvailableOutputs === 2)
    assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
      HashSet(makeBlockManagerId("hostB"), makeBlockManagerId("hostA")))
    // finish the next stage normally, which completes the job
    complete(taskSets(1), Seq((Success, 42), (Success, 43)))
    assert(results === Map(0 -> 42, 1 -> 43))
    assertDataStructuresEmpty()
  }
test("run shuffle with map stage failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
// Fail the map stage. This should cause the entire job to fail.
val stageFailureMessage = "Exception failure in map stage"
failed(taskSets(0), stageFailureMessage)
assert(failure.getMessage === s"Job aborted due to stage failure: $stageFailureMessage")
// Listener bus should get told about the map stage failing, but not the reduce stage
// (since the reduce stage hasn't been started yet).
assert(sparkListener.failedStages.toSet === Set(0))
assertDataStructuresEmpty()
}
  /**
   * Run two jobs, with a shared dependency. We simulate a fetch failure in the second job, which
   * requires regenerating some outputs of the shared dependency. One key aspect of this test is
   * that the second job actually uses a different stage for the shared dependency (a "skipped"
   * stage).
   */
  test("shuffle fetch failure in a reused shuffle dependency") {
    // Run the first job successfully, which creates one shuffle dependency
    val shuffleMapRdd = new MyRDD(sc, 2, Nil)
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
    val reduceRdd = new MyRDD(sc, 2, List(shuffleDep))
    submit(reduceRdd, Array(0, 1))
    completeShuffleMapStageSuccessfully(0, 0, 2)
    completeNextResultStageWithSuccess(1, 0)
    assert(results === Map(0 -> 42, 1 -> 42))
    assertDataStructuresEmpty()
    // submit another job w/ the shared dependency, and have a fetch failure
    val reduce2 = new MyRDD(sc, 2, List(shuffleDep))
    submit(reduce2, Array(0, 1))
    // Note that the stage numbering here is only b/c the shared dependency produces a new, skipped
    // stage. If instead it reused the existing stage, then this would be stage 2
    completeNextStageWithFetchFailure(3, 0, shuffleDep)
    scheduler.resubmitFailedStages()
    // the scheduler now creates a new task set to regenerate the missing map output, but this time
    // using a different stage, the "skipped" one
    // SPARK-9809 -- this stage is submitted without a task for each partition (because some of
    // the shuffle map output is still available from stage 0); make sure we've still got internal
    // accumulators setup
    assert(scheduler.stageIdToStage(2).latestInfo.taskMetrics != null)
    completeShuffleMapStageSuccessfully(2, 0, 2)
    // partitionToResult override makes the second job's results distinguishable from 42.
    completeNextResultStageWithSuccess(3, 1, idx => idx + 1234)
    assert(results === Map(0 -> 1234, 1 -> 1235))
    assertDataStructuresEmpty()
  }
  /**
   * This test runs a three stage job, with a fetch failure in stage 1, but during the retry, we
   * have completions from both the first & second attempt of stage 1. So all the map output is
   * available before we finish any task set for stage 1. We want to make sure that we don't
   * submit stage 2 until the map output for stage 1 is registered
   */
  test("don't submit stage until its dependencies map outputs are registered (SPARK-5259)") {
    val firstRDD = new MyRDD(sc, 3, Nil)
    val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(3))
    val firstShuffleId = firstShuffleDep.shuffleId
    val shuffleMapRdd = new MyRDD(sc, 3, List(firstShuffleDep))
    val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
    val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
    submit(reduceRdd, Array(0))
    // things start out smoothly, stage 0 completes with no issues
    complete(taskSets(0), Seq(
      (Success, makeMapStatus("hostB", shuffleMapRdd.partitions.length)),
      (Success, makeMapStatus("hostB", shuffleMapRdd.partitions.length)),
      (Success, makeMapStatus("hostA", shuffleMapRdd.partitions.length))
    ))
    // then one executor dies, and a task fails in stage 1
    runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
    runEvent(makeCompletionEvent(
      taskSets(1).tasks(0),
      FetchFailed(null, firstShuffleId, 2L, 2, 0, "Fetch failed"),
      null))
    // so we resubmit stage 0, which completes happily
    scheduler.resubmitFailedStages()
    val stage0Resubmit = taskSets(2)
    assert(stage0Resubmit.stageId == 0)
    assert(stage0Resubmit.stageAttemptId === 1)
    // Only the partition whose output lived on the lost executor needs to be rerun.
    val task = stage0Resubmit.tasks(0)
    assert(task.partitionId === 2)
    runEvent(makeCompletionEvent(
      task,
      Success,
      makeMapStatus("hostC", shuffleMapRdd.partitions.length)))
    // now here is where things get tricky : we will now have a task set representing
    // the second attempt for stage 1, but we *also* have some tasks for the first attempt for
    // stage 1 still going
    val stage1Resubmit = taskSets(3)
    assert(stage1Resubmit.stageId == 1)
    assert(stage1Resubmit.stageAttemptId === 1)
    assert(stage1Resubmit.tasks.length === 3)
    // we'll have some tasks finish from the first attempt, and some finish from the second attempt,
    // so that we actually have all stage outputs, though no attempt has completed all its
    // tasks
    runEvent(makeCompletionEvent(
      taskSets(3).tasks(0),
      Success,
      makeMapStatus("hostC", reduceRdd.partitions.length)))
    runEvent(makeCompletionEvent(
      taskSets(3).tasks(1),
      Success,
      makeMapStatus("hostC", reduceRdd.partitions.length)))
    // late task finish from the first attempt
    runEvent(makeCompletionEvent(
      taskSets(1).tasks(2),
      Success,
      makeMapStatus("hostB", reduceRdd.partitions.length)))
    // What should happen now is that we submit stage 2. However, we might not see an error
    // b/c of DAGScheduler's error handling (it tends to swallow errors and just log them). But
    // we can check some conditions.
    // Note that the really important thing here is not so much that we submit stage 2 *immediately*
    // but that we don't end up with some error from these interleaved completions. It would also
    // be OK (though sub-optimal) if stage 2 simply waited until the resubmission of stage 1 had
    // all its tasks complete
    // check that we have all the map output for stage 0 (it should have been there even before
    // the last round of completions from stage 1, but just to double check it hasn't been messed
    // up) and also the newly available stage 1
    val stageToReduceIdxs = Seq(
      0 -> (0 until 3),
      1 -> (0 until 1)
    )
    for {
      (stage, reduceIdxs) <- stageToReduceIdxs
      reduceIdx <- reduceIdxs
    } {
      // this would throw an exception if the map status hadn't been registered
      val statuses = mapOutputTracker.getMapSizesByExecutorId(stage, reduceIdx)
      // really we should have already thrown an exception rather than fail either of these
      // asserts, but just to be extra defensive let's double check the statuses are OK
      assert(statuses != null)
      assert(statuses.nonEmpty)
    }
    // and check that stage 2 has been submitted
    assert(taskSets.size == 5)
    val stage2TaskSet = taskSets(4)
    assert(stage2TaskSet.stageId == 2)
    assert(stage2TaskSet.stageAttemptId == 0)
  }
/**
 * We lose an executor after completing some shuffle map tasks on it. Those tasks get
 * resubmitted, and when they finish the job completes normally.
 *
 * Scenario: 2 of 3 map tasks finish on hostA, hostA dies, the two tasks are resubmitted,
 * and then all three map tasks complete on hostB. The map outputs must be registered
 * correctly and the reduce stage submitted.
 */
test("register map outputs correctly after ExecutorLost and task Resubmitted") {
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 5, List(firstShuffleDep))
submit(reduceRdd, Array(0))
// complete some of the tasks from the first stage, on one host
runEvent(makeCompletionEvent(
taskSets(0).tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.length)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
Success,
makeMapStatus("hostA", reduceRdd.partitions.length)))
// now that host goes down
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
// so we resubmit those tasks
runEvent(makeCompletionEvent(taskSets(0).tasks(0), Resubmitted, null))
runEvent(makeCompletionEvent(taskSets(0).tasks(1), Resubmitted, null))
// now complete everything on a different host
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostB", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))
))
// now we should submit stage 1, and the map output from stage 0 should be registered
// check that we have all the map output for stage 0
(0 until reduceRdd.partitions.length).foreach { reduceIdx =>
// this lookup would throw if the map status hadn't been registered
val statuses = mapOutputTracker.getMapSizesByExecutorId(0, reduceIdx)
// really we should have already thrown an exception rather than fail either of these
// asserts, but just to be extra defensive let's double check the statuses are OK
assert(statuses != null)
assert(statuses.nonEmpty)
}
// and check that stage 1 has been submitted
assert(taskSets.size == 2)
val stage1TaskSet = taskSets(1)
assert(stage1TaskSet.stageId == 1)
assert(stage1TaskSet.stageAttemptId == 0)
}
/**
 * Makes sure that failures of stage used by multiple jobs are correctly handled.
 *
 * This test creates the following dependency graph:
 *
 * shuffleMapRdd1 shuffleMapRDD2
 * | \\ |
 * | \\ |
 * | \\ |
 * | \\ |
 * reduceRdd1 reduceRdd2
 *
 * We start both shuffleMapRdds and then fail shuffleMapRdd1. As a result, the job listeners for
 * reduceRdd1 and reduceRdd2 should both be informed that the job failed. shuffleMapRDD2 should
 * also be cancelled, because it is only used by reduceRdd2 and reduceRdd2 cannot complete
 * without shuffleMapRdd1.
 */
test("failure of stage used by two jobs") {
val shuffleMapRdd1 = new MyRDD(sc, 2, Nil)
val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
val shuffleMapRdd2 = new MyRDD(sc, 2, Nil)
val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, new HashPartitioner(2))
// reduceRdd1 depends only on shuffleDep1; reduceRdd2 depends on both shuffle outputs
val reduceRdd1 = new MyRDD(sc, 2, List(shuffleDep1), tracker = mapOutputTracker)
val reduceRdd2 = new MyRDD(sc, 2, List(shuffleDep1, shuffleDep2), tracker = mapOutputTracker)
// We need to make our own listeners for this test, since by default submit uses the same
// listener for all jobs, and here we want to capture the failure for each job separately.
class FailureRecordingJobListener() extends JobListener {
// message of the last job failure; remains null until jobFailed is invoked
var failureMessage: String = _
override def taskSucceeded(index: Int, result: Any): Unit = {}
override def jobFailed(exception: Exception): Unit = { failureMessage = exception.getMessage }
}
val listener1 = new FailureRecordingJobListener()
val listener2 = new FailureRecordingJobListener()
submit(reduceRdd1, Array(0, 1), listener = listener1)
submit(reduceRdd2, Array(0, 1), listener = listener2)
val stageFailureMessage = "Exception failure in map stage"
// fail stage 0 (shuffleMapRdd1), which both jobs depend on
failed(taskSets(0), stageFailureMessage)
// stage 0 and stage 2 (shuffleMapRdd2, needed only by the now-doomed second job) are cancelled
assert(cancelledStages.toSet === Set(0, 2))
// Make sure the listeners got told about both failed stages.
assert(sparkListener.successfulStages.isEmpty)
assert(sparkListener.failedStages.toSet === Set(0, 2))
assert(listener1.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assert(listener2.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assertDataStructuresEmpty()
}
/** Verifies that a TaskSet carries the expected "testProperty" value and scheduling priority. */
def checkJobPropertiesAndPriority(taskSet: TaskSet, expected: String, priority: Int): Unit = {
  val props = taskSet.properties
  assert(props != null)
  val actualProperty = props.getProperty("testProperty")
  assert(actualProperty === expected)
  assert(taskSet.priority === priority)
}
/**
 * Launches two jobs that share the same chain of shuffle stages, cancels the first job,
 * and completes the first (base) shuffle stage. Returns the base shuffle dependency so
 * callers can continue the scenario (e.g. by injecting fetch failures against it).
 */
def launchJobsThatShareStageAndCancelFirst(): ShuffleDependency[Int, Int, Nothing] = {
val baseRdd = new MyRDD(sc, 1, Nil)
val shuffleDep1 = new ShuffleDependency(baseRdd, new HashPartitioner(1))
val intermediateRdd = new MyRDD(sc, 1, List(shuffleDep1))
val shuffleDep2 = new ShuffleDependency(intermediateRdd, new HashPartitioner(1))
// two final RDDs share shuffleDep2, so both jobs reuse the same upstream stages
val finalRdd1 = new MyRDD(sc, 1, List(shuffleDep2))
val finalRdd2 = new MyRDD(sc, 1, List(shuffleDep2))
val job1Properties = new Properties()
val job2Properties = new Properties()
job1Properties.setProperty("testProperty", "job1")
job2Properties.setProperty("testProperty", "job2")
// Run jobs 1 & 2, both referencing the same stage, then cancel job1.
// Note that we have to submit job2 before we cancel job1 to have them actually share
// *Stages*, and not just shuffle dependencies, due to skipped stages (at least until
// we address SPARK-10193.)
val jobId1 = submit(finalRdd1, Array(0), properties = job1Properties)
val jobId2 = submit(finalRdd2, Array(0), properties = job2Properties)
assert(scheduler.activeJobs.nonEmpty)
val testProperty1 = scheduler.jobIdToActiveJob(jobId1).properties.getProperty("testProperty")
// remove job1 as an ActiveJob
cancel(jobId1)
// job2 should still be running
assert(scheduler.activeJobs.nonEmpty)
val testProperty2 = scheduler.jobIdToActiveJob(jobId2).properties.getProperty("testProperty")
assert(testProperty1 != testProperty2)
// NB: This next assert isn't necessarily the "desired" behavior; it's just to document
// the current behavior. We've already submitted the TaskSet for stage 0 based on job1, but
// even though we have cancelled that job and are now running it because of job2, we haven't
// updated the TaskSet's properties. Changing the properties to "job2" is likely the more
// correct behavior.
val job1Id = 0 // TaskSet priority for Stages run with "job1" as the ActiveJob
checkJobPropertiesAndPriority(taskSets(0), "job1", job1Id)
complete(taskSets(0), Seq((Success, makeMapStatus("hostA", 1))))
shuffleDep1
}
/**
 * Makes sure that tasks for a stage used by multiple jobs are submitted with the properties of a
 * later, active job if they were previously run under a job that is no longer active
 */
test("stage used by two jobs, the first no longer active (SPARK-6880)") {
launchJobsThatShareStageAndCancelFirst()
// The next check is the key for SPARK-6880. For the stage which was shared by both job1 and
// job2 but never had any tasks submitted for job1, the properties of job2 are now used to run
// the stage.
checkJobPropertiesAndPriority(taskSets(1), "job2", 1)
complete(taskSets(1), Seq((Success, makeMapStatus("hostA", 1))))
// the final result stage should also carry (non-null) job properties
assert(taskSets(2).properties != null)
complete(taskSets(2), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(scheduler.activeJobs.isEmpty)
assertDataStructuresEmpty()
}
/**
 * Makes sure that tasks for a stage used by multiple jobs are submitted with the properties of a
 * later, active job if they were previously run under a job that is no longer active, even when
 * there are fetch failures
 */
test("stage used by two jobs, some fetch failures, and the first job no longer active " +
"(SPARK-6880)") {
val shuffleDep1 = launchJobsThatShareStageAndCancelFirst()
val job2Id = 1 // TaskSet priority for Stages run with "job2" as the ActiveJob
// lets say there is a fetch failure in this task set, which makes us go back and
// run stage 0, attempt 1
complete(taskSets(1), Seq(
(FetchFailed(makeBlockManagerId("hostA"),
shuffleDep1.shuffleId, 0L, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// stage 0, attempt 1 should have the properties of job2
assert(taskSets(2).stageId === 0)
assert(taskSets(2).stageAttemptId === 1)
checkJobPropertiesAndPriority(taskSets(2), "job2", job2Id)
// run the rest of the stages normally, checking that they have the correct properties
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
checkJobPropertiesAndPriority(taskSets(3), "job2", job2Id)
complete(taskSets(3), Seq((Success, makeMapStatus("hostA", 1))))
checkJobPropertiesAndPriority(taskSets(4), "job2", job2Id)
complete(taskSets(4), Seq((Success, 42)))
// job2 finishes with the expected result and no leftover scheduler state
assert(results === Map(0 -> 42))
assert(scheduler.activeJobs.isEmpty)
assertDataStructuresEmpty()
}
/**
 * In this test, we run a map stage where one of the executors fails but we still receive a
 * "zombie" complete message from a task that ran on that executor. We want to make sure the
 * stage is resubmitted so that the task that ran on the failed executor is re-executed, and
 * that the stage is only marked as finished once that task completes.
 */
test("run trivial shuffle with out-of-band executor failure and retry") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
// Tell the DAGScheduler that hostA was lost.
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
// "zombie" completion: hostA's task completes even though the executor is already gone
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// At this point, no more tasks are running for the stage (and the TaskSetManager considers the
// stage complete), but the tasks that ran on HostA need to be re-run, so the DAGScheduler
// should re-submit the stage with one task (the task that originally ran on HostA).
assert(taskSets.size === 2)
assert(taskSets(1).tasks.size === 1)
// Make sure that the stage that was re-submitted was the ShuffleMapStage (not the reduce
// stage, which shouldn't be run until all of the tasks in the ShuffleMapStage complete on
// alive executors).
assert(taskSets(1).tasks(0).isInstanceOf[ShuffleMapTask])
// have hostC complete the resubmitted task
complete(taskSets(1), Seq((Success, makeMapStatus("hostC", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
// Make sure that the reduce stage was now submitted.
assert(taskSets.size === 3)
assert(taskSets(2).tasks(0).isInstanceOf[ResultTask[_, _]])
// Complete the reduce stage.
complete(taskSets(2), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("recursive shuffle failures") {
// three-stage pipeline: stage 0 (shuffleDepOne) -> stage 1 (shuffleDepTwo) -> result stage
val shuffleOneRdd = new MyRDD(sc, 2, Nil)
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker)
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// have the first stage complete normally
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// have the second stage complete normally
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostC", 1))))
// fail the third stage because hostA went down
complete(taskSets(2), Seq(
(FetchFailed(makeBlockManagerId("hostA"),
shuffleDepTwo.shuffleId, 0L, 0, 0, "ignored"), null)))
// TODO assert this:
// blockManagerMaster.removeExecutor("exec-hostA")
// have DAGScheduler try again
scheduler.resubmitFailedStages()
// stage 0 is rerun first (its map status targets 2 reduce partitions), then stage 1
// (1 reduce partition), then the result stage completes the job
complete(taskSets(3), Seq((Success, makeMapStatus("hostA", 2))))
complete(taskSets(4), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(5), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("cached post-shuffle") {
// same three-stage pipeline as "recursive shuffle failures", but the intermediate RDDs
// are cached, so a failure should be recovered from the cache instead of a full rerun
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// mark shuffleTwoRdd's partitions as cached on hostD and hostC
cacheLocations(shuffleTwoRdd.id -> 0) = Seq(makeBlockManagerId("hostD"))
cacheLocations(shuffleTwoRdd.id -> 1) = Seq(makeBlockManagerId("hostC"))
// complete stage 0
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// complete stage 1
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// pretend stage 2 failed because hostA went down
complete(taskSets(2), Seq(
(FetchFailed(makeBlockManagerId("hostA"),
shuffleDepTwo.shuffleId, 0L, 0, 0, "ignored"), null)))
// TODO assert this:
// blockManagerMaster.removeExecutor("exec-hostA")
// DAGScheduler should notice the cached copy of the second shuffle and try to get it rerun.
scheduler.resubmitFailedStages()
// the resubmitted task should be scheduled where partition 0 of shuffleTwoRdd is cached
assertLocations(taskSets(3), Seq(Seq("hostD")))
// allow hostD to recover
complete(taskSets(3), Seq((Success, makeMapStatus("hostD", 1))))
complete(taskSets(4), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("SPARK-30388: shuffle fetch failed on speculative task, but original task succeed") {
// records the order in which stage completions are reported to listeners
var completedStage: List[Int] = Nil
val listener = new SparkListener() {
override def onStageCompleted(event: SparkListenerStageCompleted): Unit = {
completedStage = completedStage :+ event.stageInfo.stageId
}
}
sc.addSparkListener(listener)
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep))
submit(reduceRdd, Array(0, 1))
completeShuffleMapStageSuccessfully(0, 0, 2)
sc.listenerBus.waitUntilEmpty()
assert(completedStage === List(0))
// result task 0.0 succeed
runEvent(makeCompletionEvent(taskSets(1).tasks(0), Success, 42))
// speculative result task 1.1 fetch failed
val info = new TaskInfo(4, index = 1, attemptNumber = 1, 0L, "", "", TaskLocality.ANY, true)
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
FetchFailed(makeBlockManagerId("hostA"), shuffleDep.shuffleId, 0L, 0, 1, "ignored"),
null,
Seq.empty,
Array.empty,
info
)
)
sc.listenerBus.waitUntilEmpty()
assert(completedStage === List(0, 1))
// wait until the delayed resubmission of the failed map stage has actually happened
Thread.sleep(DAGScheduler.RESUBMIT_TIMEOUT * 2)
// map stage resubmitted
assert(scheduler.runningStages.size === 1)
val mapStage = scheduler.runningStages.head
assert(mapStage.id === 0)
// the resubmitted map stage must not carry a failure reason
assert(mapStage.latestInfo.failureReason.isEmpty)
// original result task 1.0 succeed
runEvent(makeCompletionEvent(taskSets(1).tasks(1), Success, 42))
sc.listenerBus.waitUntilEmpty()
// the result stage and then the resubmitted map stage both complete
assert(completedStage === List(0, 1, 1, 0))
assert(scheduler.activeJobs.isEmpty)
}
test("misbehaved accumulator should not crash DAGScheduler and SparkContext") {
  // An accumulator whose add() always throws, simulating faulty user accumulator code.
  val acc = new LongAccumulator {
    override def add(v: java.lang.Long): Unit = throw new DAGSchedulerSuiteDummyException
    override def add(v: Long): Unit = throw new DAGSchedulerSuiteDummyException
  }
  sc.register(acc)
  // Trigger the failing add() on executors; the scheduler must survive it.
  sc.parallelize(1 to 10, 2).foreach { _ => acc.add(1) }
  // The context should still be usable for subsequent jobs.
  assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("misbehaved accumulator should not impact other accumulators") {
  // An accumulator whose driver-side merge() always throws.
  val bad = new LongAccumulator {
    override def merge(other: AccumulatorV2[java.lang.Long, java.lang.Long]): Unit = {
      throw new DAGSchedulerSuiteDummyException
    }
  }
  sc.register(bad, "bad")
  val good = sc.longAccumulator("good")
  sc.parallelize(1 to 10, 2).foreach { _ =>
    bad.add(1)
    good.add(1)
  }
  // The "bad" accumulator never merged successfully, so its value stays at zero...
  assert(bad.value == 0L)
  // ...while the healthy accumulator still received every update.
  assert(good.value == 10L)
}
/**
 * The job will be failed on first task throwing an error.
 * Any subsequent task WILL throw a legitimate java.lang.UnsupportedOperationException.
 * If multiple tasks, there exists a race condition between the SparkDriverExecutionExceptions
 * and their differing causes as to which will represent result for job...
 */
test("misbehaved resultHandler should not crash DAGScheduler and SparkContext") {
failAfter(1.minute) { // If DAGScheduler crashes, the following test will hang forever
// exercise several throwable kinds, including Errors that are not Exceptions
for (error <- Seq(
new DAGSchedulerSuiteDummyException,
new AssertionError, // E.g., assert(foo == bar) fails
new NotImplementedError // E.g., call a method with `???` implementation.
)) {
val e = intercept[SparkDriverExecutionException] {
// Number of parallelized partitions implies number of tasks of job
val rdd = sc.parallelize(1 to 10, 2)
sc.runJob[Int, Int](
rdd,
(context: TaskContext, iter: Iterator[Int]) => iter.size,
// For a robust test assertion, limit number of job tasks to 1; that is,
// if multiple RDD partitions, use id of any one partition, say, first partition id=0
Seq(0),
// the result handler itself throws; the driver must wrap it, not crash
(part: Int, result: Int) => throw error)
}
// the thrown error must be preserved as the cause of the wrapping exception
assert(e.getCause eq error)
// Make sure we can still run commands on our SparkContext
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
}
}
test(s"invalid ${SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL} should not crash DAGScheduler") {
// set the interrupt-on-cancel property to a value that is not a valid boolean
sc.setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, "invalid")
try {
intercept[SparkException] {
sc.parallelize(1 to 1, 1).foreach { _ =>
throw new DAGSchedulerSuiteDummyException
}
}
// Verify the above job didn't crash DAGScheduler by running a simple job
assert(sc.parallelize(1 to 10, 2).count() === 10)
} finally {
// restore the property so later tests are unaffected
sc.setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null)
}
}
test("getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)") {
  // An RDD whose partition computation itself throws; the error must propagate to the
  // caller instead of killing the scheduler event loop. (The intercepted exception is not
  // bound to a local because nothing further is asserted on it, unlike the
  // getPreferredLocations test below.)
  intercept[DAGSchedulerSuiteDummyException] {
    val rdd = new MyRDD(sc, 2, Nil) {
      override def getPartitions: Array[Partition] = {
        throw new DAGSchedulerSuiteDummyException
      }
    }
    rdd.reduceByKey(_ + _, 1).count()
  }
  // Make sure we can still run commands
  assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)") {
// An RDD whose preferred-location lookup throws; the job should fail with a SparkException
// naming the underlying dummy exception, without killing the scheduler.
val e1 = intercept[SparkException] {
val rdd = new MyRDD(sc, 2, Nil) {
override def getPreferredLocations(split: Partition): Seq[String] = {
throw new DAGSchedulerSuiteDummyException
}
}
rdd.count()
}
assert(e1.getMessage.contains(classOf[DAGSchedulerSuiteDummyException].getName))
// Make sure we can still run commands
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("accumulator not calculated for resubmitted result stage") {
  // just for register
  val accum = AccumulatorSuite.createLongAccum("a")
  val resultRdd = new MyRDD(sc, 1, Nil)
  submit(resultRdd, Array(0))
  // Complete the single result task twice, simulating a resubmitted (duplicate) completion.
  (1 to 2).foreach { _ =>
    completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
  }
  assert(results === Map(0 -> 42))
  // Despite two completion events, the accumulator must only be counted once.
  assert(accum.value === 1)
  assertDataStructuresEmpty()
}
test("accumulator not calculated for resubmitted task in result stage") {
val accum = AccumulatorSuite.createLongAccum("a")
val finalRdd = new MyRDD(sc, 2, Nil)
submit(finalRdd, Array(0, 1))
// finish the first task
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
// verify stage exists
assert(scheduler.stageIdToStage.contains(0))
// finish the first task again (simulate a speculative task or a resubmitted task)
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
// The accumulator should only be updated once.
assert(accum.value === 1)
// finish the remaining task so the stage (and the job) can complete
runEvent(makeCompletionEvent(taskSets(0).tasks(1), Success, 42))
assertDataStructuresEmpty()
}
test("accumulators are updated on exception failures and task killed") {
// three registered accumulators whose updates arrive via failed/killed task end events
val acc1 = AccumulatorSuite.createLongAccum("ingenieur")
val acc2 = AccumulatorSuite.createLongAccum("boulanger")
val acc3 = AccumulatorSuite.createLongAccum("agriculteur")
assert(AccumulatorContext.get(acc1.id).isDefined)
assert(AccumulatorContext.get(acc2.id).isDefined)
assert(AccumulatorContext.get(acc3.id).isDefined)
// build task-side updates carrying the same metadata (ids) as the registered accumulators
val accUpdate1 = new LongAccumulator
accUpdate1.metadata = acc1.metadata
accUpdate1.setValue(15)
val accUpdate2 = new LongAccumulator
accUpdate2.metadata = acc2.metadata
accUpdate2.setValue(13)
val accUpdate3 = new LongAccumulator
accUpdate3.metadata = acc3.metadata
accUpdate3.setValue(18)
val accumUpdates1 = Seq(accUpdate1, accUpdate2)
val accumInfo1 = accumUpdates1.map(AccumulatorSuite.makeInfo)
val exceptionFailure = new ExceptionFailure(
new SparkException("fondue?"),
accumInfo1).copy(accums = accumUpdates1)
submit(new MyRDD(sc, 1, Nil), Array(0))
// a task that fails with an exception should still deliver its accumulator updates
runEvent(makeCompletionEvent(taskSets.head.tasks.head, exceptionFailure, "result"))
assert(AccumulatorContext.get(acc1.id).get.value === 15L)
assert(AccumulatorContext.get(acc2.id).get.value === 13L)
val accumUpdates2 = Seq(accUpdate3)
val accumInfo2 = accumUpdates2.map(AccumulatorSuite.makeInfo)
// likewise for a task that was killed
val taskKilled = new TaskKilled( "test", accumInfo2, accums = accumUpdates2)
runEvent(makeCompletionEvent(taskSets.head.tasks.head, taskKilled, "result"))
assert(AccumulatorContext.get(acc3.id).get.value === 18L)
}
test("reduce tasks should be placed locally with map output") {
// Create a shuffleMapRdd with 1 partition
val shuffleMapRdd = new MyRDD(sc, 1, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
// the single map task runs on hostA
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA")))
// Reducer should run on the same host that map task ran
val reduceTaskSet = taskSets(1)
assertLocations(reduceTaskSet, Seq(Seq("hostA")))
complete(reduceTaskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("reduce task locality preferences should only include machines with largest map outputs") {
  val numMapTasks = 4
  // Shuffle map stage with several partitions, each producing a differently-sized output.
  val shuffleMapRdd = new MyRDD(sc, numMapTasks, Nil)
  val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
  val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
  submit(reduceRdd, Array(0))
  // host1..host4 report map outputs of strictly increasing size (10, 20, 30, 40).
  val statuses = (1 to numMapTasks).map { i =>
    (Success, makeMapStatus("host" + i, 1, (10 * i).toByte))
  }
  complete(taskSets(0), statuses)
  // Reducer should prefer the last 3 hosts as they have 20%, 30% and 40% of data
  val hosts = (1 to numMapTasks).map(i => "host" + i).reverse.take(numMapTasks - 1)
  val reduceTaskSet = taskSets(1)
  assertLocations(reduceTaskSet, Seq(hosts))
  complete(reduceTaskSet, Seq((Success, 42)))
  assert(results === Map(0 -> 42))
  assertDataStructuresEmpty()
}
test("stages with both narrow and shuffle dependencies use narrow ones for locality") {
// Create an RDD that has both a shuffle dependency and a narrow dependency (e.g. for a join)
val rdd1 = new MyRDD(sc, 1, Nil)
// rdd2 declares an explicit preferred location on hostB
val rdd2 = new MyRDD(sc, 1, Nil, locations = Seq(Seq("hostB")))
val shuffleDep = new ShuffleDependency(rdd1, new HashPartitioner(1))
val narrowDep = new OneToOneDependency(rdd2)
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep, narrowDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
// the map side of the shuffle completes on hostA
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA")))
// Reducer should run where RDD 2 has preferences, even though it also has a shuffle dep
val reduceTaskSet = taskSets(1)
assertLocations(reduceTaskSet, Seq(Seq("hostB")))
complete(reduceTaskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("Spark exceptions should include call site in stack trace") {
  val thrown = intercept[SparkException] {
    sc.parallelize(1 to 10, 2).map { _ => throw new RuntimeException("uh-oh!") }.count()
  }
  // Utils.exceptionString renders the stack trace only, not the message text.
  val trace = Utils.exceptionString(thrown)
  // The trace should point at the RDD action that triggered the failure...
  assert(trace.contains("org.apache.spark.rdd.RDD.count"))
  // ...and include the surrounding test-framework frames.
  assert(trace.contains("org.scalatest.FunSuite"))
}
test("catch errors in event loop") {
  // Sanity check of the test harness itself: a malformed completion event (null
  // TaskEndReason) must surface as an exception instead of being silently ignored.
  val rdd = new MyRDD(sc, 1, Nil)
  submit(rdd, Array(0))
  intercept[Exception] {
    complete(taskSets(0), Seq((null, makeMapStatus("hostA", 1))))
  }
}
test("simple map stage submission") {
  val shuffleMapRdd = new MyRDD(sc, 2, Nil)
  val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
  val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
  // Submit a map stage by itself
  submitMapStage(shuffleDep)
  assert(results.size === 0) // No results yet
  completeShuffleMapStageSuccessfully(0, 0, 1)
  assert(results.size === 1)
  results.clear()
  assertDataStructuresEmpty()
  // Submit a reduce job that depends on this map stage; it should directly do the reduce
  submit(reduceRdd, Array(0))
  completeNextResultStageWithSuccess(2, 0)
  assert(results === Map(0 -> 42))
  results.clear()
  assertDataStructuresEmpty()
  // Check that if we submit the map stage again, no tasks run: the listener is answered
  // immediately from the already-registered map outputs
  submitMapStage(shuffleDep)
  assert(results.size === 1)
  assertDataStructuresEmpty()
}
test("map stage submission with reduce stage also depending on the data") {
  val shuffleMapRdd = new MyRDD(sc, 2, Nil)
  val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
  val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
  // Submit the map stage by itself, then a reduce job that depends on the same stage
  submitMapStage(shuffleDep)
  submit(reduceRdd, Array(0))
  // Completing the map tasks satisfies the standalone map-stage job...
  completeShuffleMapStageSuccessfully(0, 0, 1)
  assert(results.size === 1)
  results.clear()
  // ...and the reduce stage can then run to completion for the second job
  completeNextResultStageWithSuccess(1, 0)
  assert(results === Map(0 -> 42))
  results.clear()
  assertDataStructuresEmpty()
  // Check that if we submit the map stage again, no tasks run
  submitMapStage(shuffleDep)
  assert(results.size === 1)
  assertDataStructuresEmpty()
}
// Runs a standalone map-stage job, then reduce jobs over its output, injecting a fetch
// failure on the first reduce attempt to exercise map-stage resubmission.
test("map stage submission with fetch failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
// Submit a map stage by itself
submitMapStage(shuffleDep)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
// Submit a reduce job that depends on this map stage, but where one reduce will fail a fetch
submit(reduceRdd, Array(0, 1))
complete(taskSets(1), Seq(
(Success, 42),
(FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"), null)))
// Ask the scheduler to try it again; TaskSet 2 will rerun the map task that we couldn't fetch
// from, then TaskSet 3 will run the reduce stage
scheduler.resubmitFailedStages()
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
complete(taskSets(3), Seq((Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
results.clear()
assertDataStructuresEmpty()
// Run another reduce job without a failure; this should just work
submit(reduceRdd, Array(0, 1))
complete(taskSets(4), Seq(
(Success, 44),
(Success, 45)))
assert(results === Map(0 -> 44, 1 -> 45))
results.clear()
assertDataStructuresEmpty()
// Resubmit the map stage; this should also just work
submitMapStage(shuffleDep)
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
}
/**
 * In this test, we have three RDDs with shuffle dependencies, and we submit map stage jobs
 * that are waiting on each one, as well as a reduce job on the last one. We test that all of
 * these jobs complete even if there are some fetch failures in both shuffles.
 */
test("map stage submission with multiple shared stages and failures") {
val rdd1 = new MyRDD(sc, 2, Nil)
val dep1 = new ShuffleDependency(rdd1, new HashPartitioner(2))
val rdd2 = new MyRDD(sc, 2, List(dep1), tracker = mapOutputTracker)
val dep2 = new ShuffleDependency(rdd2, new HashPartitioner(2))
val rdd3 = new MyRDD(sc, 2, List(dep2), tracker = mapOutputTracker)
// separate listeners so we can observe each job's completion independently
val listener1 = new SimpleListener
val listener2 = new SimpleListener
val listener3 = new SimpleListener
submitMapStage(dep1, listener1)
submitMapStage(dep2, listener2)
submit(rdd3, Array(0, 1), listener = listener3)
// Complete the first stage
assert(taskSets(0).stageId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", rdd1.partitions.length)),
(Success, makeMapStatus("hostB", rdd1.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
assert(listener1.results.size === 1)
// When attempting the second stage, show a fetch failure
assert(taskSets(1).stageId === 1)
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", rdd2.partitions.length)),
(FetchFailed(makeBlockManagerId("hostA"), dep1.shuffleId, 0L, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
assert(listener2.results.size === 0) // Second stage listener should not have a result yet
// Stage 0 should now be running as task set 2; make its task succeed
assert(taskSets(2).stageId === 0)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostC", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
assert(listener2.results.size === 0) // Second stage listener should still not have a result
// Stage 1 should now be running as task set 3; make its first task succeed
assert(taskSets(3).stageId === 1)
complete(taskSets(3), Seq(
(Success, makeMapStatus("hostB", rdd2.partitions.length)),
(Success, makeMapStatus("hostD", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep2.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostB"), makeBlockManagerId("hostD")))
assert(listener2.results.size === 1)
// Finally, the reduce job should be running as task set 4; make it see a fetch failure,
// then make it run again and succeed
assert(taskSets(4).stageId === 2)
complete(taskSets(4), Seq(
(Success, 52),
(FetchFailed(makeBlockManagerId("hostD"), dep2.shuffleId, 0L, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// TaskSet 5 will rerun stage 1's lost task, then TaskSet 6 will rerun stage 2
assert(taskSets(5).stageId === 1)
complete(taskSets(5), Seq(
(Success, makeMapStatus("hostE", rdd2.partitions.length))))
complete(taskSets(6), Seq(
(Success, 53)))
// the reduce job sees both results despite the interleaved fetch failures
assert(listener3.results === Map(0 -> 52, 1 -> 53))
assertDataStructuresEmpty()
}
// Verifies that a map-stage job listener is triggered from submitMissingTasks when a failed
// stage is resubmitted and turns out to have no missing partitions (see comments inline).
test("Trigger mapstage's job listener in submitMissingTasks") {
  val rdd1 = new MyRDD(sc, 2, Nil)
  val dep1 = new ShuffleDependency(rdd1, new HashPartitioner(2))
  val rdd2 = new MyRDD(sc, 2, List(dep1), tracker = mapOutputTracker)
  val dep2 = new ShuffleDependency(rdd2, new HashPartitioner(2))

  val listener1 = new SimpleListener
  val listener2 = new SimpleListener

  submitMapStage(dep1, listener1)
  submitMapStage(dep2, listener2)

  // Complete the stage0.
  assert(taskSets(0).stageId === 0)
  complete(taskSets(0), Seq(
    (Success, makeMapStatus("hostA", rdd1.partitions.length)),
    (Success, makeMapStatus("hostB", rdd1.partitions.length))))
  assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
    HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
  assert(listener1.results.size === 1)

  // When attempting stage1, trigger a fetch failure.
  assert(taskSets(1).stageId === 1)
  complete(taskSets(1), Seq(
    (Success, makeMapStatus("hostC", rdd2.partitions.length)),
    (FetchFailed(makeBlockManagerId("hostA"), dep1.shuffleId, 0L, 0, 0, "ignored"), null)))
  scheduler.resubmitFailedStages()
  // Stage1 listener should not have a result yet
  assert(listener2.results.size === 0)

  // Speculative task succeeded in stage1.
  runEvent(makeCompletionEvent(
    taskSets(1).tasks(1),
    Success,
    makeMapStatus("hostD", rdd2.partitions.length)))
  // stage1 listener still should not have a result, though there's no missing partitions
  // in it. Because stage1 has been failed and is not inside `runningStages` at this moment.
  assert(listener2.results.size === 0)

  // Stage0 should now be running as task set 2; make its task succeed
  assert(taskSets(2).stageId === 0)
  complete(taskSets(2), Seq(
    (Success, makeMapStatus("hostC", rdd2.partitions.length))))
  assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
    Set(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))

  // After stage0 is finished, stage1 will be submitted and found there is no missing
  // partitions in it. Then listener got triggered.
  assert(listener2.results.size === 1)
  assertDataStructuresEmpty()
}
/**
 * In this test, we run a map stage where one of the executors fails but we still receive a
 * "zombie" complete message from that executor. We want to make sure the stage is not reported
 * as done until all tasks have completed.
 *
 * Most of the functionality in this test is tested in "run trivial shuffle with out-of-band
 * executor failure and retry". However, that test uses ShuffleMapStages that are followed by
 * a ResultStage, whereas in this test, the ShuffleMapStage is tested in isolation, without a
 * ResultStage after it.
 */
test("map stage submission with executor failure late map task completions") {
  val shuffleMapRdd = new MyRDD(sc, 3, Nil)
  val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))

  submitMapStage(shuffleDep)

  val oldTaskSet = taskSets(0)
  runEvent(makeCompletionEvent(oldTaskSet.tasks(0), Success, makeMapStatus("hostA", 2)))
  assert(results.size === 0) // Map stage job should not be complete yet

  // Pretend host A was lost. This will cause the TaskSetManager to resubmit task 0, because it
  // completed on hostA.
  val oldEpoch = mapOutputTracker.getEpoch
  runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
  val newEpoch = mapOutputTracker.getEpoch
  assert(newEpoch > oldEpoch)

  // Suppose we also get a completed event from task 1 on the same host; this should be ignored
  runEvent(makeCompletionEvent(oldTaskSet.tasks(1), Success, makeMapStatus("hostA", 2)))
  assert(results.size === 0) // Map stage job should not be complete yet

  // A completion from another task should work because it's a non-failed host
  runEvent(makeCompletionEvent(oldTaskSet.tasks(2), Success, makeMapStatus("hostB", 2)))

  // At this point, no more tasks are running for the stage (and the TaskSetManager considers
  // the stage complete), but the task that ran on hostA needs to be re-run, so the map stage
  // shouldn't be marked as complete, and the DAGScheduler should re-submit the stage.
  assert(results.size === 0)
  assert(taskSets.size === 2)

  // Now complete tasks in the second task set
  val newTaskSet = taskSets(1)
  // 2 tasks should have been re-submitted, for tasks 0 and 1 (which ran on hostA).
  assert(newTaskSet.tasks.size === 2)
  // Complete task 0 from the original task set (i.e., not the one that's currently active).
  // NOTE(review): the comment above refers to the "original" task set, but the event below
  // completes newTaskSet.tasks(0) -- confirm which stage attempt is meant to be exercised here.
  // This should still be counted towards the job being complete (but there's still one
  // outstanding task).
  runEvent(makeCompletionEvent(newTaskSet.tasks(0), Success, makeMapStatus("hostB", 2)))
  assert(results.size === 0)

  // Complete the final task, from the currently active task set. There's still one
  // running task, task 0 in the currently active stage attempt, but the success of task 0 means
  // the DAGScheduler can mark the stage as finished.
  runEvent(makeCompletionEvent(newTaskSet.tasks(1), Success, makeMapStatus("hostB", 2)))
  assert(results.size === 1) // Map stage job should now finally be complete
  assertDataStructuresEmpty()

  // Also test that a reduce stage using this shuffled data can immediately run
  val reduceRDD = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
  results.clear()
  submit(reduceRDD, Array(0, 1))
  complete(taskSets(2), Seq((Success, 42), (Success, 43)))
  assert(results === Map(0 -> 42, 1 -> 43))
  results.clear()
  assertDataStructuresEmpty()
}
/**
 * Checks the DAGScheduler's internal logic for traversing an RDD DAG by making sure that
 * getShuffleDependencies correctly returns the direct shuffle dependencies of a particular
 * RDD. The test creates the following RDD graph (where n denotes a narrow dependency and s
 * denotes a shuffle dependency):
 *
 * A <------------s---------,
 *                           \\
 * B <--s-- C <--s-- D <--n------ E
 *
 * Here, the direct shuffle dependency of C is just the shuffle dependency on B. The direct
 * shuffle dependencies of E are the shuffle dependency on A and the shuffle dependency on C.
 */
test("getShuffleDependencies correctly returns only direct shuffle parents") {
  val rddA = new MyRDD(sc, 2, Nil)
  val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(1))
  val rddB = new MyRDD(sc, 2, Nil)
  val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(1))
  val rddC = new MyRDD(sc, 1, List(shuffleDepB))
  val shuffleDepC = new ShuffleDependency(rddC, new HashPartitioner(1))
  val rddD = new MyRDD(sc, 1, List(shuffleDepC))
  val narrowDepD = new OneToOneDependency(rddD)
  val rddE = new MyRDD(sc, 1, List(shuffleDepA, narrowDepD), tracker = mapOutputTracker)

  // RDDs with no shuffle parents return the empty set; shuffle deps reachable only through
  // another shuffle (e.g. B from D) must NOT be returned.
  assert(scheduler.getShuffleDependencies(rddA) === Set())
  assert(scheduler.getShuffleDependencies(rddB) === Set())
  assert(scheduler.getShuffleDependencies(rddC) === Set(shuffleDepB))
  assert(scheduler.getShuffleDependencies(rddD) === Set(shuffleDepC))
  assert(scheduler.getShuffleDependencies(rddE) === Set(shuffleDepA, shuffleDepC))
}
// Regression test for SPARK-17644: after one job's stage is aborted for repeated fetch
// failures, later jobs that hit fetch failures must still fail fast (not hang), and a job with
// a single transient fetch failure must still succeed after retry.
// NOTE: the original test title concatenated "stages" + "still" without a space; fixed here.
test("SPARK-17644: After one stage is aborted for too many failed attempts, subsequent stages " +
  "still behave correctly on fetch failures") {
  // Runs a job that always encounters a fetch failure, so should eventually be aborted
  def runJobWithPersistentFetchFailure: Unit = {
    val rdd1 = sc.makeRDD(Array(1, 2, 3, 4), 2).map(x => (x, 1)).groupByKey()
    val shuffleHandle =
      rdd1.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleHandle
    rdd1.map {
      case (x, _) if (x == 1) =>
        throw new FetchFailedException(
          BlockManagerId("1", "1", 1), shuffleHandle.shuffleId, 0L, 0, 0, "test")
      case (x, _) => x
    }.count()
  }

  // Runs a job that encounters a single fetch failure (first attempt only, gated by the shared
  // FailThisAttempt flag) but succeeds on the second attempt.
  def runJobWithTemporaryFetchFailure: Unit = {
    val rdd1 = sc.makeRDD(Array(1, 2, 3, 4), 2).map(x => (x, 1)).groupByKey()
    val shuffleHandle =
      rdd1.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleHandle
    // BUGFIX: the fallback case and the count() action were missing, so this helper built a
    // partial function but never ran a job -- the final failAfter block verified nothing.
    // Mirrors runJobWithPersistentFetchFailure above.
    rdd1.map {
      case (x, _) if (x == 1) && FailThisAttempt._fail.getAndSet(false) =>
        throw new FetchFailedException(
          BlockManagerId("1", "1", 1), shuffleHandle.shuffleId, 0L, 0, 0, "test")
      case (x, _) => x
    }.count()
  }

  failAfter(10.seconds) {
    val e = intercept[SparkException] {
      runJobWithPersistentFetchFailure
    }
    assert(e.getMessage.contains("org.apache.spark.shuffle.FetchFailedException"))
  }

  // Run a second job that will fail due to a fetch failure.
  // This job will hang without the fix for SPARK-17644.
  failAfter(10.seconds) {
    val e = intercept[SparkException] {
      runJobWithPersistentFetchFailure
    }
    assert(e.getMessage.contains("org.apache.spark.shuffle.FetchFailedException"))
  }

  failAfter(10.seconds) {
    try {
      runJobWithTemporaryFetchFailure
    } catch {
      case e: Throwable => fail("A job with one fetch failure should eventually succeed")
    }
  }
}
// Verifies that late completions from an earlier (zombie) stage attempt never cause two
// active task sets for the same stage to exist at once.
test("[SPARK-19263] DAGScheduler should not submit multiple active tasksets," +
  " even with late completions from earlier stage attempts") {
  // Create 3 RDDs with shuffle dependencies on each other: rddA <--- rddB <--- rddC
  val rddA = new MyRDD(sc, 2, Nil)
  val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(2))
  val shuffleIdA = shuffleDepA.shuffleId

  val rddB = new MyRDD(sc, 2, List(shuffleDepA), tracker = mapOutputTracker)
  val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(2))

  val rddC = new MyRDD(sc, 2, List(shuffleDepB), tracker = mapOutputTracker)

  submit(rddC, Array(0, 1))

  // Complete both tasks in rddA.
  assert(taskSets(0).stageId === 0 && taskSets(0).stageAttemptId === 0)
  complete(taskSets(0), Seq(
    (Success, makeMapStatus("hostA", 2)),
    (Success, makeMapStatus("hostA", 2))))

  // Fetch failed for task(stageId=1, stageAttemptId=0, partitionId=0) running on hostA
  // and task(stageId=1, stageAttemptId=0, partitionId=1) is still running.
  assert(taskSets(1).stageId === 1 && taskSets(1).stageAttemptId === 0)
  runEvent(makeCompletionEvent(
    taskSets(1).tasks(0),
    FetchFailed(makeBlockManagerId("hostA"), shuffleIdA, 0L, 0, 0,
      "Fetch failure of task: stageId=1, stageAttempt=0, partitionId=0"),
    result = null))

  // Both original tasks in rddA should be marked as failed, because they ran on the
  // failed hostA, so both should be resubmitted. Complete them on hostB successfully.
  scheduler.resubmitFailedStages()
  assert(taskSets(2).stageId === 0 && taskSets(2).stageAttemptId === 1
    && taskSets(2).tasks.size === 2)
  complete(taskSets(2), Seq(
    (Success, makeMapStatus("hostB", 2)),
    (Success, makeMapStatus("hostB", 2))))

  // Complete task(stageId=1, stageAttemptId=0, partitionId=1) running on failed hostA
  // successfully. The success should be ignored because the task started before the
  // executor failed, so the output may have been lost.
  runEvent(makeCompletionEvent(
    taskSets(1).tasks(1), Success, makeMapStatus("hostA", 2)))

  // task(stageId=1, stageAttemptId=1, partitionId=1) should be marked completed when
  // task(stageId=1, stageAttemptId=0, partitionId=1) finished
  // ideally we would verify that but no way to get into task scheduler to verify

  // Both tasks in rddB should be resubmitted, because none of them has succeeded truly.
  // Complete the task(stageId=1, stageAttemptId=1, partitionId=0) successfully.
  // Task(stageId=1, stageAttemptId=1, partitionId=1) of this new active stage attempt
  // is still running.
  assert(taskSets(3).stageId === 1 && taskSets(3).stageAttemptId === 1
    && taskSets(3).tasks.size === 2)
  runEvent(makeCompletionEvent(
    taskSets(3).tasks(0), Success, makeMapStatus("hostB", 2)))

  // At this point there should be no active task set for stageId=1 and we need
  // to resubmit because the output from (stageId=1, stageAttemptId=0, partitionId=1)
  // was ignored due to executor failure
  assert(taskSets.size === 5)
  assert(taskSets(4).stageId === 1 && taskSets(4).stageAttemptId === 2
    && taskSets(4).tasks.size === 1)

  // Complete task(stageId=1, stageAttempt=2, partitionId=1) successfully.
  runEvent(makeCompletionEvent(
    taskSets(4).tasks(0), Success, makeMapStatus("hostB", 2)))

  // Now the ResultStage should be submitted, because all of the tasks of rddB have
  // completed successfully on alive executors.
  assert(taskSets.size === 6 && taskSets(5).tasks(0).isInstanceOf[ResultTask[_, _]])
  complete(taskSets(5), Seq(
    (Success, 1),
    (Success, 1)))
}
// Verifies SPARK-20342: SparkListenerTaskEnd events must carry the task's updated accumulator
// values so that an accumulator registered for the job is visible in every task-end event.
test("task end event should have updated accumulators (SPARK-20342)") {
  val tasks = 10

  val accumId = new AtomicLong()
  val foundCount = new AtomicLong()
  val listener = new SparkListener() {
    // Counts how many task-end events report the accumulator under test.
    override def onTaskEnd(event: SparkListenerTaskEnd): Unit = {
      event.taskInfo.accumulables.find(_.id == accumId.get).foreach { _ =>
        foundCount.incrementAndGet()
      }
    }
  }
  sc.addSparkListener(listener)

  // Try a few times in a loop to make sure. This is not guaranteed to fail when the bug exists,
  // but it should at least make the test flaky. If the bug is fixed, this should always pass.
  (1 to 10).foreach { i =>
    foundCount.set(0L)
    val accum = sc.longAccumulator(s"accum$i")
    accumId.set(accum.id)

    sc.parallelize(1 to tasks, tasks).foreach { _ =>
      accum.add(1L)
    }
    // Drain the listener bus before asserting so all task-end events have been delivered.
    sc.listenerBus.waitUntilEmpty()
    assert(foundCount.get() === tasks)
  }
}
// Two barrier task failures from the SAME stage attempt must result in only one stage retry.
test("Barrier task failures from the same stage attempt don't trigger multiple stage retries") {
  val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
  val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
  val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
  submit(reduceRdd, Array(0, 1))

  val mapStageId = 0
  def countSubmittedMapStageAttempts(): Int = {
    sparkListener.submittedStageInfos.count(_.stageId == mapStageId)
  }

  // The map stage should have been submitted.
  assert(countSubmittedMapStageAttempts() === 1)

  // The first map task fails with TaskKilled.
  runEvent(makeCompletionEvent(
    taskSets(0).tasks(0),
    TaskKilled("test"),
    null))
  assert(sparkListener.failedStages === Seq(0))

  // The second map task fails with TaskKilled.
  runEvent(makeCompletionEvent(
    taskSets(0).tasks(1),
    TaskKilled("test"),
    null))

  // Trigger resubmission of the failed map stage.
  runEvent(ResubmitFailedStages)

  // Another attempt for the map stage should have been submitted, resulting in 2 total attempts.
  assert(countSubmittedMapStageAttempts() === 2)
}
// Complement of the test above: a barrier task failure arriving from an OLDER (already
// superseded) stage attempt must not trigger a further stage retry.
test("Barrier task failures from a previous stage attempt don't trigger stage retry") {
  val shuffleMapRdd = new MyRDD(sc, 2, Nil).barrier().mapPartitions(iter => iter)
  val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
  val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
  submit(reduceRdd, Array(0, 1))

  val mapStageId = 0
  def countSubmittedMapStageAttempts(): Int = {
    sparkListener.submittedStageInfos.count(_.stageId == mapStageId)
  }

  // The map stage should have been submitted.
  assert(countSubmittedMapStageAttempts() === 1)

  // The first map task fails with TaskKilled.
  runEvent(makeCompletionEvent(
    taskSets(0).tasks(0),
    TaskKilled("test"),
    null))
  assert(sparkListener.failedStages === Seq(0))

  // Trigger resubmission of the failed map stage.
  runEvent(ResubmitFailedStages)

  // Another attempt for the map stage should have been submitted, resulting in 2 total attempts.
  assert(countSubmittedMapStageAttempts() === 2)

  // The second map task fails with TaskKilled.
  runEvent(makeCompletionEvent(
    taskSets(0).tasks(1),
    TaskKilled("test"),
    null))

  // The second map task failure doesn't trigger stage retry.
  runEvent(ResubmitFailedStages)
  assert(countSubmittedMapStageAttempts() === 2)
}
/**
 * Shared fixture for the SPARK-25341 tests: builds a two-hop shuffle chain whose first map RDD
 * is indeterminate, completes both shuffle map stages, then injects a fetch failure into the
 * first task of the result stage.
 *
 * @return the two shuffle ids as (shuffleId1, shuffleId2)
 */
private def constructIndeterminateStageFetchFailed(): (Int, Int) = {
  val shuffleMapRdd1 = new MyRDD(sc, 2, Nil, indeterminate = true)

  val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
  val shuffleId1 = shuffleDep1.shuffleId
  val shuffleMapRdd2 = new MyRDD(sc, 2, List(shuffleDep1), tracker = mapOutputTracker)

  val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, new HashPartitioner(2))
  val shuffleId2 = shuffleDep2.shuffleId
  val finalRdd = new MyRDD(sc, 2, List(shuffleDep2), tracker = mapOutputTracker)

  submit(finalRdd, Array(0, 1))

  // Finish the first shuffle map stage.
  complete(taskSets(0), Seq(
    (Success, makeMapStatus("hostA", 2)),
    (Success, makeMapStatus("hostB", 2))))
  assert(mapOutputTracker.findMissingPartitions(shuffleId1) === Some(Seq.empty))

  // Finish the second shuffle map stage.
  complete(taskSets(1), Seq(
    (Success, makeMapStatus("hostC", 2)),
    (Success, makeMapStatus("hostD", 2))))
  assert(mapOutputTracker.findMissingPartitions(shuffleId2) === Some(Seq.empty))

  // The first task of the final stage failed with fetch failure
  runEvent(makeCompletionEvent(
    taskSets(2).tasks(0),
    FetchFailed(makeBlockManagerId("hostC"), shuffleId2, 0L, 0, 0, "ignored"),
    null))
  (shuffleId1, shuffleId2)
}
// With the old shuffle fetch protocol enabled, an indeterminate stage cannot be rolled back,
// so the whole job must abort with an explanatory message.
test("SPARK-25341: abort stage while using old fetch protocol") {
  // reset the test context with using old fetch protocol
  afterEach()
  val conf = new SparkConf()
  conf.set(config.SHUFFLE_USE_OLD_FETCH_PROTOCOL.key, "true")
  init(conf)
  // Construct the scenario of indeterminate stage fetch failed.
  constructIndeterminateStageFetchFailed()
  // The job should fail because Spark can't rollback the shuffle map stage while
  // using old protocol.
  assert(failure != null && failure.getMessage.contains(
    "Spark can only do this while using the new shuffle block fetching protocol"))
}
// When an indeterminate map stage must be re-run, every stage downstream of it has to be
// retried as well (their inputs may have changed); the job should then finish normally.
test("SPARK-25341: retry all the succeeding stages when the map stage is indeterminate") {
  val (shuffleId1, shuffleId2) = constructIndeterminateStageFetchFailed()

  // Check status for all failedStages
  val failedStages = scheduler.failedStages.toSeq
  assert(failedStages.map(_.id) == Seq(1, 2))
  // Shuffle blocks of "hostC" is lost, so first task of the `shuffleMapRdd2` needs to retry.
  assert(failedStages.collect {
    case stage: ShuffleMapStage if stage.shuffleDep.shuffleId == shuffleId2 => stage
  }.head.findMissingPartitions() == Seq(0))
  // The result stage is still waiting for its 2 tasks to complete
  assert(failedStages.collect {
    case stage: ResultStage => stage
  }.head.findMissingPartitions() == Seq(0, 1))

  scheduler.resubmitFailedStages()

  // The first task of the `shuffleMapRdd2` failed with fetch failure
  runEvent(makeCompletionEvent(
    taskSets(3).tasks(0),
    FetchFailed(makeBlockManagerId("hostA"), shuffleId1, 0L, 0, 0, "ignored"),
    null))

  val newFailedStages = scheduler.failedStages.toSeq
  assert(newFailedStages.map(_.id) == Seq(0, 1))

  scheduler.resubmitFailedStages()

  // First shuffle map stage resubmitted and reran all tasks.
  assert(taskSets(4).stageId == 0)
  assert(taskSets(4).stageAttemptId == 1)
  assert(taskSets(4).tasks.length == 2)

  // Finish all stages.
  complete(taskSets(4), Seq(
    (Success, makeMapStatus("hostA", 2)),
    (Success, makeMapStatus("hostB", 2))))
  assert(mapOutputTracker.findMissingPartitions(shuffleId1) === Some(Seq.empty))

  complete(taskSets(5), Seq(
    (Success, makeMapStatus("hostC", 2)),
    (Success, makeMapStatus("hostD", 2))))
  assert(mapOutputTracker.findMissingPartitions(shuffleId2) === Some(Seq.empty))

  complete(taskSets(6), Seq((Success, 11), (Success, 12)))

  // Job successfully ended.
  assert(results === Map(0 -> 11, 1 -> 12))
  results.clear()
  assertDataStructuresEmpty()
}
// A chain of three indeterminate shuffle map stages: losing outputs in the middle must roll
// back every indeterminate stage in the chain, each retried with a fresh stage attempt.
test("SPARK-25341: continuous indeterminate stage roll back") {
  // shuffleMapRdd1/2/3 are all indeterminate.
  val shuffleMapRdd1 = new MyRDD(sc, 2, Nil, indeterminate = true)
  val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
  val shuffleId1 = shuffleDep1.shuffleId

  val shuffleMapRdd2 = new MyRDD(
    sc, 2, List(shuffleDep1), tracker = mapOutputTracker, indeterminate = true)
  val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, new HashPartitioner(2))
  val shuffleId2 = shuffleDep2.shuffleId

  val shuffleMapRdd3 = new MyRDD(
    sc, 2, List(shuffleDep2), tracker = mapOutputTracker, indeterminate = true)
  val shuffleDep3 = new ShuffleDependency(shuffleMapRdd3, new HashPartitioner(2))
  val shuffleId3 = shuffleDep3.shuffleId
  val finalRdd = new MyRDD(sc, 2, List(shuffleDep3), tracker = mapOutputTracker)

  submit(finalRdd, Array(0, 1), properties = new Properties())

  // Finish the first 2 shuffle map stages.
  complete(taskSets(0), Seq(
    (Success, makeMapStatus("hostA", 2)),
    (Success, makeMapStatus("hostB", 2))))
  assert(mapOutputTracker.findMissingPartitions(shuffleId1) === Some(Seq.empty))

  complete(taskSets(1), Seq(
    (Success, makeMapStatus("hostB", 2)),
    (Success, makeMapStatus("hostD", 2))))
  assert(mapOutputTracker.findMissingPartitions(shuffleId2) === Some(Seq.empty))

  // Executor lost on hostB, both of stage 0 and 1 should be rerun.
  runEvent(makeCompletionEvent(
    taskSets(2).tasks(0),
    FetchFailed(makeBlockManagerId("hostB"), shuffleId2, 0L, 0, 0, "ignored"),
    null))
  mapOutputTracker.removeOutputsOnHost("hostB")

  assert(scheduler.failedStages.toSeq.map(_.id) == Seq(1, 2))
  scheduler.resubmitFailedStages()

  // Asserts the given task set is a second attempt (attempt id 1) of the given stage and
  // completes it, then verifies its shuffle has no missing partitions left.
  def checkAndCompleteRetryStage(
      taskSetIndex: Int,
      stageId: Int,
      shuffleId: Int): Unit = {
    assert(taskSets(taskSetIndex).stageId == stageId)
    assert(taskSets(taskSetIndex).stageAttemptId == 1)
    assert(taskSets(taskSetIndex).tasks.length == 2)
    complete(taskSets(taskSetIndex), Seq(
      (Success, makeMapStatus("hostA", 2)),
      (Success, makeMapStatus("hostB", 2))))
    assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))
  }

  // Check all indeterminate stage roll back.
  checkAndCompleteRetryStage(3, 0, shuffleId1)
  checkAndCompleteRetryStage(4, 1, shuffleId2)
  checkAndCompleteRetryStage(5, 2, shuffleId3)

  // Result stage success, all job ended.
  complete(taskSets(6), Seq((Success, 11), (Success, 12)))
  assert(results === Map(0 -> 11, 1 -> 12))
  results.clear()
  assertDataStructuresEmpty()
}
// SPARK-29042: sampling an RDD whose output order is UNORDERED must escalate the determinism
// level to INDETERMINATE (the sample depends on element order).
test("SPARK-29042: Sampled RDD with unordered input should be indeterminate") {
  val shuffleMapRdd1 = new MyRDD(sc, 2, Nil, indeterminate = false)

  val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
  val shuffleMapRdd2 = new MyRDD(sc, 2, List(shuffleDep1), tracker = mapOutputTracker)

  assert(shuffleMapRdd2.outputDeterministicLevel == DeterministicLevel.UNORDERED)

  val sampledRdd = shuffleMapRdd2.sample(true, 0.3, 1000L)
  assert(sampledRdd.outputDeterministicLevel == DeterministicLevel.INDETERMINATE)
}
/**
 * Shared scenario for the SPARK-23207 tests: runs a shuffle from `mapRdd` into a 2-partition
 * result stage, completes the first result task, then fails the second with FetchFailed and
 * asserts the job aborts because the partially-committed result stage cannot be rolled back.
 */
private def assertResultStageFailToRollback(mapRdd: MyRDD): Unit = {
  val shuffleDep = new ShuffleDependency(mapRdd, new HashPartitioner(2))
  val shuffleId = shuffleDep.shuffleId
  val finalRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)

  submit(finalRdd, Array(0, 1))

  completeShuffleMapStageSuccessfully(taskSets.length - 1, 0, numShufflePartitions = 2)
  assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))

  // Finish the first task of the result stage
  runEvent(makeCompletionEvent(
    taskSets.last.tasks(0), Success, 42,
    Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))

  // Fail the second task with FetchFailed.
  runEvent(makeCompletionEvent(
    taskSets.last.tasks(1),
    FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
    null))

  // The job should fail because Spark can't rollback the result stage.
  assert(failure != null && failure.getMessage.contains("Spark cannot rollback"))
}
// An indeterminate map RDD feeding a result stage cannot be rolled back at all.
test("SPARK-23207: cannot rollback a result stage") {
  val shuffleMapRdd = new MyRDD(sc, 2, Nil, indeterminate = true)
  assertResultStageFailToRollback(shuffleMapRdd)
}

// Local checkpointing does not make an indeterminate RDD reproducible, even when the
// checkpoint has already been materialized before the job runs.
test("SPARK-23207: local checkpoint fail to rollback (checkpointed before)") {
  val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
  shuffleMapRdd.localCheckpoint()
  shuffleMapRdd.doCheckpoint()
  assertResultStageFailToRollback(shuffleMapRdd)
}

// Same as above but the local checkpoint is only requested, not yet materialized.
test("SPARK-23207: local checkpoint fail to rollback (checkpointing now)") {
  val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
  shuffleMapRdd.localCheckpoint()
  assertResultStageFailToRollback(shuffleMapRdd)
}
/**
 * Counterpart of assertResultStageFailToRollback: same scenario, but asserts that the job does
 * NOT fail -- the map stage is retried for the lost partition while the result stage keeps its
 * already-completed first partition.
 */
private def assertResultStageNotRollbacked(mapRdd: MyRDD): Unit = {
  val shuffleDep = new ShuffleDependency(mapRdd, new HashPartitioner(2))
  val shuffleId = shuffleDep.shuffleId
  val finalRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)

  submit(finalRdd, Array(0, 1))

  completeShuffleMapStageSuccessfully(taskSets.length - 1, 0, numShufflePartitions = 2)
  assert(mapOutputTracker.findMissingPartitions(shuffleId) === Some(Seq.empty))

  // Finish the first task of the result stage
  runEvent(makeCompletionEvent(
    taskSets.last.tasks(0), Success, 42,
    Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))

  // Fail the second task with FetchFailed.
  runEvent(makeCompletionEvent(
    taskSets.last.tasks(1),
    FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0L, 0, 0, "ignored"),
    null))

  assert(failure == null, "job should not fail")
  val failedStages = scheduler.failedStages.toSeq
  assert(failedStages.length == 2)
  // Shuffle blocks of "hostA" is lost, so the first task of the map stage needs to retry.
  assert(failedStages.collect {
    case stage: ShuffleMapStage if stage.shuffleDep.shuffleId == shuffleId => stage
  }.head.findMissingPartitions() == Seq(0))
  // The first task of result stage remains completed.
  assert(failedStages.collect {
    case stage: ResultStage => stage
  }.head.findMissingPartitions() == Seq(1))
}
// A materialized reliable checkpoint makes the map RDD reproducible, so the result stage does
// not need to be rolled back.
test("SPARK-23207: reliable checkpoint can avoid rollback (checkpointed before)") {
  withTempDir { dir =>
    sc.setCheckpointDir(dir.getCanonicalPath)
    val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
    shuffleMapRdd.checkpoint()
    shuffleMapRdd.doCheckpoint()
    assertResultStageNotRollbacked(shuffleMapRdd)
  }
}

// A reliable checkpoint that has only been requested (not yet materialized) does not help:
// the RDD is still indeterminate when the job runs, so rollback must fail.
test("SPARK-23207: reliable checkpoint fail to rollback (checkpointing now)") {
  withTempDir { dir =>
    sc.setCheckpointDir(dir.getCanonicalPath)
    val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true)
    shuffleMapRdd.checkpoint()
    assertResultStageFailToRollback(shuffleMapRdd)
  }
}
// SPARK-27164: countApprox on an empty RDD must still produce a job-end event (the scheduled
// job must complete rather than hang forever).
test("SPARK-27164: RDD.countApprox on empty RDDs schedules jobs which never complete") {
  val latch = new CountDownLatch(1)
  val jobListener = new SparkListener {
    override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
      latch.countDown()
    }
  }
  sc.addSparkListener(jobListener)
  sc.emptyRDD[Int].countApprox(10000).getFinalValue()
  assert(latch.await(10, TimeUnit.SECONDS))
}
// Late successful completions from a zombie (superseded) task set attempt must still count
// toward the non-zombie attempt's progress and be reported via tasksMarkedAsCompleted.
test("Completions in zombie tasksets update status of non-zombie taskset") {
  val parts = 4
  val shuffleMapRdd = new MyRDD(sc, parts, Nil)
  val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(parts))
  val reduceRdd = new MyRDD(sc, parts, List(shuffleDep), tracker = mapOutputTracker)
  submit(reduceRdd, (0 until parts).toArray)
  assert(taskSets.length == 1)

  // Finish the first task of the shuffle map stage.
  runEvent(makeCompletionEvent(
    taskSets(0).tasks(0), Success, makeMapStatus("hostA", 4),
    Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))

  // The second task of the shuffle map stage failed with FetchFailed.
  runEvent(makeCompletionEvent(
    taskSets(0).tasks(1),
    FetchFailed(makeBlockManagerId("hostB"), shuffleDep.shuffleId, 0L, 0, 0, "ignored"),
    null))

  scheduler.resubmitFailedStages()
  assert(taskSets.length == 2)
  // The first partition has completed already, so the new attempt only needs to run 3 tasks.
  assert(taskSets(1).tasks.length == 3)

  // Finish the first task of the second attempt of the shuffle map stage.
  runEvent(makeCompletionEvent(
    taskSets(1).tasks(0), Success, makeMapStatus("hostA", 4),
    Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))

  // Finish the third task of the first attempt of the shuffle map stage.
  runEvent(makeCompletionEvent(
    taskSets(0).tasks(2), Success, makeMapStatus("hostA", 4),
    Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
  assert(tasksMarkedAsCompleted.length == 1)
  assert(tasksMarkedAsCompleted.head.partitionId == 2)

  // Finish the fourth task of the first attempt of the shuffle map stage.
  runEvent(makeCompletionEvent(
    taskSets(0).tasks(3), Success, makeMapStatus("hostA", 4),
    Seq.empty, Array.empty, createFakeTaskInfoWithId(0)))
  assert(tasksMarkedAsCompleted.length == 2)
  assert(tasksMarkedAsCompleted.last.partitionId == 3)

  // Now the shuffle map stage is completed, and the next stage is submitted.
  assert(taskSets.length == 3)

  // Finish
  complete(taskSets(2), Seq((Success, 42), (Success, 42), (Success, 42), (Success, 42)))
  assertDataStructuresEmpty()
}
/**
 * Asserts that each task in `taskSet` has exactly the expected preferred-location hosts,
 * position by position. Only hosts are compared; executor IDs are ignored.
 */
private def assertLocations(taskSet: TaskSet, hosts: Seq[Seq[String]]): Unit = {
  assert(hosts.size === taskSet.tasks.size)
  taskSet.tasks.zip(hosts).foreach { case (task, expectedLocs) =>
    val actualHosts = task.preferredLocations.map(_.host).toSet
    assert(actualHosts === expectedLocs.toSet)
  }
}
// Sanity check run at the end of most tests: every bookkeeping structure in the scheduler
// must be empty once all jobs have finished, otherwise something leaked state.
private def assertDataStructuresEmpty(): Unit = {
  assert(scheduler.activeJobs.isEmpty)
  assert(scheduler.failedStages.isEmpty)
  assert(scheduler.jobIdToActiveJob.isEmpty)
  assert(scheduler.jobIdToStageIds.isEmpty)
  assert(scheduler.stageIdToStage.isEmpty)
  assert(scheduler.runningStages.isEmpty)
  assert(scheduler.shuffleIdToMapStage.isEmpty)
  assert(scheduler.waitingStages.isEmpty)
  assert(scheduler.outputCommitCoordinator.isEmpty)
}
// Nothing in this test should break if the task info's fields are null, but
// OutputCommitCoordinator requires the task info itself to not be null.
// Delegates to createFakeTaskInfoWithId to avoid duplicating the TaskInfo construction;
// task id 0 matches the previous hand-rolled version exactly (index/attempt/launchTime 0,
// empty executor/host, ANY locality, non-speculative, finishTime 1).
private def createFakeTaskInfo(): TaskInfo = createFakeTaskInfoWithId(0L)
// Builds a minimal TaskInfo with the given task id and all other fields zero/empty;
// finishTime is set to 1 so the task reads as already finished.
private def createFakeTaskInfoWithId(taskId: Long): TaskInfo = {
  val info = new TaskInfo(taskId, 0, 0, 0L, "", "", TaskLocality.ANY, false)
  info.finishTime = 1
  info
}
/**
 * Builds a CompletionEvent for `task`, deriving the accumulator updates from the end reason:
 * successful tasks report their metric accumulators, ExceptionFailure/TaskKilled reasons carry
 * their own accumulators, and any other reason contributes none. `extraAccumUpdates` are
 * appended on top of whatever the reason provides.
 */
private def makeCompletionEvent(
    task: Task[_],
    reason: TaskEndReason,
    result: Any,
    extraAccumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty,
    metricPeaks: Array[Long] = Array.empty,
    taskInfo: TaskInfo = createFakeTaskInfo()): CompletionEvent = {
  val accumUpdates = reason match {
    case Success => task.metrics.accumulators()
    case ef: ExceptionFailure => ef.accums
    case tk: TaskKilled => tk.accums
    case _ => Seq.empty
  }
  CompletionEvent(task, reason, result, accumUpdates ++ extraAccumUpdates, metricPeaks, taskInfo)
}
}
object DAGSchedulerSuite {
  // Builds a MapStatus for `host` where every one of the `reduces` partitions reports
  // `sizes` bytes of output; `mapTaskId` defaults to -1 (unspecified).
  def makeMapStatus(host: String, reduces: Int, sizes: Byte = 2, mapTaskId: Long = -1): MapStatus =
    MapStatus(makeBlockManagerId(host), Array.fill[Long](reduces)(sizes), mapTaskId)

  // Block manager ids in these tests follow the "exec-<host>" executor naming convention
  // (e.g. ExecutorLost("exec-hostA", ...) matches outputs registered via this helper).
  def makeBlockManagerId(host: String): BlockManagerId =
    BlockManagerId("exec-" + host, host, 12345)
}
// Shared mutable flag used by the SPARK-17644 test: the first task attempt that reads (and
// atomically clears) the flag throws a fetch failure; subsequent attempts succeed.
object FailThisAttempt {
  val _fail = new AtomicBoolean(true)
}
| goldmedal/spark | core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala | Scala | apache-2.0 | 134,817 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.immutable.HashSet
import scala.collection.mutable.{ArrayBuffer, Stack}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.Literal.{FalseLiteral, TrueLiteral}
import org.apache.spark.sql.catalyst.expressions.objects.AssertNotNull
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/*
* Optimization rules defined in this file should not affect the structure of the logical plan.
*/
/**
* Replaces [[Expression Expressions]] that can be statically evaluated with
* equivalent [[Literal]] values.
*/
object ConstantFolding extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case q: LogicalPlan => q transformExpressionsDown {
      // Skip redundant folding of literals. This rule is technically not necessary. Placing this
      // here avoids running the next rule for Literal values, which would create a new Literal
      // object and running eval unnecessarily.
      case l: Literal => l

      // Fold expressions that are foldable. Because the traversal is top-down
      // (transformExpressionsDown), a foldable subtree is evaluated once at its root rather
      // than folding each of its children separately.
      case e if e.foldable => Literal.create(e.eval(EmptyRow), e.dataType)
    }
  }
}
/**
* Substitutes [[Attribute Attributes]] which can be statically evaluated with their corresponding
* value in conjunctive [[Expression Expressions]]
* eg.
* {{{
* SELECT * FROM table WHERE i = 5 AND j = i + 3
* ==> SELECT * FROM table WHERE i = 5 AND j = 8
* }}}
*
* Approach used:
* - Start from AND operator as the root
* - Get all the children conjunctive predicates which are EqualTo / EqualNullSafe such that they
* don't have a `NOT` or `OR` operator in them
* - Populate a mapping of attribute => constant value by looking at all the equals predicates
* - Using this mapping, replace occurrence of the attributes with the corresponding constant values
* in the AND node.
*/
object ConstantPropagation extends Rule[LogicalPlan] with PredicateHelper {
  /**
   * True when the expression tree contains a `Not` or an `Or` anywhere inside it.
   * Such predicates are excluded from propagation because the equality they contain
   * does not necessarily hold unconditionally.
   */
  private def containsNonConjunctionPredicates(expression: Expression): Boolean = expression.find {
    case _: Not | _: Or => true
    case _ => false
  }.isDefined
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case f: Filter => f transformExpressionsUp {
      case and: And =>
        // Keep only top-level conjuncts that are plain (null-safe) equalities and
        // contain no Not/Or — only those hold unconditionally within this And.
        val conjunctivePredicates =
          splitConjunctivePredicates(and)
            .filter(expr => expr.isInstanceOf[EqualTo] || expr.isInstanceOf[EqualNullSafe])
            .filterNot(expr => containsNonConjunctionPredicates(expr))
        // Normalize each equality to (attribute, literal), remembering the source
        // predicate so it can be excluded from substitution below.
        val equalityPredicates = conjunctivePredicates.collect {
          case e @ EqualTo(left: AttributeReference, right: Literal) => ((left, right), e)
          case e @ EqualTo(left: Literal, right: AttributeReference) => ((right, left), e)
          case e @ EqualNullSafe(left: AttributeReference, right: Literal) => ((left, right), e)
          case e @ EqualNullSafe(left: Literal, right: AttributeReference) => ((right, left), e)
        }
        val constantsMap = AttributeMap(equalityPredicates.map(_._1))
        // The defining predicates themselves must not be rewritten, otherwise
        // `i = 5` would become the degenerate `5 = 5`.
        val predicates = equalityPredicates.map(_._2).toSet
        def replaceConstants(expression: Expression) = expression transform {
          case a: AttributeReference =>
            constantsMap.get(a) match {
              case Some(literal) => literal
              case None => a
            }
        }
        // Substitute constants only inside other equality predicates of this And;
        // the source predicates are left untouched.
        and transform {
          case e @ EqualTo(_, _) if !predicates.contains(e) => replaceConstants(e)
          case e @ EqualNullSafe(_, _) if !predicates.contains(e) => replaceConstants(e)
        }
    }
  }
}
/**
* Reorder associative integral-type operators and fold all constants into one.
*/
object ReorderAssociativeOperator extends Rule[LogicalPlan] {
  /**
   * Flattens a tree of `Add` operators into its operand list. Subtrees that are
   * themselves grouping expressions are kept intact (not descended into) so the
   * optimized expression can still be derived from the grouping expressions.
   */
  private def flattenAdd(
      expression: Expression,
      groupSet: ExpressionSet): Seq[Expression] = expression match {
    case expr @ Add(l, r) if !groupSet.contains(expr) =>
      flattenAdd(l, groupSet) ++ flattenAdd(r, groupSet)
    case other => other :: Nil
  }

  /** Same as [[flattenAdd]], but for `Multiply`. */
  private def flattenMultiply(
      expression: Expression,
      groupSet: ExpressionSet): Seq[Expression] = expression match {
    case expr @ Multiply(l, r) if !groupSet.contains(expr) =>
      flattenMultiply(l, groupSet) ++ flattenMultiply(r, groupSet)
    case other => other :: Nil
  }

  /** Grouping expressions of an Aggregate node; empty for every other operator. */
  private def collectGroupingExpressions(plan: LogicalPlan): ExpressionSet = plan match {
    case Aggregate(groupingExpressions, _, _) => ExpressionSet(groupingExpressions)
    case _ => ExpressionSet(Seq())
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case q: LogicalPlan =>
      // We have to respect aggregate expressions which exists in grouping expressions when plan
      // is an Aggregate operator, otherwise the optimized expression could not be derived from
      // grouping expressions.
      val groupingExpressionSet = collectGroupingExpressions(q)
      q transformExpressionsDown {
        case a: Add if a.deterministic && a.dataType.isInstanceOf[IntegralType] =>
          val (foldables, others) = flattenAdd(a, groupingExpressionSet).partition(_.foldable)
          if (foldables.size > 1) {
            // Fold all constant operands into one literal; keep the rest unchanged.
            val foldableExpr = foldables.reduce((x, y) => Add(x, y))
            val c = Literal.create(foldableExpr.eval(EmptyRow), a.dataType)
            if (others.isEmpty) c else Add(others.reduce((x, y) => Add(x, y)), c)
          } else {
            a
          }
        case m: Multiply if m.deterministic && m.dataType.isInstanceOf[IntegralType] =>
          val (foldables, others) = flattenMultiply(m, groupingExpressionSet).partition(_.foldable)
          if (foldables.size > 1) {
            val foldableExpr = foldables.reduce((x, y) => Multiply(x, y))
            val c = Literal.create(foldableExpr.eval(EmptyRow), m.dataType)
            if (others.isEmpty) c else Multiply(others.reduce((x, y) => Multiply(x, y)), c)
          } else {
            m
          }
      }
  }
}
/**
* Optimize IN predicates:
* 1. Removes literal repetitions.
* 2. Replaces [[In (value, seq[Literal])]] with optimized version
* [[InSet (value, HashSet[Literal])]] which is much faster.
*/
object OptimizeIn extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transform {
    case node: LogicalPlan =>
      node.transformExpressionsDown {
        case in @ In(value, options) if in.inSetConvertible =>
          // Deduplicate the list, keeping the first occurrence of each expression.
          val distinctOptions = ExpressionSet(options).toSeq
          if (distinctOptions.size > SQLConf.get.optimizerInSetConversionThreshold) {
            // Large lists: evaluate every literal once and switch to hash-set lookup.
            InSet(value, HashSet() ++ distinctOptions.map(_.eval(EmptyRow)))
          } else if (distinctOptions.size < options.size) {
            // Duplicates were removed; keep In but with the shorter list.
            in.copy(list = distinctOptions)
          } else {
            // Nothing changed; return the original expression untouched.
            in
          }
      }
  }
}
/**
* Simplifies boolean expressions:
* 1. Simplifies expressions whose answer can be determined without evaluating both sides.
* 2. Eliminates / extracts common factors.
* 3. Merge same expressions
* 4. Removes `Not` operator.
*/
object BooleanSimplification extends Rule[LogicalPlan] with PredicateHelper {
  // NOTE: case order matters — the more specific literal/negation rules must fire
  // before the generic common-factor cases at the bottom, which match any And/Or.
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case q: LogicalPlan => q transformExpressionsUp {
      // Identity and annihilator rules for literal true/false operands.
      case TrueLiteral And e => e
      case e And TrueLiteral => e
      case FalseLiteral Or e => e
      case e Or FalseLiteral => e
      case FalseLiteral And _ => FalseLiteral
      case _ And FalseLiteral => FalseLiteral
      case TrueLiteral Or _ => TrueLiteral
      case _ Or TrueLiteral => TrueLiteral
      // Contradiction (a && !a => false) and excluded middle (a || !a => true),
      // with the Not allowed on either operand.
      case a And b if Not(a).semanticEquals(b) => FalseLiteral
      case a Or b if Not(a).semanticEquals(b) => TrueLiteral
      case a And b if a.semanticEquals(Not(b)) => FalseLiteral
      case a Or b if a.semanticEquals(Not(b)) => TrueLiteral
      // Idempotence: a && a => a, a || a => a.
      case a And b if a.semanticEquals(b) => a
      case a Or b if a.semanticEquals(b) => a
      // a && (!a || c) => a && c, plus the three symmetric variants.
      case a And (b Or c) if Not(a).semanticEquals(b) => And(a, c)
      case a And (b Or c) if Not(a).semanticEquals(c) => And(a, b)
      case (a Or b) And c if a.semanticEquals(Not(c)) => And(b, c)
      case (a Or b) And c if b.semanticEquals(Not(c)) => And(a, c)
      // a || (!a && c) => a || c, plus the three symmetric variants.
      case a Or (b And c) if Not(a).semanticEquals(b) => Or(a, c)
      case a Or (b And c) if Not(a).semanticEquals(c) => Or(a, b)
      case (a And b) Or c if a.semanticEquals(Not(c)) => Or(b, c)
      case (a And b) Or c if b.semanticEquals(Not(c)) => Or(a, c)
      // Common factor elimination for conjunction
      case and @ (left And right) =>
        // 1. Split left and right to get the disjunctive predicates,
        //    i.e. lhs = (a, b), rhs = (a, c)
        // 2. Find the common predict between lhsSet and rhsSet, i.e. common = (a)
        // 3. Remove common predict from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c)
        // 4. Apply the formula, get the optimized predicate: common || (ldiff && rdiff)
        val lhs = splitDisjunctivePredicates(left)
        val rhs = splitDisjunctivePredicates(right)
        val common = lhs.filter(e => rhs.exists(e.semanticEquals))
        if (common.isEmpty) {
          // No common factors, return the original predicate
          and
        } else {
          val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals))
          val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals))
          if (ldiff.isEmpty || rdiff.isEmpty) {
            // (a || b || c || ...) && (a || b) => (a || b)
            common.reduce(Or)
          } else {
            // (a || b || c || ...) && (a || b || d || ...) =>
            // ((c || ...) && (d || ...)) || a || b
            (common :+ And(ldiff.reduce(Or), rdiff.reduce(Or))).reduce(Or)
          }
        }
      // Common factor elimination for disjunction
      case or @ (left Or right) =>
        // 1. Split left and right to get the conjunctive predicates,
        //    i.e. lhs = (a, b), rhs = (a, c)
        // 2. Find the common predict between lhsSet and rhsSet, i.e. common = (a)
        // 3. Remove common predict from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c)
        // 4. Apply the formula, get the optimized predicate: common && (ldiff || rdiff)
        val lhs = splitConjunctivePredicates(left)
        val rhs = splitConjunctivePredicates(right)
        val common = lhs.filter(e => rhs.exists(e.semanticEquals))
        if (common.isEmpty) {
          // No common factors, return the original predicate
          or
        } else {
          val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals))
          val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals))
          if (ldiff.isEmpty || rdiff.isEmpty) {
            // (a && b) || (a && b && c && ...) => a && b
            common.reduce(And)
          } else {
            // (a && b && c && ...) || (a && b && d && ...) =>
            // ((c && ...) || (d && ...)) && a && b
            (common :+ Or(ldiff.reduce(And), rdiff.reduce(And))).reduce(And)
          }
        }
      // Push Not inward / eliminate it: negated literals, negated comparisons
      // rewritten to their complements, De Morgan's laws, and double negation.
      case Not(TrueLiteral) => FalseLiteral
      case Not(FalseLiteral) => TrueLiteral
      case Not(a GreaterThan b) => LessThanOrEqual(a, b)
      case Not(a GreaterThanOrEqual b) => LessThan(a, b)
      case Not(a LessThan b) => GreaterThanOrEqual(a, b)
      case Not(a LessThanOrEqual b) => GreaterThan(a, b)
      case Not(a Or b) => And(Not(a), Not(b))
      case Not(a And b) => Or(Not(a), Not(b))
      case Not(Not(e)) => e
    }
  }
}
/**
* Simplifies binary comparisons with semantically-equal expressions:
* 1) Replace '<=>' with 'true' literal.
* 2) Replace '=', '<=', and '>=' with 'true' literal if both operands are non-nullable.
* 3) Replace '<' and '>' with 'false' literal if both operands are non-nullable.
*/
object SimplifyBinaryComparison extends Rule[LogicalPlan] with PredicateHelper {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transform {
    case node: LogicalPlan =>
      node.transformExpressionsUp {
        // <=> of two semantically equal operands is always true (null-safe).
        case EqualNullSafe(l, r) if l.semanticEquals(r) => TrueLiteral
        // =, >=, <= of equal operands fold to true only when neither can be null.
        case EqualTo(l, r) if !l.nullable && !r.nullable && l.semanticEquals(r) => TrueLiteral
        case GreaterThanOrEqual(l, r) if !l.nullable && !r.nullable && l.semanticEquals(r) =>
          TrueLiteral
        case LessThanOrEqual(l, r) if !l.nullable && !r.nullable && l.semanticEquals(r) =>
          TrueLiteral
        // Strict comparisons of equal non-nullable operands are always false.
        case GreaterThan(l, r) if !l.nullable && !r.nullable && l.semanticEquals(r) =>
          FalseLiteral
        case LessThan(l, r) if !l.nullable && !r.nullable && l.semanticEquals(r) => FalseLiteral
      }
  }
}
/**
* Simplifies conditional expressions (if / case).
*/
object SimplifyConditionals extends Rule[LogicalPlan] with PredicateHelper {
  /** True when a branch condition can never be satisfied (false or null literal). */
  private def falseOrNullLiteral(e: Expression): Boolean = e match {
    case FalseLiteral => true
    case Literal(null, _) => true
    case _ => false
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case q: LogicalPlan => q transformExpressionsUp {
      // An If with a constant (or null) predicate collapses to one of its branches;
      // a null predicate behaves like false and picks the else branch.
      case If(TrueLiteral, trueValue, _) => trueValue
      case If(FalseLiteral, _, falseValue) => falseValue
      case If(Literal(null, _), _, falseValue) => falseValue
      case e @ CaseWhen(branches, elseValue) if branches.exists(x => falseOrNullLiteral(x._1)) =>
        // If there are branches that are always false, remove them.
        // If there are no more branches left, just use the else value.
        // Note that these two are handled together here in a single case statement because
        // otherwise we cannot determine the data type for the elseValue if it is None (i.e. null).
        val newBranches = branches.filter(x => !falseOrNullLiteral(x._1))
        if (newBranches.isEmpty) {
          elseValue.getOrElse(Literal.create(null, e.dataType))
        } else {
          e.copy(branches = newBranches)
        }
      case e @ CaseWhen(branches, _) if branches.headOption.map(_._1).contains(TrueLiteral) =>
        // If the first branch is a true literal, remove the entire CaseWhen and use the value
        // from that. Note that CaseWhen.branches should never be empty, and as a result the
        // headOption (rather than head) added above is just an extra (and unnecessary) safeguard.
        branches.head._2
      case CaseWhen(branches, _) if branches.exists(_._1 == TrueLiteral) =>
        // A branch with a true condition makes all following branches unreachable,
        // so they can be pruned away (along with the else value).
        val (h, t) = branches.span(_._1 != TrueLiteral)
        CaseWhen(h :+ t.head, None)
    }
  }
}
/**
* Simplifies LIKE expressions that do not need full regular expressions to evaluate the condition.
* For example, when the expression is just checking to see if a string starts with a given
* pattern.
*/
object LikeSimplification extends Rule[LogicalPlan] {
  // if guards below protect from escapes on trailing %.
  // Cases like "something\\%" are not optimized, but this does not affect correctness.
  // Each regex matches an entire LIKE pattern whose literal parts contain no SQL
  // wildcards (_ or %); the capture groups extract those literal parts.
  private val startsWith = "([^_%]+)%".r
  private val endsWith = "%([^_%]+)".r
  private val startsAndEndsWith = "([^_%]+)%([^_%]+)".r
  private val contains = "%([^_%]+)%".r
  private val equalTo = "([^_%]*)".r
  def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
    // Only LIKE with a string-literal pattern can be rewritten statically.
    case Like(input, Literal(pattern, StringType)) =>
      if (pattern == null) {
        // If pattern is null, return null value directly, since "col like null" == null.
        Literal(null, BooleanType)
      } else {
        pattern.toString match {
          case startsWith(prefix) if !prefix.endsWith("\\\\") =>
            StartsWith(input, Literal(prefix))
          case endsWith(postfix) =>
            EndsWith(input, Literal(postfix))
          // 'a%a' pattern is basically same with 'a%' && '%a'.
          // However, the additional `Length` condition is required to prevent 'a' match 'a%a'.
          case startsAndEndsWith(prefix, postfix) if !prefix.endsWith("\\\\") =>
            And(GreaterThanOrEqual(Length(input), Literal(prefix.length + postfix.length)),
              And(StartsWith(input, Literal(prefix)), EndsWith(input, Literal(postfix))))
          case contains(infix) if !infix.endsWith("\\\\") =>
            Contains(input, Literal(infix))
          // A pattern with no wildcards at all is a plain equality test.
          case equalTo(str) =>
            EqualTo(input, Literal(str))
          case _ =>
            // Pattern needs full regex evaluation; keep the Like unchanged.
            Like(input, Literal.create(pattern, StringType))
        }
      }
  }
}
/**
* Replaces [[Expression Expressions]] that can be statically evaluated with
* equivalent [[Literal]] values. This rule is more specific with
* Null value propagation from bottom to top of the expression tree.
*/
object NullPropagation extends Rule[LogicalPlan] {
  /** True only for a literal whose value is exactly null (of any data type). */
  private def isNullLiteral(e: Expression): Boolean = e match {
    case Literal(null, _) => true
    case _ => false
  }
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case q: LogicalPlan => q transformExpressionsUp {
      // A window expression over a cast 0L (typically produced by the Count rule
      // below — TODO confirm) is replaced by a direct cast of the constant.
      case e @ WindowExpression(Cast(Literal(0L, _), _, _), _) =>
        Cast(Literal(0L), e.dataType, Option(SQLConf.get.sessionLocalTimeZone))
      // count(null, null, ...) is always 0.
      case e @ AggregateExpression(Count(exprs), _, _, _) if exprs.forall(isNullLiteral) =>
        Cast(Literal(0L), e.dataType, Option(SQLConf.get.sessionLocalTimeZone))
      case ae @ AggregateExpression(Count(exprs), _, false, _) if !exprs.exists(_.nullable) =>
        // This rule should be only triggered when isDistinct field is false.
        // count of non-nullable columns counts every row, same as count(1).
        ae.copy(aggregateFunction = Count(Literal(1)))
      // Null checks on expressions that can never be null fold to constants.
      case IsNull(c) if !c.nullable => Literal.create(false, BooleanType)
      case IsNotNull(c) if !c.nullable => Literal.create(true, BooleanType)
      // x <=> null is equivalent to x IS NULL.
      case EqualNullSafe(Literal(null, _), r) => IsNull(r)
      case EqualNullSafe(l, Literal(null, _)) => IsNull(l)
      // The assertion can never fire for a non-nullable child; drop it.
      case AssertNotNull(c, _) if !c.nullable => c
      // For Coalesce, remove null literals.
      case e @ Coalesce(children) =>
        val newChildren = children.filterNot(isNullLiteral)
        if (newChildren.isEmpty) {
          Literal.create(null, e.dataType)
        } else if (newChildren.length == 1) {
          newChildren.head
        } else {
          Coalesce(newChildren)
        }
      // If the value expression is NULL then transform the In expression to null literal.
      case In(Literal(null, _), _) => Literal.create(null, BooleanType)
      // Non-leaf NullIntolerant expressions will return null, if at least one of its children is
      // a null literal.
      case e: NullIntolerant if e.children.exists(isNullLiteral) =>
        Literal.create(null, e.dataType)
    }
  }
}
/**
* Propagate foldable expressions:
* Replace attributes with aliases of the original foldable expressions if possible.
* Other optimizations will take advantage of the propagated foldable expressions.
*
* {{{
* SELECT 1.0 x, 'abc' y, Now() z ORDER BY x, y, 3
* ==> SELECT 1.0 x, 'abc' y, Now() z ORDER BY 1.0, 'abc', Now()
* }}}
*/
object FoldablePropagation extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = {
    // Collect every attribute that is an alias of a foldable expression in some
    // Project, mapping the attribute back to the alias that defines it.
    val foldableMap = AttributeMap(plan.flatMap {
      case Project(projectList, _) => projectList.collect {
        case a: Alias if a.child.foldable => (a.toAttribute, a)
      }
      case _ => Nil
    })
    val replaceFoldable: PartialFunction[Expression, Expression] = {
      case a: AttributeReference if foldableMap.contains(a) => foldableMap(a)
    }
    if (foldableMap.isEmpty) {
      plan
    } else {
      // `stop` becomes true once we hit an operator through which propagation is
      // unsafe; since we traverse bottom-up, everything above it is skipped too.
      var stop = false
      CleanupAliases(plan.transformUp {
        // A leaf node should not stop the folding process (note that we are traversing up the
        // tree, starting at the leaf nodes); so we are allowing it.
        case l: LeafNode =>
          l
        // We can only propagate foldables for a subset of unary nodes.
        case u: UnaryNode if !stop && canPropagateFoldables(u) =>
          u.transformExpressions(replaceFoldable)
        // Allow inner joins. We do not allow outer join, although its output attributes are
        // derived from its children, they are actually different attributes: the output of outer
        // join is not always picked from its children, but can also be null.
        // TODO(cloud-fan): It seems more reasonable to use new attributes as the output attributes
        // of outer join.
        case j @ Join(_, _, Inner, _) if !stop =>
          j.transformExpressions(replaceFoldable)
        // We can fold the projections an expand holds. However expand changes the output columns
        // and often reuses the underlying attributes; so we cannot assume that a column is still
        // foldable after the expand has been applied.
        // TODO(hvanhovell): Expand should use new attributes as the output attributes.
        case expand: Expand if !stop =>
          val newExpand = expand.copy(projections = expand.projections.map { projection =>
            projection.map(_.transform(replaceFoldable))
          })
          stop = true
          newExpand
        // Any other operator may change attribute semantics; stop propagating.
        case other =>
          stop = true
          other
      })
    }
  }

  /**
   * Whitelist of all [[UnaryNode]]s for which allow foldable propagation.
   */
  private def canPropagateFoldables(u: UnaryNode): Boolean = u match {
    case _: Project => true
    case _: Filter => true
    case _: SubqueryAlias => true
    case _: Aggregate => true
    case _: Window => true
    case _: Sample => true
    case _: GlobalLimit => true
    case _: LocalLimit => true
    case _: Generate => true
    case _: Distinct => true
    case _: AppendColumns => true
    case _: AppendColumnsWithObject => true
    case _: ResolvedHint => true
    case _: RepartitionByExpression => true
    case _: Repartition => true
    case _: Sort => true
    case _: TypedFilter => true
    case _ => false
  }
}
/**
* Optimizes expressions by replacing according to CodeGen configuration.
*/
object OptimizeCodegen extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressions {
    case caseWhen: CaseWhen if canCodegen(caseWhen) => caseWhen.toCodegen()
  }

  // A CaseWhen is eligible for codegen only when its branch count (counting the
  // optional else value as one) stays within the configured limit.
  private def canCodegen(e: CaseWhen): Boolean =
    e.branches.size + e.elseValue.size <= SQLConf.get.maxCaseBranchesForCodegen
}
/**
* Removes [[Cast Casts]] that are unnecessary because the input is already the correct type.
*/
object SimplifyCasts extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressions {
    // Casting to the child's exact type is a no-op.
    case Cast(child, target, _) if child.dataType == target => child
    case cast @ Cast(child, target, _) =>
      (child.dataType, target) match {
        // Widening only element nullability (non-nullable -> nullable) never
        // changes the data, so the cast can be dropped.
        case (ArrayType(fromElem, false), ArrayType(toElem, true)) if fromElem == toElem =>
          child
        // Same for map values with identical key and value types.
        case (MapType(fk, fv, false), MapType(tk, tv, true)) if fk == tk && fv == tv =>
          child
        case _ => cast
      }
  }
}
/**
* Removes nodes that are not necessary.
*/
object RemoveDispensableExpressions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressions {
    // Unary plus is the identity on its operand.
    case UnaryPositive(inner) => inner
    // PromotePrecision wraps its child without altering the value, so the
    // wrapper can be stripped (presumably only a precision marker — confirm
    // against its definition).
    case PromotePrecision(inner) => inner
  }
}
/**
* Removes the inner case conversion expressions that are unnecessary because
* the inner conversion is overwritten by the outer one.
*/
object SimplifyCaseConversionExpressions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transform {
    case node: LogicalPlan =>
      node.transformExpressionsUp {
        // The outermost conversion always wins, so the inner one is redundant.
        case Upper(Upper(inner)) => Upper(inner)
        case Upper(Lower(inner)) => Upper(inner)
        case Lower(Upper(inner)) => Lower(inner)
        case Lower(Lower(inner)) => Lower(inner)
      }
  }
}
/**
* Combine nested [[Concat]] expressions.
*/
object CombineConcats extends Rule[LogicalPlan] {
  // Iterative depth-first, left-to-right flattening of nested Concat nodes.
  // An explicit stack is used instead of recursion, which keeps the traversal
  // safe for arbitrarily deep nesting.
  private def flattenConcats(concat: Concat): Concat = {
    val pending = Stack[Expression](concat)
    val leaves = ArrayBuffer.empty[Expression]
    while (pending.nonEmpty) {
      pending.pop() match {
        case Concat(children) =>
          // Push in reverse so the leftmost child is popped (and emitted) first.
          pending.pushAll(children.reverse)
        case leaf =>
          leaves += leaf
      }
    }
    Concat(leaves)
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan.transformExpressionsDown {
    // Only rewrite when there is actually a nested Concat to merge.
    case concat: Concat if concat.children.exists(_.isInstanceOf[Concat]) =>
      flattenConcats(concat)
  }
}
| VigneshMohan1/spark-branch-2.3 | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala | Scala | apache-2.0 | 25,273 |
package edu.gemini.qv.plugin.selector
import edu.gemini.qv.plugin.{ConstraintsChanged, TimeRangeChanged, QvContext, QvTool}
import edu.gemini.qv.plugin.ui.QvGui
import edu.gemini.qv.plugin.ui.QvGui.ActionButton
import edu.gemini.qv.plugin.util.ConstraintsCache._
import edu.gemini.qv.plugin.util.ScheduleCache._
import edu.gemini.qv.plugin.util.SolutionProvider.ConstraintType
import edu.gemini.qv.plugin.util._
import edu.gemini.spModel.core.Site
import java.awt.Desktop
import java.net.{URL, URI}
import scala.swing._
import scala.swing.event.ButtonClicked
/**
* UI element that allows to deselect constraints in order to look at different scenarios for observations.
* The buttons are organised on two separate panels which gives more flexibility to place them in the UI.
*/
class ConstraintsSelector(ctx: QvContext) extends Publisher {

  // Toggle buttons for the per-observation constraints (elevation etc.).
  private val mainConstraintsBtns = Seq(
    ConstraintBtn(
      "Elevation",
      "Restrict observations by elevation, hour angle and airmass constraints.",
      Set(Elevation)),
    ConstraintBtn(
      "Sky Brightness",
      "Restrict observations by sky brightness constraints.",
      Set(SkyBrightness)),
    ConstraintBtn(
      "Timing Windows",
      "Restrict observations by timing windows.",
      Set(TimingWindows)),
    ConstraintBtn(
      "Minimum Time",
      "Restrict by the minimum time needed to successfully observe an observation.",
      Set(MinimumTime))
  )

  // Toggle buttons for schedule-related constraints; one button can cover
  // several constraint types (e.g. "Telescope" covers engineering, shutdown
  // and weather events).
  private val scheduleConstraintsBtns = Seq(
    ConstraintBtn(
      "Instruments",
      "Restrict observations by instrument availability.",
      Set(InstrumentConstraint)),
    ConstraintBtn(
      "Telescope",
      "Take planned engineering and shutdown time and long duration weather events into account.",
      Set(EngineeringConstraint, ShutdownConstraint, WeatherConstraint)),
    ConstraintBtn(
      "Programs",
      "Take laser availability and classical observing restrictions into account.",
      Set(LaserConstraint, ProgramConstraint))
  )

  private val buttons = mainConstraintsBtns ++ scheduleConstraintsBtns

  /** Panel holding the main constraint buttons. */
  object mainConstraints extends FlowPanel() {
    hGap = 0
    contents += new Label("Constraints:")
    contents += Swing.HStrut(5)
    contents ++= mainConstraintsBtns
  }

  /** Panel holding the schedule buttons plus the calendar and editor actions. */
  object scheduleConstraints extends FlowPanel() {
    hGap = 0
    contents += new Label("Schedule:")
    contents += Swing.HStrut(5)
    contents ++= scheduleConstraintsBtns
    contents += Swing.HStrut(5)
    contents += ActionButton(
      "",
      "Show schedule constraints calendar in browser.",
      () => {
        Desktop.getDesktop.browse(SolutionProvider(ctx).telescopeScheduleUrl)
      },
      QvGui.CalendarIcon
    )
    contents += ActionButton(
      "",
      "Edit schedule constraints.",
      (button: Button) => {
        val editor = new ScheduleEditor(ctx, SolutionProvider(ctx).scheduleCache)
        editor.setLocationRelativeTo(button)
        editor.open()
        ConstraintsSelector.this.publish(TimeRangeChanged)
      },
      QvGui.EditIcon
    )
  }

  listenTo(buttons: _*)
  listenTo(SolutionProvider(ctx))
  listenTo(ctx)

  reactions += {
    case ConstraintCalculationStart(c, _) =>
      // Disable the button while its constraint is being recalculated.
      // (foreach, not map: only the side effect is wanted.)
      buttons.find(_.constraints.contains(c)).foreach(_.enabled = false)
    case ConstraintCalculationEnd(c, _) =>
      buttons.find(_.constraints.contains(c)).foreach(_.enabled = true)
    case ConstraintsChanged =>
      buttons.foreach(b => b.selected = b.constraints.intersect(ctx.selectedConstraints).isEmpty)
    case ButtonClicked(b) => b match {
      case b: ConstraintBtn =>
        ctx.selectedConstraints = selected
      case _ => // Ignore
    }
  }

  /**
   * Returns all currently selected constraints.
   * Note: We use those toggle buttons "the wrong way round", i.e. selecting one, deselects the constraint
   * and vice versa. By default all constraints are selected, i.e. at startup all buttons are deselected.
   * Ok, that's confusing, but it is as it is.
   * Note also: Above horizon constraint is always selected in order never to show observations as available
   * when they are below the horizon.
   * @return
   */
  def selected = buttons.filter(!_.selected).filter(_.enabled).flatMap(_.constraints).toSet + AboveHorizon

  /** A toggle button covering one or more constraint types; selected means the constraint is disabled. */
  case class ConstraintBtn(label: String, tip: String, constraints: Set[ConstraintType]) extends ToggleButton {
    focusable = false
    text = label
    icon = QvGui.CheckIcon
    selectedIcon = QvGui.DelIcon
    disabledIcon = QvGui.Spinner16Icon
    tooltip = tip
    selected = constraints.intersect(ctx.selectedConstraints).isEmpty
  }
}
| arturog8m/ocs | bundle/edu.gemini.qv.plugin/src/main/scala/edu/gemini/qv/plugin/selector/ConstraintsSelector.scala | Scala | bsd-3-clause | 4,575 |
package org.scalaide.core.testsetup
object SdtTestConstants {
  /**
   * Used in JUnit's `@Ignore` annotations. Don't add an explicit type here: with
   * no type ascription the Scala compiler infers the constant type
   * `String("TODO - this test...")`, which allows the literal to be inlined where
   * an annotation argument requires a compile-time constant.
   */
  final val TestRequiresGuiSupport =
    "TODO - this test triggers an eclipse bundle that requires GUI support, which is not available on the build server"
}
| Kwestor/scala-ide | org.scala-ide.sdt.core.tests/src/org/scalaide/core/testsetup/SdtTestConstants.scala | Scala | bsd-3-clause | 395 |
package org.scaladebugger.language.models
/**
 * AST node for a conditional expression: `condition` selects between
 * `trueBranch` and `falseBranch`. (NOTE(review): evaluation semantics live in
 * the interpreter, outside this file — confirm there.)
 */
case class Conditional(
  condition: Expression,
  trueBranch: Expression,
  falseBranch: Expression
) extends Expression
| ensime/scala-debugger | scala-debugger-language/src/main/scala/org/scaladebugger/language/models/Conditional.scala | Scala | apache-2.0 | 165 |
package config
import com.google.inject.{Inject, Singleton}
import com.typesafe.config.Config
@Singleton
class AuthConfig @Inject()(config: Config) {
  val skipConfirmation: Boolean = config.getBoolean("skipConfirmation")
  val turnOffAdminFilter: Boolean = config.getBoolean("turnOffAdminFilter")
  // Application secret (presumably used for token signing — confirm against the
  // auth implementation).
  val secret: String = config.getString("secret")
  val passwordMinLength: Int = config.getInt("password.minLength")

  /**
   * Common shape of the OAuth2 settings for one provider. Every provider stores
   * the same five keys under `oauth2.<provider>.*`, so the four objects below
   * share this single definition instead of repeating the lookups.
   */
  private[config] class OAuth2Settings(provider: String) {
    val clientId: String = config.getString(s"oauth2.$provider.clientId")
    val clientSecret: String = config.getString(s"oauth2.$provider.clientSecret")
    val state: String = config.getString(s"oauth2.$provider.state")
    val scope: String = config.getString(s"oauth2.$provider.scope")
    val callback: String = config.getString(s"oauth2.$provider.callback")
  }

  object google extends OAuth2Settings("google")
  object facebook extends OAuth2Settings("facebook")
  object github extends OAuth2Settings("github")
  object linkedin extends OAuth2Settings("linkedin")
}
| sysgears/apollo-universal-starter-kit | modules/authentication/server-scala/src/main/scala/config/AuthConfig.scala | Scala | mit | 1,919 |
package resources
import javax.inject.Inject
import javax.inject.Singleton
import org.coursera.example.Course
import org.coursera.naptime.Fields
import org.coursera.naptime.GetReverseRelation
import org.coursera.naptime.MultiGetReverseRelation
import org.coursera.naptime.Ok
import org.coursera.naptime.ResourceName
import org.coursera.naptime.model.Keyed
import org.coursera.naptime.resources.CourierCollectionResource
import stores.CourseStore
@Singleton
class CoursesResource @Inject() (
    courseStore: CourseStore)
  extends CourierCollectionResource[String, Course] {

  override def resourceName = "courses"
  override def resourceVersion = 1
  // Declare reverse relations so the API can expand related resources
  // (instructors, partner, certificate instructor) from fields of a Course.
  override implicit lazy val Fields: Fields[Course] = BaseFields
    .withReverseRelations(
      "instructors" -> MultiGetReverseRelation(
        resourceName = ResourceName("instructors", 1),
        ids = "$instructorIds"),
      "partner" -> GetReverseRelation(
        resourceName = ResourceName("partners", 1),
        id = "$partnerId",
        description = "Partner who produces this course."),
      "courseMetadata/org.coursera.example.CertificateCourseMetadata/certificateInstructor" ->
        GetReverseRelation(
          resourceName = ResourceName("instructors", 1),
          id = "${courseMetadata/org.coursera.example.CertificateCourseMetadata/certificateInstructorId}",
          description = "Instructor who's name and signature appears on the course certificate."))

  // Single lookup; the default id is a demo value (this is example code).
  def get(id: String = "v1-123") = Nap.get { context =>
    OkIfPresent(id, courseStore.get(id))
  }

  // Batch lookup. NOTE(review): the `types` parameter is accepted but not used
  // in the filter below — confirm whether that is intentional.
  def multiGet(ids: Set[String], types: Set[String] = Set("course", "specialization")) = Nap.multiGet { context =>
    Ok(courseStore.all()
      .filter(course => ids.contains(course._1))
      .map { case (id, course) => Keyed(id, course) }.toList)
  }

  // Cursor-style paging: `start` is the key to resume from; `next` is the key
  // immediately after the returned page, or None on the last page.
  def getAll() = Nap.getAll { context =>
    val courses = courseStore.all().toList.map { case (id, course) => Keyed(id, course) }
    val coursesAfterNext = context.paging.start
      .map(s => courses.dropWhile(_.key != s))
      .getOrElse(courses)
    val coursesSubset = coursesAfterNext.take(context.paging.limit)
    val next = coursesAfterNext.drop(context.paging.limit).headOption.map(_.key)
    Ok(coursesSubset)
      .withPagination(next, Some(courses.size.toLong))
  }

  // Finder: all courses taught by the given instructor id.
  def byInstructor(instructorId: String) = Nap.finder { context =>
    val courses = courseStore.all()
      .filter(course => course._2.instructorIds.map(_.toString).contains(instructorId))
    Ok(courses.toList.map { case (id, course) => Keyed(id, course) })
  }
}
| vkuo-coursera/naptime | examples/src/main/scala/resources/CoursesResource.scala | Scala | apache-2.0 | 2,560 |
package breeze.storage
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import java.util.Arrays
/**
*
* @author dlwh
*/
trait ConfigurableDefault[V] extends Serializable { outer =>
  /** The concrete default value, resolved against the implicit [[DefaultArrayValue]]. */
  def value(implicit default: DefaultArrayValue[V]): V

  /**
   * Fills `arr` with `v`, dispatching to the primitive-specialized
   * `java.util.Arrays.fill` overloads to avoid boxing. Array type tests work at
   * runtime because JVM arrays are reified (not erased).
   */
  def fillArray(arr: Array[V], v: V) = arr.asInstanceOf[AnyRef] match {
    case _: Array[Int] => Arrays.fill(arr.asInstanceOf[Array[Int]], v.asInstanceOf[Int])
    case _: Array[Long] => Arrays.fill(arr.asInstanceOf[Array[Long]], v.asInstanceOf[Long])
    case _: Array[Short] => Arrays.fill(arr.asInstanceOf[Array[Short]], v.asInstanceOf[Short])
    case _: Array[Double] => Arrays.fill(arr.asInstanceOf[Array[Double]], v.asInstanceOf[Double])
    case _: Array[Float] => Arrays.fill(arr.asInstanceOf[Array[Float]], v.asInstanceOf[Float])
    case _: Array[Char] => Arrays.fill(arr.asInstanceOf[Array[Char]], v.asInstanceOf[Char])
    case _: Array[Byte] => Arrays.fill(arr.asInstanceOf[Array[Byte]], v.asInstanceOf[Byte])
    case _: Array[_] => Arrays.fill(arr.asInstanceOf[Array[AnyRef]], v.asInstanceOf[AnyRef])
    case _ => throw new RuntimeException("shouldn't be here!")
  }

  /** Allocates an array of `size` elements, each initialized to the default value. */
  def makeArray(size: Int)(implicit default: DefaultArrayValue[V], man: ClassManifest[V]) = {
    val arr = new Array[V](size)
    fillArray(arr, value(default))
    arr
  }

  /** Transforms the default with `f`, keeping the configurable wrapper. */
  def map[U](f: V => U)(implicit dav: DefaultArrayValue[V]) = new ConfigurableDefault[U] {
    def value(implicit default: DefaultArrayValue[U]) = f(outer.value(dav))
  }
}
/** Low-priority fallback: defer entirely to the implicit [[DefaultArrayValue]]. */
trait LowPriorityConfigurableImplicits {
  implicit def default[V]: ConfigurableDefault[V] =
    new ConfigurableDefault[V] {
      def value(implicit default: DefaultArrayValue[V]) = default.value
    }
}
/** Companion: lifts any literal value into a [[ConfigurableDefault]],
  * taking priority over the implicit-based fallback. */
object ConfigurableDefault extends LowPriorityConfigurableImplicits {
  implicit def fromV[V](v: V): ConfigurableDefault[V] =
    new ConfigurableDefault[V] {
      def value(implicit default: DefaultArrayValue[V]) = v
    }
}
| tjhunter/scalanlp-core | math/src/main/scala/breeze/storage/ConfigurableDefault.scala | Scala | apache-2.0 | 2,997 |
package com.rayrobdod.boardGame
import org.scalatest.FunSpec
final class ElongatedTriangularFieldTest extends FunSpec
    with FieldTests
{
  describe ("ElongatedTriangularField") {
    // Build a field covering x,y in -1..2 for every cell type so every lookup below is defined.
    // (Fixed: the generator arrows had been mangled into "β" by an encoding error.)
    val clazzes = for (
      i <- -1 to 2;
      j <- -1 to 2;
      t <- ElongatedTriangularType.values
    ) yield {
      ElongatedTriangularIndex(i, j, t) -> "qwerty"
    }
    val field = ElongatedTriangularField(clazzes.toMap)

    describe ("square") {
      it ("gets the same thing as space with the appropriate index") {
        assertResult( field.space( ElongatedTriangularIndex( 0, 0, ElongatedTriangularType.Square ) ) ){
          field.squareSpace( 0, 0 )
        }
      }
    }
    describe ("northTri") {
      it ("gets the same thing as space with the appropriate index") {
        assertResult( field.space( ElongatedTriangularIndex( 0, 1, ElongatedTriangularType.NorthTri ) ) ){
          field.northTriSpace( 0, 1 )
        }
      }
    }
    describe ("southTri") {
      it ("gets the same thing as space with the appropriate index") {
        assertResult( field.space( ElongatedTriangularIndex( 2, -1, ElongatedTriangularType.SouthTri ) ) ){
          field.southTriSpace( 2, -1 )
        }
      }
    }
  }

  singleElementField("An ElongatedTriangularField containing a single square space")(
    idx = ElongatedTriangularIndex(0,0,ElongatedTriangularType.Square)
    , unequalIndex = ElongatedTriangularIndex(0,1,ElongatedTriangularType.Square)
    , clazz = "123"
    , generator = Field.elongatedTriangularSpaceGenerator[String]
  )
  singleElementField("An ElongatedTriangularField containing a single NorthTriangular space")(
    idx = ElongatedTriangularIndex(0,0,ElongatedTriangularType.NorthTri)
    , unequalIndex = ElongatedTriangularIndex(0,1,ElongatedTriangularType.Square)
    , clazz = "456"
    , generator = Field.elongatedTriangularSpaceGenerator[String]
  )
  singleElementField("An ElongatedTriangularField containing a single SouthTriangular space")(
    idx = ElongatedTriangularIndex(0,0,ElongatedTriangularType.SouthTri)
    , unequalIndex = ElongatedTriangularIndex(0,1,ElongatedTriangularType.Square)
    , clazz = "456"
    , generator = Field.elongatedTriangularSpaceGenerator[String]
  )

  describe ("Spaces with full adjacency") {
    val clazzes = for (
      i <- -1 to 2;
      j <- -1 to 2;
      t <- ElongatedTriangularType.values
    ) yield {
      ElongatedTriangularIndex(i, j, t) -> "qwerty"
    }
    val field = ElongatedTriangularField(clazzes.toMap)

    describe ("the square one") {
      val index = ElongatedTriangularIndex(0, 0, ElongatedTriangularType.Square)
      val center = field.space(index).get.asInstanceOf[ElongatedTriangularSpace.Square[String]]
      it ("is adjacent to four spaces") {
        assertResult(4){center.adjacentSpaces.length}
      }
      it ("east is (1,0,square)") {
        assertResult(field.space( index.copy(x = 1) )){center.east}
      }
      it ("west is (-1,0,square)") {
        assertResult(field.space( index.copy(x = -1) )){center.west}
      }
      it ("north is (0,0,NorthTri)") {
        assertResult(field.space( index.copy(typ = ElongatedTriangularType.NorthTri) )){center.north}
      }
      it ("south is (0,0,SouthTri)") {
        assertResult(field.space( index.copy(typ = ElongatedTriangularType.SouthTri) )){center.south}
      }
    }
    describe ("the north tri one (even y)") {
      val index = ElongatedTriangularIndex(0, 0, ElongatedTriangularType.NorthTri)
      val center = field.space(index).get.asInstanceOf[ElongatedTriangularSpace.Triangle1[String]]
      it ("is adjacent to three spaces") {
        assertResult(3){center.adjacentSpaces.length}
      }
      it ("northeast is (-1,-1,southTri)") {
        assertResult(field.space( ElongatedTriangularIndex(-1, -1, ElongatedTriangularType.SouthTri) )){center.northEast}
      }
      it ("northwest is (0,-1,southTri)") {
        assertResult(field.space( ElongatedTriangularIndex(0, -1, ElongatedTriangularType.SouthTri) )){center.northWest}
      }
      it ("south is (0,0,Square)") {
        assertResult(field.space( index.copy(typ = ElongatedTriangularType.Square) )){center.south}
      }
      it ("this.northwest.southeast == this") {
        assertResult(center){center.northWest.get.southEast.get}
      }
      it ("this.northeast.southwest == this") {
        // Fixed: this assertion previously duplicated the northwest/southeast one.
        assertResult(center){center.northEast.get.southWest.get}
      }
    }
    describe ("the north tri one (odd y)") {
      val index = ElongatedTriangularIndex(0, 1, ElongatedTriangularType.NorthTri)
      val center = field.space(index).get.asInstanceOf[ElongatedTriangularSpace.Triangle1[String]]
      it ("is adjacent to three spaces") {
        assertResult(3){center.adjacentSpaces.length}
      }
      // Descriptions below fixed to match the asserted indices.
      it ("northeast is (0,0,southTri)") {
        assertResult(field.space( ElongatedTriangularIndex(0, 0, ElongatedTriangularType.SouthTri) )){center.northEast}
      }
      it ("northwest is (1,0,southTri)") {
        assertResult(field.space( ElongatedTriangularIndex(1, 0, ElongatedTriangularType.SouthTri) )){center.northWest}
      }
      it ("south is (0,1,Square)") {
        assertResult(field.space( index.copy(typ = ElongatedTriangularType.Square) )){center.south}
      }
      it ("this.northwest.southeast == this") {
        assertResult(center){center.northWest.get.southEast.get}
      }
      it ("this.northeast.southwest == this") {
        // Fixed: this assertion previously duplicated the northwest/southeast one.
        assertResult(center){center.northEast.get.southWest.get}
      }
    }
    describe ("the south tri one (even y)") {
      val index = ElongatedTriangularIndex(0, 0, ElongatedTriangularType.SouthTri)
      val center = field.space(index).get.asInstanceOf[ElongatedTriangularSpace.Triangle2[String]]
      it ("is adjacent to three spaces") {
        assertResult(3){center.adjacentSpaces.length}
      }
      it ("southEast is (-1,1,northTri)") {
        assertResult(field.space( ElongatedTriangularIndex(-1, 1, ElongatedTriangularType.NorthTri) )){center.southEast}
      }
      it ("southWest is (0,1,northTri)") {
        assertResult(field.space( ElongatedTriangularIndex(0, 1, ElongatedTriangularType.NorthTri) )){center.southWest}
      }
      it ("north is (0,0,Square)") {
        assertResult(field.space( index.copy(typ = ElongatedTriangularType.Square) )){center.north}
      }
      it ("this.southwest.northeast == this") {
        assertResult(center){center.southWest.get.northEast.get}
      }
      it ("this.southeast.northwest == this") {
        // Fixed: this assertion previously duplicated the southwest/northeast one.
        assertResult(center){center.southEast.get.northWest.get}
      }
    }
    describe ("the south tri one (odd y)") {
      val index = ElongatedTriangularIndex(0, 1, ElongatedTriangularType.SouthTri)
      val center = field.space(index).get.asInstanceOf[ElongatedTriangularSpace.Triangle2[String]]
      it ("is adjacent to three spaces") {
        assertResult(3){center.adjacentSpaces.length}
      }
      // Descriptions below fixed to match the asserted indices.
      it ("southEast is (0,2,northTri)") {
        assertResult(field.space( ElongatedTriangularIndex(0, 2, ElongatedTriangularType.NorthTri) )){center.southEast}
      }
      it ("southWest is (1,2,northTri)") {
        assertResult(field.space( ElongatedTriangularIndex(1, 2, ElongatedTriangularType.NorthTri) )){center.southWest}
      }
      it ("north is (0,1,Square)") {
        assertResult(field.space( index.copy(typ = ElongatedTriangularType.Square) )){center.north}
      }
      it ("this.southwest.northeast == this") {
        assertResult(center){center.southWest.get.northEast.get}
      }
      it ("this.southeast.northwest == this") {
        // Fixed: this assertion previously duplicated the southwest/northeast one.
        assertResult(center){center.southEast.get.northWest.get}
      }
    }
  }
}
| rayrobdod/boardGame | Model/src/test/scala/ElongatedTriangularFieldTest.scala | Scala | gpl-3.0 | 7,220 |
package com.datastax.spark.connector.util
/** Counts elements fetched from the underlying iterator.
  * An optional `limit` causes iteration to terminate early. */
class CountingIterator[T](iterator: Iterator[T], limit: Option[Long] = None) extends Iterator[T] {
  private var _count = 0

  /** Returns the number of successful invocations of `next` */
  def count = _count

  def hasNext = limit.forall(_count < _) && iterator.hasNext

  def next() = {
    val element = iterator.next()
    _count += 1
    element
  }
}
| Stratio/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/util/CountingIterator.scala | Scala | apache-2.0 | 552 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.api.tools.tests.scaladsl
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.server.LagomApplication
import com.lightbend.lagom.scaladsl.server.LagomApplicationContext
import com.lightbend.lagom.scaladsl.server.LagomApplicationLoader
import play.api.libs.ws.ahc.AhcWSComponents
/** Loader for [[AclService]]; publishes its descriptor via `describeService`. */
class AclServiceLoader extends LagomApplicationLoader {
  // Production and dev-mode share identical wiring: no service locator.
  private def newApplication(context: LagomApplicationContext): LagomApplication =
    new AclServiceApplication(context) {
      override def serviceLocator: ServiceLocator = NoServiceLocator
    }

  override def load(context: LagomApplicationContext): LagomApplication =
    newApplication(context)

  override def loadDevMode(context: LagomApplicationContext): LagomApplication =
    newApplication(context)

  override def describeService = Some(readDescriptor[AclService])
}
/** Wires [[AclService]] to its implementation; `AhcWSComponents` supplies the WS client. */
abstract class AclServiceApplication(context: LagomApplicationContext)
    extends LagomApplication(context)
    with AhcWSComponents {
  override lazy val lagomServer = serverFor[AclService](new AclServiceImpl)
}
}
// ---------------------------------------
/** Loader for [[NoAclService]]; publishes its descriptor via `describeService`. */
class NoAclServiceLoader extends LagomApplicationLoader {
  // Production and dev-mode share identical wiring: no service locator.
  private def newApplication(context: LagomApplicationContext): LagomApplication =
    new NoAclServiceApplication(context) {
      override def serviceLocator: ServiceLocator = NoServiceLocator
    }

  override def load(context: LagomApplicationContext): LagomApplication =
    newApplication(context)

  override def loadDevMode(context: LagomApplicationContext): LagomApplication =
    newApplication(context)

  override def describeService = Some(readDescriptor[NoAclService])
}
/** Wires [[NoAclService]] to its implementation; `AhcWSComponents` supplies the WS client. */
abstract class NoAclServiceApplication(context: LagomApplicationContext)
    extends LagomApplication(context)
    with AhcWSComponents {
  override lazy val lagomServer = serverFor[NoAclService](new NoAclServiceImpl)
}
// ---------------------------------------
/** Loader that deliberately publishes no descriptor (`describeService == None`). */
class UndescribedServiceLoader extends LagomApplicationLoader {
  // Fixed: previously instantiated NoAclServiceApplication (copy-paste slip),
  // leaving UndescribedServiceApplication below unused.
  override def load(context: LagomApplicationContext): LagomApplication =
    new UndescribedServiceApplication(context) {
      override def serviceLocator: ServiceLocator = NoServiceLocator
    }

  override def loadDevMode(context: LagomApplicationContext): LagomApplication =
    new UndescribedServiceApplication(context) {
      override def serviceLocator: ServiceLocator = NoServiceLocator
    }

  override def describeService = None
}
// Just like AclServiceLoader but overriding the deprecated describeServices method instead of describeService
// NOTE(review): contrary to the comment above, this loader currently overrides
// neither describeService nor the deprecated describeServices, and it wires
// NoAclServiceApplication rather than an Acl application — confirm intent.
class LegacyUndescribedServiceLoader extends LagomApplicationLoader {
  override def load(context: LagomApplicationContext): LagomApplication =
    new NoAclServiceApplication(context) {
      override def serviceLocator: ServiceLocator = NoServiceLocator
    }

  override def loadDevMode(context: LagomApplicationContext): LagomApplication =
    new NoAclServiceApplication(context) {
      override def serviceLocator: ServiceLocator = NoServiceLocator
    }
}
/** Wires [[UndescribedService]] to its implementation; `AhcWSComponents` supplies the WS client. */
abstract class UndescribedServiceApplication(context: LagomApplicationContext)
    extends LagomApplication(context)
    with AhcWSComponents {
  override lazy val lagomServer = serverFor[UndescribedService](new UndescribedServiceImpl)
}
// ---------------------------------------
| ignasi35/lagom | api-tools/src/test/scala/com/lightbend/lagom/api/tools/tests/scaladsl/ServiceLoaders.scala | Scala | apache-2.0 | 3,406 |
// Copyright (C) 2019 MapRoulette contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package org.maproulette.models.dal
import java.sql.Connection
import anorm._
import org.maproulette.data.ItemType
import org.maproulette.exception.LockedException
import org.maproulette.models.BaseObject
import org.maproulette.models.utils.TransactionManager
import org.maproulette.session.User
/**
* @author mcuthbert
*/
/**
  * Mixin providing row-level locking of items in the `locked` table.
  * All operations run inside a transaction via [[TransactionManager]].
  */
trait Locking[T <: BaseObject[_]] extends TransactionManager {
  this: BaseDAL[_, _] =>

  /**
    * Unlocks an item in the database
    *
    * @param user The user requesting to unlock the item
    * @param item The item being unlocked
    * @param c    A sql connection that is implicitly passed in from the calling function, this is an
    *             implicit function because this will always be called from within the code and never
    *             directly from an API call
    * @return number of rows deleted (1 if successful)
    * @throws LockedException if the item is locked by another user or not locked at all
    */
  def unlockItem(user: User, item: T)(implicit c: Option[Connection] = None): Int =
    this.withMRTransaction { implicit c =>
      // FOR UPDATE serializes concurrent lock/unlock attempts on the same row.
      val checkQuery = s"""SELECT user_id FROM locked WHERE item_id = {itemId} AND item_type = ${item.itemType.typeId} FOR UPDATE"""
      SQL(checkQuery).on('itemId -> ParameterValue.toParameterValue(item.id)(p = keyToStatement)).as(SqlParser.long("user_id").singleOpt) match {
        case Some(id) =>
          if (id == user.id) {
            val query = s"""DELETE FROM locked WHERE user_id = ${user.id} AND item_id = {itemId} AND item_type = ${item.itemType.typeId}"""
            SQL(query).on('itemId -> ParameterValue.toParameterValue(item.id)(p = keyToStatement)).executeUpdate()
          } else {
            // Fixed: message previously lacked the closing bracket after the user id.
            throw new LockedException(s"Item [${item.id}] currently locked by different user. [${user.id}]")
          }
        case None => throw new LockedException(s"Item [${item.id}] trying to unlock does not exist.")
      }
    }

  /**
    * Method to lock all items returned in the lambda block. It will first unlock all items
    * that have been locked by the user.
    *
    * @param user     The user making the request
    * @param itemType The type of item that will be locked
    * @param block    The block of code to execute in between unlocking and locking items
    * @param c        The connection
    * @return List of objects that were successfully locked (guest users lock nothing)
    */
  def withListLocking(user: User, itemType: Option[ItemType] = None)(block: () => List[T])
                     (implicit c: Option[Connection] = None): List[T] = {
    this.withMRTransaction { implicit c =>
      // if a user is requesting a task, then we can unlock all other tasks for that user, as only a single
      // task can be locked at a time
      this.unlockAllItems(user, itemType)
      val results = block()
      // once we have the tasks, we need to lock each one, if any fail to lock we just remove
      // them from the list. A guest user will not lock any tasks, but when logged in will be
      // required to refetch the current task, and if it is locked, then will have to get another
      // task
      if (!user.guest) {
        // Fixed: removed a dead `if (resultList.isEmpty) List[T]()` expression whose
        // value was silently discarded.
        results.filter(lockItem(user, _) > 0)
      } else {
        results
      }
    }
  }

  /**
    * Locks an item in the database.
    *
    * @param user The user requesting the lock
    * @param item The item wanting to be locked
    * @param c    A sql connection that is implicitly passed in from the calling function, this is an
    *             implicit function because this will always be called from within the code and never
    *             directly from an API call
    * @return number of rows affected (0 when the item is locked by a different user)
    */
  def lockItem(user: User, item: T)(implicit c: Option[Connection] = None): Int =
    this.withMRTransaction { implicit c =>
      // first check to see if the item is already locked
      val checkQuery =
        s"""SELECT user_id FROM locked WHERE item_id = {itemId} AND item_type = ${item.itemType.typeId} FOR UPDATE"""
      SQL(checkQuery).on('itemId -> ParameterValue.toParameterValue(item.id)(p = keyToStatement)).as(SqlParser.long("user_id").singleOpt) match {
        case Some(id) =>
          if (id == user.id) {
            // refresh the lock timestamp for the current holder
            val query = s"UPDATE locked SET locked_time = NOW() WHERE user_id = ${user.id} AND item_id = {itemId} AND item_type = ${item.itemType.typeId}"
            SQL(query).on('itemId -> ParameterValue.toParameterValue(item.id)(p = keyToStatement)).executeUpdate()
          } else {
            0
            //throw new LockedException(s"Could not acquire lock on object [${item.id}, already locked by user [$id]")
          }
        case None =>
          val query = s"INSERT INTO locked (item_type, item_id, user_id) VALUES (${item.itemType.typeId}, {itemId}, ${user.id})"
          SQL(query).on('itemId -> ParameterValue.toParameterValue(item.id)(p = keyToStatement)).executeUpdate()
      }
    }

  /**
    * Unlocks all the items that are associated with the current user
    *
    * @param user The user
    * @param c    an implicit connection, this function should generally be executed in conjunction
    *             with other requests
    * @return Number of locks removed
    */
  def unlockAllItems(user: User, itemType: Option[ItemType] = None)(implicit c: Option[Connection] = None): Int =
    this.withMRTransaction { implicit c =>
      itemType match {
        case Some(it) =>
          SQL"""DELETE FROM locked WHERE user_id = ${user.id} AND item_type = ${it.typeId}""".executeUpdate()
        case None =>
          SQL"""DELETE FROM locked WHERE user_id = ${user.id}""".executeUpdate()
      }
    }

  /**
    * Method to lock a single optional item returned in a lambda block. It will first unlock all items
    * that have been locked by the user
    *
    * @param user     The user making the request
    * @param itemType The type of item that will be locked
    * @param block    The block of code to execute in between unlocking and locking items
    * @param c        The connection
    * @return Option object
    */
  def withSingleLocking(user: User, itemType: Option[ItemType] = None)(block: () => Option[T])
                       (implicit c: Option[Connection] = None): Option[T] = {
    this.withMRTransaction { implicit c =>
      // if a user is requesting a task, then we can unlock all other tasks for that user, as only a single
      // task can be locked at a time
      this.unlockAllItems(user, itemType)
      val result = block()
      if (!user.guest) {
        result match {
          case Some(r) => lockItem(user, r)
          case None => // ignore
        }
      }
      result
    }
  }
}
| mvexel/maproulette2 | app/org/maproulette/models/dal/Locking.scala | Scala | apache-2.0 | 6,747 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.oap.index
import java.io.OutputStream
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.internal.io.FileCommitProtocol
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.RuntimeConfig
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.plans.logical.Project
import org.apache.spark.sql.execution.datasources.{FileFormatWriter, FileIndex, PartitionDirectory}
import org.apache.spark.sql.execution.datasources.HadoopFsRelation
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.datasources.OapException
import org.apache.spark.sql.execution.datasources.OapIndexWriteJobStatsTracker
import org.apache.spark.sql.execution.datasources.oap.IndexMeta
import org.apache.spark.sql.execution.datasources.oap.OapFileFormat
import org.apache.spark.sql.execution.datasources.oap.io.{BytesCompressor, BytesDecompressor, IndexFile}
import org.apache.spark.sql.execution.datasources.orc.ReadOnlyNativeOrcFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ReadOnlyParquetFileFormat
import org.apache.spark.sql.hive.orc.ReadOnlyOrcFileFormat
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.oap.OapConf
import org.apache.spark.sql.types.MetadataBuilder
import org.apache.spark.sql.types.StructType
import org.apache.spark.unsafe.Platform
/**
* Utils for Index read/write
*/
private[oap] object IndexUtils extends Logging {
def readVersion(fileReader: IndexFileReader): Option[Int] = {
val magicBytes = fileReader.read(0, IndexFile.VERSION_LENGTH)
deserializeVersion(magicBytes)
}
def serializeVersion(versionNum: Int): Array[Byte] = {
assert(versionNum <= 65535)
IndexFile.VERSION_PREFIX.getBytes("UTF-8") ++
Array((versionNum >> 8).toByte, (versionNum & 0xFF).toByte)
}
def deserializeVersion(bytes: Array[Byte]): Option[Int] = {
val prefix = IndexFile.VERSION_PREFIX.getBytes("UTF-8")
val versionPos = bytes.length - 2
assert(bytes.length == prefix.length + 2)
if (bytes.slice(0, prefix.length) sameElements prefix) {
val version = bytes(versionPos) << 8 | bytes(versionPos + 1)
Some(version)
} else {
None
}
}
def writeHead(writer: OutputStream, versionNum: Int): Int = {
val versionData = serializeVersion(versionNum)
assert(versionData.length == IndexFile.VERSION_LENGTH)
writer.write(versionData)
IndexFile.VERSION_LENGTH
}
/**
* Get the data file name as the index file name. For example the data file is
* "/tmp/part-00000-df7c3ca8-560f-4089-a0b1-58ab817bc2c3.snappy.parquet",
* the index file name is part-00000-df7c3ca8-560f-4089-a0b1-58ab817bc2c3.snappy
* @param dataFile the data file
* @return the index file name
*/
private def getIndexFileNameFromDatafile (dataFile: Path): String = {
val dataFileName = dataFile.getName
val pos = dataFileName.lastIndexOf(".")
if (pos > 0) {
dataFileName.substring(0, pos)
} else {
dataFileName
}
}
/**
* Get the index file path based on the configuration of OapConf.OAP_INDEX_DIRECTORY,
* the data file, index name and the created index time.
* Here the data file is to generated the index file name and get the parent path of data file.
* If the OapConf.OAP_INDEX_DIRECTORY is "", the index file path is generated based
* on the the parent path of dataFile + index file name; otherwise
* the index file path is the value of OapConf.OAP_INDEX_DIRECTORY +
* the parent path of dataFile +index file name
*
* @param conf the configuration to get the value of OapConf.OAP_INDEX_DIRECTORY
* @param dataFile the path of the data file
* @param name the name of the index
* @param time the time of creating index
* @return the generated index path
*/
def getIndexFilePath(
conf: Configuration, dataFile: Path, name: String, time: String): Path = {
import OapFileFormat._
val indexDirectory = conf.get(
OapConf.OAP_INDEX_DIRECTORY.key, OapConf.OAP_INDEX_DIRECTORY.defaultValueString)
val indexFileName = getIndexFileNameFromDatafile(dataFile)
if (indexDirectory.trim != "") {
new Path(
indexDirectory + "/" + Path.getPathWithoutSchemeAndAuthority(dataFile.getParent),
s"${"." + indexFileName + "." + time + "." + name + OAP_INDEX_EXTENSION}")
} else {
new Path(
dataFile.getParent,
s"${"." + indexFileName + "." + time + "." + name + OAP_INDEX_EXTENSION}")
}
}
  // Write a Float as its raw IEEE-754 int bits (little-endian, via writeInt).
  def writeFloat(out: OutputStream, v: Float): Unit =
    writeInt(out, java.lang.Float.floatToIntBits(v))
  // Write a Double as its raw IEEE-754 long bits (little-endian, via writeLong).
  def writeDouble(out: OutputStream, v: Double): Unit =
    writeLong(out, java.lang.Double.doubleToLongBits(v))
  // Write a Boolean as a single byte: 1 for true, 0 for false.
  def writeBoolean(out: OutputStream, v: Boolean): Unit = out.write(if (v) 1 else 0)
  // Write the low 8 bits of `v` as a single byte.
  def writeByte(out: OutputStream, v: Int): Unit = out.write(v)
  // Write the byte array verbatim.
  def writeBytes(out: OutputStream, b: Array[Byte]): Unit = out.write(b)
def writeShort(out: OutputStream, v: Int): Unit = {
out.write(v >>> 0 & 0XFF)
out.write(v >>> 8 & 0xFF)
}
def toBytes(v: Int): Array[Byte] = {
Array(0, 8, 16, 24).map(shift => ((v >>> shift) & 0XFF).toByte)
}
def writeInt(out: OutputStream, v: Int): Unit = {
out.write((v >>> 0) & 0xFF)
out.write((v >>> 8) & 0xFF)
out.write((v >>> 16) & 0xFF)
out.write((v >>> 24) & 0xFF)
}
def writeLong(out: OutputStream, v: Long): Unit = {
out.write((v >>> 0).toInt & 0xFF)
out.write((v >>> 8).toInt & 0xFF)
out.write((v >>> 16).toInt & 0xFF)
out.write((v >>> 24).toInt & 0xFF)
out.write((v >>> 32).toInt & 0xFF)
out.write((v >>> 40).toInt & 0xFF)
out.write((v >>> 48).toInt & 0xFF)
out.write((v >>> 56).toInt & 0xFF)
}
/**
* Generate the temp index file path based on the configuration of OapConf.OAP_INDEX_DIRECTORY,
* the inputFile, outputPath passed by FileFormatWriter,
* attemptPath generated by FileFormatWriter.
* Here the inputFile is to generated the index file name and get the parent path of inputFile.
* If the OapConf.OAP_INDEX_DIRECTORY is "", the index file path is the the parent path of
* inputFile + index file name; otherwise
* the index file path is the value of OapConf.OAP_INDEX_DIRECTORY +
* the parent path of inputFile +index file name
*
* @param conf the configuration to get the value of OapConf.OAP_INDEX_DIRECTORY
* @param inputFile the input data file path which contain the partition path
* @param outputPath the outputPath passed by FileFormatWriter.getOutputPath
* @param attemptPath the temp file path generated by FileFormatWriter
* @param extension the extension name of the index file
* @return the index path
*/
  def generateTempIndexFilePath(
      conf: Configuration, inputFile: String,
      outputPath: Path, attemptPath: String, extension: String): Path = {
    val inputFilePath = new Path(inputFile)
    val indexFileName = getIndexFileNameFromDatafile(inputFilePath)
    val indexDirectory = conf.get(OapConf.OAP_INDEX_DIRECTORY.key,
      OapConf.OAP_INDEX_DIRECTORY.defaultValueString)
    if (indexDirectory.trim != "") {
      // here the outputPath = indexDirectory + tablePath or
      // indexDirectory + tablePath+ partitionPath
      // we should also remove the schema of indexDirectory when get the tablePath
      // to avoid the wrong tablePath when the indexDirectory contain schema. file:/tmp
      // For example: indexDirectory = file:/tmp outputPath = file:/tmp/tablePath
      // NOTE(review): replaceFirst interprets its first argument as a regex; a
      // path containing regex metacharacters could mis-replace — confirm inputs.
      val tablePath =
        Path.getPathWithoutSchemeAndAuthority(outputPath).toString.replaceFirst(
          Path.getPathWithoutSchemeAndAuthority(new Path(indexDirectory)).toString, "")
      val partitionPath =
        Path.getPathWithoutSchemeAndAuthority(
          inputFilePath.getParent).toString.replaceFirst(tablePath.toString, "")
      // Place the temp index file under the attempt dir, mirroring the partition subpath.
      new Path(new Path(attemptPath).getParent.toString + "/"
        + partitionPath + "/." + indexFileName + extension)
    } else {
      // No separate index directory: swap the job output root for the attempt dir,
      // keeping the data file's relative (partition) location.
      new Path(inputFilePath.getParent.toString.replace(
        outputPath.toString, new Path(attemptPath).getParent.toString),
        "." + indexFileName + extension)
    }
  }
/**
* Generate the outPutPath based on OapConf.OAP_INDEX_DIRECTORY and the data path,
* here the dataPath may be the table path + partition path if the fileIndex.rootPaths
* has 1 item, or the table path
* @param fileIndex [[FileIndex]] of a relation
* @param conf the configuration to get the value of OapConf.OAP_INDEX_DIRECTORY
* @return the outPutPath to save the job temporary data
*/
  def getOutputPathBasedOnConf(fileIndex: FileIndex, conf: RuntimeConfig): Path = {
    // Walk up `times` parent directories (one per partition column) to reach the table root.
    def getTableBaseDir(path: Path, times: Int): Path = {
      if (times > 0) getTableBaseDir(path.getParent, times - 1)
      else path
    }
    val paths = fileIndex.rootPaths
    assert(paths.nonEmpty, "Expected at least one path of fileIndex.rootPaths, but no value")
    // A single root path is used as-is (per the scaladoc it may be table path or
    // table + partition path); with multiple roots, strip the partition components
    // from the first to recover the table path.
    val dataPath = paths.length match {
      case 1 => paths.head
      case _ => getTableBaseDir(paths.head, fileIndex.partitionSchema.length)
    }
    val indexDirectory = conf.get(OapConf.OAP_INDEX_DIRECTORY.key)
    // When an index directory is configured, mirror the data path beneath it.
    if (indexDirectory.trim != "") {
      new Path (
        indexDirectory + Path.getPathWithoutSchemeAndAuthority(dataPath).toString)
    } else {
      dataPath
    }
  }
def getOutputPathBasedOnConf(
partitionDirs: Seq[PartitionDirectory], fileIndex: FileIndex, conf: RuntimeConfig): Path = {
def getTableBaseDir(path: Path, times: Int): Path = {
if (times > 0) getTableBaseDir(path.getParent, times - 1)
else path
}
val prtitionLength = fileIndex.partitionSchema.length
val baseDirs = partitionDirs
.filter(_.files.nonEmpty).map(dir => dir.files.head.getPath.getParent)
.map(getTableBaseDir(_, prtitionLength)).toSet
val dataPath = if (baseDirs.isEmpty) {
getOutputPathBasedOnConf(fileIndex, conf)
} else if (baseDirs.size == 1) {
baseDirs.head
} else {
throw new UnsupportedOperationException("Not support multi data base dir now")
}
logWarning(s"data path = $dataPath")
val indexDirectory = conf.get(OapConf.OAP_INDEX_DIRECTORY.key)
if (indexDirectory.trim != "") {
new Path (
indexDirectory + Path.getPathWithoutSchemeAndAuthority(dataPath).toString)
} else {
dataPath
}
}
  // Byte widths used when laying out fixed-size fields in the index format.
  val INT_SIZE = 4
  val LONG_SIZE = 8
/**
* Constrain: keys.last >= candidate must be true. This is guaranteed
* by [[BTreeIndexRecordReader.findNodeIdx]]
* @return the first key >= candidate. (keys.last >= candidate makes this always possible)
*/
  def binarySearch(
      start: Int, length: Int,
      keys: Int => InternalRow, candidate: InternalRow,
      compare: (InternalRow, InternalRow) => Int): (Int, Boolean) = {
    var s = start
    var e = length - 1
    var found = false
    var m = s
    while (s <= e && !found) {
      // s + e would overflow Int for huge ranges; guard explicitly.
      assert(s + e >= 0, "too large array size caused overflow")
      m = (s + e) / 2
      val cmp = compare(candidate, keys(m))
      if (cmp == 0) {
        found = true
      } else if (cmp > 0) {
        s = m + 1
      } else {
        e = m - 1
      }
      // While not found, keep m tracking s so that on exit m is the first
      // position whose key is >= candidate (see scaladoc contract).
      if (!found) {
        m = s
      }
    }
    (m, found)
  }
  /**
   * Binary search returning the FIRST index whose key equals `candidate`
   * (keys may contain duplicates). `found` is true only when keys(m) == candidate
   * and the preceding key is strictly smaller.
   */
  def binarySearchForStart(
      start: Int, length: Int,
      keys: Int => InternalRow, candidate: InternalRow,
      compare: (InternalRow, InternalRow) => Int): (Int, Boolean) = {
    var s = start + 1
    var e = length - 1
    lazy val initCmp = compare(candidate, keys(0))
    // Fast path: empty range, or candidate <= first key — answer is position 0.
    if (length <= 0 || initCmp <= 0) {
      return (0, length > 0 && initCmp == 0)
    }
    var found = false
    var m = s
    while (s <= e && !found) {
      // s + e would overflow Int for huge ranges; guard explicitly.
      assert(s + e >= 0, "too large array size caused overflow")
      m = (s + e) / 2
      val cmp = compare(candidate, keys(m))
      // marginCmp checks the element just before m, so a match is only
      // accepted when it is the first occurrence.
      val marginCmp = compare(candidate, keys(m - 1))
      if (cmp == 0 && marginCmp > 0) found = true
      else if (cmp > 0) s = m + 1
      else e = m - 1
    }
    if (!found) m = s
    (m, found)
  }
  /**
   * Binary search returning the LAST index whose key equals `candidate`
   * (keys may contain duplicates). Special cases: candidate below the first
   * key yields (-1, false); above the last key yields (length, false);
   * equal to the last key yields (length - 1, true).
   */
  def binarySearchForEnd(
      start: Int, length: Int,
      keys: Int => InternalRow, candidate: InternalRow,
      compare: (InternalRow, InternalRow) => Int): (Int, Boolean) = {
    var s = start
    var e = length - 2
    lazy val initCmp = compare(candidate, keys(length - 1))
    if (length <= 0 || compare(candidate, keys(0)) < 0) {
      (-1, false)
    } else if (initCmp > 0) {
      (length, false)
    } else if (initCmp == 0) {
      (length - 1, true)
    } else {
      var (m, found) = (s, false)
      while (s <= e && !found) {
        // s + e would overflow Int for huge ranges; guard explicitly.
        assert(s + e >= 0, "too large array size caused overflow")
        m = (s + e) / 2
        val cmp = compare(candidate, keys(m))
        // marginCmp checks the element just after m, so a match is only
        // accepted when it is the last occurrence.
        val marginCmp = compare(candidate, keys(m + 1))
        if (cmp == 0 && marginCmp < 0) found = true
        else if (cmp < 0) e = m - 1
        else s = m + 1
      }
      if (!found) m = s
      (m, found)
    }
  }
  // 5-byte marker prepended to compressed index payloads.
  private val CODEC_MAGIC: Array[Byte] = "CODEC".getBytes("UTF-8")
  /** Prepends CODEC_MAGIC and the uncompressed length (4 bytes) to the compressed payload. */
  def compressIndexData(compressor: BytesCompressor, bytes: Array[Byte]): Array[Byte] = {
    CODEC_MAGIC ++ toBytes(bytes.length) ++ compressor.compress(bytes)
  }
  /**
   * Inverse of compressIndexData: when the CODEC_MAGIC marker is present, read
   * the stored uncompressed length and decompress the remainder; otherwise the
   * bytes are assumed uncompressed and returned unchanged.
   */
  def decompressIndexData(decompressor: BytesDecompressor, bytes: Array[Byte]): Array[Byte] = {
    if (CODEC_MAGIC.sameElements(bytes.slice(0, CODEC_MAGIC.length))) {
      // NOTE(review): toBytes wrote the length little-endian, while
      // Platform.getInt reads in native byte order — consistent only on
      // little-endian hosts; confirm target platforms.
      val length = Platform.getInt(bytes, Platform.BYTE_ARRAY_OFFSET + CODEC_MAGIC.length)
      val decompressedBytes =
        decompressor.decompress(bytes.slice(CODEC_MAGIC.length + INT_SIZE, bytes.length), length)
      decompressedBytes
    } else {
      bytes
    }
  }
/**
 * Extracts the pieces an index operation needs from an optimized logical plan.
 * Only parquet and ORC HadoopFsRelations are supported; any other plan shape throws.
 *
 * Returns (fileIndex, dataSchema, reader class name, optional catalog table id,
 * rewritten logical plan). The plan is rewritten to use a read-only, non-splitable
 * file format so index building sees whole files.
 *
 * @throws OapException if the matching OAP config flag is off or the plan is unsupported
 */
def extractInfoFromPlan(sparkSession: SparkSession, optimized: LogicalPlan)
: (FileIndex, StructType, String, Option[CatalogTable], LogicalPlan) = {
val (fileCatalog, schema, readerClassName, identifier, relation) = optimized match {
case LogicalRelation(
_fsRelation @ HadoopFsRelation(f, _, s, _, _: ParquetFileFormat, _),
attributes, id, _) =>
// Prefer the newer OAP_PARQUET_ENABLED key; fall back to the legacy OAP_PARQUET_ENABLE.
val oapParquetEnabled =
if (sparkSession.conf.contains(OapConf.OAP_PARQUET_ENABLED.key)) {
sparkSession.conf.get(OapConf.OAP_PARQUET_ENABLED)
} else {
sparkSession.conf.get(OapConf.OAP_PARQUET_ENABLE)
}
if (!oapParquetEnabled) {
throw new OapException(s"turn on ${
OapConf.OAP_PARQUET_ENABLED.key
} to allow index operation on parquet files")
}
// Use ReadOnlyParquetFileFormat instead of ParquetFileFormat because
// ReadOnlyParquetFileFormat.isSplitable always returns false.
val fsRelation = _fsRelation.copy(
fileFormat = new ReadOnlyParquetFileFormat(),
options = _fsRelation.options)(_fsRelation.sparkSession)
val logical = LogicalRelation(fsRelation, attributes, id, isStreaming = false)
(f, s, OapFileFormat.PARQUET_DATA_FILE_CLASSNAME, id, logical)
case LogicalRelation(
_fsRelation @ HadoopFsRelation(f, _, s, _, format, _), attributes, id, _)
if format.isInstanceOf[org.apache.spark.sql.hive.orc.OrcFileFormat] ||
format.isInstanceOf[org.apache.spark.sql.execution.datasources.orc.OrcFileFormat] =>
// Same newer-key-first lookup as the parquet branch, for ORC.
val oapORCEnabled = if (sparkSession.conf.contains(OapConf.OAP_ORC_ENABLED.key)) {
sparkSession.conf.get(OapConf.OAP_ORC_ENABLED)
} else {
sparkSession.conf.get(OapConf.OAP_ORC_ENABLE)
}
if (!oapORCEnabled)
{
throw new OapException(s"turn on ${
OapConf.OAP_ORC_ENABLED.key
} to allow index building on orc files")
}
// ReadOnlyOrcFileFormat and ReadOnlyNativeOrcFileFormat don't support splitable.
// ReadOnlyOrcFileFormat is for hive orc.
// ReadOnlyNativeOrcFileFormat is for native orc introduced in Spark 2.3.
val fsRelation = format match {
case _: org.apache.spark.sql.hive.orc.OrcFileFormat =>
_fsRelation.copy(fileFormat = new ReadOnlyOrcFileFormat(),
options = _fsRelation.options)(_fsRelation.sparkSession)
case _: org.apache.spark.sql.execution.datasources.orc.OrcFileFormat =>
_fsRelation.copy(fileFormat = new ReadOnlyNativeOrcFileFormat(),
options = _fsRelation.options)(_fsRelation.sparkSession)
}
val logical = LogicalRelation(fsRelation, attributes, id, isStreaming = false)
(f, s, OapFileFormat.ORC_DATA_FILE_CLASSNAME, id, logical)
case other =>
throw new OapException(s"We don't support index operation for " +
s"${other.simpleString(SQLConf.get.maxToStringFields)}")
}
(fileCatalog, schema, readerClassName, identifier, relation)
}
/**
 * Renders a disjunctive SQL predicate that matches exactly the given partitions,
 * e.g. "(a='1' AND b='x') OR (a='2' AND b='y')".
 * Expects a non-empty `partitions` with non-empty partition values (reduce throws otherwise,
 * matching the original contract).
 */
def buildPartitionsFilter(partitions: Seq[PartitionDirectory], schema: StructType): String = {
  val perPartitionClauses = partitions.map { partition =>
    val fieldPredicates = (0 until partition.values.numFields).map { i =>
      val field = schema.fields(i)
      s"${field.name}='${partition.values.get(i, field.dataType)}'"
    }
    fieldPredicates.reduce(_ + " AND " + _)
  }
  perPartitionClauses.map(clause => s"($clause)").reduce(_ + " OR " + _)
}
/**
 * Builds an index over the given partitions of `relation` by projecting the indexed
 * columns, filtering down to the target partitions, and writing index files with
 * FileFormatWriter under `outPutPath`.
 *
 * Column metadata "isAscending" carries each column's sort direction to the index writer.
 * Returns the per-task build results collected by OapIndexWriteJobStatsTracker.
 */
def buildPartitionIndex(
relation: LogicalPlan,
sparkSession: SparkSession,
partitions: Seq[PartitionDirectory],
outPutPath: Path,
partitionSchema: StructType,
indexColumns: Seq[IndexColumn],
indexType: OapIndexType,
indexMeta: IndexMeta): Seq[IndexBuildResult] = {
// Project only the indexed columns, tagging each with its sort direction.
val projectList = indexColumns.map { indexColumn =>
relation.output.find(p => p.name == indexColumn.columnName).get.withMetadata(
new MetadataBuilder().putBoolean("isAscending", indexColumn.isAscending).build())
}
var ds = Dataset.ofRows(sparkSession, Project(projectList, relation))
// For partitioned tables, restrict the scan to the requested partitions only.
if (partitionSchema.nonEmpty) {
val disjunctivePartitionsFilter = buildPartitionsFilter(partitions, partitionSchema)
ds = ds.filter(disjunctivePartitionsFilter)
}
assert(outPutPath != null, "Expected exactly one path to be specified, but no value")
val configuration = sparkSession.sessionState.newHadoopConf()
val qualifiedOutputPath = {
val fs = outPutPath.getFileSystem(configuration)
outPutPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
}
// Commit protocol handles atomic rename of index output files.
val committer = FileCommitProtocol.instantiate(
classOf[OapIndexCommitProtocol].getCanonicalName,
jobId = java.util.UUID.randomUUID().toString,
outputPath = outPutPath.toUri.toString,
false)
// Options consumed by OapIndexFileFormat when writing the index files.
val options = Map(
"indexName" -> indexMeta.name,
"indexTime" -> indexMeta.time,
"isAppend" -> "true",
"indexType" -> indexType.toString
)
val statsTrackers = new OapIndexWriteJobStatsTracker
FileFormatWriter.write(
sparkSession = sparkSession,
ds.queryExecution.executedPlan,
fileFormat = new OapIndexFileFormat,
committer = committer,
outputSpec = FileFormatWriter.OutputSpec(
qualifiedOutputPath.toUri.toString, Map.empty, ds.queryExecution.analyzed.output),
hadoopConf = configuration,
Seq.empty, // partitionColumns
bucketSpec = None,
statsTrackers = Seq(statsTrackers),
options = options)
statsTrackers.indexBuildResults
}
}
| Intel-bigdata/OAP | oap-cache/oap/src/main/scala/org/apache/spark/sql/execution/datasources/oap/index/IndexUtils.scala | Scala | apache-2.0 | 20,399 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.calcite.{FlinkRelBuilder, FlinkRelFactories}
import org.apache.flink.table.planner.plan.utils.{AggregateUtil, ExpandUtil}
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan.RelOptRule._
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall}
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rel.logical.LogicalAggregate
import org.apache.calcite.rex.{RexBuilder, RexNode}
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.sql.fun.SqlStdOperatorTable
import org.apache.calcite.util.ImmutableBitSet
import scala.collection.JavaConversions._
/**
* This rule rewrites an aggregation query with grouping sets into
* a regular aggregation query with expand.
*
* This rule duplicates the input data by two or more times (# number of groupSets +
* an optional non-distinct group). This will put quite a bit of memory pressure on the used
* aggregate and exchange operators.
*
* This rule will be used for the plan with grouping sets or the plan with distinct aggregations
* after [[FlinkAggregateExpandDistinctAggregatesRule]] applied.
*
* `FlinkAggregateExpandDistinctAggregatesRule` rewrites an aggregate query with
* distinct aggregations into an expanded double aggregation. The first aggregate has
* grouping sets in which the regular aggregation expressions and every distinct clause
* are aggregated in a separate group. The results are then combined in a second aggregate.
*
* Examples:
*
* MyTable: a: INT, b: BIGINT, c: VARCHAR(32), d: VARCHAR(32)
*
* Original records:
* +-----+-----+-----+-----+
* | a | b | c | d |
* +-----+-----+-----+-----+
* | 1 | 1 | c1 | d1 |
* +-----+-----+-----+-----+
* | 1 | 2 | c1 | d2 |
* +-----+-----+-----+-----+
* | 2 | 1 | c1 | d1 |
* +-----+-----+-----+-----+
*
* Example1 (expand for DISTINCT aggregates):
*
* SQL:
* SELECT a, SUM(DISTINCT b) as t1, COUNT(DISTINCT c) as t2, COUNT(d) as t3 FROM MyTable GROUP BY a
*
* Logical plan:
* {{{
* LogicalAggregate(group=[{0}], t1=[SUM(DISTINCT $1)], t2=[COUNT(DISTINCT $2)], t3=[COUNT($3)])
* LogicalTableScan(table=[[builtin, default, MyTable]])
* }}}
*
* Logical plan after `FlinkAggregateExpandDistinctAggregatesRule` applied:
* {{{
* LogicalProject(a=[$0], t1=[$1], t2=[$2], t3=[CAST($3):BIGINT NOT NULL])
* LogicalProject(a=[$0], t1=[$1], t2=[$2], $f3=[CASE(IS NOT NULL($3), $3, 0)])
* LogicalAggregate(group=[{0}], t1=[SUM($1) FILTER $4], t2=[COUNT($2) FILTER $5],
* t3=[MIN($3) FILTER $6])
* LogicalProject(a=[$0], b=[$1], c=[$2], t3=[$3], $g_1=[=($4, 1)], $g_2=[=($4, 2)],
* $g_3=[=($4, 3)])
* LogicalAggregate(group=[{0, 1, 2}], groups=[[{0, 1}, {0, 2}, {0}]], t3=[COUNT($3)],
* $g=[GROUPING($0, $1, $2)])
* LogicalTableScan(table=[[builtin, default, MyTable]])
* }}}
*
* Logical plan after this rule applied:
* {{{
* LogicalCalc(expr#0..3=[{inputs}], expr#4=[IS NOT NULL($t3)], ...)
* LogicalAggregate(group=[{0}], t1=[SUM($1) FILTER $4], t2=[COUNT($2) FILTER $5],
* t3=[MIN($3) FILTER $6])
* LogicalCalc(expr#0..4=[{inputs}], ... expr#10=[CASE($t6, $t5, $t8, $t7, $t9)],
* expr#11=[1], expr#12=[=($t10, $t11)], ... $g_1=[$t12], ...)
* LogicalAggregate(group=[{0, 1, 2, 4}], groups=[[]], t3=[COUNT($3)])
* LogicalExpand(projects=[{a=[$0], b=[$1], c=[null], d=[$3], $e=[1]},
* {a=[$0], b=[null], c=[$2], d=[$3], $e=[2]}, {a=[$0], b=[null], c=[null], d=[$3], $e=[3]}])
* LogicalTableSourceScan(table=[[builtin, default, MyTable]], fields=[a, b, c, d])
* }}}
*
* '$e = 1' is equivalent to 'group by a, b'
* '$e = 2' is equivalent to 'group by a, c'
* '$e = 3' is equivalent to 'group by a'
*
* Expanded records:
* +-----+-----+-----+-----+-----+
* | a | b | c | d | $e |
* +-----+-----+-----+-----+-----+ ---+---
* | 1 | 1 | null| d1 | 1 | |
* +-----+-----+-----+-----+-----+ |
* | 1 | null| c1 | d1 | 2 | records expanded by record1
* +-----+-----+-----+-----+-----+ |
* | 1 | null| null| d1 | 3 | |
* +-----+-----+-----+-----+-----+ ---+---
* | 1 | 2 | null| d2 | 1 | |
* +-----+-----+-----+-----+-----+ |
* | 1 | null| c1 | d2 | 2 | records expanded by record2
* +-----+-----+-----+-----+-----+ |
* | 1 | null| null| d2 | 3 | |
* +-----+-----+-----+-----+-----+ ---+---
* | 2 | 1 | null| d1 | 1 | |
* +-----+-----+-----+-----+-----+ |
* | 2 | null| c1 | d1 | 2 | records expanded by record3
* +-----+-----+-----+-----+-----+ |
* | 2 | null| null| d1 | 3 | |
* +-----+-----+-----+-----+-----+ ---+---
*
* Example2 (Some fields are both in DISTINCT aggregates and non-DISTINCT aggregates):
*
* SQL:
* SELECT MAX(a) as t1, COUNT(DISTINCT a) as t2, count(DISTINCT d) as t3 FROM MyTable
*
* Field `a` is both in DISTINCT aggregate and `MAX` aggregate,
* so, `a` should be outputted as two individual fields, one is for `MAX` aggregate,
* another is for DISTINCT aggregate.
*
* Expanded records:
* +-----+-----+-----+-----+
* | a | d | $e | a_0 |
* +-----+-----+-----+-----+ ---+---
* | 1 | null| 1 | 1 | |
* +-----+-----+-----+-----+ |
* | null| d1 | 2 | 1 | records expanded by record1
* +-----+-----+-----+-----+ |
* | null| null| 3 | 1 | |
* +-----+-----+-----+-----+ ---+---
* | 1 | null| 1 | 1 | |
* +-----+-----+-----+-----+ |
* | null| d2 | 2 | 1 | records expanded by record2
* +-----+-----+-----+-----+ |
* | null| null| 3 | 1 | |
* +-----+-----+-----+-----+ ---+---
* | 2 | null| 1 | 2 | |
* +-----+-----+-----+-----+ |
* | null| d1 | 2 | 2 | records expanded by record3
* +-----+-----+-----+-----+ |
* | null| null| 3 | 2 | |
* +-----+-----+-----+-----+ ---+---
*
* Example3 (expand for CUBE/ROLLUP/GROUPING SETS):
*
* SQL:
* SELECT a, c, SUM(b) as b FROM MyTable GROUP BY GROUPING SETS (a, c)
*
* Logical plan:
* {{{
* LogicalAggregate(group=[{0, 1}], groups=[[{0}, {1}]], b=[SUM($2)])
* LogicalProject(a=[$0], c=[$2], b=[$1])
* LogicalTableScan(table=[[builtin, default, MyTable]])
* }}}
*
* Logical plan after this rule applied:
* {{{
* LogicalCalc(expr#0..3=[{inputs}], proj#0..1=[{exprs}], b=[$t3])
* LogicalAggregate(group=[{0, 2, 3}], groups=[[]], b=[SUM($1)])
* LogicalExpand(projects=[{a=[$0], b=[$1], c=[null], $e=[1]},
* {a=[null], b=[$1], c=[$2], $e=[2]}])
* LogicalNativeTableScan(table=[[builtin, default, MyTable]])
* }}}
*
* '$e = 1' is equivalent to 'group by a'
* '$e = 2' is equivalent to 'group by c'
*
* Expanded records:
* +-----+-----+-----+-----+
* | a | b | c | $e |
* +-----+-----+-----+-----+ ---+---
* | 1 | 1 | null| 1 | |
* +-----+-----+-----+-----+ records expanded by record1
* | null| 1 | c1 | 2 | |
* +-----+-----+-----+-----+ ---+---
* | 1 | 2 | null| 1 | |
* +-----+-----+-----+-----+ records expanded by record2
* | null| 2 | c1 | 2 | |
* +-----+-----+-----+-----+ ---+---
* | 2 | 1 | null| 1 | |
* +-----+-----+-----+-----+ records expanded by record3
* | null| 1 | c1 | 2 | |
* +-----+-----+-----+-----+ ---+---
*/
class DecomposeGroupingSetsRule extends RelOptRule(
operand(classOf[LogicalAggregate], any),
FlinkRelFactories.FLINK_REL_BUILDER,
"DecomposeGroupingSetsRule") {
// Fires when the aggregate has multiple grouping sets, or contains group-id
// expressions (GROUP_ID / GROUPING / GROUPING_ID calls).
override def matches(call: RelOptRuleCall): Boolean = {
val agg: LogicalAggregate = call.rel(0)
val groupIdExprs = AggregateUtil.getGroupIdExprIndexes(agg.getAggCallList)
agg.getGroupSets.size() > 1 || groupIdExprs.nonEmpty
}
// Rewrites the grouping-sets aggregate into Expand + simple Aggregate + Project
// (see the class-level scaladoc for worked examples).
override def onMatch(call: RelOptRuleCall): Unit = {
val agg: LogicalAggregate = call.rel(0)
// Long data type is used to store groupValue in FlinkAggregateExpandDistinctAggregatesRule,
// and the result of grouping function is a positive value,
// so the max groupCount must be less than 64.
if (agg.getGroupCount >= 64) {
throw new TableException("group count must be less than 64.")
}
val aggInput = agg.getInput
val groupIdExprs = AggregateUtil.getGroupIdExprIndexes(agg.getAggCallList)
val aggCallsWithIndexes = agg.getAggCallList.zipWithIndex
val cluster = agg.getCluster
val rexBuilder = cluster.getRexBuilder
val needExpand = agg.getGroupSets.size() > 1
val relBuilder = call.builder().asInstanceOf[FlinkRelBuilder]
relBuilder.push(aggInput)
// With multiple grouping sets, insert an Expand node that duplicates each input row
// once per grouping set; duplicateFieldMap remaps fields duplicated by the expand.
val (newGroupSet, duplicateFieldMap) = if (needExpand) {
val (duplicateFieldMap, expandIdIdxInExpand) = ExpandUtil.buildExpandNode(
cluster, relBuilder, agg.getAggCallList, agg.getGroupSet, agg.getGroupSets)
// new groupSet contains original groupSet and expand_id('$e') field
val newGroupSet = agg.getGroupSet.union(ImmutableBitSet.of(expandIdIdxInExpand))
(newGroupSet, duplicateFieldMap)
} else {
// no need add expand node, only need care about group functions
(agg.getGroupSet, Map.empty[Integer, Integer])
}
val newGroupCount = newGroupSet.cardinality()
// Keep only the "real" aggregate calls (group-id expressions become literals later),
// remapping their argument/filter fields through duplicateFieldMap.
val newAggCalls = aggCallsWithIndexes.collect {
case (aggCall, idx) if !groupIdExprs.contains(idx) =>
val newArgList = aggCall.getArgList.map(a => duplicateFieldMap.getOrElse(a, a)).toList
val newFilterArg = duplicateFieldMap.getOrDefault(aggCall.filterArg, aggCall.filterArg)
aggCall.adaptTo(
relBuilder.peek(), newArgList, newFilterArg, agg.getGroupCount, newGroupCount)
}
// create simple aggregate
relBuilder.aggregate(
relBuilder.groupKey(newGroupSet, ImmutableList.of[ImmutableBitSet](newGroupSet)),
newAggCalls)
val newAgg = relBuilder.peek()
// create a project to mapping original aggregate's output
// get names of original grouping fields
val groupingFieldsName = Seq.range(0, agg.getGroupCount)
.map(x => agg.getRowType.getFieldNames.get(x))
// create field access for all original grouping fields
val groupingFields = agg.getGroupSet.toList.zipWithIndex.map {
case (_, idx) => rexBuilder.makeInputRef(newAgg, idx)
}.toArray[RexNode]
val groupSetsWithIndexes = agg.getGroupSets.zipWithIndex
// output aggregate calls including `normal` agg call and grouping agg call
var aggCnt = 0
val aggFields = aggCallsWithIndexes.map {
// Group-id expressions are replaced by literals, selected per-row via the '$e' field.
case (aggCall, idx) if groupIdExprs.contains(idx) =>
if (needExpand) {
// reference to expand_id('$e') field in new aggregate
val expandIdIdxInNewAgg = newGroupCount - 1
val expandIdField = rexBuilder.makeInputRef(newAgg, expandIdIdxInNewAgg)
// create case when for group expression
val whenThenElse = groupSetsWithIndexes.flatMap {
case (subGroupSet, i) =>
val groupExpr = lowerGroupExpr(rexBuilder, aggCall, groupSetsWithIndexes, i)
if (i < agg.getGroupSets.size() - 1) {
// WHEN/THEN
val expandIdVal = ExpandUtil.genExpandId(agg.getGroupSet, subGroupSet)
val expandIdType = newAgg.getRowType.getFieldList.get(expandIdIdxInNewAgg).getType
val expandIdLit = rexBuilder.makeLiteral(expandIdVal, expandIdType, false)
Seq(
// when $e = $e_value
rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, expandIdField, expandIdLit),
// then return group expression literal value
groupExpr
)
} else {
// ELSE
Seq(
// else return group expression literal value
groupExpr
)
}
}
rexBuilder.makeCall(SqlStdOperatorTable.CASE, whenThenElse)
} else {
// create literal for group expression
lowerGroupExpr(rexBuilder, aggCall, groupSetsWithIndexes, 0)
}
case _ =>
// create access to aggregation result
val aggResult = rexBuilder.makeInputRef(newAgg, newGroupCount + aggCnt)
aggCnt += 1
aggResult
}
// add a projection to establish the result schema and set the values of the group expressions.
relBuilder.project(
groupingFields.toSeq ++ aggFields,
groupingFieldsName ++ agg.getAggCallList.map(_.name))
relBuilder.convert(agg.getRowType, true)
call.transformTo(relBuilder.build())
}
/**
 * Returns a literal for a given group expression (GROUP_ID / GROUPING / GROUPING_ID;
 * any other kind yields a null literal).
 *
 * @param groupSetsWithIndexes all grouping sets of the aggregate, zipped with their index
 * @param indexInGroupSets     index of the grouping set the literal is computed for
 */
private def lowerGroupExpr(
builder: RexBuilder,
call: AggregateCall,
groupSetsWithIndexes: Seq[(ImmutableBitSet, Int)],
indexInGroupSets: Int): RexNode = {
val groupSet = groupSetsWithIndexes(indexInGroupSets)._1
val groups = groupSet.asSet()
call.getAggregation.getKind match {
case SqlKind.GROUP_ID =>
// https://issues.apache.org/jira/browse/CALCITE-1824
// GROUP_ID is not in the SQL standard. It is implemented only by Oracle.
// GROUP_ID is useful only if you have duplicate grouping sets,
// If grouping sets are distinct, GROUP_ID() will always return zero;
// Else return the index in the duplicate grouping sets.
// e.g. SELECT deptno, GROUP_ID() AS g FROM Emp GROUP BY GROUPING SETS (deptno, (), ())
// As you can see, the grouping set () occurs twice.
// So there is one row in the result for each occurrence:
// the first occurrence has g = 0; the second has g = 1.
val duplicateGroupSetsIndices = groupSetsWithIndexes.filter {
case (gs, _) => gs.compareTo(groupSet) == 0
}.map(_._2).toArray[Int]
require(duplicateGroupSetsIndices.nonEmpty)
val id: Long = duplicateGroupSetsIndices.indexOf(indexInGroupSets)
builder.makeLiteral(id, call.getType, false)
case SqlKind.GROUPING | SqlKind.GROUPING_ID =>
// GROUPING function is defined in the SQL standard,
// but the definition of GROUPING is different from in Oracle and in SQL standard:
// https://docs.oracle.com/cd/B28359_01/server.111/b28286/functions064.htm#SQLRF00647
//
// GROUPING_ID function is not defined in the SQL standard, and has the same
// functionality with GROUPING function in Calcite.
// our implementation is consistent with Oracle about GROUPING_ID function.
//
// NOTES:
// In Calcite, the java-document of SqlGroupingFunction is not consistent with agg.iq.
// One result bit per argument: 0 if the field is grouped in this set, 1 if rolled up.
val res: Long = call.getArgList.foldLeft(0L)((res, arg) =>
(res << 1L) + (if (groups.contains(arg)) 0L else 1L)
)
builder.makeLiteral(res, call.getType, false)
case _ => builder.constantNull()
}
}
}
object DecomposeGroupingSetsRule {
// Singleton instance used when registering this rule with the planner.
val INSTANCE: RelOptRule = new DecomposeGroupingSetsRule
}
| tzulitai/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/DecomposeGroupingSetsRule.scala | Scala | apache-2.0 | 16,250 |
package com.toscaruntime.it
import java.nio.file.Files

import com.toscaruntime.cli.command.UseCommand
import com.toscaruntime.it.TestConstant._
import com.toscaruntime.it.steps.{AgentsSteps, DeploymentsSteps}
import com.toscaruntime.util.FileUtil
import com.typesafe.scalalogging.LazyLogging

import org.scalatest.{BeforeAndAfter, FeatureSpec, GivenWhenThen}

import scala.util.control.Exception.ignoring
import scala.util.control.NonFatal
/**
* Base configuration for all specs
*
* @author Minh Khang VU
*/
/**
 * Base fixture for all integration specs. Before each test it undeploys every agent,
 * deletes every deployment image, resets the test data directory, and re-points the
 * CLI at the configured docker daemon. Cleanup is best-effort.
 */
class AbstractSpec extends FeatureSpec with GivenWhenThen with LazyLogging with BeforeAndAfter {

  before {
    try {
      logger.info("Cleaning agents on the local docker daemon")
      AgentsSteps.listAgents().foreach { agent =>
        logger.info(s"Cleaning agent [${agent.head}]")
        // One agent failing to undeploy must not abort cleanup of the rest.
        ignoring(classOf[Exception])(AgentsSteps.launchUndeployment(agent.head))
        logger.info(s"Cleaned agent [${agent.head}]")
      }
      logger.info("Cleaning deployment images on the local docker daemon")
      DeploymentsSteps.listDeploymentImages().foreach { image =>
        logger.info(s"Cleaning image [${image.head}]")
        ignoring(classOf[Exception])(DeploymentsSteps.deleteDeploymentImage(image.head))
        logger.info(s"Cleaned image [${image.head}]")
      }
      logger.info(s"Cleaning test data at ${testDataPath.toAbsolutePath}")
      FileUtil.delete(testDataPath)
      Files.createDirectories(repositoryPath)
      Files.createDirectories(tempPath)
      Files.createDirectories(assemblyPath)
      logger.info(s"Cleaned test data ${testDataPath.toAbsolutePath}")
      UseCommand.switchConfiguration(Context.dockerConfig.getUrl, Context.dockerConfig.getCertPath, testDataPath)
    } catch {
      // Fix: was `case e: Throwable`, which also swallowed fatal VM errors.
      // NonFatal keeps the best-effort behavior while letting OutOfMemoryError,
      // InterruptedException, etc. propagate and fail the run properly.
      case NonFatal(e) => logger.warn(s"Could not properly clean test data at ${testDataPath.toAbsolutePath}", e)
    }
  }
}
| vuminhkh/tosca-runtime | test/src/it/scala/com/toscaruntime/it/AbstractSpec.scala | Scala | mit | 1,828 |
package app
import util.{LockUtil, CollaboratorsAuthenticator, JGitUtil, ReferrerAuthenticator, Notifier, Keys}
import util.Directory._
import util.Implicits._
import util.ControlUtil._
import service._
import org.eclipse.jgit.api.Git
import jp.sf.amateras.scalatra.forms._
import org.eclipse.jgit.transport.RefSpec
import scala.collection.JavaConverters._
import org.eclipse.jgit.lib.{ObjectId, CommitBuilder, PersonIdent}
import service.IssuesService._
import service.PullRequestService._
import util.JGitUtil.DiffInfo
import service.RepositoryService.RepositoryTreeNode
import util.JGitUtil.CommitInfo
import org.slf4j.LoggerFactory
import org.eclipse.jgit.merge.MergeStrategy
import org.eclipse.jgit.errors.NoMergeBaseException
import service.WebHookService.WebHookPayload
/** Concrete controller: wires PullRequestsControllerBase with its service/auth dependencies. */
class PullRequestsController extends PullRequestsControllerBase
with RepositoryService with AccountService with IssuesService with PullRequestService with MilestonesService with LabelsService
with ActivityService with WebHookService with ReferrerAuthenticator with CollaboratorsAuthenticator
trait PullRequestsControllerBase extends ControllerBase {
self: RepositoryService with AccountService with IssuesService with MilestonesService with LabelsService
with ActivityService with PullRequestService with WebHookService with ReferrerAuthenticator with CollaboratorsAuthenticator =>
private val logger = LoggerFactory.getLogger(classOf[PullRequestsControllerBase])
// Validator for the "create pull request" form posted from the compare view.
val pullRequestForm = mapping(
"title" -> trim(label("Title" , text(required, maxlength(100)))),
"content" -> trim(label("Content", optional(text()))),
"targetUserName" -> trim(text(required, maxlength(100))),
"targetBranch" -> trim(text(required, maxlength(100))),
"requestUserName" -> trim(text(required, maxlength(100))),
"requestRepositoryName" -> trim(text(required, maxlength(100))),
"requestBranch" -> trim(text(required, maxlength(100))),
"commitIdFrom" -> trim(text(required, maxlength(40))),
"commitIdTo" -> trim(text(required, maxlength(40)))
)(PullRequestForm.apply)

// Validator for the merge form (message used for the merge commit).
val mergeForm = mapping(
"message" -> trim(label("Message", text(required)))
)(MergeForm.apply)

// Pull request creation data: target (base) and request (head) coordinates,
// plus the commit-id range the request was created against.
case class PullRequestForm(
title: String,
content: Option[String],
targetUserName: String,
targetBranch: String,
requestUserName: String,
requestRepositoryName: String,
requestBranch: String,
commitIdFrom: String,
commitIdTo: String)

// Merge form data: the merge commit message.
case class MergeForm(message: String)
// Lists all pull requests of the repository.
get("/:owner/:repository/pulls")(referrersOnly { repository =>
searchPullRequests(None, repository)
})

// Lists pull requests filtered by the submitting user.
get("/:owner/:repository/pulls/:userName")(referrersOnly { repository =>
searchPullRequests(Some(params("userName")), repository)
})

// Shows a single pull request: comments, labels, commits and diffs. 404 for
// a non-numeric or unknown id.
get("/:owner/:repository/pull/:id")(referrersOnly { repository =>
params("id").toIntOpt.flatMap{ issueId =>
val owner = repository.owner
val name = repository.name
getPullRequest(owner, name, issueId) map { case(issue, pullreq) =>
using(Git.open(getRepositoryDir(owner, name))){ git =>
val (commits, diffs) =
getRequestCompareInfo(owner, name, pullreq.commitIdFrom, owner, name, pullreq.commitIdTo)
pulls.html.pullreq(
issue, pullreq,
getComments(owner, name, issueId),
getIssueLabels(owner, name, issueId),
// Assignable users: collaborators plus the owner (unless the owner is a group account).
(getCollaborators(owner, name) ::: (if(getAccountByUserName(owner).get.isGroupAccount) Nil else List(owner))).sorted,
getMilestonesWithIssueCount(owner, name),
getLabels(owner, name),
commits,
diffs,
hasWritePermission(owner, name, context.loginAccount),
repository)
}
}
} getOrElse NotFound
})
// Renders the merge guide fragment, including whether the pull request would
// merge cleanly (conflict check) and the clone URL of the requesting repository.
ajaxGet("/:owner/:repository/pull/:id/mergeguide")(collaboratorsOnly { repository =>
params("id").toIntOpt.flatMap{ issueId =>
val owner = repository.owner
val name = repository.name
getPullRequest(owner, name, issueId) map { case(issue, pullreq) =>
pulls.html.mergeguide(
checkConflictInPullRequest(owner, name, pullreq.branch, pullreq.requestUserName, name, pullreq.requestBranch, issueId),
pullreq,
s"${baseUrl}/git/${pullreq.requestUserName}/${pullreq.requestRepositoryName}.git")
}
} getOrElse NotFound
})

// Deletes the pull request's source branch (the default branch is never deleted),
// records a "delete_branch" comment and redirects back to the pull request page.
get("/:owner/:repository/pull/:id/delete/:branchName")(collaboratorsOnly { repository =>
params("id").toIntOpt.map { issueId =>
val branchName = params("branchName")
val userName = context.loginAccount.get.userName
if(repository.repository.defaultBranch != branchName){
using(Git.open(getRepositoryDir(repository.owner, repository.name))){ git =>
git.branchDelete().setBranchNames(branchName).call()
recordDeleteBranchActivity(repository.owner, repository.name, userName, branchName)
}
}
createComment(repository.owner, repository.name, userName, issueId, branchName, "delete_branch")
redirect(s"/${repository.owner}/${repository.name}/pull/${issueId}")
} getOrElse NotFound
})
// Merges a pull request: under a per-repository lock, marks the issue merged/closed,
// performs an in-memory recursive merge of refs/heads/<base> with refs/pull/<id>/head,
// writes the merge commit, updates the base ref, then runs follow-ups (commit ids,
// issue auto-close, web hooks, notifications).
// NOTE(review): the issue is closed in the database BEFORE the merge is attempted,
// so a conflict aborts with the issue already closed — confirm intended.
post("/:owner/:repository/pull/:id/merge", mergeForm)(collaboratorsOnly { (form, repository) =>
params("id").toIntOpt.flatMap { issueId =>
val owner = repository.owner
val name = repository.name
LockUtil.lock(s"${owner}/${name}/merge"){
getPullRequest(owner, name, issueId).map { case (issue, pullreq) =>
using(Git.open(getRepositoryDir(owner, name))) { git =>
// mark issue as merged and close.
val loginAccount = context.loginAccount.get
createComment(owner, name, loginAccount.userName, issueId, form.message, "merge")
createComment(owner, name, loginAccount.userName, issueId, "Close", "close")
updateClosed(owner, name, issueId, true)
// record activity
recordMergeActivity(owner, name, loginAccount.userName, issueId, form.message)
// merge
val mergeBaseRefName = s"refs/heads/${pullreq.branch}"
// In-core merger: computes the result tree without touching the work tree.
val merger = MergeStrategy.RECURSIVE.newMerger(git.getRepository, true)
val mergeBaseTip = git.getRepository.resolve(mergeBaseRefName)
val mergeTip = git.getRepository.resolve(s"refs/pull/${issueId}/head")
val conflicted = try {
!merger.merge(mergeBaseTip, mergeTip)
} catch {
// No common ancestor: treat as a conflict.
case e: NoMergeBaseException => true
}
if (conflicted) {
throw new RuntimeException("This pull request can't merge automatically.")
}
// creates merge commit
val mergeCommit = new CommitBuilder()
mergeCommit.setTreeId(merger.getResultTreeId)
mergeCommit.setParentIds(Array[ObjectId](mergeBaseTip, mergeTip): _*)
val personIdent = new PersonIdent(loginAccount.fullName, loginAccount.mailAddress)
mergeCommit.setAuthor(personIdent)
mergeCommit.setCommitter(personIdent)
mergeCommit.setMessage(s"Merge pull request #${issueId} from ${pullreq.requestUserName}/${pullreq.requestRepositoryName}\\n\\n" +
form.message)
// insertObject and got mergeCommit Object Id
val inserter = git.getRepository.newObjectInserter
val mergeCommitId = inserter.insert(mergeCommit)
inserter.flush()
inserter.release()
// update refs
val refUpdate = git.getRepository.updateRef(mergeBaseRefName)
refUpdate.setNewObjectId(mergeCommitId)
refUpdate.setForceUpdate(false)
refUpdate.setRefLogIdent(personIdent)
refUpdate.setRefLogMessage("merged", true)
refUpdate.update()
// Register the merged range's commit ids so they resolve in the UI.
val (commits, _) = getRequestCompareInfo(owner, name, pullreq.commitIdFrom,
pullreq.requestUserName, pullreq.requestRepositoryName, pullreq.commitIdTo)
commits.flatten.foreach { commit =>
if(!existsCommitId(owner, name, commit.id)){
insertCommitId(owner, name, commit.id)
}
}
// close issue by content of pull request
val defaultBranch = getRepository(owner, name, baseUrl).get.repository.defaultBranch
if(pullreq.branch == defaultBranch){
commits.flatten.foreach { commit =>
closeIssuesFromMessage(commit.fullMessage, loginAccount.userName, owner, name)
}
issue.content match {
case Some(content) => closeIssuesFromMessage(content, loginAccount.userName, owner, name)
case _ =>
}
closeIssuesFromMessage(form.message, loginAccount.userName, owner, name)
}
// call web hook
getWebHookURLs(owner, name) match {
case webHookURLs if(webHookURLs.nonEmpty) =>
for(ownerAccount <- getAccountByUserName(owner)){
callWebHook(owner, name, webHookURLs,
WebHookPayload(git, loginAccount, mergeBaseRefName, repository, commits.flatten.toList, ownerAccount))
}
case _ =>
}
// notifications
Notifier().toNotify(repository, issueId, "merge"){
Notifier.msgStatus(s"${baseUrl}/${owner}/${name}/pull/${issueId}")
}
redirect(s"/${owner}/${name}/pull/${issueId}")
}
}
}
} getOrElse NotFound
})
// Entry point for the compare view without explicit refs: redirects to a concrete
// "origin:branch...branch" URL. For a fork, compares against the origin repository's
// default branch; otherwise compares the repository's default branch with itself.
get("/:owner/:repository/compare")(referrersOnly { forkedRepository =>
(forkedRepository.repository.originUserName, forkedRepository.repository.originRepositoryName) match {
case (Some(originUserName), Some(originRepositoryName)) => {
getRepository(originUserName, originRepositoryName, baseUrl).map { originRepository =>
using(
Git.open(getRepositoryDir(originUserName, originRepositoryName)),
Git.open(getRepositoryDir(forkedRepository.owner, forkedRepository.name))
){ (oldGit, newGit) =>
val oldBranch = JGitUtil.getDefaultBranch(oldGit, originRepository).get._2
val newBranch = JGitUtil.getDefaultBranch(newGit, forkedRepository).get._2
redirect(s"${context.path}/${forkedRepository.owner}/${forkedRepository.name}/compare/${originUserName}:${oldBranch}...${newBranch}")
}
} getOrElse NotFound
}
case _ => {
using(Git.open(getRepositoryDir(forkedRepository.owner, forkedRepository.name))){ git =>
JGitUtil.getDefaultBranch(git, forkedRepository).map { case (_, defaultBranch) =>
redirect(s"${context.path}/${forkedRepository.owner}/${forkedRepository.name}/compare/${defaultBranch}...${defaultBranch}")
} getOrElse {
// Empty repository: no default branch to compare, go back to the repository root.
redirect(s"${context.path}/${forkedRepository.owner}/${forkedRepository.name}")
}
}
}
}
})
// Renders the compare page for "origin...forked" identifiers (each optionally
// "owner:branch"). Resolves the origin repository (same repo, declared origin,
// or a matching fork), then shows commits and diffs between the two branches.
get("/:owner/:repository/compare/*...*")(referrersOnly { forkedRepository =>
val Seq(origin, forked) = multiParams("splat")
val (originOwner, tmpOriginBranch) = parseCompareIdentifie(origin, forkedRepository.owner)
val (forkedOwner, tmpForkedBranch) = parseCompareIdentifie(forked, forkedRepository.owner)
(for(
originRepositoryName <- if(originOwner == forkedOwner){
Some(forkedRepository.name)
} else {
forkedRepository.repository.originRepositoryName.orElse {
// Fall back to a fork of this repository owned by the origin side.
getForkedRepositories(forkedRepository.owner, forkedRepository.name).find(_._1 == originOwner).map(_._2)
}
};
originRepository <- getRepository(originOwner, originRepositoryName, baseUrl)
) yield {
using(
Git.open(getRepositoryDir(originRepository.owner, originRepository.name)),
Git.open(getRepositoryDir(forkedRepository.owner, forkedRepository.name))
){ case (oldGit, newGit) =>
val originBranch = JGitUtil.getDefaultBranch(oldGit, originRepository, tmpOriginBranch).get._2
val forkedBranch = JGitUtil.getDefaultBranch(newGit, forkedRepository, tmpForkedBranch).get._2
val forkedId = getForkedCommitId(oldGit, newGit,
originRepository.owner, originRepository.name, originBranch,
forkedRepository.owner, forkedRepository.name, forkedBranch)
val oldId = oldGit.getRepository.resolve(forkedId)
val newId = newGit.getRepository.resolve(forkedBranch)
val (commits, diffs) = getRequestCompareInfo(
originRepository.owner, originRepository.name, oldId.getName,
forkedRepository.owner, forkedRepository.name, newId.getName)
pulls.html.compare(
commits,
diffs,
// Candidate repositories for the compare drop-down: origin plus all its forks.
(forkedRepository.repository.originUserName, forkedRepository.repository.originRepositoryName) match {
case (Some(userName), Some(repositoryName)) => (userName, repositoryName) :: getForkedRepositories(userName, repositoryName)
case _ => (forkedRepository.owner, forkedRepository.name) :: getForkedRepositories(forkedRepository.owner, forkedRepository.name)
},
originBranch,
forkedBranch,
oldId.getName,
newId.getName,
forkedRepository,
originRepository,
forkedRepository,
hasWritePermission(forkedRepository.owner, forkedRepository.name, context.loginAccount))
}
}) getOrElse NotFound
})
// Ajax conflict pre-check for the compare view: resolves both sides like the
// compare page, then renders whether merging the two branches would conflict.
ajaxGet("/:owner/:repository/compare/*...*/mergecheck")(collaboratorsOnly { forkedRepository =>
val Seq(origin, forked) = multiParams("splat")
val (originOwner, tmpOriginBranch) = parseCompareIdentifie(origin, forkedRepository.owner)
val (forkedOwner, tmpForkedBranch) = parseCompareIdentifie(forked, forkedRepository.owner)
(for(
originRepositoryName <- if(originOwner == forkedOwner){
Some(forkedRepository.name)
} else {
forkedRepository.repository.originRepositoryName.orElse {
// Fall back to a fork of this repository owned by the origin side.
getForkedRepositories(forkedRepository.owner, forkedRepository.name).find(_._1 == originOwner).map(_._2)
}
};
originRepository <- getRepository(originOwner, originRepositoryName, baseUrl)
) yield {
using(
Git.open(getRepositoryDir(originRepository.owner, originRepository.name)),
Git.open(getRepositoryDir(forkedRepository.owner, forkedRepository.name))
){ case (oldGit, newGit) =>
val originBranch = JGitUtil.getDefaultBranch(oldGit, originRepository, tmpOriginBranch).get._2
val forkedBranch = JGitUtil.getDefaultBranch(newGit, forkedRepository, tmpForkedBranch).get._2
pulls.html.mergecheck(
checkConflict(originRepository.owner, originRepository.name, originBranch,
forkedRepository.owner, forkedRepository.name, forkedBranch))
}
}) getOrElse NotFound
})
// Creates a pull request: an issue row flagged as a pull request, a pull request row,
// and a fetch of the requested branch into refs/pull/<id>/head; then records activity,
// sends notifications, and redirects to the new pull request page.
post("/:owner/:repository/pulls/new", pullRequestForm)(referrersOnly { (form, repository) =>
val loginUserName = context.loginAccount.get.userName
val issueId = createIssue(
owner = repository.owner,
repository = repository.name,
loginUser = loginUserName,
title = form.title,
content = form.content,
assignedUserName = None,
milestoneId = None,
isPullRequest = true)
createPullRequest(
originUserName = repository.owner,
originRepositoryName = repository.name,
issueId = issueId,
originBranch = form.targetBranch,
requestUserName = form.requestUserName,
requestRepositoryName = form.requestRepositoryName,
requestBranch = form.requestBranch,
commitIdFrom = form.commitIdFrom,
commitIdTo = form.commitIdTo)
// fetch requested branch
using(Git.open(getRepositoryDir(repository.owner, repository.name))){ git =>
git.fetch
.setRemote(getRepositoryDir(form.requestUserName, form.requestRepositoryName).toURI.toString)
.setRefSpecs(new RefSpec(s"refs/heads/${form.requestBranch}:refs/pull/${issueId}/head"))
.call
}
// record activity
recordPullRequestActivity(repository.owner, repository.name, loginUserName, issueId, form.title)
// notifications
Notifier().toNotify(repository, issueId, form.content.getOrElse("")){
Notifier.msgPullRequest(s"${baseUrl}/${repository.owner}/${repository.name}/pull/${issueId}")
}
redirect(s"/${repository.owner}/${repository.name}/pull/${issueId}")
})
/**
 * Checks whether conflict will be caused in merging. Returns true if conflict will be caused.
 *
 * Fetches the origin branch into a temporary ref of the requested repository,
 * runs an in-memory recursive merge against it, and always deletes the
 * temporary ref afterwards. The whole check is serialised per origin
 * repository via LockUtil.
 */
private def checkConflict(userName: String, repositoryName: String, branch: String,
    requestUserName: String, requestRepositoryName: String, requestBranch: String): Boolean = {
  LockUtil.lock(s"${userName}/${repositoryName}/merge-check"){
    using(Git.open(getRepositoryDir(requestUserName, requestRepositoryName))) { git =>
      val remoteRefName = s"refs/heads/${branch}"
      val tmpRefName = s"refs/merge-check/${userName}/${branch}"
      // Force update so a stale ref left by a previous check cannot break the fetch.
      val refSpec = new RefSpec(s"${remoteRefName}:${tmpRefName}").setForceUpdate(true)
      try {
        // fetch objects from origin repository branch
        git.fetch
          .setRemote(getRepositoryDir(userName, repositoryName).toURI.toString)
          .setRefSpecs(refSpec)
          .call
        // merge conflict check
        // NOTE(review): the second newMerger argument is presumed to be JGit's
        // "inCore" flag (no working-tree changes) — confirm against JGit docs.
        val merger = MergeStrategy.RECURSIVE.newMerger(git.getRepository, true)
        val mergeBaseTip = git.getRepository.resolve(s"refs/heads/${requestBranch}")
        val mergeTip = git.getRepository.resolve(tmpRefName)
        try {
          !merger.merge(mergeBaseTip, mergeTip)
        } catch {
          // No common ancestor between the two tips: report it as a conflict.
          case e: NoMergeBaseException => true
        }
      } finally {
        // Clean up the temporary merge-check ref regardless of the outcome.
        val refUpdate = git.getRepository.updateRef(refSpec.getDestination)
        refUpdate.setForceUpdate(true)
        refUpdate.delete()
      }
    }
  }
}
/**
 * Checks whether conflict will be caused in merging within pull request. Returns true if conflict will be caused.
 *
 * Unlike checkConflict above, this works entirely inside the origin
 * repository, merging refs/pull/<issueId>/head (fetched when the pull request
 * was created) onto the target branch with an in-memory recursive merge.
 */
private def checkConflictInPullRequest(userName: String, repositoryName: String, branch: String,
    requestUserName: String, requestRepositoryName: String, requestBranch: String,
    issueId: Int): Boolean = {
  LockUtil.lock(s"${userName}/${repositoryName}/merge") {
    using(Git.open(getRepositoryDir(userName, repositoryName))) { git =>
      // merge
      val merger = MergeStrategy.RECURSIVE.newMerger(git.getRepository, true)
      val mergeBaseTip = git.getRepository.resolve(s"refs/heads/${branch}")
      val mergeTip = git.getRepository.resolve(s"refs/pull/${issueId}/head")
      try {
        !merger.merge(mergeBaseTip, mergeTip)
      } catch {
        // No common ancestor: treat as a conflict.
        case e: NoMergeBaseException => true
      }
    }
  }
}
/**
 * Parses branch identifier and extracts owner and branch name as tuple.
 *
 * - "owner:branch" to ("owner", "branch")
 * - "branch" to ("defaultOwner", "branch")
 * - "owner:" to ("owner", "") — previously this threw
 *   ArrayIndexOutOfBoundsException because String.split drops trailing
 *   empty strings.
 */
private def parseCompareIdentifie(value: String, defaultOwner: String): (String, String) =
  if(value.contains(':')){
    val array = value.split(":")
    // Guard against a missing branch segment ("owner:"): split(":") yields a
    // single-element array there, so array(1) would throw.
    (array(0), if (array.length > 1) array(1) else "")
  } else {
    (defaultOwner, value)
  }
/**
 * Extracts all repository names from [[service.RepositoryService.RepositoryTreeNode]] as flat list.
 *
 * Pre-order traversal: a node's own owner precedes the names collected from
 * its children.
 */
private def getRepositoryNames(node: RepositoryTreeNode): List[String] =
  // flatMap instead of the original map(...).flatten — same result, idiomatic.
  node.owner :: node.children.flatMap(getRepositoryNames)
/**
 * Returns the identifier of the root commit (or latest merge commit) of the specified branch.
 *
 * Walks the request branch and keeps commits that already exist in the
 * upstream repository AND are reachable from `branch` there; the first such
 * commit is taken as the fork point.
 */
private def getForkedCommitId(oldGit: Git, newGit: Git, userName: String, repositoryName: String, branch: String,
    requestUserName: String, requestRepositoryName: String, requestBranch: String): String =
  // NOTE(review): `.head` throws when no shared commit exists — confirm that
  // callers guarantee the two branches share history.
  JGitUtil.getCommitLogs(newGit, requestBranch, true){ commit =>
    existsCommitId(userName, repositoryName, commit.getName) && JGitUtil.getBranchesOfCommit(oldGit, commit.getName).contains(branch)
  }.head.id
// Collects the commits and file diffs between the base branch (in the origin
// repository) and the requested commit (in the requesting repository).
// Commits are partitioned into runs that share the same calendar date.
private def getRequestCompareInfo(userName: String, repositoryName: String, branch: String,
    requestUserName: String, requestRepositoryName: String, requestCommitId: String): (Seq[Seq[CommitInfo]], Seq[DiffInfo]) = {
  using(
    Git.open(getRepositoryDir(userName, repositoryName)),
    Git.open(getRepositoryDir(requestUserName, requestRepositoryName))
  ){ (oldGit, newGit) =>
    val oldId = oldGit.getRepository.resolve(branch)
    val newId = newGit.getRepository.resolve(requestCommitId)

    // splitWith groups adjacent commits whose commit dates render to the same day.
    val commits = newGit.log.addRange(oldId, newId).call.iterator.asScala.map { revCommit =>
      new CommitInfo(revCommit)
    }.toList.splitWith { (commit1, commit2) =>
      view.helpers.date(commit1.time) == view.helpers.date(commit2.time)
    }

    val diffs = JGitUtil.getDiffs(newGit, oldId.getName, newId.getName, true)

    (commits, diffs)
  }
}
// Renders the pull request list page: restores (or refreshes) the per-session
// search condition, then loads the matching pull requests plus open/closed counters.
private def searchPullRequests(userName: Option[String], repository: RepositoryService.RepositoryInfo) =
  defining(repository.owner, repository.name){ case (owner, repoName) =>
    // Optional author filter; the "all" key means no author restriction.
    val filterUser = userName.map { x => Map("created_by" -> x) } getOrElse Map("all" -> "")
    val page = IssueSearchCondition.page(request)
    val sessionKey = Keys.Session.Pulls(owner, repoName)

    // retrieve search condition
    // Explicit query-string parameters win; otherwise reuse the condition
    // stored in the session, falling back to the default condition.
    val condition = session.putAndGet(sessionKey,
      if(request.hasQueryString) IssueSearchCondition(request)
      else session.getAs[IssueSearchCondition](sessionKey).getOrElse(IssueSearchCondition())
    )

    pulls.html.list(
      searchIssue(condition, filterUser, true, (page - 1) * PullRequestLimit, PullRequestLimit, owner -> repoName),
      getPullRequestCountGroupByUser(condition.state == "closed", owner, Some(repoName)),
      userName,
      page,
      countIssue(condition.copy(state = "open" ), filterUser, true, owner -> repoName),
      countIssue(condition.copy(state = "closed"), filterUser, true, owner -> repoName),
      countIssue(condition, Map.empty, true, owner -> repoName),
      condition,
      repository,
      hasWritePermission(owner, repoName, context.loginAccount))
  }
}
| katsumit/gitbucket | src/main/scala/app/PullRequestsController.scala | Scala | apache-2.0 | 22,545 |
package evaluation
import scala.concurrent.Future
/**
 * Asynchronous client for a Post REST resource.
 * Every operation returns a Future; failures are expected to surface as
 * failed Futures rather than thrown exceptions.
 */
trait RestClient {
  def getPosts: Future[List[Post]]
  def getPostForUser(userId: Int): Future[List[Post]]
  def createNewPost(post: Post): Future[CreatePostResponse]
  def updatePost(id: Int, post: Post): Future[Post]
  // NOTE(review): the Boolean presumably flags whether the deletion succeeded —
  // confirm against the implementation.
  def deletePost(id: Int): Future[Boolean]
} | saig0/scala-rest-client-evaluation | src/main/scala/evaluation/RestClient.scala | Scala | apache-2.0 | 324 |
import scala.annotation.tailrec
// Worksheet-style exercises on higher-order functions. The evaluation
// expressions between the definitions are kept from the original sheet.
object funsets {

  /** Computes n! with a tail-recursive accumulator. Assumes n >= 0. */
  def factorial(n: Int): Int = {
    // @tailrec makes the compiler verify the tail call (the file already
    // imports scala.annotation.tailrec but never used it).
    @tailrec
    def factorialStep(mul: Int, n: Int): Int =
      if (n == 0) mul else factorialStep(mul * n, n - 1)
    factorialStep(1, n)
  }

  factorial(5)

  /** Sums f(a) + f(a+1) + ... + f(b); empty range (a > b) yields 0. */
  def sum(f: (Int => Int), a: Int, b: Int): Int = {
    @tailrec
    def loop(a: Int, acc: Int): Int = {
      if (a > b) acc
      else loop(a + 1, acc + f(a))
    }
    loop(a, 0)
  }

  sum(x => x * x, 1, 4)

  /** Product of f over [a, b]; empty range yields the identity 1. */
  def product(f : Int => Int)(a : Int, b : Int) : Int =
    if (a > b) 1
    else f(a) * product(f)(a + 1, b)

  product(x => x * x)(3, 4)

  /** Factorial expressed through product. */
  def fact(n : Int): Int =
    product(x => x)(1, n)

  fact(5)

  /** Generalises sum/product: folds f over [a, b] with combine and identity zero. */
  def mapReduce(f : Int => Int, combine : (Int, Int) => Int, zero: Int)(a : Int, b : Int) : Int =
    if (a > b) zero
    else combine(f(a), mapReduce(f, combine, zero)(a + 1, b))

  mapReduce(x => x, (x,y) => x * y, 1)(1, 5)
}
| M4573R/playground-notes | functional-programming-principles-in-scala/week2/funsets.scala | Scala | mit | 1,013 |
package com.github.rgafiyatullin.creek_xml.dom_query
import com.github.rgafiyatullin.creek_xml.dom.Node
import scala.collection.immutable.Queue
/**
 * Enriches a DOM [[Node]] with select/delete/update/upsert query operations.
 * Each path-based method builds the corresponding [[DomQuery]] and applies it
 * to the wrapped node; the predicate overloads wrap a single predicate into a
 * one-step [[Path]] and delegate.
 */
class NodeWithQueries(node: Node) {

  /** All nodes matched by the path. */
  def select(path: Path): Seq[Node] =
    DomQuery.Select(path)(node)

  def select(predicate: Predicate): Seq[Node] =
    select(Path(Queue(predicate)))

  /** The tree with every node matched by the path removed. */
  def delete(path: Path): Node =
    DomQuery.Delete(path)(node)

  def delete(predicate: Predicate): Node =
    delete(Path(Queue(predicate)))

  /** The tree with f applied to every node matched by the path. */
  def update(path: Path)(f: Node => Node): Node =
    DomQuery.Update(path, f)(node)

  def update(predicate: Predicate): (Node => Node) => Node =
    update(Path(Queue(predicate)))

  /** The tree with f applied where matched, inserting when f yields Some. */
  def upsert(path: Path)(f: Node => Option[Node]): Node =
    DomQuery.Upsert(path, f)(node)

  def upsert(predicate: Predicate): (Node => Option[Node]) => Node =
    upsert(Path(Queue(predicate)))
}
| RGafiyatullin/creek-xml | src/main/scala/com/github/rgafiyatullin/creek_xml/dom_query/NodeWithQueries.scala | Scala | mit | 976 |
package edu.nccu.plsm.geo.projection
import scala.language.postfixOps
/**
 * A geographic position. `lat`/`lng` are in radians (they are converted with
 * math.toDegrees before display).
 */
case class LatLng(
  lat: Double,
  lng: Double
) {
  // Formats a decimal-degree value as degrees / minutes / seconds; the
  // seconds are rounded half-up to 5 decimals and suffixed with the `"` mark.
  private[this] def format(degree: Double) = {
    // Replaced deprecated postfix calls (`degree toInt`) with dot notation.
    val d = degree.toInt
    val m = ((degree - d) * 60).toInt
    val s = BigDecimal(degree * 3600 % 60).setScale(5, BigDecimal.RoundingMode.HALF_UP)
    // Fixed mojibake: the degree sign was previously emitted as "Β°"
    // (UTF-8 "°" mis-decoded as Latin-1).
    f"""$d%3s° $m%2s' $s%8s""""
  }

  def latDegree = math.toDegrees(lat)
  def lngDegree = math.toDegrees(lng)

  def latitude = format(latDegree)
  def longitude = format(lngDegree)
}
| AtkinsChang/geoconvert | core/src/main/scala/edu.nccu.plsm.geo/projection/LatLng.scala | Scala | apache-2.0 | 512 |
package com.sksamuel.elastic4s.requests.nodes
/**
 * Request for node statistics, optionally narrowed to particular stat groups.
 *
 * @param nodes the node identifiers to query
 * @param stats the stat groups to request; empty presumably means "all" — confirm
 */
case class NodeStatsRequest(nodes: Seq[String], stats: Seq[String] = Seq.empty) {

  /** Builder-style setter: a copy of this request with `stats` replaced. */
  def stats(stats: Seq[String]): NodeStatsRequest = this.copy(stats = stats)
}
| sksamuel/elastic4s | elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/nodes/NodeStatsRequest.scala | Scala | apache-2.0 | 203 |
package skinny.engine.cookie
import javax.servlet.http.{ HttpServletRequest, HttpServletResponse }
import skinny.engine.implicits.ServletApiImplicits
import scala.collection.mutable
/**
 * Extended cookie object.
 *
 * Map-like view over the request's cookies: reads come from a mutable working
 * copy, and every write is also pushed onto the response as a real cookie.
 */
class SweetCookies(
  private[this] val request: HttpServletRequest,
  private[this] val response: HttpServletResponse)
  extends ServletApiImplicits {

  // Working copy: request cookies plus anything set during this request.
  private[this] lazy val cookies = mutable.HashMap[String, String]() ++ request.cookies

  def get(key: String): Option[String] = cookies.get(key)

  // Unsafe lookup: throws when no cookie exists for the key.
  def apply(key: String): String = {
    cookies.get(key) getOrElse (throw new Exception("No cookie could be found for the specified key"))
  }

  // Sets a cookie and returns the Cookie instance added to the response.
  def update(name: String, value: String)(
    implicit cookieOptions: CookieOptions = CookieOptions()): Cookie = {
    cookies += name -> value
    addCookie(name, value, cookieOptions)
  }

  // Alias for update.
  def set(name: String, value: String)(
    implicit cookieOptions: CookieOptions = CookieOptions()): Cookie = {
    this.update(name, value)(cookieOptions)
  }

  // Deletes by emitting an already-expired (maxAge = 0) cookie to the client.
  def delete(name: String)(implicit cookieOptions: CookieOptions = CookieOptions()): Unit = {
    cookies -= name
    addCookie(name, "", cookieOptions.copy(maxAge = 0))
  }

  // Operator alias for update.
  def +=(keyValuePair: (String, String))(
    implicit cookieOptions: CookieOptions = CookieOptions()): Cookie = {
    this.update(keyValuePair._1, keyValuePair._2)(cookieOptions)
  }

  // Operator alias for delete.
  def -=(key: String)(implicit cookieOptions: CookieOptions = CookieOptions()): Unit = {
    delete(key)(cookieOptions)
  }

  // Builds the cookie with the given options and mirrors it onto the response.
  private def addCookie(name: String, value: String, options: CookieOptions): Cookie = {
    val cookie = new Cookie(name, value)(options)
    response.addCookie(cookie)
    cookie
  }
} | holycattle/skinny-framework | engine/src/main/scala/skinny/engine/cookie/SweetCookies.scala | Scala | mit | 1,695 |
package tholowka.diz.unmarshalling.terms
import scala.collection.mutable.ListBuffer
import tholowka.diz.interfaces._
/**
 * JsonString represents a parser to parse a typical JSON string.
 * Architecturally, this parser sits beneath RowValue
 {{{
 RowValue
 |
 |
 JsonString
 }}}
 */
private [terms] object JsonString {}
// NOTE(review): parameterless `case class` is deprecated syntax (should be
// `case class JsonString()`); left untouched to avoid changing callers.
private [terms] case class JsonString extends Parser[String] {
  /**
   * Consumes the character stream and returns its contents with unescaped
   * double quotes removed; escaped quotes survive because the escape state is
   * threaded through EscapeSequenceCheck.
   */
  def consume(input: Stream[Char]): Option[String] = {
    var escapeSeqCheck = EscapeSequenceCheck()
    var result = ""
    // The predicate doubles as a state-machine step: it advances
    // escapeSeqCheck for EVERY character and drops only unescaped quotes.
    def filterOutQuotes(ch: Char) = {
      (ch, escapeSeqCheck.escapeSeqFound) match {
        case ('"', false) => {
          escapeSeqCheck = EscapeSequenceCheck(escapeSeqCheck, ch)
          false
        }
        case (_, _) => {
          escapeSeqCheck = EscapeSequenceCheck(escapeSeqCheck, ch)
          true
        }
      }
    }
    // Stream#filter is lazy, but foreach forces the elements in order, so the
    // predicate's side effects run strictly left-to-right as required.
    input.filter(filterOutQuotes).foreach(ch => {
      result += ch.toString
    })
    Some(result)
  }
}
| tholowka/diz | src/main/scala/tholowka/diz/unmarshalling/terms/JsonString.scala | Scala | mit | 1,162 |
package com.mesosphere.cosmos
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util
import org.scalatest.FreeSpec
// NOTE(review): the describe string says "toBytes" but the method under test
// is getBytes — kept as-is to avoid changing reported test names.
class ByteBuffersSpec extends FreeSpec {

  "ByteBuffers.toBytes(ByteBuffer) should" - {

    "work for an array backed ByteBuffer" in {
      val bytes = "hello".getBytes(StandardCharsets.UTF_8)
      val bb = ByteBuffer.wrap(bytes)
      val actual = ByteBuffers.getBytes(bb)
      assert(util.Arrays.equals(bytes, actual))
    }

    "work for a non-array backed ByteBuffer" in {
      val bytes = "hello".getBytes(StandardCharsets.UTF_8)
      val bb = ByteBuffer.allocateDirect(bytes.size)
      bytes.foreach(bb.put)
      bb.rewind() // rewind the position back to the beginning after having written
      val actual = ByteBuffers.getBytes(bb)
      assert(util.Arrays.equals(bytes, actual))
    }

    "check read index bounds" in {
      val bytes = "hello".getBytes(StandardCharsets.UTF_8)
      val bb = ByteBuffer.allocateDirect(bytes.size)
      bytes.foreach(bb.put)
      // Buffer deliberately NOT rewound: position == limit, so getBytes must
      // reject the read. The original try/catch silently passed when no
      // exception was thrown; intercept fails the test in that case.
      val ioobe = intercept[IndexOutOfBoundsException] {
        ByteBuffers.getBytes(bb)
      }
      assertResult("5 > 0")(ioobe.getMessage)
    }
  }
}
| movicha/cosmos | cosmos-server/src/test/scala/com/mesosphere/cosmos/ByteBuffersSpec.scala | Scala | apache-2.0 | 1,217 |
package objsets
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TweetSetSuite extends FunSuite {

  /** Shared fixtures: incrementally built tweet sets reused by every test. */
  trait TestSets {
    val set1 = new Empty
    val set2 = set1.incl(new Tweet("a", "a body", 20))
    val set3 = set2.incl(new Tweet("b", "b body", 20))
    val c = new Tweet("c", "c body", 7)
    val d = new Tweet("d", "d body", 9)
    val set4c = set3.incl(c)
    val set4d = set3.incl(d)
    val set5 = set4c.incl(d)
  }

  /** Materialises a TweetSet into a standard immutable Set. */
  def asSet(tweets: TweetSet): Set[Tweet] = {
    val collected = Set.newBuilder[Tweet]
    tweets.foreach(collected += _)
    collected.result()
  }

  /** Number of distinct tweets in the given set. */
  def size(set: TweetSet): Int = asSet(set).size

  test("filter: on empty set") {
    new TestSets {
      assert(size(set1.filter(_.user == "a")) === 0)
    }
  }

  test("filter: a on set5") {
    new TestSets {
      assert(size(set5.filter(_.user == "a")) === 1)
    }
  }

  test("filter: 20 on set5") {
    new TestSets {
      assert(size(set5.filter(_.retweets == 20)) === 2)
    }
  }

  test("union: set4c and set4d") {
    new TestSets {
      assert(size(set4c.union(set4d)) === 4)
    }
  }

  test("union: with empty set (1)") {
    new TestSets {
      assert(size(set5.union(set1)) === 4)
    }
  }

  test("union: with empty set (2)") {
    new TestSets {
      assert(size(set1.union(set5)) === 4)
    }
  }

  test("max: set5") {
    new TestSets {
      // Both "a" and "b" carry the maximum retweet count (20).
      val top = set5.mostRetweeted
      assert(top.user == "a" || top.user == "b")
    }
  }

  test("descending: set5") {
    new TestSets {
      val ranked = set5.descendingByRetweet
      assert(!ranked.isEmpty)
      assert(ranked.head.user == "a" || ranked.head.user == "b")
    }
  }
}
| pacomendes/scala | assignments/w3/objsets/src/test/scala/objsets/TweetSetSuite.scala | Scala | mit | 1,720 |
import sbt._
import Keys._
import play.Project._
// sbt 0.13-era Build.scala definition for a Play (Java) application.
object ApplicationBuild extends Build {

  val appName = "jarme-ui"
  val appVersion = "1.0-SNAPSHOT"

  // Play's Java core, JDBC and Ebean plugins.
  val appDependencies = Seq(
    // Add your project dependencies here,
    javaCore,
    javaJdbc,
    javaEbean
  )

  val main = play.Project(appName, appVersion, appDependencies).settings(
    // Add your own project settings here
  )
}
| yroffin/jarme | jarme-ui/project/Build.scala | Scala | apache-2.0 | 414 |
package composition
import com.tzavellas.sse.guice.ScalaModule
import pdf.PdfService
import pdf.PdfServiceImpl
/** Guice module: binds PdfService to PdfServiceImpl as an eager singleton. */
final class PdfServiceBinding extends ScalaModule {
  def configure() = bind[PdfService].to[PdfServiceImpl].asEagerSingleton()
} | dvla/vrm-retention-online | app/composition/PdfServiceBinding.scala | Scala | mit | 242 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.command
import akka.actor.{Props, ActorRef, Actor}
import akka.pattern.ask
import akka.util.Timeout
import com.webtrends.harness.app.Harness
import scala.concurrent.duration._
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.logging.ActorLoggingAdapter
import scala.concurrent.{Promise, Future}
import scala.util.{Failure, Success}
// Actor-hosted variant of BaseCommandHelper: adds logging and reuses the
// hosting actor's ActorSystem instead of resolving the Harness global one.
trait CommandHelper extends ActorLoggingAdapter with BaseCommandHelper {
  this: Actor =>
  override lazy implicit val actorSystem = context.system
}
/**
 * A trait that you can add to any actor that will enable the actor to talk to the CommandManager easily
 * and execute commands at will
 *
 * @author Michael Cuthbert on 12/10/14.
 */
trait BaseCommandHelper {
  import scala.concurrent.ExecutionContext.Implicits.global

  lazy implicit val actorSystem = Harness.getActorSystem.get
  // NOTE(review): these vars are written from Future callbacks without
  // synchronization; presumably tolerable because re-resolving the manager is
  // idempotent — verify.
  var commandManagerInitialized = false
  var commandManager:Option[ActorRef] = None

  def initCommandHelper() = {
    addCommands
  }

  // Resolves (once) and caches the CommandManager actor reference; the
  // returned future completes with true on success.
  def initCommandManager : Future[Boolean] = {
    val p = Promise[Boolean]
    commandManagerInitialized match {
      case true => p success commandManagerInitialized
      case false =>
        actorSystem.actorSelection(HarnessConstants.CommandFullName).resolveOne()(2 seconds) onComplete {
          case Success(s) =>
            commandManagerInitialized = true
            commandManager = Some(s)
            p success commandManagerInitialized
          case Failure(f) => p failure CommandException("Component Manager", f)
        }
    }
    p.future
  }

  /**
   * This function should be implemented by any service that wants to add
   * any commands to make available for use
   */
  def addCommands() = {}

  /**
   * Wrapper that allows services to add commands to the command manager with a single command
   *
   * @param name name of the command you want to add
   * @param props the props for that command actor class
   * @param checkHealth whether the command should take part in health checks
   * @return future completed with the created command actor
   */
  def addCommandWithProps[T<:Command](name:String, props:Props, checkHealth: Boolean = false) : Future[ActorRef] = {
    implicit val timeout = Timeout(2 seconds)
    val p = Promise[ActorRef]
    initCommandManager onComplete {
      case Success(_) =>
        commandManager match {
          case Some(cm) =>
            (cm ? AddCommandWithProps(name, props, checkHealth)).mapTo[ActorRef] onComplete {
              case Success(r) => p success r
              case Failure(f) => p failure f
            }
          case None => p failure CommandException("CommandManager", "CommandManager not found!")
        }
      case Failure(f) => p failure f
    }
    p.future
  }

  /**
   * Wrapper that allows services to add commands to the command manager with a single command
   *
   * @param name name of the command you want to add
   * @param actorClass the class for the actor
   * @param checkHealth whether the command should take part in health checks
   */
  def addCommand[T<:Command](name:String, actorClass:Class[T], checkHealth: Boolean = false) : Future[ActorRef] = {
    implicit val timeout = Timeout(2 seconds)
    val p = Promise[ActorRef]
    initCommandManager onComplete {
      case Success(_) =>
        commandManager match {
          case Some(cm) =>
            (cm ? AddCommand(name, actorClass, checkHealth)).mapTo[ActorRef] onComplete {
              case Success(r) => p success r
              case Failure(f) => p failure f
            }
          case None => p failure CommandException("CommandManager", "CommandManager not found!")
        }
      case Failure(f) => p failure f
    }
    p.future
  }

  /**
   * Wrapper that allows services to execute commands (remote or otherwise)
   *
   * @param name name of the command you want to execute
   *             if this is a remote command the name will be the reference to the
   *             command
   * @param bean the bean that will be passed to the command
   * @param server If none then we are executing a local command, if set then it is a remote command and that is the server name
   * @param port The port of the remote server defaults to 2552, as by default this function deals with local commands
   * @return
   */
  def executeCommand[T:Manifest](name:String, bean:Option[CommandBean]=None, server:Option[String]=None,
      port:Int=2552)(implicit timeout:Timeout) : Future[BaseCommandResponse[T]] = {
    val p = Promise[BaseCommandResponse[T]]
    initCommandManager onComplete {
      case Success(_) =>
        commandManager match {
          case Some(cm) =>
            // A server name selects the remote execution path; otherwise local.
            val msg = server match {
              case Some(srv) => ExecuteRemoteCommand(name, srv, port, bean, timeout)
              case None => ExecuteCommand(name, bean, timeout)
            }
            (cm ? msg)(timeout).mapTo[BaseCommandResponse[T]] onComplete {
              case Success(s) => p success s
              case Failure(f) => p failure CommandException("CommandManager", f)
            }
          case None => p failure CommandException("CommandManager", "CommandManager not found!")
        }
      case Failure(f) => p failure f
    }
    p.future
  }
}
| Webtrends/wookiee | wookiee-core/src/main/scala/com/webtrends/harness/command/CommandHelper.scala | Scala | apache-2.0 | 5,804 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.apollo.util.path
/**
 * Represents a filter which only operates on a path
 *
 * @version $Revision: 1.3 $
 */
// NOTE(review): `abstract` is redundant on a trait; left as-is.
abstract trait PathFilter {
  /** @return true when the given path satisfies this filter. */
  def matches(path: Path): Boolean
} | chirino/activemq-apollo | apollo-util/src/main/scala/org/apache/activemq/apollo/util/path/PathFilter.scala | Scala | apache-2.0 | 1,026 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.json
import java.io.InputStream
import java.nio.charset.Charset
/** JSON parsing facade: each overload returns the parsed document root. */
trait JsonParser {
  // Parses raw bytes decoded with the given charset.
  def parse(bytes: Array[Byte], charset: Charset): Object
  def parse(string: String): Object
  // Parses the stream contents decoded with the given charset.
  def parse(stream: InputStream, charset: Charset): Object
}
| wiacekm/gatling | gatling-core/src/main/scala/io/gatling/core/json/JsonParser.scala | Scala | apache-2.0 | 883 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.tools.data
import com.beust.jcommander.{JCommander, Parameter, Parameters}
import org.apache.accumulo.core.client.TableNotFoundException
import org.locationtech.geomesa.accumulo.data.AccumuloDataStore
import org.locationtech.geomesa.accumulo.index.AccumuloFeatureIndex
import org.locationtech.geomesa.accumulo.tools.{AccumuloDataStoreCommand, AccumuloDataStoreParams}
import org.locationtech.geomesa.tools.{Command, CommandWithSubCommands, RequiredTypeNameParam, Runner}
import org.locationtech.geomesa.utils.index.IndexMode
import scala.collection.JavaConversions._
/** Parent "config-table" command: dispatches to the list/describe/update sub-commands. */
class TableConfCommand(val runner: Runner, val jc: JCommander) extends CommandWithSubCommands {
  import TableConfCommand._
  override val name = "config-table"
  override val params = new TableConfParams()
  override val subCommands: Seq[Command] =
    Seq(new TableConfListCommand, new TableConfDescribeCommand, new TableConfUpdateCommand)
}
/** Lists every Accumulo configuration property of the selected index table. */
class TableConfListCommand extends AccumuloDataStoreCommand {
  import TableConfCommand._
  override val name = "list"
  override val params = new ListParams

  override def execute(): Unit = {
    Command.user.info(s"Getting configuration parameters for table: ${params.tableSuffix}")
    // Sorted by property key for stable, readable output.
    withDataStore((ds) => getProperties(ds, params).toSeq.sortBy(_.getKey).foreach(p => Command.output.info(p.toString)))
  }
}
/** Prints the current value of a single table configuration property. */
class TableConfDescribeCommand extends AccumuloDataStoreCommand {
  import TableConfCommand._
  override val name = "describe"
  override val params = new DescribeParams

  override def execute(): Unit = {
    Command.user.info(s"Finding the value for '${params.param}' on table: ${params.tableSuffix}")
    withDataStore((ds) => Command.output.info(getProp(ds, params).toString))
  }
}
/** Sets a table property, skipping the write when it already holds the target value. */
class TableConfUpdateCommand extends AccumuloDataStoreCommand {
  import TableConfCommand._
  override val name = "update"
  override val params = new UpdateParams

  override def execute(): Unit = {
    val param = params.param
    val newValue = params.newValue
    val tableName = params.tableSuffix

    withDataStore { (ds) =>
      // Read the current value first so the user sees before/after.
      val property = getProp(ds, params)
      Command.user.info(s"'$param' on table '$tableName' currently set to: \n$property")

      if (newValue != property.getValue) {
        Command.user.info(s"Attempting to update '$param' to '$newValue'...")
        // setValue re-reads the property, so this reports what Accumulo accepted.
        val updatedValue = setValue(ds, params)
        Command.user.info(s"'$param' on table '$tableName' is now set to: \n$updatedValue")
      } else {
        Command.user.info(s"'$param' already set to '$newValue'. No need to update.")
      }
    }
  }
}
object TableConfCommand {

  // Looks up a single property by key; fails when the table does not define it.
  def getProp(ds: AccumuloDataStore, params: DescribeParams) = getProperties(ds, params).find(_.getKey == params.param).getOrElse {
    throw new Exception(s"Parameter '${params.param}' not found in table: ${params.tableSuffix}")
  }

  // Writes the property, then re-reads it so callers see the stored value.
  def setValue(ds: AccumuloDataStore, params: UpdateParams) =
    try {
      ds.connector.tableOperations.setProperty(getTableName(ds, params), params.param, params.newValue)
      getProp(ds, params)
    } catch {
      case e: Exception =>
        throw new Exception("Error updating the table property: " + e.getMessage, e)
    }

  // All properties of the resolved table; wraps TableNotFoundException with context.
  def getProperties(ds: AccumuloDataStore, params: ListParams) =
    try {
      ds.connector.tableOperations.getProperties(getTableName(ds, params))
    } catch {
      case tnfe: TableNotFoundException =>
        throw new Exception(s"Error: table ${params.tableSuffix} could not be found: " + tnfe.getMessage, tnfe)
    }

  // Maps the user-facing suffix (attr_idx, st_idx, records) onto the concrete
  // Accumulo table name for the feature type.
  def getTableName(ds: AccumuloDataStore, params: ListParams) =
    AccumuloFeatureIndex.indices(ds.getSchema(params.featureName), IndexMode.Any)
      .find(_.name == params.tableSuffix)
      .map(_.getTableName(params.featureName, ds))
      .getOrElse(throw new Exception(s"Invalid table suffix: ${params.tableSuffix}"))

  @Parameters(commandDescription = "Perform table configuration operations")
  class TableConfParams {}

  @Parameters(commandDescription = "List the configuration parameters for a geomesa table")
  class ListParams extends AccumuloDataStoreParams with RequiredTypeNameParam {
    @Parameter(names = Array("-t", "--table-suffix"), description = "Table suffix to operate on (attr_idx, st_idx, or records)", required = true)
    var tableSuffix: String = null
  }

  @Parameters(commandDescription = "Describe a given configuration parameter for a table")
  class DescribeParams extends ListParams {
    @Parameter(names = Array("-P", "--param"), description = "Accumulo table configuration param name (e.g. table.bloom.enabled)", required = true)
    var param: String = null
  }

  @Parameters(commandDescription = "Update a given table configuration parameter")
  class UpdateParams extends DescribeParams {
    @Parameter(names = Array("-n", "--new-value"), description = "New value of the property", required = true)
    var newValue: String = null
  }
}
| ronq/geomesa | geomesa-accumulo/geomesa-accumulo-tools/src/main/scala/org/locationtech/geomesa/accumulo/tools/data/TableConfCommand.scala | Scala | apache-2.0 | 5,397 |
package chap5
// Exercise 4: forAll on the custom chap5 Stream.
object Exe4 extends App {
  import Stream._

  // Finite stream: forAll inspects every element and returns true.
  assert(Stream(1, 2, 3).forAll(_ < 4))

  // Self-referential infinite stream 1, 2, 3, ...; forAll must short-circuit
  // on the first counterexample (4) instead of diverging — relies on the
  // laziness of cons's tail.
  val stream: Stream[Int] = cons(1, stream.map(_ + 1))
  assert(!stream.forAll(_ < 4))
}
| ponkotuy/FPScala | src/main/scala/chap5/Exe4.scala | Scala | unlicense | 188 |
import xfp.trees._
import collection.mutable.{HashSet, Set}
/**
* Generates all possible rewritings of an expression given by the input file.
*
* The algorithm works as follows:
* 0) Add the expression to the worklist
* 1) For all expressions e in the worklist:
* - For all nodes n in the expression e:
* - apply all possible rewrite rules to node n,
* generating new expressions
* - if a new expression is not yet in the worklist, add it
* to a set 'newExprs', and set 'changed' to true
* 2) Add all expressions in newExprs to worklist
* 3) If changed is true, repeat from step 1
*
* Takes as input the file with the expression and optionally the size the
* worklist can maximally grow to before giving up (defualt: 10000).
* If the algorithm does not finish, 100 randomly selected expressions are
* printed to the file random100.txt in addition to the generated.txt
*/
object ExprEnumeration {
// When true, prints worklist progress on every fixed-point step.
val debug = false
// all expressions seen so far
var worklist = new HashSet[CExpr]()
// Upper bound on worklist growth before the search aborts (overridable via args(1)).
var maxWorklistSize = 10000000
/** Entry point: args(0) = expression file, optional args(1) = worklist cap. */
def main(args: Array[String]) {
  if (args.size == 0) { println("Please specify the input file with the expression"); return }
  val origExpr = ExprParser.parseFile(args(0)).asInstanceOf[CExpr]
  if (origExpr == null) { println("Parse error."); return }
  // Normalise '-' and '/' into +/neg and */inv so the rewrite rules only have
  // to handle four node kinds (see translate).
  val original = translate(origExpr)
  if (args.size > 1) maxWorklistSize = args(1).toInt

  worklist += original
  var step = 0
  var changed = true
  // Fixed-point loop implementing steps 1-3 of the algorithm in the header:
  // rewrite every node of every known expression until nothing new appears or
  // the worklist cap is reached.
  while (changed && worklist.size < maxWorklistSize) {
    if (debug) println("\n\nstep " + step + ". worklist: " + worklist.size)
    var newExprs = new HashSet[CExpr]()
    changed = false
    for (expr <- worklist) {
      var allSubtrees = allNodes(expr)
      for (node <- allSubtrees) {
        var newNodes = applyRules(node)
        for (newNode <- newNodes) {
          // Substitute the rewritten node back into the full expression.
          val tmp = findAndReplace(expr, node, newNode)
          if (!worklist.contains(tmp)) {
            newExprs += tmp
            changed = true
          }
        }
      }
    }
    if (debug) println("new expressions: " + newExprs)
    if (newExprs.size > 0) {
      assert(changed)
      worklist ++= newExprs
    }
    step += 1
  }

  // Now cleanup all the duplicates
  // Canonicalise (sort commutative operands, restore '-') before deduplicating.
  val finished = worklist.map(e => cleanUp(orderLex(e)._1))
  println("Generated expressions: " + finished.size)
  var index = 0
  /*val map = Map("x0" -> "", "x1" -> "")*/
  outputToFile("generated.txt", finished)

  if (worklist.size >= maxWorklistSize) {
    println("WARNING: did not finish!")
    // Capped run: also dump a random sample for downstream experiments.
    outputToFile("random900.txt", util.Random.shuffle(finished.toList).slice(0, 900))
  }
}
/**
 * Writes each expression to the file `name`, one per line, each preceded by an
 * "//expr <index>" marker line.
 */
def outputToFile(name: String, set: Iterable[CExpr]) = {
  var index = 0
  val out = new java.io.FileWriter(name)
  // try/finally ensures the file handle is released even if a write throws;
  // the original leaked the FileWriter on failure.
  try {
    for (e <- set) {
      out.write("//expr " + index + "\n")
      out.write(e.toString() + "\n")
      index += 1
    }
  } finally {
    out.close
  }
}
// bottom-up
/**
 * Returns the expression with commutative operands (add/mult) sorted
 * lexicographically, paired with the string key used for the comparison.
 * The key lets structurally-equal-up-to-commutativity expressions collide,
 * which is how duplicates are eliminated in main.
 */
def orderLex(e: CExpr): (CExpr, String) = e match {
  case CAdd(a, b) =>
    val (aExp, aStr) = orderLex(a)
    val (bExp, bStr) = orderLex(b)
    if (bStr.compareTo(aStr) < 0) (CAdd(bExp, aExp), bStr + aStr)
    else (CAdd(aExp, bExp), aStr + bStr)
  case CMult(a, b) =>
    val (aExp, aStr) = orderLex(a)
    val (bExp, bStr) = orderLex(b)
    if (bStr.compareTo(aStr) < 0) (CMult(bExp, aExp), bStr + aStr)
    else (CMult(aExp, bExp), aStr + bStr)
  case CNeg(a) =>
    val (aExp, aStr) = orderLex(a)
    (CNeg(aExp), aStr)
  case CInv(a) =>
    val (aExp, aStr) = orderLex(a)
    (CInv(aExp), aStr)
  case CDoubleConst(a) => (e, a.toString)
  case CVar(n) => (e, n)
  // `return null` replaced with a plain null result: identical behavior at
  // tail position, and avoids Scala's discouraged `return` keyword.
  case _ => null
}
/** Rewrites the internal `a + (-b)` form back into `a - b`, bottom-up. */
def cleanUp(e: CExpr): CExpr = e match {
  case CAdd(a, CNeg(b)) => CSub(cleanUp(a), cleanUp(b))
  case CAdd(a, b) => CAdd(cleanUp(a), cleanUp(b))
  case CMult(a, b) => CMult(cleanUp(a), cleanUp(b))
  case CInv(a) => CInv(cleanUp(a))
  case CNeg(a) => CNeg(cleanUp(a))
  case CDoubleConst(d) => CDoubleConst(d)
  case CVar(n) => CVar(n)
  // Plain null instead of `return null` — same behavior, idiomatic Scala.
  case _ => null
}
def translate(e: CExpr): CExpr = e match {
case CSub(a, b) => CAdd(translate(a), CNeg(translate(b)))
case CAdd(a, b) => CAdd(translate(a), translate(b))
case CMult(a, b) => CMult(translate(a), translate(b))
case CDiv(a, b) => CMult(translate(a), CInv(translate(b)))
case CInv(a) => CInv(translate(a))
case CNeg(a) => CNeg(translate(a))
case CDoubleConst(d) => CDoubleConst(d)
case CVar(n) => CVar(n)
case _ => return null;
}
def allNodes(tree: CExpr): Set[CExpr] = tree match {
case CNeg(a) => Set(tree) ++ allNodes(a)
case CAdd(a, b) => Set(tree) ++ allNodes(a) ++ allNodes(b)
case CMult(a, b) => Set(tree) ++ allNodes(a) ++ allNodes(b)
case CInv(a) => Set(tree) ++ allNodes(a)
case CDoubleConst(a) => Set(tree)
case CVar(a) => Set(tree)
case _ => return null;
}
  // Rewrites the ROOT of `expr` one step in every algebraically equivalent
  // way: pushing negation/inverse through products, sums and inverses,
  // re-associating and commuting + and *, and (un)distributing * over +.
  // Callers apply this to every subtree (via allNodes/findAndReplace) to
  // enumerate equivalent expressions.  Returns the empty set when no rule
  // matches.
  def applyRules(expr: CExpr): Set[CExpr] = {
    val tmp: Set[CExpr] = expr match {
      case CNeg(CMult(a, b)) => Set( CMult(CNeg(a), b), CMult(a, CNeg(b)) )
      case CNeg(CAdd(a, b)) => Set( CAdd(CNeg(a), CNeg(b)))
      case CNeg(CInv(a)) => Set(CInv(CNeg(a)))
      case CAdd(CAdd(a, b), CAdd(c, d)) =>
        Set( CAdd(a, CAdd(b, CAdd(c, d))), CAdd(CAdd(CAdd(a, b), c), d),
          CAdd(CAdd(c, d), CAdd(a, b)) )
      case CAdd(CAdd(a, b), c) => Set( CAdd(a, CAdd(b, c)), CAdd(c, CAdd(a, b)) )
      case CAdd(a, CAdd(b, c)) => Set( CAdd(CAdd(a, b), c), CAdd(CAdd(b, c), a) )
      case CAdd(CMult(a, b), CMult(c, d)) =>
        // Factor out a common operand wherever one exists, and always add
        // the commuted sum.  NOTE(review): this branch's value is the
        // mutable HashSet itself (`+=` returns it), so the `Set`/`HashSet`
        // imported at the top of this file must be the mutable flavour for
        // the `Set[CExpr]` ascription above to hold — confirm the imports.
        var list = new HashSet[CExpr]()
        if (a == c) list += CMult(a, CAdd(b, d))
        if (b == d) list += CMult(CAdd(a, c), b)
        if (a == d) list += CMult(a, CAdd(b, c))
        if (b == c) list += CMult(CAdd(a, d), b)
        list += CAdd(CMult(c, d), CMult(a, b))
      case CAdd(CNeg(a), CNeg(b)) => Set( CNeg(CAdd(a, b)), CAdd(CNeg(b), CNeg(a)))
      case CAdd(a, b) => Set(CAdd(b, a))
      case CMult(CMult(a, b), c) => Set(CMult(a, CMult(b, c)), CMult(c, CMult(a, b)))
      case CMult(a, CMult(b, c)) => Set(CMult(CMult(a, b), c), CMult(CMult(b, c), a))
      case CMult(a, CAdd(b, c)) => Set(CAdd(CMult(a, b), CMult(a, c)), CMult(CAdd(b, c), a))
      case CMult(CAdd(a, b), c) => Set(CAdd(CMult(a, c), CMult(b, c)), CMult(c, CAdd(a, b)))
      case CMult(CNeg(a), b) => Set(CNeg(CMult(a, b)), CMult(b, CNeg(a)))
      case CMult(a, CNeg(b)) => Set(CNeg(CMult(a, b)), CMult(CNeg(b), a))
      case CMult(CInv(a), CInv(b)) => Set(CInv(CMult(a, b)), CMult(CInv(b), CInv(a)))
      case CMult(a, b) => Set(CMult(b, a))
      case CInv(CNeg(a)) => Set(CNeg(CInv(a)))
      case CInv(CMult(a, b)) => Set(CMult(CInv(a), CInv(b)))
      case _ => Set( )
    }
    //for (e <- tmp) if (e.getClass == expr.getClass) markDone(e)
    tmp
  }
// FIXME: if we have duplicates, we need to replace all!
def findAndReplace(tree: CExpr, oldNode: CExpr, newNode: CExpr): CExpr =
if (tree == oldNode) return newNode
else
tree match {
case CAdd(l, r) =>
if (l == oldNode) CAdd(newNode, r)
else if (r == oldNode) CAdd(l, newNode)
else CAdd(findAndReplace(l, oldNode, newNode), findAndReplace(r, oldNode, newNode))
case CMult(l, r) =>
if (l == oldNode) CMult(newNode, r)
else if (r == oldNode) CMult(l, newNode)
else CMult(findAndReplace(l, oldNode, newNode), findAndReplace(r, oldNode, newNode))
case CNeg(e) =>
if (e == oldNode) CNeg(newNode)
else CNeg(findAndReplace(e, oldNode, newNode))
case CInv(e) =>
if (e == oldNode) CInv(newNode)
else CInv(findAndReplace(e, oldNode, newNode))
case c: CDoubleConst => c
case v: CVar => v
case _ => return null;
}
}
| malyzajko/xfp | analysis_tool/src/xfp/ExprEnumeration.scala | Scala | bsd-3-clause | 8,008 |
package org.jetbrains.plugins.scala.codeInsight.intention.comprehension
import com.intellij.codeInsight.intention.PsiElementBaseIntentionAction
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi.{PsiElement, TokenType}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScForStatement
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.util.IntentionAvailabilityChecker
/**
* Pavel Fatin
*/
class ConvertToParenthesesIntention extends PsiElementBaseIntentionAction {
  // Family name under which this intention is grouped in the IDE settings.
  def getFamilyName = "Convert to parentheses"
  // Popup text shown to the user; identical to the family name here.
  override def getText = getFamilyName
def isAvailable(project: Project, editor: Editor, element: PsiElement) = {
element match {
case e @ Parent(_: ScForStatement) =>
List(ScalaTokenTypes.tLBRACE, ScalaTokenTypes.tRBRACE).contains(e.getNode.getElementType) &&
IntentionAvailabilityChecker.checkIntention(this, element)
case _ => false
}
}
  // Converts `for { ... }` into `for ( ... )`:
  //  1. parse "(_)" once to obtain '(' and ')' PSI leaves,
  //  2. replace the for-statement's braces with those parentheses,
  //  3. turn newline separators between enumerators into semicolons,
  //  4. delete the remaining newline whitespace inside the statement.
  // The steps are order-sensitive: braces must be swapped before the
  // whitespace passes run over the (now parenthesised) statement.
  override def invoke(project: Project, editor: Editor, element: PsiElement) {
    // isAvailable guarantees the parent is a ScForStatement.
    val statement = element.getParent.asInstanceOf[ScForStatement]
    val manager = statement.getManager
    val block = ScalaPsiElementFactory.parseElement("(_)", manager)
    for (lBrace <- Option(statement.findFirstChildByType(ScalaTokenTypes.tLBRACE))) {
      lBrace.replace(block.getFirstChild)
    }
    for (rBrace <- Option(statement.findFirstChildByType(ScalaTokenTypes.tRBRACE))) {
      rBrace.replace(block.getLastChild)
    }
    // Enumerators separated by newlines need explicit semicolons once the
    // body is parenthesised.
    for (enumerators <- statement.enumerators;
         cr <- enumerators.findChildrenByType(TokenType.WHITE_SPACE) if cr.getText.contains('\\n')) {
      cr.replace(ScalaPsiElementFactory.createSemicolon(manager))
    }
    for (cr <- statement.findChildrenByType(TokenType.WHITE_SPACE) if cr.getText.contains('\\n')) {
      cr.delete()
    }
  }
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/codeInsight/intention/comprehension/ConvertToParenthesesIntention.scala | Scala | apache-2.0 | 2,004 |
// Compiler "neg" test fixture (appears to correspond to dotty issue i803):
// every line marked `// error` must FAIL to compile, the others must pass.
// The dependent result type `b.type` refers to the default argument, which
// is rejected when the argument list is omitted entirely.
// Do not "fix" the error lines — they are intentional.
class Foo{
  val default = this
  def foo(a: Int)(b: Foo = default): b.type = b
  def bar(b: Foo = default): b.type = b
  val x: Foo = bar() // ok
  val x2: Foo = foo(1)() // ok
  val s: Foo = foo(1) // error
  val s2: default.type = foo(1) // error
}
| lampepfl/dotty | tests/neg/i803.scala | Scala | apache-2.0 | 254 |
package com.github.spirom.sparkflights.experiments
import com.github.spirom.sparkflights.fw.SQLExperiment
import org.apache.spark.sql.SQLContext
/** Experiment: the 10 airports with the most scheduled departures since 2000,
  * computed with Spark SQL over the registered `flights` table and written
  * out under `$outputBase/top_departures`.
  */
class TopAirportsByDeparturesSQL(sqlContext: SQLContext)
  extends SQLExperiment("TopAirportsByDeparturesSQL", sqlContext) {
  def runUserCode(sqlContext: SQLContext, outputBase: String): Unit = {
    //
    // The 10 airports with the highest absolute number of scheduled
    // departures since 2000
    //
    // NOTE(review): `year >= '2000'` compares strings, not integers — fine
    // while every year value is exactly four digits, but worth confirming.
    val topDepartures = sqlContext.sql(
      s"""
      | SELECT origin, count(*) AS total_departures
      | FROM flights
      | WHERE year >= '2000'
      | GROUP BY origin
      | ORDER BY total_departures DESC
      | LIMIT 10
      """.stripMargin)
    // Persist the result rows as text for later inspection.
    topDepartures.rdd.saveAsTextFile(s"$outputBase/top_departures")
  }
}
| spirom/SparkFlightExamples | src/main/scala/com/github/spirom/sparkflights/experiments/TopAirportsByDeparturesSQL.scala | Scala | mit | 804 |
package com.sksamuel.scapegoat.inspections
import com.sksamuel.scapegoat.PluginRunner
import org.scalatest.{FreeSpec, Matchers}
/** @author Stephen Samuel */
/** @author Stephen Samuel
  * Compiles the snippet below through the scapegoat plugin and expects the
  * ConstantIf inspection to flag exactly the four conditions that are
  * compile-time constants (1 < 2, 2 < 1, true, false); the substring and
  * currentTimeMillis comparisons are runtime values and must not warn.
  */
class ConstantIfTest extends FreeSpec with ASTSugar with Matchers with PluginRunner {
  override val inspections = Seq(new ConstantIf)
  "ConstantIf" - {
    "should report warning" in {
      // Snippet compiled by the plugin runner — its content drives the
      // expected warning count below; keep code and count in sync.
      val code = """object Test {
                  if (1 < 2) {
                    println("sammy")
                  }
                  if (2 < 1) {
                    println("sammy")
                  }
                  if ("sam" == "sam".substring(0)) println("sammy")
                  if (true) println("sammy")
                  if (false) println("sammy")
                  if (1 < System.currentTimeMillis()) println("sammy")
                } """.stripMargin
      compileCodeSnippet(code)
      compiler.scapegoat.reporter.warnings.size shouldBe 4
    }
  }
}
| RichardBradley/scapegoat | src/test/scala/com/sksamuel/scapegoat/inspections/ConstantIfTest.scala | Scala | apache-2.0 | 971 |
/*
* This software is licensed under the GNU Affero General Public License, quoted below.
*
* This file is a part of PowerAPI.
*
* Copyright (C) 2011-2014 Inria, University of Lille 1.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI.
*
* If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
*/
package org.powerapi.module
import akka.actor.ActorRef
import java.util.UUID
import org.powerapi.core.{Message, MessageBus, Channel}
import org.powerapi.core.ClockChannel.ClockTick
import org.powerapi.core.power._
import org.powerapi.core.target.Target
import scala.concurrent.duration.DurationInt
/**
* PowerChannel channel and messages.
*
* @author <a href="mailto:maxime.colmant@gmail.com">Maxime Colmant</a>
* @author <a href="mailto:l.huertas.pro@gmail.com">LoΓ―c Huertas</a>
* @author <a href="mailto:romain.rouvoy@univ-lille1.fr">Romain Rouvoy</a>
*/
object PowerChannel extends Channel {
  type M = PowerReport
  /**
   * Base trait for each power report.
   */
  trait PowerReport extends Message {
    // Monitor unique identifier the report belongs to.
    def muid: UUID
    // Clock tick that triggered the underlying measurement.
    def tick: ClockTick
  }
  /**
   * RawPowerReport is represented as a dedicated type of message.
   *
   * @param topic: subject used for routing the message.
   * @param muid: monitor unique identifier (MUID), which is at the origin of the report flow.
   * @param target: monitor target.
   * @param power: target's power consumption.
   * @param device: device targeted.
   * @param tick: tick origin.
   */
  case class RawPowerReport(topic: String,
                            muid: UUID,
                            target: Target,
                            power: Power,
                            device: String,
                            tick: ClockTick) extends PowerReport
  /**
   * AggregatePowerReport is represented as a dedicated type of message.
   * Accumulates raw reports via `+=` and folds their powers with
   * `aggFunction` when `power` is read.
   *
   * NOTE(review): `agg` is a lazy val, so the aggregate is frozen the first
   * time `power` is accessed; reports added afterwards are ignored by
   * `power` (but still visible to `targets`/`devices`/`tick`) — confirm
   * callers always finish accumulating before reading `power`.
   */
  case class AggregatePowerReport(muid: UUID, aggFunction: Seq[Power] => Power) extends PowerReport {
    // Mutable accumulator; not part of the case-class equality (only muid
    // and aggFunction are constructor parameters).
    private val reports = collection.mutable.Buffer[RawPowerReport]()
    private lazy val agg = aggFunction(for(report <- reports) yield report.power)
    // Number of raw reports accumulated so far.
    def size: Int = reports.size
    // Appends a raw report and returns this aggregate for chaining.
    def +=(report: RawPowerReport): AggregatePowerReport = {
      reports += report
      this
    }
    val topic: String = aggPowerReportTopic(muid)
    def targets: Set[Target] = (for(report <- reports) yield report.target).toSet
    def power: Power = agg
    def devices: Set[String] = (for(report <- reports) yield report.device).toSet
    // Tick of the most recently added report; a zero tick when empty.
    def tick: ClockTick = if(reports.nonEmpty) reports.last.tick else ClockTick("", 0.seconds)
  }
  /**
   * Publish a raw power report in the event bus.
   */
  def publishRawPowerReport(muid: UUID, target: Target, power: Power, device: String, tick: ClockTick): MessageBus => Unit = {
    publish(RawPowerReport(rawPowerReportMuid(muid), muid, target, power, device, tick))
  }
  /**
   * Publish an aggregated power report in the event bus.
   */
  def render(aggR: AggregatePowerReport): MessageBus => Unit = {
    publish(aggR)
  }
  /**
   * External methods used by the Reporter components for interacting with the bus.
   */
  def subscribeAggPowerReport(muid: UUID): MessageBus => ActorRef => Unit = {
    subscribe(aggPowerReportTopic(muid))
  }
  def unsubscribeAggPowerReport(muid: UUID): MessageBus => ActorRef => Unit = {
    unsubscribe(aggPowerReportTopic(muid))
  }
  /**
   * External method used by the MonitorChild actors for interacting with the bus.
   */
  def subscribeRawPowerReport(muid: UUID): MessageBus => ActorRef => Unit = {
    subscribe(rawPowerReportMuid(muid))
  }
  def unsubscribeRawPowerReport(muid: UUID): MessageBus => ActorRef => Unit = {
    unsubscribe(rawPowerReportMuid(muid))
  }
  /**
   * Use to format a MUID to an associated topic.
   */
  // Topic for raw per-device reports of a given monitor.
  private def rawPowerReportMuid(muid: UUID): String = {
    s"power:$muid"
  }
  // Topic for the aggregated report consumed by reporters.
  private def aggPowerReportTopic(muid: UUID): String = {
    s"reporter:$muid"
  }
}
| rouvoy/powerapi | powerapi-core/src/main/scala/org/powerapi/module/PowerChannel.scala | Scala | agpl-3.0 | 4,536 |
package tscfg.example
// Generated-style tscfg boilerplate (package tscfg.example, $-prefixed
// helpers): regenerate from the config spec rather than editing by hand.
// Maps an optional `intParams` list from a Typesafe Config object.
final case class ScalaIssue61Cfg(
  intParams : scala.Option[scala.List[scala.Int]]
)
object ScalaIssue61Cfg {
  // Builds the config case class, collecting all path/type errors in the
  // validator and throwing a single ConfigException at the end.
  def apply(c: com.typesafe.config.Config): ScalaIssue61Cfg = {
    val $tsCfgValidator: $TsCfgValidator = new $TsCfgValidator()
    val parentPath: java.lang.String = ""
    val $result = ScalaIssue61Cfg(
      intParams = if(c.hasPathOrNull("intParams")) scala.Some($_L$_int(c.getList("intParams"), parentPath, $tsCfgValidator)) else None
    )
    $tsCfgValidator.validate()
    $result
  }
  // Converts a ConfigList into a List[Int], failing per element on type mismatch.
  private def $_L$_int(cl:com.typesafe.config.ConfigList, parentPath: java.lang.String, $tsCfgValidator: $TsCfgValidator): scala.List[scala.Int] = {
    import scala.jdk.CollectionConverters._
    cl.asScala.map(cv => $_int(cv)).toList
  }
  // Builds a "expecting X got Y" runtime error for a mistyped config value.
  private def $_expE(cv:com.typesafe.config.ConfigValue, exp:java.lang.String) = {
    val u: Any = cv.unwrapped
    new java.lang.RuntimeException(s"${cv.origin.lineNumber}: " +
      "expecting: " + exp + " got: " +
      (if (u.isInstanceOf[java.lang.String]) "\\"" + u + "\\"" else u))
  }
  // Narrows a config value to Int, rejecting non-integer numbers.
  private def $_int(cv:com.typesafe.config.ConfigValue): scala.Int = {
    val u: Any = cv.unwrapped
    if ((cv.valueType != com.typesafe.config.ConfigValueType.NUMBER) ||
      !u.isInstanceOf[Integer]) throw $_expE(cv, "integer")
    u.asInstanceOf[Integer]
  }
  // Accumulates bad paths and reports them all at once from validate().
  final class $TsCfgValidator {
    private val badPaths = scala.collection.mutable.ArrayBuffer[java.lang.String]()
    def addBadPath(path: java.lang.String, e: com.typesafe.config.ConfigException): Unit = {
      badPaths += s"'$path': ${e.getClass.getName}(${e.getMessage})"
    }
    def addInvalidEnumValue(path: java.lang.String, value: java.lang.String, enumName: java.lang.String): Unit = {
      badPaths += s"'$path': invalid value $value for enumeration $enumName"
    }
    def validate(): Unit = {
      if (badPaths.nonEmpty) {
        throw new com.typesafe.config.ConfigException(
          badPaths.mkString("Invalid configuration:\\n ", "\\n ", "")
        ){}
      }
    }
  }
}
| carueda/tscfg | src/test/scala/tscfg/example/ScalaIssue61Cfg.scala | Scala | apache-2.0 | 2,034 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon.utils
import scala.collection.SeqView
import scala.collection.mutable.ArrayBuffer
object SeqUtils {
  type Tuple[T] = Seq[T]

  /** All combinations picking one element from each input sequence.
    * Combination i is obtained by decoding i in mixed radix with seqs(0)
    * as the least significant digit, so the FIRST sequence varies fastest.
    * An empty `seqs` yields a single empty combination.
    */
  def cartesianProduct[T](seqs: Tuple[Seq[T]]): Seq[Tuple[T]] = {
    val sizes = seqs.map(_.size)
    val total = sizes.product
    val combos = new ArrayBuffer[Tuple[T]](total)
    var idx = 0
    while (idx < total) {
      var remainder = idx
      val combo = sizes.zip(seqs).map { case (size, seq) =>
        val digit = remainder % size
        remainder = remainder / size
        seq(digit)
      }
      combos += combo
      idx += 1
    }
    combos
  }

  /** All ordered sequences of `arity` POSITIVE integers summing to `sum`;
    * empty when `sum < arity` (no room for a 1 in every slot).
    */
  def sumTo(sum: Int, arity: Int): Seq[Seq[Int]] = {
    require(arity >= 1)
    if (sum < arity) {
      Nil
    } else if (arity == 1) {
      Seq(Seq(sum))
    } else {
      for {
        first <- 1 until sum
        rest  <- sumTo(sum - first, arity - 1)
      } yield first +: rest
    }
  }

  /** All NONDECREASING sequences of `arity` positive integers summing to
    * `sum`.  The inner recursion permits zeros; each chosen minimum is
    * added back onto the tail, and all-zero-headed solutions are dropped
    * at the end so every entry is strictly positive.
    */
  def sumToOrdered(sum: Int, arity: Int): Seq[Seq[Int]] = {
    def go(remaining: Int, slots: Int): Seq[Seq[Int]] = {
      require(slots > 0)
      if (remaining < 0) Nil
      else if (slots == 1) Seq(Seq(remaining))
      else for {
        least <- 0 to remaining / slots
        tail  <- go(remaining - slots * least, slots - 1)
      } yield least +: tail.map(least + _)
    }
    go(sum, arity).filterNot(_.head == 0)
  }

  /** Splits `es` into maximal runs satisfying `p`; every element failing
    * `p` forms its own singleton group.  Input order is preserved.
    */
  def groupWhile[T](es: Seq[T])(p: T => Boolean): Seq[Seq[T]] = {
    var groups: Seq[Seq[T]] = Nil
    var remaining = es
    while (remaining.nonEmpty) {
      val (run, rest) = remaining.span(p)
      if (run.nonEmpty) {
        groups :+= run
        remaining = rest
      } else {
        // The head fails the predicate: emit it alone and continue.
        groups :+= Seq(rest.head)
        remaining = rest.tail
      }
    }
    groups
  }
}
/** Lazy view over the cartesian product of `views`.  Element i decodes i in
  * mixed radix with views(0) as the least significant digit, matching both
  * `apply` and `SeqUtils.cartesianProduct`.
  *
  * The previous `iterator` was broken: its carry loops iterated
  * `0 to streams.size` (one past the end of the array) and the advance loop
  * tailed EVERY stream on each call, stepping all coordinates at once and
  * producing a diagonal instead of the full product.  It is rewritten below
  * as a standard odometer that advances only the least significant position
  * and carries on wrap-around.
  */
class CartesianView[+A](views: Seq[SeqView[A, Seq[A]]]) extends SeqView[Seq[A], Seq[Seq[A]]] {
  override protected def underlying: Seq[Seq[A]] = SeqUtils.cartesianProduct(views)

  override def length: Int = views.map{ _.size }.product

  override def apply(idx: Int): Seq[A] = {
    if (idx < 0 || idx >= length) throw new IndexOutOfBoundsException
    var c = idx
    for (v <- views) yield {
      val ic = c % v.size
      c /= v.size
      v(ic)
    }
  }

  override def iterator: Iterator[Seq[A]] = new Iterator[Seq[A]] {
    // Streams memoize the underlying views so each element is forced once.
    private val streams = views.map { _.toStream }.toIndexedSeq
    // Odometer state: the remaining suffix of each stream.
    private val current = scala.collection.mutable.ArraySeq(streams: _*)
    // Any empty input view makes the whole product empty.
    private var exhausted = streams exists { _.isEmpty }

    override def hasNext: Boolean = !exhausted

    override def next(): Seq[A] = {
      if (!hasNext) throw new NoSuchElementException("next on empty iterator")
      val ret = current.map(_.head).toList
      // Advance the odometer: tail position 0; when a position empties,
      // reset it to its full stream and carry into the next position.
      var i = 0
      var carry = true
      while (carry && i < current.length) {
        current(i) = current(i).tail
        if (current(i).isEmpty) {
          current(i) = streams(i)
          i += 1
        } else {
          carry = false
        }
      }
      // Carrying past the last position means every combination was emitted.
      if (carry) exhausted = true
      ret
    }
  }
}
| epfl-lara/leon | src/main/scala/leon/utils/SeqUtils.scala | Scala | gpl-3.0 | 2,951 |
package gorillas.collection.mutable
import gorillas.collection.immutable.{ SortedArrayNavigableMap, NavigableMap }
import gorillas.util.PairSorting
import gorillas.collection.generic.KeyTransformation
import collection.{ GenTraversableOnce, mutable }
/**
* @author Ricardo Leon
* @param ordering self explanatory
* @param key2int self explanatory
* @tparam K map entry's key type
* @tparam V map entry's value type
*/
final class NavigableMapBuilder[K, V](implicit ordering: Ordering[K], key2int: KeyTransformation[K], keyManifest: ClassManifest[K], valueManifest: ClassManifest[V])
  extends AbstractNavigableMapBuilder[K, V] with mutable.Builder[(K, V), NavigableMap[K, V]] {
  // Upcasting to GenTraversableOnce changes the static argument type, so
  // this call dispatches to the inherited GenTraversableOnce overload
  // (presumably defined in AbstractNavigableMapBuilder) instead of
  // recursing into this override — NOTE(review): confirm that overload
  // exists; without it this would be infinite recursion.
  override def ++=(xs: TraversableOnce[(K, V)]): this.type =
    ++=(xs.asInstanceOf[GenTraversableOnce[(K, V)]])
  // Resets the parallel key/value buffers inherited from the superclass.
  override def clear() {
    keys.clear()
    values.clear()
  }
  // Forwards the size hint to both underlying buffers.
  override def sizeHint(size: Int) {
    keys.sizeHint(size)
    values.sizeHint(size)
  }
  // Sorts the accumulated key/value pairs (kept in two parallel arrays,
  // sorted together by PairSorting) and wraps them in an immutable map.
  // Empty and single-entry cases use the dedicated cheap representations.
  override def result(): NavigableMap[K, V] = {
    keys.size match {
      case 0 => NavigableMap.empty[K, V]
      case 1 => NavigableMap.single((keys(0), values(0)))
      case _ =>
        val keysArray = keys.toArray
        val valuesArray = values.toArray
        PairSorting.mergeSort(keysArray, valuesArray)
        new SortedArrayNavigableMap[K, V](keysArray, valuesArray)
    }
  }
}
| rmleon/GorillasCollection | maps/src/main/scala/gorillas/collection/mutable/NavigableMapBuilder.scala | Scala | bsd-3-clause | 1,355 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
// Unit tests for the Catalyst DataType hierarchy: construction, field
// lookup, schema merging, JSON round-trips, default sizes, nullability
// compatibility and catalogString/DDL round-trips.
class DataTypeSuite extends SparkFunSuite {
  // ---- Construction and field access ----
  test("construct an ArrayType") {
    val array = ArrayType(StringType)
    assert(ArrayType(StringType, true) === array)
  }
  test("construct an MapType") {
    val map = MapType(StringType, IntegerType)
    assert(MapType(StringType, IntegerType, true) === map)
  }
  test("construct with add") {
    val struct = (new StructType)
      .add("a", IntegerType, true)
      .add("b", LongType, false)
      .add("c", StringType, true)
    assert(StructField("b", LongType, false) === struct("b"))
  }
  test("construct with add from StructField") {
    // Test creation from StructField type
    val struct = (new StructType)
      .add(StructField("a", IntegerType, true))
      .add(StructField("b", LongType, false))
      .add(StructField("c", StringType, true))
    assert(StructField("b", LongType, false) === struct("b"))
  }
  test("construct with add from StructField with comments") {
    // Test creation from StructField using four different ways
    val struct = (new StructType)
      .add("a", "int", true, "test1")
      .add("b", StringType, true, "test3")
      .add(StructField("c", LongType, false).withComment("test4"))
      .add(StructField("d", LongType))
    assert(StructField("a", IntegerType, true).withComment("test1") == struct("a"))
    assert(StructField("b", StringType, true).withComment("test3") == struct("b"))
    assert(StructField("c", LongType, false).withComment("test4") == struct("c"))
    assert(StructField("d", LongType) == struct("d"))
    assert(struct("c").getComment() == Option("test4"))
    assert(struct("d").getComment().isEmpty)
  }
  test("construct with String DataType") {
    // Test creation with DataType as String
    val struct = (new StructType)
      .add("a", "int", true)
      .add("b", "long", false)
      .add("c", "string", true)
    assert(StructField("a", IntegerType, true) === struct("a"))
    assert(StructField("b", LongType, false) === struct("b"))
    assert(StructField("c", StringType, true) === struct("c"))
  }
  test("extract fields from a StructType") {
    val struct = StructType(
      StructField("a", IntegerType, true) ::
        StructField("b", LongType, false) ::
        StructField("c", StringType, true) ::
        StructField("d", FloatType, true) :: Nil)
    assert(StructField("b", LongType, false) === struct("b"))
    // Unknown field names must be rejected.
    intercept[IllegalArgumentException] {
      struct("e")
    }
    val expectedStruct = StructType(
      StructField("b", LongType, false) ::
        StructField("d", FloatType, true) :: Nil)
    assert(expectedStruct === struct(Set("b", "d")))
    intercept[IllegalArgumentException] {
      struct(Set("b", "d", "e", "f"))
    }
  }
  test("extract field index from a StructType") {
    val struct = StructType(
      StructField("a", LongType) ::
        StructField("b", FloatType) :: Nil)
    assert(struct.fieldIndex("a") === 0)
    assert(struct.fieldIndex("b") === 1)
    intercept[IllegalArgumentException] {
      struct.fieldIndex("non_existent")
    }
  }
  test("fieldsMap returns map of name to StructField") {
    val struct = StructType(
      StructField("a", LongType) ::
        StructField("b", FloatType) :: Nil)
    val mapped = StructType.fieldsMap(struct.fields)
    val expected = Map(
      "a" -> StructField("a", LongType),
      "b" -> StructField("b", FloatType))
    assert(mapped === expected)
  }
  // ---- Schema merging: fields present on only one side are marked
  // optional via metadataKeyForOptionalField ----
  test("merge where right is empty") {
    val left = StructType(
      StructField("a", LongType) ::
        StructField("b", FloatType) :: Nil)
    val right = StructType(List())
    val merged = left.merge(right)
    assert(DataType.equalsIgnoreCompatibleNullability(merged, left))
    assert(merged("a").metadata.getBoolean(StructType.metadataKeyForOptionalField))
    assert(merged("b").metadata.getBoolean(StructType.metadataKeyForOptionalField))
  }
  test("merge where left is empty") {
    val left = StructType(List())
    val right = StructType(
      StructField("a", LongType) ::
        StructField("b", FloatType) :: Nil)
    val merged = left.merge(right)
    assert(DataType.equalsIgnoreCompatibleNullability(merged, right))
    assert(merged("a").metadata.getBoolean(StructType.metadataKeyForOptionalField))
    assert(merged("b").metadata.getBoolean(StructType.metadataKeyForOptionalField))
  }
  test("merge where both are non-empty") {
    val left = StructType(
      StructField("a", LongType) ::
        StructField("b", FloatType) :: Nil)
    val right = StructType(
      StructField("c", LongType) :: Nil)
    val expected = StructType(
      StructField("a", LongType) ::
        StructField("b", FloatType) ::
        StructField("c", LongType) :: Nil)
    val merged = left.merge(right)
    assert(DataType.equalsIgnoreCompatibleNullability(merged, expected))
    assert(merged("a").metadata.getBoolean(StructType.metadataKeyForOptionalField))
    assert(merged("b").metadata.getBoolean(StructType.metadataKeyForOptionalField))
    assert(merged("c").metadata.getBoolean(StructType.metadataKeyForOptionalField))
  }
  test("merge where right contains type conflict") {
    val left = StructType(
      StructField("a", LongType) ::
        StructField("b", FloatType) :: Nil)
    val right = StructType(
      StructField("b", LongType) :: Nil)
    // Same field name with an incompatible type must fail the merge.
    intercept[SparkException] {
      left.merge(right)
    }
  }
  test("existsRecursively") {
    val struct = StructType(
      StructField("a", LongType) ::
        StructField("b", FloatType) :: Nil)
    assert(struct.existsRecursively(_.isInstanceOf[LongType]))
    assert(struct.existsRecursively(_.isInstanceOf[StructType]))
    assert(!struct.existsRecursively(_.isInstanceOf[IntegerType]))
    val mapType = MapType(struct, StringType)
    assert(mapType.existsRecursively(_.isInstanceOf[LongType]))
    assert(mapType.existsRecursively(_.isInstanceOf[StructType]))
    assert(mapType.existsRecursively(_.isInstanceOf[StringType]))
    assert(mapType.existsRecursively(_.isInstanceOf[MapType]))
    assert(!mapType.existsRecursively(_.isInstanceOf[IntegerType]))
    val arrayType = ArrayType(mapType)
    assert(arrayType.existsRecursively(_.isInstanceOf[LongType]))
    assert(arrayType.existsRecursively(_.isInstanceOf[StructType]))
    assert(arrayType.existsRecursively(_.isInstanceOf[StringType]))
    assert(arrayType.existsRecursively(_.isInstanceOf[MapType]))
    assert(arrayType.existsRecursively(_.isInstanceOf[ArrayType]))
    assert(!arrayType.existsRecursively(_.isInstanceOf[IntegerType]))
  }
  // ---- JSON round-trips: each registered type must survive
  // DataType.fromJson(dataType.json) ----
  def checkDataTypeJsonRepr(dataType: DataType): Unit = {
    test(s"JSON - $dataType") {
      assert(DataType.fromJson(dataType.json) === dataType)
    }
  }
  checkDataTypeJsonRepr(NullType)
  checkDataTypeJsonRepr(BooleanType)
  checkDataTypeJsonRepr(ByteType)
  checkDataTypeJsonRepr(ShortType)
  checkDataTypeJsonRepr(IntegerType)
  checkDataTypeJsonRepr(LongType)
  checkDataTypeJsonRepr(FloatType)
  checkDataTypeJsonRepr(DoubleType)
  checkDataTypeJsonRepr(DecimalType(10, 5))
  checkDataTypeJsonRepr(DecimalType.SYSTEM_DEFAULT)
  checkDataTypeJsonRepr(DateType)
  checkDataTypeJsonRepr(TimestampType)
  checkDataTypeJsonRepr(StringType)
  checkDataTypeJsonRepr(BinaryType)
  checkDataTypeJsonRepr(ArrayType(DoubleType, true))
  checkDataTypeJsonRepr(ArrayType(StringType, false))
  checkDataTypeJsonRepr(MapType(IntegerType, StringType, true))
  checkDataTypeJsonRepr(MapType(IntegerType, ArrayType(DoubleType), false))
  val metadata = new MetadataBuilder()
    .putString("name", "age")
    .build()
  val structType = StructType(Seq(
    StructField("a", IntegerType, nullable = true),
    StructField("b", ArrayType(DoubleType), nullable = false),
    StructField("c", DoubleType, nullable = false, metadata)))
  checkDataTypeJsonRepr(structType)
  // ---- defaultSize: expected size-in-bytes estimates per type ----
  def checkDefaultSize(dataType: DataType, expectedDefaultSize: Int): Unit = {
    test(s"Check the default size of $dataType") {
      assert(dataType.defaultSize === expectedDefaultSize)
    }
  }
  checkDefaultSize(NullType, 1)
  checkDefaultSize(BooleanType, 1)
  checkDefaultSize(ByteType, 1)
  checkDefaultSize(ShortType, 2)
  checkDefaultSize(IntegerType, 4)
  checkDefaultSize(LongType, 8)
  checkDefaultSize(FloatType, 4)
  checkDefaultSize(DoubleType, 8)
  checkDefaultSize(DecimalType(10, 5), 8)
  checkDefaultSize(DecimalType.SYSTEM_DEFAULT, 16)
  checkDefaultSize(DateType, 4)
  checkDefaultSize(TimestampType, 8)
  checkDefaultSize(StringType, 20)
  checkDefaultSize(BinaryType, 100)
  checkDefaultSize(ArrayType(DoubleType, true), 8)
  checkDefaultSize(ArrayType(StringType, false), 20)
  checkDefaultSize(MapType(IntegerType, StringType, true), 24)
  checkDefaultSize(MapType(IntegerType, ArrayType(DoubleType), false), 12)
  checkDefaultSize(structType, 20)
  // ---- equalsIgnoreCompatibleNullability: a non-nullable `from` may feed a
  // nullable `to`, but never the reverse ----
  def checkEqualsIgnoreCompatibleNullability(
      from: DataType,
      to: DataType,
      expected: Boolean): Unit = {
    val testName =
      s"equalsIgnoreCompatibleNullability: (from: $from, to: $to)"
    test(testName) {
      assert(DataType.equalsIgnoreCompatibleNullability(from, to) === expected)
    }
  }
  checkEqualsIgnoreCompatibleNullability(
    from = ArrayType(DoubleType, containsNull = true),
    to = ArrayType(DoubleType, containsNull = true),
    expected = true)
  checkEqualsIgnoreCompatibleNullability(
    from = ArrayType(DoubleType, containsNull = false),
    to = ArrayType(DoubleType, containsNull = false),
    expected = true)
  checkEqualsIgnoreCompatibleNullability(
    from = ArrayType(DoubleType, containsNull = false),
    to = ArrayType(DoubleType, containsNull = true),
    expected = true)
  checkEqualsIgnoreCompatibleNullability(
    from = ArrayType(DoubleType, containsNull = true),
    to = ArrayType(DoubleType, containsNull = false),
    expected = false)
  checkEqualsIgnoreCompatibleNullability(
    from = ArrayType(DoubleType, containsNull = false),
    to = ArrayType(StringType, containsNull = false),
    expected = false)
  checkEqualsIgnoreCompatibleNullability(
    from = MapType(StringType, DoubleType, valueContainsNull = true),
    to = MapType(StringType, DoubleType, valueContainsNull = true),
    expected = true)
  checkEqualsIgnoreCompatibleNullability(
    from = MapType(StringType, DoubleType, valueContainsNull = false),
    to = MapType(StringType, DoubleType, valueContainsNull = false),
    expected = true)
  checkEqualsIgnoreCompatibleNullability(
    from = MapType(StringType, DoubleType, valueContainsNull = false),
    to = MapType(StringType, DoubleType, valueContainsNull = true),
    expected = true)
  checkEqualsIgnoreCompatibleNullability(
    from = MapType(StringType, DoubleType, valueContainsNull = true),
    to = MapType(StringType, DoubleType, valueContainsNull = false),
    expected = false)
  checkEqualsIgnoreCompatibleNullability(
    from = MapType(StringType, ArrayType(IntegerType, true), valueContainsNull = true),
    to = MapType(StringType, ArrayType(IntegerType, false), valueContainsNull = true),
    expected = false)
  checkEqualsIgnoreCompatibleNullability(
    from = MapType(StringType, ArrayType(IntegerType, false), valueContainsNull = true),
    to = MapType(StringType, ArrayType(IntegerType, true), valueContainsNull = true),
    expected = true)
  checkEqualsIgnoreCompatibleNullability(
    from = StructType(StructField("a", StringType, nullable = true) :: Nil),
    to = StructType(StructField("a", StringType, nullable = true) :: Nil),
    expected = true)
  checkEqualsIgnoreCompatibleNullability(
    from = StructType(StructField("a", StringType, nullable = false) :: Nil),
    to = StructType(StructField("a", StringType, nullable = false) :: Nil),
    expected = true)
  checkEqualsIgnoreCompatibleNullability(
    from = StructType(StructField("a", StringType, nullable = false) :: Nil),
    to = StructType(StructField("a", StringType, nullable = true) :: Nil),
    expected = true)
  checkEqualsIgnoreCompatibleNullability(
    from = StructType(StructField("a", StringType, nullable = true) :: Nil),
    to = StructType(StructField("a", StringType, nullable = false) :: Nil),
    expected = false)
  checkEqualsIgnoreCompatibleNullability(
    from = StructType(
      StructField("a", StringType, nullable = false) ::
        StructField("b", StringType, nullable = true) :: Nil),
    to = StructType(
      StructField("a", StringType, nullable = false) ::
        StructField("b", StringType, nullable = false) :: Nil),
    expected = false)
  // ---- catalogString must round-trip through the DDL parser, including
  // wide structs that exercise the truncated-string path ----
  def checkCatalogString(dt: DataType): Unit = {
    test(s"catalogString: $dt") {
      val dt2 = CatalystSqlParser.parseDataType(dt.catalogString)
      assert(dt === dt2)
    }
  }
  def createStruct(n: Int): StructType = new StructType(Array.tabulate(n) {
    i => StructField(s"col$i", IntegerType, nullable = true)
  })
  checkCatalogString(BooleanType)
  checkCatalogString(ByteType)
  checkCatalogString(ShortType)
  checkCatalogString(IntegerType)
  checkCatalogString(LongType)
  checkCatalogString(FloatType)
  checkCatalogString(DoubleType)
  checkCatalogString(DecimalType(10, 5))
  checkCatalogString(BinaryType)
  checkCatalogString(StringType)
  checkCatalogString(DateType)
  checkCatalogString(TimestampType)
  checkCatalogString(createStruct(4))
  checkCatalogString(createStruct(40))
  checkCatalogString(ArrayType(IntegerType))
  checkCatalogString(ArrayType(createStruct(40)))
  checkCatalogString(MapType(IntegerType, StringType))
  checkCatalogString(MapType(IntegerType, createStruct(40)))
}
| ZxlAaron/mypros | sql/catalyst/src/test/scala/org/apache/spark/sql/types/DataTypeSuite.scala | Scala | apache-2.0 | 14,394 |
/*
* Copyright (c) 2013-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow
package collectors
package scalastream
package utils
import CollectorPayload.thrift.model1.CollectorPayload
// Thrift
import org.apache.thrift.TDeserializer
// json4s
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.json4s._
// Specs2
import org.specs2.mutable.Specification
/**
 * Tests for SplitBatch: batching serialized records under a byte limit, and
 * splitting oversized Thrift CollectorPayloads into good/bad outputs.
 */
class SplitBatchSpec extends Specification {
  val splitBatch = SplitBatch
  "SplitBatch.split" should {
    // split(records, maxBytes, joinSize) returns SplitBatchResult(goodBatches, oversized)
    "Batch a list of strings based on size" in {
      splitBatch.split(List("a", "b", "c"), 5, 1) must_==
        SplitBatchResult(
          List(List("c"),List("b", "a")),
          Nil)
    }
    // Items that individually exceed the limit go to the failed list; the rest still batch.
    "Reject only those strings which are too big" in {
      splitBatch.split(List("123456", "1", "123"), 5, 0) must_==
        SplitBatchResult(
          List(List("123", "1")),
          List("123456"))
    }
    "Batch a long list of strings" in {
      splitBatch.split(List("12345677890", "123456789", "12345678", "1234567", "123456", "12345", "1234", "123", "12", "1"), 9, 0) must_==
        SplitBatchResult(
          List(
            List("1", "12", "123"),
            List("1234", "12345"),
            List("123456"),
            List("1234567"),
            List("12345678"),
            List("123456789")),
          List("12345677890"))
    }
  }
  "SplitBatch.splitAndSerializePayload" should {
    "Serialize an empty CollectorPayload" in {
      val actual = SplitBatch.splitAndSerializePayload(new CollectorPayload(), 100)
      val target = new CollectorPayload
      // Round-trip: deserializing the good output must reproduce the original payload.
      new TDeserializer().deserialize(target, actual.good.head)
      target must_== new CollectorPayload
    }
    // A GET payload has no body to split on, so an oversized one is rejected outright.
    "Reject an oversized GET CollectorPayload" in {
      val payload = new CollectorPayload()
      payload.setQuerystring("x" * 1000)
      val actual = SplitBatch.splitAndSerializePayload(payload, 100)
      val errorJson = parse(new String(actual.bad.head))
      errorJson \\ "size" must_== JInt(1019)
      errorJson \\ "errors" must_== JArray(List(JString("Cannot split record with null body")))
      actual.good must_== Nil
    }
    "Reject an oversized POST CollectorPayload with an unparseable body" in {
      val payload = new CollectorPayload()
      payload.setBody("s" * 1000)
      val actual = SplitBatch.splitAndSerializePayload(payload, 100)
      val errorJson = parse(new String(actual.bad.head))
      errorJson \\ "size" must_== JInt(1019)
    }
    "Reject an oversized POST CollectorPayload which would be oversized even without its body" in {
      val payload = new CollectorPayload()
      val data = compact(render(
        ("schema" -> "s") ~
          ("data" -> List(
            ("e" -> "se") ~ ("tv" -> "js-2.4.3"),
            ("e" -> "se") ~ ("tv" -> "js-2.4.3")
          ))))
      payload.setBody(data)
      // The long path alone pushes the envelope past the limit, so splitting cannot help.
      payload.setPath("p" * 1000)
      val actual = SplitBatch.splitAndSerializePayload(payload, 1000)
      actual.bad.size must_== 1
      parse(new String(actual.bad.head)) \\ "errors" must_==
        JArray(List(JString("Even without the body, the serialized event is too large")))
    }
    // Mixed sizes: the three ~600-byte events fit after splitting (2 good batches),
    // while each 1000-byte event is individually over the limit (4 bad records).
    "Split a CollectorPayload with three large events and four very large events" in {
      val payload = new CollectorPayload()
      val data = compact(render(
        ("schema" -> "s") ~
          ("data" -> List(
            ("e" -> "se") ~ ("tv" -> "x" * 600),
            ("e" -> "se") ~ ("tv" -> "x" * 5),
            ("e" -> "se") ~ ("tv" -> "x" * 600),
            ("e" -> "se") ~ ("tv" -> "y" * 1000),
            ("e" -> "se") ~ ("tv" -> "y" * 1000),
            ("e" -> "se") ~ ("tv" -> "y" * 1000),
            ("e" -> "se") ~ ("tv" -> "y" * 1000)
          ))
        ))
      payload.setBody(data)
      val actual = SplitBatch.splitAndSerializePayload(payload, 1000)
      actual.bad.size must_== 4
      actual.good.size must_== 2
    }
  }
}
| mdavid/lessig-bigdata | lib/snowplow/2-collectors/scala-stream-collector/src/test/scala/com.snowplowanalytics.snowplow.collectors.scalastream/utils/SplitBatchSpec.scala | Scala | mit | 4,544 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.validation
abstract class TagRuleWrapper extends TagRule
| Netflix/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/validation/TagRuleWrapper.scala | Scala | apache-2.0 | 691 |
package com.munchii.sbt.version
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB
import sbt._
/** sbt setting keys exposed by this plugin. */
object Keys {
  /** AWS credentials provider (`aws-credentials-provider`) used for API calls. */
  val awsCredentialsProvider = SettingKey[AWSCredentialsProvider]("aws-credentials-provider")
  /** Pre-built DynamoDB client (`aws-dynamodb-client`). */
  val awsDynamoDBClient = SettingKey[AmazonDynamoDB]("aws-dynamodb-client")
  /** AWS region name (`aws-region`). */
  val awsRegion = SettingKey[String]("aws-region")
  /** DynamoDB table name (`build-number-table`) — presumably where build numbers are stored. */
  val buildNumberTable = SettingKey[String]("build-number-table")
  /** Optional version qualifier (`qualifier`). */
  val qualifier = SettingKey[Option[String]]("qualifier")
}
| munchii/sbt-dynamodb-version | src/main/scala/com/munchii/sbt/version/Keys.scala | Scala | apache-2.0 | 515 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.spark
import com.github.benmanes.caffeine.cache.{CacheLoader, Caffeine}
import org.geotools.data.{DataStore, DataStoreFinder}
/**
* Caches accessing of DataStores.
*/
/**
 * Caches `DataStore` instances keyed by their connection parameters so that
 * repeated lookups with the same parameter map reuse the same store.
 */
object DataStoreConnector {

  import scala.collection.JavaConverters._

  // Loading cache: on a miss, resolves the store through DataStoreFinder.
  // NOTE(review): DataStoreFinder.getDataStore may return null when no factory
  // matches the parameters — callers should be prepared for that.
  private val stores = Caffeine.newBuilder().build[Map[String, String], DataStore](
    new CacheLoader[Map[String, String], DataStore] {
      override def load(params: Map[String, String]): DataStore =
        DataStoreFinder.getDataStore(params.asJava)
    }
  )

  /** Returns the cached (or newly created) store for the given parameters. */
  def apply[T <: DataStore](params: Map[String, String]): T =
    stores.get(params).asInstanceOf[T]
}
| elahrvivaz/geomesa | geomesa-spark/geomesa-spark-core/src/main/scala/org/locationtech/geomesa/spark/DataStoreConnector.scala | Scala | apache-2.0 | 1,104 |
package acceptance.support
import scala.util.{Failure, Success, Try}
import scalaj.http.{Http => HttpClient}
/**
 * Thin HTTP helper around scalaj-http used by the acceptance tests.
 * Both verbs return a (statusCode, responseBody) pair of strings; any
 * transport failure is logged to stdout and then rethrown.
 */
trait Http {

  /** Placeholder for requests that carry no payload. */
  val EmptyBody = ""

  /** Human-readable status names mapped to their HTTP status-code strings. */
  val statusCode = Map(
    "OK" -> "200",
    "NOT FOUND" -> "404",
    "BAD REQUEST" -> "400",
    "UNAUTHORIZED" -> "401",
    "CREATED" -> "201",
    "INTERNAL SERVER ERROR" -> "500"
  )

  /** Issues a GET to `url` with the given headers. */
  def GET(url: String, header: (String, String)*): (String, String) =
    Try(HttpClient(url).headers(header).asString) match {
      case Failure(ex) =>
        println(s"Problem getting URL $url: ${ex.getMessage}")
        throw ex
      case Success(response) =>
        (response.code.toString, response.body)
    }

  /** Issues a POST to `url` with a JSON body (empty by default). */
  def POST(url: String, bodyJson: String = EmptyBody): (String, String) =
    Try(HttpClient(url).postData(bodyJson).header("content-type", "application/json").asString) match {
      case Failure(ex) =>
        println(s"Problem post URL $url: ${ex.getMessage}")
        throw ex
      case Success(response) =>
        (response.code.toString, response.body)
    }
}
object Http extends Http
| tvlive/tv-api | test/acceptance/support/Http.scala | Scala | apache-2.0 | 996 |
/*
* Copyright (c) 2015-6 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package macrocompat
import scala.language.experimental.macros
import scala.annotation.StaticAnnotation
import scala.reflect.macros.Context
/**
 * Macro annotation marking a class as a macro "bundle" for Scala 2.10 (which
 * lacks native macro bundles). Expansion is performed by [[BundleMacro.bundleImpl]],
 * which rewrites the annotated class and synthesises forwarder methods.
 */
class bundle extends StaticAnnotation {
  def macroTransform(annottees: Any*): Any = macro BundleMacro.bundleImpl
}
/**
 * Implements `@bundle` expansion for Scala 2.10: the annotated class is kept
 * as-is (modulo injected compatibility statements) and, for a concrete class,
 * a companion object is generated containing `Context`-taking forwarder
 * methods for every macro-implementation-shaped method in the class.
 */
class BundleMacro[C <: Context](val c: C) {
  import c.universe._
  import Flag._
  // Extractor matching a type tree that refers to `Tree` (bare or as `c.Tree`);
  // yields `Any`, since the forwarder erases tree types in its own signature.
  object TreeE {
    val TreeNme = newTypeName("Tree")
    val CNme = newTermName("c")
    def unapply(t: Tree): Option[Tree] = t match {
      case Ident(TreeNme) => Some(tq"Any")
      case Select(Ident(CNme), TreeNme) => Some(tq"Any")
      case x => None
    }
  }
  // Extractor matching a type tree of shape `Expr[T]` (possibly qualified); yields `T`.
  object ExprE {
    val ExprNme = newTypeName("Expr")
    def unapply(t: Tree): Option[Tree] = t match {
      case AppliedTypeTree(ExprNme, List(arg)) => Some(arg)
      case AppliedTypeTree(Select(_, ExprNme), List(arg)) => Some(arg)
      case AppliedTypeTree(Ident(ExprNme), List(arg)) => Some(arg)
      case _ => None
    }
  }
  // Extractor matching a type tree of shape `WeakTypeTag[T]`; yields `T`.
  object WeakTypeTagE {
    val WTTNme = newTypeName("WeakTypeTag")
    def unapply(t: Tree): Option[Tree] = t match {
      case AppliedTypeTree(Ident(WTTNme), List(arg)) => Some(arg)
      case AppliedTypeTree(Select(_, WTTNme), List(arg)) => Some(arg)
      case _ => None
    }
  }
  // Constructor/extractor for repeated-parameter ("T*") type trees, encoded
  // internally as `scala.<repeated>[T]`.
  object Repeat {
    val Scala = newTermName("scala")
    val Repeated = newTypeName("<repeated>")
    def apply(t: Tree): Tree =
      AppliedTypeTree(Select(Select(Ident(nme.ROOTPKG), Scala), Repeated), List(t))
    def unapply(t: Tree): Option[Tree] = t match {
      case AppliedTypeTree(Select(Select(Ident(nme.ROOTPKG), Scala), Repeated), List(tpt)) => Some(tpt)
      case _ => None
    }
  }
  // Builds the forwarder the generated companion exposes for one bundle method:
  // same name and type parameters, but with a fresh leading
  // `scala.reflect.macros.Context` parameter; arguments and result are cast
  // between the caller's context types and the bundle instance's context types.
  def mkForwarder(d: DefDef, macroClassNme: TypeName): DefDef = {
    val DefDef(mods, name, tparams, vparamss, tpt, rhs) = d
    // Fresh name for the injected Context parameter.
    val ctxNme = newTermName(c.fresh)
    val ctxParam = q""" val $ctxNme: _root_.scala.reflect.macros.Context """
    val targs = tparams.map(_.name)
    // Rewrite each parameter's type to the corresponding `ctx.Expr`/`ctx.WeakTypeTag`.
    // The match is intentionally partial: MacroImpl guarantees only these shapes occur.
    val cvparamss = vparamss.map(_.map { param =>
      val ValDef(mods, nme, tpt, rhs) = param
      val ctpt = tpt match {
        case TreeE(t) => tq""" $ctxNme.Expr[$t] """
        case ExprE(t) => tq""" $ctxNme.Expr[$t] """
        case Repeat(TreeE(t)) => Repeat(tq""" $ctxNme.Expr[$t] """)
        case Repeat(ExprE(t)) => Repeat(tq""" $ctxNme.Expr[$t] """)
        case WeakTypeTagE(t) => tq""" $ctxNme.WeakTypeTag[$t] """
      }
      ValDef(mods, nme, ctpt, rhs)
    })
    // Name of the bundle instance constructed inside the forwarder body.
    val mi = newTermName(c.fresh("inst"))
    // Arguments passed through to the bundle method, cast to the instance's
    // path-dependent context types (Tree params unwrap the Expr's tree).
    val cargss = vparamss.map(_.map { param =>
      val ValDef(mods, nme, tpt, rhs) = param
      tpt match {
        case TreeE(_) => q""" $nme.tree.asInstanceOf[$mi.c.universe.Tree] """
        case ExprE(t) => q""" $nme.asInstanceOf[$mi.c.Expr[$t]] """
        case Repeat(TreeE(_)) => q""" $nme.map(_.tree.asInstanceOf[$mi.c.universe.Tree]): _* """
        case Repeat(ExprE(t)) => q""" $nme.map(_.asInstanceOf[$mi.c.Expr[$t]]): _* """
        case WeakTypeTagE(t) => q""" $nme.asInstanceOf[$mi.c.universe.WeakTypeTag[$t]] """
      }
    })
    // Instantiate the bundle with a compatibility-wrapped runtime context,
    // then invoke the original method on it.
    val call =
      q"""
        val $mi =
          new $macroClassNme(
            new _root_.macrocompat.RuntimeCompatContext(
              $ctxNme.asInstanceOf[_root_.scala.reflect.macros.runtime.Context]
            )
          )
        $mi.${name.toTermName}[..$targs](...$cargss)
      """
    // Result type/body: an Expr-returning method yields the Expr's tree; a
    // Tree-returning method wraps the tree in an Expr[Nothing]. Positions are
    // normalised through BundleMacro.fixPositions in both cases.
    val (ctpt, crhs) =
      tpt match {
        case ExprE(tpt) => (
          tq""" $ctxNme.Expr[$tpt] """,
          q""" $ctxNme.Expr[$tpt](_root_.macrocompat.BundleMacro.fixPositions[$ctxNme.type]($ctxNme)($call.tree.asInstanceOf[$ctxNme.universe.Tree])) """
        )
        case TreeE(tpt) => (
          tq""" $ctxNme.Expr[Nothing] """,
          q""" $ctxNme.Expr[Nothing](_root_.macrocompat.BundleMacro.fixPositions[$ctxNme.type]($ctxNme)($call.asInstanceOf[$ctxNme.universe.Tree])) """
        )
      }
    DefDef(mods, name, tparams, List(ctxParam) :: cvparamss, ctpt, crhs)
  }
  // Matches a def that looks like a macro implementation: it returns Tree/Expr
  // and every value parameter is either implicit or a Tree/Expr (possibly
  // repeated) or a WeakTypeTag. Only such defs get forwarders.
  object MacroImpl {
    def unapply(d: DefDef): Option[DefDef] = {
      val DefDef(mods, name, tparams, vparamss, tpt, rhs) = d
      tpt match {
        case TreeE(_)|ExprE(_)
          if vparamss.forall(_.forall {
            case ValDef(mods, _, _, _) if mods hasFlag IMPLICIT => true
            case ValDef(_, _, TreeE(_)|ExprE(_)|Repeat(TreeE(_))|Repeat(ExprE(_))|WeakTypeTagE(_), _) => true
            case _ => false
          }) => Some(d)
        case _ => None
      }
    }
  }
  // Recursively clears all positions in the tree (returns the same tree).
  def stripPositions(tree: Tree): Tree = {
    if(!tree.isEmpty) {
      tree.setPos(NoPosition)
      tree.children.foreach(stripPositions)
    }
    tree
  }
  // Strips positions only when range positions (-Yrangepos) are enabled —
  // presumably to avoid range-position validation failures on synthetic trees.
  def fixPositions(tree: Tree): Tree = {
    val global = c.universe.asInstanceOf[scala.tools.nsc.Global]
    if(global.settings.Yrangepos.value)
      stripPositions(tree)
    else
      tree
  }
  // Entry point for `@bundle`: the annottees are either the class alone or the
  // class followed by an explicit companion object.
  def bundleImpl(annottees: Tree*): Tree = {
    annottees match {
      case List(clsDef: ClassDef) => mkMacroClsAndObjTree(clsDef, None)
      case List(clsDef: ClassDef, objDef: ModuleDef) => mkMacroClsAndObjTree(clsDef, Some(objDef))
      case other =>
        c.abort(c.enclosingPosition, "Unexpected tree shape.")
    }
  }
  // Rewrites the annotated class (injecting compatibility statements right
  // after its `c` constructor-val) and, for concrete classes, generates the
  // companion object with forwarders for every MacroImpl-shaped method.
  def mkMacroClsAndObjTree(clsDef: ClassDef, objDef: Option[ModuleDef]) = {
    val ClassDef(mods, macroClassNme, tparams, Template(parents, self, body)) = clsDef
    // The private forwarding defintions (appliedType, Modifiers) below are needed because they need
    // to appear to be accessible as a result of import c.universe._. They can't be added to
    // c.compatUniverse because that results in import ambiguities. Note that the definitions are
    // private to avoid override conflicts in stacks of inherited bundles.
    val Block(stats, _) =
      q"""
         import c.compatUniverse._
         import _root_.macrocompat.TypecheckerContextExtensions._
         private def appliedType(tc: c.universe.Type, ts: _root_.scala.collection.immutable.List[c.universe.Type]): c.universe.Type = c.universe.appliedType(tc, ts)
         private def appliedType(tc: c.universe.Type, ts: c.universe.Type*): c.universe.Type = c.universe.appliedType(tc, ts.toList)
         private val Annotation = c.compatUniverse.CompatAnnotation
         private val Modifiers = c.compatUniverse.CompatModifiers
      """
    // For now all macro bundles must have a Context constructor argument named "c". See,
    // https://gitter.im/scala/scala?at=55ef0ffe24362d5253fe3a51
    val ctxNme = newTermName("c")
    val mixinCtorNme = newTermName("$init$")
    // Split the template body around the (primary or mixin) constructor ...
    val (prefix0, ctor :: suffix0) = body.span {
      case d: DefDef if d.name == nme.CONSTRUCTOR || d.name == mixinCtorNme => false
      case _ => true
    }
    // ... and around the `c` field, so the injected stats land just after it.
    val (prefix1, suffix1) = suffix0.span {
      case ValDef(_, nme, _, _) if nme == ctxNme => false
      case _ => true
    }
    val suffix2 =
      suffix1 match {
        case decl :: rest => prefix1 ++ (decl :: stats) ++ rest
        case _ => stats ++ prefix1 ++ suffix1
      }
    val newBody = prefix0 ++ List(ctor) ++ suffix2
    val macroClass = ClassDef(mods, macroClassNme, tparams, Template(parents, self, newBody))
    val res =
      // Abstract bundles get no forwarders — just the rewritten class (plus any
      // user-supplied companion, unchanged).
      if(mods.hasFlag(ABSTRACT))
        objDef match {
          case Some(obj) =>
            q"""
              $macroClass
              $obj
            """
          case _ =>
            macroClass
        }
      else {
        // Merge any explicit companion's early defs/parents/body into the
        // generated object alongside the forwarders.
        val (objEarlydefns, objParents, objBody) = objDef match {
          case Some(q"$objMods object $objTname extends { ..$objEarlydefns } with ..$objParents { $objSelf => ..$objBody }") => (objEarlydefns, objParents, objBody)
          case None => (Nil, List(tq"_root_.scala.AnyRef"), Nil)
        }
        val defns = body collect {
          case MacroImpl(d: DefDef) => d
        }
        val forwarders = defns.map { d => mkForwarder(d, macroClassNme) }
        val macroObjectNme = macroClassNme.toTermName
        q"""
          $macroClass
          object $macroObjectNme extends { ..$objEarlydefns } with ..$objParents {
            ..$forwarders
            ..$objBody
          }
        """
      }
    fixPositions(res)
  }
}
/** Static entry points referenced from generated code and the `@bundle` macro. */
object BundleMacro {
  /** Instantiates the bundle macro for a concrete context `c`. */
  def inst[C <: Context](c: C) = new BundleMacro[c.type](c)
  /** Macro implementation bound by the `@bundle` annotation's `macroTransform`. */
  def bundleImpl(c: Context)(annottees: c.Expr[Any]*): c.Expr[Any] =
    c.Expr[Any](inst(c).bundleImpl(annottees.map(_.tree): _*))
  /** Position normalisation hook called from the generated forwarder bodies. */
  def fixPositions[C <: Context](c: C)(tree: c.Tree): c.Tree =
    inst(c).fixPositions(tree)
}
| milessabin/macro-compat | core/src/main/scala_2.10/macrocompat/bundle_2.10.scala | Scala | apache-2.0 | 8,931 |
package homepage
import _root_.java.io.File
import _root_.junit.framework._
import Assert._
import _root_.scala.xml.XML
import _root_.net.liftweb.util._
import _root_.net.liftweb.common._
/** JUnit 3 entry points for [[AppTest]]. */
object AppTest {
  /** Builds a suite containing every test method of [[AppTest]]. */
  def suite: Test = new TestSuite(classOf[AppTest])

  /** Runs the suite through JUnit's text-mode runner. */
  def main(args: Array[String]): Unit =
    _root_.junit.textui.TestRunner.run(suite)
}
/**
* Unit test for simple App.
*/
class AppTest extends TestCase("app") {
  /**
   * Rigorous Tests :-)
   */
  def testOK() = assertTrue(true)
  // def testKO() = assertTrue(false);
  /**
   * Tests to make sure the project's XML files are well-formed.
   *
   * Finds every *.html and *.xml file in src/main/webapp (and its
   * subdirectories) and tests to make sure they are well-formed.
   */
  def testXml() = {
    // Accumulates every file that failed to parse; reported at the end.
    var failed: List[File] = Nil
    def handledXml(file: String) = file.endsWith(".xml")
    /** Filters the files to be checked for xml validity. All jsMath html files are excluded. */
    def handledXHtml(file: String) = !file.contains("jsMath") &&
      (file.endsWith(".html") || file.endsWith(".htm") || file.endsWith(".xhtml"))
    // Recursively walks the tree, parsing each candidate file with Lift's
    // PCDataXmlParser and recording failures in `failed`.
    def wellFormed(file: File) {
      if (file.isDirectory)
        for (f <- file.listFiles) wellFormed(f)
      /*
      if (file.isFile && file.exists && handledXml(file.getName)) {
        try {
          import java.io.FileInputStream
          val fis = new FileInputStream(file)
          try {
            XML.load(fis)
          } finally {
            fis.close()
          }
        } catch {
          case e: _root_.org.xml.sax.SAXParseException => failed = file :: failed
        }
      }
      */
      if (file.isFile && file.exists && handledXHtml(file.getPath)) {
        // Full(_) means the parse succeeded; anything else counts as malformed.
        PCDataXmlParser(new _root_.java.io.FileInputStream(file.getAbsolutePath)) match {
          case Full(_) => // file is ok
          case _ => failed = file :: failed
        }
      }
    }
    wellFormed(new File("src/main/webapp"))
    val numFails = failed.size
    if (numFails > 0) {
      val fileStr = if (numFails == 1) "file" else "files"
      val msg = "Malformed XML in " + numFails + " " + fileStr + ": " + failed.mkString(", ")
      println(msg)
      fail(msg)
    }
  }
}
| bbiletskyy/homepage | src/test/scala/homepage/AppTest.scala | Scala | apache-2.0 | 2,238 |
/**
 * HackerRank "Sherlock and Array": decide whether some index splits the
 * array so that the sum of the elements strictly to its left equals the sum
 * of the elements strictly to its right.
 */
object Solution {
  /**
   * Returns true if there is an index `i` with
   * `nums.take(i).sum == nums.drop(i + 1).sum`.
   *
   * Single pass over prefix sums: with `total` the sum of all elements,
   * index `i` balances iff `total - nums(i) == 2 * prefix(i)`.
   * An empty array yields false (no index exists); a single element always
   * balances (both sides are empty).
   */
  def isTheArray(nums: Array[Int]): Boolean = {
    val total = nums.sum
    // prefix(i) = sum of nums(0 until i); scanLeft produces n + 1 entries.
    val prefix = nums.scanLeft(0)(_ + _)
    nums.indices.exists(i => total - nums(i) == 2 * prefix(i))
  }

  /**
   * Reads T test cases from stdin: each case is a (redundant) length line
   * followed by a line of space-separated integers. Prints YES/NO per case.
   */
  def main(args: Array[String]): Unit = {
    // StdIn.readLine replaces the deprecated Predef.readLine.
    import scala.io.StdIn.readLine
    val t = readLine.toInt
    for (_ <- 1 to t) {
      readLine() // discard the declared array length; the split determines it
      val nums = readLine.split(" ").map(_.toInt)
      println(if (isTheArray(nums)) "YES" else "NO")
    }
  }
}
| advancedxy/hackerrank | algorithms/implementation/SherlockAndArray.scala | Scala | mit | 488 |
package fpinscala.answers.parallelism
import java.util.concurrent.{Callable, CountDownLatch, ExecutorService}
import java.util.concurrent.atomic.AtomicReference
import scala.language.implicitConversions
/**
 * Non-blocking `Par` implementation (from "Functional Programming in Scala",
 * chapter 7): results are delivered via registered callbacks rather than by
 * blocking threads, except in `run` which blocks the *calling* thread only.
 */
object Nonblocking {
  // A running computation: `apply` registers the continuation to invoke with the result.
  trait Future[+A] {
    private[parallelism] def apply(k: A => Unit): Unit
  }
  // A Par describes a computation that, given an executor, yields such a Future.
  type Par[+A] = ExecutorService => Future[A]
  object Par {
    // Blocks the calling thread (via the latch) until the callback fires.
    def run[A](es: ExecutorService)(p: Par[A]): A = {
      val ref = new java.util.concurrent.atomic.AtomicReference[A] // A mutable, threadsafe reference, to use for storing the result
      //println("atomic reference: "+ref)
      val latch = new CountDownLatch(1) // A latch which, when decremented, implies that `ref` has the result
      //println("latch: "+latch)
      //println("apply executor service to p: "+p+"("+es+")")
      p(es) { a => ref.set(a); latch.countDown } // Asynchronously set the result, and decrement the latch
      //println("awaiting latch")
      latch.await // Block until the `latch.countDown` is invoked asynchronously
      //println("ref.get")
      ref.get // Once we've passed the latch, we know `ref` has been set, and return its value
    }
    // Completes immediately with `a`, on the caller's thread.
    def unit[A](a: A): Par[A] =
      es => new Future[A] {
        def apply(cb: A => Unit): Unit =
          cb(a)
      }
    /** A non-strict version of `unit` */
    def delay[A](a: => A): Par[A] =
      es => new Future[A] {
        def apply(cb: A => Unit): Unit =
          cb(a)
      }
    // Defers evaluation of `a` to a task submitted to the executor.
    def fork[A](a: => Par[A]): Par[A] =
      es => new Future[A] {
        def apply(cb: A => Unit): Unit =
          eval(es)(a(es)(cb))
      }
    /**
     * Helper function for constructing `Par` values out of calls to non-blocking continuation-passing-style APIs.
     * This will come in handy in Chapter 13.
     */
    def async[A](f: (A => Unit) => Unit): Par[A] = es => new Future[A] {
      def apply(k: A => Unit) = f(k)
    }
    /**
     * Helper function, for evaluating an action
     * asynchronously, using the given `ExecutorService`.
     */
    def eval(es: ExecutorService)(r: => Unit): Unit =
      es.submit(new Callable[Unit] { def call = r })
    // Combines two results with `f`, whichever arrives first. The mutable
    // `ar`/`br` are only touched inside the actor's handler — safe assuming
    // `Actor` processes messages one at a time (see the Actor implementation).
    def map2[A,B,C](p: Par[A], p2: Par[B])(f: (A,B) => C): Par[C] =
      es => new Future[C] {
        def apply(cb: C => Unit): Unit = {
          var ar: Option[A] = None
          var br: Option[B] = None
          // this implementation is a little too liberal in forking of threads -
          // it forks a new logical thread for the actor and for stack-safety,
          // forks evaluation of the callback `cb`
          val combiner = Actor[Either[A,B]](es) {
            case Left(a) =>
              if (br.isDefined) eval(es)(cb(f(a,br.get)))
              else ar = Some(a)
            case Right(b) =>
              if (ar.isDefined) eval(es)(cb(f(ar.get,b)))
              else br = Some(b)
          }
          p(es)(a => combiner ! Left(a))
          p2(es)(b => combiner ! Right(b))
        }
      }
    // specialized version of `map`
    def map[A,B](p: Par[A])(f: A => B): Par[B] =
      es => new Future[B] {
        def apply(cb: B => Unit): Unit =
          p(es)(a => eval(es) { cb(f(a)) })
      }
    def lazyUnit[A](a: => A): Par[A] =
      fork(unit(a))
    // Lifts an ordinary function to one returning an asynchronous result.
    def asyncF[A,B](f: A => B): A => Par[B] =
      a => lazyUnit(f(a))
    def sequenceRight[A](as: List[Par[A]]): Par[List[A]] =
      as match {
        case Nil => unit(Nil)
        case h :: t => map2(h, fork(sequence(t)))(_ :: _)
      }
    // Divide-and-conquer so that the combination tree has logarithmic depth.
    def sequenceBalanced[A](as: IndexedSeq[Par[A]]): Par[IndexedSeq[A]] = fork {
      if (as.isEmpty) unit(Vector())
      else if (as.length == 1) map(as.head)(a => Vector(a))
      else {
        val (l,r) = as.splitAt(as.length/2)
        map2(sequenceBalanced(l), sequenceBalanced(r))(_ ++ _)
      }
    }
    def sequence[A](as: List[Par[A]]): Par[List[A]] =
      map(sequenceBalanced(as.toIndexedSeq))(_.toList)
    def parMap[A,B](as: List[A])(f: A => B): Par[List[B]] =
      sequence(as.map(asyncF(f)))
    def parMap[A,B](as: IndexedSeq[A])(f: A => B): Par[IndexedSeq[B]] =
      sequenceBalanced(as.map(asyncF(f)))
    // exercise answers
    /*
     * We can implement `choice` as a new primitive.
     *
     * `p(es)(result => ...)` for some `ExecutorService`, `es`, and
     * some `Par`, `p`, is the idiom for running `p`, and registering
     * a callback to be invoked when its result is available. The
     * result will be bound to `result` in the function passed to
     * `p(es)`.
     *
     * If you find this code difficult to follow, you may want to
     * write down the type of each subexpression and follow the types
     * through the implementation. What is the type of `p(es)`? What
     * about `t(es)`? What about `t(es)(cb)`?
     */
    def choice[A](p: Par[Boolean])(t: Par[A], f: Par[A]): Par[A] =
      es => new Future[A] {
        def apply(cb: A => Unit): Unit =
          p(es) { b =>
            if (b) eval(es) { t(es)(cb) }
            else eval(es) { f(es)(cb) }
          }
      }
    /* The code here is very similar. */
    def choiceN[A](p: Par[Int])(ps: List[Par[A]]): Par[A] =
      es => new Future[A] {
        def apply(cb: A => Unit): Unit =
          p(es) { ind => eval(es) { ps(ind)(es)(cb) }}
      }
    def choiceViaChoiceN[A](a: Par[Boolean])(ifTrue: Par[A], ifFalse: Par[A]): Par[A] =
      choiceN(map(a)(b => if (b) 0 else 1))(List(ifTrue, ifFalse))
    def choiceMap[K,V](p: Par[K])(ps: Map[K,Par[V]]): Par[V] =
      es => new Future[V] {
        def apply(cb: V => Unit): Unit =
          p(es)(k => ps(k)(es)(cb))
      }
    /* `chooser` is usually called `flatMap` or `bind`. */
    def chooser[A,B](p: Par[A])(f: A => Par[B]): Par[B] =
      flatMap(p)(f)
    def flatMap[A,B](p: Par[A])(f: A => Par[B]): Par[B] =
      es => new Future[B] {
        def apply(cb: B => Unit): Unit =
          p(es)(a => f(a)(es)(cb))
      }
    def choiceViaFlatMap[A](p: Par[Boolean])(f: Par[A], t: Par[A]): Par[A] =
      flatMap(p)(b => if (b) t else f)
    def choiceNViaFlatMap[A](p: Par[Int])(choices: List[Par[A]]): Par[A] =
      flatMap(p)(i => choices(i))
    // Flattens a nested Par by running the outer one and chaining into the inner.
    def join[A](p: Par[Par[A]]): Par[A] =
      es => new Future[A] {
        def apply(cb: A => Unit): Unit =
          p(es)(p2 => eval(es) { p2(es)(cb) })
      }
    def joinViaFlatMap[A](a: Par[Par[A]]): Par[A] =
      flatMap(a)(x => x)
    def flatMapViaJoin[A,B](p: Par[A])(f: A => Par[B]): Par[B] =
      join(map(p)(f))
    /* Gives us infix syntax for `Par`. */
    implicit def toParOps[A](p: Par[A]): ParOps[A] = new ParOps(p)
    // infix versions of `map`, `map2` and `flatMap`
    class ParOps[A](p: Par[A]) {
      def map[B](f: A => B): Par[B] = Par.map(p)(f)
      def map2[B,C](b: Par[B])(f: (A,B) => C): Par[C] = Par.map2(p,b)(f)
      def flatMap[B](f: A => Par[B]): Par[B] = Par.flatMap(p)(f)
      def zip[B](b: Par[B]): Par[(A,B)] = p.map2(b)((_,_))
    }
  }
}
| peterbecich/fpinscala | answers/src/main/scala/fpinscala/parallelism/Nonblocking.scala | Scala | mit | 6,898 |
package models
import java.sql.Timestamp
/**
 * A single administered dose, keyed by the patient's hospital number and the
 * time it was recorded.
 * NOTE(review): field semantics inferred from names — confirm against usage.
 */
case class Dose(ptHospitalNumber: String, date: Timestamp)
| BBK-SDP-2015-jtomli03/Morphidose2 | app/models/Dose.scala | Scala | apache-2.0 | 162 |
package com.gjos.scala.swoc
import org.scalatest.{Matchers, WordSpec}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import com.gjos.scala.swoc.util.Resource
/**
 * End-to-end tests driving the game Engine against recorded input files.
 * Each case replays a transcript through IOManager.fileMode, runs the engine
 * inside Await.ready (which bounds runtime but does not fail on timeout per
 * se — an incomplete run yields fewer output lines), and counts the
 * newline-separated responses the bot produced.
 */
class MainSpec extends WordSpec with Matchers {
  "Main" should {
    "run without timing out" in {
      val (ioManager, output) = IOManager.fileMode(Resource.testResource("replay-timeout.txt"))
      val bot = new Bot(None)
      val engine = new Engine(bot, ioManager)
      val maxRuntime = 4000.millis
      Await.ready(Future(engine.run()), maxRuntime)
      // Two moves expected within the allotted time for this transcript.
      val outLines = output().split(ioManager.newline).toList
      outLines.size should be (2)
    }
    "be able to play as white" in {
      val (ioManager, output) = IOManager.fileMode(Resource.testResource("start-as-white.txt"))
      val bot = new Bot(None)
      val engine = new Engine(bot, ioManager)
      val maxRuntime = 2000.millis
      Await.ready(Future(engine.run()), maxRuntime)
      val outLines = output().split(ioManager.newline).toList
      outLines.size should be (1)
    }
    "keep giving responses" in {
      val (ioManager, output) = IOManager.fileMode(Resource.testResource("responseless.txt"))
      val bot = new Bot(None)
      val engine = new Engine(bot, ioManager)
      val maxRuntime = 12000.millis
      Await.ready(Future(engine.run()), maxRuntime)
      val outLines = output().split(ioManager.newline).toList
      outLines.size should be (6)
      println(outLines)
    }
  }
}
| Oduig/swoc2014 | Greedy/src/test/scala/com/gjos/scala/swoc/MainSpec.scala | Scala | apache-2.0 | 1,533 |
package xyz.hyperreal.funl
import org.scalatest._
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
/**
 * Regex conformance cases from the "osx/bsd critical" test set. Each case
 * passes a pattern, an input and the expected match/group spans (e.g.
 * "(0,2)(0,1)(?,?)(1,2)", where "(?,?)" marks an unmatched group) to `test`
 * from HaskellTest, which returns true on agreement.
 * NOTE(review): names prefixed with "-" appear to be alternative expected
 * results for the same pattern — confirm against the HaskellTest helper.
 */
class osx_bsd_critical_Tests extends FreeSpec with ScalaCheckPropertyChecks with Matchers with HaskellTest {
  "1" in {
    test("""(()|.)(b)""", """ab""", "(0,2)(0,1)(?,?)(1,2)") shouldBe true
  }
  "-1" in {
    test("""(()|.)(b)""", """ab""", "(1,2)(1,1)(1,1)(1,2)") shouldBe true
  }
  "2" in {
    test("""(()|[ab])(b)""", """ab""", "(0,2)(0,1)(?,?)(1,2)") shouldBe true
  }
  "-2" in {
    test("""(()|[ab])(b)""", """ab""", "(1,2)(1,1)(1,1)(1,2)") shouldBe true
  }
  "3" in {
    test("""(()|[ab])+b""", """aaab""", "(0,4)(2,3)(?,?)") shouldBe true
  }
  "-3" in {
    test("""(()|[ab])+b""", """aaab""", "(3,4)(3,3)(3,3)") shouldBe true
  }
  "11" in {
    test("""(.|())(b)""", """ab""", "(0,2)(0,1)(?,?)(1,2)") shouldBe true
  }
  "12" in {
    test("""([ab]|())(b)""", """ab""", "(0,2)(0,1)(?,?)(1,2)") shouldBe true
  }
  "14" in {
    test("""([ab]|())+b""", """aaab""", "(0,4)(2,3)(?,?)") shouldBe true
  }
  "-14" in {
    test("""([ab]|())+b""", """aaab""", "(0,4)(3,3)(3,3)") shouldBe true
  }
  "20" in {
    test("""(.?)(b)""", """ab""", "(0,2)(0,1)(1,2)") shouldBe true
  }
}
| edadma/funl | bvm/src/test/scala/xyz/hyperreal/bvm/osx_bsd_critical_Tests.scala | Scala | mit | 1,221 |
package actors.daily
import actors.persistent.QueueLikeActor.UpdatedMillis
import actors.persistent.staffing.GetState
import actors.persistent.{RecoveryActorLike, Sizes}
import akka.persistence.{Recovery, SaveSnapshotSuccess, SnapshotSelectionCriteria}
import drt.shared.CrunchApi.{DeskRecMinute, MillisSinceEpoch, MinuteLike, MinutesContainer}
import drt.shared.Terminals.Terminal
import drt.shared.{SDateLike, WithTimeAccessor}
import org.slf4j.{Logger, LoggerFactory}
import scalapb.GeneratedMessage
import services.SDate
import services.graphstages.Crunch
abstract class TerminalDayLikeActor[VAL <: MinuteLike[VAL, INDEX], INDEX <: WithTimeAccessor](year: Int,
month: Int,
day: Int,
terminal: Terminal,
now: () => SDateLike,
maybePointInTime: Option[MillisSinceEpoch]) extends RecoveryActorLike {
val loggerSuffix: String = maybePointInTime match {
case None => ""
case Some(pit) => f"@${SDate(pit).toISOString()}"
}
override val log: Logger = LoggerFactory.getLogger(f"$getClass-$terminal-$year%04d-$month%02d-$day%02d$loggerSuffix")
val typeForPersistenceId: String
var state: Map[INDEX, VAL] = Map()
override def persistenceId: String = f"terminal-$typeForPersistenceId-${terminal.toString.toLowerCase}-$year-$month%02d-$day%02d"
override val snapshotBytesThreshold: Int = Sizes.oneMegaByte
private val maxSnapshotInterval = 250
override val maybeSnapshotInterval: Option[Int] = Option(maxSnapshotInterval)
override val recoveryStartMillis: MillisSinceEpoch = now().millisSinceEpoch
val firstMinute: SDateLike = SDate(year, month, day, 0, 0, Crunch.utcTimeZone)
val firstMinuteMillis: MillisSinceEpoch = firstMinute.millisSinceEpoch
val lastMinuteMillis: MillisSinceEpoch = firstMinute.addDays(1).addMinutes(-1).millisSinceEpoch
override def recovery: Recovery = maybePointInTime match {
case None =>
Recovery(SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp = Long.MaxValue, 0L, 0L))
case Some(pointInTime) =>
val criteria = SnapshotSelectionCriteria(maxTimestamp = pointInTime)
Recovery(fromSnapshot = criteria, replayMax = maxSnapshotInterval)
}
override def receiveCommand: Receive = {
case container: MinutesContainer[VAL, INDEX] =>
log.debug(s"Received MinutesContainer for persistence")
updateAndPersistDiff(container)
case GetState =>
log.debug(s"Received GetState")
sender() ! stateResponse
case _: SaveSnapshotSuccess =>
ackIfRequired()
case m => log.warn(s"Got unexpected message: $m")
}
private def stateResponse: Option[MinutesContainer[VAL, INDEX]] =
if (state.nonEmpty) Option(MinutesContainer(state.values.toSet)) else None
def diffFromMinutes(state: Map[INDEX, VAL], minutes: Iterable[MinuteLike[VAL, INDEX]]): Iterable[VAL] = {
val nowMillis = now().millisSinceEpoch
minutes
.map(cm => state.get(cm.key) match {
case None => Option(cm.toUpdatedMinute(nowMillis))
case Some(existingCm) => cm.maybeUpdated(existingCm, nowMillis)
})
.collect { case Some(update) => update }
}
def updateStateFromDiff(state: Map[INDEX, VAL], diff: Iterable[MinuteLike[VAL, INDEX]]): Map[INDEX, VAL] =
state ++ diff.collect {
case cm if firstMinuteMillis <= cm.minute && cm.minute < lastMinuteMillis => (cm.key, cm.toUpdatedMinute(cm.lastUpdated.getOrElse(0L)))
}
def updateAndPersistDiff(container: MinutesContainer[VAL, INDEX]): Unit =
diffFromMinutes(state, container.minutes) match {
case noDifferences if noDifferences.isEmpty => sender() ! UpdatedMillis.empty
case differences =>
state = updateStateFromDiff(state, differences)
val messageToPersist = containerToMessage(differences)
val updatedMillis = if (shouldSendEffectsToSubscriber(container))
UpdatedMillis(differences.map(_.minute))
else UpdatedMillis.empty
val replyToAndMessage = Option((sender(), updatedMillis))
persistAndMaybeSnapshotWithAck(messageToPersist, replyToAndMessage)
}
def shouldSendEffectsToSubscriber(container: MinutesContainer[VAL, INDEX]): Boolean =
container.contains(classOf[DeskRecMinute])
def containerToMessage(differences: Iterable[VAL]): GeneratedMessage
def updatesToApply(allUpdates: Iterable[(INDEX, VAL)]): Iterable[(INDEX, VAL)] =
maybePointInTime match {
case None => allUpdates
case Some(pit) => allUpdates.filter {
case (_, cm) => cm.lastUpdated.getOrElse(0L) <= pit
}
}
}
| UKHomeOffice/drt-scalajs-spa-exploration | server/src/main/scala/actors/daily/TerminalDayLikeActor.scala | Scala | apache-2.0 | 4,978 |
/*
Copyright (c) 2016, Rice University
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Rice University
nor the names of its contributors may be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.spark.rdd.cl
import scala.reflect.ClassTag
import scala.reflect._
import scala.reflect.runtime.universe._
import java.net._
import java.util.LinkedList
import java.util.Map
import java.util.HashMap
import org.apache.spark.{Partition, TaskContext}
import org.apache.spark.rdd._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.mllib.linalg.DenseVector
import org.apache.spark.mllib.linalg.SparseVector
import com.amd.aparapi.internal.model.ClassModel
import com.amd.aparapi.internal.model.Tuple2ClassModel
import com.amd.aparapi.internal.model.DenseVectorClassModel
import com.amd.aparapi.internal.model.SparseVectorClassModel
import com.amd.aparapi.internal.model.HardCodedClassModels
import com.amd.aparapi.internal.model.HardCodedClassModels.ShouldNotCallMatcher
import com.amd.aparapi.internal.model.Entrypoint
import com.amd.aparapi.internal.writer.KernelWriter
import com.amd.aparapi.internal.writer.KernelWriter.WriterAndKernel
import com.amd.aparapi.internal.writer.BlockWriter
import com.amd.aparapi.internal.writer.ScalaArrayParameter
import com.amd.aparapi.internal.writer.ScalaParameter.DIRECTION
/*
 * A new CLMappedRDD object is created for each partition/task being processed;
 * the lifetime and accessibility of items inside an instance of these is
 * limited to one thread and one task running on that thread.
 */
class CLMappedRDD[U: ClassTag, T: ClassTag](val prev: RDD[T], val f: T => U,
    val useSwat: Boolean) extends RDD[U](prev) {

  // Mapping is element-wise, so partitioning and partition layout are
  // inherited unchanged from the parent RDD.
  override val partitioner = firstParent[T].partitioner

  override def getPartitions: Array[Partition] = firstParent[T].partitions

  /**
   * Compute one partition.
   *
   * When `useSwat` is true the partition is handed to a
   * [[PullCLRDDProcessor]], which applies `f` via the SWAT/OpenCL path.
   * Otherwise `f` is applied lazily, element by element, on the JVM and a
   * diagnostic line identifying the thread/stage/partition is written to
   * stderr.
   *
   * @param split   the partition to compute
   * @param context task context for the running task
   * @return iterator over the mapped elements of this partition
   */
  override def compute(split: Partition, context: TaskContext): Iterator[U] = {
    val nested = firstParent[T].iterator(split, context)
    val threadId: Int = RuntimeUtil.getThreadID

    if (useSwat) {
      // Offload the whole partition to the OpenCL processor. (A previous
      // experiment routed every Nth thread to the JVM path instead; that
      // dead code has been removed.)
      new PullCLRDDProcessor(nested, f, context, firstParent[T].id, split.index)
    } else {
      System.err.println("Thread = " + threadId + " running stage = " +
          context.stageId + ", partition = " + context.partitionId +
          " on JVM")
      // Plain lazy JVM mapping; no explicit `return` needed — the last
      // expression is the result.
      new Iterator[U] {
        def next(): U = f(nested.next)
        def hasNext(): Boolean = nested.hasNext
      }
    }
  }
}
| agrippa/spark-swat | swat/src/main/scala/org/apache/spark/rdd/cl/CLMappedRDD.scala | Scala | bsd-3-clause | 4,041 |
package org.jetbrains.plugins.scala
package lang
package psi
package types
import com.intellij.psi.PsiClass
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.ScFieldId
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScBindingPattern
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.TypeParameter
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import scala.collection.mutable
/**
 * A compound (refinement) type: `C1 with C2 { decls }`.
 *
 * Substitutor should be meaningful only for decls and typeDecls. Components
 * shouldn't be applied by substitutor.
 *
 * @param components   the component types joined by `with`
 * @param signatureMap term declarations of the refinement, keyed by signature,
 *                     mapped to their (return) types
 * @param typesMap     type-alias declarations of the refinement, keyed by name
 */
case class ScCompoundType(components: Seq[ScType], signatureMap: Map[Signature, ScType],
                          typesMap: Map[String, TypeAliasSignature]) extends ValueType {
  // Cached hash code; -1 marks "not yet computed". Compound types are hashed
  // frequently during conformance checks, so the value is computed lazily once.
  private var hash: Int = -1

  override def hashCode: Int = {
    if (hash == -1) {
      hash = components.hashCode() + (signatureMap.hashCode() * 31 + typesMap.hashCode()) * 31
    }
    hash
  }

  // Visitor dispatch for ScalaTypeVisitor.
  def visitType(visitor: ScalaTypeVisitor) {
    visitor.visitCompoundType(this)
  }

  /**
   * Structural depth of this type: the maximum of the component depths and of
   * the refinement declarations' depths (declarations contribute +1 level).
   * Signature depth accounts for type parameters when present; alias depth is
   * the deeper of its two bounds.
   */
  override def typeDepth: Int = {
    val depths = signatureMap.map {
      case (sign: Signature, tp: ScType) =>
        val rtDepth = tp.typeDepth
        if (sign.typeParams.nonEmpty) {
          (ScType.typeParamsDepth(sign.typeParams) + 1).max(rtDepth)
        } else rtDepth
    } ++ typesMap.map {
      case (s: String, sign: TypeAliasSignature) =>
        val boundsDepth = sign.lowerBound.typeDepth.max(sign.upperBound.typeDepth)
        if (sign.typeParams.nonEmpty) {
          (ScType.typeParamsDepth(sign.typeParams.toArray) + 1).max(boundsDepth)
        } else boundsDepth
    }
    val ints = components.map(_.typeDepth)
    val componentsDepth = if (ints.length == 0) 0 else ints.max
    if (depths.nonEmpty) componentsDepth.max(depths.max + 1)
    else componentsDepth
  }

  /**
   * Returns a copy of this compound type with abstract types stripped from
   * every component, every signature (parameter types, type-parameter bounds,
   * return type) and every type alias. Named elements of signatures are
   * replaced by "compound copies" carrying the rewritten types.
   */
  override def removeAbstracts = ScCompoundType(components.map(_.removeAbstracts),
    signatureMap.map {
      case (s: Signature, tp: ScType) =>
        // Rebuild a type parameter with abstracts removed from its bounds,
        // recursing into nested (higher-kinded) type parameters.
        def updateTypeParam(tp: TypeParameter): TypeParameter = {
          new TypeParameter(tp.name, tp.typeParams.map(updateTypeParam), () => tp.lowerType().removeAbstracts,
            () => tp.upperType().removeAbstracts, tp.ptp)
        }
        val pTypes: List[Seq[() => ScType]] = s.substitutedTypes.map(_.map(f => () => f().removeAbstracts))
        val tParams: Array[TypeParameter] = if (s.typeParams.length == 0) TypeParameter.EMPTY_ARRAY else s.typeParams.map(updateTypeParam)
        val rt: ScType = tp.removeAbstracts
        (new Signature(s.name, pTypes, s.paramLength, tParams,
          ScSubstitutor.empty, s.namedElement match {
            case fun: ScFunction =>
              ScFunction.getCompoundCopy(pTypes.map(_.map(_()).toList), tParams.toList, rt, fun)
            case b: ScBindingPattern => ScBindingPattern.getCompoundCopy(rt, b)
            case f: ScFieldId => ScFieldId.getCompoundCopy(rt, f)
            case named => named
          }, s.hasRepeatedParam), rt)
    }, typesMap.map {
      case (s: String, sign) => (s, sign.updateTypes(_.removeAbstracts))
    })

  import scala.collection.immutable.{HashSet => IHashSet}

  /**
   * Recursively applies `update` to this type and all types it contains.
   * `visited` guards against infinite recursion: compound types can refer to
   * themselves through their signatures, so a revisited node only gets the
   * top-level `update` applied, without descending again.
   */
  override def recursiveUpdate(update: ScType => (Boolean, ScType), visited: IHashSet[ScType]): ScType = {
    if (visited.contains(this)) {
      return update(this) match {
        case (true, res) => res
        case _ => this
      }
    }
    update(this) match {
      case (true, res) => res
      case _ =>
        // Rebuild a type parameter with recursively-updated bounds. The bounds
        // are forced eagerly here and re-wrapped as constant thunks.
        def updateTypeParam(tp: TypeParameter): TypeParameter = {
          new TypeParameter(tp.name, tp.typeParams.map(updateTypeParam), {
            val res = tp.lowerType().recursiveUpdate(update, visited + this)
            () => res
          }, {
            val res = tp.upperType().recursiveUpdate(update, visited + this)
            () => res
          }, tp.ptp)
        }
        new ScCompoundType(components.map(_.recursiveUpdate(update, visited + this)), signatureMap.map {
          case (s: Signature, tp) =>
            val pTypes: List[Seq[() => ScType]] =
              s.substitutedTypes.map(_.map(f => () => f().recursiveUpdate(update, visited + this)))
            val tParams: Array[TypeParameter] = if (s.typeParams.length == 0) TypeParameter.EMPTY_ARRAY else s.typeParams.map(updateTypeParam)
            val rt: ScType = tp.recursiveUpdate(update, visited + this)
            (new Signature(
              s.name, pTypes, s.paramLength, tParams, ScSubstitutor.empty, s.namedElement match {
                case fun: ScFunction =>
                  ScFunction.getCompoundCopy(pTypes.map(_.map(_()).toList), tParams.toList, rt, fun)
                case b: ScBindingPattern => ScBindingPattern.getCompoundCopy(rt, b)
                case f: ScFieldId => ScFieldId.getCompoundCopy(rt, f)
                case named => named
              }, s.hasRepeatedParam
            ), rt)
        }, typesMap.map {
          case (s, sign) => (s, sign.updateTypes(_.recursiveUpdate(update, visited + this)))
        })
    }
  }

  /**
   * Variance-aware recursive update threading user data of type `T` through
   * every call. NOTE(review): nested positions (bounds, parameter types,
   * signature return types) are all updated with variance 1 while only the
   * components keep the incoming `variance` — presumably intentional for
   * refinement members, but worth confirming.
   */
  override def recursiveVarianceUpdateModifiable[T](data: T, update: (ScType, Int, T) => (Boolean, ScType, T),
                                                    variance: Int = 1): ScType = {
    update(this, variance, data) match {
      case (true, res, _) => res
      case (_, _, newData) =>
        // Same shape as in recursiveUpdate: rebuild bounds eagerly, re-wrap as
        // thunks, recurse into nested type parameters.
        def updateTypeParam(tp: TypeParameter): TypeParameter = {
          new TypeParameter(tp.name, tp.typeParams.map(updateTypeParam), {
            val res = tp.lowerType().recursiveVarianceUpdateModifiable(newData, update, 1)
            () => res
          }, {
            val res = tp.upperType().recursiveVarianceUpdateModifiable(newData, update, 1)
            () => res
          }, tp.ptp)
        }
        new ScCompoundType(components.map(_.recursiveVarianceUpdateModifiable(newData, update, variance)), signatureMap.map {
          case (s: Signature, tp) =>
            val tParams = if (s.typeParams.length == 0) TypeParameter.EMPTY_ARRAY else s.typeParams.map(updateTypeParam)
            (new Signature(
              s.name, s.substitutedTypes.map(_.map(f => () => f().recursiveVarianceUpdateModifiable(newData, update, 1))),
              s.paramLength, tParams, ScSubstitutor.empty, s.namedElement, s.hasRepeatedParam
            ), tp.recursiveVarianceUpdateModifiable(newData, update, 1))
        }, typesMap.map {
          case (s, sign) => (s, sign.updateTypes(_.recursiveVarianceUpdateModifiable(newData, update, 1)))
        })
    }
  }

  /**
   * Equivalence check against `r`, threading an undefined-type substitutor.
   *
   * Against another compound type: components must match pairwise (order
   * matters), signature maps must agree key-by-key on their types, and alias
   * maps must agree on both bounds of every alias.
   *
   * Against any other type: only a refinement-free compound can be
   * equivalent, and only when, after dropping `Any`/`AnyRef`/`java.lang.Object`
   * components (each of which forces `r` to conform to `AnyRef`), exactly one
   * component remains to compare with `r`.
   */
  override def equivInner(r: ScType, uSubst: ScUndefinedSubstitutor, falseUndef: Boolean): (Boolean, ScUndefinedSubstitutor) = {
    var undefinedSubst = uSubst
    r match {
      case r: ScCompoundType =>
        if (r == this) return (true, undefinedSubst)
        if (components.length != r.components.length) return (false, undefinedSubst)
        // Pairwise equivalence of components, accumulating the substitutor.
        val list = components.zip(r.components)
        val iterator = list.iterator
        while (iterator.hasNext) {
          val (w1, w2) = iterator.next()
          val t = Equivalence.equivInner(w1, w2, undefinedSubst, falseUndef)
          if (!t._1) return (false, undefinedSubst)
          undefinedSubst = t._2
        }
        // Every signature of this refinement must exist in r with an
        // equivalent type (sizes equal, so the check is symmetric).
        if (signatureMap.size != r.signatureMap.size) return (false, undefinedSubst)
        val iterator2 = signatureMap.iterator
        while (iterator2.hasNext) {
          val (sig, t) = iterator2.next()
          r.signatureMap.get(sig) match {
            case None => return (false, undefinedSubst)
            case Some(t1) =>
              val f = Equivalence.equivInner(t, t1, undefinedSubst, falseUndef)
              if (!f._1) return (false, undefinedSubst)
              undefinedSubst = f._2
          }
        }
        // Type aliases must match by name, with equivalent lower and upper
        // bounds.
        val types1 = typesMap
        val types2 = r.typesMap
        if (types1.size != types2.size) (false, undefinedSubst)
        else {
          val types1iterator = types1.iterator
          while (types1iterator.hasNext) {
            val (name, bounds1) = types1iterator.next()
            types2.get(name) match {
              case None => return (false, undefinedSubst)
              case Some (bounds2) =>
                var t = Equivalence.equivInner(bounds1.lowerBound, bounds2.lowerBound, undefinedSubst, falseUndef)
                if (!t._1) return (false, undefinedSubst)
                undefinedSubst = t._2
                t = Equivalence.equivInner(bounds1.upperBound, bounds2.upperBound, undefinedSubst, falseUndef)
                if (!t._1) return (false, undefinedSubst)
                undefinedSubst = t._2
            }
          }
          (true, undefinedSubst)
        }
      case _ =>
        if (signatureMap.size == 0 && typesMap.size == 0) {
          // Drop trivial components; AnyRef/Object components additionally
          // require r to conform to AnyRef before being dropped.
          val filtered = components.filter {
            case psi.types.Any => false
            case psi.types.AnyRef =>
              if (!r.conforms(psi.types.AnyRef)) return (false, undefinedSubst)
              false
            case ScDesignatorType(obj: PsiClass) if obj.qualifiedName == "java.lang.Object" =>
              if (!r.conforms(psi.types.AnyRef)) return (false, undefinedSubst)
              false
            case _ => true
          }
          if (filtered.length == 1) Equivalence.equivInner(filtered(0), r, undefinedSubst, falseUndef)
          else (false, undefinedSubst)
        } else (false, undefinedSubst)
    }
  }
}
object ScCompoundType {
  /**
   * Builds a [[ScCompoundType]] from PSI declarations: term signatures are
   * collected from `decls`, type-alias signatures from `typeDecls`, and `subst`
   * is attached to each created signature.
   *
   * NOTE(review): the match on `decl` covers only functions, `var`s and
   * `val`s; any other ScDeclaredElementsHolder would raise a MatchError —
   * presumably these are the only holders that occur here, worth confirming.
   */
  def fromPsi(components: Seq[ScType], decls: Seq[ScDeclaredElementsHolder],
              typeDecls: Seq[ScTypeAlias], subst: ScSubstitutor): ScCompoundType = {
    // Custom bucketing: hash signatures by name and parameter length only, so
    // that signatures which may compare equal under Signature's equality land
    // in the same hash bucket.
    val signatureMapVal: mutable.HashMap[Signature, ScType] = new mutable.HashMap[Signature, ScType] {
      override def elemHashCode(s : Signature) = s.name.hashCode * 31 + {
        val length = s.paramLength
        // A zero total parameter count hashes like List(0) so that empty and
        // zero-length parameter lists collide. TODO confirm intent.
        if (length.sum == 0) List(0).hashCode()
        else length.hashCode()
      }
    }
    val typesVal = new mutable.HashMap[String, TypeAliasSignature]
    for (typeDecl <- typeDecls) {
      typesVal += ((typeDecl.name, new TypeAliasSignature(typeDecl)))
    }
    for (decl <- decls) {
      decl match {
        case fun: ScFunction =>
          // Methods keep their full physical signature (parameter types,
          // parameter counts, type parameters, repeated-parameter flag).
          signatureMapVal += ((new Signature(fun.name, PhysicalSignature.typesEval(fun), PhysicalSignature.paramLength(fun),
            TypeParameter.fromArray(fun.getTypeParameters), subst, fun, PhysicalSignature.hasRepeatedParam(fun)),
            fun.returnType.getOrAny))
        case varDecl: ScVariable =>
          // A var contributes both a getter signature and a `name_=` setter
          // signature returning Unit.
          for (e <- varDecl.declaredElements) {
            val varType = e.getType(TypingContext.empty)
            signatureMapVal += ((new Signature(e.name, Seq.empty, 0, subst, e), varType.getOrAny))
            signatureMapVal += ((new Signature(e.name + "_=", Seq(() => varType.getOrAny), 1, subst, e), psi.types.Unit)) //setter
          }
        case valDecl: ScValue =>
          // A val contributes a getter signature only.
          for (e <- valDecl.declaredElements) {
            val valType = e.getType(TypingContext.empty)
            signatureMapVal += ((new Signature(e.name, Seq.empty, 0, subst, e), valType.getOrAny))
          }
      }
    }
    ScCompoundType(components, signatureMapVal.toMap, typesVal.toMap)
  }

  // Marker class; no members. Purpose not evident from this file.
  class CompoundSignature()
} | LPTK/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/types/ScCompoundType.scala | Scala | apache-2.0 | 11,171 |
package demo.sub
class A {
  /** Implicitly converts an `Int` to a [[C]] wrapping that value. */
  implicit def x(i: Int): C = new C(i)
}
/** Wraps an integer value. */
class C(i: Int) {
  /** The wrapped value incremented by one. */
  def y: Int = i + 1
}
| som-snytt/xsbt | sbt/src/sbt-test/source-dependencies/pkg-self/changes/A1.scala | Scala | bsd-3-clause | 103 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets that meet specific criteria, giving a quick overview of the dataset's contents without deeper analysis.