code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
package com.deleris.tetrix

import akka.actor._

/** Actor driving one player's tetrix stage.
  *
  * Translates incoming game messages into pure `GameState` transitions,
  * applies them against the state held by `stateActor`, and forwards attack
  * lines to the opposing player's stage actor.
  *
  * @param stateActor actor holding the authoritative `GameState`
  */
class StageActor(stateActor: ActorRef) extends Actor {
  import Stage._
  import scala.concurrent.Await
  import scala.concurrent.duration._
  import akka.pattern.ask

  def receive: Receive = {
    case MoveLeft  => updateState(moveLeft)
    case MoveRight => updateState(moveRight)
    case RotateCW  => updateState(rotateCW)
    case Tick      => updateState(tick)
    case Drop      => updateState(drop)
    case Attack    => updateState(notifyAttack)
  }

  /** Selects the other player's stage actor. Assumes both players are siblings
    * named "playerActor1" / "playerActor2" under the same parent.
    */
  private[this] def opponent: ActorSelection =
    if (self.path.name == "playerActor1") context.actorSelection("../playerActor2") //context.actorFor("/playerActor2")
    else context.actorSelection("../playerActor1") //context.actorFor("/playerActor1")

  /** Fetches the current state, applies `trans`, stores the result, and sends
    * one `Attack` per deleted row beyond the first to the opponent.
    *
    * NOTE(review): blocking with `Await` inside an actor ties up a dispatcher
    * thread; `pipeTo` would be the non-blocking alternative. Kept as-is to
    * preserve the strictly sequential get-transform-set semantics.
    */
  private[this] def updateState(trans: GameState => GameState): Unit = {
    val future = (stateActor ? GetState)(1.second).mapTo[GameState]
    val s1 = Await.result(future, 1.second)
    val s2 = trans(s1)
    stateActor ! SetState(s2)
    // Deleting n rows sends n - 1 attacks; `0 to n - 2` is empty when n < 2.
    (0 to s2.lastDeleted - 2) foreach { _ => opponent ! Attack }
  }
}
ldeleris/tetrix-scala
library/src/main/scala/StageActor.scala
Scala
mit
1,147
package scala.tools.nsc package backend.jvm package analysis import java.lang.invoke.LambdaMetafactory import scala.annotation.switch import scala.collection.JavaConverters._ import scala.collection.mutable import scala.tools.asm.Opcodes._ import scala.tools.asm.tree._ import scala.tools.asm.tree.analysis._ import scala.tools.asm.{Handle, Type} import scala.tools.nsc.backend.jvm.BTypes._ import scala.tools.nsc.backend.jvm.GenBCode._ import scala.tools.nsc.backend.jvm.opt.BytecodeUtils._ /** * This component hosts tools and utilities used in the backend that require access to a `BTypes` * instance. * * One example is the AsmAnalyzer class, which runs `computeMaxLocalsMaxStack` on the methodNode to * be analyzed. This method in turn lives inside the BTypes assembly because it queries the per-run * cache `maxLocalsMaxStackComputed` defined in there. */ class BackendUtils[BT <: BTypes](val btypes: BT) { import btypes._ import btypes.coreBTypes._ import callGraph.ClosureInstantiation /** * A wrapper to make ASM's Analyzer a bit easier to use. */ class AsmAnalyzer[V <: Value](methodNode: MethodNode, classInternalName: InternalName, val analyzer: Analyzer[V] = new Analyzer(new BasicInterpreter)) { computeMaxLocalsMaxStack(methodNode) try { analyzer.analyze(classInternalName, methodNode) } catch { case ae: AnalyzerException => throw new AnalyzerException(null, "While processing " + classInternalName + "." + methodNode.name, ae) } def frameAt(instruction: AbstractInsnNode): Frame[V] = analyzer.frameAt(instruction, methodNode) } /** * See the doc comment on package object `analysis` for a discussion on performance. 
*/ object AsmAnalyzer { // jvm limit is 65535 for both number of instructions and number of locals private def size(method: MethodNode) = method.instructions.size.toLong * method.maxLocals * method.maxLocals // with the limits below, analysis should not take more than one second private val nullnessSizeLimit = 5000l * 600l * 600l // 5000 insns, 600 locals private val basicValueSizeLimit = 9000l * 1000l * 1000l private val sourceValueSizeLimit = 8000l * 950l * 950l def sizeOKForAliasing(method: MethodNode): Boolean = size(method) < nullnessSizeLimit def sizeOKForNullness(method: MethodNode): Boolean = size(method) < nullnessSizeLimit def sizeOKForBasicValue(method: MethodNode): Boolean = size(method) < basicValueSizeLimit def sizeOKForSourceValue(method: MethodNode): Boolean = size(method) < sourceValueSizeLimit } class ProdConsAnalyzer(val methodNode: MethodNode, classInternalName: InternalName) extends AsmAnalyzer(methodNode, classInternalName, new Analyzer(new InitialProducerSourceInterpreter)) with ProdConsAnalyzerImpl class NonLubbingTypeFlowAnalyzer(val methodNode: MethodNode, classInternalName: InternalName) extends AsmAnalyzer(methodNode, classInternalName, new Analyzer(new NonLubbingTypeFlowInterpreter)) /** * Add: * private static Object $deserializeLambda$(SerializedLambda l) { * return indy[scala.runtime.LambdaDeserialize.bootstrap](l) * } * * We use invokedynamic here to enable caching within the deserializer without needing to * host a static field in the enclosing class. This allows us to add this method to interfaces * that define lambdas in default methods. */ def addLambdaDeserialize(classNode: ClassNode, implMethods: Iterable[Handle]): Unit = { val cw = classNode // Make sure to reference the ClassBTypes of all types that are used in the code generated // here (e.g. java/util/Map) are initialized. Initializing a ClassBType adds it to the // `classBTypeFromInternalName` map. 
When writing the classfile, the asm ClassWriter computes // stack map frames and invokes the `getCommonSuperClass` method. This method expects all // ClassBTypes mentioned in the source code to exist in the map. val nilLookupDesc = MethodBType(Nil, jliMethodHandlesLookupRef).descriptor val serlamObjDesc = MethodBType(jliSerializedLambdaRef :: Nil, ObjectRef).descriptor { val mv = cw.visitMethod(ACC_PRIVATE + ACC_STATIC + ACC_SYNTHETIC, "$deserializeLambda$", serlamObjDesc, null, null) mv.visitCode() mv.visitVarInsn(ALOAD, 0) mv.visitInvokeDynamicInsn("lambdaDeserialize", serlamObjDesc, lambdaDeserializeBootstrapHandle, implMethods.toArray: _*) mv.visitInsn(ARETURN) mv.visitEnd() } } /** * Clone the instructions in `methodNode` into a new [[InsnList]], mapping labels according to * the `labelMap`. Returns the new instruction list and a map from old to new instructions, and * a list of lambda implementation methods references by invokedynamic[LambdaMetafactory] for a * serializable SAM types. 
*/ def cloneInstructions(methodNode: MethodNode, labelMap: Map[LabelNode, LabelNode], keepLineNumbers: Boolean): (InsnList, Map[AbstractInsnNode, AbstractInsnNode], List[Handle]) = { val javaLabelMap = labelMap.asJava val result = new InsnList var map = Map.empty[AbstractInsnNode, AbstractInsnNode] var inlinedTargetHandles = mutable.ListBuffer[Handle]() for (ins <- methodNode.instructions.iterator.asScala) { ins match { case callGraph.LambdaMetaFactoryCall(indy, _, _, _) => indy.bsmArgs match { case Array(_, targetHandle: Handle, _, flags: Integer, xs@_*) if (flags.intValue & LambdaMetafactory.FLAG_SERIALIZABLE) != 0 => inlinedTargetHandles += targetHandle case _ => } case _ => } if (keepLineNumbers || !ins.isInstanceOf[LineNumberNode]) { val cloned = ins.clone(javaLabelMap) result add cloned map += ((ins, cloned)) } } (result, map, inlinedTargetHandles.toList) } def getBoxedUnit: FieldInsnNode = new FieldInsnNode(GETSTATIC, srBoxedUnitRef.internalName, "UNIT", srBoxedUnitRef.descriptor) private val anonfunAdaptedName = """.*\$anonfun\$.*\$\d+\$adapted""".r def hasAdaptedImplMethod(closureInit: ClosureInstantiation): Boolean = { anonfunAdaptedName.pattern.matcher(closureInit.lambdaMetaFactoryCall.implMethod.getName).matches } private def primitiveAsmTypeToBType(primitiveType: Type): PrimitiveBType = (primitiveType.getSort: @switch) match { case Type.BOOLEAN => BOOL case Type.BYTE => BYTE case Type.CHAR => CHAR case Type.SHORT => SHORT case Type.INT => INT case Type.LONG => LONG case Type.FLOAT => FLOAT case Type.DOUBLE => DOUBLE case _ => null } def isScalaBox(insn: MethodInsnNode): Boolean = { insn.owner == srBoxesRunTimeRef.internalName && { val args = Type.getArgumentTypes(insn.desc) args.length == 1 && (srBoxesRuntimeBoxToMethods.get(primitiveAsmTypeToBType(args(0))) match { case Some(MethodNameAndType(name, tp)) => name == insn.name && tp.descriptor == insn.desc case _ => false }) } } def getScalaBox(primitiveType: Type): MethodInsnNode = { val bType = 
primitiveAsmTypeToBType(primitiveType) val MethodNameAndType(name, methodBType) = srBoxesRuntimeBoxToMethods(bType) new MethodInsnNode(INVOKESTATIC, srBoxesRunTimeRef.internalName, name, methodBType.descriptor, /*itf =*/ false) } def isScalaUnbox(insn: MethodInsnNode): Boolean = { insn.owner == srBoxesRunTimeRef.internalName && (srBoxesRuntimeUnboxToMethods.get(primitiveAsmTypeToBType(Type.getReturnType(insn.desc))) match { case Some(MethodNameAndType(name, tp)) => name == insn.name && tp.descriptor == insn.desc case _ => false }) } def getScalaUnbox(primitiveType: Type): MethodInsnNode = { val bType = primitiveAsmTypeToBType(primitiveType) val MethodNameAndType(name, methodBType) = srBoxesRuntimeUnboxToMethods(bType) new MethodInsnNode(INVOKESTATIC, srBoxesRunTimeRef.internalName, name, methodBType.descriptor, /*itf =*/ false) } private def calleeInMap(insn: MethodInsnNode, map: Map[InternalName, MethodNameAndType]): Boolean = map.get(insn.owner) match { case Some(MethodNameAndType(name, tp)) => insn.name == name && insn.desc == tp.descriptor case _ => false } def isJavaBox(insn: MethodInsnNode): Boolean = calleeInMap(insn, javaBoxMethods) def isJavaUnbox(insn: MethodInsnNode): Boolean = calleeInMap(insn, javaUnboxMethods) def isPredefAutoBox(insn: MethodInsnNode): Boolean = { insn.owner == PredefRef.internalName && (predefAutoBoxMethods.get(insn.name) match { case Some(tp) => insn.desc == tp.descriptor case _ => false }) } def isPredefAutoUnbox(insn: MethodInsnNode): Boolean = { insn.owner == PredefRef.internalName && (predefAutoUnboxMethods.get(insn.name) match { case Some(tp) => insn.desc == tp.descriptor case _ => false }) } def isRefCreate(insn: MethodInsnNode): Boolean = calleeInMap(insn, srRefCreateMethods) def isRefZero(insn: MethodInsnNode): Boolean = calleeInMap(insn, srRefZeroMethods) def runtimeRefClassBoxedType(refClass: InternalName): Type = Type.getArgumentTypes(srRefCreateMethods(refClass).methodType.descriptor)(0) def isSideEffectFreeCall(insn: 
MethodInsnNode): Boolean = { isScalaBox(insn) || isScalaUnbox(insn) || isJavaBox(insn) || // not java unbox, it may NPE isSideEffectFreeConstructorCall(insn) } def isNonNullMethodInvocation(mi: MethodInsnNode): Boolean = { isJavaBox(mi) || isScalaBox(mi) || isPredefAutoBox(mi) || isRefCreate(mi) || isRefZero(mi) } def isModuleLoad(insn: AbstractInsnNode, moduleName: InternalName): Boolean = insn match { case fi: FieldInsnNode => fi.getOpcode == GETSTATIC && fi.owner == moduleName && fi.name == "MODULE$" && fi.desc == ("L" + moduleName + ";") case _ => false } def isPredefLoad(insn: AbstractInsnNode) = isModuleLoad(insn, PredefRef.internalName) def isPrimitiveBoxConstructor(insn: MethodInsnNode): Boolean = calleeInMap(insn, primitiveBoxConstructors) def isRuntimeRefConstructor(insn: MethodInsnNode): Boolean = calleeInMap(insn, srRefConstructors) def isTupleConstructor(insn: MethodInsnNode): Boolean = calleeInMap(insn, tupleClassConstructors) // unused objects created by these constructors are eliminated by pushPop private lazy val sideEffectFreeConstructors: Set[(String, String)] = { val ownerDesc = (p: (InternalName, MethodNameAndType)) => (p._1, p._2.methodType.descriptor) primitiveBoxConstructors.map(ownerDesc).toSet ++ srRefConstructors.map(ownerDesc) ++ tupleClassConstructors.map(ownerDesc) ++ Set( (ObjectRef.internalName, MethodBType(Nil, UNIT).descriptor), (StringRef.internalName, MethodBType(Nil, UNIT).descriptor), (StringRef.internalName, MethodBType(List(StringRef), UNIT).descriptor), (StringRef.internalName, MethodBType(List(ArrayBType(CHAR)), UNIT).descriptor)) } def isSideEffectFreeConstructorCall(insn: MethodInsnNode): Boolean = { insn.name == INSTANCE_CONSTRUCTOR_NAME && sideEffectFreeConstructors((insn.owner, insn.desc)) } private lazy val classesOfSideEffectFreeConstructors = sideEffectFreeConstructors.map(_._1) def isNewForSideEffectFreeConstructor(insn: AbstractInsnNode) = { insn.getOpcode == NEW && { val ti = insn.asInstanceOf[TypeInsnNode] 
classesOfSideEffectFreeConstructors.contains(ti.desc) } } def isBoxedUnit(insn: AbstractInsnNode) = { insn.getOpcode == GETSTATIC && { val fi = insn.asInstanceOf[FieldInsnNode] fi.owner == srBoxedUnitRef.internalName && fi.name == "UNIT" && fi.desc == srBoxedUnitRef.descriptor } } /** * Visit the class node and collect all referenced nested classes. */ def collectNestedClasses(classNode: ClassNode): List[ClassBType] = { val innerClasses = mutable.Set.empty[ClassBType] def visitInternalName(internalName: InternalName): Unit = if (internalName != null) { val t = classBTypeFromParsedClassfile(internalName) if (t.isNestedClass.get) innerClasses += t } // either an internal/Name or [[Linternal/Name; -- there are certain references in classfiles // that are either an internal name (without the surrounding `L;`) or an array descriptor // `[Linternal/Name;`. def visitInternalNameOrArrayReference(ref: String): Unit = if (ref != null) { val bracket = ref.lastIndexOf('[') if (bracket == -1) visitInternalName(ref) else if (ref.charAt(bracket + 1) == 'L') visitInternalName(ref.substring(bracket + 2, ref.length - 1)) } // we are only interested in the class references in the descriptor, so we can skip over // primitives and the brackets of array descriptors def visitDescriptor(desc: String): Unit = (desc.charAt(0): @switch) match { case '(' => val internalNames = mutable.ListBuffer.empty[String] var i = 1 while (i < desc.length) { if (desc.charAt(i) == 'L') { val start = i + 1 // skip the L while (desc.charAt(i) != ';') i += 1 internalNames append desc.substring(start, i) } // skips over '[', ')', primitives i += 1 } internalNames foreach visitInternalName case 'L' => visitInternalName(desc.substring(1, desc.length - 1)) case '[' => visitInternalNameOrArrayReference(desc) case _ => // skip over primitive types } def visitConstant(const: AnyRef): Unit = const match { case t: Type => visitDescriptor(t.getDescriptor) case _ => } // in principle we could references to annotation 
types, as they only end up as strings in the // constant pool, not as class references. however, the java compiler still includes nested // annotation classes in the innerClass table, so we do the same. explained in detail in the // large comment in class BTypes. def visitAnnotation(annot: AnnotationNode): Unit = { visitDescriptor(annot.desc) if (annot.values != null) annot.values.asScala foreach visitConstant } def visitAnnotations(annots: java.util.List[_ <: AnnotationNode]) = if (annots != null) annots.asScala foreach visitAnnotation def visitAnnotationss(annotss: Array[java.util.List[AnnotationNode]]) = if (annotss != null) annotss foreach visitAnnotations def visitHandle(handle: Handle): Unit = { visitInternalNameOrArrayReference(handle.getOwner) visitDescriptor(handle.getDesc) } visitInternalName(classNode.name) innerClasses ++= classBTypeFromParsedClassfile(classNode.name).info.get.nestedClasses visitInternalName(classNode.superName) classNode.interfaces.asScala foreach visitInternalName visitInternalName(classNode.outerClass) visitAnnotations(classNode.visibleAnnotations) visitAnnotations(classNode.visibleTypeAnnotations) visitAnnotations(classNode.invisibleAnnotations) visitAnnotations(classNode.invisibleTypeAnnotations) for (f <- classNode.fields.asScala) { visitDescriptor(f.desc) visitAnnotations(f.visibleAnnotations) visitAnnotations(f.visibleTypeAnnotations) visitAnnotations(f.invisibleAnnotations) visitAnnotations(f.invisibleTypeAnnotations) } for (m <- classNode.methods.asScala) { visitDescriptor(m.desc) visitAnnotations(m.visibleAnnotations) visitAnnotations(m.visibleTypeAnnotations) visitAnnotations(m.invisibleAnnotations) visitAnnotations(m.invisibleTypeAnnotations) visitAnnotationss(m.visibleParameterAnnotations) visitAnnotationss(m.invisibleParameterAnnotations) visitAnnotations(m.visibleLocalVariableAnnotations) visitAnnotations(m.invisibleLocalVariableAnnotations) m.exceptions.asScala foreach visitInternalName for (tcb <- 
m.tryCatchBlocks.asScala) visitInternalName(tcb.`type`) val iter = m.instructions.iterator() while (iter.hasNext) iter.next() match { case ti: TypeInsnNode => visitInternalNameOrArrayReference(ti.desc) case fi: FieldInsnNode => visitInternalNameOrArrayReference(fi.owner); visitDescriptor(fi.desc) case mi: MethodInsnNode => visitInternalNameOrArrayReference(mi.owner); visitDescriptor(mi.desc) case id: InvokeDynamicInsnNode => visitDescriptor(id.desc); visitHandle(id.bsm); id.bsmArgs foreach visitConstant case ci: LdcInsnNode => visitConstant(ci.cst) case ma: MultiANewArrayInsnNode => visitDescriptor(ma.desc) case _ => } } innerClasses.toList } /** * In order to run an Analyzer, the maxLocals / maxStack fields need to be available. The ASM * framework only computes these values during bytecode generation. * * NOTE 1: as explained in the `analysis` package object, the maxStack value used by the Analyzer * may be smaller than the correct maxStack value in the classfile (Analyzers only use a single * slot for long / double values). The maxStack computed here are correct for running an analyzer, * but not for writing in the classfile. We let the ClassWriter recompute max's. * * NOTE 2: the maxStack value computed here may be larger than the smallest correct value * that would allow running an analyzer, see `InstructionStackEffect.forAsmAnalysisConservative`. * * NOTE 3: the implementation doesn't look at instructions that cannot be reached, it computes * the max local / stack size in the reachable code. These max's work just fine for running an * Analyzer: its implementation also skips over unreachable code in the same way. 
   */
  def computeMaxLocalsMaxStack(method: MethodNode): Unit = {
    if (isAbstractMethod(method) || isNativeMethod(method)) {
      // Abstract / native methods have no bytecode body, so both values are zero.
      method.maxLocals = 0
      method.maxStack = 0
    } else if (!maxLocalsMaxStackComputed(method)) {
      val size = method.instructions.size

      // Locals start at the space taken by the receiver + parameters.
      var maxLocals = parametersSize(method)
      var maxStack = 0

      // queue of instruction indices where analysis should start
      var queue = new Array[Int](8)
      var top = -1

      // Push an index onto the work queue, growing the backing array on demand.
      def enq(i: Int): Unit = {
        if (top == queue.length - 1) {
          val nq = new Array[Int](queue.length * 2)
          Array.copy(queue, 0, nq, 0, queue.length)
          queue = nq
        }
        top += 1
        queue(top) = i
      }

      def deq(): Int = {
        val r = queue(top)
        top -= 1
        r
      }

      // Return addresses for JSR subroutines, matched up when a RET is seen.
      val subroutineRetTargets = new mutable.Stack[AbstractInsnNode]

      // for each instruction in the queue, contains the stack height at this instruction.
      // once an instruction has been treated, contains -1 to prevent re-enqueuing
      val stackHeights = new Array[Int](size)

      def enqInsn(insn: AbstractInsnNode, height: Int): Unit = {
        enqInsnIndex(method.instructions.indexOf(insn), height)
      }

      def enqInsnIndex(insnIndex: Int, height: Int): Unit = {
        if (insnIndex < size && stackHeights(insnIndex) != -1) {
          stackHeights(insnIndex) = height
          enq(insnIndex)
        }
      }

      // Exception handlers are analysis entry points with exactly one value
      // (the thrown exception) on the stack.
      val tcbIt = method.tryCatchBlocks.iterator()
      while (tcbIt.hasNext) {
        val tcb = tcbIt.next()
        enqInsn(tcb.handler, 1)
        if (maxStack == 0) maxStack = 1
      }

      enq(0)
      while (top != -1) {
        val insnIndex = deq()
        val insn = method.instructions.get(insnIndex)
        val initHeight = stackHeights(insnIndex)
        stackHeights(insnIndex) = -1 // prevent i from being enqueued again

        if (insn.getOpcode == -1) { // frames, labels, line numbers
          enqInsnIndex(insnIndex + 1, initHeight)
        } else {
          val stackGrowth = InstructionStackEffect.maxStackGrowth(insn)
          val heightAfter = initHeight + stackGrowth
          if (heightAfter > maxStack) maxStack = heightAfter

          // update maxLocals
          insn match {
            case v: VarInsnNode =>
              val longSize = if (isSize2LoadOrStore(v.getOpcode)) 1 else 0
              maxLocals = math.max(maxLocals, v.`var` + longSize + 1) // + 1 because local numbers are 0-based

            case i: IincInsnNode =>
              maxLocals = math.max(maxLocals, i.`var` + 1)

            case _ =>
          }

          // Enqueue all control-flow successors with the stack height after this instruction.
          insn match {
            case j: JumpInsnNode =>
              if (j.getOpcode == JSR) {
                // JSR pushes the return address, so the subroutine entry sees one extra slot.
                val jsrTargetHeight = heightAfter + 1
                if (jsrTargetHeight > maxStack) maxStack = jsrTargetHeight
                subroutineRetTargets.push(j.getNext)
                enqInsn(j.label, jsrTargetHeight)
              } else {
                enqInsn(j.label, heightAfter)
                val opc = j.getOpcode
                if (opc != GOTO) enqInsnIndex(insnIndex + 1, heightAfter) // jump is conditional, so the successor is also a possible control flow target
              }

            case l: LookupSwitchInsnNode =>
              var j = 0
              while (j < l.labels.size) { enqInsn(l.labels.get(j), heightAfter); j += 1 }
              enqInsn(l.dflt, heightAfter)

            case t: TableSwitchInsnNode =>
              var j = 0
              while (j < t.labels.size) { enqInsn(t.labels.get(j), heightAfter); j += 1 }
              enqInsn(t.dflt, heightAfter)

            case r: VarInsnNode if r.getOpcode == RET =>
              enqInsn(subroutineRetTargets.pop(), heightAfter)

            case _ =>
              val opc = insn.getOpcode
              if (opc != ATHROW && !isReturn(insn)) enqInsnIndex(insnIndex + 1, heightAfter)
          }
        }
      }

      method.maxLocals = maxLocals
      method.maxStack = maxStack

      // Cache so repeated analyses of the same method skip this computation.
      maxLocalsMaxStackComputed += method
    }
  }
}
felixmulder/scala
src/compiler/scala/tools/nsc/backend/jvm/analysis/BackendUtils.scala
Scala
bsd-3-clause
21,816
package org.dbpedia.spotlight.io

import java.io._
import java.util.zip.GZIPInputStream
import org.dbpedia.spotlight.log.SpotlightLog
import scala.io.Source

/**
 * Iterates over nt files. Can also iterate over nq files not caring about the fourth element.
 * @author dirk
 */
object NTripleSource {
  def fromFile(ntFile: File): NTripleSource = new NTripleSource(ntFile)

  /** Traversable over the (subject, predicate, object) triples of an N-Triples
    * file. Files ending in ".gz" are transparently decompressed.
    */
  class NTripleSource(ntFile: File) extends Traversable[(String, String, String)] {
    override def foreach[U](f: ((String, String, String)) => U): Unit = {
      val raw: InputStream = new FileInputStream(ntFile)
      val input: InputStream =
        if (ntFile.getName.endsWith(".gz")) new GZIPInputStream(raw) else raw

      try {
        val linesIterator: Iterator[String] =
          try {
            Source.fromInputStream(input, "UTF-8").getLines
          } catch {
            // Fall back to the platform default charset if the file is not valid UTF-8.
            case e: java.nio.charset.MalformedInputException =>
              Source.fromInputStream(input).getLines
          }

        for (line <- linesIterator) {
          if (!line.startsWith("#")) { //comments
            val elements = line.trim.split(" ")
            if (elements.length >= 4) {
              // Strip the surrounding angle brackets / quote characters.
              val subj = elements(0).substring(1, elements(0).length - 1)
              val pred = elements(1).substring(1, elements(1).length - 1)
              val obj  = elements(2).substring(1, elements(2).length - 1)
              f((subj, pred, obj))
            } else {
              // FIX: `line` is a String, so the second conversion must be %s (was %d,
              // which throws IllegalFormatConversionException when formatting).
              SpotlightLog.error(this.getClass, "line must have at least 4 whitespaces; got %d in line: %s", elements.length - 1, line)
            }
          }
        }
      } finally {
        // FIX: the stream (and its GZIP wrapper) was previously never closed — resource leak.
        input.close()
      }
    }
  }
}
dbpedia-spotlight/dbpedia-spotlight-model
core/src/main/scala/org/dbpedia/spotlight/io/NTripleSource.scala
Scala
apache-2.0
1,889
/*
 * Copyright (C) Lightbend Inc. <https://www.lightbend.com>
 */
package docs.home.scaladsl.persistence

//#full-example
import com.lightbend.lagom.scaladsl.persistence.PersistentEntity.ReplyType
import akka.Done
import com.lightbend.lagom.scaladsl.playjson.JsonSerializer

// Marker trait sealing the set of commands accepted by the blog entity.
sealed trait BlogCommand

object BlogCommand {
  import play.api.libs.json._
  import JsonSerializer.emptySingletonFormat

  // JSON format for the PostContent payload carried by AddPost / GetPost replies.
  implicit val postContentFormat = Json.format[PostContent]

  // JSON serializers registered for every command and reply type; singleton
  // commands (GetPost, Publish) use the empty-singleton format.
  val serializers = Vector(
    JsonSerializer(Json.format[AddPost]),
    JsonSerializer(Json.format[AddPostDone]),
    JsonSerializer(emptySingletonFormat(GetPost)),
    JsonSerializer(Json.format[ChangeBody]),
    JsonSerializer(emptySingletonFormat(Publish))
  )
}

//#AddPost
// Create a new post; the entity replies with AddPostDone.
final case class AddPost(content: PostContent) extends BlogCommand with ReplyType[AddPostDone]
//#AddPost

// Reply returned for a successful AddPost, carrying the new post's id.
// (Not a BlogCommand itself — it is only a reply payload.)
final case class AddPostDone(postId: String)

// Read-only query; replies with the current PostContent.
case object GetPost extends BlogCommand with ReplyType[PostContent]

// Replace the post body; acknowledged with Done.
final case class ChangeBody(body: String) extends BlogCommand with ReplyType[Done]

// Publish the post; acknowledged with Done.
case object Publish extends BlogCommand with ReplyType[Done]
//#full-example
lagom/lagom
docs/manual/scala/guide/cluster/code/docs/home/scaladsl/persistence/BlogCommand.scala
Scala
apache-2.0
1,124
package com.typesafe.slick.testkit.tests

import slick.jdbc.H2Profile
import com.typesafe.slick.testkit.util.{RelationalTestDB, AsyncTest}

/**
 * Exercises Slick's query compiler semantics: composition, fusion,
 * subqueries, expansion and nesting levels. Each test method builds
 * schemas/queries locally and pins expected result sets literally.
 */
class NewQuerySemanticsTest extends AsyncTest[RelationalTestDB] {
  import tdb.profile.api._

  // Composition of take/map/join/union over the classic coffees/suppliers schema.
  def testNewComposition = {

    class SuppliersStd(tag: Tag) extends Table[(Int, String, String, String, String, String)](tag, "SUPPLIERS") {
      def id = column[Int]("SUP_ID", O.PrimaryKey) // This is the primary key column
      def name = column[String]("SUP_NAME")
      def street = column[String]("STREET")
      def city = column[String]("CITY")
      def state = column[String]("STATE")
      def zip = column[String]("ZIP")
      def * = (id, name, street, city, state, zip)
    }
    val suppliersStd = TableQuery[SuppliersStd]

    class CoffeesStd(tag: Tag) extends Table[(String, Int, Int, Int, Int)](tag, "COFFEES") {
      def name = column[String]("COF_NAME", O.PrimaryKey, O.Length(254))
      def supID = column[Int]("SUP_ID")
      def price = column[Int]("PRICE")
      def sales = column[Int]("SALES")
      def total = column[Int]("TOTAL")
      def * = (name, supID, price, sales, total)
      def supplier = foreignKey("SUP_FK", supID, suppliersStd)(_.id)
    }
    val coffeesStd = TableQuery[CoffeesStd]

    // Narrower projection over the same SUPPLIERS table.
    class Suppliers(tag: Tag) extends Table[(Int, String, String)](tag, "SUPPLIERS") {
      def id = column[Int]("SUP_ID", O.PrimaryKey) // This is the primary key column
      def name = column[String]("SUP_NAME")
      def street = column[String]("STREET")
      def city = column[String]("CITY")
      def state = column[String]("STATE")
      def zip = column[String]("ZIP")
      def * = (id, name, street)
    }
    val suppliers = TableQuery[Suppliers]

    // COFFEES mapping with a computed default projection (total * 10).
    class Coffees(tag: Tag) extends Table[(String, Int, Int, Int, Int)](tag, "COFFEES") {
      def name = column[String]("COF_NAME", O.PrimaryKey)
      def supID = column[Int]("SUP_ID")
      def price = column[Int]("PRICE")
      def sales = column[Int]("SALES")
      def total = column[Int]("TOTAL")
      def * = (name, supID, price, sales, (total * 10))
      def totalComputed = sales * price
      def supplier = foreignKey("SUP_FK", supID, suppliers)(_.id)
    }
    val coffees = TableQuery[Coffees]

    val setup = seq(
      (suppliersStd.schema ++ coffeesStd.schema).create,
      suppliersStd += (101, "Acme, Inc.", "99 Market Street", "Groundsville", "CA", "95199"),
      suppliersStd += ( 49, "Superior Coffee", "1 Party Place", "Mendocino", "CA", "95460"),
      suppliersStd += (150, "The High Ground", "100 Coffee Lane", "Meadows", "CA", "93966"),
      coffeesStd ++= Seq(
        ("Colombian", 101, 799, 1, 0),
        ("French_Roast", 49, 799, 2, 0),
        ("Espresso", 150, 999, 3, 0),
        ("Colombian_Decaf", 101, 849, 4, 0),
        ("French_Roast_Decaf", 49, 999, 5, 0)
      )
    ).named("setup")

    val qa = for {
      c <- coffees.take(3)
    } yield (c.supID, (c.name, 42))
    val qa2 = coffees.take(3).map(_.name).take(2)
    val qb = qa.take(2).map(_._2)
    val qb2 = qa.map(n => n).take(2).map(_._2)
    val qc = qa.map(_._2).take(2)

    val a1 = seq(
      mark("qa", qa.result).map(_.toSet).map { ra =>
        ra.size shouldBe 3 // No sorting, so result contents can vary
        ra shouldAllMatch { case (s: Int, (i: String, 42)) => () }
      },
      mark("qa2", qa2.result).map(_.toSet).map(_.size shouldBe 2),
      mark("qb", qb.result).map(_.toSet).map { rb =>
        rb.size shouldBe 2 // No sorting, so result contents can vary
        rb shouldAllMatch { case (i: String, 42) => () }
      },
      mark("qb2", qb2.result).map(_.toSet).map { rb2 =>
        rb2.size shouldBe 2 // No sorting, so result contents can vary
        rb2 shouldAllMatch { case (i: String, 42) => () }
      },
      mark("qc", qc.result).map(_.toSet).map { rc =>
        rc.size shouldBe 2 // No sorting, so result contents can vary
        rc shouldAllMatch { case (i: String, 42) => () }
      }
    )

    // Plain table
    val q0 = coffees

    // Plain implicit join
    val q1 = for {
      c <- coffees.sortBy(c => (c.name, c.price.desc)).take(2)
      s <- suppliers
    } yield ((c.name, (s.city ++ ":")), c, s, c.totalComputed)

    // Explicit join with condition
    val q1b_0 = coffees.sortBy(_.price).take(3) join suppliers on (_.supID === _.id)
    def q1b = for {
      (c, s) <- q1b_0.sortBy(_._1.price).take(2).filter(_._1.name =!= "Colombian")
      (c2, s2) <- q1b_0
    } yield (c.name, s.city, c2.name)

    def a2 = seq(
      mark("q0", q0.result).named("q0: Plain table").map(_.toSet).map { r0 =>
        r0 shouldBe Set(
          ("Colombian", 101, 799, 1, 0),
          ("French_Roast", 49, 799, 2, 0),
          ("Espresso", 150, 999, 3, 0),
          ("Colombian_Decaf", 101, 849, 4, 0),
          ("French_Roast_Decaf", 49, 999, 5, 0)
        )
      },
      mark("q1", q1.result).named("q1: Plain implicit join").map(_.toSet).map { r1 =>
        r1 shouldBe Set(
          (("Colombian","Groundsville:"),("Colombian",101,799,1,0),(101,"Acme, Inc.","99 Market Street"),799),
          (("Colombian","Mendocino:"),("Colombian",101,799,1,0),(49,"Superior Coffee","1 Party Place"),799),
          (("Colombian","Meadows:"),("Colombian",101,799,1,0),(150,"The High Ground","100 Coffee Lane"),799),
          (("Colombian_Decaf","Groundsville:"),("Colombian_Decaf",101,849,4,0),(101,"Acme, Inc.","99 Market Street"),3396),
          (("Colombian_Decaf","Mendocino:"),("Colombian_Decaf",101,849,4,0),(49,"Superior Coffee","1 Party Place"),3396),
          (("Colombian_Decaf","Meadows:"),("Colombian_Decaf",101,849,4,0),(150,"The High Ground","100 Coffee Lane"),3396)
        )
      },
      ifCap(rcap.pagingNested) {
        mark("q1b", q1b.result).named("q1b: Explicit join with condition").map { r1b =>
          r1b.toSet shouldBe Set(
            ("French_Roast","Mendocino","Colombian"),
            ("French_Roast","Mendocino","French_Roast"),
            ("French_Roast","Mendocino","Colombian_Decaf")
          )
        }
      }
    )

    // More elaborate query
    val q2 = for {
      c <- coffees.filter(_.price < 900).map(_.*)
      s <- suppliers if s.id === c._2
    } yield (c._1, s.name)

    // Lifting scalar values
    val q3 = coffees.flatMap { c =>
      val cf = Query(c).filter(_.price === 849)
      cf.flatMap { cf =>
        suppliers.filter(_.id === c.supID).map { s =>
          (c.name, s.name, cf.name, cf.total, cf.totalComputed)
        }
      }
    }

    // Lifting scalar values, with extra tuple
    val q3b = coffees.flatMap { c =>
      val cf = Query((c, 42)).filter(_._1.price < 900)
      cf.flatMap { case (cf, num) =>
        suppliers.filter(_.id === c.supID).map { s =>
          (c.name, s.name, cf.name, cf.total, cf.totalComputed, num)
        }
      }
    }

    // Map to tuple, then filter
    def q4 = for {
      c <- coffees.map(c => (c.name, c.price, 42)).sortBy(_._1).take(2).filter(_._2 < 800)
    } yield (c._1, c._3)

    // Map to tuple, then filter, with self-join
    def q4b_0 = coffees.map(c => (c.name, c.price, 42)).filter(_._2 < 800)
    def q4b = for {
      c <- q4b_0
      d <- q4b_0
    } yield (c,d)

    def a3 = for {
      _ <- q2.result.named("More elaborate query").map(_.toSet).map { r2 =>
        r2 shouldBe Set(
          ("Colombian","Acme, Inc."),
          ("French_Roast","Superior Coffee"),
          ("Colombian_Decaf","Acme, Inc.")
        )
      }
      _ <- q3.result.named("Lifting scalar values").map(_.toSet).map { r3 =>
        r3 shouldBe Set(("Colombian_Decaf","Acme, Inc.","Colombian_Decaf",0,3396))
      }
      _ <- q3b.result.named("Lifting scalar values, with extra tuple").map(_.toSet).map { r3b =>
        r3b shouldBe Set(
          ("Colombian","Acme, Inc.","Colombian",0,799,42),
          ("French_Roast","Superior Coffee","French_Roast",0,1598,42),
          ("Colombian_Decaf","Acme, Inc.","Colombian_Decaf",0,3396,42)
        )
      }
      _ <- ifCap(rcap.pagingNested) {
        mark("q4", q4.result).named("q4: Map to tuple, then filter").map(_.toSet shouldBe Set(("Colombian",42)))
      }
      _ <- mark("q4b", q4b.result).map(_.toSet shouldBe Set(
        (("Colombian",799,42),("Colombian",799,42)),
        (("Colombian",799,42),("French_Roast",799,42)),
        (("French_Roast",799,42),("Colombian",799,42)),
        (("French_Roast",799,42),("French_Roast",799,42))
      ))
    } yield ()

    // Implicit self-join
    val q5_0 = coffees.sortBy(_.price).take(2)
    val q5 = for {
      c1 <- q5_0
      c2 <- q5_0
    } yield (c1, c2)

    // Explicit self-join with condition
    val q5b = for {
      t <- q5_0 join q5_0 on (_.name === _.name)
    } yield (t._1, t._2)

    // Unused outer query result, unbound TableQuery
    val q6 = coffees.flatMap(c => suppliers)

    def a4 = seq(
      mark("q5", q5.result).named("q5: Implicit self-join").map(_.toSet).map { r5 =>
        r5 shouldBe Set(
          (("Colombian",101,799,1,0),("Colombian",101,799,1,0)),
          (("Colombian",101,799,1,0),("French_Roast",49,799,2,0)),
          (("French_Roast",49,799,2,0),("Colombian",101,799,1,0)),
          (("French_Roast",49,799,2,0),("French_Roast",49,799,2,0))
        )
      },
      mark("q5b", q5b.result).named("q5b: Explicit self-join with condition").map(_.toSet).map { r5b =>
        r5b shouldBe Set(
          (("Colombian",101,799,1,0),("Colombian",101,799,1,0)),
          (("French_Roast",49,799,2,0),("French_Roast",49,799,2,0))
        )
      },
      mark("q6", q6.result).named("q6: Unused outer query result, unbound TableQuery").map(_.toSet).map { r6 =>
        r6 shouldBe Set(
          (101,"Acme, Inc.","99 Market Street"),
          (49,"Superior Coffee","1 Party Place"),
          (150,"The High Ground","100 Coffee Lane")
        )
      }
    )

    // Simple union
    val q7a = for {
      c <- coffees.filter(_.price < 800) union coffees.filter(_.price > 950)
    } yield (c.name, c.supID, c.total)

    // Union
    val q7 = for {
      c <- coffees.filter(_.price < 800).map((_, 1)) union coffees.filter(_.price > 950).map((_, 2))
    } yield (c._1.name, c._1.supID, c._2)

    // Transitive push-down without union
    val q71 = for {
      c <- coffees.filter(_.price < 800).map((_, 1))
    } yield (c._1.name, c._1.supID, c._2)

    def a5 = seq(
      q7a.result.named("Simple union").map(_.toSet).map { r7a =>
        r7a shouldBe Set(
          ("Colombian",101,0),
          ("French_Roast",49,0),
          ("Espresso",150,0),
          ("French_Roast_Decaf",49,0)
        )
      },
      q7.result.named("Union").map(_.toSet).map { r7 =>
        r7 shouldBe Set(
          ("Colombian",101,1),
          ("French_Roast",49,1),
          ("Espresso",150,2),
          ("French_Roast_Decaf",49,2)
        )
      },
      q71.result.named("Transitive push-down without union").map(_.toSet).map { r71 =>
        r71 shouldBe Set(
          ("Colombian",101,1),
          ("French_Roast",49,1)
        )
      }
    )

    // Union with filter on the outside
    val q7b = q7 filter (_._1 =!= "Colombian")

    // Outer join
    val q8 = for {
      (c1, c2) <- coffees.filter(_.price < 900) joinLeft coffees.filter(_.price < 800) on (_.name === _.name)
    } yield (c1.name, c2.map(_.name))

    // Nested outer join
    val q8b = for {
      t <- coffees.sortBy(_.sales).take(1) joinLeft coffees.sortBy(_.sales).take(2) on (_.name === _.name) joinLeft coffees.sortBy(_.sales).take(4) on (_._1.supID === _.supID)
    } yield (t._1, t._2)

    def a6 = seq(
      q7b.result.named("Union with filter on the outside").map(_.toSet).map { r7b =>
        r7b shouldBe Set(
          ("French_Roast",49,1),
          ("Espresso",150,2),
          ("French_Roast_Decaf",49,2)
        )
      },
      q8.result.named("Outer join").map(_.toSet).map { r8 =>
        r8 shouldBe Set(
          ("Colombian",Some("Colombian")),
          ("French_Roast",Some("French_Roast")),
          ("Colombian_Decaf",None)
        )
      },
      q8b.result.named("Nested outer join").map(_.toSet).map { r8b =>
        r8b shouldBe Set(
          ((("Colombian",101,799,1,0),Some(("Colombian",101,799,1,0))),Some(("Colombian",101,799,1,0))),
          ((("Colombian",101,799,1,0),Some(("Colombian",101,799,1,0))),Some(("Colombian_Decaf",101,849,4,0)))
        )
      }
    )

    seq(setup, a1, a2, a3, a4, a5, a6)
  }

  // Legacy `~`-based projection API (TupleMethods); only checks that the queries run.
  def testOldComposition = {
    import TupleMethods._

    class Users(tag: Tag) extends Table[(Int, String, String)](tag, "users") {
      def id = column[Int]("id")
      def first = column[String]("first")
      def last = column[String]("last")
      def * = id ~ first ~ last
    }
    val users = TableQuery[Users]

    class Orders(tag: Tag) extends Table[(Int, Int)](tag, "orders") {
      def userID = column[Int]("userID")
      def orderID = column[Int]("orderID")
      def * = userID ~ orderID
    }
    val orders = TableQuery[Orders]

    val q2 = for {
      u <- users.sortBy(u => (u.first, u.last.desc))
      o <- orders filter { o => u.id === o.userID }
    } yield u.first ~ u.last ~ o.orderID

    val q3 = for (u <- users filter (_.id === 42)) yield u.first ~ u.last

    val q4 = (for {
      (u, o) <- users join orders on (_.id === _.userID)
    } yield (u.last, u.first ~ o.orderID)).sortBy(_._1).map(_._2)

    val q6a = (for (
      o <- orders if o.orderID === (for { o2 <- orders if o.userID === o2.userID } yield o2.orderID).max
    ) yield o.orderID).sorted

    val q6b = (for (
      o <- orders if o.orderID === (for { o2 <- orders if o.userID === o2.userID } yield o2.orderID).max
    ) yield o.orderID ~ o.userID).sortBy(_._1)

    val q6c = (for (
      o <- orders if o.orderID === (for { o2 <- orders if o.userID === o2.userID } yield o2.orderID).max
    ) yield o).sortBy(_.orderID).map(o => o.orderID ~ o.userID)

    seq(
      (users.schema ++ orders.schema).create,
      q3.result,
      q4.result,
      q6a.result,
      q6b.result,
      q6c.result,
      (users.schema ++ orders.schema).drop
    )
  }

  // Fusion of a for-comprehension containing an intermediate `start = ...` binding.
  def testAdvancedFusion = {
    class TableA(tag: Tag) extends Table[Int](tag, "TableA") {
      def id = column[Int]("id")
      def * = id
    }
    val tableA = TableQuery[TableA]

    class TableB(tag: Tag) extends Table[(Int, Int)](tag, "TableB") {
      def id = column[Int]("id")
      def start = column[Int]("start")
      def * = (id, start)
    }
    val tableB = TableQuery[TableB]

    class TableC(tag: Tag) extends Table[Int](tag, "TableC") {
      def start = column[Int]("start")
      def * = start
    }
    val tableC = TableQuery[TableC]

    val queryErr2 = for {
      a <- tableA
      b <- tableB if b.id === a.id
      start = a.id + 1
      c <- tableC if c.start <= start
    } yield (b, c)

    (tableA.schema ++ tableB.schema ++ tableC.schema).create >> queryErr2.result
  }

  // Scalar subqueries (length, Compiled parameterized, IN with sorted subquery).
  def testSubquery = {
    class A(tag: Tag) extends Table[Int](tag, "A_subquery") {
      def id = column[Int]("id")
      def * = id
    }
    val as = TableQuery[A]

    for {
      _ <- as.schema.create
      _ <- as += 42
      q0 = as.filter(_.id === 42.bind).length
      _ <- q0.result.named("q0").map(_ shouldBe 1)
      q1 = Compiled { (n: Rep[Int]) =>
        as.filter(_.id === n).map(a => as.length)
      }
      _ <- q1(42).result.named("q1(42)").map(_ shouldBe List(1))
      q2 = as.filter(_.id in as.sortBy(_.id).map(_.id))
      _ <- q2.result.named("q2").map(_ shouldBe Vector(42))
    } yield ()
  }

  // Tables whose create_* exposes more columns (b) than the default projection (id, a).
  def testExpansion = {
    class A(tag: Tag) extends Table[(Int, String)](tag, "A_refexp") {
      def id = column[Int]("id")
      def a = column[String]("a")
      def b = column[String]("b")
      def * = (id, a)
      override def create_* = collectFieldSymbols((id, a, b).shaped.toNode)
    }
    val as = TableQuery[A]

    for {
      _ <- as.schema.create
      _ <- as.map(a => (a.id, a.a, a.b)) ++= Seq(
        (1, "a1", "b1"),
        (2, "a2", "b2"),
        (3, "a3", "b3")
      )
      q1 = as.map(identity).filter(_.b === "b3")
      _ <- q1.result.named("q1").map(r1 => r1.toSet shouldBe Set((3, "a3")))
      q2a = as.sortBy(_.a) join as on (_.b === _.b)
      q2 = for {
        (c, s) <- q2a
        c2 <- as
      } yield (c.id, c2.a)
      r2 <- q2.result.named("q2").map(_.toSet)
      _ = r2 shouldBe Set((1, "a1"), (1, "a2"), (1, "a3"), (2, "a1"), (2, "a2"), (2, "a3"), (3, "a1"), (3, "a2"), (3, "a3"))
    } yield ()
  }

  // Verifies result values AND (on H2 only) the exact number of nested SELECT levels per query.
  def testNewFusion = {
    class A(tag: Tag) extends Table[(Int, String, String)](tag, "A_NEWFUSION") {
      def id = column[Int]("id", O.PrimaryKey)
      def a = column[String]("a")
      def b = column[String]("b")
      def * = (id, a, b)
    }
    val as = TableQuery[A]
    val data = Set((1, "a", "a"), (2, "a", "b"), (3, "c", "b"))

    val q1 = (as join as on (_.id === _.id))
    val q2 = (as join as on (_.id === _.id) join as on (_._1.id === _.id))
    val q3 = q2.map { case ((a1, a2), a3) => (a1.id, a2.a, a3.b) }
    val q4 = as.map(a => (a.id, a.a, a.b, a)).filter(_._3 === "b").map { case (id, a1, b, a2) => (id, a2) }
    val q5a = as.to[Set].filter(_.b === "b").map(_.id)
    val q5b = as.filter(_.b === "b").to[Set].map(_.id)
    val q5c = as.filter(_.b === "b").map(_.id).to[Set]
    val q6 = (as join as).groupBy(j => (j._1.a, j._1.b)).map { case (ab, rs) => (ab, rs.length, rs.map(_._1).length, rs.map(_._2).length, rs.map(_._1.id).max, rs.map(_._1.id).length) }
    val q7 = q6.filter(_._1._1 === "a").map(_._5.getOrElse(0))
    val q8 = as.sortBy(_.id.desc).map(_.a)
    val q9a = as.sortBy(_.b).sortBy(_.a.desc).map(_.id)
    val q9b = as.sortBy(a => (a.a.desc, a.b)).map(_.id)
    val q10 = (as join as).map { case (a1, a2) => a1.id * 3 + a2.id - 3 }.sorted
    val q11a = q10.take(5)
    val q11b = q10.take(5).take(3)
    val q11c = q10.take(5).take(3).drop(1)
    val q11d = q10.take(5).drop(1).take(3)
    val q11e = q10.drop(7)
    val q11f = q10.take(6).drop(2).filter(_ =!= 5)
    val q12 = as.filter(_.id <= as.map(_.id).max-1).map(_.a)
    val q13 = (as.filter(_.id < 2) union as.filter(_.id > 2)).map(_.id)
    val q14 = q13.to[Set]
    val q15 = (as.map(a => a.id.?).filter(_ < 2) unionAll as.map(a => a.id.?).filter(_ > 2)).map(_.get).to[Set]
    val q16 = (as.map(a => a.id.?).filter(_ < 2) unionAll as.map(a => a.id.?).filter(_ > 2)).map(_.getOrElse(-1)).to[Set].filter(_ =!= 42)
    val q17 = as.sortBy(_.id).zipWithIndex.filter(_._2 < 2L).map { case (a, i) => (a.id, i) }
    val q18 = as.joinLeft(as).on { case (a1, a2) => a1.id === a2.id }.filter { case (a1, a2) => a1.id === 3 }.map { case (a1, a2) => a2 }
    val q19 = as.joinLeft(as).on { case (a1, a2) => a1.id === a2.id }.joinLeft(as).on { case ((_, a2), a3) => a2.map(_.b) === a3.b }.map(_._2)
    val q19b = as.joinLeft(as).on { case (a1, a2) => a1.id === a2.id }.joinLeft(as).on { case ((_, a2), a3) => a2.map(_.b) === a3.b }.subquery.map(_._2)

    // Nesting-level assertions are SQL-dialect specific, so they only run on H2.
    if(tdb.profile == H2Profile) {
      assertNesting(q1, 1)
      assertNesting(q2, 1)
      assertNesting(q3, 1)
      assertNesting(q4, 1)
      assertNesting(q5a, 1)
      assertNesting(q5b, 1)
      assertNesting(q5c, 1)
      assertNesting(q6, 1)
      assertNesting(q7, 1)
      assertNesting(q8, 1)
      assertNesting(q9a, 1)
      assertNesting(q9b, 1)
      assertNesting(q10, 1)
      assertNesting(q11a, 1)
      assertNesting(q11b, 1)
      assertNesting(q11c, 1)
      assertNesting(q11d, 1)
      assertNesting(q11e, 1)
      assertNesting(q11f, 2)
      assertNesting(q12, 2)
      assertNesting(q13, 2)
      assertNesting(q14, 2)
      assertNesting(q15, 2)
      assertNesting(q16, 2)
      assertNesting(q17, 2)
      assertNesting(q18, 1)
      assertNesting(q19, 1)
      assertNesting(q19b, 2)
    }

    for {
      _ <- as.schema.create
      _ <- as ++= data
      _ <- mark("as", as.result).map(_.toSet shouldBe data)
      _ <- mark("q1", q1.result).map(_.toSet shouldBe data.zip(data))
      _ <- mark("q2", q2.result).map(_.toSet shouldBe data.zip(data).zip(data))
      _ <- mark("q3", q3.result).map(_.toSet shouldBe data)
      _ <- mark("q4", q4.result).map(_.toSet shouldBe data.filter(_._3 == "b").map { case t @ (id, _, _) => (id, t) })
      _ <- mark("q5a", q5a.result).map(_ shouldBe Set(2, 3))
      _ <- mark("q5b", q5b.result).map(_ shouldBe Set(2, 3))
      _ <- mark("q5c", q5c.result).map(_ shouldBe Set(2, 3))
      _ <- mark("q6", q6.result).map(_.toSet shouldBe Set((("c","b"),3,3,3,Some(3),3), (("a","a"),3,3,3,Some(1),3), (("a","b"),3,3,3,Some(2),3)))
      _ <- mark("q7", q7.result).map(_.toSet shouldBe Set(1, 2))
      _ <- mark("q8", q8.result).map(_ shouldBe Seq("c", "a", "a"))
      _ <- mark("q9a", q9a.result).map(_ shouldBe Seq(3, 1, 2))
      _ <- mark("q9b", q9b.result).map(_ shouldBe Seq(3, 1, 2))
      _ <- mark("q10", q10.result).map(_ shouldBe Seq(1, 2, 3, 4, 5, 6, 7, 8, 9))
      _ <- mark("q11a", q11a.result).map(_ shouldBe Seq(1, 2, 3, 4, 5))
      _ <- mark("q11b", q11b.result).map(_ shouldBe Seq(1, 2, 3))
      _ <- mark("q11c", q11c.result).map(_ shouldBe Seq(2, 3))
      _ <- mark("q11d", q11d.result).map(_ shouldBe Seq(2, 3, 4))
      _ <- mark("q11e", q11e.result).map(_ shouldBe Seq(8, 9))
      _ <- mark("q11f", q11f.result).map(_ shouldBe Seq(3, 4, 6))
      _ <- mark("q12", q12.result).map(_ shouldBe Seq("a", "a"))
      _ <- mark("q13", q13.result).map(_.toSet shouldBe Set(1, 3))
      _ <- mark("q14", q14.result).map(_ shouldBe Set(1, 3))
      _ <- mark("q15", q15.result).map(_ shouldBe Set(1, 3))
      _ <- mark("q16", q16.result).map(_ shouldBe Set(1, 3))
      _ <- ifCap(rcap.zip)(mark("q17", q17.result).map(_ shouldBe Seq((1,0), (2,1))))
      _ <- mark("q18", q18.result).map(_ shouldBe Seq(Some((3, "c", "b"))))
      _ <- mark("q19", q19.result).map(_.toSet shouldBe Set(Some((1,"a","a")), Some((2,"a","b")), Some((3,"c","b"))))
      _ <- mark("q19b", q19b.result).map(_.toSet shouldBe Set(Some((1,"a","a")), Some((2,"a","b")), Some((3,"c","b"))))
    } yield ()
  }
}
AtkinsChang/slick
slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/NewQuerySemanticsTest.scala
Scala
bsd-2-clause
22,028
package spatial.interpreter

import argon.core._
import spatial.nodes._
import argon.interpreter.{Interpreter => AInterpreter}

// Interpreter rules for register IR nodes (RegNew/RegRead/RegReset/RegWrite
// and their Var* counterparts). Mixed into the base Interpreter via matchNode.
trait Regs extends AInterpreter {

  // Mutable runtime value of a register; `r` is the reset/initial value.
  class IReg(val r: Any) {
    var v: Any = r

    override def toString = {
      val vs = AInterpreter.stringify(v)
      s"Reg($vs)"
    }

    // Restore the register to its reset value.
    def reset = v = r
  }

  // Extractor: evaluate an expression down to its IReg instance.
  object EReg {
    def unapply(x: Exp[_]) = Some(eval[IReg](x))
  }

  // Extend the superclass's node dispatch with register semantics.
  // NOTE(review): `variables` is assumed to map pre-bound symbols to their
  // IReg instances — confirm against the base interpreter.
  override def matchNode(lhs: Sym[_]) = super.matchNode(lhs).orElse {

    // Reuse an existing register for this symbol if one was pre-bound,
    // otherwise allocate a fresh one holding the evaluated init value.
    case RegNew(EAny(init)) =>
      variables
        .get(lhs)
        .getOrElse(new IReg(init))

    case RegRead(EReg(reg)) =>
      reg.v

    // Reset only when the enable signal is true.
    case RegReset(EReg(reg), EBoolean(en)) =>
      if (en) reg.reset

    // Write only when the condition is true.
    case RegWrite(EReg(reg), EAny(v), EBoolean(cond)) =>
      if (cond) {
        reg.v = v
      }

    // Var registers start uninitialized (null) rather than with an init value.
    case VarRegNew(_) =>
      variables
        .get(lhs)
        .getOrElse(new IReg(null))

    case VarRegRead(EReg(reg)) =>
      reg.v

    case VarRegWrite(EReg(reg), EAny(v), EBoolean(cond)) =>
      if (cond) {
        reg.v = v
      }
  }
}
stanford-ppl/spatial-lang
spatial/core/src/spatial/interpreter/Regs.scala
Scala
mit
1,083
import java.io.{BufferedReader, InputStreamReader, PrintWriter}
import java.net.{InetSocketAddress, Socket}

/*
 * Copyright (c) 2016 Markus Mulkahainen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 */

/**
 * Minimal POP3 client over a blocking socket. Each command method sends one
 * protocol line and reads the server's reply in lock-step, so statement
 * ordering here is the protocol itself.
 *
 * Created by markus on 22.1.2016.
 */
class POP3Client(val server: String, val port: Int) {

  // Connects eagerly at construction and prints the server greeting.
  val socket = new Socket()
  socket.connect(new InetSocketAddress(server, port))
  val io = new ClientIO(socket)
  println(io.receive())

  /**
   * Authenticates with USER/PASS.
   * @return (true, "Login ok") on success, (false, serverMessage) on the first failed step.
   */
  def login(username: String, password: String): (Boolean, String) = {
    io.send(new POP3Messages.USER(username).msg())
    var msg = POP3Messages.parseReturnMessage(io.receive())
    msg match {
      case POP3Messages.ERR(data) => return (false, data)
      case POP3Messages.DEFAULT(data) => return (false, data)
      case _ => //nothing
    }
    io.send(new POP3Messages.PASS(password).msg())
    msg = POP3Messages.parseReturnMessage(io.receive())
    msg match {
      case POP3Messages.ERR(data) => return (false, data)
      case POP3Messages.DEFAULT(data) => return (false, data)
      case _ => //nothing
    }
    return (true, "Login ok")
  }

  /**
   * Sends LIST and reads one "id size" line per message.
   * On -ERR returns a single empty MailRef.
   * NOTE(review): an unrecognized reply (DEFAULT) is not matched here and
   * would throw a MatchError — only ERR and OK are handled.
   */
  def getMessages(): List[MailRef] = {
    io.send(new POP3Messages.LIST().msg())
    val res = POP3Messages.parseReturnMessage(io.receive())
    res match {
      case POP3Messages.ERR(data) => return List(MailRef.default())
      case POP3Messages.OK(data) =>
        // The +OK payload starts with the message count, e.g. "2 messages".
        val n = Integer.parseInt(data.split(" ").head) //"n messages"
        return (1 to n)
          .map(_ => io.receive())
          .map(x => new MailRef(x.split(" ").head, x.split(" ").tail.head))
          .toList
    }
  }

  /**
   * Sends RETR for the given message and reads lines until the "." terminator
   * (exclusive), joining them with newlines. On -ERR returns the error text.
   */
  def getMail(mail: MailRef): String = {
    io.send(new POP3Messages.RETR(mail.id).msg())
    val res = POP3Messages.parseReturnMessage(io.receive())
    res match {
      case POP3Messages.ERR(data) => return data
      case _ => //nothing
    }
    return Stream
      .continually(io.receive())
      .takeWhile(x => !x.equals("."))
      .reduce((a,b) => new String(a + "\n" +b))
  }

  // Sends QUIT; the server's reply is not read and the socket is not closed here.
  def quit(): Unit = {
    io.send(new POP3Messages.QUIT().msg())
  }

  // A message reference from LIST: message id and its size (both as strings).
  class MailRef(val id: String, val size: String) { override def toString():String = { return id + " " +size}}
  object MailRef { def default(): MailRef = { return new MailRef("", "")}}

  // Line-oriented reader/writer around the socket (auto-flush on println).
  class ClientIO(val socket: Socket) {
    val out = new PrintWriter(socket.getOutputStream(), true)
    val in = new BufferedReader(new InputStreamReader(socket.getInputStream()))

    def send(msg: String): Unit = { out.println(msg) }
    def receive(): String = { return in.readLine() }
  }

  // POP3 command/reply message types and the reply parser.
  private object POP3Messages {
    abstract class Pop3Message { def msg():String }
    case class USER(val username: String) extends Pop3Message { def msg(): String = { return "USER " + username }}
    case class PASS(val password: String) extends Pop3Message { def msg(): String = { return "PASS " + password }}
    case class LIST() extends Pop3Message { def msg(): String = { return "LIST" }}
    case class QUIT() extends Pop3Message { def msg(): String = { return "QUIT" }}
    case class RETR(val id: String) extends Pop3Message { def msg(): String = { return "RETR " + id }}

    // extra for the assignment
    abstract class Pop3ReturnMessage
    case class OK(val data: String = "") extends Pop3ReturnMessage
    case class ERR(val data: String = "") extends Pop3ReturnMessage
    case class DEFAULT(val data: String = "") extends Pop3ReturnMessage

    class DataSeparator(val sep: String)

    // Maps a reply prefix to (separator between status and payload, constructor).
    private val strToMsgMap: Map[String, (DataSeparator, Function[String, Pop3ReturnMessage])] = Map(
      "+OK" -> (new DataSeparator(" "), (x: String) => new OK(x)),
      "-ERR" -> (new DataSeparator(" "), (x: String) => new ERR(x))
    )

    // Classifies a raw reply line: +OK/-ERR lines yield OK/ERR carrying the text
    // after the first separator; anything else is wrapped whole in DEFAULT.
    def parseReturnMessage(msg: String): Pop3ReturnMessage = {
      val sepAndFn = strToMsgMap
        .filterKeys(x => msg.startsWith(x))
        .values
        .headOption
        .getOrElse((new DataSeparator(""), (x: String) => new DEFAULT(x)))
      return sepAndFn._2(msg.substring(msg.indexOf(sepAndFn._1.sep) + sepAndFn._1.sep.length()))
    }
  }
}
Klyyssi/ties323
mail-protocols/pop-client/src/main/scala/POP3Client.scala
Scala
mit
4,689
// Copyright (C) 2011 Dmitri Nikulin
//
// This file is part of Vijil.
//
// Vijil is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Vijil is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Vijil.  If not, see <http://www.gnu.org/licenses/>.
//
// Repository: https://github.com/dnikulin/vijil
// Email:      dnikulin+vijil@gmail.com

package com.dnikulin.vijil.traits

import com.dnikulin.vijil.render.NodeSpan

/**
 * Mixin for anything that exposes a sequence of render marks.
 */
trait HasMarks {
  // The marks (as renderable node spans) attached to this object.
  val marks: IndexedSeq[NodeSpan]
}
dnikulin/vijil
src/main/scala/com/dnikulin/vijil/traits/HasMarks.scala
Scala
agpl-3.0
953
/*
 * FieldKit — http://www.field.io
 * (c) 2009, field.io
 */
/* created November 3, 2009 */
package field.kit.particle.behaviour

import field.kit.particle._
import field.kit.math.Common._
import field.kit.math.Vec3

/**
 * 2D Behaviour
 * Sets a particles position to be on one of the simulation space edges
 * @author Marcus Wendt
 */
class Offspace2D extends Behaviour {

  // Extra border (as a fraction of the space dimension) around the space.
  var margin = 0f

  /** the absolute minimum coord */
  protected var min = Vec3()

  /** the absolute maximum coord */
  protected var max = Vec3()

  /** update the absolute coords */
  override def prepare(dt:Float) {
    min := (-margin * .5f) *= ps.space.dimension
    max := (1 + margin * .5f) *= ps.space.dimension
  }

  // Places the particle at a random point on a randomly chosen edge
  // (0 = left, 1 = top, 2 = right, 3 = bottom).
  // NOTE(review): assumes random(0, 3).toInt only yields 0..3 — there is no
  // default case, so any other value would throw a MatchError; confirm the
  // inclusive/exclusive bounds of `random`.
  def apply(p:Particle, dt:Float) {
    val edge = random(0, 3).toInt
    val position = random(0f, 1f)

    edge match {
      // left
      case 0 =>
        p.x = 0
        p.y = position * ps.space.height

      // top
      case 1 =>
        p.x = position * ps.space.width
        p.y = ps.space.height

      // right
      case 2 =>
        p.x = ps.space.width
        p.y = position * ps.space.height

      // bottom
      case 3 =>
        p.x = position * ps.space.width
        p.y = 0
    }
  }
}
field/FieldKit.scala
src.particle/field/kit/particle/behaviour/Offspace.scala
Scala
lgpl-3.0
1,682
package com.fortysevendeg.scala.android.ui.main import android.support.v7.widget.{CardView, RecyclerView} import android.text.TextUtils.TruncateAt import android.view.Gravity import android.view.ViewGroup.LayoutParams._ import android.widget.ImageView.ScaleType import android.widget.{ImageView, LinearLayout, TextView} import com.fortysevendeg.macroid.extras.FrameLayoutTweaks._ import com.fortysevendeg.macroid.extras.ImageViewTweaks._ import com.fortysevendeg.macroid.extras.LinearLayoutTweaks._ import com.fortysevendeg.macroid.extras.ResourcesExtras._ import com.fortysevendeg.macroid.extras.TextTweaks._ import com.fortysevendeg.macroid.extras.ThemeExtras._ import com.fortysevendeg.macroid.extras.ViewGroupTweaks._ import com.fortysevendeg.macroid.extras.ViewTweaks._ import com.fortysevendeg.scala.android.R import macroid.FullDsl._ import macroid.{ActivityContextWrapper, ContextWrapper, Tweak} import scala.language.postfixOps trait Styles { def listStyle(implicit context: ContextWrapper): Tweak[RecyclerView] = llMatchWeightVertical + vPaddings(resGetDimensionPixelSize(R.dimen.padding_default)) + vgClipToPadding(false) def contentStyle(implicit context: ContextWrapper): Tweak[LinearLayout] = llVertical + vBackgroundColorResource(R.color.main_list_background) } trait AdapterStyles { def cardStyle(implicit activityContext: ActivityContextWrapper): Tweak[CardView] = vMatchWidth + (themeGetDrawable(android.R.attr.selectableItemBackground) map flForeground getOrElse Tweak.blank) def itemStyle: Tweak[LinearLayout] = llVertical + vMatchWidth def itemTopStyle(implicit context: ContextWrapper): Tweak[LinearLayout] = llHorizontal + vMatchWidth + llGravity(Gravity.CENTER_VERTICAL) + vPadding( paddingTop = resGetDimensionPixelSize(R.dimen.padding_default_xlarge), paddingBottom = resGetDimensionPixelSize(R.dimen.padding_default), paddingLeft = resGetDimensionPixelSize(R.dimen.padding_default_xlarge), paddingRight = resGetDimensionPixelSize(R.dimen.padding_default_xlarge)) def 
titleStyle(implicit context: ContextWrapper): Tweak[TextView] = llWrapWeightHorizontal + tvSizeResource(R.dimen.font_size_medium) + tvColorResource(R.color.primary) def descriptionStyle(implicit context: ContextWrapper): Tweak[TextView] = tvSizeResource(R.dimen.font_size_normal) + tvNormalLight + tvColorResource(R.color.main_list_description) + tvMaxLines(3) + vPadding( paddingBottom = resGetDimensionPixelSize(R.dimen.padding_default_xlarge), paddingLeft = resGetDimensionPixelSize(R.dimen.padding_default_xlarge), paddingRight = resGetDimensionPixelSize(R.dimen.padding_default_xlarge)) def apiStyle(implicit context: ContextWrapper): Tweak[TextView] = tvSizeResource(R.dimen.font_size_micro) + tvColorResource(R.color.main_list_api) + vPaddings( paddingTopBottom = resGetDimensionPixelSize(R.dimen.padding_default_micro), paddingLeftRight = resGetDimensionPixelSize(R.dimen.padding_default_small)) def lineHorizontalStyle(implicit context: ContextWrapper): Tweak[ImageView] = lp[LinearLayout](MATCH_PARENT, resGetDimensionPixelSize(R.dimen.line)) + vBackgroundColorResource(R.color.main_list_line) val bottomContentStyle: Tweak[LinearLayout] = vMatchWidth + llHorizontal + llGravity(Gravity.CENTER_VERTICAL) def bottomUserContentStyle(implicit activityContext: ActivityContextWrapper): Tweak[LinearLayout] = llWrapWeightHorizontal + llHorizontal + vPaddings(resGetDimensionPixelSize(R.dimen.padding_default)) + llGravity(Gravity.CENTER_VERTICAL) + (themeGetDrawable(android.R.attr.selectableItemBackground) map vBackground getOrElse Tweak.blank) def avatarStyle(implicit context: ContextWrapper): Tweak[ImageView] = { val size = resGetDimensionPixelSize(R.dimen.main_list_avatar_size) lp[LinearLayout](size, size) + ivScaleType(ScaleType.CENTER_CROP) + vMargins(resGetDimensionPixelSize(R.dimen.padding_default_small)) } def userNameStyle(implicit context: ContextWrapper): Tweak[TextView] = tvSizeResource(R.dimen.font_size_normal) + tvNormalLight + tvColorResource(R.color.primary) + 
tvMaxLines(1) + tvEllipsize(TruncateAt.END) def twitterStyle(implicit context: ContextWrapper): Tweak[TextView] = tvSizeResource(R.dimen.font_size_small) + tvNormalLight + tvColorResource(R.color.main_list_secondary) + tvMaxLines(1) + tvEllipsize(TruncateAt.END) def userNameContentStyle(implicit context: ContextWrapper): Tweak[LinearLayout] = llMatchWeightHorizontal + llVertical + vPadding(paddingLeft = resGetDimensionPixelSize(R.dimen.padding_default_small)) + llGravity(Gravity.CENTER_VERTICAL) def lineVerticalStyle(implicit context: ContextWrapper): Tweak[ImageView] = lp[LinearLayout](resGetDimensionPixelSize(R.dimen.line), MATCH_PARENT) + vBackgroundColorResource(R.color.main_list_line) def bottomLevelsContentStyle(implicit context: ContextWrapper): Tweak[LinearLayout] = llWrapWeightHorizontal + llVertical + vPaddings(resGetDimensionPixelSize(R.dimen.padding_default)) + llGravity(Gravity.CENTER_VERTICAL) def levelItemContentStyle(implicit context: ContextWrapper): Tweak[LinearLayout] = vWrapContent + vPadding(paddingBottom = resGetDimensionPixelSize(R.dimen.padding_default_micro)) def levelStyle(implicit context: ContextWrapper): Tweak[TextView] = tvSizeResource(R.dimen.font_size_small) + tvNormalLight + tvColorResource(R.color.main_list_secondary) + vMinWidth(resGetDimensionPixelSize(R.dimen.main_list_min_width_levels_tag)) def levelTypeStyle(implicit context: ContextWrapper): Tweak[TextView] = tvSizeResource(R.dimen.font_size_small) + tvColorResource(R.color.main_list_tag) + tvNormalLight + vPaddings( paddingTopBottom = 0, paddingLeftRight = resGetDimensionPixelSize(R.dimen.padding_default_small)) + tvMaxLines(1) + tvEllipsize(TruncateAt.END) }
pamu/ScalaAndroidMacroid
src/main/scala/com/fortysevendeg/scala/android/ui/main/Styles.scala
Scala
apache-2.0
6,201
package net.benmur.riemann.client import java.net.SocketAddress import scala.annotation.implicitNotFound import scala.collection.mutable.WrappedArray import org.scalamock.scalatest.MockFactory import org.scalatest.FunSuite import UnreliableIO.OneWayConnectionBuilder import UnreliableIO.Unreliable import UnreliableIO.UnreliableSendOff import akka.testkit.CallingThreadDispatcher import testingsupport.TestingTransportSupport.address import testingsupport.TestingTransportSupport.event import testingsupport.TestingTransportSupport.protoMsgEvent import testingsupport.TestingTransportSupport.timeout class UnreliableIOTest extends FunSuite with testingsupport.ImplicitActorSystem with MockFactory { import UnreliableIO._ import testingsupport.TestingTransportSupport._ test("send a protobuf Msg") { val socket = mock[Unreliable.SocketWrapper] val wa: WrappedArray[Byte] = WrappedArray.make(protoMsgEvent.toByteArray()) (socket.send _).expects(wa).once() val socketFactory = mockFunction[SocketAddress, Unreliable.SocketWrapper] socketFactory.expects(address).returning(socket).once() val conn = implicitly[ConnectionBuilder[Unreliable]].buildConnection( address, Some(socketFactory), Some(CallingThreadDispatcher.Id)) implicitly[SendOff[EventPart, Unreliable]].sendOff(conn, Write(event)) system.shutdown system.awaitTermination } }
benmur/riemann-scala-client
src/test/scala/net/benmur/riemann/client/UnreliableIOTest.scala
Scala
mit
1,408
package com.soteradefense.datawake.trails.data import backtype.storm.tuple.Values /** * Interface for defining a class that is read from kafka and converting to storm readable data. */ trait StormData { def toValues: Values }
Sotera/datawake-prefetch
trail-specific-search/src/main/scala/com/soteradefense/datawake/trails/data/StormData.scala
Scala
apache-2.0
232
package samples.scalaexchange.step7 import akka.event.Logging import akka.stream.Attributes import akka.stream.ThrottleMode.{Shaping, Enforcing} import akka.stream.scaladsl.{Flow, Sink, Source} import samples.scalaexchange.utils.SampleApp import scala.concurrent.Await import scala.concurrent.duration._ import scala.io.StdIn /** * See: https://github.com/jrudolph/akka-http-scala-js-websocket-chat/blob/master/cli/src/main/scala/example/akkawschat/cli/ChatClient.scala */ object WebsocketClient extends SampleApp with WebsocketClientSupport { private val uri = "ws://127.0.0.1:8080/ws/tweetEcho" println(s"Connecting to: $uri") private val helloSource = Source.repeat("Hello!").throttle(1, per = 1.second, maximumBurst = 1, mode = Shaping) private val printlnSink = Flow[Any].log("from-server").withAttributes(Attributes.logLevels(Logging.InfoLevel)).to(Sink.foreach(println)) private val singleExchange = connect(uri, printlnSink, helloSource) Await.result(singleExchange, 10.seconds) // connection OK. }
ktoso/akka-scala-exchange
src/main/scala/samples/scalaexchange/step7/WebsocketClient.scala
Scala
apache-2.0
1,032
package fr.renoux.gaston.model.preferences import fr.renoux.gaston.model._ import fr.renoux.gaston.util.BitSet /** No person (outside of the persons explicitly exempted from this rule) can be on more than one of the topics inside that list (regardless of slot). */ final case class TopicsExclusive(topics: BitSet[Topic], exemptions: BitSet[Person], reward: Score = Preference.NecessaryPreferenceScore) extends Preference.GlobalLevel with Preference.Anti { override def scoreSchedule(schedule: Schedule): Score = { val groups = schedule.personsByTopic.view.filterKeys(topics.contains).values.map(_.filterNot(exemptions)) groups.foldLeft((Set.empty[Person], Score.Zero)) { case ((found, score), ps) => if (ps.exists(found)) (found, score + reward) else (found ++ ps, score) }._2 } override def equals(o: Any): Boolean = o match { case that: TopicsExclusive => this.topics.actualEquals(that.topics) && this.exemptions.actualEquals(that.exemptions) && this.reward == that.reward case _ => false } override def hashCode(): Int = (this.topics.actualHashCode, this.exemptions.actualHashCode, reward).hashCode() }
gaelrenoux/gaston
src/main/scala/fr/renoux/gaston/model/preferences/TopicsExclusive.scala
Scala
apache-2.0
1,156
package feh.tec.nxt.run import feh.tec.rubik.RubikCube.SideName import feh.tec.rubik.RubikCubeImage.{ReadSide, SidesMap, ColorMap} import feh.util.file._ import Ordering.Implicits._ object ColorMaps { object SideNames{ def fromFile(path: String): ColorMap[Int, SideName] = { val file = new File(path) val strData = file.withInputStream(File.read[Seq[String]]).get val compareType = strData.head val pfs = strData.tail.map(_.split(',').map(_.trim) match { case Array(min, max, sideName) => val (mx, mn) = (min.toInt, max.toInt) val side = SideName.fromString(sideName) if (compareType startsWith "strict") ({ case i if i betweenStrict (mx, mn) => side } : PartialFunction[Int, SideName]) else ({ case i if i between (mx, mn) => side } : PartialFunction[Int, SideName]) }) ColorMap(pfs reduceLeft (_ orElse _)) } } implicit class BetweenWrapper[T: Ordering](t: T){ def between(min: T, max: T) = t >= min && t <= max def betweenStrict(min: T, max: T) = t >= min && t < max } } object SidesMaps { def default = SidesMap(Seq( ReadSide(SideName.Up), ReadSide(SideName.Back, flipX = true, flipY = true), ReadSide(SideName.Down), ReadSide(SideName.Front), ReadSide(SideName.Right, flipY = true, flipX = true), ReadSide(SideName.Left) )) }
fehu/int-sis--Rubik
nxt/src/main/scala/feh/tec/nxt/run/Configs.scala
Scala
mit
1,455
package domain import api.MeteoData import org.joda.time.{DateTimeZone, LocalDate, LocalTime, Period, Hours} import scala.collection.mutable.HashMap /** Trait transforming [[api.MeteoData]] to [[domain.WeatherData]] */ trait WeatherService { private val ShortTermInterval = 3 private val LongTermInterval = 6 def toWeatherData(meteoData: MeteoData, dstOffset: Int): WeatherData = { // Maps having days as keys. The values are maps having hours as keys and tuple(temperature, symbol) as values. type ForecastMap = HashMap[LocalDate, HashMap[Int, Tuple2[Int, Int]]] val shortTerm: ForecastMap = HashMap.empty val longTerm: ForecastMap = HashMap.empty val zone = DateTimeZone.forOffsetHours(dstOffset) val fromMeteoLocal = meteoData.from.toDateTime(zone) val toMeteoLocal = meteoData.to.toDateTime(zone) meteoData.forecasts map ( forecast => { val from = forecast.from.toDateTime(zone) if(from.compareTo(fromMeteoLocal) >= 0 && from.compareTo(toMeteoLocal) < 0) { val fromHour = from.getHourOfDay val to = forecast.to.toDateTime(zone) val fromLocalDate = from.toLocalDate val periodFromStartDay = new Period(fromMeteoLocal.withTimeAtStartOfDay, from) val daysFromStart = periodFromStartDay.getWeeks * 7 + periodFromStartDay.getDays val periodFromStart = new Period(fromMeteoLocal, from) val hoursFromStart = periodFromStart.getDays * 24 + periodFromStart.getHours if (from == to && forecast.temperature.isDefined) { // extract temperature // Check that the hour matches the short term interval if (daysFromStart < 3 && (hoursFromStart % ShortTermInterval == 0 )) { // short term val shortTermDay = shortTerm.getOrElseUpdate(fromLocalDate, HashMap.empty) val hourForecast = shortTermDay.getOrElseUpdate(fromHour, (-1, -1)) shortTermDay(fromHour) = hourForecast.copy(_1 = forecast.temperature.get.round) } // For long term forecast, we are only interested in the hours (0, 6, 12, 18) if (daysFromStart >= 2 && (forecast.from.getHourOfDay % LongTermInterval == 0)) { // long term val longTermDay = 
longTerm.getOrElseUpdate(fromLocalDate, HashMap.empty) val hourForecast = longTermDay.getOrElseUpdate(fromHour, (-1, -1)) longTermDay(fromHour) = hourForecast.copy(_1 = forecast.temperature.get.round) } } else if (forecast.symbol.isDefined) { // extract symbol val hoursBetween = Hours.hoursBetween(from, to).getHours if (daysFromStart < 3 && (hoursFromStart % ShortTermInterval == 0) && hoursBetween == ShortTermInterval) { // short term val shortTermDay = shortTerm.getOrElseUpdate(fromLocalDate, HashMap.empty) val hourForecast = shortTermDay.getOrElseUpdate(fromHour, (-1, -1)) shortTermDay(fromHour) = hourForecast.copy(_2 = forecast.symbol.get) } // For long term forecast, we are only interested in the hours (0, 6, 12, 18) if (daysFromStart >= 2 && (forecast.from.getHourOfDay % LongTermInterval) == 0 && hoursBetween == LongTermInterval) { // long term val longTermDay = longTerm.getOrElseUpdate(fromLocalDate, HashMap.empty) val hourForecast = longTermDay.getOrElseUpdate(fromHour, (-1, -1)) longTermDay(fromHour) = hourForecast.copy(_2 = forecast.symbol.get) } } } }) def sort(forecastMap: ForecastMap) = forecastMap.toList .sortWith( { case((hourLeft, _), (hourRight, _)) => hourLeft.compareTo(hourRight) < 0 } ) .map({ case (day, hours) => DailyData( day = day.toString("dd/MM/yyyy"), data = hours.toList .sortBy(_._1) .map( { case(hour, (temp, symbol)) => HourData( startTime = new LocalTime(hour, 0).toString("HH:mm"), temperature = temp, symbol = symbol)}) )}) WeatherData(shortterm = sort(shortTerm), longterm = sort(longTerm)) } }
sbondor/sunwatch
server/src/main/scala/domain/WeatherService.scala
Scala
mit
4,115
import maker.project.{Module, Project} import java.io.File val a = new Module( root = new File("a"), name = "a") val b = new Module( root = new File("b"), name = "b", immediateUpstreamModules = List(a)) val c = new Module(new File("c"), "c", List(a)) val d = new Module(new File("d"), "d", List(c)) val project = Project( name = "top-level-project", root = new File("."), immediateUpstreamModules = List(d) )
syl20bnr/maker
examples/multi-module-project/Project.scala
Scala
bsd-2-clause
435
package coroapi import scala.annotation.tailrec import scala.collection.JavaConverters._ import java.dyn.AsymCoroutine object AsymCoroutines { class Fibs extends AsymCoroutine[Nothing, Int] { override def run(n : Nothing) : Int = { @tailrec def fib(s0 : Int, s1 : Int) { ret(s0) fib(s1, s0+s1) } fib(0, 1) sys.error("Not reached") } } def main(args : Array[String]) { val fibs = new Fibs for(i <- fibs.iterator.asScala take 10) println(i) } }
milessabin/scala-cont-jvm-coro-talk
src/coroapi/AsymCoroutines.scala
Scala
apache-2.0
538
package akka.persistence.hazelcast.journal.util import java.util.Map.Entry import akka.persistence.hazelcast.journal.HazelcastJournal._ import com.hazelcast.query.Predicate private[hazelcast] class HighestSequenceNrPredicate(persistenceId: String, fromSequenceNr: Long) extends Predicate[MessageId, Message] { override def apply(entry: Entry[MessageId, Message]): Boolean = { val messageId = entry.getKey messageId.persistenceId == persistenceId && messageId.sequenceNr >= fromSequenceNr } }
dlisin/akka-persistence-hazelcast
src/main/scala/akka/persistence/hazelcast/journal/util/HighestSequenceNrPredicate.scala
Scala
apache-2.0
509
package dsentric.contracts import dsentric.codecs.{DCodec, DStringCodec} import dsentric.operators.{DataOperator, Expected, Optional} import dsentric.DObject class AspectFor[D <: DObject, D2 <: D](_source: ContractFor[D])( f: PartialFunction[Property[D, _], Option[AspectProperty[D2, _]]] = PartialFunction.empty ) extends ContractLike[D2] { //TODO: name override //TODO: DataOperators def \\\\(property: ObjectProperty[D], merged: ObjectProperty[D]*)( f: PartialFunction[Property[D, _], Option[AspectProperty[D2, _]]] = PartialFunction.empty ): ExpectedObjectAspectProperty[D2] = { val fields = Aspect.getFields(property)(f) ++ merged.flatMap(Aspect.getFields(_)(f)) val expectedProperty = new ExpectedObjectAspectProperty[D2](property._codec, Nil) fields.foreach(_._2.__setParent(expectedProperty)) expectedProperty.__setFields(fields) expectedProperty.__setParent(this) expectedProperty } //TODO support pulling from property if has withAdditional def \\\\\\[Key, Value](property: ObjectProperty[D], merged: ObjectProperty[D]*)( f: PartialFunction[Property[D, _], Option[AspectProperty[D2, _]]] = PartialFunction.empty )(implicit K: DStringCodec[Key], V: DCodec[Value]): ExpectedObjectAspectPropertyWithAdditional[D2, Key, Value] = { val fields = Aspect.getFields(property)(f) ++ merged.flatMap(Aspect.getFields(_)(f)) val expectedProperty = new ExpectedObjectAspectPropertyWithAdditional[D2, Key, Value](property._codec, Nil, Nil, K, V) fields.foreach(_._2.__setParent(expectedProperty)) expectedProperty.__setFields(fields) expectedProperty.__setParent(this) expectedProperty } def \\\\?(property: ObjectProperty[D], merged: ObjectProperty[D]*)( f: PartialFunction[Property[D, _], Option[AspectProperty[D2, _]]] = PartialFunction.empty ): MaybeObjectAspectProperty[D2] = { val fields = Aspect.getFields(property)(f) ++ merged.flatMap(Aspect.getFields(_)(f)) val maybeProperty = new MaybeObjectAspectProperty[D2](property._codec, Nil) fields.foreach(_._2.__setParent(maybeProperty)) 
maybeProperty.__setFields(fields) maybeProperty.__setParent(this) maybeProperty } //TODO support pulling from property if has withAdditional def \\\\\\?[Key, Value](property: ObjectProperty[D], merged: ObjectProperty[D]*)( f: PartialFunction[Property[D, _], Option[AspectProperty[D2, _]]] )(implicit K: DStringCodec[Key], V: DCodec[Value]): MaybeObjectAspectPropertyWithAdditional[D2, Key, Value] = { val fields = Aspect.getFields(property)(f) ++ merged.flatMap(Aspect.getFields(_)(f)) val maybeProperty = new MaybeObjectAspectPropertyWithAdditional[D2, Key, Value](property._codec, Nil, Nil, K, V) fields.foreach(_._2.__setParent(maybeProperty)) maybeProperty.__setFields(fields) maybeProperty.__setParent(this) maybeProperty } override def _root: ContractFor[D2] = _source.asInstanceOf[ContractFor[D2]] private[contracts] var __fields: Map[String, Property[D2, _]] = _ @volatile private var _bitmap0: Boolean = false final def _fields: Map[String, Property[D2, _]] = if (_bitmap0) __fields else { this.synchronized { //Contract resolved properties should override aspect transformed properties val aspectFields = Aspect.getFields[D, D2](_source)(f) aspectFields.foreach(_._2.__setParent(this)) __fields = aspectFields ++ this.getClass.getMethods.flatMap { m => if ( classOf[Property[D2, _]].isAssignableFrom( m.getReturnType ) && m.getTypeParameters.isEmpty && m.getParameterTypes.isEmpty ) m.invoke(this) match { case prop: PropertyResolver[D2, Any] @unchecked => Some(prop.__nameOverride.getOrElse(m.getName) -> prop) case prop: ObjectAspectProperty[D2] @unchecked => prop.__setKey(m.getName) Some(prop._key -> prop) case _ => None } else None }.toMap _bitmap0 = true } __fields } } class Aspect[D <: DObject](_source: ContractFor[D])( f: PartialFunction[Property[D, _], Option[AspectProperty[D, _]]] = PartialFunction.empty ) extends AspectFor[D, D](_source)(f) object Aspect { def apply[D <: DObject](_root: ContractFor[D])( f: PartialFunction[Property[D, _], Option[AspectProperty[D, _]]] ): 
Aspect[D] = new Aspect[D](_root)(f) private[contracts] def getFields[D <: DObject, D2 <: D]( contract: BaseContract[D] )(f: PartialFunction[Property[D, _], Option[AspectProperty[D2, _]]]): Map[String, AspectProperty[D2, _]] = { def reparent[B <: BaseContract[D2]](b: B, fields: Map[String, AspectProperty[D2, _]]): B = { fields.foreach(_._2.__setParent(b)) b } def mapValueProperty[T]: Function[ValueProperty[D, T], ValueAspectProperty[D2, T]] = { case e: ExpectedProperty[D, T] => ExpectedAspectProperty(e._key, e._codec, e._dataOperators) case e: MaybeExpectedProperty[D, T] => ExpectedAspectProperty(e._key, e._codec, e._dataOperators) case m: MaybeProperty[D, T] => MaybeAspectProperty(m._key, m._codec, m._dataOperators) case d: DefaultProperty[D, T] => DefaultAspectProperty(d._key, d._default, d._codec, d._dataOperators) case d: MaybeDefaultProperty[D, T] => DefaultAspectProperty(d._key, d._default, d._codec, d._dataOperators) case _: ValueAspectProperty[D, T] => ??? } def transformFields(fields: Map[String, Property[D, _]]): Map[String, AspectProperty[D2, _]] = fields.flatMap { pair => f.lift(pair._2) -> pair._2 match { case (None, o: ExpectedObjectProperty[D]) => val fields = transformFields(o._fields) val aspectProperty = ExpectedObjectAspectProperty(o, fields, o._dataOperators) Some(pair._1 -> reparent(aspectProperty, fields)) case (None, o: MaybeExpectedObjectProperty[D]) => val fields = transformFields(o._fields) val aspectProperty = ExpectedObjectAspectProperty(o, fields, o._dataOperators) Some(pair._1 -> reparent(aspectProperty, fields)) case (None, o: MaybeObjectProperty[D]) => val fields = transformFields(o._fields) val aspectProperty = MaybeObjectAspectProperty(o, fields, o._dataOperators) Some(pair._1 -> reparent(aspectProperty, fields)) case (None, p: ValueProperty[D, _]) => Some(pair._1 -> mapValueProperty(p)) case (Some(Some(a: ObjectAspectProperty[D2])), o: ObjectProperty[D]) => val fields = transformFields(o._fields) a.__setFields(fields) Some(pair._1 
-> reparent(a, fields)) case (Some(maybeProperty), _) => maybeProperty.map(pair._1 -> _) case (None, _) => None } } val finalFields = transformFields(contract._fields) finalFields } } trait DAspectSyntax { type AspectPropertyFunction[D <: DObject, D2 <: DObject] = PartialFunction[Property[D, _], Option[AspectProperty[D2, _]]] implicit def toPropertyAspectOps[D <: DObject](p: Property[D, _]): PropertyAspectOps[D, _] = new PropertyAspectOps(p) implicit def toObjectPropertyAspectOps[D <: DObject](p: ObjectProperty[D]): ObjectPropertyAspectOps[D] = new ObjectPropertyAspectOps(p) implicit def toValuePropertyAspectOps[D <: DObject, T](p: ValueProperty[D, T]): ValuePropertyAspectOps[D, T] = new ValuePropertyAspectOps(p) implicit def toExpectedPropertyAspectOps[D <: DObject, T]( p: ExpectedProperty[D, T] ): ExpectedPropertyAspectOps[D, T] = new ExpectedPropertyAspectOps(p) implicit def toMaybeExpectedPropertyAspectOps[D <: DObject, T]( p: MaybeExpectedProperty[D, T] ): MaybeExpectedPropertyAspectOps[D, T] = new MaybeExpectedPropertyAspectOps(p) implicit def toMaybePropertyAspectOps[D <: DObject, T](p: MaybeProperty[D, T]): MaybePropertyAspectOps[D, T] = new MaybePropertyAspectOps(p) implicit def toDefaultPropertyAspectOps[D <: DObject, T](p: DefaultProperty[D, T]): DefaultPropertyAspectOps[D, T] = new DefaultPropertyAspectOps(p) implicit def toMaybeDefaultPropertyAspectOps[D <: DObject, T]( p: MaybeDefaultProperty[D, T] ): MaybeDefaultPropertyAspectOps[D, T] = new MaybeDefaultPropertyAspectOps(p) implicit def toExpectedObjectAspectOps[D <: DObject](p: ExpectedObjectProperty[D]): ExpectedObjectAspectOps[D] = new ExpectedObjectAspectOps(p) implicit def toMaybeExpectedObjectAspectOps[D <: DObject]( p: MaybeExpectedObjectProperty[D] ): MaybeExpectedObjectAspectOps[D] = new MaybeExpectedObjectAspectOps(p) implicit def toMaybeObjectAspectOps[D <: DObject](p: MaybeObjectProperty[D]): MaybeObjectAspectOps[D] = new MaybeObjectAspectOps(p) } object DAspectSyntax extends 
DAspectSyntax final class PropertyAspectOps[D <: DObject, _](val p: Property[D, _]) extends AnyVal { def $asExpected[D2 <: D](): AspectProperty[D2, _] = p match { case o: ObjectProperty[D] => val dataOperators = o._dataOperators .collect { case d: DataOperator[DObject] with Expected => d } new ObjectPropertyAspectOps[D](o).$asExpected[D2](dataOperators) case v: ValueProperty[D, _] => val dataOperators = v._dataOperators .collect { case d: DataOperator[_] with Expected => d } new ValuePropertyAspectOps[D, Any](v.asInstanceOf[ValueProperty[D, Any]]).$asExpected[D2](dataOperators) case _ => ??? } def $asMaybe[D2 <: D](): AspectProperty[D2, _] = p match { case o: ObjectProperty[D] => val dataOperators = o._dataOperators .collect { case d: DataOperator[DObject] with Optional => d } new ObjectPropertyAspectOps[D](o).$asMaybe[D2](dataOperators) case v: ValueProperty[D, _] => val dataOperators = v._dataOperators .collect { case d: DataOperator[_] with Optional => d } new ValuePropertyAspectOps[D, Any](v.asInstanceOf[ValueProperty[D, Any]]).$asMaybe[D2](dataOperators) case _ => ??? } def $asExpected[D2 <: D](replaceDataOperators: List[DataOperator[DObject] with Expected]): AspectProperty[D2, _] = p match { case o: ObjectProperty[D] => new ObjectPropertyAspectOps[D](o).$asExpected[D2](replaceDataOperators) case v: ValueProperty[D, _] => new ValuePropertyAspectOps[D, Any](v.asInstanceOf[ValueProperty[D, Any]]).$asExpected[D2](replaceDataOperators) case _ => ??? } def $asMaybe[D2 <: D](replaceDataOperators: List[DataOperator[DObject] with Optional]): AspectProperty[D2, _] = p match { case o: ObjectProperty[D] => new ObjectPropertyAspectOps[D](o).$asMaybe[D2](replaceDataOperators) case v: ValueProperty[D, _] => new ValuePropertyAspectOps[D, Any](v.asInstanceOf[ValueProperty[D, Any]]).$asMaybe[D2](replaceDataOperators) case _ => ??? 
} } final class ObjectPropertyAspectOps[D <: DObject](val p: ObjectProperty[D]) extends AnyVal { def $asExpected[D2 <: D](): ExpectedObjectAspectProperty[D2] = { val dataOperators = p._dataOperators .collect { case d: DataOperator[DObject] with Expected => d } ExpectedObjectAspectProperty[D2](p, dataOperators) } def $asExpected[D2 <: D]( replaceDataOperators: List[DataOperator[DObject] with Expected] ): ExpectedObjectAspectProperty[D2] = ExpectedObjectAspectProperty[D2](p, replaceDataOperators) def $asExpected[D2 <: D]( appendDataOperator: DataOperator[DObject] with Expected, tail: DataOperator[DObject] with Expected* ): ExpectedObjectAspectProperty[D2] = { val dataOperators = p._dataOperators .collect { case d: DataOperator[DObject] with Expected => d } ::: appendDataOperator :: tail.toList ExpectedObjectAspectProperty[D2](p, dataOperators) } def $asMaybe[D2 <: D](): MaybeObjectAspectProperty[D2] = { val dataOperators = p._dataOperators .collect { case d: DataOperator[DObject] with Optional => d } MaybeObjectAspectProperty[D2](p, dataOperators) } def $asMaybe[D2 <: D]( replaceDataOperators: List[DataOperator[DObject] with Optional] = Nil ): MaybeObjectAspectProperty[D2] = MaybeObjectAspectProperty[D2](p, replaceDataOperators) def $asMaybe[D2 <: D]( appendDataOperator: DataOperator[DObject] with Optional, tail: DataOperator[DObject] with Optional* ): MaybeObjectAspectProperty[D2] = { val dataOperators = p._dataOperators .collect { case d: DataOperator[DObject] with Optional => d } ::: appendDataOperator :: tail.toList MaybeObjectAspectProperty[D2](p, dataOperators) } } final class ValuePropertyAspectOps[D <: DObject, T](val p: ValueProperty[D, T]) extends AnyVal { def $asExpected[D2 <: D](): ExpectedAspectProperty[D2, T] = { val dataOperators = p._dataOperators .collect { case d: DataOperator[T] with Expected => d } ExpectedAspectProperty(p._key, p._codec, dataOperators) } def $asExpected[D2 <: D](replaceDataOperators: List[DataOperator[T] with Expected]): 
ExpectedAspectProperty[D2, T] = ExpectedAspectProperty(p._key, p._codec, replaceDataOperators) def $asExpected[D2 <: D]( appendDataOperators: DataOperator[T] with Expected, tail: DataOperator[T] with Expected* ): ExpectedAspectProperty[D2, T] = { val dataOperators = p._dataOperators .collect { case d: DataOperator[T] with Expected => d } ::: appendDataOperators :: tail.toList ExpectedAspectProperty(p._key, p._codec, dataOperators) } def $asMaybe[D2 <: D](): MaybeAspectProperty[D2, T] = { val dataOperators = p._dataOperators .collect { case d: DataOperator[T] with Optional => d } MaybeAspectProperty(p._key, p._codec, dataOperators) } def $asMaybe[D2 <: D](replaceDataOperators: List[DataOperator[T] with Optional] = Nil): MaybeAspectProperty[D2, T] = MaybeAspectProperty(p._key, p._codec, replaceDataOperators) def $asMaybe[D2 <: D]( appendDataOperators: DataOperator[T] with Optional, tail: DataOperator[T] with Optional* ): MaybeAspectProperty[D2, T] = { val dataOperators = p._dataOperators .collect { case d: DataOperator[T] with Optional => d } ::: appendDataOperators :: tail.toList MaybeAspectProperty(p._key, p._codec, dataOperators) } def $asDefault[D2 <: D](default: T): DefaultAspectProperty[D2, T] = { val operators = p._dataOperators .collect { case d: DataOperator[T] with Optional => d } DefaultAspectProperty(p._key, default, p._codec, operators) } def $asDefault[D2 <: D]( default: T, replaceDataOperators: List[DataOperator[T] with Optional] ): DefaultAspectProperty[D2, T] = DefaultAspectProperty(p._key, default, p._codec, replaceDataOperators) def $asDefault[D2 <: D]( default: T, appendDataOperators: DataOperator[T] with Optional, tail: DataOperator[T] with Optional* ): DefaultAspectProperty[D2, T] = { val operators = p._dataOperators .collect { case d: DataOperator[T] with Optional => d } ::: appendDataOperators :: tail.toList DefaultAspectProperty(p._key, default, p._codec, operators) } } final class ExpectedPropertyAspectOps[D <: DObject, T](val p: 
ExpectedProperty[D, T]) extends AnyVal { def $appendDataOperators[D2 <: D](dataOperators: DataOperator[T] with Expected*): ExpectedAspectProperty[D2, T] = ExpectedAspectProperty(p._key, p._codec, p._dataOperators ++ dataOperators) def $replaceDataOperators[D2 <: D](dataOperators: DataOperator[T] with Expected*): ExpectedAspectProperty[D2, T] = ExpectedAspectProperty(p._key, p._codec, dataOperators.toList) } final class MaybeExpectedPropertyAspectOps[D <: DObject, T](val p: MaybeExpectedProperty[D, T]) extends AnyVal { def $appendDataOperators[D2 <: D](dataOperators: DataOperator[T] with Expected*): ExpectedAspectProperty[D2, T] = ExpectedAspectProperty(p._key, p._codec, p._dataOperators ++ dataOperators) def $replaceDataOperators[D2 <: D](dataOperators: DataOperator[T] with Expected*): ExpectedAspectProperty[D2, T] = ExpectedAspectProperty(p._key, p._codec, dataOperators.toList) } final class MaybePropertyAspectOps[D <: DObject, T](val p: MaybeProperty[D, T]) extends AnyVal { def $appendDataOperators[D2 <: D](dataOperators: DataOperator[T] with Optional*): MaybeAspectProperty[D2, T] = MaybeAspectProperty(p._key, p._codec, p._dataOperators ++ dataOperators) def $replaceDataOperators[D2 <: D](dataOperators: DataOperator[T] with Optional*): MaybeAspectProperty[D2, T] = MaybeAspectProperty(p._key, p._codec, dataOperators.toList) } final class DefaultPropertyAspectOps[D <: DObject, T](val p: DefaultProperty[D, T]) extends AnyVal { def $changeDefault[D2 <: D](default: T): DefaultAspectProperty[D2, T] = DefaultAspectProperty(p._key, default, p._codec, p._dataOperators) def $appendDataOperators[D2 <: D](dataOperators: DataOperator[T] with Optional*): DefaultAspectProperty[D2, T] = DefaultAspectProperty(p._key, p._default, p._codec, p._dataOperators ++ dataOperators) def $replaceDataOperators[D2 <: D](dataOperators: DataOperator[T] with Optional*): DefaultAspectProperty[D2, T] = DefaultAspectProperty(p._key, p._default, p._codec, dataOperators.toList) } final class 
MaybeDefaultPropertyAspectOps[D <: DObject, T](val p: MaybeDefaultProperty[D, T]) extends AnyVal { def $changeDefault[D2 <: D](default: T): DefaultAspectProperty[D2, T] = DefaultAspectProperty(p._key, default, p._codec, p._dataOperators) def $appendDataOperators[D2 <: D](dataOperators: DataOperator[T] with Optional*): DefaultAspectProperty[D2, T] = DefaultAspectProperty(p._key, p._default, p._codec, p._dataOperators ++ dataOperators) def $replaceDataOperators[D2 <: D](dataOperators: DataOperator[T] with Optional*): DefaultAspectProperty[D2, T] = DefaultAspectProperty(p._key, p._default, p._codec, dataOperators.toList) } final class ExpectedObjectAspectOps[D <: DObject](val p: ExpectedObjectProperty[D]) extends AnyVal { def $appendDataOperators[D2 <: D]( dataOperators: DataOperator[DObject] with Expected* ): ExpectedObjectAspectProperty[D2] = ExpectedObjectAspectProperty(p, p._dataOperators ++ dataOperators) def $replaceDataOperators[D2 <: D]( dataOperators: DataOperator[DObject] with Expected* ): ExpectedObjectAspectProperty[D2] = ExpectedObjectAspectProperty(p, dataOperators.toList) } final class MaybeExpectedObjectAspectOps[D <: DObject](val p: MaybeExpectedObjectProperty[D]) extends AnyVal { def $appendDataOperators[D2 <: D]( dataOperators: DataOperator[DObject] with Expected* ): ExpectedObjectAspectProperty[D2] = ExpectedObjectAspectProperty(p, p._dataOperators ++ dataOperators) def $replaceDataOperators[D2 <: D]( dataOperators: DataOperator[DObject] with Expected* ): ExpectedObjectAspectProperty[D2] = ExpectedObjectAspectProperty(p, dataOperators.toList) } final class MaybeObjectAspectOps[D <: DObject](val p: MaybeObjectProperty[D]) extends AnyVal { def $appendDataOperators[D2 <: D]( dataOperators: DataOperator[DObject] with Optional* ): MaybeObjectAspectProperty[D2] = MaybeObjectAspectProperty(p, p._dataOperators ++ dataOperators) def $replaceDataOperators[D2 <: D](dataOperators: DataOperator[DObject] with Optional*): MaybeObjectAspectProperty[D2] = 
MaybeObjectAspectProperty(p, dataOperators.toList) }
HigherState/dsentric
maps/src/main/scala/dsentric/contracts/Aspect.scala
Scala
apache-2.0
20,218
/*
 * This software is licensed under the GNU Affero General Public License, quoted below.
 *
 * This file is a part of powerspy.scala.
 *
 * Copyright (C) 2011-2014 Inria, University of Lille 1.
 *
 * powerspy.scala is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of
 * the License, or (at your option) any later version.
 *
 * powerspy.scala is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with powerspy.scala.
 *
 * If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
 */
package fr.inria.powerspy.core

/** Byte order of a hexadecimal string received from the device. */
object Encoding extends Enumeration {
  type Encoding = Value
  val LITTLE_ENDIAN, BIG_ENDIAN = Value
}

import fr.inria.powerspy.core.Encoding.{BIG_ENDIAN, Encoding}

/**
 * Represents an hexadecimal value.
 *
 * @param value    hexadecimal digits (up to 32 bits worth)
 * @param encoding byte order used when the string was produced
 */
class Hexadecimal(value: String, encoding: Encoding) {
  // Signed 32-bit interpretation of the hex string. Parsing goes through
  // Long so the full unsigned 32-bit range (e.g. "FFFFFFFF") is accepted
  // before narrowing to Int; little-endian input gets its bytes swapped.
  lazy val bits: Int = {
    val raw = java.lang.Long.parseLong(value, 16).toInt
    if (encoding == BIG_ENDIAN) raw
    else java.lang.Integer.reverseBytes(raw)
  }
}
Spirals-Team/powerspy.scala
powerspy-core/src/main/scala/fr/inria/powerspy/core/Hexadecimal.scala
Scala
agpl-3.0
1,493
package com.twitter.inject.server

import com.twitter.finagle.http.{Fields, Method, Request, Response, Status}
import com.twitter.server.AdminHttpServer
import com.twitter.util.{Closable, Try}
import java.net.URI

/** Internal utility which represents an http client to the AdminHttpInterface of the [[EmbeddedTwitterServer]] */
private[twitter] abstract class AdminHttpClient private[twitter] (
  twitterServer: com.twitter.server.TwitterServer,
  verbose: Boolean = false) { self: EmbeddedTwitterServer =>

  /* Public */

  /**
   * Lazily-created client bound to the server's admin HTTP port. First access
   * forces the embedded server to start and registers a close hook so the
   * client is shut down together with the server.
   */
  final lazy val httpAdminClient: EmbeddedHttpClient = {
    start()
    val adminClient =
      new EmbeddedHttpClient("httpAdminClient", httpAdminPort(), disableLogging)
        .withDefaultHeaders(() => defaultRequestHeaders)
        .withStreamResponses(streamResponse)
    // The block is evaluated on exit (by-name), so `isStarted` reflects the
    // server's state at shutdown time, not at registration time.
    closeOnExit {
      if (isStarted) {
        Closable.make { deadline =>
          info(s"Closing embedded http client: ${adminClient.label}", disableLogging)
          adminClient.close(deadline)
        }
      } else Closable.nop
    }
    adminClient
  }

  /** Performs a GET against the admin interface at `path`. */
  def httpGetAdmin(
    path: String,
    accept: String = null,
    headers: Map[String, String] = Map(),
    suppress: Boolean = false,
    andExpect: Status = Status.Ok,
    withLocation: String = null,
    withBody: String = null
  ): Response =
    httpAdmin(path, accept, headers, suppress, andExpect, withLocation, withBody, Method.Get)

  /** Performs a POST against the admin interface at `path`. */
  def httpPostAdmin(
    path: String,
    accept: String = null,
    headers: Map[String, String] = Map(),
    suppress: Boolean = false,
    andExpect: Status = Status.Ok,
    withLocation: String = null,
    withBody: String = null
  ): Response =
    httpAdmin(path, accept, headers, suppress, andExpect, withLocation, withBody, Method.Post)

  /** Queries the /health endpoint, expecting the body "OK\\n" when healthy. */
  def healthResponse(expectedHealthy: Boolean = true): Try[Response] = {
    val expectedBody = if (expectedHealthy) "OK\\n" else ""
    Try {
      httpGetAdmin("/health", andExpect = Status.Ok, withBody = expectedBody, suppress = !verbose)
    }
  }

  /** All routes currently registered on the underlying admin HTTP server. */
  def adminHttpServerRoutes: Seq[AdminHttpServer.Route] =
    twitterServer.routes

  /* Private */

  // Shared implementation behind httpGetAdmin/httpPostAdmin.
  private def httpAdmin(
    path: String,
    accept: String = null,
    headers: Map[String, String] = Map(),
    suppress: Boolean = false,
    andExpect: Status = Status.Ok,
    withLocation: String = null,
    withBody: String = null,
    method: Method
  ): Response =
    httpAdminClient(
      createApiRequest(path, method),
      addAcceptHeader(accept, headers),
      suppress,
      andExpect,
      withLocation,
      withBody)

  /* Protected */

  /** Adds an Accept header when `accept` is non-null; otherwise returns `headers` unchanged. */
  protected def addAcceptHeader(
    accept: String,
    headers: Map[String, String]
  ): Map[String, String] =
    Option(accept) match {
      case Some(mediaType) => headers + (Fields.Accept -> mediaType.toString)
      case None            => headers
    }

  /** Builds a Request for `path`, stripping scheme/host when a full URL is supplied. */
  protected def createApiRequest(path: String, method: Method = Method.Get): Request =
    if (path.startsWith("http")) Request(method, URI.create(path).getPath)
    else Request(method, path)
}
twitter/finatra
inject/inject-server/src/test/scala/com/twitter/inject/server/AdminHttpClient.scala
Scala
apache-2.0
3,052
package org.jetbrains.plugins.scala package lang.overrideImplement import com.intellij.testFramework.builders.JavaModuleFixtureBuilder import com.intellij.testFramework.fixtures.{JavaCodeInsightFixtureTestCase, ModuleFixture} import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings import org.jetbrains.plugins.scala.overrideImplement.ScalaOIUtil import org.jetbrains.plugins.scala.util.TypeAnnotationSettings import org.junit.Assert.assertEquals /** * Nikolay.Tropin * 12/18/13 */ class FromJavaOverrideImplementTest extends JavaCodeInsightFixtureTestCase { protected override def tuneFixture(moduleBuilder: JavaModuleFixtureBuilder[_]): Unit = { moduleBuilder.setMockJdkLevel(JavaModuleFixtureBuilder.MockJdkLevel.jdk15) // TODO: the path returned from IdeaTestUtil.getMockJdk14Path is invalid in the scala plugin // because the mock-jdk14 does only exists in the intellij-community source // we either have to copy the mock directory into our repo as well or just not add it at all //moduleBuilder.addJdk(IdeaTestUtil.getMockJdk14Path.getPath) } def runTest(methodName: String, javaText: String, scalaText: String, expectedText: String, isImplement: Boolean, defaultSettings: ScalaCodeStyleSettings = TypeAnnotationSettings.alwaysAddType(ScalaCodeStyleSettings.getInstance(getProject))): Unit = { myFixture.addFileToProject("JavaDummy.java", javaText.stripMargin.trim) val oldSettings = ScalaCodeStyleSettings.getInstance(getProject).clone() val scalaFile = myFixture.configureByText("ScalaDummy.scala", scalaText.replace("\\r", "").stripMargin.trim) TypeAnnotationSettings.set(getProject, defaultSettings) ScalaOIUtil.invokeOverrideImplement(scalaFile, isImplement, methodName)(getProject, myFixture.getEditor) TypeAnnotationSettings.set(getProject, oldSettings.asInstanceOf[ScalaCodeStyleSettings]) assertEquals(expectedText.replace("\\r", "").stripMargin.trim, scalaFile.getText.stripMargin.trim) } def testDefaultImplementations(): Unit = { val javaText = """ 
|public interface JavaDummy { | default int foo() { | return 1; | } |} """ val scalaText = """ |class Child extends JavaDummy { | <caret> |} """ val expectedText = """ |class Child extends JavaDummy { | override def foo(): Int = super.foo() |} """ runTest("foo", javaText, scalaText, expectedText, isImplement = false) } def testVarargImplement(): Unit = { val javaText = """ |public abstract class JavaDummy { | public abstract void vararg(int... args); |} """ val scalaText = """ |class Child extends JavaDummy { | <caret> |} """ val expectedText = """ |class Child extends JavaDummy { | def vararg(args: Int*): Unit = ??? |} """ runTest("vararg", javaText, scalaText, expectedText, isImplement = true) } def testVarargOverride(): Unit = { val javaText = """ |public class JavaDummy { | public void vararg(int... args) {} |} """ val scalaText = """ |class Child extends JavaDummy { | <caret> |} """ val expectedText = """ |class Child extends JavaDummy { | override def vararg(args: Int*): Unit = super.vararg(args: _*) |} """ runTest("vararg", javaText, scalaText, expectedText, isImplement = false) } def testKeywordNames(): Unit = { val javaText = """ |public class JavaDummy { | public void def(int val) {} |} """ val scalaText = """ |class Child extends JavaDummy { | <caret> |} """ val expectedText = """ |class Child extends JavaDummy { | override def `def`(`val`: Int) = super.`def`(`val`) |} """ val settings = TypeAnnotationSettings.alwaysAddType(ScalaCodeStyleSettings.getInstance(getProject)) runTest("def", javaText, scalaText, expectedText, isImplement = false, defaultSettings = TypeAnnotationSettings.noTypeAnnotationForPublic(settings)) } def testWithOverrideAnnotation(): Unit = { val javaText = """ |public class JavaDummy { | public void method(int number) {} | public static class Inner extends JavaDummy { | @Override | public void method(int number) { | super.method(number); | } | } |} """ val scalaText = """ |class Child extends JavaDummy.Inner { | <caret> |} """ val 
expected = """ |class Child extends JavaDummy.Inner { | override def method(number: Int): Unit = super.method(number) |} """ runTest("method", javaText, scalaText, expected, isImplement = false) } def testWithoutOverrideAnnotation(): Unit = { val javaText = """ |public class JavaDummy { | public void method(int number) {} | public static class Inner extends JavaDummy { | public void method(int number) { | super.method(number); | } | } |} """ val scalaText = """ |class Child extends JavaDummy.Inner { | <caret> |} """ val expected = """ |class Child extends JavaDummy.Inner { | override def method(number: Int): Unit = super.method(number) |} """ runTest("method", javaText, scalaText, expected, isImplement = false) } def testSimpleGenerics(): Unit = { val javaText = """ |public class JavaDummy<T> { | public T method(T arg) { | return arg; | } |} """ val scalaText = """ |class Child extends JavaDummy[Int] { | <caret> |} """ val expected = """ |class Child extends JavaDummy[Int] { | override def method(arg: Int): Int = super.method(arg) |} """ runTest("method", javaText, scalaText, expected, isImplement = false) } def testSimpleGenerics2(): Unit = { val javaText = """ |public class JavaDummy<T> { | public T method(T arg) { | return arg; | } |} """ val scalaText = """ |class Child[S] extends JavaDummy[S] { | <caret> |} """ val expected = """ |class Child[S] extends JavaDummy[S] { | override def method(arg: S): S = super.method(arg) |} """ runTest("method", javaText, scalaText, expected, isImplement = false) } def testGenerics(): Unit = { val javaText = """ |public class JavaDummy<T, S> { | public T method(JavaDummy<? extends T, ? 
super S> arg) { | return null; | } |} """ val scalaText = """ |class Child extends JavaDummy[Int, Boolean] { | <caret> |} """ val expectedText = """ |class Child extends JavaDummy[Int, Boolean] { | override def method(arg: JavaDummy[_ <: Int, _ >: Boolean]): Int = super.method(arg) |} """ runTest("method", javaText, scalaText, expectedText, isImplement = false) } def testTypeParameter(): Unit = { val javaText = """ |public class JavaDummy<T> { | public <S extends JavaDummy<T> & DummyInterface<T>> int method(int arg) { | return 0; | } | | public static interface DummyInterface<S> {} |} """ val scalaText = """ |class Child extends JavaDummy[Int] { | <caret> |} """ val expectedText = """ |class Child extends JavaDummy[Int] { | override def method[S <: JavaDummy[Int] with JavaDummy.DummyInterface[Int]](arg: Int): Int = super.method(arg) |} """ runTest("method", javaText, scalaText, expectedText, isImplement = false) } def testQueryLikeMethod(): Unit = { val javaText = """ |public class JavaDummy<T> { | public int getValue() {return 0;} |} """ val scalaText = """ |class Child extends JavaDummy[Int] { | <caret> |} """ val expectedText = """ |class Child extends JavaDummy[Int] { | override def getValue: Int = super.getValue |} """ runTest("getValue", javaText, scalaText, expectedText, isImplement = false) } def testMap(): Unit = { val javaText = { """ |public interface Map<K,V> | void putAll(Map<? extends K, ? extends V> m); |} """ } val scalaText = """ |class ExtendsMap[K, V] extends Map[K, V] { | <caret> |} """ val expectedText = """ |class ExtendsMap[K, V] extends Map[K, V] { | def putAll(m: Map[_ <: K, _ <: V]): Unit = ??? 
|} """ val methodName: String = "putAll" val isImplement = true runTest(methodName, javaText, scalaText, expectedText, isImplement) } def testSCL14206(): Unit = { val java = "public interface Solution { long find(int a[], int n); }" val scala = """ |class Impl extends Solution { | <caret> |} """.stripMargin val expected = """ |class Impl extends Solution { | def find(a: Array[Int], n: Int): Long = ??? |} """.stripMargin runTest("find", java, scala, expected, isImplement = true) } def testParameterTypeWithWildcard(): Unit = { val java = """ |public interface A<T extends Foo> { | void foo(A<?> a) |} """.stripMargin val scala = """ |class B extends A[Foo] { | <caret> |} | |trait Foo """.stripMargin val expected = """ |class B extends A[Foo] { | def foo(a: A[_]): Unit = ??? |} | |trait Foo """.stripMargin runTest("foo", java, scala, expected, isImplement = true) } def testRawParameterType(): Unit = { val java = """ |public interface A<T extends Foo> { | void foo(A a) |} """.stripMargin val scala = """ |class B extends A[Foo] { | <caret> |} | |trait Foo """.stripMargin val expected = """ |class B extends A[Foo] { | def foo(a: A[_ <: Foo]): Unit = ??? |} | |trait Foo """.stripMargin runTest("foo", java, scala, expected, isImplement = true) } }
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/lang/overrideImplement/FromJavaOverrideImplementTest.scala
Scala
apache-2.0
11,016
/** Placeholder entry point for the input_in_range1 exercise; no logic yet. */
object input_in_range1 {
  def main(args: Array[String]): Unit = {
    // Put code here
  }
}
LoyolaChicagoBooks/introcs-scala-examples
input_in_range1/input_in_range1.scala
Scala
gpl-3.0
86
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package kafka.controller

import kafka.admin.AdminUtils
import kafka.api.LeaderAndIsr
import kafka.common.{LeaderElectionNotNeededException, NoReplicaOnlineException, StateChangeFailedException, TopicAndPartition}
import kafka.log.LogConfig
import kafka.server.{ConfigType, KafkaConfig}
import kafka.utils.Logging

/**
 * Strategy interface for electing a partition leader. Each implementation
 * encodes one election policy (offline, reassignment, preferred replica,
 * controlled shutdown, no-op) and is invoked by the controller.
 */
trait PartitionLeaderSelector {

  /**
   * @param topicAndPartition          The topic and partition whose leader needs to be elected
   * @param currentLeaderAndIsr        The current leader and isr of input partition read from zookeeper
   * @throws NoReplicaOnlineException If no replica in the assigned replicas list is alive
   * @return The leader and isr request, with the newly selected leader and isr, and the set of
   *         replicas to receive the LeaderAndIsrRequest.
   */
  def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int])

}

/**
 * Select the new leader, new isr and receiving replicas (for the LeaderAndIsrRequest):
 * 1. If at least one broker from the isr is alive, it picks a broker from the live isr as the new leader and the live
 *    isr as the new isr.
 * 2. Else, if unclean leader election for the topic is disabled, it throws a NoReplicaOnlineException.
 * 3. Else, it picks some alive broker from the assigned replica list as the new leader and the new isr.
 * 4. If no broker in the assigned replica list is alive, it throws a NoReplicaOnlineException
 * Replicas to receive LeaderAndIsr request = live assigned replicas
 * Once the leader is successfully registered in zookeeper, it updates the allLeaders cache
 */
class OfflinePartitionLeaderSelector(controllerContext: ControllerContext, config: KafkaConfig)
  extends PartitionLeaderSelector with Logging {
  logIdent = "[OfflinePartitionLeaderSelector]: "

  def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int]) = {
    controllerContext.partitionReplicaAssignment.get(topicAndPartition) match {
      case Some(assignedReplicas) =>
        // Partition the view of the world into replicas that are currently reachable.
        val liveAssignedReplicas = assignedReplicas.filter(r => controllerContext.isReplicaOnline(r, topicAndPartition))
        val liveBrokersInIsr = currentLeaderAndIsr.isr.filter(r => controllerContext.isReplicaOnline(r, topicAndPartition))
        val newLeaderAndIsr =
          if (liveBrokersInIsr.isEmpty) {
            // Prior to electing an unclean (i.e. non-ISR) leader, ensure that doing so is not disallowed by the
            // configuration for unclean leader election. The per-topic config read from ZooKeeper overrides the
            // broker default via LogConfig.fromProps.
            if (!LogConfig.fromProps(config.originals, AdminUtils.fetchEntityConfig(controllerContext.zkUtils,
              ConfigType.Topic, topicAndPartition.topic)).uncleanLeaderElectionEnable) {
              throw new NoReplicaOnlineException(
                s"No replica in ISR for partition $topicAndPartition is alive. Live brokers are: [${controllerContext.liveBrokerIds}], " +
                  s"ISR brokers are: [${currentLeaderAndIsr.isr.mkString(",")}]"
              )
            }
            debug(s"No broker in ISR is alive for $topicAndPartition. Pick the leader from the alive assigned " +
              s"replicas: ${liveAssignedReplicas.mkString(",")}")
            if (liveAssignedReplicas.isEmpty) {
              throw new NoReplicaOnlineException(s"No replica for partition $topicAndPartition is alive. Live " +
                s"brokers are: [${controllerContext.liveBrokerIds}]. Assigned replicas are: [$assignedReplicas].")
            } else {
              // Unclean election: take the first live assigned replica and shrink the ISR to just the
              // new leader, accepting potential data loss (metered below).
              controllerContext.stats.uncleanLeaderElectionRate.mark()
              val newLeader = liveAssignedReplicas.head
              warn(s"No broker in ISR is alive for $topicAndPartition. Elect leader $newLeader from live " +
                s"brokers ${liveAssignedReplicas.mkString(",")}. There's potential data loss.")
              currentLeaderAndIsr.newLeaderAndIsr(newLeader, List(newLeader))
            }
          } else {
            // Clean election: pick the first live assigned replica that is also in the live ISR,
            // preserving assigned-replica ordering.
            // NOTE(review): `liveReplicasInIsr.head` assumes the live ISR always intersects the
            // assigned replica list — confirm that invariant holds for all callers.
            val liveReplicasInIsr = liveAssignedReplicas.filter(r => liveBrokersInIsr.contains(r))
            val newLeader = liveReplicasInIsr.head
            debug(s"Some broker in ISR is alive for $topicAndPartition. Select $newLeader from ISR " +
              s"${liveBrokersInIsr.mkString(",")} to be the leader.")
            currentLeaderAndIsr.newLeaderAndIsr(newLeader, liveBrokersInIsr)
          }
        info(s"Selected new leader and ISR $newLeaderAndIsr for offline partition $topicAndPartition")
        (newLeaderAndIsr, liveAssignedReplicas)
      case None =>
        throw new NoReplicaOnlineException(s"Partition $topicAndPartition doesn't have replicas assigned to it")
    }
  }
}

/**
 * New leader = a live in-sync reassigned replica
 * New isr = current isr
 * Replicas to receive LeaderAndIsr request = reassigned replicas
 */
class ReassignedPartitionLeaderSelector(controllerContext: ControllerContext) extends PartitionLeaderSelector with Logging {
  logIdent = "[ReassignedPartitionLeaderSelector]: "

  /**
   * The reassigned replicas are already in the ISR when selectLeader is called.
   */
  def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int]) = {
    val reassignedInSyncReplicas = controllerContext.partitionsBeingReassigned(topicAndPartition).newReplicas
    // First target replica that is both online and a member of the current ISR wins.
    val newLeaderOpt = reassignedInSyncReplicas.find { r =>
      controllerContext.isReplicaOnline(r, topicAndPartition) && currentLeaderAndIsr.isr.contains(r)
    }
    newLeaderOpt match {
      case Some(newLeader) => (currentLeaderAndIsr.newLeader(newLeader), reassignedInSyncReplicas)
      case None =>
        // Distinguish "no reassignment targets at all" from "targets exist but none are in-sync"
        // purely for the error message; both cases fail the election.
        val errorMessage = if (reassignedInSyncReplicas.isEmpty) {
          s"List of reassigned replicas for partition $topicAndPartition is empty. Current leader and ISR: " +
            s"[$currentLeaderAndIsr]"
        } else {
          s"None of the reassigned replicas for partition $topicAndPartition are in-sync with the leader. " +
            s"Current leader and ISR: [$currentLeaderAndIsr]"
        }
        throw new NoReplicaOnlineException(errorMessage)
    }
  }
}

/**
 * New leader = preferred (first assigned) replica (if in isr and alive);
 * New isr = current isr;
 * Replicas to receive LeaderAndIsr request = assigned replicas
 */
class PreferredReplicaPartitionLeaderSelector(controllerContext: ControllerContext) extends PartitionLeaderSelector
  with Logging {
  logIdent = "[PreferredReplicaPartitionLeaderSelector]: "

  def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int]) = {
    val assignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition)
    // The preferred replica is by definition the first entry of the assignment.
    val preferredReplica = assignedReplicas.head
    // check if preferred replica is the current leader
    val currentLeader = controllerContext.partitionLeadershipInfo(topicAndPartition).leaderAndIsr.leader
    if (currentLeader == preferredReplica) {
      // Nothing to do — signalled with a dedicated exception type rather than a no-op result.
      throw new LeaderElectionNotNeededException("Preferred replica %d is already the current leader for partition %s"
        .format(preferredReplica, topicAndPartition))
    } else {
      info("Current leader %d for partition %s is not the preferred replica.".format(currentLeader, topicAndPartition) +
        " Triggering preferred replica leader election")
      // check if preferred replica is not the current leader and is alive and in the isr
      if (controllerContext.isReplicaOnline(preferredReplica, topicAndPartition) && currentLeaderAndIsr.isr.contains(preferredReplica)) {
        val newLeaderAndIsr = currentLeaderAndIsr.newLeader(preferredReplica)
        (newLeaderAndIsr, assignedReplicas)
      } else {
        throw new StateChangeFailedException(s"Preferred replica $preferredReplica for partition $topicAndPartition " +
          s"is either not alive or not in the isr. Current leader and ISR: [$currentLeaderAndIsr]")
      }
    }
  }
}

/**
 * New leader = replica in isr that's not being shutdown;
 * New isr = current isr - shutdown replica;
 * Replicas to receive LeaderAndIsr request = live assigned replicas
 */
class ControlledShutdownLeaderSelector(controllerContext: ControllerContext) extends PartitionLeaderSelector
  with Logging {
  logIdent = "[ControlledShutdownLeaderSelector]: "

  def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int]) = {
    val currentIsr = currentLeaderAndIsr.isr
    val assignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition)
    // NOTE(review): positional `true` — presumably tells isReplicaOnline to also count replicas on
    // brokers that are in the middle of a controlled shutdown; confirm against its signature.
    val liveAssignedReplicas = assignedReplicas.filter(r => controllerContext.isReplicaOnline(r, topicAndPartition, true))
    // Drop every shutting-down broker from the ISR before choosing a leader.
    val newIsr = currentIsr.filter(brokerId => !controllerContext.shuttingDownBrokerIds.contains(brokerId))
    liveAssignedReplicas.find(newIsr.contains) match {
      case Some(newLeader) =>
        debug(s"Partition $topicAndPartition : current leader = ${currentLeaderAndIsr.leader}, new leader = $newLeader")
        val newLeaderAndIsr = currentLeaderAndIsr.newLeaderAndIsr(newLeader, newIsr)
        (newLeaderAndIsr, liveAssignedReplicas)
      case None =>
        throw new StateChangeFailedException(s"No other replicas in ISR ${currentIsr.mkString(",")} for $topicAndPartition " +
          s"besides shutting down brokers ${controllerContext.shuttingDownBrokerIds.mkString(",")}")
    }
  }
}

/**
 * Essentially does nothing. Returns the current leader and ISR, and the current
 * set of replicas assigned to a given topic/partition.
 */
class NoOpLeaderSelector(controllerContext: ControllerContext) extends PartitionLeaderSelector with Logging {
  logIdent = "[NoOpLeaderSelector]: "

  def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int]) = {
    warn("I should never have been asked to perform leader election, returning the current LeaderAndIsr and replica assignment.")
    (currentLeaderAndIsr, controllerContext.partitionReplicaAssignment(topicAndPartition))
  }
}
ErikKringen/kafka
core/src/main/scala/kafka/controller/PartitionLeaderSelector.scala
Scala
apache-2.0
10,943
/* * Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com> */ package scalaguide.json import play.api.data.validation.ValidationError import org.junit.runner.RunWith import org.specs2.mutable.Specification import org.specs2.runner.JUnitRunner @RunWith(classOf[JUnitRunner]) class ScalaJsonCombinatorsSpec extends Specification { val sampleJson = { //#sample-json import play.api.libs.json._ val json: JsValue = Json.parse(""" { "name" : "Watership Down", "location" : { "lat" : 51.235685, "long" : -1.309197 }, "residents" : [ { "name" : "Fiver", "age" : 4, "role" : null }, { "name" : "Bigwig", "age" : 6, "role" : "Owsla" } ] } """) //#sample-json json } object SampleModel { //#sample-model case class Location(lat: Double, long: Double) case class Resident(name: String, age: Int, role: Option[String]) case class Place(name: String, location: Location, residents: Seq[Resident]) //#sample-model } "Scala JSON" should { "allow using JsPath" in { //#jspath-define import play.api.libs.json._ //###replace: val json = { ... } val json: JsValue = sampleJson // Simple path val latPath = JsPath \ "location" \ "lat" // Recursive path val namesPath = JsPath \\ "name" // Indexed path val firstResidentPath = (JsPath \ "residents")(0) //#jspath-define //#jspath-define-alias val longPath = __ \ "location" \ "long" //#jspath-define-alias //#jspath-traverse val lat: List[JsValue] = latPath(json) // List(JsNumber(51.235685)) //#jspath-traverse //val name = (JsPath \ "name").read[String] and (JsPath \ "location").read[Int] latPath.toString === "/location/lat" namesPath.toString === "//name" firstResidentPath.toString === "/residents(0)" } "allow creating simple Reads" in { //#reads-imports import play.api.libs.json._ // JSON library import play.api.libs.json.Reads._ // Custom validation helpers import play.api.libs.functional.syntax._ // Combinator syntax //#reads-imports //###replace: val json = { ... 
} val json: JsValue = sampleJson //#reads-simple val nameReads: Reads[String] = (JsPath \ "name").read[String] //#reads-simple json.validate(nameReads) must beLike {case x: JsSuccess[String] => x.get === "Watership Down"} } "allow creating complex Reads" in { import SampleModel._ import play.api.libs.json._ import play.api.libs.functional.syntax._ //###replace: val json = { ... } val json: JsValue = sampleJson //#reads-complex-builder val locationReadsBuilder = (JsPath \ "lat").read[Double] and (JsPath \ "long").read[Double] //#reads-complex-builder //#reads-complex-buildertoreads implicit val locationReads = locationReadsBuilder.apply(Location.apply _) //#reads-complex-buildertoreads val locationResult = (json \ "location").validate[Location] locationResult must beLike {case x: JsSuccess[Location] => x.get.lat === 51.235685} } "allow creating complex Reads in a single statement" in { import SampleModel._ import play.api.libs.json._ import play.api.libs.functional.syntax._ //###replace: val json = { ... } val json: JsValue = sampleJson //#reads-complex-statement implicit val locationReads: Reads[Location] = ( (JsPath \ "lat").read[Double] and (JsPath \ "long").read[Double] )(Location.apply _) //#reads-complex-statement val locationResult = (json \ "location").validate[Location] locationResult must beLike {case x: JsSuccess[Location] => x.get.lat === 51.235685} } "allow validation with Reads" in { import SampleModel._ import play.api.libs.json._ import play.api.libs.json.Reads._ import play.api.libs.functional.syntax._ //#reads-validation-simple //###replace: val json = { ... 
} val json: JsValue = sampleJson val nameReads: Reads[String] = (JsPath \ "name").read[String] val nameResult: JsResult[String] = json.validate[String](nameReads) nameResult match { case s: JsSuccess[String] => println("Name: " + s.get) case e: JsError => println("Errors: " + JsError.toFlatJson(e).toString()) } //#reads-validation-simple nameResult must beLike {case x: JsSuccess[String] => x.get === "Watership Down"} //#reads-validation-custom val improvedNameReads = (JsPath \ "name").read[String](minLength[String](2)) //#reads-validation-custom json.validate[String](improvedNameReads) must beLike {case x: JsSuccess[String] => x.get === "Watership Down"} } "allow creating Reads for model" in { import SampleModel._ //#reads-model import play.api.libs.json._ import play.api.libs.json.Reads._ import play.api.libs.functional.syntax._ implicit val locationReads: Reads[Location] = ( (JsPath \ "lat").read[Double](min(-90.0) keepAnd max(90.0)) and (JsPath \ "long").read[Double](min(-180.0) keepAnd max(180.0)) )(Location.apply _) implicit val residentReads: Reads[Resident] = ( (JsPath \ "name").read[String](minLength[String](2)) and (JsPath \ "age").read[Int](min(0) keepAnd max(150)) and (JsPath \ "role").readNullable[String] )(Resident.apply _) implicit val placeReads: Reads[Place] = ( (JsPath \ "name").read[String](minLength[String](2)) and (JsPath \ "location").read[Location] and (JsPath \ "residents").read[Seq[Resident]] )(Place.apply _) //###replace: val json = { ... 
} val json: JsValue = sampleJson json.validate[Place] match { case s: JsSuccess[Place] => { val place: Place = s.get // do something with place } case e: JsError => { // error handling flow } } //#reads-model json.validate[Place] must beLike {case x: JsSuccess[Place] => x.get.name === "Watership Down"} } "allow creating Writes for model" in { import SampleModel._ //#writes-model import play.api.libs.json._ import play.api.libs.functional.syntax._ implicit val locationWrites: Writes[Location] = ( (JsPath \ "lat").write[Double] and (JsPath \ "long").write[Double] )(unlift(Location.unapply)) implicit val residentWrites: Writes[Resident] = ( (JsPath \ "name").write[String] and (JsPath \ "age").write[Int] and (JsPath \ "role").writeNullable[String] )(unlift(Resident.unapply)) implicit val placeWrites: Writes[Place] = ( (JsPath \ "name").write[String] and (JsPath \ "location").write[Location] and (JsPath \ "residents").write[Seq[Resident]] )(unlift(Place.unapply)) val place = Place( "Watership Down", Location(51.235685, -1.309197), Seq( Resident("Fiver", 4, None), Resident("Bigwig", 6, Some("Owsla")) ) ) val json = Json.toJson(place) //#writes-model val some = (JsPath \ "lat").write[Double] and (JsPath \ "long").write[Double] val placeSome = Place.unapply(place) json \ "name" === JsString("Watership Down") } "allow creating Reads/Writes for recursive types" in { import play.api.libs.json._ import play.api.libs.json.Reads._ import play.api.libs.functional.syntax._ //#reads-writes-recursive case class User(name: String, friends: Seq[User]) implicit lazy val userReads: Reads[User] = ( (__ \ "name").read[String] and (__ \ "friends").lazyRead(Reads.seq[User](userReads)) )(User) implicit lazy val userWrites: Writes[User] = ( (__ \ "name").write[String] and (__ \ "friends").lazyWrite(Writes.seq[User](userWrites)) )(unlift(User.unapply)) //#reads-writes-recursive // Use Reads for JSON -> model val json: JsValue = Json.parse(""" { "name" : "Fiver", "friends" : [ { "name" : 
"Bigwig", "friends" : [] }, { "name" : "Hazel", "friends" : [] } ] } """) val userResult = json.validate[User] userResult must beLike {case x: JsSuccess[User] => x.get.name === "Fiver"} // Use Writes for model -> JSON val jsonFromUser = Json.toJson(userResult.get) (jsonFromUser \ "name").as[String] === "Fiver" } "allow creating Format from components" in { import SampleModel._ import play.api.libs.json._ import play.api.libs.json.Reads._ import play.api.libs.functional.syntax._ //#format-components val locationReads: Reads[Location] = ( (JsPath \ "lat").read[Double](min(-90.0) keepAnd max(90.0)) and (JsPath \ "long").read[Double](min(-180.0) keepAnd max(180.0)) )(Location.apply _) val locationWrites: Writes[Location] = ( (JsPath \ "lat").write[Double] and (JsPath \ "long").write[Double] )(unlift(Location.unapply)) implicit val locationFormat: Format[Location] = Format(locationReads, locationWrites) //#format-components // Use Reads for JSON -> model val json: JsValue = Json.parse(""" { "lat" : 51.235685, "long" : -1.309197 } """) val location = json.validate[Location].get location === Location(51.235685,-1.309197) // Use Writes for model -> JSON val jsonFromLocation = Json.toJson(location) (jsonFromLocation \ "lat").as[Double] === 51.235685 } "allow creating Format from combinators" in { import SampleModel._ import play.api.libs.json._ import play.api.libs.json.Reads._ import play.api.libs.functional.syntax._ //#format-combinators implicit val locationFormat: Format[Location] = ( (JsPath \ "lat").format[Double](min(-90.0) keepAnd max(90.0)) and (JsPath \ "long").format[Double](min(-180.0) keepAnd max(180.0)) )(Location.apply, unlift(Location.unapply)) //#format-combinators // Use Reads for JSON -> model val json: JsValue = Json.parse(""" { "lat" : 51.235685, "long" : -1.309197 } """) val location = json.validate[Location].get location === Location(51.235685,-1.309197) // Use Writes for model -> JSON val jsonFromLocation = Json.toJson(location) (jsonFromLocation \ 
"lat").as[Double] === 51.235685 } } }
jyotikamboj/container
pf-documentation/manual/working/scalaGuide/main/json/code/ScalaJsonCombinatorsSpec.scala
Scala
mit
11,328
package ecommerce.shipping.view

import ecommerce.shipping.ShipmentCreated
import ecommerce.shipping.ShippingStatus.Waiting
import pl.newicom.dddd.messaging.event.OfficeEventMessage
import pl.newicom.dddd.view.sql.Projection
import pl.newicom.dddd.view.sql.Projection.ProjectionAction
import slick.dbio.Effect.Write

import scala.concurrent.ExecutionContext

/**
 * SQL read-model projection for shipments: folds shipping-office events into
 * [[ShipmentView]] rows via the supplied [[ShipmentDao]].
 */
class ShipmentProjection(dao: ShipmentDao)(implicit ex: ExecutionContext) extends Projection {

  /**
   * Translates a single office event into a database write action.
   *
   * On [[ShipmentCreated]] a new view row is upserted with initial status
   * [[Waiting]].
   *
   * NOTE(review): the match is non-exhaustive — any event other than
   * ShipmentCreated would raise a MatchError here. Presumably the framework
   * only routes ShipmentCreated to this projection; verify against the
   * Projection contract / event-stream subscription.
   */
  override def consume(eventMessage: OfficeEventMessage): ProjectionAction[Write] = {
    eventMessage.event match {
      case ShipmentCreated(id, orderId) =>
        dao.createOrUpdate(ShipmentView(id.value, orderId, Waiting))
    }
  }
}
pawelkaczor/ddd-leaven-akka-v2
shipping/read-back/src/main/scala/ecommerce/shipping/view/ShipmentProjection.scala
Scala
mit
695
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.eagle.datastream

/**
 * One-to-many transformation step in the stream-processing pipeline.
 *
 * Implementations receive a single input element and emit zero or more
 * output elements through the supplied collector. Extends [[Serializable]]
 * so instances can be shipped to remote workers.
 *
 * @tparam T input element type
 * @tparam R output element type
 */
trait FlatMapper[T, R] extends Serializable {
  // Emit any number of results for `input` by calling the collector;
  // the return value is unused (inferred Unit).
  def flatMap(input: T, collector: Collector[R])
}
eBay/Eagle
eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/FlatMapper.scala
Scala
apache-2.0
935
/**
 * Copyright 2016 Gianluca Amato <gianluca.amato@unich.it>
 *
 * This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
 * JANDOM is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * JANDOM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of a
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with JANDOM.  If not, see <http://www.gnu.org/licenses/>.
 */
package it.unich.jandom.targets.parameters

import org.scalatest.FunSuite

import it.unich.jandom.domains.numerical.BoxDoubleDomain
import it.unich.jandom.targets.parameters.WideningSpecs._

/**
 * A test for the Widening hierarchy.
 * @author Gianluca Amato <gianluca.amato@unich.it>
 *
 */
class WideningsTest extends FunSuite {
  // All tests use the double-interval (box) abstract domain.
  val BoxDouble = BoxDoubleDomain()

  test("default widening for boxes") {
    val d1 = BoxDouble(Array(0), Array(1))
    val wd = DefaultWidening.get(BoxDouble)
    val d2 = BoxDouble(Array(1), Array(2))
    val d3 = wd(d1, d2)
    // The growing upper bound is widened straight to +inf.
    assertResult(BoxDouble(Array(0), Array(Double.PositiveInfinity))) { d3 }
  }

  test("delayed widening for boxes") {
    val d1 = BoxDouble(Array(0), Array(1))
    // Delay of 2: the first two applications behave like a join,
    // only the third applies the real widening.
    val wd = DelayedWidening(DefaultWidening, 2).get(BoxDouble)
    val d2 = BoxDouble(Array(1), Array(2))
    val d3 = wd(d1, d2)
    assertResult(BoxDouble(Array(0), Array(2))) { d3 }
    val d4 = BoxDouble(Array(2), Array(3))
    val d5 = wd(d3, d4)
    assertResult(BoxDouble(Array(0), Array(3))) { d5 }
    val d6 = BoxDouble(Array(3), Array(4))
    val d7 = wd(d5, d6)
    // Third call: delay exhausted, widening jumps to +inf.
    assertResult(BoxDouble(Array(0), Array(Double.PositiveInfinity))) { d7 }
  }

  test("delayed widening with 0 delay") {
    val d1 = BoxDouble(Array(0), Array(1))
    // Zero delay must degenerate to the plain default widening.
    val wd = DelayedWidening(DefaultWidening, 0).get(BoxDouble)
    val d2 = BoxDouble(Array(1), Array(2))
    val d3 = wd(d1, d2)
    assertResult(BoxDouble(Array(0), Array(Double.PositiveInfinity))) { d3 }
  }

  test("named widenings") {
    val d1 = BoxDouble(Array(0), Array(1))
    val d2 = BoxDouble(Array(1), Array(2))
    // Lookup by registered name must resolve to the default widening.
    val wd = NamedWidening("default").get(BoxDouble)
    assertResult(BoxDouble(Array(0), Array(Double.PositiveInfinity))) { wd(d1, d2) }
    // The implicit String => WideningSpec conversion must behave the same.
    val wd2: WideningSpec = "default"
    assertResult(BoxDouble(Array(0), Array(Double.PositiveInfinity))) { wd2.get(BoxDouble)(d1, d2) }
  }
}
amato-gianluca/Jandom
core/src/test/scala/it/unich/jandom/targets/parameters/WideningsTest.scala
Scala
lgpl-3.0
2,649
package io.flow.dependency.api.lib

import db.ProjectBinaryForm
import io.flow.dependency.v0.models.{BinaryType, Project}
import io.flow.util.{Tag, Version}

import scala.concurrent.{ExecutionContext, Future}

/**
 * Aggregated dependency information for a project. Each field is None when
 * that category was not resolved (as opposed to resolved-but-empty).
 *
 * @param binaries     language/tool binaries (e.g. scala, sbt) the project builds with
 * @param libraries    library artifacts the project depends on
 * @param resolverUris extra resolver URIs declared by the project
 * @param plugins      build-plugin artifacts
 */
case class Dependencies(
  binaries: Option[Seq[ProjectBinaryForm]] = None,
  libraries: Option[Seq[Artifact]] = None,
  resolverUris: Option[Seq[String]] = None,
  plugins: Option[Seq[Artifact]] = None
) {

  /**
   * Libraries and plugins merged into one list; None only when both are None.
   */
  def librariesAndPlugins: Option[Seq[Artifact]] = {
    (libraries, plugins) match {
      case (None, None) => None
      case (Some(lib), None) => Some(lib)
      case (None, Some(plugins)) => Some(plugins)
      case (Some(lib), Some(plugins)) => Some(lib ++ plugins)
    }
  }

  /**
   * Maps each binary to its cross-build version (e.g. scala 2.11.7 -> 2.11).
   * Binaries are sorted by version first, so when the same binary name appears
   * more than once the highest version wins in the resulting map.
   */
  def crossBuildVersion(): Map[BinaryType, Version] = {
    binaries match {
      case None => Map()
      case Some(bins) => {
        bins.sortBy { b => Version(b.version) }.map { bin =>
          bin.name -> DependencyHelper.crossBuildVersion(bin.name, bin.version)
        }.toMap
      }
    }
  }
}

private[lib] object DependencyHelper {
  /**
   * Derives the binary-compatible ("cross-build") version string for a binary.
   * Unknown binary types and unparseable versions fall through unchanged.
   */
  def crossBuildVersion(name: BinaryType, version: String): Version = {
    val versionObject = Version(version)
    name match {
      case BinaryType.Scala => {
        // NOTE(review): `tags.head` assumes Version always parses to at least
        // one tag — confirm against io.flow.util.Version.
        versionObject.tags.head match {
          case Tag.Semver(major, minor, _, _) => {
            // This is most common. We just want major and minor
            // version - e.g. 2.11.7 becomes 2.11.
            Version(s"${major}.${minor}", Seq(Tag.Semver(major, minor, 0)))
          }
          case _ => versionObject
        }
      }
      case BinaryType.Sbt => {
        // Get the binary-compatible version of sbt. Can be found by running `sbt sbtBinaryVersion`
        versionObject.tags.collectFirst {
          case Tag.Semver(1, _, _, _) => Version("1.0")
          case Tag.Semver(0, 13, _, _) => Version("0.13")
        }.getOrElse(versionObject)
      }
      case BinaryType.UNDEFINED(_) => {
        versionObject
      }
    }
  }
}

trait DependencyProvider {

  /**
   * Returns the dependencies for this project.
   */
  def dependencies(project: Project)(implicit ec: ExecutionContext): Future[Dependencies]

}
flowcommerce/dependency
api/app/lib/DependencyProvider.scala
Scala
mit
2,155
package views.acquire

import composition.TestHarness
import helpers.acquire.CookieFactoryForUISpecs
import models.EnterAddressManuallyFormModel.EnterAddressManuallyCacheKey
import org.openqa.selenium.{By, WebElement, WebDriver}
import org.scalatest.selenium.WebBrowser.click
import org.scalatest.selenium.WebBrowser.currentUrl
import org.scalatest.selenium.WebBrowser.go
import org.scalatest.selenium.WebBrowser.pageSource
import org.scalatest.selenium.WebBrowser.pageTitle
import pages.acquire.BusinessChooseYourAddressPage
import pages.acquire.BeforeYouStartPage
import pages.acquire.EnterAddressManuallyPage
import pages.acquire.VehicleLookupPage
import pages.acquire.SetupTradeDetailsPage
import pages.acquire.BusinessChooseYourAddressPage.{back, sadPath, manualAddress, happyPath}
import pages.common.ErrorPanel
import pages.common.Feedback.AcquireEmailFeedbackLink
import SetupTradeDetailsPage.PostcodeValid
import uk.gov.dvla.vehicles.presentation.common.filters.CsrfPreventionAction
import uk.gov.dvla.vehicles.presentation.common.testhelpers.{UiSpec, UiTag}

/**
 * Browser-driven (Selenium) integration tests for the "business choose your
 * address" page of the acquire journey: rendering, address lookup results,
 * CSRF token presence, back navigation and address selection.
 */
class BusinessChooseYourAddressIntegrationSpec extends UiSpec with TestHarness {

  "business choose your address page" should {
    "display the page" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      cacheSetup()
      go to BusinessChooseYourAddressPage
      pageTitle should equal(BusinessChooseYourAddressPage.title)
    }

    "contain feedback email facility with appropriate subject" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      cacheSetup()
      go to BusinessChooseYourAddressPage
      pageSource.contains(AcquireEmailFeedbackLink) should equal(true)
    }

    // Without the trade-details cookie the page must bounce back to setup.
    "redirect when no traderBusinessName is cached" taggedAs UiTag in new WebBrowserForSelenium {
      go to BusinessChooseYourAddressPage
      pageTitle should equal(SetupTradeDetailsPage.title)
    }

    "display appropriate content when address service returns addresses" taggedAs UiTag in new WebBrowserForSelenium {
      SetupTradeDetailsPage.happyPath()
      pageSource.contains("No addresses found for that postcode") should equal(false) // Does not contain message
      pageSource should include( """<a id="enterAddressManuallyButton" href""")
    }

    "display the postcode entered in the previous page" taggedAs UiTag in new WebBrowserForSelenium {
      SetupTradeDetailsPage.happyPath()
      pageSource.contains(PostcodeValid.toUpperCase) should equal(true)
    }

    "display expected addresses in dropdown when address service returns addresses" taggedAs UiTag in new WebBrowserForSelenium {
      SetupTradeDetailsPage.happyPath()
      // Three fake addresses are served by the stubbed address lookup.
      pageSource should include(
        s"presentationProperty stub, 123, property stub, street stub, town stub, area stub, $PostcodeValid"
      )
      pageSource should include(
        s"presentationProperty stub, 456, property stub, street stub, town stub, area stub, $PostcodeValid"
      )
      pageSource should include(
        s"presentationProperty stub, 789, property stub, street stub, town stub, area stub, $PostcodeValid"
      )
    }

    "display appropriate content when address service returns no addresses" taggedAs UiTag in new WebBrowserForSelenium {
      SetupTradeDetailsPage.submitPostcodeWithoutAddresses
      pageSource should include("No addresses found for that postcode") // Does not contain the positive message
    }

    "manualAddress button that is displayed when addresses have been found" should {
      "go to the manual address entry page" taggedAs UiTag in new WebBrowserForSelenium {
        go to BeforeYouStartPage
        cacheSetup()
        go to BusinessChooseYourAddressPage
        click on manualAddress
        pageTitle should equal(EnterAddressManuallyPage.title)
      }
    }

    "manualAddress button that is displayed when no addresses have been found" should {
      "go to the manual address entry page" taggedAs UiTag in new WebBrowserForSelenium {
        SetupTradeDetailsPage.submitPostcodeWithoutAddresses
        click on manualAddress
        pageTitle should equal(EnterAddressManuallyPage.title)
      }
    }

    // CSRF protection: the anti-forgery token must be rendered as a hidden field.
    "contain the hidden csrfToken field" taggedAs UiTag in new WebBrowserForSelenium {
      SetupTradeDetailsPage.happyPath()
      val csrf: WebElement = webDriver.findElement(By.name(CsrfPreventionAction.TokenName))
      csrf.getAttribute("type") should equal("hidden")
      csrf.getAttribute("name") should equal(uk.gov.dvla.vehicles.presentation.common.filters.CsrfPreventionAction.TokenName)
      csrf.getAttribute("value").length > 0 should equal(true)
    }
  }

  "back button" should {
    "display previous page" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      cacheSetup()
      go to BusinessChooseYourAddressPage
      click on back
      pageTitle should equal(SetupTradeDetailsPage.title)
      currentUrl should equal(SetupTradeDetailsPage.url)
    }

    // The "CEG" identifier cookie selects the alternate (ceg) entry route.
    "display previous page with ceg route" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      cacheSetup().withIdentifier("CEG")
      go to BusinessChooseYourAddressPage
      click on back
      pageTitle should equal(SetupTradeDetailsPage.title)
      currentUrl should equal(SetupTradeDetailsPage.cegUrl)
    }
  }

  "select button" should {
    "go to the next page when correct data is entered" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      cacheSetup()
      happyPath
      pageTitle should equal(VehicleLookupPage.title)
    }

    "display validation error messages when addressSelected is not in the list" taggedAs UiTag in new WebBrowserForSelenium {
      go to BeforeYouStartPage
      cacheSetup()
      sadPath
      ErrorPanel.numberOfErrors should equal(1)
    }

    "remove redundant EnterAddressManually cookie (as we are now in an alternate history)" taggedAs UiTag in new PhantomJsByDefault {
      // Simulate a user who previously visited the manual-entry page.
      def cacheSetupVisitedEnterAddressManuallyPage()(implicit webDriver: WebDriver) =
        CookieFactoryForUISpecs
          .setupTradeDetails()
          .enterAddressManually()

      go to BeforeYouStartPage
      cacheSetupVisitedEnterAddressManuallyPage()
      happyPath
      // Verify the cookies identified by the full set of cache keys have been removed
      webDriver.manage().getCookieNamed(EnterAddressManuallyCacheKey) should equal(null)
    }
  }

  // Baseline cookie state shared by most tests: trade details are cached.
  private def cacheSetup()(implicit webDriver: WebDriver) =
    CookieFactoryForUISpecs.setupTradeDetails()
}
dvla/vehicles-acquire-online
test/views/acquire/BusinessChooseYourAddressIntegrationSpec.scala
Scala
mit
6,527
package blended.security

import javax.security.auth.Subject

/**
 * Resolves the effective permissions granted to an authenticated JAAS subject.
 */
trait BlendedPermissionManager {

  /**
   * @param subject the authenticated JAAS subject
   * @return the permissions associated with the subject
   */
  def permissions(subject: Subject): BlendedPermissions
}
woq-blended/blended
blended.security/jvm/src/main/scala/blended/security/BlendedPermissionManager.scala
Scala
apache-2.0
157
package com.yuvimasory.flashcards

import org.scalatest.FunSuite

import FlashcardParser._
import FlashcardParser.ComponentParsers._
import StringUtils._

/**
 * Parser tests for the `back` component of a flashcard: rejection of empty
 * input, trimming of surrounding blank lines, and stopping before the next
 * card's front section.
 *
 * NOTE(review): the triple-quoted fixtures below are reproduced exactly as
 * extracted; their original internal line breaks may have been collapsed to
 * spaces by the source extraction — verify against the upstream repository.
 */
class BackParserTest extends FunSuite {

  test("`back` rejects empty string") {
    parseAll(back, Empty) match {
      case NoSuccess(_, _) => // expected: empty input must not parse
      case Success(_, _) => fail
    }
  }

  test("iss11 - `back` eliminates leading and trailing empty lines") {
    // substring(1) drops the leading character of the literal.
    val str = """ hello world """ substring(1)
    parseAll(back, str) match {
      case Success(Back(List(one, two, three)), _) => // expected: exactly three elements
      case _ => fail("PARSE ERROR")
    }
  }

  test("`back` doesn't over parse") {
    // The `*` marks the start of the next card's front; `back` must stop there.
    val str = """ hello * next front """ substring(1)
    parseAll(back, str) match {
      case NoSuccess(_, _) => // expected: consuming the whole input must fail
      case _ => fail
    }
  }

  test("`back` complex case") {
    val str = """ ` hello world ` hello goodbye ` hello world ` """ substring(1)
    parseAll(back, str) match {
      case Success(Back(List(one, two, three, four, five, six)), _) => // expected: six elements
      case _ => fail("PARSE ERROR")
    }
  }
}
ymasory/Flashup
src/test/scala/BackParserTest.scala
Scala
agpl-3.0
1,057
package composition.webserviceclients.vrmretentioneligibility import com.tzavellas.sse.guice.ScalaModule import composition.webserviceclients.vrmretentioneligibility.Helper.createResponse import org.mockito.Matchers.any import org.mockito.Mockito.when import org.scalatest.mock.MockitoSugar import play.api.http.Status.OK import scala.concurrent.Future import uk.gov.dvla.vehicles.presentation.common.clientsidesession.TrackingId import webserviceclients.fakes.VehicleAndKeeperLookupWebServiceConstants.RegistrationNumberValid import webserviceclients.fakes.VrmRetentionEligibilityWebServiceConstants.ReplacementRegistrationNumberValid import webserviceclients.vrmretentioneligibility.VRMRetentionEligibilityRequest import webserviceclients.vrmretentioneligibility.VRMRetentionEligibilityResponse import webserviceclients.vrmretentioneligibility.VRMRetentionEligibilityResponseDto import webserviceclients.vrmretentioneligibility.VRMRetentionEligibilityWebService final class EligibilityWebServiceCallWithCurrentAndReplacement() extends ScalaModule with MockitoSugar { private val withCurrentAndReplacement: (Int, VRMRetentionEligibilityResponseDto) = { (OK, VRMRetentionEligibilityResponseDto( None, VRMRetentionEligibilityResponse( currentVRM = RegistrationNumberValid, replacementVRM = Some(ReplacementRegistrationNumberValid) )) ) } val stub = { val webService = mock[VRMRetentionEligibilityWebService] when(webService.invoke(any[VRMRetentionEligibilityRequest], any[TrackingId])) .thenReturn(Future.successful(createResponse(withCurrentAndReplacement))) webService } def configure() = bind[VRMRetentionEligibilityWebService].toInstance(stub) }
dvla/vrm-retention-online
test/composition/webserviceclients/vrmretentioneligibility/EligibilityWebServiceCallWithCurrentAndReplacement.scala
Scala
mit
1,723
package org.usagram.clarify.error import org.usagram.clarify.{ Indefinite, Tags } import org.scalatest._ class TooShortStringSpec extends FunSpec { import Matchers._ describe("#message") { val error = TooShortString(100) describe("when Tags#label is Some(string)") { val tags = Tags(Indefinite(Some("a label"))) it("returns a message include the label") { error.message(tags) should be("a label is too short, requires at least 100 characters") } } describe("when Tags#label is None") { val tags = Tags(Indefinite(None)) it("returns a message include the default label") { error.message(tags) should be("(no label) is too short, requires at least 100 characters") } } describe("when limit = 1") { val tags = Tags(Indefinite(None)) val error = TooShortString(1) it("returns a message with unit \"character\"") { error.message(tags) should be("(no label) is too short, requires at least 1 character") } } } }
takkkun/clarify
core/src/test/scala/org/usagram/clarify/error/TooShortStringSpec.scala
Scala
mit
1,035
/* * Copyright 2014–2020 SlamData Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package quasar.qsu import slamdata.Predef._ import quasar.Qspec import quasar.IdStatus.{ExcludeId, IncludeId} import quasar.api.resource.ResourcePath import quasar.common.{JoinType, SortDir} import quasar.contrib.pathy.AFile import quasar.ejson.{EJson, Fixed} import quasar.fp._ import quasar.contrib.iota._ import quasar.qscript.{ construction, educatedToTotal, Hole, HoleF, JoinSide, OnUndefined, PlannerError, ReduceFunc, ReduceFuncs, ReduceIndex, ReduceIndexF, ShiftType, SrcHole, Take } import quasar.qscript.PlannerError.InternalError import quasar.qscript.MapFuncsCore.{IntLit, RecIntLit} import quasar.qsu.ApplyProvenance.AuthenticatedQSU import matryoshka.EqualT import matryoshka.data.Fix import Fix._ import org.specs2.matcher.{Expectable, MatchResult, Matcher} import pathy.Path import Path.{Sandboxed, file} import scalaz.{EitherT, Free, Need, StateT, \\/, \\/-, NonEmptyList => NEL} import scalaz.Scalaz._ object GraduateSpec extends Qspec with QSUTTypes[Fix] { import QScriptUniform.Rotation type F[A] = EitherT[StateT[Need, Long, ?], PlannerError, A] type QSU[A] = QScriptUniform[A] type QSE[A] = QScriptEducated[A] val grad = Graduate[Fix, F] _ val qsu = QScriptUniform.DslT[Fix] val defaults = construction.mkDefaults[Fix, QSE] val func = defaults.func val recFunc = defaults.recFunc val fqse = defaults.free val qse = defaults.fix val root = Path.rootDir[Sandboxed] val afile: AFile = root </> 
file("foobar") val path: ResourcePath = ResourcePath.leaf(afile) "graduating QSU to QScript" should { "convert the QScript-ish nodes" >> { "convert Read" in { val qgraph: Fix[QSU] = qsu.read(afile, ExcludeId) val qscript: Fix[QSE] = qse.Read[ResourcePath](path, ExcludeId) qgraph must graduateAs(qscript) } "convert Map" in { val fm: RecFreeMap = recFunc.Add(recFunc.Hole, RecIntLit(17)) val qgraph: Fix[QSU] = qsu.map(qsu.read(afile, ExcludeId), fm) val qscript: Fix[QSE] = qse.Map(qse.Read[ResourcePath](path, ExcludeId), fm) qgraph must graduateAs(qscript) } "convert QSFilter" in { val fm: RecFreeMap = recFunc.Add(recFunc.Hole, RecIntLit(17)) val qgraph: Fix[QSU] = qsu.qsFilter(qsu.read(afile, ExcludeId), fm) val qscript: Fix[QSE] = qse.Filter(qse.Read[ResourcePath](path, ExcludeId), fm) qgraph must graduateAs(qscript) } "convert QSReduce" in { val buckets: List[FreeMap] = List(func.Add(HoleF, IntLit(17))) val abuckets: List[FreeAccess[Hole]] = buckets.map(_.map(Access.value[Hole](_))) val reducers: List[ReduceFunc[FreeMap]] = List(ReduceFuncs.Count(HoleF)) val repair: FreeMapA[ReduceIndex] = ReduceIndexF(\\/-(0)) val qgraph: Fix[QSU] = qsu.qsReduce(qsu.read(afile, ExcludeId), abuckets, reducers, repair) val qscript: Fix[QSE] = qse.Reduce(qse.Read[ResourcePath](path, ExcludeId), buckets, reducers, repair) qgraph must graduateAs(qscript) } "convert LeftShift" in { val struct: RecFreeMap = recFunc.Add(recFunc.Hole, recFunc.Constant(Fixed[Fix[EJson]].int(17))) val repair: JoinFunc = func.ConcatArrays( func.MakeArray(func.LeftSide), func.ConcatArrays( func.LeftSide, func.MakeArray(func.RightSide))) val qgraph: Fix[QSU] = qsu.leftShift(qsu.read(afile, ExcludeId), struct, IncludeId, OnUndefined.Omit, repair, Rotation.ShiftArray) val qscript: Fix[QSE] = qse.LeftShift(qse.Read[ResourcePath](path, ExcludeId), struct, IncludeId, ShiftType.Array, OnUndefined.Omit, repair) qgraph must graduateAs(qscript) } "convert QSSort" in { val buckets: List[FreeMap] = List(func.Add(HoleF, 
IntLit(17))) val abuckets: List[FreeAccess[Hole]] = buckets.map(_.map(Access.value[Hole](_))) val order: NEL[(FreeMap, SortDir)] = NEL(HoleF -> SortDir.Descending) val qgraph: Fix[QSU] = qsu.qsSort(qsu.read(afile, ExcludeId), abuckets, order) val qscript: Fix[QSE] = qse.Sort(qse.Read[ResourcePath](path, ExcludeId), buckets, order) qgraph must graduateAs(qscript) } "convert Distinct" in { val qgraph: Fix[QSU] = qsu.distinct(qsu.read(afile, ExcludeId)) val qscript: Fix[QSE] = qse.Reduce( qse.Read[ResourcePath](path, ExcludeId), List(HoleF), List(ReduceFuncs.Arbitrary(HoleF)), ReduceIndexF(\\/-(0))) qgraph must graduateAs(qscript) } "convert Unreferenced" in { val qgraph: Fix[QSU] = qsu.unreferenced() val qscript: Fix[QSE] = qse.Unreferenced qgraph must graduateAs(qscript) } } "fail to convert the LP-ish nodes" >> { "not convert LPFilter" in { val qgraph: Fix[QSU] = qsu.lpFilter(qsu.read(afile, ExcludeId), qsu.read(afile, ExcludeId)) qgraph must notGraduate } } "graduate naive `select * from zips`" in { val concatArr = func.ConcatArrays( func.MakeArray(func.LeftSide), func.MakeArray(func.RightSide)) val projectIdx = func.ProjectIndex(func.LeftSide, func.RightSide) val qgraph = qsu.subset( qsu.thetaJoin( qsu.leftShift( qsu.read(root </> file("zips"), ExcludeId), recFunc.Hole, IncludeId, OnUndefined.Omit, concatArr, Rotation.FlattenArray), qsu.cint(1), func.Constant[JoinSide](Fixed[Fix[EJson]].bool(true)), JoinType.Inner, projectIdx), Take, qsu.cint(11)) val lhs: Free[QSE, Hole] = fqse.LeftShift( fqse.Read(ResourcePath.leaf(root </> file("zips")), ExcludeId), recFunc.Hole, IncludeId, ShiftType.Array, OnUndefined.Omit, concatArr) val rhs: Free[QSE, Hole] = fqse.Map(fqse.Unreferenced, recFunc.Constant(Fixed[Fix[EJson]].int(1))) val qscript = qse.Subset( qse.Unreferenced, fqse.ThetaJoin( fqse.Unreferenced, lhs, rhs, func.Constant(Fixed[Fix[EJson]].bool(true)), JoinType.Inner, projectIdx), Take, fqse.Map( Free.pure[QSE, Hole](SrcHole), 
recFunc.Constant(Fixed[Fix[EJson]].int(11)))) qgraph must graduateAs(qscript) } } def graduateAs(expected: Fix[QSE]): Matcher[Fix[QSU]] = { new Matcher[Fix[QSU]] { def apply[S <: Fix[QSU]](s: Expectable[S]): MatchResult[S] = { val authd = AuthenticatedQSU(QSUGraph.fromTree[Fix](s.value), QAuth.empty[Fix, Unit]) val actual: PlannerError \\/ Fix[QSE] = evaluate(ReifyIdentities[Fix, F](authd) >>= grad) actual.bimap[MatchResult[S], MatchResult[S]]( { err => failure(s"graduating produced unexpected planner error: ${err.shows}", s) }, { qscript => result( EqualT[Fix].equal[QSE](qscript, expected), s"received expected qscript:\\n${qscript.shows}", s"received unexpected qscript:\\n${qscript.shows}\\nexpected:\\n${expected.shows}", s) }).merge } } } def notGraduate: Matcher[Fix[QSU]] = { new Matcher[Fix[QSU]] { def apply[S <: Fix[QSU]](s: Expectable[S]): MatchResult[S] = { val authd = AuthenticatedQSU(QSUGraph.fromTree[Fix](s.value), QAuth.empty[Fix, Unit]) val actual: PlannerError \\/ Fix[QSE] = evaluate(ReifyIdentities[Fix, F](authd) >>= grad) // TODO better equality checking for PlannerError actual.bimap[MatchResult[S], MatchResult[S]]( { case err @ InternalError(_, None) => success(s"received expected InternalError: ${(err: PlannerError).shows}", s) case err => failure(s"expected an InternalError without a cause, received: ${err.shows}", s) }, { qscript => failure(s"expected an error but found qscript:\\n${qscript.shows}", s) }).merge } } } def evaluate[A](fa: F[A]): PlannerError \\/ A = fa.run.eval(0L).value }
slamdata/quasar
qsu/src/test/scala/quasar/qsu/GraduateSpec.scala
Scala
apache-2.0
8,897
class A { def foo(x: Int): Int = x } val a = new A val thisisint = 45 a foo th/*caret*/ //thisisint
ilinum/intellij-scala
testdata/completion/smart/infix/SmartRightExprNotValTypeInfix.scala
Scala
apache-2.0
103
/* * Copyright 2001-2013 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest import org.scalactic.Equality import org.scalactic.Explicitly import org.scalactic.StringNormalizations._ import org.scalactic.Uniformity import org.scalactic.Entry import collection.GenTraversable import SharedHelpers._ import Matchers._ class OnlyContainMatcherDeciderSpec extends Spec with Explicitly { val mapTrimmed: Uniformity[(Int, String)] = new Uniformity[(Int, String)] { def normalized(s: (Int, String)): (Int, String) = (s._1, s._2.trim) def normalizedCanHandle(b: Any) = b match { case (_: Int, _: String) => true case _ => false } def normalizedOrSame(b: Any): Any = b match { case (k: Int, v: String) => normalized((k, v)) case _ => b } } val javaMapTrimmed: Uniformity[java.util.Map.Entry[Int, String]] = new Uniformity[java.util.Map.Entry[Int, String]] { def normalized(s: java.util.Map.Entry[Int, String]): java.util.Map.Entry[Int, String] = Entry(s.getKey, s.getValue.trim) def normalizedCanHandle(b: Any) = b match { case entry: java.util.Map.Entry[_, _] => (entry.getKey, entry.getValue) match { case (_: Int, _: String) => true case _ => false } case _ => false } def normalizedOrSame(b: Any): Any = b match { case entry: java.util.Map.Entry[_, _] => (entry.getKey, entry.getValue) match { case (k: Int, v: String) => normalized(Entry(k, v)) case _ => b } case _ => b } } val incremented: Uniformity[Int] = new Uniformity[Int] { var count = 0 def normalized(s: Int): Int = 
{ count += 1 s + count } def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[Int] def normalizedOrSame(b: Any): Any = b match { case i: Int => normalized(i) case _ => b } } val mapIncremented: Uniformity[(Int, String)] = new Uniformity[(Int, String)] { var count = 0 def normalized(s: (Int, String)): (Int, String) = { count += 1 (s._1 + count, s._2) } def normalizedCanHandle(b: Any) = b match { case (_: Int, _: String) => true case _ => false } def normalizedOrSame(b: Any): Any = b match { case (k: Int, v: String) => normalized((k, v)) case _ => b } } val appended: Uniformity[String] = new Uniformity[String] { var count = 0 def normalized(s: String): String = { count += 1 s + count } def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String] def normalizedOrSame(b: Any): Any = b match { case s: String => normalized(s) case _ => b } } val mapAppended: Uniformity[(Int, String)] = new Uniformity[(Int, String)] { var count = 0 def normalized(s: (Int, String)): (Int, String) = { count += 1 (s._1, s._2 + count) } def normalizedCanHandle(b: Any) = b match { case (_: Int, _: String) => true case _ => false } def normalizedOrSame(b: Any): Any = b match { case (k: Int, v: String) => normalized((k, v)) case _ => b } } val javaMapAppended: Uniformity[java.util.Map.Entry[Int, String]] = new Uniformity[java.util.Map.Entry[Int, String]] { var count = 0 def normalized(s: java.util.Map.Entry[Int, String]): java.util.Map.Entry[Int, String] = { count += 1 Entry(s.getKey, s.getValue + count) } def normalizedCanHandle(b: Any) = b match { case entry: java.util.Map.Entry[_, _] => (entry.getKey, entry.getValue) match { case (_: Int, _: String) => true case _ => false } case _ => false } def normalizedOrSame(b: Any): Any = b match { case entry: java.util.Map.Entry[_, _] => (entry.getKey, entry.getValue) match { case (k: Int, v: String) => normalized(Entry(k, v)) case _ => b } case _ => b } } val lowerCaseEquality = new Equality[String] { def areEqual(left: String, right: Any) = 
left.toLowerCase == (right match { case s: String => s.toLowerCase case other => other }) } val mapLowerCaseEquality = new Equality[(Int, String)] { def areEqual(left: (Int, String), right: Any) = right match { case t2: Tuple2[_, _] => left._1 == t2._1 && left._2.toLowerCase == (t2._2 match { case s: String => s.toLowerCase case other => other }) case right => left == right } } val javaMapLowerCaseEquality = new Equality[java.util.Map.Entry[Int, String]] { def areEqual(left: java.util.Map.Entry[Int, String], right: Any) = right match { case entry: java.util.Map.Entry[_, _] => left.getKey == entry.getKey && left.getValue.toLowerCase == (entry.getValue match { case s: String => s.toLowerCase case other => other }) case right => left == right } } val reverseEquality = new Equality[String] { def areEqual(left: String, right: Any) = left.reverse == (right match { case s: String => s.toLowerCase case other => other }) } val mapReverseEquality = new Equality[(Int, String)] { def areEqual(left: (Int, String), right: Any) = right match { case t2: Tuple2[_, _] => left._1 == t2._1 && left._2.reverse == (t2._2 match { case s: String => s.toLowerCase case other => other }) case right => left == right } } val javaMapReverseEquality = new Equality[java.util.Map.Entry[Int, String]] { def areEqual(left: java.util.Map.Entry[Int, String], right: Any) = right match { case entry: java.util.Map.Entry[_, _] => left.getKey == entry.getKey && left.getValue.reverse == (entry.getValue match { case s: String => s.toLowerCase case other => other }) case right => left == right } } object `only ` { def checkShouldContainStackDepth(e: exceptions.StackDepthException, left: Any, right: GenTraversable[Any], lineNumber: Int) { val leftText = FailureMessages.decorateToStringValue(left) e.message should be (Some(leftText + " did not contain only (" + right.map(FailureMessages.decorateToStringValue).mkString(", ") + ")")) e.failedCodeFileName should be (Some("OnlyContainMatcherDeciderSpec.scala")) 
e.failedCodeLineNumber should be (Some(lineNumber)) } def checkShouldNotContainStackDepth(e: exceptions.StackDepthException, left: Any, right: GenTraversable[Any], lineNumber: Int) { val leftText = FailureMessages.decorateToStringValue(left) e.message should be (Some(leftText + " contained only (" + right.map(FailureMessages.decorateToStringValue).mkString(", ") + ")")) e.failedCodeFileName should be (Some("OnlyContainMatcherDeciderSpec.scala")) e.failedCodeLineNumber should be (Some(lineNumber)) } def `should take specified normalization when 'should contain' is used` { (List("1", " 2", "3") should contain only (" 1", "2 ", " 3")) (after being trimmed) (Set("1", " 2", "3") should contain only (" 1", "2 ", " 3")) (after being trimmed) (Array("1", " 2", "3") should contain only (" 1", "2 ", " 3")) (after being trimmed) (javaList("1", " 2", "3") should contain only (" 1", "2 ", " 3")) (after being trimmed) (javaSet("1", " 2", "3") should contain only (" 1", "2 ", " 3")) (after being trimmed) (Map(1 -> "one", 2 -> " two", 3 -> "three") should contain only (1 -> " one", 2 -> "two ", 3 -> " three")) (after being mapTrimmed) (javaMap(Entry(1, "one"), Entry(2, " two"), Entry(3, "three")) should contain only (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (after being javaMapTrimmed) } def `should take specified normalization when 'should not contain' is used` { (List("1", "2", "3") should not contain only ("1", "2", "3")) (after being appended) (Set("1", "2", "3") should not contain only ("1", "2", "3")) (after being appended) (Array("1", "2", "3") should not contain only ("1", "2", "3")) (after being appended) (javaList("1", "2", "3") should not contain only ("1", "2", "3")) (after being appended) (javaSet("1", "2", "3") should not contain only ("1", "2", "3")) (after being appended) (Map(1 -> "one", 2 -> "two", 3 -> "three") should not contain only (1 -> "one", 2 -> "two", 3 -> "three")) (after being mapAppended) (javaMap(Entry(1, "one"), Entry(2, "two"), 
Entry(3, "three")) should not contain only (Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))) (after being javaMapAppended) } def `should throw TestFailedException with correct stack depth and message when 'should contain custom matcher' failed with specified normalization` { val left1 = List("1", "2", "3") val e1 = intercept[exceptions.TestFailedException] { (left1 should contain only ("1", "2", "3")) (after being appended) } checkShouldContainStackDepth(e1, left1, Array("1", "2", "3").deep, thisLineNumber - 2) val left2 = Set("1", "2", "3") val e2 = intercept[exceptions.TestFailedException] { (left2 should contain only ("1", "2", "3")) (after being appended) } checkShouldContainStackDepth(e2, left2, Array("1", "2", "3").deep, thisLineNumber - 2) val left3 = Array("1", "2", "3") val e3 = intercept[exceptions.TestFailedException] { (left3 should contain only ("1", "2", "3")) (after being appended) } checkShouldContainStackDepth(e3, left3, Array("1", "2", "3").deep, thisLineNumber - 2) val left4 = javaList("1", "2", "3") val e4 = intercept[exceptions.TestFailedException] { (left4 should contain only ("1", "2", "3")) (after being appended) } checkShouldContainStackDepth(e4, left4, Array("1", "2", "3").deep, thisLineNumber - 2) val left5 = Map(1 -> "one", 2 -> "two", 3 -> "three") val e5 = intercept[exceptions.TestFailedException] { (left5 should contain only (1 -> "one", 2 -> "two", 3 -> "three")) (after being mapAppended) } checkShouldContainStackDepth(e5, left5, Array(1 -> "one", 2 -> "two", 3 -> "three").deep, thisLineNumber - 2) val left6 = javaMap(Entry(1, "one"), Entry(2, "two"), Entry(3, "three")) val e6 = intercept[exceptions.TestFailedException] { (left6 should contain only (Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))) (after being javaMapAppended) } checkShouldContainStackDepth(e6, left6, Array(Entry(1, "one"), Entry(2, "two"), Entry(3, "three")).deep, thisLineNumber - 2) } def `should throw TestFailedException with correct stack depth and 
message when 'should not contain custom matcher' failed with specified normalization` { val left1 = List("1", " 2", "3") val e1 = intercept[exceptions.TestFailedException] { (left1 should not contain only (" 1", "2 ", " 3")) (after being trimmed) } checkShouldNotContainStackDepth(e1, left1, Array(" 1", "2 ", " 3").deep, thisLineNumber - 2) val left2 = Set("1", " 2", "3") val e2 = intercept[exceptions.TestFailedException] { (left2 should not contain only (" 1", "2 ", " 3")) (after being trimmed) } checkShouldNotContainStackDepth(e2, left2, Array(" 1", "2 ", " 3").deep, thisLineNumber - 2) val left3 = Array("1", " 2", "3") val e3 = intercept[exceptions.TestFailedException] { (left3 should not contain only (" 1", "2 ", " 3")) (after being trimmed) } checkShouldNotContainStackDepth(e3, left3, Array(" 1", "2 ", " 3").deep, thisLineNumber - 2) val left4 = javaList("1", " 2", "3") val e4 = intercept[exceptions.TestFailedException] { (left4 should not contain only (" 1", "2 ", " 3")) (after being trimmed) } checkShouldNotContainStackDepth(e4, left4, Array(" 1", "2 ", " 3").deep, thisLineNumber - 2) val left5 = Map(1 -> "one", 2 -> " two", 3 -> "three") val e5 = intercept[exceptions.TestFailedException] { (left5 should not contain only (1 -> " one", 2 -> "two ", 3 -> " three")) (after being mapTrimmed) } checkShouldNotContainStackDepth(e5, left5, Array(1 -> " one", 2 -> "two ", 3 -> " three").deep, thisLineNumber - 2) val left6 = javaMap(Entry(1, "one"), Entry(2, " two"), Entry(3, "three")) val e6 = intercept[exceptions.TestFailedException] { (left6 should not contain only (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (after being javaMapTrimmed) } checkShouldNotContainStackDepth(e6, left6, Array(Entry(1, " one"), Entry(2, "two "), Entry(3, " three")).deep, thisLineNumber - 2) } def `should take specified equality and normalization when 'should contain' is used` { (List("ONE ", " TWO", "THREE ") should contain only (" one", "two ", " three")) (decided by 
lowerCaseEquality afterBeing trimmed) (Set("ONE ", " TWO", "THREE ") should contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed) (Array("ONE ", " TWO", "THREE ") should contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed) (javaList("ONE ", " TWO", "THREE ") should contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed) (Map(1 -> "ONE ", 2 -> " TWO", 3 -> "THREE ") should contain only (1 -> " one", 2 -> "two ", 3 -> " three")) (decided by mapLowerCaseEquality afterBeing mapTrimmed) (javaMap(Entry(1, "ONE "), Entry(2, " TWO"), Entry(3, "THREE ")) should contain only (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (decided by javaMapLowerCaseEquality afterBeing javaMapTrimmed) } def `should take specified equality and normalization when 'should not contain' is used` { (List("one ", " two", "three ") should not contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed) (Set("one ", " two", "three ") should not contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed) (Array("one ", " two", "three ") should not contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed) (javaList("one ", " two", "three ") should not contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed) (Map(1 -> "one ", 2 -> " two", 3 -> "three ") should not contain only (1 -> " one", 2 -> "two ", 3 -> " three")) (decided by mapReverseEquality afterBeing mapTrimmed) (javaMap(Entry(1, "one "), Entry(2, " two"), Entry(3, "three ")) should not contain only (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (decided by javaMapReverseEquality afterBeing javaMapTrimmed) } def `should throw TestFailedException with correct stack depth and message when 'should contain custom matcher' failed with specified equality and normalization` { val left1 = List("one ", " 
two", "three ") val e1 = intercept[exceptions.TestFailedException] { (left1 should contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed) } checkShouldContainStackDepth(e1, left1, Array(" one", "two ", " three").deep, thisLineNumber - 2) val left2 = Set("one ", " two", "three ") val e2 = intercept[exceptions.TestFailedException] { (left2 should contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed) } checkShouldContainStackDepth(e2, left2, Array(" one", "two ", " three").deep, thisLineNumber - 2) val left3 = Array("one ", " two", "three ") val e3 = intercept[exceptions.TestFailedException] { (left3 should contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed) } checkShouldContainStackDepth(e3, left3, Array(" one", "two ", " three").deep, thisLineNumber - 2) val left4 = javaList("one ", " two", "three ") val e4 = intercept[exceptions.TestFailedException] { (left4 should contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed) } checkShouldContainStackDepth(e4, left4, Array(" one", "two ", " three").deep, thisLineNumber - 2) val left5 = Map(1 -> "one ", 2 -> " two", 3 -> "three ") val e5 = intercept[exceptions.TestFailedException] { (left5 should contain only (1 -> " one", 2 -> "two ", 3 -> " three")) (decided by mapReverseEquality afterBeing mapTrimmed) } checkShouldContainStackDepth(e5, left5, Array(1 -> " one", 2 -> "two ", 3 -> " three").deep, thisLineNumber - 2) val left6 = javaMap(Entry(1, "one "), Entry(2, " two"), Entry(3, "three ")) val e6 = intercept[exceptions.TestFailedException] { (left6 should contain only (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (decided by javaMapReverseEquality afterBeing javaMapTrimmed) } checkShouldContainStackDepth(e6, left6, Array(Entry(1, " one"), Entry(2, "two "), Entry(3, " three")).deep, thisLineNumber - 2) } def `should throw TestFailedException with correct stack depth and 
message when 'should not contain custom matcher' failed with specified equality and normalization` { val left1 = List("ONE ", " TWO", "THREE ") val e1 = intercept[exceptions.TestFailedException] { (left1 should not contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed) } checkShouldNotContainStackDepth(e1, left1, Array(" one", "two ", " three").deep, thisLineNumber - 2) val left2 = Set("ONE ", " TWO", "THREE ") val e2 = intercept[exceptions.TestFailedException] { (left2 should not contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed) } checkShouldNotContainStackDepth(e2, left2, Array(" one", "two ", " three").deep, thisLineNumber - 2) val left3 = Array("ONE ", " TWO", "THREE ") val e3 = intercept[exceptions.TestFailedException] { (left3 should not contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed) } checkShouldNotContainStackDepth(e3, left3, Array(" one", "two ", " three").deep, thisLineNumber - 2) val left4 = javaList("ONE ", " TWO", "THREE ") val e4 = intercept[exceptions.TestFailedException] { (left4 should not contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed) } checkShouldNotContainStackDepth(e4, left4, Array(" one", "two ", " three").deep, thisLineNumber - 2) val left5 = Map(1 -> "ONE ", 2 -> " TWO", 3 -> "THREE ") val e5 = intercept[exceptions.TestFailedException] { (left5 should not contain only (1 -> " one ", 2 -> "two ", 3 -> " three")) (decided by mapLowerCaseEquality afterBeing mapTrimmed) } checkShouldNotContainStackDepth(e5, left5, Array(1 -> " one ", 2 -> "two ", 3 -> " three").deep, thisLineNumber - 2) val left6 = javaMap(Entry(1, "ONE "), Entry(2, " TWO"), Entry(3, "THREE ")) val e6 = intercept[exceptions.TestFailedException] { (left6 should not contain only (Entry(1, " one "), Entry(2, "two "), Entry(3, " three"))) (decided by javaMapLowerCaseEquality afterBeing javaMapTrimmed) } 
checkShouldNotContainStackDepth(e6, left6, Array(Entry(1, " one "), Entry(2, "two "), Entry(3, " three")).deep, thisLineNumber - 2) } } }
cheeseng/scalatest
scalatest-test/src/test/scala/org/scalatest/OnlyContainMatcherDeciderSpec.scala
Scala
apache-2.0
21,096
/* Copyright (c) 2017-2021, Robby, Kansas State University All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
package org.sireum

// NOTE(review): the newlines of this region were collapsed in the text under
// review; only line structure (whitespace) is restored below — the token
// stream is unchanged.
//
// `contract` is the user-facing facade of the Sireum behavioral-contract DSL.
// Everything in it is either:
//   (a) a `macro Macro.lUnitN[...]` whose visible effect is to expand the call
//       away at compile time — the numeric suffix matches the argument count
//       and a trailing `S` marks a final vararg parameter; or
//   (b) a `???` stub that exists only so contract expressions type-check —
//       presumably erased/ignored before execution (TODO confirm against the
//       `$internal.Macro` implementations, which are outside this file).
trait contract {

  import language.experimental.macros
  import $internal.Macro

  // Specification-only block: the by-name body is type-checked, then the whole
  // call is erased by the macro, so it has no runtime effect.
  object Spec {
    def apply(arg0: => Unit): Unit = macro Macro.lUnit1
  }

  // Method-contract declaration. The overload set enumerates every clause
  // combination in the fixed clause order Reads, Requires, Modifies, Ensures,
  // plus case-based contracts, a string-annotation form, a data-refinement
  // form, and `Havoc`.
  object Contract {
    def apply(arg0: Reads): Unit = macro Macro.lUnit1
    def apply(arg0: Requires): Unit = macro Macro.lUnit1
    def apply(arg0: Modifies): Unit = macro Macro.lUnit1
    def apply(arg0: Ensures): Unit = macro Macro.lUnit1
    def apply(arg0: Reads, arg1: Requires): Unit = macro Macro.lUnit2
    def apply(arg0: Reads, arg1: Modifies): Unit = macro Macro.lUnit2
    def apply(arg0: Reads, arg1: Ensures): Unit = macro Macro.lUnit2
    def apply(arg0: Requires, arg1: Modifies): Unit = macro Macro.lUnit2
    def apply(arg0: Requires, arg1: Ensures): Unit = macro Macro.lUnit2
    def apply(arg0: Modifies, arg1: Ensures): Unit = macro Macro.lUnit2
    def apply(arg0: Reads, arg1: Requires, arg2: Modifies): Unit = macro Macro.lUnit3
    def apply(arg0: Reads, arg1: Modifies, arg2: Ensures): Unit = macro Macro.lUnit3
    def apply(arg0: Reads, arg1: Requires, arg2: Ensures): Unit = macro Macro.lUnit3
    def apply(arg0: Requires, arg1: Modifies, arg2: Ensures): Unit = macro Macro.lUnit3
    def apply(arg0: Reads, arg1: Requires, arg2: Modifies, arg3: Ensures): Unit = macro Macro.lUnit4
    def apply(arg0: Case, arg1: Case*): Unit = macro Macro.lUnit1S
    def apply(arg0: Reads, arg1: Case, arg2: Case*): Unit = macro Macro.lUnit2S
    def apply(arg0: Modifies, arg1: Case, arg2: Case*): Unit = macro Macro.lUnit2S
    def apply(arg0: Reads, arg1: Modifies, arg2: Case, arg3: Case*): Unit = macro Macro.lUnit3S
    def apply(arg0: String): Unit = macro Macro.lUnit1
    def apply(arg0: DataRefinement): Unit = macro Macro.lUnit1
    def Havoc(arg0: Any, arg1: Any*): Unit = macro Macro.lUnit1S

    // Contract-only expression of type T (a specification value with the given
    // clauses). All overloads are `???` stubs; note this set deliberately
    // omits the Reads+Requires+Ensures triple present in `Contract.apply`.
    object Only {
      def apply[T](reads: Reads): T = ???
      def apply[T](requires: Requires): T = ???
      def apply[T](modifies: Modifies): T = ???
      def apply[T](ensures: Ensures): T = ???
      def apply[T](reads: Reads, requires: Requires): T = ???
      def apply[T](reads: Reads, modifies: Modifies): T = ???
      def apply[T](reads: Reads, ensures: Ensures): T = ???
      def apply[T](requires: Requires, modifies: Modifies): T = ???
      def apply[T](requires: Requires, ensures: Ensures): T = ???
      def apply[T](modifies: Modifies, ensures: Ensures): T = ???
      def apply[T](reads: Reads, requires: Requires, modifies: Modifies): T = ???
      def apply[T](reads: Reads, modifies: Modifies, ensures: Ensures): T = ???
      def apply[T](requires: Requires, modifies: Modifies, ensures: Ensures): T = ???
      def apply[T](reads: Reads, requires: Requires, modifies: Modifies, ensures: Ensures): T = ???
      def apply[T](case0: Case, cases: Case*): T = ???
      def apply[T](reads: Reads, case0: Case, cases: Case*): T = ???
      def apply[T](modifies: Modifies, case0: Case, cases: Case*): T = ???
      def apply[T](reads: Reads, modifies: Modifies, case0: Case, cases: Case*): T = ???
    }

    // Marker/builder types of the DSL. They carry no runtime behavior here;
    // their only purpose is to shape what the type-checker accepts inside
    // contract expressions.
    trait State {
      def apply[T](o: T): T
      def ~[T](o: T): StatePost
    }
    trait StateCont {
      def ~[T](o: T): StatePost
    }
    trait StatePost {
      def ~(post: State): StateCont
    }
    trait Case
    trait Requires
    trait Reads
    trait Modifies
    trait Ensures
    trait MaxIt
    trait Sequent {
      def Proof(steps: ProofStep*): Sequent
    }
    trait Proof
    trait SubProof
    trait ProofStep
    trait Justification
    trait Invariant
    trait DataRefinement
    trait Lemma
    trait Theorem
    trait Fact
    trait Assume
    trait Assert
    trait Let

    // Builder for a proof step: `n #> claim by just`. The `#>` overloads accept
    // a boolean claim (continuing with a justification via StepBuilder2) or a
    // structured step (Assume/Assert/Let/SubProof).
    trait StepBuilder {
      def #>(cond: B): StepBuilder2
      def #>(cond: Assume): ProofStep
      def #>(cond: Assert): ProofStep
      def #>(cond: Let): ProofStep
      def #>(cond: SubProof): ProofStep
    }

    // Second stage of the step builder: attaches a justification. The function
    // overloads (arity 1..22 — Scala's FunctionN limit) presumably name the
    // lemma/theorem being applied together with its witnesses — TODO confirm.
    trait StepBuilder2 {
      def by(just: Justification): ProofStep
      def by(just: Predef.String): ProofStep
      def by(o: Unit): StepBuilder3
      def by[T1](f: T1 => Unit): StepBuilder3
      def by[T1, T2](f: (T1, T2) => Unit): StepBuilder3
      def by[T1, T2, T3](f: (T1, T2, T3) => Unit): StepBuilder3
      def by[T1, T2, T3, T4](f: (T1, T2, T3, T4) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5](f: (T1, T2, T3, T4, T5) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6](f: (T1, T2, T3, T4, T5, T6) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7](f: (T1, T2, T3, T4, T5, T6, T7) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8](f: (T1, T2, T3, T4, T5, T6, T7, T8) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Unit): StepBuilder3
      def by[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](f: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Unit): StepBuilder3
    }

    // Final stage: optional witness arguments; a bare StepBuilder3 is
    // implicitly completed as a ProofStep with no arguments.
    trait StepBuilder3 {
      def and(args: Z*): ProofStep
    }
    object StepBuilder3 {
      import language.implicitConversions
      implicit def toProofStep(o: StepBuilder3): ProofStep = o.and()
    }

    // Builds a sequent from premises; `⊢` is the Unicode alias of `|-`.
    trait SequentBuilder {
      def |-(conclusion: B): Sequent
      def ⊢(conclusion: B): Sequent
    }
  }

  // Clause factories: specification-only constructors for the clause values
  // consumed by `Contract` / `Contract.Only` above. All are `???` stubs.
  def MaxIt(num: Z): Contract.MaxIt = ???
  def Reads(accesses: Any*): Contract.Reads = ???
  def Requires(claims: B*): Contract.Requires = ???
  def Modifies(accesses: Any*): Contract.Modifies = ???
  def Ensures(claims: B*): Contract.Ensures = ???
  def Invariant(claims: B*): Contract.Invariant = ???
  def Invariant(desc: String, claims: B*): Contract.Invariant = ???
  def DataRefinement(rep: Any)(refs: Any*)(claims: B*): Contract.DataRefinement = ???
  def Case(requires: Contract.Requires): Contract.Case = ???
  def Case(ensures: Contract.Ensures): Contract.Case = ???
  def Case(requires: Contract.Requires, ensures: Contract.Ensures): Contract.Case = ???
  def Case(name: String, requires: Contract.Requires): Contract.Case = ???
  def Case(name: String, ensures: Contract.Ensures): Contract.Case = ???
  def Case(name: String, requires: Contract.Requires, ensures: Contract.Ensures): Contract.Case = ???
// Loop-invariant declarations (erased by the `lUnitNS` macros at compile time),
// followed by the proof-language members of the contract DSL. Line structure
// only is restored here (the original text had newlines collapsed); tokens are
// unchanged.
  def Invariant(arg0: Contract.Modifies, arg1: B*): Unit = macro Macro.lUnit1S
  def Invariant(arg0: Contract.MaxIt, arg1: B*): Unit = macro Macro.lUnit1S
  def Invariant(arg0: Contract.MaxIt, arg1: Contract.Modifies, arg2: B*): Unit = macro Macro.lUnit2S

  // Named/unnamed proof artifacts — all `???` stubs that only give contract
  // expressions something to type-check against.
  def Fact(claims: B*): Contract.Fact = ???
  def Fact(desc: String, claims: B*): Contract.Fact = ???
  def Lemma(claim: B, proof: Contract.Proof): Contract.Lemma = ???
  def Lemma(desc: String, claim: B, proof: Contract.Proof): Contract.Lemma = ???
  def Theorem(claim: B, proof: Contract.Proof): Contract.Theorem = ???
  def Theorem(desc: String, claim: B, proof: Contract.Proof): Contract.Theorem = ???
  def Proof(steps: Contract.ProofStep*): Contract.Proof = ???

  // `Deduce` embeds a proof (steps or sequents) in code; erased by macro.
  def Deduce(arg0: Contract.ProofStep, arg1: Contract.ProofStep*): Unit = macro Macro.lUnit1S
  def Deduce(arg0: Contract.Sequent, arg1: Contract.Sequent*): Unit = macro Macro.lUnit1S
  def Deduce(arg0: String, arg1: Contract.Sequent, arg2: Contract.Sequent*): Unit = macro Macro.lUnit2S

  // Individual proof-step constructors.
  def Step(no: Int, claim: B): Contract.ProofStep = ???
  def Step(no: Int, claim: B, just: Contract.Justification): Contract.ProofStep = ???
  def SubProof(steps: Contract.ProofStep*): Contract.SubProof = ???
  def Assume(claim: B): Contract.Assume = ???
  def Assert(claim: B, subProof: Contract.SubProof): Contract.Assert = ???
  def StructuralInduction(subProof: Contract.SubProof): Contract.Justification = ???

  // `Let` introduces fresh (universally bound) variables for a sub-proof;
  // one overload per arity up to Scala's FunctionN limit of 22.
  def Let[T](body: T => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2](body: (T1, T2) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3](body: (T1, T2, T3) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4](body: (T1, T2, T3, T4) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5](body: (T1, T2, T3, T4, T5) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6](body: (T1, T2, T3, T4, T5, T6) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7](body: (T1, T2, T3, T4, T5, T6, T7) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8](body: (T1, T2, T3, T4, T5, T6, T7, T8) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Contract.SubProof): Contract.Let = ???
  def Let[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](body: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Contract.SubProof): Contract.Let = ???

  // Specification value references: pre-state (`In`), old value (`Old`),
  // value at a labeled/numbered program point (`At`), method result (`Res`),
  // and index-typed view (`Idx`). `|-`/`⊢` start a sequent from premises.
  def In[T](v: T): T = ???
  def Old[T](v: T): T = ???
  def At[T](label: String, v: T): T = ???
  def At[T](n: Z, v: T): T = ???
  def Res[T]: T = ???
  def Idx[T <: ZLike[T]](v: Any): T = ???
  def |-(conclusion: B): Contract.Sequent = ???
  def ⊢(conclusion: B): Contract.Sequent = ???
// Universal quantification. Two families of overloads:
//   * bare-predicate forms (arity 1..22): proof-only — calling them at runtime
//     `halt`s, since an unbounded domain cannot be enumerated;
//   * sequence forms over `ZRange`/`Range`/`IS`/`MS`: executable — they
//     short-circuit with `return F` on the first counterexample, else `return T`.
// Line structure only is restored here (the original text had newlines
// collapsed); the token stream, including every `halt` message, is unchanged.
  object All {
    def apply[T](p: T => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2](p: (T1, T2) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3](p: (T1, T2, T3) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4](p: (T1, T2, T3, T4) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5](p: (T1, T2, T3, T4, T5) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6](p: (T1, T2, T3, T4, T5, T6) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7](p: (T1, T2, T3, T4, T5, T6, T7) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8](p: (T1, T2, T3, T4, T5, T6, T7, T8) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = halt("This form of All is not executable")
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = halt("This form of All is not executable")
    // Executable forms: bounded ranges.
    def apply[I](seq: ZRange[I])(p: I => Boolean): B = {
      for (e <- seq) { if (!p(e)) { return F } }
      return T
    }
    def apply(seq: scala.collection.immutable.Range)(p: Z => Boolean): B = {
      for (e <- seq) { if (!p(e)) { return F } }
      return T
    }
    // Executable forms: immutable sequences, with tuple-destructuring
    // overloads for element arities 2..22.
    def apply[I, T](seq: IS[I, T])(p: T => Boolean): B = {
      for (e <- seq) { if (!p(e)) { return F } }
      return T
    }
    def apply[I, T1, T2](seq: IS[I, (T1, T2)])(p: (T1, T2) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3](seq: IS[I, (T1, T2, T3)])(p: (T1, T2, T3) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4](seq: IS[I, (T1, T2, T3, T4)])(p: (T1, T2, T3, T4) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5](seq: IS[I, (T1, T2, T3, T4, T5)])(p: (T1, T2, T3, T4, T5) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6](seq: IS[I, (T1, T2, T3, T4, T5, T6)])(p: (T1, T2, T3, T4, T5, T6) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7)])(p: (T1, T2, T3, T4, T5, T6, T7) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8)])(p: (T1, T2, T3, T4, T5, T6, T7, T8) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20, e._21)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20, e._21, e._22)) { return F } }
      return T
    }
    // Executable forms: mutable sequences, mirroring the IS overloads.
    def apply[I, T](seq: MS[I, T])(p: T => Boolean): B = {
      for (e <- seq) { if (!p(e)) { return F } }
      return T
    }
    def apply[I, T1, T2](seq: MS[I, (T1, T2)])(p: (T1, T2) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3](seq: MS[I, (T1, T2, T3)])(p: (T1, T2, T3) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4](seq: MS[I, (T1, T2, T3, T4)])(p: (T1, T2, T3, T4) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5](seq: MS[I, (T1, T2, T3, T4, T5)])(p: (T1, T2, T3, T4, T5) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6](seq: MS[I, (T1, T2, T3, T4, T5, T6)])(p: (T1, T2, T3, T4, T5, T6) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7)])(p: (T1, T2, T3, T4, T5, T6, T7) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8)])(p: (T1, T2, T3, T4, T5, T6, T7, T8) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20, e._21)) { return F } }
      return T
    }
    def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = {
      for (e <- seq) { if (!p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20, e._21, e._22)) { return F } }
      return T
    }
  }

  // Unicode alias of `All`: each overload simply delegates. (This object
  // continues beyond the region under review; the trailing fragment below is
  // reproduced unchanged and is completed by the following lines of the file.)
  object ∀ {
    @inline def apply[T](p: T => Boolean): B = All(p)
    @inline def apply[T1, T2](p: (T1, T2) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3](p: (T1, T2, T3) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4](p: (T1, T2, T3, T4) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4, T5](p: (T1, T2, T3, T4, T5) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4, T5, T6](p: (T1, T2, T3, T4, T5, T6) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4, T5, T6, T7](p: (T1, T2, T3, T4, T5, T6, T7) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8](p: (T1, T2, T3, T4, T5, T6, T7, T8) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = All(p)
    @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](p: (T1,
T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = All(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = All(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = All(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = All(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = All(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Boolean): B = All(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = All(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = All(p) @inline def apply[I](seq: ZRange[I])(p: I => Boolean): B = All(seq)(p) @inline def apply[I, T](seq: IS[I, T])(p: T => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2](seq: IS[I, (T1, T2)])(p: (T1, T2) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3](seq: IS[I, (T1, T2, T3)])(p: (T1, T2, T3) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4](seq: IS[I, 
(T1, T2, T3, T4)])(p: (T1, T2, T3, T4) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5](seq: IS[I, (T1, T2, T3, T4, T5)])(p: (T1, T2, T3, T4, T5) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6](seq: IS[I, (T1, T2, T3, T4, T5, T6)])(p: (T1, T2, T3, T4, T5, T6) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7)])(p: (T1, T2, T3, T4, T5, T6, T7) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8)])(p: (T1, T2, T3, T4, T5, T6, T7, T8) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, 
T12, T13, T14, T15](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21)])(p: (T1, T2, T3, 
T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = All(seq)(p) @inline def apply[I, T](seq: MS[I, T])(p: T => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2](seq: MS[I, (T1, T2)])(p: (T1, T2) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3](seq: MS[I, (T1, T2, T3)])(p: (T1, T2, T3) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4](seq: MS[I, (T1, T2, T3, T4)])(p: (T1, T2, T3, T4) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5](seq: MS[I, (T1, T2, T3, T4, T5)])(p: (T1, T2, T3, T4, T5) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6](seq: MS[I, (T1, T2, T3, T4, T5, T6)])(p: (T1, T2, T3, T4, T5, T6) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7)])(p: (T1, T2, T3, T4, T5, T6, T7) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8)])(p: (T1, T2, T3, T4, T5, T6, T7, T8) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => 
Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](seq: 
MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = All(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = All(seq)(p) } /** Existential quantifier (generated overloads, arity 1..22). The bare-predicate forms are specification-only and `halt` if invoked at runtime; the `ZRange`/`Range`/`IS`/`MS` forms are executable and short-circuit, returning `T` on the first element satisfying `p`, else `F` after exhausting `seq`. Tuple elements are unpacked positionally (`e._1 .. e._n`) before applying `p`. */ object Exists { def apply[T](p: T => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2](p: (T1, T2) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3](p: (T1, T2, T3) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4](p: (T1, T2, T3, T4) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5](p: (T1, T2, T3, T4, T5) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6](p: (T1, T2, T3, T4, T5, T6) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7](p: (T1, T2, T3, T4, T5, T6, T7) =>
Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8](p: (T1, T2, T3, T4, T5, T6, T7, T8) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](p:
(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = halt("This form of Exists is not executable") def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = halt("This form of Exists is not executable") def apply[I](seq: ZRange[I])(p: I => Boolean): B = { for (e <- seq) { if (p(e)) { return T } } return F } def apply(seq: scala.collection.immutable.Range)(p: Z => Boolean): B = { for (e <- seq) { if (p(e)) { return T } } return F } def apply[I, T](seq: IS[I, T])(p: T => Boolean): B = { for (e <- seq) { if (p(e)) { return T } } return F } def apply[I, T1, T2](seq: IS[I, (T1, T2)])(p: (T1, T2) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2)) { return T } } return F } def apply[I, T1, T2, T3](seq: IS[I, (T1, T2, T3)])(p: (T1, T2, T3) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3)) { return T } } return F } def apply[I, T1, T2, T3, T4](seq: IS[I, (T1, T2, T3, T4)])(p: (T1, T2, T3, T4) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4)) { return T } } return
F } def apply[I, T1, T2, T3, T4, T5](seq: IS[I, (T1, T2, T3, T4, T5)])(p: (T1, T2, T3, T4, T5) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6](seq: IS[I, (T1, T2, T3, T4, T5, T6)])(p: (T1, T2, T3, T4, T5, T6) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7)])(p: (T1, T2, T3, T4, T5, T6, T7) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8)])(p: (T1, T2, T3, T4, T5, T6, T7, T8) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8,
e._9, e._10, e._11, e._12)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17)) {
return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20, e._21)) { return T } } return F } def
apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20, e._21, e._22)) { return T } } return F } def apply[I, T](seq: MS[I, T])(p: T => Boolean): B = { for (e <- seq) { if (p(e)) { return T } } return F } def apply[I, T1, T2](seq: MS[I, (T1, T2)])(p: (T1, T2) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2)) { return T } } return F } def apply[I, T1, T2, T3](seq: MS[I, (T1, T2, T3)])(p: (T1, T2, T3) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3)) { return T } } return F } def apply[I, T1, T2, T3, T4](seq: MS[I, (T1, T2, T3, T4)])(p: (T1, T2, T3, T4) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5](seq: MS[I, (T1, T2, T3, T4, T5)])(p: (T1, T2, T3, T4, T5) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6](seq: MS[I, (T1, T2, T3, T4, T5, T6)])(p: (T1, T2, T3, T4, T5, T6) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7)])(p: (T1, T2, T3, T4, T5, T6, T7) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8)])(p: (T1, T2, T3, T4, T5, T6, T7, T8) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8)) { return T } } return F } def apply[I, T1,
T2, T3, T4, T5, T6, T7, T8, T9](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](seq: MS[I, (T1, T2, T3, T4, T5, T6,
T7, T8, T9, T10, T11, T12, T13, T14, T15)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12,
e._13, e._14, e._15, e._16, e._17, e._18, e._19)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20, e._21)) { return T } } return F } def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = { for (e <- seq) { if (p(e._1, e._2, e._3, e._4, e._5, e._6, e._7, e._8, e._9, e._10, e._11, e._12, e._13, e._14, e._15, e._16, e._17, e._18, e._19, e._20, e._21, e._22)) { return T } } return F } } /* ∃: symbolic alias of Exists; the visible overloads below delegate via Exists(p) / Exists(seq)(p). */ object ∃ { @inline def apply[T](p: T => Boolean): B = Exists(p) @inline def apply[T1, T2](p: (T1, T2) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3](p: (T1, T2, T3) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4](p: (T1, T2, T3, T4) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5](p: (T1, T2, T3, T4,
T5) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6](p: (T1, T2, T3, T4, T5, T6) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7](p: (T1, T2, T3, T4, T5, T6, T7) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8](p: (T1, T2, T3, T4, T5, T6, T7, T8) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = 
Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = Exists(p) @inline def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = Exists(p) @inline def apply[I](seq: ZRange[I])(p: I => Boolean): B = Exists(seq)(p) @inline def apply[I, T](seq: IS[I, T])(p: T => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2](seq: IS[I, (T1, T2)])(p: (T1, T2) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3](seq: IS[I, (T1, T2, T3)])(p: (T1, T2, T3) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4](seq: IS[I, (T1, T2, T3, T4)])(p: (T1, T2, T3, T4) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5](seq: IS[I, (T1, T2, T3, T4, T5)])(p: (T1, T2, T3, T4, T5) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6](seq: IS[I, (T1, T2, T3, T4, T5, T6)])(p: (T1, T2, T3, T4, T5, T6) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7)])(p: (T1, T2, T3, T4, T5, T6, T7) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8)])(p: (T1, T2, T3, T4, T5, T6, T7, T8) => 
Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](seq: IS[I, (T1, T2, 
T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](seq: IS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = Exists(seq)(p) @inline def apply[I, T](seq: MS[I, T])(p: T => Boolean): B = Exists(seq)(p) @inline def apply[I, 
T1, T2](seq: MS[I, (T1, T2)])(p: (T1, T2) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3](seq: MS[I, (T1, T2, T3)])(p: (T1, T2, T3) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4](seq: MS[I, (T1, T2, T3, T4)])(p: (T1, T2, T3, T4) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5](seq: MS[I, (T1, T2, T3, T4, T5)])(p: (T1, T2, T3, T4, T5) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6](seq: MS[I, (T1, T2, T3, T4, T5, T6)])(p: (T1, T2, T3, T4, T5, T6) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7)])(p: (T1, T2, T3, T4, T5, T6, T7) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8)])(p: (T1, T2, T3, T4, T5, T6, T7, T8) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, 
T10, T11, T12, T13, T14](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, 
T20) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21) => Boolean): B = Exists(seq)(p) @inline def apply[I, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22](seq: MS[I, (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22)])(p: (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22) => Boolean): B = Exists(seq)(p) } import language.implicitConversions implicit def $toStepBuilder(stepNo: Int): Contract.StepBuilder = ??? implicit def $toStepBuilder(stepNo: Z): Contract.StepBuilder = ??? implicit def $toJustification(name: String): Contract.Justification = ??? implicit def $toB(state: Contract.StateCont): B = ??? implicit def $toSequent(bs: B): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? 
implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? implicit def $toSequent(bs: (B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B)): Contract.SequentBuilder = ??? }
sireum/v3-logika-runtime
library/shared/src/main/scala/org/sireum/contract.scala
Scala
bsd-2-clause
85,786
package sublimeSpam.ui class DecorativeQuick(rand: util.Random, maxX: Int, maxY: Int) { private val realMinX = -maxX private val realMaxX = maxX * 2 private val realMinY = -maxY private val realMaxY = maxY * 2 val railColor = rand.nextInt(3) val dirX = { val candidateDirX = rand.nextInt(8) - 4 if (candidateDirX >= 0) candidateDirX + 1 else candidateDirX } val dirY = { val candidateDirY = rand.nextInt(8) - 4 if (candidateDirY >= 0) candidateDirY + 1 else candidateDirY } var x = { val candidateX = rand.nextInt(maxX) if (dirX > 0) -candidateX else maxX + candidateX } var y = { val candidateY = rand.nextInt(maxY) if (dirY > 0) - candidateY else maxY + candidateY } /** * Updates the Quick's position variables and checks if it is still inside the allowed area. * Returns true if it is, false if not. */ def updatePosition(): Boolean = { x += dirX y += dirY if (x < realMinX || x > realMaxX || y < realMinY || y > realMaxY) false else true } }
Berthur/SublimeSpam
src/sublimeSpam/ui/DecorativeQuick.scala
Scala
gpl-3.0
1,054
// - Project: scalajs-angulate (https://github.com/jokade/scalajs-angulate) // Description: Provides a base class for macros with common utility functions // // Distributed under the MIT License (see included file LICENSE) package biz.enef.angulate.impl import acyclic.file import biz.enef.angulate.named import scala.reflect.macros.blackbox protected[angulate] abstract class MacroBase { val c: blackbox.Context import c.universe._ /* type definitions */ val namedAnnotation = typeOf[named] // include runtime log messages if true protected lazy val runtimeLogging = c.settings.exists( _ == "biz.enef.angulate.runtimeLogging" ) /** * Print to console during compilation * @param tree * @param msg */ protected[this] def printCode(tree: Tree, msg: String = "") = c.info( c.enclosingPosition, s"""$msg |${showCode(tree)} """.stripMargin, true ) protected[this] def makeArgsList(f: MethodSymbol) = if(f.paramLists.isEmpty) (List(),List()) else f.paramLists.head.map( p => { val name = TermName(c.freshName("x")) (q"$name: ${p.typeSignature}", q"$name") }).unzip protected[this] def getDINames(f: MethodSymbol) = { f.paramLists.head.map{ p=> p.annotations.find( _.tree.tpe =:= namedAnnotation ).map { a => val name = a.tree.children.tail.head.toString // TODO: that's ludicrous... what is the right way to unquote the string??? name.substring(1,name.length-1) }.getOrElse(p.name.toString) } } /** * Creates a AngularJS constructor array for the specified type. 
* * @note the returned tree requires `js.Array` to be in scope * * @param ct class type */ protected[this] def createDIArray(ct: Type) = { val m = getConstructor(ct) val deps = getDINames(m) val (params,args) = makeArgsList(m) q"""js.Array[Any](..$deps, ((..$params) => new $ct(..$args)):js.Function)""" } protected[this] def getConstructor(ct: Type) = ct.decls.filter( _.isConstructor ).collect{ case m: MethodSymbol => m}.head // TODO: support DI name annotations protected[this] def createFunctionDIArray(t: c.Tree) = { val (f,params) = analyzeFunction(t) val diNames = params.map( p => p._2.toString ) q"js.Array[Any](..$diNames, $f:js.Function)" } protected[this] def analyzeFunction(t: c.Tree) = { val (m:Tree,params:List[ValDef]) = t match { case q"(..$params) => $body" => (t,params) case q"{(..$params) => $body}" => (t.children.head,params) } val args = params.map{ p => val q"$mods val $name: $tpe = $rhs" = p (mods,name,tpe,rhs) } (m,args) } }
CapeSepias/scalajs-angulate
src/main/scala/biz/enef/angulate/impl/MacroBase.scala
Scala
mit
2,683
package org.jetbrains.plugins.scala package annotator import org.intellij.lang.annotations.Language import org.jetbrains.plugins.scala.annotator.element.ScConstructorInvocationAnnotator import org.jetbrains.plugins.scala.extensions._ import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile import org.jetbrains.plugins.scala.lang.psi.api.base.ScConstructorInvocation import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass import org.jetbrains.plugins.scala.lang.psi.types.Compatibility import org.jetbrains.plugins.scala.util.assertions.MatcherAssertions class ConstructorInvocationAnnotatorTest extends AnnotatorSimpleTestCase { final val Header = """ class Seq[+A] object Seq { def apply[A](a: A) = new Seq[A] } class Simple class Complex(r: Double, i: Double) class A(a: Int) class B[X](a: X) class C(a: Int) { def this() = this(0) } class D(a: Int) { def this(b: Boolean) = this(0) } class DD(a: Int) { def this(b: Boolean, c: Int) = this(0) } class DDD(a: Int)(b: Int) class DDD2(a: Int) { def this(b: Boolean)(c: Boolean) = this(0) } class FFF[X](a: X)(b: X) class GGG[X, Y](a: X)(b: Y) class Z[+A]; class Y[+A] class E[X](a: Z[X]) { def this(o: Y[X]) = this(Z) } class EE class F(implicit a: Int) class Klass[K](a: K) type Alias[A] = Klass[A] """ def testEmpty(): Unit = { assertNothing(messages("")) } def testFine(): Unit = { val codes = Seq( "new Simple", "new Simple()", "new Complex(1.0, 1.0)", "new A(0)", "new A(a = 0)", "new B[Int](0)", "new B(0)", "new C(0)", "new C()", "new C", "new D(0)", "new D(false)", "new DD(0)", "new DD(false, 1)", "new DDD(1)(2)", "new FFF(1)(2)", "new GGG(1)(true)", "new E[Int](new Y[Int])", "new E[Int](new Z[Int])", "new E(new Y[Int])", "new E(new Z[Int])", "new Alias[Int](0)" ) for {code <- codes} { assertNothing(messages(code)) assertNothing(messages(code + " {}")) } } def testExcessArguments(): Unit = { assertMatches(messages("new A(0, 1)")) { case Error(", 1", "Too many arguments for constructor A(Int)") :: Nil => } 
assertMessagesSorted(messages("new D(0, 1)"))( Error("D", "Cannot resolve overloaded constructor `D`") // SCL-15594 ) assertMessagesSorted(messages("new D(true, 1)"))( Error("D", "Cannot resolve overloaded constructor `D`") // SCL-15594 ) assertMessagesSorted(messages("new D(true, 1) {}"))( Error("D", "Cannot resolve overloaded constructor `D`") // SCL-15594 ) assertMessagesSorted(messages("new DDD(1)(2, 3)"))( Error(", 3", "Too many arguments for constructor DDD(Int)(Int)") ) } def testAutoTupling(): Unit = { assertNothing(messages("new B")) assertNothing(messages("new B()")) assertNothing(messages("new FFF()()")) assertNothing(messages("new GGG()()")) assertMessages(messages("new FFF()(true)"))( Error("true", "Type mismatch, expected: Unit, actual: Boolean") ) } // TODO Don't separate the code from the expected messages (it's hard to understand such a test) def testMissedParameters(): Unit = { assertMatches(messages("new A")) { case Error(_, "Unspecified value parameters: a: Int") :: Nil => } assertMatches(messages("new A()")) { case Error(_, "Unspecified value parameters: a: Int") :: Nil => } assertMatches(messages("new B[Int]()")) { case Error(_, "Unspecified value parameters: a: Int") :: Nil => } assertMessagesSorted(messages("new D"))( Error("D", "Cannot resolve overloaded constructor `D`") // SCL-15594 ) assertMessagesSorted(messages("new D() {}"))( Error("D", "Cannot resolve overloaded constructor `D`") // SCL-15594 ) assertMessagesSorted(messages("new DD()"))( Error("DD", "Cannot resolve overloaded constructor `DD`") // SCL-15594 ) assertMessagesSorted(messages("new DD()"))( Error("DD", "Cannot resolve overloaded constructor `DD`") // SCL-15594 ) assertMessagesSorted(messages("new DD() {}"))( Error("DD", "Cannot resolve overloaded constructor `DD`") // SCL-15594 ) assertMessagesSorted(messages("new DDD(3)()"))( Error("()", "Unspecified value parameters: b: Int") ) assertMessagesSorted(messages("new DDD()()"))( Error("()", "Unspecified value parameters: a: 
Int"), Error("()", "Unspecified value parameters: b: Int") ) } def testMissingArgumentClause(): Unit = { assertMessagesSorted(messages("new DDD(3)"))( Error(")", "Missing argument list for constructor DDD(Int)(Int)") ) assertMessagesSorted(messages("new DDD2(true)"))( Error(")", "Missing argument list for constructor DDD2(Boolean)(Boolean)") ) } def testMissingArgumentClauseWithImplicit(): Unit = { assertMessagesSorted(messages("class Test()(implicit impl: Test); new Test()")) ( Error("Test()", "No implicit arguments of type: Test") ) assertMessagesSorted(messages("class Test()(private implicit impl: Test); new Test()")) ( Error("Test()", "No implicit arguments of type: Test") ) } def testMissingAndTypeMismatch(): Unit = { assertMessagesSorted(messages("new DD(true)"))( Error("DD", "Cannot resolve overloaded constructor `DD`") // SCL-15594 ) } def testPositionalAfterNamed(): Unit = { assertMatches(messages("new Complex(i = 1.0, 5.0)")) { case Error("5.0", "Positional after named argument") :: Nil => } assertMatches(messages("new Complex(i = 1.0, 5.0) {}")) { case Error("5.0", "Positional after named argument") :: Nil => } } def testNamedDuplicates(): Unit = { assertMessagesSorted(messages("new A(a = null, a = Unit)"))( Error("a", "Parameter specified multiple times"), Error("a", "Parameter specified multiple times") ) assertMessagesSorted(messages("new A(a = null, a = Unit) {}"))( Error("a", "Parameter specified multiple times"), Error("a", "Parameter specified multiple times") ) } def testSinglePrivateConstructorIsInaccessible(): Unit = { val text = """ |class P private(a: Int) | |new P() """.stripMargin assertMatches(messages(text)) { case Error("()", "No constructor accessible from here") :: Nil => } assertMatches(messages(text + " {}")) { case Error("()", "No constructor accessible from here") :: Nil => } } def testMultiplePrivateConstructorsAreInaccessible(): Unit = { val text = """ |class P private(a: Int) { | private def this(a: Boolean) = this(???) 
|} | |new P(3) """.stripMargin assertMatches(messages(text)) { case Error("(3)", "No constructor accessible from here") :: Nil => } assertMatches(messages(text + " {}")) { case Error("(3)", "No constructor accessible from here") :: Nil => } } def testPrivatePrimaryConstructorIsIgnored(): Unit = { val text = """ |class P private (a: Int) { | def this(a: Boolean) = this(3) |} | |new P(3) """.stripMargin assertMatches(messages(text)) { case Error("3", "Type mismatch, expected: Boolean, actual: Int") :: Nil => } assertMatches(messages(text + " {}")) { case Error("3", "Type mismatch, expected: Boolean, actual: Int") :: Nil => } } def testPrivateSecondaryConstructorIsIgnored(): Unit = { val text = """ |class P(a: Int) { | private def this(a: Boolean) = this(???) |} | |new P(true) """.stripMargin assertMatches(messages(text)) { case Error("true", "Type mismatch, expected: Int, actual: Boolean") :: Nil => } assertMatches(messages(text + " {}")) { case Error("true", "Type mismatch, expected: Int, actual: Boolean") :: Nil => } } def testTypeMismatch(): Unit = { assertMatches(messages("new A(false)")) { case Error("false", "Type mismatch, expected: Int, actual: Boolean") :: Nil => } assertMatches(messages("new B[Int](false)")) { case Error("false", "Type mismatch, expected: Int, actual: Boolean") :: Nil => } assertMessagesSorted(messages("new D(3.3)"))( Error("D", "Cannot resolve overloaded constructor `D`") // SCL-15594 ) assertMessagesSorted(messages("new D(3.3) {}"))( Error("D", "Cannot resolve overloaded constructor `D`")// SCL-15594 ) assertMessagesSorted(messages("new DDD(true)(false)"))( Error("true", "Type mismatch, expected: Int, actual: Boolean") // SCL-15592 ) assertMessagesSorted(messages("new FFF(3)(true)"))( Error("true", "Type mismatch, expected: Int, actual: Boolean"), ) } def testMalformedSignature(): Unit = { assertMatches(messages("class Malformed(a: A*, b: B); new Malformed(0)")) { case Error("Malformed", "Constructor has malformed definition") :: Nil => } 
} def testTraitInstantiation(): Unit = { val code = """ |trait T |new T {} """.stripMargin assertNothing(messages(code)) } def testTraitInstantiationWithSingleEmptyParameterList(): Unit = { val code = """ |trait T |new T() {} """.stripMargin assertNothing(messages(code)) } def testTraitInstantiationWithNonExistingConstructor(): Unit = { val code = """ |trait T |new T(2, 2) {} """.stripMargin assertMessages(messages(code))( Error("(2, 2)", "trait T is a trait; does not take constructor arguments") ) } def testMissingParamaterLists(): Unit = { val code = """ |new DDD(2) """.stripMargin assertMessages(messages(code))( Error(")", "Missing argument list for constructor DDD(Int)(Int)") ) } // TODO: Type Aliases //class A(a: Int) //class B[X](a: X) // //type AA[A] = A[A] //type BB[A] = B[A] //new AA(0) //new BB(0) //new AA[Int](0) def messages(@Language(value = "Scala", prefix = Header) code: String): List[Message] = { val file: ScalaFile = (Header + code).parseWithEventSystem implicit val mock: AnnotatorHolderMock = new AnnotatorHolderMock(file) val seq = file.depthFirst().findByType[ScClass] Compatibility.seqClass = seq try { file.depthFirst().filterByType[ScConstructorInvocation].foreach { ScConstructorInvocationAnnotator.annotate(_) } mock.annotations } finally { Compatibility.seqClass = None } } } class JavaConstructorInvocationAnnotatorTest extends ScalaHighlightingTestBase with MatcherAssertions { val javaCode = """ |public class JavaClass { | private final int x; | private final int y; | public JavaClass(int x, int y) { | this.x = x; | this.y = y; | } |} """.stripMargin private def setup(): Unit = { getFixture.configureByText("JavaClass.java", javaCode) } def messages(scalaText: String): List[Message] = { setup() errorsFromScalaCode(scalaText) } def test_SCL15398_ok_new(): Unit = { assertNothing(messages("new JavaClass(1, 2)")) } def test_SCL15398_ok_extends(): Unit = { assertNothing(messages("class Impl extends JavaClass(1, 2)")) } def 
test_SCL15398_missing_parameter_new(): Unit = { assertMessages(messages("new JavaClass(1)"))( Error("(1)", "Unspecified value parameters: y: Int") ) } def test_SCL15398_missing_parameter_extends(): Unit = { assertMessages(messages("class Impl extends JavaClass(1)"))( Error("(1)", "Unspecified value parameters: y: Int") ) } def testSCL4504(): Unit = { assertNothing(messages( """ |class B |trait C { val b: B} |class A(override implicit val b: B) extends C |//class A(implicit override val b: B) extends C | |implicit val b = new B |new A() """.stripMargin)) } }
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/annotator/ConstructorInvocationAnnotatorTest.scala
Scala
apache-2.0
12,183
package scodec.protocols package ip package v4 import scodec.bits._ import scodec.Codec import scodec.codecs._ import scodec.codecs.literals._ import scodec.stream._ import shapeless._ /** Simplified version of the IPv4 header format. */ case class SimpleHeader( dataLength: Int, id: Int, ttl: Int, protocol: Int, sourceIp: Address, destinationIp: Address ) object SimpleHeader { implicit val codec: Codec[SimpleHeader] = { val componentCodec = { // Word 1 -------------------------------- ("version" | bin"0100" ) :: ("ihl" | uint4 ) :: ("dscp" | ignore(6) ) :: ("ecn" | ignore(2) ) :: ("total_length" | uint16 ) :: // Word 2 -------------------------------- ("id" | uint16 ) :: ("flags" | ignore(3) ) :: ("fragment_offset" | ignore(13) ) :: // Word 3 -------------------------------- ("ttl" | uint8 ) :: ("proto" | uint8 ) :: ("checksum" | bits(16) ) :: // Word 4 -------------------------------- ("src_ip" | Codec[Address]) :: // Word 5 -------------------------------- ("dest_ip" | Codec[Address]) }.dropUnits new Codec[SimpleHeader] { def encode(header: SimpleHeader) = { val totalLength = header.dataLength + 20 for { encoded <- componentCodec.encode(5 :: totalLength :: header.id :: header.ttl :: header.protocol :: BitVector.low(16) :: header.sourceIp :: header.destinationIp :: HNil) chksum = checksum(encoded) } yield encoded.patch(16 + 16 + 16, chksum) } def decode(bits: BitVector) = { componentCodec.decode(bits) map { case (rest, _ :: totalLength :: id :: ttl :: proto :: chksum :: srcIp :: dstIp :: HNil) => rest -> SimpleHeader(totalLength - 20, id, ttl, proto, srcIp, dstIp) } } } } def sdecoder(ethernetHeader: pcap.EthernetFrameHeader): StreamDecoder[SimpleHeader] = if (ethernetHeader.ethertype == Some(pcap.EtherType.IPv4)) decode.once[SimpleHeader] else decode.halt }
sbuzzard/scodec-protocols
src/main/scala/scodec/protocols/ip/v4/SimpleHeader.scala
Scala
bsd-3-clause
2,230
/* Copyright (c) 2016 Lucas Satabin * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package lingua sealed abstract class Predicate[-In] extends (In => Boolean) { def &&[In1 <: In](that: Predicate[In1]): Predicate[In1] = (this, that) match { case (EmptyPredicate, _) | (_, EmptyPredicate) => EmptyPredicate case (AnyPredicate, _) => that case (_, AnyPredicate) => this case (SetPredicate(s1, true), SetPredicate(s2, true)) => val i = s1.intersect(s2) if (i.isEmpty) EmptyPredicate else SetPredicate(i, true) case (SetPredicate(s1, true), SetPredicate(s2, false)) => val s = s1.filterNot(s2.contains(_)) if (s.isEmpty) EmptyPredicate else SetPredicate(s, true) case (SetPredicate(s1, false), SetPredicate(s2, true)) => val s = s2.filterNot(s1.contains(_)) if (s.isEmpty) EmptyPredicate else SetPredicate(s, true) case (SetPredicate(s1, false), SetPredicate(s2, false)) => SetPredicate(s1.union(s2), false) } def ||[In1 <: In](that: Predicate[In1]): Predicate[In1] = (this, that) match { case (EmptyPredicate, _) => that case (_, EmptyPredicate) => this case (AnyPredicate, _) | (_, AnyPredicate) => AnyPredicate case (SetPredicate(s1, true), SetPredicate(s2, true)) => SetPredicate(s1.union(s2), true) case (SetPredicate(s1, true), SetPredicate(s2, false)) => val d = s2.diff(s1) if (d.isEmpty) AnyPredicate else SetPredicate(d, false) case (SetPredicate(s1, false), SetPredicate(s2, true)) => val d = s1.diff(s2) if (d.isEmpty) AnyPredicate else SetPredicate(d, false) case (SetPredicate(s1, false), 
SetPredicate(s2, false)) => val i = s1.intersect(s2) if (i.isEmpty) AnyPredicate else SetPredicate(i, false) } def unary_! : Predicate[In] = this match { case EmptyPredicate => AnyPredicate case AnyPredicate => EmptyPredicate case SetPredicate(s, p) => SetPredicate(s, !p) } } object Predicate { def apply[T](v: T): Predicate[T] = SetPredicate(Set(v), true) } object NonEmptyPredicate { def unapply[T](p: Predicate[T]): Option[Predicate[T]] = p match { case EmptyPredicate => None case _ => Some(p) } } case object EmptyPredicate extends Predicate[Any] { def apply(a: Any) = false override def toString = "∅" } case object AnyPredicate extends Predicate[Any] { def apply(a: Any) = true override def toString = "★" } final case class SetPredicate[In](set: Set[In], positive: Boolean) extends Predicate[In] { def apply(i: In) = if (positive) set.contains(i) else !set.contains(i) override def toString = f"${if (!positive) "¬" else ""}${set.mkString("{", ", ", "}")}" }
satabin/lingua
fst/src/main/scala/lingua/Predicate.scala
Scala
apache-2.0
3,592
package org.jetbrains.plugins.scala.lang.completion
package postfix
package templates
package selector

import java.{util => ju}

import com.intellij.codeInsight.template.postfix.templates.PostfixTemplateExpressionSelectorBase
import com.intellij.openapi.editor.Document
import com.intellij.openapi.util.{Condition, Conditions}
import com.intellij.psi.PsiElement
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.extensions.PsiClassExt
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
import org.jetbrains.plugins.scala.lang.psi.types.api.ExtractClass
import org.jetbrains.plugins.scala.lang.psi.types.{ScType, api}
import org.jetbrains.plugins.scala.lang.surroundWith.surrounders.expression.ScalaExpressionSurrounder

import scala.annotation.tailrec
import scala.collection.JavaConverters

/**
 * Selects the chain of ancestor expressions that a postfix template may be
 * applied to: starting from the innermost `ScExpression` at the caret, it
 * walks up the PSI tree collecting every acceptable parent that ends before
 * the caret offset.
 *
 * @author Roman.Shein
 * @since 08.09.2015.
 */
sealed abstract class AncestorSelector(condition: Condition[PsiElement]) extends PostfixTemplateExpressionSelectorBase(condition) {

  // Combine the base filters with the PSI-error filter so that expressions
  // containing syntax errors are never offered.
  override final protected def getFilters(offset: Int): Condition[PsiElement] =
    Conditions.and(super.getFilters(offset), getPsiErrorFilter)

  // Returns the innermost expression at the context element plus all of its
  // acceptable ancestors, as a Java list (platform API contract).
  override final def getNonFilteredExpressions(context: PsiElement,
                                               document: Document,
                                               offset: Int): ju.List[PsiElement] =
    PsiTreeUtil.getParentOfType(context, classOf[ScExpression], false) match {
      case expression: ScExpression =>
        import JavaConverters._
        iterateOverParents(expression, expression :: Nil)(offset).asJava
      case _ => ju.Collections.emptyList()
    }

  // Subclasses narrow which parents may join the ancestor chain.
  protected def isAcceptable(current: PsiElement): Boolean = current != null

  // Walks up the tree while parents are acceptable and end before `offset`.
  // A parent whose text equals the current head's text replaces it (avoids
  // offering the same-looking expression twice). Result is innermost-first.
  @tailrec
  private def iterateOverParents(element: PsiElement, result: List[PsiElement])
                                (implicit offset: Int): List[PsiElement] = element.getParent match {
    case current if isAcceptable(current) && current.getTextRange.getEndOffset <= offset =>
      val newTail = result match {
        case head :: tail if head.getText == current.getText => tail
        case list => list
      }

      iterateOverParents(current, current :: newTail)
    case _ => result.reverse
  }
}

object AncestorSelector {

  /** Offers every ancestor expression (no extra filtering). */
  final case class SelectAllAncestors(private val condition: Condition[PsiElement] = AnyExpression)
    extends AncestorSelector(condition)

  object SelectAllAncestors {

    def apply(surrounder: ScalaExpressionSurrounder): SelectAllAncestors =
      new SelectAllAncestors(surrounder)
  }

  /** Offers only ancestors that are themselves `ScExpression`s. */
  final case class SelectTopmostAncestors(private val condition: Condition[PsiElement] = BooleanExpression)
    extends AncestorSelector(condition) {

    override protected def isAcceptable(current: PsiElement): Boolean =
      current.isInstanceOf[ScExpression]
  }

  object SelectTopmostAncestors {

    def apply(surrounder: ScalaExpressionSurrounder): SelectTopmostAncestors =
      new SelectTopmostAncestors(surrounder)
  }

  // Accepts any Scala expression.
  val AnyExpression: Condition[PsiElement] =
    (_: PsiElement).isInstanceOf[ScExpression]

  // Accepts expressions whose type conforms to Boolean.
  val BooleanExpression: Condition[PsiElement] = expressionTypeCondition {
    case (expression, scType) => scType.conforms(api.Boolean(expression))
  }

  /** Condition accepting expressions whose type's class is one of (or a
   *  subtype of) the classes named by `fqns`. */
  def isSameOrInheritor(fqns: String*): Condition[PsiElement] = expressionTypeCondition {
    case (expression, ExtractClass(clazz)) =>
      val elementScope = expression.elementScope
      fqns.flatMap(elementScope.getCachedClass)
        .exists(clazz.sameOrInheritor)
    case _ => false
  }

  // Lifts a predicate over (expression, type) into a PsiElement condition;
  // non-expressions and untyped expressions are rejected.
  private[this] def expressionTypeCondition(isValid: (ScExpression, ScType) => Boolean): Condition[PsiElement] = {
    case expression: ScExpression =>
      expression.getTypeIgnoreBaseType.exists {
        isValid(expression, _)
      }
    case _ => false
  }

  // Lets a surrounder be used directly where a Condition is expected.
  private[this] implicit def surrounderToCondition(surrounder: ScalaExpressionSurrounder): Condition[PsiElement] =
    surrounder.isApplicable(_: PsiElement)
}
jastice/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/completion/postfix/templates/selector/AncestorSelector.scala
Scala
apache-2.0
3,941
package gapt.prooftool

import java.awt.event.{ ActionEvent, KeyEvent }
import javax.swing.KeyStroke

import scala.swing.event.Key
import scala.swing.{ Action, CheckMenuItem, FileChooser, MenuItem }

/**
 * An object that contains some common menu buttons.
 *
 * Each factory method builds a Swing menu item wired to the corresponding
 * callback of the given [[ProofToolViewer]]; most items also register a
 * keyboard accelerator.
 */
object MenuButtons {

  /**
   *
   * @param main An instance of ProoftoolViewer
   * @return A menu button that calls main's fOpen function.
   */
  def openButton( main: ProofToolViewer[_] ) = new MenuItem( Action( "Open ..." ) {
    main.fOpen()
  } ) {
    mnemonic = Key.O
    this.peer.setAccelerator( KeyStroke.getKeyStroke( KeyEvent.VK_O, ActionEvent.CTRL_MASK ) )
  }

  /**
   *
   * @param main An instance of ProoftoolViewer
   * @return A menu button that calls main's exportToPDF function.
   */
  def exportToPDFButton( main: ProofToolViewer[_] ) = new MenuItem( Action( "Export to PDF" ) {
    main.fExportPdf( main.mainComponent )
  } ) {
    mnemonic = Key.D
    this.peer.setAccelerator( KeyStroke.getKeyStroke( KeyEvent.VK_D, ActionEvent.CTRL_MASK ) )
  }

  /**
   *
   * @param main An instance of ProoftoolViewer
   * @return A menu button that calls main's exportToPNG function.
   */
  def exportToPNGButton( main: ProofToolViewer[_] ) = new MenuItem( Action( "Export to PNG" ) {
    main.fExportPng( main.mainComponent )
  } ) {
    mnemonic = Key.N
    this.peer.setAccelerator( KeyStroke.getKeyStroke( KeyEvent.VK_N, ActionEvent.CTRL_MASK ) )
  }

  /**
   *
   * @param main An instance of ProoftoolViewer
   * @return A menu button that calls main's increaseFontSize function.
   */
  def increaseFontSizeButton( main: ProofToolViewer[_] ) = new MenuItem( Action( "Increase font size" ) {
    main.increaseFontSize()
  } ) {
    this.peer.setAccelerator( KeyStroke.getKeyStroke( KeyEvent.VK_UP, ActionEvent.ALT_MASK ) )
  }

  /**
   *
   * @param main An instance of ProoftoolViewer
   * @return A menu button that calls main's decreaseFontSize function.
   */
  def decreaseFontSizeButton( main: ProofToolViewer[_] ) = new MenuItem( Action( "Decrease font size" ) {
    main.decreaseFontSize()
  } ) {
    this.peer.setAccelerator( KeyStroke.getKeyStroke( KeyEvent.VK_DOWN, ActionEvent.ALT_MASK ) )
  }

  /**
   *
   * @param main An instance of ProoftoolViewer with Savable.
   * @return A menu button that calls main's saveAs function.
   */
  def saveAsButton[T]( main: ProofToolViewer[T] with Savable[T] ) = new MenuItem( Action( "Save as..." ) {
    main.fSave( main.name, main.content )
  } ) {
    mnemonic = Key.S
    this.peer.setAccelerator( KeyStroke.getKeyStroke( KeyEvent.VK_S, ActionEvent.CTRL_MASK ) )
  }

  /**
   *
   * @param main An instance of ProoftoolViewer with ContainsLKProof.
   * @return A menu button that calls main's hideStructuralRules/showAllRules function.
   */
  def hideStructuralRulesButton( main: ProofToolViewer[_] with ContainsLKProof ) = new CheckMenuItem( "Hide structural rules" ) {
    outer =>

    action = Action( "Hide structural rules" ) {
      if ( outer.selected )
        main.hideStructuralRules()
      else
        main.showAllRules()
    }
  }

  /**
   *
   * @param main An instance of ProoftoolViewer with ContainsSequentProof.
   * @return A menu button that calls main's hideSequentContext/showAllFormulas function.
   */
  def hideContextsButton( main: ProofToolViewer[_] with ContainsSequentProof ) = new CheckMenuItem( "Hide sequent contexts" ) {
    outer =>

    action = Action( "Hide sequent contexts" ) {
      if ( outer.selected )
        main.hideSequentContext()
      else
        main.showAllFormulas()
    }
  }

  /**
   * NOTE(review): method name is missing a 'k' ("marCut..."); renaming would
   * break callers, so it is only noted here.
   *
   * @param main An instance of ProoftoolViewer with ContainsLKProof.
   * @return A menu button that calls main's markCutAncestors/unmarkCutAncestors function.
   */
  def marCutAncestorsButton( main: ProofToolViewer[_] with ContainsLKProof ) = new CheckMenuItem( "Mark cut ancestors" ) {
    outer =>

    action = Action( "Mark cut ancestors" ) {
      if ( outer.selected )
        main.markCutAncestors()
      else
        main.unmarkCutAncestors()
    }
  }

  /** Menu button that clears every marking in the viewer. */
  def removeAllMarkingsButton( main: ProofToolViewer[_] with ContainsSequentProof ) = new MenuItem(
    Action( "Remove all markings" ) {
      main.removeAllMarkings()
    } )

  // NOTE(review): name starts with an uppercase letter, unlike the sibling
  // factory methods; kept as-is because renaming would change the public API.
  def ShowDebugBordersButton( main: ProofToolViewer[_] ) = new CheckMenuItem( "Show debug borders" ) {
    outer =>

    action = Action( "Show debug borders" ) {
      if ( outer.selected )
        main.publisher.publish( ShowDebugBorders )
      else
        main.publisher.publish( HideDebugBorders )
    }
  }
}

trait ContainsSequentProof {
  /**
   * Hides all formulas except main and auxiliary ones.
   */
  def hideSequentContext(): Unit

  /**
   * Shows all formulas in the proof
   */
  def showAllFormulas(): Unit

  /**
   * Removes all markings.
   */
  def removeAllMarkings(): Unit
}

/**
 * A trait for ProofToolViewer objects that contain (old or new) LK proofs.
 */
trait ContainsLKProof extends ContainsSequentProof {
  /**
   * Hides structural rules in the proof.
   */
  def hideStructuralRules(): Unit

  /**
   * Shows all rules in the proof.
   */
  def showAllRules(): Unit

  /**
   * Marks the ancestors of cut formulas.
   */
  def markCutAncestors(): Unit

  /**
   * Unmarks the ancestors of cut formulas.
   */
  def unmarkCutAncestors(): Unit
}
gapt/gapt
core/src/main/scala/gapt/prooftool/MenuButtons.scala
Scala
gpl-3.0
5,309
package algorithms

import data.Problems
import types.Types.ProblemData

import scala.util.Random

/**
 * Variable Neighborhood Search: repeatedly perturbs the current best solution
 * with a neighborhood size that grows on failure (k = 1..maxK), re-runs local
 * search from the perturbed point, and restarts at k = 1 on improvement.
 *
 * NOTE(review): `randomSolution`, `mutate` and `cost` are not defined in this
 * file — presumably provided by the enclosing `algorithms` package; confirm.
 *
 * Created by alejandro on 23/04/16.
 */
object VNS {

  // Maximum neighborhood index before the search terminates.
  val maxK = 5

  def apply(inputs: ProblemData, random: Random) = {
    val (n, _, _) = inputs

    val initialSolution = randomSolution(n, random)

    var k = 1
    var best = LocalSearch(inputs, random, startFrom = Some(initialSolution))
    var bestCost = cost(inputs, best)

    while (k <= maxK) {
      // Perturbation strength scales with k: n/(9-k) elements are mutated
      // (denominator stays >= 4 since k <= maxK = 5).
      val mutated = mutate(best, n / (9 - k), random)
      val newSol = LocalSearch(inputs, random, startFrom = Some(mutated))
      val newCost = cost(inputs, newSol)

      if (newCost < bestCost) {
        // Improvement: accept and restart from the smallest neighborhood.
        best = newSol
        bestCost = newCost
        k = 1
      } else {
        k += 1
      }
    }

    best
  }
}
Truji92/MH-algoritmos-basados-en-trayectorias
src/main/scala/algorithms/VNS.scala
Scala
mit
785
/* sxr-helper-maven-plugin
 * Copyright 2010 Olivier Michallat */
package com.github.olim7t

import java.io.{File, PrintStream, FileOutputStream}
import org.apache.maven.plugin._
import org.apache.maven.project.MavenProject
import org.apache.maven.plugin.reactor.SuperProjectSorter
import org.apache.maven.artifact.ArtifactUtils
import org.scala_tools.maven.mojo.annotations._
import scala.collection.JavaConversions._
import org.codehaus.plexus.util.dag.DAG

/**
 * For each module of the project, generate an sxr link file pointing to the modules it depends on.
 * This will enable cross-module when generating the Vim data files.
 *
 * Note: all the work is currently done at the parent project level, which means that the plugin
 * must not be inherited. This also caused a problem when the link file was generated in the target
 * directory and 'clean generate-sources' was executed, as the parent project would generate the
 * files and the 'clean' of each module would delete them. I've avoided this by not generating in
 * target.
 * A better approach would be to have each module handle its link file itself.
 */
@goal("write-link-file")
@phase("generate-sources")
class WriteLinkFileMojo extends AbstractMojo with SxrMojo {

  @throws(classOf[MojoExecutionException])
  override def execute() {
    // Topologically sort the reactor so dependency edges are available.
    val sorter = new SuperProjectSorter(collectedProjects)
    val dag = sorter.getDAG()

    for (module <- modules) writeLinkFile(sxrIndex(module), moduleDependencies(module, dag))

    // Writes a global link file in the root project, to be used by external projects
    log.info(sxrIndex(project).getAbsolutePath)
    writeLinkFile(sxrIndex(project), modules)
  }

  // Directory where sxr output for a module lives (next to its classes dir).
  private def sxrDir(module: MavenProject) = new File(module.getBuild.getOutputDirectory + ".sxr")

  // Location of the link file, deliberately outside target/ (see class doc).
  private def sxrIndex(module: MavenProject) = new File(module.getBasedir, "sxr.links")

  // Finds which modules a module depends on
  private def moduleDependencies(module: MavenProject, dag: DAG): Seq[MavenProject] = {
    val projectId = makeProjectId(module)
    require(dag.getVertex(projectId) != null)
    val depIds = dag.getChildLabels(projectId)
    // Keep only reactor modules that actually contain Scala sources.
    for {
      i <- depIds ; depId = i.asInstanceOf[String]
      dep <- modules.find(makeProjectId(_) == depId) if hasScalaSources(dep)
    } yield dep
  }

  private def writeLinkFile(file: File, dependencies: Seq[MavenProject]) {
    // NOTE(review): "\\n" is a literal backslash-n, not a newline — presumably
    // the separator expected by the sxr link-file format; confirm.
    val linkList = dependencies.map(sxrDir(_).toURI + "/").mkString("\\n")
    write(linkList, file)
  }
}
olim7t/sxr-helper-maven-plugin
src/main/scala/WriteLinkFileMojo.scala
Scala
bsd-3-clause
2,474
/*
 * Copyright (c) 2012, TU Berlin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * * Neither the name of the TU Berlin nor the
 * names of its contributors may be used to endorse or promote products
 * derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL TU Berlin BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

package de.tuberlin.uebb.sl2.modules

import de.tuberlin.uebb.sl2.modules.Syntax.{ VarFirstClass }

import scala.collection.immutable.List.{ fill }
import scala.util.Either.{ cond }

/**
 * Splits letrec expressions of the enriched lambda calculus into minimal
 * let/letrec groups, based on a dependency analysis of their definitions.
 */
trait LetRecSplitter {

  self: Syntax with EnrichedLambdaCalculus with Graph[VarFirstClass] with Errors =>

  /**
   * Letrec splitting
   *
   * This function performs a dependency analysis for each letrec-
   * expression and splits it into minimal let- and letrec-expressions.
   *
   * @param boundVars Variables bound in outer context
   * @param expr Expression to split
   * @return The split expression, or an error (e.g. for undefined variables)
   */
  def splitLetRecs(boundVars: Set[VarFirstClass], expr: ELC): Either[Error, ELC] = expr match {

    case ELetRec(defs, body, attr) => {
      /*
       * Build a dependency graph; the left-hand sides of the letrec are the
       * vertices of the graph and edges `f' -> `g' are added whenever the
       * definition of the left-hand side `f' requires the definition of the
       * left-hand side `g'.
       */
      for (
        gs <- buildDepGraph(boundVars, expr).right;

        /*
         * Split the right-hand sides of the letrec's definitions which
         * might contain letrecs themselves. Care has to be taken regarding
         * the outer context. For each definition v = e only those left-hand
         * sides of the letrec may be added to the outer context the expression
         * e depends on, i.e., those v has a dependency edge to. Otherwise,
         * expressions in which inner recursive functions shadow outer bindings
         * are not split correctly, like:
         *
         *   letrec f    = \\ x . x
         *          main = letrec f = \\ x . case x <= 1 of
         *                                     True  -> 1
         *                                     False -> f (x - 1)
         *                 in f 3
         *   in main
         *
         * If we put {f, main} in the outer context when splitting the
         * right-hand side of the `main' definition, the inner `f' is not
         * recognized as recursive
         */
        splitRhs <- {
          val (graph, succs) = gs
          def splitRhs(d: EDefinition) = {
            // Only the letrec lhs vars this definition depends on are visible.
            val visibleLhsVars = succs.get(d.lhs).get.toSet
            splitLetRecs(boundVars union visibleLhsVars, d.rhs)
          }
          errorMap(defs, splitRhs).right
        };

        /*
         * Split the body. In contrast to the right-hand sides of the
         * definitions we split the body with all left-hand sides in
         * the outer context, since they are all truly visible in the body.
         */
        splitBody <- {
          val lhsVars = defs.map(_.lhs).toSet
          splitLetRecs(boundVars union lhsVars, body).right
        }
      ) yield {
        val (graph, succs) = gs

        // Perform the actual splitting
        val leftHandSides = defs map (_.lhs)
        val signatures = leftHandSides.zip(defs.map(_.sig)).toMap
        val splitDefs = leftHandSides.zip(splitRhs).toMap
        val sccs = topologicalSort(stronglyConnectedComponents(graph), graph)

        /*
         * Split the definition(s) in the strongly connected component.
         * Non-recursive functions with no dependencies will be transformed
         * into a simple, non-recursive let-binding. Otherwise, we generate
         * a recursive let-binding for each strongly connected component.
         */
        def split(scc: Set[VarFirstClass], body: ELC) = {
          // A definition depends on no other function definitions
          if (scc.size == 1) {
            val lhs = scc.head
            val lhsSuccessors = succs.get(lhs).get
            val definition = EDefinition(lhs, signatures.get(lhs).get, splitDefs.get(lhs).get, attr)

            // Recursive function in definition (self-edge in the graph)
            if (lhsSuccessors contains lhs)
              ELetRec(List(definition), body, attr)
            // Non-recursive function in definition
            else
              ELet(definition, body, attr)
          }
          // A definition depends on other function definitions
          else {
            val definitions: List[EDefinition] = scc.map {
              lhs => EDefinition(lhs, signatures.get(lhs).get, splitDefs.get(lhs).get, attr)
            }.toList
            ELetRec(definitions, body, attr)
          }
        }

        // Fold the topologically sorted SCCs around the (already split) body.
        sccs.foldRight(splitBody)(split)
      }
    }

    /*
     * The following cases are syntax traversal splitting each
     * subexpression. Lexical scoping of non-recursive let-,
     * lambda-, and case-expressions is respected by adding the
     * bound variables to 'splitLetRecs' first argument.
     */
    case EApp(fun, arg, attr) => {
      for (
        splitFun <- splitLetRecs(boundVars, fun).right;
        splitArg <- splitLetRecs(boundVars, arg).right
      ) yield EApp(splitFun, splitArg, attr)
    }

    case ELam(pattern, body, attr) => {
      for (splitBody <- splitLetRecs(boundVars union pattern.vars.map(Syntax.Var(_)).toSet, body).right)
        yield ELam(pattern, splitBody, attr)
    }

    case ELet(EDefinition(lhs, sig, rhs, attrD), body, attrL) => {
      for (
        splitRhs <- splitLetRecs(boundVars, rhs).right;
        splitBody <- splitLetRecs(boundVars + lhs, body).right
      ) yield ELet(EDefinition(lhs, sig, splitRhs, attrD), splitBody, attrL)
    }

    case EChoice(choices, attr) => {
      for (splitChoices <- errorMap(choices, (c: ELC) => splitLetRecs(boundVars, c)).right)
        yield EChoice(splitChoices, attr)
    }

    case ECase(expr, alts, attr) => {
      def splitLetRecsAlt(alt: EAlternative) = {
        for (splitExpr <- splitLetRecs(boundVars union alt.pattern.vars.map(Syntax.Var(_)).toSet, alt.expr).right)
          yield EAlternative(alt.pattern, splitExpr, alt.attribute)
      }

      for (
        splitExpr <- splitLetRecs(boundVars, expr).right;
        splitAlts <- errorMap(alts, splitLetRecsAlt).right
      ) yield ECase(splitExpr, splitAlts, attr)
    }

    // Atoms (variables, literals, ...) are returned unchanged.
    case _ => Right(expr)
  }

  // Adjacency: each letrec lhs mapped to the lhs vars its rhs depends on.
  type Successors = Map[VarFirstClass, List[VarFirstClass]]

  type DependencyGraph = (Graph, Successors)

  /**
   * Build dependency graph.
   *
   * This method detects undefined variables in the given expression.
   *
   * @param outerCtx Variables available from the enclosing scope
   * @param expr Must be an `ELetRec`; any other expression yields an error
   */
  def buildDepGraph(outerCtx: Set[VarFirstClass], expr: ELC): Either[Error, DependencyGraph] = {

    // Mutable accumulator for the adjacency map, local to this call.
    var succs: Successors = Map.empty

    /**
     * Record a dependency edge.
     */
    def recordEdge(from: VarFirstClass, to: VarFirstClass) = succs.get(from) match {
      case None => succs = succs + (from -> List(to))
      case Some(vs) => succs = succs + (from -> (to :: vs))
    }

    /**
     * Traverse a definition.
     *
     * @param outerCtx Outer context
     * @param lhs Left-hand sides of current letrec
     * @param d Definition to look at
     */
    def traverseDef(outerCtx: Set[VarFirstClass], lhs: Set[VarFirstClass])(d: EDefinition): Either[Error, Unit] = {
      traverseRhs(d.lhs, lhs, outerCtx, d.rhs)
    }

    /**
     * Traverse the right-hand side of a definition.
     *
     * @param f Name of the current definition (source of vertices added, if any)
     * @param lhs Left-hand sides bound in current letrec
     * @param outerCtx Names available in outer context
     */
    def traverseRhs(f: VarFirstClass, lhs: Set[VarFirstClass], outerCtx: Set[VarFirstClass], expr: ELC): Either[Error, Unit] = expr match {

      case EVar(v, attr) => cond(isKnown(v)(lhs, outerCtx),
        // Only do something if v is bound in the current letrec
        if (isDep(v)(lhs, outerCtx)) recordEdge(f, v),
        UndefinedError("variable", v.toString, attr))

      case EApp(fun, arg, _) => collectErrors(traverseRhs(f, lhs, outerCtx, fun),
        traverseRhs(f, lhs, outerCtx, arg))

      case ELam(pattern, body, _) => {
        // Body has to be traversed with pattern variables in outer context
        val patVars = pattern.vars.map(Syntax.Var(_).asInstanceOf[VarFirstClass]).toSet
        traverseRhs(f, lhs, outerCtx union patVars, body)
      }

      case ELet(EDefinition(v, _, rhs, _), body, _) => {
        // Similar to lambda-abstraction
        collectErrors(traverseRhs(f, lhs, outerCtx, rhs),
          traverseRhs(f, lhs, outerCtx + v, body))
      }

      case ELetRec(defs, body, _) => {
        /*
         * Right-hand sides are traversed with all left-hand sides
         * in the outer context. This realizes that bindings in inner
         * letrecs shadow bindings in outer letrecs.
         */
        val knownVars = outerCtx union (defs map (_.lhs)).toSet
        val rightHandSides = defs map (_.rhs)
        for (
          _ <- errorMap(rightHandSides, (e: ELC) => traverseRhs(f, lhs, knownVars, e)).right;
          _ <- traverseRhs(f, lhs, knownVars, body).right
        ) yield ()
      }

      case EChoice(choices, _) => {
        for (_ <- errorMap(choices, (e: ELC) => traverseRhs(f, lhs, outerCtx, e)).right)
          yield ()
      }

      case ECase(e, alts, _) => {
        /*
         * Traverse each branch of the case expression like a
         * lambda-abstraction or a let-binding.
         */
        def traverseAlt(alt: EAlternative) = {
          val patVars = alt.pattern.vars.map(Syntax.Var(_).asInstanceOf[VarFirstClass]).toSet
          traverseRhs(f, lhs, outerCtx union patVars, alt.expr)
        }

        for (
          _ <- traverseRhs(f, lhs, outerCtx, e).right;
          _ <- errorMap(alts, traverseAlt).right
        ) yield ()
      }

      // Literals and other atoms introduce no dependencies.
      case _ => Right()
    }

    /**
     * Variable 'v' is known in the left-hand sides of the current letrec
     * or is available in the outer context, i.e., 'v' is known at all.
     */
    def isKnown(v: VarFirstClass)(lhs: Set[VarFirstClass], outerCtx: Set[VarFirstClass]) = {
      (lhs contains v) || (outerCtx contains v)
    }

    /**
     * Variable 'v' is known in the left-hand sides of the current letrec and is
     * not in the outer context, i.e., a dependency edge has to be inserted for v.
     */
    def isDep(v: VarFirstClass)(lhs: Set[VarFirstClass], outerCtx: Set[VarFirstClass]) = {
      (lhs contains v) && (!outerCtx.contains(v))
    }

    expr match {
      case ELetRec(defs, _, _) => {
        val leftHandSides = defs map { (d: EDefinition) => d.lhs }

        /* Set up the graph with the left-hand sides as vertices and no edges. */
        succs = (leftHandSides map { (lhs: VarFirstClass) => (lhs -> Nil) }).toMap

        for (_ <- errorMap(defs, traverseDef(outerCtx, leftHandSides.toSet)).right) yield {
          val vertices = succs.keySet
          // Expand the adjacency map into explicit (from, to) edge pairs.
          val edges = succs.toList.flatMap { case (from, tos) => fill(tos.length)(from) zip tos }
          (directedGraph(vertices, edges), succs)
        }
      }
      case _ => Left(GenericError("Dependency graph of non letrec requested"))
    }
  }
}
mzuber/simple-language
src/main/scala/modules/LetRecSplitter.scala
Scala
bsd-3-clause
11,911
/*
 * Copyright (c) 2014 - 2015 Contributor. All rights reserved.
 */
package org.scalaide.debug.internal.expression
package proxies.primitives

import org.scalaide.debug.internal.expression.context.JdiContext

import com.sun.jdi.DoubleValue

/**
 * JdiProxy implementation for `double`, `scala.Double` and `java.lang.Double`.
 */
case class DoubleJdiProxy(__context: JdiContext, __value: DoubleValue)
  extends PrimitiveJdiProxy[Double, DoubleJdiProxy, DoubleValue](DoubleJdiProxy) {

  // Unwraps the remote JDI value; the unchecked cast to I is required by the
  // generic signature inherited from PrimitiveJdiProxy.
  override def __primitiveValue[I]: I = this.__value.value.asInstanceOf[I]
}

object DoubleJdiProxy extends PrimitiveJdiProxyCompanion[Double, DoubleJdiProxy, DoubleValue](TypeNames.Double) {

  // Mirrors a local Double into a JDI DoubleValue in the debugged VM.
  protected def mirror(value: Double, context: JdiContext): DoubleValue = context.mirrorOf(value)
}
dragos/scala-ide
org.scala-ide.sdt.debug.expression/src/org/scalaide/debug/internal/expression/proxies/primitives/DoubleJdiProxy.scala
Scala
bsd-3-clause
780
package microtools.models

import org.scalacheck.Arbitrary.arbString
import org.scalacheck.Prop.forAll
import org.scalacheck.Prop.BooleanOperators
import org.scalacheck.{Arbitrary, Properties}
import play.api.libs.json.{JsError, JsString, Json}

/**
 * Property-based tests for the JSON codec of `CustomerSubject`:
 * serialization format, round-trip, and error reporting.
 */
class CustomerSubjectSpec extends Properties("CustomerSubjectProperties") {
  // Generate arbitrary subjects from arbitrary string ids.
  implicit val arbCustomerSubject: Arbitrary[CustomerSubject] = Arbitrary(
    Arbitrary.arbString.arbitrary.map(CustomerSubject(_))
  )

  // Subjects serialize with a "customer/" prefix.
  property("JSON format") = {
    Json.toJson(CustomerSubject("XYC3299")) == JsString("customer/XYC3299")
  }

  // Serialization followed by deserialization is the identity.
  property("JSON serialization") = forAll { customerSubject: CustomerSubject =>
    val jsValue = Json.toJson(customerSubject)

    val deserialized = jsValue.as[CustomerSubject]

    customerSubject == deserialized
  }

  // Strings lacking the "customer" prefix must fail with a message that
  // mentions the expected type.
  property("Proper error messages for invalid JSON") = forAll { s: String =>
    (!s.startsWith("customer") && s.nonEmpty) ==> {
      val error = JsString(s).validate[CustomerSubject].asInstanceOf[JsError]

      error.toString contains "CustomerSubject"
    }
  }
}
21re/play-micro-tools
src/test/scala/microtools/models/CustomerSubjectSpec.scala
Scala
mit
1,063
/*
 * Copyright (c) 2016 Tinkoff
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ru.tinkoff.aerospike.dsl

import com.aerospike.client.Key
import com.aerospike.client.async.IAsyncClient
import com.aerospike.client.large.{LargeList, LargeMap, LargeSet, LargeStack}
import com.aerospike.client.policy.WritePolicy

import scala.concurrent.{ExecutionContext, Future}

/**
 * Asynchronous accessors for Aerospike Large Data Type (LDT) handles.
 * Each method wraps the corresponding blocking client call in a Future
 * running on the provided execution context.
 *
 * Created by danylee on 11/09/16.
 */
trait CollectionsProvider {

  // The underlying Aerospike client; supplied by the implementor.
  def client: IAsyncClient

  // Execution context used to run the wrapped blocking calls.
  implicit val ec: ExecutionContext

  def getLargeList(policy: WritePolicy, key: Key, binName: String): Future[LargeList] =
    Future(client.getLargeList(policy, key, binName))

  def getLargeSet(policy: WritePolicy, key: Key, binName: String, userModule: String): Future[LargeSet] =
    Future(client.getLargeSet(policy, key, binName, userModule))

  def getLargeStack(policy: WritePolicy, key: Key, binName: String, userModule: String): Future[LargeStack] =
    Future(client.getLargeStack(policy, key, binName, userModule))

  def getLargeMap(policy: WritePolicy, key: Key, binName: String, userModule: String): Future[LargeMap] =
    Future(client.getLargeMap(policy, key, binName, userModule))
}
TinkoffCreditSystems/aerospike-scala
src/main/scala/ru/tinkoff/aerospike/dsl/CollectionsProvider.scala
Scala
apache-2.0
1,692
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.crunch.scrunch

import org.apache.crunch.{PObject => JPObject}
import org.apache.crunch.Target

/**
 * Represents a singleton value that results from a distributed computation.
 *
 * @param native The Java PObject that backs this Scala PObject.
 * @tparam T The type of value encapsulated by this PObject.
 */
class PObject[T] private (private val native: JPObject[T]) {

  /**
   * Gets the value associated with this PObject.  Calling this method will trigger
   * whatever computation is necessary to obtain the value and block until that computation
   * succeeds.
   *
   * @return The value associated with this PObject.
   */
  def value(): T = native.getValue()
}

/**
 * The companion object for PObject that provides factory methods for creating PObjects.
 */
protected[scrunch] object PObject {

  /**
   * Creates a new Scala PObject from a Java PObject.
   *
   * @param native The Java PObject that will back this Scala PObject.
   * @tparam T The type of value encapsulated by the PObject.
   * @return A Scala PObject backed by the Java PObject specified.
   */
  def apply[T](native: JPObject[T]): PObject[T] = new PObject[T](native)
}
abeppu/incubator-crunch
crunch-scrunch/src/main/scala/org/apache/crunch/scrunch/PObject.scala
Scala
apache-2.0
1,978
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka.api

import org.apache.kafka.common.record.RecordVersion

/**
 * This class contains the different Kafka versions.
 * Right now, we use them for upgrades - users can configure the version of the API brokers will use to communicate between themselves.
 * This is only for inter-broker communications - when communicating with clients, the client decides on the API version.
 *
 * Note that the ID we initialize for each version is important.
 * We consider a version newer than another, if it has a higher ID (to avoid depending on lexicographic order)
 *
 * Since the api protocol may change more than once within the same release and to facilitate people deploying code from
 * trunk, we have the concept of internal versions (first introduced during the 0.10.0 development cycle). For example,
 * the first time we introduce a version change in a release, say 0.10.0, we will add a config value "0.10.0-IV0" and a
 * corresponding case object KAFKA_0_10_0-IV0. We will also add a config value "0.10.0" that will be mapped to the
 * latest internal version object, which is KAFKA_0_10_0-IV0. When we change the protocol a second time while developing
 * 0.10.0, we will add a new config value "0.10.0-IV1" and a corresponding case object KAFKA_0_10_0-IV1. We will change
 * the config value "0.10.0" to map to the latest internal version object KAFKA_0_10_0-IV1. The config value of
 * "0.10.0-IV0" is still mapped to KAFKA_0_10_0-IV0. This way, if people are deploying from trunk, they can use
 * "0.10.0-IV0" and "0.10.0-IV1" to upgrade one internal version at a time. For most people who just want to use
 * released version, they can use "0.10.0" when upgrading to the 0.10.0 release.
 */
object ApiVersion {
  // This implicit is necessary due to: https://issues.scala-lang.org/browse/SI-8541
  implicit def orderingByVersion[A <: ApiVersion]: Ordering[A] = Ordering.by(_.id)

  val allVersions: Seq[ApiVersion] = Seq(
    KAFKA_0_8_0,
    KAFKA_0_8_1,
    KAFKA_0_8_2,
    KAFKA_0_9_0,
    // 0.10.0-IV0 is introduced for KIP-31/32 which changes the message format.
    KAFKA_0_10_0_IV0,
    // 0.10.0-IV1 is introduced for KIP-36(rack awareness) and KIP-43(SASL handshake).
    KAFKA_0_10_0_IV1,
    // introduced for JoinGroup protocol change in KIP-62
    KAFKA_0_10_1_IV0,
    // 0.10.1-IV1 is introduced for KIP-74(fetch response size limit).
    KAFKA_0_10_1_IV1,
    // introduced ListOffsetRequest v1 in KIP-79
    KAFKA_0_10_1_IV2,
    // introduced UpdateMetadataRequest v3 in KIP-103
    KAFKA_0_10_2_IV0,
    // KIP-98 (idempotent and transactional producer support)
    KAFKA_0_11_0_IV0,
    // introduced DeleteRecordsRequest v0 and FetchRequest v4 in KIP-107
    KAFKA_0_11_0_IV1,
    // Introduced leader epoch fetches to the replica fetcher via KIP-101
    KAFKA_0_11_0_IV2,
    // Introduced LeaderAndIsrRequest V1, UpdateMetadataRequest V4 and FetchRequest V6 via KIP-112
    KAFKA_1_0_IV0,
    // Introduced DeleteGroupsRequest V0 via KIP-229, plus KIP-227 incremental fetch requests,
    // and KafkaStorageException for fetch requests.
    KAFKA_1_1_IV0,
    // Introduced OffsetsForLeaderEpochRequest V1 via KIP-279 (Fix log divergence between leader and follower after fast leader fail over)
    KAFKA_2_0_IV0,
    // Several request versions were bumped due to KIP-219 (Improve quota communication)
    KAFKA_2_0_IV1,
    // Introduced new schemas for group offset (v2) and group metadata (v2) (KIP-211)
    KAFKA_2_1_IV0
  )

  // Map keys are the union of the short and full versions
  private val versionMap: Map[String, ApiVersion] =
    allVersions.map(v => v.version -> v).toMap ++ allVersions.groupBy(_.shortVersion).map { case (k, v) => k -> v.last }

  /**
   * Return an `ApiVersion` instance for `versionString`, which can be in a variety of formats (e.g. "0.8.0", "0.8.0.x",
   * "0.10.0", "0.10.0-IV1"). `IllegalArgumentException` is thrown if `versionString` cannot be mapped to an `ApiVersion`.
   */
  def apply(versionString: String): ApiVersion = {
    val versionSegments = versionString.split('.').toSeq
    // Pre-1.0 releases use three numeric segments ("0.10.0"), later ones two ("2.0").
    val numSegments = if (versionString.startsWith("0.")) 3 else 2
    val key = versionSegments.take(numSegments).mkString(".")
    versionMap.getOrElse(key, throw new IllegalArgumentException(s"Version `$versionString` is not a valid version"))
  }

  def latestVersion: ApiVersion = allVersions.last

  /**
   * Return the minimum `ApiVersion` that supports `RecordVersion`.
   */
  def minSupportedFor(recordVersion: RecordVersion): ApiVersion = {
    recordVersion match {
      case RecordVersion.V0 => KAFKA_0_8_0
      case RecordVersion.V1 => KAFKA_0_10_0_IV0
      case RecordVersion.V2 => KAFKA_0_11_0_IV0
      case _ => throw new IllegalArgumentException(s"Invalid message format version $recordVersion")
    }
  }
}

sealed trait ApiVersion extends Ordered[ApiVersion] {
  def version: String
  def shortVersion: String
  def recordVersion: RecordVersion
  def id: Int

  override def compare(that: ApiVersion): Int =
    ApiVersion.orderingByVersion.compare(this, that)

  override def toString: String = version
}

/**
 * For versions before 0.10.0, `version` and `shortVersion` were the same.
 */
sealed trait LegacyApiVersion extends ApiVersion {
  def version = shortVersion
}

/**
 * From 0.10.0 onwards, each version has a sub-version. For example, IV0 is the sub-version of 0.10.0-IV0.
 */
sealed trait DefaultApiVersion extends ApiVersion {
  lazy val version = shortVersion + "-" + subVersion

  protected def subVersion: String
}

// Keep the IDs in order of versions
case object KAFKA_0_8_0 extends LegacyApiVersion {
  val shortVersion = "0.8.0"
  val recordVersion = RecordVersion.V0
  val id: Int = 0
}

case object KAFKA_0_8_1 extends LegacyApiVersion {
  val shortVersion = "0.8.1"
  val recordVersion = RecordVersion.V0
  val id: Int = 1
}

case object KAFKA_0_8_2 extends LegacyApiVersion {
  val shortVersion = "0.8.2"
  val recordVersion = RecordVersion.V0
  val id: Int = 2
}

case object KAFKA_0_9_0 extends LegacyApiVersion {
  val shortVersion = "0.9.0"
  // NOTE(review): subVersion is unused for legacy versions (LegacyApiVersion
  // does not declare it); kept for binary/source compatibility.
  val subVersion = ""
  val recordVersion = RecordVersion.V0
  val id: Int = 3
}

case object KAFKA_0_10_0_IV0 extends DefaultApiVersion {
  val shortVersion = "0.10.0"
  val subVersion = "IV0"
  val recordVersion = RecordVersion.V1
  val id: Int = 4
}

case object KAFKA_0_10_0_IV1 extends DefaultApiVersion {
  val shortVersion = "0.10.0"
  val subVersion = "IV1"
  val recordVersion = RecordVersion.V1
  val id: Int = 5
}

case object KAFKA_0_10_1_IV0 extends DefaultApiVersion {
  val shortVersion = "0.10.1"
  val subVersion = "IV0"
  val recordVersion = RecordVersion.V1
  val id: Int = 6
}

case object KAFKA_0_10_1_IV1 extends DefaultApiVersion {
  val shortVersion = "0.10.1"
  val subVersion = "IV1"
  val recordVersion = RecordVersion.V1
  val id: Int = 7
}

case object KAFKA_0_10_1_IV2 extends DefaultApiVersion {
  val shortVersion = "0.10.1"
  val subVersion = "IV2"
  val recordVersion = RecordVersion.V1
  val id: Int = 8
}

case object KAFKA_0_10_2_IV0 extends DefaultApiVersion {
  val shortVersion = "0.10.2"
  val subVersion = "IV0"
  val recordVersion = RecordVersion.V1
  val id: Int = 9
}

case object KAFKA_0_11_0_IV0 extends DefaultApiVersion {
  val shortVersion = "0.11.0"
  val subVersion = "IV0"
  val recordVersion = RecordVersion.V2
  val id: Int = 10
}

case object KAFKA_0_11_0_IV1 extends DefaultApiVersion {
  val shortVersion = "0.11.0"
  val subVersion = "IV1"
  val recordVersion = RecordVersion.V2
  val id: Int = 11
}

case object KAFKA_0_11_0_IV2 extends DefaultApiVersion {
  val shortVersion = "0.11.0"
  val subVersion = "IV2"
  val recordVersion = RecordVersion.V2
  val id: Int = 12
}

case object KAFKA_1_0_IV0 extends DefaultApiVersion {
  val shortVersion = "1.0"
  val subVersion = "IV0"
  val recordVersion = RecordVersion.V2
  val id: Int = 13
}

case object KAFKA_1_1_IV0 extends DefaultApiVersion {
  val shortVersion = "1.1"
  val subVersion = "IV0"
  val recordVersion = RecordVersion.V2
  val id: Int = 14
}

case object KAFKA_2_0_IV0 extends DefaultApiVersion {
  val shortVersion: String = "2.0"
  val subVersion = "IV0"
  val recordVersion = RecordVersion.V2
  val id: Int = 15
}

case object KAFKA_2_0_IV1 extends DefaultApiVersion {
  val shortVersion: String = "2.0"
  val subVersion = "IV1"
  val recordVersion = RecordVersion.V2
  val id: Int = 16
}

// NOTE(review): id jumps from 16 to 18 (17 is unused) — presumably reserved
// or left by a removed intermediate version; confirm before renumbering,
// since ids define the version ordering.
case object KAFKA_2_1_IV0 extends DefaultApiVersion {
  val shortVersion: String = "2.1"
  val subVersion = "IV0"
  val recordVersion = RecordVersion.V2
  val id: Int = 18
}
ollie314/kafka
core/src/main/scala/kafka/api/ApiVersion.scala
Scala
apache-2.0
9,357
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.planner.plan.nodes.calcite

import org.apache.flink.table.catalog.{CatalogTable, ObjectIdentifier}
import org.apache.flink.table.connector.sink.DynamicTableSink

import org.apache.calcite.plan.{Convention, RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.RelNode

import java.util

import scala.collection.JavaConversions._

/**
 * Calcite logical relational expression: a [[Sink]] sub-class that writes the
 * rows produced by its input node into a [[DynamicTableSink]].
 *
 * @param staticPartitions partition-spec entries that are fixed at planning time
 */
final class LogicalSink(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    input: RelNode,
    tableIdentifier: ObjectIdentifier,
    catalogTable: CatalogTable,
    tableSink: DynamicTableSink,
    val staticPartitions: Map[String, String])
  extends Sink(cluster, traitSet, input, tableIdentifier, catalogTable, tableSink) {

  /** Re-creates this node over the (single) new input, carrying all sink metadata over. */
  override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
    val newInput = inputs.get(0)
    new LogicalSink(
      cluster,
      traitSet,
      newInput,
      tableIdentifier,
      catalogTable,
      tableSink,
      staticPartitions)
  }
}

object LogicalSink {

  /** Builds a [[LogicalSink]] in the NONE convention of the input's cluster. */
  def create(
      input: RelNode,
      tableIdentifier: ObjectIdentifier,
      catalogTable: CatalogTable,
      tableSink: DynamicTableSink,
      staticPartitions: Map[String, String] = Map()): LogicalSink = {
    val cluster = input.getCluster
    new LogicalSink(
      cluster,
      cluster.traitSetOf(Convention.NONE),
      input,
      tableIdentifier,
      catalogTable,
      tableSink,
      staticPartitions)
  }
}
tzulitai/flink
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/calcite/LogicalSink.scala
Scala
apache-2.0
2,372
package be.objectify.batch.concurrent

/**
 * Callback notified when a batch job has finished.
 *
 * NOTE(review): parameter semantics inferred from names only — presumably
 * `processed` is the number of items handled and `errors` the number of
 * failures; confirm against the callers (not visible in this file).
 */
trait OnFinishListener {

  def jobFinished(processed: Int, errors: Int)
}
schaloner/akka-batch
src/main/scala/be/objectify/batch/concurrent/OnFinishListener.scala
Scala
apache-2.0
124
package org.akozlov.chapter07

import scalax.collection.Graph
import scalax.collection.edge._
import scalax.collection.GraphPredef._
import scalax.collection.GraphEdge._
import scalax.collection.edge.Implicits._

/**
 * An example of JSON pretty print: serializes a small influence diagram
 * (a graph of labeled directed edges) to JSON and echoes it pretty-printed.
 */
object InfluenceDiagramToJson extends App {

  // Influence diagram: nodes are quoted names, each edge labeled with its kind
  // (Forecast / Decision / Deterministic / Probabilistic).
  val g = Graph[String,LDiEdge](("'Weather'" ~+> "'Weather Forecast'")("Forecast"),
    ("'Weather Forecast'" ~+> "'Vacation Activity'")("Decision"),
    ("'Vacation Activity'" ~+> "'Satisfaction'")("Deterministic"),
    ("'Weather'" ~+> "'Satisfaction'")("Deterministic"),
    ("'Satisfaction'" ~+> "'Recommend to a Friend'")("Probabilistic"))

  import scalax.collection.io.json.descriptor.predefined.{LDi}
  import scalax.collection.io.json.descriptor.StringNodeDescriptor
  import scalax.collection.io.json._

  // Tells the graph JSON exporter how to encode nodes (plain strings) and
  // labeled directed edges (under the section name "Edge").
  val descriptor = new Descriptor[String](
    defaultNodeDescriptor = StringNodeDescriptor,
    defaultEdgeDescriptor = LDi.descriptor[String,String]("Edge")
  )

  import net.liftweb.json._

  // Export the graph to a JSON string, re-parse it with lift-json, and render
  // it with pretty formatting.
  println(Printer.pretty(JsonAST.render(JsonParser.parse(g.toJson(descriptor)))))
}
alexvk/ml-in-scala
chapter07/src/main/scala/Json.scala
Scala
unlicense
1,077
package com.nextgendata.framework.maps

/**
 * Created by Craig on 2016-04-27.
 *
 * This is a stackable trait or decorator that can be mixed in with a [[Mapper]]
 * implementation.
 *
 * It will detect when a get does not return a value (lookup fails) and then invoke
 * the log function.
 *
 * The log function is left abstract so that the application can implement as needed, for
 * example write a message into their database, or application log file.  Due to this, the
 * application must mix in an additional trait which implements this function.
 *
 * Example:
 * {{{
 *   val myMapper = Job.sc.broadcast(new MyMap(MyMap())
 *                     with Logging[MyMapKey, MyMapVal] with StdOutLogging[MyMapKey, MyMapVal]
 * }}}
 */
trait Logging[K, V] extends Mapper[K, V] {
  /**
   * This method intercepts a Mapper's get call and detects when the lookup fails (returns
   * an empty result), then calls the log function to record the failed lookup before
   * returning the (unchanged) result.
   *
   * @param srcVal the source key to look up in the underlying Mapper
   * @return the mapped value from the underlying Mapper, or None if the lookup failed
   */
  abstract override def get(srcVal: K): Option[V] = {
    val mappedVal = super.get(srcVal)

    // Only failed lookups are logged; successful ones pass through untouched.
    if (mappedVal.isEmpty) log(srcVal, mappedVal)

    mappedVal
  }

  /**
   * Records a failed lookup. Left abstract so the application decides the destination
   * (database, log file, stdout, ...).
   *
   * @param srcVal    the key whose lookup failed
   * @param mappedVal the (empty) lookup result
   */
  def log (srcVal: K, mappedVal: Option[V]): Unit
}
craigjar/nextgendata
src/main/scala/com/nextgendata/framework/maps/Logging.scala
Scala
apache-2.0
1,214
/***
 *   Copyright 2016 Rackspace US, Inc.
 *
 *   Licensed under the Apache License, Version 2.0 (the "License");
 *   you may not use this file except in compliance with the License.
 *   You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS,
 *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *   See the License for the specific language governing permissions and
 *   limitations under the License.
 */
package com.rackspace.com.papi.components.checker.wadl

import com.rackspace.com.papi.components.checker.{LogAssertions, TestConfig}
import org.apache.logging.log4j.Level
import org.junit.runner.RunWith
import org.scalatestplus.junit.JUnitRunner

import scala.xml._

/**
 * Verifies that a checker configured with xsdEngine = "SaxonEE" logs, at DEBUG
 * level, that SaxonEE is the engine used for checker validation.
 */
@RunWith(classOf[JUnitRunner])
class WADLCheckerCheckXSDEngineSuiteSaxonEE extends BaseCheckerSpec with LogAssertions {

  // Checker validation enabled, with the XSD engine forced to SaxonEE.
  val config = {
    val c = TestConfig()
    c.validateChecker = true
    c.xsdEngine="SaxonEE"
    c
  }

  scenario ("Load a WADL document check XSDEngine") {
    Given("a WADL document that ends in")
    val inWADL =
      <application xmlns="http://wadl.dev.java.net/2009/02">
        <grammars/>
        <resources base="https://test.api.openstack.com">
          <resource path="/">
            <resource path="element">
              <method name="GET">
                <response status="200"/>
              </method>
            </resource>
          </resource>
        </resources>
      </application>
    When("the document is loaded...")
    // Capture all DEBUG-level output emitted while the checker is built; the
    // built checker itself is not otherwise needed by this assertion.
    val goodCheckerLog = log (Level.DEBUG) {
      val checker = builder.build (inWADL,config)
    }
    // Fixed typo/grammar in the BDD description ("messages ... emmited").
    Then ("An appropriate DEBUG message should be emitted.")
    assert(goodCheckerLog,"Using SaxonEE for checker validation")
  }
}
rackerlabs/api-checker
core/src/test/scala/com/rackspace/com/papi/components/checker/wadl/WADLCheckerCheckXSDEngineSuiteSaxonEE.scala
Scala
apache-2.0
1,940
package sims.test

import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers

import sims.math._
import sims.collision._

/**
 * Specs for intersection of linear primitives (segments and rays).
 * Construction invariants, segment/segment and ray/segment overlap cases.
 */
class LinearOverlapTest extends FlatSpec with ShouldMatchers {

  // --- Segment construction and self-intersection -------------------------

  "A segment" should "throw IllegalArgumentException if both of its " +
  "vertices degenerate into a single one" in {
    evaluating {
      val s1 = Segment(Vector2D(0,0), Vector2D(0,0))
    } should produce [IllegalArgumentException]
  }

  it should "not intersect with itself" in {
    val s1 = Segment(Vector2D(2, 2), Vector2D(3, 5))
    s1 intersection s1 should equal (None)
  }

  // --- Segment vs segment -------------------------------------------------

  "Two segments" should "have an intersection point if they intersect" in {
    val s1 = Segment(Vector2D(0, 0), Vector2D(3, 1))
    val s2 = Segment(Vector2D(0, 1), Vector2D(3, -2))
    s1 intersection s2 should not equal (None)
  }

  it should "have an intersection point if they share a vertice" in {
    val s1 = Segment(Vector2D(1, 2), Vector2D(3, 1))
    val s2 = Segment(Vector2D(3, 1), Vector2D(3, -2))
    s1 intersection s2 should not equal (None)
  }

  it should "have an intersection point if one contains one of the other's vertices" in {
    val s1 = Segment(Vector2D(2, 4), Vector2D(3, 100))
    val s2 = Segment(Vector2D(1, 3), Vector2D(3, 5))
    s1 intersection s2 should not equal (None)
  }

  it should "not have an intersection point if they are parallel" in {
    val s1 = Segment(Vector2D(0, 0), Vector2D(3, 1))
    val s2 = Segment(Vector2D(0, 1), Vector2D(3, 2))
    s1 intersection s2 should equal (None)
  }

  // Collinear overlap is deliberately treated as "no single intersection point".
  it should "not have an intersection point if they are parallel and lie on each other" in {
    val s1 = Segment(Vector2D(2, 2), Vector2D(6, 6))
    val s2 = Segment(Vector2D(3, 3), Vector2D(4, 4))
    s1 intersection s2 should equal (None)
  }

  // --- Ray vs segment -----------------------------------------------------

  "A ray and a segment" should "have an intersection point if they intersect" in {
    val r1 = Ray(Vector2D(3, 5), Vector2D(3, -1))
    val s1 = Segment(Vector2D(6.32, math.sqrt(4.0)), Vector2D(10, 15.5))
    r1 intersection s1 should not equal (None)
  }

  it should "have an intersection point if they share a vertice" in {
    val r1 = Ray(Vector2D(3, 4), Vector2D(2, 1))
    val s1 = Segment(Vector2D(0, 10), Vector2D(3, 4))
    r1 intersection s1 should not equal (None)
  }

  it should "have an intersection point if the ray contains one of the segment's vertices" in {
    val r1 = Ray(Vector2D(0, 0), Vector2D(1, 2))
    val s1 = Segment(Vector2D(2, 4), Vector2D(5, 4))
    r1 intersection s1 should not equal (None)
  }

  it should "have an intersection point if the segment contains the ray's vertice" in {
    val r1 = Ray(Vector2D(0, math.Pi), Vector2D(1, 2))
    val s1 = Segment(Vector2D(0, 0), Vector2D(0, 4))
    r1 intersection s1 should not equal (None)

    val r2 = Ray(Vector2D(2, 3), Vector2D(-2, -1))
    val s2 = Segment(Vector2D(0, 4), Vector2D(4, 2))
    r2 intersection s2 should not equal (None)
  }

  it should "not have an intersection point if they are parallel" in {
    val r1 = Ray(Vector2D(2, 3), Vector2D(3, 4))
    val s1 = Segment(Vector2D(1, 4), Vector2D(4, 8))
    r1 intersection s1 should equal (None)
  }

  it should "not have an intersection point if they lie on each other" in {
    val r1 = Ray(Vector2D(0, 1), Vector2D(2, 3))
    val s1 = Segment(Vector2D(-1, 0), Vector2D(4, 4))
    r1 intersection s1 should equal (None)
  }
}
jodersky/sims2
src/test/scala/sims/test/LinearOverlapTest.scala
Scala
bsd-3-clause
3,263
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kafka.tools

import java.util.{Arrays, Properties}

import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.clients.producer._
import org.apache.kafka.common.utils.Utils

import scala.collection.JavaConversions._

/**
 * This class records the average end to end latency for a single message to travel through Kafka
 *
 * broker_list = location of the bootstrap broker for both the producer and the consumer
 * num_messages = # messages to send
 * producer_acks = See ProducerConfig.ACKS_DOC
 * message_size_bytes = size of each message in bytes
 *
 * e.g. [localhost:9092 test 10000 1 20]
 */
object EndToEndLatency {
  // Max time (ms) to wait in poll() for each echoed message before aborting.
  private val timeout: Long = 60000

  def main(args: Array[String]) {
    if (args.length != 5 && args.length != 6) {
      System.err.println("USAGE: java " + getClass.getName + " broker_list topic num_messages producer_acks message_size_bytes [optional] ssl_properties_file")
      System.exit(1)
    }

    // Positional CLI arguments; SSL properties file is optional (6th argument).
    val brokerList = args(0)
    val topic = args(1)
    val numMessages = args(2).toInt
    val producerAcks = args(3)
    val messageLen = args(4).toInt
    val sslPropsFile = if (args.length == 6) args(5) else ""

    // The measurement relies on the produce being acknowledged before polling.
    if (!List("1", "all").contains(producerAcks))
      throw new IllegalArgumentException("Latency testing requires synchronous acknowledgement. Please use 1 or all")

    // Consumer: fresh group per run, manual commit, start from latest offsets.
    val consumerProps = if (sslPropsFile.equals("")) new Properties() else Utils.loadProps(sslPropsFile)
    consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
    consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "test-group-" + System.currentTimeMillis())
    consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
    consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")
    consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer")
    consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer")
    consumerProps.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "0") //ensure we have no temporal batching

    val consumer = new KafkaConsumer[Array[Byte], Array[Byte]](consumerProps)
    consumer.subscribe(List(topic))

    // Producer: no lingering, block as long as needed, acks as requested.
    val producerProps = if (sslPropsFile.equals("")) new Properties() else Utils.loadProps(sslPropsFile)
    producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
    producerProps.put(ProducerConfig.LINGER_MS_CONFIG, "0") //ensure writes are synchronous
    producerProps.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, Long.MaxValue.toString)
    producerProps.put(ProducerConfig.ACKS_CONFIG, producerAcks.toString)
    producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
    producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
    val producer = new KafkaProducer[Array[Byte], Array[Byte]](producerProps)

    // Clean-up path used by both success and the error branches below.
    def finalise() {
      consumer.commitSync()
      producer.close()
      consumer.close()
    }

    //Ensure we are at latest offset. seekToEnd evaluates lazily, that is to say actually performs the seek only when
    //a poll() or position() request is issued. Hence we need to poll after we seek to ensure we see our first write.
    consumer.seekToEnd()
    consumer.poll(0)

    var totalTime = 0.0
    val latencies = new Array[Long](numMessages)

    for (i <- 0 until numMessages) {
      val message = randomBytesOfLen(messageLen)
      val begin = System.nanoTime

      //Send message (of random bytes) synchronously then immediately poll for it
      producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, message)).get()
      val recordIter = consumer.poll(timeout).iterator

      val elapsed = System.nanoTime - begin

      //Check we got results
      if (!recordIter.hasNext) {
        finalise()
        throw new RuntimeException(s"poll() timed out before finding a result (timeout:[$timeout])")
      }

      //Check result matches the original record
      val sent = new String(message)
      val read = new String(recordIter.next().value())
      if (!read.equals(sent)) {
        finalise()
        throw new RuntimeException(s"The message read [$read] did not match the message sent [$sent]")
      }

      //Check we only got the one message
      if (recordIter.hasNext) {
        var count = 1
        for (elem <- recordIter) count += 1
        throw new RuntimeException(s"Only one result was expected during this test. We found [$count]")
      }

      //Report progress
      if (i % 1000 == 0)
        println(i + "\\t" + elapsed / 1000.0 / 1000.0)
      totalTime += elapsed
      latencies(i) = elapsed / 1000 / 1000
    }

    //Results: average plus 50th/99th/99.9th percentile latencies (ms).
    println("Avg latency: %.4f ms\\n".format(totalTime / numMessages / 1000.0 / 1000.0))
    Arrays.sort(latencies)
    val p50 = latencies((latencies.length * 0.5).toInt)
    val p99 = latencies((latencies.length * 0.99).toInt)
    val p999 = latencies((latencies.length * 0.999).toInt)
    println("Percentiles: 50th = %d, 99th = %d, 99.9th = %d".format(p50, p99, p999))
    finalise()
  }

  // Random printable payload: bytes 65..90 ('A'..'Z').
  def randomBytesOfLen(len: Int): Array[Byte] = {
    Array.fill(len)((scala.util.Random.nextInt(26) + 65).toByte)
  }
}
samaitra/kafka
core/src/main/scala/kafka/tools/EndToEndLatency.scala
Scala
apache-2.0
6,141
package TAPLcomp2.fullref

import scala.text.Document
import scala.text.Document._

// outer means that the term is the top-level term
/**
 * Pretty-printer for fullref types and terms, producing `scala.text.Document`
 * layouts. Each `p*` level mirrors one precedence level of the grammar;
 * parentheses are inserted only when a construct appears below its own level.
 */
object FullRefPrinter {

  import TAPLcomp2.Print._

  // Top precedence level for types: Ref/Source/Sink prefixes, else arrows.
  def ptyType(outer: Boolean, ty: Ty): Document = ty match {
    case TyRef(tyT) => "Ref " :: ptyAType(false, tyT)
    case TySource(tyT) => "Source " :: ptyAType(false, tyT)
    case TySink(tyT) => "Sink " :: ptyAType(false, tyT)
    case ty => ptyArrowType(outer, ty)
  }

  // Right-associative function arrows.
  def ptyArrowType(outer: Boolean, tyT: Ty): Document = tyT match {
    case TyArr(tyT1, tyT2) => g2(ptyAType(false, tyT1) :: " ->" :/: ptyArrowType(outer, tyT2))
    case tyT => ptyAType(outer, tyT)
  }

  // Atomic types; anything else is parenthesized.
  def ptyAType(outer: Boolean, tyT: Ty): Document = tyT match {
    case TyVar(x) => x
    case TyBot => "Bot"
    case TyTop => "Top"
    case TyBool => "Bool"
    case TyVariant(fields) =>
      // Positional fields (label equals its 1-based index) omit the "label:" part.
      def pf(i: Int, li: String, tyTi: Ty): Document =
        if (i.toString() == li) {
          ptyType(false, tyTi)
        } else {
          li :: ":" :/: ptyType(false, tyTi)
        }
      "<" :: fields.zipWithIndex.map { case ((li, tyTi), i) => pf(i + 1, li, tyTi) }.
        reduceLeftOption(_ :: "," :/: _).getOrElse(empty) :: ">"
    case TyString => "String"
    case TyUnit => "Unit"
    case TyRecord(fields) =>
      // Same positional-label convention as variants.
      def pf(i: Int, li: String, tyTi: Ty): Document =
        if (i.toString() == li) {
          ptyType(false, tyTi)
        } else {
          g0(li :: ":" :/: ptyType(false, tyTi))
        }
      g2("{" :: fields.zipWithIndex.map { case ((li, tyTi), i) => pf(i + 1, li, tyTi) }.reduceLeftOption(_ :: "," :/: _).getOrElse(empty) :: "}")
    case TyNat => "Nat"
    case TyFloat => "Float"
    case tyT => "(" :: ptyType(outer, tyT) :: ")"
  }

  // Entry point for types.
  def ptyTy(ty: Ty) = ptyType(true, ty)

  // Top precedence level for terms: if/case/abs/let/fix/assign.
  def ptmTerm(outer: Boolean, t: Term): Document = t match {
    case TmIf(t1, t2, t3) =>
      val ifB = g2("if" :/: ptmTerm(outer, t1))
      val thenB = g2("then" :/: ptmTerm(outer, t2))
      val elseB = g2("else" :/: ptmTerm(outer, t3))
      g0(ifB :/: thenB :/: elseB)
    case TmCase(t, cases) =>
      def pc(li: String, xi: String, ti: Term): Document = {
        "<" :: li :: "=" :: xi :: ">==>" :: ptmTerm(false, ti)
      }
      g2("case " :: ptmTerm(false, t) :: " of" :/: cases.map { case (x, y, z) => pc(x, y, z) }.foldRight(empty: Document)(_ :/: "|" :: _))
    case TmAbs(x, tyT1, t2) =>
      val abs = g0("lambda" :/: x :: ":" :/: ptyType(false, tyT1) :: ".")
      val body = ptmTerm(outer, t2)
      g2(abs :/: body)
    case TmLet(x, t1, t2) =>
      g0("let " :: x :: " = " :: ptmTerm(false, t1) :/: "in" :/: ptmTerm(false, t2))
    case TmFix(t1) =>
      g2("fix " :: ptmTerm(false, t1))
    case TmAssign(t1, t2) =>
      g2(ptmAppTerm(false, t1) :/: ":=" :/: ptmAppTerm(false, t2))
    case t => ptmAppTerm(outer, t)
  }

  // Application level: juxtaposition plus the unary prefix operators.
  def ptmAppTerm(outer: Boolean, t: Term): Document = t match {
    case TmApp(t1, t2) => g2(ptmAppTerm(false, t1) :/: ptmATerm(false, t2))
    case TmRef(t1) => "ref " :: ptmATerm(false, t1)
    case TmDeref(t1) => "!" :: ptmATerm(false, t1)
    case TmPred(t1) => "pred " :: ptmATerm(false, t1)
    case TmIsZero(t1) => "iszero " :: ptmATerm(false, t1)
    case t => ptmPathTerm(outer, t)
  }

  // Record projection (t.l).
  def ptmPathTerm(outer: Boolean, t: Term): Document = t match {
    case TmProj(t1, l) => ptmATerm(false, t1) :: "." :: l
    case t1 => ptmAscribeTerm(outer, t1)
  }

  // Type ascription (t as T).
  def ptmAscribeTerm(outer: Boolean, t: Term): Document = t match {
    case TmAscribe(t1, tyT1) => g0(ptmAppTerm(false, t1) :/: "as " :: ptyType(false, tyT1))
    case t1 => ptmATerm(outer, t1)
  }

  // Atomic terms; anything else is parenthesized.
  def ptmATerm(outer: Boolean, t: Term): Document = t match {
    case TmInert(tyT) => "inert[" :: ptyType(false, tyT) :: "]"
    case TmTrue => "true"
    case TmFalse => "false"
    case TmTag(l, t, ty) =>
      g2("<" :: l :: "=" :: ptmTerm(false, t) :: ">" :/: "as " :: ptyType(outer, ty))
    case TmVar(x) => x
    case TmString(s) => "\"" :: s :: "\""
    case TmUnit => "unit"
    case TmRecord(fields) =>
      // Positional fields (label equals its 1-based index) omit the "label=" part.
      def pf(i: Int, li: String, t: Term): Document =
        if (i.toString() == li) {
          ptmTerm(false, t)
        } else {
          li :: "=" :: ptmTerm(false, t)
        }
      "{" :: fields.zipWithIndex.map { case ((li, tyTi), i) => pf(i + 1, li, tyTi) }.
        reduceLeftOption(_ :: "," :/: _).getOrElse(empty) :: "}"
    case TmZero => "0"
    case TmSucc(t1) =>
      // Chains of succ over zero print as a numeral; otherwise as "(succ t)".
      def pf(i: Int, t: Term): Document = t match {
        case TmZero => i.toString()
        case TmSucc(s) => pf(i + 1, s)
        case _ => "(succ " :: ptmATerm(false, t1) :: ")"
      }
      pf(1, t1)
    case TmLoc(l) => "<loc #" + l + ">"
    case t => "(" :: ptmTerm(outer, t) :: ")"
  }

  // Entry point for terms.
  def ptm(t: Term) = ptmTerm(true, t)
}
hy-zhang/parser
Scala/Parser/src/TAPLcomp2/fullref/syntax.scala
Scala
bsd-3-clause
4,955
/*
 * Copyright (c) 2015-2017 Toby Weston
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package s4j.scala.chapter13

/**
 * Shows three equivalent ways of writing the same if/else in Scala:
 * fully braced, brace-free over multiple lines, and on a single line.
 */
class If {

  def retire() = ()

  def carryOnWorking() = ()

  def savings: Int = 0

  val age = 23

  // long-hand, with braces around each branch
  if (age > 55) {
    retire()
  } else {
    carryOnWorking()
  }

  // the same condition, braces dropped
  if (age > 55)
    retire()
  else
    carryOnWorking()

  // and the one-liner form
  if (age > 55) retire() else carryOnWorking()
}
tobyweston/learn-scala-java-devs
src/main/scala/s4j/scala/chapter13/If.scala
Scala
apache-2.0
976
package aerospikez.internal.util

/**
 * Compile-time restrictions used by the client API. Each sealed class/trait
 * below is an implicit evidence type whose `implicitNotFound` message turns a
 * missing instance into a readable compile error.
 */
private[aerospikez] object TSafe {

  @annotation.implicitNotFound(msg = """
  That Set has been forced to accept only ${T2} (or a Option[${T2}]) as Value, but you provide a ${T1}:
  """)
  sealed class TypeOf[T1, T2]
  object TypeOf {
    // Evidence exists when T1 is a subtype of T2, or when T1 is Option[T2].
    implicit def f1[T1, T2](implicit ev: T1 <:< T2): TypeOf[T1, T2] = new TypeOf[T1, T2]
    implicit def f2[T1, T2](implicit ev: T1 =:= Option[T2]): TypeOf[T1, T2] = new TypeOf[T1, T2]
  }

  // Fixed typo in message: "Aaerospike" -> "Aerospike" (matches LRestriction's wording).
  @annotation.implicitNotFound(msg = """
  Aerospike support only String, Int, Long and Array[Byte] as Key, but you provide a ${K}:
  """)
  sealed trait KRestriction[K]
  object KRestriction {
    implicit object int extends KRestriction[Int]
    implicit object long extends KRestriction[Long]
    implicit object string extends KRestriction[String]
    implicit object arraybyte extends KRestriction[Array[Byte]]
  }

  // Fixed typo in message: "Aaerospike" -> "Aerospike".
  @annotation.implicitNotFound(msg = """
  Aerospike support only String, Int, Long, Map and List as Value (or a Option[T] where T is any type described above), but you provide a ${V}:
  """)
  sealed class VRestriction[V]
  object VRestriction {
    implicit object int extends VRestriction[Int]
    implicit object long extends VRestriction[Long]
    implicit object string extends VRestriction[String]
    implicit def list[A: VRestriction]: VRestriction[List[A]] = new VRestriction[List[A]]
    implicit def option[A: VRestriction]: VRestriction[Option[A]] = new VRestriction[Option[A]]
    implicit def map[A: VRestriction, B: VRestriction]: VRestriction[Map[A, B]] = new VRestriction[Map[A, B]]

    // This is necessary only if the user has not specified a type argument (defaults to Any).
    implicit object any extends VRestriction[Any]
  }

  @annotation.implicitNotFound(msg = """
  Aerospike support only Int, Long, String, Map and List as Lua Type Result, but you provide a ${LuaV}:
  """)
  sealed class LRestriction[LuaV]
  object LRestriction {
    implicit object int extends LRestriction[Int]
    implicit object long extends LRestriction[Long]
    implicit object string extends LRestriction[String]
    implicit def list[A]: LRestriction[List[A]] = new LRestriction[List[A]]
    implicit def map[A, B]: LRestriction[Map[A, B]] = new LRestriction[Map[A, B]]

    // This is necessary only if the user has not specified a type argument (defaults to Any).
    implicit object any extends LRestriction[Any]
  }

  @annotation.implicitNotFound(msg = """
  Aerospike support only Int and String as Type for Secondary Index, but you provide a ${I}:
  """)
  sealed class IRestriction[I]
  object IRestriction {
    implicit object int extends IRestriction[Int]
    implicit object string extends IRestriction[String]
  }

  trait Empty

  @annotation.implicitNotFound(msg = """
  An explicit type parameter is required.
  """)
  sealed trait =!=[T1, T2]
  object =!= {
    class Impl[T1, T2]
    object Impl {
      // The two deliberately ambiguous instances make `T Impl T` unresolvable,
      // so `T1 =!= T2` can only be summoned when T1 and T2 differ.
      implicit def neq[T1, T2]: T1 Impl T2 = null
      implicit def neqAmbig1[T1]: T1 Impl T1 = null
      implicit def neqAmbig2[T1]: T1 Impl T1 = null
    }
    implicit def f[T1, T2](implicit eV: T1 Impl T2): T1 =!= T2 = null
  }

  // Evidence that picks T2 as the default when the caller supplies no explicit
  // type; the companion's more specific `defaultType` wins over `typePassed`.
  sealed class DefaultTypeTo[T1, T2]
  trait TypePassed {
    implicit def typePassed[T1, T2]: DefaultTypeTo[T1, T2] = new DefaultTypeTo[T1, T2]
  }
  object DefaultTypeTo extends TypePassed {
    implicit def defaultType[T2]: DefaultTypeTo[T2, T2] = new DefaultTypeTo[T2, T2]
  }
}
otrimegistro/aerospikez
src/main/scala/aerospikez/internal/util/TSafe.scala
Scala
mit
3,485
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.zeppelin.rinterpreter import java.io._ import java.nio.file.{Files, Paths} import java.util.Properties import org.apache.spark.SparkContext import org.apache.spark.api.r.RBackendHelper import org.apache.spark.sql.SQLContext import org.apache.zeppelin.interpreter._ import org.apache.zeppelin.rinterpreter.rscala.RClient._ import org.apache.zeppelin.rinterpreter.rscala._ import org.apache.zeppelin.scheduler._ import org.apache.zeppelin.spark.{SparkInterpreter, SparkZeppelinContext} import org.slf4j._ import scala.collection.JavaConversions._ // TODO: Setup rmr, etc. // TODO: Stress-test spark. What happens on close? Etc. 
/**
 * Zeppelin R-interpreter context: wraps one live R process reached through the
 * rscala socket protocol (inherited from RClient) plus the optional SparkR bridge.
 * Instances are created and cached through the companion object's apply.
 */
private[rinterpreter] class RContext(private val sockets: ScalaSockets, debug: Boolean) extends RClient(sockets.in, sockets.out, debug) {

  private val logger: Logger = RContext.logger

  // One FIFO scheduler keyed by this instance, so paragraphs sharing this R session run serially.
  lazy val getScheduler: Scheduler = SchedulerFactory.singleton().createOrGetFIFOScheduler(this.hashCode().toString)

  val backend: RBackendHelper = RBackendHelper()

  // Spark handles stay None until sparkStartup succeeds.
  private var sc: Option[SparkContext] = None
  private var sql: Option[SQLContext] = None
  private var z: Option[SparkZeppelinContext] = None

  // Memoized answers to "is R package X loadable?" so R is only probed once per package.
  val rPkgMatrix = collection.mutable.HashMap[String,Boolean]()

  var isOpen: Boolean = false
  private var isFresh : Boolean = true
  private var property: Properties = null
  private[rinterpreter] var sparkRStarted : Boolean = false

  override def toString() : String = s"""${super.toString()} |\t Open: $isOpen Fresh: $isFresh SparkStarted: $sparkRStarted |\t Progress: $progress |\t Sockets: ${sockets.toString()} """.stripMargin

  // UI progress indicator, kept in [0, 100) by the setters below.
  var progress: Int = 0

  def getProgress: Int = {
    return progress
  }

  def setProgress(i: Int) : Unit = {
    progress = i % 100
  }

  def incrementProgress(i: Int) : Unit = {
    progress = (progress + i) % 100
  }

  // handle properties this way so it can be a mutable object shared with the R Interpreters
  def setProperty(properties: Properties): Unit = synchronized {
    if (property == null) property = properties
    else property.putAll(properties)
  }

  /**
   * Idempotent open. Requires the bundled rzeppelin R package (fails hard without it),
   * then starts the SparkR bridge when a SparkInterpreter is available.
   * isOpen is set even when no SparkInterpreter was found, so plain R still works.
   */
  def open(startSpark : Option[SparkInterpreter]): Unit = synchronized {
    if (isOpen && sparkRStarted) {
      logger.trace("Reusing rContext.")
      return
    }
    testRPackage("rzeppelin", fail = true, message = "The rinterpreter cannot run without the rzeppelin package, which was included in your distribution.")
    startSpark match {
      case Some(x : SparkInterpreter) => {
        sparkStartup(x)
      }
      case _ => logger.error("Could not find a SparkInterpreter")
    }
    isOpen = true
  }

  // Wires this R session to Spark: loads SparkR from SPARK_HOME, starts the
  // RBackend, connects R to it, and registers the contexts inside R.
  // Returns silently (Spark-less mode) when SPARK_HOME is unset.
  private def sparkStartup(startSpark : SparkInterpreter): Unit = try {
    val sparkHome: String = System.getenv("SPARK_HOME") match {
      case null => {
        logger.error("SPARK_HOME is not set. The R Interpreter will start without Spark.")
        return
      }
      case y => y
    }
    testRPackage("SparkR", fail = true, path = sparkHome)
    if (startSpark.getSparkVersion() == null) throw new RuntimeException("No spark version")
    sc = Some(startSpark.getSparkContext())
    sql = Some(startSpark.getSQLContext())
    z = Some(startSpark.getZeppelinContext())
    logger.trace("Registered Spark Contexts")
    backend.init()
    backend.start()
    if (!backend.backendThread.isAlive) throw new RuntimeException("SparkR could not startup because the Backend Thread is not alive")
    logger.trace("Started Spark Backend")
    eval( s"""SparkR:::connectBackend("localhost", ${backend.port})""")
    logger.trace("SparkR backend connected")
    initializeSparkR(sc.get, sql.get, z.get)
    logger.info("Initialized SparkR")
    sparkRStarted = true
  } catch {
    case e: Exception => throw new RuntimeException(""" Could not connect R to Spark. If the stack trace is not clear, check whether SPARK_HOME is set properly.""", e)
  }

  // Mirrors SparkR:::sparkR.init() on the R side: publishes the JavaSparkContext,
  // the SQL/Hive context and this RContext into SparkR's private environment and
  // the R global environment, fetching each via RStatics JVM callbacks.
  private def initializeSparkR(sc : SparkContext, sql : SQLContext, z : SparkZeppelinContext) : Unit = synchronized {
    logger.trace("Getting a handle to the JavaSparkContext")
    eval("assign(\".scStartTime\", as.integer(Sys.time()), envir = SparkR:::.sparkREnv)")
    RStatics.setSC(sc)
    eval( """ |assign( |".sparkRjsc", |SparkR:::callJStatic("org.apache.zeppelin.rinterpreter.RStatics", | "getJSC"), | envir = SparkR:::.sparkREnv)""".stripMargin)
    eval("assign(\"sc\", get(\".sparkRjsc\", envir = SparkR:::.sparkREnv), envir=.GlobalEnv)")
    logger.trace("Established SparkR Context")
    // SparkR uses a different environment slot depending on whether Hive support is on.
    val sqlEnvName = sql match {
      case null => throw new RuntimeException("Tried to initialize SparkR without setting a SQLContext")
      case x : org.apache.spark.sql.hive.HiveContext => ".sparkRHivesc"
      case x : SQLContext => ".sparkRSQLsc"
    }
    RStatics.setSQL(sql)
    eval( s""" |assign( |"${sqlEnvName}", |SparkR:::callJStatic("org.apache.zeppelin.rinterpreter.RStatics", | "getSQL"), | envir = SparkR:::.sparkREnv)""".stripMargin)
    eval( s""" |assign("sqlContext", |get("$sqlEnvName", |envir = SparkR:::.sparkREnv), |envir = .GlobalEnv) """.stripMargin)
    logger.trace("Proving spark")
    val proof = evalS1("names(SparkR:::.sparkREnv)")
    logger.info("Proof of spark is : " + proof.mkString)
    RStatics.setZ(z)
    RStatics.setrCon(this)
    eval( s""" |assign(".rContext", | SparkR:::callJStatic("org.apache.zeppelin.rinterpreter.RStatics", | "getRCon"), | envir = .GlobalEnv) """.stripMargin )
  }

  /**
   * Best-effort shutdown: stops SparkR, the backend and the R process.
   * Each step catches and logs its own failures so the rest still run;
   * isOpen is cleared unconditionally.
   */
  def close(): Unit = synchronized {
    if (isOpen) {
      if (sparkRStarted) {
        try {
          eval("SparkR:::sparkR.stop()")
        } catch {
          // RException here just means the session was already going away.
          case e: RException => {}
          case e: Exception => logger.error("Error closing SparkR", e)
        }
      }
      try {
        backend.close
        // NOTE(review): Thread.stop() is deprecated and unsafe; presumably kept
        // because the backend thread has no cooperative shutdown — confirm.
        backend.backendThread.stop()
      } catch {
        case e: Exception => logger.error("Error closing RContext ", e)
      }
      try {
        exit()
      } catch {
        case e: Exception => logger.error("Shutdown error", e)
      }
    }
    isOpen = false
  }

  /**
   * Checks whether R package `pack` can be loaded, first from `path`/R/lib/ then
   * from the default library paths. Results are cached in rPkgMatrix. When the
   * package is missing: logs (and, if fail, throws) a message that mentions the
   * GPL3 restriction when license is set.
   */
  private[rinterpreter] def testRPackage(pack: String, fail: Boolean = false, license: Boolean = false, message: String = "", path : String = ""): Boolean = synchronized {
    rPkgMatrix.get(pack) match {
      case Some(x: Boolean) => return x
      case None => {}
    }
    evalB0( s"""require('$pack',quietly=TRUE, lib.loc="$path/R/lib/")""") match {
      case true => {
        rPkgMatrix.put(pack, true)
        return (true)
      }
      case false => {
        evalB0(s"require('$pack', quietly=TRUE)") match {
          case true => {
            rPkgMatrix.put(pack, true)
            return true
          }
          case false => {
            rPkgMatrix.put(pack, false)
            val failMessage = s"""The $pack package could not be loaded. """ + { if (license) "We cannot install it for you because it is published under the GPL3 license." else "" } + message
            logger.error(failMessage)
            if (fail) throw new RException(failMessage)
            return (false)
          }
        }
      }
    }
  }

  // Runs at construction time, after the RClient superclass has connected.
  logger.info("RContext Finished Starting")
}

/**
 * Companion: owns the id -> RContext cache and the process bootstrap that
 * launches R, serves the rzeppelin socket server, and performs the version handshake.
 */
object RContext {
  val logger: Logger = LoggerFactory.getLogger(getClass)
  logger.trace("Inside the RContext Object")

  private val contextMap : collection.mutable.HashMap[String, RContext] = collection.mutable.HashMap[String,RContext]()

  // This function is here to work around inconsistencies in the SparkInterpreter startup sequence
  // that caused testing issues
  private[rinterpreter] def resetRcon() : Boolean = synchronized {
    contextMap foreach((con) => {
      con._2.close()
      if (con._2.isOpen) throw new RuntimeException("Failed to close an existing RContext")
      contextMap.remove(con._1)
    })
    return true
  }

  /**
   * Returns the cached context for `id` when it is fresh or still open; otherwise
   * tears everything down and boots a new R process: starts R with rzeppelin's
   * socket server pointed at a temp ports file, connects, and verifies the
   * rscala protocol version.
   */
  def apply( property: Properties, id : String): RContext = synchronized {
    contextMap.get(id) match {
      case Some(x : RContext) if x.isFresh || x.isOpen => return(x)
      case Some(x : RContext) => resetRcon()
      case _ => {}
    }
    val debug: Boolean = property.getProperty("rscala.debug", "false").toBoolean
    val timeout: Int = property.getProperty("rscala.timeout", "60").toInt
    import scala.sys.process._
    logger.trace("Creating processIO")
    // cmd becomes R's stdin writer once the process starts (set from the IO callback).
    var cmd: PrintWriter = null
    val command = RClient.defaultRCmd +: RClient.defaultArguments
    val processCmd = Process(command)
    val processIO = new ProcessIO( o => { cmd = new PrintWriter(o) }, reader("STDOUT DEBUG: "), reader("STDERR DEBUG: "), true )
    val portsFile = File.createTempFile("rscala-", "")
    val processInstance = processCmd.run(processIO)
    // Find rzeppelin
    val libpath : String = if (Files.exists(Paths.get("R/lib"))) "R/lib"
      else if (Files.exists(Paths.get("../R/lib"))) "../R/lib"
      else throw new RuntimeException("Could not find rzeppelin - it must be in either R/lib or ../R/lib")
    // NOTE(review): replaceAll takes a regex; on Windows File.separator is "\"
    // which is an invalid pattern — String.replace would be safer. TODO confirm.
    val snippet = s""" library(lib.loc="$libpath", rzeppelin) rzeppelin:::rServe(rzeppelin:::newSockets('${portsFile.getAbsolutePath.replaceAll(File.separator, "/")}',debug=${if (debug) "TRUE" else "FALSE"},timeout=${timeout})) q(save='no')"""
    // Busy-wait until the process IO callback has handed us R's stdin.
    while (cmd == null) Thread.sleep(100)
    cmd.println(snippet)
    cmd.flush()
    val sockets = RClient.makeSockets(portsFile.getAbsolutePath)
    sockets.out.writeInt(RClient.Protocol.OK)
    sockets.out.flush()
    // Handshake: R sends back its rzeppelin/rscala version; mismatch is only warned about.
    val packVersion = RClient.readString(sockets.in)
    if (packVersion != org.apache.zeppelin.rinterpreter.rscala.Version) {
      logger.warn("Connection to R started but versions don't match " + packVersion + " " + org.apache.zeppelin.rinterpreter.rscala.Version)
    } else {
      logger.trace("Connected to a new R Session")
    }
    val context = new RContext(sockets, debug)
    context.setProperty(property)
    contextMap.put(id, context)
    context
  }
}
joroKr21/incubator-zeppelin
r/src/main/scala/org/apache/zeppelin/rinterpreter/RContext.scala
Scala
apache-2.0
11,285
package de.juergens

import java.time.temporal._
import java.time.{LocalDate => Date}

import de.juergens.util.Ordinal

/**
 * Base class for temporal adjusters that are selected by an ordinal
 * (e.g. "the n-th ...") together with a predicate over a [[TemporalAccessor]].
 *
 * @param ordinal   which occurrence to select
 * @param predicate matches the temporal values the adjuster applies to
 */
abstract class OrdinalAttribute(ordinal:Ordinal, val predicate:(TemporalAccessor)=>Boolean) extends TemporalAdjuster
hjuergens/date-parser
date-rule-combinators/src/main/scala/de/juergens/OrdinalAttribute.scala
Scala
apache-2.0
242
package com.sksamuel.elastic4s.searches

import cats.syntax.either._
import com.sksamuel.elastic4s.HitReader
import com.sksamuel.elastic4s.searches.aggs.RichAggregations
import com.sksamuel.elastic4s.searches.suggestions._
import org.elasticsearch.action.search.{SearchResponse, ShardSearchFailure}
import org.elasticsearch.search.SearchHits

import scala.concurrent.duration._

/**
 * Scala-friendly wrapper around the Java-client [[SearchResponse]].
 * Every member delegates to `original`; no state is kept here.
 */
case class RichSearchResponse(original: SearchResponse) {

  // Number of hits returned in this page (not the total match count).
  def size: Int = original.getHits.hits().length
  def ids: Seq[String] = hits.map(_.id)
  def totalHits: Long = original.getHits.getTotalHits
  def maxScore: Float = original.getHits.getMaxScore

  def hits: Array[RichSearchHit] = original.getHits.getHits.map(RichSearchHit.apply)

  /** Marshalls hits into T, silently dropping hits that fail to unmarshall. */
  def to[T: HitReader]: IndexedSeq[T] = safeTo.flatMap(_.toOption)

  /** Marshalls hits into T, keeping per-hit failures as Left values. */
  def safeTo[T: HitReader]: IndexedSeq[Either[Throwable, T]] = hits.map(_.safeTo[T]).toIndexedSeq

  // scrollId may be null when the request was not a scroll search; use scrollIdOpt.
  def scrollId: String = original.getScrollId
  def scrollIdOpt: Option[String] = Option(scrollId)

  def totalShards: Int = original.getTotalShards
  def successfulShards: Int = original.getSuccessfulShards
  def shardFailures: Array[ShardSearchFailure] = Option(original.getShardFailures).getOrElse(Array.empty)

  def tookInMillis: Long = original.getTookInMillis
  def took: Duration = original.getTookInMillis.millis

  def aggregations: RichAggregations = RichAggregations(original.getAggregations)

  def isEmpty: Boolean = hits.isEmpty
  def nonEmpty: Boolean = hits.nonEmpty

  def suggest: SuggestResult = SuggestResult(original.getSuggest)
  def suggestions = suggest.suggestions

  // NOTE: throws NoSuchElementException when no suggestion has the given name.
  def suggestion(name: String): SuggestionResult = suggest.suggestions.find(_.name == name).get
  def termSuggestion(name: String): TermSuggestionResult = suggestion(name).asInstanceOf[TermSuggestionResult]
  def completionSuggestion(name: String) = suggestion(name).asInstanceOf[CompletionSuggestionResult]
  def phraseSuggestion(name: String): PhraseSuggestionResult = suggestion(name).asInstanceOf[PhraseSuggestionResult]

  def isTimedOut: Boolean = original.isTimedOut

  // Boxed on the Java side, so null (unset) maps to None.
  def isTerminatedEarly: Option[Boolean] = Option[java.lang.Boolean](original.isTerminatedEarly).map(_.booleanValue())

  @deprecated("use resp.aggregations, or resp.original.getAggregations", "2.0.0")
  def getAggregations = original.getAggregations

  // java aliases
  @deprecated("use suggest", "5.0.0")
  def getSuggest = original.getSuggest

  @deprecated("use scrollId or scrollIdOpt", "5.0.0")
  def getScrollId: String = original.getScrollId

  @deprecated("use hits", "5.0.0")
  def getHits: SearchHits = original.getHits

  @deprecated("use took", "5.0.0")
  def getTook = original.getTook

  @deprecated("use tookInMillis", "5.0.0")
  def getTookInMillis = original.getTookInMillis
}
tyth/elastic4s
elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/searches/RichSearchResponse.scala
Scala
apache-2.0
2,749
package scalajs.antdesign

import japgolly.scalajs.react.{React, ReactComponentU_, ReactNode}

import scala.scalajs.js
import scala.scalajs.js.{Dynamic, Object, |}

/**
 * @see https://ant.design/components/popover/#API
 * @note Please ensure that the child node of [[Popover]] accepts onMouseEnter, onMouseLeave, onFocus, onClick event.
 * @param title title of the card
 * @param content content of the card
 */
case class Popover(title: js.UndefOr[String | ReactNode] = js.undefined,
                   content: js.UndefOr[String | ReactNode] = js.undefined) {

  /** Builds the plain JS props object antd expects; unset fields are omitted entirely. */
  def toJS: Object with Dynamic = {
    val p = js.Dynamic.literal()
    title.foreach { x =>
      p.updateDynamic("title")(x.asInstanceOf[js.Any])
    }
    content.foreach { x =>
      p.updateDynamic("content")(x.asInstanceOf[js.Any])
    }
    p
  }

  /** Renders the global antd.Popover component with these props and the given children. */
  def apply(children: ReactNode*): ReactComponentU_ = {
    val f = React.asInstanceOf[js.Dynamic].createFactory(js.Dynamic.global.antd.Popover)
    f(toJS, children.toJsArray).asInstanceOf[ReactComponentU_]
  }
}
mdedetrich/scalajs-antdesign
src/main/scala/scalajs/antdesign/Popover.scala
Scala
bsd-3-clause
1,045
package com.datawizards.sparklocal.datastore.es

import java.util.Date

/**
 * Contract for records destined for a time-series Elasticsearch index:
 * exposes the date used to route the record to its time bucket.
 */
trait ElasticsearchTimeSeriesIndexDate {
  /** The timestamp associated with this record. */
  def getDate: Date
}
piotr-kalanski/spark-local
src/main/scala/com/datawizards/sparklocal/datastore/es/ElasticsearchTimeSeriesIndexDate.scala
Scala
apache-2.0
135
/** Demonstrates symbolic method identifiers: greets `name` on stdout. */
class Hello_+(name: String) {

  /** Prints a greeting for `name`. */
  def ! : Unit = {
    val greeting = "Hello " + name + "!"
    println(greeting)
  }

  /** Prints a question for `name`. */
  def ? : Unit = {
    val question = "How are you " + name + "?"
    println(question)
  }
}
grzegorzbalcerek/scala-book-examples
examples/HelloIds.scala
Scala
mit
112
/*
 * Copyright (c) 2013, Scodec
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package scodec

import scodec.bits.BitVector

/** Result of a decoding operation, which consists of the decoded value and the remaining bits that were not consumed by decoding.
  *
  * @param value     the decoded value
  * @param remainder bits left over after decoding `value`
  */
case class DecodeResult[+A](value: A, remainder: BitVector):

  /** Maps the supplied function over the decoded value. */
  def map[B](f: A => B): DecodeResult[B] = DecodeResult(f(value), remainder)

  /** Maps the supplied function over the remainder. */
  def mapRemainder(f: BitVector => BitVector): DecodeResult[A] = DecodeResult(value, f(remainder))
scodec/scodec
shared/src/main/scala/scodec/DecodeResult.scala
Scala
bsd-3-clause
2,113
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.util

import scala.collection.JavaConverters._

import org.apache.arrow.memory.RootAllocator
import org.apache.arrow.vector.complex.MapVector
import org.apache.arrow.vector.types.{DateUnit, FloatingPointPrecision, TimeUnit}
import org.apache.arrow.vector.types.pojo.{ArrowType, Field, FieldType, Schema}

import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._

/** Bidirectional mapping between Spark SQL types/schemas and Arrow types/schemas. */
private[sql] object ArrowUtils {

  // Shared allocator for all Arrow buffers created through these helpers.
  val rootAllocator = new RootAllocator(Long.MaxValue)

  // todo: support more types.

  /** Maps data type from Spark to Arrow. NOTE: timeZoneId required for TimestampTypes */
  def toArrowType(dt: DataType, timeZoneId: String): ArrowType = dt match {
    case BooleanType => ArrowType.Bool.INSTANCE
    // Integral widths are expressed in bits: 8, 16, 32, 64.
    case ByteType => new ArrowType.Int(8, true)
    case ShortType => new ArrowType.Int(8 * 2, true)
    case IntegerType => new ArrowType.Int(8 * 4, true)
    case LongType => new ArrowType.Int(8 * 8, true)
    case FloatType => new ArrowType.FloatingPoint(FloatingPointPrecision.SINGLE)
    case DoubleType => new ArrowType.FloatingPoint(FloatingPointPrecision.DOUBLE)
    case StringType => ArrowType.Utf8.INSTANCE
    case BinaryType => ArrowType.Binary.INSTANCE
    case DecimalType.Fixed(precision, scale) => new ArrowType.Decimal(precision, scale)
    case DateType => new ArrowType.Date(DateUnit.DAY)
    case TimestampType =>
      if (timeZoneId == null) {
        throw new UnsupportedOperationException(
          s"${TimestampType.catalogString} must supply timeZoneId parameter")
      } else {
        new ArrowType.Timestamp(TimeUnit.MICROSECOND, timeZoneId)
      }
    case _ => throw new UnsupportedOperationException(s"Unsupported data type: ${dt.catalogString}")
  }

  /** Inverse of toArrowType for the scalar types supported above. */
  def fromArrowType(dt: ArrowType): DataType = dt match {
    case ArrowType.Bool.INSTANCE => BooleanType
    case int: ArrowType.Int if int.getIsSigned && int.getBitWidth == 8 => ByteType
    case int: ArrowType.Int if int.getIsSigned && int.getBitWidth == 8 * 2 => ShortType
    case int: ArrowType.Int if int.getIsSigned && int.getBitWidth == 8 * 4 => IntegerType
    case int: ArrowType.Int if int.getIsSigned && int.getBitWidth == 8 * 8 => LongType
    case float: ArrowType.FloatingPoint
      if float.getPrecision() == FloatingPointPrecision.SINGLE => FloatType
    case float: ArrowType.FloatingPoint
      if float.getPrecision() == FloatingPointPrecision.DOUBLE => DoubleType
    case ArrowType.Utf8.INSTANCE => StringType
    case ArrowType.Binary.INSTANCE => BinaryType
    case d: ArrowType.Decimal => DecimalType(d.getPrecision, d.getScale)
    case date: ArrowType.Date if date.getUnit == DateUnit.DAY => DateType
    case ts: ArrowType.Timestamp if ts.getUnit == TimeUnit.MICROSECOND => TimestampType
    case _ => throw new UnsupportedOperationException(s"Unsupported data type: $dt")
  }

  /** Maps field from Spark to Arrow. NOTE: timeZoneId required for TimestampType */
  def toArrowField(
      name: String, dt: DataType, nullable: Boolean, timeZoneId: String): Field = {
    dt match {
      case ArrayType(elementType, containsNull) =>
        val fieldType = new FieldType(nullable, ArrowType.List.INSTANCE, null)
        new Field(name, fieldType,
          Seq(toArrowField("element", elementType, containsNull, timeZoneId)).asJava)
      case StructType(fields) =>
        val fieldType = new FieldType(nullable, ArrowType.Struct.INSTANCE, null)
        new Field(name, fieldType,
          fields.map { field =>
            toArrowField(field.name, field.dataType, field.nullable, timeZoneId)
          }.toSeq.asJava)
      case MapType(keyType, valueType, valueContainsNull) =>
        val mapType = new FieldType(nullable, new ArrowType.Map(false), null)
        // Note: Map Type struct can not be null, Struct Type key field can not be null
        new Field(name, mapType,
          Seq(toArrowField(MapVector.DATA_VECTOR_NAME,
            new StructType()
              .add(MapVector.KEY_NAME, keyType, nullable = false)
              .add(MapVector.VALUE_NAME, valueType, nullable = valueContainsNull),
            nullable = false,
            timeZoneId)).asJava)
      case dataType =>
        val fieldType = new FieldType(nullable, toArrowType(dataType, timeZoneId), null)
        new Field(name, fieldType, Seq.empty[Field].asJava)
    }
  }

  /** Inverse of toArrowField, recursing into map/list/struct children. */
  def fromArrowField(field: Field): DataType = {
    field.getType match {
      case _: ArrowType.Map =>
        // Arrow maps are a list of entry structs: child 0 is the key, child 1 the value.
        val elementField = field.getChildren.get(0)
        val keyType = fromArrowField(elementField.getChildren.get(0))
        val valueType = fromArrowField(elementField.getChildren.get(1))
        MapType(keyType, valueType, elementField.getChildren.get(1).isNullable)
      case ArrowType.List.INSTANCE =>
        val elementField = field.getChildren().get(0)
        val elementType = fromArrowField(elementField)
        ArrayType(elementType, containsNull = elementField.isNullable)
      case ArrowType.Struct.INSTANCE =>
        val fields = field.getChildren().asScala.map { child =>
          val dt = fromArrowField(child)
          StructField(child.getName, dt, child.isNullable)
        }
        StructType(fields)
      case arrowType => fromArrowType(arrowType)
    }
  }

  /** Maps schema from Spark to Arrow. NOTE: timeZoneId required for TimestampType in StructType */
  def toArrowSchema(schema: StructType, timeZoneId: String): Schema = {
    new Schema(schema.map { field =>
      toArrowField(field.name, field.dataType, field.nullable, timeZoneId)
    }.asJava)
  }

  /** Inverse of toArrowSchema. */
  def fromArrowSchema(schema: Schema): StructType = {
    StructType(schema.getFields.asScala.map { field =>
      val dt = fromArrowField(field)
      StructField(field.getName, dt, field.isNullable)
    })
  }

  /** Return Map with conf settings to be used in ArrowPythonRunner */
  def getPythonRunnerConfMap(conf: SQLConf): Map[String, String] = {
    val timeZoneConf = Seq(SQLConf.SESSION_LOCAL_TIMEZONE.key -> conf.sessionLocalTimeZone)
    val pandasColsByName = Seq(SQLConf.PANDAS_GROUPED_MAP_ASSIGN_COLUMNS_BY_NAME.key ->
      conf.pandasGroupedMapAssignColumnsByName.toString)
    val arrowSafeTypeCheck = Seq(SQLConf.PANDAS_ARROW_SAFE_TYPE_CONVERSION.key ->
      conf.arrowSafeTypeConversion.toString)
    Map(timeZoneConf ++ pandasColsByName ++ arrowSafeTypeCheck: _*)
  }
}
goldmedal/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/util/ArrowUtils.scala
Scala
apache-2.0
7,132
/**
 * Base class that defines the shared index representation:
 * a mutable map from an entry's name to its (chapter, section) position.
 */
class Document {
  // Scoped import instead of the original wildcard `scala.collection.mutable._`.
  import scala.collection.mutable

  /** An index maps a title/caption to its (chapter, section) location. */
  type Index = mutable.HashMap[String, (Int, Int)]
}

/**
 * A book maintains two separate indices, one for tables and one for figures.
 * Updated to explicit `: Unit =` result types: the original used procedure
 * syntax (`def f(...) { }`), deprecated in Scala 2.13 and removed in Scala 3.
 */
class Book extends Document {
  val tables = new Index
  val figures = new Index

  /** Records that the table `title` appears at (chapter, section). */
  def addTableRef(title: String, chapter: Int, section: Int): Unit =
    tables(title) = (chapter, section)

  /** Records that the figure `caption` appears at (chapter, section). */
  def addFigureRef(caption: String, chapter: Int, section: Int): Unit =
    figures(caption) = (chapter, section)
}
yeahnoob/scala-impatient-2e-code
src/ch19/sec04/Book.scala
Scala
gpl-3.0
408
package models

import scala.concurrent.duration._
import scala.language.postfixOps

/** Pomodoro timing constants, each returned in milliseconds. */
class TimeConstantsImpl extends TimeConstants {

  /** Length of one work session (25 minutes). */
  def workTime(): Long = 25.minutes.toMillis

  /** Short break between work sessions (5 minutes). */
  def intervalBetweenWorks(): Long = 5.minutes.toMillis

  /** Long break after a full cycle (20 minutes). */
  def bigIntervalBetweenWorks(): Long = 20.minutes.toMillis
}
tomoki/pomodoro-sync
app/models/TimeConstantsImpl.scala
Scala
apache-2.0
302
// IntelliJ "adjust types" test fixture: the /*start*/.../*end*/ markers delimit the
// type to be adjusted, and the trailing block comment holds the expected output.
// Do not edit the markers or the expected-result comment.
object A {
  val comparator: /*start*/java.util.Comparator[java.lang.Boolean]/*end*/ = null
}
/*
import java.lang.Boolean
import java.util.Comparator

object A {
  val comparator: /*start*/Comparator[Boolean]/*end*/ = null
}
*/
triggerNZ/intellij-scala
testdata/adjustTypes/ParameterizedJava.scala
Scala
apache-2.0
228
/*
 * Copyright 2020 CJWW Development
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package helpers.repositories

import com.cjwwdev.mongo.responses._
import helpers.other.Fixtures
import models.Session
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito.{reset, when}
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.BeforeAndAfterEach
import org.scalatest.mockito.MockitoSugar
import org.scalatestplus.play.PlaySpec
import repositories.SessionRepository

import scala.concurrent.Future

/**
 * Test mix-in providing a Mockito mock of [[SessionRepository]] plus one
 * stubbing helper per repository operation. The mock is reset before each test.
 */
trait MockSessionRepository extends BeforeAndAfterEach with MockitoSugar with Fixtures {
  self: PlaySpec =>

  val mockSessionRepository = mock[SessionRepository]

  // Clear recorded interactions/stubs so tests cannot leak into each other.
  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockSessionRepository)
  }

  /** Stubs cacheData to succeed or fail with the matching Mongo create response. */
  def mockCacheDataRepository(success: Boolean): OngoingStubbing[Future[MongoCreateResponse]] = {
    when(mockSessionRepository.cacheData(any())(any(), any()))
      .thenReturn(Future.successful(if(success) MongoSuccessCreate else MongoFailedCreate))
  }

  /** Stubs getSession to return the given optional session. */
  def mockGetSession(session: Option[Session]): OngoingStubbing[Future[Option[Session]]] = {
    when(mockSessionRepository.getSession(any())(any()))
      .thenReturn(Future.successful(session))
  }

  /** Stubs renewSession to succeed or fail with the matching Mongo update response. */
  def mockRenewSession(success: Boolean): OngoingStubbing[Future[MongoUpdatedResponse]] = {
    when(mockSessionRepository.renewSession(any())(any(), any()))
      .thenReturn(Future.successful(if(success) MongoSuccessUpdate else MongoFailedUpdate))
  }

  /** Stubs getSessions to return the given list. */
  def mockGetSessions(sessions: List[Session]): OngoingStubbing[Future[List[Session]]] = {
    when(mockSessionRepository.getSessions(any()))
      .thenReturn(Future.successful(sessions))
  }

  /** Stubs updateSession; the repository reports the outcome as a (key, status) pair. */
  def mockUpdateSession(success: Boolean): OngoingStubbing[Future[(String, String)]] = {
    when(mockSessionRepository.updateSession(any(), any(), any())(any(), any()))
      .thenReturn(
        Future.successful(if(success) "key" -> MongoSuccessUpdate.toString else "key" -> MongoFailedUpdate.toString)
      )
  }

  /** Stubs removeSession to succeed or fail with the matching Mongo delete response. */
  def mockRemoveSession(success: Boolean): OngoingStubbing[Future[MongoDeleteResponse]] = {
    when(mockSessionRepository.removeSession(any())(any(), any()))
      .thenReturn(Future.successful(if(success) MongoSuccessDelete else MongoFailedDelete))
  }

  /** Stubs cleanSession to succeed or fail with the matching Mongo delete response. */
  def mockCleanSession(success: Boolean): OngoingStubbing[Future[MongoDeleteResponse]] = {
    when(mockSessionRepository.cleanSession(any())(any()))
      .thenReturn(Future.successful(if(success) MongoSuccessDelete else MongoFailedDelete))
  }

  /** Stubs validateSession with the given validation outcome. */
  def mockValidateSession(validated: Boolean): OngoingStubbing[Future[Boolean]] = {
    when(mockSessionRepository.validateSession(any())(any()))
      .thenReturn(Future.successful(validated))
  }
}
cjww-development/session-store
test/helpers/repositories/MockSessionRepository.scala
Scala
apache-2.0
3,241
package cuando.types

import play.api.libs.json.JsError
import play.api.libs.json.JsObject
import play.api.libs.json.JsResult
import play.api.libs.json.JsSuccess
import play.api.libs.json.JsValue
import play.api.libs.json.Reads
import play.api.libs.json.Writes
import play.api.libs.json.Json

/**
 * A record identified by one designated key field.
 *
 * @param keyField    name of the field that identifies the record
 * @param keyFieldVal value of that key field
 * @param content     remaining record payload
 */
case class Record(keyField: String, keyFieldVal: KeyFieldVal, content: RecordContent) {
  /** True when both records share the same key field name AND value; content is ignored. */
  def keyEquals(that: Record): Boolean = {
    (this.keyField == that.keyField) && (this.keyFieldVal == that.keyFieldVal)
  }
}

object Record {

  /**
   * Reads a Record from `{"content": {...}}`. The content object must contain
   * `keyField` (enforced via require, so its absence throws rather than JsError);
   * the rest is parsed with the schema-driven RecordContent reader.
   */
  implicit def recReads(keyField: String, path: Path, schema: Schema): Reads[Record] = new Reads[Record] {
    def reads(json: JsValue): JsResult[Record] = {
      val jsContent = (json \\ "content").as[JsObject]
      require(jsContent.keys.contains(keyField))
      val keyFieldVal: KeyFieldVal = jsContent \\ keyField
      val content = jsContent.as[RecordContent](rcReads(keyField, path, schema))
      JsSuccess(Record(keyField, keyFieldVal, content))
    }
  }

  /** Writes the record back as `{"content": {...}}`, re-inserting the key field into the content. */
  implicit val recWrites = new Writes[Record] {
    def writes(rec: Record) = {
      val jsContent = Json.toJson(rec.content).asInstanceOf[JsObject]
      val amendedContent = jsContent + (rec.keyField -> rec.keyFieldVal)
      Json.obj("content" -> amendedContent)
    }
  }
}
cuando-db/cuando-db
src/main/scala/cuando/types/Record.scala
Scala
mit
1,269
package com.twitter.scalding.reducer_estimation

import cascading.flow.{ Flow, FlowStep, FlowStepStrategy }
import com.twitter.algebird.Monoid
import com.twitter.scalding.estimation.{ Estimator, FallbackEstimatorMonoid, FlowStrategyInfo }
import com.twitter.scalding.{ Config, StringUtility }
import java.util.{ List => JList }
import org.apache.hadoop.mapred.JobConf
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._

/**
 * Cascading step strategy that estimates the number of reducers for each
 * MapReduce step, honoring explicit `withReducers` settings unless the
 * estimator override flag is on.
 */
object ReducerEstimatorStepStrategy extends FlowStepStrategy[JobConf] {

  private val LOG = LoggerFactory.getLogger(this.getClass)

  // Combines several estimators so later ones are consulted when earlier ones abstain.
  implicit val estimatorMonoid: Monoid[Estimator[Int]] = new FallbackEstimatorMonoid[Int]

  /**
   * Make reducer estimate, possibly overriding explicitly-set numReducers,
   * and save useful info (such as the default & estimate) in JobConf for
   * later consumption.
   *
   * Called by Cascading at the start of each job step.
   */
  final override def apply(
    flow: Flow[JobConf],
    preds: JList[FlowStep[JobConf]],
    step: FlowStep[JobConf]): Unit = {
    val conf = step.getConfig
    // for steps with reduce phase, mapred.reduce.tasks is set in the jobconf at this point
    // so we check that to determine if this is a map-only step.
    conf.getNumReduceTasks match {
      case 0 => LOG.info(s"${flow.getName} is a map-only step. Skipping reducer estimation.")
      case _ =>
        if (skipReducerEstimation(step)) {
          LOG.info( s""" |Flow step ${step.getName} was configured with reducers |set explicitly (${Config.WithReducersSetExplicitly}=true) and the estimator |explicit override turned off (${Config.ReducerEstimatorOverride}=false). Skipping |reducer estimation. """.stripMargin)
        } else {
          estimate(flow, preds.asScala, step)
        }
    }
  }

  // whether the reducers have been set explicitly with `withReducers`
  private def reducersSetExplicitly(step: FlowStep[JobConf]) =
    step.getConfig.getBoolean(Config.WithReducersSetExplicitly, false)

  // whether we should override explicitly-specified numReducers
  private def overrideExplicitReducers(step: FlowStep[JobConf]) =
    step.getConfig.getBoolean(Config.ReducerEstimatorOverride, false)

  // skip only when reducers were set explicitly AND overrides are disabled
  private def skipReducerEstimation(step: FlowStep[JobConf]) =
    reducersSetExplicitly(step) && !overrideExplicitReducers(step)

  /**
   * Runs the configured estimator chain, caps the estimate at the configured
   * maximum, records both raw and capped values in the JobConf (for tools such
   * as hRaven), and applies the capped value as the step's reducer count.
   */
  private def estimate(
    flow: Flow[JobConf],
    preds: Seq[FlowStep[JobConf]],
    step: FlowStep[JobConf]): Unit = {
    val conf = step.getConfig

    val stepNumReducers = conf.get(Config.HadoopNumReducers)
    // Estimator classes are listed (comma-separated) in the config and loaded reflectively.
    Option(conf.get(Config.ReducerEstimators)).foreach { clsNames =>

      val clsLoader = Thread.currentThread.getContextClassLoader

      val estimators = StringUtility.fastSplit(clsNames, ",")
        .map(clsLoader.loadClass(_).newInstance.asInstanceOf[Estimator[Int]])
      val combinedEstimator = Monoid.sum(estimators)

      val info = FlowStrategyInfo(flow, preds, step)

      // get estimate
      val estimatedNumReducers = combinedEstimator.estimate(info)

      // apply cap if needed
      val cappedNumReducers = estimatedNumReducers.map { n =>
        val configuredMax = conf.getInt(ReducerEstimatorConfig.maxEstimatedReducersKey, ReducerEstimatorConfig.defaultMaxEstimatedReducers)

        if (n > configuredMax) {
          LOG.warn( s""" |Reducer estimator estimated $n reducers, which is more than the configured maximum of $configuredMax. |Will use $configuredMax instead. """.stripMargin)
          configuredMax
        } else {
          n
        }
      }

      // save the estimate and capped estimate in the JobConf which should be saved by hRaven
      conf.setInt(ReducerEstimatorConfig.estimatedNumReducers, estimatedNumReducers.getOrElse(-1))
      conf.setInt(ReducerEstimatorConfig.cappedEstimatedNumReducersKey, cappedNumReducers.getOrElse(-1))
      // set number of reducers
      cappedNumReducers.foreach(conf.setNumReduceTasks)
      // log in JobConf what was explicitly set by 'withReducers'
      if (reducersSetExplicitly(step)) {
        conf.set(ReducerEstimatorConfig.originalNumReducers, stepNumReducers)
      }
    }
  }
}
tdyas/scalding
scalding-core/src/main/scala/com/twitter/scalding/reducer_estimation/ReducerEstimatorStepStrategy.scala
Scala
apache-2.0
4,257
package es.ucm.fdi.sscheck.gen import org.scalacheck.Gen import org.scalacheck.util.Buildable import Buildables.buildableSeq object UtilsGen { /** Like containerOfN but with variable number of elements * */ def containerOfNtoM[C[_], T] (n : Int, m : Int, g : Gen[T]) (implicit evb: Buildable[T, C[T]], evt: (C[T]) => Traversable[T]) : Gen[C[T]] = { for { i <- Gen.choose(n, m) xs <- Gen.containerOfN[C, T](i, g) } yield xs } def buildableOfNtoM[C, T] (n : Int, m : Int, g : Gen[T]) (implicit evb: Buildable[T, C], evt: (C) => Traversable[T]) : Gen[C] = { for { i <- Gen.choose(n, m) xs <- Gen.buildableOfN[C, T](i, g) } yield xs } /** Generates n sequences from g and concatenates them * */ def repN[A](n : Int, g : Gen[Seq[A]]) : Gen[Seq[A]] = { for { xs <- Gen.containerOfN(n, g) } yield xs flatten } /** Generates i sequences from g, with i between n and m, and concatenates them * */ def repNtoM[A](n : Int, m : Int, g : Gen[Seq[A]]) : Gen[Seq[A]] = { for { xs <- containerOfNtoM(n, m, g) } yield xs flatten } /** Returns the generator that results from concatenating the sequences * generated by g1 and g2 * */ def concSeq[A](g1 : Gen[Seq[A]], g2 : Gen[Seq[A]]) : Gen[Seq[A]] = { for { xs <- g1 ys <- g2 } yield xs ++ ys } }
juanrh/spark-testing-base
src/main/scala/es/ucm/fdi/sscheck/gen/UtilsGen.scala
Scala
apache-2.0
1,393
/* Copyright 2013 Twitter, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.twitter.scalding.typed.functions import java.io.Serializable /** * This is a composition of one or more FlatMappings * * For some reason, this fails in scala 2.12 if this is an abstract class */ sealed trait FlatMappedFn[-A, +B] extends (A => TraversableOnce[B]) with Serializable { import FlatMappedFn._ final def runAfter[Z](fn: FlatMapping[Z, A]): FlatMappedFn[Z, B] = this match { case Single(FlatMapping.Identity(ev)) => type F[T] = FlatMapping[Z, T] Single(ev.subst[F](fn)) case notId => fn match { case FlatMapping.Identity(ev) => type F[T] = FlatMappedFn[T, B] ev.reverse.subst[F](this) case notIdFn => Series(notIdFn, notId) // only make a Series without either side being identity } } final def combine[C](next: FlatMappedFn[B, C]): FlatMappedFn[A, C] = { /* * We have to reassociate so the front of the series has the * first flatmap, so we can bail out early when there are no more * items in any flatMap result. 
*/ def loop[X, Y, Z](fn0: FlatMappedFn[X, Y], fn1: FlatMappedFn[Y, Z]): FlatMappedFn[X, Z] = fn0 match { case Single(FlatMapping.Identity(ev)) => type F[T] = FlatMappedFn[T, Z] ev.reverse.subst[F](fn1) case Single(f0) => Series(f0, fn1) case Series(f0f, f1f) => Series(f0f, loop(f1f, fn1)) } loop(this, next) } /** * We interpret this composition once to minimize pattern matching when we execute */ private[this] val toFn: A => TraversableOnce[B] = { import FlatMapping._ def loop[A1, B1](fn: FlatMappedFn[A1, B1]): A1 => TraversableOnce[B1] = fn match { case Single(Identity(ev)) => val const: A1 => TraversableOnce[A1] = FlatMapFunctions.FromIdentity[A1]() type F[T] = A1 => TraversableOnce[T] ev.subst[F](const) case Single(Filter(f, ev)) => val filter: A1 => TraversableOnce[A1] = FlatMapFunctions.FromFilter(f) type F[T] = A1 => TraversableOnce[T] ev.subst[F](filter) case Single(Map(f)) => FlatMapFunctions.FromMap(f) case Single(FlatM(f)) => f case Series(Identity(ev), rest) => type F[T] = T => TraversableOnce[B1] ev.subst[F](loop(rest)) case Series(Filter(f, ev), rest) => type F[T] = T => TraversableOnce[B1] val next = ev.subst[F](loop(rest)) // linter:disable:UndesirableTypeInference FlatMapFunctions.FromFilterCompose(f, next) case Series(Map(f), rest) => val next = loop(rest) // linter:disable:UndesirableTypeInference FlatMapFunctions.FromMapCompose(f, next) case Series(FlatM(f), rest) => val next = loop(rest) // linter:disable:UndesirableTypeInference FlatMapFunctions.FromFlatMapCompose(f, next) } loop(this) } def apply(a: A): TraversableOnce[B] = toFn(a) } object FlatMappedFn extends Serializable { def asId[A, B](f: FlatMappedFn[A, B]): Option[EqTypes[_ >: A, _ <: B]] = f match { case Single(FlatMapping.Identity(ev)) => Some(ev) case _ => None } def asFilter[A, B](f: FlatMappedFn[A, B]): Option[(A => Boolean, EqTypes[(_ >: A), (_ <: B)])] = f match { case Single(filter @ FlatMapping.Filter(_, _)) => Some((filter.fn, filter.ev)) case _ => None } def apply[A, B](fn: A 
=> TraversableOnce[B]): FlatMappedFn[A, B] = fn match { case fmf: FlatMappedFn[A, B] => fmf case rawfn => Single(FlatMapping.FlatM(rawfn)) } def identity[T]: FlatMappedFn[T, T] = Single(FlatMapping.Identity[T, T](EqTypes.reflexive[T])) def fromFilter[A](fn: A => Boolean): FlatMappedFn[A, A] = Single(FlatMapping.Filter[A, A](fn, EqTypes.reflexive)) def fromMap[A, B](fn: A => B): FlatMappedFn[A, B] = Single(FlatMapping.Map(fn)) final case class Single[A, B](fn: FlatMapping[A, B]) extends FlatMappedFn[A, B] final case class Series[A, B, C](first: FlatMapping[A, B], next: FlatMappedFn[B, C]) extends FlatMappedFn[A, C] }
twitter/scalding
scalding-core/src/main/scala/com/twitter/scalding/typed/functions/FlatMappedFn.scala
Scala
apache-2.0
4,684
/* * * * Licensed to STRATIO (C) under one or more contributor license agreements. * * See the NOTICE file distributed with this work for additional information * * regarding copyright ownership. The STRATIO (C) licenses this file * * to you under the Apache License, Version 2.0 (the * * "License"); you may not use this file except in compliance * * with the License. You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, * * software distributed under the License is distributed on an * * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * * KIND, either express or implied. See the License for the * * specific language governing permissions and limitations * * under the License. * * */ package io.miguel0afd.carrier import java.util import javax.cache.configuration.FactoryBuilder import org.apache.ignite.cache.CachePeekMode import org.apache.ignite.configuration.{CacheConfiguration, IgniteConfiguration} import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi import org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder import org.apache.ignite._ import scala.collection.JavaConversions._ case class Fruit(name: String, origin: String) object CarrierApp extends App { //System.setProperty("IGNITE_QUIET", "false"); // Distributed environment val discoverySpi: TcpDiscoverySpi = new TcpDiscoverySpi val ipFinder: TcpDiscoveryMulticastIpFinder = new TcpDiscoveryMulticastIpFinder ipFinder.setMulticastGroup("228.10.10.157") ipFinder.setAddresses(util.Arrays.asList("127.0.0.1:47500..47509")) discoverySpi.setIpFinder(ipFinder) val config: IgniteConfiguration = new IgniteConfiguration config.setDiscoverySpi(discoverySpi) // Persistence val cacheConfig: CacheConfiguration[String, Fruit] = new CacheConfiguration[String, Fruit]() val fp: FakePersistence = new 
FakePersistence cacheConfig.setCacheStoreFactory(FactoryBuilder.factoryOf(fp.getClass)) cacheConfig.setReadThrough(true) cacheConfig.setWriteThrough(true) config.setCacheConfiguration(cacheConfig) val ignite: Ignite = Ignition.start(config) val cc = ignite.configuration.getCacheConfiguration //val ignite: Ignite = Ignition.start("src/resources/ignite-config.xml") //val ignite: Ignite = Ignition.start("src/resources/ignite-config2.xml") //ignite.cluster().localNode().attributes().entrySet().foreach(println) println("Local Port: " + ignite.cluster().localNode().attributes().get("TcpCommunicationSpi.comm.tcp.port")) //val cluster: IgniteCluster = ignite.cluster // Obtain instance of cache named "fruits". // Note that different caches may have different generics. val cache: IgniteCache[String, Fruit] = ignite.getOrCreateCache("fruits") if(cache.size(CachePeekMode.ALL) < 1){ val fruit: Fruit = Fruit("Durian", "Indonesia") cache.put(fruit.name, fruit) val result: Fruit = cache.get(fruit.name) println(result.name + " - " + result.origin) } else { val fruit: Fruit = Fruit("Mango", "India") cache.put(fruit.name, fruit) println("Cache size: " + cache.size(CachePeekMode.PRIMARY)) val result1: Fruit = cache.get("Durian") println(result1.name + " - " + result1.origin) val result2: Fruit = cache.get("Mango") println(result2.name + " - " + result2.origin) } //ignite.close }
miguel0afd/carrier
src/main/scala/io/miguel0afd/carrier/CarrierApp.scala
Scala
apache-2.0
3,529
package effechecka import java.nio.file.Paths import akka.actor.ActorSystem import akka.stream.ActorMaterializer import akka.stream.scaladsl.{FileIO, Sink} import akka.stream.testkit.scaladsl.TestSink import akka.testkit.TestKit import akka.util.ByteString import io.eels.FilePattern import io.eels.component.parquet.{ParquetSink, ParquetSource} import org.apache.hadoop.fs.Path import org.scalatest.{Matchers, WordSpecLike} class ChecklistFetcherHDFSSpec extends TestKit(ActorSystem("IntegrationTest")) with WordSpecLike with Matchers with ChecklistFetcherHDFS with HDFSTestUtil { implicit val materializer = ActorMaterializer()(system) implicit val ec = system.dispatcher private val reqSelector = SelectorParams("Animalia|Insecta", "ENVELOPE(-150,-50,40,10)", "") val req = ChecklistRequest(reqSelector, Some(2)) val req5 = ChecklistRequest(reqSelector, Some(5)) val reqNew = ChecklistRequest(SelectorParams("Aves|Mammalia", "ENVELOPE(-150,-50,40,10)", ""), Some(2)) "HDFS" should { "have access to test resources" in { getClass.getResource("/hdfs-layout/checklist-summary/u0=55/u1=e4/u2=b0/uuid=55e4b0a0-bcd9-566f-99bc-357439011d85/summary.parquet") shouldNot be(null) getClass.getResource("/hdfs-layout/checklist/u0=55/u1=e4/u2=b0/uuid=55e4b0a0-bcd9-566f-99bc-357439011d85/checklist.parquet") shouldNot be(null) } "status existing" in { statusOf(req) shouldBe Some("ready") } "status non-existing" in { statusOf(reqNew) shouldBe None } "taxon name of empty taxon path" in { taxonNameFor(ChecklistItem("||||", 123)) shouldBe "" } "taxon name of non-empty taxon path" in { taxonNameFor(ChecklistItem("|some|none|empty|", 123)) shouldBe "empty" } "return items" in { val checklist = itemsFor(req).toSeq checklist should contain(ChecklistItem("Animalia|Chordata|Aves|Passeriformes|Paridae|Poecile|atricapillus|Poecile atricapillus (Linnaeus, 1766)", 126643)) checklist.length shouldBe 2 } "return source" in { println(ByteString(116, 97, 120, 111, 110, 78, 97, 109, 101, 9, 116, 97, 120, 111, 110, 
80, 97, 116, 104, 9, 114, 101, 99, 111, 114, 100, 67, 111, 117, 110, 116).utf8String) println(ByteString.fromString("bla").utf8String) val probe = tsvFor(req) .runWith(TestSink.probe[ByteString]) probe .request(3) .expectNext(ByteString.fromString("taxonName\\ttaxonPath\\trecordCount")) val items = List(probe.expectNext().utf8String, probe.expectNext().utf8String) items should contain("\\nPoecile atricapillus (Linnaeus, 1766)\\tAnimalia|Chordata|Aves|Passeriformes|Paridae|Poecile|atricapillus|Poecile atricapillus (Linnaeus, 1766)\\t126643") items should contain("\\nTurdus migratorius Linnaeus, 1766\\tAnimalia|Chordata|Aves|Passeriformes|Turdidae|Turdus|migratorius|Turdus migratorius Linnaeus, 1766\\t114323") probe.expectComplete() } "return 5 items" in { val checklist = itemsFor(req5).toSeq checklist.length shouldBe 5 } "return no items" in { val checklist = itemsFor(reqNew).toSeq checklist shouldNot contain(ChecklistItem("Animalia|Chordata|Aves|Passeriformes|Paridae|Poecile|atricapillus|Poecile atricapillus (Linnaeus, 1766)", 126643)) } } }
jhpoelen/effechecka
src/test/scala/effechecka/ChecklistFetcherHDFSSpec.scala
Scala
mit
3,304
/*** * Copyright 2014 Rackspace US, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.rackspace.com.papi.components.checker.util import javax.xml.xpath.{XPathExpression, XPathExpressionException, XPathConstants} import javax.xml.namespace.QName import org.junit.runner.RunWith import org.scalatest.FunSuite import org.scalatest.junit.JUnitRunner import net.sf.saxon.s9api.XdmMap import scala.collection.JavaConversions._ @RunWith(classOf[JUnitRunner]) class VarXPathExpressionSuite extends FunSuite { val XPATH_VERSION_1 = 10 val XPATH_VERSION_2 = 20 val XPATH_VERSION_3 = 30 val XPATH_VERSION_3_1 = 31 val inDoc = { val parser = XMLParserPool.borrowParser try { parser.newDocument } finally { XMLParserPool.returnParser(parser) } } val nsContext = ImmutableNamespaceContext(Map[String,String]()) test ("XPath 1.0 variable should change between differnt evaluations") { var varExpression : VarXPathExpression = null val expression = "concat('Hello ',$name)" // Good'ol XPath 1.0 concat string try { varExpression = XPathExpressionPool.borrowExpression(expression, nsContext, XPATH_VERSION_1).asInstanceOf[VarXPathExpression] assert (varExpression.evaluate (inDoc, XPathConstants.STRING, Map[QName, Object](new QName("name") -> "Jorge")) == "Hello Jorge") assert (varExpression.evaluate (inDoc, XPathConstants.STRING, Map[QName, Object](new QName("name") -> "Rachel")) == "Hello Rachel") } finally { if (varExpression != null) XPathExpressionPool.returnExpression(expression, nsContext, 
XPATH_VERSION_1, varExpression) } } test ("XPath 2.0 variable should change between differnt evaluations") { var varExpression : VarXPathExpression = null val expression = "upper-case($name)" // upper-case function introduced in 2.0 try { varExpression = XPathExpressionPool.borrowExpression(expression, nsContext, XPATH_VERSION_2).asInstanceOf[VarXPathExpression] assert (varExpression.evaluate (inDoc, XPathConstants.STRING, Map[QName, Object](new QName("name") -> "Jorge")) == "JORGE") assert (varExpression.evaluate (inDoc, XPathConstants.STRING, Map[QName, Object](new QName("name") -> "Rachel")) == "RACHEL") } finally { if (varExpression != null) XPathExpressionPool.returnExpression(expression, nsContext, XPATH_VERSION_2, varExpression) } } test ("XPath 3.0 variable should change between differnt evaluations") { var varExpression : VarXPathExpression = null val expression = "'Hello ' || $name" // This is how you conact strings in 3.0 try { varExpression = XPathExpressionPool.borrowExpression(expression, nsContext, XPATH_VERSION_3).asInstanceOf[VarXPathExpression] assert (varExpression.evaluate (inDoc, XPathConstants.STRING, Map[QName, Object](new QName("name") -> "Jorge")) == "Hello Jorge") assert (varExpression.evaluate (inDoc, XPathConstants.STRING, Map[QName, Object](new QName("name") -> "Rachel")) == "Hello Rachel") } finally { if (varExpression != null) XPathExpressionPool.returnExpression(expression, nsContext, XPATH_VERSION_3, varExpression) } } test ("XPath 3.1 variable should change between differnt evaluations") { var varExpression : VarXPathExpression = null val expression = "$person?firstName || ' ' || $person?lastName" // 3.1 introduced maps try { varExpression = XPathExpressionPool.borrowExpression(expression, nsContext, XPATH_VERSION_3_1).asInstanceOf[VarXPathExpression] assert (varExpression.evaluate (inDoc, XPathConstants.STRING, Map[QName, Object](new QName("person") -> XdmMap.makeMap(mapAsJavaMap(Map[String,String]("firstName"->"Jorge", 
"lastName"->"Williams"))))) == "Jorge Williams") assert (varExpression.evaluate (inDoc, XPathConstants.STRING, Map[QName, Object](new QName("person") -> XdmMap.makeMap(mapAsJavaMap(Map[String,String]("firstName"->"Rachel", "lastName"->"Kraft"))))) == "Rachel Kraft") } finally { if (varExpression != null) XPathExpressionPool.returnExpression(expression, nsContext, XPATH_VERSION_3_1, varExpression) } } }
wdschei/api-checker
util/src/test/scala/com/rackspace/com/papi/components/checker/util/VarXPathExpressionSuite.scala
Scala
apache-2.0
4,759
package io.youi.component class HTMLSelect(protected val element: html.Select, val existing: Boolean = false) extends HTMLComponent[html.Select] with HTMLSelectTheme { lazy val items: Var[Vector[String]] = Var(Vector.empty) def this() = { this(create[html.Select]("select")) } override protected def init(): Unit = { super.init() element.addEventListener("change", (_: Event) => { valueChanging = true try { value @= element.value } finally { valueChanging = false } }) items.attachAndFire { v => element.innerHTML = "" v.foreach { text => val option = create[html.Option]("option") option.value = text option.innerHTML = text option.selected = value() == text element.appendChild(option) } } } override def componentType: String = "HTMLSelect" } object HTMLSelect extends HTMLSelectTheme { override protected def defaultParentTheme: Theme = HTMLComponent def existing(id: String, in: html.Element = document.body): HTMLSelect = { new HTMLSelect(in.byId[html.Select](id), existing = true) } }
outr/youi
ui/js/src/main/scala/io/youi/component/HTMLSelect.scala
Scala
mit
1,161
package razie.wiki.model import com.mongodb.DBObject import com.mongodb.casbah.Imports._ import razie.wiki.model.Visibility.PUBLIC /** * important concept - query/select a list of topics, based on inclusion/exclusion of tags * * LOGICAL: a/b|c/d is a and (b or c) and d * ACTUAL: a/b,c/d is a and (b or c) and d - in url , is not escaped, so easier to use than | * * todo perf if tq contains a cat like below, optimize to search just those - it's done in DomGuardian * * tag query tricks: if a tag uses an upper case like "Story" then it referes * to the category and it optimizes things a lot in big reactors * * ltags - all AND expressions * atags - the AND expressions without OR * otags * qt - array of array - first is AND second is OR * * NOTE the one way to do the search today is WikiSearch.getList */ class TagQuery(val tags: String) { val ltags = tags.split("/").map(_.trim).filter(goodTag) val atags = ltags.filter(_.indexOf(",") < 0).map(_.toLowerCase) val otags = ltags.filter(_.indexOf(",") >= 0).map(_.split(",").map(_.trim.toLowerCase).filter(goodTag)) val newqt = ltags.map(_.split(",").map(_.toLowerCase).filter(goodTag)) // array of array - first is AND second is OR val qt = ltags.map(_.split(",").filter(goodTag)) def goodTag(x:String) = x.length > 0 && x != "tag" // can't mix public with something else and still get public... 
def public = ltags contains "public" /** check if a set of tags matches */ def matches(t: Seq[String]) = { atags.foldLeft(true)((a, b) => a && (if (b.startsWith("-")) !t.contains(b.substring(1)) else t.contains(b)) ) && otags.foldLeft(true)((a, b) => a && b.foldLeft(false)((a, c) => a || t.contains(c)) ) } def matches (u:DBObject) = { val utags = if(u.containsField("tags")) u.get("tags").toString.toLowerCase else "" def checkT(b:String) = { utags.contains(b) || b == "*" || (b == "draft" && u.containsField("props") && u.getAs[DBObject]("props").exists(_.containsField("draft"))) || (b == "public" && u.containsField("props") && u.getAs[DBObject]("props").exists(_.getAsOrElse[String]("visibility", PUBLIC) == PUBLIC)) || u.get("category").toString.toLowerCase == b } qt.size <= 0 || u.containsField("tags") && qt.foldLeft(true)((a, b) => a && ( if(b(0).startsWith("-")) ! checkT(b(0).substring(1)) else b.foldLeft(false)((a, b) => a || checkT(b)) )) } def matches (u:WikiEntry) = { val utags = u.tags.mkString def checkT(b: String) = { utags.contains(b) || b == "*" || (b == "draft" && u.isDraft) || (b == "public" && u.visibility == PUBLIC) || u.category.toLowerCase == b } qt.size <= 0 || u.tags.size > 0 && qt.foldLeft(true)((a, b) => a && ( if (b(0).startsWith("-")) !checkT(b(0).substring(1)) else b.foldLeft(false)((a, b) => a || checkT(b)) )) } /** is any of the t list of tags included in this query? * * used to count tags of a result not in the query */ def contains(t: String) = { atags.foldLeft(false)((a, b) => a || t.contains(b)) || otags.foldLeft(false)((a, b) => a || b.foldLeft(false)((a, c) => a || t.contains(c)) ) } }
razie/wikireactor
common/app/razie/wiki/model/TagQuery.scala
Scala
apache-2.0
3,343
/* Deduction Tactics Copyright (C) 2012-2015 Raymond Dodge This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ package com.rayrobdod.deductionTactics.consoleView import com.rayrobdod.deductionTactics.Weaponkinds.Weaponkind import com.rayrobdod.deductionTactics.ai.TokenClassSuspicion import com.rayrobdod.deductionTactics.TokenClass import scala.runtime.{AbstractFunction1 => Function1} /** * @version a.6.0 */ object TokenClassPrinter extends Function1[TokenClass,Unit] { private def out = System.out private val elseString = "Unknown" def apply(tokenClass:TokenClass):Unit = { out.println(tokenClass.name); out.print("Speed: "); out.print(tokenClass.speed); out.print(" Range: "); out.println(tokenClass.range); out.print("Attack: ") out.print(tokenClass.atkElement.name); out.print("; "); out.print(tokenClass.atkWeapon.name); out.print("; "); out.println(tokenClass.atkStatus.name); out.print("Weakness: ") out.print(tokenClass.weakDirection.name); out.print("; "); out.print(getWeakWeapon(tokenClass.weakWeapon)); out.print("; "); out.println(tokenClass.weakStatus.name); } /** * @since a.6.0 */ def apply(tokenClass:TokenClassSuspicion):Unit = { out.println("???"); out.print("Speed: "); out.print(tokenClass.speed.getOrElse("?")); out.print(" Range: "); out.println(tokenClass.range.getOrElse("?")); out.print("Attack: ") out.print(tokenClass.atkElement.map{_.name}.getOrElse("?")); out.print("; "); 
out.print(tokenClass.atkWeapon.map{_.name}.getOrElse("?")); out.print("; "); out.println(tokenClass.atkStatus.map{_.name}.getOrElse("?")); out.print("Weakness: ") out.print(tokenClass.weakDirection.map{_.name}.getOrElse("?")); out.print("; "); out.print(getWeakWeapon(tokenClass.weakWeapon.map{x => ((x._1, x._2.getOrElse(0f) ))})); out.print("; "); out.println(tokenClass.weakStatus.map{_.name}.getOrElse("?")); } /** * @version a.6.0 */ private def getWeakWeapon(weakWeapon:Map[Weaponkind,Float]) = { val maxWeakness = weakWeapon.map{ (x) => (( x._1, x._2 )) }.maxBy{_._2} if (maxWeakness._2 == 0f) { elseString } else { maxWeakness._1.name } } }
rayrobdod/deductionTactics
src/main/scala/com/rayrobdod/deductionTactics/consoleView/TokenClassPrinter.scala
Scala
gpl-3.0
2,779
package com.alvrod.cryptopals.test.set1 import com.alvrod.cryptopals.{Combine, Convert} import org.junit.runner.RunWith import org.scalatest.FunSuite import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class Combinations extends FunSuite { test("Website xor sample") { val hex1 = "1c0111001f010100061a024b53535009181c" val hex2 = "686974207468652062756c6c277320657965" expectResult("746865206b696420646f6e277420706c6179") { val b1 = Convert.hexToBytes(hex1) val b2 = Convert.hexToBytes(hex2) val xored = Combine.xor(b1, b2) Convert.bytesToHex(xored) } } test("Hamming distance") { val a = "this is a test".getBytes val b = "wokka wokka!!!".getBytes expectResult(37) { Combine.hammingDistance(a, b) } } }
alvrod/cryptopals
test/src/com/alvrod/cryptopals/test/set1/Combinations.scala
Scala
gpl-2.0
796
/* * The MIT License * * Copyright (c) 2015-2016 Fulcrum Genomics LLC * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package com.fulcrumgenomics.commons.io import java.nio.file.{Path, Paths} /** Provides utility methods for creating and manipulating Path objects and path-like Strings. */ object PathUtil { @deprecated("Use `IllegalCharacters`") def illegalCharacters: String = IllegalCharacters val MaxFileNameSize: Int = 254 val IllegalCharacters: String = "[!\"#$%&'()*/:;<=>?@\\^`{|}~] " /** Resolves a path from a String, and then makes the path absolute. Prefer this to PathUtil.pathTo elsewhere. */ def pathTo(first: String, more: String*): Path = Paths.get(first, more:_*).toAbsolutePath.normalize /** Replaces a set of illegal characters within a String that is to be used as a filename. This will also * truncate the file name to be, at maximum, 255 characters. 
* * @param fileName the string that is to be used as a filename * @param illegalCharacters the set of characters to be replaced if found, defaults to [[IllegalCharacters]] * @param replacement an optional replacement character, defaulting to '_'; if None characters are just removed * @return the filename without illegal characters */ def sanitizeFileName(fileName: String, illegalCharacters: String = PathUtil.IllegalCharacters, replacement: Option[Char] = Some('_'), maxFileNameSize: Option[Int] = Some(MaxFileNameSize)): String = { val sanitizedFileName = replacement match { case None => fileName.filter(c => !illegalCharacters.contains(c)) case Some(r) => fileName.map(c => if (illegalCharacters.contains(c)) r else c) } sanitizedFileName.substring(0, Math.min(sanitizedFileName.length, MaxFileNameSize)) } /** Replaces the extension on an existing path. */ def replaceExtension(path: Path, ext:String) : Path = { val name = path.getFileName.toString val index = name.lastIndexOf('.') val newName = (if (index > 0) name.substring(0, index) else name) + ext path.resolveSibling(newName) } /** Remove the extension from a filename if present (the last . to the end of the string). */ def removeExtension(path: Path) : Path = replaceExtension(path, "") /** Remove the extension from a filename if present (the last . to the end of the string). */ def removeExtension(pathname: String) : String = { // Use Paths.get here so as not to turn the name into an absolute path, since we just toString it again removeExtension(Paths.get(pathname)).toString } /** Returns the extension of the filename (including the period) within the path, * or None if there is no period in the name. */ def extensionOf(path: Path) : Option[String] = { val name = path.getFileName.toString val index = name.lastIndexOf('.') if (index < 0) None else Some(name.substring(index)) } /** Works similarly to the unix command basename, by optionally removing an extension, and all leading path elements. 
*/ def basename(name: Path, trimExt: Boolean = true) : String = { val x = if (trimExt) removeExtension(name) else name x.getFileName.toString } /** Works similarly to the unix command basename, by optionally removing an extension, and all leading path elements. */ def basename(name: String, trimExt: Boolean) : String = { basename(pathTo(name), trimExt) } }
fulcrumgenomics/commons
src/main/scala/com/fulcrumgenomics/commons/io/PathUtil.scala
Scala
mit
4,490
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.deploy.client

import java.util.concurrent._
import java.util.concurrent.{Future => JFuture, ScheduledFuture => JScheduledFuture}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}

import scala.concurrent.Future
import scala.util.{Failure, Success}
import scala.util.control.NonFatal

import org.apache.spark.SparkConf
import org.apache.spark.deploy.{ApplicationDescription, ExecutorState}
import org.apache.spark.deploy.DeployMessages._
import org.apache.spark.deploy.master.Master
import org.apache.spark.internal.Logging
import org.apache.spark.rpc._
import org.apache.spark.util.{RpcUtils, ThreadUtils}

/**
 * Interface allowing applications to speak with a Spark standalone cluster manager.
 * (In standalone mode, this is the channel through which an application talks to the cluster.)
 *
 * Takes a master URL, an app description, and a listener for cluster events, and calls
 * back the listener when various events occur (connection, disconnection, executor
 * changes, application death).
 *
 * @param masterUrls Each url should look like spark://host:port.
 */
private[spark] class StandaloneAppClient(
    rpcEnv: RpcEnv,
    masterUrls: Array[String],
    appDescription: ApplicationDescription,
    listener: StandaloneAppClientListener,
    conf: SparkConf)
  extends Logging {

  // Parsed RPC addresses of every configured master (supports multi-master HA setups).
  private val masterRpcAddresses = masterUrls.map(RpcAddress.fromSparkURL(_))

  // How long to wait between registration attempts, and how many attempts to make.
  private val REGISTRATION_TIMEOUT_SECONDS = 20
  private val REGISTRATION_RETRIES = 3

  // Mutable state is held in atomics because it is touched from the RPC endpoint's
  // dispatcher thread as well as from external caller threads (start/stop/ask methods).
  private val endpoint = new AtomicReference[RpcEndpointRef]
  private val appId = new AtomicReference[String]
  private val registered = new AtomicBoolean(false)

  /**
   * The RPC endpoint that actually communicates with the master(s). Registration is
   * attempted against all masters in parallel; only the first successful registration
   * counts, and retries are scheduled until `REGISTRATION_RETRIES` is exceeded.
   */
  private class ClientEndpoint(override val rpcEnv: RpcEnv) extends ThreadSafeRpcEndpoint
    with Logging {

    private var master: Option[RpcEndpointRef] = None
    // To avoid calling listener.disconnected() multiple times
    private var alreadyDisconnected = false
    // To avoid calling listener.dead() multiple times
    private val alreadyDead = new AtomicBoolean(false)
    private val registerMasterFutures = new AtomicReference[Array[JFuture[_]]]
    private val registrationRetryTimer = new AtomicReference[JScheduledFuture[_]]

    // A thread pool for registering with masters. Because registering with a master is a blocking
    // action, this thread pool must be able to create "masterRpcAddresses.size" threads at the same
    // time so that we can register with all masters.
    private val registerMasterThreadPool = ThreadUtils.newDaemonCachedThreadPool(
      "appclient-register-master-threadpool",
      masterRpcAddresses.length // Make sure we can register with all masters at the same time
    )

    // A scheduled executor for scheduling the registration actions
    private val registrationRetryThread =
      ThreadUtils.newDaemonSingleThreadScheduledExecutor("appclient-registration-retry-thread")

    override def onStart(): Unit = {
      try {
        // Kick off the first registration round as soon as the endpoint starts.
        registerWithMaster(1)
      } catch {
        case e: Exception =>
          logWarning("Failed to connect to master", e)
          markDisconnected()
          stop()
      }
    }

    /**
     * Register with all masters asynchronously and returns an array `Future`s for cancellation.
     */
    private def tryRegisterAllMasters(): Array[JFuture[_]] = {
      for (masterAddress <- masterRpcAddresses) yield {
        registerMasterThreadPool.submit(new Runnable {
          override def run(): Unit = try {
            // Another attempt may already have succeeded; bail out early if so.
            if (registered.get) {
              return
            }
            logInfo("Connecting to master " + masterAddress.toSparkURL + "...")
            val masterRef = rpcEnv.setupEndpointRef(masterAddress, Master.ENDPOINT_NAME)
            masterRef.send(RegisterApplication(appDescription, self))
          } catch {
            case ie: InterruptedException => // Cancelled
            case NonFatal(e) => logWarning(s"Failed to connect to master $masterAddress", e)
          }
        })
      }
    }

    /**
     * Register with all masters asynchronously. It will call `registerWithMaster` every
     * REGISTRATION_TIMEOUT_SECONDS seconds until exceeding REGISTRATION_RETRIES times.
     * Once we connect to a master successfully, all scheduling work and Futures will be cancelled.
     *
     * nthRetry means this is the nth attempt to register with master.
     */
    private def registerWithMaster(nthRetry: Int) {
      registerMasterFutures.set(tryRegisterAllMasters())
      registrationRetryTimer.set(registrationRetryThread.schedule(new Runnable {
        override def run(): Unit = {
          if (registered.get) {
            // Success: cancel any in-flight attempts and tear down the pool.
            registerMasterFutures.get.foreach(_.cancel(true))
            registerMasterThreadPool.shutdownNow()
          } else if (nthRetry >= REGISTRATION_RETRIES) {
            markDead("All masters are unresponsive! Giving up.")
          } else {
            // Cancel the previous round before scheduling the next one.
            registerMasterFutures.get.foreach(_.cancel(true))
            registerWithMaster(nthRetry + 1)
          }
        }
      }, REGISTRATION_TIMEOUT_SECONDS, TimeUnit.SECONDS))
    }

    /**
     * Send a message to the current master. If we have not yet registered successfully with any
     * master, the message will be dropped.
     */
    private def sendToMaster(message: Any): Unit = {
      master match {
        case Some(masterRef) => masterRef.send(message)
        case None => logWarning(s"Drop $message because has not yet connected to master")
      }
    }

    // True when the remote address matches one of the configured master addresses.
    private def isPossibleMaster(remoteAddress: RpcAddress): Boolean = {
      masterRpcAddresses.contains(remoteAddress)
    }

    override def receive: PartialFunction[Any, Unit] = {
      case RegisteredApplication(appId_, masterRef) =>
        // FIXME How to handle the following cases?
        // 1. A master receives multiple registrations and sends back multiple
        // RegisteredApplications due to an unstable network.
        // 2. Receive multiple RegisteredApplication from different masters because the master is
        // changing.
        appId.set(appId_)
        registered.set(true)
        master = Some(masterRef)
        listener.connected(appId.get)

      case ApplicationRemoved(message) =>
        markDead("Master removed our application: %s".format(message))
        stop()

      case ExecutorAdded(id: Int, workerId: String, hostPort: String, cores: Int, memory: Int) =>
        val fullId = appId + "/" + id
        logInfo("Executor added: %s on %s (%s) with %d cores".format(fullId, workerId, hostPort,
          cores))
        listener.executorAdded(fullId, workerId, hostPort, cores, memory)

      case ExecutorUpdated(id, state, message, exitStatus, workerLost) =>
        val fullId = appId + "/" + id
        val messageText = message.map(s => " (" + s + ")").getOrElse("")
        logInfo("Executor updated: %s is now %s%s".format(fullId, state, messageText))
        // Only notify the listener of removal for terminal executor states.
        if (ExecutorState.isFinished(state)) {
          listener.executorRemoved(fullId, message.getOrElse(""), exitStatus, workerLost)
        }

      // Received here when the master has changed and the recovered application is notified:
      // remember the new master reference and acknowledge, so the master can move the
      // application back to the WAITING state.
      case MasterChanged(masterRef, masterWebUiUrl) =>
        logInfo("Master has changed, new master is at " + masterRef.address.toSparkURL)
        master = Some(masterRef)
        alreadyDisconnected = false
        masterRef.send(MasterChangeAcknowledged(appId.get))
    }

    override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
      case StopAppClient =>
        markDead("Application has been stopped.")
        sendToMaster(UnregisterApplication(appId.get))
        context.reply(true)
        stop()

      case r: RequestExecutors =>
        master match {
          case Some(m) => askAndReplyAsync(m, context, r)
          case None =>
            logWarning("Attempted to request executors before registering with Master.")
            context.reply(false)
        }

      case k: KillExecutors =>
        master match {
          case Some(m) => askAndReplyAsync(m, context, k)
          case None =>
            logWarning("Attempted to kill executors before registering with Master.")
            context.reply(false)
        }
    }

    // Forwards `msg` to the master and relays the Boolean answer (or failure) back to the
    // original caller without blocking the endpoint thread.
    private def askAndReplyAsync[T](
        endpointRef: RpcEndpointRef,
        context: RpcCallContext,
        msg: T): Unit = {
      // Ask a message and create a thread to reply with the result.  Allow thread to be
      // interrupted during shutdown, otherwise context must be notified of NonFatal errors.
      endpointRef.ask[Boolean](msg).andThen {
        case Success(b) => context.reply(b)
        case Failure(ie: InterruptedException) => // Cancelled
        case Failure(NonFatal(t)) => context.sendFailure(t)
      }(ThreadUtils.sameThread)
    }

    override def onDisconnected(address: RpcAddress): Unit = {
      if (master.exists(_.address == address)) {
        logWarning(s"Connection to $address failed; waiting for master to reconnect...")
        markDisconnected()
      }
    }

    override def onNetworkError(cause: Throwable, address: RpcAddress): Unit = {
      if (isPossibleMaster(address)) {
        logWarning(s"Could not connect to $address: $cause")
      }
    }

    /**
     * Notify the listener that we disconnected, if we hadn't already done so before.
     */
    def markDisconnected() {
      if (!alreadyDisconnected) {
        listener.disconnected()
        alreadyDisconnected = true
      }
    }

    // Notify the listener that the application is dead (at most once).
    def markDead(reason: String) {
      if (!alreadyDead.get) {
        listener.dead(reason)
        alreadyDead.set(true)
      }
    }

    override def onStop(): Unit = {
      // Cancel the retry timer (if any) and release all registration resources.
      if (registrationRetryTimer.get != null) {
        registrationRetryTimer.get.cancel(true)
      }
      registrationRetryThread.shutdownNow()
      registerMasterFutures.get.foreach(_.cancel(true))
      registerMasterThreadPool.shutdownNow()
    }

  }

  def start() {
    // Just launch an rpcEndpoint; it will call back into the listener.
    // This creates a ClientEndpoint (an inner class of StandaloneAppClient) that talks to
    // the master; during its construction onStart() runs, which calls
    // registerWithMaster() -> tryRegisterAllMasters() to register with every master.
    endpoint.set(rpcEnv.setupEndpoint("AppClient", new ClientEndpoint(rpcEnv)))
  }

  def stop() {
    if (endpoint.get != null) {
      try {
        val timeout = RpcUtils.askRpcTimeout(conf)
        timeout.awaitResult(endpoint.get.ask[Boolean](StopAppClient))
      } catch {
        case e: TimeoutException =>
          logInfo("Stop request to Master timed out; it may already be shut down.")
      }
      endpoint.set(null)
    }
  }

  /**
   * Request executors from the Master by specifying the total number desired,
   * including existing pending and running executors.
   *
   * @return whether the request is acknowledged.
   */
  def requestTotalExecutors(requestedTotal: Int): Future[Boolean] = {
    if (endpoint.get != null && appId.get != null) {
      endpoint.get.ask[Boolean](RequestExecutors(appId.get, requestedTotal))
    } else {
      logWarning("Attempted to request executors before driver fully initialized.")
      Future.successful(false)
    }
  }

  /**
   * Kill the given list of executors through the Master.
   * @return whether the kill request is acknowledged.
   */
  def killExecutors(executorIds: Seq[String]): Future[Boolean] = {
    if (endpoint.get != null && appId.get != null) {
      endpoint.get.ask[Boolean](KillExecutors(appId.get, executorIds))
    } else {
      logWarning("Attempted to kill executors before driver fully initialized.")
      Future.successful(false)
    }
  }

}
MrCodeYu/spark
core/src/main/scala/org/apache/spark/deploy/client/StandaloneAppClient.scala
Scala
apache-2.0
12,595
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.mllib.recommendation

import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.rdd.RDD
import org.apache.spark.util.Utils

class MatrixFactorizationModelSuite extends SparkFunSuite with MLlibTestSparkContext {

  // Latent-factor dimension shared by all fixtures below.
  val rank = 2
  var userFeatures: RDD[(Int, Array[Double])] = _
  var prodFeatures: RDD[(Int, Array[Double])] = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    // Two users (ids 0 and 1) and one product (id 2), each with `rank` factors.
    userFeatures = sc.parallelize(Seq((0, Array(1.0, 2.0)), (1, Array(3.0, 4.0))))
    prodFeatures = sc.parallelize(Seq((2, Array(5.0, 6.0))))
  }

  test("constructor") {
    val validModel = new MatrixFactorizationModel(rank, userFeatures, prodFeatures)
    // dot([1,2], [5,6]) = 17
    assert(validModel.predict(0, 2) ~== 17.0 relTol 1e-14)

    // The declared rank must match the width of the feature vectors.
    intercept[IllegalArgumentException] {
      new MatrixFactorizationModel(1, userFeatures, prodFeatures)
    }

    // User vectors narrower than `rank` are rejected.
    val truncatedUsers = sc.parallelize(Seq((0, Array(1.0)), (1, Array(3.0))))
    intercept[IllegalArgumentException] {
      new MatrixFactorizationModel(rank, truncatedUsers, prodFeatures)
    }

    // Product vectors narrower than `rank` are rejected as well.
    val truncatedProds = sc.parallelize(Seq((2, Array(5.0))))
    intercept[IllegalArgumentException] {
      new MatrixFactorizationModel(rank, userFeatures, truncatedProds)
    }
  }

  test("save/load") {
    val original = new MatrixFactorizationModel(rank, userFeatures, prodFeatures)
    val tempDir = Utils.createTempDir()
    val path = tempDir.toURI.toString
    // Arrays lack structural equality, so compare as sets of (id, Seq) pairs.
    def collect(features: RDD[(Int, Array[Double])]): Set[(Int, Seq[Double])] =
      features.map { case (id, factors) => (id, factors.toSeq) }.collect().toSet
    try {
      original.save(sc, path)
      val restored = MatrixFactorizationModel.load(sc, path)
      assert(restored.rank === rank)
      assert(collect(restored.userFeatures) === collect(userFeatures))
      assert(collect(restored.productFeatures) === collect(prodFeatures))
    } finally {
      Utils.deleteRecursively(tempDir)
    }
  }

  test("batch predict API recommendProductsForUsers") {
    val model = new MatrixFactorizationModel(rank, userFeatures, prodFeatures)
    val topK = 10
    val byUser = model.recommendProductsForUsers(topK).collectAsMap()

    // Top recommendation for each user is the single product, with its predicted rating.
    assert(byUser(0)(0).rating ~== 17.0 relTol 1e-14)
    assert(byUser(1)(0).rating ~== 39.0 relTol 1e-14)
  }

  test("batch predict API recommendUsersForProducts") {
    val model = new MatrixFactorizationModel(rank, userFeatures, prodFeatures)
    val topK = 10
    val byProduct = model.recommendUsersForProducts(topK).collectAsMap()

    // Users are ranked by predicted rating: user 1 (39.0) ahead of user 0 (17.0).
    assert(byProduct(2)(0).user == 1)
    assert(byProduct(2)(0).rating ~== 39.0 relTol 1e-14)
    assert(byProduct(2)(1).user == 0)
    assert(byProduct(2)(1).rating ~== 17.0 relTol 1e-14)
  }
}
wangyixiaohuihui/spark2-annotation
mllib/src/test/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModelSuite.scala
Scala
apache-2.0
3,702
package com.gmadorell.youtube_sync.module.synchronize.test.acceptance import com.gmadorell.youtube_sync.infrastructure.acceptance.YoutubeSyncAcceptanceTest import com.gmadorell.youtube_sync.infrastructure.configuration.YoutubeSyncConfiguration import com.gmadorell.youtube_sync.module.synchronize.domain.model._ import com.gmadorell.youtube_sync.module.synchronize.test.infrastructure.stub._ final class SynchronizeYoutubePlaylistsTest extends YoutubeSyncAcceptanceTest { "Youtube playlists" should { "be synchronized" in runWithContext { implicit context => context.setupActors() val expected: Map[PlayList, Seq[PlayListVideo]] = expectedPlayListVideosToBeCreated(configuration) eventually { expected.foreach { case (playList, playListVideos) => localPlayListVideoRepository .search(playList) .futureValue should contain theSameElementsAs playListVideos } } } } private def expectedPlayListVideosToBeCreated( configuration: YoutubeSyncConfiguration): Map[PlayList, Seq[PlayListVideo]] = { val playListToVideos = configuration.test.playLists.map { playListConfiguration => val playList = PlayListStub.create(id = PlayListIdStub.create(playListConfiguration.playListId), name = PlayListNameStub.create(playListConfiguration.name)) val videosOfPlayList = playListConfiguration.videos.map( videoConfig => VideoStub.create(id = VideoIdStub.create(videoConfig.videoId), name = VideoNameStub.create(videoConfig.videoName))) (playList, videosOfPlayList) }.toMap val playListToPlayListVideos = playListToVideos.map { case (playList, videos) => (playList, videos.map(video => PlayListVideoStub.create(playList, video))) } playListToPlayListVideos } }
GMadorell/youtube_sync
src/test/scala/com/gmadorell/youtube_sync/module/synchronize/test/acceptance/SynchronizeYoutubePlaylistsTest.scala
Scala
mit
1,898
import org.specs2.mutable._ import scala.collection.immutable.HashMap class Week5Tests extends Specification { //testi za ustvarjanje izrazov //testi za ovrednotenje izrazov //testi za odvajanje //testi za poenostavljanje }
Meemaw/scalaProgramming
src/test/scala/MainTests.scala
Scala
mit
238
import sbt._
import Keys._

/**
 * Central catalogue of library dependencies for the build: one ModuleID per
 * library, plus per-subproject dependency lists at the bottom.
 */
object Dependencies {
  // Compiler & test/utility libraries.
  val scalaCompiler = "org.scala-lang" % "scala-compiler" % Commons.targetedScalaVersion
  val scalatest = "org.scalatest" %% "scalatest" % "2.2.1" % "test"

  val scalazVersion = "7.1.1"
  val scalaz = "org.scalaz" %% "scalaz-core" % scalazVersion

  val config = "com.typesafe" % "config" % "1.2.0"
  val logging = "com.typesafe.scala-logging" %% "scala-logging" % "3.1.0"
  val logback = "ch.qos.logback" % "logback-classic" % "1.1.3"
  val sbtIo = "org.scala-sbt" %% "io" % "1.0.0-M1"
  val rng = "com.nicta" %% "rng" % "1.3.0"

  // Lucene full-text search (all artifacts share one version).
  val luceneVersion = "4.10.4"
  val luceneCore = "org.apache.lucene" % "lucene-core" % luceneVersion
  val luceneAnalyzersCommon = "org.apache.lucene" % "lucene-analyzers-common" % luceneVersion
  val luceneQueries = "org.apache.lucene" % "lucene-queries" % luceneVersion

  // Akka actors and logging integration.
  val akkaVersion = "2.3.9"
  val akkaActor = "com.typesafe.akka" %% "akka-actor" % akkaVersion
  val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % akkaVersion
  val akkaTestkit = "com.typesafe.akka" %% "akka-testkit" % akkaVersion % "test"

  // Spray HTTP server.
  val sprayVersion = "1.3.3"
  val sprayCan = "io.spray" %% "spray-can" % sprayVersion
  val sprayRouting = "io.spray" %% "spray-routing" % sprayVersion

  // Li Haoyi ecosystem: serialization, RPC, HTML templating, testing.
  val upickleVersion = "0.3.8"
  val upickle = "com.lihaoyi" %% "upickle" % upickleVersion

  val autowireVersion = "0.2.5"
  val autowire = "com.lihaoyi" %% "autowire" % autowireVersion

  val scalatagsVersion = "0.5.1"
  val scalatags = "com.lihaoyi" %% "scalatags" % scalatagsVersion

  val utestVersion = "0.3.1"
  val utest = "com.lihaoyi" %% "utest" % utestVersion % "test"

  // HTTP client and sbt logging bridge.
  val dispatch = "net.databinder.dispatch" %% "dispatch-core" % "0.11.2"

  val sbtSlf4j = "com.github.eirslett" %% "sbt-slf4j" % "0.1"

  // Per-subproject dependency lists consumed by the build definition.
  val nucleusDependencies = Seq(
    upickle,
    scalatest)

  val coreDependencies = Seq(
    upickle,
    scalatest,
    scalaz,
    config,
    logging,
    logback,
    luceneCore,
    luceneAnalyzersCommon,
    luceneQueries)

  val evaluationDependencies = Seq(
    scalatest,
    rng)

  val apiDependencies = Seq()

  val webserviceDependencies = Seq(
    scalaz,
    scalatest,
    akkaActor,
    akkaSlf4j,
    akkaTestkit,
    logback,
    sprayCan,
    sprayRouting,
    upickle,
    autowire,
    scalatags,
    sbtIo)

  val scalaClientDependencies = Seq(
    scalaCompiler,
    scalaz,
    scalatest,
    dispatch,
    autowire,
    upickle,
    config,
    logging,
    logback)

  val sbtPluginDependencies = Seq(
    dispatch,
    autowire,
    upickle,
    sbtSlf4j)
}
scala-search/scaps
project/dependencies.scala
Scala
mpl-2.0
2,565
package org.sisioh.aws4s.s3.model import com.amazonaws.services.s3.model.BucketLifecycleConfiguration.NoncurrentVersionTransition import com.amazonaws.services.s3.model.StorageClass import org.sisioh.aws4s.PimpedType object NoncurrentVersionTransitionFactory { def create(): NoncurrentVersionTransition = new NoncurrentVersionTransition() } class RichNonCurrentVersionTransition(val underlying: NoncurrentVersionTransition) extends AnyVal with PimpedType[NoncurrentVersionTransition] { def days: Int = underlying.getDays def days_=(value: Int): Unit = underlying.setDays(value) // --- def storageClassOpt: Option[StorageClass] = Option(underlying.getStorageClass) def storageClassOpt_=(value: Option[StorageClass]): Unit = underlying.setStorageClass(value.orNull) def withStorageClassOpt(value: Option[StorageClass]): NoncurrentVersionTransition = underlying.withStorageClass(value.orNull) }
everpeace/aws4s
aws4s-s3/src/main/scala/org/sisioh/aws4s/s3/model/RichNonCurrentVersionTransition.scala
Scala
mit
934
package com.softwaremill.codebrag.domain import org.bson.types.ObjectId case class UserWatchedBranch(id: ObjectId, userId: ObjectId, repoName: String, branchName: String)
softwaremill/codebrag
codebrag-domain/src/main/scala/com/softwaremill/codebrag/domain/UserWatchedBranch.scala
Scala
agpl-3.0
173
package com.ing.bakery.smoke

import cats.effect.{IO, Resource}
import com.ing.bakery.smoke.k8s.{DefinitionFile, Pod}
import io.circe.parser._
import org.scalatest.matchers.should.Matchers

import webshop.webservice.OrderStatus

import scala.concurrent.duration._

/**
 * End-to-end smoke test that drives a complete webshop checkout through a
 * running Bakery cluster and then verifies that the expected events reached
 * the Kafka event sink. The steps in the for-comprehension are strictly
 * ordered: each stage polls until the previous stage's effect is observable.
 */
class BakeryWebshopTests extends BakerySmokeTests with Matchers {

  describe("The Bakery cluster") {

    test("runs a happy path flow") { context =>
      for {
        // Initially only the baked-in "Webshop" recipe is available.
        _ <- eventually {
          for {
            recipes <- context.clientApp.listRecipeNames
            _ = recipes.length shouldBe 1
            _ = recipes should contain("Webshop")
          } yield ()
        }

        // Deploying an extra recipe definition must make a second recipe visible.
        _ <- DefinitionFile("extra-recipe.yaml", context.namespace)
        _ <- within(3 minutes, 30) {
          for {
            recipes <- context.clientApp.listRecipeNames
            _ = recipes.length shouldBe 2
            _ = recipes should contain("Webshop")
            _ = recipes should contain("ItemReservation.recipe") // 'ItemReservation.recipe' is the ID of this recipe
          } yield ()
        }

        // Happy-path checkout: create order, then supply address and payment info.
        orderId <- context.clientApp.createCheckoutOrder(List("item1", "item2"))

        _ <- eventually(s"Order created: $orderId") {
          context.clientApp.pollOrderStatus(orderId)
            .map(status => status shouldBe OrderStatus.InfoPending(List(
              "ShippingAddress", "PaymentInformation")).toString)
        }

        _ <- context.clientApp.addCheckoutAddressInfo(orderId, "address")
        _ <- printGreen(s"Address information added")

        _ <- eventually(s"Address processed") {
          context.clientApp.pollOrderStatus(orderId)
            .map(status => status shouldBe OrderStatus.InfoPending(List(
              "PaymentInformation")).toString)
        }

        _ <- context.clientApp.addCheckoutPaymentInfo(orderId, "payment-info")
        _ <- printGreen(s"Payment information added")

        _ <- eventually(s"Payment received") {
          context.clientApp.pollOrderStatus(orderId)
            .map(status => status shouldBe OrderStatus.ProcessingPayment.toString)
        }

        _ <- eventually(s"Order completed") {
          context.clientApp.pollOrderStatus(orderId)
            .map(status => status shouldBe OrderStatus.Complete.toString)
        }

        // Read exactly ExpectedEvents.size records from the 'events' topic inside
        // the kafka-event-sink pod and compare the event names (order-insensitive).
        events <- Pod.execOnNamed(
          "kafka-event-sink", context.namespace, Some("kafkacat"))(
          s"kafkacat -b localhost:9092 -C -t events -o 0 -c ${ExpectedEvents.size}")
        _ = events
          .map(parse)
          .map(_.toOption.get.asObject.get.apply("name").get.asString.get).sorted shouldBe ExpectedEvents.sorted
        _ <- printGreen(s"Event streams contain all required events")

      } yield succeed
    }
  }

  // Every event name the happy path is expected to emit, including duplicates
  // (one entry per expected occurrence).
  val ExpectedEvents = List(
    "EventReceived",
    "EventReceived",
    "EventReceived",
    "InteractionCompleted",
    "InteractionCompleted",
    "InteractionCompleted",
    "InteractionStarted",
    "InteractionStarted",
    "InteractionStarted",
    "ItemsReserved",
    "OrderPlaced",
    "PaymentInformationReceived",
    "RecipeAdded",
    "RecipeAdded",
    "RecipeInstanceCreated",
    "ShippingAddressReceived",
    "ShippingConfirmed",
    "PaymentSuccessful"
  )

  /** Creates a `Resource` which allocates and liberates the expensive resources each test can use.
    * For example web servers, network connection, database mocks.
    *
    * The objective of this function is to provide "sealed resources context" to each test, that means context
    * that other tests simply cannot touch.
    *
    * @param testArguments arguments built by the `argumentsBuilder` function.
    * @return the resources each test can use
    */
  def contextBuilder(testArguments: TestArguments): Resource[IO, TestContext] =
    BakeryEnvironment.resource(testArguments, BakeryEnvironment.namespace)
}
ing-bank/baker
bakery/integration-tests/src/test/scala/com/ing/bakery/smoke/BakeryWebshopTests.scala
Scala
mit
3,801
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import java.io.File import java.nio.charset.StandardCharsets import java.sql.{Date, Timestamp} import java.util.UUID import scala.util.Random import org.scalatest.Matchers._ import org.apache.spark.SparkException import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.plans.logical.{Filter, OneRowRelation, Project, Union} import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.execution.{FilterExec, QueryExecution} import org.apache.spark.sql.execution.aggregate.HashAggregateExec import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ReusedExchangeExec, ShuffleExchange} import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.{ExamplePoint, ExamplePointUDT, SharedSQLContext} import org.apache.spark.sql.test.SQLTestData.TestData2 import org.apache.spark.sql.types._ import org.apache.spark.util.Utils class DataFrameSuite extends QueryTest with SharedSQLContext { import testImplicits._ test("analysis error should be eagerly reported") { intercept[Exception] { testData.select('nonExistentName) } intercept[Exception] { 
testData.groupBy('key).agg(Map("nonExistentName" -> "sum")) } intercept[Exception] { testData.groupBy("nonExistentName").agg(Map("key" -> "sum")) } intercept[Exception] { testData.groupBy($"abcd").agg(Map("key" -> "sum")) } } test("dataframe toString") { assert(testData.toString === "[key: int, value: string]") assert(testData("key").toString === "key") assert($"test".toString === "test") } test("rename nested groupby") { val df = Seq((1, (1, 1))).toDF() checkAnswer( df.groupBy("_1").agg(sum("_2._1")).toDF("key", "total"), Row(1, 1) :: Nil) } test("access complex data") { assert(complexData.filter(complexData("a").getItem(0) === 2).count() == 1) assert(complexData.filter(complexData("m").getItem("1") === 1).count() == 1) assert(complexData.filter(complexData("s").getField("key") === 1).count() == 1) } test("table scan") { checkAnswer( testData, testData.collect().toSeq) } test("union all") { val unionDF = testData.union(testData).union(testData) .union(testData).union(testData) // Before optimizer, Union should be combined. 
assert(unionDF.queryExecution.analyzed.collect { case j: Union if j.children.size == 5 => j }.size === 1) checkAnswer( unionDF.agg(avg('key), max('key), min('key), sum('key)), Row(50.5, 100, 1, 25250) :: Nil ) } test("union should union DataFrames with UDTs (SPARK-13410)") { val rowRDD1 = sparkContext.parallelize(Seq(Row(1, new ExamplePoint(1.0, 2.0)))) val schema1 = StructType(Array(StructField("label", IntegerType, false), StructField("point", new ExamplePointUDT(), false))) val rowRDD2 = sparkContext.parallelize(Seq(Row(2, new ExamplePoint(3.0, 4.0)))) val schema2 = StructType(Array(StructField("label", IntegerType, false), StructField("point", new ExamplePointUDT(), false))) val df1 = spark.createDataFrame(rowRDD1, schema1) val df2 = spark.createDataFrame(rowRDD2, schema2) checkAnswer( df1.union(df2).orderBy("label"), Seq(Row(1, new ExamplePoint(1.0, 2.0)), Row(2, new ExamplePoint(3.0, 4.0))) ) } test("empty data frame") { assert(spark.emptyDataFrame.columns.toSeq === Seq.empty[String]) assert(spark.emptyDataFrame.count() === 0) } test("head and take") { assert(testData.take(2) === testData.collect().take(2)) assert(testData.head(2) === testData.collect().take(2)) assert(testData.head(2).head.schema === testData.schema) } test("dataframe alias") { val df = Seq(Tuple1(1)).toDF("c").as("t") val dfAlias = df.alias("t2") df.col("t.c") dfAlias.col("t2.c") } test("simple explode") { val df = Seq(Tuple1("a b c"), Tuple1("d e")).toDF("words") checkAnswer( df.explode("words", "word") { word: String => word.split(" ").toSeq }.select('word), Row("a") :: Row("b") :: Row("c") :: Row("d") ::Row("e") :: Nil ) } test("explode") { val df = Seq((1, "a b c"), (2, "a b"), (3, "a")).toDF("number", "letters") val df2 = df.explode('letters) { case Row(letters: String) => letters.split(" ").map(Tuple1(_)).toSeq } checkAnswer( df2 .select('_1 as 'letter, 'number) .groupBy('letter) .agg(countDistinct('number)), Row("a", 3) :: Row("b", 2) :: Row("c", 1) :: Nil ) } test("Star Expansion - 
CreateStruct and CreateArray") { val structDf = testData2.select("a", "b").as("record") // CreateStruct and CreateArray in aggregateExpressions assert(structDf.groupBy($"a").agg(min(struct($"record.*"))).first() == Row(3, Row(3, 1))) assert(structDf.groupBy($"a").agg(min(array($"record.*"))).first() == Row(3, Seq(3, 1))) // CreateStruct and CreateArray in project list (unresolved alias) assert(structDf.select(struct($"record.*")).first() == Row(Row(1, 1))) assert(structDf.select(array($"record.*")).first().getAs[Seq[Int]](0) === Seq(1, 1)) // CreateStruct and CreateArray in project list (alias) assert(structDf.select(struct($"record.*").as("a")).first() == Row(Row(1, 1))) assert(structDf.select(array($"record.*").as("a")).first().getAs[Seq[Int]](0) === Seq(1, 1)) } test("Star Expansion - hash") { val structDf = testData2.select("a", "b").as("record") checkAnswer( structDf.groupBy($"a", $"b").agg(min(hash($"a", $"*"))), structDf.groupBy($"a", $"b").agg(min(hash($"a", $"a", $"b")))) checkAnswer( structDf.groupBy($"a", $"b").agg(hash($"a", $"*")), structDf.groupBy($"a", $"b").agg(hash($"a", $"a", $"b"))) checkAnswer( structDf.select(hash($"*")), structDf.select(hash($"record.*"))) checkAnswer( structDf.select(hash($"a", $"*")), structDf.select(hash($"a", $"record.*"))) } test("Star Expansion - explode should fail with a meaningful message if it takes a star") { val df = Seq(("1", "1,2"), ("2", "4"), ("3", "7,8,9")).toDF("prefix", "csv") val e = intercept[AnalysisException] { df.explode($"*") { case Row(prefix: String, csv: String) => csv.split(",").map(v => Tuple1(prefix + ":" + v)).toSeq }.queryExecution.assertAnalyzed() } assert(e.getMessage.contains("Invalid usage of '*' in explode/json_tuple/UDTF")) checkAnswer( df.explode('prefix, 'csv) { case Row(prefix: String, csv: String) => csv.split(",").map(v => Tuple1(prefix + ":" + v)).toSeq }, Row("1", "1,2", "1:1") :: Row("1", "1,2", "1:2") :: Row("2", "4", "2:4") :: Row("3", "7,8,9", "3:7") :: Row("3", "7,8,9", "3:8") 
:: Row("3", "7,8,9", "3:9") :: Nil) } test("Star Expansion - explode alias and star") { val df = Seq((Array("a"), 1)).toDF("a", "b") checkAnswer( df.select(explode($"a").as("a"), $"*"), Row("a", Seq("a"), 1) :: Nil) } test("sort after generate with join=true") { val df = Seq((Array("a"), 1)).toDF("a", "b") checkAnswer( df.select($"*", explode($"a").as("c")).sortWithinPartitions("b", "c"), Row(Seq("a"), 1, "a") :: Nil) } test("selectExpr") { checkAnswer( testData.selectExpr("abs(key)", "value"), testData.collect().map(row => Row(math.abs(row.getInt(0)), row.getString(1))).toSeq) } test("selectExpr with alias") { checkAnswer( testData.selectExpr("key as k").select("k"), testData.select("key").collect().toSeq) } test("selectExpr with udtf") { val df = Seq((Map("1" -> 1), 1)).toDF("a", "b") checkAnswer( df.selectExpr("explode(a)"), Row("1", 1) :: Nil) } test("filterExpr") { val res = testData.collect().filter(_.getInt(0) > 90).toSeq checkAnswer(testData.filter("key > 90"), res) checkAnswer(testData.filter("key > 9.0e1"), res) checkAnswer(testData.filter("key > .9e+2"), res) checkAnswer(testData.filter("key > 0.9e+2"), res) checkAnswer(testData.filter("key > 900e-1"), res) checkAnswer(testData.filter("key > 900.0E-1"), res) checkAnswer(testData.filter("key > 9.e+1"), res) } test("filterExpr using where") { checkAnswer( testData.where("key > 50"), testData.collect().filter(_.getInt(0) > 50).toSeq) } test("repartition") { intercept[IllegalArgumentException] { testData.select('key).repartition(0) } checkAnswer( testData.select('key).repartition(10).select('key), testData.select('key).collect().toSeq) } test("coalesce") { intercept[IllegalArgumentException] { testData.select('key).coalesce(0) } assert(testData.select('key).coalesce(1).rdd.partitions.size === 1) checkAnswer( testData.select('key).coalesce(1).select('key), testData.select('key).collect().toSeq) } test("convert $\\"attribute name\\" into unresolved attribute") { checkAnswer( testData.where($"key" === 
lit(1)).select($"value"), Row("1")) } test("convert Scala Symbol 'attrname into unresolved attribute") { checkAnswer( testData.where('key === lit(1)).select('value), Row("1")) } test("select *") { checkAnswer( testData.select($"*"), testData.collect().toSeq) } test("simple select") { checkAnswer( testData.where('key === lit(1)).select('value), Row("1")) } test("select with functions") { checkAnswer( testData.select(sum('value), avg('value), count(lit(1))), Row(5050.0, 50.5, 100)) checkAnswer( testData2.select('a + 'b, 'a < 'b), Seq( Row(2, false), Row(3, true), Row(3, false), Row(4, false), Row(4, false), Row(5, false))) checkAnswer( testData2.select(sumDistinct('a)), Row(6)) } test("sorting with null ordering") { val data = Seq[java.lang.Integer](2, 1, null).toDF("key") checkAnswer(data.orderBy('key.asc), Row(null) :: Row(1) :: Row(2) :: Nil) checkAnswer(data.orderBy(asc("key")), Row(null) :: Row(1) :: Row(2) :: Nil) checkAnswer(data.orderBy('key.asc_nulls_first), Row(null) :: Row(1) :: Row(2) :: Nil) checkAnswer(data.orderBy(asc_nulls_first("key")), Row(null) :: Row(1) :: Row(2) :: Nil) checkAnswer(data.orderBy('key.asc_nulls_last), Row(1) :: Row(2) :: Row(null) :: Nil) checkAnswer(data.orderBy(asc_nulls_last("key")), Row(1) :: Row(2) :: Row(null) :: Nil) checkAnswer(data.orderBy('key.desc), Row(2) :: Row(1) :: Row(null) :: Nil) checkAnswer(data.orderBy(desc("key")), Row(2) :: Row(1) :: Row(null) :: Nil) checkAnswer(data.orderBy('key.desc_nulls_first), Row(null) :: Row(2) :: Row(1) :: Nil) checkAnswer(data.orderBy(desc_nulls_first("key")), Row(null) :: Row(2) :: Row(1) :: Nil) checkAnswer(data.orderBy('key.desc_nulls_last), Row(2) :: Row(1) :: Row(null) :: Nil) checkAnswer(data.orderBy(desc_nulls_last("key")), Row(2) :: Row(1) :: Row(null) :: Nil) } test("global sorting") { checkAnswer( testData2.orderBy('a.asc, 'b.asc), Seq(Row(1, 1), Row(1, 2), Row(2, 1), Row(2, 2), Row(3, 1), Row(3, 2))) checkAnswer( testData2.orderBy(asc("a"), desc("b")), Seq(Row(1, 2), 
Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1))) checkAnswer( testData2.orderBy('a.asc, 'b.desc), Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1))) checkAnswer( testData2.orderBy('a.desc, 'b.desc), Seq(Row(3, 2), Row(3, 1), Row(2, 2), Row(2, 1), Row(1, 2), Row(1, 1))) checkAnswer( testData2.orderBy('a.desc, 'b.asc), Seq(Row(3, 1), Row(3, 2), Row(2, 1), Row(2, 2), Row(1, 1), Row(1, 2))) checkAnswer( arrayData.toDF().orderBy('data.getItem(0).asc), arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).toSeq) checkAnswer( arrayData.toDF().orderBy('data.getItem(0).desc), arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).reverse.toSeq) checkAnswer( arrayData.toDF().orderBy('data.getItem(1).asc), arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).toSeq) checkAnswer( arrayData.toDF().orderBy('data.getItem(1).desc), arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).reverse.toSeq) } test("limit") { checkAnswer( testData.limit(10), testData.take(10).toSeq) checkAnswer( arrayData.toDF().limit(1), arrayData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq))) checkAnswer( mapData.toDF().limit(1), mapData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq))) // SPARK-12340: overstep the bounds of Int in SparkPlan.executeTake checkAnswer( spark.range(2).toDF().limit(2147483638), Row(0) :: Row(1) :: Nil ) } test("except") { checkAnswer( lowerCaseData.except(upperCaseData), Row(1, "a") :: Row(2, "b") :: Row(3, "c") :: Row(4, "d") :: Nil) checkAnswer(lowerCaseData.except(lowerCaseData), Nil) checkAnswer(upperCaseData.except(upperCaseData), Nil) // check null equality checkAnswer( nullInts.except(nullInts.filter("0 = 1")), nullInts) checkAnswer( nullInts.except(nullInts), Nil) // check if values are de-duplicated checkAnswer( allNulls.except(allNulls.filter("0 = 1")), Row(null) :: Nil) checkAnswer( allNulls.except(allNulls), Nil) // check if values are de-duplicated val df = Seq(("id1", 1), ("id1", 1), ("id", 1), ("id1", 
      2)).toDF("id", "value")
    checkAnswer(
      df.except(df.filter("0 = 1")),
      Row("id1", 1) :: Row("id", 1) :: Row("id1", 2) :: Nil)

    // check if the empty set on the left side works
    checkAnswer(
      allNulls.filter("0 = 1").except(allNulls),
      Nil)
  }

  // EXCEPT keeps only distinct rows in the result, per the SQL standard.
  test("except distinct - SQL compliance") {
    val df_left = Seq(1, 2, 2, 3, 3, 4).toDF("id")
    val df_right = Seq(1, 3).toDF("id")

    checkAnswer(
      df_left.except(df_right),
      Row(2) :: Row(4) :: Nil
    )
  }

  // The nullability of EXCEPT's output schema should follow the left input.
  test("except - nullability") {
    val nonNullableInts = Seq(Tuple1(11), Tuple1(3)).toDF()
    assert(nonNullableInts.schema.forall(!_.nullable))

    val df1 = nonNullableInts.except(nullInts)
    checkAnswer(df1, Row(11) :: Nil)
    assert(df1.schema.forall(!_.nullable))

    val df2 = nullInts.except(nonNullableInts)
    checkAnswer(df2, Row(1) :: Row(2) :: Row(null) :: Nil)
    assert(df2.schema.forall(_.nullable))

    val df3 = nullInts.except(nullInts)
    checkAnswer(df3, Nil)
    assert(df3.schema.forall(_.nullable))

    val df4 = nonNullableInts.except(nonNullableInts)
    checkAnswer(df4, Nil)
    assert(df4.schema.forall(!_.nullable))
  }

  // INTERSECT with distinct semantics, including null-safe equality.
  test("intersect") {
    checkAnswer(
      lowerCaseData.intersect(lowerCaseData),
      Row(1, "a") :: Row(2, "b") :: Row(3, "c") :: Row(4, "d") :: Nil)
    checkAnswer(lowerCaseData.intersect(upperCaseData), Nil)

    // check null equality
    checkAnswer(
      nullInts.intersect(nullInts),
      Row(1) :: Row(2) :: Row(3) :: Row(null) :: Nil)

    // check if values are de-duplicated
    checkAnswer(
      allNulls.intersect(allNulls),
      Row(null) :: Nil)

    // check if values are de-duplicated
    val df = Seq(("id1", 1), ("id1", 1), ("id", 1), ("id1", 2)).toDF("id", "value")
    checkAnswer(
      df.intersect(df),
      Row("id1", 1) :: Row("id", 1) :: Row("id1", 2) :: Nil)
  }

  // Intersection output is non-nullable if either input column is non-nullable.
  test("intersect - nullability") {
    val nonNullableInts = Seq(Tuple1(1), Tuple1(3)).toDF()
    assert(nonNullableInts.schema.forall(!_.nullable))

    val df1 = nonNullableInts.intersect(nullInts)
    checkAnswer(df1, Row(1) :: Row(3) :: Nil)
    assert(df1.schema.forall(!_.nullable))

    val df2 = nullInts.intersect(nonNullableInts)
    checkAnswer(df2, Row(1) :: Row(3) :: Nil)
    assert(df2.schema.forall(!_.nullable))

    val df3 = nullInts.intersect(nullInts)
    checkAnswer(df3, Row(1) :: Row(2) :: Row(3) :: Row(null) :: Nil)
    assert(df3.schema.forall(_.nullable))

    val df4 = nonNullableInts.intersect(nonNullableInts)
    checkAnswer(df4, Row(1) :: Row(3) :: Nil)
    assert(df4.schema.forall(!_.nullable))
  }

  // A Scala closure registered via `udf` and applied column-wise.
  test("udf") {
    val foo = udf((a: Int, b: String) => a.toString + b)

    checkAnswer(
      // SELECT *, foo(key, value) FROM testData
      testData.select($"*", foo('key, 'value)).limit(3),
      Row(1, "1", "11") :: Row(2, "2", "22") :: Row(3, "3", "33") :: Nil
    )
  }

  // udf.register + callUDF works without a Hive-enabled session.
  test("callUDF without Hive Support") {
    val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
    df.sparkSession.udf.register("simpleUDF", (v: Int) => v * v)
    checkAnswer(
      df.select($"id", callUDF("simpleUDF", $"value")),
      Row("id1", 1) :: Row("id2", 16) :: Row("id3", 25) :: Nil)
  }

  // withColumn with a new name appends the column at the end of the schema.
  test("withColumn") {
    val df = testData.toDF().withColumn("newCol", col("key") + 1)
    checkAnswer(
      df,
      testData.collect().map { case Row(key: Int, value: String) =>
        Row(key, value, key + 1)
      }.toSeq)
    assert(df.schema.map(_.name) === Seq("key", "value", "newCol"))
  }

  // withColumn with an existing name replaces that column in place.
  test("replace column using withColumn") {
    val df2 = sparkContext.parallelize(Array(1, 2, 3)).toDF("x")
    val df3 = df2.withColumn("x", df2("x") + 1)
    checkAnswer(
      df3.select("x"),
      Row(2) :: Row(3) :: Row(4) :: Nil)
  }

  test("drop column using drop") {
    val df = testData.drop("key")
    checkAnswer(
      df,
      testData.collect().map(x => Row(x.getString(1))).toSeq)
    assert(df.schema.map(_.name) === Seq("value"))
  }

  // Varargs overload: several columns dropped at once.
  test("drop columns using drop") {
    val src = Seq((0, 2, 3)).toDF("a", "b", "c")
    val df = src.drop("a", "b")
    checkAnswer(df, Row(3))
    assert(df.schema.map(_.name) === Seq("c"))
  }

  // Dropping a non-existent column is a no-op rather than an error.
  test("drop unknown column (no-op)") {
    val df = testData.drop("random")
    checkAnswer(
      df,
      testData.collect().toSeq)
    assert(df.schema.map(_.name) === Seq("key", "value"))
  }

  // drop also accepts a resolved Column reference instead of a name.
  test("drop column using drop with column reference") {
    val col = testData("key")
    val df = testData.drop(col)
    checkAnswer(
      df,
      testData.collect().map(x => Row(x.getString(1))).toSeq)
    assert(df.schema.map(_.name) === Seq("value"))
  }

  // An unresolved Column that matches nothing leaves the schema unchanged.
  test("drop unknown column (no-op) with column reference") {
    val col = Column("random")
    val df = testData.drop(col)
    checkAnswer(
      df,
      testData.collect().toSeq)
    assert(df.schema.map(_.name) === Seq("key", "value"))
  }

  // An unresolved Column reference with a matching name still drops the column.
  test("drop unknown column with same name with column reference") {
    val col = Column("key")
    val df = testData.drop(col)
    checkAnswer(
      df,
      testData.collect().map(x => Row(x.getString(1))).toSeq)
    assert(df.schema.map(_.name) === Seq("value"))
  }

  // drop(col) can disambiguate between duplicate column names produced by a join:
  // only the side the reference was taken from is removed.
  test("drop column after join with duplicate columns using column reference") {
    val newSalary = salary.withColumnRenamed("personId", "id")
    val col = newSalary("id")
    // this join will result in duplicate "id" columns
    val joinedDf = person.join(newSalary,
      person("id") === newSalary("id"), "inner")
    // remove only the "id" column that was associated with newSalary
    val df = joinedDf.drop(col)
    checkAnswer(
      df,
      joinedDf.collect().map {
        case Row(id: Int, name: String, age: Int, idToDrop: Int, salary: Double) =>
          Row(id, name, age, salary)
      }.toSeq)
    assert(df.schema.map(_.name) === Seq("id", "name", "age", "salary"))
    assert(df("id") == person("id"))
  }

  // drop(name) treats the string literally; it does not interpret "." as a path.
  test("drop top level columns that contains dot") {
    val df1 = Seq((1, 2)).toDF("a.b", "a.c")
    checkAnswer(df1.drop("a.b"), Row(2))

    // Creates data set: {"a.b": 1, "a": {"b": 3}}
    val df2 = Seq((1)).toDF("a.b").withColumn("a", struct(lit(3) as "b"))
    // Not like select(), drop() parses the column name "a.b" literally without interpreting "."
    checkAnswer(df2.drop("a.b").select("a.b"), Row(3))

    // "`" is treated as a normal char here with no interpreting, "`a`b" is a valid column name.
    assert(df2.drop("`a.b`").columns.size == 2)
  }

  // drop(name) removes every top-level column matching the name,
  // respecting the session's case sensitivity setting.
  test("drop(name: String) search and drop all top level columns that matchs the name") {
    val df1 = Seq((1, 2)).toDF("a", "b")
    val df2 = Seq((3, 4)).toDF("a", "b")
    checkAnswer(df1.crossJoin(df2), Row(1, 2, 3, 4))
    // Finds and drops all columns that match the name (case insensitive).
    checkAnswer(df1.crossJoin(df2).drop("A"), Row(2, 4))
  }

  test("withColumnRenamed") {
    val df = testData.toDF().withColumn("newCol", col("key") + 1)
      .withColumnRenamed("value", "valueRenamed")
    checkAnswer(
      df,
      testData.collect().map { case Row(key: Int, value: String) =>
        Row(key, value, key + 1)
      }.toSeq)
    assert(df.schema.map(_.name) === Seq("key", "valueRenamed", "newCol"))
  }

  // describe() returns count/mean/stddev/min/max rows, all values cast to string;
  // non-numeric columns get null for mean/stddev.
  test("describe") {
    val describeTestData = Seq(
      ("Bob", 16, 176),
      ("Alice", 32, 164),
      ("David", 60, 192),
      ("Amy", 24, 180)).toDF("name", "age", "height")

    val describeResult = Seq(
      Row("count", "4", "4", "4"),
      Row("mean", null, "33.0", "178.0"),
      Row("stddev", null, "19.148542155126762", "11.547005383792516"),
      Row("min", "Alice", "16", "164"),
      Row("max", "David", "60", "192"))

    val emptyDescribeResult = Seq(
      Row("count", "0", "0", "0"),
      Row("mean", null, null, null),
      Row("stddev", null, null, null),
      Row("min", null, null, null),
      Row("max", null, null, null))

    // Helper: the column names of a DataFrame's schema, in order.
    def getSchemaAsSeq(df: DataFrame): Seq[String] = df.schema.map(_.name)

    val describeTwoCols = describeTestData.describe("name", "age", "height")
    assert(getSchemaAsSeq(describeTwoCols) === Seq("summary", "name", "age", "height"))
    checkAnswer(describeTwoCols, describeResult)
    // All aggregate value should have been cast to string
    describeTwoCols.collect().foreach { row =>
      assert(row.get(2).isInstanceOf[String], "expected string but found " + row.get(2).getClass)
      assert(row.get(3).isInstanceOf[String], "expected string but found " + row.get(3).getClass)
    }

    val describeAllCols = describeTestData.describe()
    assert(getSchemaAsSeq(describeAllCols) === Seq("summary", "name", "age", "height"))
    checkAnswer(describeAllCols, describeResult)
val describeOneCol = describeTestData.describe("age") assert(getSchemaAsSeq(describeOneCol) === Seq("summary", "age")) checkAnswer(describeOneCol, describeResult.map { case Row(s, _, d, _) => Row(s, d)} ) val describeNoCol = describeTestData.select("name").describe() assert(getSchemaAsSeq(describeNoCol) === Seq("summary", "name")) checkAnswer(describeNoCol, describeResult.map { case Row(s, n, _, _) => Row(s, n)} ) val emptyDescription = describeTestData.limit(0).describe() assert(getSchemaAsSeq(emptyDescription) === Seq("summary", "name", "age", "height")) checkAnswer(emptyDescription, emptyDescribeResult) } test("apply on query results (SPARK-5462)") { val df = testData.sparkSession.sql("select key from testData") checkAnswer(df.select(df("key")), testData.select('key).collect().toSeq) } test("inputFiles") { withTempDir { dir => val df = Seq((1, 22)).toDF("a", "b") val parquetDir = new File(dir, "parquet").getCanonicalPath df.write.parquet(parquetDir) val parquetDF = spark.read.parquet(parquetDir) assert(parquetDF.inputFiles.nonEmpty) val jsonDir = new File(dir, "json").getCanonicalPath df.write.json(jsonDir) val jsonDF = spark.read.json(jsonDir) assert(parquetDF.inputFiles.nonEmpty) val unioned = jsonDF.union(parquetDF).inputFiles.sorted val allFiles = (jsonDF.inputFiles ++ parquetDF.inputFiles).distinct.sorted assert(unioned === allFiles) } } ignore("show") { // This test case is intended ignored, but to make sure it compiles correctly testData.select($"*").show() testData.select($"*").show(1000) } test("showString: truncate = [0, 20]") { val longString = Array.fill(21)("1").mkString val df = sparkContext.parallelize(Seq("1", longString)).toDF() val expectedAnswerForFalse = """+---------------------+ ||value | |+---------------------+ ||1 | ||111111111111111111111| |+---------------------+ |""".stripMargin assert(df.showString(10, truncate = 0) === expectedAnswerForFalse) val expectedAnswerForTrue = """+--------------------+ || value| |+--------------------+ || 
1| ||11111111111111111...| |+--------------------+ |""".stripMargin assert(df.showString(10, truncate = 20) === expectedAnswerForTrue) } test("showString: truncate = [3, 17]") { val longString = Array.fill(21)("1").mkString val df = sparkContext.parallelize(Seq("1", longString)).toDF() val expectedAnswerForFalse = """+-----+ ||value| |+-----+ || 1| || 111| |+-----+ |""".stripMargin assert(df.showString(10, truncate = 3) === expectedAnswerForFalse) val expectedAnswerForTrue = """+-----------------+ || value| |+-----------------+ || 1| ||11111111111111...| |+-----------------+ |""".stripMargin assert(df.showString(10, truncate = 17) === expectedAnswerForTrue) } test("showString(negative)") { val expectedAnswer = """+---+-----+ ||key|value| |+---+-----+ |+---+-----+ |only showing top 0 rows |""".stripMargin assert(testData.select($"*").showString(-1) === expectedAnswer) } test("showString(0)") { val expectedAnswer = """+---+-----+ ||key|value| |+---+-----+ |+---+-----+ |only showing top 0 rows |""".stripMargin assert(testData.select($"*").showString(0) === expectedAnswer) } test("showString: array") { val df = Seq( (Array(1, 2, 3), Array(1, 2, 3)), (Array(2, 3, 4), Array(2, 3, 4)) ).toDF() val expectedAnswer = """+---------+---------+ || _1| _2| |+---------+---------+ ||[1, 2, 3]|[1, 2, 3]| ||[2, 3, 4]|[2, 3, 4]| |+---------+---------+ |""".stripMargin assert(df.showString(10) === expectedAnswer) } test("showString: binary") { val df = Seq( ("12".getBytes(StandardCharsets.UTF_8), "ABC.".getBytes(StandardCharsets.UTF_8)), ("34".getBytes(StandardCharsets.UTF_8), "12346".getBytes(StandardCharsets.UTF_8)) ).toDF() val expectedAnswer = """+-------+----------------+ || _1| _2| |+-------+----------------+ ||[31 32]| [41 42 43 2E]| ||[33 34]|[31 32 33 34 36]| |+-------+----------------+ |""".stripMargin assert(df.showString(10) === expectedAnswer) } test("showString: minimum column width") { val df = Seq( (1, 1), (2, 2) ).toDF() val expectedAnswer = """+---+---+ || _1| _2| 
|+---+---+ || 1| 1| || 2| 2| |+---+---+ |""".stripMargin assert(df.showString(10) === expectedAnswer) } test("SPARK-7319 showString") { val expectedAnswer = """+---+-----+ ||key|value| |+---+-----+ || 1| 1| |+---+-----+ |only showing top 1 row |""".stripMargin assert(testData.select($"*").showString(1) === expectedAnswer) } test("SPARK-7327 show with empty dataFrame") { val expectedAnswer = """+---+-----+ ||key|value| |+---+-----+ |+---+-----+ |""".stripMargin assert(testData.select($"*").filter($"key" < 0).showString(1) === expectedAnswer) } test("SPARK-18350 show with session local timezone") { val d = Date.valueOf("2016-12-01") val ts = Timestamp.valueOf("2016-12-01 00:00:00") val df = Seq((d, ts)).toDF("d", "ts") val expectedAnswer = """+----------+-------------------+ ||d |ts | |+----------+-------------------+ ||2016-12-01|2016-12-01 00:00:00| |+----------+-------------------+ |""".stripMargin assert(df.showString(1, truncate = 0) === expectedAnswer) withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> "GMT") { val expectedAnswer = """+----------+-------------------+ ||d |ts | |+----------+-------------------+ ||2016-12-01|2016-12-01 08:00:00| |+----------+-------------------+ |""".stripMargin assert(df.showString(1, truncate = 0) === expectedAnswer) } } test("createDataFrame(RDD[Row], StructType) should convert UDTs (SPARK-6672)") { val rowRDD = sparkContext.parallelize(Seq(Row(new ExamplePoint(1.0, 2.0)))) val schema = StructType(Array(StructField("point", new ExamplePointUDT(), false))) val df = spark.createDataFrame(rowRDD, schema) df.rdd.collect() } test("SPARK-6899: type should match when using codegen") { checkAnswer(decimalData.agg(avg('a)), Row(new java.math.BigDecimal(2.0))) } test("SPARK-7133: Implement struct, array, and map field accessor") { assert(complexData.filter(complexData("a")(0) === 2).count() == 1) assert(complexData.filter(complexData("m")("1") === 1).count() == 1) assert(complexData.filter(complexData("s")("key") === 1).count() == 1) 
    // Accessor arguments may themselves be column expressions.
    assert(complexData.filter(complexData("m")(complexData("s")("value")) === 1).count() == 1)
    assert(complexData.filter(complexData("a")(complexData("s")("key")) === 1).count() == 1)
  }

  // Backticks quote attribute names containing dots; malformed backtick syntax
  // must raise an AnalysisException.
  test("SPARK-7551: support backticks for DataFrame attribute resolution") {
    val df = spark.read.json(Seq("""{"a.b": {"c": {"d..e": {"f": 1}}}}""").toDS())
    checkAnswer(
      df.select(df("`a.b`.c.`d..e`.`f`")),
      Row(1)
    )

    val df2 = spark.read.json(Seq("""{"a b": {"c": {"d e": {"f": 1}}}}""").toDS())
    checkAnswer(
      df2.select(df2("`a b`.c.d e.f")),
      Row(1)
    )

    // Helper: the expression must fail attribute-name parsing.
    def checkError(testFun: => Unit): Unit = {
      val e = intercept[org.apache.spark.sql.AnalysisException] {
        testFun
      }
      assert(e.getMessage.contains("syntax error in attribute name:"))
    }
    checkError(df("`abc.`c`"))
    checkError(df("`abc`..d"))
    checkError(df("`a`.b."))
    checkError(df("`a.b`.c.`d"))
  }

  // dropDuplicates: no-arg, Seq-of-names, and varargs overloads.
  test("SPARK-7324 dropDuplicates") {
    val testData = sparkContext.parallelize(
      (2, 1, 2) :: (1, 1, 1) ::
      (1, 2, 1) :: (2, 1, 2) ::
      (2, 2, 2) :: (2, 2, 1) ::
      (2, 1, 1) :: (1, 1, 2) ::
      (1, 2, 2) :: (1, 2, 1) :: Nil).toDF("key", "value1", "value2")

    checkAnswer(
      testData.dropDuplicates(),
      Seq(Row(2, 1, 2), Row(1, 1, 1), Row(1, 2, 1),
        Row(2, 2, 2), Row(2, 1, 1), Row(2, 2, 1),
        Row(1, 1, 2), Row(1, 2, 2)))

    checkAnswer(
      testData.dropDuplicates(Seq("key", "value1")),
      Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))

    checkAnswer(
      testData.dropDuplicates(Seq("value1", "value2")),
      Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))

    checkAnswer(
      testData.dropDuplicates(Seq("key")),
      Seq(Row(2, 1, 2), Row(1, 1, 1)))

    checkAnswer(
      testData.dropDuplicates(Seq("value1")),
      Seq(Row(2, 1, 2), Row(1, 2, 1)))

    checkAnswer(
      testData.dropDuplicates(Seq("value2")),
      Seq(Row(2, 1, 2), Row(1, 1, 1)))

    checkAnswer(
      testData.dropDuplicates("key", "value1"),
      Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
  }

  // The empty string is a legal column name for col().
  test("SPARK-8621: support empty string column name") {
    val df = Seq(Tuple1(1)).toDF("").as("t")
    // We should allow empty string as column name
    df.col("")
    df.col("t.``")
  }

  // Sorting a column with NaN values must not throw (NaN ordering is defined).
  test("SPARK-8797: sort by float column containing NaN should not crash") {
    val inputData = Seq.fill(10)(Tuple1(Float.NaN)) ++ (1 to 1000).map(x => Tuple1(x.toFloat))
    val df = Random.shuffle(inputData).toDF("a")
    df.orderBy("a").collect()
  }

  test("SPARK-8797: sort by double column containing NaN should not crash") {
    val inputData = Seq.fill(10)(Tuple1(Double.NaN)) ++ (1 to 1000).map(x => Tuple1(x.toDouble))
    val df = Random.shuffle(inputData).toDF("a")
    df.orderBy("a").collect()
  }

  // max() over values including NaN yields NaN: NaN orders above infinity and MaxValue.
  test("NaN is greater than all other non-NaN numeric values") {
    val maxDouble = Seq(Double.NaN, Double.PositiveInfinity, Double.MaxValue)
      .map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
    assert(java.lang.Double.isNaN(maxDouble.getDouble(0)))
    val maxFloat = Seq(Float.NaN, Float.PositiveInfinity, Float.MaxValue)
      .map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
    assert(java.lang.Float.isNaN(maxFloat.getFloat(0)))
  }

  // The duplicate-column error message must name exactly the duplicated columns.
  test("SPARK-8072: Better Exception for Duplicate Columns") {
    // only one duplicate column present
    val e = intercept[org.apache.spark.sql.AnalysisException] {
      Seq((1, 2, 3), (2, 3, 4), (3, 4, 5)).toDF("column1", "column2", "column1")
        .write.format("parquet").save("temp")
    }
    assert(e.getMessage.contains("Duplicate column(s)"))
    assert(e.getMessage.contains("column1"))
    assert(!e.getMessage.contains("column2"))

    // multiple duplicate columns present
    val f = intercept[org.apache.spark.sql.AnalysisException] {
      Seq((1, 2, 3, 4, 5), (2, 3, 4, 5, 6), (3, 4, 5, 6, 7))
        .toDF("column1", "column2", "column3", "column1", "column3")
        .write.format("json").save("temp")
    }
    assert(f.getMessage.contains("Duplicate column(s)"))
    assert(f.getMessage.contains("column1"))
    assert(f.getMessage.contains("column3"))
    assert(!f.getMessage.contains("column2"))
  }

  // insertInto succeeds for file-backed / insertable relations but must fail with a
  // clear message for RDD-based plans, non-leaf plans, and OneRowRelation.
  test("SPARK-6941: Better error message for inserting into RDD-based Table") {
    withTempDir { dir =>
      val tempParquetFile = new File(dir, "tmp_parquet")
      val tempJsonFile = new File(dir, "tmp_json")

      val df = Seq(Tuple1(1)).toDF()
      val insertion = Seq(Tuple1(2)).toDF("col")

      // pass case: parquet table (HadoopFsRelation)
      df.write.mode(SaveMode.Overwrite).parquet(tempParquetFile.getCanonicalPath)
      val pdf = spark.read.parquet(tempParquetFile.getCanonicalPath)
      pdf.createOrReplaceTempView("parquet_base")

      insertion.write.insertInto("parquet_base")

      // pass case: json table (InsertableRelation)
      df.write.mode(SaveMode.Overwrite).json(tempJsonFile.getCanonicalPath)
      val jdf = spark.read.json(tempJsonFile.getCanonicalPath)
      jdf.createOrReplaceTempView("json_base")
      insertion.write.mode(SaveMode.Overwrite).insertInto("json_base")

      // error cases: insert into an RDD
      df.createOrReplaceTempView("rdd_base")
      val e1 = intercept[AnalysisException] {
        insertion.write.insertInto("rdd_base")
      }
      assert(e1.getMessage.contains("Inserting into an RDD-based table is not allowed."))

      // error case: insert into a logical plan that is not a LeafNode
      val indirectDS = pdf.select("_1").filter($"_1" > 5)
      indirectDS.createOrReplaceTempView("indirect_ds")
      val e2 = intercept[AnalysisException] {
        insertion.write.insertInto("indirect_ds")
      }
      assert(e2.getMessage.contains("Inserting into an RDD-based table is not allowed."))

      // error case: insert into an OneRowRelation
      Dataset.ofRows(spark, OneRowRelation).createOrReplaceTempView("one_row")
      val e3 = intercept[AnalysisException] {
        insertion.write.insertInto("one_row")
      }
      assert(e3.getMessage.contains("Inserting into an RDD-based table is not allowed."))
    }
  }

  // rand(seed) must produce the same output on repeated show/collect over a local relation.
  test("SPARK-8608: call `show` on local DataFrame with random columns should return same value") {
    val df = testData.select(rand(33))
    assert(df.showString(5) == df.showString(5))

    // We will reuse the same Expression object for LocalRelation.
    val df1 = (1 to 10).map(Tuple1.apply).toDF().select(rand(33))
    assert(df1.showString(5) == df1.showString(5))
  }

  // Same determinism guarantee when the random column participates in a sort.
  test("SPARK-8609: local DataFrame with random columns should return same value after sort") {
    checkAnswer(testData.sort(rand(33)), testData.sort(rand(33)))

    // We will reuse the same Expression object for LocalRelation.
    val df = (1 to 10).map(Tuple1.apply).toDF()
    checkAnswer(df.sort(rand(33)), df.sort(rand(33)))
  }

  // Sorting by rand(seed) must reproduce XORShiftRandom's sequence for that seed.
  test("SPARK-9083: sort with non-deterministic expressions") {
    import org.apache.spark.util.random.XORShiftRandom

    val seed = 33
    val df = (1 to 100).map(Tuple1.apply).toDF("i")
    val random = new XORShiftRandom(seed)
    val expected = (1 to 100).map(_ -> random.nextDouble()).sortBy(_._2).map(_._1)
    val actual = df.sort(rand(seed)).collect().map(_.getInt(0))
    assert(expected === actual)
  }

  // orderBy may reference a column that the preceding select/filter did not keep.
  test("Sorting columns are not in Filter and Project") {
    checkAnswer(
      upperCaseData.filter('N > 1).select('N).filter('N < 6).orderBy('L.asc),
      Row(2) :: Row(3) :: Row(4) :: Row(5) :: Nil)
  }

  test("SPARK-9323: DataFrame.orderBy should support nested column name") {
    val df = spark.read.json(Seq("""{"a": {"b": 1}}""").toDS())
    checkAnswer(df.orderBy("a.b"), Row(Row(1)))
  }

  test("SPARK-9950: correctly analyze grouping/aggregating on struct fields") {
    val df = Seq(("x", (1, 1)), ("y", (2, 2))).toDF("a", "b")
    checkAnswer(df.groupBy("b._1").agg(sum("b._2")), Row(1, 1) :: Row(2, 2) :: Nil)
  }

  // Building this plan and collecting must not trigger driver-side-only
  // transformations on executors.
  test("SPARK-10093: Avoid transformations on executors") {
    val df = Seq((1, 1)).toDF("a", "b")
    df.where($"a" === 1)
      .select($"a", $"b", struct($"b"))
      .orderBy("a")
      .select(struct($"b"))
      .collect()
  }

  // A directory name containing a comma must still load (no naive comma-splitting
  // of the path argument).
  test("SPARK-10185: Read multiple Hadoop Filesystem paths and paths with a comma in it") {
    withTempDir { dir =>
      val df1 = Seq((1, 22)).toDF("a", "b")
      val dir1 = new File(dir, "dir,1").getCanonicalPath
      df1.write.format("json").save(dir1)

      val df2 = Seq((2, 23)).toDF("a", "b")
      val dir2 = new File(dir, "dir2").getCanonicalPath
      df2.write.format("json").save(dir2)
      // Multiple paths load the union of both directories.
      checkAnswer(spark.read.format("json").load(dir1, dir2),
        Row(1, 22) :: Row(2, 23) :: Nil)

      checkAnswer(spark.read.format("json").load(dir1),
        Row(1, 22) :: Nil)
    }
  }

  // User-provided aliases that collide with internally generated names
  // ("aggOrder", "havingCondition") must still analyze correctly.
  test("Alias uses internally generated names 'aggOrder' and 'havingCondition'") {
    val df = Seq(1 -> 2).toDF("i", "j")
    val query1 = df.groupBy('i)
      .agg(max('j).as("aggOrder"))
      .orderBy(sum('j))
    checkAnswer(query1, Row(1, 2))

    // In the plan, there are two attributes having the same name 'havingCondition'
    // One is a user-provided alias name; another is an internally generated one.
    val query2 = df.groupBy('i)
      .agg(max('j).as("havingCondition"))
      .where(sum('j) > 0)
      .orderBy('havingCondition.asc)
    checkAnswer(query2, Row(1, 2))
  }

  // A non-deterministic column must be evaluated once per row, not re-evaluated
  // on each reference after a self-join.
  test("SPARK-10316: respect non-deterministic expressions in PhysicalOperation") {
    val input = spark.read.json((1 to 10).map(i => s"""{"id": $i}""").toDS())

    val df = input.select($"id", rand(0).as('r))
    df.as("a").join(df.filter($"r" < 0.5).as("b"), $"a.id" === $"b.id").collect().foreach { row =>
      assert(row.getDouble(1) - row.getDouble(3) === 0.0 +- 0.001)
    }
  }

  // Pushing a Project below Intersect/Except would change set-semantics row counts.
  test("SPARK-10539: Project should not be pushed down through Intersect or Except") {
    val df1 = (1 to 100).map(Tuple1.apply).toDF("i")
    val df2 = (1 to 30).map(Tuple1.apply).toDF("i")
    val intersect = df1.intersect(df2)
    val except = df1.except(df2)
    assert(intersect.count() === 30)
    assert(except.count() === 70)
  }

  test("SPARK-10740: handle nondeterministic expressions correctly for set operations") {
    val df1 = (1 to 20).map(Tuple1.apply).toDF("i")
    val df2 = (1 to 10).map(Tuple1.apply).toDF("i")

    // When generating expected results at here, we need to follow the implementation of
    // Rand expression.
def expected(df: DataFrame): Seq[Row] = { df.rdd.collectPartitions().zipWithIndex.flatMap { case (data, index) => val rng = new org.apache.spark.util.random.XORShiftRandom(7 + index) data.filter(_.getInt(0) < rng.nextDouble() * 10) } } val union = df1.union(df2) checkAnswer( union.filter('i < rand(7) * 10), expected(union) ) checkAnswer( union.select(rand(7)), union.rdd.collectPartitions().zipWithIndex.flatMap { case (data, index) => val rng = new org.apache.spark.util.random.XORShiftRandom(7 + index) data.map(_ => rng.nextDouble()).map(i => Row(i)) } ) val intersect = df1.intersect(df2) checkAnswer( intersect.filter('i < rand(7) * 10), expected(intersect) ) val except = df1.except(df2) checkAnswer( except.filter('i < rand(7) * 10), expected(except) ) } test("SPARK-10743: keep the name of expression if possible when do cast") { val df = (1 to 10).map(Tuple1.apply).toDF("i").as("src") assert(df.select($"src.i".cast(StringType)).columns.head === "i") assert(df.select($"src.i".cast(StringType).cast(IntegerType)).columns.head === "i") } test("SPARK-11301: fix case sensitivity for filter on partitioned columns") { withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { withTempPath { path => Seq(2012 -> "a").toDF("year", "val").write.partitionBy("year").parquet(path.getAbsolutePath) val df = spark.read.parquet(path.getAbsolutePath) checkAnswer(df.filter($"yEAr" > 2000).select($"val"), Row("a")) } } } /** * Verifies that there is no Exchange between the Aggregations for `df` */ private def verifyNonExchangingAgg(df: DataFrame) = { var atFirstAgg: Boolean = false df.queryExecution.executedPlan.foreach { case agg: HashAggregateExec => atFirstAgg = !atFirstAgg case _ => if (atFirstAgg) { fail("Should not have operators between the two aggregations") } } } /** * Verifies that there is an Exchange between the Aggregations for `df` */ private def verifyExchangingAgg(df: DataFrame) = { var atFirstAgg: Boolean = false df.queryExecution.executedPlan.foreach { case agg: 
HashAggregateExec => if (atFirstAgg) { fail("Should not have back to back Aggregates") } atFirstAgg = true case e: ShuffleExchange => atFirstAgg = false case _ => } } test("distributeBy and localSort") { val original = testData.repartition(1) assert(original.rdd.partitions.length == 1) val df = original.repartition(5, $"key") assert(df.rdd.partitions.length == 5) checkAnswer(original.select(), df.select()) val df2 = original.repartition(10, $"key") assert(df2.rdd.partitions.length == 10) checkAnswer(original.select(), df2.select()) // Group by the column we are distributed by. This should generate a plan with no exchange // between the aggregates val df3 = testData.repartition($"key").groupBy("key").count() verifyNonExchangingAgg(df3) verifyNonExchangingAgg(testData.repartition($"key", $"value") .groupBy("key", "value").count()) // Grouping by just the first distributeBy expr, need to exchange. verifyExchangingAgg(testData.repartition($"key", $"value") .groupBy("key").count()) val data = spark.sparkContext.parallelize( (1 to 100).map(i => TestData2(i % 10, i))).toDF() // Distribute and order by. val df4 = data.repartition($"a").sortWithinPartitions($"b".desc) // Walk each partition and verify that it is sorted descending and does not contain all // the values. 
df4.rdd.foreachPartition { p => // Skip empty partition if (p.hasNext) { var previousValue: Int = -1 var allSequential: Boolean = true p.foreach { r => val v: Int = r.getInt(1) if (previousValue != -1) { if (previousValue < v) throw new SparkException("Partition is not ordered.") if (v + 1 != previousValue) allSequential = false } previousValue = v } if (allSequential) throw new SparkException("Partition should not be globally ordered") } } // Distribute and order by with multiple order bys val df5 = data.repartition(2, $"a").sortWithinPartitions($"b".asc, $"a".asc) // Walk each partition and verify that it is sorted ascending df5.rdd.foreachPartition { p => var previousValue: Int = -1 var allSequential: Boolean = true p.foreach { r => val v: Int = r.getInt(1) if (previousValue != -1) { if (previousValue > v) throw new SparkException("Partition is not ordered.") if (v - 1 != previousValue) allSequential = false } previousValue = v } if (allSequential) throw new SparkException("Partition should not be all sequential") } // Distribute into one partition and order by. This partition should contain all the values. val df6 = data.repartition(1, $"a").sortWithinPartitions("b") // Walk each partition and verify that it is sorted ascending and not globally sorted. 
df6.rdd.foreachPartition { p => var previousValue: Int = -1 var allSequential: Boolean = true p.foreach { r => val v: Int = r.getInt(1) if (previousValue != -1) { if (previousValue > v) throw new SparkException("Partition is not ordered.") if (v - 1 != previousValue) allSequential = false } previousValue = v } if (!allSequential) throw new SparkException("Partition should contain all sequential values") } } test("fix case sensitivity of partition by") { withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { withTempPath { path => val p = path.getAbsolutePath Seq(2012 -> "a").toDF("year", "val").write.partitionBy("yEAr").parquet(p) checkAnswer(spark.read.parquet(p).select("YeaR"), Row(2012)) } } } // This test case is to verify a bug when making a new instance of LogicalRDD. test("SPARK-11633: LogicalRDD throws TreeNode Exception: Failed to Copy Node") { withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { val rdd = sparkContext.makeRDD(Seq(Row(1, 3), Row(2, 1))) val df = spark.createDataFrame( rdd, new StructType().add("f1", IntegerType).add("f2", IntegerType), needsConversion = false).select($"F1", $"f2".as("f2")) val df1 = df.as("a") val df2 = df.as("b") checkAnswer(df1.join(df2, $"a.f2" === $"b.f2"), Row(1, 3, 1, 3) :: Row(2, 1, 2, 1) :: Nil) } } test("SPARK-10656: completely support special chars") { val df = Seq(1 -> "a").toDF("i_$.a", "d^'a.") checkAnswer(df.select(df("*")), Row(1, "a")) checkAnswer(df.withColumnRenamed("d^'a.", "a"), Row(1, "a")) } test("SPARK-11725: correctly handle null inputs for ScalaUDF") { val df = sparkContext.parallelize(Seq( new java.lang.Integer(22) -> "John", null.asInstanceOf[java.lang.Integer] -> "Lucy")).toDF("age", "name") // passing null into the UDF that could handle it val boxedUDF = udf[java.lang.Integer, java.lang.Integer] { (i: java.lang.Integer) => if (i == null) -10 else null } checkAnswer(df.select(boxedUDF($"age")), Row(null) :: Row(-10) :: Nil) spark.udf.register("boxedUDF", (i: java.lang.Integer) => (if (i == null) 
-10 else null): java.lang.Integer) checkAnswer(sql("select boxedUDF(null), boxedUDF(-1)"), Row(-10, null) :: Nil) val primitiveUDF = udf((i: Int) => i * 2) checkAnswer(df.select(primitiveUDF($"age")), Row(44) :: Row(null) :: Nil) } test("SPARK-12398 truncated toString") { val df1 = Seq((1L, "row1")).toDF("id", "name") assert(df1.toString() === "[id: bigint, name: string]") val df2 = Seq((1L, "c2", false)).toDF("c1", "c2", "c3") assert(df2.toString === "[c1: bigint, c2: string ... 1 more field]") val df3 = Seq((1L, "c2", false, 10)).toDF("c1", "c2", "c3", "c4") assert(df3.toString === "[c1: bigint, c2: string ... 2 more fields]") val df4 = Seq((1L, Tuple2(1L, "val"))).toDF("c1", "c2") assert(df4.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string>]") val df5 = Seq((1L, Tuple2(1L, "val"), 20.0)).toDF("c1", "c2", "c3") assert(df5.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string> ... 1 more field]") val df6 = Seq((1L, Tuple2(1L, "val"), 20.0, 1)).toDF("c1", "c2", "c3", "c4") assert(df6.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string> ... 2 more fields]") val df7 = Seq((1L, Tuple3(1L, "val", 2), 20.0, 1)).toDF("c1", "c2", "c3", "c4") assert( df7.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string ... 1 more field> ... 2 more fields]") val df8 = Seq((1L, Tuple7(1L, "val", 2, 3, 4, 5, 6), 20.0, 1)).toDF("c1", "c2", "c3", "c4") assert( df8.toString === "[c1: bigint, c2: struct<_1: bigint, _2: string ... 5 more fields> ... 2 more fields]") val df9 = Seq((1L, Tuple4(1L, Tuple4(1L, 2L, 3L, 4L), 2L, 3L), 20.0, 1)).toDF("c1", "c2", "c3", "c4") assert( df9.toString === "[c1: bigint, c2: struct<_1: bigint," + " _2: struct<_1: bigint," + " _2: bigint ... 2 more fields> ... 2 more fields> ... 
2 more fields]") } test("reuse exchange") { withSQLConf("spark.sql.autoBroadcastJoinThreshold" -> "2") { val df = spark.range(100).toDF() val join = df.join(df, "id") val plan = join.queryExecution.executedPlan checkAnswer(join, df) assert( join.queryExecution.executedPlan.collect { case e: ShuffleExchange => true }.size === 1) assert( join.queryExecution.executedPlan.collect { case e: ReusedExchangeExec => true }.size === 1) val broadcasted = broadcast(join) val join2 = join.join(broadcasted, "id").join(broadcasted, "id") checkAnswer(join2, df) assert( join2.queryExecution.executedPlan.collect { case e: ShuffleExchange => true }.size === 1) assert( join2.queryExecution.executedPlan .collect { case e: BroadcastExchangeExec => true }.size === 1) assert( join2.queryExecution.executedPlan.collect { case e: ReusedExchangeExec => true }.size === 4) } } test("sameResult() on aggregate") { val df = spark.range(100) val agg1 = df.groupBy().count() val agg2 = df.groupBy().count() // two aggregates with different ExprId within them should have same result assert(agg1.queryExecution.executedPlan.sameResult(agg2.queryExecution.executedPlan)) val agg3 = df.groupBy().sum() assert(!agg1.queryExecution.executedPlan.sameResult(agg3.queryExecution.executedPlan)) val df2 = spark.range(101) val agg4 = df2.groupBy().count() assert(!agg1.queryExecution.executedPlan.sameResult(agg4.queryExecution.executedPlan)) } test("SPARK-12512: support `.` in column name for withColumn()") { val df = Seq("a" -> "b").toDF("col.a", "col.b") checkAnswer(df.select(df("*")), Row("a", "b")) checkAnswer(df.withColumn("col.a", lit("c")), Row("c", "b")) checkAnswer(df.withColumn("col.c", lit("c")), Row("a", "b", "c")) } test("SPARK-12841: cast in filter") { checkAnswer( Seq(1 -> "a").toDF("i", "j").filter($"i".cast(StringType) === "1"), Row(1, "a")) } test("SPARK-12982: Add table name validation in temp table registration") { val df = Seq("foo", "bar").map(Tuple1.apply).toDF("col") // invalid table names 
Seq("11111", "t~", "#$@sum", "table!#").foreach { name => val m = intercept[AnalysisException](df.createOrReplaceTempView(name)).getMessage assert(m.contains(s"Invalid view name: $name")) } // valid table names Seq("table1", "`11111`", "`t~`", "`#$@sum`", "`table!#`").foreach { name => df.createOrReplaceTempView(name) } } test("assertAnalyzed shouldn't replace original stack trace") { val e = intercept[AnalysisException] { spark.range(1).select('id as 'a, 'id as 'b).groupBy('a).agg('b) } assert(e.getStackTrace.head.getClassName != classOf[QueryExecution].getName) } test("SPARK-13774: Check error message for non existent path without globbed paths") { val uuid = UUID.randomUUID().toString val baseDir = Utils.createTempDir() try { val e = intercept[AnalysisException] { spark.read.format("csv").load( new File(baseDir, "file").getAbsolutePath, new File(baseDir, "file2").getAbsolutePath, new File(uuid, "file3").getAbsolutePath, uuid).rdd } assert(e.getMessage.startsWith("Path does not exist")) } finally { } } test("SPARK-13774: Check error message for not existent globbed paths") { // Non-existent initial path component: val nonExistentBasePath = "/" + UUID.randomUUID().toString assert(!new File(nonExistentBasePath).exists()) val e = intercept[AnalysisException] { spark.read.format("text").load(s"$nonExistentBasePath/*") } assert(e.getMessage.startsWith("Path does not exist")) // Existent initial path component, but no matching files: val baseDir = Utils.createTempDir() val childDir = Utils.createTempDir(baseDir.getAbsolutePath) assert(childDir.exists()) try { val e1 = intercept[AnalysisException] { spark.read.json(s"${baseDir.getAbsolutePath}/*/*-xyz.json").rdd } assert(e1.getMessage.startsWith("Path does not exist")) } finally { Utils.deleteRecursively(baseDir) } } test("SPARK-15230: distinct() does not handle column name with dot properly") { val df = Seq(1, 1, 2).toDF("column.with.dot") checkAnswer(df.distinct(), Row(1) :: Row(2) :: Nil) } test("SPARK-16181: outer 
join with isNull filter") { val left = Seq("x").toDF("col") val right = Seq("y").toDF("col").withColumn("new", lit(true)) val joined = left.join(right, left("col") === right("col"), "left_outer") checkAnswer(joined, Row("x", null, null)) checkAnswer(joined.filter($"new".isNull), Row("x", null, null)) } test("SPARK-16664: persist with more than 200 columns") { val size = 201L val rdd = sparkContext.makeRDD(Seq(Row.fromSeq(Seq.range(0, size)))) val schemas = List.range(0, size).map(a => StructField("name" + a, LongType, true)) val df = spark.createDataFrame(rdd, StructType(schemas), false) assert(df.persist.take(1).apply(0).toSeq(100).asInstanceOf[Long] == 100) } test("SPARK-17409: Do Not Optimize Query in CTAS (Data source tables) More Than Once") { withTable("bar") { withTempView("foo") { withSQLConf(SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> "json") { sql("select 0 as id").createOrReplaceTempView("foo") val df = sql("select * from foo group by id") // If we optimize the query in CTAS more than once, the following saveAsTable will fail // with the error: `GROUP BY position 0 is not in select list (valid range is [1, 1])` df.write.mode("overwrite").saveAsTable("bar") checkAnswer(spark.table("bar"), Row(0) :: Nil) val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier("bar")) assert(tableMetadata.provider == Some("json"), "the expected table is a data source table using json") } } } } test("copy results for sampling with replacement") { val df = Seq((1, 0), (2, 0), (3, 0)).toDF("a", "b") val sampleDf = df.sample(true, 2.00) val d = sampleDf.withColumn("c", monotonically_increasing_id).select($"c").collect assert(d.size == d.distinct.size) } private def verifyNullabilityInFilterExec( df: DataFrame, expr: String, expectedNonNullableColumns: Seq[String]): Unit = { val dfWithFilter = df.where(s"isnotnull($expr)").selectExpr(expr) // In the logical plan, all the output columns of input dataframe are nullable 
dfWithFilter.queryExecution.optimizedPlan.collect { case e: Filter => assert(e.output.forall(_.nullable)) } dfWithFilter.queryExecution.executedPlan.collect { // When the child expression in isnotnull is null-intolerant (i.e. any null input will // result in null output), the involved columns are converted to not nullable; // otherwise, no change should be made. case e: FilterExec => assert(e.output.forall { o => if (expectedNonNullableColumns.contains(o.name)) !o.nullable else o.nullable }) } } test("SPARK-17957: no change on nullability in FilterExec output") { val df = sparkContext.parallelize(Seq( null.asInstanceOf[java.lang.Integer] -> new java.lang.Integer(3), new java.lang.Integer(1) -> null.asInstanceOf[java.lang.Integer], new java.lang.Integer(2) -> new java.lang.Integer(4))).toDF() verifyNullabilityInFilterExec(df, expr = "Rand()", expectedNonNullableColumns = Seq.empty[String]) verifyNullabilityInFilterExec(df, expr = "coalesce(_1, _2)", expectedNonNullableColumns = Seq.empty[String]) verifyNullabilityInFilterExec(df, expr = "coalesce(_1, 0) + Rand()", expectedNonNullableColumns = Seq.empty[String]) verifyNullabilityInFilterExec(df, expr = "cast(coalesce(cast(coalesce(_1, _2) as double), 0.0) as int)", expectedNonNullableColumns = Seq.empty[String]) } test("SPARK-17957: set nullability to false in FilterExec output") { val df = sparkContext.parallelize(Seq( null.asInstanceOf[java.lang.Integer] -> new java.lang.Integer(3), new java.lang.Integer(1) -> null.asInstanceOf[java.lang.Integer], new java.lang.Integer(2) -> new java.lang.Integer(4))).toDF() verifyNullabilityInFilterExec(df, expr = "_1 + _2 * 3", expectedNonNullableColumns = Seq("_1", "_2")) verifyNullabilityInFilterExec(df, expr = "_1 + _2", expectedNonNullableColumns = Seq("_1", "_2")) verifyNullabilityInFilterExec(df, expr = "_1", expectedNonNullableColumns = Seq("_1")) // `constructIsNotNullConstraints` infers the IsNotNull(_2) from IsNotNull(_2 + Rand()) // Thus, we are able to set nullability 
of _2 to false. // If IsNotNull(_2) is not given from `constructIsNotNullConstraints`, the impl of // isNullIntolerant in `FilterExec` needs an update for more advanced inference. verifyNullabilityInFilterExec(df, expr = "_2 + Rand()", expectedNonNullableColumns = Seq("_2")) verifyNullabilityInFilterExec(df, expr = "_2 * 3 + coalesce(_1, 0)", expectedNonNullableColumns = Seq("_2")) verifyNullabilityInFilterExec(df, expr = "cast((_1 + _2) as boolean)", expectedNonNullableColumns = Seq("_1", "_2")) } test("SPARK-17897: Fixed IsNotNull Constraint Inference Rule") { val data = Seq[java.lang.Integer](1, null).toDF("key") checkAnswer(data.filter(!$"key".isNotNull), Row(null)) checkAnswer(data.filter(!(- $"key").isNotNull), Row(null)) } test("SPARK-17957: outer join + na.fill") { val df1 = Seq((1, 2), (2, 3)).toDF("a", "b") val df2 = Seq((2, 5), (3, 4)).toDF("a", "c") val joinedDf = df1.join(df2, Seq("a"), "outer").na.fill(0) val df3 = Seq((3, 1)).toDF("a", "d") checkAnswer(joinedDf.join(df3, "a"), Row(3, 0, 4, 1)) } test("SPARK-17123: Performing set operations that combine non-scala native types") { val dates = Seq( (new Date(0), BigDecimal.valueOf(1), new Timestamp(2)), (new Date(3), BigDecimal.valueOf(4), new Timestamp(5)) ).toDF("date", "timestamp", "decimal") val widenTypedRows = Seq( (new Timestamp(2), 10.5D, "string") ).toDF("date", "timestamp", "decimal") dates.union(widenTypedRows).collect() dates.except(widenTypedRows).collect() dates.intersect(widenTypedRows).collect() } test("SPARK-18070 binary operator should not consider nullability when comparing input types") { val rows = Seq(Row(Seq(1), Seq(1))) val schema = new StructType() .add("array1", ArrayType(IntegerType)) .add("array2", ArrayType(IntegerType, containsNull = false)) val df = spark.createDataFrame(spark.sparkContext.makeRDD(rows), schema) assert(df.filter($"array1" === $"array2").count() == 1) } test("SPARK-17913: compare long and string type column may return confusing result") { val df = Seq(123L 
-> "123", 19157170390056973L -> "19157170390056971").toDF("i", "j") checkAnswer(df.select($"i" === $"j"), Row(true) :: Row(false) :: Nil) } test("SPARK-19691 Calculating percentile of decimal column fails with ClassCastException") { val df = spark.range(1).selectExpr("CAST(id as DECIMAL) as x").selectExpr("percentile(x, 0.5)") checkAnswer(df, Row(BigDecimal(0.0)) :: Nil) } test("SPARK-19893: cannot run set operations with map type") { val df = spark.range(1).select(map(lit("key"), $"id").as("m")) val e = intercept[AnalysisException](df.intersect(df)) assert(e.message.contains( "Cannot have map type columns in DataFrame which calls set operations")) val e2 = intercept[AnalysisException](df.except(df)) assert(e2.message.contains( "Cannot have map type columns in DataFrame which calls set operations")) val e3 = intercept[AnalysisException](df.distinct()) assert(e3.message.contains( "Cannot have map type columns in DataFrame which calls set operations")) withTempView("v") { df.createOrReplaceTempView("v") val e4 = intercept[AnalysisException](sql("SELECT DISTINCT m FROM v")) assert(e4.message.contains( "Cannot have map type columns in DataFrame which calls set operations")) } } }
jianran/spark
sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
Scala
apache-2.0
64,252
package io.vamp.gateway_driver

import akka.actor.Actor
import akka.pattern.ask
import akka.util.Timeout
import io.vamp.common.akka._
import io.vamp.common.notification.{ ErrorNotification, Notification }
import io.vamp.common.vitals.InfoRequest
import io.vamp.gateway_driver.notification.{ GatewayDriverNotificationProvider, GatewayDriverResponseError, UnsupportedGatewayDriverRequest }
import io.vamp.model.artifact._
import io.vamp.model.event.Event
import io.vamp.persistence.{ KeyValueStoreActor, PersistenceMarshaller }
import io.vamp.pulse.PulseActor
import io.vamp.pulse.PulseActor.Publish
import io.vamp.pulse.notification.PulseFailureNotifier

import scala.concurrent.Future
import scala.util.Try

/**
 * Message protocol of [[GatewayDriverActor]].
 *
 * All gateway state is stored under the key-value store path `rootPath`
 * (`Gateway.kind`); per-marshaller template and rendered configuration live
 * under `rootPath / type / name / ("template" | "configuration")`.
 */
object GatewayDriverActor {

  /** Root key-value store path for all gateway data. */
  val rootPath: List[String] = Gateway.kind :: Nil

  sealed trait GatewayDriverMessage

  /** Read the currently stored gateways. */
  object Pull extends GatewayDriverMessage

  /** Persist the gateways and re-render every marshaller's configuration. */
  case class Push(gateways: List[Gateway]) extends GatewayDriverMessage

  case class GetTemplate(`type`: String, name: String) extends GatewayDriverMessage

  case class SetTemplate(`type`: String, name: String, template: String) extends GatewayDriverMessage

  case class ResetTemplate(`type`: String, name: String) extends GatewayDriverMessage

  case class GetConfiguration(`type`: String, name: String) extends GatewayDriverMessage
}

/** Pairs a marshaller with its default (built-in) template text. */
case class GatewayMarshallerDefinition(marshaller: GatewayMarshaller, template: String)

/**
 * Actor that persists gateway definitions in the key-value store and renders
 * them into concrete proxy configurations via the configured `marshallers`
 * (keyed by marshaller name).
 *
 * Template/configuration changes are announced on the pulse event stream.
 */
class GatewayDriverActor(marshallers: Map[String, GatewayMarshallerDefinition]) extends PersistenceMarshaller with PulseFailureNotifier with CommonSupportForActors with GatewayDriverNotificationProvider {

  import GatewayDriverActor._

  lazy implicit val timeout: Timeout = KeyValueStoreActor.timeout()

  def receive: Actor.Receive = {
    case InfoRequest          ⇒ reply(info)
    case r: GetTemplate       ⇒ reply(getTemplate(r.`type`, r.name))
    case r: SetTemplate       ⇒ reply(setTemplate(r.`type`, r.name, r.template))
    case r: ResetTemplate     ⇒ reply(resetTemplate(r.`type`, r.name))
    case r: GetConfiguration  ⇒ reply(getConfiguration(r.`type`, r.name))
    case Pull                 ⇒ reply(pull())
    case Push(gateways)       ⇒ push(gateways)
    case _: Event             ⇒ // pulse events delivered back to this actor are intentionally ignored
    case other                ⇒ unsupported(UnsupportedGatewayDriverRequest(other))
  }

  override def errorNotificationClass: Class[_ <: ErrorNotification] = classOf[GatewayDriverResponseError]

  override def failure(failure: Any, `class`: Class[_ <: Notification] = errorNotificationClass): Exception = super[PulseFailureNotifier].failure(failure, `class`)

  /** Info payload: per-marshaller info, keyed by marshaller name. */
  private def info = Future.successful {
    Map("marshallers" → marshallers.map { case (name, definition) ⇒ name → definition.marshaller.info })
  }

  /** Reads the string value stored at `path`, if any. */
  private def get(path: List[String]): Future[Option[String]] = {
    IoC.actorFor[KeyValueStoreActor] ? KeyValueStoreActor.Get(path) map {
      case Some(content: String) ⇒ Option(content)
      case _                     ⇒ None
    }
  }

  /**
   * Writes `value` at `path`, skipping the write when the stored content is
   * already identical (completes with `None` in that no-op case — callers
   * match on `None | false` to detect it).
   */
  private def set(path: List[String], value: String): Future[_] = {
    IoC.actorFor[KeyValueStoreActor] ? KeyValueStoreActor.Get(path) flatMap {
      case Some(content: String) if value == content ⇒ Future.successful(None)
      case _ ⇒ IoC.actorFor[KeyValueStoreActor] ? KeyValueStoreActor.Set(path, Option(value))
    }
  }

  /** Clears `path` only if something is stored there; otherwise completes with `None`. */
  private def reset(path: List[String]): Future[_] = {
    IoC.actorFor[KeyValueStoreActor] ? KeyValueStoreActor.Get(path) flatMap {
      // Fixed: was `case Some(c_)` — a lowercase name pattern that bound an
      // unused variable; the wildcard states the intent (any stored value).
      case Some(_) ⇒ IoC.actorFor[KeyValueStoreActor] ? KeyValueStoreActor.Set(path, None)
      case _       ⇒ Future.successful(None)
    }
  }

  /** Loads and unmarshalls the stored gateways; any parse failure yields `Nil`. */
  private def pull(): Future[List[Gateway]] = {
    IoC.actorFor[KeyValueStoreActor] ? KeyValueStoreActor.Get(rootPath) map {
      case Some(content: String) ⇒ Try(unmarshall[Gateway](content)).getOrElse(Nil)
      case _                     ⇒ Nil
    }
  }

  /**
   * Persists `gateways` and, for each marshaller, renders its template into a
   * configuration. A "configuration" pulse event is published only when the
   * rendered content actually changed (the `None | false` no-op sentinel from
   * `set` is filtered out). Fire-and-forget: resulting futures are not awaited.
   */
  private def push(gateways: List[Gateway]): Unit = {
    set(rootPath, marshall(gateways))
    marshallers.foreach {
      case (name, definition) ⇒ getTemplate(definition.marshaller.`type`, name).map { content ⇒
        set(configurationPath(definition.marshaller.`type`, name), definition.marshaller.marshall(gateways, content)).map {
          case None | false ⇒
          case stored ⇒
            IoC.actorFor[PulseActor] ! Publish(Event(Event.defaultVersion, tags(definition.marshaller.`type`, name, "configuration"), None), publishEventValue = false)
            stored
        }
      }
    }
  }

  /** Rendered configuration for the given marshaller, or "" when none stored. */
  private def getConfiguration(`type`: String, name: String): Future[String] = {
    get(configurationPath(`type`, name)).map(_.getOrElse(""))
  }

  /**
   * Stored template for the given marshaller, falling back to the marshaller's
   * built-in default when the stored value is absent or blank.
   */
  private def getTemplate(`type`: String, name: String): Future[String] = {
    get(templatePath(`type`, name)).map {
      case Some(content) if content.trim.nonEmpty ⇒ content
      case _ ⇒ marshallers.get(name).map(_.template).getOrElse("")
    }
  }

  /** Stores a template; publishes a "template" pulse event when it changed. */
  private def setTemplate(`type`: String, name: String, template: String) = {
    set(templatePath(`type`, name), template).map {
      case None | false ⇒
      case stored ⇒
        IoC.actorFor[PulseActor] ! Publish(Event(Event.defaultVersion, tags(`type`, name, "template"), None), publishEventValue = false)
        stored
    }
  }

  /** Removes a stored template; publishes a "template" pulse event when one existed. */
  private def resetTemplate(`type`: String, name: String) = {
    reset(templatePath(`type`, name)).map {
      case None | false ⇒
      case result ⇒
        IoC.actorFor[PulseActor] ! Publish(Event(Event.defaultVersion, tags(`type`, name, "template"), None), publishEventValue = false)
        result
    }
  }

  /** Pulse event tags identifying marshaller action, type and name. */
  private def tags(`type`: String, name: String, action: String) = Set(s"marshaller:$action", s"type:${`type`}", s"name:$name")

  private def templatePath(kind: String, name: String) = rootPath :+ kind :+ name :+ "template"

  private def configurationPath(kind: String, name: String) = rootPath :+ kind :+ name :+ "configuration"
}
magneticio/vamp
gateway_driver/src/main/scala/io/vamp/gateway_driver/GatewayDriverActor.scala
Scala
apache-2.0
5,968
/* * (c) Copyright 2016 Hewlett Packard Enterprise Development LP * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //package cogdebugger.ui.fieldvisualizations.vector // //import libcog._ //import cogx.platform.cpumemory.VectorFieldMemory //import scala.swing._ // ///** // * A simple visual test case for the OpenGL accelerated ColorFlow vector // * visualization. // * // * Initially, the display is updated with a "key" field (vectors radiate out // * from center of field, increasing in length towards the edges) to verify // * that the color coding is what we expect. There are also a couple of buttons // * to update the visualization with random data - notable is the "Churn" button, // * which will first generate a large number of fields in memory, and then // * display them, one after the other and as fast as possible, to test how // * quickly the view can animate. // * // * Created by gonztobi on 2/24/14. 
// */ //object TestColorFlowGL extends SimpleSwingApplication { // val (rows, cols) = (480, 640) // val (midRow, midCol) = (rows / 2, cols / 2) // val MaxLength = midRow min midCol // val Normalize = 1f / MaxLength // val field = VectorFieldMemory(rows, cols, (row, col) => { // val r = (row - midRow) * Normalize // val c = (col - midCol) * Normalize // new Vector(r, c) // }) // //// val field = ColorFlowVectorPanelGL.keyField //// val (rows, cols) = (field.rows, field.columns) // // val viewer = new ColorFlowGL(null, field.fieldShape, field.tensorShape) // viewer.update(null, field, 0L) // // lazy val bunchOfFields = { // println("Pre-generating a bunch of random fields...") // for (i <- 0 until 100) // yield VectorFieldMemory(rows, cols, (_, _) => Vector.random(2) * 2 - 1) // } // // lazy val top = new MainFrame() { // title = "Test ColorFlowGL" // // val model = new javax.swing.SpinnerNumberModel(1, 0, 25, 1) // val jSpinner = new javax.swing.JSpinner(model) // jSpinner.addChangeListener(new javax.swing.event.ChangeListener { // def stateChanged(e: javax.swing.event.ChangeEvent) { // viewer.maxVectorLength = model.getValue.asInstanceOf[Int] // } // }) // val spinner = Component.wrap(jSpinner) // // val resetButton = Button("Reset") { viewer.update(null, field, 0L) } // // /** Update the viewer with a random vector field. */ // val updateButton = Button("Random") { // val mem = VectorFieldMemory(rows, cols, (_, _) => Vector.random(2) * 2 - 1) // viewer.update(null, mem, 0L) // } // // /** Used to test the speed at which the visualization can redraw. 
This is // * done by pre-generating a bunch of random fields, and then drawing them // * one after the other as quickly as possible.*/ // val churnButton = Button("Churn") { // bunchOfFields // Initialize the lazy val // println("Execute!") // val t0 = java.lang.System.currentTimeMillis() // for (field <- bunchOfFields) viewer.update(null, field, 0L) // val t1 = java.lang.System.currentTimeMillis() // println("Rendered "+bunchOfFields.length+" fields in "+(t1 - t0)+" ms " + // "("+bunchOfFields.length * 1000f / (t1 - t0)+" fps)") // } // // val buttonPanel = new BoxPanel(Orientation.Horizontal) // buttonPanel.contents ++= Seq(resetButton, // updateButton, // churnButton, // Swing.HStrut(10), // new Label("Saturation Threshold:"), // Swing.HStrut(5), // spinner, // Swing.HGlue) // // val panel = new BorderPanel() // panel.layout(viewer) = BorderPanel.Position.Center // panel.layout(buttonPanel) = BorderPanel.Position.South // // contents = panel // } //}
hpe-cct/cct-core
src/test/scala/cogdebugger/ui/fieldvisualizations/vector/TestColorFlowGL.scala
Scala
apache-2.0
4,370
package org.lanyard.util

import org.lanyard.random.KISS
import org.scalatest.FunSpec
import org.scalatest.Matchers

/**
 * Tests for the weighted sampling utility [[Sample]].
 *
 * Uses the deterministic [[KISS]] generator seeded with 42, so expected draws
 * are stable across runs.
 */
class SampleTest extends FunSpec with Matchers {

  describe("The sample function") {

    it("returns the correct values if a weight is set to one.") {
      val rng = KISS(42)
      val ls = List(1, 3, 5, 6)

      // With all the probability mass on one position, the draw must be the
      // element at that position regardless of the RNG state.
      val weights1 = List(1.0, 0.0, 0.0, 0.0)
      val (draw1, _) = Sample(ls, weights1, rng)
      draw1 should be (1)

      val weights2 = List(0.0, 1.0, 0.0, 0.0)
      val (draw2, _) = Sample(ls, weights2, rng)
      draw2 should be (3)

      val weights3 = List(0.0, 0.0, 1.0, 0.0)
      val (draw3, _) = Sample(ls, weights3, rng)
      draw3 should be (5)

      val weights4 = List(0.0, 0.0, 0.0, 1.0)
      val (draw4, _) = Sample(ls, weights4, rng)
      draw4 should be (6)
    }

    it("can sample from an array") {
      val rng = KISS(42)

      // NOTE: the first double drawn from KISS(42) is ~0.93; the expected
      // indices below are derived from that value.
      val probabilities1 = Array(1.0, 0.0, 0.0)
      val (draw1, nextRNG1) = Sample.fromArray(probabilities1, rng)
      draw1 should be (0)

      val probabilities2 = Array(0.9, 0.05)
      val (draw2, _) = Sample.fromArray(probabilities2, rng)
      draw2 should be (1)

      val probabilities3 = Array(0.45, 0.05, 0.5)
      val (draw3, _) = Sample.fromArray(probabilities3, nextRNG1)
      draw3 should be (2)
    }
  }
}
perian/Lanyard
src/test/scala/org/lanyard/util/SampleTest.scala
Scala
gpl-2.0
1,471