code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/**
* Copyright 2013-2015 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.paypal.cascade.common.tests.enumeration
import com.paypal.cascade.common.enumeration._
import org.specs2.Specification
import scala.util.Try
import com.paypal.cascade.common.tests.util.CommonImmutableSpecificationContext
/**
* Tests for [[com.paypal.cascade.common.enumeration.Enumeration]]
*/
class EnumerationSpecs extends Specification { override def is = s2"""
Tests our own type-safe Enumeration framework
LowerEnumReader:
readEnum should return Some if the string is a valid enum ${LowerReadEnum().returnsSome}
readEnum should return None if the string is an invalid enum ${LowerReadEnum().returnsNone}
toEnum should return the enum value if the string is a valid enum ${LowerToEnum().returns}
toEnum should throw if the string is an invalid enum value ${LowerToEnum().throws}
UpperEnumReader:
readEnum should return Some if the string is a valid enum ${UpperReadEnum().returnsSome}
readEnum should return None if the string is an invalid enum ${UpperReadEnum().returnsNone}
toEnum should return the enum value if the string is a valid enum ${UpperToEnum().returns}
toEnum should throw if the string is an invalid enum value ${UpperToEnum().throws}
"""

  // Fixture: a two-value enumeration with lowercase string values and an
  // implicit reader built via `lowerEnumReader`.
  class LowerContext extends CommonImmutableSpecificationContext {
    sealed abstract class MyLowerEnum extends Enumeration
    object MyLowerEnum1 extends MyLowerEnum {
      override lazy val stringVal = "myenum1"
    }
    object MyLowerEnum2 extends MyLowerEnum {
      override lazy val stringVal = "myenum2"
    }
    implicit val reader = lowerEnumReader(MyLowerEnum1, MyLowerEnum2)
  }

  // Fixture: the same shape as LowerContext but with uppercase string values
  // and a reader built via `upperEnumReader`.
  class UpperContext extends CommonImmutableSpecificationContext {
    sealed abstract class MyUpperEnum extends Enumeration
    object MyUpperEnum1 extends MyUpperEnum {
      override lazy val stringVal = "MYENUM1"
    }
    object MyUpperEnum2 extends MyUpperEnum {
      override lazy val stringVal = "MYENUM2"
    }
    implicit val reader = upperEnumReader(MyUpperEnum1, MyUpperEnum2)
  }

  // Examples for `readEnum` with the lowercase reader. A fresh case-class
  // instance per example keeps the fixtures isolated.
  case class LowerReadEnum() extends LowerContext {
    def returnsSome = apply {
      MyLowerEnum1.stringVal.readEnum[MyLowerEnum] must beSome.like {
        case e => e must beEqualTo(MyLowerEnum1)
      }
    }
    def returnsNone = apply {
      // A suffix makes the string match no declared enum value.
      s"${MyLowerEnum1.stringVal}-INVALID".readEnum[MyLowerEnum] must beNone
    }
  }

  // Examples for `toEnum` with the lowercase reader.
  case class LowerToEnum() extends LowerContext {
    def returns = apply {
      MyLowerEnum1.stringVal.toEnum[MyLowerEnum] must beEqualTo(MyLowerEnum1)
    }
    def throws = apply {
      // toEnum is expected to throw for unknown values; Try(...).toOption
      // maps that throw to None.
      Try(s"${MyLowerEnum1.stringVal}-INVALID".toEnum[MyLowerEnum]).toOption must beNone
    }
  }

  // Examples for `readEnum` with the uppercase reader.
  case class UpperReadEnum() extends UpperContext {
    def returnsSome = apply {
      MyUpperEnum1.stringVal.readEnum[MyUpperEnum] must beSome.like {
        case e => e must beEqualTo(MyUpperEnum1)
      }
    }
    def returnsNone = apply {
      s"${MyUpperEnum1.stringVal}-INVALID".readEnum[MyUpperEnum] must beNone
    }
  }

  // Examples for `toEnum` with the uppercase reader.
  case class UpperToEnum() extends UpperContext {
    def returns = apply {
      MyUpperEnum1.stringVal.toEnum[MyUpperEnum] must beEqualTo(MyUpperEnum1)
    }
    def throws = apply {
      Try(s"${MyUpperEnum1.stringVal}-INVALID".toEnum[MyUpperEnum]).toOption must beNone
    }
  }
}
| 2rs2ts/cascade | common/src/test/scala/com/paypal/cascade/common/tests/enumeration/EnumerationSpecs.scala | Scala | apache-2.0 | 3,884 |
package drt.client.components
import drt.client.logger.{Logger, LoggerFactory}
import japgolly.scalajs.react.{Children, JsComponent}
import scala.scalajs.js
import scala.scalajs.js.annotation.JSImport
import scala.util.{Failure, Success, Try}
/** Scala.js wrapper around the `@handsontable/react` HotTable React component. */
object HotTable {
  val log: Logger = LoggerFactory.getLogger("HotTable")

  /** The raw React component default-exported by `@handsontable/react`. */
  @JSImport("@handsontable/react", JSImport.Default)
  @js.native
  object HotTableComponent extends js.Object

  /** Props facade: Handsontable is configured through a single `settings` dictionary. */
  @js.native
  trait Props extends js.Object {
    var settings: js.Dictionary[js.Any] = js.native
  }

  /**
   * Builds the props for a HotTable.
   *
   * @param data           table cell values; the outer Seq is rows
   * @param colHeadings    column header labels
   * @param rowHeadings    row header labels
   * @param changeCallback invoked as (row, column, newValue) for each edited
   *                       cell whose new value is an Int or parses to one
   * @param colWidths      width applied to the columns (CSS size string)
   */
  def props(data: Seq[Seq[Any]],
            colHeadings: Seq[String],
            rowHeadings: Seq[String],
            changeCallback: (Int, Int, Int) => Unit,
            colWidths: String = "2em"
           ): Props = {
    import js.JSConverters._
    val props = (new js.Object).asInstanceOf[Props]
    // Handsontable afterChange hook: `changes` is an array of
    // [row, prop, oldValue, newValue] entries, or null on the initial load.
    val afterChange = (changes: js.Array[js.Array[Any]], _: String) => {
      val maybeArray = Option(changes)
      maybeArray.foreach(
        c => {
          c.toList.foreach(change =>
            // element 2 (the old value) is deliberately ignored
            (change(0), change(1), change(3)) match {
              case (row: Int, col: Int, value: String) =>
                Try(Integer.parseInt(value)) match {
                  case Success(v) =>
                    changeCallback(row, col, v)
                  case Failure(f) =>
                    log.warn(s"Couldn't parse $value to an Integer $f")
                }
              case (row: Int, col: Int, value: Int) =>
                changeCallback(row, col, value)
              case other =>
                log.error(s"couldn't match $other")
            }
          )
        })
      if (maybeArray.isEmpty) {
        log.info(s"Called change function with no values")
      }
    }
    props.settings = js.Dictionary(
      "data" -> data.map(_.toJSArray).toJSArray,
      "rowHeaders" -> rowHeadings.toJSArray,
      "colHeaders" -> colHeadings.toJSArray,
      "afterChange" -> afterChange,
      // Fix: the Handsontable option is named `colWidths`; the previous key
      // ("colWidth") is not a recognized option and was silently ignored.
      "colWidths" -> colWidths
    )
    props
  }

  val component = JsComponent[Props, Children.None, Null](HotTableComponent)
}
| UKHomeOffice/drt-scalajs-spa-exploration | client/src/main/scala/drt/client/components/HotTable.scala | Scala | apache-2.0 | 2,080 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
/**
* All doc-comments marked as "MDN" are by Mozilla Contributors,
* distributed under the Creative Commons Attribution-ShareAlike license from
* https://developer.mozilla.org/en-US/docs/Web/Reference/API
*/
package scala.scalajs.js
import scala.scalajs.js
import scala.scalajs.js.annotation._
/**
* Creates a JavaScript Date instance that represents a single moment in time.
* Date objects are based on a time value that is the number of milliseconds
* since 1 January, 1970 UTC.
*
* MDN
*
* @note
* `js.Date` objects can represent an *invalid date*, for example, if they
* are constructed from a `String` that cannot be parsed as a date. Most
* methods of such a `js.Date` will return `NaN` (for those returning a
* `Double`) or other invalid values.
*/
@js.native
@JSGlobal
class Date extends js.Object {

  /** Creates a `Date` from a time value: milliseconds since the Unix epoch
   *  (1 January 1970, 00:00:00 UTC).
   */
  def this(value: Double) = this()

  /** Creates a `Date` by parsing a string representation of a date, with the
   *  same rules as `Date.parse`.
   */
  def this(value: String) = this()

  /** Creates a `Date` from individual date/time components, interpreted in
   *  local time. `month` is 0-based (0 = January).
   */
  def this(year: Int, month: Int, date: Int = 1, hours: Int = 0,
      minutes: Int = 0, seconds: Int = 0, ms: Int = 0) = this()

  /** Returns the date portion of this `Date` as a human-readable string. */
  def toDateString(): String = js.native

  /** Returns the time portion of this `Date` as a human-readable string. */
  def toTimeString(): String = js.native

  /** Returns the date portion as a locale-sensitive string. */
  def toLocaleDateString(): String = js.native

  /** Returns the time portion as a locale-sensitive string. */
  def toLocaleTimeString(): String = js.native

  /** Returns the primitive time value of this `Date`, equivalent to [[getTime]]. */
  override def valueOf(): Double = js.native

  /**
   * Returns the numeric value of the specified date as the number of
   * milliseconds since January 1, 1970, 00:00:00 UTC.
   * (Negative values are returned for prior times).
   *
   * MDN
   */
  def getTime(): Double = js.native

  /**
   * Returns the year (4 digits for 4-digit years) of the specified date according to local time.
   *
   * MDN
   */
  def getFullYear(): Double = js.native

  /**
   * Returns the year (4 digits for 4-digit years) in the specified date according to universal time.
   *
   * MDN
   */
  def getUTCFullYear(): Double = js.native

  /**
   * Returns the month (0-11) in the specified date according to local time.
   *
   * MDN
   */
  def getMonth(): Double = js.native

  /**
   * Returns the month (0-11) in the specified date according to universal time.
   *
   * MDN
   */
  def getUTCMonth(): Double = js.native

  /**
   * Returns the day of the month (1-31) for the specified date according to local time.
   *
   * MDN
   */
  def getDate(): Double = js.native

  /**
   * Returns the day (date) of the month (1-31) in the specified date according to universal time.
   *
   * MDN
   */
  def getUTCDate(): Double = js.native

  /**
   * Returns the day of the week (0-6) for the specified date according to local time.
   *
   * MDN
   */
  def getDay(): Double = js.native

  /**
   * Returns the day of the week (0-6) in the specified date according to universal time.
   * MDN
   */
  def getUTCDay(): Double = js.native

  /**
   * Returns the hour (0-23) in the specified date according to local time.
   *
   * MDN
   */
  def getHours(): Double = js.native

  /**
   * Returns the hours (0-23) in the specified date according to universal time.
   *
   * MDN
   */
  def getUTCHours(): Double = js.native

  /**
   * Returns the minutes (0-59) in the specified date according to local time.
   *
   * MDN
   */
  def getMinutes(): Double = js.native

  /**
   * Returns the minutes (0-59) in the specified date according to universal time.
   *
   * MDN
   */
  def getUTCMinutes(): Double = js.native

  /**
   * Returns the seconds (0-59) in the specified date according to local time.
   *
   * MDN
   */
  def getSeconds(): Double = js.native

  /**
   * Returns the seconds (0-59) in the specified date according to universal time.
   *
   * MDN
   */
  def getUTCSeconds(): Double = js.native

  /**
   * Returns the milliseconds (0-999) in the specified date according to local time.
   *
   * MDN
   */
  def getMilliseconds(): Double = js.native

  /**
   * Returns the milliseconds (0-999) in the specified date according to universal time.
   *
   * MDN
   */
  def getUTCMilliseconds(): Double = js.native

  /**
   * Returns the time-zone offset in minutes for the current locale.
   *
   * MDN
   */
  def getTimezoneOffset(): Double = js.native

  // ----- Mutators. Each setter updates this Date in place; finer-grained
  // components not passed explicitly default to their current values.

  /** Sets this date to the given time value (ms since the Unix epoch). */
  def setTime(time: Double): Unit = js.native
  def setMilliseconds(ms: Double): Unit = js.native
  def setUTCMilliseconds(ms: Double): Unit = js.native
  def setSeconds(sec: Double, ms: Double = getMilliseconds()): Unit = js.native
  def setUTCSeconds(sec: Double,
      ms: Double = getMilliseconds()): Unit = js.native
  def setMinutes(min: Double, sec: Double = getSeconds(),
      ms: Double = getMilliseconds()): Unit = js.native
  def setUTCMinutes(min: Double, sec: Double = getSeconds(),
      ms: Double = getMilliseconds()): Unit = js.native
  def setHours(hours: Double, min: Double = getMinutes(),
      sec: Double = getSeconds(),
      ms: Double = getMilliseconds()): Unit = js.native
  def setUTCHours(hours: Double, min: Double = getMinutes(),
      sec: Double = getSeconds(),
      ms: Double = getMilliseconds()): Unit = js.native
  def setDate(date: Double): Unit = js.native
  def setUTCDate(date: Double): Unit = js.native
  def setMonth(month: Double, date: Double = getDate()): Unit = js.native
  def setUTCMonth(month: Double, date: Double = getDate()): Unit = js.native
  def setFullYear(year: Double, month: Double = getMonth(),
      date: Double = getDate()): Unit = js.native
  def setUTCFullYear(year: Double, month: Double = getMonth(),
      date: Double = getDate()): Unit = js.native

  /** Converts this date to a string, using the UTC time zone. */
  def toUTCString(): String = js.native

  /** Returns this date in simplified extended ISO 8601 format, always in UTC. */
  def toISOString(): String = js.native

  /** Serialization hook used by `JSON.stringify`; returns [[toISOString]]. */
  def toJSON(key: Any): String = js.native
  def toJSON(): String = js.native
}
/** Factory for [[js.Date]] objects. */
@js.native
@JSGlobal
object Date extends js.Object {

  /** Called as a function (without `new`), `Date()` returns a string
   *  representation of the current date and time rather than a `Date` object.
   */
  def apply(): String = js.native

  /**
   * Parses a string representation of a date and returns the number of
   * milliseconds since 1 January, 1970, 00:00:00, local time.
   *
   * The parse method takes a date string (such as "Dec 25, 1995") and returns
   * the number of milliseconds since January 1, 1970, 00:00:00 UTC. The local
   * time zone is used to interpret arguments that do not contain time zone
   * information. This function is useful for setting date values based on
   * string values, for example in conjunction with the setTime() method and
   * the Date object.
   *
   * Given a string representing a time, parse returns the time value. It
   * accepts the RFC2822 / IETF date syntax (RFC2822 Section 3.3), e.g.
   * "Mon, 25 Dec 1995 13:30:00 GMT". It understands the continental US time-
   * zone abbreviations, but for general use, use a time-zone offset, for
   * example, "Mon, 25 Dec 1995 13:30:00 +0430" (4 hours, 30 minutes east of
   * the Greenwich meridian). If you do not specify a time zone, the local time
   * zone is assumed. GMT and UTC are considered equivalent.
   *
   * MDN
   */
  def parse(s: String): Double = js.native

  /** Like the multi-argument `Date` constructor, but interprets the
   *  components as UTC and returns the corresponding time value in
   *  milliseconds rather than a `Date` object. `month` is 0-based.
   */
  def UTC(year: Int, month: Int, date: Int = 1, hours: Int = 0,
      minutes: Int = 0, seconds: Int = 0, ms: Int = 0): Double = js.native

  /**
   * Returns the numeric value corresponding to the current time - the number
   * of milliseconds elapsed since 1 January 1970 00:00:00 UTC.
   *
   * MDN
   */
  def now(): Double = js.native
}
| scala-js/scala-js | library/src/main/scala/scala/scalajs/js/Date.scala | Scala | apache-2.0 | 7,453 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package toolkit.neuralnetwork.util
import libcog._
import toolkit.neuralnetwork.operator.sumSpray
object Classify {
  /** Calculate the classification from an inference by taking the softmax of the inference
    * and comparing the max output value to the second largest. If the max value is greater
    * than the second by greater than the defined margin, then a one-hot code is emitted.
    * Otherwise the output for that example is all zeros, indicating a failure to
    * classify above the defined margin.
    *
    * @param inference field to perform classification with (1-D tensor field whose
    *                  length is a multiple of `batchSize`)
    * @param batchSize number of examples in the batched field
    * @param margin    margin to use
    * @return A one-hot classification code for each example if the margin is met
    */
  def apply(inference: Field, batchSize: Int, margin: Float) = {
    val inferenceLen = inference.tensorShape(0)
    require(inference.tensorShape.dimensions == 1)
    require(inferenceLen % batchSize == 0)
    val numClasses = inferenceLen / batchSize
    // Hoist exp(inference) so the exponential is defined once instead of twice.
    val expInference = exp(inference)
    // Per-example softmax; max(..., 1e-7f) guards against division by zero.
    val softmax = expInference / max(sumSpray(expInference, batchSize), 1e-7f)
    val maxVal = blockReduceMax(softmax, numClasses)
    // 1f where a class is within `margin` of its example's max, else 0f.
    val comparison = GPUOperator(softmax.fieldType) {
      _globalThreads(softmax.fieldShape, softmax.tensorShape)
      val curBatch = _tensorElement / numClasses
      val curVal = _readTensorElement(softmax, _tensorElement)
      val curMax = _readTensorElement(maxVal, curBatch)
      _writeTensorElement(_out0, curVal > (curMax - margin), _tensorElement)
    }
    // Number of classes within the margin, per example.
    val winnerCount = blockReduceSum(comparison, numClasses)
    // Keep the one-hot code only when exactly one class won; otherwise zeros.
    val classification = GPUOperator(softmax.fieldType) {
      _globalThreads(softmax.fieldShape, softmax.tensorShape)
      val curBatch = _tensorElement / numClasses
      val curVal = _readTensorElement(comparison, _tensorElement)
      val singleWinner = _readTensorElement(winnerCount, curBatch) === 1f
      _writeTensorElement(_out0, curVal * singleWinner, _tensorElement)
    }
    classification
  }
}
| hpe-cct/cct-nn | src/main/scala/toolkit/neuralnetwork/util/Classify.scala | Scala | apache-2.0 | 2,627 |
package dotty.tools
package dotc
package core
import Periods._, Contexts._, Symbols._, Denotations._, Names._, NameOps._, Annotations._
import Types._, Flags._, Decorators._, DenotTransformers._, StdNames._, Scopes._
import NameOps._
import Scopes.Scope
import collection.mutable
import collection.immutable.BitSet
import scala.reflect.io.AbstractFile
import Decorators.SymbolIteratorDecorator
import ast._
import annotation.tailrec
import CheckRealizable._
import typer.Mode
import util.SimpleMap
import util.Stats
import config.Config
import config.Printers._
trait SymDenotations { this: Context =>
  import SymDenotations._

  /** Factory method for SymDenotation creation. All creations
   *  should be done via this method.
   */
  def SymDenotation(
    symbol: Symbol,
    owner: Symbol,
    name: Name,
    initFlags: FlagSet,
    initInfo: Type,
    initPrivateWithin: Symbol = NoSymbol)(implicit ctx: Context): SymDenotation = {
    val result =
      // Classes get a (Package)ClassDenotation, which additionally tracks the run id.
      if (symbol.isClass)
        if (initFlags is Package) new PackageClassDenotation(symbol, owner, name, initFlags, initInfo, initPrivateWithin, ctx.runId)
        else new ClassDenotation(symbol, owner, name, initFlags, initInfo, initPrivateWithin, ctx.runId)
      else new SymDenotation(symbol, owner, name, initFlags, initInfo, initPrivateWithin)
    result.validFor = stablePeriod
    result
  }

  /** Is `denot` still valid? ValidForever and refinement-class denotations
   *  always are; otherwise the check is re-run on the denotation's initial
   *  incarnation at its first valid phase (but no earlier than typer).
   */
  def stillValid(denot: SymDenotation): Boolean =
    if (denot.is(ValidForever) || denot.isRefinementClass) true
    else {
      val initial = denot.initial
      val firstPhaseId = initial.validFor.firstPhaseId.max(ctx.typerPhase.id)
      if ((initial ne denot) || ctx.phaseId != firstPhaseId)
        ctx.withPhase(firstPhaseId).stillValidInOwner(initial.asSymDenotation)
      else
        stillValidInOwner(denot)
    }

  /** A denotation is valid relative to its owner if the owner is valid and
   *  either is not a class, is a refinement class, still declares the
   *  denotation's symbol, or the denotation is a self symbol.
   *  A StaleSymbol thrown during the check counts as invalid.
   */
  private[SymDenotations] def stillValidInOwner(denot: SymDenotation): Boolean = try {
    val owner = denot.owner.denot
    stillValid(owner) && (
      !owner.isClass
      || owner.isRefinementClass
      || (owner.unforcedDecls.lookupAll(denot.name) contains denot.symbol)
      || denot.isSelfSym)
  } catch {
    case ex: StaleSymbol => false
  }

  /** Explain why symbol is invalid; used for debugging only */
  def traceInvalid(denot: Denotation): Boolean = {
    def show(d: Denotation) = s"$d#${d.symbol.id}"
    def explain(msg: String) = {
      println(s"${show(denot)} is invalid at ${this.period} because $msg")
      false
    }
    denot match {
      case denot: SymDenotation =>
        def explainSym(msg: String) = explain(s"$msg\\n defined = ${denot.definedPeriodsString}")
        if (denot.is(ValidForever) || denot.isRefinementClass) true
        else {
          implicit val ctx: Context = this
          val initial = denot.initial
          if ((initial ne denot) || ctx.phaseId != initial.validFor.firstPhaseId) {
            ctx.withPhase(initial.validFor.firstPhaseId).traceInvalid(initial.asSymDenotation)
          } else try {
            // Mirrors stillValidInOwner, but prints the reason for each failure.
            val owner = denot.owner.denot
            if (!traceInvalid(owner)) explainSym("owner is invalid")
            else if (!owner.isClass || owner.isRefinementClass || denot.isSelfSym) true
            else if (owner.unforcedDecls.lookupAll(denot.name) contains denot.symbol) true
            else explainSym(s"decls of ${show(owner)} are ${owner.unforcedDecls.lookupAll(denot.name).toList}, do not contain ${denot.symbol}")
          } catch {
            case ex: StaleSymbol => explainSym(s"$ex was thrown")
          }
        }
      case _ =>
        explain("denotation is not a SymDenotation")
    }
  }
}
object SymDenotations {
/** A sym-denotation represents the contents of a definition
* during a period.
*/
class SymDenotation private[SymDenotations] (
symbol: Symbol,
ownerIfExists: Symbol,
final val name: Name,
initFlags: FlagSet,
final val initInfo: Type,
initPrivateWithin: Symbol = NoSymbol) extends SingleDenotation(symbol) {
//assert(symbol.id != 4940, name)
// A SymDenotation denotes a unique symbol whenever it exists at all.
override def hasUniqueSym: Boolean = exists

/** Debug only
override def validFor_=(p: Period) = {
  super.validFor_=(p)
}
*/

if (Config.checkNoSkolemsInInfo) assertNoSkolems(initInfo)

// ------ Getting and setting fields -----------------------------

private[this] var myFlags: FlagSet = adaptFlags(initFlags)
private[this] var myInfo: Type = initInfo
private[this] var myPrivateWithin: Symbol = initPrivateWithin
private[this] var myAnnotations: List[Annotation] = Nil

/** The owner of the symbol; overridden in NoDenotation */
def owner: Symbol = ownerIfExists

/** Same as owner, except returns NoSymbol for NoSymbol */
def maybeOwner: Symbol = if (exists) owner else NoSymbol

/** The flag set */
final def flags(implicit ctx: Context): FlagSet = { ensureCompleted(); myFlags }

/** The flag set without forcing symbol completion.
 *  Should be used only for printing.
 */
private[dotc] final def flagsUNSAFE = myFlags

/** Adapt flag set to this denotation's term or type nature */
private def adaptFlags(flags: FlagSet) = if (isType) flags.toTypeFlags else flags.toTermFlags

/** Update the flag set */
final def flags_=(flags: FlagSet): Unit =
  myFlags = adaptFlags(flags)

/** Set given flags(s) of this denotation */
final def setFlag(flags: FlagSet): Unit = { myFlags |= flags }

/** Unset given flags(s) of this denotation */
final def resetFlag(flags: FlagSet): Unit = { myFlags &~= flags }

/** Set applicable flags from `flags` which is a subset of {NoInits, PureInterface} */
final def setApplicableFlags(flags: FlagSet): Unit = {
  // Traits may take the interface-related flags; everything else only NoInits.
  val mask = if (myFlags.is(Trait)) NoInitsInterface else NoInits
  setFlag(flags & mask)
}

/** Has this denotation one of the flags in `fs` set? */
final def is(fs: FlagSet)(implicit ctx: Context) = {
  // From-start flags are valid before completion, so avoid forcing `flags`.
  (if (fs <= FromStartFlags) myFlags else flags) is fs
}

/** Has this denotation one of the flags in `fs` set, whereas none of the flags
 *  in `butNot` are set?
 */
final def is(fs: FlagSet, butNot: FlagSet)(implicit ctx: Context) =
  (if (fs <= FromStartFlags && butNot <= FromStartFlags) myFlags else flags) is (fs, butNot)

/** Has this denotation all of the flags in `fs` set? */
final def is(fs: FlagConjunction)(implicit ctx: Context) =
  (if (fs <= FromStartFlags) myFlags else flags) is fs

/** Has this denotation all of the flags in `fs` set, whereas none of the flags
 *  in `butNot` are set?
 */
final def is(fs: FlagConjunction, butNot: FlagSet)(implicit ctx: Context) =
  (if (fs <= FromStartFlags && butNot <= FromStartFlags) myFlags else flags) is (fs, butNot)

/** The type info.
 *  The info is an instance of TypeType iff this is a type denotation
 *  Uncompleted denotations set myInfo to a LazyType.
 */
final def info(implicit ctx: Context): Type = myInfo match {
  case myInfo: LazyType => completeFrom(myInfo); info
  case _ => myInfo
}

/** The type info, or, if symbol is not yet completed, the completer */
final def infoOrCompleter = myInfo

/** Optionally, the info if it is completed */
final def unforcedInfo: Option[Type] = myInfo match {
  case myInfo: LazyType => None
  case _ => Some(myInfo)
}
/** Complete this denotation from the given lazy completer, guarding
 *  against cyclic completion via the Touched flag.
 */
private def completeFrom(completer: LazyType)(implicit ctx: Context): Unit = {
  if (completions ne noPrinter) {
    completions.println(i"${" " * indent}completing ${if (isType) "type" else "val"} $name")
    indent += 1
  }
  // Re-entering completion while already completing signals a cycle.
  if (myFlags is Touched) throw CyclicReference(this)
  myFlags |= Touched

  // completions.println(s"completing ${this.debugString}")
  // Complete at the first phase in which this denotation is valid.
  try completer.complete(this)(ctx.withPhase(validFor.firstPhaseId))
  catch {
    case ex: CyclicReference =>
      completions.println(s"error while completing ${this.debugString}")
      throw ex
  }
  finally
    if (completions ne noPrinter) {
      indent -= 1
      completions.println(i"${" " * indent}completed $name in $owner")
    }
  // completions.println(s"completed ${this.debugString}")
}
/** Set the info of this denotation to `tp`.
 *  Fix: when `Config.checkNoSkolemsInInfo` is on, validate the *new* info
 *  `tp`; previously this re-checked `initInfo`, which was already validated
 *  in the class body, so updated infos escaped the skolem check entirely.
 */
protected[dotc] def info_=(tp: Type) = {
  /* // DEBUG
  def illegal: String = s"illegal type for $this: $tp"
  if (this is Module) // make sure module invariants that allow moduleClass and sourceModule to work are kept.
    tp match {
      case tp: ClassInfo => assert(tp.selfInfo.isInstanceOf[TermRefBySym], illegal)
      case tp: NamedType => assert(tp.isInstanceOf[TypeRefBySym], illegal)
      case tp: ExprType => assert(tp.resultType.isInstanceOf[TypeRefBySym], illegal)
      case _ =>
    }
  */
  if (Config.checkNoSkolemsInInfo) assertNoSkolems(tp)
  myInfo = tp
}
/** The name, except
 *  - if this is a module class, strip the module class suffix
 *  - if this is a companion object with a clash-avoiding name, strip the
 *    "avoid clash" suffix
 */
def effectiveName(implicit ctx: Context) =
  if (this is ModuleClass) name.stripModuleClassSuffix
  else name.stripAvoidClashSuffix

/** The privateWithin boundary, NoSymbol if no boundary is given.
 */
final def privateWithin(implicit ctx: Context): Symbol = { ensureCompleted(); myPrivateWithin }

/** Set privateWithin. */
protected[core] final def privateWithin_=(sym: Symbol): Unit =
  myPrivateWithin = sym

/** The annotations of this denotation */
final def annotations(implicit ctx: Context): List[Annotation] = {
  ensureCompleted(); myAnnotations
}

/** Update the annotations of this denotation */
private[core] final def annotations_=(annots: List[Annotation]): Unit =
  myAnnotations = annots

/** Does this denotation have an annotation matching the given class symbol? */
final def hasAnnotation(cls: Symbol)(implicit ctx: Context) =
  dropOtherAnnotations(annotations, cls).nonEmpty

/** Apply transform `f` to all annotations of this denotation */
final def transformAnnotations(f: Annotation => Annotation)(implicit ctx: Context): Unit =
  annotations = annotations.mapConserve(f)

/** Keep only those annotations that satisfy `p` */
final def filterAnnotations(p: Annotation => Boolean)(implicit ctx: Context): Unit =
  annotations = annotations.filterConserve(p)

/** Optionally, the annotation matching the given class symbol */
final def getAnnotation(cls: Symbol)(implicit ctx: Context): Option[Annotation] =
  dropOtherAnnotations(annotations, cls) match {
    case annot :: _ => Some(annot)
    case nil => None
  }

/** Add given annotation to the annotations of this denotation */
final def addAnnotation(annot: Annotation): Unit =
  annotations = annot :: myAnnotations

/** Remove annotation with given class from this denotation */
final def removeAnnotation(cls: Symbol)(implicit ctx: Context): Unit =
  annotations = myAnnotations.filterNot(_ matches cls)

/** Add all given annotations to this symbol */
final def addAnnotations(annots: TraversableOnce[Annotation])(implicit ctx: Context): Unit =
  annots.foreach(addAnnotation)

/** Drop leading annotations until one matching `cls` is found;
 *  returns Nil if none matches.
 */
@tailrec
private def dropOtherAnnotations(anns: List[Annotation], cls: Symbol)(implicit ctx: Context): List[Annotation] = anns match {
  case ann :: rest => if (ann matches cls) anns else dropOtherAnnotations(rest, cls)
  case Nil => Nil
}

/** The denotation is completed: info is not a lazy type and attributes have defined values */
final def isCompleted: Boolean = !myInfo.isInstanceOf[LazyType]

/** The denotation is in train of being completed */
final def isCompleting: Boolean = (myFlags is Touched) && !isCompleted

/** The completer of this denotation. @pre: Denotation is not yet completed */
final def completer: LazyType = myInfo.asInstanceOf[LazyType]

/** Make sure this denotation is completed */
final def ensureCompleted()(implicit ctx: Context): Unit = info

/** The symbols defined in this class or object.
 *  Careful! This does not force the type, so is compilation order dependent.
 *  This method should be used only in the following circumstances:
 *
 *  1. When accessing type parameters or type parameter accessors (both are entered before
 *     completion).
 *  2. When obtaining the current scope in order to enter, rename or delete something there.
 *  3. When playing it safe in order not to raise CylicReferences, e.g. for printing things
 *     or taking more efficient shortcuts (e.g. the stillValid test).
 */
final def unforcedDecls(implicit ctx: Context): Scope = myInfo match {
  case cinfo: LazyType =>
    val knownDecls = cinfo.decls
    if (knownDecls ne EmptyScope) knownDecls
    else { completeFrom(cinfo); unforcedDecls } // complete-once
  case _ => info.decls
}

/** If this is a package class, the symbols entered in it
 *  before it is completed. (this is needed to eagerly enter synthetic
 *  aliases such as AnyRef into a package class without forcing it.
 *  Right now, the only usage is for the AnyRef alias in Definitions.
 */
final private[core] def currentPackageDecls(implicit ctx: Context): MutableScope = myInfo match {
  case pinfo: SymbolLoaders # PackageLoader => pinfo.currentDecls
  case _ => unforcedDecls.openForMutations
}
// ------ Names ----------------------------------------------

/** The expanded name of this denotation. */
final def expandedName(implicit ctx: Context) =
  if (is(ExpandedName) || isConstructor) name
  else {
    def legalize(name: Name): Name = // JVM method names may not contain `<' or `>' characters
      if (is(Method)) name.replace('<', '(').replace('>', ')') else name
    legalize(name.expandedName(initial.asSymDenotation.owner))
  }
  // need to use initial owner to disambiguate, as multiple private symbols with the same name
  // might have been moved from different origins into the same class

/** The name with which the denoting symbol was created */
final def originalName(implicit ctx: Context) = {
  val d = initial.asSymDenotation
  if (d is ExpandedName) d.name.unexpandedName else d.name // !!!DEBUG, was: effectiveName
}

/** The encoded full path name of this denotation, where outer names and inner names
 *  are separated by `separator` strings.
 *  Never translates expansions of operators back to operator symbol.
 *  Drops package objects. Represents terms in the owner chain by a simple `~`.
 *  (Note: scalac uses nothing to represent terms, which can cause name clashes
 *  between same-named definitions in different enclosing methods. Before this commit
 *  we used `$' but this can cause ambiguities with the class separator '$').
 *  A separator "" means "flat name"; the real separator in this case is "$" and
 *  enclosing packages do not form part of the name.
 */
def fullNameSeparated(separator: String)(implicit ctx: Context): Name = {
  var sep = separator
  var stopAtPackage = false
  if (sep.isEmpty) {
    sep = "$"
    stopAtPackage = true
  }
  if (symbol == NoSymbol ||
      owner == NoSymbol ||
      owner.isEffectiveRoot ||
      stopAtPackage && owner.is(PackageClass)) name
  else {
    // Walk up the owner chain to the nearest class or package object,
    // appending one `~` per skipped (term) owner.
    var encl = owner
    while (!encl.isClass && !encl.isPackageObject) {
      encl = encl.owner
      sep += "~"
    }
    if (owner.is(ModuleClass, butNot = Package) && sep == "$") sep = "" // duplicate scalac's behavior: don't write a double '$$' for module class members.
    val fn = encl.fullNameSeparated(separator) ++ sep ++ name
    if (isType) fn.toTypeName else fn.toTermName
  }
}

/** The encoded flat name of this denotation, where joined names are separated by `separator` characters. */
def flatName(implicit ctx: Context): Name = fullNameSeparated("")

/** `fullName` where `.' is the separator character */
def fullName(implicit ctx: Context): Name = fullNameSeparated(".")
// ----- Tests -------------------------------------------------

/** Is this denotation a type? */
override def isType: Boolean = name.isTypeName

/** Is this denotation a class? */
final def isClass: Boolean = isInstanceOf[ClassDenotation]

/** Is this denotation a non-trait class? */
final def isRealClass(implicit ctx: Context) = isClass && !is(Trait)

/** Cast to class denotation */
final def asClass: ClassDenotation = asInstanceOf[ClassDenotation]

/** is this symbol the result of an erroneous definition? */
def isError: Boolean = false

/** Make denotation not exist */
final def markAbsent(): Unit =
  myInfo = NoType

/** Is symbol known to not exist? */
final def isAbsent(implicit ctx: Context): Boolean =
  myInfo == NoType ||
  (this is (ModuleVal, butNot = Package)) && moduleClass.isAbsent

/** Is this symbol the root class or its companion object? */
final def isRoot: Boolean =
  (name.toTermName == nme.ROOT || name == nme.ROOTPKG) && (owner eq NoSymbol)

/** Is this symbol the empty package class or its companion object? */
final def isEmptyPackage(implicit ctx: Context): Boolean =
  name.toTermName == nme.EMPTY_PACKAGE && owner.isRoot

/** Is this symbol the root class/object or the empty package class/object? */
final def isEffectiveRoot(implicit ctx: Context) = isRoot || isEmptyPackage

/** Is this symbol an anonymous class? */
final def isAnonymousClass(implicit ctx: Context): Boolean =
  isClass && (initial.asSymDenotation.name startsWith tpnme.ANON_CLASS)

/** Is this symbol an anonymous function (a method with the anonymous-function name prefix)? */
final def isAnonymousFunction(implicit ctx: Context) =
  this.symbol.is(Method) && (initial.asSymDenotation.name startsWith nme.ANON_FUN)

/** Is this symbol a module value carrying the anonymous-class name prefix? */
final def isAnonymousModuleVal(implicit ctx: Context) =
  this.symbol.is(ModuleVal) && (initial.asSymDenotation.name startsWith nme.ANON_CLASS)

/** Is this a companion class method or companion object method?
 *  These methods are generated by Symbols#synthesizeCompanionMethod
 *  and used in SymDenotations#companionClass and
 *  SymDenotations#companionModule .
 */
final def isCompanionMethod(implicit ctx: Context) =
  name.toTermName == nme.COMPANION_CLASS_METHOD ||
  name.toTermName == nme.COMPANION_MODULE_METHOD

/** Is this a synthetic method that represents conversions between representations of a value class
 *  These methods are generated in ExtensionMethods
 *  and used in ElimErasedValueType.
 */
final def isValueClassConvertMethod(implicit ctx: Context) =
  name.toTermName == nme.U2EVT ||
  name.toTermName == nme.EVT2U

/** Is symbol a primitive value class? */
def isPrimitiveValueClass(implicit ctx: Context) =
  maybeOwner == defn.ScalaPackageClass && defn.ScalaValueClasses().contains(symbol)

/** Is symbol a primitive numeric value class? */
def isNumericValueClass(implicit ctx: Context) =
  maybeOwner == defn.ScalaPackageClass && defn.ScalaNumericValueClasses().contains(symbol)

/** Is symbol a phantom class for which no runtime representation exists? */
def isPhantomClass(implicit ctx: Context) = defn.PhantomClasses contains symbol

/** Is this symbol a class representing a refinement? These classes
 *  are used only temporarily in Typer and Unpickler as an intermediate
 *  step for creating Refinement types.
 */
final def isRefinementClass(implicit ctx: Context): Boolean =
  name.decode == tpnme.REFINE_CLASS

/** is this symbol a trait representing a type lambda? */
final def isLambdaTrait(implicit ctx: Context): Boolean =
  isClass && name.startsWith(tpnme.hkLambdaPrefix) && owner == defn.ScalaPackageClass

/** Is this symbol a package object or its module class? */
def isPackageObject(implicit ctx: Context): Boolean = {
  val poName = if (isType) nme.PACKAGE_CLS else nme.PACKAGE
  (name.toTermName == poName) && (owner is Package) && (this is Module)
}

/** Is this symbol an abstract type? */
final def isAbstractType(implicit ctx: Context) = isType && (this is Deferred)

/** Is this symbol an alias type? */
final def isAliasType(implicit ctx: Context) = isAbstractOrAliasType && !(this is Deferred)
/** Is this symbol an abstract or alias type (i.e. a type that is not a class)? */
final def isAbstractOrAliasType = isType && !isClass
/** Is this the denotation of a self symbol of some class?
* This is the case if one of two conditions holds:
* 1. It is the symbol referred to in the selfInfo part of the ClassInfo
* which is the type of this symbol's owner.
* 2. This symbol is owned by a class, it's selfInfo field refers to a type
* (indicating the self definition does not introduce a name), and the
* symbol's name is "_".
* TODO: Find a more robust way to characterize self symbols, maybe by
* spending a Flag on them?
*/
final def isSelfSym(implicit ctx: Context) = owner.infoOrCompleter match {
case ClassInfo(_, _, _, _, selfInfo) =>
selfInfo == symbol ||
selfInfo.isInstanceOf[Type] && name == nme.WILDCARD
case _ => false
}
/** Is this definition contained in `boundary`?
* Same as `ownersIterator contains boundary` but more efficient.
*/
final def isContainedIn(boundary: Symbol)(implicit ctx: Context): Boolean = {
// walk the owner chain; a package class cannot be inside a non-package boundary
def recur(sym: Symbol): Boolean =
if (sym eq boundary) true
else if (sym eq NoSymbol) false
else if ((sym is PackageClass) && !(boundary is PackageClass)) false
else recur(sym.owner)
recur(symbol)
}
/** Is this definition contained in `boundary` and different from it? */
final def isProperlyContainedIn(boundary: Symbol)(implicit ctx: Context): Boolean =
symbol != boundary && isContainedIn(boundary)
/** Is this denotation static (i.e. with no outer instance)? */
final def isStatic(implicit ctx: Context) =
(this is JavaStatic) || this.exists && owner.isStaticOwner || this.isRoot
/** Is this a package class or module class that defines static symbols? */
final def isStaticOwner(implicit ctx: Context): Boolean =
(this is PackageClass) || (this is ModuleClass) && isStatic
/** Is this denotation defined in the same scope and compilation unit as that symbol? */
final def isCoDefinedWith(that: Symbol)(implicit ctx: Context) =
(this.effectiveOwner == that.effectiveOwner) &&
( !(this.effectiveOwner is PackageClass)
|| this.isAbsent || that.isAbsent
|| { // check if they are defined in the same file(or a jar)
val thisFile = this.symbol.associatedFile
val thatFile = that.symbol.associatedFile
( thisFile == null
|| thatFile == null
|| thisFile.path == thatFile.path // Cheap possibly wrong check, then expensive normalization
|| thisFile.canonicalPath == thatFile.canonicalPath
)
}
)
/** Is this a denotation of a stable term (or an arbitrary type)? */
final def isStable(implicit ctx: Context) =
isType || is(Stable) || !(is(UnstableValue) || info.isInstanceOf[ExprType])
/** Is this a "real" method? A real method is a method which is:
* - not an accessor
* - not a label
* - not an anonymous function
* - not a companion method
*/
final def isRealMethod(implicit ctx: Context) =
this.is(Method, butNot = AccessorOrLabel) &&
!isAnonymousFunction &&
!isCompanionMethod
/** Is this a getter (an accessor that is neither a setter nor a Scala-2 local)? */
final def isGetter(implicit ctx: Context) =
(this is Accessor) && !originalName.isSetterName && !originalName.isScala2LocalSuffix
/** Is this a setter? */
final def isSetter(implicit ctx: Context) =
(this is Accessor) &&
originalName.isSetterName &&
(!isCompleted || info.firstParamTypes.nonEmpty) // to avoid being fooled by var x_= : Unit = ...
/** Is this the constructor of a class? */
final def isClassConstructor = name == nme.CONSTRUCTOR
/** Is this the constructor of a trait? */
final def isImplClassConstructor = name == nme.TRAIT_CONSTRUCTOR
/** Is this the constructor of a trait or a class */
final def isConstructor = name.isConstructorName
/** Is this a local template dummy? */
final def isLocalDummy: Boolean = name.isLocalDummyName
/** Does this symbol denote the primary constructor of its enclosing class? */
final def isPrimaryConstructor(implicit ctx: Context) =
isConstructor && owner.primaryConstructor == symbol
/** Is this a subclass of the given class `base`?
* False here; overridden in ClassDenotation.
*/
def isSubClass(base: Symbol)(implicit ctx: Context) = false
/** Is this a subclass of `base`,
* and is the denoting symbol also different from `Null` or `Nothing`?
* @note erroneous classes are assumed to derive from all other classes
* and all classes derive from them.
*/
def derivesFrom(base: Symbol)(implicit ctx: Context) = false
/** Is this symbol a class that extends `AnyVal`? */
final def isValueClass(implicit ctx: Context): Boolean = {
val di = this.initial.asSymDenotation
di.isClass &&
di.derivesFrom(defn.AnyValClass)(ctx.withPhase(di.validFor.firstPhaseId))
// We call derivesFrom at the initial phase both because AnyVal does not exist
// after Erasure and to avoid cyclic references caused by forcing denotations
}
/** Is this symbol a class references to which that are supertypes of null? */
final def isNullableClass(implicit ctx: Context): Boolean =
isClass && !isValueClass && !(this is ModuleClass)
/** Is this definition accessible as a member of tree with type `pre`?
* @param pre The type of the tree from which the selection is made
* @param superAccess Access is via super
* @param whyNot If non-null, receives an explanation when access is denied
* Everything is accessible if `pre` is `NoPrefix`.
* A symbol with type `NoType` is not accessible for any other prefix.
*/
final def isAccessibleFrom(pre: Type, superAccess: Boolean = false, whyNot: StringBuffer = null)(implicit ctx: Context): Boolean = {
/** Are we inside definition of `boundary`? */
def accessWithin(boundary: Symbol) = {
def test(implicit ctx: Context) =
ctx.owner.isContainedIn(boundary) &&
(!(this is JavaDefined) || // disregard package nesting for Java
ctx.owner.enclosingPackageClass == boundary.enclosingPackageClass)
try test
catch {
// It might be we are in a definition whose symbol is not defined at the
// period where the test is made. Retry with FutureDefsOK. The reason
// for not doing this outright is speed. We would like to avoid
// creating a new context object each time we call accessWithin.
// Note that the exception should be thrown only infrequently.
case ex: NotDefinedHere => test(ctx.addMode(Mode.FutureDefsOK))
}
}
/** Are we within definition of linked class of `boundary`? */
def accessWithinLinked(boundary: Symbol) = {
val linked = boundary.linkedClass
(linked ne NoSymbol) && accessWithin(linked)
}
/** Is `pre` the same as C.thisThis, where C is exactly the owner of this symbol,
* or, if this symbol is protected, a subclass of the owner?
*/
def isCorrectThisType(pre: Type): Boolean = pre match {
case pre: ThisType =>
(pre.cls eq owner) || (this is Protected) && pre.cls.derivesFrom(owner)
case pre: TermRef =>
pre.symbol.moduleClass == owner
case _ =>
false
}
/** Is protected access to target symbol permitted? */
def isProtectedAccessOK = {
// record the reason in `whyNot` (if provided) and deny access
def fail(str: => String): Boolean = {
if (whyNot != null) whyNot append str
false
}
val cls = owner.enclosingSubClass
if (!cls.exists)
fail(
s""" Access to protected $this not permitted because
| enclosing ${ctx.owner.enclosingClass.showLocated} is not a subclass of
| ${owner.showLocated} where target is defined""".stripMargin)
else if (
!( isType // allow accesses to types from arbitrary subclasses fixes #4737
|| pre.baseTypeRef(cls).exists // ??? why not use derivesFrom ???
|| isConstructor
|| (owner is ModuleClass) // don't perform this check for static members
))
fail(
s""" Access to protected ${symbol.show} not permitted because
| prefix type ${pre.widen.show} does not conform to
| ${cls.showLocated} where the access takes place""".stripMargin)
else true
}
if (pre eq NoPrefix) true
else if (info eq NoType) false
else {
val boundary = accessBoundary(owner)
( boundary.isTerm
|| boundary.isRoot
|| (accessWithin(boundary) || accessWithinLinked(boundary)) &&
( !(this is Local)
|| (owner is ImplClass) // allow private local accesses to impl class members
|| isCorrectThisType(pre)
)
|| (this is Protected) &&
( superAccess
|| pre.isInstanceOf[ThisType]
|| ctx.phase.erasedTypes
|| isProtectedAccessOK
)
)
}
}
/** Do members of this symbol need translation via asSeenFrom when
* accessed via prefix `pre`?
*/
def membersNeedAsSeenFrom(pre: Type)(implicit ctx: Context) =
!( this.isTerm
|| this.isStaticOwner
|| ctx.erasedTypes
|| (pre eq NoPrefix) || (pre eq thisType)
)
/** Is this symbol concrete, or that symbol deferred? */
def isAsConcrete(that: Symbol)(implicit ctx: Context): Boolean =
!(this is Deferred) || (that is Deferred)
/** Does this symbol have defined or inherited default parameters?
* The answer is cached on the denotation by setting one of the
* InheritedDefaultParams / NoDefaultParams flags.
*/
def hasDefaultParams(implicit ctx: Context): Boolean =
if (this is HasDefaultParams) true
else if (this is NoDefaultParams) false
else {
val result = allOverriddenSymbols exists (_.hasDefaultParams)
setFlag(if (result) InheritedDefaultParams else NoDefaultParams)
result
}
/** Symbol is an owner that would be skipped by effectiveOwner. Skipped are
* - package objects
* - labels
* - non-lazy valdefs
*/
def isWeakOwner(implicit ctx: Context): Boolean =
isPackageObject ||
isTerm && !is(MethodOrLazy, butNot = Label) && !isLocalDummy
// def isOverridable: Boolean = !!! need to enforce that classes cannot be redefined
/** Is this a skolem symbol (recognized by its reserved name)? */
def isSkolem: Boolean = name == nme.SKOLEM
// ------ access to related symbols ---------------------------------
/* Modules and module classes are represented as follows:
*
* object X extends Y { def f() }
*
* <module> lazy val X: X$ = new X$
* <module> class X$ extends Y { this: X.type => def f() }
*
* During completion, references to moduleClass and sourceModules are stored in
* the completers.
*/
/** The class implementing this module, NoSymbol if not applicable. */
final def moduleClass(implicit ctx: Context): Symbol = {
// NOTE(review): debugging aid — prints directly to stdout when the class is missing
def notFound = { println(s"missing module class for $name: $myInfo"); NoSymbol }
if (this is ModuleVal)
myInfo match {
case info: TypeRef => info.symbol
case ExprType(info: TypeRef) => info.symbol // needed after uncurry, when module terms might be accessor defs
case info: LazyType => info.moduleClass
case t: MethodType =>
t.resultType match {
case info: TypeRef => info.symbol
case _ => notFound
}
case _ => notFound
}
else NoSymbol
}
/** The module implemented by this module class, NoSymbol if not applicable. */
final def sourceModule(implicit ctx: Context): Symbol = myInfo match {
case ClassInfo(_, _, _, _, selfType) if this is ModuleClass =>
selfType match {
case selfType: TermRef => selfType.symbol
case selfType: Symbol => selfType.info.asInstanceOf[TermRef].symbol
}
case info: LazyType =>
info.sourceModule
case _ =>
NoSymbol
}
/** The field accessed by this getter or setter, or if it does not exist, the getter */
def accessedFieldOrGetter(implicit ctx: Context): Symbol = {
val fieldName = if (isSetter) name.asTermName.getterName else name
val d = owner.info.decl(fieldName)
val field = d.suchThat(!_.is(Method)).symbol
def getter = d.suchThat(_.info.isParameterless).symbol
field orElse getter
}
/** The field accessed by a getter or setter, or
* if it does not exist, the getter of a setter, or
* if that does not exist the symbol itself.
*/
def underlyingSymbol(implicit ctx: Context): Symbol =
if (is(Accessor)) accessedFieldOrGetter orElse symbol else symbol
/** The chain of owners of this denotation, starting with the denoting symbol itself */
final def ownersIterator(implicit ctx: Context) = new Iterator[Symbol] {
private[this] var current = symbol
def hasNext = current.exists
def next: Symbol = {
val result = current
current = current.owner
result
}
}
/** If this is a weak owner, its owner, otherwise the denoting symbol. */
final def skipWeakOwner(implicit ctx: Context): Symbol =
if (isWeakOwner) owner.skipWeakOwner else symbol
/** The owner, skipping package objects, labels and non-lazy valdefs. */
final def effectiveOwner(implicit ctx: Context) = owner.skipWeakOwner
/** The class containing this denotation.
* If this denotation is already a class, return itself
* Definitions flagged with InSuperCall are treated specially.
* Their enclosing class is not the lexically enclosing class,
* but in turn the enclosing class of the latter. This reflects
* the context created by `Context#superCallContext`, `Context#thisCallArgContext`
* for these definitions.
*
* Note, that as packages have ClassSymbols, top level classes will have an `enclosingClass`
* with Package flag set.
*/
final def enclosingClass(implicit ctx: Context): Symbol = {
// `skip` is true while we still have to jump over one more enclosing class
def enclClass(sym: Symbol, skip: Boolean): Symbol = {
def newSkip = sym.is(InSuperCall) || sym.is(JavaStaticTerm)
if (!sym.exists)
NoSymbol
else if (sym.isClass)
if (skip) enclClass(sym.owner, newSkip) else sym
else
enclClass(sym.owner, skip || newSkip)
}
enclClass(symbol, false)
}
/** A symbol is effectively final if it cannot be overridden in a subclass */
final def isEffectivelyFinal(implicit ctx: Context): Boolean =
is(PrivateOrFinal) || !owner.isClass || owner.is(ModuleOrFinal) || owner.isAnonymousClass
/** The class containing this denotation which has the given effective name. */
final def enclosingClassNamed(name: Name)(implicit ctx: Context): Symbol = {
val cls = enclosingClass
if (cls.effectiveName == name || !cls.exists) cls else cls.owner.enclosingClassNamed(name)
}
/** The closest enclosing method containing this definition.
* A local dummy owner is mapped to the primary constructor of the class.
*/
final def enclosingMethod(implicit ctx: Context): Symbol =
if (this is (Method, butNot = Label)) symbol
else if (this.isClass) primaryConstructor
else if (this.exists) owner.enclosingMethod
else NoSymbol
/** The top-level class containing this denotation,
* except for a toplevel module, where its module class is returned.
*/
final def topLevelClass(implicit ctx: Context): Symbol = {
def topLevel(d: SymDenotation): Symbol = {
if ((d is PackageClass) || (d.owner is PackageClass)) d.symbol
else topLevel(d.owner)
}
val sym = topLevel(this)
if (sym.isClass) sym else sym.moduleClass
}
/** The package class containing this denotation */
final def enclosingPackageClass(implicit ctx: Context): Symbol =
if (this is PackageClass) symbol else owner.enclosingPackageClass
/** The module object with the same (term-) name as this class or module class,
* and which is also defined in the same scope and compilation unit.
* NoSymbol if this module does not exist.
*/
final def companionModule(implicit ctx: Context): Symbol = {
if (this.flagsUNSAFE is Flags.Module) this.sourceModule
else {
// the synthetic companion method's result type points at the companion
val companionMethod = info.decls.denotsNamed(nme.COMPANION_MODULE_METHOD, selectPrivate).first
if (companionMethod.exists)
companionMethod.info.resultType.classSymbol.sourceModule
else
NoSymbol
}
}
/** The class with the same (type-) name as this module or module class,
* and which is also defined in the same scope and compilation unit.
* NoSymbol if this class does not exist.
*/
final def companionClass(implicit ctx: Context): Symbol = {
val companionMethod = info.decls.denotsNamed(nme.COMPANION_CLASS_METHOD, selectPrivate).first
if (companionMethod.exists)
companionMethod.info.resultType.classSymbol
else
NoSymbol
}
/** The linked class or module class, computed by name lookup as scalac does. */
final def scalacLinkedClass(implicit ctx: Context): Symbol =
if (this is ModuleClass) companionNamed(effectiveName.toTypeName)
else if (this.isClass) companionNamed(effectiveName.moduleClassName).sourceModule.moduleClass
else NoSymbol
/** Find companion class symbol with given name, or NoSymbol if none exists.
* Three alternative strategies:
* 1. If owner is a class, look in its members, otherwise
* 2. If current compilation unit has a typed tree,
* determine the defining statement sequence and search its trees, otherwise
* 3. If context has an enclosing scope which defines this symbol,
* lookup its companion in the same scope.
*/
private def companionNamed(name: TypeName)(implicit ctx: Context): Symbol =
if (owner.isClass)
owner.info.decl(name).suchThat(_.isCoDefinedWith(symbol)).symbol
else if (!owner.exists || ctx.compilationUnit == null)
NoSymbol
else if (!ctx.compilationUnit.tpdTree.isEmpty)
tpd.definingStats(symbol).iterator
.map(tpd.definedSym)
.find(_.name == name)
.getOrElse(NoSymbol)
else if (ctx.scope == null)
NoSymbol
else if (ctx.scope.lookup(this.name) == symbol)
ctx.scope.lookup(name)
else
// this symbol is not defined in the current scope; retry in the next outer scope
companionNamed(name)(ctx.outersIterator.dropWhile(_.scope eq ctx.scope).next)
/** If this is a class, the module class of its companion object.
* If this is a module class, its companion class.
* NoSymbol otherwise.
*/
final def linkedClass(implicit ctx: Context): Symbol =
if (this is ModuleClass) companionClass
else if (this.isClass) companionModule.moduleClass
else NoSymbol
/** The class that encloses the owner of the current context
* and that is a subclass of this class. NoSymbol if no such class exists.
*/
final def enclosingSubClass(implicit ctx: Context) =
ctx.owner.ownersIterator.findSymbol(_.isSubClass(symbol))
/** The non-private symbol whose name and type matches the type of this symbol
* in the given class.
* @param inClass The class containing the result symbol's definition
* @param site The base type from which member types are computed
*
* inClass <-- find denot.symbol class C { <-- symbol is here
*
* site: Subtype of both inClass and C
*/
final def matchingDecl(inClass: Symbol, site: Type)(implicit ctx: Context): Symbol = {
var denot = inClass.info.nonPrivateDecl(name)
if (denot.isTerm) // types of the same name always match
denot = denot.matchingDenotation(site, site.memberInfo(symbol))
denot.symbol
}
/** The non-private member of `site` whose name and type matches the type of this symbol
*/
final def matchingMember(site: Type)(implicit ctx: Context): Symbol = {
var denot = site.nonPrivateMember(name)
if (denot.isTerm) // types of the same name always match
denot = denot.matchingDenotation(site, site.memberInfo(symbol))
denot.symbol
}
/** If false, this symbol cannot possibly participate in an override,
* either as overrider or overridee.
*/
final def canMatchInheritedSymbols(implicit ctx: Context): Boolean =
maybeOwner.isClass && memberCanMatchInheritedSymbols
/** If false, this class member cannot possibly participate in an override,
* either as overrider or overridee.
*/
final def memberCanMatchInheritedSymbols(implicit ctx: Context): Boolean =
!isConstructor && !is(Private)
/** The symbol, in class `inClass`, that is overridden by this denotation. */
final def overriddenSymbol(inClass: ClassSymbol)(implicit ctx: Context): Symbol =
if (!canMatchInheritedSymbols && (owner ne inClass)) NoSymbol
else matchingDecl(inClass, owner.thisType)
/** All symbols overridden by this denotation. */
final def allOverriddenSymbols(implicit ctx: Context): Iterator[Symbol] =
if (!canMatchInheritedSymbols) Iterator.empty
else overriddenFromType(owner.info)
/** Returns all matching symbols defined in parents of the selftype. */
final def extendedOverriddenSymbols(implicit ctx: Context): Iterator[Symbol] =
if (!canMatchInheritedSymbols) Iterator.empty
else overriddenFromType(owner.asClass.classInfo.selfType)
/** The existing symbols overridden by this denotation in each base class of `tp`. */
private def overriddenFromType(tp: Type)(implicit ctx: Context): Iterator[Symbol] =
tp.baseClasses.tail.iterator map overriddenSymbol filter (_.exists)
/** The symbol overriding this symbol in given subclass `ofclazz`.
*
* @param ofclazz is a subclass of this symbol's owner
*/
final def overridingSymbol(inClass: ClassSymbol)(implicit ctx: Context): Symbol =
if (canMatchInheritedSymbols) matchingDecl(inClass, inClass.thisType)
else NoSymbol
/** The symbol accessed by a super in the definition of this symbol when
* seen from class `base`. This symbol is always concrete.
* pre: `this.owner` is in the base class sequence of `base`.
*/
final def superSymbolIn(base: Symbol)(implicit ctx: Context): Symbol = {
// Walk the base classes that follow `owner` in the linearization of `base`
// and return the first concrete (non-deferred) matching declaration.
// Use the pattern-bound `bc`/`bcs1` instead of re-deriving them via
// `bcs.head`/`bcs.tail`.
def loop(bcs: List[ClassSymbol]): Symbol = bcs match {
case bc :: bcs1 =>
val sym = matchingDecl(bc, base.thisType)
.suchThat(alt => !(alt is Deferred)).symbol
if (sym.exists) sym else loop(bcs1)
case _ =>
NoSymbol
}
loop(base.info.baseClasses.dropWhile(owner != _).tail)
}
/** A member of class `base` is incomplete if
* (1) it is declared deferred or
* (2) it is abstract override and its super symbol in `base` is
* nonexistent or incomplete.
*/
final def isIncompleteIn(base: Symbol)(implicit ctx: Context): Boolean =
(this is Deferred) ||
(this is AbsOverride) && {
val supersym = superSymbolIn(base)
supersym == NoSymbol || supersym.isIncompleteIn(base)
}
/** The class or term symbol up to which this symbol is accessible,
* or RootClass if it is public. As java protected statics are
* otherwise completely inaccessible in scala, they are treated
* as public.
* @param base The access boundary to assume if this symbol is protected
*/
final def accessBoundary(base: Symbol)(implicit ctx: Context): Symbol = {
val fs = flags
if (fs is Private) owner
else if (fs is StaticProtected) defn.RootClass
else if (privateWithin.exists && !ctx.phase.erasedTypes) privateWithin
else if (fs is Protected) base
else defn.RootClass
}
/** The primary constructor of a class or trait, NoSymbol if not applicable.
* Overridden in ClassDenotation.
*/
def primaryConstructor(implicit ctx: Context): Symbol = NoSymbol
// ----- type-related ------------------------------------------------
/** The type parameters of a class symbol, Nil for all other symbols */
def typeParams(implicit ctx: Context): List[TypeSymbol] = Nil
/** The type This(cls), where cls is this class, NoPrefix for all other symbols */
def thisType(implicit ctx: Context): Type = NoPrefix
/** A type reference to this (type) symbol, prefixed by the owner's this-type. */
override def typeRef(implicit ctx: Context): TypeRef =
TypeRef(owner.thisType, name.asTypeName, this)
/** A term reference to this (term) symbol, prefixed by the owner's this-type. */
override def termRef(implicit ctx: Context): TermRef =
TermRef(owner.thisType, name.asTermName, this)
/** A term reference carrying the NotAMethod signature. */
override def valRef(implicit ctx: Context): TermRef =
TermRef.withSigAndDenot(owner.thisType, name.asTermName, Signature.NotAMethod, this)
/** A term reference carrying this denotation's own signature. */
override def termRefWithSig(implicit ctx: Context): TermRef =
TermRef.withSigAndDenot(owner.thisType, name.asTermName, signature, this)
/** A term reference with a fixed symbol (not subject to member lookup). */
def nonMemberTermRef(implicit ctx: Context): TermRef =
TermRef.withFixedSym(owner.thisType, name.asTermName, symbol.asTerm)
/** The variance of this type parameter or type member as an Int, with
* +1 = Covariant, -1 = Contravariant, 0 = Nonvariant, or not a type parameter
*/
final def variance(implicit ctx: Context): Int =
if (this is Covariant) 1
else if (this is Contravariant) -1
else 0
/** The flags to be used for a type parameter owned by this symbol.
* Overridden by ClassDenotation.
*/
def typeParamCreationFlags: FlagSet = TypeParam
/** Short textual description of this denotation: its kind followed by its name.
* A module class is both a module and a class, so it must be tested first.
*/
override def toString = {
def kind: String =
if (myFlags is ModuleClass) "module class"
else if (isClass) "class"
else if (isType) "type"
else if (myFlags is Module) "module"
else if (myFlags is Method) "method"
else "val"
s"$kind $name"
}
// ----- Sanity checks and debugging */
/** Debug description: toString plus the (unique) symbol id. */
def debugString = toString + "#" + symbol.id // !!! DEBUG
/** Does `tp` contain a skolem type anywhere in its structure? */
def hasSkolems(tp: Type): Boolean = tp match {
case tp: SkolemType => true
case tp: NamedType => hasSkolems(tp.prefix)
case tp: RefinedType => hasSkolems(tp.parent) || hasSkolems(tp.refinedInfo)
case tp: PolyType => tp.paramBounds.exists(hasSkolems) || hasSkolems(tp.resType)
case tp: MethodType => tp.paramTypes.exists(hasSkolems) || hasSkolems(tp.resType)
case tp: ExprType => hasSkolems(tp.resType)
case tp: AndOrType => hasSkolems(tp.tp1) || hasSkolems(tp.tp2)
case tp: TypeBounds => hasSkolems(tp.lo) || hasSkolems(tp.hi)
case tp: AnnotatedType => hasSkolems(tp.tpe)
case tp: TypeVar => hasSkolems(tp.inst)
case _ => false
}
/** Assert that `tp` contains no skolems, unless this denotation is itself a skolem. */
def assertNoSkolems(tp: Type) =
if (!this.isSkolem)
assert(!hasSkolems(tp), s"assigning type $tp containing skolems to $this")
// ----- copies and transforms ----------------------------------------
/** A new denotation for symbol `s` with info `i`, sharing this denotation's validity. */
protected def newLikeThis(s: Symbol, i: Type): SingleDenotation = new UniqueRefDenotation(s, i, validFor)
/** Copy this denotation, overriding selective fields */
final def copySymDenotation(
symbol: Symbol = this.symbol,
owner: Symbol = this.owner,
name: Name = this.name,
initFlags: FlagSet = UndefinedFlags,
info: Type = null,
privateWithin: Symbol = null,
annotations: List[Annotation] = null)(implicit ctx: Context) =
{ // simulate default parameters, while also passing implicit context ctx to the default values
val initFlags1 = (if (initFlags != UndefinedFlags) initFlags else this.flags) &~ Frozen
val info1 = if (info != null) info else this.info
val privateWithin1 = if (privateWithin != null) privateWithin else this.privateWithin
val annotations1 = if (annotations != null) annotations else this.annotations
val d = ctx.SymDenotation(symbol, owner, name, initFlags1, info1, privateWithin1)
d.annotations = annotations1
d
}
/** Install this denotation as the result of the given denotation transformer. */
override def installAfter(phase: DenotTransformer)(implicit ctx: Context): Unit =
super.installAfter(phase)
/** Apply a transformation `f` to all denotations in this group that start at or after
* given phase. Denotations are replaced while keeping the same validity periods.
*/
override def transformAfter(phase: DenotTransformer, f: SymDenotation => SymDenotation)(implicit ctx: Context): Unit =
super.transformAfter(phase, f)
/** If denotation is private, remove the Private flag and expand the name if necessary */
def ensureNotPrivate(implicit ctx: Context) =
if (is(Private))
copySymDenotation(
name = expandedName,
initFlags = this.flags &~ Private | ExpandedName)
else this
}
/** The contents of a class definition during a period
*/
class ClassDenotation private[SymDenotations] (
symbol: Symbol,
ownerIfExists: Symbol,
name: Name,
initFlags: FlagSet,
initInfo: Type,
initPrivateWithin: Symbol,
initRunId: RunId)
extends SymDenotation(symbol, ownerIfExists, name, initFlags, initInfo, initPrivateWithin) {
import util.LRUCache
// ----- denotation fields and accessors ------------------------------
if (initFlags is (Module, butNot = Package)) assert(name.isModuleClassName, s"module naming inconsistency: $name")
/** The symbol asserted to have type ClassSymbol */
def classSymbol: ClassSymbol = symbol.asInstanceOf[ClassSymbol]
/** The info asserted to have type ClassInfo */
def classInfo(implicit ctx: Context): ClassInfo = info.asInstanceOf[ClassInfo]
/** TODO: Document why caches are supposedly safe to use */
private[this] var myTypeParams: List[TypeSymbol] = _
/** The type parameters of this class; cached in `myTypeParams`,
* invalidated whenever the info changes (see `info_=` below).
*/
override final def typeParams(implicit ctx: Context): List[TypeSymbol] = {
def computeTypeParams = {
if (ctx.erasedTypes || is(Module)) Nil // fast return for modules to avoid scanning package decls
else if (this ne initial) initial.asSymDenotation.typeParams
else unforcedDecls.filter(sym =>
(sym is TypeParam) && sym.owner == symbol).asInstanceOf[List[TypeSymbol]]
}
if (myTypeParams == null) myTypeParams = computeTypeParams
myTypeParams
}
override protected[dotc] final def info_=(tp: Type) = {
super.info_=(tp)
myTypeParams = null // changing the info might change decls, and with it typeParams
}
/** The denotations of all parents in this class. */
def classParents(implicit ctx: Context): List[TypeRef] = info match {
case classInfo: ClassInfo => classInfo.classParents
case _ => Nil
}
/** The symbol of the superclass, NoSymbol if no superclass exists */
def superClass(implicit ctx: Context): Symbol = classParents match {
case parent :: _ =>
val cls = parent.classSymbol
if (cls is Trait) NoSymbol else cls
case _ =>
NoSymbol
}
/** The denotation is fully completed: all attributes are fully defined.
* ClassDenotations compiled from source are first completed, then fully completed.
* Packages are never fully completed since members can be added at any time.
* @see Namer#ClassCompleter
*/
private def isFullyCompleted(implicit ctx: Context): Boolean = {
def isFullyCompletedRef(tp: TypeRef) = tp.denot match {
case d: ClassDenotation => d.isFullyCompleted
case _ => false
}
def testFullyCompleted =
if (classParents.isEmpty) !is(Package) && symbol.eq(defn.AnyClass)
else classParents.forall(isFullyCompletedRef)
// once established, remember full completion in a flag
flagsUNSAFE.is(FullyCompleted) ||
isCompleted && testFullyCompleted && { setFlag(FullyCompleted); true }
}
// ------ syncing inheritance-related info -----------------------------
// run id of the first run in which this denotation was valid
private var firstRunId: RunId = initRunId
/** invalidate caches influenced by parent classes if one of the parents
* is younger than the denotation itself.
*/
override def syncWithParents(implicit ctx: Context): SingleDenotation = {
def isYounger(tref: TypeRef) = tref.symbol.denot match {
case denot: ClassDenotation =>
if (denot.validFor.runId < ctx.runId) denot.current // syncs with its parents in turn
val result = denot.firstRunId > this.firstRunId
if (result) incremental.println(s"$denot is younger than $this")
result
case _ => false
}
val parentIsYounger = (firstRunId < ctx.runId) && {
infoOrCompleter match {
case cinfo: ClassInfo => cinfo.classParents exists isYounger
case _ => false
}
}
if (parentIsYounger) {
incremental.println(s"parents of $this are invalid; symbol id = ${symbol.id}, copying ...\\n")
invalidateInheritedInfo()
}
firstRunId = ctx.runId
this
}
/** Invalidate all caches and fields that depend on base classes and their contents */
override def invalidateInheritedInfo(): Unit = {
myBaseClasses = null
mySuperClassBits = null
myMemberFingerPrint = FingerPrint.unknown
myMemberCache = null
myMemberCachePeriod = Nowhere
memberNamesCache = SimpleMap.Empty
}
// ------ class-specific operations -----------------------------------
// cache for thisType; computed once per denotation
private[this] var myThisType: Type = null
/** The this-type depends on the kind of class:
* - for a package class `p`: ThisType(TypeRef(Noprefix, p))
* - for a module class `m`: A term ref to m's source module.
* - for all other classes `c` with owner `o`: ThisType(TypeRef(o.thisType, c))
*/
override def thisType(implicit ctx: Context): Type = {
if (myThisType == null) myThisType = computeThisType
myThisType
}
private def computeThisType(implicit ctx: Context): Type =
ThisType.raw(
TypeRef(if (this is Package) NoPrefix else owner.thisType, symbol.asType))
/* else {
val pre = owner.thisType
if (this is Module)
if (isMissing(pre)) TermRef(pre, sourceModule.asTerm)
else TermRef.withSig(pre, name.sourceModuleName, Signature.NotAMethod)
else ThisType.raw(TypeRef(pre, symbol.asType))
}
*/
// cache for typeRef; computed once per denotation
private[this] var myTypeRef: TypeRef = null
override def typeRef(implicit ctx: Context): TypeRef = {
if (myTypeRef == null) myTypeRef = super.typeRef
myTypeRef
}
private[this] var myBaseClasses: List[ClassSymbol] = null
private[this] var mySuperClassBits: BitSet = null
/** Invalidate baseTypeRefCache, baseClasses and superClassBits on new run */
private def checkBasesUpToDate()(implicit ctx: Context) =
if (baseTypeRefValid != ctx.runId) {
baseTypeRefCache = new java.util.HashMap[CachedType, Type]
myBaseClasses = null
mySuperClassBits = null
baseTypeRefValid = ctx.runId
}
/** Compute the base classes of this class in linearization order together with
* the bitset of their superIds.
* Re-entrant calls are detected as a cyclic reference via the `Nil` sentinel.
* Results are cached in `myBaseClasses`/`mySuperClassBits` only once the
* denotation is fully completed (parents might still change before that).
*/
private def computeBases(implicit ctx: Context): (List[ClassSymbol], BitSet) = {
if (myBaseClasses eq Nil) throw CyclicReference(this)
myBaseClasses = Nil // cycle guard: a re-entrant call sees Nil and throws above
// superIds of base classes already added to the result
val seen = new mutable.BitSet
// (removed an unused `locked` BitSet that was allocated but never read)
def addBaseClasses(bcs: List[ClassSymbol], to: List[ClassSymbol])
: List[ClassSymbol] = bcs match {
case bc :: bcs1 =>
// process the tail first so later occurrences win, then prepend if new
val bcs1added = addBaseClasses(bcs1, to)
val id = bc.superId
if (seen contains id) bcs1added
else {
seen += id
bc :: bcs1added
}
case nil =>
to
}
def addParentBaseClasses(ps: List[Type], to: List[ClassSymbol]): List[ClassSymbol] = ps match {
case p :: ps1 =>
addParentBaseClasses(ps1, addBaseClasses(p.baseClasses, to))
case nil =>
to
}
val bcs = classSymbol :: addParentBaseClasses(classParents, Nil)
val scbits = seen.toImmutable
if (isFullyCompleted) {
myBaseClasses = bcs
mySuperClassBits = scbits
}
else myBaseClasses = null
(bcs, scbits)
}
/** A bitset that contains the superId's of all base classes.
 *  Returns an empty set if `classParents` is empty, which can happen when
 *  this is called too early in Namers; otherwise returns the cached bits,
 *  recomputing them if the cache was invalidated for this run.
 */
private def superClassBits(implicit ctx: Context): BitSet =
if (classParents.isEmpty) BitSet() // can happen when called too early in Namers
else {
checkBasesUpToDate()
if (mySuperClassBits != null) mySuperClassBits else computeBases._2
}
/** The base classes of this class in linearization order,
 *  with the class itself as first element.
 *  Falls back to just this class if `classParents` is empty (too early in
 *  Namers); otherwise uses the per-run cache, recomputing if invalidated.
 */
def baseClasses(implicit ctx: Context): List[ClassSymbol] =
if (classParents.isEmpty) classSymbol :: Nil // can happen when called too early in Namers
else {
checkBasesUpToDate()
if (myBaseClasses != null) myBaseClasses else computeBases._1
}
/** Does this class derive from `base`?
 *  True when this symbol is `base` itself, when `base`'s superId occurs in
 *  this class's superclass bit set, or when either side is erroneous.
 *  Absent denotations and non-class `base` symbols never qualify.
 */
final override def derivesFrom(base: Symbol)(implicit ctx: Context): Boolean =
  if (isAbsent) false
  else if (!base.isClass) false
  else if (symbol eq base) true
  else if (superClassBits contains base.superId) true
  else (this is Erroneous) || (base is Erroneous)

/** Like `derivesFrom`, but additionally treats `Nothing` as a subclass of
 *  every class, and `Null` as a subclass of every class except `Nothing`.
 */
final override def isSubClass(base: Symbol)(implicit ctx: Context) =
  if (derivesFrom(base)) true
  else if (!base.isClass) false
  else if (symbol eq defn.NothingClass) true
  else (symbol eq defn.NullClass) && (base ne defn.NothingClass)

/** The flags to use when creating type parameters for this class. */
final override def typeParamCreationFlags = ClassTypeParamCreationFlags
// Cached bloom-filter fingerprint of all member names; `unknown` = not computed.
private[this] var myMemberFingerPrint: FingerPrint = FingerPrint.unknown
/** Compute a fingerprint containing the names of all declared members plus
 *  the fingerprints of all parent classes.
 *  Side effect: freezes every fully-completed parent so that no further
 *  symbols can be entered into it (see the require in `enterNoReplace`),
 *  which keeps the combined fingerprint valid.
 */
private def computeMemberFingerPrint(implicit ctx: Context): FingerPrint = {
var fp = FingerPrint()
// Include all names declared directly in this class.
var e = info.decls.lastEntry
while (e != null) {
fp.include(e.name)
e = e.prev
}
// Include the fingerprints of all class parents.
var ps = classParents
while (ps.nonEmpty) {
val parent = ps.head.typeSymbol
parent.denot match {
case parentDenot: ClassDenotation =>
fp.include(parentDenot.memberFingerPrint)
if (parentDenot.isFullyCompleted) parentDenot.setFlag(Frozen)
case _ =>
}
ps = ps.tail
}
fp
}
/** A bloom filter for the names of all members in this class.
 *  Makes sense only for parent classes, and should definitely
 *  not be used for package classes because cache never
 *  gets invalidated.
 *  The computed fingerprint is cached only once this denotation is fully
 *  completed, since an incomplete class may still gain members.
 */
def memberFingerPrint(implicit ctx: Context): FingerPrint =
if (myMemberFingerPrint != FingerPrint.unknown) myMemberFingerPrint
else {
val fp = computeMemberFingerPrint
if (isFullyCompleted) myMemberFingerPrint = fp
fp
}
// Per-period LRU cache from member name to pre-denotation.
private[this] var myMemberCache: LRUCache[Name, PreDenotation] = null
// The period in which `myMemberCache` was created; `Nowhere` = no cache yet.
private[this] var myMemberCachePeriod: Period = Nowhere
/** The member cache for the current period, replaced with a fresh one
 *  whenever the context's period changes (denotations can differ per period).
 */
private def memberCache(implicit ctx: Context): LRUCache[Name, PreDenotation] = {
if (myMemberCachePeriod != ctx.period) {
myMemberCache = new LRUCache
myMemberCachePeriod = ctx.period
}
myMemberCache
}
/** Enter a symbol in current scope, and future scopes of same denotation.
 *  Note: We require that this does not happen after the first time
 *  someone does a findMember on a subclass.
 *  @param scope The scope in which symbol should be entered.
 *  If this is EmptyScope, the scope is `decls`.
 */
def enter(sym: Symbol, scope: Scope = EmptyScope)(implicit ctx: Context): Unit = {
val mscope = scope match {
case scope: MutableScope =>
// if enter gets a scope as an argument,
// than this is a scope that will eventually become decls of this symbol.
// And this should only happen if this is first time the scope of symbol
// is computed, ie symbol yet has no future.
assert(this.nextInRun.validFor.code <= this.validFor.code)
scope
case _ => unforcedDecls.openForMutations
}
// Package scopes may already contain a same-named symbol (e.g. from a
// previous run); unlink it and redirect its denotation to avoid staleness.
if (this is PackageClass) {
val entry = mscope.lookupEntry(sym.name)
if (entry != null) {
if (entry.sym == sym) return
mscope.unlink(entry)
entry.sym.denot = sym.denot // to avoid stale symbols
}
}
enterNoReplace(sym, mscope)
// Propagate the entry to any later denotation of this symbol in the run.
val nxt = this.nextInRun
if (nxt.validFor.code > this.validFor.code) {
this.nextInRun.asSymDenotation.asClass.enter(sym)
}
}
/** Enter a symbol in given `scope` without potentially replacing the old copy.
 *  Preconditions: only private symbols may be entered into a frozen class via
 *  its own declarations (freezing guarantees fingerprints stay valid).
 *  Keeps the member fingerprint and member cache consistent with the new entry.
 */
def enterNoReplace(sym: Symbol, scope: MutableScope)(implicit ctx: Context): Unit = {
require((sym.denot.flagsUNSAFE is Private) || !(this is Frozen) || (scope ne this.unforcedDecls))
scope.enter(sym)
if (myMemberFingerPrint != FingerPrint.unknown)
myMemberFingerPrint.include(sym.name)
if (myMemberCache != null)
myMemberCache invalidate sym.name
}
/** Replace symbol `prev` (if defined in current class) by symbol `replacement`.
 *  If `prev` is not defined in current class, do nothing.
 *  Also drops the member-cache entry for the (shared) name.
 *  @pre `prev` and `replacement` have the same name.
 */
def replace(prev: Symbol, replacement: Symbol)(implicit ctx: Context): Unit = {
require(!(this is Frozen))
unforcedDecls.openForMutations.replace(prev, replacement)
if (myMemberCache != null)
myMemberCache invalidate replacement.name
}
/** Delete symbol from current scope.
 *  Note: We require that this does not happen after the first time
 *  someone does a findMember on a subclass.
 *  The member fingerprint is reset wholesale since names cannot be removed
 *  from a bloom filter; the member-cache entry for the name is dropped.
 */
def delete(sym: Symbol)(implicit ctx: Context) = {
require(!(this is Frozen))
info.decls.openForMutations.unlink(sym)
myMemberFingerPrint = FingerPrint.unknown
if (myMemberCache != null) myMemberCache invalidate sym.name
}
/** Make sure the type parameters of this class are `tparams`, reorder definitions
 *  in scope if necessary.
 *  A new scope is built with `tparams` first (in the given order), followed by
 *  all remaining declarations; the cached `myTypeParams` is invalidated.
 *  @pre All type parameters in `tparams` are entered in class scope `info.decls`.
 */
def updateTypeParams(tparams: List[Symbol])(implicit ctx: Context): Unit =
if (!typeParams.corresponds(tparams)(_.name == _.name)) {
val decls = info.decls
val decls1 = newScope
for (tparam <- tparams) decls1.enter(decls.lookup(tparam.name))
for (sym <- decls) if (!typeParams.contains(sym)) decls1.enter(sym)
info = classInfo.derivedClassInfo(decls = decls1)
myTypeParams = null
}
/** All members of this class that have the given name.
 *  Private declarations take precedence: non-private members with the same
 *  name are filtered so they do not overlap with the private ones.
 *  The elements of the returned pre-denotation all have existing symbols.
 */
final def membersNamed(name: Name)(implicit ctx: Context): PreDenotation = {
  val privates = info.decls.denotsNamed(name, selectPrivate)
  val nonPrivates = nonPrivateMembersNamed(name).filterDisjoint(privates)
  privates union nonPrivates
}
/** All non-private members of this class that have the given name.
 *  The elements of the returned pre-denotation all
 *  have existing symbols.
 *  Results are cached (per period) when `Config.cacheMembersNamed` is on and
 *  the denotation is fully completed; `Config.checkCacheMembersNamed` adds a
 *  consistency check of the cache against a fresh computation.
 *  @param inherited The method is called on a parent class from computeNPMembersNamed
 */
final def nonPrivateMembersNamed(name: Name, inherited: Boolean = false)(implicit ctx: Context): PreDenotation = {
Stats.record("nonPrivateMembersNamed")
if (Config.cacheMembersNamed) {
var denots: PreDenotation = memberCache lookup name
if (denots == null) {
denots = computeNPMembersNamed(name, inherited)
if (isFullyCompleted) memberCache.enter(name, denots)
} else if (Config.checkCacheMembersNamed) {
val denots1 = computeNPMembersNamed(name, inherited)
assert(denots.exists == denots1.exists, s"cache inconsistency: cached: $denots, computed $denots1, name = $name, owner = $this")
}
denots
} else computeNPMembersNamed(name, inherited)
}
/** Compute the non-private members of this class with the given name:
 *  the class's own non-private declarations united with the (inherited-mapped)
 *  members of each parent. When called on a parent (`inherited = true`) and
 *  fingerprints are enabled, a negative fingerprint test short-circuits to
 *  NoDenotation, avoiding completion of the parent.
 *  Constructors are never inherited, so for constructor names only the own
 *  declarations are returned.
 */
private[core] def computeNPMembersNamed(name: Name, inherited: Boolean)(implicit ctx: Context): PreDenotation = /*>|>*/ Stats.track("computeNPMembersNamed") /*<|<*/ {
if (!inherited ||
!Config.useFingerPrints ||
(memberFingerPrint contains name)) {
Stats.record("computeNPMembersNamed after fingerprint")
ensureCompleted()
val ownDenots = info.decls.denotsNamed(name, selectNonPrivate)
if (debugTrace) // DEBUG
println(s"$this.member($name), ownDenots = $ownDenots")
// Fold over the parents, later parents first, merging each parent's
// contribution after mapping it to this class's this-type.
def collect(denots: PreDenotation, parents: List[TypeRef]): PreDenotation = parents match {
case p :: ps =>
val denots1 = collect(denots, ps)
p.symbol.denot match {
case parentd: ClassDenotation =>
denots1 union
parentd.nonPrivateMembersNamed(name, inherited = true)
.mapInherited(ownDenots, denots1, thisType)
case _ =>
denots1
}
case nil =>
denots
}
if (name.isConstructorName) ownDenots
else collect(ownDenots, classParents)
} else NoDenotation
}
/** The denotation of the member of this class with the given `name`, as seen
 *  from prefix `pre`, excluding members whose flags intersect `excluded`.
 *  When `Private` is excluded we can skip the private-declaration lookup
 *  entirely and go straight to the non-private members.
 */
override final def findMember(name: Name, pre: Type, excluded: FlagSet)(implicit ctx: Context): Denotation = {
  val candidates =
    if (excluded is Private) nonPrivateMembersNamed(name)
    else membersNamed(name)
  candidates.filterExcluded(excluded).asSeenFrom(pre).toDenot(pre)
}
// Cache for `baseTypeRefOf`, keyed by the (cached) argument type.
private[this] var baseTypeRefCache: java.util.HashMap[CachedType, Type] = null
// Run in which `baseTypeRefCache` / base classes were last valid.
private[this] var baseTypeRefValid: RunId = NoRunId
/** Compute tp.baseTypeRef(this) */
final def baseTypeRefOf(tp: Type)(implicit ctx: Context): Type = {
// Greatest lower bound of the base type refs of all parents in `ps`, seeded with `bt`.
def foldGlb(bt: Type, ps: List[Type]): Type = ps match {
case p :: ps1 => foldGlb(bt & baseTypeRefOf(p), ps1)
case _ => bt
}
def inCache(tp: Type) = baseTypeRefCache.containsKey(tp)
/** We cannot cache:
 * - type variables which are uninstantiated or whose instances can
 * change, depending on typerstate.
 * - types where the underlying type is an ErasedValueType, because
 * this underlying type will change after ElimErasedValueType,
 * and this changes subtyping relations. As a shortcut, we do not
 * cache ErasedValueType at all.
 */
def isCachable(tp: Type): Boolean = tp match {
case _: TypeErasure.ErasedValueType => false
case tp: TypeVar => tp.inst.exists && inCache(tp.inst)
case tp: TypeProxy => inCache(tp.underlying)
case tp: AndOrType => inCache(tp.tp1) && inCache(tp.tp2)
case _ => true
}
// Uncached computation: dispatch on the shape of `tp`.
def computeBaseTypeRefOf(tp: Type): Type = {
Stats.record("computeBaseTypeOf")
if (symbol.isStatic && tp.derivesFrom(symbol))
symbol.typeRef
else tp match {
case tp: TypeRef =>
val subcls = tp.symbol
if (subcls eq symbol)
tp
else subcls.denot match {
case cdenot: ClassDenotation =>
if (cdenot.superClassBits contains symbol.superId) foldGlb(NoType, tp.parents)
else NoType
case _ =>
baseTypeRefOf(tp.underlying)
}
case tp: TypeProxy =>
baseTypeRefOf(tp.underlying)
case AndType(tp1, tp2) =>
baseTypeRefOf(tp1) & baseTypeRefOf(tp2)
case OrType(tp1, tp2) =>
baseTypeRefOf(tp1) | baseTypeRefOf(tp2)
case JavaArrayType(_) if symbol == defn.ObjectClass =>
this.typeRef
case _ =>
NoType
}
}
/*>|>*/ ctx.debugTraceIndented(s"$tp.baseTypeRef($this)") /*<|<*/ {
tp match {
case tp: CachedType =>
checkBasesUpToDate()
// `NoPrefix` is used as an "in progress" sentinel in the cache:
// encountering it while computing means we recursed into the same
// query, i.e. a cyclic reference.
var basetp = baseTypeRefCache get tp
if (basetp == null) {
baseTypeRefCache.put(tp, NoPrefix)
basetp = computeBaseTypeRefOf(tp)
if (isCachable(tp)) baseTypeRefCache.put(tp, basetp)
else baseTypeRefCache.remove(tp)
} else if (basetp == NoPrefix) {
baseTypeRefCache.put(tp, null)
throw CyclicReference(this)
}
basetp
case _ =>
computeBaseTypeRefOf(tp)
}
}
}
// Cache from name filter to the set of member names passing that filter.
private[this] var memberNamesCache: SimpleMap[NameFilter, Set[Name]] = SimpleMap.Empty
/** The set of names of all members of this class (own declarations plus
 *  inherited ones) that pass the `keepOnly` filter.
 *  Package members are never cached, since packages can gain members at any time.
 */
def memberNames(keepOnly: NameFilter)(implicit ctx: Context): Set[Name] = {
def computeMemberNames: Set[Name] = {
var names = Set[Name]()
def maybeAdd(name: Name) = if (keepOnly(thisType, name)) names += name
// Inherited names from all parents first, then own declarations.
for (p <- classParents)
for (name <- p.memberNames(keepOnly, thisType)) maybeAdd(name)
val ownSyms =
if (keepOnly == implicitFilter)
if (this is Package) Iterator.empty
// implicits in package objects are added by the overriding definition in PackageClassDenotation
else info.decls.iterator filter (_ is Implicit)
else info.decls.iterator
for (sym <- ownSyms) maybeAdd(sym.name)
names
}
if ((this is PackageClass) || !Config.cacheMemberNames)
computeMemberNames // don't cache package member names; they might change
else {
val cached = memberNamesCache(keepOnly)
if (cached != null) cached
else {
val names = computeMemberNames
if (isFullyCompleted) {
// NOTE(review): freezing here presumably keeps the cached name set
// valid by forbidding later non-private enters — confirm intended.
setFlag(Frozen)
memberNamesCache = memberNamesCache.updated(keepOnly, names)
}
names
}
}
}
// Memoized full names, keyed by the separator string.
private[this] var fullNameCache: SimpleMap[String, Name] = SimpleMap.Empty

/** The fully qualified name of this class with components joined by
 *  `separator`. Memoized per separator, since a class's full name is stable.
 */
override final def fullNameSeparated(separator: String)(implicit ctx: Context): Name =
  fullNameCache(separator) match {
    case null =>
      val computed = super.fullNameSeparated(separator)
      fullNameCache = fullNameCache.updated(separator, computed)
      computed
    case known =>
      known
  }

// to avoid overloading ambiguities
override def fullName(implicit ctx: Context): Name = super.fullName
/** The primary constructor of this class.
 *  For implementation classes the trait constructor is used (the normal
 *  constructor is ignored); otherwise the normal constructor is preferred,
 *  falling back to the trait constructor.
 */
override def primaryConstructor(implicit ctx: Context): Symbol = {
def constrNamed(cname: TermName) = info.decls.denotsNamed(cname).last.symbol
// denotsNamed returns Symbols in reverse order of occurrence
if (this.is(ImplClass)) constrNamed(nme.TRAIT_CONSTRUCTOR) // ignore normal constructor
else
constrNamed(nme.CONSTRUCTOR).orElse(constrNamed(nme.TRAIT_CONSTRUCTOR))
}
/** The parameter accessors of this class. Term and type accessors,
 *  getters and setters are all returned in this list.
 */
def paramAccessors(implicit ctx: Context): List[Symbol] =
unforcedDecls.filter(_ is ParamAccessor).toList
/** If this class has the same `decls` scope reference in `phase` and
 *  `phase.next`, install a new denotation with a cloned scope in `phase.next`.
 *  If the current context is not already at `phase.next`, re-invokes itself
 *  in a context at that phase before comparing the two scopes.
 */
def ensureFreshScopeAfter(phase: DenotTransformer)(implicit ctx: Context): Unit =
if (ctx.phaseId != phase.next.id) ensureFreshScopeAfter(phase)(ctx.withPhase(phase.next))
else {
val prevCtx = ctx.withPhase(phase)
val ClassInfo(pre, _, ps, decls, selfInfo) = classInfo
if (classInfo(prevCtx).decls eq decls)
copySymDenotation(info = ClassInfo(pre, classSymbol, ps, decls.cloneScope, selfInfo))
.installAfter(phase)
}
}
/** The denotation of a package class.
 *  It overrides ClassDenotation to take account of package objects when looking for members
 */
class PackageClassDenotation private[SymDenotations] (
symbol: Symbol,
ownerIfExists: Symbol,
name: Name,
initFlags: FlagSet,
initInfo: Type,
initPrivateWithin: Symbol,
initRunId: RunId)
extends ClassDenotation(symbol, ownerIfExists, name, initFlags, initInfo, initPrivateWithin, initRunId) {
// Per-run cache of the package object's denotation.
private[this] var packageObjCache: SymDenotation = _
private[this] var packageObjRunId: RunId = NoRunId
/** The package object in this class, if one exists.
 *  Cached per run; the cache is pre-seeded with NoDenotation before the
 *  lookup to break the cycle that arises when the package object itself is
 *  being searched for.
 */
def packageObj(implicit ctx: Context): SymDenotation = {
if (packageObjRunId != ctx.runId) {
packageObjRunId = ctx.runId
packageObjCache = NoDenotation // break cycle in case we are looking for package object itself
packageObjCache = findMember(nme.PACKAGE, thisType, EmptyFlags).asSymDenotation
}
packageObjCache
}
/** Look first for members in package; if none are found look in package object */
override def computeNPMembersNamed(name: Name, inherited: Boolean)(implicit ctx: Context): PreDenotation = {
val denots = super.computeNPMembersNamed(name, inherited)
if (denots.exists) denots
else packageObj.moduleClass.denot match {
case pcls: ClassDenotation => pcls.computeNPMembersNamed(name, inherited)
case _ => denots
}
}
/** The union of the member names of the package and the package object */
override def memberNames(keepOnly: NameFilter)(implicit ctx: Context): Set[Name] = {
val ownNames = super.memberNames(keepOnly)
packageObj.moduleClass.denot match {
case pcls: ClassDenotation => ownNames union pcls.memberNames(keepOnly)
case _ => ownNames
}
}
}
/** The denotation that signals the absence of a denotation.
 *  It does not exist, is neither term nor type, has no owner, and is its own
 *  as-seen-from image. Its validity spans all of NoRunId so it is brought
 *  forward automatically across runs.
 */
class NoDenotation extends SymDenotation(
NoSymbol, NoSymbol, "<none>".toTermName, Permanent, NoType) {
override def exists = false
override def isTerm = false
override def isType = false
override def owner: Symbol = throw new AssertionError("NoDenotation.owner")
override def computeAsSeenFrom(pre: Type)(implicit ctx: Context): SingleDenotation = this
validFor = Period.allInRun(NoRunId) // will be brought forward automatically
}
// The unique "no denotation" value.
@sharable val NoDenotation = new NoDenotation
// Distinct sentinel: a denotation exists but not in the queried class.
@sharable val NotDefinedHereDenotation = new NoDenotation
// ---- Completion --------------------------------------------------------
/** Instances of LazyType are carried by uncompleted symbols.
* Note: LazyTypes double up as (constant) functions from Symbol and
* from (TermSymbol, ClassSymbol) to LazyType. That way lazy types can be
* directly passed to symbol creation methods in Symbols that demand instances
* of these function types.
*/
abstract class LazyType extends UncachedGroundType
with (Symbol => LazyType)
with ((TermSymbol, ClassSymbol) => LazyType) { self =>
/** Sets all missing fields of given denotation */
def complete(denot: SymDenotation)(implicit ctx: Context): Unit
// As constant functions, lazy types simply return themselves.
def apply(sym: Symbol) = this
def apply(module: TermSymbol, modcls: ClassSymbol) = this
// Configurable slots, set via the `with...` builder methods below.
private var myDecls: Scope = EmptyScope
private var mySourceModuleFn: Context => Symbol = NoSymbolFn
private var myModuleClassFn: Context => Symbol = NoSymbolFn
/** A proxy to this lazy type that keeps the complete operation
 * but provides fresh slots for scope/sourceModule/moduleClass
 */
def proxy: LazyType = new LazyType {
override def complete(denot: SymDenotation)(implicit ctx: Context) = self.complete(denot)
}
def decls: Scope = myDecls
def sourceModule(implicit ctx: Context): Symbol = mySourceModuleFn(ctx)
def moduleClass(implicit ctx: Context): Symbol = myModuleClassFn(ctx)
// Builder-style setters; each returns this lazy type for chaining.
def withDecls(decls: Scope): this.type = { myDecls = decls; this }
def withSourceModule(sourceModuleFn: Context => Symbol): this.type = { mySourceModuleFn = sourceModuleFn; this }
def withModuleClass(moduleClassFn: Context => Symbol): this.type = { myModuleClassFn = moduleClassFn; this }
}
/** A subclass of LazyTypes where type parameters can be completed independently of
* the info.
*/
/** A subclass of LazyType where the type parameters of the symbol can be
 *  queried independently of (and before) full completion of the info.
 */
abstract class TypeParamsCompleter extends LazyType {
/** The type parameters computed by the completer before completion has finished */
def completerTypeParams(sym: Symbol): List[TypeSymbol]
}
// Default slot value for LazyType's sourceModule/moduleClass functions.
val NoSymbolFn = (ctx: Context) => NoSymbol
/** A missing completer: any attempt to complete with it is an error. */
@sharable class NoCompleter extends LazyType {
def complete(denot: SymDenotation)(implicit ctx: Context): Unit = unsupported("complete")
}
object NoCompleter extends NoCompleter
/** A lazy type for modules that points to the module class.
* Needed so that `moduleClass` works before completion.
* Completion of modules is always completion of the underlying
* module class, followed by copying the relevant fields to the module.
*/
/** A lazy type for modules that points to the module class.
 *  Needed so that `moduleClass` works before completion.
 *  Completion of modules is always completion of the underlying
 *  module class, followed by copying the relevant fields to the module.
 */
class ModuleCompleter(_moduleClass: ClassSymbol) extends LazyType {
override def moduleClass(implicit ctx: Context) = _moduleClass
/** Complete the module denotation by forcing the module class and copying
 *  its flags (restricted to those retained on module vals), its
 *  module-applicable annotations, and its privateWithin.
 */
def complete(denot: SymDenotation)(implicit ctx: Context): Unit = {
val from = moduleClass.denot.asClass
denot.setFlag(from.flags.toTermFlags & RetainedModuleValFlags)
denot.annotations = from.annotations filter (_.appliesToModule)
// !!! ^^^ needs to be revised later. The problem is that annotations might
// only apply to the module but not to the module class. The right solution
// is to have the module class completer set the annotations of both the
// class and the module.
denot.info = moduleClass.typeRef
denot.privateWithin = from.privateWithin
}
}
/** A completer for missing references */
/** A completer for missing references: completing a symbol with it reports a
 *  "bad symbolic reference" error and initializes the denotation to safe
 *  defaults (an empty ClassInfo for classes, ErrorType otherwise).
 */
class StubInfo() extends LazyType {
def initializeToDefaults(denot: SymDenotation)(implicit ctx: Context) = {
denot.info = denot match {
case denot: ClassDenotation =>
ClassInfo(denot.owner.thisType, denot.classSymbol, Nil, EmptyScope)
case _ =>
ErrorType
}
denot.privateWithin = NoSymbol
}
def complete(denot: SymDenotation)(implicit ctx: Context): Unit = {
val sym = denot.symbol
val file = sym.associatedFile
// Point at the classfile if one is known, otherwise blame the signature.
val (location, src) =
if (file != null) (s" in $file", file.toString)
else ("", "the signature")
val name = ctx.fresh.setSetting(ctx.settings.debugNames, true).nameString(denot.name)
ctx.error(
s"""|bad symbolic reference. A signature$location
|refers to $name in ${denot.owner.showKind} ${denot.owner.showFullName} which is not available.
|It may be completely missing from the current classpath, or the version on
|the classpath might be incompatible with the version used when compiling $src.""".stripMargin)
if (ctx.debug) throw new Error()
initializeToDefaults(denot)
}
}
// ---- Fingerprints -----------------------------------------------------
/** A fingerprint is a bitset that acts as a bloom filter for sets
* of names.
*/
/** A fingerprint is a bitset that acts as a bloom filter for sets
 *  of names: `contains` may yield false positives but never false negatives.
 *  The name's hash is masked to the filter's bit range; the high bits select
 *  the word and the JVM's shift semantics (shift count mod 64) select the bit.
 */
class FingerPrint(val bits: Array[Long]) extends AnyVal {
import FingerPrint._
/** Include some bits of name's hashcode in set */
def include(name: Name): Unit = {
val hash = name.hashCode & Mask
bits(hash >> WordSizeLog) |= (1L << hash)
}
/** Include all bits of `that` fingerprint in set */
def include(that: FingerPrint): Unit =
for (i <- 0 until NumWords) bits(i) |= that.bits(i)
/** Does set contain hash bits of given name? */
def contains(name: Name): Boolean = {
val hash = name.hashCode & Mask
(bits(hash >> WordSizeLog) & (1L << hash)) != 0
}
}
object FingerPrint {
/** A fresh, empty fingerprint. */
def apply() = new FingerPrint(new Array[Long](NumWords))
/** Sentinel for "fingerprint not yet computed"; must never be queried. */
val unknown = new FingerPrint(null)
private final val WordSizeLog = 6 // log2 of bits per Long word
private final val NumWords = 32 // filter size: 32 words = 2048 bits
private final val NumBits = NumWords << WordSizeLog
private final val Mask = NumBits - 1 // masks a hash into [0, NumBits)
}
// Combined flag set used to test for accessor-or-label symbols.
private val AccessorOrLabel = Accessor | Label
// Indentation level for completion debug printing.
@sharable private var indent = 0 // for completions printing
}
| densh/dotty | src/dotty/tools/dotc/core/SymDenotations.scala | Scala | bsd-3-clause | 80,047 |
package avrohugger
package test
package specific
import avrohugger._
import avrohugger.format.SpecificRecord
import org.specs2._
/** Tests for `Generator.stringToFile` with the SpecificRecord format.
 *
 *  Note: in specs2, an example's outcome is the value of its *last*
 *  expression. Several examples here check more than one generated file, so
 *  the individual `===` results are combined with `and` — otherwise all but
 *  the final comparison would be silently discarded and could never fail.
 */
class SpecificStringToFileSpec extends Specification {

  def is = s2"""
  SpecificRecord Generator stringToFiles method should
    correctly generate from a protocol with messages $e1
    correctly generate a simple case class definition in a package $e2
    correctly generate a simple case class definition in the default package $e3
    correctly generate a nested case class definition from a schem $e4
    correctly generate a nested case class from IDL $e5
    correctly generate a recursive case class from IDL $e6
    correctly generate enums from schema $e7
    correctly generate enums from protocol $e8
    correctly generate enums from IDL $e9
    correctly generate nested enums $e10
    correctly generate bytes from schema $e11
    correctly generate bytes from protocol $e12
    correctly generate bytes from IDL $e13
    correctly generate records depending on others defined in a different- and same-namespaced AVDL and AVSC $e14
    correctly generate an empty case class definition $e15
    correctly generate default values $e16
    correctly generate a protocol with no ADT when asked $e21
  """
  //   correctly generate logical types from IDL $e22
  // """

  // tests common to fileToX and stringToX
  def e1 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/mail.avpr")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val sourceTrait = util.Util.readFile(s"$outDir/example/proto/Mail.scala")
    val sourceRecord = util.Util.readFile(s"$outDir/example/proto/Message.scala")
    // Combine both comparisons so a mismatch in either file fails the example.
    (sourceTrait === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/proto/Mail.scala")) and
    (sourceRecord === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/proto/Message.scala"))
  }

  def e2 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/user.avsc")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val source = util.Util.readFile("target/generated-sources/specific/example/User.scala")
    source === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/User.scala")
  }

  def e3 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/AvroTypeProviderTestNoNamespace.avsc")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val source = util.Util.readFile("target/generated-sources/specific/AvroTypeProviderTestNoNamespace.scala")
    source === util.Util.readFile("avrohugger-core/src/test/expected/specific/AvroTypeProviderTestNoNamespace.scala")
  }

  def e4 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/nested.avsc")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val source0 = util.Util.readFile("target/generated-sources/specific/example/Level0.scala")
    val source1 = util.Util.readFile("target/generated-sources/specific/example/Level1.scala")
    val source2 = util.Util.readFile("target/generated-sources/specific/example/Level2.scala")
    // All three generated levels must match; combine so none is discarded.
    (source0 === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/Level0.scala")) and
    (source1 === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/Level1.scala")) and
    (source2 === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/Level2.scala"))
  }

  def e5 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/nested.avdl")
    val myAvroScalaCustomTypes = SpecificRecord.defaultTypes.copy(protocol = types.ScalaADT)
    val gen = new Generator(format = SpecificRecord, avroScalaCustomTypes = Some(myAvroScalaCustomTypes))
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val source = util.Util.readFile("target/generated-sources/specific/example/idl/NestedProtocol.scala")
    source === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/idl/NestedProtocol.scala")
  }

  def e6 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/recursive.avdl")
    val myAvroScalaCustomTypes = SpecificRecord.defaultTypes.copy(protocol = types.ScalaADT)
    val gen = new Generator(format = SpecificRecord, avroScalaCustomTypes = Some(myAvroScalaCustomTypes))
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val source = util.Util.readFile("target/generated-sources/specific/example/idl/Recursive.scala")
    source === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/idl/Recursive.scala")
  }

  def e7 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/enums.avsc")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val source = util.Util.readFile("target/generated-sources/specific/example/Suit.java")
    source === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/Suit.java")
  }

  def e8 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/enums.avpr")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val sourceEnum = util.Util.readFile("target/generated-sources/specific/example/proto/Suit.java")
    val sourceRecord = util.Util.readFile("target/generated-sources/specific/example/proto/Card.scala")
    (sourceEnum === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/proto/Suit.java")) and
    (sourceRecord === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/proto/Card.scala"))
  }

  def e9 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/enums.avdl")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val sourceEnum = util.Util.readFile("target/generated-sources/specific/example/idl/Suit.java")
    val sourceRecord = util.Util.readFile("target/generated-sources/specific/example/idl/Card.scala")
    (sourceEnum === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/idl/Suit.java")) and
    (sourceRecord === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/idl/Card.scala"))
  }

  def e10 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/enums_nested.avsc")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val sourceEnum = util.Util.readFile("target/generated-sources/specific/example/Direction.java")
    val sourceRecord = util.Util.readFile("target/generated-sources/specific/example/Compass.scala")
    (sourceEnum === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/Direction.java")) and
    (sourceRecord === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/Compass.scala"))
  }

  def e11 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/bytes.avsc")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val source = util.Util.readFile("target/generated-sources/specific/example/BinarySc.scala")
    source === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/BinarySc.scala")
  }

  def e12 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/bytes.avpr")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val source = util.Util.readFile("target/generated-sources/specific/example/proto/BinaryPr.scala")
    source === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/proto/BinaryPr.scala")
  }

  def e13 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/bytes.avdl")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val source = util.Util.readFile("target/generated-sources/specific/example/idl/BinaryIdl.scala")
    source === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/idl/BinaryIdl.scala")
  }

  def e14 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/import.avdl")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    // String-based IDL input cannot resolve imports, so this must fail.
    gen.stringToFile(inputString, outDir) must throwA(new java.lang.RuntimeException("Imports not supported in String IDLs, only avdl files."))
  }

  def e15 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/AvroTypeProviderTestEmptyRecord.avdl")
    val myAvroScalaCustomTypes = SpecificRecord.defaultTypes.copy(protocol = types.ScalaADT)
    val gen = new Generator(format = SpecificRecord, avroScalaCustomTypes = Some(myAvroScalaCustomTypes))
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val source = util.Util.readFile("target/generated-sources/specific/test/Calculator.scala")
    source === util.Util.readFile("avrohugger-core/src/test/expected/specific/test/Calculator.scala")
  }

  def e16 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/defaults.avdl")
    val gen = new Generator(SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val sourceRecord = util.Util.readFile("target/generated-sources/specific/example/idl/Defaults.scala")
    val sourceEnum = util.Util.readFile("target/generated-sources/specific/example/idl/DefaultEnum.java")
    (sourceRecord === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/idl/Defaults.scala")) and
    (sourceEnum === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/idl/DefaultEnum.java"))
  }

  def e21 = {
    val inputString = util.Util.readFile("avrohugger-core/src/test/avro/AvroTypeProviderTestProtocol.avdl")
    val gen = new Generator(format = SpecificRecord)
    val outDir = gen.defaultOutputDir + "/specific/"
    gen.stringToFile(inputString, outDir)
    val source = util.Util.readFile("target/generated-sources/specific/test/Joystick.scala")
    source === util.Util.readFile("avrohugger-core/src/test/expected/specific/test/Joystick.scala")
  }

  // def e22 = {
  //   val inputString = util.Util.readFile("avrohugger-core/src/test/avro/logical.avdl")
  //   val gen = new Generator(SpecificRecord)
  //   val outDir = gen.defaultOutputDir + "/specific/"
  //   gen.stringToFile(inputString, outDir)
  //   val source = util.Util.readFile("target/generated-sources/specific/example/idl/LogicalIdl.scala")
  //   source === util.Util.readFile("avrohugger-core/src/test/expected/specific/example/idl/LogicalIdl.scala")
  // }
}
| julianpeeters/avrohugger | avrohugger-core/src/test/scala/specific/SpecificStringToFileSpec.scala | Scala | apache-2.0 | 11,492 |
package com.twitter.finagle.mysql.protocol
import com.twitter.finagle.mysql.protocol.Capability._
import java.security.MessageDigest
/**
* Initial Result received from server during handshaking.
*/
/** Initial Result received from server during handshaking.
 *  Field order mirrors the server greeting packet decoded in
 *  `ServersGreeting.decode` below.
 */
case class ServersGreeting(
protocol: Byte,
version: String,
threadId: Int,
salt: Array[Byte], // 20 bytes from 2 different fields
serverCap: Capability,
charset: Short,
status: Short
) extends Result
object ServersGreeting {
/** Decode a server greeting packet.
 *  Reads fields in strict wire order: protocol byte, null-terminated version
 *  string, thread id, first 8 salt bytes, a filler byte, server capability
 *  bits, charset, status, 13 reserved bytes, then the remaining 12 salt
 *  bytes. The two salt fragments are concatenated into a single 20-byte salt.
 */
def decode(packet: Packet): ServersGreeting = {
val br = BufferReader(packet.body)
val protocol = br.readByte()
val version = br.readNullTerminatedString()
val threadId = br.readInt()
val salt1 = br.take(8)
br.skip(1) // 1 filler byte always 0x00
val serverCap = Capability(br.readUnsignedShort())
val charset = br.readUnsignedByte()
val status = br.readShort()
br.skip(13)
val salt2 = br.take(12)
ServersGreeting(
protocol,
version,
threadId,
Array.concat(salt1, salt2),
serverCap,
charset,
status
)
}
}
/**
 * Reply to ServerGreeting sent during handshaking phase.
 * Carries the credentials plus the client's requested capabilities,
 * character set and maximum packet size; serialized by `data`.
 */
case class LoginRequest(
  username: String,
  password: String,
  database: Option[String],
  clientCap: Capability,
  salt: Array[Byte],
  serverCap: Capability,
  charset: Short = Charset.Utf8_general_ci,
  maxPacket: Int = 0x10000000
) extends Request(seq = 1) {
  // 4 (capability mask) + 4 (max packet) + 1 (charset) + 23 (reserved) +
  // 1 (username NUL terminator) + 1 (password length prefix) = 34 bytes.
  private[this] val fixedBodySize = 34
  // +1 accounts for the database name's NUL terminator.
  private[this] val dbNameSize = database map { _.size+1 } getOrElse(0)
  private[this] val dataSize = username.size + hashPassword.size + dbNameSize + fixedBodySize
  // Scrambled password; lazy so it is computed once, on first use (dataSize above).
  lazy val hashPassword = encryptPassword(password, salt)
  override val data = {
    val bw = BufferWriter(new Array[Byte](dataSize))
    // Drop the ConnectWithDB flag when no database name will be sent.
    val capability = if (dbNameSize == 0) clientCap - ConnectWithDB else clientCap
    bw.writeInt(capability.mask)
    bw.writeInt(maxPacket)
    bw.writeByte(charset)
    bw.fill(23, 0.toByte) // 23 reserved bytes - zeroed out
    bw.writeNullTerminatedString(username)
    bw.writeLengthCodedBytes(hashPassword)
    // NOTE(review): dataSize reserves room for the database name whenever
    // `database` is defined, but the name is only written when BOTH sides
    // support ConnectWithDB -- otherwise the packet ends with unused zero
    // padding. Confirm this is intentional.
    if (clientCap.has(ConnectWithDB) && serverCap.has(ConnectWithDB))
      bw.writeNullTerminatedString(database.get)
    bw.toChannelBuffer
  }
  // Computes SHA1(password) XOR SHA1(salt ++ SHA1(SHA1(password))) -- the
  // standard "mysql_native_password" authentication scramble.
  private[this] def encryptPassword(password: String, salt: Array[Byte]) = {
    val md = MessageDigest.getInstance("SHA-1")
    val hash1 = md.digest(password.getBytes(Charset.defaultCharset.displayName))
    md.reset()
    val hash2 = md.digest(hash1)
    md.reset()
    md.update(salt)
    md.update(hash2)
    val digest = md.digest()
    // XOR in place; both digests are 20-byte SHA-1 outputs, so lengths match.
    (0 until digest.length) foreach { i =>
      digest(i) = (digest(i) ^ hash1(i)).toByte
    }
    digest
  }
}
| foursquare/finagle | finagle-mysql/src/main/scala/com/twitter/finagle/mysql/protocol/Handshake.scala | Scala | apache-2.0 | 2,644 |
/**
* Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.persistence.chronicle
import java.io.File
import scala.language.postfixOps
import org.scalatest.Matchers
import org.scalatest.WordSpecLike
import com.typesafe.config.ConfigFactory
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class SettingsSpec extends WordSpecLike with Matchers with Settings {
  // Load the library's shipped reference.conf directly so the values under
  // test are exactly the published defaults.
  override def chronicleConfig = ConfigFactory.load("reference.conf").getConfig(chroniclePath)
  "configuration" must {
    "contain default settings" in {
      val settings = chronicleSettings
      import settings._
      val userDir = System.getProperty("user.dir")
      // Size multipliers: K = 1 KiB, M = 1 MiB.
      val K = 1024
      val M = K * K
      extension.clusterEnable should be(false)
      extension.exposeReplicationStream should be(false)
      extension.exposeNotificationStream should be(false)
      extension.replicator.name should be("chronicle-replicator")
      extension.replicator.role should be("chronicle-persistence")
      serializer.identifier should be(888)
      journal.provider should be(classOf[ChronicleSyncJournal].getName)
      journal.folder should be(new File(s"${userDir}/store/journal"))
      journal.persistSender should be(false)
      journal.namingMapper should be(classOf[DirectNamingMapper].getName)
      import journal.chronicleQueue
      chronicleQueue.synchronous should be(false)
      chronicleQueue.useCheckedExcerpt should be(false)
      chronicleQueue.messageCapacity should be(256)
      chronicleQueue.indexBlockSize should be(4 * M)
      chronicleQueue.dataBlockSize should be(16 * M)
      chronicleQueue.cacheLineSize should be(64)
      snapshotStore.provider should be(classOf[ChronicleSnapshotStore].getName)
      snapshotStore.folder should be(new File(s"${userDir}/store/snapshot"))
      snapshotStore.limit should be(2)
      import snapshotStore.chronicleMap
      chronicleMap.count should be(16 * K)
      chronicleMap.averageKeySize should be(128)
      chronicleMap.averageValueSize should be(64 * K)
      chronicleMap.maxChunksPerEntry should be(64)
      rotationManager.rotateEnable should be(true)
      rotationManager.messageCount should be(16 * K)
    }
  }
}
| carrot-garden/akka-persistence-chronicle | src/test/scala/akka/persistence/chronicle/ConfigurationSuite.scala | Scala | apache-2.0 | 2,236 |
/*
* Copyright 2015 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.constructr.coordination.etcd
import akka.Done
import akka.actor.{ ActorSystem, AddressFromURIString }
import akka.testkit.{ TestDuration, TestProbe }
import com.typesafe.config.ConfigFactory
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpec }
import scala.concurrent.duration.{ Duration, DurationInt, FiniteDuration }
import scala.concurrent.{ Await, Awaitable }
import scala.util.Random
object EtcdCoordinationSpec {
  /** Host etcd listens on: parsed from `DOCKER_HOST` if set, else loopback. */
  private val coordinationHost = {
    // Matches e.g. "tcp://192.168.99.100:2376" and captures the address.
    // Fixed: the pattern previously used "\\S"/"\\d"; inside a triple-quoted
    // string those are literal backslash pairs, so DOCKER_HOST never matched
    // and the loopback fallback was always taken.
    val dockerHostPattern = """tcp://(\S+):\d{1,5}""".r
    sys.env
      .get("DOCKER_HOST")
      .collect { case dockerHostPattern(address) => address }
      .getOrElse("127.0.0.1")
  }
}
class EtcdCoordinationSpec extends WordSpec with Matchers with BeforeAndAfterAll {
  import EtcdCoordinationSpec._

  // Actor system whose constructr coordination points at the resolved etcd
  // host; the test requires a reachable etcd instance.
  private implicit val system = {
    val config =
      ConfigFactory
        .parseString(s"constructr.coordination.host = $coordinationHost")
        .withFallback(ConfigFactory.load())
    ActorSystem("default", config)
  }

  private val address  = AddressFromURIString("akka.tcp://default@a:2552")
  private val address2 = AddressFromURIString("akka.tcp://default@b:2552")

  "EtcdCoordination" should {
    "correctly interact with etcd" in {
      // Fresh random key prefix per run so repeated runs don't collide.
      val coordination = new EtcdCoordination(randomString(), system)
      resultOf(coordination.getNodes()) shouldBe 'empty
      // A node may re-acquire its own lock; a different node must be refused.
      resultOf(coordination.lock(address, 10.seconds.dilated)) shouldBe true
      resultOf(coordination.lock(address, 10.seconds.dilated)) shouldBe true
      resultOf(coordination.lock(address2, 10.seconds.dilated)) shouldBe false
      resultOf(coordination.addSelf(address, 10.seconds.dilated)) shouldBe Done
      resultOf(coordination.getNodes()) shouldBe Set(address)
      resultOf(coordination.refresh(address, 1.second.dilated)) shouldBe Done
      resultOf(coordination.getNodes()) shouldBe Set(address)
      val probe = TestProbe()
      probe.within(5.seconds.dilated) { // 2 seconds should be enough, but who knows ...
        probe.awaitAssert {
          // Once the 1-second TTL of the refresh expires the entry must vanish.
          resultOf(coordination.getNodes()) shouldBe 'empty
        }
      }
    }
  }

  override protected def afterAll() = {
    Await.ready(system.terminate(), Duration.Inf)
    super.afterAll()
  }

  private def resultOf[A](awaitable: Awaitable[A], max: FiniteDuration = 3.seconds.dilated) =
    Await.result(awaitable, max)

  // Fixed: math.abs(Random.nextInt) is negative for Int.MinValue (Math.abs of
  // MIN_VALUE overflows), which could yield a "-..."-prefixed key. Bounded
  // nextInt is always non-negative.
  private def randomString() = Random.nextInt(Int.MaxValue).toString
}
| hseeberger/constructr | coordination-etcd/src/test/scala/de/heikoseeberger/constructr/coordination/etcd/EtcdCoordinationSpec.scala | Scala | apache-2.0 | 3,050 |
package uk.me.arseni.search.queries
import scala.collection.mutable.ArrayBuffer
/**
* Created by Arseni on 3/26/2016.
*/
class MultipleQueriesQuery(queries: Seq[Query]) extends Query {

  /**
   * Merges a list of maps into one, combining the values of duplicate keys
   * with `f`. (Adapted from http://stackoverflow.com/a/1264772/1628088.)
   */
  def mergeMap[A, B](ms: List[Map[A, B]])(f: (B, B) => B): Map[A, B] = {
    val entries = for (m <- ms; entry <- m) yield entry
    entries.foldLeft(Map[A, B]()) { (merged, entry) =>
      val (key, value) = entry
      val combined =
        merged.get(key) match {
          case Some(existing) => f(existing, value)
          case None           => value
        }
      merged + (key -> combined)
    }
  }

  /** Runs every sub-query in parallel and sums the scores per document id. */
  override def findMatchingDocuments(invertedTermIndex: Map[Int, ArrayBuffer[(Int, Double)]]): Map[Int, Double] = {
    val perQueryResults = queries.par.map(_.findMatchingDocuments(invertedTermIndex)).toList
    mergeMap(perQueryResults)(_ + _)
  }

  /** Collects the combined matches and returns them ranked by score. */
  override def execute(invertedTermIndex: Map[Int, ArrayBuffer[(Int, Double)]]
                       , documentIndex: Map[Int, Vector[Int]]): Array[(Int, Double)] = {
    rankResults(findMatchingDocuments(invertedTermIndex))
  }
}
| yarrseni/scala_search_engine | src/main/scala/uk/me/arseni/search/queries/MultipleQueriesQuery.scala | Scala | apache-2.0 | 944 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.io.File
import java.math.BigInteger
import java.sql.Timestamp
import scala.collection.mutable.ArrayBuffer
import com.google.common.io.Files
import org.apache.hadoop.fs.Path
import org.apache.parquet.hadoop.ParquetOutputFormat
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Literal
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation, PartitionDirectory => Partition, PartitioningUtils, PartitionSpec}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
// The data where the partitioning key exists only in the directory structure.
case class ParquetData(intField: Int, stringField: String)
// The data that also includes the partitioning key inside the Parquet file itself.
case class ParquetDataWithKey(intField: Int, pi: Int, stringField: String, ps: String)
class ParquetPartitionDiscoverySuite extends QueryTest with ParquetTest with SharedSQLContext {
import PartitioningUtils._
import testImplicits._
  // Hive's placeholder directory name for a null partition value.
  val defaultPartitionName = "__HIVE_DEFAULT_PARTITION__"
  // Raw partition-directory strings should be inferred as the narrowest
  // matching literal type; the default partition name maps to a null literal.
  test("column type inference") {
    def check(raw: String, literal: Literal): Unit = {
      assert(inferPartitionColumnValue(raw, defaultPartitionName, true) === literal)
    }
    check("10", Literal.create(10, IntegerType))
    check("1000000000000000", Literal.create(1000000000000000L, LongType))
    check("1.5", Literal.create(1.5, DoubleType))
    check("hello", Literal.create("hello", StringType))
    check(defaultPartitionName, Literal.create(null, NullType))
  }
  // Mixed partitioned/non-partitioned layouts must be rejected with a
  // "Conflicting directory structures" assertion; layouts that only add
  // _temporary dirs, or that declare an explicit base path, must be accepted.
  test("parse invalid partitioned directories") {
    // Invalid
    var paths = Seq(
      "hdfs://host:9000/invalidPath",
      "hdfs://host:9000/path/a=10/b=20",
      "hdfs://host:9000/path/a=10.5/b=hello")
    var exception = intercept[AssertionError] {
      parsePartitions(paths.map(new Path(_)), defaultPartitionName, true, Set.empty[Path])
    }
    assert(exception.getMessage().contains("Conflicting directory structures detected"))
    // Valid
    paths = Seq(
      "hdfs://host:9000/path/_temporary",
      "hdfs://host:9000/path/a=10/b=20",
      "hdfs://host:9000/path/_temporary/path")
    parsePartitions(
      paths.map(new Path(_)),
      defaultPartitionName,
      true,
      Set(new Path("hdfs://host:9000/path/")))
    // Valid
    paths = Seq(
      "hdfs://host:9000/path/something=true/table/",
      "hdfs://host:9000/path/something=true/table/_temporary",
      "hdfs://host:9000/path/something=true/table/a=10/b=20",
      "hdfs://host:9000/path/something=true/table/_temporary/path")
    parsePartitions(
      paths.map(new Path(_)),
      defaultPartitionName,
      true,
      Set(new Path("hdfs://host:9000/path/something=true/table")))
    // Valid
    paths = Seq(
      "hdfs://host:9000/path/table=true/",
      "hdfs://host:9000/path/table=true/_temporary",
      "hdfs://host:9000/path/table=true/a=10/b=20",
      "hdfs://host:9000/path/table=true/_temporary/path")
    parsePartitions(
      paths.map(new Path(_)),
      defaultPartitionName,
      true,
      Set(new Path("hdfs://host:9000/path/table=true")))
    // Invalid
    paths = Seq(
      "hdfs://host:9000/path/_temporary",
      "hdfs://host:9000/path/a=10/b=20",
      "hdfs://host:9000/path/path1")
    exception = intercept[AssertionError] {
      parsePartitions(
        paths.map(new Path(_)),
        defaultPartitionName,
        true,
        Set(new Path("hdfs://host:9000/path/")))
    }
    assert(exception.getMessage().contains("Conflicting directory structures detected"))
    // Invalid
    // Conflicting directory structure:
    // "hdfs://host:9000/tmp/tables/partitionedTable"
    // "hdfs://host:9000/tmp/tables/nonPartitionedTable1"
    // "hdfs://host:9000/tmp/tables/nonPartitionedTable2"
    paths = Seq(
      "hdfs://host:9000/tmp/tables/partitionedTable",
      "hdfs://host:9000/tmp/tables/partitionedTable/p=1/",
      "hdfs://host:9000/tmp/tables/nonPartitionedTable1",
      "hdfs://host:9000/tmp/tables/nonPartitionedTable2")
    exception = intercept[AssertionError] {
      parsePartitions(
        paths.map(new Path(_)),
        defaultPartitionName,
        true,
        Set(new Path("hdfs://host:9000/tmp/tables/")))
    }
    assert(exception.getMessage().contains("Conflicting directory structures detected"))
  }
  // Single-path parsing: valid key=value segments yield PartitionValues,
  // _temporary paths (and anything below them) yield None, and malformed
  // segments (empty name or value) throw.
  test("parse partition") {
    def check(path: String, expected: Option[PartitionValues]): Unit = {
      val actual = parsePartition(new Path(path), defaultPartitionName, true, Set.empty[Path])._1
      assert(expected === actual)
    }
    def checkThrows[T <: Throwable: Manifest](path: String, expected: String): Unit = {
      val message = intercept[T] {
        parsePartition(new Path(path), defaultPartitionName, true, Set.empty[Path])
      }.getMessage
      assert(message.contains(expected))
    }
    check("file://path/a=10", Some {
      PartitionValues(
        ArrayBuffer("a"),
        ArrayBuffer(Literal.create(10, IntegerType)))
    })
    check("file://path/a=10/b=hello/c=1.5", Some {
      PartitionValues(
        ArrayBuffer("a", "b", "c"),
        ArrayBuffer(
          Literal.create(10, IntegerType),
          Literal.create("hello", StringType),
          Literal.create(1.5, DoubleType)))
    })
    // Non key=value segments interrupt partition discovery from the left.
    check("file://path/a=10/b_hello/c=1.5", Some {
      PartitionValues(
        ArrayBuffer("c"),
        ArrayBuffer(Literal.create(1.5, DoubleType)))
    })
    check("file:///", None)
    check("file:///path/_temporary", None)
    check("file:///path/_temporary/c=1.5", None)
    check("file:///path/_temporary/path", None)
    check("file://path/a=10/_temporary/c=1.5", None)
    check("file://path/a=10/c=1.5/_temporary", None)
    checkThrows[AssertionError]("file://path/=10", "Empty partition column name")
    checkThrows[AssertionError]("file://path/a=", "Empty partition column value")
  }
  // A basePath equal to the leaf directory suppresses partition inference for
  // that directory; a basePath above it keeps the partition column.
  test("parse partition with base paths") {
    // when the basePaths is the same as the path to a leaf directory
    val partitionSpec1: Option[PartitionValues] = parsePartition(
      path = new Path("file://path/a=10"),
      defaultPartitionName = defaultPartitionName,
      typeInference = true,
      basePaths = Set(new Path("file://path/a=10")))._1
    assert(partitionSpec1.isEmpty)
    // when the basePaths is the path to a base directory of leaf directories
    val partitionSpec2: Option[PartitionValues] = parsePartition(
      path = new Path("file://path/a=10"),
      defaultPartitionName = defaultPartitionName,
      typeInference = true,
      basePaths = Set(new Path("file://path")))._1
    assert(partitionSpec2 ==
      Option(PartitionValues(
        ArrayBuffer("a"),
        ArrayBuffer(Literal.create(10, IntegerType)))))
  }
  // Multi-directory parsing with type inference: column types are widened
  // across directories (Int vs Double => Double), _temporary dirs (any case)
  // are ignored, and the default partition name becomes a null value.
  test("parse partitions") {
    def check(
        paths: Seq[String],
        spec: PartitionSpec,
        rootPaths: Set[Path] = Set.empty[Path]): Unit = {
      val actualSpec =
        parsePartitions(
          paths.map(new Path(_)),
          defaultPartitionName,
          true,
          rootPaths)
      assert(actualSpec === spec)
    }
    check(Seq(
      "hdfs://host:9000/path/a=10/b=hello"),
      PartitionSpec(
        StructType(Seq(
          StructField("a", IntegerType),
          StructField("b", StringType))),
        Seq(Partition(InternalRow(10, UTF8String.fromString("hello")),
          "hdfs://host:9000/path/a=10/b=hello"))))
    check(Seq(
      "hdfs://host:9000/path/a=10/b=20",
      "hdfs://host:9000/path/a=10.5/b=hello"),
      PartitionSpec(
        StructType(Seq(
          StructField("a", DoubleType),
          StructField("b", StringType))),
        Seq(
          Partition(InternalRow(10, UTF8String.fromString("20")),
            "hdfs://host:9000/path/a=10/b=20"),
          Partition(InternalRow(10.5, UTF8String.fromString("hello")),
            "hdfs://host:9000/path/a=10.5/b=hello"))))
    check(Seq(
      "hdfs://host:9000/path/_temporary",
      "hdfs://host:9000/path/a=10/b=20",
      "hdfs://host:9000/path/a=10.5/b=hello",
      "hdfs://host:9000/path/a=10.5/_temporary",
      "hdfs://host:9000/path/a=10.5/_TeMpOrArY",
      "hdfs://host:9000/path/a=10.5/b=hello/_temporary",
      "hdfs://host:9000/path/a=10.5/b=hello/_TEMPORARY",
      "hdfs://host:9000/path/_temporary/path",
      "hdfs://host:9000/path/a=11/_temporary/path",
      "hdfs://host:9000/path/a=10.5/b=world/_temporary/path"),
      PartitionSpec(
        StructType(Seq(
          StructField("a", DoubleType),
          StructField("b", StringType))),
        Seq(
          Partition(InternalRow(10, UTF8String.fromString("20")),
            "hdfs://host:9000/path/a=10/b=20"),
          Partition(InternalRow(10.5, UTF8String.fromString("hello")),
            "hdfs://host:9000/path/a=10.5/b=hello"))))
    check(Seq(
      s"hdfs://host:9000/path/a=10/b=20",
      s"hdfs://host:9000/path/a=$defaultPartitionName/b=hello"),
      PartitionSpec(
        StructType(Seq(
          StructField("a", IntegerType),
          StructField("b", StringType))),
        Seq(
          Partition(InternalRow(10, UTF8String.fromString("20")),
            s"hdfs://host:9000/path/a=10/b=20"),
          Partition(InternalRow(null, UTF8String.fromString("hello")),
            s"hdfs://host:9000/path/a=$defaultPartitionName/b=hello"))))
    check(Seq(
      s"hdfs://host:9000/path/a=10/b=$defaultPartitionName",
      s"hdfs://host:9000/path/a=10.5/b=$defaultPartitionName"),
      PartitionSpec(
        StructType(Seq(
          StructField("a", DoubleType),
          StructField("b", StringType))),
        Seq(
          Partition(InternalRow(10, null), s"hdfs://host:9000/path/a=10/b=$defaultPartitionName"),
          Partition(InternalRow(10.5, null),
            s"hdfs://host:9000/path/a=10.5/b=$defaultPartitionName"))))
    check(Seq(
      s"hdfs://host:9000/path1",
      s"hdfs://host:9000/path2"),
      PartitionSpec.emptySpec)
  }
  // With type inference disabled every partition column is read as a string;
  // the default partition name still maps to null.
  test("parse partitions with type inference disabled") {
    def check(paths: Seq[String], spec: PartitionSpec): Unit = {
      val actualSpec =
        parsePartitions(paths.map(new Path(_)), defaultPartitionName, false, Set.empty[Path])
      assert(actualSpec === spec)
    }
    check(Seq(
      "hdfs://host:9000/path/a=10/b=hello"),
      PartitionSpec(
        StructType(Seq(
          StructField("a", StringType),
          StructField("b", StringType))),
        Seq(Partition(InternalRow(UTF8String.fromString("10"), UTF8String.fromString("hello")),
          "hdfs://host:9000/path/a=10/b=hello"))))
    check(Seq(
      "hdfs://host:9000/path/a=10/b=20",
      "hdfs://host:9000/path/a=10.5/b=hello"),
      PartitionSpec(
        StructType(Seq(
          StructField("a", StringType),
          StructField("b", StringType))),
        Seq(
          Partition(InternalRow(UTF8String.fromString("10"), UTF8String.fromString("20")),
            "hdfs://host:9000/path/a=10/b=20"),
          Partition(InternalRow(UTF8String.fromString("10.5"), UTF8String.fromString("hello")),
            "hdfs://host:9000/path/a=10.5/b=hello"))))
    check(Seq(
      "hdfs://host:9000/path/_temporary",
      "hdfs://host:9000/path/a=10/b=20",
      "hdfs://host:9000/path/a=10.5/b=hello",
      "hdfs://host:9000/path/a=10.5/_temporary",
      "hdfs://host:9000/path/a=10.5/_TeMpOrArY",
      "hdfs://host:9000/path/a=10.5/b=hello/_temporary",
      "hdfs://host:9000/path/a=10.5/b=hello/_TEMPORARY",
      "hdfs://host:9000/path/_temporary/path",
      "hdfs://host:9000/path/a=11/_temporary/path",
      "hdfs://host:9000/path/a=10.5/b=world/_temporary/path"),
      PartitionSpec(
        StructType(Seq(
          StructField("a", StringType),
          StructField("b", StringType))),
        Seq(
          Partition(InternalRow(UTF8String.fromString("10"), UTF8String.fromString("20")),
            "hdfs://host:9000/path/a=10/b=20"),
          Partition(InternalRow(UTF8String.fromString("10.5"), UTF8String.fromString("hello")),
            "hdfs://host:9000/path/a=10.5/b=hello"))))
    check(Seq(
      s"hdfs://host:9000/path/a=10/b=20",
      s"hdfs://host:9000/path/a=$defaultPartitionName/b=hello"),
      PartitionSpec(
        StructType(Seq(
          StructField("a", StringType),
          StructField("b", StringType))),
        Seq(
          Partition(InternalRow(UTF8String.fromString("10"), UTF8String.fromString("20")),
            s"hdfs://host:9000/path/a=10/b=20"),
          Partition(InternalRow(null, UTF8String.fromString("hello")),
            s"hdfs://host:9000/path/a=$defaultPartitionName/b=hello"))))
    check(Seq(
      s"hdfs://host:9000/path/a=10/b=$defaultPartitionName",
      s"hdfs://host:9000/path/a=10.5/b=$defaultPartitionName"),
      PartitionSpec(
        StructType(Seq(
          StructField("a", StringType),
          StructField("b", StringType))),
        Seq(
          Partition(InternalRow(UTF8String.fromString("10"), null),
            s"hdfs://host:9000/path/a=10/b=$defaultPartitionName"),
          Partition(InternalRow(UTF8String.fromString("10.5"), null),
            s"hdfs://host:9000/path/a=10.5/b=$defaultPartitionName"))))
    check(Seq(
      s"hdfs://host:9000/path1",
      s"hdfs://host:9000/path2"),
      PartitionSpec.emptySpec)
  }
  // End-to-end: write Parquet files under pi=/ps= directories and verify that
  // both full scans and partition-filtered queries return the directory-derived
  // partition values alongside the file columns.
  test("read partitioned table - normal case") {
    withTempDir { base =>
      for {
        pi <- Seq(1, 2)
        ps <- Seq("foo", "bar")
      } {
        val dir = makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps)
        makeParquetFile(
          (1 to 10).map(i => ParquetData(i, i.toString)),
          dir)
        // Introduce _temporary dir to test the robustness of the schema discovery process.
        new File(dir.toString, "_temporary").mkdir()
      }
      // Introduce a _temporary dir in the base dir to test the robustness of the schema discovery process.
      new File(base.getCanonicalPath, "_temporary").mkdir()
      spark.read.parquet(base.getCanonicalPath).createOrReplaceTempView("t")
      withTempView("t") {
        checkAnswer(
          sql("SELECT * FROM t"),
          for {
            i <- 1 to 10
            pi <- Seq(1, 2)
            ps <- Seq("foo", "bar")
          } yield Row(i, i.toString, pi, ps))
        checkAnswer(
          sql("SELECT intField, pi FROM t"),
          for {
            i <- 1 to 10
            pi <- Seq(1, 2)
            _ <- Seq("foo", "bar")
          } yield Row(i, pi))
        checkAnswer(
          sql("SELECT * FROM t WHERE pi = 1"),
          for {
            i <- 1 to 10
            ps <- Seq("foo", "bar")
          } yield Row(i, i.toString, 1, ps))
        checkAnswer(
          sql("SELECT * FROM t WHERE ps = 'foo'"),
          for {
            i <- 1 to 10
            pi <- Seq(1, 2)
          } yield Row(i, i.toString, pi, "foo"))
      }
    }
  }
  // The inferred schema depends on which path is loaded: the base path yields
  // partition columns, a leaf directory or single file does not, and an
  // explicit basePath option restores them for a single-file load.
  test("read partitioned table using different path options") {
    withTempDir { base =>
      val pi = 1
      val ps = "foo"
      val path = makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps)
      makeParquetFile(
        (1 to 10).map(i => ParquetData(i, i.toString)), path)
      // when the input is the base path containing partitioning directories
      val baseDf = spark.read.parquet(base.getCanonicalPath)
      assert(baseDf.schema.map(_.name) === Seq("intField", "stringField", "pi", "ps"))
      // when the input is a path to the leaf directory containing a parquet file
      val partDf = spark.read.parquet(path.getCanonicalPath)
      assert(partDf.schema.map(_.name) === Seq("intField", "stringField"))
      path.listFiles().foreach { f =>
        if (f.getName.toLowerCase().endsWith(".parquet")) {
          // when the input is a path to a parquet file
          val df = spark.read.parquet(f.getCanonicalPath)
          assert(df.schema.map(_.name) === Seq("intField", "stringField"))
        }
      }
      path.listFiles().foreach { f =>
        if (f.getName.toLowerCase().endsWith(".parquet")) {
          // when the input is a path to a parquet file but `basePath` is overridden to
          // the base path containing partitioning directories
          val df = spark
            .read.option("basePath", base.getCanonicalPath)
            .parquet(f.getCanonicalPath)
          assert(df.schema.map(_.name) === Seq("intField", "stringField", "pi", "ps"))
        }
      }
    }
  }
  // Same as the normal case, but the partition columns are also materialized
  // inside the Parquet files; the file copy and the directory value must agree.
  test("read partitioned table - partition key included in Parquet file") {
    withTempDir { base =>
      for {
        pi <- Seq(1, 2)
        ps <- Seq("foo", "bar")
      } {
        makeParquetFile(
          (1 to 10).map(i => ParquetDataWithKey(i, pi, i.toString, ps)),
          makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
      }
      spark.read.parquet(base.getCanonicalPath).createOrReplaceTempView("t")
      withTempView("t") {
        checkAnswer(
          sql("SELECT * FROM t"),
          for {
            i <- 1 to 10
            pi <- Seq(1, 2)
            ps <- Seq("foo", "bar")
          } yield Row(i, pi, i.toString, ps))
        checkAnswer(
          sql("SELECT intField, pi FROM t"),
          for {
            i <- 1 to 10
            pi <- Seq(1, 2)
            _ <- Seq("foo", "bar")
          } yield Row(i, pi))
        checkAnswer(
          sql("SELECT * FROM t WHERE pi = 1"),
          for {
            i <- 1 to 10
            ps <- Seq("foo", "bar")
          } yield Row(i, 1, i.toString, ps))
        checkAnswer(
          sql("SELECT * FROM t WHERE ps = 'foo'"),
          for {
            i <- 1 to 10
            pi <- Seq(1, 2)
          } yield Row(i, pi, i.toString, "foo"))
      }
    }
  }
  // Null partition values: directories named with the default partition name
  // must read back as SQL NULL and be selectable with IS NULL.
  test("read partitioned table - with nulls") {
    withTempDir { base =>
      for {
        // Must be `Integer` rather than `Int` here. `null.asInstanceOf[Int]` results in a zero...
        pi <- Seq(1, null.asInstanceOf[Integer])
        ps <- Seq("foo", null.asInstanceOf[String])
      } {
        makeParquetFile(
          (1 to 10).map(i => ParquetData(i, i.toString)),
          makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
      }
      val parquetRelation = spark.read.format("parquet").load(base.getCanonicalPath)
      parquetRelation.createOrReplaceTempView("t")
      withTempView("t") {
        checkAnswer(
          sql("SELECT * FROM t"),
          for {
            i <- 1 to 10
            pi <- Seq(1, null.asInstanceOf[Integer])
            ps <- Seq("foo", null.asInstanceOf[String])
          } yield Row(i, i.toString, pi, ps))
        checkAnswer(
          sql("SELECT * FROM t WHERE pi IS NULL"),
          for {
            i <- 1 to 10
            ps <- Seq("foo", null.asInstanceOf[String])
          } yield Row(i, i.toString, null, ps))
        checkAnswer(
          sql("SELECT * FROM t WHERE ps IS NULL"),
          for {
            i <- 1 to 10
            pi <- Seq(1, null.asInstanceOf[Integer])
          } yield Row(i, i.toString, pi, null))
      }
    }
  }
  // Null partition values combined with partition keys that are also
  // materialized inside the Parquet files themselves.
  test("read partitioned table - with nulls and partition keys are included in Parquet file") {
    withTempDir { base =>
      for {
        pi <- Seq(1, 2)
        ps <- Seq("foo", null.asInstanceOf[String])
      } {
        makeParquetFile(
          (1 to 10).map(i => ParquetDataWithKey(i, pi, i.toString, ps)),
          makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
      }
      val parquetRelation = spark.read.format("parquet").load(base.getCanonicalPath)
      parquetRelation.createOrReplaceTempView("t")
      withTempView("t") {
        checkAnswer(
          sql("SELECT * FROM t"),
          for {
            i <- 1 to 10
            pi <- Seq(1, 2)
            ps <- Seq("foo", null.asInstanceOf[String])
          } yield Row(i, pi, i.toString, ps))
        checkAnswer(
          sql("SELECT * FROM t WHERE ps IS NULL"),
          for {
            i <- 1 to 10
            pi <- Seq(1, 2)
          } yield Row(i, pi, i.toString, null))
      }
    }
  }
  // Partitions written with different but compatible schemas are merged when
  // mergeSchema is enabled; columns missing from a partition come back as null.
  test("read partitioned table - merging compatible schemas") {
    withTempDir { base =>
      makeParquetFile(
        (1 to 10).map(i => Tuple1(i)).toDF("intField"),
        makePartitionDir(base, defaultPartitionName, "pi" -> 1))
      makeParquetFile(
        (1 to 10).map(i => (i, i.toString)).toDF("intField", "stringField"),
        makePartitionDir(base, defaultPartitionName, "pi" -> 2))
      spark
        .read
        .option("mergeSchema", "true")
        .format("parquet")
        .load(base.getCanonicalPath)
        .createOrReplaceTempView("t")
      withTempView("t") {
        checkAnswer(
          sql("SELECT * FROM t"),
          (1 to 10).map(i => Row(i, null, 1)) ++ (1 to 10).map(i => Row(i, i.toString, 2)))
      }
    }
  }
test("SPARK-7749 Non-partitioned table should have empty partition spec") {
withTempPath { dir =>
(1 to 10).map(i => (i, i.toString)).toDF("a", "b").write.parquet(dir.getCanonicalPath)
val queryExecution = spark.read.parquet(dir.getCanonicalPath).queryExecution
queryExecution.analyzed.collectFirst {
case LogicalRelation(relation: HadoopFsRelation, _, _) =>
assert(relation.partitionSpec === PartitionSpec.emptySpec)
}.getOrElse {
fail(s"Expecting a ParquetRelation2, but got:\\n$queryExecution")
}
}
}
  // Partition values containing characters that are illegal in paths must be
  // escaped on write and unescaped on read.
  test("SPARK-7847: Dynamic partition directory path escaping and unescaping") {
    withTempPath { dir =>
      val df = Seq("/", "[]", "?").zipWithIndex.map(_.swap).toDF("i", "s")
      df.write.format("parquet").partitionBy("s").save(dir.getCanonicalPath)
      checkAnswer(spark.read.parquet(dir.getCanonicalPath), df.collect())
    }
  }
  // Round-trips one value of every supported partition column type through
  // dynamic partitioning and reads it back with an explicit cast per column.
  test("Various partition value types") {
    val row =
      Row(
        100.toByte,
        40000.toShort, // NOTE(review): 40000 overflows Short and wraps negative -- presumably deliberate; confirm
        Int.MaxValue,
        Long.MaxValue,
        1.5.toFloat,
        4.5,
        new java.math.BigDecimal(new BigInteger("212500"), 5),
        new java.math.BigDecimal(2.125),
        java.sql.Date.valueOf("2015-05-23"),
        new Timestamp(0),
        "This is a string, /[]?=:",
        "This is not a partition column")
    // BooleanType is not supported yet
    val partitionColumnTypes =
      Seq(
        ByteType,
        ShortType,
        IntegerType,
        LongType,
        FloatType,
        DoubleType,
        DecimalType(10, 5),
        DecimalType.SYSTEM_DEFAULT,
        DateType,
        TimestampType,
        StringType)
    val partitionColumns = partitionColumnTypes.zipWithIndex.map {
      case (t, index) => StructField(s"p_$index", t)
    }
    // Last column ("i") is the only non-partition column.
    val schema = StructType(partitionColumns :+ StructField(s"i", StringType))
    val df = spark.createDataFrame(sparkContext.parallelize(row :: Nil), schema)
    withTempPath { dir =>
      df.write.format("parquet").partitionBy(partitionColumns.map(_.name): _*).save(dir.toString)
      val fields = schema.map(f => Column(f.name).cast(f.dataType))
      checkAnswer(spark.read.load(dir.toString).select(fields: _*), row)
    }
  }
  // Hidden files and directories (names starting with a dot) must be ignored
  // during schema discovery and reading.
  test("SPARK-8037: Ignores files whose name starts with dot") {
    withTempPath { dir =>
      val df = (1 to 3).map(i => (i, i, i, i)).toDF("a", "b", "c", "d")
      df.write
        .format("parquet")
        .partitionBy("b", "c", "d")
        .save(dir.getCanonicalPath)
      Files.touch(new File(s"${dir.getCanonicalPath}/b=1", ".DS_Store"))
      Files.createParentDirs(new File(s"${dir.getCanonicalPath}/b=1/c=1/.foo/bar"))
      checkAnswer(spark.read.format("parquet").load(dir.getCanonicalPath), df)
    }
  }
  // Directories above the dataset root that merely look like partition
  // directories (key=value) must not contribute partition columns.
  test("SPARK-11678: Partition discovery stops at the root path of the dataset") {
    withTempPath { dir =>
      val tablePath = new File(dir, "key=value")
      val df = (1 to 3).map(i => (i, i, i, i)).toDF("a", "b", "c", "d")
      df.write
        .format("parquet")
        .partitionBy("b", "c", "d")
        .save(tablePath.getCanonicalPath)
      Files.touch(new File(s"${tablePath.getCanonicalPath}/", "_SUCCESS"))
      Files.createParentDirs(new File(s"${dir.getCanonicalPath}/b=1/c=1/.foo/bar"))
      checkAnswer(spark.read.format("parquet").load(tablePath.getCanonicalPath), df)
    }
    // Same check, with an extra directory level between key=value and the table.
    withTempPath { dir =>
      val path = new File(dir, "key=value")
      val tablePath = new File(path, "table")
      val df = (1 to 3).map(i => (i, i, i, i)).toDF("a", "b", "c", "d")
      df.write
        .format("parquet")
        .partitionBy("b", "c", "d")
        .save(tablePath.getCanonicalPath)
      Files.touch(new File(s"${tablePath.getCanonicalPath}/", "_SUCCESS"))
      Files.createParentDirs(new File(s"${dir.getCanonicalPath}/b=1/c=1/.foo/bar"))
      checkAnswer(spark.read.format("parquet").load(tablePath.getCanonicalPath), df)
    }
  }
  // Loading sibling partition directories requires an explicit basePath so the
  // partition column can still be discovered; without it the load must fail.
  test("use basePath to specify the root dir of a partitioned table.") {
    withTempPath { dir =>
      val tablePath = new File(dir, "table")
      val df = (1 to 3).map(i => (i, i, i, i)).toDF("a", "b", "c", "d")
      df.write
        .format("parquet")
        .partitionBy("b", "c", "d")
        .save(tablePath.getCanonicalPath)
      val twoPartitionsDF =
        spark
          .read
          .option("basePath", tablePath.getCanonicalPath)
          .parquet(
            s"${tablePath.getCanonicalPath}/b=1",
            s"${tablePath.getCanonicalPath}/b=2")
      checkAnswer(twoPartitionsDF, df.filter("b != 3"))
      // Without basePath, loading sibling partition dirs is rejected.
      intercept[AssertionError] {
        spark
          .read
          .parquet(
            s"${tablePath.getCanonicalPath}/b=1",
            s"${tablePath.getCanonicalPath}/b=2")
      }
    }
  }
  // Glob patterns combined with basePath select partition subsets; partition
  // columns at or below the basePath are not re-inferred.
  test("use basePath and file globbing to selectively load partitioned table") {
    withTempPath { dir =>
      val df = Seq(
        (1, "foo", 100),
        (1, "bar", 200),
        (2, "foo", 300),
        (2, "bar", 400)
      ).toDF("p1", "p2", "v")
      df.write
        .mode(SaveMode.Overwrite)
        .partitionBy("p1", "p2")
        .parquet(dir.getCanonicalPath)
      def check(path: String, basePath: String, expectedDf: DataFrame): Unit = {
        val testDf = spark.read
          .option("basePath", basePath)
          .parquet(path)
        checkAnswer(testDf, expectedDf)
      }
      // Should find all the data with partitioning columns when base path is set to the root
      val resultDf = df.select("v", "p1", "p2")
      check(path = s"$dir", basePath = s"$dir", resultDf)
      check(path = s"$dir/*", basePath = s"$dir", resultDf)
      check(path = s"$dir/*/*", basePath = s"$dir", resultDf)
      check(path = s"$dir/*/*/*", basePath = s"$dir", resultDf)
      // Should find selective partitions of the data if the base path is not set to root
      check( // read from ../p1=1 with base ../p1=1, should not infer p1 col
        path = s"$dir/p1=1/*",
        basePath = s"$dir/p1=1/",
        resultDf.filter("p1 = 1").drop("p1"))
      check( // read from ../p1=1/p2=foo with base ../p1=1/ should not infer p1
        path = s"$dir/p1=1/p2=foo/*",
        basePath = s"$dir/p1=1/",
        resultDf.filter("p1 = 1").filter("p2 = 'foo'").drop("p1"))
      check( // read from ../p1=1/p2=foo with base ../p1=1/p2=foo, should not infer p1, p2
        path = s"$dir/p1=1/p2=foo/*",
        basePath = s"$dir/p1=1/p2=foo/",
        resultDf.filter("p1 = 1").filter("p2 = 'foo'").drop("p1", "p2"))
    }
  }
  // _SUCCESS marker files inside partition directories must not break
  // discovery, for both driver-side and job-based (parallel) file listing.
  test("_SUCCESS should not break partitioning discovery") {
    Seq(1, 32).foreach { threshold =>
      // We have two paths to list files: one at the driver side, another that
      // uses a Spark job. We need to test both ways.
      withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> threshold.toString) {
        withTempPath { dir =>
          val tablePath = new File(dir, "table")
          val df = (1 to 3).map(i => (i, i, i, i)).toDF("a", "b", "c", "d")
          df.write
            .format("parquet")
            .partitionBy("b", "c", "d")
            .save(tablePath.getCanonicalPath)
          Files.touch(new File(s"${tablePath.getCanonicalPath}/b=1", "_SUCCESS"))
          Files.touch(new File(s"${tablePath.getCanonicalPath}/b=1/c=1", "_SUCCESS"))
          Files.touch(new File(s"${tablePath.getCanonicalPath}/b=1/c=1/d=1", "_SUCCESS"))
          checkAnswer(spark.read.format("parquet").load(tablePath.getCanonicalPath), df)
        }
      }
    }
  }
// Verifies the error message emitted when sibling partition directories disagree on
// partition column names, or when data files live in non-leaf directories.
test("listConflictingPartitionColumns") {
  // Builds the message expected from listConflictingPartitionColumns for the given
  // per-path column-name lists and the offending paths.
  def makeExpectedMessage(colNameLists: Seq[String], paths: Seq[String]): String = {
    val conflictingColNameLists = colNameLists.zipWithIndex.map { case (list, index) =>
      s"\\tPartition column name list #$index: $list"
    }.mkString("\\n", "\\n", "\\n")
    // scalastyle:off
    s"""Conflicting partition column names detected:
       |$conflictingColNameLists
       |For partitioned table directories, data files should only live in leaf directories.
       |And directories at the same level should have the same partition column name.
       |Please check the following directories for unexpected files or inconsistent partition column names:
       |${paths.map("\\t" + _).mkString("\\n", "\\n", "")}
     """.stripMargin.trim
    // scalastyle:on
  }

  // Two sibling directories named after different partition columns.
  assert(
    listConflictingPartitionColumns(
      Seq(
        (new Path("file:/tmp/foo/a=1"), PartitionValues(Seq("a"), Seq(Literal(1)))),
        (new Path("file:/tmp/foo/b=1"), PartitionValues(Seq("b"), Seq(Literal(1)))))).trim ===
      makeExpectedMessage(Seq("a", "b"), Seq("file:/tmp/foo/a=1", "file:/tmp/foo/b=1")))

  // Same column name, but one partition value comes from a _temporary directory.
  assert(
    listConflictingPartitionColumns(
      Seq(
        (new Path("file:/tmp/foo/a=1/_temporary"), PartitionValues(Seq("a"), Seq(Literal(1)))),
        (new Path("file:/tmp/foo/a=1"), PartitionValues(Seq("a"), Seq(Literal(1)))))).trim ===
      makeExpectedMessage(
        Seq("a"),
        Seq("file:/tmp/foo/a=1/_temporary", "file:/tmp/foo/a=1")))

  // Directories at different depths yield column-name lists of different lengths.
  assert(
    listConflictingPartitionColumns(
      Seq(
        (new Path("file:/tmp/foo/a=1"),
          PartitionValues(Seq("a"), Seq(Literal(1)))),
        (new Path("file:/tmp/foo/a=1/b=foo"),
          PartitionValues(Seq("a", "b"), Seq(Literal(1), Literal("foo")))))).trim ===
      makeExpectedMessage(
        Seq("a", "a, b"),
        Seq("file:/tmp/foo/a=1", "file:/tmp/foo/a=1/b=foo")))
}
// Forces the parallel (distributed) partition-discovery code path by lowering the
// threshold to a single path, then round-trips a small partitioned table through
// Parquet and checks the result matches what was written.
test("Parallel partition discovery") {
  withTempPath { dir =>
    withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "1") {
      val location = dir.getCanonicalPath
      val expected = spark.range(5).select('id as 'a, 'id as 'b, 'id as 'c).coalesce(1)
      expected.write.partitionBy("b", "c").parquet(location)
      checkAnswer(spark.read.parquet(location), expected)
    }
  }
}
// SPARK-15895: Parquet summary files (_metadata / _common_metadata) and dot-files
// placed in non-leaf partition directories must not break partition discovery.
test("SPARK-15895 summary files in non-leaf partition directories") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    // Write with job summaries enabled so _metadata/_common_metadata are produced.
    withSQLConf(ParquetOutputFormat.ENABLE_JOB_SUMMARY -> "true") {
      spark.range(3).write.parquet(s"$path/p0=0/p1=0")
    }
    val p0 = new File(path, "p0=0")
    val p1 = new File(p0, "p1=0")
    // Builds the following directory layout by:
    //
    //  1. copying Parquet summary files we just wrote into `p0=0`, and
    //  2. touching a dot-file `.dummy` under `p0=0`.
    //
    // <base>
    // +- p0=0
    //    |- _metadata
    //    |- _common_metadata
    //    |- .dummy
    //    +- p1=0
    //       |- _metadata
    //       |- _common_metadata
    //       |- part-00000.parquet
    //       |- part-00001.parquet
    //       +- ...
    //
    // The summary files and the dot-file under `p0=0` should not fail partition discovery.
    Files.copy(new File(p1, "_metadata"), new File(p0, "_metadata"))
    Files.copy(new File(p1, "_common_metadata"), new File(p0, "_common_metadata"))
    Files.touch(new File(p0, ".dummy"))
    checkAnswer(spark.read.parquet(s"$path"), Seq(
      Row(0, 0, 0),
      Row(1, 0, 0),
      Row(2, 0, 0)
    ))
  }
}
| gioenn/xSpark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala | Scala | apache-2.0 | 32,948 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.builders
import cats.effect.IO
import cats.laws._
import cats.laws.discipline._
import monix.eval.Task
import monix.execution.Ack.Continue
import monix.execution.exceptions.DummyException
import monix.execution.internal.Platform.recommendedBatchSize
import monix.reactive.observers.Subscriber
import monix.reactive.{BaseTestSuite, Observable}
import scala.concurrent.duration.MILLISECONDS
/**
 * Tests for `Observable.unfoldEval` / `Observable.unfoldEvalF`: error reporting,
 * equivalence with `fromAsyncStateAction` / `fromAsyncStateActionF`, and
 * cancellation behaviour under the test scheduler.
 */
object UnfoldEvalObservableSuite extends BaseTestSuite {

  test("unfoldEval should be exception-proof") { implicit s =>
    val dummy = DummyException("dummy")
    var received = 0
    // The state function throws once the seed reaches 20; the 20 elements emitted
    // before that must still arrive, and the error must reach the scheduler.
    Observable.unfoldEval(0)(i => if (i < 20) Task.now(Some((i, i + 1))) else throw dummy).subscribe { _: Int =>
      received += 1
      Continue
    }
    assertEquals((0 until received).toList, (0 to 19).toList)
    assertEquals(s.state.lastReportedError, dummy)
  }

  test("unfoldEval and fromAsyncStateAction results should be equal given generated inputs") { implicit s =>
    check2 { (s: Int, i: Int) =>
      // Keep generated sizes within two async batch boundaries.
      val seed = s % (recommendedBatchSize * 2)
      val n = i % (recommendedBatchSize * 2)
      val f: Int => Task[Option[(Int, Int)]] = i => if (i < n) Task.delay(Some((i, i + 1))) else Task.now(None)
      val f2: Int => Task[(Int, Int)] = i => Task.delay((i, i + 1))
      Observable.unfoldEval(seed)(f).toListL <-> Observable.fromAsyncStateAction(f2)(seed).takeWhile(_ < n).toListL
    }
  }

  test("unfoldEval should be cancelable") { implicit s =>
    var wasCompleted = false
    var sum = 0
    val cancelable = Observable
      .unfoldEval(s.clockMonotonic(MILLISECONDS))(intNowOption)
      .unsafeSubscribeFn(new Subscriber[Int] {
        implicit val scheduler = s
        def onNext(elem: Int) = {
          sum += 1
          Continue
        }
        def onComplete() = wasCompleted = true
        def onError(ex: Throwable) = wasCompleted = true
      })
    cancelable.cancel()
    s.tick()
    // After cancellation only the elements produced before the async boundary are
    // observed, and neither onComplete nor onError may fire.
    assertEquals(sum, s.executionModel.recommendedBatchSize / 2)
    assert(!wasCompleted)
  }

  test("unfoldEvalF should be exception-proof") { implicit s =>
    val dummy = DummyException("dummy")
    var received = 0
    // Same as the Task-based variant, but with the state function in cats-effect IO.
    Observable.unfoldEvalF(0)(i => if (i < 20) IO(Option((i, i + 1))) else throw dummy).subscribe { _: Int =>
      received += 1
      Continue
    }
    assertEquals((0 until received).toList, (0 to 19).toList)
    assertEquals(s.state.lastReportedError, dummy)
  }

  test("unfoldEvalF and fromAsyncStateActionF results should be equal given generated inputs") { implicit s =>
    check2 { (s: Int, i: Int) =>
      // Keep generated sizes within two async batch boundaries.
      val seed = s % (recommendedBatchSize * 2)
      val n = i % (recommendedBatchSize * 2)
      val f: Int => IO[Option[(Int, Int)]] = i => if (i < n) IO.delay(Some((i, i + 1))) else IO.pure(None)
      val f2: Int => IO[(Int, Int)] = i => IO.delay((i, i + 1))
      Observable.unfoldEvalF(seed)(f).toListL <-> Observable.fromAsyncStateActionF(f2)(seed).takeWhile(_ < n).toListL
    }
  }

  test("unfoldEvalF should be cancelable") { implicit s =>
    var wasCompleted = false
    var sum = 0
    val cancelable = Observable
      .unfoldEvalF(s.clockMonotonic(MILLISECONDS))(intOptionIO)
      .unsafeSubscribeFn(new Subscriber[Int] {
        implicit val scheduler = s
        def onNext(elem: Int) = {
          sum += 1
          Continue
        }
        def onComplete() = wasCompleted = true
        def onError(ex: Throwable) = wasCompleted = true
      })
    cancelable.cancel()
    s.tick()
    assertEquals(sum, s.executionModel.recommendedBatchSize / 2)
    assert(!wasCompleted)
  }

  // Helpers: one step of a linear-congruential PRNG, wrapped for each effect type,
  // used as a never-ending unfold state function in the cancellation tests.
  def intNowOption(seed: Long): Task[Option[(Int, Long)]] = Task.now(Option(int(seed)))
  def intOptionIO(seed: Long): IO[Option[(Int, Long)]] = IO.delay(Option(int(seed)))
  def intOption(seed: Long): Option[(Int, Long)] = Option(int(seed))

  def int(seed: Long): (Int, Long) = {
    // `&` is bitwise AND. We use the current seed to generate a new seed.
    val newSeed = (seed * 0X5DEECE66DL + 0XBL) & 0XFFFFFFFFFFFFL
    // The next state, which is an `RNG` instance created from the new seed.
    val nextRNG = newSeed
    // `>>>` is right binary shift with zero fill. The value `n` is our new pseudo-random integer.
    val n = (newSeed >>> 16).toInt
    // The return value is a tuple containing both a pseudo-random integer and the next `RNG` state.
    (n, nextRNG)
  }
}
| alexandru/monifu | monix-reactive/shared/src/test/scala/monix/reactive/internal/builders/UnfoldEvalObservableSuite.scala | Scala | apache-2.0 | 5,056 |
// Copyright 2013 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.rogue.index
/** A trait that represents the fact that a record type includes a list of the
* indexes that exist in MongoDB for that type.
*/
/**
 * Mixed into a record type to declare the MongoDB indexes that exist for it.
 * Both members default to "no indexes" and are meant to be overridden.
 */
trait IndexedRecord[M] {
  /** Regular (non-text) indexes declared for this record type. */
  val mongoIndexList: Seq[MongoIndex[_]] = Vector.empty[MongoIndex[_]]
  /** Optional full-text index declared for this record type. */
  val mongoTextIndex: Option[MongoTextIndex[_]] = Option.empty[MongoTextIndex[_]]
}
| sgrouples/rogue-fsqio | index/src/main/scala/IndexedRecord.scala | Scala | apache-2.0 | 360 |
package application
import org.scalatest.{BeforeAndAfter, FunSuite}
import scalikejdbc.ConnectionPool
/** Smoke test scaffold for RegularObservation; the actual call is disabled. */
class RegularObservationTest extends FunSuite with BeforeAndAfter {

  test("testSummary") {
    // Register the default JDBC connection pool used by the code under test.
    val url = "jdbc:mysql://localhost:6603/scalatrader"
    val user = "root"
    val password = "password"
    ConnectionPool.singleton(url, user, password)
    // Disabled manual check: requires a live exchange API key and database.
    //(new RegularObservation(null)).summary("QCYtAnfkaZiwrNwnxIlR6CTfG3gf90Latabg5241ABR5W1uDFNIkn")
  }
}
| rysh/scalatrader | scalatrader/test/application/RegularObservationTest.scala | Scala | mit | 398 |
/**
*
* NameDongleFragment
* Ledger wallet
*
* Created by Pierre Pollastri on 21/01/15.
*
* The MIT License (MIT)
*
* Copyright (c) 2015 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package co.ledger.wallet.app.ui.m2fa.pairing
import android.os.Bundle
import android.view.ViewTreeObserver.OnGlobalLayoutListener
import android.view._
import android.view.inputmethod.EditorInfo
import co.ledger.wallet.R
import co.ledger.wallet.app.base.{ContractFragment, BaseFragment}
import co.ledger.wallet.models.PairedDongle
import co.ledger.wallet.core.utils.{Convert, TR}
import co.ledger.wallet.core.widget.{TextView, EditText}
import co.ledger.wallet.core.utils.AndroidImplicitConversions._
/**
 * Pairing step that asks the user to name the dongle being paired. The chosen
 * name is handed to the enclosing activity through the pairing-process contract.
 */
class NameDongleFragment extends BaseFragment with ContractFragment[CreateDonglePairingActivity.CreateDonglePairingProccessContract] {

  // Lazily resolved views (TR is the typed view-lookup helper).
  lazy val nameEditText = TR(R.id.name).as[EditText]
  lazy val bottomText = TR(R.id.bottom_text).as[TextView]
  lazy val frame = TR(R.id.frame).as[View]

  override def onCreate(b: Bundle): Unit = {
    super.onCreate(b)
    // This fragment contributes its own "done" item to the action bar.
    setHasOptionsMenu(true)
  }

  override def onCreateView(inflater: LayoutInflater, container: ViewGroup, savedInstanceState: Bundle): View = {
    inflater.inflate(R.layout.name_dongle_fragment, container, false)
  }

  override def onViewCreated(view: View, savedInstanceState: Bundle): Unit = {
    super.onViewCreated(view, savedInstanceState)
    // Pressing the keyboard's "done" action behaves like tapping the menu item.
    nameEditText.setOnEditorActionListener((actionId: Int, event: KeyEvent) => {
      actionId match {
        case EditorInfo.IME_ACTION_DONE => nextStep()
        case _ => false
      }
    })
  }

  override def onCreateOptionsMenu(menu: Menu, inflater: MenuInflater): Unit = {
    super.onCreateOptionsMenu(menu, inflater)
    inflater.inflate(R.menu.done_menu, menu)
  }

  override def onOptionsItemSelected(item: MenuItem): Boolean = {
    super.onOptionsItemSelected(item)
    item.getItemId match {
      case R.id.action_done => nextStep()
      case _ => false
    }
  }

  override def onResume(): Unit = {
    super.onResume()
    // No back navigation from this step: hide the home/up affordance.
    activity.foreach((activity) => {
      activity.getSupportActionBar.setDisplayHomeAsUpEnabled(false)
      activity.getSupportActionBar.setHomeButtonEnabled(false)
    })
    // Watch layout changes so the helper text can be hidden when the soft keyboard
    // shrinks the visible frame; the listener is removed again in onPause.
    frame.getViewTreeObserver.addOnGlobalLayoutListener(layoutObserver)
    nameEditText.requestFocus()
    // Place the cursor at the end of any pre-filled name.
    nameEditText.setSelection(nameEditText.getText().length())
  }

  override def onPause(): Unit = {
    super.onPause()
    frame.getViewTreeObserver.removeOnGlobalLayoutListener(layoutObserver)
  }

  // Hides the bottom helper text when the frame becomes too short (keyboard shown).
  val layoutObserver = new OnGlobalLayoutListener {
    override def onGlobalLayout(): Unit = {
      if (frame.getHeight < Convert.dpToPx(200)) {
        bottomText.setVisibility(View.GONE)
      }
      else {
        bottomText.setVisibility(View.VISIBLE)
      }
    }
  }

  // Forwards a non-empty name to the contract; always consumes the triggering event.
  private def nextStep(): Boolean = {
    if (nameEditText.getText().length() > 0)
      contract.setDongleName(nameEditText.getText().toString)
    true
  }

  override def tag: String = "NameDongleFragment"
}
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.executionplan.builders
import org.neo4j.cypher.internal.compiler.v2_3.commands._
import org.neo4j.cypher.internal.compiler.v2_3.commands.expressions._
import org.neo4j.cypher.internal.compiler.v2_3.commands.predicates._
import org.neo4j.cypher.internal.compiler.v2_3.commands.values.KeyToken
import org.neo4j.cypher.internal.compiler.v2_3.spi.PlanContext
import org.neo4j.cypher.internal.compiler.v2_3.symbols.SymbolTable
/**
 * Picks the best starting strategy for a single node given the predicates that are
 * required to hold for it. Each registered strategy proposes rated start items and
 * the cheapest one wins; GlobalStrategy always yields a fallback, so a result is
 * guaranteed.
 */
object NodeFetchStrategy {

  val nodeStrategies: Seq[NodeStrategy] = Seq(NodeByIdStrategy, IndexSeekStrategy, LabelScanStrategy, GlobalStrategy)

  def findStartStrategy(node: String, where: Seq[Predicate], ctx: PlanContext, symbols: SymbolTable): RatedStartItem = {
    val candidates =
      for {
        strategy <- nodeStrategies
        item <- strategy.findRatedStartItems(node, where, ctx, symbols)
      } yield item
    // Stable sort: the first item with the lowest rating wins.
    candidates.sortBy(_.rating).head
  }

  /** All (label, property) pairs backed by a uniqueness constraint, in KeyToken order. */
  def findUniqueIndexes(props: Map[KeyToken, Expression], labels: Seq[KeyToken], ctx: PlanContext): Seq[(KeyToken, KeyToken)] = {
    implicit val ordering = KeyToken.Ordering
    labels.flatMap(findUniqueIndexesForLabel(_, props.keys, ctx)).sorted
  }

  /** The (label, key) pairs among `keys` that carry a uniqueness constraint for `label`. */
  def findUniqueIndexesForLabel(label: KeyToken, keys: Iterable[KeyToken], ctx: PlanContext): Seq[(KeyToken, KeyToken)] = {
    val constrained =
      for {
        key <- keys
        _ <- ctx.getUniquenessConstraint(label.name, key.name)
      } yield (label, key)
    constrained.toSeq
  }

  // Ratings for start items: lower is better.
  val Single = 0
  val IndexEquality = 1
  val IndexRange = 2
  val IndexScan = 3
  val LabelScan = 4
  val Global = 5
}
import org.neo4j.cypher.internal.compiler.v2_3.executionplan.builders.NodeFetchStrategy.{Global, IndexEquality, LabelScan, Single}
/**
 * Bundles a possible start item with a rating (lower implies better) and the
 * predicates that are implicitly solved when using that start item.
 *
 * @param s                     the start item (all-nodes scan, label scan, index seek, ...)
 * @param rating                cost rating; lower means a more selective access path
 * @param solvedPredicates      predicates made redundant by choosing this start item
 * @param newUnsolvedPredicates predicates introduced by the rewrite that still must be applied
 */
case class RatedStartItem(s: StartItem, rating: Int, solvedPredicates: Seq[Predicate], newUnsolvedPredicates: Seq[Predicate] = Seq.empty)
/**
 * A strategy proposes RatedStartItems for a node, given the set of predicates that
 * must hold for that node. Implementations cover id lookup, index seeks, label
 * scans and the all-nodes fallback.
 */
trait NodeStrategy {

  type LabelName = String
  type IdentifierName = String
  type PropertyKey = String

  def findRatedStartItems(node: String, where: Seq[Predicate], ctx: PlanContext, symbols: SymbolTable): Seq[RatedStartItem]

  /** Label names asserted on `node` by HasLabel predicates, paired with the solved predicate. */
  protected def findLabelsForNode(node: String, where: Seq[Predicate]): Seq[SolvedPredicate[LabelName]] =
    for {
      predicate <- where
      labelName <- predicate match {
        case HasLabel(Identifier(identifier), label) if identifier == node => Some(label.name)
        case _ => None
      }
    } yield SolvedPredicate(labelName, predicate)

  case class SolvedPredicate[+T](solution: T, predicate: Predicate, newUnsolvedPredicate: Option[Predicate] = None)
}
object NodeByIdStrategy extends NodeStrategy {

  /**
   * Produces a NodeByIdOrEmpty start item when `where` pins down the node's id via
   * `id(n) = <expr>` (or an IN over a collection of ids). Rated `Single`, the best
   * possible rating, since an id lookup touches at most one node per id.
   */
  def findRatedStartItems(node: String, where: Seq[Predicate], ctx: PlanContext, symbols: SymbolTable): Seq[RatedStartItem] = {
    val solvedPredicates: Seq[SolvedPredicate[Expression]] = findEqualityPredicatesForBoundIdentifiers(node, symbols, where)
    val solutions: Seq[Expression] = solvedPredicates.map(_.solution)
    val predicates: Seq[Predicate] = solvedPredicates.map(_.predicate)
    // Use the collection-generic `+:` extractor instead of the List-only `::` cons
    // pattern: `solutions` is typed Seq, so a non-List implementation would have
    // thrown a MatchError with the previous `head :: tail` pattern, and together
    // with the empty case this match is now exhaustive for any Seq.
    solutions match {
      case Seq() => Seq()
      case head +: _ => Seq(RatedStartItem(NodeByIdOrEmpty(node, head), Single, predicates))
    }
  }

  // Finds expressions equated with id(<identifier>) that are computable from the
  // identifiers already bound in `symbols`.
  private def findEqualityPredicatesForBoundIdentifiers(identifier: IdentifierName, symbols: SymbolTable, where: Seq[Predicate]): Seq[SolvedPredicate[Expression]] = {
    // An expression is only usable if everything it references is already bound.
    def computable(expression: Expression): Boolean = expression.symbolDependenciesMet(symbols)
    where.collect {
      case predicate @ Equals(IdFunction(Identifier(id)), Literal(idValue: Number)) if id == identifier => SolvedPredicate(Literal(idValue.longValue()), predicate)
      case predicate @ Equals(Literal(idValue: Number), IdFunction(Identifier(id))) if id == identifier => SolvedPredicate(Literal(idValue.longValue()), predicate)
      case predicate @ Equals(IdFunction(Identifier(id)), expression) if id == identifier && computable(expression) => SolvedPredicate(expression, predicate)
      case predicate @ Equals(expression, IdFunction(Identifier(id))) if id == identifier && computable(expression) => SolvedPredicate(expression, predicate)
      case predicate @ AnyInCollection(collectionExpression, _, Equals(IdFunction(Identifier(id)), _)) if id == identifier && computable(collectionExpression) => SolvedPredicate(collectionExpression, predicate)
    }
  }
}
/**
 * Finds start items backed by schema indexes: a label predicate on the node combined
 * with an equality / prefix / range predicate on an indexed property. Equality over
 * a uniqueness constraint rates `Single`, plain equality `IndexEquality`, and prefix
 * or range seeks `IndexRange`.
 */
object IndexSeekStrategy extends NodeStrategy {

  def findRatedStartItems(node: String, where: Seq[Predicate], ctx: PlanContext, symbols: SymbolTable): Seq[RatedStartItem] = {
    val labelPredicates: Seq[SolvedPredicate[LabelName]] = findLabelsForNode(node, where)
    val equalityPredicates: Seq[SolvedPredicate[PropertyKey]] = findEqualityPredicatesOnProperty(node, where, symbols)
    val seekByPrefixPredicates: Seq[SolvedPredicate[PropertyKey]] = findIndexSeekByPrefixPredicatesOnProperty(node, where, symbols)
    val seekByRangePredicates: Seq[SolvedPredicate[PropertyKey]] = findIndexSeekByRangePredicatesOnProperty(node, where, symbols)

    // For every label on the node, combine it with each property predicate for which
    // an index rule actually exists in the plan context.
    val result = for (
      labelPredicate <- labelPredicates
    ) yield {
      val equalityItems: Seq[RatedStartItem] =
        for (equalityPredicate <- equalityPredicates if ctx.getIndexRule(labelPredicate.solution, equalityPredicate.solution).nonEmpty)
          yield {
            // A uniqueness constraint means at most one node can match.
            val optConstraint = ctx.getUniquenessConstraint(labelPredicate.solution, equalityPredicate.solution)
            val rating = if (optConstraint.isDefined) Single else IndexEquality
            val indexType = if (optConstraint.isDefined) UniqueIndex else AnyIndex
            val schemaIndex = SchemaIndex(node, labelPredicate.solution, equalityPredicate.solution, indexType, None)
            RatedStartItem(schemaIndex, rating, solvedPredicates = Seq.empty,
              newUnsolvedPredicates = equalityPredicate.newUnsolvedPredicate.toSeq)
          }
      val seekByPrefixItems: Seq[RatedStartItem] =
        for (seekByPrefixPredicate <- seekByPrefixPredicates if ctx.getIndexRule(labelPredicate.solution, seekByPrefixPredicate.solution).nonEmpty)
          yield {
            val schemaIndex = SchemaIndex(node, labelPredicate.solution, seekByPrefixPredicate.solution, AnyIndex, None)
            RatedStartItem(schemaIndex, NodeFetchStrategy.IndexRange, solvedPredicates = Seq.empty,
              newUnsolvedPredicates = seekByPrefixPredicate.newUnsolvedPredicate.toSeq)
          }
      val seekByRangeItems: Seq[RatedStartItem] =
        for (seekByRangePredicate <- seekByRangePredicates if ctx.getIndexRule(labelPredicate.solution, seekByRangePredicate.solution).nonEmpty)
          yield {
            val schemaIndex = SchemaIndex(node, labelPredicate.solution, seekByRangePredicate.solution, AnyIndex, None)
            RatedStartItem(schemaIndex, NodeFetchStrategy.IndexRange, solvedPredicates = Seq.empty)
          }
      equalityItems ++ seekByPrefixItems ++ seekByRangeItems
    }
    result.flatten
  }

  // Property keys compared for equality with an expression computable from already-bound
  // symbols (covers `n.prop = x`, `x = n.prop` and IN over a collection).
  private def findEqualityPredicatesOnProperty(identifier: IdentifierName, where: Seq[Predicate], symbols: SymbolTable): Seq[SolvedPredicate[PropertyKey]] = {
    where.collect {
      case predicate @ Equals(Property(Identifier(id), propertyKey), expression)
        if id == identifier && expression.symbolDependenciesMet(symbols) => SolvedPredicate(propertyKey.name, predicate)
      case predicate @ Equals(expression, Property(Identifier(id), propertyKey))
        if id == identifier && expression.symbolDependenciesMet(symbols) => SolvedPredicate(propertyKey.name, predicate)
      case predicate @ AnyInCollection(expression, _, Equals(Property(Identifier(id), propertyKey),Identifier(_)))
        if id == identifier && expression.symbolDependenciesMet(symbols) => SolvedPredicate(propertyKey.name, predicate)
    }
  }

  // Property keys used in STARTS WITH predicates on this identifier.
  private def findIndexSeekByPrefixPredicatesOnProperty(identifier: IdentifierName, where: Seq[Predicate], initialSymbols: SymbolTable): Seq[SolvedPredicate[PropertyKey]] = {
    where.collect {
      case literalPredicate@StartsWith(p@Property(Identifier(id), prop), _) if id == identifier =>
        SolvedPredicate(prop.name, literalPredicate)
    }
  }

  // Property keys constrained by ANDed inequality comparisons whose other operand is
  // computable from the already-bound symbols.
  private def findIndexSeekByRangePredicatesOnProperty(identifier: IdentifierName, where: Seq[Predicate], symbols: SymbolTable): Seq[SolvedPredicate[PropertyKey]] =
    where.collect {
      case predicate@AndedPropertyComparablePredicates(Identifier(id), prop@Property(_, key), comparables)
        if id == identifier && comparables.forall(_.other(prop).symbolDependenciesMet(symbols)) =>
        SolvedPredicate(key.name, predicate)
    }
}
/** Fallback strategy: a full all-nodes scan. Always applicable, but worst-rated. */
object GlobalStrategy extends NodeStrategy {
  def findRatedStartItems(node: String, where: Seq[Predicate], ctx: PlanContext, symbols: SymbolTable): Seq[RatedStartItem] = {
    val allNodesScan = RatedStartItem(AllNodes(node), Global, Seq.empty)
    Seq(allNodesScan)
  }
}
/** Proposes one label-scan start item per label asserted on the node. */
object LabelScanStrategy extends NodeStrategy {
  def findRatedStartItems(node: String, where: Seq[Predicate], ctx: PlanContext, symbols: SymbolTable): Seq[RatedStartItem] =
    for {
      SolvedPredicate(labelName, predicate, newUnsolvedPredicate) <- findLabelsForNode(node, where)
    } yield RatedStartItem(NodeByLabel(node, labelName), LabelScan, Seq(predicate), newUnsolvedPredicate.toSeq)
}
} | HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/executionplan/builders/NodeFetchStrategy.scala | Scala | apache-2.0 | 10,425 |
package com.twitter.finagle.memcached.unit.protocol.text
import com.twitter.finagle.memcached.protocol.StorageCommand
import com.twitter.finagle.memcached.protocol.text.Framer
import com.twitter.finagle.memcached.protocol.text.server.ServerFramer
import com.twitter.io.{Buf, ByteReader}
import org.scalatest.FunSuite
/**
 * Tests for the memcached text-protocol server framer: splitting an incoming byte
 * stream into command frames (terminated by a line ending) and fixed-length data
 * frames whose length was announced by a preceding storage command.
 */
class FramerTest extends FunSuite {

  // A server framer recognising the standard storage commands (set, add, ...).
  private class TestFramer extends ServerFramer(StorageCommand.StorageCommands)

  test("return empty frame sequence on partial frame") {
    val framer = new TestFramer
    assert(framer(Buf.Utf8("set")) == Seq.empty)
  }

  test("frame response without data") {
    val framer = new TestFramer
    assert(framer(Buf.Utf8("STORED\\r\\n")) == Seq(Buf.Utf8("STORED")))
  }

  test("accumulate partial response frame") {
    val framer = new TestFramer
    assert(framer(Buf.Utf8("ST")).isEmpty)
    assert(framer(Buf.Utf8("OR")).isEmpty)
    assert(framer(Buf.Utf8("ED\\r")).isEmpty)
    assert(framer(Buf.Utf8("\\n")) == Seq(Buf.Utf8("STORED")))
  }

  test("accumulate response frame after returning frame") {
    val framer = new TestFramer
    assert(framer(Buf.Utf8("ST")).isEmpty)
    assert(framer(Buf.Utf8("ORED\\r\\nNOT_ST")) == Seq(Buf.Utf8("STORED")))
    assert(framer(Buf.Utf8("ORED\\r\\n")) == Seq(Buf.Utf8("NOT_STORED")))
  }

  test("Frame multiple frames") {
    val framer = new TestFramer
    assert(
      framer(Buf.Utf8("STORED\\r\\nNOT_STORED\\r\\n")) ==
        Seq(Buf.Utf8("STORED"), Buf.Utf8("NOT_STORED"))
    )
  }

  test("Frame data frame") {
    val framer = new TestFramer
    // The storage command announces a 10-byte data frame to follow.
    assert(framer(Buf.Utf8("set foo 0 0 10\\r\\n")) == Seq(Buf.Utf8("set foo 0 0 10")))
    assert(framer(Buf.Utf8("abcdefghij\\r\\n")) == Seq(Buf.Utf8("abcdefghij")))
  }

  test("accumulate partial data frames") {
    val framer = new TestFramer
    assert(framer(Buf.Utf8("set foo 0 0 10\\r\\nabc")) == Seq(Buf.Utf8("set foo 0 0 10")))
    assert(framer(Buf.Utf8("def")).isEmpty)
    assert(framer(Buf.Utf8("ghi")).isEmpty)
    assert(framer(Buf.Utf8("j\\r\\n")) == Seq(Buf.Utf8("abcdefghij")))
  }

  test("accumulate response after framing data frame") {
    val framer = new TestFramer
    assert(
      framer(Buf.Utf8("set foo 0 0 3\\r\\nabc\\r\\nSTO")) ==
        Seq(Buf.Utf8("set foo 0 0 3"), Buf.Utf8("abc"))
    )
    assert(framer(Buf.Utf8("RED\\r\\n")) == Seq(Buf.Utf8("STORED")))
  }

  test("Don't frame data frame until newlines are received") {
    val framer = new TestFramer
    assert(framer(Buf.Utf8("set foo 0 0 3\\r\\n")) == Seq(Buf.Utf8("set foo 0 0 3")))
    assert(framer(Buf.Utf8("abc")) == Seq.empty)
    assert(framer(Buf.Utf8("\\r\\n")) == Seq(Buf.Utf8("abc")))
  }

  test("Ignore newlines in the middle of data frames") {
    val framer = new TestFramer
    // The announced length is 10, so embedded line endings are part of the payload.
    assert(framer(Buf.Utf8("set foo 0 0 10\\r\\n")) == Seq(Buf.Utf8("set foo 0 0 10")))
    assert(framer(Buf.Utf8("abc\\r\\ndef\\r\\n\\r\\n")) == Seq(Buf.Utf8("abc\\r\\ndef\\r\\n")))
  }

  test("bytesBeforeLineEnd returns -1 on empty ByteReader") {
    val reader = ByteReader(Buf.Empty)
    assert(Framer.bytesBeforeLineEnd(reader) == -1)
  }

  test("bytesBeforeLineEnd returns 0 when reader's underlying buf starts with \\r\\n") {
    val reader = ByteReader(Buf.Utf8("\\r\\n"))
    assert(Framer.bytesBeforeLineEnd(reader) == 0)
  }

  test("bytesBeforeLineEnd returns -1 when reader's underling buf does not contain \\r\\n") {
    val reader = ByteReader(Buf.Utf8("foo bar baz"))
    assert(Framer.bytesBeforeLineEnd(reader) == -1)
  }

  test("bytesBeforeLineEnd returns index of \\r\\n in reader's underlying buf") {
    val reader = ByteReader(Buf.Utf8("foo \\r\\n bar"))
    assert(Framer.bytesBeforeLineEnd(reader) == 4)
  }
}
| mkhq/finagle | finagle-memcached/src/test/scala/com/twitter/finagle/memcached/unit/protocol/text/FramerTest.scala | Scala | apache-2.0 | 3,636 |
/*
Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package de.hpi.ingestion.dataimport.wikipedia.models
/**
 * A single Wikipedia article: its title plus an optional raw article text.
 *
 * @param title article title
 * @param text  optional article text; absent until populated
 */
case class WikipediaEntry(
  title: String,
  var text: Option[String] = None
) {
  /** Stores `t` as the article text; a null argument clears it to None. */
  def setText(t: String): Unit = {
    this.text = Option(t)
  }

  /** Returns the article text, or the empty string when none is present. */
  def getText(): String = this.text match {
    case Some(content) => content
    case None => ""
  }
}
| bpn1/ingestion | src/main/scala/de/hpi/ingestion/dataimport/wikipedia/models/WikipediaEntry.scala | Scala | apache-2.0 | 843 |
/*
* Copyright 2013 深圳市葡萄藤网络科技有限公司 (Shenzhen Putaoteng Network Technology Co., Ltd.)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com
package dongxiguo
package memcontinuationed
import org.junit._
import java.util.concurrent._
import java.nio.channels._
import java.io._
import scala.util.continuations._
import scala.util.control.Exception.Catcher
// Companion: supplies the implicit logger/formatter/appender triple expected by
// the zero-log logging framework for this test.
object SetTest {
  implicit val (logger, formatter, appender) = ZeroLoggerFactory.newLogger(this)
}
/**
 * Integration test against a live memcached server: stores a key, reads it back,
 * then deletes it. The continuation-based client runs callbacks on a worker thread,
 * so the JUnit thread blocks on wait() until each callback calls notify().
 */
class SetTest {
  import SetTest._

  @Test
  def stored() {
    synchronized {
      // Set from the async callbacks (under `synchronized`), checked at the end.
      var isFailed = false;
      val executor = Executors.newFixedThreadPool(1)
      val channelGroup = AsynchronousChannelGroup.withThreadPool(executor)
      val memcontinuationed = new Memcontinuationed(channelGroup, TestServerAddress.getAddress _)
      val accessor = new UTF8Accessor("memcontinuationed_set_test_stored")
      // Any asynchronous failure marks the test as failed and wakes the main thread.
      implicit def defaultCatcher: Catcher[Unit] = {
        case e: Exception =>
          synchronized {
            isFailed = true
            notify()
          }
      }
      // Store "1" and require it back; mismatch marks the test failed.
      reset {
        memcontinuationed.set(accessor, "1")
        val result = memcontinuationed.require(accessor)
        synchronized {
          result match {
            case "1" =>
            case _ =>
              isFailed = true
          }
          notify()
        }
      }
      wait()
      // Clean up the key regardless of the comparison outcome.
      reset {
        memcontinuationed.delete(accessor)
        synchronized {
          notify()
        }
      }
      wait()
      memcontinuationed.shutdown()
      channelGroup.shutdown()
      if (isFailed) {
        Assert.fail()
      }
    }
  }
}
// vim: set ts=2 sw=2 et:
| Atry/memcontinuationed | src/test/scala/com/dongxiguo/memcontinuationed/SetTest.scala | Scala | apache-2.0 | 2,187 |
package com.kakao.shaded.jackson.module.scala.util
import scala.language.implicitConversions
/** A wrapper exposing an underlying value of type `X` through `value`. */
trait PimpedType[X] {
  def value: X
}

object PimpedType {
  /** Implicitly unwraps a PimpedType back to its underlying value. */
  implicit def UnwrapPimpedType[X](pimped: PimpedType[X]): X = pimped.value
}
| kakao/mango | mango-shaded/src/main/scala/com/kakao/shaded/jackson/module/scala/util/PimpedType.scala | Scala | apache-2.0 | 227 |
package scodec
package codecs
import scodec.bits.BitVector
/** Codec that prefixes error messages with the specified name. */
private[codecs] final class NamedCodec[A](name: String, target: Codec[A]) extends Codec[A] {

  // Prepends this codec's name to an error message.
  private def prefixed(err: String): String = s"$name: $err"

  override def encode(a: A) = target.encode(a).leftMap(prefixed)

  override def decode(buffer: BitVector) = target.decode(buffer).leftMap(prefixed)

  override def toString = s"$name($target)"
}
| ceedubs/scodec | src/main/scala/scodec/codecs/NamedCodec.scala | Scala | bsd-3-clause | 451 |
package ingraph.ire.nodes.unary.aggregation
import ingraph.ire.datatypes._
import ingraph.ire.math.GenericMath
/** Incrementally maintains the sum of the `sumKey` column over inserted and removed tuples. */
class StatefulSum(sumKey: Int) extends StatefulAggregate {

  // Running total; typed Any because GenericMath dispatches over mixed numeric types.
  var sum: Any = 0

  override def maintainPositive(values: Iterable[Tuple]): Unit =
    values.foreach { tuple =>
      sum = GenericMath.add(sum, tuple(sumKey))
    }

  override def maintainNegative(values: Iterable[Tuple]): Unit =
    values.foreach { tuple =>
      sum = GenericMath.subtract(sum, tuple(sumKey))
    }

  override def value(): Any = sum
}
| FTSRG/ingraph | ire/src/main/scala/ingraph/ire/nodes/unary/aggregation/StatefulSum.scala | Scala | epl-1.0 | 541 |
/**
* Copyright (c) 2011, Andrew Shewring
* Licensed under the new BSD License (see the LICENSE.txt file for details).
*/
package com.github.ashewring.sbttycho
import sbt._
import java.io.File
import SbtTychoConstants._
/**
 * Main trait to be mixed into the parent project.
 *
 * Implementors supply an [[SbtTychoConfiguration]] via `tychoConfiguration`. All of the
 * task definitions below are commented out (legacy sbt 0.7-era API) and retained verbatim
 * for reference only — they are not compiled.
 */
trait SbtTycho {
  // Self-type: this trait may only be mixed into an sbt Project definition.
  this: Project =>

  /**
   * Configuration to be defined within the project definition
   */
  def tychoConfiguration: SbtTychoConfiguration

  /**
   * Main entry point to invoke sbt-tycho build
   */
  // NOTE(review): everything below is disabled legacy code kept for reference.
  /*
  lazy val tychoBuild = task {
    args =>
      // For command parsing, see http://stackoverflow.com/questions/2315912/scala-best-way-to-parse-command-line-parameters-cli/3183991#3183991
      if (args.length == 0) {
        log.info(TychoBuildUsage)
        exit(1)
      }
      executeTychoBuild(args)
  } describedAs ("Launches Maven Tycho build on SBT-generated POMs")

  lazy val tychoPreBuild = task {
    executeTychoPreBuild()
  } describedAs ("sbt-tycho step before delegating to Maven Tycho")

  lazy val tychoPostBuild = task {
    executeTychoPostBuild()
  } describedAs ("sbt-tycho step after delegating to Maven Tycho")

  lazy val tychoCleanup = task {
    val generator = createGenerator()
    generator.cleanup()
    None
  } describedAs ("Removes generated files")

  // TODO test
  lazy val tychoDeploy = task {
    val configuration = tychoConfiguration.deployConfiguration.getOrElse(
      error("No deployment configuration found - the deployConfiguration method must be overridden")
    )
    val repositoryUrl = if (isSnapshotVersion) {
      configuration.snapshotsRepositoryUrl
    } else {
      configuration.releasesRepositoryUrl
    }
    // set repository location
    val altDeploymentRepository = "repo::default::" + repositoryUrl
    System.setProperty("altDeploymentRepository", altDeploymentRepository)
    executeTychoBuild(Array("deploy"))
    None
  } describedAs ("Deploys to one of the configured Maven repositories")

  def isSnapshotVersion() = {
    projectVersion.toString.trim.endsWith("SNAPSHOT")
  }

  private def createGenerator() = {
    SbtTychoGenerator(tychoConfiguration, log)
  }

  private def executeTychoPreBuild() = {
    val generator = createGenerator()
    generator.cleanup()
    printBuildInformation()
    generator.generateTychoFiles()
    None
  }

  private def executeTychoPostBuild() = {
    log.debug("Cleaning up after successful build")
    val generator = createGenerator()
    generator.cleanup()
    None
  }

  private def executeTychoBuild(args: Array[String]) = task {
    try {
      executeTychoPreBuild()
      val tychoResult = delegateToMavenTycho(args)
      if (tychoResult) {
        executeTychoPostBuild()
      } else {
        val message = "Tycho exited abnormally. Cleanup step was not executed, to allow inspection of generated files."
        log.info(message)
        Some(message)
      }
    } catch {
      case e => Some("Tycho build failed - " + e)
    }
  }

  private def delegateToMavenTycho(args: Array[String]): Boolean = {
    import Process._
    val mavenExecutable = findPathToMavenExecutable()
    val goalList = args.mkString(" ")
    log.info("Executing Maven Tycho with goal(s) '" + goalList + "'...")
    // TODO make this customisable
    val opts = "-e -X "
    val command = mavenExecutable + " " + opts + goalList
    val result = <x>{command}</x> ! log
    result == 0
  }

  private def findPathToMavenExecutable(): File = {
    var mavenHome = System.getenv("MAVEN_HOME")
    if (mavenHome == null) {
      mavenHome = System.getenv("M3_HOME")
    }
    if (mavenHome == null) {
      error("Could not resolve Maven home - Tycho build cannot be invoked")
    }
    val binDirectory = new File(mavenHome, "bin")
    val osName = System.getProperty("os.name").toLowerCase
    val fileName = if (osName.contains("win")) {
      // windows
      "mvn.bat"
    } else {
      // Unix / Mac
      "mvn"
    }
    new File(binDirectory, fileName)
  }

  private def printBuildInformation() {
    log.info("organization: " + projectOrganization.get.get)
    log.info("name: " + projectName.get.get)
    log.info("version: " + projectVersion.get.get)
    log.info("Environment variables:")
    printProperty("MAVEN_HOME")
    printProperty("M3_HOME")
    printProperty("MAVEN_OPTS")
    printProperty("SBT_OPTS")
  }

  private def printProperty(name: String) {
    val variable = System.getenv(name)
    val displayValue = if (variable == null) "<not set>" else variable
    log.info(name + " = " + displayValue)
  }
  */
}
| ashewring/sbt-tycho | src/main/scala/com/github/ashewring/sbttycho/SbtTycho.scala | Scala | bsd-3-clause | 4,320 |
package com.eharmony.aloha.feature
/**
 * Sos2 takes a value and breaks it apart into a linear combination of the two closest values as specified by the
 * ''min'', ''max'', and ''delta''. As long as the value being sos,,2,, binned exists in the interval
 * [''min'', ''max''], then there exists an isomorphism between the value and the sos,,2,, binned value.
 *
 * {{{
 * // delta = 1
 * // |<--------------------->|<--------------------->|
 * // |     |     |     |     |     |     |     |     |
 * // |     |     |     |     |     |     |     |     |
 * // 0    0.25  0.5   0.75   1    1.25  1.5   1.75   2
 *
 * // Show the keys output ('=' followed by a value)
 * val v = 1.25
 * val s = sos2(v, 0, 2, 1)
 * assert(s == List(("=1", 0.75), ("=2", 0.25)))
 *
 * // Show the existing isomorphism between v and sos2I(v, min, max, delta) (if min <= v <= max)
 * val vPrime = sos2I(v, 0, 2, 1).foldLeft(0.0){case(s, (k, v)) => s + k * v}
 * assert(v == vPrime)
 * }}}
 */
trait Sos2 { self: DefaultPossessor with BasicMath =>

  // Pairs emitted when the input value is missing; DefaultForMissingDataInReg is supplied by DefaultPossessor.
  private[this] val UnknownSeq = Option(DefaultForMissingDataInReg)

  // Key emitted (rendered as "=UNDERFLOW") when the input value falls below `min`.
  private[this] val UnderflowKey = Some("UNDERFLOW")

  /** Bins an optional value with no explicit underflow key and the default "unknown" pairs. */
  @inline def sos2(value: Option[Double], min: Long, max: Long, delta: Long): Iterable[(String, Double)] =
    sos2U(value, min, max, delta, None, None)

  /**
   * See [[com.eharmony.aloha.feature.Sos2]].
   *
   * @param value number to be sos,,2,, binned
   * @param min minimum bin value
   * @param max maximum bin value
   * @param delta bin size
   * @param underflowKey When value < min, an underflow key-value pair is emitted.  This controls the key that is
   *                     emitted.
   * @param unknownKey When value is missing, a separate key-value pair is emitted.  This controls the key that is
   *                   emitted.
   * @return sos,,2,, binned value
   */
  @inline def sos2(value: Option[Double], min: Long, max: Long, delta: Long, underflowKey: String, unknownKey: String): Iterable[(String, Double)] =
    sos2U(value, min, max, delta, Option(underflowKey), Option(Seq((s"=$unknownKey", 1.0))))

  /** Bins an optional value using the default underflow key and default "unknown" pairs. */
  @inline def sos2U(value: Option[Double], min: Long, max: Long, delta: Long): Iterable[(String, Double)] =
    sos2U(value, min, max, delta, UnderflowKey, UnknownSeq)

  /**
   * {{{
   * scala> (0 to 10).map(_ / 4.0 - 0.25).map(v => s"$v\\t${sos2U(v, 0, 2, 1)}").foreach(println)
   * -0.25  List((=UNDERFLOW,1.0))
   * 0.0    List((=0,1.0))
   * 0.25   List((=0,0.75), (=1,0.25))
   * 0.5    List((=0,0.5), (=1,0.5))
   * 0.75   List((=0,0.25), (=1,0.75))
   * 1.0    List((=1,1.0))
   * 1.25   List((=1,0.75), (=2,0.25))
   * 1.5    List((=1,0.5), (=2,0.5))
   * 1.75   List((=1,0.25), (=2,0.75))
   * 2.0    List((=2,1.0))
   * 2.25   List((=2,1.0))
   * }}}
   * @param value number to be sos,,2,, binned
   * @param min minimum bin value
   * @param max maximum bin value
   * @param delta bin size
   * @return sos,,2,, binned value
   */
  @inline def sos2U(value: Double, min: Long, max: Long, delta: Long): Iterable[(String, Double)] =
    sos2U(Some(value), min, max, delta, UnderflowKey, UnknownSeq)

  /** Like sos2U but no underflows are reported.  Instead the values are first clamped to be in range so in the
    * event value < min, return a tuple representing the min.
    * @param value number to be sos,,2,, binned
    * @param min minimum bin value
    * @param max maximum bin value
    * @param delta bin size
    * @return sos,,2,, binned value
    */
  @inline def sos2(value: Double, min: Long, max: Long, delta: Long): Iterable[(String, Double)] =
    sos2I(value, min, max, delta).map(p => (s"=${p._1}", p._2))

  /**
   * Full-control variant: dispatches on NaN / underflow / in-range and falls back to `unknown`.
   *
   * @param value number to be sos,,2,, binned
   * @param min minimum bin value
   * @param max maximum bin value
   * @param delta bin size
   * @param underflowKey When value < min, an underflow key-value pair is emitted.  This controls the key that is
   *                     emitted.
   * @param unknown When value is missing (None), a separate key-value pair is emitted.  This controls the pair(s)
   *                that is/are emitted.
   * @return
   */
  def sos2U(value: Option[Double], min: Long, max: Long, delta: Long, underflowKey: Option[String], unknown: Option[Iterable[(String, Double)]]): Iterable[(String, Double)] = {
    value flatMap {_ match {
      // NaN is treated the same as a missing value.
      case v if v.isNaN => unknown
      // Below range: emit the underflow pair; note badPair(None) yields None, which then
      // falls through to `unknown` via the orElse below.
      case v if v < min => badPair(underflowKey)
      case v => Option(sos2(v, min, max, delta))
    }} orElse unknown getOrElse Nil
  }

  /** This is the purest form of sos,,2,, binning that clamps the values in the [''min'', ''max''] interval and then
    * bins.
    * {{{
    * scala> (0 to 10).map(_ / 4.0 - 0.25).map(v => s"$v\\t${sos2(v, 0, 2, 1)}").foreach(println)
    * -0.25  List((0,1.0))
    * 0.0    List((0,1.0))
    * 0.25   List((0,0.75), (1,0.25))
    * 0.5    List((0,0.5), (1,0.5))
    * 0.75   List((0,0.25), (1,0.75))
    * 1.0    List((1,1.0))
    * 1.25   List((1,0.75), (2,0.25))
    * 1.5    List((1,0.5), (2,0.5))
    * 1.75   List((1,0.25), (2,0.75))
    * 2.0    List((2,1.0))
    * 2.25   List((2,1.0))
    * }}}
    * @param value number to be sos,,2,, binned
    * @param min minimum bin value
    * @param max maximum bin value
    * @param delta bin size
    * @return sos,,2,, binned value
    */
  def sos2I(value: Double, min: Long, max: Long, delta: Long) = {
    // Clamp into [min, max] (clamp comes from the BasicMath self-type), then rescale to unit-width bins.
    val v = (clamp(value, min, max) - min) / delta
    val bin = v.toInt
    val binName = (min + bin * delta).toInt
    val fraction = v - bin
    val oneMinus = 1 - fraction
    // Exact-knot case: fraction == 0.0 exactly, so a single pair with weight 1 suffices.
    // (Deliberate exact floating-point comparison.)
    if (1 == oneMinus) {
      Seq((binName, oneMinus))
    }
    else {
      // TODO: Determine whether this variable is intended to equal binName when (value / delta) is an integer.
      //       If so, then the two keys will be the same and the key specified second will clobber the first key-value
      //       pair.  This is fine but, it should be noted.  It also has the implication that the second key-value
      //       pair should be declared first because it seems like a better idea that if we have to specify one k-v
      //       pair, then it should be the one that has an associated value of 1.  It seems weird to specify it so
      //       that the key with value 0 is the declared one.
      val binNameP1 = (min + (bin + 1) * delta).toInt
      // See above note!
      Seq((binName, oneMinus), (binNameP1, fraction))
    }
  }

  // Lifts an optional key into the "=key -> 1.0" pair shape; None stays None.
  @inline private[this] def badPair(s: Option[String]) = s.map(k => Seq((s"=$k", 1.0)))
}
| eHarmony/aloha | aloha-core/src/main/scala/com/eharmony/aloha/feature/Sos2.scala | Scala | mit | 6,865 |
package io.citrine.lolo.linear
import breeze.linalg.{diag, pinv, sum, DenseMatrix, DenseVector}
import io.citrine.lolo.{Learner, Model, PredictionResult, TrainingResult}
/**
 * Linear and ridge regression learner.
 *
 * Created by maxhutch on 12/6/16.
 *
 * @param regParam optional Tikhonov regularization strength; unset or 0.0 means ordinary least squares
 * @param fitIntercept whether to fit an intercept or not
 */
case class LinearRegressionLearner(
    regParam: Option[Double] = None,
    fitIntercept: Boolean = true
) extends Learner {

  /**
   * Train a linear model via direct inversion.
   *
   * @param trainingData to train on
   * @param weights for the training rows, if applicable
   * @return a model
   */
  override def train(
      trainingData: Seq[(Vector[Any], Any)],
      weights: Option[Seq[Double]]
  ): LinearRegressionTrainingResult = {
    val n = trainingData.size

    /* Get the indices of the continuous features: Double-typed and never NaN in any row */
    val indices: Vector[Int] = trainingData.head._1.zipWithIndex
      .filter(_._1.isInstanceOf[Double])
      .filterNot(_._1.asInstanceOf[Double].isNaN)
      .map(_._2)
      .filterNot(i => trainingData.exists(_._1(i).asInstanceOf[Double].isNaN))
      .filterNot { i =>
        // regParam is already Option[Double]; no cast needed (matches the usage further below)
        val unregularized = !regParam.exists(_ > 0.0)
        lazy val constant = trainingData.forall(_._1(i) == trainingData.head._1(i))
        unregularized && constant // remove constant features if there's no regularization
      }

    /* If we are fitting the intercept, add a row of 1s */
    val At = if (fitIntercept) {
      new DenseMatrix(
        indices.size + 1,
        n,
        trainingData.flatMap(r => indices.map(r._1(_).asInstanceOf[Double]) :+ 1.0).toArray
      )
    } else {
      new DenseMatrix(indices.size, n, trainingData.flatMap(r => indices.map(r._1(_).asInstanceOf[Double])).toArray)
    }
    val k = At.rows

    /* If the weights are specified, multiply At by them */
    val weightsMatrix = weights.map(w => diag(new DenseVector(w.toArray)))
    val Atw = if (weightsMatrix.isDefined) {
      At * weightsMatrix.get
    } else {
      At
    }
    val A = Atw.t
    val b = if (weightsMatrix.isDefined) {
      new DenseVector(trainingData.map(_._2.asInstanceOf[Double]).zip(weights.get).map(p => p._1 * p._2).toArray)
    } else {
      new DenseVector(trainingData.map(_._2.asInstanceOf[Double]).toArray)
    }

    val beta = if (regParam.exists(_ > 0) || n >= k) {
      /* Construct the regularized problem and solve it */
      val regVector = Math.pow(regParam.getOrElse(0.0), 2) * DenseVector.ones[Double](k)
      if (fitIntercept) regVector(-1) = 0.0 // do not regularize the intercept term
      val M = At * A + diag(regVector)
      try {
        val Mi = pinv(M)
        /* Backsub to get the coefficients */
        Mi * At * b
      } catch {
        // Recover only from non-fatal failures (e.g. a numerically singular system);
        // fatal errors (OOM, interrupts) must propagate rather than be swallowed.
        case scala.util.control.NonFatal(_) =>
          // Fall back to a constant model predicting the (weighted) mean response
          val mean = if (weightsMatrix.isDefined) sum(b) / weights.get.sum else sum(b) / b.length
          val res = DenseVector.zeros[Double](k)
          res(-1) = mean
          res
      }
    } else {
      // Under-determined and unregularized: pseudo-inverse yields the minimum-norm solution
      pinv(A) * b
    }

    // Record the index mapping only when some input features were filtered out
    val indicesToModel = if (indices.size < trainingData.head._1.size) {
      Some(indices, trainingData.head._1.size)
    } else {
      None
    }

    /* If we fit the intercept, take it off the end of the coefficients */
    val model = if (fitIntercept) {
      new LinearRegressionModel(beta(0 to -2), beta(-1), indices = indicesToModel)
    } else {
      new LinearRegressionModel(beta, 0.0, indices = indicesToModel)
    }
    new LinearRegressionTrainingResult(model)
  }
}
/**
 * Thin container around the trained linear model.
 *
 * @param model contained
 */
class LinearRegressionTrainingResult(model: LinearRegressionModel) extends TrainingResult {

  override def getModel(): LinearRegressionModel = model

  /**
   * Get a measure of the importance of the model features.
   *
   * Importances are the absolute coefficient magnitudes, normalized to sum to one.
   *
   * @return feature influences as an array of doubles
   */
  override def getFeatureImportance(): Option[Vector[Double]] = {
    val magnitudes: Vector[Double] = model.getBeta().map(Math.abs)
    val scale: Double = 1.0 / magnitudes.sum
    Some(magnitudes.map(_ * scale))
  }
}
/**
 * Linear regression model as a coefficient vector and intercept
 *
 * @param beta      coefficient vector
 * @param intercept intercept
 * @param indices   optional (selected feature indices, original feature count); present when
 *                  training filtered out non-continuous features
 */
class LinearRegressionModel(
    beta: DenseVector[Double],
    intercept: Double,
    indices: Option[(Vector[Int], Int)] = None
) extends Model[LinearRegressionResult] {

  /**
   * Apply the model to a seq of inputs
   *
   * @param inputs to apply the model to
   * @return a predictionresult which includes, at least, the expected outputs
   */
  override def transform(inputs: Seq[Vector[Any]]): LinearRegressionResult = {
    // Keep only the features selected at training time (if any filtering happened),
    // then flatten row-by-row into a single sequence of doubles.
    // NOTE(review): the bound `size` in the pattern is unused here.
    val filteredInputs = indices
      .map { case (ind, size) => inputs.map(inp => ind.map(inp(_))) }
      .getOrElse(inputs)
      .flatten
      .asInstanceOf[Seq[Double]]
    // Column-major matrix: one column per input row, (features x inputs)
    val inputMatrix = new DenseMatrix(filteredInputs.size / inputs.size, inputs.size, filteredInputs.toArray)
    // y = beta^T X + intercept, evaluated for all inputs at once
    val resultVector = beta.t * inputMatrix + intercept
    val result = resultVector.t.toArray.toSeq
    val grad = getBeta()
    new LinearRegressionResult(result, grad)
  }

  /**
   * Get the beta from the linear model \\beta^T X = y
   *
   * When training filtered features, the coefficients are scattered back into a
   * zero-padded vector of the original feature count.
   *
   * @return beta as a vector of double
   */
  def getBeta(): Vector[Double] = {
    indices
      .map {
        case (inds, size) =>
          val empty = DenseVector.zeros[Double](size)
          // Place each learned coefficient at its original feature position
          inds.zipWithIndex.foreach { case (j, i) => empty(j) = beta(i) }
          empty
      }
      .getOrElse(beta)
      .toArray
      .toVector
  }
}
/**
 * Container pairing predicted values with the gradient of a linear model.
 *
 * @param values computed from the model
 * @param grad gradient vector, which are just the linear coefficients
 */
class LinearRegressionResult(values: Seq[Double], grad: Vector[Double]) extends PredictionResult[Double] {

  /**
   * Get the expected values for this prediction.
   *
   * @return expected value of each prediction
   */
  override def getExpected(): Seq[Double] = values

  /**
   * Get the gradient, which for a linear model is the same coefficient vector for every prediction.
   *
   * @return a vector of doubles for each prediction
   */
  override def getGradient(): Option[Seq[Vector[Double]]] = {
    val uniformGradient = Seq.fill(values.size)(grad)
    Some(uniformGradient)
  }
}
| CitrineInformatics/lolo | src/main/scala/io/citrine/lolo/linear/LinearRegression.scala | Scala | apache-2.0 | 6,260 |
package scala.pickling
package pickler
import scala.collection.generic.CanBuildFrom
// TODO(jsuereth) - Register runtime pickler generators
/** Provides the implicit pickler/unpickler for immutable `Vector[T]`. */
trait VectorPicklers {
  // Builds a combined Pickler/Unpickler for Vector[T] by delegating to the generic
  // SeqSetPickler, given element (un)picklers, type tags, and a CanBuildFrom.
  implicit def vectorPickler[T: FastTypeTag](implicit elemPickler: Pickler[T],
      elemUnpickler: Unpickler[T], collTag: FastTypeTag[Vector[T]], cbf: CanBuildFrom[Vector[T], T, Vector[T]]):
      Pickler[Vector[T]] with Unpickler[Vector[T]] =
    SeqSetPickler[T, Vector]
}
| beni55/pickling | core/src/main/scala/scala/pickling/pickler/Vector.scala | Scala | bsd-3-clause | 438 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.File
import kafka.utils._
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.message.DescribeLogDirsRequestData
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests._
import org.junit.Assert._
import org.junit.Test
/** Integration test for the DescribeLogDirs request/response round trip against a live broker. */
class DescribeLogDirsRequestTest extends BaseRequestTest {
  // Two log directories per broker so one can be failed while the other stays healthy
  override val logDirCount = 2
  override val brokerCount: Int = 1
  val topic = "topic"
  val partitionNum = 2
  val tp0 = new TopicPartition(topic, 0)
  val tp1 = new TopicPartition(topic, 1)

  @Test
  def testDescribeLogDirsRequest(): Unit = {
    val onlineDir = new File(servers.head.config.logDirs.head).getAbsolutePath
    val offlineDir = new File(servers.head.config.logDirs.tail.head).getAbsolutePath
    // Fail the second log dir before creating the topic, so all replicas land in the first dir
    servers.head.replicaManager.handleLogDirFailure(offlineDir)
    createTopic(topic, partitionNum, 1)
    TestUtils.generateAndProduceMessages(servers, topic, 10)

    // setTopics(null) requests a description of all topics
    val request = new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(null)).build()
    val response = connectAndReceive[DescribeLogDirsResponse](request, destination = controllerSocketServer)

    val logDirInfos = response.logDirInfos()
    assertEquals(logDirCount, logDirInfos.size())
    // The failed dir must report a storage error and host no replicas
    assertEquals(Errors.KAFKA_STORAGE_ERROR, logDirInfos.get(offlineDir).error)
    assertEquals(0, logDirInfos.get(offlineDir).replicaInfos.size())

    // The healthy dir hosts both partitions; reported sizes and lags must match the local logs
    assertEquals(Errors.NONE, logDirInfos.get(onlineDir).error)
    val replicaInfo0 = logDirInfos.get(onlineDir).replicaInfos.get(tp0)
    val replicaInfo1 = logDirInfos.get(onlineDir).replicaInfos.get(tp1)
    val log0 = servers.head.logManager.getLog(tp0).get
    val log1 = servers.head.logManager.getLog(tp1).get
    assertEquals(log0.size, replicaInfo0.size)
    assertEquals(log1.size, replicaInfo1.size)
    val logEndOffset = servers.head.logManager.getLog(tp0).get.logEndOffset
    assertTrue(s"LogEndOffset '$logEndOffset' should be > 0", logEndOffset > 0)
    assertEquals(servers.head.replicaManager.getLogEndOffsetLag(tp0, log0.logEndOffset, false), replicaInfo0.offsetLag)
    assertEquals(servers.head.replicaManager.getLogEndOffsetLag(tp1, log1.logEndOffset, false), replicaInfo1.offsetLag)
  }
}
| sslavic/kafka | core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala | Scala | apache-2.0 | 3,051 |
/** Boolean extractor that matches strings containing at least one uppercase character. */
object HasUpperCase1 {
  // Equivalent to s.exists(_.isUpper), expressed via De Morgan's law.
  def unapply(s: String): Boolean = !s.forall(ch => !ch.isUpper)
}
| grzegorzbalcerek/scala-book-examples | examples/Extractors1.scala | Scala | mit | 81 |
package ee.cone.c4gate
import java.lang.Math.toIntExact
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit
import com.sun.net.httpserver.{HttpExchange, HttpHandler, HttpServer}
import ee.cone.c4actor.{Executable, Execution, FinallyClose, Observer, Trace}
import ee.cone.c4gate.HttpProtocol.N_Header
import okio.ByteString
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.{Duration, SECONDS}
import scala.collection.JavaConverters.mapAsScalaMapConverter
import scala.collection.JavaConverters.iterableAsScalaIterableConverter
/*
 * this 'll be fallback impl:
 * sun HttpServer is not so effective due to blocking
 * */
/** Blocking adapter from the Sun HttpServer callback API to the async FHttpHandler. */
class SunReqHandler(handler: FHttpHandler, executionContext: ExecutionContext) extends HttpHandler {
  // Converts the exchange into an FHttpRequest, runs the handler, awaits its Future,
  // and writes the response back. FinallyClose guarantees the exchange is closed;
  // Trace is a project wrapper (presumably logs/propagates failures — TODO confirm).
  def handle(httpExchange: HttpExchange) =
    Trace{ FinallyClose[HttpExchange,Unit](_.close())(httpExchange) { ex =>
      // NOTE(review): `ex` shadows httpExchange but the body keeps using the outer reference.
      val method = httpExchange.getRequestMethod
      val path = httpExchange.getRequestURI.getPath
      // Flatten the multi-valued header map into a flat list of N_Header pairs
      val reqHeaders: List[N_Header] = httpExchange.getRequestHeaders.asScala
        .flatMap{ case(k,l)=>l.asScala.map(v=>N_Header(k,v)) }.toList
      // Read the entire request body into memory
      val buffer = (new okio.Buffer).readFrom(httpExchange.getRequestBody)
      val body = buffer.readByteString()
      val request = FHttpRequest(method, path, reqHeaders, body)
      val responseF = handler.handle(request)(executionContext)
      // Block this worker thread until the handler completes (up to 10 minutes)
      val response = Await.result(responseF,Duration(600,SECONDS))
      val headers = httpExchange.getResponseHeaders
      response.headers.foreach(header=>headers.add(header.key,header.value))
      val bytes = response.body.toByteArray
      // NOTE(review): in the Sun API, a responseLength of 0 means "unknown/chunked" —
      // confirm empty-body responses behave as intended here.
      httpExchange.sendResponseHeaders(toIntExact(response.status), bytes.length)
      if(bytes.nonEmpty) httpExchange.getResponseBody.write(bytes)
    } }
}
/** Starts a Sun HttpServer on `port`, serving every path via SunReqHandler on a dedicated pool. */
class SunHttpServer(port: Int, handler: FHttpHandler, execution: Execution) extends Executable {
  def run(): Unit = concurrent.blocking{
    val pool = execution.newExecutorService("http-",None) //newWorkStealingPool
    // Shutdown hook: stop accepting new tasks, then wait for in-flight work to drain.
    // (`tasks`, the never-started runnables returned by shutdownNow, are discarded.)
    execution.onShutdown("Pool",()=>{
      val tasks = pool.shutdownNow()
      pool.awaitTermination(Long.MaxValue,TimeUnit.SECONDS)
    })
    val executionContext: ExecutionContext = ExecutionContext.fromExecutor(pool)
    // Backlog 0 lets the system choose a default accept queue length
    val server: HttpServer = HttpServer.create(new InetSocketAddress(port),0)
    execution.onShutdown("HttpServer",()=>server.stop(Int.MaxValue))
    server.setExecutor(pool)
    // Route all requests to the single blocking handler
    server.createContext("/", new SunReqHandler(handler,executionContext))
    server.start()
  }
}
/** Receiver that applies incoming messages to its observers strictly in arrival order. */
class MutableStatefulReceiver[Message](execution: Execution, inner: List[Observer[Message]]) extends StatefulReceiver[Message] {
  // Current observer state, threaded through a chain of Futures: each message's processing
  // is appended to the previous one's completion, serializing message handling.
  var state: Future[List[Observer[Message]]] = Future.successful(inner)
  // The synchronized block guards the read-modify-write of `state` against concurrent senders;
  // execution.fatal presumably escalates failures of the chained future — TODO confirm.
  def send(message: Message): Unit = execution.fatal{ implicit ec =>
    synchronized{
      state = state.map(_.flatMap(_.activate(message)))
      state
    }
  }
}
/** Factory producing MutableStatefulReceiver instances, returned as an already-completed Future. */
class MutableStatefulReceiverFactory(execution: Execution) extends StatefulReceiverFactory {
  def create[Message](inner: List[Observer[Message]])(implicit executionContext: ExecutionContext): Future[StatefulReceiver[Message]] = {
    val receiver = new MutableStatefulReceiver[Message](execution, inner)
    Future.successful(receiver)
  }
}
| conecenter/c4proto | bak/c4gate-sun/src/main/scala/ee/cone/c4gate/SunServerImpl.scala | Scala | apache-2.0 | 3,192 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import java.util.Locale
import java.util.concurrent.TimeUnit._
import scala.collection.{GenMap, GenSeq}
import scala.collection.parallel.ForkJoinTaskSupport
import scala.collection.parallel.immutable.ParVector
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.mapred.{FileInputFormat, JobConf}
import org.apache.spark.internal.config.RDD_PARALLEL_LISTING_THRESHOLD
import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.Resolver
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation, PartitioningUtils}
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetSchemaConverter
import org.apache.spark.sql.internal.HiveSerDe
import org.apache.spark.sql.types._
import org.apache.spark.util.{SerializableConfiguration, ThreadUtils}
// Note: The definition of these commands are based on the ones described in
// https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL
/**
 * A command for users to create a new database.
 *
 * It will issue an error message when the database with the same name already exists,
 * unless 'ifNotExists' is true.
 * The syntax of using this command in SQL is:
 * {{{
 *    CREATE (DATABASE|SCHEMA) [IF NOT EXISTS] database_name
 *      [COMMENT database_comment]
 *      [LOCATION database_directory]
 *      [WITH DBPROPERTIES (property_name=property_value, ...)];
 * }}}
 *
 * @param databaseName name of the database to create
 * @param ifNotExists when true, silently succeed if the database already exists
 * @param path optional LOCATION for the database directory
 * @param comment optional database comment
 * @param props DBPROPERTIES key-value pairs
 */
case class CreateDatabaseCommand(
    databaseName: String,
    ifNotExists: Boolean,
    path: Option[String],
    comment: Option[String],
    props: Map[String, String])
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    catalog.createDatabase(
      CatalogDatabase(
        databaseName,
        comment.getOrElse(""),
        // When no LOCATION is given, use the catalog's default path for this database
        path.map(CatalogUtils.stringToURI).getOrElse(catalog.getDefaultDBPath(databaseName)),
        props),
      ifNotExists)
    Seq.empty[Row]
  }
}
/**
 * A command for users to remove a database from the system.
 *
 * 'ifExists':
 * - true, if database_name doesn't exist, no action
 * - false (default), if database_name doesn't exist, an error is raised
 * 'cascade':
 * - true, the dependent objects are automatically dropped before dropping database.
 * - false (default), it is in the Restrict mode. The database cannot be dropped if
 * it is not empty. The inclusive tables must be dropped at first.
 *
 * The syntax of using this command in SQL is:
 * {{{
 *    DROP DATABASE [IF EXISTS] database_name [RESTRICT|CASCADE];
 * }}}
 */
case class DropDatabaseCommand(
    databaseName: String,
    ifExists: Boolean,
    cascade: Boolean)
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    // Delegate existence/emptiness checking to the session catalog
    sparkSession.sessionState.catalog.dropDatabase(databaseName, ifExists, cascade)
    Seq.empty[Row]
  }
}
/**
 * A command for users to add new (key, value) pairs into DBPROPERTIES
 * If the database does not exist, an error message will be issued to indicate the database
 * does not exist.
 * The syntax of using this command in SQL is:
 * {{{
 *    ALTER (DATABASE|SCHEMA) database_name SET DBPROPERTIES (property_name=property_value, ...)
 * }}}
 *
 * @param databaseName database whose properties are updated
 * @param props properties to merge in; existing keys with the same name are overwritten
 */
case class AlterDatabasePropertiesCommand(
    databaseName: String,
    props: Map[String, String])
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val db: CatalogDatabase = catalog.getDatabaseMetadata(databaseName)
    // Map concatenation: right-hand side (new props) wins on key collisions
    catalog.alterDatabase(db.copy(properties = db.properties ++ props))
    Seq.empty[Row]
  }
}
/**
 * A command for users to set a new location path for a database.
 * If the database does not exist, an error message will be issued to indicate the database
 * does not exist.
 * The syntax of using this command in SQL is:
 * {{{
 *    ALTER (DATABASE|SCHEMA) database_name SET LOCATION path
 * }}}
 */
case class AlterDatabaseSetLocationCommand(databaseName: String, location: String)
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionCatalog = sparkSession.sessionState.catalog
    // Fetch the current metadata and rewrite only its location URI
    val current = sessionCatalog.getDatabaseMetadata(databaseName)
    val updated = current.copy(locationUri = CatalogUtils.stringToURI(location))
    sessionCatalog.alterDatabase(updated)
    Seq.empty[Row]
  }
}
/**
 * A command to show the name of the database, its comment (if one has been set), and its
 * root location on the filesystem. When extended is true, it also shows the database's properties.
 * If the database does not exist, an error message will be issued to indicate the database
 * does not exist.
 * The syntax of using this command in SQL is
 * {{{
 *    DESCRIBE DATABASE [EXTENDED] db_name
 * }}}
 */
case class DescribeDatabaseCommand(
    databaseName: String,
    extended: Boolean)
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val dbMetadata: CatalogDatabase =
      sparkSession.sessionState.catalog.getDatabaseMetadata(databaseName)
    val baseRows =
      Row("Database Name", dbMetadata.name) ::
        Row("Description", dbMetadata.description) ::
        Row("Location", CatalogUtils.URIToString(dbMetadata.locationUri)) :: Nil
    if (!extended) {
      baseRows
    } else {
      // Render properties as "(k1,v1, k2,v2, ...)"; empty map renders as the empty string
      val rendered =
        if (dbMetadata.properties.isEmpty) {
          ""
        } else {
          dbMetadata.properties.toSeq.mkString("(", ", ", ")")
        }
      baseRows :+ Row("Properties", rendered)
    }
  }

  override val output: Seq[Attribute] = {
    AttributeReference("database_description_item", StringType, nullable = false)() ::
      AttributeReference("database_description_value", StringType, nullable = false)() :: Nil
  }
}
/**
 * Drops a table/view from the metastore and removes it if it is cached.
 *
 * The syntax of this command is:
 * {{{
 *   DROP TABLE [IF EXISTS] table_name;
 *   DROP VIEW [IF EXISTS] [db_name.]view_name;
 * }}}
 *
 * @param tableName identifier of the table or view to drop
 * @param ifExists when true, silently succeed if the table/view does not exist
 * @param isView true when invoked as DROP VIEW, false for DROP TABLE
 * @param purge passed through to the catalog's dropTable call
 */
case class DropTableCommand(
    tableName: TableIdentifier,
    ifExists: Boolean,
    isView: Boolean,
    purge: Boolean) extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val isTempView = catalog.isTemporaryTable(tableName)

    if (!isTempView && catalog.tableExists(tableName)) {
      // If the command DROP VIEW is to drop a table or DROP TABLE is to drop a view
      // issue an exception.
      catalog.getTableMetadata(tableName).tableType match {
        case CatalogTableType.VIEW if !isView =>
          throw new AnalysisException(
            "Cannot drop a view with DROP TABLE. Please use DROP VIEW instead")
        case o if o != CatalogTableType.VIEW && isView =>
          // Fixed: removed the redundant `s` interpolator (the message has no interpolations)
          throw new AnalysisException(
            "Cannot drop a table with DROP VIEW. Please use DROP TABLE instead")
        case _ =>
      }
    }

    if (isTempView || catalog.tableExists(tableName)) {
      try {
        // Un-cache the table (and, for persistent tables, dependent cached plans too)
        sparkSession.sharedState.cacheManager.uncacheQuery(
          sparkSession.table(tableName), cascade = !isTempView)
      } catch {
        // Best effort: failure to uncache must not prevent the drop itself
        case NonFatal(e) => log.warn(e.toString, e)
      }
      catalog.refreshTable(tableName)
      catalog.dropTable(tableName, ifExists, purge)
    } else if (ifExists) {
      // no-op
    } else {
      throw new AnalysisException(s"Table or view not found: ${tableName.identifier}")
    }
    Seq.empty[Row]
  }
}
/**
 * A command that sets table/view properties.
 *
 * The syntax of this command is:
 * {{{
 *   ALTER TABLE table1 SET TBLPROPERTIES ('key1' = 'val1', 'key2' = 'val2', ...);
 *   ALTER VIEW view1 SET TBLPROPERTIES ('key1' = 'val1', 'key2' = 'val2', ...);
 * }}}
 *
 * @param tableName table or view whose properties are updated
 * @param properties properties to merge in; existing keys are overwritten
 * @param isView true when invoked through ALTER VIEW
 */
case class AlterTableSetPropertiesCommand(
    tableName: TableIdentifier,
    properties: Map[String, String],
    isView: Boolean)
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableMetadata(tableName)
    DDLUtils.verifyAlterTableType(catalog, table, isView)
    // This overrides old properties and update the comment parameter of CatalogTable
    // with the newly added/modified comment since CatalogTable also holds comment as its
    // direct property.
    val newTable = table.copy(
      properties = table.properties ++ properties,
      comment = properties.get("comment").orElse(table.comment))
    catalog.alterTable(newTable)
    Seq.empty[Row]
  }
}
/**
 * A command that unsets table/view properties.
 *
 * The syntax of this command is:
 * {{{
 *   ALTER TABLE table1 UNSET TBLPROPERTIES [IF EXISTS] ('key1', 'key2', ...);
 *   ALTER VIEW view1 UNSET TBLPROPERTIES [IF EXISTS] ('key1', 'key2', ...);
 * }}}
 */
case class AlterTableUnsetPropertiesCommand(
    tableName: TableIdentifier,
    propKeys: Seq[String],
    ifExists: Boolean,
    isView: Boolean)
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableMetadata(tableName)
    DDLUtils.verifyAlterTableType(catalog, table, isView)
    if (!ifExists) {
      // Without IF EXISTS, reject any absent key; "comment" is always considered present.
      propKeys.filterNot(k => table.properties.contains(k) || k == "comment").foreach { k =>
        throw new AnalysisException(
          s"Attempted to unset non-existent property '$k' in table '${table.identifier}'")
      }
    }
    // Unsetting "comment" also clears the table's comment field
    val newComment = if (propKeys.contains("comment")) None else table.comment
    val remaining = table.properties.filterNot { case (k, _) => propKeys.contains(k) }
    catalog.alterTable(table.copy(properties = remaining, comment = newComment))
    Seq.empty[Row]
  }
}
/**
 * A command to change the column for a table, only support changing the comment of a non-partition
 * column for now.
 *
 * The syntax of using this command in SQL is:
 * {{{
 *   ALTER TABLE table_identifier
 *   CHANGE [COLUMN] column_old_name column_new_name column_dataType [COMMENT column_comment]
 *   [FIRST | AFTER column_name];
 * }}}
 */
case class AlterTableChangeColumnCommand(
    tableName: TableIdentifier,
    columnName: String,
    newColumn: StructField) extends RunnableCommand {

  // TODO: support change column name/dataType/metadata/position.
  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableMetadata(tableName)
    // The resolver encodes the session's case-sensitivity setting for name comparison.
    val resolver = sparkSession.sessionState.conf.resolver
    DDLUtils.verifyAlterTableType(catalog, table, isView = false)

    // Find the origin column from dataSchema by column name.
    val originColumn = findColumnByName(table.dataSchema, columnName, resolver)
    // Throw an AnalysisException if the column name/dataType is changed: only the
    // comment may differ between the old and new column definitions.
    if (!columnEqual(originColumn, newColumn, resolver)) {
      throw new AnalysisException(
        "ALTER TABLE CHANGE COLUMN is not supported for changing column " +
        s"'${originColumn.name}' with type '${originColumn.dataType}' to " +
        s"'${newColumn.name}' with type '${newColumn.dataType}'")
    }

    // Rebuild the data schema with the re-commented column swapped in place.
    val newDataSchema = table.dataSchema.fields.map { field =>
      if (field.name == originColumn.name) {
        // Create a new column from the origin column with the new comment.
        addComment(field, newColumn.getComment)
      } else {
        field
      }
    }
    catalog.alterTableDataSchema(tableName, StructType(newDataSchema))

    Seq.empty[Row]
  }

  // Find the origin column from schema by column name, throw an AnalysisException if the column
  // reference is invalid.
  private def findColumnByName(
      schema: StructType, name: String, resolver: Resolver): StructField = {
    schema.fields.collectFirst {
      case field if resolver(field.name, name) => field
    }.getOrElse(throw new AnalysisException(
      s"Can't find column `$name` given table data columns " +
      s"${schema.fieldNames.mkString("[`", "`, `", "`]")}"))
  }

  // Add the comment to a column, if comment is empty, return the original column.
  private def addComment(column: StructField, comment: Option[String]): StructField =
    comment.map(column.withComment).getOrElse(column)

  // Compare a [[StructField]] to another, return true if they have the same column
  // name(by resolver) and dataType.
  private def columnEqual(
      field: StructField, other: StructField, resolver: Resolver): Boolean = {
    resolver(field.name, other.name) && field.dataType == other.dataType
  }
}
/**
 * A command that sets the serde class and/or serde properties of a table/view.
 *
 * The syntax of this command is:
 * {{{
 *   ALTER TABLE table [PARTITION spec] SET SERDE serde_name [WITH SERDEPROPERTIES props];
 *   ALTER TABLE table [PARTITION spec] SET SERDEPROPERTIES serde_properties;
 * }}}
 */
case class AlterTableSerDePropertiesCommand(
    tableName: TableIdentifier,
    serdeClassName: Option[String],
    serdeProperties: Option[Map[String, String]],
    partSpec: Option[TablePartitionSpec])
  extends RunnableCommand {

  // should never happen if we parsed things correctly
  require(serdeClassName.isDefined || serdeProperties.isDefined,
    "ALTER TABLE attempted to set neither serde class name nor serde properties")

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableMetadata(tableName)
    DDLUtils.verifyAlterTableType(catalog, table, isView = false)
    // For datasource tables, disallow setting serde or specifying partition
    if (partSpec.isDefined && DDLUtils.isDatasourceTable(table)) {
      throw new AnalysisException("Operation not allowed: ALTER TABLE SET " +
        "[SERDE | SERDEPROPERTIES] for a specific partition is not supported " +
        "for tables created with the datasource API")
    }
    if (serdeClassName.isDefined && DDLUtils.isDatasourceTable(table)) {
      throw new AnalysisException("Operation not allowed: ALTER TABLE SET SERDE is " +
        "not supported for tables created with the datasource API")
    }
    if (partSpec.isEmpty) {
      // No partition given: merge the serde/properties into the table-level storage.
      val newTable = table.withNewStorage(
        serde = serdeClassName.orElse(table.storage.serde),
        properties = table.storage.properties ++ serdeProperties.getOrElse(Map()))
      catalog.alterTable(newTable)
    } else {
      // Partition given: update only that partition's storage descriptor.
      val spec = partSpec.get
      val part = catalog.getPartition(table.identifier, spec)
      val newPart = part.copy(storage = part.storage.copy(
        serde = serdeClassName.orElse(part.storage.serde),
        properties = part.storage.properties ++ serdeProperties.getOrElse(Map())))
      catalog.alterPartitions(table.identifier, Seq(newPart))
    }
    Seq.empty[Row]
  }
}
/**
 * Add Partition in ALTER TABLE: add the table partitions.
 *
 * An error message will be issued if the partition exists, unless 'ifNotExists' is true.
 *
 * The syntax of this command is:
 * {{{
 *   ALTER TABLE table ADD [IF NOT EXISTS] PARTITION spec1 [LOCATION 'loc1']
 *                                         PARTITION spec2 [LOCATION 'loc2']
 * }}}
 */
case class AlterTableAddPartitionCommand(
    tableName: TableIdentifier,
    partitionSpecsAndLocs: Seq[(TablePartitionSpec, Option[String])],
    ifNotExists: Boolean)
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableMetadata(tableName)
    DDLUtils.verifyAlterTableType(catalog, table, isView = false)
    DDLUtils.verifyPartitionProviderIsHive(sparkSession, table, "ALTER TABLE ADD PARTITION")
    // Normalize each user-supplied spec against the table's partition columns
    // using the session resolver, then build the partition metadata.
    val parts = partitionSpecsAndLocs.map { case (spec, location) =>
      val normalizedSpec = PartitioningUtils.normalizePartitionSpec(
        spec,
        table.partitionColumnNames,
        table.identifier.quotedString,
        sparkSession.sessionState.conf.resolver)
      // inherit table storage format (possibly except for location)
      CatalogTablePartition(normalizedSpec, table.storage.copy(
        locationUri = location.map(CatalogUtils.stringToURI)))
    }
    catalog.createPartitions(table.identifier, parts, ignoreIfExists = ifNotExists)

    // Keep the table's size statistics consistent with the newly added data.
    if (table.stats.nonEmpty) {
      if (sparkSession.sessionState.conf.autoSizeUpdateEnabled) {
        // Add the on-disk size of the new partition locations to the stored size.
        val addedSize = parts.map { part =>
          CommandUtils.calculateLocationSize(sparkSession.sessionState, table.identifier,
            part.storage.locationUri)
        }.sum
        if (addedSize > 0) {
          val newStats = CatalogStatistics(sizeInBytes = table.stats.get.sizeInBytes + addedSize)
          catalog.alterTableStats(table.identifier, Some(newStats))
        }
      } else {
        // Auto size update disabled: existing stats are now stale, so clear them.
        catalog.alterTableStats(table.identifier, None)
      }
    }
    Seq.empty[Row]
  }
}
/**
 * Alter a table partition's spec.
 *
 * The syntax of this command is:
 * {{{
 *   ALTER TABLE table PARTITION spec1 RENAME TO PARTITION spec2;
 * }}}
 */
case class AlterTableRenamePartitionCommand(
    tableName: TableIdentifier,
    oldPartition: TablePartitionSpec,
    newPartition: TablePartitionSpec)
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionCatalog = sparkSession.sessionState.catalog
    val tableMeta = sessionCatalog.getTableMetadata(tableName)
    DDLUtils.verifyAlterTableType(sessionCatalog, tableMeta, isView = false)
    DDLUtils.verifyPartitionProviderIsHive(sparkSession, tableMeta, "ALTER TABLE RENAME PARTITION")

    // Both the old and the new spec are normalized against the table's partition
    // columns with the session resolver, so key-name casing does not matter.
    def normalize(spec: TablePartitionSpec): TablePartitionSpec =
      PartitioningUtils.normalizePartitionSpec(
        spec,
        tableMeta.partitionColumnNames,
        tableMeta.identifier.quotedString,
        sparkSession.sessionState.conf.resolver)

    sessionCatalog.renamePartitions(
      tableName, Seq(normalize(oldPartition)), Seq(normalize(newPartition)))
    Seq.empty[Row]
  }
}
/**
 * Drop Partition in ALTER TABLE: to drop a particular partition for a table.
 *
 * This removes the data and metadata for this partition.
 * The data is actually moved to the .Trash/Current directory if Trash is configured,
 * unless 'purge' is true, but the metadata is completely lost.
 * An error message will be issued if the partition does not exist, unless 'ifExists' is true.
 * Note: purge is always false when the target is a view.
 *
 * The syntax of this command is:
 * {{{
 *   ALTER TABLE table DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, ...] [PURGE];
 * }}}
 */
case class AlterTableDropPartitionCommand(
    tableName: TableIdentifier,
    specs: Seq[TablePartitionSpec],
    ifExists: Boolean,
    purge: Boolean,
    retainData: Boolean)
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val catalog = sparkSession.sessionState.catalog
    val table = catalog.getTableMetadata(tableName)
    DDLUtils.verifyAlterTableType(catalog, table, isView = false)
    DDLUtils.verifyPartitionProviderIsHive(sparkSession, table, "ALTER TABLE DROP PARTITION")
    // Normalize every spec against the table's partition columns before dropping.
    val normalizedSpecs = specs.map { spec =>
      PartitioningUtils.normalizePartitionSpec(
        spec,
        table.partitionColumnNames,
        table.identifier.quotedString,
        sparkSession.sessionState.conf.resolver)
    }
    catalog.dropPartitions(
      table.identifier, normalizedSpecs, ignoreIfNotExists = ifExists, purge = purge,
      retainData = retainData)
    // Dropping data changes the table's size, so refresh the stored statistics.
    CommandUtils.updateTableStats(sparkSession, table)
    Seq.empty[Row]
  }
}
// Fast file-level statistics (file count and total byte size) gathered for a
// single partition directory; used by AlterTableRecoverPartitionsCommand.
case class PartitionStatistics(numFiles: Int, totalSize: Long)
/**
 * Recover Partitions in ALTER TABLE: recover all the partition in the directory of a table and
 * update the catalog.
 *
 * The syntax of this command is:
 * {{{
 *   ALTER TABLE table RECOVER PARTITIONS;
 *   MSCK REPAIR TABLE table;
 * }}}
 */
case class AlterTableRecoverPartitionsCommand(
    tableName: TableIdentifier,
    cmd: String = "ALTER TABLE RECOVER PARTITIONS") extends RunnableCommand {

  // These are list of statistics that can be collected quickly without requiring a scan of the data
  // see https://github.com/apache/hive/blob/master/
  // common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
  val NUM_FILES = "numFiles"
  val TOTAL_SIZE = "totalSize"
  val DDL_TIME = "transient_lastDdlTime"

  // Builds a PathFilter that rejects Hadoop bookkeeping entries (_SUCCESS,
  // _temporary) and hidden paths, delegating anything else to the input path
  // filter configured in the job configuration (if any).
  private def getPathFilter(hadoopConf: Configuration): PathFilter = {
    // Dummy jobconf to get to the pathFilter defined in configuration
    // It's very expensive to create a JobConf(ClassUtil.findContainingJar() is slow)
    val jobConf = new JobConf(hadoopConf, this.getClass)
    val pathFilter = FileInputFormat.getInputPathFilter(jobConf)
    path: Path => {
      val name = path.getName
      if (name != "_SUCCESS" && name != "_temporary" && !name.startsWith(".")) {
        pathFilter == null || pathFilter.accept(path)
      } else {
        false
      }
    }
  }

  override def run(spark: SparkSession): Seq[Row] = {
    val catalog = spark.sessionState.catalog
    val table = catalog.getTableMetadata(tableName)
    val tableIdentWithDB = table.identifier.quotedString
    DDLUtils.verifyAlterTableType(catalog, table, isView = false)
    // Recovery only makes sense for partitioned tables with a known root location.
    if (table.partitionColumnNames.isEmpty) {
      throw new AnalysisException(
        s"Operation not allowed: $cmd only works on partitioned tables: $tableIdentWithDB")
    }
    if (table.storage.locationUri.isEmpty) {
      throw new AnalysisException(s"Operation not allowed: $cmd only works on table with " +
        s"location provided: $tableIdentWithDB")
    }

    val root = new Path(table.location)
    logInfo(s"Recover all the partitions in $root")
    val hadoopConf = spark.sessionState.newHadoopConf()
    val fs = root.getFileSystem(hadoopConf)

    val threshold = spark.sparkContext.conf.get(RDD_PARALLEL_LISTING_THRESHOLD)
    val pathFilter = getPathFilter(hadoopConf)

    // Scan the directory tree on a dedicated fork-join pool; shut the pool down
    // even when scanning throws.
    val evalPool = ThreadUtils.newForkJoinPool("AlterTableRecoverPartitionsCommand", 8)
    val partitionSpecsAndLocs: Seq[(TablePartitionSpec, Path)] =
      try {
        scanPartitions(spark, fs, pathFilter, root, Map(), table.partitionColumnNames, threshold,
          spark.sessionState.conf.resolver, new ForkJoinTaskSupport(evalPool)).seq
      } finally {
        evalPool.shutdown()
      }
    val total = partitionSpecsAndLocs.length
    logInfo(s"Found $total partitions in $root")

    val partitionStats = if (spark.sqlContext.conf.gatherFastStats) {
      gatherPartitionStats(spark, partitionSpecsAndLocs, fs, pathFilter, threshold)
    } else {
      GenMap.empty[String, PartitionStatistics]
    }
    logInfo(s"Finished to gather the fast stats for all $total partitions.")

    addPartitions(spark, table, partitionSpecsAndLocs, partitionStats)
    // Updates the table to indicate that its partition metadata is stored in the Hive metastore.
    // This is always the case for Hive format tables, but is not true for Datasource tables created
    // before Spark 2.1 unless they are converted via `msck repair table`.
    spark.sessionState.catalog.alterTable(table.copy(tracksPartitionsInCatalog = true))
    catalog.refreshTable(tableName)
    logInfo(s"Recovered all partitions ($total).")
    Seq.empty[Row]
  }

  // Recursively walks `path` looking for `column=value` directories, producing one
  // (spec, location) pair per leaf that matched every remaining partition column.
  // Non-matching or non-directory entries are logged and skipped.
  private def scanPartitions(
      spark: SparkSession,
      fs: FileSystem,
      filter: PathFilter,
      path: Path,
      spec: TablePartitionSpec,
      partitionNames: Seq[String],
      threshold: Int,
      resolver: Resolver,
      evalTaskSupport: ForkJoinTaskSupport): GenSeq[(TablePartitionSpec, Path)] = {
    if (partitionNames.isEmpty) {
      return Seq(spec -> path)
    }

    val statuses = fs.listStatus(path, filter)
    val statusPar: GenSeq[FileStatus] =
      if (partitionNames.length > 1 && statuses.length > threshold || partitionNames.length > 2) {
        // parallelize the list of partitions here, then we can have better parallelism later.
        val parArray = new ParVector(statuses.toVector)
        parArray.tasksupport = evalTaskSupport
        parArray
      } else {
        statuses
      }
    statusPar.flatMap { st =>
      val name = st.getPath.getName
      if (st.isDirectory && name.contains("=")) {
        val ps = name.split("=", 2)
        val columnName = ExternalCatalogUtils.unescapePathName(ps(0))
        // TODO: Validate the value
        val value = ExternalCatalogUtils.unescapePathName(ps(1))
        if (resolver(columnName, partitionNames.head)) {
          scanPartitions(spark, fs, filter, st.getPath, spec ++ Map(partitionNames.head -> value),
            partitionNames.drop(1), threshold, resolver, evalTaskSupport)
        } else {
          logWarning(
            s"expected partition column ${partitionNames.head}, but got ${ps(0)}, ignoring it")
          Seq.empty
        }
      } else {
        logWarning(s"ignore ${new Path(path, name)}")
        Seq.empty
      }
    }
  }

  // Collects numFiles/totalSize for every discovered partition. Above `threshold`
  // partitions this is distributed as a Spark job; otherwise it runs locally on
  // the driver's filesystem handle.
  private def gatherPartitionStats(
      spark: SparkSession,
      partitionSpecsAndLocs: GenSeq[(TablePartitionSpec, Path)],
      fs: FileSystem,
      pathFilter: PathFilter,
      threshold: Int): GenMap[String, PartitionStatistics] = {
    if (partitionSpecsAndLocs.length > threshold) {
      val hadoopConf = spark.sessionState.newHadoopConf()
      val serializableConfiguration = new SerializableConfiguration(hadoopConf)
      val serializedPaths = partitionSpecsAndLocs.map(_._2.toString).toArray

      // Set the number of parallelism to prevent following file listing from generating many tasks
      // in case of large #defaultParallelism.
      val numParallelism = Math.min(serializedPaths.length,
        Math.min(spark.sparkContext.defaultParallelism, 10000))
      // gather the fast stats for all the partitions otherwise Hive metastore will list all the
      // files for all the new partitions in sequential way, which is super slow.
      logInfo(s"Gather the fast stats in parallel using $numParallelism tasks.")
      spark.sparkContext.parallelize(serializedPaths, numParallelism)
        .mapPartitions { paths =>
          val pathFilter = getPathFilter(serializableConfiguration.value)
          paths.map(new Path(_)).map { path =>
            val fs = path.getFileSystem(serializableConfiguration.value)
            val statuses = fs.listStatus(path, pathFilter)
            (path.toString, PartitionStatistics(statuses.length, statuses.map(_.getLen).sum))
          }
        }.collectAsMap()
    } else {
      partitionSpecsAndLocs.map { case (_, location) =>
        val statuses = fs.listStatus(location, pathFilter)
        (location.toString, PartitionStatistics(statuses.length, statuses.map(_.getLen).sum))
      }.toMap
    }
  }

  // Registers the discovered partitions with the catalog in fixed-size batches,
  // attaching the fast stats (when gathered) as partition parameters.
  private def addPartitions(
      spark: SparkSession,
      table: CatalogTable,
      partitionSpecsAndLocs: GenSeq[(TablePartitionSpec, Path)],
      partitionStats: GenMap[String, PartitionStatistics]): Unit = {
    val total = partitionSpecsAndLocs.length
    var done = 0L
    // Hive metastore may not have enough memory to handle millions of partitions in single RPC,
    // we should split them into smaller batches. Since Hive client is not thread safe, we cannot
    // do this in parallel.
    val batchSize = 100
    partitionSpecsAndLocs.toIterator.grouped(batchSize).foreach { batch =>
      val now = MILLISECONDS.toSeconds(System.currentTimeMillis())
      val parts = batch.map { case (spec, location) =>
        val params = partitionStats.get(location.toString).map {
          case PartitionStatistics(numFiles, totalSize) =>
            // This two fast stat could prevent Hive metastore to list the files again.
            Map(NUM_FILES -> numFiles.toString,
              TOTAL_SIZE -> totalSize.toString,
              // Workaround a bug in HiveMetastore that try to mutate a read-only parameters.
              // see metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
              DDL_TIME -> now.toString)
        }.getOrElse(Map.empty)
        // inherit table storage format (possibly except for location)
        CatalogTablePartition(
          spec,
          table.storage.copy(locationUri = Some(location.toUri)),
          params)
      }
      spark.sessionState.catalog.createPartitions(tableName, parts, ignoreIfExists = true)
      done += parts.length
      logDebug(s"Recovered ${parts.length} partitions ($done/$total so far)")
    }
  }
}
/**
 * A command that sets the location of a table or a partition.
 *
 * For normal tables, this just sets the location URI in the table/partition's storage format.
 * For datasource tables, this sets a "path" parameter in the table/partition's serde properties.
 *
 * The syntax of this command is:
 * {{{
 *   ALTER TABLE table_name [PARTITION partition_spec] SET LOCATION "loc";
 * }}}
 */
case class AlterTableSetLocationCommand(
    tableName: TableIdentifier,
    partitionSpec: Option[TablePartitionSpec],
    location: String)
  extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    val sessionCatalog = sparkSession.sessionState.catalog
    val tableMeta = sessionCatalog.getTableMetadata(tableName)
    val newLocation = CatalogUtils.stringToURI(location)
    DDLUtils.verifyAlterTableType(sessionCatalog, tableMeta, isView = false)
    partitionSpec.fold {
      // No partition spec: relocate the table itself.
      sessionCatalog.alterTable(tableMeta.withNewStorage(locationUri = Some(newLocation)))
    } { spec =>
      DDLUtils.verifyPartitionProviderIsHive(
        sparkSession, tableMeta, "ALTER TABLE ... SET LOCATION")
      // Partition spec given: only that partition's storage location changes.
      val oldPart = sessionCatalog.getPartition(tableMeta.identifier, spec)
      val updatedPart = oldPart.copy(storage = oldPart.storage.copy(locationUri = Some(newLocation)))
      sessionCatalog.alterPartitions(tableMeta.identifier, Seq(updatedPart))
    }
    // The location change may alter the table's size statistics.
    CommandUtils.updateTableStats(sparkSession, tableMeta)
    Seq.empty[Row]
  }
}
/** Shared helper predicates and validations for DDL commands. */
object DDLUtils {
  val HIVE_PROVIDER = "hive"

  // True if the table's provider is Hive.
  def isHiveTable(table: CatalogTable): Boolean = {
    isHiveTable(table.provider)
  }

  def isHiveTable(provider: Option[String]): Boolean = {
    provider.isDefined && provider.get.toLowerCase(Locale.ROOT) == HIVE_PROVIDER
  }

  // True if the table has a provider and it is NOT Hive, i.e. a datasource table.
  def isDatasourceTable(table: CatalogTable): Boolean = {
    table.provider.isDefined && table.provider.get.toLowerCase(Locale.ROOT) != HIVE_PROVIDER
  }

  // Builds a logical relation over a Hive table's metadata.
  def readHiveTable(table: CatalogTable): HiveTableRelation = {
    HiveTableRelation(
      table,
      // Hive table columns are always nullable.
      table.dataSchema.asNullable.toAttributes,
      table.partitionSchema.asNullable.toAttributes)
  }

  /**
   * Throws a standard error for actions that require partitionProvider = hive.
   */
  def verifyPartitionProviderIsHive(
      spark: SparkSession, table: CatalogTable, action: String): Unit = {
    val tableName = table.identifier.table
    if (!spark.sqlContext.conf.manageFilesourcePartitions && isDatasourceTable(table)) {
      throw new AnalysisException(
        s"$action is not allowed on $tableName since filesource partition management is " +
          "disabled (spark.sql.hive.manageFilesourcePartitions = false).")
    }
    if (!table.tracksPartitionsInCatalog && isDatasourceTable(table)) {
      throw new AnalysisException(
        s"$action is not allowed on $tableName since its partition metadata is not stored in " +
          "the Hive metastore. To import this information into the metastore, run " +
          s"`msck repair table $tableName`")
    }
  }

  /**
   * If the command ALTER VIEW is to alter a table or ALTER TABLE is to alter a view,
   * issue an exception [[AnalysisException]].
   *
   * Note: temporary views can be altered by both ALTER VIEW and ALTER TABLE commands,
   * since temporary views can be also created by CREATE TEMPORARY TABLE. In the future,
   * when we decided to drop the support, we should disallow users to alter temporary views
   * by ALTER TABLE.
   */
  def verifyAlterTableType(
      catalog: SessionCatalog,
      tableMetadata: CatalogTable,
      isView: Boolean): Unit = {
    if (!catalog.isTemporaryTable(tableMetadata.identifier)) {
      tableMetadata.tableType match {
        case CatalogTableType.VIEW if !isView =>
          throw new AnalysisException(
            "Cannot alter a view with ALTER TABLE. Please use ALTER VIEW instead")
        case o if o != CatalogTableType.VIEW && isView =>
          throw new AnalysisException(
            s"Cannot alter a table with ALTER VIEW. Please use ALTER TABLE instead")
        case _ =>
      }
    }
  }

  // Validates all data column names of `table` against serde-specific restrictions.
  private[sql] def checkDataColNames(table: CatalogTable): Unit = {
    checkDataColNames(table, table.dataSchema.fieldNames)
  }

  // Dispatches field-name validation to the ORC or Parquet checker depending on
  // the table's provider (and, for Hive tables, on the configured serde class).
  private[sql] def checkDataColNames(table: CatalogTable, colNames: Seq[String]): Unit = {
    table.provider.foreach {
      _.toLowerCase(Locale.ROOT) match {
        case HIVE_PROVIDER =>
          val serde = table.storage.serde
          if (serde == HiveSerDe.sourceToSerDe("orc").get.serde) {
            OrcFileFormat.checkFieldNames(colNames)
          } else if (serde == HiveSerDe.sourceToSerDe("parquet").get.serde ||
            serde == Some("parquet.hive.serde.ParquetHiveSerDe") ||
            serde == Some("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe")) {
            ParquetSchemaConverter.checkFieldNames(colNames)
          }
        case "parquet" => ParquetSchemaConverter.checkFieldNames(colNames)
        case "orc" => OrcFileFormat.checkFieldNames(colNames)
        case _ =>
      }
    }
  }

  /**
   * Throws exception if outputPath tries to overwrite inputpath.
   */
  def verifyNotReadPath(query: LogicalPlan, outputPath: Path) : Unit = {
    val inputPaths = query.collect {
      case LogicalRelation(r: HadoopFsRelation, _, _, _) =>
        r.location.rootPaths
    }.flatten

    if (inputPaths.contains(outputPath)) {
      throw new AnalysisException(
        "Cannot overwrite a path that is also being read from.")
    }
  }
}
| caneGuy/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala | Scala | apache-2.0 | 35,531 |
package circumflex
import core._, web._
import java.io._
import _root_.freemarker.template._
/*!# The `ftl` package
Package `ftl` contains rendering methods, `ftl` for use in Circumflex Web Framework and
`ftl2xxx` to render an FTL template into `xxx`. It also maintains Freemarker configuration,
use `ftlConfig` to access it if you need custom operations, or use `ftl.configuration`
configuration parameter to provide your own implementation of FreeMarker `Configuration`.
You should import this package to use Circumflex FreeMarker Helper in your application:
``` {.scala}
import circumflex.freemarker._
```
*/
package object freemarker {

  val FTL_LOG = new Logger("circumflex.ftl")

  /** Default FreeMarker configuration; overridable via the `ftl.configuration` parameter. */
  val DEFAULT_FTL_CONFIGURATION: Configuration =
    cx.instantiate[Configuration](
      "ftl.configuration", new DefaultConfiguration)

  /** Effective FreeMarker configuration, preferring a context-local override. */
  def ftlConfig = ctx.getAs[Configuration]("ftl.configuration")
      .getOrElse(DEFAULT_FTL_CONFIGURATION)

  /** Renders `template` with `data` into the current HTTP response body and flushes it. */
  def ftl(template: String, data: Any = ctx): Nothing =
    response.body { r =>
      ftlConfig
          .getTemplate(template)
          .process(data, r.getWriter)
    }.flush()

  /** Renders `template` with `root` and returns the rendered output as a string. */
  def ftl2string(template: String, root: Any = ctx): String = {
    val result = new StringWriter
    ftlConfig.getTemplate(template).process(root, result)
    result.toString
  }

  /*!# Configuring Object Wrapper

  Circumflex FreeMarker Helper provides facilities to make Scala objects available inside
  FreeMarker templates. These facilities are implemented inside `ScalaObjectWrapper`.

  There are couple of things which can be configured:

    * by default, all public fields can be resolved on any object (e.g. `${myObj.myField}`);
      to disable this, set `ftl.wrapper.resolveFields` configuration parameter to `false`;

    * by default, all public methods can be resolved on any object (e.g. `${myObj.myMethod("Hello")}`);
      to disable this, set `ftl.wrapper.resolveMethods` configuration parameter to `false`;

    * you can set `ftl.wrapper.delegateToDefault` configuration parameter to `true` in order to
      delegate resolving to FreeMarker's default object wrapper (`ObjectWrapper.DEFAULT_WRAPPER`);
      this can be useful if you work with Java types in your Scala applications (e.g. Java lists or
      maps); by default the delegation does not occur (`null` is returned if resolving fails).
  */

  // All three wrapper flags follow the same "Boolean-or-String" configuration
  // convention; the parsing is factored into a single helper so they cannot drift.
  // NOTE(review): `toBoolean` throws IllegalArgumentException for strings other
  // than "true"/"false" — identical to the previous inline matches.
  private def booleanCxParam(key: String, default: Boolean): Boolean =
    cx.get(key) match {
      case Some(b: Boolean) => b
      case Some(s: String) => s.toBoolean
      case _ => default
    }

  val resolveFields = booleanCxParam("ftl.wrapper.resolveFields", default = true)
  val resolveMethods = booleanCxParam("ftl.wrapper.resolveMethods", default = true)
  val delegateToDefault = booleanCxParam("ftl.wrapper.delegateToDefault", default = false)
}
| inca/circumflex | ftl/src/main/scala/package.scala | Scala | bsd-2-clause | 2,825 |
package com.cyrusinnovation.computation.persistence.writer
import com.cyrusinnovation.computation.specification.Library
/** Persists a computation [[Library]] to some backing store; implementations decide the format. */
trait Writer {
  // Writes the entire library; presumably side-effecting — TODO confirm against implementations.
  def write(library: Library)
}
| psfblair/computation-engine | persistence/src/main/scala/com/cyrusinnovation/computation/persistence/writer/Writer.scala | Scala | apache-2.0 | 169 |
package chapter3
object Exercise3_19 {

  /**
   * Removes elements from `l` that do not satisfy the predicate `f`.
   *
   * The original implementation recursed one stack frame per element and could
   * overflow the stack on long lists; this version uses a tail-recursive
   * accumulator (building the result reversed, then reversing once), which is
   * stack-safe while producing exactly the same list.
   */
  def filter[A](l: List[A])(f: A => Boolean): List[A] = {
    @scala.annotation.tailrec
    def loop(rest: List[A], acc: List[A]): List[A] = rest match {
      case Nil => reverse(acc)
      case Cons(h, tail) => loop(tail, if (f(h)) Cons(h, acc) else acc)
    }
    loop(l, Nil)
  }

  // Tail-recursive linear-time list reversal; restores original element order.
  private def reverse[A](l: List[A]): List[A] = {
    @scala.annotation.tailrec
    def go(rest: List[A], acc: List[A]): List[A] = rest match {
      case Nil => acc
      case Cons(h, tail) => go(tail, Cons(h, acc))
    }
    go(l, Nil)
  }

  def main(args: Array[String]): Unit = {
    val l = List(1, 2, 3, 4)
    assert(filter(l)( _ > 5) == Nil)
    assert(filter(l)( _ < 5) == l)
    assert(filter(l)( _ < 3) == List(1, 2))
    println("All tests successful")
  }
}
package io.buoyant.namerd.iface
import com.google.common.base.Ticker
import com.google.common.cache.{CacheBuilder, RemovalNotification}
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.util.{Return, Stopwatch, Throw, Try}
import io.buoyant.namerd.iface.ThriftNamerInterface.Observer
import java.util.concurrent.TimeUnit.SECONDS
import java.util.concurrent.{Callable, ConcurrentHashMap}
/** Signals that the active observation cache is at capacity and no new observer may be created. */
class MaximumObservationsReached(maxObservations: Int)
  extends Exception(s"The maximum number of concurrent observations has been reached ($maxObservations)")
/**
 * A cache for Observer[T]. ObserverCache contains two levels of caching: active and inactive.
 *
 * The active cache is for Observers that have current outstanding requests against them.
 * Therefore, any Observer in the active cache must not be closed as this would cause the
 * outstanding requests to hang.
 *
 * The inactive cache is for Observers without outstanding requests against them. These Observers
 * are kept open while in the inactive cache in case they receive a request and need to be moved
 * to the active cache. The inactive cache has LRU and time-based eviction and Observers are
 * closed upon eviction. Since Observers in the inactive cache have no outstanding requests against
 * them, this is safe to do.
 *
 * When get is called, the Observer for that key is moved to the active cache if it exists or
 * created and placed in the active cache if it does not exist. When the value of an Observer
 * in the active cache changes, the Observer is moved to the inactive cache. If the inactive
 * cache is full at that time, an older inactive Observer will be evicted.
 *
 * @param activeCapacity The maximum size of the active cache. If get would cause the active cache
 *                       to exceed this size, a MaximumObservationsReached exception is returned
 *                       instead.
 * @param inactiveCapacity The maximum size of the inactive cache. LRU eviction is used to
 *                         maintain this constraint.
 * @param inactiveTTLSecs The amount of time, in seconds, to keep observer in inactive cache before
 *                        expiring them.
 * @param mkObserver The function to use to create new Observers if they are not in either cache.
 * @param stopwatch Function used to measure expiration time. System.nanoTime() used by default.
 */
class ObserverCache[K <: AnyRef, T](
  activeCapacity: Int,
  inactiveCapacity: Int,
  inactiveTTLSecs: Int,
  stats: StatsReceiver,
  mkObserver: K => Observer[T],
  stopwatch: () => Long = Stopwatch.systemNanos
) {

  // Fast path: lockless read of the active cache; on miss, take the monitor and
  // re-check before creating/promoting an observer (another thread may have won).
  def get(key: K): Try[Observer[T]] =
    Option(activeCache.get(key)).map(Return(_)).getOrElse {
      synchronized {
        // now that we have entered the synchronized block we again check that key hasn't entered
        // the active cache
        Option(activeCache.get(key))
          .map(Return(_))
          .getOrElse(makeActive(key))
      }
    }

  // ConcurrentHashMap is used to make reads lockless, but all updates are explicitly synchronized
  private[this] val activeCache = new ConcurrentHashMap[K, Observer[T]]

  // LRU + TTL cache of idle observers. Evicted entries are closed, which is safe
  // because observers in this cache have no outstanding requests against them.
  private[this] val inactiveCache = CacheBuilder.newBuilder()
    .ticker(new Ticker {
      override def read(): Long = stopwatch()
    })
    .maximumSize(inactiveCapacity)
    .expireAfterAccess(inactiveTTLSecs, SECONDS)
    .removalListener(
      (notification: RemovalNotification[K, Observer[T]]) =>
        if (notification.wasEvicted) {
          val _ = notification.getValue.close()
        }
    )
    .build[K, Observer[T]]()

  // Gauges exposing the current size of each cache level.
  private[this] val activeSize = stats.addGauge("active")(activeCache.size)
  private[this] val inactiveSize = stats.addGauge("inactive")(inactiveCache.size)

  // Promotes an inactive observer (or creates a new one) into the active cache,
  // or fails with MaximumObservationsReached when at capacity. Always called with
  // the monitor held (synchronized is reentrant, so the nested lock is cheap).
  private[this] def makeActive(key: K): Try[Observer[T]] = synchronized {
    if (activeCache.size < activeCapacity) {
      val obs = Option(inactiveCache.getIfPresent(key)).getOrElse {
        mkObserver(key)
      }
      activeCache.put(key, obs)
      inactiveCache.invalidate(key)
      // Once the observed value next changes, outstanding requests complete and
      // the observer is demoted back to the inactive cache.
      obs.nextValue.ensure {
        makeInactive(key, obs)
      }
      Return(obs)
    } else {
      Throw(new MaximumObservationsReached(activeCapacity))
    }
  }

  // Demotes an observer from the active to the inactive cache.
  private[this] def makeInactive(key: K, obs: Observer[T]): Unit = synchronized {
    activeCache.remove(key)
    // insert obs into the inactive cache if it's not present
    val _ = inactiveCache.get(
      key, new Callable[Observer[T]] {
        def call = obs
      }
    )
  }

  // Only for testing purpose
  // Caches built with CacheBuilder do not perform cleanup instantly after a value expires
  // https://github.com/google/guava/wiki/CachesExplained#when-does-cleanup-happen
  private[iface] def inactiveCacheCleanup() = inactiveCache.cleanUp()
}
| linkerd/linkerd | namerd/iface/interpreter-thrift/src/main/scala/io/buoyant/namerd/iface/ObserverCache.scala | Scala | apache-2.0 | 4,747 |
package org.cakesolutions.akkapatterns.api
import cc.spray.test.SprayTest
import java.util.concurrent.TimeUnit
import akka.actor.ActorRef
import cc.spray.RequestContext
import cc.spray.http._
import io.Source
import org.specs2.mutable.Specification
import org.cakesolutions.akkapatterns.test.{DefaultTestData, SpecConfiguration}
import org.cakesolutions.akkapatterns.core.Core
import concurrent.util.Duration
/** Test helper that drives a request through a root Spray service actor. */
trait RootSprayTest extends SprayTest {

  /**
   * Sends `request` to the given root service actor and blocks until the route
   * completes or `timeout` elapses, returning the wrapped route result.
   */
  protected def testRoot(request: HttpRequest, timeout: Duration = Duration(10000, TimeUnit.MILLISECONDS))
                        (root: ActorRef): ServiceResultWrapper = {
    val routeResult = new RouteResult
    root !
      RequestContext(
        request = request,
        responder = routeResult.requestResponder,
        unmatchedPath = request.path
      )

    // since the route might detach we block until the route actually completes or times out
    routeResult.awaitResult(timeout)
    new ServiceResultWrapper(routeResult, timeout)
  }
}
/** Loads JSON fixtures from the test classpath. */
trait JsonSource {
  // Reads the classpath resource at `location` fully into a string.
  def jsonFor(location: String) = Source.fromInputStream(classOf[JsonSource].getResourceAsStream(location)).mkString
  // Wraps the resource contents as `application/json` HTTP request content.
  def jsonContent(location: String) = Some(HttpContent(ContentType(MediaTypes.`application/json`), jsonFor(location)))
}
/**
 * Convenience trait for API tests
 */
trait ApiSpecification extends Specification with SpecConfiguration with RootSprayTest with Core with Api with Unmarshallers with Marshallers with LiftJSON {
  import cc.spray.typeconversion._

  // Executes the request against the root actor and returns the raw HTTP response.
  protected def respond(method: HttpMethod, url: String, content: Option[HttpContent] = None)
                       (implicit root: ActorRef) = {
    val request = HttpRequest(method, url, content = content)
    testRoot(request)(root).response
  }

  // Executes the request and unmarshals the response body to `A`; throws on
  // unmarshalling failure so the test fails loudly.
  protected def perform[A](method: HttpMethod, url: String, content: Option[HttpContent] = None)
                          (implicit root: ActorRef, unmarshaller: Unmarshaller[A]): A = {
    val request = HttpRequest(method, url, content = content)
    val response = testRoot(request)(root).response.content
    val obj = response.as[A] match {
      case Left(e) => throw new Exception(e.toString)
      case Right(r) => r
    }
    obj
  }

  // Marshals `in` (as application/json), executes the request, and unmarshals the
  // response to `Out`; throws if `in` cannot be marshalled.
  protected def perform[In, Out](method: HttpMethod, url: String, in: In)
                                (implicit root: ActorRef, marshaller: Marshaller[In], unmarshaller: Unmarshaller[Out]): Out = {
    marshaller(t => Some(t)) match {
      case MarshalWith(f) =>
        val sb = new StringBuilder()
        val ctx = new StringBuilderMarshallingContent(sb)
        f(ctx)(in)
        perform[Out](method, url, Some(HttpContent(ContentType(MediaTypes.`application/json`), sb.toString())))
      case CantMarshal(_) =>
        throw new Exception("Cant marshal " + in)
    }
  }
}
/**
 * Convenience trait for API tests; with default test data.
 * Adds canned fixtures (DefaultTestData) and classpath JSON loading (JsonSource)
 * on top of the full ApiSpecification stack.
 */
trait DefaultApiSpecification extends ApiSpecification with DefaultTestData with JsonSource
package com.twitter.finagle.http.codec
import com.twitter.finagle.ChannelBufferUsageException
import com.twitter.conversions.storage._
import org.jboss.netty.channel._
import org.jboss.netty.buffer.ChannelBuffers
import org.junit.runner.RunWith
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ChannelBufferManagerTest extends FunSuite with MockitoSugar {
  // Shared Netty mocks; every message event reports the same channel.
  val me = mock[MessageEvent]
  val c = mock[Channel]
  val ctx = mock[ChannelHandlerContext]
  val e = mock[ChannelStateEvent]
  val wce = mock[WriteCompletionEvent]
  when(me.getChannel).thenReturn(c)

  /** Stubs the mocked MessageEvent to return a fresh direct buffer of `channelCapacity` bytes. */
  def makeGetMessage(channelCapacity: Int): Unit = {
    val channelBuffer = ChannelBuffers.directBuffer(channelCapacity)
    doReturn(channelBuffer).when(me).getMessage
  }

  /** Builds a tracker with a 1000-byte limit and sanity-checks that the limit took effect. */
  def usageTrackerFactory() = {
    val usageTracker = new ChannelBufferUsageTracker(1000.bytes)
    assert(usageTracker.usageLimit == (1000.bytes))
    usageTracker
  }

  def handlerFactory(usageTracker: ChannelBufferUsageTracker) = new ChannelBufferManager(usageTracker)

  test("track the capacity of the channel buffer") {
    val usageTracker = usageTrackerFactory()
    val handler = handlerFactory(usageTracker)

    makeGetMessage(256)
    handler.messageReceived(ctx, me)
    assert(usageTracker.currentUsage == (256.bytes))

    makeGetMessage(512)
    handler.messageReceived(ctx, me)
    assert(usageTracker.currentUsage == (768.bytes))

    handler.writeComplete(ctx, wce)
    // BUG FIX: the four checks below were bare boolean expressions whose results were
    // silently discarded, so this test could never fail on them. They are now real
    // assertions matching the test's stated intent.
    assert(usageTracker.currentUsage == (0.bytes))

    makeGetMessage(128)
    handler.messageReceived(ctx, me)
    assert(usageTracker.currentUsage == (128.bytes))

    handler.channelClosed(ctx, e)
    assert(usageTracker.currentUsage == (0.bytes))
    assert(usageTracker.maxUsage == (768.bytes))
  }

  test("throw exception if usage exceeds limit at the beginning of the request") {
    val usageTracker = usageTrackerFactory()
    val handler = handlerFactory(usageTracker)
    usageTracker.setUsageLimit(10.bytes)
    assert(usageTracker.usageLimit == (10.bytes))
    assert(usageTracker.currentUsage == (0.bytes))

    // A 20-byte message against a 10-byte limit must be rejected outright.
    makeGetMessage(20)
    intercept[ChannelBufferUsageException] {
      handler.messageReceived(ctx, me)
    }
    assert(usageTracker.currentUsage == (0.bytes))
    handler.channelClosed(ctx, e)
    assert(usageTracker.currentUsage == (0.bytes))
    assert(usageTracker.maxUsage == (0.bytes))
  }

  test("throw exception if usage exceeds limit in the middle of the request") {
    val usageTracker = usageTrackerFactory()
    val handler = handlerFactory(usageTracker)
    usageTracker.setUsageLimit(300.bytes)
    assert(usageTracker.usageLimit == (300.bytes))
    assert(usageTracker.currentUsage == (0.bytes))

    makeGetMessage(100)
    handler.messageReceived(ctx, me)
    assert(usageTracker.currentUsage == (100.bytes))

    // 100 + 350 would exceed the 300-byte limit, so the second message is rejected
    // and usage stays where it was.
    makeGetMessage(350)
    intercept[ChannelBufferUsageException] {
      handler.messageReceived(ctx, me)
    }
    assert(usageTracker.currentUsage == (100.bytes))
    assert(usageTracker.maxUsage == (100.bytes))

    makeGetMessage(50)
    handler.messageReceived(ctx, me)
    assert(usageTracker.currentUsage == (150.bytes))
    assert(usageTracker.maxUsage == (150.bytes))

    makeGetMessage(150)
    handler.messageReceived(ctx, me)
    assert(usageTracker.currentUsage == (300.bytes))
    handler.channelClosed(ctx, e)
    assert(usageTracker.currentUsage == (0.bytes))
    assert(usageTracker.maxUsage == (300.bytes))
  }
}
| sveinnfannar/finagle | finagle-http/src/test/scala/com/twitter/finagle/http/codec/ChannelBufferManagerTest.scala | Scala | apache-2.0 | 3,511 |
package mesosphere.marathon.api.v2
import java.net.URI
import javax.inject.{ Inject, Named }
import javax.servlet.http.HttpServletRequest
import javax.ws.rs._
import javax.ws.rs.core.{ Context, MediaType, Response }
import akka.event.EventStream
import com.codahale.metrics.annotation.Timed
import mesosphere.marathon.api.v2.Validation._
import mesosphere.marathon.api.v2.json.AppUpdate
import mesosphere.marathon.api.v2.json.Formats._
import mesosphere.marathon.api.{ AuthResource, MarathonMediaType, RestResource }
import mesosphere.marathon.core.appinfo.{ AppInfoService, AppSelector, TaskCounts }
import mesosphere.marathon.core.appinfo.AppInfo
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.event.{ ApiPostEvent, EventModule }
import mesosphere.marathon.plugin.auth._
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state._
import mesosphere.marathon.{ ConflictingChangeException, MarathonConf, MarathonSchedulerService, UnknownAppException }
import play.api.libs.json.Json
import scala.collection.JavaConverters._
import scala.collection.immutable.Seq
/**
 * REST resource for the `/v2/apps` endpoints: list, create, read, update,
 * delete and restart Marathon apps. All handlers authenticate the request and
 * check per-app authorization before touching state.
 */
@Path("v2/apps")
@Consumes(Array(MediaType.APPLICATION_JSON))
@Produces(Array(MarathonMediaType.PREFERRED_APPLICATION_JSON))
class AppsResource @Inject() (
    clock: Clock,
    @Named(EventModule.busName) eventBus: EventStream,
    appTasksRes: AppTasksResource,
    service: MarathonSchedulerService,
    appInfoService: AppInfoService,
    val config: MarathonConf,
    val authenticator: Authenticator,
    val authorizer: Authorizer,
    groupManager: GroupManager) extends RestResource with AuthResource {

  // Matches ids that end in "*" (optionally preceded by a group path, captured in
  // group 1); such a GET lists every app in that group transitively instead of one app.
  private[this] val ListApps = """^((?:.+/)|)\\*$""".r

  /**
   * GET /v2/apps — lists apps, optionally filtered by cmd substring, id substring
   * and label selector, returning only apps the caller may view.
   */
  @GET
  @Timed
  def index(@QueryParam("cmd") cmd: String,
            @QueryParam("id") id: String,
            @QueryParam("label") label: String,
            @QueryParam("embed") embed: java.util.Set[String],
            @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    val selector = selectAuthorized(search(Option(cmd), Option(id), Option(label)))
    // additional embeds are deprecated!
    val resolvedEmbed = InfoEmbedResolver.resolveApp(embed.asScala.toSet) +
      AppInfo.Embed.Counts + AppInfo.Embed.Deployments
    val mapped = result(appInfoService.selectAppsBy(selector, resolvedEmbed))
    Response.ok(jsonObjString("apps" -> mapped)).build()
  }

  /**
   * POST /v2/apps — creates a new app. Fails with a conflict if an app with the
   * same id already exists; otherwise triggers a deployment and returns 201.
   */
  @POST
  @Timed
  def create(body: Array[Byte],
             @DefaultValue("false")@QueryParam("force") force: Boolean,
             @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    withValid(Json.parse(body).as[AppDefinition].withCanonizedIds()) { appDef =>
      val now = clock.now()
      val app = appDef.copy(versionInfo = AppDefinition.VersionInfo.OnlyVersion(now))
      checkAuthorization(CreateApp, app)

      // Any pre-existing app under the same id is a conflict, never an update.
      def createOrThrow(opt: Option[AppDefinition]) = opt
        .map(_ => throw new ConflictingChangeException(s"An app with id [${app.id}] already exists."))
        .getOrElse(app)

      val plan = result(groupManager.updateApp(app.id, createOrThrow, app.version, force))

      // The created app is returned with empty counts/tasks plus the id of the
      // deployment that is rolling it out.
      val appWithDeployments = AppInfo(
        app,
        maybeCounts = Some(TaskCounts.zero),
        maybeTasks = Some(Seq.empty),
        maybeDeployments = Some(Seq(Identifiable(plan.id)))
      )

      maybePostEvent(req, appWithDeployments.app)
      Response
        .created(new URI(app.id.toString))
        .entity(jsonString(appWithDeployments))
        .build()
    }
  }

  /**
   * GET /v2/apps/{id} — shows one app, or (when the id ends in "*") all apps of
   * the addressed group, transitively.
   */
  @GET
  @Path("""{id:.+}""")
  @Timed
  def show(@PathParam("id") id: String,
           @QueryParam("embed") embed: java.util.Set[String],
           @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    val resolvedEmbed = InfoEmbedResolver.resolveApp(embed.asScala.toSet) ++ Set(
      // deprecated. For compatibility.
      AppInfo.Embed.Counts, AppInfo.Embed.Tasks, AppInfo.Embed.LastTaskFailure, AppInfo.Embed.Deployments
    )

    // Group listing branch: the caller must be allowed to view the group itself;
    // the per-app selector then filters out apps the caller may not see.
    def transitiveApps(groupId: PathId): Response = {
      result(groupManager.group(groupId)) match {
        case Some(group) =>
          checkAuthorization(ViewGroup, group)
          val appsWithTasks = result(appInfoService.selectAppsInGroup(groupId, allAuthorized, resolvedEmbed))
          ok(jsonObjString("*" -> appsWithTasks))
        case None =>
          unknownGroup(groupId)
      }
    }

    // Single-app branch.
    def app(appId: PathId): Response = {
      result(appInfoService.selectApp(appId, allAuthorized, resolvedEmbed)) match {
        case Some(appInfo) =>
          checkAuthorization(ViewApp, appInfo.app)
          ok(jsonObjString("app" -> appInfo))
        case None => unknownApp(appId)
      }
    }

    id match {
      case ListApps(gid) => transitiveApps(gid.toRootPath)
      case _ => app(id.toRootPath)
    }
  }

  /**
   * PUT /v2/apps/{id} — updates an existing app (200) or creates it (201), then
   * returns the resulting deployment.
   */
  @PUT
  @Path("""{id:.+}""")
  @Timed
  def replace(
    @PathParam("id") id: String,
    body: Array[Byte],
    @DefaultValue("false")@QueryParam("force") force: Boolean,
    @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    val appId = id.toRootPath
    val now = clock.now()

    withValid(Json.parse(body).as[AppUpdate].copy(id = Some(appId))) { appUpdate =>
      val plan = result(groupManager.updateApp(appId, updateOrCreate(appId, _, appUpdate, now), now, force))
      // 200 if the app already existed in the original group, 201 otherwise.
      val response = plan.original.app(appId)
        .map(_ => Response.ok())
        .getOrElse(Response.created(new URI(appId.toString)))
      maybePostEvent(req, plan.target.app(appId).get)
      deploymentResult(plan, response)
    }
  }

  /**
   * PUT /v2/apps — applies several app updates in one deployment; updates with no
   * id are silently skipped.
   */
  @PUT
  @Timed
  def replaceMultiple(@DefaultValue("false")@QueryParam("force") force: Boolean,
                      body: Array[Byte],
                      @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    withValid(Json.parse(body).as[Seq[AppUpdate]].map(_.withCanonizedIds())) { updates =>
      val version = clock.now()

      // Fold all updates into a single new root group so they roll out atomically.
      def updateGroup(root: Group): Group = updates.foldLeft(root) { (group, update) =>
        update.id match {
          case Some(id) => group.updateApp(id, updateOrCreate(id, _, update, version), version)
          case None => group
        }
      }

      deploymentResult(result(groupManager.update(PathId.empty, updateGroup, version, force)))
    }
  }

  /**
   * DELETE /v2/apps/{id} — removes the app via a deployment on its parent group.
   * NOTE(review): unlike the other endpoints, `force` defaults to true here —
   * confirm this asymmetry is intentional.
   */
  @DELETE
  @Path("""{id:.+}""")
  @Timed
  def delete(@DefaultValue("true")@QueryParam("force") force: Boolean,
             @PathParam("id") id: String,
             @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    val appId = id.toRootPath

    def deleteAppFromGroup(group: Group) = {
      checkAuthorization(DeleteApp, group.app(appId), UnknownAppException(appId))
      group.removeApplication(appId)
    }

    deploymentResult(result(groupManager.update(appId.parent, deleteAppFromGroup, force = force)))
  }

  // Sub-resources for /v2/apps/{appId}/tasks and /v2/apps/{appId}/versions.
  @Path("{appId:.+}/tasks")
  def appTasksResource(): AppTasksResource = appTasksRes

  @Path("{appId:.+}/versions")
  def appVersionsResource(): AppVersionsResource = new AppVersionsResource(service, groupManager, authenticator,
    authorizer, config)

  /**
   * POST /v2/apps/{id}/restart — marks the app for restarting at a new version,
   * producing a deployment that replaces all its tasks.
   */
  @POST
  @Path("{id:.+}/restart")
  def restart(@PathParam("id") id: String,
              @DefaultValue("false")@QueryParam("force") force: Boolean,
              @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    val appId = id.toRootPath

    def markForRestartingOrThrow(opt: Option[AppDefinition]) = {
      opt
        .map(checkAuthorization(UpdateApp, _))
        .map(_.markedForRestarting)
        .getOrElse(throw UnknownAppException(appId))
    }

    val newVersion = clock.now()
    val restartDeployment = result(
      groupManager.updateApp(id.toRootPath, markForRestartingOrThrow, newVersion, force)
    )

    deploymentResult(restartDeployment)
  }

  /**
   * Shared update function: creates the app if it does not exist, otherwise
   * applies the update — or, when the update carries a version, rolls back to
   * that stored version. Authorization is checked on every path;
   * checkAuthorization yields the checked resource, making it the result.
   */
  private def updateOrCreate(appId: PathId,
                             existing: Option[AppDefinition],
                             appUpdate: AppUpdate,
                             newVersion: Timestamp)(implicit identity: Identity): AppDefinition = {
    def createApp(): AppDefinition = {
      val app = validateOrThrow(appUpdate(AppDefinition(appId)))
      checkAuthorization(CreateApp, app)
    }

    def updateApp(current: AppDefinition): AppDefinition = {
      val app = validateOrThrow(appUpdate(current))
      checkAuthorization(UpdateApp, app)
    }

    def rollback(current: AppDefinition, version: Timestamp): AppDefinition = {
      val app = service.getApp(appId, version).getOrElse(throw UnknownAppException(appId))
      checkAuthorization(ViewApp, app)
      checkAuthorization(UpdateApp, current)
      app
    }

    def updateOrRollback(current: AppDefinition): AppDefinition = appUpdate.version
      .map(rollback(current, _))
      .getOrElse(updateApp(current))

    existing match {
      case Some(app) =>
        // we can only rollback existing apps because we deleted all old versions when dropping an app
        updateOrRollback(app)
      case None =>
        createApp()
    }
  }

  // Publishes an ApiPostEvent for audit/event subscribers.
  private def maybePostEvent(req: HttpServletRequest, app: AppDefinition) =
    eventBus.publish(ApiPostEvent(req.getRemoteAddr, req.getRequestURI, app))

  /**
   * Combines the optional cmd/id/label query filters into one selector;
   * all present filters must match (cmd and id match case-insensitively).
   */
  private[v2] def search(cmd: Option[String], id: Option[String], label: Option[String]): AppSelector = {
    def containCaseInsensitive(a: String, b: String): Boolean = b.toLowerCase contains a.toLowerCase

    val selectors = Seq[Option[AppSelector]](
      cmd.map(c => AppSelector(_.cmd.exists(containCaseInsensitive(c, _)))),
      id.map(s => AppSelector(app => containCaseInsensitive(s, app.id.toString))),
      label.map(new LabelSelectorParsers().parsed)
    ).flatten
    AppSelector.forall(selectors)
  }

  /** Selector that keeps only apps the current identity is allowed to view. */
  def allAuthorized(implicit identity: Identity): AppSelector = new AppSelector {
    override def matches(app: AppDefinition): Boolean = isAuthorized(ViewApp, app)
  }

  /** Restricts an arbitrary selector to apps the current identity may view. */
  def selectAuthorized(fn: => AppSelector)(implicit identity: Identity): AppSelector = {
    val authSelector = new AppSelector {
      override def matches(app: AppDefinition): Boolean = isAuthorized(ViewApp, app)
    }
    AppSelector.forall(Seq(authSelector, fn))
  }
}
| vivekjuneja/marathon | src/main/scala/mesosphere/marathon/api/v2/AppsResource.scala | Scala | apache-2.0 | 10,199 |
package com.pragmasoft.scaldingunit
import com.twitter.scalding._
import cascading.tuple.Tuple
import scala.Predef._
import org.scalatest.{Matchers, FlatSpec}
import scala.collection.mutable.Buffer
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import cascading.pipe.Pipe
import Dsl._
// Should stay here and not inside the class otherwise Hadoop will try to serialise the container too
object ScalaTestHadoopSupportSpecOperations {

  /** Pipe transformations under test, exposed as extension methods on cascading's Pipe. */
  implicit class OperationsWrapper(val pipe: Pipe) extends Serializable {

    /** Adds a 'col1_transf field: the value of 'col1 with the suffix "_transf". */
    def changeColValue : Pipe = {
      pipe.map('col1 -> 'col1_transf) {
        col1: String => col1 + "_transf"
      }
    }

    /**
     * Joins this pipe with `pipe2` on the 'name field, then adds an
     * 'address_transf field: the value of 'address with the suffix "_transf".
     */
    def withTwoPipes(pipe2: Pipe) : Pipe = {
      pipe.joinWithSmaller('name -> 'name, pipe2).map('address -> 'address_transf) {
        address: String => address + "_transf"
      }
    }
  }

  // Lets the test DSL hand over a RichPipe while the wrapper works on the raw Pipe.
  implicit def fromRichPipe(rp: RichPipe) = new OperationsWrapper(rp.pipe)
}
/**
 * Verifies the Given/When/Then DSL against the Hadoop-mode test infrastructure,
 * with one and with two input sources, and that execution really happened in
 * Hadoop mode (via the spying infrastructure's flag).
 */
@RunWith(classOf[JUnitRunner])
class ScalaTestHadoopSupportSpec extends FlatSpec with Matchers {

  import ScalaTestHadoopSupportSpecOperations._

  "A test with single source" should "accept an operation with a single input rich pipe" in new HadoopTestInfrastructureWithSpy {
    Given {
      List(("col1_1", "col2_1"), ("col1_2", "col2_2")) withSchema (('col1, 'col2))
    } When {
      pipe: RichPipe => pipe.changeColValue
    } Then {
      buffer: Buffer[(String, String, String)] => {
        // Third tuple element is the derived 'col1_transf column.
        buffer.forall({
          case (_, _, transformed) => transformed.endsWith("_transf")
        }) should be(true)
      }
    }
    assert( testHasBeenExecutedInHadoopMode )
  }

  "A test with two sources" should "accept an operation with two input richPipes" in new HadoopTestInfrastructureWithSpy {
    Given {
      List(("Stefano", "110"), ("Rajah", "220")) withSchema('name, 'points)
    } And {
      List(("Stefano", "home1"), ("Rajah", "home2")) withSchema('name, 'address)
    } When {
      (pipe1: RichPipe, pipe2: RichPipe) => pipe1.withTwoPipes(pipe2)
    } Then {
      buffer: Buffer[(String, String, String, String)] => {
        println("Output " + buffer.toList)
        // Fourth tuple element is the derived 'address_transf column.
        buffer.forall({
          case (_, _, _, addressTransf) => addressTransf.endsWith("_transf")
        }) should be(true)
      }
    }
    assert( testHasBeenExecutedInHadoopMode )
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File
import sbt._
import sbt.Keys._
import sbtsparkpackage.SparkPackagePlugin.autoImport._
/**
 * sbt build definition for the Spark Cassandra Connector: the root aggregate,
 * the connector itself (Scala + Java facades), an embedded-services project for
 * tests, a cassandra-server helper project and the demo projects.
 */
object CassandraSparkBuild extends Build {
  import Settings._
  import sbtassembly.AssemblyPlugin
  import Versions.scalaBinary
  import sbtsparkpackage.SparkPackagePlugin

  val namespace = "spark-cassandra-connector"
  val demosPath = file(s"$namespace-demos")

  // Aggregates all published modules; assembly/spark-package plugins are disabled
  // because the root itself produces no artifact.
  lazy val root = RootProject(
    name = "root",
    dir = file("."),
    settings = rootSettings ++ Seq(cassandraServerClasspath := { "" }),
    contains = Seq(embedded, connector, demos, jconnector)
  ).disablePlugins(AssemblyPlugin, SparkPackagePlugin)

  // Helper project that exposes a full Cassandra server classpath for the
  // integration tests to launch an external server process.
  lazy val cassandraServerProject = Project(
    id = "cassandra-server",
    base = file("cassandra-server"),
    settings = defaultSettings ++ Seq(
      libraryDependencies ++= Seq(Artifacts.cassandraServer % "it", Artifacts.airlift),
      cassandraServerClasspath := {
        (fullClasspath in IntegrationTest).value.map(_.data.getAbsoluteFile).mkString(File.pathSeparator)
      }
    )
  ) configs IntegrationTest

  // Embedded Cassandra/Kafka/Spark services used by tests of the other modules.
  lazy val embedded = CrossScalaVersionsProject(
    name = s"$namespace-embedded",
    conf = defaultSettings ++ Seq(libraryDependencies ++= Dependencies.embedded)
  ).disablePlugins(AssemblyPlugin, SparkPackagePlugin) configs IntegrationTest

  // The main connector artifact; needs scala-reflect at runtime and the compiler
  // for its test/it configurations.
  lazy val connector = CrossScalaVersionsProject(
    name = namespace,
    conf = assembledSettings ++ Seq(libraryDependencies ++= Dependencies.connector ++ Seq(
      "org.scala-lang" % "scala-reflect" % scalaVersion.value,
      "org.scala-lang" % "scala-compiler" % scalaVersion.value % "test,it")) ++ pureCassandraSettings
  ).copy(dependencies = Seq(embedded % "test->test;it->it,test;")
  ) configs IntegrationTest

  // Java API facade; reuses the connector's settings and full dependency graph.
  lazy val jconnector = Project(
    id = s"$namespace-java",
    base = file(s"$namespace-java"),
    settings = japiSettings ++ connector.settings :+ (spName := s"datastax/$namespace-java"),
    dependencies = Seq(connector % "compile;runtime->runtime;test->test;it->it,test;provided->provided")
  ) configs IntegrationTest

  // Demo umbrella project (not published).
  lazy val demos = RootProject(
    name = "demos",
    dir = demosPath,
    contains = Seq(simpleDemos/*, kafkaStreaming*/, twitterStreaming)
  ).disablePlugins(AssemblyPlugin, SparkPackagePlugin)

  lazy val simpleDemos = Project(
    id = "simple-demos",
    base = demosPath / "simple-demos",
    settings = japiSettings ++ demoSettings,
    dependencies = Seq(connector, jconnector, embedded)
  ).disablePlugins(AssemblyPlugin, SparkPackagePlugin)

/*
  lazy val kafkaStreaming = CrossScalaVersionsProject(
    name = "kafka-streaming",
    conf = demoSettings ++ kafkaDemoSettings ++ Seq(
      libraryDependencies ++= (CrossVersion.partialVersion(scalaVersion.value) match {
        case Some((2, minor)) if minor < 11 => Dependencies.kafka
        case _ => Seq.empty
      }))).copy(base = demosPath / "kafka-streaming", dependencies = Seq(connector, embedded))
*/

  lazy val twitterStreaming = Project(
    id = "twitter-streaming",
    base = demosPath / "twitter-streaming",
    settings = demoSettings ++ Seq(libraryDependencies ++= Dependencies.twitter),
    dependencies = Seq(connector)
  ).disablePlugins(AssemblyPlugin, SparkPackagePlugin)

  // Source root for version-specific sources, e.g. src/main/scala-2.11.
  def crossBuildPath(base: sbt.File, v: String): sbt.File = base / s"scala-$v" / "src"

  /* templates */
  // Project template that adds per-Scala-binary-version source directories
  // to compile, doc and packaging.
  def CrossScalaVersionsProject(name: String,
                                conf: Seq[Def.Setting[_]],
                                reliesOn: Seq[ClasspathDep[ProjectReference]] = Seq.empty) =
    Project(id = name, base = file(name), dependencies = reliesOn, settings = conf ++ Seq(
      unmanagedSourceDirectories in (Compile, packageBin) +=
        crossBuildPath(baseDirectory.value, scalaBinaryVersion.value),
      unmanagedSourceDirectories in (Compile, doc) +=
        crossBuildPath(baseDirectory.value, scalaBinaryVersion.value),
      unmanagedSourceDirectories in Compile +=
        crossBuildPath(baseDirectory.value, scalaBinaryVersion.value)
    ))

  // Template for aggregating (parent) projects.
  def RootProject(
    name: String,
    dir: sbt.File, settings: =>
    scala.Seq[sbt.Def.Setting[_]] = Seq.empty,
    contains: Seq[ProjectReference]): Project =
    Project(
      id = name,
      base = dir,
      settings = parentSettings ++ settings,
      aggregate = contains)
}
/**
 * All third-party module coordinates for the build, grouped by purpose, with
 * exclusion helpers to keep conflicting transitive dependencies (guava, spark
 * core, logback, kafka extras) off the classpath.
 */
object Artifacts {
  import Versions._

  // Adds named exclusion combinators to any ModuleID.
  implicit class Exclude(module: ModuleID) {
    def guavaExclude: ModuleID =
      module exclude("com.google.guava", "guava")

    def sparkExclusions: ModuleID = module.guavaExclude
      .exclude("org.apache.spark", s"spark-core_$scalaBinary")

    def logbackExclude: ModuleID = module
      .exclude("ch.qos.logback", "logback-classic")
      .exclude("ch.qos.logback", "logback-core")

    def replExclusions: ModuleID = module.guavaExclude
      .exclude("org.apache.spark", s"spark-bagel_$scalaBinary")
      .exclude("org.apache.spark", s"spark-mllib_$scalaBinary")
      .exclude("org.scala-lang", "scala-compiler")

    def kafkaExclusions: ModuleID = module
      .exclude("org.slf4j", "slf4j-simple")
      .exclude("com.sun.jmx", "jmxri")
      .exclude("com.sun.jdmk", "jmxtools")
      .exclude("net.sf.jopt-simple", "jopt-simple")
  }

  val akkaActor = "com.typesafe.akka" %% "akka-actor" % Akka % "provided" // ApacheV2
  val akkaRemote = "com.typesafe.akka" %% "akka-remote" % Akka % "provided" // ApacheV2
  val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % Akka % "provided" // ApacheV2
  val cassandraClient = "org.apache.cassandra" % "cassandra-clientutil" % Cassandra guavaExclude // ApacheV2
  val cassandraDriver = "com.datastax.cassandra" % "cassandra-driver-core" % CassandraDriver guavaExclude // ApacheV2
  val commonsLang3 = "org.apache.commons" % "commons-lang3" % CommonsLang3 // ApacheV2
  val config = "com.typesafe" % "config" % Config % "provided" // ApacheV2
  val guava = "com.google.guava" % "guava" % Guava
  val jodaC = "org.joda" % "joda-convert" % JodaC
  val jodaT = "joda-time" % "joda-time" % JodaT
  val lzf = "com.ning" % "compress-lzf" % Lzf % "provided"
  val slf4jApi = "org.slf4j" % "slf4j-api" % Slf4j % "provided" // MIT
  val jsr166e = "com.twitter" % "jsr166e" % JSR166e // Creative Commons
  val airlift = "io.airlift" % "airline" % Airlift

  /* To allow spark artifact inclusion in the demos at runtime, we set 'provided' below. */
  val sparkCore = "org.apache.spark" %% "spark-core" % Spark guavaExclude // ApacheV2
  val sparkUnsafe = "org.apache.spark" %% "spark-unsafe" % Spark guavaExclude // ApacheV2
  val sparkStreaming = "org.apache.spark" %% "spark-streaming" % Spark guavaExclude // ApacheV2
  val sparkSql = "org.apache.spark" %% "spark-sql" % Spark sparkExclusions // ApacheV2
  val sparkCatalyst = "org.apache.spark" %% "spark-catalyst" % Spark sparkExclusions // ApacheV2
  val sparkHive = "org.apache.spark" %% "spark-hive" % Spark sparkExclusions // ApacheV2

  val cassandraServer = "org.apache.cassandra" % "cassandra-all" % Cassandra logbackExclude // ApacheV2

  object Metrics {
    val metricsCore = "com.codahale.metrics" % "metrics-core" % CodaHaleMetrics % "provided"
    val metricsJson = "com.codahale.metrics" % "metrics-json" % CodaHaleMetrics % "provided"
  }

  object Jetty {
    val jettyServer = "org.eclipse.jetty" % "jetty-server" % SparkJetty % "provided"
    val jettyServlet = "org.eclipse.jetty" % "jetty-servlet" % SparkJetty % "provided"
  }

  // Dependencies of the embedded test services (in-process Kafka, REPL, etc.).
  object Embedded {
    val akkaCluster = "com.typesafe.akka" %% "akka-cluster" % Akka // ApacheV2
    val jopt = "net.sf.jopt-simple" % "jopt-simple" % JOpt
    val kafka = "org.apache.kafka" %% "kafka" % Kafka kafkaExclusions // ApacheV2
    val sparkRepl = "org.apache.spark" %% "spark-repl" % Spark % "provided" replExclusions // ApacheV2
    val snappy = "org.xerial.snappy" % "snappy-java" % "1.1.1.7"
  }

  object Demos {
    val kafka = "org.apache.kafka" % "kafka_2.10" % Kafka kafkaExclusions // ApacheV2
    val kafkaStreaming = "org.apache.spark" % "spark-streaming-kafka_2.10" % Spark % "provided" sparkExclusions // ApacheV2
    val twitterStreaming = "org.apache.spark" %% "spark-streaming-twitter" % Spark % "provided" sparkExclusions // ApacheV2
  }

  object Test {
    val akkaTestKit = "com.typesafe.akka" %% "akka-testkit" % Akka % "test,it" // ApacheV2
    val commonsIO = "commons-io" % "commons-io" % CommonsIO % "test,it" // ApacheV2
    val scalaMock = "org.scalamock" %% "scalamock-scalatest-support" % ScalaMock % "test,it" // BSD
    val scalaTest = "org.scalatest" %% "scalatest" % ScalaTest % "test,it" // ApacheV2
    val scalactic = "org.scalactic" %% "scalactic" % Scalactic % "test,it" // ApacheV2
    val sparkCoreT = "org.apache.spark" %% "spark-core" % Spark % "test,it" classifier "tests"
    val sparkStreamingT = "org.apache.spark" %% "spark-streaming" % Spark % "test,it" classifier "tests"
    val mockito = "org.mockito" % "mockito-all" % "1.10.19" % "test,it" // MIT
    val junit = "junit" % "junit" % "4.11" % "test,it"
    val junitInterface = "com.novocode" % "junit-interface" % "0.10" % "test,it"
    val powerMock = "org.powermock" % "powermock-module-junit4" % "1.6.2" % "test,it" // ApacheV2
    val powerMockMockito = "org.powermock" % "powermock-api-mockito" % "1.6.2" % "test,it" // ApacheV2
  }
}
/**
 * Per-module dependency lists assembled from [[Artifacts]], plus the
 * external-documentation mappings used for scaladoc cross-linking.
 */
object Dependencies {
  import BuildUtil._
  import Artifacts._

  val logging = Seq(slf4jApi)

  val metrics = Seq(Metrics.metricsCore, Metrics.metricsJson)

  val jetty = Seq(Jetty.jettyServer, Jetty.jettyServlet)

  val testKit = Seq(
    Test.akkaTestKit,
    Test.commonsIO,
    Test.junit,
    Test.junitInterface,
    Test.scalaMock,
    Test.scalaTest,
    Test.scalactic,
    Test.sparkCoreT,
    Test.sparkStreamingT,
    Test.mockito,
    Test.powerMock,
    Test.powerMockMockito
  )

  val akka = Seq(akkaActor, akkaRemote, akkaSlf4j)

  val cassandra = Seq(cassandraClient, cassandraDriver)

  val spark = Seq(sparkCore, sparkStreaming, sparkSql, sparkCatalyst, sparkHive, sparkUnsafe)

  // Spark itself is 'provided' for the connector artifact so users supply their own.
  val connector = testKit ++ metrics ++ jetty ++ logging ++ akka ++ cassandra ++ spark.map(_ % "provided") ++ Seq(
    commonsLang3, config, guava, jodaC, jodaT, lzf, jsr166e)

  val embedded = logging ++ spark ++ cassandra ++ Seq(
    cassandraServer % "it,test", Embedded.jopt, Embedded.sparkRepl, Embedded.kafka, Embedded.snappy, guava)

  val kafka = Seq(Demos.kafka, Demos.kafkaStreaming)

  val twitter = Seq(sparkStreaming, Demos.twitterStreaming)

  // Maps external artifacts to their online scaladoc/javadoc for apiMappings.
  val documentationMappings = Seq(
    DocumentationMapping(url(s"http://spark.apache.org/docs/${Versions.Spark}/api/scala/"),
      sparkCore, sparkStreaming, sparkSql, sparkCatalyst, sparkHive
    ),
    DocumentationMapping(url(s"http://doc.akka.io/api/akka/${Versions.Akka}/"),
      akkaActor, akkaRemote, akkaSlf4j
    )
  )
}
| debasish83/cassandra-driver-spark | project/CassandraSparkBuild.scala | Scala | apache-2.0 | 13,099 |
package coordinates
/** An immutable position on a 2-D integer grid. */
case class Coordinates(x: Int, y: Int)

object Coordinates {
  // Each movement helper returns a fresh Coordinates; the argument is never mutated.
  def GoUp(coordinates: Coordinates) = coordinates.copy(y = coordinates.y + 1)
  def GoDown(coordinates: Coordinates) = coordinates.copy(y = coordinates.y - 1)
  // NOTE(review): GoLeft increases x while GoRight decreases it — the opposite of the
  // usual convention and of the GoUp/GoDown pairing above. Behavior is preserved
  // as-is; confirm with callers whether this axis orientation is intentional.
  def GoLeft(coordinates: Coordinates) = coordinates.copy(x = coordinates.x + 1)
  def GoRight(coordinates: Coordinates) = coordinates.copy(x = coordinates.x - 1)
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable.{HashMap, HashSet}
import scala.concurrent.Future
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.{ExecutorAllocationClient, SparkEnv, SparkException, TaskState}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.executor.ExecutorLogUrlHandler
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Network._
import org.apache.spark.rpc._
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.ENDPOINT_NAME
import org.apache.spark.util.{RpcUtils, SerializableBuffer, ThreadUtils, Utils}
/**
* A scheduler backend that waits for coarse-grained executors to connect.
* This backend holds onto each executor for the duration of the Spark job rather than relinquishing
* executors whenever a task is done and asking the scheduler to launch a new executor for
* each new task. Executors may be launched in a variety of ways, such as Mesos tasks for the
* coarse-grained Mesos mode or standalone processes for Spark's standalone deploy mode
* (spark.deploy.*).
*/
private[spark]
class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: RpcEnv)
extends ExecutorAllocationClient with SchedulerBackend with Logging {
// Use an atomic variable to track total number of cores in the cluster for simplicity and speed
protected val totalCoreCount = new AtomicInteger(0)
// Total number of executors that are currently registered
protected val totalRegisteredExecutors = new AtomicInteger(0)
protected val conf = scheduler.sc.conf
private val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf)
// RPC ask timeout used when synchronously awaiting executor request/kill acknowledgements.
private val defaultAskTimeout = RpcUtils.askRpcTimeout(conf)
// Submit tasks only after (registered resources / total expected resources)
// is equal to at least this value, which is a double between 0 and 1.
private val _minRegisteredRatio =
  math.min(1, conf.get(SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO).getOrElse(0.0))
// Submit tasks after maxRegisteredWaitingTime milliseconds
// if minRegisteredRatio has not yet been reached
private val maxRegisteredWaitingTimeNs = TimeUnit.MILLISECONDS.toNanos(
  conf.get(SCHEDULER_MAX_REGISTERED_RESOURCE_WAITING_TIME))
// Creation time of this backend; baseline for the max-registered-waiting-time check.
private val createTimeNs = System.nanoTime()
// Accessing `executorDataMap` in `DriverEndpoint.receive/receiveAndReply` doesn't need any
// protection. But accessing `executorDataMap` out of `DriverEndpoint.receive/receiveAndReply`
// must be protected by `CoarseGrainedSchedulerBackend.this`. Besides, `executorDataMap` should
// only be modified in `DriverEndpoint.receive/receiveAndReply` with protection by
// `CoarseGrainedSchedulerBackend.this`.
private val executorDataMap = new HashMap[String, ExecutorData]
// Number of executors requested by the cluster manager, [[ExecutorAllocationManager]]
@GuardedBy("CoarseGrainedSchedulerBackend.this")
private var requestedTotalExecutors = 0
// Number of executors requested from the cluster manager that have not registered yet
@GuardedBy("CoarseGrainedSchedulerBackend.this")
private var numPendingExecutors = 0
// Bus used to publish executor added/removed events to registered listeners.
private val listenerBus = scheduler.sc.listenerBus
// Executors we have requested the cluster manager to kill that have not died yet; maps
// the executor ID to whether it was explicitly killed by the driver (and thus shouldn't
// be considered an app-related failure).
@GuardedBy("CoarseGrainedSchedulerBackend.this")
private val executorsPendingToRemove = new HashMap[String, Boolean]
// A map to store hostname with its possible task number running on it
@GuardedBy("CoarseGrainedSchedulerBackend.this")
protected var hostToLocalTaskCount: Map[String, Int] = Map.empty
// The number of pending tasks which is locality required
@GuardedBy("CoarseGrainedSchedulerBackend.this")
protected var localityAwareTasks = 0
// The num of current max ExecutorId used to re-register appMaster
@volatile protected var currentExecutorIdCounter = 0
// Current set of delegation tokens to send to executors.
private val delegationTokens = new AtomicReference[Array[Byte]]()
// The token manager used to create security tokens.
private var delegationTokenManager: Option[HadoopDelegationTokenManager] = None
// Single daemon thread that periodically sends ReviveOffers to the driver endpoint.
private val reviveThread =
  ThreadUtils.newDaemonSingleThreadScheduledExecutor("driver-revive-thread")
/**
 * RPC endpoint running on the driver. Executor registration, task status updates and task
 * launching are all funneled through this endpoint's message handlers, which is why
 * `executorDataMap` needs no extra locking inside `receive`/`receiveAndReply`.
 */
class DriverEndpoint extends ThreadSafeRpcEndpoint with Logging {

  override val rpcEnv: RpcEnv = CoarseGrainedSchedulerBackend.this.rpcEnv

  // Executors that have been lost, but for which we don't yet know the real exit reason.
  protected val executorsPendingLossReason = new HashSet[String]

  // Reverse index from an executor's RPC address to its ID; used on disconnect events.
  protected val addressToExecutorId = new HashMap[RpcAddress, String]

  // Spark configuration sent to executors. This is a lazy val so that subclasses of the
  // scheduler can modify the SparkConf object before this view is created.
  private lazy val sparkProperties = scheduler.sc.conf.getAll
    .filter { case (k, _) => k.startsWith("spark.") }
    .toSeq

  // Applies the configured custom log-URL pattern to executor log URLs at registration time.
  private val logUrlHandler: ExecutorLogUrlHandler = new ExecutorLogUrlHandler(
    conf.get(UI.CUSTOM_EXECUTOR_LOG_URL))

  override def onStart() {
    // Periodically revive offers to allow delay scheduling to work
    val reviveIntervalMs = conf.get(SCHEDULER_REVIVE_INTERVAL).getOrElse(1000L)
    reviveThread.scheduleAtFixedRate(() => Utils.tryLogNonFatalError {
      // `self` may be null if the endpoint is already stopped.
      Option(self).foreach(_.send(ReviveOffers))
    }, 0, reviveIntervalMs, TimeUnit.MILLISECONDS)
  }

  override def receive: PartialFunction[Any, Unit] = {
    // Task state change: forward to the scheduler and, when the task finished, give its
    // cores/resources back to the executor and immediately re-offer that executor.
    case StatusUpdate(executorId, taskId, state, data, resources) =>
      scheduler.statusUpdate(taskId, state, data.value)
      if (TaskState.isFinished(state)) {
        executorDataMap.get(executorId) match {
          case Some(executorInfo) =>
            executorInfo.freeCores += scheduler.CPUS_PER_TASK
            resources.foreach { case (k, v) =>
              executorInfo.resourcesInfo.get(k).foreach { r =>
                r.release(v.addresses)
              }
            }
            makeOffers(executorId)
          case None =>
            // Ignoring the update since we don't know about the executor.
            logWarning(s"Ignored task status update ($taskId state $state) " +
              s"from unknown executor with ID $executorId")
        }
      }

    case ReviveOffers =>
      makeOffers()

    // Relay a task-kill request to the executor that is running the task.
    case KillTask(taskId, executorId, interruptThread, reason) =>
      executorDataMap.get(executorId) match {
        case Some(executorInfo) =>
          executorInfo.executorEndpoint.send(
            KillTask(taskId, executorId, interruptThread, reason))
        case None =>
          // Ignoring the task kill since the executor is not registered.
          logWarning(s"Attempted to kill task $taskId for unknown executor $executorId.")
      }

    // Force-kill every live executor on the given host (host-level blacklisting).
    case KillExecutorsOnHost(host) =>
      scheduler.getExecutorsAliveOnHost(host).foreach { exec =>
        killExecutors(exec.toSeq, adjustTargetNumExecutors = false, countFailures = false,
          force = true)
      }

    case UpdateDelegationTokens(newDelegationTokens) =>
      updateDelegationTokens(newDelegationTokens)

    case RemoveExecutor(executorId, reason) =>
      // We will remove the executor's state and cannot restore it. However, the connection
      // between the driver and the executor may be still alive so that the executor won't exit
      // automatically, so try to tell the executor to stop itself. See SPARK-13519.
      executorDataMap.get(executorId).foreach(_.executorEndpoint.send(StopExecutor))
      removeExecutor(executorId, reason)
  }

  override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
    // New executor registration: reject duplicates and blacklisted hosts, otherwise record
    // the executor's metadata, ack it, publish a listener event and make offers.
    case RegisterExecutor(executorId, executorRef, hostname, cores, logUrls,
        attributes, resources) =>
      if (executorDataMap.contains(executorId)) {
        executorRef.send(RegisterExecutorFailed("Duplicate executor ID: " + executorId))
        context.reply(true)
      } else if (scheduler.nodeBlacklist.contains(hostname)) {
        // If the cluster manager gives us an executor on a blacklisted node (because it
        // already started allocating those resources before we informed it of our blacklist,
        // or if it ignored our blacklist), then we reject that executor immediately.
        logInfo(s"Rejecting $executorId as it has been blacklisted.")
        executorRef.send(RegisterExecutorFailed(s"Executor is blacklisted: $executorId"))
        context.reply(true)
      } else {
        // If the executor's rpc env is not listening for incoming connections, `hostPort`
        // will be null, and the client connection should be used to contact the executor.
        val executorAddress = if (executorRef.address != null) {
          executorRef.address
        } else {
          context.senderAddress
        }
        logInfo(s"Registered executor $executorRef ($executorAddress) with ID $executorId")
        addressToExecutorId(executorAddress) = executorId
        totalCoreCount.addAndGet(cores)
        totalRegisteredExecutors.addAndGet(1)
        val resourcesInfo = resources.map{ case (k, v) =>
          (v.name, new ExecutorResourceInfo(v.name, v.addresses))}
        val data = new ExecutorData(executorRef, executorAddress, hostname,
          cores, cores, logUrlHandler.applyPattern(logUrls, attributes), attributes,
          resourcesInfo)
        // This must be synchronized because variables mutated
        // in this block are read when requesting executors
        CoarseGrainedSchedulerBackend.this.synchronized {
          executorDataMap.put(executorId, data)
          if (currentExecutorIdCounter < executorId.toInt) {
            currentExecutorIdCounter = executorId.toInt
          }
          if (numPendingExecutors > 0) {
            numPendingExecutors -= 1
            logDebug(s"Decremented number of pending executors ($numPendingExecutors left)")
          }
        }
        executorRef.send(RegisteredExecutor)
        // Note: some tests expect the reply to come after we put the executor in the map
        context.reply(true)
        listenerBus.post(
          SparkListenerExecutorAdded(System.currentTimeMillis(), executorId, data))
        makeOffers()
      }

    case StopDriver =>
      context.reply(true)
      stop()

    case StopExecutors =>
      logInfo("Asking each executor to shut down")
      for ((_, executorData) <- executorDataMap) {
        executorData.executorEndpoint.send(StopExecutor)
      }
      context.reply(true)

    case RemoveWorker(workerId, host, message) =>
      removeWorker(workerId, host, message)
      context.reply(true)

    // Executor bootstrap: return driver-side config, IO encryption key and current tokens.
    case RetrieveSparkAppConfig =>
      val reply = SparkAppConfig(
        sparkProperties,
        SparkEnv.get.securityManager.getIOEncryptionKey(),
        Option(delegationTokens.get()))
      context.reply(reply)
  }

  // Make fake resource offers on all executors
  private def makeOffers() {
    // Make sure no executor is killed while some task is launching on it
    val taskDescs = withLock {
      // Filter out executors under killing
      val activeExecutors = executorDataMap.filterKeys(executorIsAlive)
      val workOffers = activeExecutors.map {
        case (id, executorData) =>
          new WorkerOffer(id, executorData.executorHost, executorData.freeCores,
            Some(executorData.executorAddress.hostPort),
            executorData.resourcesInfo.map { case (rName, rInfo) =>
              (rName, rInfo.availableAddrs.toBuffer)
            })
      }.toIndexedSeq
      scheduler.resourceOffers(workOffers)
    }
    if (taskDescs.nonEmpty) {
      launchTasks(taskDescs)
    }
  }

  override def onDisconnected(remoteAddress: RpcAddress): Unit = {
    // A dropped connection is treated as a lost executor with a pending loss reason.
    addressToExecutorId
      .get(remoteAddress)
      .foreach(removeExecutor(_, SlaveLost("Remote RPC client disassociated. Likely due to " +
        "containers exceeding thresholds, or network issues. Check driver logs for WARN " +
        "messages.")))
  }

  // Make fake resource offers on just one executor
  private def makeOffers(executorId: String) {
    // Make sure no executor is killed while some task is launching on it
    val taskDescs = withLock {
      // Filter out executors under killing
      if (executorIsAlive(executorId)) {
        val executorData = executorDataMap(executorId)
        val workOffers = IndexedSeq(
          new WorkerOffer(executorId, executorData.executorHost, executorData.freeCores,
            Some(executorData.executorAddress.hostPort),
            executorData.resourcesInfo.map { case (rName, rInfo) =>
              (rName, rInfo.availableAddrs.toBuffer)
            }))
        scheduler.resourceOffers(workOffers)
      } else {
        Seq.empty
      }
    }
    if (taskDescs.nonEmpty) {
      launchTasks(taskDescs)
    }
  }

  // An executor is alive unless it is pending removal or pending a loss reason.
  private def executorIsAlive(executorId: String): Boolean = synchronized {
    !executorsPendingToRemove.contains(executorId) &&
      !executorsPendingLossReason.contains(executorId)
  }

  // Launch tasks returned by a set of resource offers
  private def launchTasks(tasks: Seq[Seq[TaskDescription]]) {
    for (task <- tasks.flatten) {
      val serializedTask = TaskDescription.encode(task)
      if (serializedTask.limit() >= maxRpcMessageSize) {
        // Task too large to ship over RPC: abort its task set with an actionable message.
        Option(scheduler.taskIdToTaskSetManager.get(task.taskId)).foreach { taskSetMgr =>
          try {
            var msg = "Serialized task %s:%d was %d bytes, which exceeds max allowed: " +
              s"${RPC_MESSAGE_MAX_SIZE.key} (%d bytes). Consider increasing " +
              s"${RPC_MESSAGE_MAX_SIZE.key} or using broadcast variables for large values."
            msg = msg.format(task.taskId, task.index, serializedTask.limit(), maxRpcMessageSize)
            taskSetMgr.abort(msg)
          } catch {
            case e: Exception => logError("Exception in error callback", e)
          }
        }
      }
      else {
        val executorData = executorDataMap(task.executorId)
        // Do resources allocation here. The allocated resources will get released after the task
        // finishes.
        executorData.freeCores -= scheduler.CPUS_PER_TASK
        task.resources.foreach { case (rName, rInfo) =>
          assert(executorData.resourcesInfo.contains(rName))
          executorData.resourcesInfo(rName).acquire(rInfo.addresses)
        }
        logDebug(s"Launching task ${task.taskId} on executor id: ${task.executorId} hostname: " +
          s"${executorData.executorHost}.")
        executorData.executorEndpoint.send(LaunchTask(new SerializableBuffer(serializedTask)))
      }
    }
  }

  // Remove a disconnected slave from the cluster
  private def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
    logDebug(s"Asked to remove executor $executorId with reason $reason")
    executorDataMap.get(executorId) match {
      case Some(executorInfo) =>
        // This must be synchronized because variables mutated
        // in this block are read when requesting executors
        val killed = CoarseGrainedSchedulerBackend.this.synchronized {
          addressToExecutorId -= executorInfo.executorAddress
          executorDataMap -= executorId
          executorsPendingLossReason -= executorId
          // `killed` is true only if the driver explicitly asked for this executor's removal.
          executorsPendingToRemove.remove(executorId).getOrElse(false)
        }
        totalCoreCount.addAndGet(-executorInfo.totalCores)
        totalRegisteredExecutors.addAndGet(-1)
        scheduler.executorLost(executorId, if (killed) ExecutorKilled else reason)
        listenerBus.post(
          SparkListenerExecutorRemoved(System.currentTimeMillis(), executorId, reason.toString))
      case None =>
        // SPARK-15262: If an executor is still alive even after the scheduler has removed
        // its metadata, we may receive a heartbeat from that executor and tell its block
        // manager to reregister itself. If that happens, the block manager master will know
        // about the executor, but the scheduler will not. Therefore, we should remove the
        // executor from the block manager when we hit this case.
        scheduler.sc.env.blockManager.master.removeExecutorAsync(executorId)
        logInfo(s"Asked to remove non-existent executor $executorId")
    }
  }

  // Remove a lost worker from the cluster
  private def removeWorker(workerId: String, host: String, message: String): Unit = {
    logDebug(s"Asked to remove worker $workerId with reason $message")
    scheduler.workerRemoved(workerId, host, message)
  }

  /**
   * Stop making resource offers for the given executor. The executor is marked as lost with
   * the loss reason still pending.
   *
   * @return Whether executor should be disabled
   */
  protected def disableExecutor(executorId: String): Boolean = {
    val shouldDisable = CoarseGrainedSchedulerBackend.this.synchronized {
      if (executorIsAlive(executorId)) {
        executorsPendingLossReason += executorId
        true
      } else {
        // Returns true for explicitly killed executors, we also need to get pending loss reasons;
        // For others return false.
        executorsPendingToRemove.contains(executorId)
      }
    }
    if (shouldDisable) {
      logInfo(s"Disabling executor $executorId.")
      scheduler.executorLost(executorId, LossReasonPending)
    }
    shouldDisable
  }
}
// Driver endpoint registered with the RPC environment; created eagerly at construction time.
val driverEndpoint = rpcEnv.setupEndpoint(ENDPOINT_NAME, createDriverEndpoint())

// Minimum fraction of expected resources that must register before scheduling may start.
protected def minRegisteredRatio: Double = _minRegisteredRatio
override def start() {
  // When Hadoop security is enabled, set up delegation tokens before executors register.
  if (UserGroupInformation.isSecurityEnabled()) {
    delegationTokenManager = createTokenManager()
    delegationTokenManager.foreach { dtm =>
      val ugi = UserGroupInformation.getCurrentUser()
      val tokens = if (dtm.renewalEnabled) {
        // The manager handles renewal itself; start() returns the initial tokens.
        dtm.start()
      } else if (ugi.hasKerberosCredentials() || SparkHadoopUtil.get.isProxyUser(ugi)) {
        // One-shot token fetch when renewal is disabled but credentials are available.
        val creds = ugi.getCredentials()
        dtm.obtainDelegationTokens(creds)
        SparkHadoopUtil.get.serialize(creds)
      } else {
        // null is used as a sentinel for "no tokens obtained".
        null
      }
      if (tokens != null) {
        updateDelegationTokens(tokens)
      }
    }
  }
}

// Factory hook so subclasses can provide a specialized driver endpoint implementation.
protected def createDriverEndpoint(): DriverEndpoint = new DriverEndpoint()
/**
 * Synchronously ask the driver endpoint to shut down every registered executor.
 * Any failure is rethrown wrapped in a [[SparkException]].
 */
def stopExecutors(): Unit = {
  if (driverEndpoint != null) {
    logInfo("Shutting down all executors")
    try {
      driverEndpoint.askSync[Boolean](StopExecutors)
    } catch {
      case e: Exception =>
        throw new SparkException("Error asking standalone scheduler to shut down executors", e)
    }
  }
}
/**
 * Shut down this backend: stop the revive thread, all executors, token renewal, and
 * finally the driver endpoint itself.
 */
override def stop(): Unit = {
  reviveThread.shutdownNow()
  stopExecutors()
  delegationTokenManager.foreach(_.stop())
  if (driverEndpoint != null) {
    try {
      driverEndpoint.askSync[Boolean](StopDriver)
    } catch {
      case e: Exception =>
        throw new SparkException("Error stopping standalone scheduler's driver endpoint", e)
    }
  }
}
/**
 * Reset the state of CoarseGrainedSchedulerBackend to the initial state. Currently it will only
 * be called in the yarn-client mode when AM re-registers after a failure.
 * */
protected def reset(): Unit = {
  // Snapshot the executor IDs under the lock, then remove them outside it.
  val executors: Set[String] = synchronized {
    requestedTotalExecutors = 0
    numPendingExecutors = 0
    executorsPendingToRemove.clear()
    executorDataMap.keys.toSet
  }

  // Remove all the lingering executors that should be removed but not yet. The reason might be
  // because (1) disconnected event is not yet received; (2) executors die silently.
  executors.foreach { eid =>
    removeExecutor(eid, SlaveLost("Stale executor after cluster manager re-registered."))
  }
}
// Ask the driver endpoint (asynchronously) to make fresh resource offers.
override def reviveOffers() {
  driverEndpoint.send(ReviveOffers)
}

// Forward a task-kill request to the driver endpoint, which relays it to the owning executor.
override def killTask(
    taskId: Long, executorId: String, interruptThread: Boolean, reason: String) {
  driverEndpoint.send(KillTask(taskId, executorId, interruptThread, reason))
}

// Default parallelism: the configured value, or the total registered cores (at least 2).
override def defaultParallelism(): Int = {
  conf.getInt("spark.default.parallelism", math.max(totalCoreCount.get(), 2))
}

/**
 * Called by subclasses when notified of a lost worker. It just fires the message and returns
 * at once.
 */
protected def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
  driverEndpoint.send(RemoveExecutor(executorId, reason))
}

// Fire-and-forget worker removal; a failed ask is only logged, never rethrown.
protected def removeWorker(workerId: String, host: String, message: String): Unit = {
  driverEndpoint.ask[Boolean](RemoveWorker(workerId, host, message)).failed.foreach(t =>
    logError(t.getMessage, t))(ThreadUtils.sameThread)
}
def sufficientResourcesRegistered(): Boolean = true
/**
 * The backend is ready once enough resources have registered, or once the maximum
 * registration waiting time has elapsed since construction.
 *
 * Rewritten as a single if/else-if expression: the original used non-idiomatic early
 * `return` statements and invoked the ()-declared `sufficientResourcesRegistered` without
 * parentheses (deprecated style).
 */
override def isReady(): Boolean = {
  if (sufficientResourcesRegistered()) {
    logInfo("SchedulerBackend is ready for scheduling beginning after " +
      s"reached minRegisteredResourcesRatio: $minRegisteredRatio")
    true
  } else if ((System.nanoTime() - createTimeNs) >= maxRegisteredWaitingTimeNs) {
    logInfo("SchedulerBackend is ready for scheduling beginning after waiting " +
      s"maxRegisteredResourcesWaitingTime: $maxRegisteredWaitingTimeNs(ns)")
    true
  } else {
    false
  }
}
/**
 * Return the number of executors currently registered with this backend.
 */
private def numExistingExecutors: Int = executorDataMap.size

// IDs of every currently registered executor.
override def getExecutorIds(): Seq[String] = {
  executorDataMap.keySet.toSeq
}

// An executor is active iff it is registered and not pending removal.
override def isExecutorActive(id: String): Boolean = synchronized {
  executorDataMap.contains(id) && !executorsPendingToRemove.contains(id)
}
/**
 * Upper bound on concurrently running tasks: for each registered executor, the number of
 * task slots (total cores divided by cores-per-task, integer division), summed.
 */
override def maxNumConcurrentTasks(): Int = {
  executorDataMap.values.foldLeft(0) { (slots, executor) =>
    slots + executor.totalCores / scheduler.CPUS_PER_TASK
  }
}
// this function is for testing only
def getExecutorAvailableResources(executorId: String): Map[String, ExecutorResourceInfo] = {
  // Empty map when the executor is unknown; otherwise its current resource bookkeeping.
  executorDataMap.get(executorId).fold(Map.empty[String, ExecutorResourceInfo])(_.resourcesInfo)
}
/**
 * Request an additional number of executors from the cluster manager.
 * @return whether the request is acknowledged.
 */
final override def requestExecutors(numAdditionalExecutors: Int): Boolean = {
  if (numAdditionalExecutors < 0) {
    throw new IllegalArgumentException(
      "Attempted to request a negative number of additional executor(s) " +
        s"$numAdditionalExecutors from the cluster manager. Please specify a positive number!")
  }
  logInfo(s"Requesting $numAdditionalExecutors additional executor(s) from the cluster manager")

  val response = synchronized {
    requestedTotalExecutors += numAdditionalExecutors
    numPendingExecutors += numAdditionalExecutors
    logDebug(s"Number of pending executors is now $numPendingExecutors")
    // Sanity check of the bookkeeping identity; a mismatch is logged, not fatal.
    if (requestedTotalExecutors !=
        (numExistingExecutors + numPendingExecutors - executorsPendingToRemove.size)) {
      logDebug(
        s"""requestExecutors($numAdditionalExecutors): Executor request doesn't match:
           |requestedTotalExecutors = $requestedTotalExecutors
           |numExistingExecutors = $numExistingExecutors
           |numPendingExecutors = $numPendingExecutors
           |executorsPendingToRemove = ${executorsPendingToRemove.size}""".stripMargin)
    }

    // Account for executors pending to be added or removed
    doRequestTotalExecutors(requestedTotalExecutors)
  }

  defaultAskTimeout.awaitResult(response)
}
/**
 * Update the cluster manager on our scheduling needs. Three bits of information are included
 * to help it make decisions.
 * @param numExecutors The total number of executors we'd like to have. The cluster manager
 *                     shouldn't kill any running executor to reach this number, but,
 *                     if all existing executors were to die, this is the number of executors
 *                     we'd want to be allocated.
 * @param localityAwareTasks The number of tasks in all active stages that have a locality
 *                           preferences. This includes running, pending, and completed tasks.
 * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
 *                             that would like to run on that host.
 *                             This includes running, pending, and completed tasks.
 * @return whether the request is acknowledged by the cluster manager.
 */
final override def requestTotalExecutors(
    numExecutors: Int,
    localityAwareTasks: Int,
    hostToLocalTaskCount: Map[String, Int]
  ): Boolean = {
  if (numExecutors < 0) {
    throw new IllegalArgumentException(
      "Attempted to request a negative number of executor(s) " +
        s"$numExecutors from the cluster manager. Please specify a positive number!")
  }

  val response = synchronized {
    this.requestedTotalExecutors = numExecutors
    this.localityAwareTasks = localityAwareTasks
    this.hostToLocalTaskCount = hostToLocalTaskCount

    // Pending = target minus live executors, plus those already marked for removal.
    numPendingExecutors =
      math.max(numExecutors - numExistingExecutors + executorsPendingToRemove.size, 0)

    doRequestTotalExecutors(numExecutors)
  }

  defaultAskTimeout.awaitResult(response)
}
/**
 * Request executors from the cluster manager by specifying the total number desired,
 * including existing pending and running executors.
 *
 * The semantics here guarantee that we do not over-allocate executors for this application,
 * since a later request overrides the value of any prior request. The alternative interface
 * of requesting a delta of executors risks double counting new executors when there are
 * insufficient resources to satisfy the first request. We make the assumption here that the
 * cluster manager will eventually fulfill all requests when resources free up.
 *
 * @return a future whose evaluation indicates whether the request is acknowledged.
 */
protected def doRequestTotalExecutors(requestedTotal: Int): Future[Boolean] =
  // Default: not supported; cluster-manager-specific subclasses override this.
  Future.successful(false)
/**
 * Request that the cluster manager kill the specified executors.
 *
 * @param executorIds identifiers of executors to kill
 * @param adjustTargetNumExecutors whether the target number of executors be adjusted down
 *                                 after these executors have been killed
 * @param countFailures if there are tasks running on the executors when they are killed, whether
 *                      those failures be counted to task failure limits?
 * @param force whether to force kill busy executors, default false
 * @return the ids of the executors acknowledged by the cluster manager to be removed.
 */
final override def killExecutors(
    executorIds: Seq[String],
    adjustTargetNumExecutors: Boolean,
    countFailures: Boolean,
    force: Boolean): Seq[String] = {
  logInfo(s"Requesting to kill executor(s) ${executorIds.mkString(", ")}")

  // withLock acquires the scheduler lock before this backend's lock (SPARK-27112 ordering).
  val response = withLock {
    val (knownExecutors, unknownExecutors) = executorIds.partition(executorDataMap.contains)
    unknownExecutors.foreach { id =>
      logWarning(s"Executor to kill $id does not exist!")
    }

    // If an executor is already pending to be removed, do not kill it again (SPARK-9795)
    // If this executor is busy, do not kill it unless we are told to force kill it (SPARK-9552)
    val executorsToKill = knownExecutors
      .filter { id => !executorsPendingToRemove.contains(id) }
      .filter { id => force || !scheduler.isExecutorBusy(id) }
    executorsToKill.foreach { id => executorsPendingToRemove(id) = !countFailures }

    logInfo(s"Actual list of executor(s) to be killed is ${executorsToKill.mkString(", ")}")

    // If we do not wish to replace the executors we kill, sync the target number of executors
    // with the cluster manager to avoid allocating new ones. When computing the new target,
    // take into account executors that are pending to be added or removed.
    val adjustTotalExecutors =
      if (adjustTargetNumExecutors) {
        requestedTotalExecutors = math.max(requestedTotalExecutors - executorsToKill.size, 0)
        if (requestedTotalExecutors !=
            (numExistingExecutors + numPendingExecutors - executorsPendingToRemove.size)) {
          logDebug(
            s"""killExecutors($executorIds, $adjustTargetNumExecutors, $countFailures, $force):
               |Executor counts do not match:
               |requestedTotalExecutors = $requestedTotalExecutors
               |numExistingExecutors = $numExistingExecutors
               |numPendingExecutors = $numPendingExecutors
               |executorsPendingToRemove = ${executorsPendingToRemove.size}""".stripMargin)
        }
        doRequestTotalExecutors(requestedTotalExecutors)
      } else {
        numPendingExecutors += executorsToKill.size
        Future.successful(true)
      }

    // Only issue the actual kill once the target-count adjustment has completed.
    val killExecutors: Boolean => Future[Boolean] =
      if (executorsToKill.nonEmpty) {
        _ => doKillExecutors(executorsToKill)
      } else {
        _ => Future.successful(false)
      }

    val killResponse = adjustTotalExecutors.flatMap(killExecutors)(ThreadUtils.sameThread)

    killResponse.flatMap(killSuccessful =>
      Future.successful (if (killSuccessful) executorsToKill else Seq.empty[String])
    )(ThreadUtils.sameThread)
  }

  defaultAskTimeout.awaitResult(response)
}
/**
 * Kill the given list of executors through the cluster manager.
 * @return whether the kill request is acknowledged.
 */
protected def doKillExecutors(executorIds: Seq[String]): Future[Boolean] =
  // Default: not supported; cluster-manager-specific subclasses override this.
  Future.successful(false)
/**
 * Request that the cluster manager kill all executors on a given host.
 * @return whether the kill request is acknowledged.
 */
final override def killExecutorsOnHost(host: String): Boolean = {
  logInfo(s"Requesting to kill any and all executors on host ${host}")
  // A potential race exists if a new executor attempts to register on a host
  // that is on the blacklist and is no longer valid. To avoid this race,
  // all executor registration and killing happens in the event loop. This way, either
  // an executor will fail to register, or will be killed when all executors on a host
  // are killed.
  // Kill all the executors on this host in an event loop to ensure serialization.
  driverEndpoint.send(KillExecutorsOnHost(host))
  true
}
/**
 * Create the delegation token manager to be used for the application. This method is called
 * once during the start of the scheduler backend (so after the object has already been
 * fully constructed), only if security is enabled in the Hadoop configuration.
 */
protected def createTokenManager(): Option[HadoopDelegationTokenManager] = None

/**
 * Called when a new set of delegation tokens is sent to the driver. Child classes can override
 * this method but should always call this implementation, which handles token distribution to
 * executors.
 */
protected def updateDelegationTokens(tokens: Array[Byte]): Unit = {
  SparkHadoopUtil.get.addDelegationTokens(tokens, conf)
  delegationTokens.set(tokens)
  // Push the refreshed tokens to every registered executor.
  executorDataMap.values.foreach { ed =>
    ed.executorEndpoint.send(UpdateDelegationTokens(tokens))
  }
}

// Latest serialized delegation tokens, for subclasses that need to (re)send them.
protected def currentDelegationTokens: Array[Byte] = delegationTokens.get()

// SPARK-27112: We need to ensure that there is ordering of lock acquisition
// between TaskSchedulerImpl and CoarseGrainedSchedulerBackend objects in order to fix
// the deadlock issue exposed in SPARK-27112
private def withLock[T](fn: => T): T = scheduler.synchronized {
  CoarseGrainedSchedulerBackend.this.synchronized { fn }
}
}
private[spark] object CoarseGrainedSchedulerBackend {
  // Name under which the driver endpoint is registered in the RpcEnv.
  val ENDPOINT_NAME = "CoarseGrainedScheduler"
}
| pgandhi999/spark | core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala | Scala | apache-2.0 | 33,673 |
package controllers
import play.api.data.Form
import play.api.mvc._, Results._
import lila.api.Context
import lila.app._
import lila.common.LilaCookie
import views._
/** HTTP controller for reading and saving user display/behavior preferences. */
object Pref extends LilaController {

  private def api = Env.pref.api
  private def forms = Env.pref.forms

  // Render the full preferences form for the authenticated user.
  def form = Auth { implicit ctx =>
    me =>
      Ok(html.account.pref(me, forms prefOf ctx.pref)).fuccess
  }

  // Handle the full preferences form submission; re-render on validation errors.
  def formApply = AuthBody { implicit ctx =>
    me =>
      implicit val req = ctx.body
      FormFuResult(forms.pref) { err =>
        fuccess(html.account.pref(me, err))
      } { data =>
        api.setPref(data(ctx.pref), notifyChange = true) inject Ok("saved")
      }
  }

  // Handle the compact (mini) preferences form; errors get a bare "nope" body.
  def miniFormApply = AuthBody { implicit ctx =>
    me =>
      implicit val req = ctx.body
      FormFuResult(forms.miniPref) { err =>
        fuccess("nope")
      } { data =>
        api.setPref(data(ctx.pref), notifyChange = true) inject Ok("saved")
      }
  }

  // Set a single named preference (anonymous users allowed); the value is also
  // stored in a session cookie returned with the response. Unknown names are a no-op.
  def set(name: String) = OpenBody { implicit ctx =>
    implicit val req = ctx.body
    (setters get name) ?? {
      case (form, fn) => FormResult(form) { v =>
        fn(v, ctx) map { Ok(()) withCookies _ }
      }
    }
  }

  // Persist a tag key/value for the authenticated user.
  def saveTag(name: String, value: String) = Auth { implicit ctx =>
    me =>
      api.saveTag(me, name, value)
  }

  // Preference name -> (validation form, persist-and-make-cookie function).
  private lazy val setters = Map(
    "theme" -> (forms.theme -> save("theme") _),
    "pieceSet" -> (forms.pieceSet -> save("pieceSet") _),
    "theme3d" -> (forms.theme3d -> save("theme3d") _),
    "pieceSet3d" -> (forms.pieceSet3d -> save("pieceSet3d") _),
    "bg" -> (forms.bg -> save("bg") _),
    "is3d" -> (forms.is3d -> save("is3d") _))

  // Persist the value for logged-in users (anonymous users only get the cookie),
  // then build the session cookie carrying the new value.
  private def save(name: String)(value: String, ctx: Context): Fu[Cookie] =
    ctx.me ?? {
      api.setPrefString(_, name, value, notifyChange = false)
    } inject LilaCookie.session(name, value)(ctx.req)
}
| Happy0/lila | app/controllers/Pref.scala | Scala | mit | 1,844 |
/*
* Copyright 2017 Ahmad Mozafarnia
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ir.bama.services
import java.time.{LocalDate, LocalDateTime}
import javax.inject.{Inject, Singleton}
import akka.actor.{Actor, ActorSystem, InvalidActorNameException, Props}
import akka.pattern.ask
import akka.util.Timeout
import ir.bama.models.SellerType.SellerType
import ir.bama.models._
import ir.bama.repositories.SellAdRepo.ListSpecs
import ir.bama.repositories.SellAdRepo
import play.api.http.Status._
import scala.concurrent.duration.{Duration, DurationLong}
import scala.concurrent.{Await, ExecutionContext, Future}
/**
* @author ahmad
*/
@Singleton
class SellAdService @Inject()(adRepo: SellAdRepo, sellerService: SellerService, system: ActorSystem)
(implicit ec: ExecutionContext) extends BaseService[SellAd, SellAdRepo](adRepo) {
import repo.dbConfig._
import profile.api._
private val dispatcher = system.actorOf(Props(new ActionDispatcher), "ad-action-dispatcher")
private implicit val timeout = Timeout(30 seconds)
private val visibleStatuses = Seq(SellAdStatus.SUBMITTED, SellAdStatus.RESUBMITTED)
type Limits = Map[SellerType, (Int, Int, Option[Int])]
private case class Submit(userId: Long, ad: SellAd, limits: Limits)
private case class Resubmit(userId: Long, adId: Long, limits: Limits)
private case class Cancel(userId: Long, adId: Long)
def submit(userId: Long, ad: SellAd, limits: Limits): Future[PersistenceResult] =
(dispatcher ? Submit(userId, ad, limits)).mapTo[PersistenceResult]
def resubmit(userId: Long, adId: Long, limits: Limits): Future[PersistenceResult] =
(dispatcher ? Resubmit(userId, adId, limits)).mapTo[PersistenceResult]
def cancel(userId: Long, adId: Long): Future[PersistenceResult] =
(dispatcher ? Cancel(userId, adId)).mapTo[PersistenceResult]
private class ActionDispatcher extends Actor {
override def receive: Receive = {
case msg@Submit(userId, _, _) => dispatch(msg, userId)
case msg@Resubmit(userId, _, _) => dispatch(msg, userId)
case msg@Cancel(userId, _) => dispatch(msg, userId)
}
private def dispatch(msg: Any, userId: Long) = {
val name = s"runner-$userId"
context.child(name) match {
case Some(runner) => runner forward msg
case _ =>
val currentSender = sender()
sellerService.findIdAndTypeByUserId(userId).map {
case Some((sellerId, sellerType)) =>
try {
context.actorOf(ActionRunner.props(sellerId, sellerType), name) tell(msg, currentSender)
} catch {
case _: InvalidActorNameException => self ! msg
}
case _ => currentSender ! Right(None)
}
}
}
}
private class ActionRunner(sellerId: Long, sellerType: SellerType) extends Actor {
private val someSeller: Option[Seller[_]] = Some(Seller.id(sellerId))
override def receive: Receive = {
case Submit(_, ad, limits) => submit(ad, limits)
case Resubmit(_, adId, limits) => resubmit(adId, limits)
case Cancel(_, adId) => cancel(adId)
}
private def submit(ad: SellAd, limits: Limits) = {
val limit = limits(sellerType)
replyBlocking {
db.run {
val start = LocalDate.now().minusDays(limit._1 - 1).atStartOfDay()
val action: DBIO[PersistenceResult] = repo.countAds(sellerId, start, LocalDateTime.now()).flatMap { c =>
if (c < limit._2) {
repo.persist(ad.copy(seller = someSeller)).map(id => Right(Some(id)))
} else {
DBIO.successful(Left(
s"You cannot submit more than ${limit._2} ad(s) in the period of ${limit._1} day(s). Please try again later."))
}
}
action
}
}
}
private def resubmit(adId: Long, limits: Limits) = {
val limit = limits(sellerType)
replyBlocking {
db.run {
val action: DBIO[PersistenceResult] = repo.findSellerIdAndStatusById(adId).flatMap {
case Some((seId, status)) =>
if (seId != sellerId) {
DBIO.successful(Left("You are not the owner of this ad.".error(FORBIDDEN)))
} else if (visibleStatuses.contains(status)) {
val start = LocalDate.now().minusDays(limit._1 - 1).atStartOfDay()
repo.countSubmissions(adId, start, LocalDateTime.now()).flatMap {
case (total, inRange) =>
if (limit._3.exists(total >= _)) {
DBIO.successful(Left(
s"You cannot resubmit an ad more than ${limit._3.get} times."))
} else if (inRange >= limit._2) {
DBIO.successful(Left(
s"You cannot resubmit an ad more than ${limit._2} times in the period of ${limit._1} day(s). Please try again later."))
} else {
repo.resubmit(adId).map(_ => Right(Some(adId)))
}
}
} else {
DBIO.successful(Left("This ad is not in visible status.".error(FORBIDDEN)))
}
case _ => DBIO.successful(Right(None))
}
action
}
}
}
private def cancel(adId: Long) = replyBlocking {
db.run {
val action: DBIO[PersistenceResult] = repo.findSellerIdById(adId).flatMap {
case Some(seId) =>
if (seId == sellerId) {
repo.cancel(adId).map { success =>
if (success) {
Right(Some(adId))
} else {
Left("This ad has already been canceled.")
}
}
} else {
DBIO.successful(Left("You are not the owner of this ad.".error(FORBIDDEN)))
}
case None => DBIO.successful(Right(None))
}
action
}
}
private def replyBlocking(opFuture: Future[PersistenceResult]) = {
// we have to wait for result of the operation (i.e. block), to prevent concurrent inserts/updates
sender ! Await.result(opFuture, Duration.Inf)
}
}
private object ActionRunner {
def props(sellerId: Long, sellerType: SellerType) = Props(new ActionRunner(sellerId, sellerType))
}
/** Loads an ad by id together with a flag telling whether `maybeUserId` owns it.
 *
 * Privacy filtering applied after loading: for a private seller whose profile is
 * not public, non-owners never receive the full seller record — at most the
 * seller's phone numbers, and only when the ad itself carries no phone number;
 * otherwise the seller field is dropped entirely.
 */
def load(adId: Long, maybeUserId: Option[Long]): Future[Option[(SellAd, Boolean)]] = db.run {
  findSellerId(maybeUserId) { maybeSellerId =>
    repo.load(adId, maybeSellerId)
  }.map {
    _.flatMap {
      case result@(ad, owner) =>
        // flatMap + map over ad.seller: an ad loaded without a seller yields None
        // overall — presumably intentional; TODO confirm repo.load always sets seller.
        ad.seller.map {
          case x: PrivateSeller =>
            if (x.publicProfile || owner) result else {
              // Expose only phone numbers, and only if the ad has none of its own.
              val maybeSeller: Option[Seller[_]] = if (ad.phoneNumber.isEmpty) Some(Seller.phoneNumbers(x.phoneNumbers)) else None
              (ad.copy(seller = maybeSeller), owner)
            }
          case _: Dealer => result // dealer profiles are not filtered
        }
    }
  }
}
/** Lists visible ads matching `specs`, each paired with an ownership flag for the
 * given user; `range` optionally restricts the result window. */
def list(specs: ListSpecs, maybeUserId: Option[Long], range: Option[Range]): Future[Seq[(SellAd, Boolean)]] =
  db.run {
    // Resolve the viewer's seller id (if any), then delegate to the repository.
    findSellerId(maybeUserId)(repo.list(specs, _, visibleStatuses, range))
  }
/** Resolves the seller id belonging to an optional user id and feeds it to `block`.
 * With no user id, `block` is invoked directly with `None`. */
private def findSellerId[A](maybeUserId: Option[Long])(block: (Option[Long]) => DBIO[A]) =
  maybeUserId.fold(block(None)) { userId =>
    sellerService.repo.findIdByUserId(userId).flatMap(block)
  }
/** Increments the ad's view counter via a single repository action; the Boolean
 * presumably indicates whether a row was updated — TODO confirm against repo. */
def incrementViews(adId: Long): Future[Boolean] =
  db.run(repo.incrementViews(adId))
/** Increments the counter of phone-number reveals for the ad; Boolean as above. */
def incrementPhoneNumberViews(adId: Long): Future[Boolean] =
  db.run(repo.incrementPhoneNumberViews(adId))
}
| ahmadmo/bama-api-demo | app/ir/bama/services/SellAdService.scala | Scala | apache-2.0 | 8,211 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.graph.scala.test.operations
import org.apache.flink.api.common.functions.MapFunction
import org.apache.flink.api.scala._
import org.apache.flink.graph.scala._
import org.apache.flink.graph.scala.test.TestGraphUtils
import org.apache.flink.graph.scala.utils.EdgeToTuple3Map
import org.apache.flink.graph.{Edge, EdgeJoinFunction}
import org.apache.flink.test.util.{MultipleProgramsTestBase, TestBaseUtils}
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import _root_.scala.collection.JavaConverters._
@RunWith(classOf[Parameterized])
class JoinWithEdgesITCase(mode: MultipleProgramsTestBase.TestExecutionMode) extends
  MultipleProgramsTestBase(mode) {

  /** Builds the Long/Long/Long test graph shared by every test case. */
  private def createTestGraph(env: ExecutionEnvironment): Graph[Long, Long, Long] =
    Graph.fromDataSet(TestGraphUtils.getLongLongVertexData(env),
      TestGraphUtils.getLongLongEdgeData(env), env)

  /** Collects the result graph's edges and compares them, as tuples, to `expected`. */
  private def verifyEdges(result: Graph[Long, Long, Long], expected: String): Unit = {
    val res = result.getEdges.collect().toList
    TestBaseUtils.compareResultAsTuples(res.asJava, expected)
  }

  /** joinWithEdges with an EdgeJoinFunction instance: each edge joins its own
   * (src, trg, value) tuple, so every value doubles. */
  @Test
  @throws(classOf[Exception])
  def testWithEdgesInputDataset: Unit = {
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    val graph = createTestGraph(env)
    val result = graph.joinWithEdges(graph.getEdges.map(new
      EdgeToTuple3Map[Long, Long]), new AddValuesMapper)
    verifyEdges(result, "1,2,24\\n" + "1,3,26\\n" + "2,3,46\\n" + "3,4,68\\n" + "3,5,70\\n" +
      "4,5," + "90\\n" + "5,1,102\\n")
  }

  /** Same join expressed with the Scala lambda ("sugar") overload. */
  @Test
  @throws(classOf[Exception])
  def testWithEdgesInputDatasetSugar: Unit = {
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    val graph = createTestGraph(env)
    val result = graph.joinWithEdges(graph.getEdges.map(new
      EdgeToTuple3Map[Long, Long]), (originalValue: Long, tupleValue: Long) =>
      originalValue + tupleValue)
    verifyEdges(result, "1,2,24\\n" + "1,3,26\\n" + "2,3,46\\n" + "3,4,68\\n" + "3,5,70\\n" +
      "4,5," + "90\\n" + "5,1,102\\n")
  }

  /** joinWithEdgesOnSource: tuples are keyed by source id only, so each source's
   * first edge value is added to every edge sharing that source. */
  @Test
  @throws(classOf[Exception])
  def testWithEdgesOnSource: Unit = {
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    val graph = createTestGraph(env)
    val result = graph.joinWithEdgesOnSource[Long](graph.getEdges
      .map(new ProjectSourceAndValueMapper), (originalValue: Long, tupleValue: Long) =>
      originalValue + tupleValue)
    verifyEdges(result, "1,2,24\\n" + "1,3,25\\n" + "2,3,46\\n" + "3,4,68\\n" + "3,5,69\\n" +
      "4,5," + "90\\n" + "5,1,102\\n")
  }

  /** Same source-keyed join via the lambda overload. */
  @Test
  @throws(classOf[Exception])
  def testWithEdgesOnSourceSugar: Unit = {
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    val graph = createTestGraph(env)
    val result = graph.joinWithEdgesOnSource[Long](graph.getEdges
      .map(new ProjectSourceAndValueMapper), (originalValue: Long, tupleValue: Long) =>
      originalValue + tupleValue)
    verifyEdges(result, "1,2,24\\n" + "1,3,25\\n" + "2,3,46\\n" + "3,4,68\\n" + "3,5,69\\n" +
      "4,5," + "90\\n" + "5,1,102\\n")
  }

  /** joinWithEdgesOnTarget: tuples are keyed by target id only. */
  @Test
  @throws(classOf[Exception])
  def testWithEdgesOnTarget: Unit = {
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    val graph = createTestGraph(env)
    val result = graph.joinWithEdgesOnTarget[Long](graph.getEdges
      .map(new ProjectTargetAndValueMapper), (originalValue: Long, tupleValue: Long) =>
      originalValue + tupleValue)
    verifyEdges(result, "1,2,24\\n" + "1,3,26\\n" + "2,3,36\\n" + "3,4,68\\n" + "3,5,70\\n" +
      "4,5," + "80\\n" + "5,1,102\\n")
  }

  /** Same target-keyed join via the lambda overload. */
  @Test
  @throws(classOf[Exception])
  def testWithEdgesOnTargetSugar: Unit = {
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
    val graph = createTestGraph(env)
    val result = graph.joinWithEdgesOnTarget[Long](graph.getEdges
      .map(new ProjectTargetAndValueMapper), (originalValue: Long, tupleValue: Long) =>
      originalValue + tupleValue)
    verifyEdges(result, "1,2,24\\n" + "1,3,26\\n" + "2,3,36\\n" + "3,4,68\\n" + "3,5,70\\n" +
      "4,5," + "80\\n" + "5,1,102\\n")
  }

  /** Join function adding the joined tuple's value to the edge value. */
  final class AddValuesMapper extends EdgeJoinFunction[Long, Long] {
    @throws(classOf[Exception])
    def edgeJoin(edgeValue: Long, inputValue: Long): Long = {
      edgeValue + inputValue
    }
  }

  /** Projects an edge to a (sourceId, value) pair for source-keyed joins. */
  final class ProjectSourceAndValueMapper extends MapFunction[Edge[Long, Long], (Long, Long)] {
    @throws(classOf[Exception])
    def map(edge: Edge[Long, Long]): (Long, Long) = {
      (edge.getSource, edge.getValue)
    }
  }

  /** Projects an edge to a (targetId, value) pair for target-keyed joins. */
  final class ProjectTargetAndValueMapper extends MapFunction[Edge[Long, Long], (Long, Long)] {
    @throws(classOf[Exception])
    def map(edge: Edge[Long, Long]): (Long, Long) = {
      (edge.getTarget, edge.getValue)
    }
  }
}
| hequn8128/flink | flink-libraries/flink-gelly-scala/src/test/scala/org/apache/flink/graph/scala/test/operations/JoinWithEdgesITCase.scala | Scala | apache-2.0 | 6,739 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.data.jdbc.meta
import java.sql.Types
import java.sql.Types.{BIGINT, BOOLEAN, INTEGER, SMALLINT}
import org.beangle.data.jdbc.engine.{Engines, PostgreSQL}
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec
/** Verifies the PostgreSQL engine's mapping of JDBC NUMERIC/DECIMAL types:
 * precision clamping and narrowing of small decimals to native types. */
class PostgreSQLEngineTest extends AnyFlatSpec with Matchers {

  "big number (size >=65535) in postgresql " should " trip to less 1000 size" in {
    val engine = Engines.PostgreSQL
    val scale = 0
    val precision = 65535
    // Oversized NUMERIC precision is clamped to the engine's cap of 1000.
    // (Was `... equals "..." should be(true)` — rewritten with shouldEqual for
    // consistent style and informative failure messages.)
    engine.toType(Types.NUMERIC, precision, scale).name shouldEqual "numeric(1000, 0)"
    // Small zero-scale DECIMALs narrow to the smallest native type that fits.
    engine.toType(Types.DECIMAL, 1, 0).name shouldEqual "boolean"
    //engine.toType(Types.DECIMAL,1,0) shouldEqual SqlType(BOOLEAN, "boolean", 1)
    engine.toType(Types.DECIMAL, 5, 0) shouldEqual SqlType(SMALLINT, "smallint", 5)
    engine.toType(Types.DECIMAL, 10, 0) shouldEqual SqlType(INTEGER, "integer", 10)
    engine.toType(Types.DECIMAL, 19, 0) shouldEqual SqlType(BIGINT, "bigint", 19)
  }
}
| beangle/data | jdbc/src/test/scala/org/beangle/data/jdbc/meta/PostgreSQLEngineTest.scala | Scala | lgpl-3.0 | 1,712 |
package com.nikolastojiljkovic.quilltrait
import io.getquill.{ PostgresAsyncContext, PostgresEscape }
/** Test fixtures for the PostgreSQL async backend. */
package object postgres {
  // Shared Quill context for the test suite: async PostgreSQL dialect with
  // PostgresEscape naming, loading config from the "testPostgresDB" entry,
  // plus annotated-trait mapping support mixed in.
  object testContext extends PostgresAsyncContext[PostgresEscape]("testPostgresDB") with AnnotatedTraitSupport
}
| nstojiljkovic/quill-trait | quill-trait-core/jvm/src/test/scala/com/nikolastojiljkovic/quilltrait/postgres/package.scala | Scala | apache-2.0 | 243 |
package org.joda.time.format
import java.util.Collection
import java.util.HashSet
import org.joda.time.DateTimeFieldType
object ISODateTimeFormat {
/** Creates a formatter that outputs/parses exactly the given fields, laid out per
 * ISO 8601.
 *
 * On return, `fields` is mutated (when the collection supports removal) so it
 * retains only the field types that could NOT be represented by the formatter.
 *
 * @param fields    the calendar fields to include; must be non-null and non-empty
 * @param extended  true for the extended format ('-'/':' separators), false for basic
 * @param strictISO true to reject layouts that are not strictly valid ISO 8601
 * @throws IllegalArgumentException if fields is null/empty or no format exists
 */
def forFields(fields: Collection[DateTimeFieldType],
              extended: Boolean,
              strictISO: Boolean): DateTimeFormatter = {
  if (fields == null || fields.size == 0) {
    throw new IllegalArgumentException(
      "The fields must not be null or empty")
  }
  // Work on a copy: the dateBy* / time helpers consume (remove) fields as they
  // append the corresponding printer/parsers to the builder.
  val workingFields = new HashSet[DateTimeFieldType](fields)
  val inputSize = workingFields.size
  var reducedPrec = false
  val bld = new DateTimeFormatterBuilder()
  // Pick the date layout family — calendar (year-month-day), ordinal
  // (year-dayOfYear) or week (weekyear-week-dayOfWeek) — from the fields present.
  if (workingFields.contains(DateTimeFieldType.monthOfYear())) {
    reducedPrec = dateByMonth(bld, workingFields, extended, strictISO)
  } else if (workingFields.contains(DateTimeFieldType.dayOfYear())) {
    reducedPrec = dateByOrdinal(bld, workingFields, extended, strictISO)
  } else if (workingFields.contains(DateTimeFieldType.weekOfWeekyear())) {
    reducedPrec = dateByWeek(bld, workingFields, extended, strictISO)
  } else if (workingFields.contains(DateTimeFieldType.dayOfMonth())) {
    reducedPrec = dateByMonth(bld, workingFields, extended, strictISO)
  } else if (workingFields.contains(DateTimeFieldType.dayOfWeek())) {
    reducedPrec = dateByWeek(bld, workingFields, extended, strictISO)
  } else if (workingFields.remove(DateTimeFieldType.year())) {
    bld.append(Constants.ye)
    reducedPrec = true
  } else if (workingFields.remove(DateTimeFieldType.weekyear())) {
    bld.append(Constants.we)
    reducedPrec = true
  }
  // True when at least one date field was consumed above.
  val datePresent = workingFields.size < inputSize
  time(bld, workingFields, extended, strictISO, reducedPrec, datePresent)
  if (bld.canBuildFormatter() == false) {
    throw new IllegalArgumentException(
      "No valid format for fields: " + fields)
  }
  try {
    // Leave only the unused fields in the caller's collection; immutable
    // collections throw UnsupportedOperationException, which is tolerated.
    fields.retainAll(workingFields)
  } catch {
    case ex: UnsupportedOperationException =>
  }
  bld.toFormatter()
}
/** Appends a calendar-date layout (year, month, day-of-month) for whichever of
 * those fields are present, consuming them from `fields`.
 *
 * Missing leading components are replaced by '-' placeholders (e.g. "--MM-dd");
 * a missing trailing component reduces precision instead.
 *
 * @return true when the emitted date has reduced precision (a trailing field
 *         was absent), which strict ISO mode later rejects for times
 */
private def dateByMonth(bld: DateTimeFormatterBuilder,
                        fields: Collection[DateTimeFieldType],
                        extended: Boolean,
                        strictISO: Boolean): Boolean = {
  var reducedPrec = false
  if (fields.remove(DateTimeFieldType.year())) {
    bld.append(Constants.ye)
    if (fields.remove(DateTimeFieldType.monthOfYear())) {
      if (fields.remove(DateTimeFieldType.dayOfMonth())) {
        // Full yyyy-MM-dd (separators only in extended mode).
        appendSeparator(bld, extended)
        bld.appendMonthOfYear(2)
        appendSeparator(bld, extended)
        bld.appendDayOfMonth(2)
      } else {
        // yyyy-MM: day omitted => reduced precision.
        bld.appendLiteral('-')
        bld.appendMonthOfYear(2)
        reducedPrec = true
      }
    } else {
      if (fields.remove(DateTimeFieldType.dayOfMonth())) {
        // yyyy--dd: month gap marked with '-'; not valid strict ISO.
        checkNotStrictISO(fields, strictISO)
        bld.appendLiteral('-')
        bld.appendLiteral('-')
        bld.appendDayOfMonth(2)
      } else {
        reducedPrec = true
      }
    }
  } else if (fields.remove(DateTimeFieldType.monthOfYear())) {
    // --MM[-dd]: year omitted.
    bld.appendLiteral('-')
    bld.appendLiteral('-')
    bld.appendMonthOfYear(2)
    if (fields.remove(DateTimeFieldType.dayOfMonth())) {
      appendSeparator(bld, extended)
      bld.appendDayOfMonth(2)
    } else {
      reducedPrec = true
    }
  } else if (fields.remove(DateTimeFieldType.dayOfMonth())) {
    // ---dd: day only.
    bld.appendLiteral('-')
    bld.appendLiteral('-')
    bld.appendLiteral('-')
    bld.appendDayOfMonth(2)
  }
  reducedPrec
}
/** Appends an ordinal-date layout (year + three-digit day-of-year), consuming
 * those fields from `fields`.
 *
 * @return true when only the year was present (reduced precision)
 */
private def dateByOrdinal(bld: DateTimeFormatterBuilder,
                          fields: Collection[DateTimeFieldType],
                          extended: Boolean,
                          strictISO: Boolean): Boolean = {
  if (fields.remove(DateTimeFieldType.year())) {
    bld.append(Constants.ye)
    if (fields.remove(DateTimeFieldType.dayOfYear())) {
      // yyyy-DDD (separator only in extended mode).
      appendSeparator(bld, extended)
      bld.appendDayOfYear(3)
      false
    } else {
      // Year alone: precision reduced.
      true
    }
  } else {
    if (fields.remove(DateTimeFieldType.dayOfYear())) {
      // -DDD: day-of-year without a year.
      bld.appendLiteral('-')
      bld.appendDayOfYear(3)
    }
    false
  }
}
/** Appends a week-date layout (weekyear, 'W'+week, day-of-week) for whichever of
 * those fields are present, consuming them from `fields`.
 *
 * Missing leading components become '-' placeholders; a missing trailing
 * component reduces precision instead.
 *
 * @return true when the emitted date has reduced precision
 */
private def dateByWeek(bld: DateTimeFormatterBuilder,
                       fields: Collection[DateTimeFieldType],
                       extended: Boolean,
                       strictISO: Boolean): Boolean = {
  var reducedPrec = false
  if (fields.remove(DateTimeFieldType.weekyear())) {
    bld.append(Constants.we)
    if (fields.remove(DateTimeFieldType.weekOfWeekyear())) {
      // xxxx-'W'ww[-e] (separators only in extended mode).
      appendSeparator(bld, extended)
      bld.appendLiteral('W')
      bld.appendWeekOfWeekyear(2)
      if (fields.remove(DateTimeFieldType.dayOfWeek())) {
        appendSeparator(bld, extended)
        bld.appendDayOfWeek(1)
      } else {
        reducedPrec = true
      }
    } else {
      if (fields.remove(DateTimeFieldType.dayOfWeek())) {
        // xxxx-'W'-e: week gap marked with '-'; not valid strict ISO.
        checkNotStrictISO(fields, strictISO)
        appendSeparator(bld, extended)
        bld.appendLiteral('W')
        bld.appendLiteral('-')
        bld.appendDayOfWeek(1)
      } else {
        reducedPrec = true
      }
    }
  } else if (fields.remove(DateTimeFieldType.weekOfWeekyear())) {
    // -'W'ww[-e]: weekyear omitted.
    bld.appendLiteral('-')
    bld.appendLiteral('W')
    bld.appendWeekOfWeekyear(2)
    if (fields.remove(DateTimeFieldType.dayOfWeek())) {
      appendSeparator(bld, extended)
      bld.appendDayOfWeek(1)
    } else {
      reducedPrec = true
    }
  } else if (fields.remove(DateTimeFieldType.dayOfWeek())) {
    // -'W'-e: day-of-week only.
    bld.appendLiteral('-')
    bld.appendLiteral('W')
    bld.appendLiteral('-')
    bld.appendDayOfWeek(1)
  }
  reducedPrec
}
/** Appends the time portion (hour, minute, second, millis) for whichever of those
 * fields are present, consuming them from `fields`. Missing leading components
 * become '-' placeholders; strict ISO mode rejects truncated/gapped combinations.
 *
 * @param reducedPrec whether the date portion was emitted with reduced precision
 * @param datePresent whether any date field preceded the time (adds the 'T')
 */
private def time(bld: DateTimeFormatterBuilder,
                 fields: Collection[DateTimeFieldType],
                 extended: Boolean,
                 strictISO: Boolean,
                 reducedPrec: Boolean,
                 datePresent: Boolean) {
  val hour = fields.remove(DateTimeFieldType.hourOfDay())
  val minute = fields.remove(DateTimeFieldType.minuteOfHour())
  val second = fields.remove(DateTimeFieldType.secondOfMinute())
  val milli = fields.remove(DateTimeFieldType.millisOfSecond())
  if (!hour && !minute && !second && !milli) {
    return
  }
  // NOTE(review): after the early return above, this condition is always true;
  // kept as-is to mirror the original Joda-Time structure.
  if (hour || minute || second || milli) {
    if (strictISO && reducedPrec) {
      throw new IllegalArgumentException(
        "No valid ISO8601 format for fields because Date was reduced precision: " +
          fields)
    }
    if (datePresent) {
      bld.appendLiteral('T')
    }
  }
  // Validation only: the empty then-branches enumerate the ACCEPTED field
  // combinations; everything else falls through to the strict-ISO checks.
  if (hour && minute && second || (hour && !second && !milli)) {} else {
    if (strictISO && datePresent) {
      throw new IllegalArgumentException(
        "No valid ISO8601 format for fields because Time was truncated: " +
          fields)
    }
    if (!hour && (minute && second || (minute && !milli) || second)) {} else {
      if (strictISO) {
        throw new IllegalArgumentException(
          "No valid ISO8601 format for fields: " + fields)
      }
    }
  }
  // Emit each component, or a '-' placeholder when a later component follows
  // a missing earlier one; ':' separators only in extended mode.
  if (hour) {
    bld.appendHourOfDay(2)
  } else if (minute || second || milli) {
    bld.appendLiteral('-')
  }
  if (extended && hour && minute) {
    bld.appendLiteral(':')
  }
  if (minute) {
    bld.appendMinuteOfHour(2)
  } else if (second || milli) {
    bld.appendLiteral('-')
  }
  if (extended && minute && second) {
    bld.appendLiteral(':')
  }
  if (second) {
    bld.appendSecondOfMinute(2)
  } else if (milli) {
    bld.appendLiteral('-')
  }
  if (milli) {
    bld.appendLiteral('.')
    bld.appendMillisOfSecond(3)
  }
}
/** Fails fast when a field combination outside strict ISO 8601 is requested
 * while strict mode is enabled; a no-op otherwise. */
private def checkNotStrictISO(fields: Collection[DateTimeFieldType],
                              strictISO: Boolean) {
  if (strictISO)
    throw new IllegalArgumentException(
      "No valid ISO8601 format for fields: " + fields)
}
/** Writes the '-' date separator; emitted only in the extended (separated) format. */
private def appendSeparator(bld: DateTimeFormatterBuilder,
                            extended: Boolean) {
  if (extended) bld.appendLiteral('-')
}
// ---------------------------------------------------------------------------
// Public factory accessors. Each returns one of the pre-built formatters
// cached in the private Constants holder.
// ---------------------------------------------------------------------------

// Lenient parsers: accept any valid ISO 8601 variant of the named element.
def dateParser(): DateTimeFormatter = Constants.dp
def localDateParser(): DateTimeFormatter = Constants.ldp
def dateElementParser(): DateTimeFormatter = Constants.dpe
def timeParser(): DateTimeFormatter = Constants.tp
def localTimeParser(): DateTimeFormatter = Constants.ltp
def timeElementParser(): DateTimeFormatter = Constants.tpe
def dateTimeParser(): DateTimeFormatter = Constants.dtp
def dateOptionalTimeParser(): DateTimeFormatter = Constants.dotp
def localDateOptionalTimeParser(): DateTimeFormatter = Constants.ldotp

// Extended-format printers (with '-' / ':' separators).
def date(): DateTimeFormatter = yearMonthDay()
def time(): DateTimeFormatter = Constants.t
def timeNoMillis(): DateTimeFormatter = Constants.tx
def tTime(): DateTimeFormatter = Constants.tt
def tTimeNoMillis(): DateTimeFormatter = Constants.ttx
def dateTime(): DateTimeFormatter = Constants.dt
def dateTimeNoMillis(): DateTimeFormatter = Constants.dtx
def ordinalDate(): DateTimeFormatter = Constants.od
def ordinalDateTime(): DateTimeFormatter = Constants.odt
def ordinalDateTimeNoMillis(): DateTimeFormatter = Constants.odtx
def weekDate(): DateTimeFormatter = Constants.wwd
def weekDateTime(): DateTimeFormatter = Constants.wdt
def weekDateTimeNoMillis(): DateTimeFormatter = Constants.wdtx

// Basic-format printers (no separators).
def basicDate(): DateTimeFormatter = Constants.bd
def basicTime(): DateTimeFormatter = Constants.bt
def basicTimeNoMillis(): DateTimeFormatter = Constants.btx
def basicTTime(): DateTimeFormatter = Constants.btt
def basicTTimeNoMillis(): DateTimeFormatter = Constants.bttx
def basicDateTime(): DateTimeFormatter = Constants.bdt
def basicDateTimeNoMillis(): DateTimeFormatter = Constants.bdtx
def basicOrdinalDate(): DateTimeFormatter = Constants.bod
def basicOrdinalDateTime(): DateTimeFormatter = Constants.bodt
def basicOrdinalDateTimeNoMillis(): DateTimeFormatter = Constants.bodtx
def basicWeekDate(): DateTimeFormatter = Constants.bwd
def basicWeekDateTime(): DateTimeFormatter = Constants.bwdt
def basicWeekDateTimeNoMillis(): DateTimeFormatter = Constants.bwdtx

// Partial-date/time element formatters.
def year(): DateTimeFormatter = Constants.ye
def yearMonth(): DateTimeFormatter = Constants.ym
def yearMonthDay(): DateTimeFormatter = Constants.ymd
def weekyear(): DateTimeFormatter = Constants.we
def weekyearWeek(): DateTimeFormatter = Constants.ww
def weekyearWeekDay(): DateTimeFormatter = Constants.wwd
def hour(): DateTimeFormatter = Constants.hde
def hourMinute(): DateTimeFormatter = Constants.hm
def hourMinuteSecond(): DateTimeFormatter = Constants.hms
def hourMinuteSecondMillis(): DateTimeFormatter = Constants.hmsl
def hourMinuteSecondFraction(): DateTimeFormatter = Constants.hmsf
def dateHour(): DateTimeFormatter = Constants.dh
def dateHourMinute(): DateTimeFormatter = Constants.dhm
def dateHourMinuteSecond(): DateTimeFormatter = Constants.dhms
def dateHourMinuteSecondMillis(): DateTimeFormatter = Constants.dhmsl
def dateHourMinuteSecondFraction(): DateTimeFormatter = Constants.dhmsf
private object Constants {
val ye: DateTimeFormatter = yearElement()
val mye: DateTimeFormatter = monthElement()
val dme: DateTimeFormatter = dayOfMonthElement()
val we: DateTimeFormatter = weekyearElement()
val wwe: DateTimeFormatter = weekElement()
val dwe: DateTimeFormatter = dayOfWeekElement()
val dye: DateTimeFormatter = dayOfYearElement()
val hde: DateTimeFormatter = hourElement()
val mhe: DateTimeFormatter = minuteElement()
val sme: DateTimeFormatter = secondElement()
val fse: DateTimeFormatter = fractionElement()
val ze: DateTimeFormatter = offsetElement()
val lte: DateTimeFormatter = literalTElement()
val ym: DateTimeFormatter = yearMonth()
val ymd: DateTimeFormatter = yearMonthDay()
val ww: DateTimeFormatter = weekyearWeek()
val wwd: DateTimeFormatter = weekyearWeekDay()
val hm: DateTimeFormatter = hourMinute()
val hms: DateTimeFormatter = hourMinuteSecond()
val hmsl: DateTimeFormatter = hourMinuteSecondMillis()
val hmsf: DateTimeFormatter = hourMinuteSecondFraction()
val dh: DateTimeFormatter = dateHour()
val dhm: DateTimeFormatter = dateHourMinute()
val dhms: DateTimeFormatter = dateHourMinuteSecond()
val dhmsl: DateTimeFormatter = dateHourMinuteSecondMillis()
val dhmsf: DateTimeFormatter = dateHourMinuteSecondFraction()
val t: DateTimeFormatter = time()
val tx: DateTimeFormatter = timeNoMillis()
val tt: DateTimeFormatter = tTime()
val ttx: DateTimeFormatter = tTimeNoMillis()
val dt: DateTimeFormatter = dateTime()
val dtx: DateTimeFormatter = dateTimeNoMillis()
val wdt: DateTimeFormatter = weekDateTime()
val wdtx: DateTimeFormatter = weekDateTimeNoMillis()
val od: DateTimeFormatter = ordinalDate()
val odt: DateTimeFormatter = ordinalDateTime()
val odtx: DateTimeFormatter = ordinalDateTimeNoMillis()
val bd: DateTimeFormatter = basicDate()
val bt: DateTimeFormatter = basicTime()
val btx: DateTimeFormatter = basicTimeNoMillis()
val btt: DateTimeFormatter = basicTTime()
val bttx: DateTimeFormatter = basicTTimeNoMillis()
val bdt: DateTimeFormatter = basicDateTime()
val bdtx: DateTimeFormatter = basicDateTimeNoMillis()
val bod: DateTimeFormatter = basicOrdinalDate()
val bodt: DateTimeFormatter = basicOrdinalDateTime()
val bodtx: DateTimeFormatter = basicOrdinalDateTimeNoMillis()
val bwd: DateTimeFormatter = basicWeekDate()
val bwdt: DateTimeFormatter = basicWeekDateTime()
val bwdtx: DateTimeFormatter = basicWeekDateTimeNoMillis()
val dpe: DateTimeFormatter = dateElementParser()
val tpe: DateTimeFormatter = timeElementParser()
val dp: DateTimeFormatter = dateParser()
val ldp: DateTimeFormatter = localDateParser()
val tp: DateTimeFormatter = timeParser()
val ltp: DateTimeFormatter = localTimeParser()
val dtp: DateTimeFormatter = dateTimeParser()
val dotp: DateTimeFormatter = dateOptionalTimeParser()
val ldotp: DateTimeFormatter = localDateOptionalTimeParser()
private def dateParser(): DateTimeFormatter = {
if (dp == null) {
val tOffset = new DateTimeFormatterBuilder()
.appendLiteral('T')
.append(offsetElement())
.toParser()
return new DateTimeFormatterBuilder()
.append(dateElementParser())
.appendOptional(tOffset)
.toFormatter()
}
dp
}
private def localDateParser(): DateTimeFormatter = {
if (ldp == null) {
return dateElementParser().withZoneUTC()
}
ldp
}
private def dateElementParser(): DateTimeFormatter = {
if (dpe == null) {
return new DateTimeFormatterBuilder()
.append(null,
Array(new DateTimeFormatterBuilder()
.append(yearElement())
.appendOptional(
new DateTimeFormatterBuilder()
.append(monthElement())
.appendOptional(dayOfMonthElement().getParser)
.toParser())
.toParser(),
new DateTimeFormatterBuilder()
.append(weekyearElement())
.append(weekElement())
.appendOptional(dayOfWeekElement().getParser)
.toParser(),
new DateTimeFormatterBuilder()
.append(yearElement())
.append(dayOfYearElement())
.toParser()))
.toFormatter()
}
dpe
}
private def timeParser(): DateTimeFormatter = {
if (tp == null) {
return new DateTimeFormatterBuilder()
.appendOptional(literalTElement().getParser)
.append(timeElementParser())
.appendOptional(offsetElement().getParser)
.toFormatter()
}
tp
}
private def localTimeParser(): DateTimeFormatter = {
if (ltp == null) {
return new DateTimeFormatterBuilder()
.appendOptional(literalTElement().getParser)
.append(timeElementParser())
.toFormatter()
.withZoneUTC()
}
ltp
}
private def timeElementParser(): DateTimeFormatter = {
if (tpe == null) {
val decimalPoint = new DateTimeFormatterBuilder()
.append(
null,
Array(
new DateTimeFormatterBuilder().appendLiteral('.').toParser(),
new DateTimeFormatterBuilder().appendLiteral(',').toParser()))
.toParser()
return new DateTimeFormatterBuilder()
.append(hourElement())
.append(null,
Array(new DateTimeFormatterBuilder()
.append(minuteElement())
.append(null,
Array(new DateTimeFormatterBuilder()
.append(secondElement())
.appendOptional(
new DateTimeFormatterBuilder()
.append(decimalPoint)
.appendFractionOfSecond(1, 9)
.toParser())
.toParser(),
new DateTimeFormatterBuilder()
.append(decimalPoint)
.appendFractionOfMinute(1, 9)
.toParser(),
null))
.toParser(),
new DateTimeFormatterBuilder()
.append(decimalPoint)
.appendFractionOfHour(1, 9)
.toParser(),
null))
.toFormatter()
}
tpe
}
private def dateTimeParser(): DateTimeFormatter = {
if (dtp == null) {
val time = new DateTimeFormatterBuilder()
.appendLiteral('T')
.append(timeElementParser())
.appendOptional(offsetElement().getParser)
.toParser()
return new DateTimeFormatterBuilder()
.append(null, Array(time, dateOptionalTimeParser().getParser))
.toFormatter()
}
dtp
}
private def dateOptionalTimeParser(): DateTimeFormatter = {
if (dotp == null) {
val timeOrOffset = new DateTimeFormatterBuilder()
.appendLiteral('T')
.appendOptional(timeElementParser().getParser)
.appendOptional(offsetElement().getParser)
.toParser()
return new DateTimeFormatterBuilder()
.append(dateElementParser())
.appendOptional(timeOrOffset)
.toFormatter()
}
dotp
}
private def localDateOptionalTimeParser(): DateTimeFormatter = {
if (ldotp == null) {
val time = new DateTimeFormatterBuilder()
.appendLiteral('T')
.append(timeElementParser())
.toParser()
return new DateTimeFormatterBuilder()
.append(dateElementParser())
.appendOptional(time)
.toFormatter()
.withZoneUTC()
}
ldotp
}
private def time(): DateTimeFormatter = {
if (t == null) {
return new DateTimeFormatterBuilder()
.append(hourMinuteSecondFraction())
.append(offsetElement())
.toFormatter()
}
t
}
private def timeNoMillis(): DateTimeFormatter = {
if (tx == null) {
return new DateTimeFormatterBuilder()
.append(hourMinuteSecond())
.append(offsetElement())
.toFormatter()
}
tx
}
private def tTime(): DateTimeFormatter = {
if (tt == null) {
return new DateTimeFormatterBuilder()
.append(literalTElement())
.append(time())
.toFormatter()
}
tt
}
private def tTimeNoMillis(): DateTimeFormatter = {
if (ttx == null) {
return new DateTimeFormatterBuilder()
.append(literalTElement())
.append(timeNoMillis())
.toFormatter()
}
ttx
}
private def dateTime(): DateTimeFormatter = {
if (dt == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(tTime())
.toFormatter()
}
dt
}
private def dateTimeNoMillis(): DateTimeFormatter = {
if (dtx == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(tTimeNoMillis())
.toFormatter()
}
dtx
}
private def ordinalDate(): DateTimeFormatter = {
if (od == null) {
return new DateTimeFormatterBuilder()
.append(yearElement())
.append(dayOfYearElement())
.toFormatter()
}
od
}
private def ordinalDateTime(): DateTimeFormatter = {
if (odt == null) {
return new DateTimeFormatterBuilder()
.append(ordinalDate())
.append(tTime())
.toFormatter()
}
odt
}
private def ordinalDateTimeNoMillis(): DateTimeFormatter = {
if (odtx == null) {
return new DateTimeFormatterBuilder()
.append(ordinalDate())
.append(tTimeNoMillis())
.toFormatter()
}
odtx
}
private def weekDateTime(): DateTimeFormatter = {
if (wdt == null) {
return new DateTimeFormatterBuilder()
.append(weekDate())
.append(tTime())
.toFormatter()
}
wdt
}
private def weekDateTimeNoMillis(): DateTimeFormatter = {
if (wdtx == null) {
return new DateTimeFormatterBuilder()
.append(weekDate())
.append(tTimeNoMillis())
.toFormatter()
}
wdtx
}
private def basicDate(): DateTimeFormatter = {
if (bd == null) {
return new DateTimeFormatterBuilder()
.appendYear(4, 4)
.appendFixedDecimal(DateTimeFieldType.monthOfYear(), 2)
.appendFixedDecimal(DateTimeFieldType.dayOfMonth(), 2)
.toFormatter()
}
bd
}
private def basicTime(): DateTimeFormatter = {
if (bt == null) {
return new DateTimeFormatterBuilder()
.appendFixedDecimal(DateTimeFieldType.hourOfDay(), 2)
.appendFixedDecimal(DateTimeFieldType.minuteOfHour(), 2)
.appendFixedDecimal(DateTimeFieldType.secondOfMinute(), 2)
.appendLiteral('.')
.appendFractionOfSecond(3, 9)
.appendTimeZoneOffset("Z", showSeparators = false, 2, 2)
.toFormatter()
}
bt
}
private def basicTimeNoMillis(): DateTimeFormatter = {
if (btx == null) {
return new DateTimeFormatterBuilder()
.appendFixedDecimal(DateTimeFieldType.hourOfDay(), 2)
.appendFixedDecimal(DateTimeFieldType.minuteOfHour(), 2)
.appendFixedDecimal(DateTimeFieldType.secondOfMinute(), 2)
.appendTimeZoneOffset("Z", showSeparators = false, 2, 2)
.toFormatter()
}
btx
}
private def basicTTime(): DateTimeFormatter = {
if (btt == null) {
return new DateTimeFormatterBuilder()
.append(literalTElement())
.append(basicTime())
.toFormatter()
}
btt
}
private def basicTTimeNoMillis(): DateTimeFormatter = {
if (bttx == null) {
return new DateTimeFormatterBuilder()
.append(literalTElement())
.append(basicTimeNoMillis())
.toFormatter()
}
bttx
}
private def basicDateTime(): DateTimeFormatter = {
if (bdt == null) {
return new DateTimeFormatterBuilder()
.append(basicDate())
.append(basicTTime())
.toFormatter()
}
bdt
}
private def basicDateTimeNoMillis(): DateTimeFormatter = {
if (bdtx == null) {
return new DateTimeFormatterBuilder()
.append(basicDate())
.append(basicTTimeNoMillis())
.toFormatter()
}
bdtx
}
private def basicOrdinalDate(): DateTimeFormatter = {
if (bod == null) {
return new DateTimeFormatterBuilder()
.appendYear(4, 4)
.appendFixedDecimal(DateTimeFieldType.dayOfYear(), 3)
.toFormatter()
}
bod
}
private def basicOrdinalDateTime(): DateTimeFormatter = {
if (bodt == null) {
return new DateTimeFormatterBuilder()
.append(basicOrdinalDate())
.append(basicTTime())
.toFormatter()
}
bodt
}
private def basicOrdinalDateTimeNoMillis(): DateTimeFormatter = {
if (bodtx == null) {
return new DateTimeFormatterBuilder()
.append(basicOrdinalDate())
.append(basicTTimeNoMillis())
.toFormatter()
}
bodtx
}
private def basicWeekDate(): DateTimeFormatter = {
if (bwd == null) {
return new DateTimeFormatterBuilder()
.appendWeekyear(4, 4)
.appendLiteral('W')
.appendFixedDecimal(DateTimeFieldType.weekOfWeekyear(), 2)
.appendFixedDecimal(DateTimeFieldType.dayOfWeek(), 1)
.toFormatter()
}
bwd
}
private def basicWeekDateTime(): DateTimeFormatter = {
if (bwdt == null) {
return new DateTimeFormatterBuilder()
.append(basicWeekDate())
.append(basicTTime())
.toFormatter()
}
bwdt
}
private def basicWeekDateTimeNoMillis(): DateTimeFormatter = {
if (bwdtx == null) {
return new DateTimeFormatterBuilder()
.append(basicWeekDate())
.append(basicTTimeNoMillis())
.toFormatter()
}
bwdtx
}
private def yearMonth(): DateTimeFormatter = {
if (ym == null) {
return new DateTimeFormatterBuilder()
.append(yearElement())
.append(monthElement())
.toFormatter()
}
ym
}
private def yearMonthDay(): DateTimeFormatter = {
if (ymd == null) {
return new DateTimeFormatterBuilder()
.append(yearElement())
.append(monthElement())
.append(dayOfMonthElement())
.toFormatter()
}
ymd
}
private def weekyearWeek(): DateTimeFormatter = {
if (ww == null) {
return new DateTimeFormatterBuilder()
.append(weekyearElement())
.append(weekElement())
.toFormatter()
}
ww
}
private def weekyearWeekDay(): DateTimeFormatter = {
if (wwd == null) {
return new DateTimeFormatterBuilder()
.append(weekyearElement())
.append(weekElement())
.append(dayOfWeekElement())
.toFormatter()
}
wwd
}
private def hourMinute(): DateTimeFormatter = {
if (hm == null) {
return new DateTimeFormatterBuilder()
.append(hourElement())
.append(minuteElement())
.toFormatter()
}
hm
}
private def hourMinuteSecond(): DateTimeFormatter = {
if (hms == null) {
return new DateTimeFormatterBuilder()
.append(hourElement())
.append(minuteElement())
.append(secondElement())
.toFormatter()
}
hms
}
private def hourMinuteSecondMillis(): DateTimeFormatter = {
if (hmsl == null) {
return new DateTimeFormatterBuilder()
.append(hourElement())
.append(minuteElement())
.append(secondElement())
.appendLiteral('.')
.appendFractionOfSecond(3, 3)
.toFormatter()
}
hmsl
}
private def hourMinuteSecondFraction(): DateTimeFormatter = {
if (hmsf == null) {
return new DateTimeFormatterBuilder()
.append(hourElement())
.append(minuteElement())
.append(secondElement())
.append(fractionElement())
.toFormatter()
}
hmsf
}
private def dateHour(): DateTimeFormatter = {
if (dh == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(literalTElement())
.append(hour())
.toFormatter()
}
dh
}
private def dateHourMinute(): DateTimeFormatter = {
if (dhm == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(literalTElement())
.append(hourMinute())
.toFormatter()
}
dhm
}
private def dateHourMinuteSecond(): DateTimeFormatter = {
if (dhms == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(literalTElement())
.append(hourMinuteSecond())
.toFormatter()
}
dhms
}
private def dateHourMinuteSecondMillis(): DateTimeFormatter = {
if (dhmsl == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(literalTElement())
.append(hourMinuteSecondMillis())
.toFormatter()
}
dhmsl
}
private def dateHourMinuteSecondFraction(): DateTimeFormatter = {
if (dhmsf == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(literalTElement())
.append(hourMinuteSecondFraction())
.toFormatter()
}
dhmsf
}
private def yearElement(): DateTimeFormatter = {
if (ye == null) {
return new DateTimeFormatterBuilder().appendYear(4, 9).toFormatter()
}
ye
}
private def monthElement(): DateTimeFormatter = {
if (mye == null) {
return new DateTimeFormatterBuilder()
.appendLiteral('-')
.appendMonthOfYear(2)
.toFormatter()
}
mye
}
private def dayOfMonthElement(): DateTimeFormatter = {
if (dme == null) {
return new DateTimeFormatterBuilder()
.appendLiteral('-')
.appendDayOfMonth(2)
.toFormatter()
}
dme
}
private def weekyearElement(): DateTimeFormatter = {
if (we == null) {
return new DateTimeFormatterBuilder()
.appendWeekyear(4, 9)
.toFormatter()
}
we
}
private def weekElement(): DateTimeFormatter = {
if (wwe == null) {
return new DateTimeFormatterBuilder()
.appendLiteral("-W")
.appendWeekOfWeekyear(2)
.toFormatter()
}
wwe
}
private def dayOfWeekElement(): DateTimeFormatter = {
if (dwe == null) {
return new DateTimeFormatterBuilder()
.appendLiteral('-')
.appendDayOfWeek(1)
.toFormatter()
}
dwe
}
private def dayOfYearElement(): DateTimeFormatter = {
if (dye == null) {
return new DateTimeFormatterBuilder()
.appendLiteral('-')
.appendDayOfYear(3)
.toFormatter()
}
dye
}
private def literalTElement(): DateTimeFormatter = {
if (lte == null) {
return new DateTimeFormatterBuilder().appendLiteral('T').toFormatter()
}
lte
}
private def hourElement(): DateTimeFormatter = {
if (hde == null) {
return new DateTimeFormatterBuilder().appendHourOfDay(2).toFormatter()
}
hde
}
private def minuteElement(): DateTimeFormatter = {
if (mhe == null) {
return new DateTimeFormatterBuilder()
.appendLiteral(':')
.appendMinuteOfHour(2)
.toFormatter()
}
mhe
}
private def secondElement(): DateTimeFormatter = {
if (sme == null) {
return new DateTimeFormatterBuilder()
.appendLiteral(':')
.appendSecondOfMinute(2)
.toFormatter()
}
sme
}
private def fractionElement(): DateTimeFormatter = {
if (fse == null) {
return new DateTimeFormatterBuilder()
.appendLiteral('.')
.appendFractionOfSecond(3, 9)
.toFormatter()
}
fse
}
private def offsetElement(): DateTimeFormatter = {
if (ze == null) {
return new DateTimeFormatterBuilder()
.appendTimeZoneOffset("Z", showSeparators = true, 2, 4)
.toFormatter()
}
ze
}
}
}
| mdedetrich/soda-time | jvm/src/main/scala/org/joda/time/format/ISODateTimeFormat.scala | Scala | bsd-2-clause | 32,926 |
package com.oni.udash.styles.partials
import com.oni.udash.styles.constants.StyleConstants
import com.oni.udash.styles.fonts.{FontWeight, UdashFonts}
import com.oni.udash.styles.utils.{MediaQueries, StyleUtils}
import scala.language.postfixOps
import scalacss.DevDefaults._
/** Inline scalacss styles for the site footer: container, logo, link column,
  * copyright line, and the AVSystem link hover/visited treatment. */
object FooterStyles extends StyleSheet.Inline {
  import dsl._
  // Footer bar: black, fixed height; on phones it grows to fit content.
  val footer = style(
    backgroundColor.black,
    height(StyleConstants.Sizes.FooterHeight px),
    fontSize(1.2 rem),
    color.white,
    MediaQueries.phone(
      style(
        height.auto,
        padding(2 rem, `0`)
      )
    )
  )
  // Inner wrapper: vertically centered; centering transform disabled on phones.
  val footerInner = style(
    StyleUtils.relativeMiddle,
    MediaQueries.phone(
      style(
        top.auto,
        transform := "none"
      )
    )
  )
  // Logo image, inline with the link column.
  val footerLogo = style(
    display.inlineBlock,
    verticalAlign.middle,
    width(50 px),
    marginRight(25 px)
  )
  val footerLinks = style(
    display.inlineBlock,
    verticalAlign.middle
  )
  // "More" heading above the links.
  val footerMore = style(
    UdashFonts.acumin(FontWeight.SemiBold),
    marginBottom(1.5 rem),
    fontSize(2.2 rem)
  )
  // Copyright pinned bottom-right; flows inline on tablet portrait.
  val footerCopyrights = style(
    position.absolute,
    right(`0`),
    bottom(`0`),
    fontSize.inherit,
    MediaQueries.tabletPortrait(
      style(
        position.relative,
        textAlign.right
      )
    )
  )
  // Underlined link that turns yellow on hover (also when already visited).
  val footerAvsystemLink = style(
    StyleUtils.transition(),
    color.inherit,
    textDecoration := "underline",
    &.hover (
      color(StyleConstants.Colors.Yellow)
    ),
    &.visited (
      color.inherit,
      &.hover (
        color(StyleConstants.Colors.Yellow)
      )
    )
  )
}
package uk.co.sprily
package btf.web
package controllers
import play.api._
import play.api.mvc._
object Router extends Controller {
  /** Serves a generated JavaScript reverse router named `jsRoutes`, exposing
    * the socket, logging, and config endpoints to client-side code. The
    * response is delivered with a JavaScript content type.
    */
  def javascriptRoutes = Action { implicit request =>
    val jsRouter = Routes.javascriptRouter("jsRoutes")(
      routes.javascript.Application.socket,
      routes.javascript.Logging.log,
      routes.javascript.Config.get
    )
    Ok(jsRouter).as("text/javascript")
  }
}
| sprily/brush-training | on-site/app/controllers/Router.scala | Scala | gpl-3.0 | 405 |
/*
* Copyright 2015 Shao Tian-Chen (Austin)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.au9ustine.puzzles.s99
/**
* Problem 01: Find the last item of a list
*
* Created by au9ustine on 4/21/15.
*/
object P01 {
  /** P01: returns the last element of `lst`.
    *
    * Written as an explicit tail-recursive traversal rather than delegating
    * to `List.last`; throws `NoSuchElementException` on an empty list, the
    * same exception type as the delegated form.
    */
  @annotation.tailrec
  def last[T](lst: List[T]): T = lst match {
    case only :: Nil => only
    case _ :: rest   => last(rest)
    case Nil         => throw new NoSuchElementException("last of empty list")
  }
}
| au9ustine/org.au9ustine.puzzles.s99 | src/main/scala/org/au9ustine/puzzles/s99/P01.scala | Scala | apache-2.0 | 784 |
package org.jetbrains.plugins.scala
package codeInspection.collections
import org.jetbrains.plugins.scala.codeInspection.InspectionBundle
/**
* Nikolay.Tropin
* 5/30/13
*/
// Each test builds a code snippet (START/END delimit the range the inspection
// should highlight), checks that the inspection fires, and where applicable
// verifies the quick-fix rewrites fold/foldLeft/`/:`-with-`&&` into forall.
class FoldTrueAndTest extends OperationsOnCollectionInspectionTest {
  val hint = InspectionBundle.message("fold.true.and.hint")
  // foldLeft(true)(_ && _) over literals -> forall(_)
  def test_1() {
    val selected = s"List(false).${START}foldLeft(true){_ && _}$END"
    check(selected)
    val text = "List(false).foldLeft(true){_ && _}"
    val result = "List(false).forall(_)"
    testFix(text, result, hint)
  }
  // `/:` operator form with a method call in the accumulator.
  def test_2() {
    val selected = s"""def a(x: String) = false
                      |List("a").$START/:(true) (_ && a(_))$END""".stripMargin
    check(selected)
    val text = """def a(x: String) = false
                 |List("a")./:(true) (_ && a(_))""".stripMargin
    val result = """def a(x: String) = false
                   |List("a").forall(a(_))""".stripMargin
    testFix(text, result, hint)
  }
  // Explicit (x, y) lambda form of fold.
  def test_3() {
    val selected = s"""def a(x: String) = false
                      |List("a").${START}fold(true) ((x,y) => x && a(y))$END""".stripMargin
    check(selected)
    val text = """def a(x: String) = false
                 |List("a").fold(true) ((x,y) => x && a(y))""".stripMargin
    val result = """def a(x: String) = false
                   |List("a").forall(y => a(y))""".stripMargin
    testFix(text, result, hint)
  }
  // Negative case: the body uses the accumulator `x`, not the element, so no fix.
  def test_4() {
    val text = """def a(x: String) = false
                 |List("a").foldLeft(true) ((x,y) => x && a(x))""".stripMargin
    checkTextHasNoErrors(text, hint, inspectionClass)
  }
  // Local mutation (var z) inside the fold body is side-effect free from the
  // caller's perspective, so the fix must still apply.
  def testWithoutSideEffect(): Unit = {
    doTest(
      s"""
        |List(0).${START}foldLeft(true) {(x, y) =>
        |  x && {
        |    var z = 1
        |    z += 1
        |    z + y % 2 == 1
        |  }
        |}$END
      """.stripMargin,
      """
        |List(0).foldLeft(true) {(x, y) =>
        |  x && {
        |    var z = 1
        |    z += 1
        |    z + y % 2 == 1
        |  }
        |}
      """.stripMargin,
      """
        |List(0).forall(y => {
        |  var z = 1
        |  z += 1
        |  z + y % 2 == 1
        |})
      """.stripMargin)
  }
  // Mutating a var captured from the enclosing scope IS a side effect, because
  // && short-circuits — converting to forall would change how often it runs.
  def testWithSideEffect(): Unit = {
    checkTextHasNoErrors(
      """
        |var q = 1
        |List(0).foldLeft(true) {(x, y) =>
        |  x && {
        |    var z = 1
        |    q += 1
        |    z + y % 2 == 1
        |  }
        |}
      """.stripMargin)
  }
  override val inspectionClass = classOf[FoldTrueAndInspection]
}
| triggerNZ/intellij-scala | test/org/jetbrains/plugins/scala/codeInspection/collections/FoldTrueAndTest.scala | Scala | apache-2.0 | 2,537 |
package collins.controllers.actions.ipaddress
import scala.concurrent.Future
import java.sql.SQLException
import play.api.data.Form
import play.api.data.Forms.tuple
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import collins.controllers.Api
import collins.controllers.SecureController
import collins.controllers.actions.AssetAction
import collins.controllers.actions.EphemeralDataHolder
import collins.controllers.actions.RequestDataHolder
import collins.controllers.actions.SecureAction
import collins.controllers.validators.ParamValidation
import collins.models.Asset
import collins.models.IpAddresses
import collins.models.shared.IpAddressConfig
import collins.util.IpAddress
import collins.util.security.SecuritySpecification
import collins.validation.StringUtil
/**
 * Secure API action that updates an existing IP address record on an asset,
 * or creates one when no `old_address` matches. Form fields (`old_address`,
 * `address`, `gateway`, `netmask`, `pool`) are all optional strings; missing
 * fields fall back to the values of the matched existing record.
 */
case class UpdateAction(
  assetTag: String,
  spec: SecuritySpecification,
  handler: SecureController
) extends SecureAction(spec, handler) with AssetAction with AddressActionHelper with ParamValidation {
  // Carries the validated form values (addresses already converted to Long).
  case class ActionDataHolder(
    asset: Asset, oldAddress: Option[Long], address: Option[Long],
    gateway: Option[Long], netmask: Option[Long], pool: Option[String]
  ) extends RequestDataHolder {
    // Merge the submitted values over an existing record; with no existing
    // record, all of address/gateway/netmask are required to build a new one.
    def merge(ipAddress: Option[IpAddresses]): IpAddresses = ipAddress.map { ip =>
      ip.copy(
        address = address.getOrElse(ip.address),
        gateway = gateway.getOrElse(ip.gateway),
        netmask = netmask.getOrElse(ip.netmask),
        pool = pool.map(convertPoolName(_)).getOrElse(ip.pool)
      )
    }.getOrElse {
      if (address.isDefined && gateway.isDefined && netmask.isDefined) {
        val p = convertPoolName(pool.getOrElse(IpAddressConfig.DefaultPoolName))
        IpAddresses(asset.getId, gateway.get, address.get, netmask.get, p)
      } else {
        throw new Exception("If creating a new IP the address, gateway and netmask must be specified")
      }
    }
  }
  // Optional text param of at least 7 chars (shortest dotted IPv4 is "0.0.0.0").
  val optionalIpAddress = validatedOptionalText(7)
  type DataForm = Tuple5[Option[String],Option[String],Option[String],Option[String],Option[String]]
  val dataForm = Form(tuple(
    "old_address" -> optionalIpAddress,
    "address" -> optionalIpAddress,
    "gateway" -> optionalIpAddress,
    "netmask" -> optionalIpAddress,
    "pool" -> validatedOptionalText(1)
  ))
  // Bind and normalize the form; any failure becomes a 400 with a field hint.
  override def validate(): Validation = withValidAsset(assetTag) { asset =>
    dataForm.bindFromRequest()(request).fold(
      e => Left(RequestDataHolder.error400(fieldError(e))),
      f => normalizeForm(asset, f)
    )
  }
  // Look up the record matching old_address (0 matches nothing when absent),
  // merge, validate against pool config, then persist.
  override def execute(rd: RequestDataHolder) = Future {
    rd match {
      case adh@ActionDataHolder(asset, old, address, gateway, netmask, pool) =>
        val addressInfo = IpAddresses.findAllByAsset(asset)
          .find(_.address == old.getOrElse(0L))
        val newAddress = adh.merge(addressInfo)
        validateUpdatedAddress(newAddress) match {
          case Left(err) => handleError(err)
          case Right(_) =>
            try {
              val (status, success) = update(asset, newAddress)
              Api.statusResponse(success, status)
            } catch {
              // Unique-constraint violation on the address column.
              case e: SQLException =>
                handleError(RequestDataHolder.error409("Possible duplicate IP address"))
              // NOTE(review): catches Throwable, broader than NonFatal — fatal
              // JVM errors are also converted into a 500 here.
              case e: Throwable =>
                handleError(
                  RequestDataHolder.error500("Unable to update address: %s".format(e.getMessage), e)
                )
            }
        }
    }
  }
  // id > 0 means the merge found an existing row -> update; otherwise create.
  protected def update(asset: Asset, address: IpAddresses) = address.id match {
    case update if update > 0 => handleUpdate(asset, address)
    case _ => handleCreate(asset, address)
  }
  // Returns (http status, success flag); notifies the tattler either way.
  protected def handleUpdate(asset: Asset, address: IpAddresses) = {
    IpAddresses.update(address) match {
      case 1 =>
        tattler.notice("Updated IP address %s".format(address.dottedAddress), asset)
        (Status.Ok, true)
      case _ =>
        tattler.warning("Failed to update address %s".format(
          address.dottedAddress
        ), asset)
        (Status.InternalServerError, false)
    }
  }
  // Returns (http status, success flag); 201 Created on success.
  protected def handleCreate(asset: Asset, address: IpAddresses) = {
    IpAddresses.create(address).id match {
      case fail if fail <= 0 =>
        tattler.warning("Failed to create address %s".format(
          address.dottedAddress
        ), asset)
        (Status.InternalServerError, false)
      case success =>
        tattler.notice("Created IP address %s".format(address.dottedAddress), asset)
        (Status.Created, true)
    }
  }
  /**
   * Validate a merged address. This protects against changing an existing address into a different
   * pool in an invalid IP range, or changing an address into a different pool.
   * Pool/range checks only apply when the address config is in strict mode.
   */
  protected def validateUpdatedAddress(address: IpAddresses): Validation = {
    if (!IpAddresses.AddressConfig.isDefined)
      return Left(RequestDataHolder.error500("No address pools have been setup to allocate from"))
    val config = IpAddresses.AddressConfig.get
    if (!config.strict)
      return Right(EphemeralDataHolder())
    val poolName = address.pool
    if (!config.hasPool(poolName))
      return Left(RequestDataHolder.error400("Specified pool is invalid"))
    val pool = config.pool(poolName).get
    if (!pool.isInRange(address.address))
      return Left(RequestDataHolder.error400("Specified address is not in range for pool"))
    else
      return Right(EphemeralDataHolder())
  }
  /**
   * Do some basic pre validation with the data we have available to us.
   * Rejects padded/empty values and unparseable IPs before any DB work, and
   * converts the surviving strings to Longs for ActionDataHolder.
   */
  type NormalizedForm = Either[RequestDataHolder,ActionDataHolder]
  protected def normalizeForm(asset: Asset, form: DataForm): NormalizedForm = {
    val (old,add,gate,net,pool) = form
    val seq = Seq(old,add,gate,net,pool)
    if (!IpAddresses.AddressConfig.isDefined)
      return Left(RequestDataHolder.error500("No address pools have been setup to allocate from"))
    val addressConfig = IpAddresses.AddressConfig.get
    // In strict mode a named pool must exist and, if an address was supplied,
    // contain that address.
    if (addressConfig.strict && pool.isDefined) {
      val poolName = pool.get
      if (!addressConfig.hasPool(poolName))
        return Left(RequestDataHolder.error400("Specified pool is invalid"))
      if (add.isDefined && !addressConfig.pool(poolName).get.isInRange(add.get))
        return Left(RequestDataHolder.error400("Specified address is not in range for pool"))
    }
    // Reject values that are empty or carry leading/trailing whitespace.
    seq.filter(_.isDefined).map(_.get).foreach { opt =>
      val trimmed = StringUtil.trim(opt)
      if (!trimmed.isDefined)
        return Left(RequestDataHolder.error400("Invalid (empty) value '%s'".format(opt)))
      if (trimmed.get != opt)
        return Left(RequestDataHolder.error400("Invalid (padded) value '%s'".format(opt)))
    }
    // All address-like fields (pool excluded) must parse as dotted IPs.
    Seq(old,add,gate,net).filter(_.isDefined).map(_.get).foreach { opt =>
      if (!IpAddress.toOptLong(opt).isDefined)
        return Left(RequestDataHolder.error400("'%s' is not a valid IP address".format(opt)))
    }
    Right(ActionDataHolder(
      asset, old.map(IpAddress.toLong(_)), add.map(IpAddress.toLong(_)),
      gate.map(IpAddress.toLong(_)), net.map(IpAddress.toLong(_)), pool
    ))
  }
  // Map the first failing form field to a human-readable message.
  protected def fieldError(form: Form[DataForm]): String = form match {
    case f if f.error("old_address").isDefined => "old_address not valid"
    case f if f.error("address").isDefined => "invalid address specified"
    case f if f.error("gateway").isDefined => "invalid gateway specified"
    case f if f.error("netmask").isDefined => "invalid netmask specified"
    case f if f.error("pool").isDefined => "invalid pool specified"
    case o => "An unknown error occurred"
  }
}
| funzoneq/collins | app/collins/controllers/actions/ipaddress/UpdateAction.scala | Scala | apache-2.0 | 7,549 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package docs.home.scaladsl.persistence
//#full-example
import akka.Done
import com.lightbend.lagom.scaladsl.persistence.PersistentEntity
/**
 * Persistent entity for a blog post. Two behaviors: `initial` (empty state,
 * accepts AddPost) and `postAdded` (accepts body changes and reads).
 */
final class Post extends PersistentEntity {
  override type Command = BlogCommand
  override type Event = BlogEvent
  override type State = BlogState
  override def initialState: BlogState = BlogState.empty
  // Behavior is selected from current state: empty -> initial, else postAdded.
  override def behavior: Behavior = {
    case state if state.isEmpty => initial
    case state if !state.isEmpty => postAdded
  }
  private val initial: Actions = {
    Actions()
    // Command handlers are invoked for incoming messages (commands).
    // A command handler must "return" the events to be persisted (if any).
      .onCommand[AddPost, AddPostDone] {
        case (AddPost(content), ctx, state) =>
          // Reject posts without a title; no event is persisted.
          if (content.title == null || content.title.equals("")) {
            ctx.invalidCommand("Title must be defined")
            ctx.done
          } else {
            ctx.thenPersist(PostAdded(entityId, content)) { _ =>
              // After persist is done additional side effects can be performed
              ctx.reply(AddPostDone(entityId))
            }
          }
      }
    // Event handlers are used both when persisting new events and when replaying
    // events.
      .onEvent {
        case (PostAdded(postId, content), state) =>
          BlogState(Some(content), published = false)
      }
  }
  private val postAdded: Actions = {
    Actions()
      .onCommand[ChangeBody, Done] {
        case (ChangeBody(body), ctx, state) =>
          ctx.thenPersist(BodyChanged(entityId, body))(_ => ctx.reply(Done))
      }
      .onEvent {
        case (BodyChanged(_, body), state) =>
          state.withBody(body)
      }
      // Read-only: replies with current content, persists nothing.
      // In this behavior state is non-empty, so content.get is safe.
      .onReadOnlyCommand[GetPost.type, PostContent] {
        case (GetPost, ctx, state) =>
          ctx.reply(state.content.get)
      }
  }
}
//#full-example
| lagom/lagom | docs/manual/scala/guide/cluster/code/docs/home/scaladsl/persistence/Post.scala | Scala | apache-2.0 | 1,945 |
package com.mesosphere.cosmos
import com.mesosphere.cosmos.thirdparty.marathon.model.AppId
import com.mesosphere.universe
import com.mesosphere.util.RoundTrip
import io.lemonlabs.uri.Uri
import io.circe.JsonObject
/**
 * RoundTrip builders for integration tests: each `withX` performs a setup
 * request and registers the inverse teardown so test state is restored.
 */
object RoundTrips {
  // Install a package (v1 API); teardown uninstalls by name/appId and waits
  // for Marathon deployments to settle.
  def withInstallV1(
    name: String,
    version: Option[universe.v2.model.PackageDetailsVersion] = None,
    options: Option[JsonObject] = None,
    appId: Option[AppId] = None
  ): RoundTrip[rpc.v1.model.InstallResponse] = {
    RoundTrip(
      Requests.installV1(name, version, options, appId)
    ) { ir =>
      Requests.uninstall(ir.packageName, Some(ir.appId))
      Requests.waitForDeployments()
    }
  }
  // Install a package (v2 API); optionally routed through a custom manager.
  def withInstallV2(
    name: String,
    version: Option[universe.v2.model.PackageDetailsVersion] = None,
    options: Option[JsonObject] = None,
    appId: Option[AppId] = None,
    managerId: Option[String] = None
  ): RoundTrip[rpc.v2.model.InstallResponse] = {
    RoundTrip(
      Requests.installV2(name, version, options, appId, managerId)
    ) { ir =>
      Requests.uninstall(ir.packageName, ir.appId, None, managerId)
      Requests.waitForDeployments()
    }
  }
  // Delete a repository matched by name or uri; remembers the deleted repo
  // and its list position so teardown can re-add it at the same index.
  def withDeletedRepository(
    name: Option[String] = None,
    uri: Option[Uri] = None
  ): RoundTrip[rpc.v1.model.PackageRepositoryDeleteResponse] = {
    RoundTrip.lift {
      val repos = Requests.listRepositories()
      val repo = repos.find { repo =>
        name.contains(repo.name) || uri.contains(repo.uri)
      }
      (repo, repo.map(repos.indexOf(_)))
    }.flatMap { case (repo, index) =>
      withDeletedRepository(name, uri, repo, index)
    }
  }
  // Add a repository; teardown deletes it by name.
  def withRepository(
    name: String,
    uri: Uri,
    index: Option[Int] = None
  ): RoundTrip[rpc.v1.model.PackageRepositoryAddResponse] = {
    RoundTrip(
      Requests.addRepository(name, uri, index))(_ =>
      Requests.deleteRepository(Some(name))
    )
  }
  // Teardown half of withDeletedRepository: re-adds the captured repo/index,
  // failing loudly if the repo was never found in the first place.
  private[this] def withDeletedRepository(
    name: Option[String],
    uri: Option[Uri],
    oldRepo: Option[rpc.v1.model.PackageRepository],
    oldIndex: Option[Int]
  ): RoundTrip[rpc.v1.model.PackageRepositoryDeleteResponse] = {
    RoundTrip(
      Requests.deleteRepository(name, uri)
    ) { _ =>
      val repo = oldRepo.getOrElse {
        throw new RuntimeException("Unable to restore repository")
      }
      val index = oldIndex.getOrElse(
        throw new RuntimeException("Unable to restore repository index")
      )
      Requests.addRepository(repo.name, repo.uri, Some(index))
    }
  }
}
| dcos/cosmos | cosmos-integration-tests/src/main/scala/com/mesosphere/cosmos/RoundTrips.scala | Scala | apache-2.0 | 2,478 |
package mesosphere.marathon.integration
import mesosphere.marathon.api.v2.json.V2AppDefinition
import mesosphere.marathon.api.v2.json.V2GroupUpdate
import mesosphere.marathon.integration.setup.{ WaitTestSupport, IntegrationHealthCheck, IntegrationFunSuite, SingleMarathonIntegrationTest }
import mesosphere.marathon.state.{ AppDefinition, PathId, UpgradeStrategy }
import org.scalatest._
import spray.http.DateTime
import spray.httpx.UnsuccessfulResponseException
import scala.concurrent.duration._
class GroupDeployIntegrationTest
extends IntegrationFunSuite
with SingleMarathonIntegrationTest
with Matchers
with BeforeAndAfter
with GivenWhenThen {
  // --- Group CRUD basics -------------------------------------------------
  //clean up state before running the test case
  before(cleanUp())
  test("create empty group successfully") {
    Given("A group which does not exist in marathon")
    val group = V2GroupUpdate.empty("test".toRootTestPath)
    When("The group gets created")
    val result = marathon.createGroup(group)
    Then("The group is created. A success event for this group is send.")
    result.code should be(201) //created
    val event = waitForChange(result) // value unused; the call blocks until the change event arrives
  }
  test("update empty group successfully") {
    Given("An existing group")
    val name = "test2".toRootTestPath
    val group = V2GroupUpdate.empty(name)
    val dependencies = Set("/test".toTestPath)
    waitForChange(marathon.createGroup(group))
    When("The group gets updated")
    waitForChange(marathon.updateGroup(name, group.copy(dependencies = Some(dependencies))))
    Then("The group is updated")
    val result = marathon.group("test2".toRootTestPath)
    result.code should be(200)
    result.value.dependencies should be(dependencies)
  }
  test("deleting an existing group gives a 200 http response") {
    Given("An existing group")
    val group = V2GroupUpdate.empty("test3".toRootTestPath)
    waitForChange(marathon.createGroup(group))
    When("The group gets deleted")
    val result = marathon.deleteGroup(group.id.get)
    waitForChange(result)
    Then("The group is deleted")
    result.code should be(200)
    // only expect the test base group itself
    marathon.listGroupsInBaseGroup.value.filter { group => group.id != testBasePath } should be('empty)
  }
  test("delete a non existing group should give a 404 http response") {
    When("A non existing group is deleted")
    val result = intercept[UnsuccessfulResponseException] {
      val missing = marathon.deleteGroup("does_not_exist".toRootTestPath)
    }
    Then("We get a 404 http response code")
    result.response.status.intValue should be(404)
  }
  test("create a group with applications to start") {
    Given("A group with one application")
    val app = v2AppProxy("/test/app".toRootTestPath, "v1", 2, withHealth = false)
    val group = V2GroupUpdate("/test".toRootTestPath, Set(app))
    When("The group is created")
    waitForChange(marathon.createGroup(group))
    Then("A success event is send and the application has been started")
    val tasks = waitForTasks(app.id, app.instances)
    tasks should have size 2
  }
  test("update a group with applications to restart") {
    Given("A group with one application started")
    val id = "test".toRootTestPath
    val appId = id / "app"
    val app1V1 = v2AppProxy(appId, "v1", 2, withHealth = false)
    waitForChange(marathon.createGroup(V2GroupUpdate(id, Set(app1V1))))
    waitForTasks(app1V1.id, app1V1.instances)
    When("The group is updated, with a changed application")
    val app1V2 = v2AppProxy(appId, "v2", 2, withHealth = false)
    waitForChange(marathon.updateGroup(id, V2GroupUpdate(id, Set(app1V2))))
    Then("A success event is send and the application has been started")
    waitForTasks(app1V2.id, app1V2.instances)
  }
  // --- Health checks, upgrades and rollback ------------------------------
  test("create a group with application with health checks") {
    Given("A group with one application")
    val id = "proxy".toRootTestPath
    val appId = id / "app"
    val proxy = v2AppProxy(appId, "v1", 1)
    val group = V2GroupUpdate(id, Set(proxy))
    When("The group is created")
    val create = marathon.createGroup(group)
    Then("A success event is send and the application has been started")
    waitForChange(create)
  }
  test("upgrade a group with application with health checks") {
    Given("A group with one application")
    val id = "test".toRootTestPath
    val appId = id / "app"
    val proxy = v2AppProxy(appId, "v1", 1)
    val group = V2GroupUpdate(id, Set(proxy))
    waitForChange(marathon.createGroup(group))
    val check = appProxyCheck(proxy.id, "v1", state = true)
    When("The group is updated")
    // Simulate a temporary health dip during the upgrade window.
    check.afterDelay(1.second, state = false)
    check.afterDelay(3.seconds, state = true)
    val update = marathon.updateGroup(id, group.copy(apps = Some(Set(v2AppProxy(appId, "v2", 1)))))
    Then("A success event is send and the application has been started")
    waitForChange(update)
  }
  test("rollback from an upgrade of group") {
    Given("A group with one application")
    val gid = "proxy".toRootTestPath
    val appId = gid / "app"
    val proxy = v2AppProxy(appId, "v1", 2)
    val group = V2GroupUpdate(gid, Set(proxy))
    val create = marathon.createGroup(group)
    waitForChange(create)
    waitForTasks(proxy.id, proxy.instances)
    val v1Checks = appProxyCheck(appId, "v1", state = true)
    When("The group is updated")
    waitForChange(marathon.updateGroup(gid, group.copy(apps = Some(Set(v2AppProxy(appId, "v2", 2))))))
    Then("The new version is deployed")
    val v2Checks = appProxyCheck(appId, "v2", state = true)
    WaitTestSupport.validFor("all v2 apps are available", 10.seconds) { v2Checks.pingSince(2.seconds) }
    When("A rollback to the first version is initiated")
    // Rollback targets the version captured at group creation time.
    waitForChange(marathon.rollbackGroup(gid, create.value.version), 120.seconds)
    Then("The rollback will be performed and the old version is available")
    v1Checks.healthy
    WaitTestSupport.validFor("all v1 apps are available", 10.seconds) { v1Checks.pingSince(2.seconds) }
  }
  test("during Deployment the defined minimum health capacity is never undershot") {
    Given("A group with one application")
    val id = "test".toRootTestPath
    val appId = id / "app"
    // minimumHealthCapacity = 1: all old instances must stay up during upgrade.
    val proxy = v2AppProxy(appId, "v1", 2).copy(upgradeStrategy = UpgradeStrategy(1))
    val group = V2GroupUpdate(id, Set(proxy))
    val create = marathon.createGroup(group)
    waitForChange(create)
    waitForTasks(appId, proxy.instances)
    val v1Check = appProxyCheck(appId, "v1", state = true)
    When("The new application is not healthy")
    val v2Check = appProxyCheck(appId, "v2", state = false) //will always fail
    val update = marathon.updateGroup(id, group.copy(apps = Some(Set(v2AppProxy(appId, "v2", 2)))))
    Then("All v1 applications are kept alive")
    v1Check.healthy
    WaitTestSupport.validFor("all v1 apps are always available", 15.seconds) { v1Check.pingSince(3.seconds) }
    When("The new application becomes healthy")
    v2Check.state = true //make v2 healthy, so the app can be cleaned
    waitForChange(update)
  }
  test("An upgrade in progress can not be interrupted without force") {
    Given("A group with one application with an upgrade in progress")
    val id = "forcetest".toRootTestPath
    val appId = id / "app"
    val proxy = v2AppProxy(appId, "v1", 2)
    val group = V2GroupUpdate(id, Set(proxy))
    val create = marathon.createGroup(group)
    waitForChange(create)
    appProxyCheck(appId, "v2", state = false) //will always fail
    marathon.updateGroup(id, group.copy(apps = Some(Set(v2AppProxy(appId, "v2", 2)))))
    When("Another upgrade is triggered, while the old one is not completed")
    intercept[UnsuccessfulResponseException] {
      marathon.updateGroup(id, group.copy(apps = Some(Set(v2AppProxy(appId, "v3", 2)))))
    }
    Then("An error is indicated")
    waitForEvent("group_change_failed")
    When("Another upgrade is triggered with force, while the old one is not completed")
    val force = marathon.updateGroup(id, group.copy(apps = Some(Set(v2AppProxy(appId, "v4", 2)))), force = true)
    Then("The update is performed")
    waitForChange(force)
  }
  // --- Force-delete and dependency ordering ------------------------------
  test("A group with a running deployment can not be deleted without force") {
    Given("A group with one application with an upgrade in progress")
    val id = "forcetest".toRootTestPath
    val appId = id / "app"
    val proxy = v2AppProxy(appId, "v1", 2)
    appProxyCheck(appId, "v1", state = false) //will always fail
    val group = V2GroupUpdate(id, Set(proxy))
    val create = marathon.createGroup(group)
    When("Delete the group, while the deployment is in progress")
    intercept[UnsuccessfulResponseException] {
      marathon.deleteGroup(id)
    }
    Then("An error is indicated")
    waitForEvent("group_change_failed")
    When("Delete is triggered with force, while the deployment is not completed")
    val force = marathon.deleteGroup(id, force = true)
    Then("The delete is performed")
    waitForChange(force)
  }
  test("Groups with Applications with circular dependencies can not get deployed") {
    Given("A group with 3 circular dependent applications")
    // db -> frontend1 -> service -> db forms the cycle.
    val db = v2AppProxy("/test/db".toTestPath, "v1", 1, dependencies = Set("/test/frontend1".toTestPath))
    val service = v2AppProxy("/test/service".toTestPath, "v1", 1, dependencies = Set(db.id))
    val frontend = v2AppProxy("/test/frontend1".toTestPath, "v1", 1, dependencies = Set(service.id))
    val group = V2GroupUpdate("test".toTestPath, Set(db, service, frontend))
    When("The group gets posted")
    val exception = intercept[UnsuccessfulResponseException] {
      marathon.createGroup(group)
    }
    Then("An unsuccessfull response has been posted, with an error indicating cyclic dependencies")
    exception.response.entity.asString should include("cyclic dependencies")
  }
  test("Applications with dependencies get deployed in the correct order") {
    Given("A group with 3 dependent applications")
    val db = v2AppProxy("/test/db".toTestPath, "v1", 1)
    val service = v2AppProxy("/test/service".toTestPath, "v1", 1, dependencies = Set(db.id))
    val frontend = v2AppProxy("/test/frontend1".toTestPath, "v1", 1, dependencies = Set(service.id))
    val group = V2GroupUpdate("/test".toTestPath, Set(db, service, frontend))
    When("The group gets deployed")
    // Record the first health ping of each app to verify launch order.
    var ping = Map.empty[PathId, DateTime]
    def storeFirst(health: IntegrationHealthCheck) {
      if (!ping.contains(health.appId)) ping += health.appId -> DateTime.now
    }
    // Vals registered for their side effect (health-action hooks).
    val dbHealth = appProxyCheck(db.id, "v1", state = true).withHealthAction(storeFirst)
    val serviceHealth = appProxyCheck(service.id, "v1", state = true).withHealthAction(storeFirst)
    val frontendHealth = appProxyCheck(frontend.id, "v1", state = true).withHealthAction(storeFirst)
    waitForChange(marathon.createGroup(group))
    Then("The correct order is maintained")
    ping should have size 3
    ping(db.id) should be < ping(service.id)
    ping(service.id) should be < ping(frontend.id)
  }
  test("Groups with dependencies get deployed in the correct order") {
    Given("A group with 3 dependent applications")
    val db = v2AppProxy("/test/db/db1".toTestPath, "v1", 1)
    val service = v2AppProxy("/test/service/service1".toTestPath, "v1", 1)
    val frontend = v2AppProxy("/test/frontend/frontend1".toTestPath, "v1", 1)
    // Dependencies are declared at the sub-group level here, not per app.
    val group = V2GroupUpdate(
      "/test".toTestPath,
      Set.empty[V2AppDefinition],
      Set(
        V2GroupUpdate(PathId("db"), apps = Set(db)),
        V2GroupUpdate(PathId("service"), apps = Set(service)).copy(dependencies = Some(Set("/test/db".toTestPath))),
        V2GroupUpdate(PathId("frontend"), apps = Set(frontend)).copy(dependencies = Some(Set("/test/service".toTestPath)))
      )
    )
    When("The group gets deployed")
    var ping = Map.empty[PathId, DateTime]
    def storeFirst(health: IntegrationHealthCheck) {
      if (!ping.contains(health.appId)) ping += health.appId -> DateTime.now
    }
    val dbHealth = appProxyCheck(db.id, "v1", state = true).withHealthAction(storeFirst)
    val serviceHealth = appProxyCheck(service.id, "v1", state = true).withHealthAction(storeFirst)
    val frontendHealth = appProxyCheck(frontend.id, "v1", state = true).withHealthAction(storeFirst)
    waitForChange(marathon.createGroup(group))
    Then("The correct order is maintained")
    ping should have size 3
    ping(db.id) should be < ping(service.id)
    ping(service.id) should be < ping(frontend.id)
  }
test("Groups with dependant Applications get upgraded in the correct order with maintained upgrade strategy") {
var ping = Map.empty[String, DateTime]
def key(health: IntegrationHealthCheck) = s"${health.appId}_${health.versionId}"
def storeFirst(health: IntegrationHealthCheck) {
if (!ping.contains(key(health))) ping += key(health) -> DateTime.now
}
def create(version: String) = {
val db = v2AppProxy("/test/db".toTestPath, version, 1)
val service = v2AppProxy("/test/service".toTestPath, version, 1, dependencies = Set(db.id))
val frontend = v2AppProxy("/test/frontend1".toTestPath, version, 1, dependencies = Set(service.id))
(V2GroupUpdate("/test".toTestPath, Set(db, service, frontend)),
appProxyCheck(db.id, version, state = true).withHealthAction(storeFirst),
appProxyCheck(service.id, version, state = true).withHealthAction(storeFirst),
appProxyCheck(frontend.id, version, state = true).withHealthAction(storeFirst))
}
Given("A group with 3 dependent applications")
val (groupV1, dbV1, serviceV1, frontendV1) = create("v1")
waitForChange(marathon.createGroup(groupV1))
When("The group gets updated, where frontend2 is not healthy")
val (groupV2, dbV2, serviceV2, frontendV2) = create("v2")
frontendV2.state = false
dbV2.state = false
serviceV2.state = false
val upgrade = marathon.updateGroup(groupV2.id.get, groupV2)
waitForHealthCheck(dbV2)
Then("The correct order is maintained")
ping should have size 4
ping(key(dbV1)) should be < ping(key(serviceV1))
ping(key(serviceV1)) should be < ping(key(frontendV1))
WaitTestSupport.validFor("all v1 apps are available as well as db v2", 15.seconds) {
dbV1.pingSince(2.seconds) &&
serviceV1.pingSince(2.seconds) &&
frontendV1.pingSince(2.seconds) &&
dbV2.pingSince(2.seconds)
}
When("The v2 db becomes healthy")
dbV2.state = true
waitForHealthCheck(serviceV2)
Then("The correct order is maintained")
ping should have size 5
ping(key(serviceV1)) should be < ping(key(frontendV1))
ping(key(dbV2)) should be < ping(key(serviceV2))
WaitTestSupport.validFor("service and frontend v1 are available as well as db and service v2", 15.seconds) {
serviceV1.pingSince(2.seconds) &&
frontendV1.pingSince(2.seconds) &&
dbV2.pingSince(2.seconds) &&
serviceV2.pingSince(2.seconds)
}
When("The v2 service becomes healthy")
serviceV2.state = true
waitForHealthCheck(frontendV2)
Then("The correct order is maintained")
ping should have size 6
ping(key(dbV2)) should be < ping(key(serviceV2))
ping(key(serviceV2)) should be < ping(key(frontendV2))
WaitTestSupport.validFor("frontend v1 is available as well as all v2", 15.seconds) {
frontendV1.pingSince(2.seconds) &&
dbV2.pingSince(2.seconds) &&
serviceV2.pingSince(2.seconds) &&
frontendV2.pingSince(2.seconds)
}
When("The v2 frontend becomes healthy")
frontendV2.state = true
Then("The deployment can be finished. All v1 apps are destroyed and all v2 apps are healthy.")
waitForChange(upgrade)
List(dbV1, serviceV1, frontendV1).foreach(_.pinged = false)
WaitTestSupport.validFor("all v2 apps are alive", 15.seconds) {
!dbV1.pinged && !serviceV1.pinged && !frontendV1.pinged &&
dbV2.pingSince(2.seconds) && serviceV2.pingSince(2.seconds) && frontendV2.pingSince(2.seconds)
}
}
}
| bsideup/marathon | src/test/scala/mesosphere/marathon/integration/GroupDeployIntegrationTest.scala | Scala | apache-2.0 | 15,995 |
/*
* Copyright 2017 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.mongodb
package io
import java.nio.ByteBuffer
import laws.discipline.arbitrary._
import org.bson.{BsonBinaryReader, BsonBinaryWriter}
import org.bson.codecs.{DecoderContext, EncoderContext}
import org.bson.io.BasicOutputBuffer
import org.scalatest.FunSuite
import org.scalatest.prop.GeneratorDrivenPropertyChecks
class CodecTests extends FunSuite with GeneratorDrivenPropertyChecks {
  /** Serialises `doc` to its BSON binary representation and decodes it back to a value. */
  def roundTrip(doc: BsonDocument): BsonValue = {
    val buffer = new BasicOutputBuffer()
    val writer = new BsonBinaryWriter(buffer)
    io.bsonValueCodec.encode(writer, doc, EncoderContext.builder.build)
    val reader = new BsonBinaryReader(ByteBuffer.wrap(buffer.getInternalBuffer))
    io.bsonValueCodec.decode(reader, DecoderContext.builder.build)
  }

  test("Encoding and decoding BSON documents should leave them unchanged") {
    // Property: encode-then-decode is the identity on arbitrary BSON documents.
    forAll { doc: BsonDocument =>
      assert(roundTrip(doc) == doc)
    }
  }
}
| nrinaudo/kantan.mongodb | core/src/test/scala/kantan/mongodb/io/CodecTests.scala | Scala | apache-2.0 | 1,466 |
package slick.compiler
import slick.util.{ConstArrayOp, ConstArray}
import slick.{SlickTreeException, SlickException}
import slick.ast._
import Util._
import TypeUtil._
import scala.collection.mutable
/** Expand sum types and their catamorphisms to equivalent product type operations. */
class ExpandSums extends Phase {
val name = "expandSums"
def apply(state: CompilerState) =
if(state.get(Phase.assignUniqueSymbols).map(_.nonPrimitiveOption).getOrElse(true)) state.map(expandSums)
else state
val Disc1 = LiteralNode(ScalaBaseType.optionDiscType.optionType, Option(1))
val DiscNone = LiteralNode(ScalaBaseType.optionDiscType.optionType, None)
  /** Perform the sum expansion on a whole tree, then expand the multi-column
    * conditionals it produced (only if any were actually produced). */
  def expandSums(n: Node): Node = {
    // Set to true whenever a multi-column Option encoding is introduced;
    // checked at the end to decide whether expandConditionals must run.
    var multi = false
    /** Perform the sum expansion on a Node */
    def tr(tree: Node, oldDiscCandidates: Set[(TypeSymbol, List[TermSymbol])]): Node = {
      // Accumulate discriminator candidates from Filter and Join predicates on the way down.
      val discCandidates = oldDiscCandidates ++ (tree match {
        case Filter(_, _, p) => collectDiscriminatorCandidates(p)
        case Bind(_, j: Join, _) => collectDiscriminatorCandidates(j.on)
        case _ => Set.empty
      })
      // Bottom-up rewrite: children first, then this node.
      val tree2 = tree.mapChildren(tr(_, discCandidates), keepType = true)
      val tree3 = tree2 match {
        // Expand multi-column null values in ELSE branches (used by Rep[Option].filter) with correct type
        case IfThenElse(ConstArray(pred, then1 :@ tpe, LiteralNode(None) :@ OptionType(ScalaBaseType.nullType))) =>
          multi = true
          IfThenElse(ConstArray(pred, then1, buildMultiColumnNone(tpe))) :@ tpe
        // Identity OptionFold/OptionApply combination -> remove
        case OptionFold(from, LiteralNode(None) :@ OptionType(ScalaBaseType.nullType), oa @ OptionApply(Ref(s)), gen) if s == gen =>
          silentCast(oa.nodeType, from)
        // Primitive OptionFold representing GetOrElse -> translate to GetOrElse
        case OptionFold(from :@ OptionType.Primitive(_), LiteralNode(v), Ref(s), gen) if s == gen =>
          GetOrElse(from, () => v).infer()
        // Primitive OptionFold -> translate to null check
        case OptionFold(from :@ OptionType.Primitive(_), ifEmpty, map, gen) =>
          val pred = Library.==.typed[Boolean](from, LiteralNode(null))
          val n2 = (ifEmpty, map) match {
            // Boolean-literal shortcuts: fold is just an isEmpty / isDefined test.
            case (LiteralNode(true), LiteralNode(false)) => pred
            case (LiteralNode(false), LiteralNode(true)) => Library.Not.typed[Boolean](pred)
            case _ =>
              val ifDefined = map.replace({
                case r @ Ref(s) if s == gen => silentCast(r.nodeType, from)
              }, keepType = true)
              val ifEmpty2 = silentCast(ifDefined.nodeType.structural, ifEmpty)
              IfThenElse(ConstArray(pred, ifEmpty2, ifDefined))
          }
          n2.infer()
        // Other OptionFold -> translate to discriminator check
        case OptionFold(from, ifEmpty, map, gen) =>
          multi = true
          // Element 1 of the product encoding is the discriminator column.
          val left = from.select(ElementSymbol(1)).infer()
          val pred = Library.==.typed[Boolean](left, LiteralNode(null))
          val n2 = (ifEmpty, map) match {
            case (LiteralNode(true), LiteralNode(false)) => pred
            case (LiteralNode(false), LiteralNode(true)) => Library.Not.typed[Boolean](pred)
            case _ =>
              val ifDefined = map.replace({
                case r @ Ref(s) if s == gen => silentCast(r.nodeType, from.select(ElementSymbol(2)).infer())
              }, keepType = true)
              val ifEmpty2 = silentCast(ifDefined.nodeType.structural, ifEmpty)
              // A statically known Disc1 means the value is always defined -> no conditional needed.
              if(left == Disc1) ifDefined else IfThenElse(ConstArray(Library.Not.typed[Boolean](pred), ifDefined, ifEmpty2))
          }
          n2.infer()
        // Primitive OptionApply -> leave unchanged
        case n @ OptionApply(_) :@ OptionType.Primitive(_) => n
        // Other OptionApply -> translate to product form
        case n @ OptionApply(ch) =>
          multi = true
          ProductNode(ConstArray(Disc1, silentCast(toOptionColumns(ch.nodeType), ch))).infer()
        // Non-primitive GetOrElse
        // (.get is only defined on primitive Options, but this can occur inside of HOFs like .map)
        case g @ GetOrElse(ch :@ tpe, _) =>
          tpe match {
            case OptionType.Primitive(_) => g
            case _ => throw new SlickException(".get may only be called on Options of top-level primitive types")
          }
        // Option-extended left outer, right outer or full outer join
        case bind @ Bind(bsym, Join(_, _, _, _, jt, _), _) if jt == JoinType.LeftOption || jt == JoinType.RightOption || jt == JoinType.OuterOption =>
          multi = true
          translateJoin(bind, discCandidates)
        case n => n
      }
      // Fuse redundant Option operations introduced by the expansion, then retype.
      val tree4 = fuse(tree3)
      tree4 :@ trType(tree4.nodeType)
    }
    val n2 = tr(n, Set.empty)
    if(multi) expandConditionals(n2) else n2
  }
  /** Translate an Option-extended left outer, right outer or full outer join */
  def translateJoin(bind: Bind, discCandidates: Set[(TypeSymbol, List[TermSymbol])]): Bind = {
    logger.debug("translateJoin", bind)
    val Bind(bsym, (join @ Join(lsym, rsym, left :@ CollectionType(_, leftElemType), right :@ CollectionType(_, rightElemType), jt, on)) :@ CollectionType(cons, elemType), pure) = bind
    // A side is "complex" when its element type is not a single atomic column;
    // only complex sides need an explicit discriminator to detect missing rows.
    val lComplex = !leftElemType.structural.isInstanceOf[AtomicType]
    val rComplex = !rightElemType.structural.isInstanceOf[AtomicType]
    logger.debug(s"Translating join ($jt, complex: $lComplex, $rComplex):", bind)
    // Find an existing column that can serve as a discriminator
    def findDisc(t: Type): Option[List[TermSymbol]] = {
      val global: Set[List[TermSymbol]] = t match {
        case NominalType(ts, exp) =>
          val c = discCandidates.filter { case (t, ss) => t == ts && ss.nonEmpty }.map(_._2)
          logger.debug("Discriminator candidates from surrounding Filter and Join predicates: "+
            c.map(Path.toString).mkString(", "))
          c
        case _ => Set.empty
      }
      // Enumerate all atomic-column paths in the element type.
      def find(t: Type, path: List[TermSymbol]): Vector[List[TermSymbol]] = t.structural match {
        case StructType(defs) => defs.toSeq.flatMap { case (s, t) => find(t, s :: path) }(collection.breakOut)
        case p: ProductType => p.elements.iterator.zipWithIndex.flatMap { case (t, i) => find(t, ElementSymbol(i+1) :: path) }.toVector
        case _: AtomicType => Vector(path)
        case _ => Vector.empty
      }
      // Rank candidates: primary-key columns first, and prefer columns already used
      // in surrounding predicates (the multiplier amplifies the primary-key bonus).
      val local = find(t, Nil).sortBy { ss =>
        (if(global contains ss) 3 else 1) * (ss.head match {
          case f: FieldSymbol =>
            if(f.options contains ColumnOption.PrimaryKey) -2 else -1
          case _ => 0
        })
      }
      logger.debug("Local candidates: "+local.map(Path.toString).mkString(", "))
      local.headOption
    }
    // Option-extend one side of the join with a discriminator column
    def extend(side: Node, sym: TermSymbol, on: Node): (Node, Node, Boolean) = {
      val extendGen = new AnonSymbol
      val elemType = side.nodeType.asCollectionType.elementType
      val (disc, createDisc) = findDisc(elemType) match {
        case Some(path) =>
          logger.debug("Using existing column "+Path(path)+" as discriminator in "+elemType)
          (FwdPath(extendGen :: path.reverse), true)
        case None =>
          logger.debug("No suitable discriminator column found in "+elemType)
          (Disc1, false)
      }
      // Wrap the side in (disc, row) pairs.
      val extend :@ CollectionType(_, extendedElementType) = Bind(extendGen, side, Pure(ProductNode(ConstArray(disc, Ref(extendGen))))).infer()
      // References to this side inside the join condition must now select element 2 (the row).
      val sideInCondition = Select(Ref(sym) :@ extendedElementType, ElementSymbol(2)).infer()
      val on2 = on.replace({
        case Ref(s) if s == sym => sideInCondition
      }, bottomUp = true).infer()
      (extend, on2, createDisc)
    }
    // Translate the join depending on JoinType and Option type
    val (left2, right2, on2, jt2, ldisc, rdisc) = jt match {
      case JoinType.LeftOption =>
        val (right2, on2, rdisc) = if(rComplex) extend(right, rsym, on) else (right, on, false)
        (left, right2, on2, JoinType.Left, false, rdisc)
      case JoinType.RightOption =>
        val (left2, on2, ldisc) = if(lComplex) extend(left, lsym, on) else (left, on, false)
        (left2, right, on2, JoinType.Right, ldisc, false)
      case JoinType.OuterOption =>
        val (left2, on2, ldisc) = if(lComplex) extend(left, lsym, on) else (left, on, false)
        val (right2, on3, rdisc) = if(rComplex) extend(right, rsym, on2) else (right, on2, false)
        (left2, right2, on3, JoinType.Outer, ldisc, rdisc)
    }
    // Cast to translated Option type in outer bind
    val join2 :@ CollectionType(_, elemType2) = Join(lsym, rsym, left2, right2, jt2, on2).infer()
    def optionCast(idx: Int, createDisc: Boolean): Node = {
      val ref = Select(Ref(bsym) :@ elemType2, ElementSymbol(idx+1))
      val v = if(createDisc) {
        // Normalize the borrowed discriminator column: NULL -> None, anything else -> Disc1.
        val protoDisc = Select(ref, ElementSymbol(1)).infer()
        val rest = Select(ref, ElementSymbol(2))
        val disc = IfThenElse(ConstArray(Library.==.typed[Boolean](silentCast(OptionType(protoDisc.nodeType), protoDisc), LiteralNode(null)), DiscNone, Disc1))
        ProductNode(ConstArray(disc, rest))
      } else ref
      silentCast(trType(elemType.asInstanceOf[ProductType].children(idx)), v)
    }
    val ref = ProductNode(ConstArray(optionCast(0, ldisc), optionCast(1, rdisc))).infer()
    val pure2 = pure.replace({
      case Ref(s) if s == bsym => ref
      // Hoist SilentCasts and remove unnecessary ones
      case Library.SilentCast(Library.SilentCast(ch)) :@ tpe => silentCast(tpe, ch)
      case Select(Library.SilentCast(ch), s) :@ tpe => silentCast(tpe, ch.select(s).infer())
    }, bottomUp = true, keepType = true)
    val res = Bind(bsym, join2, pure2).infer()
    logger.debug("Translated join:", res)
    res
  }
  /** Create a SilentCast call unless the type already matches */
  def silentCast(tpe: Type, n: Node): Node = n match {
    // A multi-column None literal is expanded structurally instead of being cast.
    case LiteralNode(None) :@ OptionType(ScalaBaseType.nullType) => buildMultiColumnNone(tpe)
    // Already the right type -> no cast needed.
    case n :@ tpe2 if tpe2 == tpe => n
    case n =>
      if(tpe == UnassignedType) throw new SlickTreeException("Unexpected UnassignedType for:", n)
      Library.SilentCast.typed(tpe, n).infer()
  }
  /** Create a Node representing a structure of null values of the given Type */
  def buildMultiColumnNone(tpe: Type): Node = (tpe.structural match {
    // Recurse into products and structs, producing one None per leaf column.
    case ProductType(ch) => ProductNode(ch.map(buildMultiColumnNone))
    case StructType(ch) => StructNode(ch.map { case (sym, t) => (sym, buildMultiColumnNone(t)) })
    // Leaf columns must be Options; a single None literal carries the full type.
    case OptionType(ch) => LiteralNode(tpe, None)
    case t => throw new SlickException("Unexpected non-Option type in multi-column None")
  }) :@ tpe
  /** Perform the sum expansion on a Type */
  def trType(tpe: Type): Type = {
    def f(tpe: Type): Type = tpe.mapChildren(f) match {
      // Primitive Options stay as they are.
      case t @ OptionType.Primitive(_) => t
      // Non-primitive Options become a (discriminator, value-columns) product.
      case OptionType(ch) => ProductType(ConstArray(ScalaBaseType.optionDiscType.optionType, toOptionColumns(ch)))
      case t => t
    }
    val tpe2 = f(tpe)
    logger.debug(s"Translated type: $tpe -> $tpe2")
    tpe2
  }
  /** Strip nominal types and convert all atomic types to OptionTypes */
  def toOptionColumns(tpe: Type): Type = tpe match {
    case NominalType(_, str) => toOptionColumns(str)
    // Atomic types that are already Options are left unchanged.
    case o @ OptionType(ch) if ch.structural.isInstanceOf[AtomicType] => o
    case t: AtomicType => OptionType(t)
    case t => t.mapChildren(toOptionColumns)
  }
  /** Fuse unnecessary Option operations */
  def fuse(n: Node): Node = n match {
    // Option.map
    // Rewrites `if(disc != null) (Disc1, mapped) else (DiscNone, _)` to the
    // plain product `(disc, mapped)` — the discriminator already encodes the condition.
    case IfThenElse(ConstArray(Library.Not(Library.==(disc, LiteralNode(null))), ProductNode(ConstArray(Disc1, map)), ProductNode(ConstArray(DiscNone, _)))) =>
      ProductNode(ConstArray(disc, map)).infer()
    case n => n
  }
  /** Collect discriminator candidate fields in a predicate. These are all paths below an
    * OptionApply, which indicates their future use under a discriminator guard. */
  def collectDiscriminatorCandidates(n: Node): Set[(TypeSymbol, List[TermSymbol])] = n.collectAll[(TypeSymbol, List[TermSymbol])] {
    case OptionApply(ch) =>
      // Every path rooted at a type symbol inside the OptionApply is a candidate.
      ch.collect[(TypeSymbol, List[TermSymbol])] { case PathOnTypeSymbol(ts, ss) => (ts, ss) }
  }.toSet
  /** Extractor matching a chain of Selects (possibly wrapped in SilentCasts) rooted at a
    * path element with a nominal type; yields the type symbol and the selection symbols. */
  object PathOnTypeSymbol {
    def unapply(n: Node): Option[(TypeSymbol, List[TermSymbol])] = n match {
      case (n: PathElement) :@ NominalType(ts, _) => Some((ts, Nil))
      // Each enclosing Select prepends its symbol, so the outermost selection ends up first.
      case Select(in, s) => unapply(in).map { case (ts, l) => (ts, s :: l) }
      case Library.SilentCast(ch) => unapply(ch)
      case _ => None
    }
  }
  /** Expand multi-column conditional expressions and SilentCasts.
    * Single-column conditionals involving NULL values are optimized away where possible. */
  def expandConditionals(n: Node): Node = {
    // Type symbols whose computed types may have changed; their references are
    // untyped at the end so they can be re-inferred.
    val invalid = mutable.HashSet.empty[TypeSymbol]
    def invalidate(n: Node): Unit = invalid ++= n.nodeType.collect { case NominalType(ts, _) => ts }.toSeq
    def tr(n: Node): Node = n.mapChildren(tr, keepType = true) match {
      // Expand multi-column SilentCasts
      case cast @ Library.SilentCast(ch) :@ Type.Structural(ProductType(typeCh)) =>
        invalidate(ch)
        val elems = typeCh.zipWithIndex.map { case (t, idx) => tr(Library.SilentCast.typed(t, ch.select(ElementSymbol(idx+1))).infer()) }
        ProductNode(elems).infer()
      case Library.SilentCast(ch) :@ Type.Structural(StructType(typeCh)) =>
        invalidate(ch)
        val elems = typeCh.map { case (sym, t) => (sym, tr(Library.SilentCast.typed(t, ch.select(sym)).infer())) }
        StructNode(elems).infer()
      // Optimize trivial SilentCasts
      case Library.SilentCast(v :@ tpe) :@ tpe2 if tpe.structural == tpe2.structural =>
        invalidate(v)
        v
      case Library.SilentCast(Library.SilentCast(ch)) :@ tpe => tr(Library.SilentCast.typed(tpe, ch).infer())
      case Library.SilentCast(LiteralNode(None)) :@ (tpe @ OptionType.Primitive(_)) => LiteralNode(tpe, None).infer()
      // Expand multi-column IfThenElse
      // Push the conditional into each column: one IfThenElse per element.
      case (cond @ IfThenElse(_)) :@ Type.Structural(ProductType(chTypes)) =>
        val ch = ConstArrayOp.from(1 to chTypes.length).map { idx =>
          val sym = ElementSymbol(idx)
          tr(cond.mapResultClauses(n => n.select(sym)).infer())
        }
        ProductNode(ch).infer()
      case (cond @ IfThenElse(_)) :@ Type.Structural(StructType(chTypes)) =>
        val ch = chTypes.map { case (sym, _) =>
          (sym, tr(cond.mapResultClauses(n => n.select(sym)).infer()))
        }
        StructNode(ch).infer()
      // Optimize null-propagating single-column IfThenElse
      case IfThenElse(ConstArray(Library.==(r, LiteralNode(null)), Library.SilentCast(LiteralNode(None)), c @ Library.SilentCast(r2))) if r == r2 => c
      // Fix Untyped nulls in else clauses
      case cond @ IfThenElse(clauses) if (clauses.last match { case LiteralNode(None) :@ OptionType(ScalaBaseType.nullType) => true; case _ => false }) =>
        cond.copy(clauses.init :+ LiteralNode(cond.nodeType, None))
      // Resolve Selects into ProductNodes and StructNodes
      case Select(ProductNode(ch), ElementSymbol(idx)) => ch(idx-1)
      case Select(StructNode(ch), sym) => ch.find(_._1 == sym).get._2
      // Any rewritten Pure invalidates its type symbol.
      case n2 @ Pure(_, ts) if n2 ne n =>
        invalid += ts
        n2
      case n => n
    }
    val n2 = tr(n)
    logger.debug("Invalidated TypeSymbols: "+invalid.mkString(", "))
    n2.replace({
      case n: PathElement if n.nodeType.containsSymbol(invalid) => n.untyped
    }, bottomUp = true).infer()
  }
}
| xavier-fernandez/slick | slick/src/main/scala/slick/compiler/ExpandSums.scala | Scala | bsd-2-clause | 15,526 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import scala.util.Random
/**
 * A simple dense vector of Doubles backed by an array.
 *
 * All binary operations require both vectors to have the same length and throw
 * `IllegalArgumentException` otherwise. Operations returning a `Vector` allocate
 * a new instance except for `+=` / `addInPlace`, which mutate `this`.
 */
class Vector(val elements: Array[Double]) extends Serializable {

  /** Number of entries in this vector. */
  def length = elements.length

  /** Element at `index` (0-based). */
  def apply(index: Int) = elements(index)

  /** Element-wise sum; returns a new vector. */
  def + (other: Vector): Vector = {
    if (length != other.length)
      throw new IllegalArgumentException("Vectors of different length")
    Vector(length, i => this(i) + other(i))
  }

  /** Java-friendly alias for `+`. */
  def add(other: Vector) = this + other

  /** Element-wise difference; returns a new vector. */
  def - (other: Vector): Vector = {
    if (length != other.length)
      throw new IllegalArgumentException("Vectors of different length")
    Vector(length, i => this(i) - other(i))
  }

  /** Java-friendly alias for `-`. */
  def subtract(other: Vector) = this - other

  /** Dot (inner) product of this vector with `other`. */
  def dot(other: Vector): Double = {
    if (length != other.length)
      throw new IllegalArgumentException("Vectors of different length")
    var ans = 0.0
    var i = 0
    while (i < length) {
      ans += this(i) * other(i)
      i += 1
    }
    ans
  }

  /**
   * return (this + plus) dot other, but without creating any intermediate storage
   * @param plus vector added element-wise to `this` before the dot product
   * @param other vector to take the dot product with
   * @return the scalar result of `(this + plus) dot other`
   */
  def plusDot(plus: Vector, other: Vector): Double = {
    if (length != other.length)
      throw new IllegalArgumentException("Vectors of different length")
    if (length != plus.length)
      throw new IllegalArgumentException("Vectors of different length")
    var ans = 0.0
    var i = 0
    while (i < length) {
      ans += (this(i) + plus(i)) * other(i)
      i += 1
    }
    ans
  }

  /** In-place element-wise addition; mutates and returns `this`. */
  def += (other: Vector): Vector = {
    if (length != other.length)
      throw new IllegalArgumentException("Vectors of different length")
    var i = 0
    while (i < length) {
      elements(i) += other(i)
      i += 1
    }
    this
  }

  /** Java-friendly alias for `+=`; mutates `this`. */
  def addInPlace(other: Vector) = this += other

  /** Scales every element by `scale`; returns a new vector. */
  def * (scale: Double): Vector = Vector(length, i => this(i) * scale)

  /** Java-friendly alias for `*`. */
  def multiply (d: Double) = this * d

  /** Divides every element by `d` (implemented as multiplication by 1/d). */
  def / (d: Double): Vector = this * (1 / d)

  /** Java-friendly alias for `/`. */
  def divide (d: Double) = this / d

  /** Element-wise negation. */
  def unary_- = this * -1

  /** Sum of all elements; 0.0 for an empty vector.
   * (Previously used reduceLeft, which threw UnsupportedOperationException on length 0.) */
  def sum = elements.sum

  /** Squared Euclidean distance between this vector and `other`. */
  def squaredDist(other: Vector): Double = {
    var ans = 0.0
    var i = 0
    while (i < length) {
      ans += (this(i) - other(i)) * (this(i) - other(i))
      i += 1
    }
    ans
  }

  /** Euclidean distance between this vector and `other`. */
  def dist(other: Vector): Double = math.sqrt(squaredDist(other))

  override def toString = elements.mkString("(", ", ", ")")
}
object Vector {
  // Wraps the given array directly (no defensive copy).
  def apply(elements: Array[Double]) = new Vector(elements)
  // Varargs constructor: Vector(1.0, 2.0, 3.0).
  def apply(elements: Double*) = new Vector(elements.toArray)
  // Builds a vector of the given length with element i set to initializer(i).
  def apply(length: Int, initializer: Int => Double): Vector = {
    val elements: Array[Double] = Array.tabulate(length)(initializer)
    new Vector(elements)
  }
  // Vector of `length` zeros.
  def zeros(length: Int) = new Vector(new Array[Double](length))
  // Vector of `length` ones.
  def ones(length: Int) = Vector(length, _ => 1)
  /**
   * Creates this [[org.apache.spark.util.Vector]] of given length containing random numbers
   * between 0.0 and 1.0. Optional [[scala.util.Random]] number generator can be provided.
   */
  def random(length: Int, random: Random = new XORShiftRandom()) = Vector(length, _ => random.nextDouble())
  // Helper enabling scalar-on-the-left multiplication via the implicit conversion below.
  class Multiplier(num: Double) {
    def * (vec: Vector) = vec * num
  }
  // Allows writing `2.0 * vec` in addition to `vec * 2.0`.
  implicit def doubleToMultiplier(num: Double) = new Multiplier(num)
  // Accumulator support: merges vectors by element-wise addition.
  implicit object VectorAccumParam extends org.apache.spark.AccumulatorParam[Vector] {
    def addInPlace(t1: Vector, t2: Vector) = t1 + t2
    def zero(initialValue: Vector) = Vector.zeros(initialValue.length)
  }
}
| iiisthu/sparkSdn | core/src/main/scala/org/apache/spark/util/Vector.scala | Scala | apache-2.0 | 4,228 |
package pl.touk.nussknacker.engine.spel
import cats.data.NonEmptyList._
import cats.data.Validated._
import cats.data.{NonEmptyList, Validated, ValidatedNel}
import cats.instances.list._
import cats.instances.map._
import cats.kernel.{Monoid, Semigroup}
import cats.syntax.traverse._
import com.typesafe.scalalogging.LazyLogging
import org.springframework.expression.Expression
import org.springframework.expression.common.{CompositeStringExpression, LiteralExpression}
import org.springframework.expression.spel.ast._
import org.springframework.expression.spel.{SpelNode, standard}
import pl.touk.nussknacker.engine.TypeDefinitionSet
import pl.touk.nussknacker.engine.api.Context
import pl.touk.nussknacker.engine.api.context.ValidationContext
import pl.touk.nussknacker.engine.api.expression.{ExpressionParseError, ExpressionTypingInfo}
import pl.touk.nussknacker.engine.api.process.ClassExtractionSettings
import pl.touk.nussknacker.engine.api.typed.supertype.{CommonSupertypeFinder, NumberTypesPromotionStrategy}
import pl.touk.nussknacker.engine.api.typed.typing._
import pl.touk.nussknacker.engine.dict.SpelDictTyper
import pl.touk.nussknacker.engine.expression.NullExpression
import pl.touk.nussknacker.engine.spel.Typer._
import pl.touk.nussknacker.engine.spel.ast.SpelAst.SpelNodeId
import pl.touk.nussknacker.engine.spel.ast.SpelNodePrettyPrinter
import pl.touk.nussknacker.engine.spel.internal.EvaluationContextPreparer
import pl.touk.nussknacker.engine.spel.typer.{MapLikePropertyTyper, TypeMethodReference}
import pl.touk.nussknacker.engine.types.EspTypeUtils
import scala.annotation.tailrec
import scala.reflect.runtime._
import scala.util.{Failure, Success, Try}
private[spel] class Typer(classLoader: ClassLoader, commonSupertypeFinder: CommonSupertypeFinder,
dictTyper: SpelDictTyper, strictMethodsChecking: Boolean,
staticMethodInvocationsChecking: Boolean,
typeDefinitionSet: TypeDefinitionSet,
evaluationContextPreparer: EvaluationContextPreparer,
methodExecutionForUnknownAllowed: Boolean,
dynamicPropertyAccessAllowed: Boolean
)(implicit settings: ClassExtractionSettings) extends LazyLogging {
import ast.SpelAst._
type NodeTypingResult = ValidatedNel[ExpressionParseError, CollectedTypingResult]
  /** Types a parsed SpEL [[Expression]] against the given validation context,
    * dispatching on the concrete expression kind. */
  def typeExpression(expr: Expression, ctx: ValidationContext): ValidatedNel[ExpressionParseError, CollectedTypingResult] = {
    expr match {
      case e: standard.SpelExpression =>
        typeExpression(e, ctx)
      case e: CompositeStringExpression =>
        // A template expression: every subexpression must type-check, but the whole
        // template always yields a String.
        val validatedParts = e.getExpressions.toList.map(typeExpression(_, ctx)).sequence
        // We drop intermediate results here:
        // * It's tricky to combine it as each of the subexpressions has it's own abstract tree with positions relative to the subexpression's starting position
        // * CompositeStringExpression is dedicated to template SpEL expressions. It cannot be nested (as templates cannot be nested)
        // * Currently we don't use intermediate typing results outside of Typer
        validatedParts.map(_ => CollectedTypingResult.withEmptyIntermediateResults(TypingResultWithContext(Typed[String])))
      case e: LiteralExpression =>
        Valid(CollectedTypingResult.withEmptyIntermediateResults(TypingResultWithContext(Typed[String])))
      case e: NullExpression =>
        // NOTE(review): a NullExpression is typed as String here, same as a literal —
        // presumably because it represents an empty template; confirm against callers.
        Valid(CollectedTypingResult.withEmptyIntermediateResults(TypingResultWithContext(Typed[String])))
    }
  }
  /** Types a standard (non-template) SpEL expression by walking its AST from the root
    * with an empty typing context, logging the outcome at TRACE level. */
  private def typeExpression(spelExpression: standard.SpelExpression, ctx: ValidationContext): ValidatedNel[ExpressionParseError, CollectedTypingResult] = {
    val ast = spelExpression.getAST
    val result = typeNode(ctx, ast, TypingContext(List.empty, Map.empty))
    // Trace logging only; the block is skipped entirely unless TRACE is enabled.
    logger.whenTraceEnabled {
      result match {
        case Valid(collectedResult) =>
          // Pretty-print the AST with each node annotated by its inferred type.
          val printer = new SpelNodePrettyPrinter(n => collectedResult.intermediateResults.get(SpelNodeId(n)).map(_.display).getOrElse("NOT_TYPED"))
          logger.trace("typed valid expression: " + printer.print(ast))
        case Invalid(errors) =>
          logger.trace(s"typed invalid expression: ${spelExpression.getExpressionString}, errors: ${errors.toList.mkString(", ")}")
      }
    }
    result
  }
private def typeNode(validationContext: ValidationContext, node: SpelNode, current: TypingContext): NodeTypingResult = {
def toResult(typ: TypingResult) = current.toResult(TypedNode(node, TypingResultWithContext(typ)))
def valid(typ: TypingResult) = Valid(toResult(typ))
val withTypedChildren = typeChildren(validationContext, node, current) _
def fixedWithNewCurrent(newCurrent: TypingContext) = typeChildrenAndReturnFixed(validationContext, node, newCurrent) _
val fixed = fixedWithNewCurrent(current)
def withChildrenOfType[Parts: universe.TypeTag](result: TypingResultWithContext) = withTypedChildren {
case list if list.forall(_.typingResult.canBeSubclassOf(Typed.fromDetailedType[Parts])) => Valid(result)
case _ => invalid("Wrong part types")
}
def catchUnexpectedErrors(block: => NodeTypingResult): NodeTypingResult = Try(block) match {
case Success(value) =>
value
case Failure(e) =>
throw new SpelCompilationException(node, e)
}
def typeUnion(e: Indexer, possibleTypes: Set[SingleTypingResult]): NodeTypingResult = {
val typedPossibleTypes = possibleTypes.map(possibleType => typeIndexer(e, possibleType)).toList
val typingResult = typedPossibleTypes.sequence.map(_.map(_.finalResult.typingResult).toSet).map(typingResults => Typed.apply(typingResults))
typingResult.map(toResult)
}
@tailrec
def typeIndexer(e: Indexer, typingResult: TypingResult): NodeTypingResult = {
typingResult match {
case TypedClass(clazz, param :: Nil) if clazz.isAssignableFrom(classOf[java.util.List[_]]) => valid(param)
case TypedClass(clazz, keyParam :: valueParam :: Nil) if clazz.isAssignableFrom(classOf[java.util.Map[_, _]]) => valid(valueParam)
case d: TypedDict => dictTyper.typeDictValue(d, e).map(toResult)
case TypedUnion(possibleTypes) => typeUnion(e, possibleTypes)
case TypedTaggedValue(underlying, _) => typeIndexer(e, underlying)
case _ => if (dynamicPropertyAccessAllowed) valid(Unknown) else invalid("Dynamic property access is not allowed")
}
}
catchUnexpectedErrors(node match {
case e: Assign => invalid("Value modifications are not supported")
case e: BeanReference => invalid("Bean reference is not supported")
case e: CompoundExpression => e.children match {
case first :: rest =>
val validatedLastType = rest.foldLeft(typeNode(validationContext, first, current)) {
case (Valid(prevResult), next) => typeNode(validationContext, next, current.pushOnStack(prevResult))
case (invalid, _) => invalid
}
validatedLastType.map { lastType =>
CollectedTypingResult(lastType.intermediateResults + (SpelNodeId(e) -> lastType.finalResult), lastType.finalResult)
}
//should not happen as CompoundExpression doesn't allow this...
case Nil => valid(Unknown)
}
case e: ConstructorReference => withTypedChildren { _ =>
val className = e.getChild(0).toStringAST
val classToUse = Try(evaluationContextPreparer.prepareEvaluationContext(Context(""), Map.empty).getTypeLocator.findType(className)).toOption
//TODO: validate constructor parameters...
val clazz = classToUse.flatMap(kl => typeDefinitionSet.typeDefinitions.find(_.clazzName.klass == kl).map(_.clazzName))
clazz match {
case Some(typedClass) => Valid(TypingResultWithContext(typedClass))
case None => invalid(s"Cannot create instance of unknown class $classToUse")
}
}
case e: Elvis => withTypedChildren(l => Valid(TypingResultWithContext(Typed(l.map(_.typingResult).toSet))))
//TODO: what should be here?
case e: FunctionReference => valid(Unknown)
//TODO: what should be here?
case e: Identifier => valid(Unknown)
//TODO: what should be here?
case e: Indexer => current.stack.headOption match {
case None => invalid("Cannot do indexing here")
case Some(result) => typeIndexer(e, result.typingResult)
}
case e: BooleanLiteral => valid(Typed[Boolean])
case e: IntLiteral => valid(Typed[java.lang.Integer])
case e: LongLiteral => valid(Typed[java.lang.Long])
case e: RealLiteral => valid(Typed(Typed[java.lang.Float]))
case e: FloatLiteral => valid(Typed[java.lang.Float])
case e: StringLiteral => valid(Typed[String])
case e: NullLiteral => valid(Unknown)
case e: InlineList => withTypedChildren { children =>
//We don't want Typed.empty here, as currently it means it won't validate for any signature
val elementType = if (children.isEmpty) TypingResultWithContext(Unknown) else TypingResultWithContext(Typed(children.map(typ => typ.typingResult).toSet))
Valid(TypingResultWithContext(Typed.genericTypeClass[java.util.List[_]](List(elementType.typingResult))))
}
case e: InlineMap =>
val zipped = e.children.zipWithIndex
val keys = zipped.filter(_._2 % 2 == 0).map(_._1)
val values = zipped.filter(_._2 % 2 == 1).map(_._1)
val literalKeys = keys
.collect {
case a: PropertyOrFieldReference => a.getName
case b: StringLiteral => b.getLiteralValue.getValue.toString
}
if (literalKeys.size != keys.size) {
invalid("Currently inline maps with not literal keys (e.g. expressions as keys) are not supported")
} else {
values.map(typeNode(validationContext, _, current.withoutIntermediateResults)).sequence.andThen { typedValues =>
withCombinedIntermediate(typedValues, current) { typedValues =>
val typ = TypedObjectTypingResult(literalKeys.zip(typedValues.map(_.typingResult)))
Valid(TypedNode(node, TypingResultWithContext(typ)))
}
}
}
case e: MethodReference =>
extractMethodReference(e, validationContext, node, current, methodExecutionForUnknownAllowed)
case e: OpEQ => checkEqualityLikeOperation(validationContext, e, current)
case e: OpNE => checkEqualityLikeOperation(validationContext, e, current)
case e: OpAnd => withChildrenOfType[Boolean](TypingResultWithContext(Typed[Boolean]))
case e: OpOr => withChildrenOfType[Boolean](TypingResultWithContext(Typed[Boolean]))
case e: OpGE => withChildrenOfType[Number](TypingResultWithContext(Typed[Boolean]))
case e: OpGT => withChildrenOfType[Number](TypingResultWithContext(Typed[Boolean]))
case e: OpLE => withChildrenOfType[Number](TypingResultWithContext(Typed[Boolean]))
case e: OpLT => withChildrenOfType[Number](TypingResultWithContext(Typed[Boolean]))
case e: OpDec => checkSingleOperandArithmeticOperation(validationContext, e, current)
case e: OpInc => checkSingleOperandArithmeticOperation(validationContext, e, current)
case e: OpDivide => checkTwoOperandsArithmeticOperation(validationContext, e, current)(NumberTypesPromotionStrategy.ForMathOperation)
case e: OpMinus => withTypedChildren {
case TypingResultWithContext(left, _) :: TypingResultWithContext(right, _) :: Nil if left.canBeSubclassOf(Typed[Number]) && right.canBeSubclassOf(Typed[Number]) => Valid(TypingResultWithContext(commonSupertypeFinder.commonSupertype(left, right)(NumberTypesPromotionStrategy.ForMathOperation)))
case TypingResultWithContext(left, _) :: TypingResultWithContext(right, _) :: Nil => invalid(s"Operator '${e.getOperatorName}' used with mismatch types: ${left.display} and ${right.display}")
case TypingResultWithContext(left, _) :: Nil if left.canBeSubclassOf(Typed[Number]) => Valid(TypingResultWithContext(left))
case TypingResultWithContext(left, _) :: Nil => invalid(s"Operator '${e.getOperatorName}' used with non numeric type: ${left.display}")
case Nil => invalid("Empty minus")
}
case e: OpModulus => checkTwoOperandsArithmeticOperation(validationContext, e, current)(NumberTypesPromotionStrategy.ForMathOperation)
case e: OpMultiply => checkTwoOperandsArithmeticOperation(validationContext, e, current)(NumberTypesPromotionStrategy.ForMathOperation)
case e: OperatorPower => checkTwoOperandsArithmeticOperation(validationContext, e, current)(NumberTypesPromotionStrategy.ForPowerOperation)
case e: OpPlus => withTypedChildren {
case TypingResultWithContext(left, _) :: TypingResultWithContext(right, _) :: Nil if left == Unknown || right == Unknown => Valid(TypingResultWithContext(Unknown))
case TypingResultWithContext(left, _) :: TypingResultWithContext(right, _) :: Nil if left.canBeSubclassOf(Typed[String]) || right.canBeSubclassOf(Typed[String]) => Valid(TypingResultWithContext(Typed[String]))
case TypingResultWithContext(left, _) :: TypingResultWithContext(right, _) :: Nil if left.canBeSubclassOf(Typed[Number]) && right.canBeSubclassOf(Typed[Number]) => Valid(TypingResultWithContext(commonSupertypeFinder.commonSupertype(left, right)(NumberTypesPromotionStrategy.ForMathOperation)))
case TypingResultWithContext(left, _) :: TypingResultWithContext(right, _) :: Nil => invalid(s"Operator '${e.getOperatorName}' used with mismatch types: ${left.display} and ${right.display}")
case TypingResultWithContext(left, _) :: Nil if left.canBeSubclassOf(Typed[Number]) => Valid(TypingResultWithContext(left))
case TypingResultWithContext(left, _) :: Nil => invalid(s"Operator '${e.getOperatorName}' used with non numeric type: ${left.display}")
case Nil => invalid("Empty plus")
}
case e: OperatorBetween => fixed(TypingResultWithContext(Typed[Boolean]))
case e: OperatorInstanceof => fixed(TypingResultWithContext(Typed[Boolean]))
case e: OperatorMatches => withChildrenOfType[String](TypingResultWithContext(Typed[Boolean]))
case e: OperatorNot => withChildrenOfType[Boolean](TypingResultWithContext(Typed[Boolean]))
case e: Projection => current.stackHead match {
case None => invalid("Cannot do projection here")
//index, check if can project?
case Some(iterateType) =>
extractIterativeType(iterateType.typingResult).andThen { listType =>
typeChildren(validationContext, node, current.pushOnStack(listType)) {
case TypingResultWithContext(result, _) :: Nil => Valid(TypingResultWithContext(Typed.genericTypeClass[java.util.List[_]](List(result))))
case other => invalid(s"Wrong selection type: ${other.map(_.display)}")
}
}
}
case e: PropertyOrFieldReference =>
current.stackHead.map(head => extractProperty(e, head.typingResult).map(toResult)).getOrElse {
invalid(s"Non reference '${e.toStringAST}' occurred. Maybe you missed '#' in front of it?")
}
//TODO: what should be here?
case e: QualifiedIdentifier => fixed(TypingResultWithContext(Unknown))
case e: Selection => current.stackHead match {
case None => invalid("Cannot do selection here")
case Some(iterateType) =>
extractIterativeType(iterateType.typingResult).andThen { elementType =>
typeChildren(validationContext, node, current.pushOnStack(elementType)) {
case TypingResultWithContext(result, _) :: Nil if result.canBeSubclassOf(Typed[Boolean]) => Valid(resolveSelectionTypingResult(e, iterateType, elementType))
case other => invalid(s"Wrong selection type: ${other.map(_.display)}")
}
}
}
case e: Ternary => withTypedChildren {
case TypingResultWithContext(condition, _) :: TypingResultWithContext(onTrue, _) :: TypingResultWithContext(onFalse, _) :: Nil =>
lazy val superType = commonSupertypeFinder.commonSupertype(onTrue, onFalse)(NumberTypesPromotionStrategy.ToSupertype)
if (!condition.canBeSubclassOf(Typed[Boolean])) {
invalid(s"Not a boolean expression used in ternary operator (expr ? onTrue : onFalse). Computed expression type: ${condition.display}")
} else if (superType == Typed.empty) {
invalid(s"Ternary operator (expr ? onTrue : onFalse) used with mismatch result types: ${onTrue.display} and ${onFalse.display}")
} else {
Valid(TypingResultWithContext(superType))
}
case _ => invalid("Invalid ternary operator") // shouldn't happen
}
case e: TypeReference =>
if (staticMethodInvocationsChecking) {
typeDefinitionSet.validateTypeReference(e, evaluationContextPreparer.prepareEvaluationContext(Context(""), Map.empty))
.map(typedClass => current.toResult(TypedNode(e, TypingResultWithContext(typedClass, staticContext = true))))
} else {
valid(Unknown)
}
case e: VariableReference =>
//only sane way of getting variable name :|
val name = e.toStringAST.substring(1)
validationContext.get(name).orElse(current.stackHead.map(_.typingResult).filter(_ => name == "this")) match {
case Some(result) => valid(result)
case None => invalid(s"Unresolved reference '$name'")
}
})
}
//currently there is no better way than to check ast string starting with $ or ^
private def resolveSelectionTypingResult(node: Selection, parentType: TypingResultWithContext, childElementType: TypingResult) = {
  // "^[...]"/"$[...]" select a single element (first/last match), so the result is the element
  // type; plain "?[...]" selection keeps the parent (collection) type.
  val isSingleElementSelection = List("$", "^").exists(node.toStringAST.startsWith(_))
  if (isSingleElementSelection) TypingResultWithContext(childElementType) else parentType
}
/**
 * Types an equality-like operator: the two operands must have a common supertype
 * (otherwise they are never comparable) and the result is always Boolean.
 */
private def checkEqualityLikeOperation(validationContext: ValidationContext, node: Operator, current: TypingContext): ValidatedNel[ExpressionParseError, CollectedTypingResult] = {
  typeChildren(validationContext, node, current) {
    case TypingResultWithContext(left, _) :: TypingResultWithContext(right, _) :: Nil =>
      val supertype = commonSupertypeFinder.commonSupertype(right, left)(NumberTypesPromotionStrategy.ToSupertype)
      if (supertype != Typed.empty) {
        Valid(TypingResultWithContext(Typed[Boolean]))
      } else {
        invalid(s"Operator '${node.getOperatorName}' used with not comparable types: ${left.display} and ${right.display}")
      }
    case _ =>
      invalid(s"Bad '${node.getOperatorName}' operator construction") // shouldn't happen
  }
}
/**
 * Types a binary arithmetic operator: both operands must be numeric and the result type
 * is their common supertype under the given number-promotion strategy.
 */
private def checkTwoOperandsArithmeticOperation(validationContext: ValidationContext, node: Operator, current: TypingContext)
                                               (implicit numberPromotionStrategy: NumberTypesPromotionStrategy): ValidatedNel[ExpressionParseError, CollectedTypingResult] = {
  typeChildren(validationContext, node, current) {
    case TypingResultWithContext(left, _) :: TypingResultWithContext(right, _) :: Nil =>
      if (left.canBeSubclassOf(Typed[Number]) && right.canBeSubclassOf(Typed[Number])) {
        // the implicit promotion strategy drives the supertype computation here
        Valid(TypingResultWithContext(commonSupertypeFinder.commonSupertype(left, right)))
      } else {
        invalid(s"Operator '${node.getOperatorName}' used with mismatch types: ${left.display} and ${right.display}")
      }
    case _ =>
      invalid(s"Bad '${node.getOperatorName}' operator construction") // shouldn't happen
  }
}
/**
 * Types a unary arithmetic operator (e.g. unary minus): the single operand must be numeric
 * and the result keeps the operand's type.
 */
private def checkSingleOperandArithmeticOperation(validationContext: ValidationContext, node: Operator, current: TypingContext): ValidatedNel[ExpressionParseError, CollectedTypingResult] = {
  typeChildren(validationContext, node, current) {
    case TypingResultWithContext(operand, _) :: Nil =>
      if (operand.canBeSubclassOf(Typed[Number])) Valid(TypingResultWithContext(operand))
      else invalid(s"Operator '${node.getOperatorName}' used with non numeric type: ${operand.display}")
    case _ =>
      invalid(s"Bad '${node.getOperatorName}' operator construction") // shouldn't happen
  }
}
/**
 * Resolves the type of a property access on `t`. Unknown targets are allowed only when
 * `methodExecutionForUnknownAllowed` is set; for unions the property must resolve on at
 * least one alternative, and the result is the union of all successful resolutions.
 */
private def extractProperty(e: PropertyOrFieldReference, t: TypingResult): ValidatedNel[ExpressionParseError, TypingResult] = t match {
  case Unknown =>
    if (methodExecutionForUnknownAllowed) Valid(Unknown)
    else invalid("Property access on Unknown is not allowed")
  case s: SingleTypingResult =>
    extractSingleProperty(e)(s)
  case TypedUnion(possible) =>
    // keep only the alternatives on which the property resolves successfully
    val resolved = possible.toList.flatMap(single => extractSingleProperty(e)(single).toOption)
    if (resolved.nonEmpty) Valid(Typed(resolved.toSet))
    else invalid(s"There is no property '${e.getName}' in type: ${t.display}")
}
/**
 * Types a method invocation: the invocation target is taken from the head of the typing
 * stack, the parameters are typed as the node's children (with the target popped off the
 * stack). Method-resolution failures are errors only when `strictMethodsChecking` is on;
 * otherwise the result falls back to Unknown.
 */
// NOTE(review): the `disableMethodExecutionForUnknown` parameter appears unused here - the
// class-level `methodExecutionForUnknownAllowed` flag is passed to TypeMethodReference
// instead; verify against the caller whether this is intentional.
private def extractMethodReference(reference: MethodReference, validationContext: ValidationContext, node: SpelNode, context: TypingContext, disableMethodExecutionForUnknown: Boolean) = {
  context.stack match {
    case head :: tail =>
      val isStatic = head.staticContext
      typeChildren(validationContext, node, context.copy(stack = tail)) { typedParams =>
        TypeMethodReference(reference.getName, head.typingResult, typedParams.map(_.typingResult), isStatic, methodExecutionForUnknownAllowed) match {
          case Right(typingResult) => Valid(TypingResultWithContext(typingResult))
          case Left(errorMsg) => if (strictMethodsChecking) invalid(errorMsg) else Valid(TypingResultWithContext(Unknown))
        }
      }
    case Nil =>
      // no invocation target on the stack - the method reference has nothing to act on
      invalid(s"Invalid method reference: ${reference.toStringAST}.")
  }
}
/**
 * Resolves a property on a non-union typing result. Tagged values delegate to their
 * underlying object type (hence @tailrec); object typing results check their declared
 * fields before falling back to class methods; dicts are delegated to the dict typer.
 */
@tailrec
private def extractSingleProperty(e: PropertyOrFieldReference)
                                 (t: SingleTypingResult): ValidatedNel[ExpressionParseError, TypingResult] = {
  t match {
    case tagged: TypedTaggedValue =>
      // unwrap the tag and retry on the underlying object type
      extractSingleProperty(e)(tagged.objType)
    case typedClass: TypedClass =>
      // try a real bean property/method first, then map-like value access
      propertyTypeBasedOnMethod(e)(typedClass).orElse(MapLikePropertyTyper.mapLikeValueType(typedClass))
        .map(Valid(_))
        .getOrElse(invalid(s"There is no property '${e.getName}' in type: ${t.display}"))
    case TypedObjectTypingResult(fields, objType, _) =>
      // declared fields take precedence over methods of the underlying class
      val typeBasedOnFields = fields.get(e.getName)
      typeBasedOnFields.orElse(propertyTypeBasedOnMethod(e)(objType))
        .map(Valid(_))
        .getOrElse(invalid(s"There is no property '${e.getName}' in type: ${t.display}"))
    case dict: TypedDict =>
      dictTyper.typeDictValue(dict, e)
  }
}
/** Looks up the property/field type via reflection on the class behind `typedClass`. */
private def propertyTypeBasedOnMethod(e: PropertyOrFieldReference)(typedClass: TypedClass) =
  EspTypeUtils.clazzDefinition(typedClass.klass).getPropertyOrFieldType(e.getName)
/**
 * Determines the element type iterated over by projection/selection expressions:
 * collection element, map entry (as a {key, value} record) or array element. Other
 * single types cannot be iterated; anything else falls back to Unknown.
 */
private def extractIterativeType(parent: TypingResult): Validated[NonEmptyList[ExpressionParseError], TypingResult] = parent match {
  case tc: SingleTypingResult if tc.objType.canBeSubclassOf(Typed[java.util.Collection[_]]) =>
    // element type is the first type parameter, Unknown when not parameterized
    Valid(tc.objType.params.headOption.getOrElse(Unknown))
  case tc: SingleTypingResult if tc.objType.canBeSubclassOf(Typed[java.util.Map[_, _]]) =>
    // maps iterate over entries, modeled as a record with "key" and "value" fields
    Valid(TypedObjectTypingResult(List(
      ("key", tc.objType.params.headOption.getOrElse(Unknown)),
      ("value", tc.objType.params.drop(1).headOption.getOrElse(Unknown)))))
  case tc: SingleTypingResult if tc.objType.klass.isArray =>
    Valid(tc.objType.params.headOption.getOrElse(Unknown))
  case tc: SingleTypingResult => Validated.invalidNel(ExpressionParseError(s"Cannot do projection/selection on ${tc.display}"))
  //FIXME: what if more results are present?
  case _ => Valid(Unknown)
}
/** Types all children of `node` (for their intermediate results) but returns a fixed result. */
private def typeChildrenAndReturnFixed(validationContext: ValidationContext, node: SpelNode, current: TypingContext)(result: TypingResultWithContext)
  : Validated[NonEmptyList[ExpressionParseError], CollectedTypingResult] = {
  // children types are ignored - only the fixed result is reported for this node
  val constantResult = (_: List[TypingResultWithContext]) => Valid(result)
  typeChildren(validationContext, node, current)(constantResult)
}
/**
 * Types every child of `node` (each starting from a clean intermediate-result map),
 * combines all intermediate results, applies `result` to the children's final types and
 * records the produced type under this node's id.
 */
private def typeChildren(validationContext: ValidationContext, node: SpelNode, current: TypingContext)
                        (result: List[TypingResultWithContext] => ValidatedNel[ExpressionParseError, TypingResultWithContext])
  : ValidatedNel[ExpressionParseError, CollectedTypingResult] = {
  // sequence: all children must type-check, errors are accumulated in the NonEmptyList
  val data = node.children.map(child => typeNode(validationContext, child, current.withoutIntermediateResults)).sequence
  data.andThen { collectedChildrenResults =>
    withCombinedIntermediate(collectedChildrenResults, current) { childrenResults =>
      result(childrenResults).map(TypedNode(node, _))
    }
  }
}
/**
 * Merges the intermediate results of the current context with those of all children,
 * then stores the final node produced by `result` in the combined map.
 */
private def withCombinedIntermediate(intermediate: List[CollectedTypingResult], current: TypingContext)
                                    (result: List[TypingResultWithContext] => ValidatedNel[ExpressionParseError, TypedNode])
  : ValidatedNel[ExpressionParseError, CollectedTypingResult] = {
  // relies on the Semigroup from the companion object: duplicate node ids are asserted equal
  val intermediateResultsCombination = Monoid.combineAll(current.intermediateResults :: intermediate.map(_.intermediateResults))
  val intermediateTypes = intermediate.map(_.finalResult)
  result(intermediateTypes).map(CollectedTypingResult.withIntermediateAndFinal(intermediateResultsCombination, _))
}
/** Wraps a message into a single-error invalid result. */
private def invalid[T](message: String): ValidatedNel[ExpressionParseError, T] =
  Validated.invalidNel(ExpressionParseError(message))
/** Returns a copy of this Typer with a different dict typer; all other settings are kept. */
def withDictTyper(dictTyper: SpelDictTyper) =
  new Typer(classLoader, commonSupertypeFinder, dictTyper, strictMethodsChecking = strictMethodsChecking,
    staticMethodInvocationsChecking, typeDefinitionSet, evaluationContextPreparer, methodExecutionForUnknownAllowed, dynamicPropertyAccessAllowed)
}
object Typer {

  // This Semigroup is used in combining `intermediateResults: Map[SpelNodeId, TypingResult]` in Typer.
  // If there is no bug in Typer, collisions shouldn't happen
  implicit def notAcceptingMergingSemigroup: Semigroup[TypingResultWithContext] = new Semigroup[TypingResultWithContext] with LazyLogging {

    override def combine(x: TypingResultWithContext, y: TypingResultWithContext): TypingResultWithContext = {
      // different types under the same node id indicate a typer bug - fail fast
      assert(x == y, s"Types not matching during combination of types for spel nodes: $x != $y")
      // merging the same types is not bad but it is a warning that sth went wrong e.g. typer typed something more than one time
      // or spel node's identity is broken
      logger.warn(s"Merging same types: $x for the same nodes. This shouldn't happen")
      x
    }
  }

  /**
   * A typing result plus a flag marking whether it refers to a static (class-level)
   * context, e.g. a TypeReference used for static method invocation.
   */
  case class TypingResultWithContext(typingResult: TypingResult, staticContext: Boolean = false) {

    def display: String = typingResult.display
  }

  /**
   * It contains stack of types for recognition of nested node type.
   * intermediateResults are all results that we can collect for intermediate nodes
   */
  private case class TypingContext(stack: List[TypingResultWithContext], intermediateResults: Map[SpelNodeId, TypingResultWithContext]) {

    def pushOnStack(typingResultWithContext: TypingResultWithContext): TypingContext = copy(stack = typingResultWithContext :: stack)

    def pushOnStack(typingResult: TypingResult): TypingContext = copy(stack = TypingResultWithContext(typingResult) :: stack)

    // pushing a collected result also merges its intermediate results into the context
    def pushOnStack(typingResult: CollectedTypingResult): TypingContext =
      TypingContext(typingResult.finalResult :: stack, intermediateResults ++ typingResult.intermediateResults)

    def stackHead: Option[TypingResultWithContext] = stack.headOption

    def withoutIntermediateResults: TypingContext = copy(intermediateResults = Map.empty)

    def toResult(finalNode: TypedNode): CollectedTypingResult =
      CollectedTypingResult(intermediateResults + (finalNode.nodeId -> finalNode.typ), finalNode.typ)
  }

  /** Thrown when a SpEL expression fails to compile; carries the offending AST as text. */
  class SpelCompilationException(node: SpelNode, cause: Throwable)
    extends RuntimeException(s"Can't compile SpEL expression: `${node.toStringAST}`, message: `${cause.getMessage}`.", cause)
}
/** Typing result assigned to a concrete SpEL AST node, identified by its node id. */
private[spel] case class TypedNode(nodeId: SpelNodeId, typ: TypingResultWithContext)

private[spel] object TypedNode {

  // convenience constructor computing the id from the raw SpEL node
  def apply(node: SpelNode, typ: TypingResultWithContext): TypedNode =
    TypedNode(SpelNodeId(node), typ)
}
/**
 * Final typing result of an expression together with the results collected for all
 * intermediate AST nodes.
 */
private[spel] case class CollectedTypingResult(intermediateResults: Map[SpelNodeId, TypingResultWithContext], finalResult: TypingResultWithContext) {

  // exposes the plain typing results (the static-context flags are dropped here)
  def typingInfo: SpelExpressionTypingInfo = SpelExpressionTypingInfo(intermediateResults.map(intermediateResult => (intermediateResult._1 -> intermediateResult._2.typingResult)), finalResult.typingResult)
}

private[spel] object CollectedTypingResult {

  def withEmptyIntermediateResults(finalResult: TypingResultWithContext): CollectedTypingResult =
    CollectedTypingResult(Map.empty, finalResult)

  // records the final node's type in the intermediate map as well
  def withIntermediateAndFinal(intermediateResults: Map[SpelNodeId, TypingResultWithContext], finalNode: TypedNode): CollectedTypingResult = {
    CollectedTypingResult(intermediateResults + (finalNode.nodeId -> finalNode.typ), finalNode.typ)
  }
}
/** Typing information for a SpEL expression as exposed outside the typer. */
case class SpelExpressionTypingInfo(intermediateResults: Map[SpelNodeId, TypingResult],
                                    typingResult: TypingResult) extends ExpressionTypingInfo
| TouK/nussknacker | interpreter/src/main/scala/pl/touk/nussknacker/engine/spel/Typer.scala | Scala | apache-2.0 | 29,430 |
package uk.gov.gds.ier.transaction.overseas.confirmation
import uk.gov.gds.ier.test.ControllerTestSuite
import uk.gov.gds.ier.service.apiservice.EroAuthorityDetails
import uk.gov.gds.ier.model._
import uk.gov.gds.ier.controller.MockConfig
import uk.gov.gds.ier.security.{Base64EncodingService, EncryptionService}
import uk.gov.gds.ier.transaction.complete.CompleteCookie
import org.joda.time.LocalDate
import uk.gov.gds.ier.validation.constants.DateOfBirthConstants
/**
 * Controller tests for the overseas confirmation step
 * (POST /register-to-vote/overseas/confirmation): verifies the redirect to the complete
 * page, the cookie handling, and the flags (email confirmation, birthday bunting) stored
 * in the complete-step cookie.
 */
class ConfirmationStepTest extends ControllerTestSuite {

  val config = new MockConfig
  implicit val _serialiser = jsonSerialiser
  implicit val encryptionService = new EncryptionService (new Base64EncodingService, config)

  behavior of "ConfirmationStep.post submit application and set Refnum and LocalAuthority for the next step"
  // NOTE: this request is issued once, while the class is constructed (inside running(...)),
  // and the `it should` examples below all assert on the shared resultFuture/allCookies.
  running(FakeApplication()) {
    val Some(resultFuture) = route(
      FakeRequest(POST, "/register-to-vote/overseas/confirmation")
        .withIerSession()
        .withApplication(completeOverseasApplication.copy(
          lastUkAddress = Some(PartialAddress(
            addressLine = Some("1 The Cottages, Moseley Road, Hallow, Worcestershire"),
            uprn = Some("100120595384"),
            postcode = "WR2 6NJ",
            gssCode = Some("E07000235"),
            manualAddress = None
          ))
        ))
    )

    it should "redirect to Complete page" in {
      status(resultFuture) should be(SEE_OTHER)
      redirectLocation(resultFuture) should be(Some("/register-to-vote/complete"))
    }

    val allCookies = cookies(resultFuture)

    it should "delete application inprogress data, delete main cookie but not session" in {
      // the payload cookie must still exist but be emptied
      allCookies.get(sessionPayloadKey) should not be (None)
      allCookies.get(sessionPayloadKey).get.value should be("")
    }

    it should "add new cookie with confirmation data for Complete page" in {
      allCookies.get(completeCookieKey) should not be (None)
      allCookies.get(completeCookieKey).get.value.trim should not be empty
    }

    "content of Confirmation cookie" should "contain refnum and local ERO authority details" in {
      val result = getCompleteCookie(allCookies)
      result should not be (None)
      result.get.refNum.trim should not be ("")
      result.get.hasOtherAddress should be(false)
      result.get.backToStartUrl should be("/register-to-vote")
      // authority details resolved from the gssCode of the last UK address above
      result.get.authority should be(Some(EroAuthorityDetails(
        name = "Malvern Hills (test)",
        urls = List(
          "http://www.malvernhills.gov.uk/",
          "http://www.malvernhills.gov.uk/cms/council-and-democracy/elections.aspx"
        ),
        email = Some("worcestershirehub@malvernhills.gov.uk.test"),
        addressLine1 = Some("Council House"),
        addressLine2 = Some("Avenue Road"),
        addressLine3 = Some("Malvern"),
        addressLine4 = Some(""),
        postcode = Some("WR14 3AF"),
        phone = Some("01684 862151")
      )))
    }
  }

  behavior of "showEmailConfirmation flag"

  it should "submit application and set show email message flag to false for no email addresses present" in runningApp {
    val Some(result) = route(
      FakeRequest(POST, "/register-to-vote/overseas/confirmation")
        .withIerSession()
        .withApplication(completeOverseasApplication.copy(
          waysToVote = Some(WaysToVote(WaysToVoteType.InPerson)),
          postalOrProxyVote = None,
          contact = Some(Contact(
            post = true,
            email = None,
            phone = None
          ))
        ))
    )
    status(result) should be(SEE_OTHER)
    redirectLocation(result) should be(Some("/register-to-vote/complete"))

    val allCookies = cookies(result)
    val completeStepData = getCompleteCookie(allCookies)
    completeStepData should not be(None)
    completeStepData.get.showEmailConfirmation should be(false)
  }

  it should "submit application and set show email message flag to true if contact email is present" in runningApp {
    val Some(result) = route(
      FakeRequest(POST, "/register-to-vote/overseas/confirmation")
        .withIerSession()
        .withApplication(completeOverseasApplication.copy(
          waysToVote = Some(WaysToVote(WaysToVoteType.InPerson)),
          postalOrProxyVote = None,
          contact = Some(Contact(
            post = false,
            email = Some(ContactDetail(
              contactMe = true,
              detail = Some("test@email.com")
            )),
            phone = None
          ))
        ))
    )
    status(result) should be(SEE_OTHER)
    redirectLocation(result) should be(Some("/register-to-vote/complete"))

    val allCookies = cookies(result)
    val completeStepData = getCompleteCookie(allCookies)
    completeStepData should not be(None)
    completeStepData.get.showEmailConfirmation should be(true)
  }

  it should "submit application and set show email message flag to true if postal email is present" in runningApp {
    val Some(result) = route(
      FakeRequest(POST, "/register-to-vote/overseas/confirmation")
        .withIerSession()
        .withApplication(completeOverseasApplication.copy(
          waysToVote = Some(WaysToVote(WaysToVoteType.ByPost)),
          postalOrProxyVote = Some(PostalOrProxyVote(
            typeVote = WaysToVoteType.ByPost,
            postalVoteOption = Some(true),
            deliveryMethod = Some(PostalVoteDeliveryMethod(
              deliveryMethod = Some("email"),
              emailAddress = Some("test@email.com")
            ))
          )),
          contact = Some(Contact(
            post = true,
            email = None,
            phone = None
          ))
        ))
    )
    status(result) should be(SEE_OTHER)
    redirectLocation(result) should be(Some("/register-to-vote/complete"))

    val allCookies = cookies(result)
    val completeStepData = getCompleteCookie(allCookies)
    completeStepData should not be(None)
    completeStepData.get.showEmailConfirmation should be(true)
  }

  it should "submit application and set show email message flag to true if proxy postal email is present" in runningApp {
    // NOTE(review): waysToVote is ByProxy while typeVote below is ByPost - looks like a
    // copy of the previous fixture; verify whether typeVote should be ByProxy here.
    val Some(result) = route(
      FakeRequest(POST, "/register-to-vote/overseas/confirmation")
        .withIerSession()
        .withApplication(completeOverseasApplication.copy(
          waysToVote = Some(WaysToVote(WaysToVoteType.ByProxy)),
          postalOrProxyVote = Some(PostalOrProxyVote(
            typeVote = WaysToVoteType.ByPost,
            postalVoteOption = Some(true),
            deliveryMethod = Some(PostalVoteDeliveryMethod(
              deliveryMethod = Some("email"),
              emailAddress = Some("test@email.com")
            ))
          )),
          contact = Some(Contact(
            post = true,
            email = None,
            phone = None
          ))
        ))
    )
    status(result) should be(SEE_OTHER)
    redirectLocation(result) should be(Some("/register-to-vote/complete"))

    val allCookies = cookies(result)
    val completeStepData = getCompleteCookie(allCookies)
    completeStepData should not be(None)
    completeStepData.get.showEmailConfirmation should be(true)
  }

  it should "submit application and set show email message flag to true if postal email and contact email are present" in runningApp {
    val Some(result) = route(
      FakeRequest(POST, "/register-to-vote/overseas/confirmation")
        .withIerSession()
        .withApplication(completeOverseasApplication.copy(
          waysToVote = Some(WaysToVote(WaysToVoteType.ByProxy)),
          postalOrProxyVote = Some(PostalOrProxyVote(
            typeVote = WaysToVoteType.ByPost,
            postalVoteOption = Some(true),
            deliveryMethod = Some(PostalVoteDeliveryMethod(
              deliveryMethod = Some("email"),
              emailAddress = Some("test@email.com")
            ))
          )),
          contact = Some(Contact(
            post = false,
            email = Some(ContactDetail(
              contactMe = true,
              detail = Some("test@email.com")
            )),
            phone = None
          ))
        ))
    )
    status(result) should be(SEE_OTHER)
    redirectLocation(result) should be(Some("/register-to-vote/complete"))

    val allCookies = cookies(result)
    val completeStepData = getCompleteCookie(allCookies)
    completeStepData should not be(None)
    completeStepData.get.showEmailConfirmation should be(true)
  }

  behavior of "showBirthdayBunting flag"

  it should "submit application and set show bunting flag to true if its applicants birthday" in {
    applicationWithDateOfBirth(LocalDate.now, expectedBuntingFlagValue = true)
    applicationWithDateOfBirth(LocalDate.now.minusYears(1), expectedBuntingFlagValue = true)
    applicationWithDateOfBirth(LocalDate.now.minusYears(30), expectedBuntingFlagValue = true)
    applicationWithDateOfBirth(LocalDate.now.minusYears(60), expectedBuntingFlagValue = true)
  }

  it should "submit application and set show bunting flag to false if its not applicants birthday" in {
    applicationWithDateOfBirth(LocalDate.now.minusDays(1), expectedBuntingFlagValue = false)
    applicationWithDateOfBirth(LocalDate.now.minusDays(2), expectedBuntingFlagValue = false)
    applicationWithDateOfBirth(LocalDate.now.minusMonths(1), expectedBuntingFlagValue = false)
    applicationWithDateOfBirth(LocalDate.now.minusMonths(2), expectedBuntingFlagValue = false)
    applicationWithDateOfBirth(LocalDate.now.minusMonths(11).minusYears(80), expectedBuntingFlagValue = false)
  }

  // convenience overload: builds a DOB from the given date and delegates below
  def applicationWithDateOfBirth(localDate: LocalDate, expectedBuntingFlagValue: Boolean): Unit =
    applicationWithDateOfBirth(createDoBFrom(localDate), expectedBuntingFlagValue: Boolean)

  // submits an application with the given date of birth and asserts the bunting flag value
  def applicationWithDateOfBirth(dateOfBirth: DOB, expectedBuntingFlagValue: Boolean): Unit =
    running(FakeApplication()) {
      val Some(result) = route(
        FakeRequest(POST, "/register-to-vote/overseas/confirmation")
          .withIerSession()
          .withApplication(completeOverseasApplication.copy(dob = Some(dateOfBirth)))
      )
      status(result) should be(SEE_OTHER)
      redirectLocation(result) should be(Some("/register-to-vote/complete"))

      val allCookies = cookies(result)
      val completeStepData = getCompleteCookie(allCookies)
      completeStepData should not be (None)
      completeStepData.get.showBirthdayBunting should be(expectedBuntingFlagValue)
    }

  private def createDoBFrom(localDate: LocalDate) =
    DOB(year = localDate.year.get,
      month = localDate.monthOfYear.get,
      day = localDate.dayOfMonth.get)
}
| michaeldfallen/ier-frontend | test/uk/gov/gds/ier/transaction/overseas/confirmation/ConfirmationStepTest.scala | Scala | mit | 10,539 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.jms.action
import javax.jms.Message
import io.gatling.commons.validation._
import io.gatling.core.action.RequestAction
import io.gatling.core.session._
import io.gatling.core.util.NameGen
import io.gatling.jms.client.JmsConnectionPool
import io.gatling.jms.protocol.JmsProtocol
import io.gatling.jms.request._
/**
 * Base class for JMS request actions: resolves the destination and a message producer
 * from the shared connection pool, resolves message attributes against the session,
 * and sends the configured message type.
 */
abstract class JmsAction(attributes: JmsAttributes, protocol: JmsProtocol, pool: JmsConnectionPool)
  extends RequestAction with JmsLogging with NameGen {

  override val requestName = attributes.requestName

  // connection is shared per (connectionFactory, credentials) pair via the pool
  protected val jmsConnection = pool.jmsConnection(protocol.connectionFactory, protocol.credentials)
  private val jmsDestination = jmsConnection.destination(attributes.destination)
  protected val producer = jmsConnection.producer(jmsDestination, protocol.deliveryMode)

  /**
   * Resolves the optional JMS type and all message properties against the session,
   * then dispatches on the configured message kind and sends it.
   */
  override def sendRequest(requestName: String, session: Session): Validation[Unit] =
    for {
      jmsType <- resolveOptionalExpression(attributes.jmsType, session)
      props <- resolveProperties(attributes.messageProperties, session)
    } yield {
      val beforeSend0 = beforeSend(requestName, session) _
      // producer.get() - presumably a per-thread/pooled producer handle; confirm in JmsConnection
      val p = producer.get()
      // NOTE(review): the Validation produced by resolving the message body below is
      // discarded by the Unit-typed yield (value discarding), so a body-resolution
      // failure is not propagated to the caller - verify this is intended.
      attributes.message match {
        case BytesJmsMessage(bytes) => bytes(session).map(bytes => p.sendBytesMessage(bytes, props, jmsType, beforeSend0))
        case MapJmsMessage(map) => map(session).map(map => p.sendMapMessage(map, props, jmsType, beforeSend0))
        case ObjectJmsMessage(o) => o(session).map(o => p.sendObjectMessage(o, props, jmsType, beforeSend0))
        case TextJmsMessage(txt) => txt(session).map(txt => p.sendTextMessage(txt, props, jmsType, beforeSend0))
      }
    }

  /**
   * Resolves every (key, value) property expression against the session, accumulating
   * into a plain Map; fails on the first unresolvable key or value.
   */
  private def resolveProperties(
    properties: Map[Expression[String], Expression[Any]],
    session: Session
  ): Validation[Map[String, Any]] =
    properties.foldLeft(Map.empty[String, Any].success) {
      case (resolvedProperties, (key, value)) =>
        for {
          key <- key(session)
          value <- value(session)
          resolvedProperties <- resolvedProperties
        } yield resolvedProperties + (key -> value)
    }

  /** Hook invoked with the JMS message just before it is sent (implemented by subclasses). */
  protected def beforeSend(requestName: String, session: Session)(message: Message): Unit
}
| MykolaB/gatling | gatling-jms/src/main/scala/io/gatling/jms/action/JmsAction.scala | Scala | apache-2.0 | 2,827 |
package com.softwaremill.macwire
import com.softwaremill.macwire.dependencyLookup._
import scala.reflect.macros.blackbox
/**
 * Macro implementations backing MacWire's `wire`, `wireWith`, `wireSet` and
 * `wiredInModule`: they generate construction/lookup code at compile time, resolving
 * each required dependency from the enclosing scope via DependencyResolver.
 */
object MacwireMacros {
  private val log = new Logger()

  /**
   * Implementation of `wire[T]`: generates `new T(...)` using the public primary
   * constructor, or falls back to a single `apply` factory in T's companion object.
   */
  def wire_impl[T: c.WeakTypeTag](c: blackbox.Context): c.Expr[T] = {
    import c.universe._

    def abort(msg: String): Nothing = c.abort(c.enclosingPosition, msg)

    lazy val dependencyResolver = new DependencyResolver[c.type](c, log)

    // Fallback when there is no public primary constructor: look for exactly one
    // public `apply` in the companion object whose return type conforms to the target.
    def tryCompanionObject(targetType: Type): Tree = {
      if (targetType.companion == NoType) {
        abort(s"Cannot find a public constructor nor a companion object for $targetType")
      } else {
        val isCompanionMethodFactory = (method: Symbol) => {
          method.isMethod &&
            method.isPublic &&
            method.asMethod.returnType <:< targetType &&
            method.asMethod.name.decodedName.toString == "apply"
        }

        targetType.companion.members.filter(isCompanionMethodFactory).toList match {
          case Nil => abort(s"Cannot find a public constructor for $targetType, nor apply method in its companion object")
          case applyMethod :: Nil =>
            wireParameters(
              Select(Ident(targetType.typeSymbol.companion), applyMethod),
              applyMethod.asMethod.paramLists,
              _.typeSignature)
          case moreThanOne => abort(s"No public primary constructor found for $targetType and " +
            "multiple matching apply method in its companion object were found.")
        }
      }
    }

    // Applies the construction method to every non-implicit parameter list, resolving
    // each parameter's value from scope.
    def wireParameters(constructionMethodTree: Tree, paramLists: List[List[Symbol]], resolveType: c.Symbol => c.Type): Tree = {
      filterOutImplicitParams(paramLists).foldLeft(constructionMethodTree) { case (applicationTree, paramList) =>
        val constructorParams: List[Tree] = for (param <- paramList) yield {
          val wireToOpt = dependencyResolver.resolve(param, resolveType(param))

          // If no value is found, an error has been already reported.
          wireToOpt.getOrElse(reify(null).tree)
        }

        Apply(applicationTree, constructorParams)
      }
    }

    // Implicit parameter lists are left to the compiler's implicit resolution.
    def filterOutImplicitParams(targetConstructorParamLists: List[List[Symbol]]): List[List[Symbol]] = {
      targetConstructorParamLists.filterNot(_.headOption.exists(_.isImplicit))
    }

    // Generates `new T(...)` for the primary constructor, substituting type-alias /
    // type-parameter information so generic targets instantiate correctly.
    def wirePrimaryConstructor(targetType: Type, targetConstructor: Symbol): Tree = {
      // We need to get the "real" type in case the type parameter is a type alias - then it cannot
      // be directly instantiated
      val targetTpe = targetType.dealias
      val (sym, tpeArgs) = targetTpe match {
        case TypeRef(_, sym, tpeArgs) => (sym, tpeArgs)
        case t => abort(s"Target type not supported for wiring: $t. Please file a bug report with your use-case.")
      }
      // Substitutes class type parameters with the concrete type arguments; by-name
      // parameters are unwrapped to their underlying type.
      def paramType(param: Symbol): Type = {
        val pTpe = param.typeSignature.substituteTypes(sym.asClass.typeParams, tpeArgs)
        if (param.asTerm.isByNameParam) {
          pTpe.typeArgs.head
        } else {
          pTpe
        }
      }

      val constructionMethodTree = Select(New(Ident(targetTpe.typeSymbol)), termNames.CONSTRUCTOR)
      wireParameters(
        constructionMethodTree,
        targetConstructor.asMethod.paramLists,
        sym => paramType(sym)) // SI-4751
    }

    // Entry point: prefer the public primary constructor, otherwise try the companion.
    def createNewTargetWithParams(): Expr[T] = {
      val targetType = implicitly[c.WeakTypeTag[T]].tpe
      log.withBlock(s"Trying to find parameters to create new instance of: [$targetType] at ${c.enclosingPosition}") {
        val targetConstructorOpt = targetType.members.find(m => m.isMethod && m.asMethod.isPrimaryConstructor && m.isPublic)
        val code = targetConstructorOpt match {
          case None =>
            tryCompanionObject(targetType)
          case Some(targetConstructor) =>
            wirePrimaryConstructor(targetType, targetConstructor)
        }

        log(s"Generated code: ${showRaw(code)}")
        c.Expr(code)
      }
    }

    createNewTargetWithParams()
  }

  /**
   * Implementation of `wireWith(factory)`: resolves each parameter of the factory
   * lambda from scope and applies the underlying function to the resolved values.
   */
  def wireWith_impl[T: c.WeakTypeTag](c: blackbox.Context)(factory: c.Tree): c.Tree = {
    import c.universe._

    val typeCheckUtil = new TypeCheckUtil[c.type](c, log)
    val dependencyResolver = new DependencyResolver[c.type](c, log)
    import typeCheckUtil.typeCheckIfNeeded

    // NOTE: this destructuring only supports a literal lambda whose body is a single
    // application - any other tree shape will fail the match at compile time.
    val Block(Nil, Function(params, Apply(fun, _))) = factory
    val values = params.map {
      case vd @ ValDef(_, name, tpt, rhs) =>
        dependencyResolver.resolve(vd.symbol, typeCheckIfNeeded(tpt)).getOrElse(reify(null).tree)
    }

    val code = q"$fun(..$values)"
    log("Generated code: " + showCode(code))
    code
  }

  /**
   * Implementation of `wireSet[T]`: collects all values of type T found in scope into
   * a Set.
   */
  def wireSet_impl[T: c.WeakTypeTag](c: blackbox.Context): c.Tree = {
    import c.universe._

    val targetType = implicitly[c.WeakTypeTag[T]]
    val dependencyResolver = new DependencyResolver[c.type](c, log)

    val instances = dependencyResolver.resolveAll(targetType.tpe)

    // The lack of hygiene can be seen here as a feature, the choice of Set implementation
    // is left to the user - you want a `mutable.Set`, just import `mutable.Set` before the `wireSet[T]` call
    val code = q"Set(..$instances)"
    log("Generated code: " + show(code))
    code
  }

  /**
   * Implementation of `wiredInModule(in)`: builds a Wired instance mapping each AnyRef
   * type exposed by a public no-arg member of `in` to a factory reading that member.
   */
  def wiredInModule_impl(c: blackbox.Context)(in: c.Expr[AnyRef]): c.Tree = {
    import c.universe._

    // Public module members are nullary methods - unwrap their result types.
    def extractTypeFromNullaryType(tpe: Type) = {
      tpe match {
        case NullaryMethodType(underlying) => Some(underlying)
        case _ => None
      }
    }

    // the module expression is evaluated once and captured under a fresh name
    val capturedIn = TermName(c.freshName())

    // Builds `(classOf-like type literal) -> (() => module.member)` pairs for every
    // public AnyRef-typed nullary member.
    def instanceFactoriesByClassInTree(tree: Tree): List[Tree] = {
      val members = tree.tpe.members

      val pairs = members
        .filter(s => s.isMethod && s.isPublic)
        .flatMap { m =>
          extractTypeFromNullaryType(m.typeSignature) match {
            case Some(tpe) => Some((m, tpe))
            case None =>
              log(s"Cannot extract type from ${m.typeSignature} for member $m!")
              None
          }
        }
        .filter { case (_, tpe) => tpe <:< typeOf[AnyRef] }
        .map { case (member, tpe) =>
          val key = Literal(Constant(tpe))
          val value = q"$capturedIn.$member"

          log(s"Found a mapping: $key -> $value")

          q"scala.Predef.ArrowAssoc($key) -> (() => $value)"
        }

      pairs.toList
    }

    log.withBlock(s"Generating wired-in-module for ${in.tree}") {
      val pairs = instanceFactoriesByClassInTree(in.tree)

      val code = q"""
        val $capturedIn = $in
        com.softwaremill.macwire.Wired(scala.collection.immutable.Map(..$pairs))
      """

      log(s"Generated code: " + show(code))
      code
    }
  }
}
| guersam/macwire | macros/src/main/scala/com/softwaremill/macwire/MacwireMacros.scala | Scala | apache-2.0 | 6,632 |
import scala.collection.mutable.ListBuffer
import Extensions._
/**
* Created by tiennv on 06/02/2016.
*/
/**
 * Builds a Merkle (hash) tree over the given seed values: leaf hashes are combined
 * pairwise level by level until a single root hash remains.
 *
 * Null propagation matches the original design: an unpaired element at the end of a
 * level hashes with a null partner, which yields a null entry at the next level.
 * NOTE(review): an empty seed (no leaves) never reduces to a single hash - verify
 * callers always supply at least one element.
 */
class MerkleTree[T](val seed: List[T]) {

  /** Returns the root hash of the tree. */
  def buildTree(): String = {
    @scala.annotation.tailrec
    def reduceToRoot(level: List[String]): String =
      if (level.size == 1) level.head
      else reduceToRoot(hashed(level))

    reduceToRoot(hashed(seed.leaves))
  }

  /** Combines one level of hashes pairwise into the next level up. */
  def hashed(data: List[String]): List[String] =
    data.grouped(2).map { pair =>
      // an odd trailing element has no partner, so its right-hand side is null
      hashing(pair.head, pair.lift(1).orNull)
    }.toList

  /** Hashes the concatenation of two node hashes; null if either side is missing. */
  def hashing(left: String, right: String): String =
    if (left != null && right != null) (left + right).sha256
    else null.asInstanceOf[String]

  /** The hashed leaves derived from the seed values. */
  def leaves(): List[String] =
    seed.leaves
}
| nvxtien/merkle-tree-scala | src/main/scala/MerkleTree.scala | Scala | apache-2.0 | 899 |
///////////////////////////////////////////////////////////////////////////////
// Perceptron.scala
//
// Copyright (C) 2012 Ben Wing, The University of Texas at Austin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////////
package opennlp.fieldspring.perceptron
/**
* A perceptron for binary classification.
*
* @author Ben Wing
*/
import util.control.Breaks._
import collection.mutable
import io.Source
/**
* A vector of real-valued features. In general, features are indexed
* both by a non-negative integer and by a class label (i.e. a label for
* the class that is associated with a particular instance by a classifier).
* Commonly, the class label is ignored when looking up a feature's value.
* Some implementations might want to evaluate the features on-the-fly
* rather than store an actual vector of values.
*/
trait FeatureVector {
  /** Return the length of the feature vector. This is the number of weights
   * that need to be created -- not necessarily the actual number of items
   * stored in the vector (which will be different especially in the case
   * of sparse vectors). */
  def length: Int
  /** Return the value at index `i`, for class `label`. */
  def apply(i: Int, label: Int): Double
  /** Return the squared magnitude of the feature vector for class `label`,
   * i.e. dot product of feature vector with itself. */
  def squared_magnitude(label: Int): Double
  /** Return the squared magnitude of the difference between the values of
   * this feature vector for the two labels `label1` and `label2`. */
  def diff_squared_magnitude(label1: Int, label2: Int): Double
  /** Return the dot product of the given weight vector with the feature
   * vector for class `label`. */
  def dot_product(weights: WeightVector, label: Int): Double
  /** Update the given weight vector in place, adding `scale` times this
   * feature vector (evaluated for class `label`). Mutates `weights`. */
  def update_weights(weights: WeightVector, scale: Double, label: Int)
}
/**
* A feature vector that ignores the class label.
*/
trait SimpleFeatureVector extends FeatureVector {
  /** Return the value at index `i`. */
  def apply(i: Int): Double
  // Label-independent: the two-argument accessor ignores `label` entirely.
  def apply(i: Int, label: Int) = apply(i)
}
/**
* A feature vector in which the features are stored densely, i.e. as
* an array of values.
*/
trait DenseFeatureVector extends FeatureVector {
  /** Dot product with `weights` for class `label`, over all indices. */
  def dot_product(weights: WeightVector, label: Int) =
    (0 until length).map(i => apply(i, label) * weights(i)).sum
  /** Sum of squares of all entries for class `label`. */
  def squared_magnitude(label: Int) =
    (0 until length).map { i =>
      val v = apply(i, label)
      v * v
    }.sum
  /** Squared magnitude of the elementwise difference between the
   * per-label views for `label1` and `label2`. */
  def diff_squared_magnitude(label1: Int, label2: Int) =
    (0 until length).map { i =>
      val d = apply(i, label1) - apply(i, label2)
      d * d
    }.sum
  /** Add `scale` times this vector (for class `label`) into `weights`. */
  def update_weights(weights: WeightVector, scale: Double, label: Int) {
    val n = length
    var i = 0
    while (i < n) {
      weights(i) += scale * apply(i, label)
      i += 1
    }
  }
}
/**
* A vector of real-valued features, stored explicitly. The values passed in
* are used exactly as the values of the feature; no additional term is
* inserted to handle a "bias" or "intercept" weight.
*/
class RawArrayFeatureVector(
  values: WeightVector
) extends DenseFeatureVector with SimpleFeatureVector {
  /** Build a new vector of the same length whose entry `i` is `elem(i)`.
   * Shared by the arithmetic operators below, which previously each
   * repeated the same allocate-and-copy loop. */
  private def build(elem: Int => Double) = {
    val len = length
    val res = new WeightVector(len)
    for (i <- 0 until len)
      res(i) = elem(i)
    new RawArrayFeatureVector(res)
  }
  /** Add two feature vectors elementwise. */
  def +(other: SimpleFeatureVector) = build(i => this(i) + other(i))
  /** Subtract two feature vectors elementwise. */
  def -(other: SimpleFeatureVector) = build(i => this(i) - other(i))
  /** Scale a feature vector by a scalar. */
  def *(scalar: Double) = build(i => this(i) * scalar)
  /** Return the length of the feature vector. */
  def length = values.length
  /** Return the value at index `i`. */
  def apply(i: Int) = values(i)
  /** Set the value at index `i` (mutates the backing storage). */
  def update(i: Int, value: Double) { values(i) = value }
}
/**
* A vector of real-valued features, stored explicitly. An additional value
* set to a constant 1 is automatically stored at the end of the vector.
*/
class ArrayFeatureVector(
  values: WeightVector
) extends DenseFeatureVector with SimpleFeatureVector {
  /** Logical length: the underlying values plus one trailing bias slot. */
  def length = values.length + 1
  /** Value at index `i`; the trailing bias slot always reads 1.0. */
  def apply(i: Int) =
    if (i == values.length) 1.0
    else values(i)
  /** Set the value at index `i`. The bias slot may only be "set" to 1.0;
   * any other value for it is rejected. */
  def update(i: Int, value: Double) {
    if (i != values.length)
      values(i) = value
    else if (value != 1.0)
      throw new IllegalArgumentException(
        "Element at the last index (index %s) unmodifiable, fixed at 1.0"
        format i)
  }
}
}
/**
* A feature vector in which the features are stored sparsely, i.e. only
* the features with non-zero values are stored, using a hash table or
* similar.
*/
class SparseFeatureVector(
  feature_values: Map[String, Double]
) extends SimpleFeatureVector {
  /* Map from memoized feature index to value. Index 0 is reserved for the
   * intercept term and stored with value 0.0.
   * NOTE(review): a 0.0 intercept contributes nothing to dot products or
   * magnitudes, unlike ArrayFeatureVector's bias slot fixed at 1.0 --
   * confirm this asymmetry is intended. */
  protected val memoized_features = Map(0 -> 0.0) ++ // the intercept term
    feature_values.map {
      case (name, value) =>
        (SparseFeatureVector.feature_mapper.memoize_string(name), value)
    }
  /** Logical length: one slot per feature string ever seen by the global
   * mapper, plus one for the intercept. Grows as new features are
   * memoized elsewhere. */
  def length = {
    // +1 because of the intercept term
    SparseFeatureVector.feature_mapper.number_of_entries + 1
  }
  /** Value at `index`; features absent from this vector read as 0.0. */
  def apply(index: Int) = memoized_features.getOrElse(index, 0.0)
  /** Value of a feature looked up by name. NOTE: memoizes the name as a
   * side effect, so querying an unseen feature grows the global mapper
   * (and hence `length`). */
  def apply(feature: String): Double =
    apply(SparseFeatureVector.feature_mapper.memoize_string(feature))
  /** Sum of squares of the stored values (`label` is ignored). */
  def squared_magnitude(label: Int) =
    memoized_features.map {
      case (index, value) => value * value
    }.sum
  /** Always 0: this vector ignores the label, so the per-label views are
   * identical and their difference is the zero vector. */
  def diff_squared_magnitude(label1: Int, label2: Int) = 0.0
  /** Dot product over the stored (sparse) entries only. */
  def dot_product(weights: WeightVector, label: Int) =
    memoized_features.map {
      case (index, value) => value * weights(index)
    }.sum
  /** Add `scale` times this vector into `weights` in place. */
  def update_weights(weights: WeightVector, scale: Double, label: Int) {
    // foreach, not map: this runs purely for its side effect on `weights`
    // (the original built and discarded a result collection via map).
    memoized_features.foreach {
      case (index, value) => weights(index) += scale * value
    }
  }
  override def toString = {
    "SparseFeatureVector(%s)" format
      memoized_features.filter { case (index, value) => value > 0}.
      toSeq.sorted.map {
        case (index, value) =>
          "%s(%s)=%.2f" format (
            SparseFeatureVector.feature_mapper.unmemoize_string(index),
            index, value
          )
      }.mkString(",")
  }
}
object SparseFeatureVector {
  // Global (process-wide) feature-name interner shared by every sparse
  // feature vector; its entry count determines each vector's `length`.
  // Set the minimum index to 1 so we can use 0 for the intercept term
  val feature_mapper = new IntStringMemoizer(minimum_index = 1)
}
/**
* A sparse feature vector built up out of nominal strings. A global
* mapping table is maintained to convert between strings and array
* indices into a logical vector.
*/
class SparseNominalFeatureVector(
  nominal_features: Iterable[String]
) extends SparseFeatureVector(
  nominal_features.map((_, 1.0)).toMap
) {
  /** Render as name(index) pairs for every present (non-zero) feature,
   * sorted by index. */
  override def toString = {
    val present = memoized_features.filter { case (index, value) => value > 0}
    val rendered = present.toSeq.sorted.map {
      case (index, value) =>
        "%s(%s)" format (
          SparseFeatureVector.feature_mapper.unmemoize_string(index),
          index
        )
    }
    "SparseNominalFeatureVector(%s)" format rendered.mkString(",")
  }
}
/**
* A data instance (a statistical unit), consisting of a feature vector
* specifying the characteristics of the instance and a label, to be
* predicted.
*
* @tparam LabelType type of the label (e.g. Int for classification,
* Double for regression, etc.).
*/
abstract class Instance[LabelType] {
  /** Return the label. */
  def getLabel: LabelType
  /** Return the feature vector. */
  def getFeatures: FeatureVector
  // NOTE(review): not referenced anywhere else in this file; presumably
  // consumed by external callers -- verify before removing.
}
/**
* A factory object for creating sparse nominal instances for classification,
* consisting of a nominal label and a set of nominal features. "Nominal"
* in this case means data described using an arbitrary string. Nominal
* features are either present or absent, and nominal labels have no ordering
* or other numerical significance.
*/
class SparseNominalInstanceFactory {
  // Global mapping between nominal label strings and integer indices.
  val label_mapper = new IntStringMemoizer(minimum_index = 0)
  def label_to_index(label: String) = label_mapper.memoize_string(label)
  def index_to_label(index: Int) = label_mapper.unmemoize_string(index)
  def number_of_labels = label_mapper.number_of_entries
  /** Turn a bag of nominal features plus a label string into a
   * (feature vector, label index) pair, memoizing both as needed. */
  def make_labeled_instance(features: Iterable[String], label: String) =
    (new SparseNominalFeatureVector(features), label_to_index(label))
  /** Lazily parse CSV lines of the form f1,...,fn,label into labeled
   * instances (returns an iterator over the source's lines). */
  def get_csv_labeled_instances(source: Source) =
    source.getLines.map { line =>
      val atts = line.split(",")
      make_labeled_instance(atts.dropRight(1), atts.last)
    }
}
trait LinearClassifier {
  /** Return number of labels. Implementations must guarantee this is at
   * least 2. (The previous trait-body `assert(number_of_labels >= 2)` ran
   * during the trait's initializer -- i.e. before any subclass `val`
   * override is initialized, when a val-backed override still reads its
   * default 0 -- so it could never observe the real value. The check is
   * therefore removed; implementors should validate their own value.) */
  def number_of_labels: Int
  /** Classify a given instance, returning the class (a label from 0 to
   * `number_of_labels`-1). */
  def classify(instance: FeatureVector): Int
  /** Score a given instance. Return a sequence of predicted scores, of
   * the same length as the number of labels present. There is one score
   * per label, and the maximum score corresponds to the single predicted
   * label if such a prediction is desired. */
  def score(instance: FeatureVector): IndexedSeq[Double]
}
/**
* A binary linear classifier, created from an array of weights. Normally
* created automatically by one of the trainer classes.
*/
class BinaryLinearClassifier (
  val weights: WeightVector
) extends LinearClassifier {
  val number_of_labels = 2
  /** Raw score for an instance: dot product with the weight vector.
   * A score > 0 predicts class 1, otherwise class 0. */
  def binary_score(instance: FeatureVector) = instance.dot_product(weights, 1)
  /** Classify a given instance, returning the class, either 0 or 1. */
  def classify(instance: FeatureVector) =
    if (binary_score(instance) > 0) 1 else 0
  /** Per-label scores: class 0 is pinned at 0.0, class 1 is the raw score. */
  def score(instance: FeatureVector) =
    IndexedSeq(0.0, binary_score(instance))
}
/**
* Class for training a linear classifier given a set of training instances and
* associated labels.
*/
trait LinearClassifierTrainer {
  /** Create and initialize a vector of weights of length `len`.
   * By default, initialized to all 0's, but could be changed. */
  def new_weights(len: Int) = new WeightVector(len)
  /** Create and initialize a vector of weights of length `len` to all 0's.
   * (Currently identical to `new_weights`; kept separate so that
   * `new_weights` can later be overridden with a different initialization
   * without affecting callers that require zeros.) */
  def new_zero_weights(len: Int) = new WeightVector(len)
  /** Check that all instances have the same length.
   * NOTE(review): assumes `data` is non-empty (`head` throws otherwise). */
  def check_sequence_lengths(data: Iterable[(FeatureVector, Int)]) {
    val len = data.head._1.length
    for ((inst, label) <- data)
      assert(inst.length == len)
  }
  /** Train a classifier given a set of labeled instances. */
  def apply(data: Iterable[(FeatureVector, Int)], num_classes: Int):
    LinearClassifier
}
/**
* Class for training a binary perceptron given a set of training instances
* and associated labels. Use function application to train a new
* perceptron, e.g. `new BinaryPerceptronTrainer()(data)`.
*
* The basic perceptron training algorithm, in all its variants, works as
* follows:
*
* 1. We do multiple iterations, and in each iteration we loop through the
* training instances.
* 2. We process the training instances one-by-one, and potentially update
* the weight vector each time we process a training instance. (Hence,
* the algorithm is "online" or sequential, as opposed to an "off-line"
* or batch algorithm that attempts to satisfy some globally optimal
* function, e.g. maximize the joint probability of seeing the entire
* training set. An off-line iterative algorithm updates the weight
* function once per iteration in a way that attempts to improve the
* overall performance of the algorithm on the entire training set.)
* 3. Each time we see a training instance, we run the prediction algorithm
* to see how we would do on that training instance. In general, if
* we produce the right answer, we make no changes to the weights.
* However, if we produce the wrong answer, we change the weights in
* such a way that we will subsequently do better on the given training
* instance, generally by adding to the weight vector a simple scalar
* multiple (possibly negative) of the feature vector associated with the
* training instance in question.
* 4. We repeat until no further change (or at least, the total change is
* less than some small value), or until we've done a maximum number of
* iterations.
* @param error_threshold Threshold that the sum of all scale factors for
* all instances must be below in order for training to stop. In
* practice, in order to succeed with a threshold such as 1e-10, the
* actual sum of scale factors must be 0.
* @param max_iterations Maximum number of iterations. Training stops either
* when the threshold constraint succeeds of the maximum number of
* iterations is reached.
*/
abstract class BinaryPerceptronTrainer(
  averaged: Boolean = false,
  error_threshold: Double = 1e-10,
  max_iterations: Int = 1000
) extends LinearClassifierTrainer {
  assert(error_threshold >= 0)
  assert(max_iterations > 0)
  /** Check that the arguments passed in are kosher (labels are 0/1, all
   * instances equally long), and return an array of the weights to be
   * learned. */
  def initialize(data: Iterable[(FeatureVector, Int)]) = {
    check_sequence_lengths(data)
    for ((inst, label) <- data)
      assert(label == 0 || label == 1)
    new_weights(data.head._1.length)
  }
  /** Return the scale factor used for updating the weight vector to a
   * new weight vector.
   *
   * @param inst Instance we are currently processing.
   * @param label True label of that instance.
   * @param score Predicted score on that instance.
   */
  def get_scale_factor(inst: FeatureVector, label: Int, score: Double):
    Double
  /** Train a binary perceptron given a set of labeled instances. */
  def apply(data: Iterable[(FeatureVector, Int)]) = {
    val debug = false
    val weights = initialize(data)
    // Running sum of the end-of-iteration weight vectors; only used (and
    // divided by the iteration count) when `averaged` is set.
    val avg_weights = new_zero_weights(weights.length)
    def print_weights() {
      Console.err.println("Weights: length=%s,max=%s,min=%s" format
        (weights.length, weights.max, weights.min))
      // Console.err.println("Weights: [%s]" format weights.mkString(","))
    }
    val len = weights.length
    var iter = 0
    if (debug)
      print_weights()
    breakable {
      while (true) {
        iter += 1
        if (debug)
          Console.err.println("Iteration %s" format iter)
        // Sum of |scale factor| over all instances this iteration; 0 means
        // no instance triggered a weight update.
        var total_error = 0.0
        for ((inst, label) <- data) {
          if (debug)
            Console.err.println("Instance %s, label %s" format (inst, label))
          val score = inst.dot_product(weights, 1)
          if (debug)
            Console.err.println("Score %s" format score)
          val scale = get_scale_factor(inst, label, score)
          if (debug)
            Console.err.println("Scale %s" format scale)
          inst.update_weights(weights, scale, 1)
          if (debug)
            print_weights()
          total_error += math.abs(scale)
        }
        if (averaged)
          (0 until len).foreach(i => avg_weights(i) += weights(i))
        // Progress line is unconditional (not gated on `debug`) -- noisy,
        // but presumably intentional training feedback; verify.
        Console.err.println("Iteration %s, total_error %s" format (iter, total_error))
        if (total_error < error_threshold || iter >= max_iterations)
          break
      }
    }
    if (averaged) {
      (0 until len).foreach(i => avg_weights(i) /= iter)
      new BinaryLinearClassifier(avg_weights)
    } else new BinaryLinearClassifier(weights)
  }
  /** Train a perceptron given a set of labeled instances; binary-only, so
   * `num_classes` must be 2. */
  def apply(data: Iterable[(FeatureVector, Int)], num_classes: Int) = {
    assert(num_classes == 2)
    apply(data)
  }
}
}
/** Train a binary perceptron using the basic algorithm. See the above
* description of the general perceptron training algorithm. In this case,
* when we process an instance, if our prediction is wrong, we either
* push the weight up (if the correct prediction is positive) or down (if the
* correct prediction is negative), according to `alpha` times the feature
* vector of the instance we just evaluated on.
*/
class BasicBinaryPerceptronTrainer(
  alpha: Double,
  averaged: Boolean = false,
  error_threshold: Double = 1e-10,
  max_iterations: Int = 1000
) extends BinaryPerceptronTrainer(averaged, error_threshold, max_iterations) {
  /** Basic perceptron update: `alpha` times the difference between the true
   * label and the prediction, both mapped into -1/+1 space. Zero (no
   * update) when the prediction is correct. */
  def get_scale_factor(inst: FeatureVector, label: Int, score: Double) = {
    val truth = 2 * label - 1                  // map 0/1 label to -1/+1
    val prediction = if (score > 0) 1 else -1  // score 0 predicts -1
    alpha * (truth - prediction)
  }
}
}
trait PassiveAggressivePerceptronTrainer {
  /** PA algorithm variant: 0 = unbounded update, 1 = PA-I (capped by the
   * aggressiveness parameter), 2 = PA-II (smoothed denominator). */
  val _variant: Int
  /** Aggressiveness parameter C for variants 1 and 2 (must be > 0). */
  val _aggressiveness_param: Double
  /** Update magnitude for a given hinge `loss` and squared magnitude
   * `sqmag` of the relevant feature-vector difference. */
  def compute_update_factor(loss: Double, sqmag: Double) = {
    assert(_variant >= 0 && _variant <= 2)
    assert(_aggressiveness_param > 0)
    if (_variant == 0)
      loss / sqmag
    else if (_variant == 1)
      _aggressiveness_param min (loss / sqmag)
    else
      loss / (sqmag + 1.0/(2.0*_aggressiveness_param))
  }
  /** Return set of "yes" labels associated with an instance. Currently only
   * one yes label per instance, but this could be changed by redoing this
   * function. */
  def yes_labels(label: Int, num_classes: Int) =
    (0 until 0) ++ (label to label)
  /** Return set of "no" labels associated with an instance -- complement of
   * the set of "yes" labels.
   * BUGFIX: the upper part previously started at `label` instead of
   * `label + 1`, so the correct label was wrongly included in its own
   * "no" set. */
  def no_labels(label: Int, num_classes: Int) =
    (0 until label) ++ (label + 1 until num_classes)
}
/** Train a binary perceptron using the basic algorithm. See the above
* description of the general perceptron training algorithm. When processing
* a training instance, the algorithm is "passive" in the sense that it makes
* no changes if the prediction is correct (as in all perceptron training
* algorithms), and "aggressive" when a prediction is wrong in the sense that
* it changes the weight as much as necessary (but no more) to satisfy a
* given constraint. In this case, the idea is to change the weight as
* little as possible while ensuring that the prediction on the instance is
* not only correct but has a score that exceeds the minimally required score
* for correctness by at least as much as a given "margin". Hence, we
* essentially * try to progess as much as possible in each step (the
* constraint satisfaction) while also trying to preserve as much information
* as possible that was learned previously (the minimal constraint
* satisfaction).
*
* @param variant Variant 0 directly implements the algorithm just
* described. The other variants are designed for training sets that may
* not be linearly separable, and as a result are less aggressive.
* Variant 1 simply limits the total change to be no more than a given
* factor, while variant 2 scales the total change down relatively. In
* both cases, an "aggressiveness factor" needs to be given.
* @param aggressiveness_param As just described above. Higher values
* cause more aggressive changes to the weight vector during training.
*/
class PassiveAggressiveBinaryPerceptronTrainer(
  variant: Int,
  aggressiveness_param: Double = 20.0,
  error_threshold: Double = 1e-10,
  max_iterations: Int = 1000
) extends BinaryPerceptronTrainer(false, error_threshold, max_iterations)
  with PassiveAggressivePerceptronTrainer {
  val _variant = variant; val _aggressiveness_param = aggressiveness_param
  /** PA update: hinge loss against a margin of 1 in -1/+1 label space,
   * divided per the variant-specific rule and signed by the true label. */
  def get_scale_factor(inst: FeatureVector, label: Int, score: Double) = {
    val truth = 2 * label - 1    // map 0/1 label to -1/+1
    val hingeLoss = 0.0 max (1.0 - truth * score)
    compute_update_factor(hingeLoss, inst.squared_magnitude(1)) * truth
  }
}
object Maxutil {
  /** Return the argument maximizing `fun` (first winner on ties). */
  def argmax[T](args: Iterable[T], fun: T => Double) =
    argandmax(args, fun)._1
  /** Return the (argument, value) pair maximizing `fun`; `fun` is
   * evaluated exactly once per argument. */
  def argandmax[T](args: Iterable[T], fun: T => Double) =
    args.map(a => (a, fun(a))).maxBy(_._2)
  /** Return the argument minimizing `fun` (first winner on ties). */
  def argmin[T](args: Iterable[T], fun: T => Double) =
    argandmin(args, fun)._1
  /** Return the (argument, value) pair minimizing `fun`; `fun` is
   * evaluated exactly once per argument. */
  def argandmin[T](args: Iterable[T], fun: T => Double) =
    args.map(a => (a, fun(a))).minBy(_._2)
}
/**
* A multi-class perceptron with only a single set of weights for all classes.
* Note that the feature vector is passed the class in when a value is
* requested; it is assumed that class-specific features are handled
* automatically through this mechanism.
*/
class SingleWeightMultiClassLinearClassifier (
  val weights: WeightVector,
  val number_of_labels: Int
) extends LinearClassifier {
  /** Classify a given instance, returning the highest-scoring class. */
  def classify(instance: FeatureVector) =
    Maxutil.argmax[Int](0 until number_of_labels, score_class(instance, _))
  /** Score a given instance for a single class, using the shared weight
   * vector with class-dependent feature evaluation. */
  def score_class(instance: FeatureVector, clazz: Int) =
    instance.dot_product(weights, clazz)
  /** Score a given instance, returning one score per class. (The previous
   * `.toArray` built an Array that the declared IndexedSeq[Double] result
   * type forced right back through an implicit wrapper; the mapped
   * IndexedSeq is now returned directly.) */
  def score(instance: FeatureVector) =
    (0 until number_of_labels).map(score_class(instance, _))
}
/**
* A multi-class perceptron with a different set of weights for each class.
* Note that the feature vector is also passed the class in when a value is
* requested.
*/
class MultiClassLinearClassifier (
  val weights: IndexedSeq[WeightVector]
) extends LinearClassifier {
  val number_of_labels = weights.length
  /** Classify a given instance, returning the highest-scoring class. */
  def classify(instance: FeatureVector) =
    Maxutil.argmax[Int](0 until number_of_labels, score_class(instance, _))
  /** Score a given instance for a single class, using that class's own
   * weight vector. */
  def score_class(instance: FeatureVector, clazz: Int) =
    instance.dot_product(weights(clazz), clazz)
  /** Score a given instance, returning one score per class. (The previous
   * `.toArray` built an Array that the declared IndexedSeq[Double] result
   * type forced right back through an implicit wrapper; the mapped
   * IndexedSeq is now returned directly.) */
  def score(instance: FeatureVector) =
    (0 until number_of_labels).map(score_class(instance, _))
}
/**
* Class for training a multi-class perceptron with only a single set of
* weights for all classes.
*/
abstract class SingleWeightMultiClassPerceptronTrainer(
  error_threshold: Double = 1e-10,
  max_iterations: Int = 1000
) extends LinearClassifierTrainer {
  assert(error_threshold >= 0)
  assert(max_iterations > 0)
  /** Check that the arguments passed in are kosher, and return an array of
   * the weights to be learned.
   * NOTE(review): unlike BinaryPerceptronTrainer.initialize, this does not
   * call check_sequence_lengths -- confirm whether that is intentional. */
  def initialize(data: Iterable[(FeatureVector, Int)], num_classes: Int) = {
    assert(num_classes >= 2)
    for ((inst, label) <- data)
      assert(label >= 0 && label < num_classes)
    new_weights(data.head._1.length)
  }
  def apply(data: Iterable[(FeatureVector, Int)], num_classes: Int):
    SingleWeightMultiClassLinearClassifier
}
/**
* Class for training a passive-aggressive multi-class perceptron with only a
* single set of weights for all classes.
*/
class PassiveAggressiveSingleWeightMultiClassPerceptronTrainer(
  variant: Int,
  aggressiveness_param: Double = 20.0,
  error_threshold: Double = 1e-10,
  max_iterations: Int = 1000
) extends SingleWeightMultiClassPerceptronTrainer(
  error_threshold, max_iterations
) with PassiveAggressivePerceptronTrainer {
  val _variant = variant; val _aggressiveness_param = aggressiveness_param
  /**
   * Actually train a passive-aggressive single-weight multi-class
   * perceptron. Note that, although we're passed in a single correct label
   * per instance, the code below is written so that it can handle a set of
   * correct labels; you'd just have to change `yes_labels` and `no_labels`
   * and pass the appropriate set of correct labels in.
   * (An unused local `len` was removed.)
   */
  def apply(data: Iterable[(FeatureVector, Int)], num_classes: Int) = {
    val weights = initialize(data, num_classes)
    var iter = 0
    breakable {
      while (iter < max_iterations) {
        var total_error = 0.0
        for ((inst, label) <- data) {
          // Score of label x under the shared weight vector.
          def dotprod(x: Int) = inst.dot_product(weights, x)
          val yeslabs = yes_labels(label, num_classes)
          val nolabs = no_labels(label, num_classes)
          // r = weakest correct label, s = strongest incorrect label.
          val (r,rscore) = Maxutil.argandmin[Int](yeslabs, dotprod(_))
          val (s,sscore) = Maxutil.argandmax[Int](nolabs, dotprod(_))
          val margin = rscore - sscore
          // Hinge loss: positive whenever the margin falls below 1.
          val loss = 0.0 max (1.0 - margin)
          val sqmagdiff = inst.diff_squared_magnitude(r, s)
          val scale = compute_update_factor(loss, sqmagdiff)
          inst.update_weights(weights, scale, r)
          inst.update_weights(weights, -scale, s)
          total_error += math.abs(scale)
        }
        if (total_error < error_threshold)
          break
        iter += 1
      }
    }
    new SingleWeightMultiClassLinearClassifier(weights, num_classes)
  }
}
/**
* Class for training a multi-class perceptron with separate weights for each
* class.
*/
abstract class MultiClassPerceptronTrainer(
  error_threshold: Double = 1e-10,
  max_iterations: Int = 1000
) extends LinearClassifierTrainer {
  assert(error_threshold >= 0)
  assert(max_iterations > 0)
  /** Check that the arguments passed in are kosher, and return one freshly
   * initialized weight vector per class. */
  def initialize(data: Iterable[(FeatureVector, Int)], num_classes: Int) = {
    assert(num_classes >= 2)
    data.foreach { case (_, label) =>
      assert(label >= 0 && label < num_classes)
    }
    val len = data.head._1.length
    IndexedSeq.fill(num_classes)(new_weights(len))
  }
  def apply(data: Iterable[(FeatureVector, Int)], num_classes: Int):
    MultiClassLinearClassifier
}
/**
 * Class for training a passive-aggressive multi-class perceptron with a
 * separate set of weights for each class.
 */
class PassiveAggressiveMultiClassPerceptronTrainer(
  variant: Int,
  aggressiveness_param: Double = 20.0,
  error_threshold: Double = 1e-10,
  max_iterations: Int = 1000
) extends MultiClassPerceptronTrainer(
  error_threshold, max_iterations
) with PassiveAggressivePerceptronTrainer {
  val _variant = variant; val _aggressiveness_param = aggressiveness_param
  /**
   * Actually train a passive-aggressive multi-weight multi-class
   * perceptron. Note that, although we're passed in a single correct label
   * per instance, the code below is written so that it can handle a set of
   * correct labels; you'd just have to change `yes_labels` and `no_labels`
   * and pass the appropriate set of correct labels in.
   * (An unused local `len` was removed.)
   */
  def apply(data: Iterable[(FeatureVector, Int)], num_classes: Int) = {
    val weights = initialize(data, num_classes)
    var iter = 0
    breakable {
      while (iter < max_iterations) {
        var total_error = 0.0
        for ((inst, label) <- data) {
          // Score of label x under its own weight vector.
          def dotprod(x: Int) = inst.dot_product(weights(x), x)
          val yeslabs = yes_labels(label, num_classes)
          val nolabs = no_labels(label, num_classes)
          // r = weakest correct label, s = strongest incorrect label.
          val (r,rscore) = Maxutil.argandmin[Int](yeslabs, dotprod(_))
          val (s,sscore) = Maxutil.argandmax[Int](nolabs, dotprod(_))
          val margin = rscore - sscore
          val loss = 0.0 max (1.0 - margin)
          // With disjoint per-class weight vectors, the squared magnitude
          // of the joint update direction is the sum of the two parts.
          val rmag = inst.squared_magnitude(r)
          val smag = inst.squared_magnitude(s)
          val sqmagdiff = rmag + smag
          val scale = compute_update_factor(loss, sqmagdiff)
          inst.update_weights(weights(r), scale, r)
          inst.update_weights(weights(s), -scale, s)
          total_error += math.abs(scale)
        }
        if (total_error < error_threshold)
          break
        iter += 1
      }
    }
    new MultiClassLinearClassifier(weights)
  }
}
/**
* Class for training a cost-sensitive multi-class perceptron with only a
* single set of weights for all classes.
*/
abstract class CostSensitiveSingleWeightMultiClassPerceptronTrainer(
  error_threshold: Double = 1e-10,
  max_iterations: Int = 1000
) extends SingleWeightMultiClassPerceptronTrainer(
  // BUGFIX: previously extended with no arguments, so the caller-supplied
  // threshold/iteration settings were silently replaced by the superclass
  // defaults at the superclass level.
  error_threshold, max_iterations
) {
  assert(error_threshold >= 0)
  assert(max_iterations > 0)
  /** Cost of predicting `predicted` when `correct` is the true label. */
  def cost(correct: Int, predicted: Int): Double
}
/**
* Class for training a passive-aggressive cost-sensitive multi-class
* perceptron with only a single set of weights for all classes.
*/
abstract class PassiveAggressiveCostSensitiveSingleWeightMultiClassPerceptronTrainer(
  prediction_based: Boolean,
  variant: Int,
  aggressiveness_param: Double = 20.0,
  error_threshold: Double = 1e-10,
  max_iterations: Int = 1000
) extends CostSensitiveSingleWeightMultiClassPerceptronTrainer(
  error_threshold, max_iterations
) with PassiveAggressivePerceptronTrainer {
  val _variant = variant; val _aggressiveness_param = aggressiveness_param
  /**
   * Actually train a passive-aggressive cost-sensitive single-weight
   * multi-class perceptron. `prediction_based` chooses prediction-based
   * updating (compete against the raw argmax label) over max-loss updating
   * (compete against the label maximizing the cost-augmented score).
   * (An unused local `len` was removed.)
   */
  def apply(data: Iterable[(FeatureVector, Int)], num_classes: Int) = {
    val weights = initialize(data, num_classes)
    var iter = 0
    val all_labs = 0 until num_classes
    breakable {
      while (iter < max_iterations) {
        var total_error = 0.0
        for ((inst, label) <- data) {
          // Score of label x under the shared weight vector.
          def dotprod(x: Int) = inst.dot_product(weights, x)
          val goldscore = dotprod(label)
          // Label to compete against, per the `prediction_based` switch.
          val predlab =
            if (prediction_based)
              Maxutil.argmax[Int](all_labs, dotprod(_))
            else
              Maxutil.argmax[Int](all_labs,
                x=>(dotprod(x) - goldscore + math.sqrt(cost(label, x))))
          // Cost-augmented hinge loss against the competing label.
          val loss = dotprod(predlab) - goldscore +
            math.sqrt(cost(label, predlab))
          val sqmagdiff = inst.diff_squared_magnitude(label, predlab)
          val scale = compute_update_factor(loss, sqmagdiff)
          inst.update_weights(weights, scale, label)
          inst.update_weights(weights, -scale, predlab)
          total_error += math.abs(scale)
        }
        if (total_error < error_threshold)
          break
        iter += 1
      }
    }
    new SingleWeightMultiClassLinearClassifier(weights, num_classes)
  }
}
/**
* Class for training a cost-sensitive multi-class perceptron with a separate
* set of weights per class.
*/
abstract class CostSensitiveMultiClassPerceptronTrainer(
  error_threshold: Double = 1e-10,
  max_iterations: Int = 1000
) extends MultiClassPerceptronTrainer(
  // BUGFIX: previously extended with no arguments, so the caller-supplied
  // threshold/iteration settings were silently replaced by the superclass
  // defaults at the superclass level.
  error_threshold, max_iterations
) {
  assert(error_threshold >= 0)
  assert(max_iterations > 0)
  /** Cost of predicting `predicted` when `correct` is the true label. */
  def cost(correct: Int, predicted: Int): Double
}
/**
* Class for training a passive-aggressive cost-sensitive multi-class
* perceptron with a separate set of weights per class.
*/
abstract class PassiveAggressiveCostSensitiveMultiClassPerceptronTrainer(
  prediction_based: Boolean,
  variant: Int,
  aggressiveness_param: Double = 20.0,
  error_threshold: Double = 1e-10,
  max_iterations: Int = 1000
) extends CostSensitiveMultiClassPerceptronTrainer(
  error_threshold, max_iterations
) with PassiveAggressivePerceptronTrainer {
  val _variant = variant; val _aggressiveness_param = aggressiveness_param
  /**
   * Actually train a passive-aggressive cost-sensitive multi-weight
   * multi-class perceptron (the previous doc said "single-weight", a
   * copy-paste error). `prediction_based` chooses prediction-based
   * updating (compete against the raw argmax label) over max-loss updating
   * (compete against the label maximizing the cost-augmented score).
   * (An unused local `len` was removed.)
   */
  def apply(data: Iterable[(FeatureVector, Int)], num_classes: Int) = {
    val weights = initialize(data, num_classes)
    var iter = 0
    val all_labs = 0 until num_classes
    breakable {
      while (iter < max_iterations) {
        var total_error = 0.0
        for ((inst, label) <- data) {
          // Score of label x under its own weight vector.
          def dotprod(x: Int) = inst.dot_product(weights(x), x)
          val goldscore = dotprod(label)
          // Label to compete against, per the `prediction_based` switch.
          val predlab =
            if (prediction_based)
              Maxutil.argmax[Int](all_labs, dotprod(_))
            else
              Maxutil.argmax[Int](all_labs,
                x=>(dotprod(x) - goldscore + math.sqrt(cost(label, x))))
          // Cost-augmented hinge loss against the competing label.
          val loss = dotprod(predlab) - goldscore +
            math.sqrt(cost(label, predlab))
          // Disjoint per-class weight vectors: squared magnitude of the
          // joint update direction is the sum of the two parts.
          val rmag = inst.squared_magnitude(label)
          val smag = inst.squared_magnitude(predlab)
          val sqmagdiff = rmag + smag
          val scale = compute_update_factor(loss, sqmagdiff)
          inst.update_weights(weights(label), scale, label)
          inst.update_weights(weights(predlab), -scale, predlab)
          total_error += math.abs(scale)
        }
        if (total_error < error_threshold)
          break
        iter += 1
      }
    }
    new MultiClassLinearClassifier(weights)
  }
}
| utcompling/fieldspring | src/main/scala/opennlp/fieldspring/perceptron/Perceptron.scala | Scala | apache-2.0 | 34,224 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package toolkit.neuralnetwork.function
import libcog._
import toolkit.neuralnetwork.{ComputeTests, DifferentiableField, UnitSpec}
/** Tests the self-consistency of the forward/jacobian/jacobianAdjoint functions of the spatial convolution operator.
*
* @author Dick Carter
*/
class SpatialConvolutionSpec extends UnitSpec with ComputeTests {

  // Default field shapes: a 32x64 image convolved with a 5x5 filter.
  val inputShapes = Seq(Shape(32, 64), Shape(5, 5))
  // Default vector lengths: 11 input planes, 11 * 13 filter-bank entries.
  val inputLens = Seq(11, 11 * 13)

  /** Builds the node under test: a SpatialConvolution of the first input by
    * the second, with the given stride and border policy.
    */
  def fn(downsampleFactor: Int, borderPolicy: BorderPolicy = BorderValid) = {
    a: Seq[DifferentiableField] => SpatialConvolution(a.head, a(1), stride = downsampleFactor, border = borderPolicy)
  }

  "The spatial conv op (BorderValid)" should "handle batchSize 1" in {
    val batchSizes = Seq(1, 1)
    val downsampleFactor = 1
    val node = fn(downsampleFactor)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle batchSize 1 with downsample 2" in {
    val batchSizes = Seq(1, 1)
    val downsampleFactor = 2
    val node = fn(downsampleFactor)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle batchSize 1 with downsample 3" in {
    val batchSizes = Seq(1, 1)
    val inputShapes = Seq(Shape(36, 66), Shape(7, 7))
    val inputLens = Seq(12, 12 * 8)
    val downsampleFactor = 3
    val node = fn(downsampleFactor)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle batchSize >1" in {
    val batchSizes = Seq(2, 1)
    val downsampleFactor = 1
    val node = fn(downsampleFactor)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle batchSize >1 with downsample 2" in {
    val batchSizes = Seq(2, 1)
    val downsampleFactor = 2
    val node = fn(downsampleFactor)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle batchSize >1 with downsample 3" in {
    val batchSizes = Seq(10, 1)
    val inputShapes = Seq(Shape(36, 66), Shape(7, 7))
    val inputLens = Seq(4, 4 * 17)
    val downsampleFactor = 3
    val node = fn(downsampleFactor)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle AlexNet CL1-like parameters" in {
    // Like AlexNet layer 1, although with reduced number of filters and small batch size
    val inputRows = 230
    val inputColumns = 230
    val colorPlanes = 3
    val batchSize = 8
    val filterSize = 11
    val numFilters = 8
    val inputShapes = Seq(Shape(inputRows, inputColumns), Shape(filterSize, filterSize))
    val batchSizes = Seq(batchSize, 1)
    val inputLens = Seq(colorPlanes, colorPlanes * numFilters)
    val downsampleFactor = 4
    val node = fn(downsampleFactor)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  // BUG FIX: the tests in this section previously built the node with
  // fn(downsampleFactor) — i.e. the default BorderValid policy — so only the
  // first BorderZero test actually exercised BorderZero. Every test below now
  // passes BorderZero explicitly.
  "The spatial conv op (BorderZero)" should "handle batchSize 1" in {
    val batchSizes = Seq(1, 1)
    val downsampleFactor = 1
    val node = fn(downsampleFactor, BorderZero)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle batchSize 1 with downsample 2" in {
    val batchSizes = Seq(1, 1)
    val downsampleFactor = 2
    val node = fn(downsampleFactor, BorderZero)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle batchSize 1 with downsample 3" in {
    val batchSizes = Seq(1, 1)
    val inputShapes = Seq(Shape(36, 66), Shape(7, 7))
    val inputLens = Seq(12, 12 * 8)
    val downsampleFactor = 3
    val node = fn(downsampleFactor, BorderZero)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle batchSize >1" in {
    val batchSizes = Seq(2, 1)
    val downsampleFactor = 1
    val node = fn(downsampleFactor, BorderZero)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle batchSize >1 with downsample 2" in {
    val batchSizes = Seq(2, 1)
    val downsampleFactor = 2
    val node = fn(downsampleFactor, BorderZero)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle batchSize >1 with downsample 3" in {
    val batchSizes = Seq(10, 1)
    val inputShapes = Seq(Shape(36, 66), Shape(7, 7))
    val inputLens = Seq(4, 4 * 17)
    val downsampleFactor = 3
    val node = fn(downsampleFactor, BorderZero)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }

  it should "handle AlexNet CL2-like parameters" in {
    // Like AlexNet layer 2, although with reduced number of filters and small batch size
    val inputRows = 55
    val inputColumns = 55
    val inputPlanes = 96
    val batchSize = 8
    val filterSize = 5
    val numFilters = 8
    val inputShapes = Seq(Shape(inputRows, inputColumns), Shape(filterSize, filterSize))
    val batchSizes = Seq(batchSize, 1)
    val inputLens = Seq(inputPlanes, inputPlanes * numFilters)
    val node = fn(1, BorderZero)
    jacobian(node, inputShapes, inputLens, batchSizes)
    jacobianAdjoint(node, inputShapes, inputLens, batchSizes)
  }
}
| hpe-cct/cct-nn | src/test/scala/toolkit/neuralnetwork/function/SpatialConvolutionSpec.scala | Scala | apache-2.0 | 6,400 |
package controllers
import javax.inject.Inject
import models.db._
import models.entity.{AgeGroupRanking, Cities, SchoolingRanking, SimpleCity}
import models.form.AnalysesForm
import models.query._
import play.api.data.Form
import play.api.data.Forms._
import play.api.i18n.{I18nSupport, MessagesApi}
import play.api.mvc.{Action, Controller}
import scala.concurrent.{ExecutionContext, Future}
/** Serves the schooling and age-group ranking pages.
  *
  * Both feature families follow the same pipeline: load rankings for a
  * year/month, bucket them by electorate size (`betwennRules`), keep the top
  * ten cities per bucket, and resolve city codes to names for display.
  */
class RankingController @Inject()(val schoolingRankingRepository: SchoolingRankingRepository,
                                  val cityRepository: CityRepository,
                                  val schoolingRepository: SchoolingRepository,
                                  val dataImportRepository: DataImportRepository,
                                  val ageGroupRepository: AgeGroupRepository,
                                  val ageGroupRankingRepository: AgeGroupRankingRepository,
                                  val messagesApi: MessagesApi)(implicit ec: ExecutionContext)
  extends Controller with I18nSupport {

  // Form carrying the year/month chosen by the user on the analyses pages.
  val analysesForm: Form[AnalysesForm] = Form {
    mapping(
      "yearMonth" -> nonEmptyText
    )(AnalysesForm.apply)(AnalysesForm.unapply)
  }

  // Electorate-size buckets: ((inclusive lower, inclusive upper), pt-BR label).
  // NOTE(review): name keeps the original (misspelled) identifier so any
  // external references continue to compile.
  val betwennRules = Seq[((Int, Int), String)](
    ((0, 10000), "Até 10.000 eleitores apenas"),
    ((10001, 50000), "Entre 10.001 e 50.000 eleitores"),
    ((50001, 100000), "Entre 50.001 e 100.000 eleitores"),
    ((100001, 200000), "Entre 100.001 e 200.000 eleitores"),
    ((200001, 500000), "Entre 200.001 e 500.000 eleitores"),
    ((500001, 1000000), "Entre 500.001 até 1.000.000 eleitores"),
    ((1000000, 9999999), "Acima de 1.000.000 de eleitores")
  )

  /** POST handler: renders the schooling ranking for the submitted yearMonth.
    * Invalid form submissions redirect back to the default analyses page.
    */
  def schoolingViewRequest = Action.async { implicit request =>
    analysesForm.bindFromRequest.fold(
      error => {
        Future {
          Redirect(routes.RankingController.schoolingAnalysesPage)
        }
      },
      analyses => schoolingRankingRepository.getYears flatMap { yearMonths =>
        val years = formatYears(yearMonths)
        // Unknown yearMonth falls back to "N/A" instead of failing.
        val selectedYear: Option[(String, String)] = years.find(_._1 == analyses.yearMonth)
        (for {
          transformed <- schoolingTransformation(analyses.yearMonth)
          rankings <- schoolingViewParser(transformed)
        } yield (rankings)) flatMap { rankings =>
          Future(
            Ok(views.html.schooling_ranking(
              selectedYear.getOrElse(("N/A", "N/A"))._2,
              analysesForm,
              years,
              rankings)))
        }
      })
  }

  /** GET handler: renders the schooling ranking for the most recent year, or a
    * "no ranking" page when no data has been imported yet.
    */
  def schoolingAnalysesPage = Action.async { implicit request =>
    schoolingRankingRepository.getYears flatMap { yearMonths =>
      val years = formatYears(yearMonths)
      if (years.isEmpty)
        Future(Ok(views.html.no_ranking()))
      else {
        val lastYear = years.headOption.getOrElse(("N/A", "N/A"))._2
        (for {
          transformed <- schoolingTransformation(lastYear)
          rankings <- schoolingViewParser(transformed)
        } yield (rankings)) map { rankings =>
          Ok(views.html.schooling_ranking(lastYear, analysesForm, years, rankings))
        }
      }
    }
  }

  /** Resolves schooling ids and city codes into display-ready
    * [[SchoolingRankingLevel]]s, ordered by schooling position.
    */
  def schoolingViewParser(rankings: Map[Int, Seq[((Int, Int, String), Seq[SchoolingRanking])]]): Future[Seq[SchoolingRankingLevel]] = {
    (for {
      schoolings <- schoolingRepository.getAll
      cities <- cityRepository.getAll
    } yield (schoolings, Cities.citiesToSimpleCity(cities).toSeq)) map {
      case (schoolings, cities) =>
        rankings.map {
          case (schoolingId, values) =>
            // Assumes every ranked schoolingId exists in the repository —
            // a missing id would throw NoSuchElementException here.
            val schooling = schoolings.filter {
              _.id.get == schoolingId
            }.head
            val rankingsByLimit = values.map {
              case ((base, limit, message), schoolingRankings) =>
                SchoolingRankingsByLimit(base, limit, message, schoolingRankings.map(toDetail(cities, _)))
            }
            SchoolingRankingLevel(schooling.position, schooling.level, rankingsByLimit)
        }.toSeq.sortBy(_.position)
    }
  }

  /** Joins one schooling ranking row with its city's name and state.
    * Assumes the city code is always resolvable against `cities`.
    */
  def toDetail(cities: Seq[SimpleCity], schoolingRanking: SchoolingRanking) = {
    val city = cities.filter(_.id == schoolingRanking.cityCode).head
    SchoolingRankingDetails(
      cityCode = schoolingRanking.cityCode,
      name = city.name,
      state = city.state,
      percent = schoolingRanking.percentOfTotal,
      peoples = schoolingRanking.peoples,
      total = schoolingRanking.total)
  }

  /** Groups the yearMonth's schooling rankings by schooling id, then within
    * each id buckets them by electorate size and keeps the top 10 by percent.
    * NOTE(review): only cities returned by getForeignCities() are kept —
    * confirm this filter direction is intended.
    */
  def schoolingTransformation(yearMonth: String = "2016"): Future[Map[Int, Seq[((Int, Int, String), Seq[SchoolingRanking])]]] = {
    (for {
      foreignCities <- cityRepository.getForeignCities()
      rankings <- schoolingRankingRepository.getAllByYearMonth(yearMonth)
    } yield (foreignCities.map(_.code).toSet, rankings)) map { case (foreignCitiesCode, rankings) =>
      val groupedBySchooling = rankings.groupBy(_.schoolingId)
      groupedBySchooling.map {
        case (id, rankings) =>
          val filtered = betwennRules.map {
            case ((base, limit), message) => {
              val mapped = rankings
                .filter { ranking => foreignCitiesCode.contains(ranking.cityCode) }
                .filter { ranking => ranking.total >= base && ranking.total <= limit }
                .sortBy {
                  _.percentOfTotal
                }(Ordering[Double].reverse)
                .take(10)
              ((base, limit, message), mapped)
            }
          }
          (id, filtered)
      }
    }
  }

  /** POST handler: renders the age-group ranking for the submitted yearMonth.
    * FIX: previously used `.filter(...).head`, which threw
    * NoSuchElementException for an unknown yearMonth; now mirrors
    * [[schoolingViewRequest]] and falls back to "N/A".
    */
  def ageGroupViewRequest = Action.async { implicit request =>
    analysesForm.bindFromRequest.fold(
      error => {
        Future {
          Redirect(routes.RankingController.ageGroupAnalysesPage)
        }
      },
      analyses => ageGroupRankingRepository.getYears flatMap { imports =>
        val years = formatYears(imports)
        val selectedYear: Option[(String, String)] = years.find(_._1 == analyses.yearMonth)
        (for {
          transformed <- ageGroupTransformation(analyses.yearMonth)
          rankings <- ageGroupViewParser(transformed)
        } yield (rankings)) flatMap { rankings =>
          Future(Ok(views.html.age_group_ranking(
            selectedYear.getOrElse(("N/A", "N/A"))._2,
            analysesForm,
            years,
            rankings))
          )
        }
      })
  }

  /** GET handler: renders the age-group ranking for the most recent year, or a
    * "no ranking" page when no data has been imported yet.
    */
  def ageGroupAnalysesPage = Action.async { implicit request =>
    ageGroupRankingRepository.getYears flatMap { yearMonths =>
      val years = formatYears(yearMonths)
      if (years.isEmpty)
        Future(Ok(views.html.no_ranking()))
      else {
        // Guarded by isEmpty above; headOption keeps this consistent with
        // schoolingAnalysesPage.
        val lastYear = years.headOption.getOrElse(("N/A", "N/A"))._2
        (for {
          transformed <- ageGroupTransformation(lastYear)
          rankings <- ageGroupViewParser(transformed)
        } yield (rankings)) map { rankings =>
          Ok(views.html.age_group_ranking(lastYear, analysesForm, years, rankings))
        }
      }
    }
  }

  /** Resolves age-group ids and city codes into display-ready
    * [[AgeGroupRankingGroup]]s, ordered by group.
    */
  def ageGroupViewParser(rankings: Map[Int, Seq[((Int, Int, String), Seq[AgeGroupRanking])]]): Future[Seq[AgeGroupRankingGroup]] = {
    (for {
      ageGroups <- ageGroupRepository.getAll
      cities <- cityRepository.getAll
    } yield (ageGroups, Cities.citiesToSimpleCity(cities).toSeq)) map {
      case (ageGroups, cities) =>
        rankings.map {
          case (ageGroupId, values) =>
            // Assumes every ranked ageGroupId exists in the repository.
            val ageGroup = ageGroups.filter {
              _.id.get == ageGroupId
            }.head
            val rankingsByLimit = values.map {
              case ((base, limit, message), ageGroupRankings) =>
                AgeGroupRankingsByLimit(base, limit, message, ageGroupRankings.map(toDetail(cities, _)))
            }
            AgeGroupRankingGroup(ageGroup.group, rankingsByLimit)
        }.toSeq.sortBy(_.group)
    }
  }

  /** Joins one age-group ranking row with its city's name and state.
    * Assumes the city code is always resolvable against `cities`.
    */
  def toDetail(cities: Seq[SimpleCity], ageGroupRanking: AgeGroupRanking) = {
    val city = cities.filter(_.id == ageGroupRanking.cityCode).head
    AgeGroupRankingDetails(
      cityCode = ageGroupRanking.cityCode,
      name = city.name,
      state = city.state,
      percent = ageGroupRanking.percentOfTotal,
      peoples = ageGroupRanking.peoples,
      total = ageGroupRanking.total)
  }

  /** Groups the yearMonth's age-group rankings by age-group id, then within
    * each id buckets them by electorate size and keeps the top 10 by percent.
    * Same pipeline (and same foreign-city filter caveat) as
    * [[schoolingTransformation]].
    */
  def ageGroupTransformation(yearMonth: String = "2016"): Future[Map[Int, Seq[((Int, Int, String), Seq[AgeGroupRanking])]]] = {
    (for {
      foreignCities <- cityRepository.getForeignCities()
      rankings <- ageGroupRankingRepository.getAllByYearMonth(yearMonth)
    } yield (foreignCities.map(_.code).toSet, rankings)) map { case (foreignCitiesCode, rankings) =>
      val groupedByAgeGroup = rankings.groupBy(_.ageGroupId)
      groupedByAgeGroup.map {
        case (id, rankings) =>
          val filtered = betwennRules.map {
            case ((base, limit), message) => {
              val mapped = rankings
                .filter { ranking => foreignCitiesCode.contains(ranking.cityCode) }
                .filter { ranking => ranking.total >= base && ranking.total <= limit }
                .sortBy {
                  _.percentOfTotal
                }(Ordering[Double].reverse)
                .take(10)
              ((base, limit, message), mapped)
            }
          }
          (id, filtered)
      }
    }
  }
}
| LeonardoZ/SAEB | app/controllers/RankingController.scala | Scala | mit | 9,317 |
/*******************************************************************************
* Copyright 2010 Maxime Lévesque
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
***************************************************************************** */
package org.squeryl.dsl.boilerplate
import org.squeryl.dsl.fsm._
import org.squeryl.dsl.ast.{TypedExpressionNode}
/** Boilerplate overloads of `compute` for a query that is already in the
  * "grouped by" state. Each arity wraps its measure expressions in a
  * [[GroupWithMeasuresQueryYield]], carrying forward the query elements,
  * group-by clause and (unevaluated) having clause from the current yield.
  * The measure closures are passed by-name and only evaluated when the
  * resulting `() => List(...)` thunk is invoked during query construction.
  */
trait ComputeMeasuresSignaturesFromGroupByState[G] {
  self: GroupQueryYield[G] =>

  // Arity 1: single measure.
  def compute[T1](e1: =>TypedExpressionNode[T1]): ComputeStateFromGroupByState[G,T1] =
    new GroupWithMeasuresQueryYield[G,T1](
      this.queryElementzz,
      this.groupByClauseClosure,
      this.unevaluatedHavingClause,
      () =>List(e1)
    )

  // Arity 2: measures are packed into a Product2 (tuple).
  def compute[T1,T2](e1: =>TypedExpressionNode[T1], e2: =>TypedExpressionNode[T2]): ComputeStateFromGroupByState[G,Product2[T1,T2]] =
    new GroupWithMeasuresQueryYield[G,Product2[T1,T2]](
      this.queryElementzz,
      this.groupByClauseClosure,
      this.unevaluatedHavingClause,
      () =>List(e1, e2)
    )

  def compute[T1,T2,T3](e1: =>TypedExpressionNode[T1], e2: =>TypedExpressionNode[T2], e3: =>TypedExpressionNode[T3]): ComputeStateFromGroupByState[G,Product3[T1,T2,T3]] =
    new GroupWithMeasuresQueryYield[G,Product3[T1,T2,T3]](
      this.queryElementzz,
      this.groupByClauseClosure,
      this.unevaluatedHavingClause,
      () =>List(e1, e2, e3)
    )

  def compute[T1,T2,T3,T4](e1: =>TypedExpressionNode[T1], e2: =>TypedExpressionNode[T2], e3: =>TypedExpressionNode[T3], e4: =>TypedExpressionNode[T4]): ComputeStateFromGroupByState[G,Product4[T1,T2,T3,T4]] =
    new GroupWithMeasuresQueryYield[G,Product4[T1,T2,T3,T4]](
      this.queryElementzz,
      this.groupByClauseClosure,
      this.unevaluatedHavingClause,
      () =>List(e1, e2, e3, e4)
    )

  def compute[T1,T2,T3,T4,T5]
    (e1: =>TypedExpressionNode[T1], e2: =>TypedExpressionNode[T2], e3: =>TypedExpressionNode[T3], e4: =>TypedExpressionNode[T4],
     e5: =>TypedExpressionNode[T5]): ComputeStateFromGroupByState[G,Product5[T1,T2,T3,T4,T5]] =
    new GroupWithMeasuresQueryYield[G,Product5[T1,T2,T3,T4,T5]](
      this.queryElementzz,
      this.groupByClauseClosure,
      this.unevaluatedHavingClause,
      () =>List(e1, e2, e3, e4, e5)
    )

  def compute[T1,T2,T3,T4,T5,T6]
    (e1: =>TypedExpressionNode[T1], e2: =>TypedExpressionNode[T2], e3: =>TypedExpressionNode[T3], e4: =>TypedExpressionNode[T4],
     e5: =>TypedExpressionNode[T5], e6: =>TypedExpressionNode[T6]): ComputeStateFromGroupByState[G,Product6[T1,T2,T3,T4,T5,T6]] =
    new GroupWithMeasuresQueryYield[G,Product6[T1,T2,T3,T4,T5,T6]](
      this.queryElementzz,
      this.groupByClauseClosure,
      this.unevaluatedHavingClause,
      () =>List(e1, e2, e3, e4, e5, e6)
    )

  def compute[T1,T2,T3,T4,T5,T6,T7]
    (e1: =>TypedExpressionNode[T1], e2: =>TypedExpressionNode[T2], e3: =>TypedExpressionNode[T3], e4: =>TypedExpressionNode[T4],
     e5: =>TypedExpressionNode[T5], e6: =>TypedExpressionNode[T6], e7: =>TypedExpressionNode[T7]): ComputeStateFromGroupByState[G,Product7[T1,T2,T3,T4,T5,T6,T7]] =
    new GroupWithMeasuresQueryYield[G,Product7[T1,T2,T3,T4,T5,T6,T7]](
      this.queryElementzz,
      this.groupByClauseClosure,
      this.unevaluatedHavingClause,
      () =>List(e1, e2, e3, e4, e5, e6, e7)
    )
}
| takezoux2/squeryl-experimental | src/main/scala/org/squeryl/dsl/boilerplate/ComputeMeasuresSignaturesFromGroupByState.scala | Scala | apache-2.0 | 3,888 |
/**
* Copyright (c) 2013 Saddle Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package org.saddle
import org.specs2.mutable.Specification
import org.specs2.ScalaCheck
import org.scalacheck.{ Gen, Arbitrary }
import org.scalacheck.Prop._
import Serde.serializedCopy
/** Property-based tests for Frame: equality, sorting, splitting/rejoining,
  * stringification, transposition and serialization round-trips.
  */
class FrameCheck extends Specification with ScalaCheck {

  "Frame Tests" in {
    // All properties below draw frames of doubles that may contain NA cells.
    implicit val frame = Arbitrary(FrameArbitraries.frameDoubleWithNA)

    "frame equality" in {
      forAll { (f: Frame[Int, Int, Double]) =>
        // Selecting all columns must be an identity, and equality reflexive.
        (f must_== f.col(*)) and (f must_== f)
      }
    }

    "frame sortedRowsBy" in {
      forAll { (f: Frame[Int, Int, Double]) =>
        if (f.numCols > 0) {
          // Sorting rows by the first column must agree with reindexing by
          // the argsort of that column.
          val res = f.sortedRowsBy { x => x.at(0) }
          val ord = array.argsort(f.colAt(0).toVec)
          val exp = f.rowAt(ord)
          res must_== exp
        }
        else
          f must_== Frame.empty[Int, Int, Double]
      }
    }

    "frame colSplitAt works" in {
      forAll { (f: Frame[Int, Int, Double]) =>
        // For any split point, the two halves have the expected widths and
        // concatenate back to the original.
        val idx = Gen.choose(0, f.numCols - 1)
        forAll(idx) { i =>
          val (l, r) = f.colSplitAt(i)
          l.numCols must_== i
          r.numCols must_== f.numCols - i
          (l rconcat r) must_== f
        }
      }
    }

    "frame rowSplitAt works" in {
      forAll { (f: Frame[Int, Int, Double]) =>
        // Row-wise analogue of the column split round-trip above.
        val idx = Gen.choose(0, f.numRows - 1)
        forAll(idx) { i =>
          val (l, r) = f.rowSplitAt(i)
          l.numRows must_== i
          r.numRows must_== f.numRows - i
          (l concat r) must_== f
        }
      }
    }

    "Stringify works for one col, zero rows" in {
      // Regression: rendering a degenerate 1x0 frame used to throw.
      val f = Frame(Array(Vec.empty[Double]) : _*)
      f.toString must throwAn[RuntimeException].not
    }

    "Transpose must work for a string frame" in {
      val f = Frame(Vec("a", "b", "c"), Vec("d", "e", "f"))
      f.T must_== Frame(Vec("a", "d"), Vec("b", "e"), Vec("c", "f"))
    }

    "serialization works" in {
      // Round-trip through Java serialization must preserve equality.
      forAll { f: Frame[Int, Int, Double] =>
        f must_== serializedCopy(f)
      }
    }
  }
}
| jyt109/saddle | saddle-core/src/test/scala/org/saddle/FrameCheck.scala | Scala | apache-2.0 | 2,581 |
package com.monsanto.arch.cloudformation.model.resource
import com.monsanto.arch.cloudformation.model.{ConditionRef, ResourceRef, Token}
import spray.json.{DefaultJsonProtocol, JsonFormat}
/** CloudFormation `AWS::EKS::Cluster` resource.
  *
  * `name` is the template's logical resource id; `Name` is the cluster's own
  * name property. `RoleArn` is the IAM role EKS assumes; `Version` optionally
  * pins the Kubernetes version.
  */
case class `AWS::EKS::Cluster`(
  name: String,
  Name: Token[String],
  ResourcesVpcConfig: Token[ResourcesVpcConfig],
  RoleArn: Token[String],
  Version: Option[Token[String]] = None,
  override val DependsOn: Option[Seq[String]] = None,
  override val Condition: Option[ConditionRef] = None
  ) extends Resource[`AWS::EKS::Cluster`] {

  // Returns a copy gated on the given CloudFormation condition.
  def when(newCondition: Option[ConditionRef] = Condition) =
    new `AWS::EKS::Cluster`(name, Name, ResourcesVpcConfig, RoleArn, Version, DependsOn, newCondition)
}
/** VPC configuration block of an EKS cluster: the security groups and subnets
  * the control plane attaches to.
  */
case class ResourcesVpcConfig(SecurityGroupIds : Seq[Token[ResourceRef[`AWS::EC2::SecurityGroup`]]], SubnetIds : Seq[Token[ResourceRef[`AWS::EC2::Subnet`]]])

object ResourcesVpcConfig extends DefaultJsonProtocol {
  // jsonFormat2 matches the two constructor fields above.
  implicit val format: JsonFormat[ResourcesVpcConfig] = jsonFormat2(ResourcesVpcConfig.apply)
}
object `AWS::EKS::Cluster` extends DefaultJsonProtocol {
  // jsonFormat7 matches the seven constructor fields of the case class
  // (name, Name, ResourcesVpcConfig, RoleArn, Version, DependsOn, Condition).
  implicit val format: JsonFormat[`AWS::EKS::Cluster`] = jsonFormat7(`AWS::EKS::Cluster`.apply)
}
| MonsantoCo/cloudformation-template-generator | src/main/scala/com/monsanto/arch/cloudformation/model/resource/EKS.scala | Scala | bsd-3-clause | 1,372 |
package org.apache.spark.rdd.spookystuff
import java.util.EventListener
import com.tribbloids.spookystuff.utils.CachingUtils
import org.apache.spark.TaskContext
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.metrics.source.Source
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.util.{AccumulatorV2, TaskCompletionListener, TaskFailureListener}
/** A TaskContext wrapper that intercepts task-completion listeners instead of
  * registering them with Spark, so they can be replayed explicitly via
  * [[close]] (e.g. to release resources before the task actually ends).
  * Everything else delegates to the wrapped context.
  */
case class UncleanTaskContext(
    self: TaskContext
) extends TaskContext
    with AutoCloseable {

  override def isCompleted(): Boolean = self.isCompleted()

  override def isInterrupted(): Boolean = self.isInterrupted()

  override def isRunningLocally(): Boolean = self.isRunningLocally()

  // FIX: listeners were previously keyed by System.currentTimeMillis(), so two
  // listeners registered within the same millisecond collided and one was
  // silently dropped. A monotonic sequence guarantees unique keys and
  // preserves registration order for close()'s sorted replay.
  private val listenerSeq = new java.util.concurrent.atomic.AtomicLong(0L)

  // Intercepted completion listeners, keyed by registration sequence number.
  lazy val listeners: CachingUtils.ConcurrentMap[Long, EventListener] =
    CachingUtils.ConcurrentMap[Long, EventListener]()

  override def addTaskCompletionListener(listener: TaskCompletionListener): TaskContext = {
    listeners += (listenerSeq.getAndIncrement() -> listener)
    this
  }

  // Failure listeners are not intercepted; they go straight to Spark.
  override def addTaskFailureListener(listener: TaskFailureListener): TaskContext =
    self.addTaskFailureListener(listener)

  override def stageId(): Int = self.stageId()

  override def stageAttemptNumber(): Int = self.stageAttemptNumber()

  override def partitionId(): Int = self.partitionId()

  override def attemptNumber(): Int = self.attemptNumber()

  override def taskAttemptId(): Long = self.taskAttemptId()

  override def getLocalProperty(key: String): String = self.getLocalProperty(key)

  override def taskMetrics(): TaskMetrics = self.taskMetrics()

  override def getMetricsSources(sourceName: String): Seq[Source] = self.getMetricsSources(sourceName)

  override private[spark] def killTaskIfInterrupted(): Unit = self.killTaskIfInterrupted()

  override private[spark] def getKillReason() = self.getKillReason()

  override private[spark] def taskMemoryManager() = self.taskMemoryManager()

  override private[spark] def registerAccumulator(a: AccumulatorV2[_, _]): Unit = self.registerAccumulator(a)

  // FIX: previously read `self.fetchFailed` (discarding the argument and the
  // result) instead of forwarding the failure to the wrapped context.
  override private[spark] def setFetchFailed(fetchFailed: FetchFailedException): Unit =
    self.setFetchFailed(fetchFailed)

  override private[spark] def markInterrupted(reason: String): Unit = self.markInterrupted(reason)

  override private[spark] def markTaskFailed(error: Throwable): Unit = self.markTaskFailed(error)

  override private[spark] def markTaskCompleted(error: Option[Throwable]): Unit = self.markTaskCompleted(error)

  override private[spark] def fetchFailed = self.fetchFailed

  override private[spark] def getLocalProperties = self.getLocalProperties

  /** Replays all intercepted completion listeners in registration order. */
  override def close(): Unit = {
    listeners.keys.toList.sorted.foreach { key =>
      listeners.get(key).foreach {
        case v: TaskCompletionListener =>
          v.onTaskCompletion(self)
        case _ => // only completion listeners are ever registered; ignore others
      }
    }
  }
}
| tribbloid/spookystuff | core/src/main/scala/org/apache/spark/rdd/spookystuff/UncleanTaskContext.scala | Scala | apache-2.0 | 2,783 |
package com.automatak.render.dnp3.objects.groups
import com.automatak.render.dnp3.objects._
import FixedSizeField._
import com.automatak.render.dnp3.objects.VariationNames._
import com.automatak.render.dnp3.objects.generators.ConversionToFrozenCounter
// frozen counter events
/** DNP3 object group 23: Frozen Counter Event variations. */
object Group23 extends ObjectGroup {
  def objects = List(Group23Var0, Group23Var1, Group23Var2, Group23Var5, Group23Var6)
  def group: Byte = 23
  def desc: String = "Frozen Counter Event"
  // Events (as opposed to static values) — affects generated event handling.
  def isEventGroup: Boolean = true
}
// Variation 0: "any variation" placeholder used in requests.
object Group23Var0 extends AnyVariation(Group23, 0)
// Variations 1/2: 32- and 16-bit counter value with flags.
object Group23Var1 extends FixedSize(Group23, 1, bit32WithFlag)(flags, count32) with ConversionToFrozenCounter
object Group23Var2 extends FixedSize(Group23, 2, bit16WithFlag)(flags, count16) with ConversionToFrozenCounter
// Variations 5/6: same as 1/2 plus a 48-bit absolute timestamp.
object Group23Var5 extends FixedSize(Group23, 5, bit32WithFlagTime)(flags, count32, time48) with ConversionToFrozenCounter
object Group23Var6 extends FixedSize(Group23, 6, bit16WithFlagTime)(flags, count16, time48) with ConversionToFrozenCounter
| thiagoralves/OpenPLC_v2 | dnp3/generation/dnp3/src/main/scala/com/automatak/render/dnp3/objects/groups/Group23.scala | Scala | gpl-3.0 | 1,027 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator
import java.util.Properties
import java.util.concurrent.atomic.AtomicBoolean
import kafka.common.{OffsetAndMetadata, OffsetMetadataAndError, TopicAndPartition}
import kafka.log.LogConfig
import kafka.message.{Message, UncompressedCodec}
import kafka.server._
import kafka.utils._
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.JoinGroupRequest
import scala.collection.{Map, Seq, immutable}
// Bounds (ms) accepted for a member's requested session timeout at join time.
case class GroupConfig(groupMinSessionTimeoutMs: Int,
                       groupMaxSessionTimeoutMs: Int)
// Outcome of a JoinGroup round delivered to the response callback.
// `members` carries per-member metadata only when the receiver is the leader.
case class JoinGroupResult(members: Map[String, Array[Byte]],
                           memberId: String,
                           generationId: Int,
                           subProtocol: String,
                           leaderId: String,
                           errorCode: Short)
/**
* GroupCoordinator handles general group membership and offset management.
*
* Each Kafka server instantiates a coordinator which is responsible for a set of
* groups. Groups are assigned to coordinators based on their group names.
*/
class GroupCoordinator(val brokerId: Int,
val groupConfig: GroupConfig,
val offsetConfig: OffsetConfig,
val groupManager: GroupMetadataManager) extends Logging {
  // Callback shapes used to deliver JoinGroup / SyncGroup responses.
  type JoinCallback = JoinGroupResult => Unit
  type SyncCallback = (Array[Byte], Short) => Unit

  this.logIdent = "[GroupCoordinator " + brokerId + "]: "

  // Flips to true in startup() and back to false in shutdown(); request
  // handlers reject work while false.
  private val isActive = new AtomicBoolean(false)
  // Purgatories are created in startup(); null until then.
  private var heartbeatPurgatory: DelayedOperationPurgatory[DelayedHeartbeat] = null
  private var joinPurgatory: DelayedOperationPurgatory[DelayedJoin] = null
  // Convenience constructor that builds the GroupMetadataManager from its
  // collaborators.
  def this(brokerId: Int,
           groupConfig: GroupConfig,
           offsetConfig: OffsetConfig,
           replicaManager: ReplicaManager,
           zkUtils: ZkUtils,
           scheduler: Scheduler) = this(brokerId, groupConfig, offsetConfig,
    new GroupMetadataManager(brokerId, offsetConfig, replicaManager, zkUtils, scheduler))
  /** Topic-level configs for the internal offsets topic: compacted,
    * with the configured segment size, and uncompressed storage.
    */
  def offsetsTopicConfigs: Properties = {
    val props = new Properties
    props.put(LogConfig.CleanupPolicyProp, LogConfig.Compact)
    props.put(LogConfig.SegmentBytesProp, offsetConfig.offsetsTopicSegmentBytes.toString)
    props.put(LogConfig.CompressionTypeProp, UncompressedCodec.name)
    props
  }
/**
* NOTE: If a group lock and metadataLock are simultaneously needed,
* be sure to acquire the group lock before metadataLock to prevent deadlock
*/
/**
* Startup logic executed at the same time when the server starts up.
*/
  def startup() {
    info("Starting up.")
    // Create the delayed-operation purgatories before accepting requests.
    heartbeatPurgatory = new DelayedOperationPurgatory[DelayedHeartbeat]("Heartbeat", brokerId)
    joinPurgatory = new DelayedOperationPurgatory[DelayedJoin]("Rebalance", brokerId)
    // Flip the gate last so request handlers never see half-initialized state.
    isActive.set(true)
    info("Startup complete.")
  }
/**
* Shutdown logic executed at the same time when server shuts down.
* Ordering of actions should be reversed from the startup process.
*/
  def shutdown() {
    info("Shutting down.")
    // Reject new requests first, then tear down in reverse startup order.
    isActive.set(false)
    groupManager.shutdown()
    heartbeatPurgatory.shutdown()
    joinPurgatory.shutdown()
    info("Shutdown complete.")
  }
  /** Entry point for JoinGroup requests.
    *
    * Validates coordinator availability, group-id ownership, load state and
    * the requested session timeout, creating the group on first contact (only
    * when the member id is unknown), then delegates to [[doJoinGroup]].
    * All outcomes — errors included — are delivered via `responseCallback`.
    */
  def handleJoinGroup(groupId: String,
                      memberId: String,
                      clientId: String,
                      clientHost: String,
                      sessionTimeoutMs: Int,
                      protocolType: String,
                      protocols: List[(String, Array[Byte])],
                      responseCallback: JoinCallback) {
    if (!isActive.get) {
      // Coordinator not started or already shut down.
      responseCallback(joinError(memberId, Errors.GROUP_COORDINATOR_NOT_AVAILABLE.code))
    } else if (!validGroupId(groupId)) {
      responseCallback(joinError(memberId, Errors.INVALID_GROUP_ID.code))
    } else if (!isCoordinatorForGroup(groupId)) {
      // This broker does not own the group's partition of the offsets topic.
      responseCallback(joinError(memberId, Errors.NOT_COORDINATOR_FOR_GROUP.code))
    } else if (isCoordinatorLoadingInProgress(groupId)) {
      responseCallback(joinError(memberId, Errors.GROUP_LOAD_IN_PROGRESS.code))
    } else if (sessionTimeoutMs < groupConfig.groupMinSessionTimeoutMs ||
               sessionTimeoutMs > groupConfig.groupMaxSessionTimeoutMs) {
      responseCallback(joinError(memberId, Errors.INVALID_SESSION_TIMEOUT.code))
    } else {
      // only try to create the group if the group is not unknown AND
      // the member id is UNKNOWN, if member is specified but group does not
      // exist we should reject the request
      var group = groupManager.getGroup(groupId)
      if (group == null) {
        if (memberId != JoinGroupRequest.UNKNOWN_MEMBER_ID) {
          responseCallback(joinError(memberId, Errors.UNKNOWN_MEMBER_ID.code))
        } else {
          group = groupManager.addGroup(groupId, protocolType)
          doJoinGroup(group, memberId, clientId, clientHost, sessionTimeoutMs, protocolType, protocols, responseCallback)
        }
      } else {
        doJoinGroup(group, memberId, clientId, clientHost, sessionTimeoutMs, protocolType, protocols, responseCallback)
      }
    }
  }
  /** Core JoinGroup state machine, executed under the group's lock.
    *
    * Dispatches on the group's current state (Dead / PreparingRebalance /
    * AwaitingSync / Stable): unknown member ids are added to the group,
    * known members either rejoin unchanged (getting the current generation
    * back) or trigger a rebalance when their protocol metadata changed (or
    * when the leader rejoins). Finally, if the group entered
    * PreparingRebalance, the delayed join may now be completable.
    */
  private def doJoinGroup(group: GroupMetadata,
                          memberId: String,
                          clientId: String,
                          clientHost: String,
                          sessionTimeoutMs: Int,
                          protocolType: String,
                          protocols: List[(String, Array[Byte])],
                          responseCallback: JoinCallback) {
    group synchronized {
      if (group.protocolType != protocolType || !group.supportsProtocols(protocols.map(_._1).toSet)) {
        // if the new member does not support the group protocol, reject it
        responseCallback(joinError(memberId, Errors.INCONSISTENT_GROUP_PROTOCOL.code))
      } else if (memberId != JoinGroupRequest.UNKNOWN_MEMBER_ID && !group.has(memberId)) {
        // if the member trying to register with a un-recognized id, send the response to let
        // it reset its member id and retry
        responseCallback(joinError(memberId, Errors.UNKNOWN_MEMBER_ID.code))
      } else {
        group.currentState match {
          case Dead =>
            // if the group is marked as dead, it means some other thread has just removed the group
            // from the coordinator metadata; this is likely that the group has migrated to some other
            // coordinator OR the group is in a transient unstable phase. Let the member retry
            // joining without the specified member id,
            responseCallback(joinError(memberId, Errors.UNKNOWN_MEMBER_ID.code))

          case PreparingRebalance =>
            if (memberId == JoinGroupRequest.UNKNOWN_MEMBER_ID) {
              addMemberAndRebalance(sessionTimeoutMs, clientId, clientHost, protocols, group, responseCallback)
            } else {
              val member = group.get(memberId)
              updateMemberAndRebalance(group, member, protocols, responseCallback)
            }

          case AwaitingSync =>
            if (memberId == JoinGroupRequest.UNKNOWN_MEMBER_ID) {
              addMemberAndRebalance(sessionTimeoutMs, clientId, clientHost, protocols, group, responseCallback)
            } else {
              val member = group.get(memberId)
              if (member.matches(protocols)) {
                // member is joining with the same metadata (which could be because it failed to
                // receive the initial JoinGroup response), so just return current group information
                // for the current generation.
                responseCallback(JoinGroupResult(
                  members = if (memberId == group.leaderId) {
                    group.currentMemberMetadata
                  } else {
                    Map.empty
                  },
                  memberId = memberId,
                  generationId = group.generationId,
                  subProtocol = group.protocol,
                  leaderId = group.leaderId,
                  errorCode = Errors.NONE.code))
              } else {
                // member has changed metadata, so force a rebalance
                updateMemberAndRebalance(group, member, protocols, responseCallback)
              }
            }

          case Stable =>
            if (memberId == JoinGroupRequest.UNKNOWN_MEMBER_ID) {
              // if the member id is unknown, register the member to the group
              addMemberAndRebalance(sessionTimeoutMs, clientId, clientHost, protocols, group, responseCallback)
            } else {
              val member = group.get(memberId)
              if (memberId == group.leaderId || !member.matches(protocols)) {
                // force a rebalance if a member has changed metadata or if the leader sends JoinGroup.
                // The latter allows the leader to trigger rebalances for changes affecting assignment
                // which do not affect the member metadata (such as topic metadata changes for the consumer)
                updateMemberAndRebalance(group, member, protocols, responseCallback)
              } else {
                // for followers with no actual change to their metadata, just return group information
                // for the current generation which will allow them to issue SyncGroup
                responseCallback(JoinGroupResult(
                  members = Map.empty,
                  memberId = memberId,
                  generationId = group.generationId,
                  subProtocol = group.protocol,
                  leaderId = group.leaderId,
                  errorCode = Errors.NONE.code))
              }
            }
        }

        // A state change above may have made the pending rebalance completable.
        if (group.is(PreparingRebalance))
          joinPurgatory.checkAndComplete(ConsumerGroupKey(group.groupId))
      }
    }
  }
/**
 * Handle a SyncGroup request: validate that this coordinator is active and owns
 * the group, then delegate to doSyncGroup.
 *
 * Error responses (empty assignment plus error code):
 *  - GROUP_COORDINATOR_NOT_AVAILABLE if the coordinator is not active
 *  - NOT_COORDINATOR_FOR_GROUP if this broker does not own the group
 *  - UNKNOWN_MEMBER_ID if the group does not exist on this coordinator
 */
def handleSyncGroup(groupId: String,
                    generation: Int,
                    memberId: String,
                    groupAssignment: Map[String, Array[Byte]],
                    responseCallback: SyncCallback) {
  if (!isActive.get) {
    responseCallback(Array.empty, Errors.GROUP_COORDINATOR_NOT_AVAILABLE.code)
  } else if (!isCoordinatorForGroup(groupId)) {
    responseCallback(Array.empty, Errors.NOT_COORDINATOR_FOR_GROUP.code)
  } else {
    val group = groupManager.getGroup(groupId)
    if (group == null)
      // unknown group on this coordinator: the member must rejoin from scratch
      responseCallback(Array.empty, Errors.UNKNOWN_MEMBER_ID.code)
    else
      doSyncGroup(group, generation, memberId, groupAssignment, responseCallback)
  }
}
/**
 * Perform the SyncGroup under the group lock.
 *
 * Behavior by group state:
 *  - Dead: UNKNOWN_MEMBER_ID (the group was concurrently removed)
 *  - PreparingRebalance: REBALANCE_IN_PROGRESS (member must rejoin first)
 *  - AwaitingSync: park the callback on the member; if the caller is the leader,
 *    persist the completed assignment and transition to Stable once storage succeeds
 *  - Stable: immediately return the member's current assignment
 *
 * Membership and generation are validated before any state-specific handling.
 */
private def doSyncGroup(group: GroupMetadata,
                        generationId: Int,
                        memberId: String,
                        groupAssignment: Map[String, Array[Byte]],
                        responseCallback: SyncCallback) {
  // Any pending append is collected here and handed to the group manager only
  // after the group lock has been released (see comment at the bottom).
  var delayedGroupStore: Option[DelayedStore] = None

  group synchronized {
    if (!group.has(memberId)) {
      responseCallback(Array.empty, Errors.UNKNOWN_MEMBER_ID.code)
    } else if (generationId != group.generationId) {
      responseCallback(Array.empty, Errors.ILLEGAL_GENERATION.code)
    } else {
      group.currentState match {
        case Dead =>
          responseCallback(Array.empty, Errors.UNKNOWN_MEMBER_ID.code)

        case PreparingRebalance =>
          responseCallback(Array.empty, Errors.REBALANCE_IN_PROGRESS.code)

        case AwaitingSync =>
          // Defer the response until the assignment is known; receiving SyncGroup
          // also counts as liveness, so push the member's heartbeat deadline forward.
          group.get(memberId).awaitingSyncCallback = responseCallback
          completeAndScheduleNextHeartbeatExpiration(group, group.get(memberId))

          // if this is the leader, then we can attempt to persist state and transition to stable
          if (memberId == group.leaderId) {
            info(s"Assignment received from leader for group ${group.groupId} for generation ${group.generationId}")

            // fill any missing members with an empty assignment
            val missing = group.allMembers -- groupAssignment.keySet
            val assignment = groupAssignment ++ missing.map(_ -> Array.empty[Byte]).toMap

            delayedGroupStore = Some(groupManager.prepareStoreGroup(group, assignment, (errorCode: Short) => {
              group synchronized {
                // another member may have joined the group while we were awaiting this callback,
                // so we must ensure we are still in the AwaitingSync state and the same generation
                // when it gets invoked. if we have transitioned to another state, then do nothing
                if (group.is(AwaitingSync) && generationId == group.generationId) {
                  if (errorCode != Errors.NONE.code) {
                    resetAndPropagateAssignmentError(group, errorCode)
                    maybePrepareRebalance(group)
                  } else {
                    setAndPropagateAssignment(group, assignment)
                    group.transitionTo(Stable)
                  }
                }
              }
            }))
          }

        case Stable =>
          // if the group is stable, we just return the current assignment
          val memberMetadata = group.get(memberId)
          responseCallback(memberMetadata.assignment, Errors.NONE.code)
          completeAndScheduleNextHeartbeatExpiration(group, group.get(memberId))
      }
    }
  }

  // store the group metadata without holding the group lock to avoid the potential
  // for deadlock when the callback is invoked
  delayedGroupStore.foreach(groupManager.store)
}
/**
 * Handle a LeaveGroup request: after the usual coordinator-level validations,
 * remove the member from the group, cancel its pending heartbeat expiration,
 * and trigger whatever state change its departure implies (see onMemberFailure).
 */
def handleLeaveGroup(groupId: String, consumerId: String, responseCallback: Short => Unit) {
  if (!isActive.get) {
    responseCallback(Errors.GROUP_COORDINATOR_NOT_AVAILABLE.code)
  } else if (!isCoordinatorForGroup(groupId)) {
    responseCallback(Errors.NOT_COORDINATOR_FOR_GROUP.code)
  } else if (isCoordinatorLoadingInProgress(groupId)) {
    responseCallback(Errors.GROUP_LOAD_IN_PROGRESS.code)
  } else {
    val group = groupManager.getGroup(groupId)
    if (group == null) {
      // if the group is marked as dead, it means some other thread has just removed the group
      // from the coordinator metadata; this is likely that the group has migrated to some other
      // coordinator OR the group is in a transient unstable phase. Let the consumer retry
      // joining without a specified consumer id.
      responseCallback(Errors.UNKNOWN_MEMBER_ID.code)
    } else {
      group synchronized {
        if (group.is(Dead)) {
          responseCallback(Errors.UNKNOWN_MEMBER_ID.code)
        } else if (!group.has(consumerId)) {
          responseCallback(Errors.UNKNOWN_MEMBER_ID.code)
        } else {
          val member = group.get(consumerId)
          // cancel the pending session-timeout check before removing the member
          removeHeartbeatForLeavingMember(group, member)
          onMemberFailure(group, member)
          responseCallback(Errors.NONE.code)
        }
      }
    }
  }
}
/**
 * Handle a Heartbeat request.
 *
 * Validation order: coordinator active -> owns the group -> group loaded ->
 * group exists -> group not Dead -> group Stable -> member known -> generation
 * matches. On success the member's session deadline is pushed forward.
 */
def handleHeartbeat(groupId: String,
                    memberId: String,
                    generationId: Int,
                    responseCallback: Short => Unit) {
  if (!isActive.get) {
    responseCallback(Errors.GROUP_COORDINATOR_NOT_AVAILABLE.code)
  } else if (!isCoordinatorForGroup(groupId)) {
    responseCallback(Errors.NOT_COORDINATOR_FOR_GROUP.code)
  } else if (isCoordinatorLoadingInProgress(groupId)) {
    // the group is still loading, so respond just blindly
    responseCallback(Errors.NONE.code)
  } else {
    val group = groupManager.getGroup(groupId)
    if (group == null) {
      responseCallback(Errors.UNKNOWN_MEMBER_ID.code)
    } else {
      group synchronized {
        if (group.is(Dead)) {
          // if the group is marked as dead, it means some other thread has just removed the group
          // from the coordinator metadata; this is likely that the group has migrated to some other
          // coordinator OR the group is in a transient unstable phase. Let the member retry
          // joining without the specified member id,
          responseCallback(Errors.UNKNOWN_MEMBER_ID.code)
        } else if (!group.is(Stable)) {
          responseCallback(Errors.REBALANCE_IN_PROGRESS.code)
        } else if (!group.has(memberId)) {
          responseCallback(Errors.UNKNOWN_MEMBER_ID.code)
        } else if (generationId != group.generationId) {
          responseCallback(Errors.ILLEGAL_GENERATION.code)
        } else {
          val member = group.get(memberId)
          completeAndScheduleNextHeartbeatExpiration(group, member)
          responseCallback(Errors.NONE.code)
        }
      }
    }
  }
}
/**
 * Handle an OffsetCommit request. The accepted offsets are appended via a
 * DelayedStore, which is handed to the group manager only after the group lock
 * is released.
 *
 * For unknown groups, a negative generation means the caller manages partitions
 * itself (no Kafka group management), so the commit is allowed; a non-negative
 * generation for an unknown group is rejected with ILLEGAL_GENERATION.
 */
def handleCommitOffsets(groupId: String,
                        memberId: String,
                        generationId: Int,
                        offsetMetadata: immutable.Map[TopicAndPartition, OffsetAndMetadata],
                        responseCallback: immutable.Map[TopicAndPartition, Short] => Unit) {
  var delayedOffsetStore: Option[DelayedStore] = None

  if (!isActive.get) {
    responseCallback(offsetMetadata.mapValues(_ => Errors.GROUP_COORDINATOR_NOT_AVAILABLE.code))
  } else if (!isCoordinatorForGroup(groupId)) {
    responseCallback(offsetMetadata.mapValues(_ => Errors.NOT_COORDINATOR_FOR_GROUP.code))
  } else if (isCoordinatorLoadingInProgress(groupId)) {
    responseCallback(offsetMetadata.mapValues(_ => Errors.GROUP_LOAD_IN_PROGRESS.code))
  } else {
    val group = groupManager.getGroup(groupId)
    if (group == null) {
      if (generationId < 0)
        // the group is not relying on Kafka for partition management, so allow the commit
        delayedOffsetStore = Some(groupManager.prepareStoreOffsets(groupId, memberId, generationId, offsetMetadata,
          responseCallback))
      else
        // the group has failed over to this coordinator (which will be handled in KAFKA-2017),
        // or this is a request coming from an older generation. either way, reject the commit
        responseCallback(offsetMetadata.mapValues(_ => Errors.ILLEGAL_GENERATION.code))
    } else {
      group synchronized {
        if (group.is(Dead)) {
          responseCallback(offsetMetadata.mapValues(_ => Errors.UNKNOWN_MEMBER_ID.code))
        } else if (group.is(AwaitingSync)) {
          responseCallback(offsetMetadata.mapValues(_ => Errors.REBALANCE_IN_PROGRESS.code))
        } else if (!group.has(memberId)) {
          responseCallback(offsetMetadata.mapValues(_ => Errors.UNKNOWN_MEMBER_ID.code))
        } else if (generationId != group.generationId) {
          responseCallback(offsetMetadata.mapValues(_ => Errors.ILLEGAL_GENERATION.code))
        } else {
          delayedOffsetStore = Some(groupManager.prepareStoreOffsets(groupId, memberId, generationId,
            offsetMetadata, responseCallback))
        }
      }
    }
  }

  // store the offsets without holding the group lock
  delayedOffsetStore.foreach(groupManager.store)
}
/**
 * Fetch the committed offsets for the given partitions of a group.
 *
 * Returns a per-partition error when the coordinator is inactive, does not own
 * the group, or is still loading the group's offsets; otherwise delegates to the
 * group manager regardless of group state, since the group may be using Kafka
 * commit storage without automatic group management.
 */
def handleFetchOffsets(groupId: String,
                       partitions: Seq[TopicAndPartition]): Map[TopicAndPartition, OffsetMetadataAndError] = {
  // Plain lambdas instead of redundant `{case tp => ...}` partial-function literals.
  if (!isActive.get) {
    partitions.map(tp => tp -> OffsetMetadataAndError.GroupCoordinatorNotAvailable).toMap
  } else if (!isCoordinatorForGroup(groupId)) {
    partitions.map(tp => tp -> OffsetMetadataAndError.NotCoordinatorForGroup).toMap
  } else if (isCoordinatorLoadingInProgress(groupId)) {
    partitions.map(tp => tp -> OffsetMetadataAndError.GroupLoading).toMap
  } else {
    // return offsets blindly regardless the current group state since the group may be using
    // Kafka commit storage without automatic group management
    groupManager.getOffsets(groupId, partitions)
  }
}
/**
 * List all groups hosted on this coordinator, along with an error code that is
 * GROUP_LOAD_IN_PROGRESS while any group partition is still being loaded.
 */
def handleListGroups(): (Errors, List[GroupOverview]) = {
  if (!isActive.get) {
    (Errors.GROUP_COORDINATOR_NOT_AVAILABLE, List[GroupOverview]())
  } else {
    val error =
      if (groupManager.isLoading()) Errors.GROUP_LOAD_IN_PROGRESS
      else Errors.NONE
    val overviews = groupManager.currentGroups.map(_.overview).toList
    (error, overviews)
  }
}
/**
 * Describe a single group. Returns EmptyGroup with an error for coordinator-level
 * failures, DeadGroup when the group is unknown here, and otherwise a snapshot of
 * the group summary taken under the group lock.
 */
def handleDescribeGroup(groupId: String): (Errors, GroupSummary) = {
  if (!isActive.get) {
    (Errors.GROUP_COORDINATOR_NOT_AVAILABLE, GroupCoordinator.EmptyGroup)
  } else if (!isCoordinatorForGroup(groupId)) {
    (Errors.NOT_COORDINATOR_FOR_GROUP, GroupCoordinator.EmptyGroup)
  } else if (isCoordinatorLoadingInProgress(groupId)) {
    (Errors.GROUP_LOAD_IN_PROGRESS, GroupCoordinator.EmptyGroup)
  } else {
    // Wrap the nullable lookup in an Option and pattern match on it.
    Option(groupManager.getGroup(groupId)) match {
      case None =>
        (Errors.NONE, GroupCoordinator.DeadGroup)
      case Some(group) =>
        group synchronized {
          (Errors.NONE, group.summary)
        }
    }
  }
}
/** Invoked when this broker becomes coordinator for an offsets-topic partition: load its groups. */
def handleGroupImmigration(offsetTopicPartitionId: Int) = {
  groupManager.loadGroupsForPartition(offsetTopicPartitionId)
}

/** Invoked when this broker ceases to be coordinator for an offsets-topic partition: drop its groups. */
def handleGroupEmigration(offsetTopicPartitionId: Int) = {
  groupManager.removeGroupsForPartition(offsetTopicPartitionId)
}
/** Installs the leader-provided assignment on every member, then answers pending sync callbacks. */
private def setAndPropagateAssignment(group: GroupMetadata, assignment: Map[String, Array[Byte]]) {
  assert(group.is(AwaitingSync))
  group.allMemberMetadata.foreach(member => member.assignment = assignment(member.memberId))
  propagateAssignment(group, Errors.NONE.code)
}

/** Clears all member assignments and propagates the given error to members awaiting sync. */
private def resetAndPropagateAssignmentError(group: GroupMetadata, errorCode: Short) {
  assert(group.is(AwaitingSync))
  group.allMemberMetadata.foreach(_.assignment = Array.empty[Byte])
  propagateAssignment(group, errorCode)
}

/** Answers every parked SyncGroup callback with the member's assignment (or the error). */
private def propagateAssignment(group: GroupMetadata, errorCode: Short) {
  for (member <- group.allMemberMetadata) {
    if (member.awaitingSyncCallback != null) {
      member.awaitingSyncCallback(member.assignment, errorCode)
      member.awaitingSyncCallback = null

      // reset the session timeout for members after propagating the member's assignment.
      // This is because if any member's session expired while we were still awaiting either
      // the leader sync group or the storage callback, its expiration was effectively ignored
      // and no future heartbeat expectations would have been scheduled.
      completeAndScheduleNextHeartbeatExpiration(group, member)
    }
  }
}
/** A group id is valid iff it is non-null and non-empty. */
private def validGroupId(groupId: String): Boolean =
  Option(groupId).exists(_.nonEmpty)
/** Builds an error JoinGroupResult for the given member: empty members, generation 0, no protocol/leader. */
private def joinError(memberId: String, errorCode: Short): JoinGroupResult = {
  JoinGroupResult(
    members = Map.empty,
    memberId = memberId,
    generationId = 0,
    subProtocol = GroupCoordinator.NoProtocol,
    leaderId = GroupCoordinator.NoLeader,
    errorCode = errorCode)
}
/**
 * Complete existing DelayedHeartbeats for the given member and schedule the next one.
 *
 * Records the current time as the member's latest heartbeat, completes any delayed
 * heartbeat watching this member, and installs a fresh DelayedHeartbeat expiring one
 * session timeout from now.
 */
private def completeAndScheduleNextHeartbeatExpiration(group: GroupMetadata, member: MemberMetadata) {
  // complete current heartbeat expectation
  member.latestHeartbeat = SystemTime.milliseconds
  val memberKey = ConsumerKey(member.groupId, member.memberId)
  heartbeatPurgatory.checkAndComplete(memberKey)

  // reschedule the next heartbeat expiration deadline
  val newHeartbeatDeadline = member.latestHeartbeat + member.sessionTimeoutMs
  val delayedHeartbeat = new DelayedHeartbeat(this, group, member, newHeartbeatDeadline, member.sessionTimeoutMs)
  heartbeatPurgatory.tryCompleteElseWatch(delayedHeartbeat, Seq(memberKey))
}
/** Marks the member as leaving and completes its pending delayed heartbeat immediately. */
private def removeHeartbeatForLeavingMember(group: GroupMetadata, member: MemberMetadata) {
  // isLeaving lets tryCompleteHeartbeat force-complete the outstanding heartbeat.
  member.isLeaving = true
  heartbeatPurgatory.checkAndComplete(ConsumerKey(member.groupId, member.memberId))
}
/**
 * Registers a brand-new member (member id = clientId plus a random suffix), parks its
 * JoinGroup callback, and triggers a rebalance if the group state allows one.
 * Returns the newly created member metadata.
 */
private def addMemberAndRebalance(sessionTimeoutMs: Int,
                                  clientId: String,
                                  clientHost: String,
                                  protocols: List[(String, Array[Byte])],
                                  group: GroupMetadata,
                                  callback: JoinCallback) = {
  // use the client-id with a random id suffix as the member-id
  val memberId = clientId + "-" + group.generateMemberIdSuffix
  val member = new MemberMetadata(memberId, group.groupId, clientId, clientHost, sessionTimeoutMs, protocols)
  member.awaitingJoinCallback = callback
  group.add(member.memberId, member)
  maybePrepareRebalance(group)
  member
}

/**
 * Updates an existing member's supported protocols, parks its JoinGroup callback,
 * and triggers a rebalance if the group state allows one.
 */
private def updateMemberAndRebalance(group: GroupMetadata,
                                     member: MemberMetadata,
                                     protocols: List[(String, Array[Byte])],
                                     callback: JoinCallback) {
  member.supportedProtocols = protocols
  member.awaitingJoinCallback = callback
  maybePrepareRebalance(group)
}
/** Enters PreparingRebalance under the group lock, but only if the current state permits it. */
private def maybePrepareRebalance(group: GroupMetadata) {
  group synchronized {
    if (group.canRebalance)
      prepareRebalance(group)
  }
}

/**
 * Transitions the group to PreparingRebalance. Members still awaiting sync get a
 * REBALANCE_IN_PROGRESS error first so they rejoin. A DelayedJoin bounds how long
 * we wait for all members to rejoin. Only called from maybePrepareRebalance, which
 * holds the group lock.
 */
private def prepareRebalance(group: GroupMetadata) {
  // if any members are awaiting sync, cancel their request and have them rejoin
  if (group.is(AwaitingSync))
    resetAndPropagateAssignmentError(group, Errors.REBALANCE_IN_PROGRESS.code)

  group.transitionTo(PreparingRebalance)
  info("Preparing to restabilize group %s with old generation %s".format(group.groupId, group.generationId))

  val rebalanceTimeout = group.rebalanceTimeout
  val delayedRebalance = new DelayedJoin(this, group, rebalanceTimeout)
  val consumerGroupKey = ConsumerGroupKey(group.groupId)
  joinPurgatory.tryCompleteElseWatch(delayedRebalance, Seq(consumerGroupKey))
}
/**
 * Removes a failed member from the group and reacts according to the current state:
 * trigger a rebalance if the group was Stable or AwaitingSync, or try to complete
 * the pending DelayedJoin if a rebalance was already in progress. No-op for Dead.
 */
private def onMemberFailure(group: GroupMetadata, member: MemberMetadata) {
  trace("Member %s in group %s has failed".format(member.memberId, group.groupId))
  group.remove(member.memberId)
  group.currentState match {
    case Dead =>
    case Stable | AwaitingSync => maybePrepareRebalance(group)
    case PreparingRebalance => joinPurgatory.checkAndComplete(ConsumerGroupKey(group.groupId))
  }
}
/** DelayedJoin hook: the join phase may complete once every known member has rejoined. */
def tryCompleteJoin(group: GroupMetadata, forceComplete: () => Boolean) = {
  group synchronized {
    if (group.notYetRejoinedMembers.isEmpty)
      forceComplete()
    else false
  }
}

/** DelayedJoin hook invoked when the rebalance timeout expires; currently a no-op. */
def onExpireJoin() {
  // TODO: add metrics for restabilize timeouts
}
/**
 * DelayedJoin hook invoked when the join phase finishes (all members rejoined, or
 * the rebalance timed out). Evicts members that never rejoined, removes the group
 * entirely if it became empty, and otherwise bumps the generation and answers every
 * parked JoinGroup callback (only the leader receives the full member metadata).
 */
def onCompleteJoin(group: GroupMetadata) {
  group synchronized {
    val failedMembers = group.notYetRejoinedMembers
    if (group.isEmpty || !failedMembers.isEmpty) {
      failedMembers.foreach { failedMember =>
        group.remove(failedMember.memberId)
        // TODO: cut the socket connection to the client
      }

      // TODO KAFKA-2720: only remove group in the background thread
      if (group.isEmpty) {
        groupManager.removeGroup(group)
        info("Group %s generation %s is dead and removed".format(group.groupId, group.generationId))
      }
    }
    if (!group.is(Dead)) {
      group.initNextGeneration()
      info("Stabilized group %s generation %s".format(group.groupId, group.generationId))

      // trigger the awaiting join group response callback for all the members after rebalancing
      for (member <- group.allMemberMetadata) {
        assert(member.awaitingJoinCallback != null)
        val joinResult = JoinGroupResult(
          members=if (member.memberId == group.leaderId) { group.currentMemberMetadata } else { Map.empty },
          memberId=member.memberId,
          generationId=group.generationId,
          subProtocol=group.protocol,
          leaderId=group.leaderId,
          errorCode=Errors.NONE.code)

        member.awaitingJoinCallback(joinResult)
        member.awaitingJoinCallback = null
        completeAndScheduleNextHeartbeatExpiration(group, member)
      }
    }
  }
}
/**
 * DelayedHeartbeat hook: complete early if the member is still considered alive
 * (see shouldKeepMemberAlive) or is deliberately leaving the group.
 */
def tryCompleteHeartbeat(group: GroupMetadata, member: MemberMetadata, heartbeatDeadline: Long, forceComplete: () => Boolean) = {
  group synchronized {
    if (shouldKeepMemberAlive(member, heartbeatDeadline) || member.isLeaving)
      forceComplete()
    else false
  }
}

/** DelayedHeartbeat hook: the session deadline passed without contact; treat the member as failed. */
def onExpireHeartbeat(group: GroupMetadata, member: MemberMetadata, heartbeatDeadline: Long) {
  group synchronized {
    if (!shouldKeepMemberAlive(member, heartbeatDeadline))
      onMemberFailure(group, member)
  }
}

/** DelayedHeartbeat hook invoked on successful completion; currently a no-op. */
def onCompleteHeartbeat() {
  // TODO: add metrics for complete heartbeats
}
/** Partition of the offsets topic that hosts the given group. */
def partitionFor(group: String): Int = groupManager.partitionFor(group)

// A member is alive if it has a join or sync response still in flight, or its last
// heartbeat plus session timeout extends beyond the expiration deadline.
private def shouldKeepMemberAlive(member: MemberMetadata, heartbeatDeadline: Long) =
  member.awaitingJoinCallback != null ||
  member.awaitingSyncCallback != null ||
  member.latestHeartbeat + member.sessionTimeoutMs > heartbeatDeadline

// True iff the group's offsets-topic partition is led by this broker.
private def isCoordinatorForGroup(groupId: String) = groupManager.isGroupLocal(groupId)

// True iff the group's offsets-topic partition is still being loaded into the cache.
private def isCoordinatorLoadingInProgress(groupId: String) = groupManager.isGroupLoading(groupId)
}
object GroupCoordinator {

  val NoState = ""
  val NoProtocolType = ""
  val NoProtocol = ""
  val NoLeader = ""
  val NoMembers = List[MemberSummary]()
  val EmptyGroup = GroupSummary(NoState, NoProtocolType, NoProtocol, NoMembers)
  val DeadGroup = GroupSummary(Dead.toString, NoProtocolType, NoProtocol, NoMembers)

  // TODO: we store both group metadata and offset data here despite the topic name being offsets only
  val GroupMetadataTopicName = "__consumer_offsets"

  /** Derives the offset-storage settings from the broker configuration (shared by both create overloads). */
  private def offsetConfigFor(config: KafkaConfig): OffsetConfig =
    OffsetConfig(maxMetadataSize = config.offsetMetadataMaxSize,
      loadBufferSize = config.offsetsLoadBufferSize,
      offsetsRetentionMs = config.offsetsRetentionMinutes * 60 * 1000L,
      offsetsRetentionCheckIntervalMs = config.offsetsRetentionCheckIntervalMs,
      offsetsTopicNumPartitions = config.offsetsTopicPartitions,
      offsetsTopicReplicationFactor = config.offsetsTopicReplicationFactor,
      offsetCommitTimeoutMs = config.offsetCommitTimeoutMs,
      offsetCommitRequiredAcks = config.offsetCommitRequiredAcks)

  /** Derives the group session-timeout bounds from the broker configuration. */
  private def groupConfigFor(config: KafkaConfig): GroupConfig =
    GroupConfig(groupMinSessionTimeoutMs = config.groupMinSessionTimeoutMs,
      groupMaxSessionTimeoutMs = config.groupMaxSessionTimeoutMs)

  /** Creates a coordinator that builds its own group metadata manager. */
  def create(config: KafkaConfig,
             zkUtils: ZkUtils,
             replicaManager: ReplicaManager,
             scheduler: Scheduler): GroupCoordinator = {
    new GroupCoordinator(config.brokerId, groupConfigFor(config), offsetConfigFor(config),
      replicaManager, zkUtils, scheduler)
  }

  /** Creates a coordinator around an externally supplied group metadata manager. */
  def create(config: KafkaConfig,
             groupManager: GroupMetadataManager): GroupCoordinator = {
    new GroupCoordinator(config.brokerId, groupConfigFor(config), offsetConfigFor(config), groupManager)
  }
}
| Zhiqiang-He/kafka-0914-edit | core/src/main/scala/kafka/coordinator/GroupCoordinator.scala | Scala | apache-2.0 | 32,337 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js tools **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013-2014, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.core.tools.linker.backend.emitter
import java.net.URI
import org.scalajs.core.ir.ScalaJSVersions
import org.scalajs.core.tools.io._
import org.scalajs.core.tools.sem._
import org.scalajs.core.tools.linker.backend.{ModuleKind, OutputMode}
import scala.collection.immutable.Seq
import scala.collection.mutable
/* The only reason this is not private[emitter] is that Closure needs it.
 * TODO We should try and get rid of this coupling.
 */
private[backend] object CoreJSLibs {

  // A generated lib is fully determined by these three settings.
  private type Config = (Semantics, OutputMode, ModuleKind)

  // Cache of generated libs; mutable.HashMap is not thread-safe on its own, so all
  // access goes through the `synchronized` block in lib().
  private val cachedLibByConfig =
    mutable.HashMap.empty[Config, VirtualJSFile]

  // The scalajsenv template, split into individual lines for the preprocessor.
  private val ScalaJSEnvLines =
    ScalaJSEnvHolder.scalajsenv.split("\\n|\\r\\n?")

  // Base URI used to point released builds at the published source on GitHub.
  private val gitHubBaseURI =
    new URI("https://raw.githubusercontent.com/scala-js/scala-js/")

  /**
   * Returns the core JS lib specialized for the given configuration, generating
   * and memoizing it on first request. Synchronizes on this object.
   */
  def lib(semantics: Semantics, outputMode: OutputMode,
      moduleKind: ModuleKind): VirtualJSFile = {
    synchronized {
      cachedLibByConfig.getOrElseUpdate(
          (semantics, outputMode, moduleKind),
          makeLib(semantics, outputMode, moduleKind))
    }
  }

  private def makeLib(semantics: Semantics, outputMode: OutputMode,
      moduleKind: ModuleKind): VirtualJSFile = {
    new ScalaJSEnvVirtualJSFile(makeContent(semantics, outputMode, moduleKind))
  }

  /**
   * Produces the JS source of the core lib for the given configuration: runs a
   * line-based preprocessor over the template (`//!if opt == value`, `//!else`,
   * `//!endif` directives), substitutes {{LINKER_VERSION}}, and then rewrites
   * identifiers and ES constructs according to the output mode.
   */
  private def makeContent(semantics: Semantics, outputMode: OutputMode,
      moduleKind: ModuleKind): String = {
    // This is a basic sort-of-C-style preprocessor

    // Resolves a preprocessor option name to its configured string value.
    // A MatchError here means the template references an unknown option.
    def getOption(name: String): String = name match {
      case "asInstanceOfs" =>
        semantics.asInstanceOfs.toString()
      case "arrayIndexOutOfBounds" =>
        semantics.arrayIndexOutOfBounds.toString()
      case "moduleInit" =>
        semantics.moduleInit.toString()
      case "floats" =>
        if (semantics.strictFloats) "Strict"
        else "Loose"
      case "productionMode" =>
        semantics.productionMode.toString()
      case "outputMode" =>
        outputMode.toString()
      case "moduleKind" =>
        moduleKind.toString()
    }

    val originalLines = ScalaJSEnvLines

    // Preprocessor state: `skipping` is true inside a branch whose condition
    // failed; `skipDepth` tracks the nesting of //!if blocks while skipping.
    var skipping = false
    var skipDepth = 0
    val lines = for (line <- originalLines) yield {
      val includeThisLine = if (skipping) {
        if (line == "//!else" && skipDepth == 1) {
          skipping = false
          skipDepth = 0
        } else if (line == "//!endif") {
          skipDepth -= 1
          if (skipDepth == 0)
            skipping = false
        } else if (line.startsWith("//!if ")) {
          skipDepth += 1
        }
        false
      } else {
        if (line.startsWith("//!")) {
          if (line.startsWith("//!if ")) {
            val Array(_, option, op, value) = line.split(" ")
            val optionValue = getOption(option)
            val success = op match {
              case "==" => optionValue == value
              case "!=" => optionValue != value
            }
            if (!success) {
              skipping = true
              skipDepth = 1
            }
          } else if (line == "//!else") {
            skipping = true
            skipDepth = 1
          } else if (line == "//!endif") {
            // nothing to do
          } else {
            throw new MatchError(line)
          }
          false
        } else {
          true
        }
      }
      if (includeThisLine) line
      else "" // blank line preserves line numbers in source maps
    }

    val content = lines.mkString("", "\\n", "\\n").replace(
        "{{LINKER_VERSION}}", ScalaJSVersions.current)

    // Rewrite the ScalaJS.* namespaces to short $-prefixed names for the
    // isolated/ES6 modes. Order matters: specific prefixes must be replaced
    // before the generic "ScalaJS." fallback at the end.
    val content1 = outputMode match {
      case OutputMode.ECMAScript51Global =>
        content

      case OutputMode.ECMAScript51Isolated | OutputMode.ECMAScript6 =>
        content
          .replaceAll("ScalaJS\\\\.d\\\\.", "\\\\$d_")
          .replaceAll("ScalaJS\\\\.c\\\\.", "\\\\$c_")
          .replaceAll("ScalaJS\\\\.h\\\\.", "\\\\$h_")
          .replaceAll("ScalaJS\\\\.s\\\\.", "\\\\$s_")
          .replaceAll("ScalaJS\\\\.n\\\\.", "\\\\$n_")
          .replaceAll("ScalaJS\\\\.m\\\\.", "\\\\$m_")
          .replaceAll("ScalaJS\\\\.is\\\\.", "\\\\$is_")
          .replaceAll("ScalaJS\\\\.as\\\\.", "\\\\$as_")
          .replaceAll("ScalaJS\\\\.isArrayOf\\\\.", "\\\\$isArrayOf_")
          .replaceAll("ScalaJS\\\\.asArrayOf\\\\.", "\\\\$asArrayOf_")
          .replaceAll("ScalaJS\\\\.", "\\\\$")
          .replaceAll("\\n(\\\\$[A-Za-z0-9_]+) =", "\\nconst $1 =")
    }

    // Downgrade `let`/`const` to `var` for the ES 5.1 output modes.
    outputMode match {
      case OutputMode.ECMAScript51Global | OutputMode.ECMAScript51Isolated =>
        content1
          .replaceAll(raw"\\b(let|const)\\b", "var")

      case OutputMode.ECMAScript6 =>
        content1
    }
  }

  /** In-memory VirtualJSFile wrapping the generated lib content. */
  private class ScalaJSEnvVirtualJSFile(override val content: String) extends VirtualJSFile {
    override def path: String = "scalajsenv.js"
    // Constant version "": for a given config the content never changes.
    override def version: Option[String] = Some("")
    override def exists: Boolean = true

    // For non-snapshot versions, point at the published source on GitHub.
    override def toURI: URI = {
      if (!ScalaJSVersions.currentIsSnapshot)
        gitHubBaseURI.resolve(s"v${ScalaJSVersions.current}/tools/$path")
      else
        super.toURI
    }
  }
}
| xuwei-k/scala-js | tools/shared/src/main/scala/org/scalajs/core/tools/linker/backend/emitter/CoreJSLibs.scala | Scala | bsd-3-clause | 5,546 |
import leon._
import leon.lang._
import leon.collection._
object Parser {

  // Token alphabet of a minimal s-expression language.
  abstract class Token
  case object LParen extends Token
  case object RParen extends Token
  case class Id(id : Int) extends Token

  // Parse trees: applications over sub-trees, or identifier leaves.
  abstract class Tree
  case class App(args : _root_.collection.List[Tree]) extends Tree
  case class Leaf(id : Int) extends Tree

  /**
   * Parses a single tree from the token stream, returning the tree (if any) plus
   * the unconsumed tokens. The `ensuring` clause states the round-trip property:
   * printing the parsed tree followed by the leftover tokens reproduces the input.
   */
  def parse(in : _root_.collection.List[Token]) : (Option[Tree], _root_.collection.List[Token]) = { in match {
    case Cons(Id(s), tl) =>
      (Some[Tree](Leaf(s)),tl)
    case Cons(LParen, tl) => parseMany(tl) match {
      case (Some(trees:_root_.collection.Cons[Tree]), Cons(RParen,rest)) =>
        (Some[Tree](App(trees)), rest)
      case (_, rest) => (None[Tree](), rest)
    }
    case _ => (None[Tree](), in)
  }} ensuring { _ match {
    case ( Some(tree), rest@Cons(h,_)) =>
      print(tree, rest) == in
    case ( None(), Cons(h,_) ) =>
      h == RParen
    case _ => true
  }}

  /**
   * Parses as many consecutive trees as possible.
   * NOTE(review): the success case below returns `trees` without prepending the
   * just-parsed `tree` (see the FIXME) — the tree is dropped. Given this file
   * appears to be a program-repair benchmark, the bug is likely intentional;
   * confirm before "fixing" it.
   */
  def parseMany(in : _root_.collection.List[Token]) : (Option[_root_.collection.List[Tree]], _root_.collection.List[Token]) = { parse(in) match {
    case (None(), rest) if rest == in => (Some[_root_.collection.List[Tree]](_root_.collection.Nil()), in)
    case (None(), rest) => (None[_root_.collection.List[Tree]](), rest)
    case (Some(tree), rest) => parseMany(rest) match {
      case ( None(), rest2) => (None[_root_.collection.List[Tree]](), rest2)
      case ( Some(trees), rest2) =>
        ( Some[_root_.collection.List[Tree]](trees), rest2 ) // FIXME: should be tree::trees
    }
  }} ensuring { _ match {
    case ( Some(t), rest@Cons(h, _) ) =>
      h == RParen && printL(t, rest) == in
    case ( None(), Cons(h, _)) =>
      h == RParen
    case _ => true
  }}

  // Prints a list of trees, prepending their token form to `rest`.
  def printL(args : _root_.collection.List[Tree], rest : _root_.collection.List[Token]) : _root_.collection.List[Token] = args match {
    case Nil() => rest
    case Cons(h,t) => print(h, printL(t, rest))
  }

  // Prints a single tree, prepending its token form to `rest`.
  def print(t : Tree, rest : _root_.collection.List[Token]) : _root_.collection.List[Token] = t match {
    case Leaf(s) => Id(s) :: rest
    case App(args) => LParen :: printL(args, RParen :: rest)
  }
}
| ericpony/scala-examples | testcases/repair/Parser/Parser2.scala | Scala | mit | 2,143 |
import org.springframework.context.ApplicationContext
import org.springframework.context.annotation.AnnotationConfigApplicationContext
import org.squeryl.PrimitiveTypeMode._
import util.{Try, Success, Failure}
import play.api._
import play.api.mvc._
import play.api.mvc.Results._
import play.api.Play.current
import play.api.libs.concurrent.Akka
import play.api.libs.concurrent.Execution.Implicits._
import models._
import misc.{Constants, SpringConfiguration}
object Global extends play.api.mvc.WithFilters(AccessLog) with GlobalSettings
{

  /** Application start-up: wire the Squeryl session factory, then seed the domain. */
  override def onStart(app: Application) {
    Logger.info("Starting up play-eventsourced")
    initSessionFactory(app)
    models.Database.initializeDomain()
  }

  /**
   * Configures Squeryl's global SessionFactory from the Play default datasource.
   * Only the H2 and PostgreSQL drivers are supported; any other configured driver
   * is treated as a fatal configuration error.
   */
  private def initSessionFactory(implicit app: Application) = {
    import play.api.db.DB
    import org.squeryl.internals.DatabaseAdapter
    import org.squeryl.adapters.{H2Adapter, PostgreSqlAdapter}
    import org.squeryl.{Session, SessionFactory}
    import play.api.Play.current

    // Each Squeryl session wraps a fresh JDBC connection from the default datasource.
    def getSession(app: Application, adapter: DatabaseAdapter): Session = {
      Session.create(DB.getConnection()(app), adapter)
    }

    SessionFactory.concreteFactory = app.configuration.getString("db.default.driver") match {
      case Some("org.h2.Driver") => Some(() => getSession(app, new H2Adapter))
      case Some("org.postgresql.Driver") => Some(() => getSession(app, new PostgreSqlAdapter))
      case _ =>
        // sys.error throws, so the dead `None` that used to follow it was removed
        sys.error("Database driver must be either org.h2.Driver or org.postgresql.Driver")
    }
    SessionFactory
  }

  // override def onHandlerNotFound(request: RequestHeader) = {
  //   Redirect(controllers.routes.Application.index())
  // }

  /** Application shutdown: stop the Akka actor system. */
  override def onStop(app: Application) = {
    Logger.info("Shutting down play-eventsourced")
    Akka.system.shutdown()
  }

  // Spring application context backing controller instantiation; never reassigned, so a val.
  private val ctx: ApplicationContext = new AnnotationConfigApplicationContext(classOf[SpringConfiguration])

  /** Resolves controller instances through Spring so they can be dependency-injected. */
  override def getControllerInstance[A](clazz: Class[A]): A = {
    ctx.getBean(clazz)
  }
}
object AccessLog extends Filter {
  import play.api.libs.concurrent.Execution.Implicits._
  import org.springframework.util.StopWatch

  /**
   * Filter that times every request, logs
   * "<method> <uri> took <ms> ms and returned <status>", and exposes the elapsed
   * time in a Request-Time response header. Handles both plain and async results.
   */
  def apply(next: (RequestHeader) => Result)(requestHeader: RequestHeader) = {
    // Use the imported StopWatch instead of the fully-qualified name (was inconsistent with the import above).
    val stopWatch = new StopWatch
    stopWatch.start()

    // Stops the watch, logs the timing line, and tags the response header.
    def logTime(result: PlainResult): Result = {
      stopWatch.stop()
      val time = stopWatch.getTotalTimeMillis()
      val status = result.header.status
      Logger.debug(s"${requestHeader.method} ${requestHeader.uri} took $time ms and returned $status")
      result.withHeaders("Request-Time" -> time.toString)
    }

    next(requestHeader) match {
      case plain: PlainResult => {
        Logger.debug("requestHeader has plain result")
        logTime(plain)
      }
      case async: AsyncResult => {
        Logger.debug("requestHeader has async result")
        // Async results are timed when they eventually materialize.
        async.transform(logTime)
      }
    }
  }
}
| alanktwong/play-crud | app/Global.scala | Scala | mit | 2,861 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.python
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.api.python.PythonEvalType
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
/**
 * Extracts all the Python UDFs in logical aggregate, which depends on aggregate expression or
 * grouping key, or doesn't depend on any above expressions, evaluate them after aggregate.
 */
object ExtractPythonUDFFromAggregate extends Rule[LogicalPlan] {

  /**
   * Returns whether the expression could only be evaluated within aggregate.
   */
  private def belongAggregate(e: Expression, agg: Aggregate): Boolean = {
    e.isInstanceOf[AggregateExpression] ||
      PythonUDF.isGroupedAggPandasUDF(e) ||
      agg.groupingExpressions.exists(_.semanticEquals(e))
  }

  // True if the expression contains a scalar Python UDF that either has no input
  // references or references something only computable inside the aggregate.
  private def hasPythonUdfOverAggregate(expr: Expression, agg: Aggregate): Boolean = {
    expr.find {
      e => PythonUDF.isScalarPythonUDF(e) &&
        (e.references.isEmpty || e.find(belongAggregate(_, agg)).isDefined)
    }.isDefined
  }

  /**
   * Rewrites the Aggregate so that aggregate-dependent Python UDFs run afterwards:
   * the aggregate computes the aggregate/grouping sub-expressions under fresh
   * aliases, and a Project on top re-applies the UDF expressions over those aliases.
   */
  private def extract(agg: Aggregate): LogicalPlan = {
    val projList = new ArrayBuffer[NamedExpression]()
    val aggExpr = new ArrayBuffer[NamedExpression]()
    agg.aggregateExpressions.foreach { expr =>
      if (hasPythonUdfOverAggregate(expr, agg)) {
        // Python UDF can only be evaluated after aggregate
        val newE = expr transformDown {
          case e: Expression if belongAggregate(e, agg) =>
            val alias = e match {
              case a: NamedExpression => a
              case o => Alias(e, "agg")()
            }
            aggExpr += alias
            alias.toAttribute
        }
        projList += newE.asInstanceOf[NamedExpression]
      } else {
        aggExpr += expr
        projList += expr.toAttribute
      }
    }
    // There is no Python UDF over aggregate expression
    Project(projList.toSeq, agg.copy(aggregateExpressions = aggExpr.toSeq))
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case agg: Aggregate if agg.aggregateExpressions.exists(hasPythonUdfOverAggregate(_, agg)) =>
      extract(agg)
  }
}
/**
 * Extracts PythonUDFs in logical aggregate, which are used in grouping keys, evaluate them
 * before aggregate.
 * This must be executed after `ExtractPythonUDFFromAggregate` rule and before `ExtractPythonUDFs`.
 */
object ExtractGroupingPythonUDFFromAggregate extends Rule[LogicalPlan] {

  /** True if the expression tree contains any scalar Python UDF. */
  private def hasScalarPythonUDF(e: Expression): Boolean = {
    e.find(PythonUDF.isScalarPythonUDF).isDefined
  }

  /**
   * Rewrites the Aggregate so that Python UDFs appearing in grouping keys are
   * evaluated in a Project below the aggregate: each distinct (canonicalized) UDF
   * becomes an aliased projection, and both the grouping and aggregate expressions
   * are rewritten to reference that alias instead.
   */
  private def extract(agg: Aggregate): LogicalPlan = {
    val projList = new ArrayBuffer[NamedExpression]()
    val groupingExpr = new ArrayBuffer[Expression]()
    // Maps a canonicalized UDF to its alias so semantically-equal UDFs share one projection.
    val attributeMap = mutable.HashMap[PythonUDF, NamedExpression]()

    agg.groupingExpressions.foreach { expr =>
      if (hasScalarPythonUDF(expr)) {
        val newE = expr transformDown {
          case p: PythonUDF =>
            // This is just a sanity check, the rule PullOutNondeterministic should
            // already pull out those nondeterministic expressions.
            assert(p.udfDeterministic, "Non-deterministic PythonUDFs should not appear " +
              "in grouping expression")
            val canonicalized = p.canonicalized.asInstanceOf[PythonUDF]
            if (attributeMap.contains(canonicalized)) {
              attributeMap(canonicalized)
            } else {
              val alias = Alias(p, "groupingPythonUDF")()
              projList += alias
              attributeMap += ((canonicalized, alias.toAttribute))
              alias.toAttribute
            }
        }
        groupingExpr += newE
      } else {
        groupingExpr += expr
      }
    }
    val aggExpr = agg.aggregateExpressions.map { expr =>
      expr.transformUp {
        // PythonUDF over aggregate was pull out by ExtractPythonUDFFromAggregate.
        // PythonUDF here should be either
        // 1. Argument of an aggregate function.
        //    CheckAnalysis guarantees the arguments are deterministic.
        // 2. PythonUDF in grouping key. Grouping key must be deterministic.
        // 3. PythonUDF not in grouping key. It is either no arguments or with grouping key
        //    in its arguments. Such PythonUDF was pull out by ExtractPythonUDFFromAggregate, too.
        case p: PythonUDF if p.udfDeterministic =>
          val canonicalized = p.canonicalized.asInstanceOf[PythonUDF]
          attributeMap.getOrElse(canonicalized, p)
      }.asInstanceOf[NamedExpression]
    }
    agg.copy(
      groupingExpressions = groupingExpr.toSeq,
      aggregateExpressions = aggExpr,
      child = Project((projList ++ agg.child.output).toSeq, agg.child))
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case agg: Aggregate if agg.groupingExpressions.exists(hasScalarPythonUDF(_)) =>
      extract(agg)
  }
}
/**
 * Extracts PythonUDFs from operators, rewriting the query plan so that the UDF can be evaluated
 * alone in a batch.
 *
 * Only extracts the PythonUDFs that could be evaluated in Python (the single child is PythonUDFs
 * or all the children could be evaluated in JVM).
 *
 * This has the limitation that the input to the Python UDF is not allowed to include attributes
 * from multiple child operators.
 */
object ExtractPythonUDFs extends Rule[LogicalPlan] with PredicateHelper {
  private type EvalType = Int
  private type EvalTypeChecker = EvalType => Boolean

  /** True if `e` contains a scalar Python UDF anywhere in its subtree. */
  private def hasScalarPythonUDF(e: Expression): Boolean = {
    e.find(PythonUDF.isScalarPythonUDF).isDefined
  }

  /**
   * A UDF can be pushed to Python when its children are either a single chainable Python UDF
   * of the same eval type, or expressions free of Python UDFs (evaluable in the JVM).
   */
  private def canEvaluateInPython(e: PythonUDF): Boolean = {
    e.children match {
      // single PythonUDF child could be chained and evaluated in Python
      case Seq(u: PythonUDF) => e.evalType == u.evalType && canEvaluateInPython(u)
      // Python UDF can't be evaluated directly in JVM
      case children => !children.exists(hasScalarPythonUDF)
    }
  }

  /**
   * Collects the Python UDFs that can be extracted together in one batch: all of them must
   * share the eval type of the first UDF visited.
   */
  private def collectEvaluableUDFsFromExpressions(expressions: Seq[Expression]): Seq[PythonUDF] = {
    // If first UDF is SQL_SCALAR_PANDAS_ITER_UDF, then only return this UDF,
    // otherwise check if subsequent UDFs are of the same type as the first UDF. (since we can only
    // extract UDFs of the same eval type)

    var firstVisitedScalarUDFEvalType: Option[Int] = None

    def canChainUDF(evalType: Int): Boolean = {
      if (evalType == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF) {
        false
      } else {
        evalType == firstVisitedScalarUDFEvalType.get
      }
    }

    def collectEvaluableUDFs(expr: Expression): Seq[PythonUDF] = expr match {
      case udf: PythonUDF if PythonUDF.isScalarPythonUDF(udf) && canEvaluateInPython(udf)
        && firstVisitedScalarUDFEvalType.isEmpty =>
        firstVisitedScalarUDFEvalType = Some(udf.evalType)
        Seq(udf)
      case udf: PythonUDF if PythonUDF.isScalarPythonUDF(udf) && canEvaluateInPython(udf)
        && canChainUDF(udf.evalType) =>
        Seq(udf)
      case e => e.children.flatMap(collectEvaluableUDFs)
    }

    expressions.flatMap(collectEvaluableUDFs)
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan match {
    // SPARK-26293: A subquery will be rewritten into join later, and will go through this rule
    // eventually. Here we skip subquery, as Python UDF only needs to be extracted once.
    case s: Subquery if s.correlated => plan
    case _ => plan transformUp {
      // A safe guard. `ExtractPythonUDFs` only runs once, so we will not hit `BatchEvalPython` and
      // `ArrowEvalPython` in the input plan. However if we hit them, we must skip them, as we can't
      // extract Python UDFs from them.
      case p: BatchEvalPython => p
      case p: ArrowEvalPython => p
      case plan: LogicalPlan => extract(plan)
    }
  }

  /**
   * Extract all the PythonUDFs from the current operator and evaluate them before the operator.
   */
  private def extract(plan: LogicalPlan): LogicalPlan = {
    val udfs = collectEvaluableUDFsFromExpressions(plan.expressions)
      // ignore the PythonUDFs that come from second/third aggregate, which is not used
      .filter(udf => udf.references.subsetOf(plan.inputSet))
    if (udfs.isEmpty) {
      // If there aren't any, we are done.
      plan
    } else {
      val attributeMap = mutable.HashMap[PythonUDF, Expression]()
      // Rewrite the child that has the input required for the UDF
      val newChildren = plan.children.map { child =>
        // Pick the UDF we are going to evaluate
        val validUdfs = udfs.filter { udf =>
          // Check to make sure that the UDF can be evaluated with only the input of this child.
          udf.references.subsetOf(child.outputSet)
        }
        if (validUdfs.nonEmpty) {
          require(
            validUdfs.forall(PythonUDF.isScalarPythonUDF),
            "Can only extract scalar vectorized udf or sql batch udf")
          val resultAttrs = validUdfs.zipWithIndex.map { case (u, i) =>
            AttributeReference(s"pythonUDF$i", u.dataType)()
          }
          // All UDFs in one evaluation node must share a single eval type.
          val evalTypes = validUdfs.map(_.evalType).toSet
          if (evalTypes.size != 1) {
            throw new AnalysisException(
              s"Expected udfs have the same evalType but got different evalTypes: " +
                s"${evalTypes.mkString(",")}")
          }
          val evalType = evalTypes.head
          val evaluation = evalType match {
            case PythonEvalType.SQL_BATCHED_UDF =>
              BatchEvalPython(validUdfs, resultAttrs, child)
            case PythonEvalType.SQL_SCALAR_PANDAS_UDF | PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF =>
              ArrowEvalPython(validUdfs, resultAttrs, child, evalType)
            case _ =>
              throw new AnalysisException("Unexpected UDF evalType")
          }
          attributeMap ++= validUdfs.zip(resultAttrs)
          evaluation
        } else {
          child
        }
      }
      // Other cases are disallowed as they are ambiguous or would require a cartesian
      // product.
      udfs.filterNot(attributeMap.contains).foreach { udf =>
        sys.error(s"Invalid PythonUDF $udf, requires attributes from more than one child.")
      }
      val rewritten = plan.withNewChildren(newChildren).transformExpressions {
        case p: PythonUDF if attributeMap.contains(p) =>
          attributeMap(p)
      }
      // extract remaining python UDFs recursively
      val newPlan = extract(rewritten)
      if (newPlan.output != plan.output) {
        // Trim away the new UDF value if it was only used for filtering or something.
        Project(plan.output, newPlan)
      } else {
        newPlan
      }
    }
  }
}
| dbtsai/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFs.scala | Scala | apache-2.0 | 11,658 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.yarn
import java.io.{DataOutputStream, File, FileOutputStream, IOException}
import java.nio.ByteBuffer
import java.nio.file.Files
import java.nio.file.attribute.PosixFilePermission._
import java.util.EnumSet
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import com.codahale.metrics.MetricSet
import org.apache.hadoop.fs.Path
import org.apache.hadoop.metrics2.impl.MetricsSystemImpl
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem
import org.apache.hadoop.service.ServiceStateException
import org.apache.hadoop.yarn.api.records.ApplicationId
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.server.api.{ApplicationInitializationContext, ApplicationTerminationContext}
import org.mockito.Mockito.{mock, when}
import org.scalatest.BeforeAndAfterEach
import org.scalatest.concurrent.Eventually._
import org.scalatest.matchers.must.Matchers
import org.scalatest.matchers.should.Matchers._
import org.apache.spark.SecurityManager
import org.apache.spark.SparkFunSuite
import org.apache.spark.internal.config._
import org.apache.spark.network.shuffle.{ExternalBlockHandler, RemoteBlockPushResolver, ShuffleTestAccessor}
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo
import org.apache.spark.network.util.TransportConf
import org.apache.spark.util.Utils
/**
 * Tests for [[YarnShuffleService]]: recovery of executor registrations and auth secrets across
 * simulated NodeManager restarts, recovery-path handling, robustness to corrupt recovery files,
 * metrics registration, and merged-shuffle-file-manager instantiation.
 */
class YarnShuffleServiceSuite extends SparkFunSuite with Matchers with BeforeAndAfterEach {
  private[yarn] var yarnConfig: YarnConfiguration = null
  private[yarn] val SORT_MANAGER = "org.apache.spark.shuffle.sort.SortShuffleManager"
  // Directory used as the service's recovery path in most tests; recreated before each test.
  private var recoveryLocalDir: File = _

  override def beforeEach(): Unit = {
    super.beforeEach()
    // Register the shuffle service as a YARN auxiliary service on an ephemeral port, and make
    // the service fail fast (STOP_ON_FAILURE_KEY) so broken starts surface as test failures.
    yarnConfig = new YarnConfiguration()
    yarnConfig.set(YarnConfiguration.NM_AUX_SERVICES, "spark_shuffle")
    yarnConfig.set(YarnConfiguration.NM_AUX_SERVICE_FMT.format("spark_shuffle"),
      classOf[YarnShuffleService].getCanonicalName)
    yarnConfig.setInt(SHUFFLE_SERVICE_PORT.key, 0)
    yarnConfig.setBoolean(YarnShuffleService.STOP_ON_FAILURE_KEY, true)
    val localDir = Utils.createTempDir()
    yarnConfig.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath)
    recoveryLocalDir = Utils.createTempDir()
  }

  // Up to three service instances per test (to simulate successive NM restarts); any that were
  // started are stopped in afterEach.
  var s1: YarnShuffleService = null
  var s2: YarnShuffleService = null
  var s3: YarnShuffleService = null

  override def afterEach(): Unit = {
    try {
      if (s1 != null) {
        s1.stop()
        s1 = null
      }
      if (s2 != null) {
        s2.stop()
        s2 = null
      }
      if (s3 != null) {
        s3.stop()
        s3 = null
      }
    } finally {
      super.afterEach()
    }
  }

  test("executor state kept across NM restart") {
    s1 = new YarnShuffleService
    s1.setRecoveryPath(new Path(recoveryLocalDir.toURI))
    // set auth to true to test the secrets recovery
    yarnConfig.setBoolean(SecurityManager.SPARK_AUTH_CONF, true)
    s1.init(yarnConfig)
    val app1Id = ApplicationId.newInstance(0, 1)
    val app1Data = makeAppInfo("user", app1Id)
    s1.initializeApplication(app1Data)
    val app2Id = ApplicationId.newInstance(0, 2)
    val app2Data = makeAppInfo("user", app2Id)
    s1.initializeApplication(app2Data)
    val execStateFile = s1.registeredExecutorFile
    execStateFile should not be (null)
    val secretsFile = s1.secretsFile
    secretsFile should not be (null)
    val shuffleInfo1 = new ExecutorShuffleInfo(Array("/foo", "/bar"), 3, SORT_MANAGER)
    val shuffleInfo2 = new ExecutorShuffleInfo(Array("/bippy"), 5, SORT_MANAGER)
    val blockHandler = s1.blockHandler
    val blockResolver = ShuffleTestAccessor.getBlockResolver(blockHandler)
    ShuffleTestAccessor.registeredExecutorFile(blockResolver) should be (execStateFile)
    blockResolver.registerExecutor(app1Id.toString, "exec-1", shuffleInfo1)
    blockResolver.registerExecutor(app2Id.toString, "exec-2", shuffleInfo2)
    ShuffleTestAccessor.getExecutorInfo(app1Id, "exec-1", blockResolver) should
      be (Some(shuffleInfo1))
    ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", blockResolver) should
      be (Some(shuffleInfo2))
    // Extra diagnostics: if the recovery DB is missing, report the closest existing ancestor
    // directory to make the failure easier to debug.
    if (!execStateFile.exists()) {
      @tailrec def findExistingParent(file: File): File = {
        if (file == null) file
        else if (file.exists()) file
        else findExistingParent(file.getParentFile())
      }
      val existingParent = findExistingParent(execStateFile)
      assert(false, s"$execStateFile does not exist -- closest existing parent is $existingParent")
    }
    assert(execStateFile.exists(), s"$execStateFile did not exist")
    // now we pretend the shuffle service goes down, and comes back up
    s1.stop()
    s2 = new YarnShuffleService
    s2.setRecoveryPath(new Path(recoveryLocalDir.toURI))
    s2.init(yarnConfig)
    s2.secretsFile should be (secretsFile)
    s2.registeredExecutorFile should be (execStateFile)
    val handler2 = s2.blockHandler
    val resolver2 = ShuffleTestAccessor.getBlockResolver(handler2)
    // now we reinitialize only one of the apps, and expect yarn to tell us that app2 was stopped
    // during the restart
    s2.initializeApplication(app1Data)
    s2.stopApplication(new ApplicationTerminationContext(app2Id))
    ShuffleTestAccessor.getExecutorInfo(app1Id, "exec-1", resolver2) should be (Some(shuffleInfo1))
    ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", resolver2) should be (None)
    // Act like the NM restarts one more time
    s2.stop()
    s3 = new YarnShuffleService
    s3.setRecoveryPath(new Path(recoveryLocalDir.toURI))
    s3.init(yarnConfig)
    s3.registeredExecutorFile should be (execStateFile)
    s3.secretsFile should be (secretsFile)
    val handler3 = s3.blockHandler
    val resolver3 = ShuffleTestAccessor.getBlockResolver(handler3)
    // app1 is still running
    s3.initializeApplication(app1Data)
    ShuffleTestAccessor.getExecutorInfo(app1Id, "exec-1", resolver3) should be (Some(shuffleInfo1))
    ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", resolver3) should be (None)
    s3.stop()
  }

  test("removed applications should not be in registered executor file") {
    s1 = new YarnShuffleService
    s1.setRecoveryPath(new Path(recoveryLocalDir.toURI))
    // Auth disabled, so no secrets file should be created.
    yarnConfig.setBoolean(SecurityManager.SPARK_AUTH_CONF, false)
    s1.init(yarnConfig)
    val secretsFile = s1.secretsFile
    secretsFile should be (null)
    val app1Id = ApplicationId.newInstance(0, 1)
    val app1Data = makeAppInfo("user", app1Id)
    s1.initializeApplication(app1Data)
    val app2Id = ApplicationId.newInstance(0, 2)
    val app2Data = makeAppInfo("user", app2Id)
    s1.initializeApplication(app2Data)
    val execStateFile = s1.registeredExecutorFile
    execStateFile should not be (null)
    val shuffleInfo1 = new ExecutorShuffleInfo(Array("/foo", "/bar"), 3, SORT_MANAGER)
    val shuffleInfo2 = new ExecutorShuffleInfo(Array("/bippy"), 5, SORT_MANAGER)
    val blockHandler = s1.blockHandler
    val blockResolver = ShuffleTestAccessor.getBlockResolver(blockHandler)
    ShuffleTestAccessor.registeredExecutorFile(blockResolver) should be (execStateFile)
    blockResolver.registerExecutor(app1Id.toString, "exec-1", shuffleInfo1)
    blockResolver.registerExecutor(app2Id.toString, "exec-2", shuffleInfo2)
    // Stopping each application should remove its executors from the recovery DB.
    val db = ShuffleTestAccessor.shuffleServiceLevelDB(blockResolver)
    ShuffleTestAccessor.reloadRegisteredExecutors(db) should not be empty
    s1.stopApplication(new ApplicationTerminationContext(app1Id))
    ShuffleTestAccessor.reloadRegisteredExecutors(db) should not be empty
    s1.stopApplication(new ApplicationTerminationContext(app2Id))
    ShuffleTestAccessor.reloadRegisteredExecutors(db) shouldBe empty
  }

  test("shuffle service should be robust to corrupt registered executor file") {
    s1 = new YarnShuffleService
    s1.setRecoveryPath(new Path(recoveryLocalDir.toURI))
    s1.init(yarnConfig)
    val app1Id = ApplicationId.newInstance(0, 1)
    val app1Data = makeAppInfo("user", app1Id)
    s1.initializeApplication(app1Data)
    val execStateFile = s1.registeredExecutorFile
    val shuffleInfo1 = new ExecutorShuffleInfo(Array("/foo", "/bar"), 3, SORT_MANAGER)
    val blockHandler = s1.blockHandler
    val blockResolver = ShuffleTestAccessor.getBlockResolver(blockHandler)
    ShuffleTestAccessor.registeredExecutorFile(blockResolver) should be (execStateFile)
    blockResolver.registerExecutor(app1Id.toString, "exec-1", shuffleInfo1)
    // now we pretend the shuffle service goes down, and comes back up. But we'll also
    // make a corrupt registeredExecutor File
    s1.stop()
    execStateFile.listFiles().foreach{_.delete()}
    // Write garbage where the DB expects its CURRENT manifest file.
    val out = new DataOutputStream(new FileOutputStream(execStateFile + "/CURRENT"))
    out.writeInt(42)
    out.close()
    s2 = new YarnShuffleService
    s2.setRecoveryPath(new Path(recoveryLocalDir.toURI))
    s2.init(yarnConfig)
    s2.registeredExecutorFile should be (execStateFile)
    val handler2 = s2.blockHandler
    val resolver2 = ShuffleTestAccessor.getBlockResolver(handler2)
    // we re-initialize app1, but since the file was corrupt there is nothing we can do about it ...
    s2.initializeApplication(app1Data)
    // however, when we initialize a totally new app2, everything is still happy
    val app2Id = ApplicationId.newInstance(0, 2)
    val app2Data = makeAppInfo("user", app2Id)
    s2.initializeApplication(app2Data)
    val shuffleInfo2 = new ExecutorShuffleInfo(Array("/bippy"), 5, SORT_MANAGER)
    resolver2.registerExecutor(app2Id.toString, "exec-2", shuffleInfo2)
    ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", resolver2) should be (Some(shuffleInfo2))
    s2.stop()
    // another stop & restart should be fine though (e.g., we recover from previous corruption)
    s3 = new YarnShuffleService
    s3.setRecoveryPath(new Path(recoveryLocalDir.toURI))
    s3.init(yarnConfig)
    s3.registeredExecutorFile should be (execStateFile)
    val handler3 = s3.blockHandler
    val resolver3 = ShuffleTestAccessor.getBlockResolver(handler3)
    s3.initializeApplication(app2Data)
    ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", resolver3) should be (Some(shuffleInfo2))
    s3.stop()
  }

  test("get correct recovery path") {
    // Test recovery path is set outside the shuffle service, this is to simulate NM recovery
    // enabled scenario, where recovery path will be set by yarn.
    s1 = new YarnShuffleService
    val recoveryPath = new Path(Utils.createTempDir().toURI)
    s1.setRecoveryPath(recoveryPath)
    s1.init(yarnConfig)
    s1._recoveryPath should be (recoveryPath)
    s1.stop()
  }

  test("moving recovery file from NM local dir to recovery path") {
    // This is to test when Hadoop is upgraded to 2.5+ and NM recovery is enabled, we should move
    // the old recovery file to the new path to keep compatibility
    // Simulate s1 is running on an old version of Hadoop in which the recovery file is in the NM
    // local dir.
    s1 = new YarnShuffleService
    s1.setRecoveryPath(new Path(yarnConfig.getTrimmedStrings(YarnConfiguration.NM_LOCAL_DIRS)(0)))
    // set auth to true to test the secrets recovery
    yarnConfig.setBoolean(SecurityManager.SPARK_AUTH_CONF, true)
    s1.init(yarnConfig)
    val app1Id = ApplicationId.newInstance(0, 1)
    val app1Data = makeAppInfo("user", app1Id)
    s1.initializeApplication(app1Data)
    val app2Id = ApplicationId.newInstance(0, 2)
    val app2Data = makeAppInfo("user", app2Id)
    s1.initializeApplication(app2Data)
    assert(s1.secretManager.getSecretKey(app1Id.toString()) != null)
    assert(s1.secretManager.getSecretKey(app2Id.toString()) != null)
    val execStateFile = s1.registeredExecutorFile
    execStateFile should not be (null)
    val secretsFile = s1.secretsFile
    secretsFile should not be (null)
    val shuffleInfo1 = new ExecutorShuffleInfo(Array("/foo", "/bar"), 3, SORT_MANAGER)
    val shuffleInfo2 = new ExecutorShuffleInfo(Array("/bippy"), 5, SORT_MANAGER)
    val blockHandler = s1.blockHandler
    val blockResolver = ShuffleTestAccessor.getBlockResolver(blockHandler)
    ShuffleTestAccessor.registeredExecutorFile(blockResolver) should be (execStateFile)
    blockResolver.registerExecutor(app1Id.toString, "exec-1", shuffleInfo1)
    blockResolver.registerExecutor(app2Id.toString, "exec-2", shuffleInfo2)
    ShuffleTestAccessor.getExecutorInfo(app1Id, "exec-1", blockResolver) should
      be (Some(shuffleInfo1))
    ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", blockResolver) should
      be (Some(shuffleInfo2))
    assert(execStateFile.exists(), s"$execStateFile did not exist")
    s1.stop()
    // Simulate s2 is running on Hadoop 2.5+ with NM recovery enabled: the recovery files should
    // be moved from the NM local dir into the recovery path.
    assert(execStateFile.exists())
    val recoveryPath = new Path(recoveryLocalDir.toURI)
    s2 = new YarnShuffleService
    s2.setRecoveryPath(recoveryPath)
    s2.init(yarnConfig)
    // Ensure that s2 has loaded known apps from the secrets db.
    assert(s2.secretManager.getSecretKey(app1Id.toString()) != null)
    assert(s2.secretManager.getSecretKey(app2Id.toString()) != null)
    val execStateFile2 = s2.registeredExecutorFile
    val secretsFile2 = s2.secretsFile
    recoveryPath.toString should be (new Path(execStateFile2.getParentFile.toURI).toString)
    recoveryPath.toString should be (new Path(secretsFile2.getParentFile.toURI).toString)
    // The old files are deleted asynchronously, so poll until they disappear.
    eventually(timeout(10.seconds), interval(5.milliseconds)) {
      assert(!execStateFile.exists())
    }
    eventually(timeout(10.seconds), interval(5.milliseconds)) {
      assert(!secretsFile.exists())
    }
    val handler2 = s2.blockHandler
    val resolver2 = ShuffleTestAccessor.getBlockResolver(handler2)
    // now we reinitialize only one of the apps, and expect yarn to tell us that app2 was stopped
    // during the restart
    // Since the recovery file was moved from the old path, the previous state should be kept.
    s2.initializeApplication(app1Data)
    s2.stopApplication(new ApplicationTerminationContext(app2Id))
    ShuffleTestAccessor.getExecutorInfo(app1Id, "exec-1", resolver2) should be (Some(shuffleInfo1))
    ShuffleTestAccessor.getExecutorInfo(app2Id, "exec-2", resolver2) should be (None)
    s2.stop()
  }

  test("service throws error if cannot start") {
    // Set up a read-only local dir.
    val roDir = Utils.createTempDir()
    Files.setPosixFilePermissions(roDir.toPath(), EnumSet.of(OWNER_READ, OWNER_EXECUTE))
    // Try to start the shuffle service, it should fail.
    val service = new YarnShuffleService()
    service.setRecoveryPath(new Path(roDir.toURI))
    try {
      val error = intercept[ServiceStateException] {
        service.init(yarnConfig)
      }
      assert(error.getCause().isInstanceOf[IOException])
    } finally {
      service.stop()
      // Restore write permission so the temp dir can be cleaned up.
      Files.setPosixFilePermissions(roDir.toPath(),
        EnumSet.of(OWNER_READ, OWNER_WRITE, OWNER_EXECUTE))
    }
  }

  /** Builds an application init context with an empty auth secret for the given user/app. */
  private def makeAppInfo(user: String, appId: ApplicationId): ApplicationInitializationContext = {
    val secret = ByteBuffer.wrap(new Array[Byte](0))
    new ApplicationInitializationContext(user, appId, secret)
  }

  test("recovery db should not be created if NM recovery is not enabled") {
    s1 = new YarnShuffleService
    s1.init(yarnConfig)
    s1._recoveryPath should be (null)
    s1.registeredExecutorFile should be (null)
    s1.secretsFile should be (null)
  }

  test("SPARK-31646: metrics should be registered into Node Manager's metrics system") {
    s1 = new YarnShuffleService
    s1.init(yarnConfig)
    val metricsSource = DefaultMetricsSystem.instance.asInstanceOf[MetricsSystemImpl]
      .getSource("sparkShuffleService").asInstanceOf[YarnShuffleServiceMetrics]
    // Access the private metricSet field reflectively to inspect the registered metric names.
    val metricSetRef = classOf[YarnShuffleServiceMetrics].getDeclaredField("metricSet")
    metricSetRef.setAccessible(true)
    val metrics = metricSetRef.get(metricsSource).asInstanceOf[MetricSet].getMetrics
    assert(metrics.keySet().asScala == Set(
      "blockTransferRateBytes",
      "numActiveConnections",
      "numCaughtExceptions",
      "numRegisteredConnections",
      "openBlockRequestLatencyMillis",
      "registeredExecutorsSize",
      "registerExecutorRequestLatencyMillis",
      "finalizeShuffleMergeLatencyMillis",
      "shuffle-server.usedDirectMemory",
      "shuffle-server.usedHeapMemory"
    ))
  }

  test("create default merged shuffle file manager instance") {
    val mockConf = mock(classOf[TransportConf])
    when(mockConf.mergedShuffleFileManagerImpl).thenReturn(
      "org.apache.spark.network.shuffle.ExternalBlockHandler$NoOpMergedShuffleFileManager")
    val mergeMgr = YarnShuffleService.newMergedShuffleFileManagerInstance(mockConf)
    assert(mergeMgr.isInstanceOf[ExternalBlockHandler.NoOpMergedShuffleFileManager])
  }

  test("create remote block push resolver instance") {
    val mockConf = mock(classOf[TransportConf])
    when(mockConf.mergedShuffleFileManagerImpl).thenReturn(
      "org.apache.spark.network.shuffle.RemoteBlockPushResolver")
    val mergeMgr = YarnShuffleService.newMergedShuffleFileManagerInstance(mockConf)
    assert(mergeMgr.isInstanceOf[RemoteBlockPushResolver])
  }

  test("invalid class name of merge manager will use noop instance") {
    val mockConf = mock(classOf[TransportConf])
    when(mockConf.mergedShuffleFileManagerImpl).thenReturn(
      "org.apache.spark.network.shuffle.NotExistent")
    val mergeMgr = YarnShuffleService.newMergedShuffleFileManagerInstance(mockConf)
    assert(mergeMgr.isInstanceOf[ExternalBlockHandler.NoOpMergedShuffleFileManager])
  }
}
| witgo/spark | resource-managers/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceSuite.scala | Scala | apache-2.0 | 18,357 |
package TAPLcomp2.equirec
import scala.text.Document
// outer means that the term is the top-level term
object EquiRecPrinter {

  import TAPLcomp2.Print._

  /** Top level of the type grammar: `Rec` binders first, everything else is an arrow type. */
  def ptyType(outer: Boolean, ty: Ty): Document = ty match {
    case TyRec(x, body) =>
      g2("Rec " :: x :: "." :/: ptyType(outer, body))
    case other =>
      ptyArrowType(outer, other)
  }

  /** Arrow types; `->` associates to the right. */
  def ptyArrowType(outer: Boolean, tyT: Ty): Document = tyT match {
    case TyArr(from, to) =>
      g2(ptyAType(false, from) :: " ->" :/: ptyArrowType(outer, to))
    case other =>
      ptyAType(outer, other)
  }

  /** Atomic types: bare variables stay as-is, anything else is parenthesized. */
  def ptyAType(outer: Boolean, tyT: Ty): Document = tyT match {
    case TyVar(name) => name
    case other => "(" :: ptyType(outer, other) :: ")"
  }

  def ptyTy(ty: Ty): Document = ptyType(true, ty)

  /** Top level of the term grammar: lambda abstractions, then applications. */
  def ptmTerm(outer: Boolean, t: Term): Document = t match {
    case TmAbs(x, tyT1, t2) =>
      val head = g0("lambda" :/: x :: ":" :/: ptyType(false, tyT1) :: ".")
      g2(head :/: ptmTerm(outer, t2))
    case other =>
      ptmAppTerm(outer, other)
  }

  /** Applications; juxtaposition associates to the left. */
  def ptmAppTerm(outer: Boolean, t: Term): Document = t match {
    case TmApp(fun, arg) =>
      g2(ptmAppTerm(false, fun) :/: ptmATerm(false, arg))
    case other =>
      ptmATerm(outer, other)
  }

  /** Atomic terms: variables stay bare, anything else is parenthesized. */
  def ptmATerm(outer: Boolean, t: Term): Document = t match {
    case TmVar(name) =>
      name
    case other =>
      "(" :: ptmTerm(outer, other) :: ")"
  }

  def ptm(t: Term): Document = ptmTerm(true, t)
}
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.voltdb
import org.apache.kafka.connect.data.{Schema, SchemaBuilder, Struct}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
/**
 * Tests for `StructFieldsExtractor`: field extraction from Connect Structs, with and without
 * field-name mappings, and restriction to an explicit field subset.
 */
class StructFieldsExtractorTest extends AnyWordSpec with Matchers {
  "StructFieldsExtractor" should {
    "return all the fields and their bytes value" in {
      val schema = SchemaBuilder.struct().name("com.example.Person")
        .field("firstName", Schema.STRING_SCHEMA)
        .field("lastName", Schema.STRING_SCHEMA)
        .field("age", Schema.INT32_SCHEMA)
        .field("threshold", Schema.OPTIONAL_FLOAT64_SCHEMA).build()

      val struct = new Struct(schema)
        .put("firstName", "Alex")
        .put("lastName", "Smith")
        .put("age", 30)

      // includeAllFields = true with no mapping: every populated field comes back unchanged.
      // (The optional, unset "threshold" field is not asserted on.)
      val map = StructFieldsExtractor("table", true, Map.empty).get(struct)

      map("firstName") shouldBe "Alex"
      map("lastName") shouldBe "Smith"
      map("age") shouldBe 30
    }

    "return all fields and apply the mapping" in {
      val schema = SchemaBuilder.struct().name("com.example.Person")
        .field("firstName", Schema.STRING_SCHEMA)
        .field("lastName", Schema.STRING_SCHEMA)
        .field("age", Schema.INT32_SCHEMA)
        .field("threshold", Schema.OPTIONAL_FLOAT64_SCHEMA).build()

      val struct = new Struct(schema)
        .put("firstName", "Alex")
        .put("lastName", "Smith")
        .put("age", 30)

      // Mapped fields are renamed; unmapped fields keep their original names.
      val map = StructFieldsExtractor("table", includeAllFields = true, Map("lastName" -> "Name", "age" -> "a")).get(struct)

      map("firstName") shouldBe "Alex"
      map("Name") shouldBe "Smith"
      map("a") shouldBe 30
    }

    "return only the specified fields" in {
      val schema = SchemaBuilder.struct().name("com.example.Person")
        .field("firstName", Schema.STRING_SCHEMA)
        .field("lastName", Schema.STRING_SCHEMA)
        .field("age", Schema.INT32_SCHEMA)
        .field("threshold", Schema.OPTIONAL_FLOAT64_SCHEMA).build()

      val struct = new Struct(schema)
        .put("firstName", "Alex")
        .put("lastName", "Smith")
        .put("age", 30)

      // includeAllFields = false: only fields named in the mapping are extracted.
      val map = StructFieldsExtractor("table", includeAllFields = false, Map("lastName" -> "Name", "age" -> "age")).get(struct)

      map("Name") shouldBe "Smith"
      map("age") shouldBe 30
      map.size shouldBe 2
    }
  }
}
| datamountaineer/stream-reactor | kafka-connect-voltdb/src/test/scala/com/datamountaineer/streamreactor/connect/voltdb/StructFieldsExtractorTest.scala | Scala | apache-2.0 | 3,023 |
package view.gui
import java.awt.Toolkit
import javax.swing.SwingUtilities
import javax.swing.UIManager
/**
 * Swing/AWT bootstrap: enables font anti-aliasing and performs look-and-feel setup on the
 * event dispatch thread.
 */
object GUI extends Runnable {

  /** Sets anti-aliasing properties, then blocks until `run` has executed on the EDT. */
  def init(): Unit = {
    // Must be set before any AWT/Swing class renders text.
    System.setProperty("awt.useSystemAAFontSettings", "on")
    System.setProperty("swing.aatext", "true")
    SwingUtilities.invokeAndWait(this)
  }

  /** EDT setup: system look and feel, plus the X11 application class name. */
  def run(): Unit = {
    try {
      UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName())
    } catch {
      // Best effort: keep the default look and feel if the system one is unavailable.
      case _: Exception =>
    }
    val tk = Toolkit.getDefaultToolkit()
    try {
      // Reflectively override the toolkit's private awtAppClassName so window managers
      // show "rob-chess" instead of the default Java class name (X11 toolkit only).
      val awtAppClassNameField =
        tk.getClass().getDeclaredField("awtAppClassName")
      awtAppClassNameField.setAccessible(true)
      awtAppClassNameField.set(tk, "rob-chess")
    } catch {
      // Best effort: the field does not exist on non-X11 toolkits, and newer JDKs may
      // deny reflective access. Either way the app still works, just with a generic name.
      case _: Exception =>
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.nio.ByteBuffer
import java.util.Properties
import java.util.concurrent.{CountDownLatch, ExecutorService, LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit}
import java.util.concurrent.atomic.AtomicBoolean
import scala.collection.mutable.{ArrayBuffer, HashMap}
import scala.concurrent.duration._
import scala.language.reflectiveCalls
import org.mockito.ArgumentMatchers.{any, anyInt, anyString, eq => meq}
import org.mockito.Mockito.{atLeast, atMost, never, spy, times, verify, when}
import org.scalatest.BeforeAndAfterEach
import org.scalatest.concurrent.Eventually
import org.scalatestplus.mockito.MockitoSugar
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config
import org.apache.spark.resource.{ExecutorResourceRequests, ResourceProfile, TaskResourceRequests}
import org.apache.spark.resource.ResourceUtils._
import org.apache.spark.resource.TestResourceIDs._
import org.apache.spark.util.{Clock, ManualClock, ThreadUtils}
/**
 * Minimal SchedulerBackend stub for the suite: all lifecycle hooks are no-ops and the
 * parallelism/concurrency values are fixed constants.
 */
class FakeSchedulerBackend extends SchedulerBackend {
  def start(): Unit = {}
  def stop(): Unit = {}
  def reviveOffers(): Unit = {}
  def defaultParallelism(): Int = 1
  def maxNumConcurrentTasks(rp: ResourceProfile): Int = 0
}
class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with BeforeAndAfterEach
with Logging with MockitoSugar with Eventually {
var failedTaskSetException: Option[Throwable] = None
var failedTaskSetReason: String = null
var failedTaskSet = false
var healthTracker: HealthTracker = null
var taskScheduler: TaskSchedulerImpl = null
var dagScheduler: DAGScheduler = null
val stageToMockTaskSetExcludelist = new HashMap[Int, TaskSetExcludelist]()
val stageToMockTaskSetManager = new HashMap[Int, TaskSetManager]()
  override def beforeEach(): Unit = {
    super.beforeEach()
    // Reset the per-test bookkeeping that the fake DAGScheduler and mock TaskSetManagers mutate.
    failedTaskSet = false
    failedTaskSetException = None
    failedTaskSetReason = null
    stageToMockTaskSetExcludelist.clear()
    stageToMockTaskSetManager.clear()
  }
  override def afterEach(): Unit = {
    // Stop any schedulers created by the setup helpers before the superclass teardown runs.
    if (taskScheduler != null) {
      taskScheduler.stop()
      taskScheduler = null
    }
    if (dagScheduler != null) {
      dagScheduler.stop()
      dagScheduler = null
    }
    super.afterEach()
  }
  /** Creates a TaskSchedulerImpl on a `local` master with the given extra conf entries. */
  def setupScheduler(confs: (String, String)*): TaskSchedulerImpl = {
    setupSchedulerWithMaster("local", confs: _*)
  }
  /** Creates a TaskSchedulerImpl on a `local[numCores]` master with the given conf entries. */
  def setupScheduler(numCores: Int, confs: (String, String)*): TaskSchedulerImpl = {
    setupSchedulerWithMaster(s"local[$numCores]", confs: _*)
  }
/**
 * Creates a SparkContext against `master`, applies `confs`, builds the
 * TaskSchedulerImpl under test (also stored in the suite-level field), and
 * finishes wiring via setupHelper().
 */
def setupSchedulerWithMaster(master: String, confs: (String, String)*): TaskSchedulerImpl = {
val conf = new SparkConf().setMaster(master).setAppName("TaskSchedulerImplSuite")
confs.foreach { case (k, v) => conf.set(k, v) }
sc = new SparkContext(conf)
taskScheduler = new TaskSchedulerImpl(sc, sc.conf.get(config.TASK_MAX_FAILURES))
setupHelper()
}
/**
 * Like setupScheduler, but with exclude-on-failure enabled and with mock
 * TaskSetExcludelists plus a mock HealthTracker installed, so tests can script
 * exclusion decisions and verify that the scheduler consults them.
 * Each created TaskSetManager is wrapped in a Mockito spy and registered in
 * stageToMockTaskSetManager / stageToMockTaskSetExcludelist by stageId.
 */
def setupSchedulerWithMockTaskSetExcludelist(confs: (String, String)*): TaskSchedulerImpl = {
healthTracker = mock[HealthTracker]
val conf = new SparkConf().setMaster("local").setAppName("TaskSchedulerImplSuite")
conf.set(config.EXCLUDE_ON_FAILURE_ENABLED, true)
confs.foreach { case (k, v) => conf.set(k, v) }
sc = new SparkContext(conf)
taskScheduler =
new TaskSchedulerImpl(sc, sc.conf.get(config.TASK_MAX_FAILURES)) {
override def createTaskSetManager(taskSet: TaskSet, maxFailures: Int): TaskSetManager = {
val tsm = super.createTaskSetManager(taskSet, maxFailures)
// we need to create a spied tsm just so we can set the TaskSetExcludelist
val tsmSpy = spy(tsm)
val taskSetExcludelist = mock[TaskSetExcludelist]
when(tsmSpy.taskSetExcludelistHelperOpt).thenReturn(Some(taskSetExcludelist))
// Record the spy and its mock excludelist so tests can look them up by stage.
stageToMockTaskSetManager(taskSet.stageId) = tsmSpy
stageToMockTaskSetExcludelist(taskSet.stageId) = taskSetExcludelist
tsmSpy
}
// Route all health-tracking queries through the suite's mock.
override private[scheduler] lazy val healthTrackerOpt = Some(healthTracker)
}
setupHelper()
}
/**
 * Finishes scheduler construction: installs the fake backend and a
 * DAGScheduler stub whose taskSetFailed callback records the failure into the
 * suite fields (failedTaskSet / failedTaskSetReason / failedTaskSetException).
 */
def setupHelper(): TaskSchedulerImpl = {
taskScheduler.initialize(new FakeSchedulerBackend)
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
dagScheduler = new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo): Unit = {}
override def executorAdded(execId: String, host: String): Unit = {}
override def taskSetFailed(
taskSet: TaskSet,
reason: String,
exception: Option[Throwable]): Unit = {
// Normally the DAGScheduler puts this in the event loop, which will eventually fail
// dependent jobs
failedTaskSet = true
failedTaskSetReason = reason
failedTaskSetException = exception
}
}
taskScheduler
}
test("SPARK-32653: Decommissioned host/executor should be considered as inactive") {
val scheduler = setupScheduler()
val exec0 = "exec0"
val exec1 = "exec1"
val exec2 = "exec2"
val host0 = "host0"
val host1 = "host1"
val workerOffers = IndexedSeq(
WorkerOffer(exec0, host0, 1),
WorkerOffer(exec1, host0, 1),
WorkerOffer(exec2, host1, 1))
// Offering resources registers all three executors as alive on their hosts.
scheduler.resourceOffers(workerOffers)
assert(Seq(exec0, exec1, exec2).forall(scheduler.isExecutorAlive))
assert(Seq(host0, host1).forall(scheduler.hasExecutorsAliveOnHost))
assert(scheduler.getExecutorsAliveOnHost(host0)
.exists(s => s.contains(exec0) && s.contains(exec1)))
assert(scheduler.getExecutorsAliveOnHost(host1).exists(_.contains(exec2)))
// Decommission exec1 (executor only) and exec2 (together with its host, host1).
scheduler.executorDecommission(exec1, ExecutorDecommissionInfo("test", None))
scheduler.executorDecommission(exec2, ExecutorDecommissionInfo("test", Some(host1)))
// exec0 is untouched; the decommissioned executors and host1 must be inactive.
assert(scheduler.isExecutorAlive(exec0))
assert(!Seq(exec1, exec2).exists(scheduler.isExecutorAlive))
assert(scheduler.hasExecutorsAliveOnHost(host0))
assert(!scheduler.hasExecutorsAliveOnHost(host1))
}
test("Scheduler does not always schedule tasks on the same workers") {
val taskScheduler = setupScheduler()
val numFreeCores = 1
val workerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", numFreeCores),
new WorkerOffer("executor1", "host1", numFreeCores))
// Repeatedly try to schedule a 1-task job, and make sure that it doesn't always
// get scheduled on the same executor. While there is a chance this test will fail
// because the task randomly gets placed on the first executor all 1000 times, the
// probability of that happening is 2^-1000 (so sufficiently small to be considered
// negligible).
val numTrials = 1000
val selectedExecutorIds = 1.to(numTrials).map { _ =>
val taskSet = FakeTask.createTaskSet(1)
taskScheduler.submitTasks(taskSet)
val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
assert(1 === taskDescriptions.length)
taskDescriptions(0).executorId
}
// Both executors must have been chosen at least once across the trials.
val count = selectedExecutorIds.count(_ == workerOffers(0).executorId)
assert(count > 0)
assert(count < numTrials)
assert(!failedTaskSet)
}
test("Scheduler correctly accounts for multiple CPUs per task") {
val taskCpus = 2
val taskScheduler = setupSchedulerWithMaster(
s"local[$taskCpus]",
config.CPUS_PER_TASK.key -> taskCpus.toString)
// Give zero core offers. Should not generate any tasks
val zeroCoreWorkerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", 0),
new WorkerOffer("executor1", "host1", 0))
val taskSet = FakeTask.createTaskSet(1)
taskScheduler.submitTasks(taskSet)
var taskDescriptions = taskScheduler.resourceOffers(zeroCoreWorkerOffers).flatten
assert(0 === taskDescriptions.length)
// No tasks should run as we only have 1 core free.
val numFreeCores = 1
val singleCoreWorkerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", numFreeCores),
new WorkerOffer("executor1", "host1", numFreeCores))
taskScheduler.submitTasks(taskSet)
taskDescriptions = taskScheduler.resourceOffers(singleCoreWorkerOffers).flatten
assert(0 === taskDescriptions.length)
// Now change the offers to have 2 cores in one executor and verify if it
// is chosen.
val multiCoreWorkerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", taskCpus),
new WorkerOffer("executor1", "host1", numFreeCores))
taskScheduler.submitTasks(taskSet)
taskDescriptions = taskScheduler.resourceOffers(multiCoreWorkerOffers).flatten
assert(1 === taskDescriptions.length)
// Only executor0 has enough cores (2) to satisfy CPUS_PER_TASK.
assert("executor0" === taskDescriptions(0).executorId)
assert(!failedTaskSet)
}
/**
 * Builds a TaskSchedulerImpl wired for the SPARK-18886 delay-scheduling tests:
 * offer shuffling is disabled so the tests control offer order exactly, and an
 * 8-task set that is process-local to exec1/host1 is submitted after an initial
 * offer so its valid locality levels start at PROCESS_LOCAL.
 *
 * @param clock manual clock the tests advance to trigger locality escalation
 * @param conf  extra Spark configuration for the local SparkContext
 * @return the scheduler under test (also used by the caller's assertions)
 */
private def setupTaskSchedulerForLocalityTests(
clock: ManualClock,
conf: SparkConf = new SparkConf()): TaskSchedulerImpl = {
sc = new SparkContext("local", "TaskSchedulerImplSuite", conf)
val taskScheduler = new TaskSchedulerImpl(sc,
sc.conf.get(config.TASK_MAX_FAILURES),
clock = clock) {
override def createTaskSetManager(taskSet: TaskSet, maxTaskFailures: Int): TaskSetManager = {
// Use the test clock so locality timeouts are controlled by the test.
new TaskSetManager(this, taskSet, maxTaskFailures, healthTrackerOpt, clock)
}
override def shuffleOffers(offers: IndexedSeq[WorkerOffer]): IndexedSeq[WorkerOffer] = {
// Don't shuffle the offers around for this test. Instead, we'll just pass in all
// the permutations we care about directly.
offers
}
}
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
// Assign it to the suite-level field so afterEach() stops it; previously the
// instance was created anonymously and never stopped, leaking its event loop.
dagScheduler = new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo): Unit = {}
override def executorAdded(execId: String, host: String): Unit = {}
}
taskScheduler.initialize(new FakeSchedulerBackend)
// Eight tasks, each preferring exec1 on host1, so the set is purely PROCESS_LOCAL.
val taskSet = FakeTask.createTaskSet(8, 1, 1,
Seq.fill(8)(Seq(TaskLocation("host1", "exec1"))): _*)
// Offer resources first so that when the taskset is submitted it can initialize
// with proper locality level. Otherwise, ANY would be the only locality level.
// See TaskSetManager.computeValidLocalityLevels()
// This begins the task set as PROCESS_LOCAL locality level
taskScheduler.resourceOffers(IndexedSeq(WorkerOffer("exec1", "host1", 1)))
taskScheduler.submitTasks(taskSet)
taskScheduler
}
test("SPARK-18886 - partial offers (isAllFreeResources = false) reset timer before " +
"any resources have been rejected") {
// Scenario: before any full offer has been rejected, an accepted partial offer
// must reset the delay-scheduling timer back to PROCESS_LOCAL.
val clock = new ManualClock()
// All tasks created here are local to exec1, host1.
// Locality level starts at PROCESS_LOCAL.
val taskScheduler = setupTaskSchedulerForLocalityTests(clock)
// Locality levels increase at 3000 ms.
val advanceAmount = 3000
// Advancing clock increases locality level to NODE_LOCAL.
clock.advance(advanceAmount)
// If there hasn't yet been any full resource offers,
// partial resource (isAllFreeResources = false) offers reset delay scheduling
// if this and previous offers were accepted.
// This line resets the timer and locality level is reset to PROCESS_LOCAL.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec1", "host1", 1)),
isAllFreeResources = false)
.flatten.length === 1)
// This NODE_LOCAL task should not be accepted.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec2", "host1", 1)),
isAllFreeResources = false)
.flatten.isEmpty)
}
test("SPARK-18886 - delay scheduling timer is reset when it accepts all resources offered when " +
"isAllFreeResources = true") {
// Scenario: a fully-accepted full offer resets the delay-scheduling timer,
// so a subsequent NODE_LOCAL-only offer is rejected again.
val clock = new ManualClock()
// All tasks created here are local to exec1, host1.
// Locality level starts at PROCESS_LOCAL.
val taskScheduler = setupTaskSchedulerForLocalityTests(clock)
// Locality levels increase at 3000 ms.
val advanceAmount = 3000
// Advancing clock increases locality level to NODE_LOCAL.
clock.advance(advanceAmount)
// If there are no rejects on an all resource offer, delay scheduling is reset.
// This line resets the timer and locality level is reset to PROCESS_LOCAL.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec1", "host1", 1)),
isAllFreeResources = true)
.flatten.length === 1)
// This NODE_LOCAL task should not be accepted.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec2", "host1", 1)),
isAllFreeResources = false)
.flatten.isEmpty)
}
test("SPARK-18886 - task set with no locality requirements should not starve one with them") {
// Scenario: stage 2 (no locality preferences) keeps taking partial offers;
// stage 1 must still escalate to NODE_LOCAL over time and eventually run.
val clock = new ManualClock()
// All tasks created here are local to exec1, host1.
// Locality level starts at PROCESS_LOCAL.
val taskScheduler = setupTaskSchedulerForLocalityTests(clock)
// Locality levels increase at 3000 ms.
val advanceAmount = 2000
val taskSet2 = FakeTask.createTaskSet(8, 2, 0)
taskScheduler.submitTasks(taskSet2)
// Stage 2 takes resource since it has no locality requirements
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec2", "host1", 1)),
isAllFreeResources = false)
.flatten
.headOption
.map(_.name)
.getOrElse("")
.contains("stage 2.0"))
// Clock advances to 2s. No locality changes yet.
clock.advance(advanceAmount)
// Stage 2 takes resource since it has no locality requirements
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec2", "host1", 1)),
isAllFreeResources = false)
.flatten
.headOption
.map(_.name)
.getOrElse("")
.contains("stage 2.0"))
// Simulates:
// 1. stage 2 has taken all resource offers through single resource offers
// 2. stage 1 is offered 0 cpus on allResourceOffer.
// This should not reset timer.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec2", "host1", 0)),
isAllFreeResources = true)
.flatten.length === 0)
// This should move stage 1 to NODE_LOCAL.
clock.advance(advanceAmount)
// Stage 1 should now accept NODE_LOCAL resource.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec2", "host1", 1)),
isAllFreeResources = false)
.flatten
.headOption
.map(_.name)
.getOrElse("")
.contains("stage 1.1"))
}
test("SPARK-18886 - partial resource offers (isAllFreeResources = false) reset " +
"time if last full resource offer (isAllResources = true) was accepted as well as any " +
"following partial resource offers") {
// Scenario: once the last full offer and every partial offer since have been
// accepted, each further accepted partial offer keeps resetting the timer.
val clock = new ManualClock()
// All tasks created here are local to exec1, host1.
// Locality level starts at PROCESS_LOCAL.
val taskScheduler = setupTaskSchedulerForLocalityTests(clock)
// Locality levels increase at 3000 ms.
val advanceAmount = 3000
// PROCESS_LOCAL full resource offer is not rejected due to locality.
// It has 0 available cores, so no task is launched.
// Timer is reset and locality level remains at PROCESS_LOCAL.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec1", "host1", 0)),
isAllFreeResources = true)
.flatten.length === 0)
// Advancing clock increases locality level to NODE_LOCAL.
clock.advance(advanceAmount)
// PROCESS_LOCAL partial resource is accepted.
// Since all offers have been accepted since the last full resource offer
// (this one and the previous one), delay scheduling is reset.
// This line resets the timer and locality level is reset to PROCESS_LOCAL.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec1", "host1", 1)),
isAllFreeResources = false)
.flatten.length === 1)
// Advancing clock increases locality level to NODE_LOCAL
clock.advance(advanceAmount)
// PROCESS_LOCAL partial resource is accepted
// Since all offers have been accepted since the last full resource offer
// (one previous full offer, one previous partial offer, and this partial offer),
// delay scheduling is reset.
// This line resets the timer and locality level is reset to PROCESS_LOCAL.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec1", "host1", 1)),
isAllFreeResources = false)
.flatten.length === 1)
// This NODE_LOCAL task should not be accepted.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec2", "host1", 1)),
isAllFreeResources = false)
.flatten.isEmpty)
}
// This tests two cases
// 1. partial resource offer doesn't reset timer after full resource offer had rejected resources
// 2. partial resource offer doesn't reset timer after partial resource offer
// had rejected resources
test("SPARK-18886 - partial resource offers (isAllFreeResources = false) do not reset " +
"time if any offer was rejected since last full offer was fully accepted") {
val clock = new ManualClock()
// All tasks created here are local to exec1, host1.
// Locality level starts at PROCESS_LOCAL.
val taskScheduler = setupTaskSchedulerForLocalityTests(clock)
// Locality levels increase at 3000 ms.
val advanceAmount = 3000
// case 1 from test description above.
// NODE_LOCAL full resource offer is rejected, so delay scheduling is not reset.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec2", "host1", 1)),
isAllFreeResources = true)
.flatten.isEmpty)
// Advancing clock increases locality level to NODE_LOCAL
clock.advance(advanceAmount)
// PROCESS_LOCAL partial resource is accepted,
// but because preceding full resource offer was rejected, delay scheduling is not reset.
// Locality level remains at NODE_LOCAL.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec1", "host1", 1)),
isAllFreeResources = false)
.flatten.length === 1)
// Even though we launched a local task above, we still utilize non-local exec2.
// This is the behavior change to fix SPARK-18886.
// Locality level remains NODE_LOCAL after this clock advance.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec2", "host1", 1)),
isAllFreeResources = false)
.flatten.length === 1)
// case 2 from test description above.
// PROCESS_LOCAL full resource offer is accepted, resetting delay scheduling.
// This line resets the timer and locality level is reset to PROCESS_LOCAL.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec1", "host1", 1)),
isAllFreeResources = true)
.flatten.length === 1)
// Partial resource offer: NODE_LOCAL exec 2 is rejected, PROCESS_LOCAL exec1 is accepted.
// Since there were rejects, delay scheduling is not reset, and follow up partial offers
// will not reset delay scheduling, even if they are accepted.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec2", "host1", 1), WorkerOffer("exec1", "host1", 1)),
isAllFreeResources = false)
.flatten.size === 1)
// Advancing clock increases locality level to NODE_LOCAL
clock.advance(advanceAmount)
// PROCESS_LOCAL partial resource is accepted, but does not reset delay scheduling
// as described above.
// Locality level remains at NODE_LOCAL.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec1", "host1", 1)),
isAllFreeResources = false)
.flatten.length === 1)
// NODE_LOCAL partial resource offer is accepted,
// verifying locality level was not reset to PROCESS_LOCAL by above offer.
assert(taskScheduler
.resourceOffers(
IndexedSeq(WorkerOffer("exec2", "host1", 1)),
isAllFreeResources = false)
.flatten.length === 1)
}
test("Scheduler does not crash when tasks are not serializable") {
// A task set whose tasks fail to serialize must be reported via the
// taskSetFailed callback without breaking the scheduler for later task sets.
val taskCpus = 2
val taskScheduler = setupSchedulerWithMaster(
s"local[$taskCpus]",
config.CPUS_PER_TASK.key -> taskCpus.toString)
val numFreeCores = 1
val taskSet = new TaskSet(
Array(new NotSerializableFakeTask(1, 0), new NotSerializableFakeTask(0, 1)),
0, 0, 0, null, ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID)
val multiCoreWorkerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", taskCpus),
new WorkerOffer("executor1", "host1", numFreeCores))
taskScheduler.submitTasks(taskSet)
var taskDescriptions = taskScheduler.resourceOffers(multiCoreWorkerOffers).flatten
assert(0 === taskDescriptions.length)
assert(failedTaskSet)
assert(failedTaskSetReason.contains("Failed to serialize task"))
// Now check that we can still submit tasks
// Even if one of the task sets has not-serializable tasks, the other task set should
// still be processed without error
taskScheduler.submitTasks(FakeTask.createTaskSet(1))
val taskSet2 = new TaskSet(
Array(new NotSerializableFakeTask(1, 0), new NotSerializableFakeTask(0, 1)),
1, 0, 0, null, ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID)
taskScheduler.submitTasks(taskSet2)
taskDescriptions = taskScheduler.resourceOffers(multiCoreWorkerOffers).flatten
assert(taskDescriptions.map(_.executorId) === Seq("executor0"))
}
test("concurrent attempts for the same stage only have one active taskset") {
  val taskScheduler = setupScheduler()
  def isTasksetZombie(taskset: TaskSet): Boolean = {
    taskScheduler.taskSetManagerForAttempt(taskset.stageId, taskset.stageAttemptId).get.isZombie
  }
  // Submit three attempts for stage 0 in order. After each submission, every
  // earlier attempt must have been marked zombie and only the newest attempt
  // may remain active.
  val attempts = (0 to 2).map { attemptId =>
    FakeTask.createTaskSet(1, stageId = 0, stageAttemptId = attemptId)
  }
  attempts.zipWithIndex.foreach { case (attempt, idx) =>
    taskScheduler.submitTasks(attempt)
    // All previously submitted attempts are zombies now.
    attempts.take(idx).foreach { earlier =>
      assert(isTasksetZombie(earlier))
    }
    // The newly submitted taskset is the single active one.
    assert(!isTasksetZombie(attempt))
  }
}
test("don't schedule more tasks after a taskset is zombie") {
val taskScheduler = setupScheduler()
val numFreeCores = 1
val workerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", numFreeCores))
val attempt1 = FakeTask.createTaskSet(10, stageId = 0, stageAttemptId = 0)
// submit attempt 1, offer some resources, some tasks get scheduled
taskScheduler.submitTasks(attempt1)
val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
assert(1 === taskDescriptions.length)
// now mark attempt 1 as a zombie
taskScheduler.taskSetManagerForAttempt(attempt1.stageId, attempt1.stageAttemptId)
.get.isZombie = true
// don't schedule anything on another resource offer
val taskDescriptions2 = taskScheduler.resourceOffers(workerOffers).flatten
assert(0 === taskDescriptions2.length)
// if we schedule another attempt for the same stage, it should get scheduled
val attempt2 = FakeTask.createTaskSet(10, stageId = 0, stageAttemptId = 1)
// submit attempt 2, offer some resources, some tasks get scheduled
taskScheduler.submitTasks(attempt2)
val taskDescriptions3 = taskScheduler.resourceOffers(workerOffers).flatten
assert(1 === taskDescriptions3.length)
// The scheduled task must belong to the new (non-zombie) attempt.
val mgr = Option(taskScheduler.taskIdToTaskSetManager.get(taskDescriptions3(0).taskId)).get
assert(mgr.taskSet.stageAttemptId === 1)
assert(!failedTaskSet)
}
test("if a zombie attempt finishes, continue scheduling tasks for non-zombie attempts") {
val taskScheduler = setupScheduler()
val numFreeCores = 10
val workerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", numFreeCores))
val attempt1 = FakeTask.createTaskSet(10, stageId = 0, stageAttemptId = 0)
// submit attempt 1, offer some resources, some tasks get scheduled
taskScheduler.submitTasks(attempt1)
val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
assert(10 === taskDescriptions.length)
// now mark attempt 1 as a zombie
val mgr1 = taskScheduler.taskSetManagerForAttempt(attempt1.stageId, attempt1.stageAttemptId).get
mgr1.isZombie = true
// don't schedule anything on another resource offer
val taskDescriptions2 = taskScheduler.resourceOffers(workerOffers).flatten
assert(0 === taskDescriptions2.length)
// submit attempt 2
val attempt2 = FakeTask.createTaskSet(10, stageId = 0, stageAttemptId = 1)
taskScheduler.submitTasks(attempt2)
// attempt 1 finished (this can happen even if it was marked zombie earlier -- all tasks were
// already submitted, and then they finish)
taskScheduler.taskSetFinished(mgr1)
// now with another resource offer, we should still schedule all the tasks in attempt2
val taskDescriptions3 = taskScheduler.resourceOffers(workerOffers).flatten
assert(10 === taskDescriptions3.length)
// Every scheduled task must come from attempt 2, not the finished zombie.
taskDescriptions3.foreach { task =>
val mgr = Option(taskScheduler.taskIdToTaskSetManager.get(task.taskId)).get
assert(mgr.taskSet.stageAttemptId === 1)
}
assert(!failedTaskSet)
}
test("tasks are not re-scheduled while executor loss reason is pending") {
val taskScheduler = setupScheduler()
val e0Offers = IndexedSeq(new WorkerOffer("executor0", "host0", 1))
val e1Offers = IndexedSeq(new WorkerOffer("executor1", "host0", 1))
val attempt1 = FakeTask.createTaskSet(1)
// submit attempt 1, offer resources, task gets scheduled
taskScheduler.submitTasks(attempt1)
val taskDescriptions = taskScheduler.resourceOffers(e0Offers).flatten
assert(1 === taskDescriptions.length)
// mark executor0 as dead but pending fail reason
taskScheduler.executorLost("executor0", LossReasonPending)
// offer some more resources on a different executor, nothing should change
val taskDescriptions2 = taskScheduler.resourceOffers(e1Offers).flatten
assert(0 === taskDescriptions2.length)
// provide the actual loss reason for executor0
taskScheduler.executorLost("executor0", ExecutorProcessLost("oops"))
// executor0's tasks should have failed now that the loss reason is known, so offering more
// resources should make them be scheduled on the new executor.
val taskDescriptions3 = taskScheduler.resourceOffers(e1Offers).flatten
assert(1 === taskDescriptions3.length)
assert("executor1" === taskDescriptions3(0).executorId)
assert(!failedTaskSet)
}
test("scheduled tasks obey task and stage excludelist") {
taskScheduler = setupSchedulerWithMockTaskSetExcludelist()
// Submit three 2-task stages; the mocks registered per stage script exclusions.
(0 to 2).foreach {stageId =>
val taskSet = FakeTask.createTaskSet(numTasks = 2, stageId = stageId, stageAttemptId = 0)
taskScheduler.submitTasks(taskSet)
}
// Setup our mock excludelist:
// * stage 0 is excluded on node "host1"
// * stage 1 is excluded on executor "executor3"
// * stage 0, partition 0 is excluded on executor 0
// (mocked methods default to returning false, ie. no excluding)
when(stageToMockTaskSetExcludelist(0).isNodeExcludedForTaskSet("host1")).thenReturn(true)
when(stageToMockTaskSetExcludelist(1).isExecutorExcludedForTaskSet("executor3"))
.thenReturn(true)
when(stageToMockTaskSetExcludelist(0).isExecutorExcludedForTask("executor0", 0))
.thenReturn(true)
val offers = IndexedSeq(
new WorkerOffer("executor0", "host0", 1),
new WorkerOffer("executor1", "host1", 1),
new WorkerOffer("executor2", "host1", 1),
new WorkerOffer("executor3", "host2", 10)
)
val firstTaskAttempts = taskScheduler.resourceOffers(offers).flatten
// We should schedule all tasks.
assert(firstTaskAttempts.size === 6)
// Whenever we schedule a task, we must consult the node and executor excludelist. (The test
// doesn't check exactly what checks are made because the offers get shuffled.)
(0 to 2).foreach { stageId =>
verify(stageToMockTaskSetExcludelist(stageId), atLeast(1))
.isNodeExcludedForTaskSet(anyString())
verify(stageToMockTaskSetExcludelist(stageId), atLeast(1))
.isExecutorExcludedForTaskSet(anyString())
}
// Helper: the task descriptions launched for a given stage in the first round.
def tasksForStage(stageId: Int): Seq[TaskDescription] = {
firstTaskAttempts.filter{_.name.contains(s"stage $stageId")}
}
tasksForStage(0).foreach { task =>
// executors 1 & 2 excluded for node
// executor 0 excluded just for partition 0
if (task.index == 0) {
assert(task.executorId === "executor3")
} else {
assert(Set("executor0", "executor3").contains(task.executorId))
}
}
tasksForStage(1).foreach { task =>
// executor 3 excluded
assert("executor3" != task.executorId)
}
// no restrictions on stage 2
// Finally, just make sure that we can still complete tasks as usual with exclusion
// in effect. Finish each of the tasksets -- taskset 0 & 1 complete successfully, taskset 2
// fails.
(0 to 2).foreach { stageId =>
val tasks = tasksForStage(stageId)
val tsm = taskScheduler.taskSetManagerForAttempt(stageId, 0).get
val valueSer = SparkEnv.get.serializer.newInstance()
if (stageId == 2) {
// Just need to make one task fail 4 times.
var task = tasks(0)
val taskIndex = task.index
(0 until 4).foreach { attempt =>
assert(task.attemptNumber === attempt)
failTask(task.taskId, TaskState.FAILED, TaskResultLost, tsm)
val nextAttempts =
taskScheduler.resourceOffers(IndexedSeq(WorkerOffer("executor4", "host4", 1))).flatten
if (attempt < 3) {
// Retries 1-3: the same partition is re-attempted on the fresh executor.
assert(nextAttempts.size === 1)
task = nextAttempts(0)
assert(task.index === taskIndex)
} else {
// Fourth failure exhausts maxTaskFailures; no further attempt is scheduled.
assert(nextAttempts.size === 0)
}
}
// End the other task of the taskset, doesn't matter whether it succeeds or fails.
val otherTask = tasks(1)
val result = new DirectTaskResult[Int](valueSer.serialize(otherTask.taskId), Seq(), Array())
tsm.handleSuccessfulTask(otherTask.taskId, result)
} else {
tasks.foreach { task =>
val result = new DirectTaskResult[Int](valueSer.serialize(task.taskId), Seq(), Array())
tsm.handleSuccessfulTask(task.taskId, result)
}
}
assert(tsm.isZombie)
}
// the tasksSets complete, so the tracker should be notified of the successful ones
verify(healthTracker, times(1)).updateExcludedForSuccessfulTaskSet(
stageId = 0,
stageAttemptId = 0,
failuresByExec = stageToMockTaskSetExcludelist(0).execToFailures)
verify(healthTracker, times(1)).updateExcludedForSuccessfulTaskSet(
stageId = 1,
stageAttemptId = 0,
failuresByExec = stageToMockTaskSetExcludelist(1).execToFailures)
// but we shouldn't update for the failed taskset
verify(healthTracker, never).updateExcludedForSuccessfulTaskSet(
stageId = meq(2),
stageAttemptId = anyInt(),
failuresByExec = any())
}
test("scheduled tasks obey node and executor excludelists") {
taskScheduler = setupSchedulerWithMockTaskSetExcludelist()
(0 to 2).foreach { stageId =>
val taskSet = FakeTask.createTaskSet(numTasks = 2, stageId = stageId, stageAttemptId = 0)
taskScheduler.submitTasks(taskSet)
}
val offers = IndexedSeq(
new WorkerOffer("executor0", "host0", 1),
new WorkerOffer("executor1", "host1", 1),
new WorkerOffer("executor2", "host1", 1),
new WorkerOffer("executor3", "host2", 10),
new WorkerOffer("executor4", "host3", 1)
)
// setup our mock excludelist:
// host1, executor0 & executor3 are completely excluded
// This covers everything *except* one core on executor4 / host3, so that everything is still
// schedulable.
when(healthTracker.isNodeExcluded("host1")).thenReturn(true)
when(healthTracker.isExecutorExcluded("executor0")).thenReturn(true)
when(healthTracker.isExecutorExcluded("executor3")).thenReturn(true)
val firstTaskAttempts = taskScheduler.resourceOffers(offers).flatten
firstTaskAttempts.foreach { task => logInfo(s"scheduled $task on ${task.executorId}") }
// Only the single core on executor4 survives the excludelist, so exactly one
// task launches, and it must land there.
assert(firstTaskAttempts.size === 1)
assert(firstTaskAttempts.head.executorId === "executor4")
// The scheduler must have consulted the app-level node excludelist for host0
// and host1. (Was written as the char range ('0' until '2'), an obscure way
// to spell the same two hosts.)
(0 to 1).foreach { hostNum =>
verify(healthTracker, atLeast(1)).isNodeExcluded(s"host$hostNum")
}
}
test("abort stage when all executors are excluded and we cannot acquire new executor") {
taskScheduler = setupSchedulerWithMockTaskSetExcludelist()
val taskSet = FakeTask.createTaskSet(numTasks = 10)
taskScheduler.submitTasks(taskSet)
val tsm = stageToMockTaskSetManager(0)
// first just submit some offers so the scheduler knows about all the executors
taskScheduler.resourceOffers(IndexedSeq(
WorkerOffer("executor0", "host0", 2),
WorkerOffer("executor1", "host0", 2),
WorkerOffer("executor2", "host0", 2),
WorkerOffer("executor3", "host1", 2)
))
// now say our health tracker updates to exclude a bunch of resources, but *not* everything
when(healthTracker.isNodeExcluded("host1")).thenReturn(true)
when(healthTracker.isExecutorExcluded("executor0")).thenReturn(true)
// make an offer on the excluded resources. We won't schedule anything, but also won't
// abort yet, since we know of other resources that work
assert(taskScheduler.resourceOffers(IndexedSeq(
WorkerOffer("executor0", "host0", 2),
WorkerOffer("executor3", "host1", 2)
)).flatten.size === 0)
assert(!tsm.isZombie)
// now update the health tracker so that everything really is excluded
when(healthTracker.isExecutorExcluded("executor1")).thenReturn(true)
when(healthTracker.isExecutorExcluded("executor2")).thenReturn(true)
assert(taskScheduler.resourceOffers(IndexedSeq(
WorkerOffer("executor0", "host0", 2),
WorkerOffer("executor3", "host1", 2)
)).flatten.size === 0)
// With no schedulable resources left, the task set must be aborted.
assert(tsm.isZombie)
verify(tsm).abort(anyString(), any())
}
test("SPARK-22148 abort timer should kick in when task is completely excluded & no new " +
"executor can be acquired") {
// set the abort timer to fail immediately
taskScheduler = setupSchedulerWithMockTaskSetExcludelist(
config.UNSCHEDULABLE_TASKSET_TIMEOUT.key -> "0")
// We have only 1 task remaining with 1 executor
val taskSet = FakeTask.createTaskSet(numTasks = 1)
taskScheduler.submitTasks(taskSet)
val tsm = stageToMockTaskSetManager(0)
// submit an offer with one executor
val firstTaskAttempts = taskScheduler.resourceOffers(IndexedSeq(
WorkerOffer("executor0", "host0", 1)
)).flatten
// Fail the running task
val failedTask = firstTaskAttempts.find(_.executorId == "executor0").get
failTask(failedTask.taskId, TaskState.FAILED, UnknownReason, tsm)
// Script the mock so the sole executor is excluded for the failed task.
when(tsm.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask(
"executor0", failedTask.index)).thenReturn(true)
// make an offer on the excluded executor. We won't schedule anything, and set the abort
// timer to kick in immediately
assert(taskScheduler.resourceOffers(IndexedSeq(
WorkerOffer("executor0", "host0", 1)
)).flatten.size === 0)
// Wait for the abort timer to kick in. Even though we configure the timeout to be 0, there is a
// slight delay as the abort timer is launched in a separate thread.
eventually(timeout(500.milliseconds)) {
assert(tsm.isZombie)
}
}
test("SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor") {
taskScheduler = setupSchedulerWithMockTaskSetExcludelist(
config.UNSCHEDULABLE_TASKSET_TIMEOUT.key -> "10")
// We have only 1 task remaining with 1 executor
val taskSet = FakeTask.createTaskSet(numTasks = 1)
taskScheduler.submitTasks(taskSet)
val tsm = stageToMockTaskSetManager(0)
// submit an offer with one executor
val firstTaskAttempts = taskScheduler.resourceOffers(IndexedSeq(
WorkerOffer("executor0", "host0", 1)
)).flatten
// Fail the running task
val failedTask = firstTaskAttempts.head
failTask(failedTask.taskId, TaskState.FAILED, UnknownReason, tsm)
// Script the mock so the sole executor is excluded for the failed task.
when(tsm.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask(
"executor0", failedTask.index)).thenReturn(true)
// make an offer on the excluded executor. We won't schedule anything, and set the abort
// timer to expire if no new executors could be acquired. We kill the existing idle excluded
// executor and try to acquire a new one.
assert(taskScheduler.resourceOffers(IndexedSeq(
WorkerOffer("executor0", "host0", 1)
)).flatten.size === 0)
// The task set is now tracked as unschedulable with a pending expiry.
assert(taskScheduler.unschedulableTaskSetToExpiryTime.contains(tsm))
assert(!tsm.isZombie)
// Offer a new executor which should be accepted
assert(taskScheduler.resourceOffers(IndexedSeq(
WorkerOffer("executor1", "host0", 1)
)).flatten.size === 1)
// Scheduling succeeded, so the pending abort is cancelled and the set stays alive.
assert(taskScheduler.unschedulableTaskSetToExpiryTime.isEmpty)
assert(!tsm.isZombie)
}
// This is to test a scenario where we have two taskSets completely excluded and on acquiring
// a new executor we don't want the abort timer for the second taskSet to expire and abort the job
//
// Sequence: taskSet1's task fails and gets excluded, but taskSet2 can still schedule so no
// timer is armed; then taskSet2's task also fails and gets excluded, so an offer that
// schedules nothing arms the timer for BOTH tasksets; finally a fresh executor schedules a
// task, which must clear the expiry entries for every taskset, not just the scheduled one.
test("SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets") {
  taskScheduler = setupSchedulerWithMockTaskSetExcludelist()
  // We have 2 taskSets with 1 task remaining in each with 1 executor completely excluded
  val taskSet1 = FakeTask.createTaskSet(numTasks = 1, stageId = 0, stageAttemptId = 0)
  taskScheduler.submitTasks(taskSet1)
  val taskSet2 = FakeTask.createTaskSet(numTasks = 1, stageId = 1, stageAttemptId = 0)
  taskScheduler.submitTasks(taskSet2)
  val tsm = stageToMockTaskSetManager(0)
  // submit an offer with one executor
  val firstTaskAttempts = taskScheduler.resourceOffers(IndexedSeq(
    WorkerOffer("executor0", "host0", 1)
  )).flatten
  assert(taskScheduler.unschedulableTaskSetToExpiryTime.isEmpty)
  // Fail the running task
  val failedTask = firstTaskAttempts.head
  failTask(failedTask.taskId, TaskState.FAILED, UnknownReason, tsm)
  when(tsm.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask(
    "executor0", failedTask.index)).thenReturn(true)
  // make an offer. We will schedule the task from the second taskSet. Since a task was scheduled
  // we do not kick off the abort timer for taskSet1
  val secondTaskAttempts = taskScheduler.resourceOffers(IndexedSeq(
    WorkerOffer("executor0", "host0", 1)
  )).flatten
  assert(taskScheduler.unschedulableTaskSetToExpiryTime.isEmpty)
  val tsm2 = stageToMockTaskSetManager(1)
  val failedTask2 = secondTaskAttempts.head
  failTask(failedTask2.taskId, TaskState.FAILED, UnknownReason, tsm2)
  when(tsm2.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask(
    "executor0", failedTask2.index)).thenReturn(true)
  // make an offer on the excluded executor. We won't schedule anything, and set the abort
  // timer for taskSet1 and taskSet2
  assert(taskScheduler.resourceOffers(IndexedSeq(
    WorkerOffer("executor0", "host0", 1)
  )).flatten.size === 0)
  assert(taskScheduler.unschedulableTaskSetToExpiryTime.contains(tsm))
  assert(taskScheduler.unschedulableTaskSetToExpiryTime.contains(tsm2))
  // use === (TripleEquals) like the rest of this suite for informative failure messages
  assert(taskScheduler.unschedulableTaskSetToExpiryTime.size === 2)
  // Offer a new executor which should be accepted
  assert(taskScheduler.resourceOffers(IndexedSeq(
    WorkerOffer("executor1", "host1", 1)
  )).flatten.size === 1)
  // Check if all the taskSets are cleared
  assert(taskScheduler.unschedulableTaskSetToExpiryTime.isEmpty)
  assert(!tsm.isZombie)
}
// this test is to check that we don't abort a taskSet which is not being scheduled on other
// executors as it is waiting on locality timeout and not being aborted because it is still not
// completely excluded.
test("SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded") {
  taskScheduler = setupSchedulerWithMockTaskSetExcludelist(
    config.UNSCHEDULABLE_TASKSET_TIMEOUT.key -> "0",
    // This is to avoid any potential flakiness in the test because of large pauses in jenkins
    config.LOCALITY_WAIT.key -> "30s"
  )
  // The task prefers executor0, so the long locality wait keeps it pinned at PROCESS_LOCAL.
  val preferredLocation = Seq(ExecutorCacheTaskLocation("host0", "executor0"))
  val taskSet1 = FakeTask.createTaskSet(numTasks = 1, stageId = 0, stageAttemptId = 0,
    preferredLocation)
  taskScheduler.submitTasks(taskSet1)
  val tsm = stageToMockTaskSetManager(0)
  // submit an offer with one executor
  val taskAttempts = taskScheduler.resourceOffers(IndexedSeq(
    WorkerOffer("executor0", "host0", 1)
  )).flatten
  // Fail the running task
  val failedTask = taskAttempts.head
  failTask(failedTask.taskId, TaskState.FAILED, UnknownReason, tsm)
  // Exclude the task only on executor0; executor1 remains usable once locality relaxes.
  when(tsm.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask(
    "executor0", failedTask.index)).thenReturn(true)
  // make an offer but we won't schedule anything yet as scheduler locality is still PROCESS_LOCAL
  assert(taskScheduler.resourceOffers(IndexedSeq(
    WorkerOffer("executor1", "host0", 1)
  )).flatten.isEmpty)
  // Not completely excluded (executor1 is fine), so no abort timer and no zombie state --
  // even though the unschedulable timeout is 0.
  assert(taskScheduler.unschedulableTaskSetToExpiryTime.isEmpty)
  assert(!tsm.isZombie)
}
// SPARK-31418: even with dynamic allocation enabled, if no new executor arrives before the
// (zero) unschedulable timeout, the abort timer must still fire and zombie the taskset.
test("SPARK-31418 abort timer should kick in when task is completely excluded &" +
  "allocation manager could not acquire a new executor before the timeout") {
  // set the abort timer to fail immediately
  taskScheduler = setupSchedulerWithMockTaskSetExcludelist(
    config.UNSCHEDULABLE_TASKSET_TIMEOUT.key -> "0",
    config.DYN_ALLOCATION_ENABLED.key -> "true")
  // We have 2 tasks remaining with 1 executor
  val taskSet = FakeTask.createTaskSet(numTasks = 2)
  taskScheduler.submitTasks(taskSet)
  val tsm = stageToMockTaskSetManager(0)
  // submit an offer with one executor
  taskScheduler.resourceOffers(IndexedSeq(WorkerOffer("executor0", "host0", 2))).flatten
  // Fail the running task
  failTask(0, TaskState.FAILED, UnknownReason, tsm)
  when(tsm.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask(
    "executor0", 0)).thenReturn(true)
  // If the executor is busy, then dynamic allocation should kick in and try
  // to acquire additional executors to schedule the excluded task
  assert(taskScheduler.isExecutorBusy("executor0"))
  // make an offer on the excluded executor. We won't schedule anything, and set the abort
  // timer to kick in immediately
  assert(taskScheduler.resourceOffers(IndexedSeq(
    WorkerOffer("executor0", "host0", 1)
  )).flatten.size === 0)
  // Wait for the abort timer to kick in. Even though we configure the timeout to be 0, there is a
  // slight delay as the abort timer is launched in a separate thread.
  eventually(timeout(500.milliseconds)) {
    assert(tsm.isZombie)
  }
}
/**
 * Helper for performance tests. Takes the explicitly excluded nodes and executors; verifies
 * that the excluded are used efficiently to ensure scheduling is not O(numPendingTasks).
 * Creates 1 offer on executor[1-3]. Executor1 & 2 are on host1, executor3 is on host2. Passed
 * in nodes and executors should be on that list.
 *
 * Note: this registers tests (via `test(...)`) at suite-construction time; it does not run
 * scheduling itself. Each registered test asserts an upper bound on how often the taskset
 * excludelist mocks are consulted.
 */
private def testExcludelistPerformance(
    testName: String,
    nodeExcludelist: Seq[String],
    execExcludelist: Seq[String]): Unit = {
  // Because scheduling involves shuffling the order of offers around, we run this test a few
  // times to cover more possibilities. There are only 3 offers, which means 6 permutations,
  // so 10 iterations is pretty good.
  (0 until 10).foreach { testItr =>
    test(s"$testName: iteration $testItr") {
      // When an executor or node is excluded, we want to make sure that we don't try
      // scheduling each pending task, one by one, to discover they are all excluded. This is
      // important for performance -- if we did check each task one-by-one, then responding to a
      // resource offer (which is usually O(1)-ish) would become O(numPendingTasks), which would
      // slow down scheduler throughput and slow down scheduling even on healthy executors.
      // Here, we check a proxy for the runtime -- we make sure the scheduling is short-circuited
      // at the node or executor excludelist, so we never check the per-task excludelist. We also
      // make sure we don't check the node & executor excludelist for the entire taskset
      // O(numPendingTasks) times.
      taskScheduler = setupSchedulerWithMockTaskSetExcludelist()
      // we schedule 500 tasks so we can clearly distinguish anything that is O(numPendingTasks)
      val taskSet = FakeTask.createTaskSet(numTasks = 500, stageId = 0, stageAttemptId = 0)
      taskScheduler.submitTasks(taskSet)
      val offers = IndexedSeq(
        new WorkerOffer("executor1", "host1", 1),
        new WorkerOffer("executor2", "host1", 1),
        new WorkerOffer("executor3", "host2", 1)
      )
      // We should check the node & exec excludelists, but only O(numOffers),
      // not O(numPendingTasks) times. In the worst case, after shuffling,
      // we offer our excluded resource first, and then offer other resources
      // which do get used. The taskset excludelist is consulted repeatedly
      // as we offer resources to the taskset -- each iteration either schedules
      // something, or it terminates that locality level, so the maximum number of
      // checks is numCores + numLocalityLevels
      val numCoresOnAllOffers = offers.map(_.cores).sum
      val numLocalityLevels = TaskLocality.values.size
      val maxExcludelistChecks = numCoresOnAllOffers + numLocalityLevels
      // Setup the excludelist
      nodeExcludelist.foreach { node =>
        when(stageToMockTaskSetExcludelist(0).isNodeExcludedForTaskSet(node)).thenReturn(true)
      }
      execExcludelist.foreach { exec =>
        when(stageToMockTaskSetExcludelist(0).isExecutorExcludedForTaskSet(exec))
          .thenReturn(true)
      }
      // Figure out which nodes have any effective exclusions on them. This means all nodes
      // that are explicitly excluded, plus those that have *any* executors excluded.
      val nodesForExcludedExecutors = offers.filter { offer =>
        execExcludelist.contains(offer.executorId)
      }.map(_.host).distinct
      val nodesWithAnyExclusions = (nodeExcludelist ++ nodesForExcludedExecutors).toSet
      // Similarly, figure out which executors have any exclusions. This means all executors
      // that are explicitly excluded, plus all executors on nodes that are excluded.
      val execsForExcludedNodes = offers.filter { offer =>
        nodeExcludelist.contains(offer.host)
      }.map(_.executorId).toSeq
      val executorsWithAnyExclusions = (execExcludelist ++ execsForExcludedNodes).toSet
      // Schedule a taskset, and make sure our test setup is correct -- we are able to schedule
      // a task on all executors that aren't excluded (whether that executor is a explicitly
      // excluded, or implicitly excluded via the node excludeOnFailures).
      val firstTaskAttempts = taskScheduler.resourceOffers(offers).flatten
      assert(firstTaskAttempts.size === offers.size - executorsWithAnyExclusions.size)
      // Now check that we haven't made too many calls to any of the excludelist methods.
      // We should be checking our node excludelist, but it should be within the bound we defined
      // above.
      verify(stageToMockTaskSetExcludelist(0), atMost(maxExcludelistChecks))
        .isNodeExcludedForTaskSet(anyString())
      // We shouldn't ever consult the per-task excludelist for the nodes that have been excluded
      // for the entire taskset, since the taskset level exclusions should prevent scheduling
      // from ever looking at specific tasks.
      nodesWithAnyExclusions.foreach { node =>
        verify(stageToMockTaskSetExcludelist(0), never)
          .isNodeExcludedForTask(meq(node), anyInt())
      }
      executorsWithAnyExclusions.foreach { exec =>
        // We should be checking our executor excludelist, but it should be within the bound
        // defined above. Its possible that this will be significantly fewer calls, maybe even
        // 0, if there is also a node-excludelist which takes effect first. But this assert is
        // all we need to avoid an O(numPendingTask) slowdown.
        verify(stageToMockTaskSetExcludelist(0), atMost(maxExcludelistChecks))
          .isExecutorExcludedForTaskSet(exec)
        // We shouldn't ever consult the per-task excludelist for executors that have been
        // excluded for the entire taskset, since the taskset level exclusions should prevent
        // scheduling from ever looking at specific tasks.
        verify(stageToMockTaskSetExcludelist(0), never)
          .isExecutorExcludedForTask(meq(exec), anyInt())
      }
    }
  }
}
// Instantiate the performance tests defined above for both exclusion flavors: a whole node
// excluded for the taskset, and a single executor excluded for the taskset.
testExcludelistPerformance(
  testName = "Excluded node for entire task set prevents per-task exclusion checks",
  nodeExcludelist = Seq("host1"),
  execExcludelist = Seq())
testExcludelistPerformance(
  testName = "Excluded executor for entire task set prevents per-task exclusion checks",
  nodeExcludelist = Seq(),
  execExcludelist = Seq("executor3")
)
test("abort stage if executor loss results in unschedulability from previously failed tasks") {
  // Make sure we can detect when a taskset becomes unschedulable from excludeOnFailure. This
  // test explores a particular corner case -- you may have one task fail, but still be
  // schedulable on another executor. However, that executor may fail later on, leaving the
  // first task with no place to run.
  val taskScheduler = setupScheduler(
    config.EXCLUDE_ON_FAILURE_ENABLED.key -> "true"
  )
  val taskSet = FakeTask.createTaskSet(2)
  taskScheduler.submitTasks(taskSet)
  val tsm = taskScheduler.taskSetManagerForAttempt(taskSet.stageId, taskSet.stageAttemptId).get
  val firstTaskAttempts = taskScheduler.resourceOffers(IndexedSeq(
    new WorkerOffer("executor0", "host0", 1),
    new WorkerOffer("executor1", "host1", 1)
  )).flatten
  assert(Set("executor0", "executor1") === firstTaskAttempts.map(_.executorId).toSet)
  // Fail one of the tasks, but leave the other running.
  val failedTask = firstTaskAttempts.find(_.executorId == "executor0").get
  failTask(failedTask.taskId, TaskState.FAILED, TaskResultLost, tsm)
  // At this point, our failed task could run on the other executor, so don't give up the task
  // set yet.
  assert(!failedTaskSet)
  // Now we fail our second executor. The other task can still run on executor1, so make an offer
  // on that executor, and make sure that the other task (not the failed one) is assigned there.
  taskScheduler.executorLost("executor1", ExecutorProcessLost("oops"))
  val nextTaskAttempts =
    taskScheduler.resourceOffers(IndexedSeq(new WorkerOffer("executor0", "host0", 1))).flatten
  // Note: Its OK if some future change makes this already realize the taskset has become
  // unschedulable at this point (though in the current implementation, we're sure it will not).
  assert(nextTaskAttempts.size === 1)
  assert(nextTaskAttempts.head.executorId === "executor0")
  assert(nextTaskAttempts.head.attemptNumber === 1)
  assert(nextTaskAttempts.head.index != failedTask.index)
  // Now we should definitely realize that our task set is unschedulable, because the only
  // task left can't be scheduled on any executors due to the excludelist.
  taskScheduler.resourceOffers(IndexedSeq(new WorkerOffer("executor0", "host0", 1)))
  sc.listenerBus.waitUntilEmpty(100000)
  assert(tsm.isZombie)
  assert(failedTaskSet)
  val idx = failedTask.index
  // The abort reason is matched verbatim; keep the format in sync with TaskSetManager's
  // abort message if either changes.
  assert(failedTaskSetReason === s"""
    |Aborting $taskSet because task $idx (partition $idx)
    |cannot run anywhere due to node and executor excludeOnFailure.
    |Most recent failure:
    |${tsm.taskSetExcludelistHelperOpt.get.getLatestFailureReason}
    |
    |ExcludeOnFailure behavior can be configured via spark.excludeOnFailure.*.
    |""".stripMargin)
}
test("don't abort if there is an executor available, though it hasn't had scheduled tasks yet") {
  // interaction of SPARK-15865 & SPARK-16106
  // if we have a small number of tasks, we might be able to schedule them all on the first
  // executor. But if those tasks fail, we should still realize there is another executor
  // available and not bail on the job
  val taskScheduler = setupScheduler(
    config.EXCLUDE_ON_FAILURE_ENABLED.key -> "true"
  )
  val taskSet = FakeTask.createTaskSet(2, (0 until 2).map { _ => Seq(TaskLocation("host0")) }: _*)
  taskScheduler.submitTasks(taskSet)
  val tsm = taskScheduler.taskSetManagerForAttempt(taskSet.stageId, taskSet.stageAttemptId).get
  val offers = IndexedSeq(
    // each offer has more than enough free cores for the entire task set, so when combined
    // with the locality preferences, we schedule all tasks on one executor
    new WorkerOffer("executor0", "host0", 4),
    new WorkerOffer("executor1", "host1", 4)
  )
  val firstTaskAttempts = taskScheduler.resourceOffers(offers).flatten
  // use === (TripleEquals) like the rest of this suite for informative failure messages
  assert(firstTaskAttempts.size === 2)
  firstTaskAttempts.foreach { taskAttempt => assert("executor0" === taskAttempt.executorId) }
  // fail all the tasks on the bad executor
  firstTaskAttempts.foreach { taskAttempt =>
    failTask(taskAttempt.taskId, TaskState.FAILED, TaskResultLost, tsm)
  }
  // Here is the main check of this test -- we have the same offers again, and we schedule it
  // successfully. Because the scheduler tries to schedule with locality in mind, at first
  // it won't schedule anything on executor1. But despite that, we don't abort the job.
  val secondTaskAttempts = taskScheduler.resourceOffers(offers).flatten
  assert(secondTaskAttempts.isEmpty)
  assert(!failedTaskSet)
}
// SPARK-16106: tasks prefer executor2 on host0, which does not exist yet. When executor2
// finally shows up, the taskset's locality levels must be recomputed so PROCESS_LOCAL
// becomes available, and the alive-executor bookkeeping must track every offer we see.
test("SPARK-16106 locality levels updated if executor added to existing host") {
  val taskScheduler = setupScheduler()
  taskScheduler.submitTasks(FakeTask.createTaskSet(2, stageId = 0, stageAttemptId = 0,
    (0 until 2).map { _ => Seq(TaskLocation("host0", "executor2")) }: _*
  ))
  val taskDescs = taskScheduler.resourceOffers(IndexedSeq(
    new WorkerOffer("executor0", "host0", 1),
    new WorkerOffer("executor1", "host1", 1)
  )).flatten
  // only schedule one task because of locality
  assert(taskDescs.size === 1)
  val mgr = Option(taskScheduler.taskIdToTaskSetManager.get(taskDescs(0).taskId)).get
  assert(mgr.myLocalityLevels.toSet === Set(TaskLocality.NODE_LOCAL, TaskLocality.ANY))
  // we should know about both executors, even though we only scheduled tasks on one of them
  assert(taskScheduler.getExecutorsAliveOnHost("host0") === Some(Set("executor0")))
  assert(taskScheduler.getExecutorsAliveOnHost("host1") === Some(Set("executor1")))
  // when executor2 is added, we should realize that we can run process-local tasks.
  // And we should know its alive on the host.
  val secondTaskDescs = taskScheduler.resourceOffers(
    IndexedSeq(new WorkerOffer("executor2", "host0", 1))).flatten
  assert(secondTaskDescs.size === 1)
  assert(mgr.myLocalityLevels.toSet ===
    Set(TaskLocality.PROCESS_LOCAL, TaskLocality.NODE_LOCAL, TaskLocality.ANY))
  assert(taskScheduler.getExecutorsAliveOnHost("host0") === Some(Set("executor0", "executor2")))
  assert(taskScheduler.getExecutorsAliveOnHost("host1") === Some(Set("executor1")))
  // And even if we don't have anything left to schedule, another resource offer on yet another
  // executor should also update the set of live executors
  val thirdTaskDescs = taskScheduler.resourceOffers(
    IndexedSeq(new WorkerOffer("executor3", "host1", 1))).flatten
  assert(thirdTaskDescs.size === 0)
  assert(taskScheduler.getExecutorsAliveOnHost("host1") === Some(Set("executor1", "executor3")))
}
test("scheduler checks for executors that can be expired from excludeOnFailure") {
  taskScheduler = setupScheduler()
  // Submit a trivial single-task stage and drive one round of resource offers.
  val singleTaskSet = FakeTask.createTaskSet(1, stageId = 0, stageAttemptId = 0)
  taskScheduler.submitTasks(singleTaskSet)
  val soleOffer = IndexedSeq(new WorkerOffer("executor0", "host0", 1))
  taskScheduler.resourceOffers(soleOffer)
  // Every round of offers must give the health tracker a chance to expire stale exclusions.
  verify(healthTracker).applyExcludeOnFailureTimeout()
}
test("if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)") {
  sc = new SparkContext("local", "TaskSchedulerImplSuite")
  val taskScheduler = new TaskSchedulerImpl(sc)
  taskScheduler.initialize(new FakeSchedulerBackend)
  // The scheduler needs a DAGScheduler for its callbacks; stub out the ones it will invoke.
  new DAGScheduler(sc, taskScheduler) {
    override def taskStarted(task: Task[_], taskInfo: TaskInfo): Unit = {}
    override def executorAdded(execId: String, host: String): Unit = {}
  }
  // One task set, one single-core executor: the lone task lands on executor0.
  val singleTaskSet = FakeTask.createTaskSet(1)
  taskScheduler.submitTasks(singleTaskSet)
  val singleCoreOffer = IndexedSeq(WorkerOffer("executor0", "host0", 1))
  val launchedTasks = taskScheduler.resourceOffers(singleCoreOffer).flatten
  assert(launchedTasks.length === 1)
  // mark executor0 as dead
  taskScheduler.executorLost("executor0", ExecutorProcessLost())
  assert(!taskScheduler.isExecutorAlive("executor0"))
  assert(!taskScheduler.hasExecutorsAliveOnHost("host0"))
  assert(taskScheduler.getExecutorsAliveOnHost("host0").isEmpty)
  // Losing the executor must also purge all bookkeeping for its in-flight task attempt.
  assert(taskScheduler.taskIdToExecutorId.isEmpty)
  assert(taskScheduler.taskIdToTaskSetManager.isEmpty)
  assert(taskScheduler.runningTasksByExecutors.get("executor0").isEmpty)
}
// A status update of TaskState.LOST implies the whole executor is gone (legacy Mesos
// fine-grained mode); the scheduler must both clean up the task attempt state and mark the
// executor itself as dead.
test("if a task finishes with TaskState.LOST its executor is marked as dead") {
  sc = new SparkContext("local", "TaskSchedulerImplSuite")
  val taskScheduler = new TaskSchedulerImpl(sc)
  taskScheduler.initialize(new FakeSchedulerBackend)
  // Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
  new DAGScheduler(sc, taskScheduler) {
    override def taskStarted(task: Task[_], taskInfo: TaskInfo): Unit = {}
    override def executorAdded(execId: String, host: String): Unit = {}
  }
  val e0Offers = IndexedSeq(WorkerOffer("executor0", "host0", 1))
  val attempt1 = FakeTask.createTaskSet(1)
  // submit attempt 1, offer resources, task gets scheduled
  taskScheduler.submitTasks(attempt1)
  val taskDescriptions = taskScheduler.resourceOffers(e0Offers).flatten
  assert(1 === taskDescriptions.length)
  // Report the task as failed with TaskState.LOST
  taskScheduler.statusUpdate(
    tid = taskDescriptions.head.taskId,
    state = TaskState.LOST,
    serializedData = ByteBuffer.allocate(0)
  )
  // Check that state associated with the lost task attempt is cleaned up:
  assert(taskScheduler.taskIdToExecutorId.isEmpty)
  assert(taskScheduler.taskIdToTaskSetManager.isEmpty)
  assert(taskScheduler.runningTasksByExecutors.get("executor0").isEmpty)
  // Check that the executor has been marked as dead
  assert(!taskScheduler.isExecutorAlive("executor0"))
  assert(!taskScheduler.hasExecutorsAliveOnHost("host0"))
  assert(taskScheduler.getExecutorsAliveOnHost("host0").isEmpty)
}
// Even with delay scheduling disabled (LOCALITY_WAIT = 0), when multiple offers arrive in
// one batch the scheduler should still prefer the local one -- regardless of offer order.
test("Locality should be used for bulk offers even with delay scheduling off") {
  val conf = new SparkConf()
    .set(config.LOCALITY_WAIT.key, "0")
  sc = new SparkContext("local", "TaskSchedulerImplSuite", conf)
  // we create a manual clock just so we can be sure the clock doesn't advance at all in this test
  val clock = new ManualClock()
  // We customize the task scheduler just to let us control the way offers are shuffled, so we
  // can be sure we try both permutations, and to control the clock on the tasksetmanager.
  val taskScheduler = new TaskSchedulerImpl(sc) {
    override def shuffleOffers(offers: IndexedSeq[WorkerOffer]): IndexedSeq[WorkerOffer] = {
      // Don't shuffle the offers around for this test. Instead, we'll just pass in all
      // the permutations we care about directly.
      offers
    }
    override def createTaskSetManager(taskSet: TaskSet, maxTaskFailures: Int): TaskSetManager = {
      new TaskSetManager(this, taskSet, maxTaskFailures, healthTrackerOpt, clock)
    }
  }
  // Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
  new DAGScheduler(sc, taskScheduler) {
    override def taskStarted(task: Task[_], taskInfo: TaskInfo): Unit = {}
    override def executorAdded(execId: String, host: String): Unit = {}
  }
  taskScheduler.initialize(new FakeSchedulerBackend)
  // Make two different offers -- one in the preferred location, one that is not.
  val offers = IndexedSeq(
    WorkerOffer("exec1", "host1", 1),
    WorkerOffer("exec2", "host2", 1)
  )
  Seq(false, true).foreach { swapOrder =>
    // Submit a taskset with locality preferences.
    val taskSet = FakeTask.createTaskSet(
      1, stageId = 1, stageAttemptId = 0, Seq(TaskLocation("host1", "exec1")))
    taskScheduler.submitTasks(taskSet)
    val shuffledOffers = if (swapOrder) offers.reverse else offers
    // Regardless of the order of the offers (after the task scheduler shuffles them), we should
    // always take advantage of the local offer.
    val taskDescs = taskScheduler.resourceOffers(shuffledOffers).flatten
    withClue(s"swapOrder = $swapOrder") {
      assert(taskDescs.size === 1)
      assert(taskDescs.head.executorId === "exec1")
    }
  }
}
// Complement of the previous test: with LOCALITY_WAIT = 0 a single non-local offer must be
// taken immediately, even though the taskset knows local offers are possible.
test("With delay scheduling off, tasks can be run at any locality level immediately") {
  val conf = new SparkConf()
    .set(config.LOCALITY_WAIT.key, "0")
  sc = new SparkContext("local", "TaskSchedulerImplSuite", conf)
  // we create a manual clock just so we can be sure the clock doesn't advance at all in this test
  val clock = new ManualClock()
  val taskScheduler = new TaskSchedulerImpl(sc) {
    override def createTaskSetManager(taskSet: TaskSet, maxTaskFailures: Int): TaskSetManager = {
      new TaskSetManager(this, taskSet, maxTaskFailures, healthTrackerOpt, clock)
    }
  }
  // Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
  new DAGScheduler(sc, taskScheduler) {
    override def taskStarted(task: Task[_], taskInfo: TaskInfo): Unit = {}
    override def executorAdded(execId: String, host: String): Unit = {}
  }
  taskScheduler.initialize(new FakeSchedulerBackend)
  // make an offer on the preferred host so the scheduler knows its alive. This is necessary
  // so that the taskset knows that it *could* take advantage of locality.
  taskScheduler.resourceOffers(IndexedSeq(WorkerOffer("exec1", "host1", 1)))
  // Submit a taskset with locality preferences.
  val taskSet = FakeTask.createTaskSet(
    1, stageId = 1, stageAttemptId = 0, Seq(TaskLocation("host1", "exec1")))
  taskScheduler.submitTasks(taskSet)
  val tsm = taskScheduler.taskSetManagerForAttempt(1, 0).get
  // make sure we've setup our test correctly, so that the taskset knows it *could* use local
  // offers.
  assert(tsm.myLocalityLevels.contains(TaskLocality.NODE_LOCAL))
  // make an offer on a non-preferred location. Since the delay is 0, we should still schedule
  // immediately.
  val taskDescs =
    taskScheduler.resourceOffers(IndexedSeq(WorkerOffer("exec2", "host2", 1))).flatten
  assert(taskDescs.size === 1)
  assert(taskDescs.head.executorId === "exec2")
}
test("TaskScheduler should throw IllegalArgumentException when schedulingMode is not supported") {
  // SchedulingMode.NONE is not a real scheduling policy; building and initializing the
  // scheduler with it must fail fast rather than limp along.
  intercept[IllegalArgumentException] {
    val scheduler = setupScheduler(
      TaskSchedulerImpl.SCHEDULER_MODE_PROPERTY -> SchedulingMode.NONE.toString)
    scheduler.initialize(new FakeSchedulerBackend)
  }
}
test("don't schedule for a barrier taskSet if available slots are less than pending tasks") {
  val taskCpus = 2
  val taskScheduler = setupSchedulerWithMaster(
    s"local[$taskCpus]",
    config.CPUS_PER_TASK.key -> taskCpus.toString)
  // Two executors with 3 free cores each at 2 cpus per task yields only 2 slots total,
  // fewer than the barrier stage's 3 tasks -- so no task may launch.
  val offers = IndexedSeq(
    new WorkerOffer("executor0", "host0", 3, Some("192.168.0.101:49625")),
    new WorkerOffer("executor1", "host1", 3, Some("192.168.0.101:49627")))
  val barrierTaskSet = FakeTask.createBarrierTaskSet(3)
  taskScheduler.submitTasks(barrierTaskSet)
  val launched = taskScheduler.resourceOffers(offers).flatten
  assert(launched.isEmpty)
}
// Slots can be limited by a custom resource rather than cores: each executor has spare cores
// but only one GPU, so with 1 gpu per task there are only 2 slots for 3 barrier tasks.
test("don't schedule for a barrier taskSet if available slots are less than " +
  "pending tasks gpus limiting") {
  val taskCpus = 1
  val taskScheduler = setupSchedulerWithMaster(
    s"local[$taskCpus]", config.CPUS_PER_TASK.key -> taskCpus.toString,
    "spark.executor.resource.gpu.amount" -> "1", "spark.task.resource.gpu.amount" -> "1")
  val numFreeCores = 3
  val workerOffers = IndexedSeq(
    new WorkerOffer("executor0", "host0", numFreeCores, Some("192.168.0.101:49625"),
      Map("gpu" -> Seq("0").toBuffer)),
    new WorkerOffer("executor1", "host1", numFreeCores, Some("192.168.0.101:49627"),
      Map("gpu" -> Seq("0").toBuffer)))
  val attempt1 = FakeTask.createBarrierTaskSet(3)
  taskScheduler.submitTasks(attempt1)
  // Barrier stages are all-or-nothing: with only 2 gpu-limited slots, nothing launches.
  val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
  assert(0 === taskDescriptions.length)
}
test("schedule tasks for a barrier taskSet if all tasks can be launched together gpus") {
  val taskCpus = 1
  val taskScheduler = setupSchedulerWithMaster(
    s"local[$taskCpus]", config.CPUS_PER_TASK.key -> taskCpus.toString,
    "spark.executor.resource.gpu.amount" -> "1", "spark.task.resource.gpu.amount" -> "1")
  // Three executors, each with spare cores and one GPU: exactly enough slots for a
  // 3-task barrier stage needing 1 cpu + 1 gpu per task, so the whole stage launches at once.
  // Each offer gets its own fresh gpu-address buffer (the scheduler consumes addresses).
  def gpuOffer(execId: String, host: String, port: Int): WorkerOffer =
    new WorkerOffer(execId, host, 3, Some(s"192.168.0.101:$port"),
      Map("gpu" -> Seq("0").toBuffer))
  val offers = IndexedSeq(
    gpuOffer("executor0", "host0", 49625),
    gpuOffer("executor1", "host1", 49627),
    gpuOffer("executor2", "host2", 49629))
  taskScheduler.submitTasks(FakeTask.createBarrierTaskSet(3))
  val launched = taskScheduler.resourceOffers(offers).flatten
  assert(launched.length === 3)
}
// barrier scheduling doesn't yet work with dynamic allocation but test it with another
// ResourceProfile anyway to make sure code path works when it is supported
test("schedule tasks for a barrier taskSet if all tasks can be launched together " +
  "diff ResourceProfile") {
  val taskCpus = 1
  val taskScheduler = setupSchedulerWithMaster(
    s"local[$taskCpus]", config.CPUS_PER_TASK.key -> taskCpus.toString)
  // Custom profile: executors advertise 2 cores + 2 gpus; tasks need 1 cpu + 1 gpu, so each
  // offer below provides 2 slots -> 4 slots total for the 3 barrier tasks.
  val execReqs = new ExecutorResourceRequests().cores(2).resource("gpu", 2)
  val taskReqs = new TaskResourceRequests().cpus(1).resource("gpu", 1)
  val rp = new ResourceProfile(execReqs.requests, taskReqs.requests)
  taskScheduler.sc.resourceProfileManager.addResourceProfile(rp)
  val numFreeCores = 2
  val workerOffers = IndexedSeq(
    new WorkerOffer("executor0", "host0", numFreeCores, Some("192.168.0.101:49625"),
      Map("gpu" -> Seq("0", "1").toBuffer), rp.id),
    new WorkerOffer("executor1", "host1", numFreeCores, Some("192.168.0.101:49627"),
      Map("gpu" -> Seq("0", "1").toBuffer), rp.id))
  val attempt1 = FakeTask.createBarrierTaskSet(3, rpId = rp.id)
  taskScheduler.submitTasks(attempt1)
  val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
  assert(3 === taskDescriptions.length)
}
// Same custom-profile setup as above, but with only 1 GPU per offer: 2 gpu-limited slots
// for 3 barrier tasks, so the all-or-nothing barrier stage launches nothing.
test("schedule tasks for a barrier taskSet if all tasks can be launched together " +
  "diff ResourceProfile, but not enough gpus") {
  val taskCpus = 1
  val taskScheduler = setupSchedulerWithMaster(
    s"local[$taskCpus]", config.CPUS_PER_TASK.key -> taskCpus.toString)
  val execReqs = new ExecutorResourceRequests().cores(2).resource("gpu", 2)
  val taskReqs = new TaskResourceRequests().cpus(1).resource("gpu", 1)
  val rp = new ResourceProfile(execReqs.requests, taskReqs.requests)
  taskScheduler.sc.resourceProfileManager.addResourceProfile(rp)
  val numFreeCores = 2
  // make each of the worker offers only have 1 GPU, thus making it not enough
  val workerOffers = IndexedSeq(
    new WorkerOffer("executor0", "host0", numFreeCores, Some("192.168.0.101:49625"),
      Map("gpu" -> Seq("0").toBuffer), rp.id),
    new WorkerOffer("executor1", "host1", numFreeCores, Some("192.168.0.101:49627"),
      Map("gpu" -> Seq("0").toBuffer), rp.id))
  val attempt1 = FakeTask.createBarrierTaskSet(3, rpId = rp.id)
  taskScheduler.submitTasks(attempt1)
  val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
  assert(0 === taskDescriptions.length)
}
test("schedule tasks for a barrier taskSet if all tasks can be launched together") {
  val taskCpus = 2
  val taskScheduler = setupSchedulerWithMaster(
    s"local[$taskCpus]",
    config.CPUS_PER_TASK.key -> taskCpus.toString)
  // Three executors at 3 free cores apiece with 2 cpus per task gives one slot per
  // barrier task, so the entire stage can launch in a single round of offers.
  val offers = IndexedSeq(
    new WorkerOffer("executor0", "host0", 3, Some("192.168.0.101:49625")),
    new WorkerOffer("executor1", "host1", 3, Some("192.168.0.101:49627")),
    new WorkerOffer("executor2", "host2", 3, Some("192.168.0.101:49629")))
  val barrierTaskSet = FakeTask.createBarrierTaskSet(3)
  taskScheduler.submitTasks(barrierTaskSet)
  val launched = taskScheduler.resourceOffers(offers).flatten
  assert(launched.length === 3)
}
// SPARK-29263: a higher-priority (lower priority number) taskset is served first; after it
// consumes a slot, the remaining slots are insufficient for the barrier stage, which then
// launches nothing in this round.
test("SPARK-29263: barrier TaskSet can't schedule when higher prio taskset takes the slots") {
  val taskCpus = 2
  val taskScheduler = setupSchedulerWithMaster(
    s"local[$taskCpus]",
    config.CPUS_PER_TASK.key -> taskCpus.toString)
  val numFreeCores = 3
  val workerOffers = IndexedSeq(
    new WorkerOffer("executor0", "host0", numFreeCores, Some("192.168.0.101:49625")),
    new WorkerOffer("executor1", "host1", numFreeCores, Some("192.168.0.101:49627")),
    new WorkerOffer("executor2", "host2", numFreeCores, Some("192.168.0.101:49629")))
  val barrier = FakeTask.createBarrierTaskSet(3, stageId = 0, stageAttemptId = 0, priority = 1,
    ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID)
  val highPrio = FakeTask.createTaskSet(1, stageId = 1, stageAttemptId = 0, priority = 0,
    rpId = ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID)
  // submit highPrio and barrier taskSet
  taskScheduler.submitTasks(highPrio)
  taskScheduler.submitTasks(barrier)
  val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
  // it schedules the highPrio task first, and then will not have enough slots to schedule
  // the barrier taskset
  assert(1 === taskDescriptions.length)
}
// cancelTasks is the "fail the stage" path: all running tasks are killed, the manager is
// zombied, and the taskset is removed from the scheduler entirely.
test("cancelTasks shall kill all the running tasks and fail the stage") {
  val taskScheduler = setupScheduler()
  taskScheduler.initialize(new FakeSchedulerBackend {
    override def killTask(
        taskId: Long,
        executorId: String,
        interruptThread: Boolean,
        reason: String): Unit = {
      // Since we only submit one stage attempt, the following call is sufficient to mark the
      // task as killed.
      taskScheduler.taskSetManagerForAttempt(0, 0).get.runningTasksSet.remove(taskId)
    }
  })
  val attempt1 = FakeTask.createTaskSet(10)
  taskScheduler.submitTasks(attempt1)
  // Two single-core executors, so exactly 2 of the 10 tasks start running.
  val workerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", 1),
    new WorkerOffer("executor1", "host1", 1))
  val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
  assert(2 === taskDescriptions.length)
  val tsm = taskScheduler.taskSetManagerForAttempt(0, 0).get
  assert(2 === tsm.runningTasks)
  taskScheduler.cancelTasks(0, false)
  assert(0 === tsm.runningTasks)
  assert(tsm.isZombie)
  assert(taskScheduler.taskSetManagerForAttempt(0, 0).isEmpty)
}
  test("killAllTaskAttempts shall kill all the running tasks and not fail the stage") {
    val taskScheduler = setupScheduler()
    // Install a backend whose killTask synchronously drops the task from the running set,
    // so the kill appears to complete immediately from the test's point of view.
    taskScheduler.initialize(new FakeSchedulerBackend {
      override def killTask(
          taskId: Long,
          executorId: String,
          interruptThread: Boolean,
          reason: String): Unit = {
        // Since we only submit one stage attempt, the following call is sufficient to mark the
        // task as killed.
        taskScheduler.taskSetManagerForAttempt(0, 0).get.runningTasksSet.remove(taskId)
      }
    })
    val attempt1 = FakeTask.createTaskSet(10)
    taskScheduler.submitTasks(attempt1)
    // Two single-core executors: exactly two of the ten tasks start running.
    val workerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", 1),
      new WorkerOffer("executor1", "host1", 1))
    val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
    assert(2 === taskDescriptions.length)
    val tsm = taskScheduler.taskSetManagerForAttempt(0, 0).get
    assert(2 === tsm.runningTasks)
    taskScheduler.killAllTaskAttempts(0, false, "test")
    // All running tasks are killed, but the task set is neither zombified nor removed:
    // the stage itself stays alive.
    assert(0 === tsm.runningTasks)
    assert(!tsm.isZombie)
    assert(taskScheduler.taskSetManagerForAttempt(0, 0).isDefined)
  }
test("mark taskset for a barrier stage as zombie in case a task fails") {
val taskScheduler = setupScheduler()
val attempt = FakeTask.createBarrierTaskSet(3)
taskScheduler.submitTasks(attempt)
val tsm = taskScheduler.taskSetManagerForAttempt(0, 0).get
val offers = (0 until 3).map{ idx =>
WorkerOffer(s"exec-$idx", s"host-$idx", 1, Some(s"192.168.0.101:4962$idx"))
}
taskScheduler.resourceOffers(offers)
assert(tsm.runningTasks === 3)
// Fail a task from the stage attempt.
tsm.handleFailedTask(tsm.taskAttempts.head.head.taskId, TaskState.FAILED, TaskKilled("test"))
assert(tsm.isZombie)
}
  test("Scheduler correctly accounts for GPUs per task") {
    val taskCpus = 1
    val taskGpus = 1
    val executorGpus = 4
    val executorCpus = 4
    val taskScheduler = setupScheduler(numCores = executorCpus,
      config.CPUS_PER_TASK.key -> taskCpus.toString,
      TASK_GPU_ID.amountConf -> taskGpus.toString,
      EXECUTOR_GPU_ID.amountConf -> executorGpus.toString,
      config.EXECUTOR_CORES.key -> executorCpus.toString)
    val taskSet = FakeTask.createTaskSet(3)
    // Only 2 free cores are offered, so at most 2 tasks can launch even though
    // the offer carries 4 GPU addresses.
    val numFreeCores = 2
    val resources = Map(GPU -> ArrayBuffer("0", "1", "2", "3"))
    val singleCoreWorkerOffers =
      IndexedSeq(new WorkerOffer("executor0", "host0", numFreeCores, None, resources))
    val zeroGpuWorkerOffers =
      IndexedSeq(new WorkerOffer("executor0", "host0", numFreeCores, None, Map.empty))
    taskScheduler.submitTasks(taskSet)
    // WorkerOffer doesn't contain GPU resource, don't launch any task.
    var taskDescriptions = taskScheduler.resourceOffers(zeroGpuWorkerOffers).flatten
    assert(0 === taskDescriptions.length)
    assert(!failedTaskSet)
    // Launch tasks on executor that satisfies resource requirements.
    taskDescriptions = taskScheduler.resourceOffers(singleCoreWorkerOffers).flatten
    assert(2 === taskDescriptions.length)
    assert(!failedTaskSet)
    // GPU addresses are assigned in order, one per task.
    assert(ArrayBuffer("0") === taskDescriptions(0).resources.get(GPU).get.addresses)
    assert(ArrayBuffer("1") === taskDescriptions(1).resources.get(GPU).get.addresses)
  }
test("Scheduler correctly accounts for GPUs per task with fractional amount") {
val taskCpus = 1
val taskGpus = 0.33
val executorGpus = 1
val executorCpus = 4
val taskScheduler = setupScheduler(numCores = executorCpus,
config.CPUS_PER_TASK.key -> taskCpus.toString,
TASK_GPU_ID.amountConf -> taskGpus.toString,
EXECUTOR_GPU_ID.amountConf -> executorGpus.toString,
config.EXECUTOR_CORES.key -> executorCpus.toString)
val taskSet = FakeTask.createTaskSet(5)
val numFreeCores = 4
val resources = Map(GPU -> ArrayBuffer("0", "0", "0"))
val singleCoreWorkerOffers =
IndexedSeq(new WorkerOffer("executor0", "host0", numFreeCores, None, resources))
taskScheduler.submitTasks(taskSet)
// Launch tasks on executor that satisfies resource requirements.
val taskDescriptions = taskScheduler.resourceOffers(singleCoreWorkerOffers).flatten
assert(3 === taskDescriptions.length)
assert(!failedTaskSet)
assert(ArrayBuffer("0") === taskDescriptions(0).resources.get(GPU).get.addresses)
assert(ArrayBuffer("0") === taskDescriptions(1).resources.get(GPU).get.addresses)
assert(ArrayBuffer("0") === taskDescriptions(2).resources.get(GPU).get.addresses)
}
  test("Scheduler works with multiple ResourceProfiles and gpus") {
    val taskCpus = 1
    val taskGpus = 1
    val executorGpus = 4
    val executorCpus = 4
    val taskScheduler = setupScheduler(numCores = executorCpus,
      config.CPUS_PER_TASK.key -> taskCpus.toString,
      TASK_GPU_ID.amountConf -> taskGpus.toString,
      EXECUTOR_GPU_ID.amountConf -> executorGpus.toString,
      config.EXECUTOR_CORES.key -> executorCpus.toString)
    // Custom profile: 6 cores / 6 GPUs per executor, 2 CPUs + 2 GPUs per task,
    // so an executor using this profile fits 3 tasks.
    val ereqs = new ExecutorResourceRequests().cores(6).resource(GPU, 6)
    val treqs = new TaskResourceRequests().cpus(2).resource(GPU, 2)
    val rp = new ResourceProfile(ereqs.requests, treqs.requests)
    taskScheduler.sc.resourceProfileManager.addResourceProfile(rp)
    val taskSet = FakeTask.createTaskSet(3)
    val rpTaskSet = FakeTask.createTaskSet(5, stageId = 1, stageAttemptId = 0,
      priority = 0, rpId = rp.id)
    // executor0 serves the default profile (2 cores), executor1 the custom one (6 cores).
    val resourcesDefaultProf = Map(GPU -> ArrayBuffer("0", "1", "2", "3"))
    val resources = Map(GPU -> ArrayBuffer("4", "5", "6", "7", "8", "9"))
    val workerOffers =
      IndexedSeq(new WorkerOffer("executor0", "host0", 2, None, resourcesDefaultProf),
        new WorkerOffer("executor1", "host1", 6, None, resources, rp.id))
    taskScheduler.submitTasks(taskSet)
    taskScheduler.submitTasks(rpTaskSet)
    // should have 2 for default profile and 2 for additional resource profile
    var taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
    assert(5 === taskDescriptions.length)
    // Custom-profile tasks get 2 GPU addresses each, default-profile tasks get 1.
    var has2Gpus = 0
    var has1Gpu = 0
    for (tDesc <- taskDescriptions) {
      assert(tDesc.resources.contains(GPU))
      if (tDesc.resources(GPU).addresses.size == 2) {
        has2Gpus += 1
      }
      if (tDesc.resources(GPU).addresses.size == 1) {
        has1Gpu += 1
      }
    }
    assert(has2Gpus == 3)
    assert(has1Gpu == 2)
    val resources3 = Map(GPU -> ArrayBuffer("14", "15", "16", "17", "18", "19"))
    // clear the first 2 worker offers so they don't have any room and add a third
    // for the resource profile
    val workerOffers3 = IndexedSeq(
      new WorkerOffer("executor0", "host0", 0, None, Map.empty),
      new WorkerOffer("executor1", "host1", 0, None, Map.empty, rp.id),
      new WorkerOffer("executor2", "host2", 6, None, resources3, rp.id))
    taskDescriptions = taskScheduler.resourceOffers(workerOffers3).flatten
    // Only the 2 remaining custom-profile tasks can launch, each with 2 GPUs.
    assert(2 === taskDescriptions.length)
    assert(taskDescriptions.head.resources.contains(GPU))
    assert(2 == taskDescriptions.head.resources(GPU).addresses.size)
  }
  /**
   * Creates a SparkContext plus a TaskSchedulerImpl whose TaskSetManagers are Mockito spies
   * (recorded in stageToMockTaskSetManager so tests can inspect copiesRunning), then submits
   * `numTasks` tasks and launches exactly one of them on each of executor0..executor{n-1}
   * (one executor per host).
   *
   * @param clock manual clock injected into the scheduler so tests control time
   * @param numTasks number of tasks (and hosts/executors) to set up
   * @return the configured scheduler with all tasks running
   */
  private def setupSchedulerForDecommissionTests(clock: Clock, numTasks: Int): TaskSchedulerImpl = {
    // one task per host
    val numHosts = numTasks
    val conf = new SparkConf()
      .setMaster(s"local[$numHosts]")
      .setAppName("TaskSchedulerImplSuite")
      .set(config.CPUS_PER_TASK.key, "1")
    sc = new SparkContext(conf)
    val maxTaskFailures = sc.conf.get(config.TASK_MAX_FAILURES)
    taskScheduler = new TaskSchedulerImpl(sc, maxTaskFailures, clock = clock) {
      override def createTaskSetManager(taskSet: TaskSet, maxFailures: Int): TaskSetManager = {
        val tsm = super.createTaskSetManager(taskSet, maxFailures)
        // we need to create a spied tsm so that we can see the copies running
        val tsmSpy = spy(tsm)
        stageToMockTaskSetManager(taskSet.stageId) = tsmSpy
        tsmSpy
      }
    }
    setupHelper()
    // Spawn the tasks on different executors/hosts
    taskScheduler.submitTasks(FakeTask.createTaskSet(numTasks))
    for (i <- 0 until numTasks) {
      val executorId = s"executor$i"
      val taskDescriptions = taskScheduler.resourceOffers(IndexedSeq(WorkerOffer(
        executorId, s"host$i", 1))).flatten
      assert(taskDescriptions.size === 1)
      assert(taskDescriptions(0).executorId == executorId)
      assert(taskDescriptions(0).index === i)
    }
    taskScheduler
  }
test("scheduler should keep the decommission state where host was decommissioned") {
val clock = new ManualClock(10000L)
val scheduler = setupSchedulerForDecommissionTests(clock, 2)
val decomTime = clock.getTimeMillis()
scheduler.executorDecommission("executor0", ExecutorDecommissionInfo("0", None))
scheduler.executorDecommission("executor1", ExecutorDecommissionInfo("1", Some("host1")))
assert(scheduler.getExecutorDecommissionState("executor0")
=== Some(ExecutorDecommissionState(decomTime, None)))
assert(scheduler.getExecutorDecommissionState("executor1")
=== Some(ExecutorDecommissionState(decomTime, Some("host1"))))
assert(scheduler.getExecutorDecommissionState("executor2").isEmpty)
}
  test("test full decommissioning flow") {
    val clock = new ManualClock(10000L)
    val scheduler = setupSchedulerForDecommissionTests(clock, 2)
    val manager = stageToMockTaskSetManager(0)
    // The task started should be running.
    assert(manager.copiesRunning.take(2) === Array(1, 1))
    // executor 0 is decommissioned after losing: decommissioning an already-lost
    // executor records no state.
    assert(scheduler.getExecutorDecommissionState("executor0").isEmpty)
    scheduler.executorLost("executor0", ExecutorExited(0, false, "normal"))
    assert(scheduler.getExecutorDecommissionState("executor0").isEmpty)
    scheduler.executorDecommission("executor0", ExecutorDecommissionInfo("", None))
    assert(scheduler.getExecutorDecommissionState("executor0").isEmpty)
    // 0th task just died above
    assert(manager.copiesRunning.take(2) === Array(0, 1))
    assert(scheduler.executorsPendingDecommission.isEmpty)
    clock.advance(5000)
    // executor1 hasn't been decommissioned yet
    assert(scheduler.getExecutorDecommissionState("executor1").isEmpty)
    // executor 1 is decommissioned before losing, so its state is recorded.
    scheduler.executorDecommission("executor1", ExecutorDecommissionInfo("", None))
    assert(scheduler.getExecutorDecommissionState("executor1").isDefined)
    clock.advance(2000)
    // executor1 is eventually lost; afterwards nothing is pending decommission.
    scheduler.executorLost("executor1", ExecutorExited(0, false, "normal"))
    assert(scheduler.executorsPendingDecommission.isEmpty)
    // So now both the tasks are no longer running
    assert(manager.copiesRunning.take(2) === Array(0, 0))
    clock.advance(2000)
    // Now give it some resources and both tasks should be rerun
    val taskDescriptions = taskScheduler.resourceOffers(IndexedSeq(
      WorkerOffer("executor2", "host2", 1), WorkerOffer("executor3", "host3", 1))).flatten
    assert(taskDescriptions.size === 2)
    assert(taskDescriptions.map(_.index).sorted == Seq(0, 1))
    assert(manager.copiesRunning.take(2) === Array(1, 1))
  }
  test("SPARK-24818: test delay scheduling for barrier TaskSetManager") {
    val clock = new ManualClock()
    val conf = new SparkConf().set(config.LEGACY_LOCALITY_WAIT_RESET, false)
    val sched = setupTaskSchedulerForLocalityTests(clock, conf)
    // Call resourceOffers() first, so executor-0 can be used
    // to calculate the locality levels of the TaskSetManager later
    sched.resourceOffers(Seq(WorkerOffer("executor-0", "host1", 1, Some("host1"))).toIndexedSeq)
    // Single barrier task that prefers executor-0 on host1 (PROCESS_LOCAL preference).
    val prefLocs = Seq(TaskLocation("host1", "executor-0"))
    val barrierTaskSet = FakeTask.createBarrierTaskSet(1, prefLocs)
    sched.submitTasks(barrierTaskSet)
    val tsm = sched.taskSetManagerForAttempt(0, 0).get
    assert(tsm.myLocalityLevels ===
      Array(TaskLocality.PROCESS_LOCAL, TaskLocality.NODE_LOCAL, TaskLocality.ANY))
    val offers = Seq(WorkerOffer("executor-1", "host1", 1, Some("host1"))).toIndexedSeq
    var tasks = sched.resourceOffers(offers).flatten
    // The TaskSetManager prefers executor-0 for the PROCESS_LOCAL location but there's no
    // available offer of executor-0 in this round, so task scheduling will be delayed first.
    assert(tasks.length === 0)
    // Advance the clock so the TaskSetManager can move to next locality level(NODE_LOCAL)
    clock.advance(4000)
    // Same offer now succeeds at NODE_LOCAL on executor-1 (same host, different executor).
    tasks = sched.resourceOffers(offers).flatten
    assert(tasks.length === 1)
    assert(tsm.taskInfos(tasks.head.taskId).taskLocality === TaskLocality.NODE_LOCAL)
  }
  test("SPARK-24818: test resource revert of barrier TaskSetManager") {
    val clock = new ManualClock()
    val conf = new SparkConf().set(config.LEGACY_LOCALITY_WAIT_RESET, false)
    val sched = setupTaskSchedulerForLocalityTests(clock, conf)
    // Call resourceOffers() first, so executors can be used
    // to calculate the locality levels of the TaskSetManager later
    sched.resourceOffers(Seq(WorkerOffer("executor-0", "host1", 1, Some("host1"))).toIndexedSeq)
    // Barrier set of 2 tasks preferring executor-0 and executor-1 respectively.
    val barrierTaskSet =
      FakeTask.createBarrierTaskSet(2, 0, 0, 0, 0,
        Seq(TaskLocation("host1", "executor-0")), Seq(TaskLocation("host1", "executor-1")))
    val normalTaskSet = FakeTask.createTaskSet(2, 1, 0, 0, 0)
    // Submit barrier task set first, so we can schedule it before the normal task set in order to
    // test the resource revert behaviour of the barrier TaskSetManager
    sched.submitTasks(barrierTaskSet)
    sched.submitTasks(normalTaskSet)
    val barrierTSM = sched.taskSetManagerForAttempt(0, 0).get
    val normalTSM = sched.taskSetManagerForAttempt(1, 0).get
    assert(barrierTSM.myLocalityLevels ===
      Array(TaskLocality.PROCESS_LOCAL, TaskLocality.NODE_LOCAL, TaskLocality.ANY))
    assert(normalTSM.myLocalityLevels === Array(TaskLocality.NO_PREF, TaskLocality.ANY))
    // The barrier TaskSetManager can not launch all tasks because of delay scheduling.
    // So it will revert assigned resources and let the normal TaskSetManager to schedule first.
    var tasks = sched.resourceOffers(
      Seq(WorkerOffer("executor-0", "host1", 1, Some("host1")),
        WorkerOffer("executor-2", "host1", 1, Some("host1"))).toIndexedSeq).flatten
    assert(tasks.length === 2)
    var taskId = tasks.head.taskId
    // The launched tasks come from the normal set (spot-checked via the first task id).
    assert(!barrierTSM.runningTasksSet.contains(taskId))
    assert(normalTSM.runningTasksSet.contains(taskId))
    // Advance the clock so the TaskSetManager can move to next locality level(NODE_LOCAL)
    // and launch all tasks.
    clock.advance(4000)
    tasks = sched.resourceOffers(
      Seq(WorkerOffer("executor-0", "host1", 1, Some("host1")),
        WorkerOffer("executor-2", "host1", 1, Some("host1"))).toIndexedSeq).flatten
    assert(tasks.length === 2)
    taskId = tasks.head.taskId
    // This round the barrier set launched, not the normal one.
    assert(barrierTSM.runningTasksSet.contains(taskId))
    assert(!normalTSM.runningTasksSet.contains(taskId))
  }
  test("SPARK-37300: TaskSchedulerImpl should ignore task finished" +
    " event if its task was finished state") {
    val taskScheduler = setupScheduler()
    // The latch counts one per Runnable processed by the (single-threaded) result getter;
    // the test waits for both queued events to be processed before asserting.
    val latch = new CountDownLatch(2)
    val resultGetter = new TaskResultGetter(sc.env, taskScheduler) {
      override protected val getTaskResultExecutor: ExecutorService =
        new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue[Runnable],
          ThreadUtils.namedThreadFactory("task-result-getter")) {
          override def execute(command: Runnable): Unit = {
            super.execute(new Runnable {
              override def run(): Unit = {
                command.run()
                latch.countDown()
              }
            })
          }
        }
      def taskResultExecutor() : ExecutorService = getTaskResultExecutor
    }
    taskScheduler.taskResultGetter = resultGetter
    val workerOffers = IndexedSeq(new WorkerOffer("executor0", "host0", 1),
      new WorkerOffer("executor1", "host1", 1))
    val task1 = new ShuffleMapTask(1, 0, null, new Partition {
      override def index: Int = 0
    }, Seq(TaskLocation("host0", "executor0")), new Properties, null)
    val task2 = new ShuffleMapTask(1, 0, null, new Partition {
      override def index: Int = 1
    }, Seq(TaskLocation("host1", "executor1")), new Properties, null)
    val taskSet = new TaskSet(Array(task1, task2), 0, 0, 0, null, 0)
    taskScheduler.submitTasks(taskSet)
    val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
    assert(2 === taskDescriptions.length)
    val ser = sc.env.serializer.newInstance()
    val directResult = new DirectTaskResult[Int](ser.serialize(1), Seq(), Array.empty)
    val resultBytes = ser.serialize(directResult)
    // A Runnable that blocks the single result-getter thread until markTaskDone is called,
    // so task1's FINISHED update stays queued behind it.
    val busyTask = new Runnable {
      val lock : Object = new Object
      var running : AtomicBoolean = new AtomicBoolean(false)
      override def run(): Unit = {
        lock.synchronized {
          running.set(true)
          lock.wait()
        }
      }
      def markTaskDone: Unit = {
        lock.synchronized {
          lock.notify()
        }
      }
    }
    // make getTaskResultExecutor busy
    resultGetter.taskResultExecutor().submit(busyTask)
    // task1 finished
    val tid = taskDescriptions(0).taskId
    taskScheduler.statusUpdate(
      tid = tid,
      state = TaskState.FINISHED,
      serializedData = resultBytes
    )
    // mark executor heartbeat timed out
    taskScheduler.executorLost(taskDescriptions(0).executorId, ExecutorProcessLost("Executor " +
      "heartbeat timed out"))
    // Wait busyTask begin running
    eventually(timeout(10.seconds)) {
      assert(busyTask.running.get())
    }
    // Unblock the result getter so the queued FINISHED event is finally processed.
    busyTask.markTaskDone
    // Wait until all events are processed
    latch.await()
    // The late FINISHED event for the already-failed task must not count as a success.
    val taskSetManager = taskScheduler.taskIdToTaskSetManager.get(taskDescriptions(1).taskId)
    assert(taskSetManager != null)
    assert(0 == taskSetManager.tasksSuccessful)
    assert(!taskSetManager.successful(taskDescriptions(0).index))
  }
  /**
   * Used by tests to simulate a task failure. This calls the failure handler explicitly, to ensure
   * that all the state is updated when this method returns. Otherwise, there's no way to know when
   * that happens, since the operation is performed asynchronously by the TaskResultGetter.
   *
   * @param tid id of the task to fail
   * @param state terminal TaskState to report for the task
   * @param reason the failure reason forwarded to the TaskSetManager
   * @param tsm the TaskSetManager that owns the task
   */
  private def failTask(
      tid: Long,
      state: TaskState.TaskState,
      reason: TaskFailedReason,
      tsm: TaskSetManager): Unit = {
    taskScheduler.statusUpdate(tid, state, ByteBuffer.allocate(0))
    taskScheduler.handleFailedTask(tsm, tid, state, reason)
  }
}
| ueshin/apache-spark | core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala | Scala | apache-2.0 | 93,404 |
package se.culvertsoft.mnet
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.JavaConversions.asScalaBuffer
import scala.collection.mutable.ArrayBuffer
import org.junit.Test
import TestUtils.assertFor01sec
import TestUtils.assertWithin1sec
import TestUtils.getTime
import se.culvertsoft.mnet.backend.WebSockBackEnd
class CheckLatency {
@Test
def latencyCheck() {
val b1Msgs = new ConcurrentLinkedQueue[Message]
val b2Msgs = new ConcurrentLinkedQueue[Message]
val b1 = TestUtils.newNode(1500)(b1Msgs.add(_)).start()
val b2 = TestUtils.newNode(1501)(b2Msgs.add(_)).start()
val ws1 = b1.getBackEnd[WebSockBackEnd]
val ws2 = b2.getBackEnd[WebSockBackEnd]
assertFor01sec(b1.getRoutes.isEmpty && b2.getRoutes.isEmpty)
ws2.addOutboundConnection(ws1.listenPort)
assertWithin1sec(b1.getRoutes.nonEmpty && b2.getRoutes.nonEmpty)
val errMsgSentByB1 = new ErrorMessage().setMsg("ErrorFromB1")
val errMsgSentByB2 = new ErrorMessage().setMsg("ErrorFromB2")
val t00 = getTime
var i = 0
val n = 10000
var nRecvd = 0
while (i < n) {
b2.broadcast(errMsgSentByB2)
while (b1Msgs.isEmpty) {}
nRecvd += 1
b1Msgs.clear()
i += 1
}
val nBytes = errMsgSentByB1.toString().size.toLong * n.toLong
val dt = getTime - t00
val bits = nBytes.toLong * 8L
val mbits = bits / 1024 / 1024
val mbits_per_sec = mbits.toDouble / dt
val msgs_per_sec = n.toDouble / dt
assert(msgs_per_sec > 5000)
assert(n == nRecvd)
b2.stop()
b1.stop()
}
} | culvertsoft/mnet | mnet-backend/src/test/scala/se/culvertsoft/mnet/CheckLatency.scala | Scala | gpl-2.0 | 1,587 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.algorithm
/**
 * Tests for [[OnlineDelay]]: each call to `next` returns the value that was
 * fed in `n` samples earlier (NaN until the buffer has filled).
 */
class OnlineDelaySuite extends BaseOnlineAlgorithmSuite {

  override def newInstance: OnlineAlgorithm = OnlineDelay(10)

  test("n = 1") {
    val delay = OnlineDelay(1)
    // Nothing buffered yet for the very first sample.
    assert(delay.next(0.0).isNaN)
    assertEquals(delay.next(1.0), 0.0)
    assertEquals(delay.next(2.0), 1.0)
    assertEquals(delay.next(Double.NaN), 2.0)
    assert(delay.isEmpty)
    assert(delay.next(Double.NaN).isNaN)
  }

  test("n = 1, reset") {
    val delay = OnlineDelay(1)
    assert(delay.next(0.0).isNaN)
    assertEquals(delay.next(1.0), 0.0)
    // After a reset the buffered value is discarded.
    delay.reset()
    assert(delay.next(2.0).isNaN)
    assertEquals(delay.next(3.0), 2.0)
  }

  test("n = 2") {
    val delay = OnlineDelay(2)
    // Two NaNs while the 2-slot buffer fills up.
    assert(delay.next(0.0).isNaN)
    assert(delay.next(1.0).isNaN)
    assertEquals(delay.next(2.0), 0.0)
    assertEquals(delay.next(Double.NaN), 1.0)
    assert(!delay.isEmpty)
    assertEquals(delay.next(Double.NaN), 2.0)
    assert(delay.isEmpty)
    assert(delay.next(Double.NaN).isNaN)
    assert(delay.isEmpty)
  }
}
| Netflix/atlas | atlas-core/src/test/scala/com/netflix/atlas/core/algorithm/OnlineDelaySuite.scala | Scala | apache-2.0 | 1,621 |
/*
* This file is part of KatLib, licensed under the MIT License (MIT).
*
* Copyright (c) 2016 Katrix
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
* associated documentation files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge, publish, distribute,
* sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
* NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package io.github.katrix.katlib.lib
import org.spongepowered.api.text.Text
import io.github.katrix.katlib.helper.Implicits.RichString
/**
 * Common `Text` keys used to name command arguments; each key is the argument
 * name rendered as a Sponge Text via `RichString.text`.
 */
object LibCommonCommandKey {
  final val Command: Text = "command".text
  final val Player: Text = "player".text
  final val World: Text = "world".text
  final val Dimension: Text = "dimension".text
  final val Location: Text = "location".text
  final val String: Text = "string".text
  final val Integer: Text = "integer".text
  // NOTE(review): Double maps to the word "fractional" — presumably the
  // user-facing argument name; confirm this is intentional.
  final val Double: Text = "fractional".text
  final val Boolean: Text = "boolean".text
}
| Katrix-/KatLib | katLib/shared/src/main/scala/io/github/katrix/katlib/lib/LibCommonCommandKey.scala | Scala | mit | 1,751 |
package com.guidewire.tools.chronos.client.api.v2
import java.lang.{Iterable => JavaIterable}
import java.net.URI
import com.guidewire.tools.chronos.client._
import org.joda.time._
import org.joda.time.format.{ISODateTimeFormat, ISOPeriodFormat}
import play.api.libs.json._
import scala.concurrent.{ExecutionContext, Future}
import scalaz._
/**
 * An ISO 8601 "repeating interval" job schedule, rendered as
 * `R<n>/<start>/<period>` (bare `R` when repeating forever).
 *
 * @param recurrences number of repetitions; -1 means repeat forever, 0 means none
 * @param starting date/time of the first run
 * @param period interval between runs
 */
sealed case class Schedule(recurrences: Long, starting: ReadableDateTime, period: ReadablePeriod) {
  require(recurrences >= -1L, s"Recurrences must be infinite (-1), 0 (none), or any number above that")
  require(starting ne null, s"Missing the starting date/time")
  require(period ne null, s"Missing the period")
  // A negative recurrence count encodes "repeat forever".
  val repeatsForever = recurrences < 0
  def isRepeatingForever = repeatsForever
  // Renders the ISO 8601 repeating-interval form used by the Chronos API.
  override def toString = {
    // date() plus tTime() together produce a full ISO date-time (tTime prints the leading 'T').
    val starting_as_string = s"${ISODateTimeFormat.date().print(starting)}${ISODateTimeFormat.tTime().print(starting)}"
    val period_as_string = ISOPeriodFormat.standard.print(period)
    if (!repeatsForever)
      s"R$recurrences/$starting_as_string/$period_as_string"
    else
      s"R/$starting_as_string/$period_as_string"
  }
}
object Schedule {
  // Matches an ISO 8601 repeating interval: R[n]/<start datetime>/<period>.
  // The period group is intentionally mandatory: the previous pattern made it
  // optional ("(P.*)?"), so canParse accepted inputs like "R5/2014-01-01/"
  // whose match then produced a null period group and apply() threw a
  // NullPointerException from parsePeriod despite its canParse guard passing.
  private[this] val ISO8601Expression = """(R[0-9]*)/(.*)/(P.*)""".r

  /** Returns true when `input` is a parseable ISO 8601 repeating interval. */
  def canParse(input: String): Boolean =
    ISO8601Expression.pattern.matcher(input).matches()

  /**
   * Parses an ISO 8601 repeating interval such as `R5/2014-01-01T00:00:00Z/PT1H`.
   * A bare `R` (no count) means "repeat forever" and maps to -1 recurrences.
   *
   * @throws IllegalArgumentException if `input` does not match the expected form
   */
  def apply(input: String): Schedule = {
    require(canParse(input), s"Invalid schedule: $input")
    val ISO8601Expression(repetitions_as_string, starting_as_string, period_as_string) = input
    val period = ISOPeriodFormat.standard.parsePeriod(period_as_string)
    val start = DateTime.parse(starting_as_string)
    val repetitions =
      if (repetitions_as_string.length <= 1)
        -1L
      else
        repetitions_as_string.substring(1).toLong
    Schedule(repetitions, start, period)
  }
}
/**
 * A single Chronos job definition, used both when creating jobs and when
 * reading them back from the API.
 *
 * Exactly one of [[schedule]] (time-triggered job) or [[parents]]
 * (dependency-triggered job) should be defined; [[isScheduled]] and
 * [[isDependent]] reflect which kind this instance is.
 */
sealed case class Job(
    name: String
  , command: String
  , epsilon: ReadablePeriod = Minutes.minutes(5).toPeriod
  , successCount: Long = 0L
  , errorCount: Long = 0L
  , executor: String = ""
  , executorFlags: String = ""
  , retries: Int = 2
  , owner: String = ""
  , lastSuccess: OptionalDateTime = "".toOptionalDateTime
  , lastError: OptionalDateTime = "".toOptionalDateTime
  , async: Boolean = false
  , cpus: FrequencyUnit = 0.0D.asMHz
  , disk: DataUnit = 0.asMB
  , mem: DataUnit = 0.asMB
  , disabled: Boolean = false
  , uris: Seq[URI] = Seq()
  , errorsSinceLastSuccess: Option[Long] = Some(0L)
  , parents: Option[Set[String]] = None
  , schedule: Option[Schedule] = None
  , container: Option[Container] = None
) {
  /** True when this job runs on a time-based schedule (and has no parents). */
  def isScheduled: Boolean = schedule.nonEmpty && parents.isEmpty

  /** True when this job is triggered by parent jobs (and has no schedule). */
  def isDependent: Boolean = parents.nonEmpty && schedule.isEmpty
}
/** Convenience factories for building scheduled and dependent [[Job]] instances. */
object Jobs {
  // NOTE(review): implicit JavaConversions is deprecated; it is used here to
  // let the JavaIterable overloads call .toSet. Consider JavaConverters/.asScala.
  import scala.collection.JavaConversions._
  /** Creates a time-triggered job from a [[Schedule]]. */
  def scheduled(name: String, command: String, schedule: Schedule): Job =
    Job(name, command, schedule = Some(schedule))
  /** Creates a time-triggered job with explicit resource/retry settings. */
  def scheduled(name: String, command: String, retries: Int, cpus: FrequencyUnit, disk: DataUnit, mem: DataUnit, disabled: Boolean, schedule: Schedule): Job =
    Job(name, command, schedule = Some(schedule), retries = retries, cpus = cpus, disk = disk, mem = mem, disabled = disabled)
  /** Creates a dependency-triggered job from the given parent job names. */
  def dependent(name: String, command: String, parents: TraversableOnce[String]): Job =
    Job(name, command, parents = Some(parents.toSet))
  /** Creates a dependency-triggered job with explicit resource/retry settings. */
  def dependent(name: String, command: String, retries: Int, cpus: FrequencyUnit, disk: DataUnit, mem: DataUnit, disabled: Boolean, parents: TraversableOnce[String]): Job =
    Job(name, command, parents = Some(parents.toSet), retries = retries, cpus = cpus, disk = disk, mem = mem, disabled = disabled)
  /** Java-friendly overload accepting a [[java.lang.Iterable]] of parent names. */
  def dependent(name: String, command: String, parents: JavaIterable[String]): Job =
    Job(name, command, parents = Some(parents.toSet))
  /** Java-friendly overload with explicit resource/retry settings. */
  def dependent(name: String, command: String, retries: Int, cpus: FrequencyUnit, disk: DataUnit, mem: DataUnit, disabled: Boolean, parents: JavaIterable[String]): Job =
    Job(name, command, parents = Some(parents.toSet), retries = retries, cpus = cpus, disk = disk, mem = mem, disabled = disabled)
}
/**
 * Payload describing a completed task.
 *
 * @param statusCode status code reported for the completed task (defaults to 0)
 */
sealed case class TaskCompleted(
  statusCode: Int = 0
)
/**
 * Client bindings for the Chronos v2 `/scheduler` REST endpoints: listing
 * jobs, adding scheduled and dependent jobs, deleting jobs and starting them.
 */
object Scheduler {
import com.guidewire.tools.chronos.client.api.HttpUtils._
object jobs {
    /**
     * Constructs a [[scala.Predef.String]] representing the URI for this resource
     * (`/scheduler/jobs`).
     *
     * @param connection used to construct the full URI; must not be null
     * @return a [[scala.Predef.String]] representing the URI for this resource
     */
    def uriList(connection: Connection): String = {
      require(connection ne null, s"Missing connection")
      connection.uri(s"/scheduler/jobs")
    }
    /**
     * Makes the equivalent call to `GET /scheduler/jobs` and provides the response at a future time.
     * Alias for [[list]].
     *
     * @param connection used to construct the full URI
     * @param executor the [[scala.concurrent.ExecutionContext]] used to process the request
     * @return A [[scala.concurrent.Future]] with a scalaz [[scalaz.Validation]] object providing the results of
     * the request or an error
     */
    def apply(implicit connection: Connection, executor: ExecutionContext = ExecutionContext.Implicits.global): Future[Validation[Error, Traversable[Job]]] =
      list(connection, executor)
    /**
     * Makes the equivalent call to `GET /scheduler/jobs` and provides the response at a future time.
     *
     * @param connection used to construct the full URI
     * @param executor the [[scala.concurrent.ExecutionContext]] used to process the request
     * @return A [[scala.concurrent.Future]] with a scalaz [[scalaz.Validation]] object providing the results of
     * the request or an error
     */
    def list(implicit connection: Connection, executor: ExecutionContext = ExecutionContext.Implicits.global): Future[Validation[Error, Traversable[Job]]] =
      httpGet[Traversable[Job]](connection)(uriList)(processList)
    /**
     * Performs the actual parsing and validation of a JSON payload representing the payload from a call
     * to `GET /scheduler/jobs`.
     *
     * @param statusCode HTTP status code of the response
     * @param response JSON contents to parse and map to a [[scala.collection.Traversable]]
     * of [[com.guidewire.tools.chronos.client.api.v2.Job]]
     * @return a [[scalaz.Validation]] that can be composed using normal scalaz methods
     */
    def processList(statusCode: Int, response: Array[Byte]): Validation[Error, Traversable[Job]] =
      validateify(statusCode, Json.parse(response).validate[Seq[Job]])
    /**
     * Constructs a [[scala.Predef.String]] representing the URI for this resource
     * (`/scheduler/iso8601`).
     *
     * @param connection used to construct the full URI; must not be null
     * @return a [[scala.Predef.String]] representing the URI for this resource
     */
    def uriAddScheduled(connection: Connection): String = {
      require(connection ne null, s"Missing connection")
      connection.uri(s"/scheduler/iso8601")
    }
    /**
     * Makes the equivalent call to `POST /scheduler/iso8601` and provides the response at a future time.
     *
     * @param job details of the job to run; must define a schedule and no parents
     * @param connection used to construct the full URI
     * @param executor the [[scala.concurrent.ExecutionContext]] used to process the request
     * @return A [[scala.concurrent.Future]] with a scalaz [[scalaz.Validation]] object providing the results of
     * the request or an error
     * @throws IllegalArgumentException if `job` has parents or lacks a schedule
     */
    def addScheduled(job: Job)(implicit connection: Connection, executor: ExecutionContext = ExecutionContext.Implicits.global): Future[Validation[Error, Boolean]] = {
      require(job.parents.isEmpty && job.schedule.isDefined, s"A schedule must be defined for this job and must not define any parents")
      httpPostAsJson[Job, Boolean](job, connection)(uriAddScheduled)(processAddScheduled)
    }
    /**
     * Performs the response processing from a call to `POST /scheduler/iso8601`.
     *
     * @param statusCode HTTP status code of the response; 204 indicates success
     * @param response should be empty if `statusCode` is a 204 (indicating success)
     * @return a [[scalaz.Validation]] that can be composed using normal scalaz methods
     */
    def processAddScheduled(statusCode: Int, response: Array[Byte]): Validation[Error, Boolean] =
      successIf204(statusCode, response)
    /**
     * Constructs a [[scala.Predef.String]] representing the URI for this resource
     * (`/scheduler/dependency`).
     *
     * @param connection used to construct the full URI; must not be null
     * @return a [[scala.Predef.String]] representing the URI for this resource
     */
    def uriAddDependent(connection: Connection): String = {
      require(connection ne null, s"Missing connection")
      connection.uri(s"/scheduler/dependency")
    }
    /**
     * Makes the equivalent call to `POST /scheduler/dependency` and provides the response at a future time.
     *
     * @param job details of the job to run; must define at least one parent and no schedule
     * @param connection used to construct the full URI
     * @param executor the [[scala.concurrent.ExecutionContext]] used to process the request
     * @return A [[scala.concurrent.Future]] with a scalaz [[scalaz.Validation]] object providing the results of
     * the request or an error
     * @throws IllegalArgumentException if `job` has a schedule or no parents
     */
    def addDependent(job: Job)(implicit connection: Connection, executor: ExecutionContext = ExecutionContext.Implicits.global): Future[Validation[Error, Boolean]] = {
      require(job.schedule.isEmpty && job.parents.isDefined && !job.parents.get.isEmpty, s"A parent must be defined for this job and must not define a schedule")
      httpPostAsJson[Job, Boolean](job, connection)(uriAddDependent)(processAddDependent)
    }
    /**
     * Performs the response processing from a call to `POST /scheduler/dependency`.
     *
     * @param statusCode HTTP status code of the response; 204 indicates success
     * @param response should be empty if `statusCode` is a 204 (indicating success)
     * @return a [[scalaz.Validation]] that can be composed using normal scalaz methods
     */
    def processAddDependent(statusCode: Int, response: Array[Byte]): Validation[Error, Boolean] =
      successIf204(statusCode, response)
  /**
   * Constructs a [[scala.Predef.String]] representing the URI for this resource.
   *
   * @param jobName name of a job
   * @param connection used to construct the full URI
   * @return a [[scala.Predef.String]] representing the URI for this resource
   * @throws IllegalArgumentException if `connection` is null (via `require`)
   */
  def uriDelete(jobName: String)(connection: Connection): String = {
    require(connection ne null, s"Missing connection")
    connection.uri(s"/scheduler/job/$jobName")
  }
  /**
   * Makes the equivalent call to `DELETE /scheduler/job/<jobName>` and provides the response at a future time.
   *
   * @param jobName name of a job
   * @param ignoreIfMissing consider a missing job to be a success (the response
   *                        processor then treats an HTTP 400 as success as well)
   * @param connection used to construct the full URI
   * @param executor the [[scala.concurrent.ExecutionContext]] used to process the request
   * @return A [[scala.concurrent.Future]] with a scalaz [[scalaz.Validation]] object providing the results of
   *         the request or an error
   */
  def delete(jobName: String, ignoreIfMissing: Boolean = false)(implicit connection: Connection, executor: ExecutionContext = ExecutionContext.Implicits.global): Future[Validation[Error, Boolean]] =
    httpDelete[Boolean](connection)(uriDelete(jobName))(processDelete(ignoreIfMissing))
  /**
   * Performs the response processing from a call to `DELETE /scheduler/job/<jobName>`.
   *
   * @param ignoreIfMissing consider a missing job to be a success (maps an
   *                        HTTP 400 response to success via `ignoreOn400`)
   * @param statusCode HTTP status code returned by the server
   * @param response should be empty if `statusCode` is a 204 (indicating success)
   * @return a [[scalaz.Validation]] that can be composed using normal scalaz methods
   */
  def processDelete(ignoreIfMissing: Boolean)(statusCode: Int, response: Array[Byte]): Validation[Error, Boolean] =
    successIf204(statusCode, response, ignoreOn400 = ignoreIfMissing)
  /**
   * Constructs a [[scala.Predef.String]] representing the URI for this resource.
   *
   * @param jobName name of a job
   * @param connection used to construct the full URI
   * @return a [[scala.Predef.String]] representing the URI for this resource
   * @throws IllegalArgumentException if `connection` is null (via `require`)
   */
  def uriStart(jobName: String)(connection: Connection): String = {
    require(connection ne null, s"Missing connection")
    connection.uri(s"/scheduler/job/$jobName")
  }
  /**
   * Makes the equivalent call to `PUT /scheduler/job/<jobName>` and provides the response at a future time.
   *
   * The PUT body is an empty JSON string; the job to start is selected by the
   * job name in the URI.
   *
   * @param jobName name of a job
   * @param connection used to construct the full URI
   * @param executor the [[scala.concurrent.ExecutionContext]] used to process the request
   * @return A [[scala.concurrent.Future]] with a scalaz [[scalaz.Validation]] object providing the results of
   *         the request or an error
   */
  def start(jobName: String)(implicit connection: Connection, executor: ExecutionContext = ExecutionContext.Implicits.global): Future[Validation[Error, Boolean]] =
    httpPutAsJson[String, Boolean]("", connection)(uriStart(jobName))(processStart)
  /**
   * Performs the response processing from a call to `PUT /scheduler/job/<jobName>`.
   *
   * @param statusCode HTTP status code returned by the server
   * @param response should be empty if `statusCode` is a 204 (indicating success)
   * @return a [[scalaz.Validation]] that can be composed using normal scalaz methods
   */
  def processStart(statusCode: Int, response: Array[Byte]): Validation[Error, Boolean] =
    successIf204(statusCode, response)
}
  object tasks {
    /**
     * Constructs a [[scala.Predef.String]] representing the URI for this resource.
     *
     * @param jobName name of a job
     * @param connection used to construct the full URI
     * @return a [[scala.Predef.String]] representing the URI for this resource
     * @throws IllegalArgumentException if `connection` is null (via `require`)
     */
    def uriKillAll(jobName: String)(connection: Connection): String = {
      require(connection ne null, s"Missing connection")
      connection.uri(s"/scheduler/task/kill/$jobName")
    }
    /**
     * Makes the equivalent call to `DELETE /scheduler/task/kill/<jobName>` and provides the response at a future time.
     * (The previous doc said `GET`, but the implementation issues an HTTP DELETE.)
     *
     * @param jobName name of a job
     * @param connection used to construct the full URI
     * @param executor the [[scala.concurrent.ExecutionContext]] used to process the request
     * @return A [[scala.concurrent.Future]] with a scalaz [[scalaz.Validation]] object providing the results of
     *         the request or an error
     */
    def killAll(jobName: String)(implicit connection: Connection, executor: ExecutionContext = ExecutionContext.Implicits.global): Future[Validation[Error, Boolean]] =
      httpDelete[Boolean](connection)(uriKillAll(jobName))(processKillAll)
    /**
     * Performs the response processing from a call to `DELETE /scheduler/task/kill/<jobName>`.
     *
     * @param statusCode HTTP status code returned by the server
     * @param response should be empty if `statusCode` is a 204 (indicating success)
     * @return a [[scalaz.Validation]] that can be composed using normal scalaz methods
     */
    def processKillAll(statusCode: Int, response: Array[Byte]): Validation[Error, Boolean] =
      successIf204(statusCode, response)
    /**
     * Constructs a [[scala.Predef.String]] representing the URI for this resource.
     *
     * @param taskID Mesos task ID usually provided in the environment as `\$mesos_task_id`
     * @param connection used to construct the full URI
     * @return a [[scala.Predef.String]] representing the URI for this resource
     * @throws IllegalArgumentException if `connection` is null (via `require`)
     */
    def uriCompleted(taskID: String)(connection: Connection): String = {
      require(connection ne null, s"Missing connection")
      connection.uri(s"/scheduler/task/$taskID")
    }
    /**
     * Makes the equivalent call to `PUT /scheduler/task/<task id>` and provides the response at a future time.
     * (The previous doc said `POST`, but the implementation issues an HTTP PUT.)
     *
     * @param taskID Mesos task ID usually provided in the environment as `\$mesos_task_id`
     * @param statusCode the completion status of the task, sent in the JSON body
     * @param connection used to construct the full URI
     * @param executor the [[scala.concurrent.ExecutionContext]] used to process the request
     * @return A [[scala.concurrent.Future]] with a scalaz [[scalaz.Validation]] object providing the results of
     *         the request or an error
     */
    def completed(taskID: String, statusCode: Int)(implicit connection: Connection, executor: ExecutionContext = ExecutionContext.Implicits.global): Future[Validation[Error, Boolean]] =
      httpPutAsJson[TaskCompleted, Boolean](TaskCompleted(statusCode), connection)(uriCompleted(taskID))(processCompleted)
    /**
     * Performs the response processing from a call to `PUT /scheduler/task/<task id>`.
     *
     * @param statusCode HTTP status code returned by the server
     * @param response should be empty if `statusCode` is a 204 (indicating success)
     * @return a [[scalaz.Validation]] that can be composed using normal scalaz methods
     */
    def processCompleted(statusCode: Int, response: Array[Byte]): Validation[Error, Boolean] =
      successIf204(statusCode, response)
  }
  object graphs {
    /**
     * Constructs a [[scala.Predef.String]] representing the URI for this resource.
     *
     * @param connection used to construct the full URI
     * @return a [[scala.Predef.String]] representing the URI for this resource
     * @throws IllegalArgumentException if `connection` is null (via `require`)
     */
    def uriDot(connection: Connection): String = {
      require(connection ne null, s"Missing connection")
      connection.uri(s"/scheduler/graph/dot")
    }
    /**
     * Makes the equivalent call to `GET /scheduler/graph/dot` and provides the response at a future time.
     *
     * @param connection used to construct the full URI
     * @param executor the [[scala.concurrent.ExecutionContext]] used to process the request
     * @return A [[scala.concurrent.Future]] with a scalaz [[scalaz.Validation]] object providing the results of
     *         the request or an error
     */
    def dot(implicit connection: Connection, executor: ExecutionContext = ExecutionContext.Implicits.global): Future[Validation[Error, String]] =
      httpGetPlainText[String](connection)(uriDot)(processDot)
    /**
     * Performs the processing of the payload from a call to `GET /scheduler/graph/dot`.
     *
     * @param statusCode HTTP status code returned by the server
     * @param response Plain text contents of a dot file representing the job dependency graph
     * @return a [[scalaz.Validation]] that can be composed using normal scalaz methods
     */
    def processDot(statusCode: Int, response: Array[Byte]): Validation[Error, String] =
      processSingleStringHttpGetResponse(statusCode, response)
  }
}
| echinthaka/chronos-client | src/main/scala/com/guidewire/tools/chronos/client/api/v2/Scheduler.scala | Scala | apache-2.0 | 18,908 |
package org.smitt.conf
/** Aggregated test configuration: binds each configuration slot to its
  * corresponding stub implementation object.
  */
object TestConfImpl extends TestConf {
  override def outputConf: TestOutputConf = TestOutputConfImpl
  override def messengerConf: TestMessengerConf = TestMessengerConfImpl
}
| sergius/smitt | src/test/scala/org/smitt/conf/TestConfImpl.scala | Scala | mit | 212 |
/** Simple session management layer, minus the part the interacts with the
* browser.
*/
package util.session
import org.joda.time.{Duration, DateTime}
import org.apache.commons.math3.random.RandomDataGenerator
import util.EitherOptionHelpers.Implicits._
import pwguard.global.Globals.ExecutionContexts.Default._
import scala.concurrent.Future
/** Session cookie information.
  *
  * Instances are immutable; a session is "refreshed" by copying the instance
  * with a new `validUntil` timestamp (see `SessionStore.refresh`).
  *
  * @param userIdentifier user's unique ID, as a string. Note that this value
  *                       can be a stringified numeric ID, a username, an
  *                       email address, etc.
  * @param sessionID user's session ID
  * @param ipAddress user's IP address
  * @param validUntil expiration date/time
  * @param duration stored session duration, kept so the expiration can be
  *                 recomputed when the session is refreshed
  */
case class SessionData(userIdentifier: String,
                       sessionID: String,
                       ipAddress: String,
                       validUntil: DateTime,
                       duration: Duration)
/** Utility methods.
  */
object SessionUtil {
  // Generator for cryptographically random session IDs.
  private val rdg = new RandomDataGenerator()
  /** Generate new session data, with a random session ID.
    *
    * @param userIdentifier the user identifier to store
    * @param ipAddress the user's IP address
    * @param duration How long the session data should be valid. This
    *                 value is used to calculate the expiration time,
    *                 from the current time.
    *
    * @return a suitable `SessionData` object, with a generated session ID.
    */
  def newSessionData(userIdentifier: String,
                     ipAddress: String,
                     duration: Duration): SessionData = {
    // Compute the expiration and session ID exactly once. (The previous
    // version computed these values into locals, then ignored them and
    // generated a *second* secure session ID and timestamp when building
    // the SessionData.)
    val sessionID = rdg.nextSecureHexString(32)
    val expiry    = DateTime.now().plus(duration)
    SessionData(userIdentifier = userIdentifier,
                sessionID      = sessionID,
                ipAddress      = ipAddress,
                validUntil     = expiry,
                duration       = duration)
  }
}
/** Defines the interface for a session store. Session stores can be anything
  * from memory to a database.
  */
trait SessionStore {
  /** Retrieve session data for a session ID.
    *
    * @param sessionID the session ID
    *
    * @return a `Future` of `Some(data)` if found, `None` if not found.
    *         The `Future` fails on error.
    */
  def getSessionData(sessionID: String): Future[Option[SessionData]]
  /** Store session data for a session ID. Any existing data for the
    * associated session ID is overwritten.
    *
    * @param sessionData the `SessionData` object to store
    *
    * @return a `Future` of `true` on successful store; the `Future` fails
    *         on error.
    */
  def storeSessionData(sessionData: SessionData): Future[Boolean]
  /** Clear or invalidate session data for a particular session ID.
    *
    * @param sessionID the session ID whose data should be removed
    *
    * @return a `Future` of `true` on success; the `Future` fails on error.
    */
  def clearSessionData(sessionID: String): Future[Boolean]
  /** Refresh a session, updating its timestamp.
    *
    * @param sessionID the session ID
    *
    * @return a `Future` of the refreshed `SessionData`; the `Future` fails
    *         if the session does not exist, or on error.
    */
  def refresh(sessionID: String): Future[SessionData] = {
    // Fix: the failure message was missing the `s` interpolator, so it used
    // to contain the literal text "$sessionID" instead of the actual ID.
    for { dataOpt <- getSessionData(sessionID)
          data <- dataOpt.toFuture(s"Nonexistent session $sessionID")
          refreshed <- refresh(data) }
      yield refreshed
  }
  /** Refresh a session, updating its timestamp.
    *
    * @param data the session data to update and replace
    *
    * @return a `Future` of the refreshed `SessionData`; the `Future` fails
    *         on error.
    */
  def refresh(data: SessionData): Future[SessionData] = {
    val newData = data.copy(validUntil = DateTime.now.plus(data.duration))
    storeSessionData(newData).map { _ => newData }
  }
}
/** An in-memory, Map-driven session store. Note that this store cannot
  * save its session data across reboots of the server.
  */
class MemorySessionStore extends SessionStore {
  import scala.collection.concurrent.TrieMap
  // Fix: a plain mutable.HashMap was mutated from inside Futures, which may
  // run concurrently on a thread pool — an unsynchronized data race. TrieMap
  // is a lock-free concurrent map with the same mutable-Map API (+=, -=, get).
  private val data: TrieMap[String, SessionData] = TrieMap.empty
  def getSessionData(sessionID: String): Future[Option[SessionData]] = {
    Future { data.get(sessionID) }
  }
  def storeSessionData(sessionData: SessionData): Future[Boolean] = {
    Future {
      data += (sessionData.sessionID -> sessionData)
      true
    }
  }
  def clearSessionData(sessionID: String): Future[Boolean] = {
    Future {
      data -= sessionID
      true
    }
  }
}
/** A version of the session store that uses Play's cache API. With a
  * persistent underlying cache store, such as Memcache, this session store
  * can allow session data to persist across server restarts.
  */
class PlayCacheSessionStore extends SessionStore {
  import play.api.cache.Cache
  // NOTE(review): this uses the global-application Cache API, which requires
  // a running Play application (play.api.Play.current) and is deprecated in
  // later Play versions — confirm the target Play version still supports it.
  import play.api.Play.current
  def getSessionData(sessionID: String): Future[Option[SessionData]] = {
    Future {
      Cache.getAs[SessionData](sessionID)
    }
  }
  // No explicit cache TTL is passed here; presumably expiry is enforced via
  // SessionData.validUntil by the callers — TODO confirm.
  def storeSessionData(sessionData: SessionData): Future[Boolean] = {
    Future {
      Cache.set(sessionData.sessionID, sessionData)
      true
    }
  }
  def clearSessionData(sessionID: String): Future[Boolean] = {
    Future {
      Cache.remove(sessionID)
      true
    }
  }
}
| bmc/pwguard | app/util/session.scala | Scala | bsd-3-clause | 5,385 |
package com.microsoft.awt.data
import org.scalajs.nodejs.mongodb.{Collection, Db}
import scala.concurrent.ExecutionContext
import scala.scalajs.js
/**
 * Question DAO: a Scala.js native facade typed over a MongoDB [[Collection]]
 * @author lawrence.daniels@gmail.com
 */
@js.native
trait QuestionDAO extends Collection
/**
 * Question DAO Companion
 * @author lawrence.daniels@gmail.com
 */
object QuestionDAO {
  /**
   * Question DAO Extensions
   * @param db the given [[Db database]]
   */
  implicit class QuestionDAOExtensions(val db: Db) extends AnyVal {
    @inline
    def getQuestionDAO(implicit ec: ExecutionContext) = {
      // Looks up the "questions" collection and re-types the handle as QuestionDAO.
      db.collectionFuture("questions").mapTo[QuestionDAO]
    }
  }
} | ldaniels528/awt | app-nodejs/src/main/scala/com/microsoft/awt/data/QuestionDAO.scala | Scala | apache-2.0 | 656 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database.s3
import org.apache.openwhisk.core.entity.WhiskEntity
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class S3AttachmentStoreCloudFrontTests extends S3AttachmentStoreBehaviorBase with S3Aws {
  override lazy val store = makeS3Store[WhiskEntity]
  override def storeType: String = "S3_CloudFront"
  // HOCON config with ${...} environment substitutions resolved at load time.
  override def cloudFrontConfig: String =
    """
      |cloud-front-config {
      |  domain-name = ${CLOUDFRONT_DOMAIN_NAME}
      |  key-pair-id = ${CLOUDFRONT_KEY_PAIR_ID}
      |  private-key = ${CLOUDFRONT_PRIVATE_KEY}
      |}
    """.stripMargin
  // Skips every test unless CloudFront credentials are present. Only
  // CLOUDFRONT_PRIVATE_KEY is checked; the other two variables are assumed
  // to be set alongside it.
  override protected def withFixture(test: NoArgTest) = {
    assume(
      System.getenv("CLOUDFRONT_PRIVATE_KEY") != null,
      "Configure following env variables for test " +
        "to run 'CLOUDFRONT_DOMAIN_NAME', 'CLOUDFRONT_KEY_PAIR_ID', 'CLOUDFRONT_PRIVATE_KEY'")
    super.withFixture(test)
  }
  //With CloudFront deletes are not immediate and instead the objects may live in CDN cache until TTL
  override protected val lazyDeletes = true
}
| jeremiaswerner/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/database/s3/S3AttachmentStoreCloudFrontTests.scala | Scala | apache-2.0 | 1,910 |
/*
* Copyright 2017-2018 47 Degrees, LLC. <http://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package examples.todolist
package http
import cats._
import cats.implicits._
import com.twitter.util.Future
import examples.todolist.TodoItem
import examples.todolist.service.TodoItemService
import io.circe.generic.auto._
import io.finch._
import io.finch.circe._
// Finch CRUD endpoints for TodoItem, all rooted under "/items". Effects run
// in F and are converted to Twitter Futures by the natural transformation.
class TodoItemApi[F[_]: Monad](implicit service: TodoItemService[F], handler: F ~> Future)
    extends CRUDApi[TodoItem] {
  import io.finch.syntax._
  private val prefix = "items"
  // POST /items/reset — resets the item store via the service.
  val reset = post(prefix :: "reset") {
    handler(service.reset.map(Ok))
  }
  // GET /items/<id> — 404 when the service returns no item for the id.
  val retrieve = get(prefix :: path[Int]) { id: Int =>
    handler(
      service.retrieve(id) map (item =>
        item.fold[Output[TodoItem]](
          NotFound(new NoSuchElementException(s"Could not find ${service.model} with $id")))(Ok)))
  } handle {
    case nse: NoSuchElementException => NotFound(nse)
  }
  // GET /items — lists all items.
  val list = get(prefix) {
    handler(service.list.map(Ok))
  }
  // POST /items — inserts the JSON body as a new item.
  val insert = post(prefix :: jsonBody[TodoItem]) { item: TodoItem =>
    handler(service.insert(item).map(Ok))
  }
  // PUT /items/<id> — the path id overrides any id present in the body.
  val update = put(prefix :: path[Int] :: jsonBody[TodoItem]) { (id: Int, item: TodoItem) =>
    handler(service.update(item.copy(id = Some(id))).map(Ok))
  }
  // DELETE /items/<id>
  val destroy = delete(prefix :: path[Int]) { id: Int =>
    handler(service.destroy(id).map(Ok))
  }
}
object TodoItemApi {
  // Implicitly materializes the API when a service and an F ~> Future
  // interpreter are in scope.
  implicit def instance[F[_]: Monad](
      implicit service: TodoItemService[F],
      handler: F ~> Future): TodoItemApi[F] =
    new TodoItemApi[F]
}
| frees-io/freestyle | modules/examples/todolist-http-finch/src/main/scala/todo/http/TodoItemApi.scala | Scala | apache-2.0 | 2,097 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.usergrid.enums
/**
 * Created by mdunker on 8/31/15.
 *
 * String identifiers for the supported CSV feed patterns, plus validation
 * of arbitrary pattern strings.
 */
object CsvFeedPatternType {
  /** Identifier for the "random" feed pattern. */
  val Random = "random"
  /** Identifier for the "circular" feed pattern. */
  val Circular = "circular"
  /** Every recognized pattern identifier, in declaration order. */
  val Values = Seq(Random, Circular)
  /** @return true if and only if `str` is a recognized pattern identifier */
  def isValid(str: String): Boolean = Values.exists(_ == str)
}
| mdunker/usergrid | tests/performance/src/main/scala/org/apache/usergrid/enums/CsvFeedPatternType.scala | Scala | apache-2.0 | 1,068 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.ml.params
import ai.h2o.sparkling.H2OFrame
import org.apache.spark.sql.DataFrame
import ai.h2o.sparkling.utils.DataFrameSerializationWrappers._
trait HasBetaConstraints extends H2OAlgoParamsBase with HasDataFrameSerializer {
  // Nullable Spark ML param; the default null means "no beta constraints".
  private val betaConstraints = new NullableDataFrameParam(
    this,
    "betaConstraints",
    "Data frame of beta constraints enabling to set special conditions over the model coefficients.")
  setDefault(betaConstraints -> null)
  /** @return the configured beta-constraints DataFrame, or null when unset */
  def getBetaConstraints(): DataFrame = $(betaConstraints)
  /** Sets the beta constraints; the DataFrame is wrapped for serialization. */
  def setBetaConstraints(value: DataFrame): this.type = set(betaConstraints, toWrapper(value))
  // Converts the configured DataFrame into an H2OFrame key for the backend call.
  private[sparkling] def getBetaConstraintsParam(trainingFrame: H2OFrame): Map[String, Any] = {
    Map("beta_constraints" -> convertDataFrameToH2OFrameKey(getBetaConstraints()))
  }
  // Maps the Spark-side camelCase param name to H2O's snake_case name.
  override private[sparkling] def getSWtoH2OParamNameMap(): Map[String, String] = {
    super.getSWtoH2OParamNameMap() ++ Map("betaConstraints" -> "beta_constraints")
  }
}
| h2oai/sparkling-water | ml/src/main/scala/ai/h2o/sparkling/ml/params/HasBetaConstraints.scala | Scala | apache-2.0 | 1,802 |
/** Copyright 2015, Metreta Information Technology s.r.l. */
package com.metreta.spark.orientdb.connector.rdd.partitioner
import org.apache.spark.Partition
import java.net.InetAddress
/** A Spark [[Partition]] that also exposes the network endpoints able to
  * serve the partition's data (used for locality-aware scheduling). */
trait EndpointPartition extends Partition{
  def endpoints: Iterable[InetAddress]
}
/** Partition backed by an OrientDB cluster, identified by class and cluster name. */
case class OrientPartition(index: Int,
                           endpoints: Iterable[InetAddress],
                           partitionName: PartitionName) extends EndpointPartition
case class PartitionName(className: String, clusterName: String) | metreta/spark-orientdb-connector | spark-orientdb-connector/src/main/scala/com/metreta/spark/orientdb/connector/rdd/partitioner/OrientPartition.scala | Scala | apache-2.0 | 557 |
package org.marxc.ast
import org.objectweb.asm.MethodVisitor
import org.objectweb.asm.Opcodes._
import org.marxc.SymbolTable
// AST node for a string literal operand.
case class StringNode(value: String) extends OperandNode {
  // Emits an LDC instruction pushing the constant string onto the JVM operand
  // stack. `symbolTable` is unused here — presumably required by the shared
  // generate signature; confirm against OperandNode.
  def generate(mv: MethodVisitor, symbolTable: SymbolTable) {
    mv.visitLdcInsn(value)
  }
}
| iogr/MarxC | src/main/scala/org/marxc/ast/StringNode.scala | Scala | apache-2.0 | 281 |
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.flaminem.flamy.commands
import com.flaminem.flamy.commands.utils.FlamySubcommand
import com.flaminem.flamy.conf.{Environment, FlamyContext, FlamyGlobalOptions}
import com.flaminem.flamy.exec.FlamyRunner
import com.flaminem.flamy.exec.utils.{ReturnStatus, ReturnSuccess}
import com.flaminem.flamy.model.files.FileIndex
import com.flaminem.flamy.model.names.ItemName
import org.rogach.scallop.{ScallopConf, ScallopOption, Subcommand}
import scala.language.reflectiveCalls
/**
 * Created by fpin on 5/22/15.
 *
 * "flamy test" subcommand: runs every TEST.hql query for the selected items
 * on the given environment and asserts each result is 0.
 */
class Test extends Subcommand("test") with FlamySubcommand{
  banner("Execute all TEST.hql queries for all specified items, and assert that the result is 0.")
  private val environment: ScallopOption[Environment] =
    opt(name="on", default=None, descr="Specifies environment to run on", required=true, noshort=true)
  private val dryRun: ScallopOption[Boolean] =
    opt(name="dry", default=Some(false), descr="Perform a dry-run", noshort=true)
  private val items: ScallopOption[List[ItemName]] =
    trailArg[List[ItemName]](default=Some(List()),required=false)
  override def doCommand(globalOptions: FlamyGlobalOptions, subCommands: List[ScallopConf]): ReturnStatus = {
    val context = new FlamyContext(globalOptions, environment.get)
    // NOTE(review): mutates the freshly built context; dryRun() unwraps the
    // Scallop option (defaults to false).
    context.dryRun = dryRun()
    val flamyRunner: FlamyRunner = FlamyRunner(context)
    // Restricts the file index to the items passed on the command line.
    val fileIndex = context.getFileIndex.filter(items())
    flamyRunner.testAll(fileIndex, context)
    ReturnSuccess
  }
}
| flaminem/flamy | src/main/scala/com/flaminem/flamy/commands/Test.scala | Scala | apache-2.0 | 2,060 |
package com.twitter.finagle.exception
import com.twitter.finagle.exception.thriftscala.{LogEntry, ResultCode, Scribe}
import com.twitter.util._
import java.net.{InetAddress, InetSocketAddress}
import org.mockito.ArgumentCaptor
import org.mockito.Matchers.anyObject
import org.mockito.Mockito._
import org.scalatest.FunSuite
import org.scalatestplus.mockito.MockitoSugar
class DefaultReporterTest extends FunSuite with MockitoSugar {
  val logger = mock[Scribe.FutureIface]
  when(logger.log(anyObject())).thenReturn(Future.value(ResultCode.Ok))
  // NOTE(review): the captor is populated by the first test and read by the
  // second, so these tests rely on executing in declaration order over a
  // shared fixture.
  val captor = ArgumentCaptor.forClass(classOf[Seq[LogEntry]])
  val reporter = Reporter(logger, "service16")
  val tse = new TestServiceException("service16", "my cool message")
  test("log entries to a client once upon receive") {
    reporter.handle(tse.throwable)
    verify(logger).log(captor.capture())
  }
  test("log a json entry with the proper format") {
    val es = captor.getValue
    assert(es.size == 1)
    tse.verifyCompressedJSON(es(0).message)
  }
}
class ClientReporterTest extends FunSuite with MockitoSugar {
  val logger = mock[Scribe.FutureIface]
  when(logger.log(anyObject())).thenReturn(Future.value(ResultCode.Ok))
  // NOTE(review): same order-dependence as DefaultReporterTest — the captor
  // is filled in the first test and read in the second.
  val captor = ArgumentCaptor.forClass(classOf[Seq[LogEntry]])
  // Reporter configured with the client (loopback) address attached.
  val reporter = Reporter(logger, "service16").withClient()
  val tse = new TestServiceException(
    "service16",
    "my cool message",
    clientAddress = Some(InetAddress.getLoopbackAddress.getHostAddress)
  )
  test("log entries to a client once upon receive") {
    reporter.handle(tse.throwable)
    verify(logger).log(captor.capture())
  }
  test("log a json entry with the proper format") {
    val es = captor.getValue
    assert(es.size == 1)
    tse.verifyCompressedJSON(es(0).message)
  }
}
class SourceClientReporterTest extends FunSuite with MockitoSugar {
  val logger = mock[Scribe.FutureIface]
  when(logger.log(anyObject())).thenReturn(Future.value(ResultCode.Ok))
  // NOTE(review): order-dependent tests over a shared captor, as above.
  val captor = ArgumentCaptor.forClass(classOf[Seq[LogEntry]])
  val socket = new InetSocketAddress(InetAddress.getLoopbackAddress, 0)
  // Reporter configured with both a source endpoint and the client address.
  val reporter = Reporter(logger, "service16")
    .withSource(socket)
    .withClient()
  val tse = new TestServiceException(
    "service16",
    "my cool message",
    clientAddress = Some(InetAddress.getLoopbackAddress.getHostAddress),
    sourceAddress = Some(socket.getAddress.getHostName)
  )
  test("log entries to a client once upon receive") {
    reporter.handle(tse.throwable)
    verify(logger).log(captor.capture())
  }
  test("log a json entry with the proper format") {
    val es = captor.getValue
    assert(es.size == 1)
    tse.verifyCompressedJSON(es(0).message)
  }
}
class ExceptionReporterTest extends FunSuite with MockitoSugar {
  test("logs an exception") {
    val logger = mock[Scribe.FutureIface]
    when(logger.log(anyObject())).thenReturn(Future.value(ResultCode.Ok))
    val captor = ArgumentCaptor.forClass(classOf[Seq[LogEntry]])
    val tse = new TestServiceException("service", "my cool message")
    // Build the reporter from the factory, then swap in the mocked client.
    val reporter = new ExceptionReporter().apply("service", None)
    reporter.copy(client = logger).handle(tse.throwable)
    verify(logger).log(captor.capture())
  }
  test("logs a client exception") {
    val logger = mock[Scribe.FutureIface]
    when(logger.log(anyObject())).thenReturn(Future.value(ResultCode.Ok))
    val captor = ArgumentCaptor.forClass(classOf[Seq[LogEntry]])
    val socket = new InetSocketAddress(InetAddress.getLoopbackAddress, 0)
    val tse = new TestServiceException(
      "service",
      "my cool message",
      clientAddress = Some(socket.getAddress.getHostName)
    )
    // Passing an address makes the factory attach client info to the reporter.
    val reporter = new ExceptionReporter().apply("service", Some(socket))
    reporter.copy(client = logger).handle(tse.throwable)
    verify(logger).log(captor.capture())
  }
  test("appends the client address to the exception when provided") {
    val reporter = new ExceptionReporter
    val addr = new InetSocketAddress("8.8.8.8", 342)
    val factoryWithClient = reporter("qux", Some(addr))
    val factoryWithout = reporter("qux", None)
    assert(factoryWithClient != factoryWithout)
    assert(factoryWithClient == factoryWithout.withClient(addr.getAddress))
  }
}
| luciferous/finagle | finagle-exception/src/test/scala/com/twitter/finagle/exception/ReporterSpec.scala | Scala | apache-2.0 | 4,212 |
package com.datawizards.sparklocal.impl.scala.parallellazy.session
import com.datawizards.sparklocal.dataset.DataSetAPI
import com.datawizards.sparklocal.dataset.io.ReaderExecutor
import com.datawizards.sparklocal.impl.scala.parallellazy.ParallelLazySeq
import com.datawizards.sparklocal.impl.scala.parallellazy.dataset.io.ReaderScalaParallelLazyImpl
import com.datawizards.sparklocal.impl.scala.session.SparkSessionAPIScalaBase
import com.datawizards.sparklocal.rdd.RDDAPI
import org.apache.spark.sql.Encoder
import scala.reflect.ClassTag
class SparkSessionAPIScalaParallelLazyImpl extends SparkSessionAPIScalaBase {
  /** Wraps the given data in a parallel lazy RDD facade. */
  override def createRDD[T: ClassTag](data: Seq[T]): RDDAPI[T] =
    RDDAPI(new ParallelLazySeq(data.par))
  /** Wraps the given data in a parallel lazy Dataset facade. */
  override def createDataset[T: ClassTag](data: Seq[T])(implicit enc: Encoder[T]): DataSetAPI[T] =
    DataSetAPI(new ParallelLazySeq(data.par))
  override def read[T]: ReaderExecutor[T] =
    ReaderScalaParallelLazyImpl.read[T]
  /**
   * Reads a text file into an RDD of lines.
   *
   * Fix: the previous implementation never closed the `Source` (leaking the
   * file handle) and relied on `Iterator.toSeq`, which may be lazy and could
   * then read from a closed source. Lines are now materialized eagerly with
   * `toVector` before the source is closed in a `finally` block.
   *
   * @param path          path of the text file to read
   * @param minPartitions unused by this local implementation; kept for API
   *                      compatibility with Spark's `textFile`
   */
  override def textFile(path: String, minPartitions: Int=2): RDDAPI[String] = {
    val source = scala.io.Source.fromFile(path)
    try {
      RDDAPI(new ParallelLazySeq(source.getLines().toVector.par))
    } finally {
      source.close()
    }
  }
}
| piotr-kalanski/spark-local | src/main/scala/com/datawizards/sparklocal/impl/scala/parallellazy/session/SparkSessionAPIScalaParallelLazyImpl.scala | Scala | apache-2.0 | 1,127 |
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: blah@cliffano.com
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package io.swagger.client.model
import play.api.libs.json._
// Generated model: link container for ExtensionClassImpl.
// The `class` field is backtick-escaped because `class` is a Scala keyword;
// with the macro-derived Format it maps to the JSON key "class".
case class ExtensionClassImpllinks (
  self: Option[Link],
  `class`: Option[String]
)
object ExtensionClassImpllinks {
  // Play JSON Format derived via macro from the case-class field names.
  implicit val format: Format[ExtensionClassImpllinks] = Json.format
}
| cliffano/swaggy-jenkins | clients/scala-lagom-server/generated/src/main/scala/io/swagger/client/model/ExtensionClassImpllinks.scala | Scala | mit | 632 |
package com.twitter.finagle.mux.exp.pushsession
import com.twitter.finagle.Mux.param.OppTls
import com.twitter.finagle.exp.pushsession.{PushChannelHandle, PushSession}
import com.twitter.finagle.liveness.FailureDetector
import com.twitter.finagle.mux.{Handshake, Request, Response}
import com.twitter.finagle.mux.Handshake.Headers
import com.twitter.finagle.mux.transport.{IncompatibleNegotiationException, MuxFramer, OpportunisticTls}
import com.twitter.finagle.{Service, Stack, param}
import com.twitter.io.{Buf, ByteReader}
import com.twitter.logging.{Level, Logger}
/**
* Abstraction of negotiation logic for push-based mux clients and servers
*/
private abstract class Negotiation(params: Stack.Params) {
  // Concrete session type produced once negotiation completes; pinned down by
  // the client/server subclasses.
  type SessionT <: PushSession[ByteReader, Buf]
  private[this] val log = Logger.get
  private[this] val statsReceiver = params[param.Stats].statsReceiver
  // Builds the final session from the negotiated writer/decoder pair.
  protected def builder(
    handle: PushChannelHandle[ByteReader, Buf],
    writer: MessageWriter,
    decoder: MuxMessageDecoder
  ): SessionT
  private[this] def remoteAddressString(handle: PushChannelHandle[_, _]): String =
    s"remote: ${handle.remoteAddress}"
  // effectual method that may throw
  private[this] def negotiateOppTls(
    handle: PushChannelHandle[ByteReader, Buf],
    peerHeaders: Option[Headers]
  ): Unit = {
    // Upgrading to TLS requires the concrete MuxChannelHandle; any other
    // handle type is a programming error.
    def turnOnTls(): Unit = handle match {
      case h: MuxChannelHandle => h.turnOnTls()
      case other =>
        // Should never happen when building a true client
        throw new IllegalStateException(
          "Expected to find a MuxChannelHandle, instead found " +
            s"$other. Couldn't turn on TLS. ${remoteAddressString(handle)}")
    }
    // Local preference comes from configuration; absent means Off.
    val localEncryptLevel = params[OppTls].level.getOrElse(OpportunisticTls.Off)
    // Remote preference is decoded from the peer's handshake headers; a peer
    // that didn't negotiate or didn't send a preference defaults to Off.
    val remoteEncryptLevel = peerHeaders
      .flatMap(Handshake.valueOf(OpportunisticTls.Header.KeyBuf, _)) match {
        case Some(buf) => OpportunisticTls.Header.decodeLevel(buf)
        case None =>
          log.debug("Peer either didn't negotiate or didn't send an Opportunistic Tls preference: " +
            s"defaulting to remote encryption level of Off. ${remoteAddressString(handle)}")
          OpportunisticTls.Off
      }
    try {
      // Throws IncompatibleNegotiationException when the two levels conflict.
      val useTls = OpportunisticTls.negotiate(localEncryptLevel, remoteEncryptLevel)
      if (log.isLoggable(Level.DEBUG)) {
        log.debug(s"Successfully negotiated TLS with remote peer. Using TLS: $useTls local level: " +
          s"$localEncryptLevel, remote level: $remoteEncryptLevel. ${remoteAddressString(handle)}")
      }
      if (useTls) {
        statsReceiver.counter("tls", "upgrade", "success").incr()
        turnOnTls()
      }
    } catch {
      case exn: IncompatibleNegotiationException =>
        log.fatal(
          exn,
          s"The local peer wanted $localEncryptLevel and the remote peer wanted" +
            s" $remoteEncryptLevel which are incompatible. ${remoteAddressString(handle)}"
        )
        throw exn
    }
  }
  /**
   * Attempt to negotiate the final Mux session.
   *
   * @note If negotiation fails an appropriate exception may be thrown.
   */
  def negotiate(
    handle: PushChannelHandle[ByteReader, Buf],
    peerHeaders: Option[Headers]
  ): SessionT = {
    negotiateOppTls(handle, peerHeaders)
    val framingStats = statsReceiver.scope("framer")
    val writeManager = {
      // Fragment size comes from the peer's handshake header; absent means
      // "don't fragment" (Int.MaxValue).
      val fragmentSize = peerHeaders
        .flatMap(Handshake.valueOf(MuxFramer.Header.KeyBuf, _))
        .map(MuxFramer.Header.decodeFrameSize(_))
        .getOrElse(Int.MaxValue)
      new FragmentingMessageWriter(handle, fragmentSize, framingStats)
    }
    builder(handle, writeManager, new FragmentDecoder(framingStats))
  }
}
private object Negotiation {
/** Client-side negotiation: builds a [[MuxClientSession]] from the stack params. */
final case class Client(params: Stack.Params) extends Negotiation(params) {
override type SessionT = MuxClientSession
protected def builder(
handle: PushChannelHandle[ByteReader, Buf],
writer: MessageWriter,
decoder: MuxMessageDecoder
): MuxClientSession = {
// Session wiring: failure detection, label, stats, and timer all come
// from the configured stack params.
new MuxClientSession(
handle,
decoder,
writer,
params[FailureDetector.Param].param,
params[param.Label].label,
params[param.Stats].statsReceiver,
params[param.Timer].timer
)
}
}
/** Server-side negotiation: builds a [[MuxServerSession]] backed by `service`. */
final case class Server(params: Stack.Params, service: Service[Request, Response])
extends Negotiation(params) {
override type SessionT = MuxServerSession
protected def builder(
handle: PushChannelHandle[ByteReader, Buf],
writer: MessageWriter,
decoder: MuxMessageDecoder
): MuxServerSession = {
new MuxServerSession(
params,
decoder,
writer,
handle,
service
)
}
}
}
| mkhq/finagle | finagle-mux/src/main/scala/com/twitter/finagle/mux/exp/pushsession/Negotiation.scala | Scala | apache-2.0 | 4,702 |
package me.heaton.profun.week4
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
/**
 * Tests for the pattern-matching expression ADT (NumberPM, SumPM, ProdPM,
 * VarPM), exercising `eval` (arithmetic evaluation) and `show`
 * (pretty-printing, which parenthesizes sums nested inside products).
 */
@RunWith(classOf[JUnitRunner])
class ExprPMTest extends FunSuite {
// eval: numeric evaluation of a sum.
test("eval sum of 2 and 3 is 5") {
assert(SumPM(NumberPM(2), NumberPM(3)).eval === 5)
}
// show: atoms render as themselves.
test("show Number 10 is 10") {
assert(NumberPM(10).show === "10")
}
test("show Sum of 5 and 10 is '5 + 10'") {
assert(SumPM(NumberPM(5), NumberPM(10)).show === "5 + 10")
}
test("show variable x is x") {
assert(VarPM("x").show === "x")
}
// show: products of atoms need no parentheses.
test("show x prod 5 is 'x * 5'") {
assert(ProdPM(VarPM("x"), NumberPM(5)).show === "x * 5")
}
// show: a product nested inside a sum needs no parentheses...
test("show 2 * x + y") {
assert(SumPM(ProdPM(NumberPM(2), VarPM("x")), VarPM("y")).show === "2 * x + y")
}
// ...but a sum nested inside a product must be parenthesized, on either side.
test("show (2 + x) * y") {
assert(ProdPM(SumPM(NumberPM(2), VarPM("x")), VarPM("y")).show === "(2 + x) * y")
}
test("show y * (2 + x)") {
assert(ProdPM(VarPM("y"), SumPM(NumberPM(2), VarPM("x"))).show === "y * (2 + x)")
}
test("show (2 + x) * (3 + y)") {
assert(ProdPM(SumPM(NumberPM(2), VarPM("x")), SumPM(NumberPM(3), VarPM("y"))).show === "(2 + x) * (3 + y)")
}
// show: nested sums on the right flatten without extra parentheses.
test("show 2 * (x + y + z)") {
assert(ProdPM(NumberPM(2), SumPM(VarPM("x"), SumPM(VarPM("y"), VarPM("z")))).show === "2 * (x + y + z)")
}
} | heaton/hello-scala | src/test/scala/me/heaton/profun/week4/ExprPMTest.scala | Scala | mit | 1,322 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's contents without revealing deeper insights.