code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package com.rainysoft.bayesian.inference
import com.rainysoft.bayesian.network.{NetworkNode, Network}
/** Implements enumeration ask algorithm.
*
* Implements enumeration ask algorithm from "Artificial Intelligence: A Modern Approach"
* by Russel, Stuart and Norvig, Peter.
*
* @author mikael.ohman
* 2014-03-16
*/
object Enumerate {

  /**
   * Computes the boolean distribution over the query variable.
   *
   * @param queryVariable The variable to get the distribution for.
   * @param evidence The observed values of other variables.
   * @param network The network containing query variable, observed variables and hidden variables.
   * @return Normalized boolean distribution over the query variable.
   */
  def enumerateAsk(queryVariable: String, evidence: Map[String, Boolean], network: Network): Map[Boolean, Double] = {
    // Because we have assumed only boolean valued nodes, the looping over all query variable
    // values reduces to summation of two terms (one for true, one for false).
    val qTrue = enumerateAll(network.nodes, evidence + (queryVariable -> true))
    val qFalse = enumerateAll(network.nodes, evidence + (queryVariable -> false))
    normalize(Map(true -> qTrue, false -> qFalse))
  }

  /**
   * Recursively sums out the remaining variables given the evidence collected so far.
   *
   * @param vars The nodes not yet processed; assumed topologically ordered so that a
   *             node's parents always appear (and are bound in evidence) before it.
   * @param evidence Values assigned so far, both observed and hypothesized.
   * @return The (unnormalized) probability of the evidence over the given variables.
   */
  def enumerateAll(vars: Traversable[NetworkNode], evidence: Map[String, Boolean]): Double = {
    if (vars.isEmpty) {
      1.0
    } else {
      // Pick the first variable and make sure to exclude it in future enumeration calls.
      val y = vars.head
      val rest = vars.drop(1)
      // Root nodes store their prior under the synthetic "UnitAction" key; all other
      // nodes are conditioned on their parents, whose values must already be in evidence.
      val p =
        if (y.parents.isEmpty) y.cpt(Traversable(("UnitAction", true)))
        else y.cpt(y.parents.map(pName => (pName, evidence(pName))))
      if (evidence.contains(y.name)) {
        // Observed variable: weight by P(y = observed | parents). Boolean-only nodes,
        // so the probability of the false branch is the complement 1 - p.
        if (evidence(y.name)) p * enumerateAll(rest, evidence)
        else (1.0 - p) * enumerateAll(rest, evidence)
      } else {
        // Hidden variable: because we have assumed only boolean values, the summation
        // is just an addition of the two hypothesized terms.
        p * enumerateAll(rest, evidence + (y.name -> true)) +
          (1.0 - p) * enumerateAll(rest, evidence + (y.name -> false))
      }
    }
  }

  /**
   * Normalizes the values so they sum to one.
   * Note: an all-zero input produces NaN entries (division by zero) — callers are
   * expected to supply at least one positive weight.
   */
  def normalize(vals: Map[Boolean, Double]): Map[Boolean, Double] = {
    val total = vals.values.sum
    vals.map { case (key, weight) => (key, weight / total) }
  }
}
| MikaelUmaN/BayesianNetwork | src/main/scala/com/rainysoft/bayesian/inference/Enumerate.scala | Scala | mit | 2,523 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package ast
import scala.annotation.tailrec
import symtab._
import util.DocStrings._
import scala.collection.mutable
import scala.tools.nsc.Reporting.WarningCategory
/*
* @author Martin Odersky
*/
trait DocComments { self: Global =>
// Cache of fully "cooked" doc comments (own template merged with inherited sections),
// keyed by symbol. Filled lazily by cookedDocComment via getOrElseUpdate.
val cookedDocComments = mutable.HashMap[Symbol, String]()
/** The raw doc comment map
*
* In IDE, background compilation runs get interrupted by
* reloading new sourcefiles. This is weak to avoid
* memleaks due to the doc of their cached symbols
* (e.g. in baseTypeSeq) between periodic doc reloads.
*/
val docComments = mutable.WeakHashMap[Symbol, DocComment]()
/** Drops all cached doc comment state: cooked comments, raw comments and @define variables. */
def clearDocComments(): Unit = {
  // The three caches are independent, so clearing order does not matter.
  defs.clear()
  docComments.clear()
  cookedDocComments.clear()
}
/** The raw doc comment of symbol `sym`, as it appears in the source text, "" if missing.
 */
def rawDocComment(sym: Symbol): String =
  docComments.get(sym).fold("")(_.raw)

/** The position of the raw doc comment of symbol `sym`, or NoPosition if missing
 *  If a symbol does not have a doc comment but some overridden version of it does,
 *  the position of the doc comment of the overridden version is returned instead.
 */
def docCommentPos(sym: Symbol): Position =
  getDocComment(sym).fold(NoPosition: Position)(_.pos)
/** A version which doesn't consider self types, as a temporary measure:
* an infinite loop has broken out between superComment and cookedDocComment
* since r23926.
*/
private def allInheritedOverriddenSymbols(sym: Symbol): List[Symbol] = {
// For fields the doc comment lives on the getter; prefer it when one exists.
val getter: Symbol = sym.getter
val symOrGetter = getter.orElse(sym)
if (!symOrGetter.owner.isClass) Nil
// Walk the owner's ancestors and keep every symbol this one overrides.
else symOrGetter.owner.ancestors map (symOrGetter overriddenSymbol _) filter (_ != NoSymbol)
}
// Records the raw doc comment for `sym` and eagerly extracts its @define variables
// into `defs` so that later variable lookups can see them.
def fillDocComment(sym: Symbol, comment: DocComment): Unit = {
docComments(sym) = comment
comment.defineVariables(sym)
}
// Rewrites Javadoc-style {@inheritDoc} markers into the scaladoc @inheritdoc tag,
// tolerating whitespace between the tag name and the closing brace.
def replaceInheritDocToInheritdoc(docStr: String):String = {
docStr.replaceAll("""\\{@inheritDoc\\p{Zs}*\\}""", "@inheritdoc")
}
/** The raw doc comment of symbol `sym`, minus usecase and define sections, augmented by
* missing sections of an inherited doc comment.
* If a symbol does not have a doc comment but some overridden version of it does,
* the doc comment of the overridden version is copied instead.
*/
def cookedDocComment(sym: Symbol, docStr: String = ""): String = cookedDocComments.getOrElseUpdate(sym, {
// Prefer the explicitly supplied doc string; fall back to the stored raw comment's template.
val ownComment = replaceInheritDocToInheritdoc {
if (docStr.length == 0) docComments get sym map (_.template) getOrElse ""
else DocComment(docStr).template
}
superComment(sym) match {
case None =>
// scala/bug#8210 - The warning would be false negative when this symbol is a setter
if (ownComment.indexOf("@inheritdoc") != -1 && ! sym.isSetter)
runReporting.warning(sym.pos, s"The comment for ${sym} contains @inheritdoc, but no parent comment is available to inherit from.", WarningCategory.Scaladoc, sym)
ownComment.replace("@inheritdoc", "<invalid inheritdoc annotation>")
case Some(sc) =>
// Inherit the parent comment wholesale when the child has none; otherwise fill in
// missing sections from the parent and expand explicit @inheritdoc tags.
if (ownComment == "") sc
else expandInheritdoc(sc, merge(sc, ownComment, sym), sym)
}
})
/** The cooked doc comment of symbol `sym` after variable expansion, or "" if missing.
*
* @param sym The symbol for which doc comment is returned
* @param site The class for which doc comments are generated
* @throws ExpansionLimitExceeded when more than 10 successive expansions
* of the same string are done, which is
* interpreted as a recursive variable definition.
*/
def expandedDocComment(sym: Symbol, site: Symbol, docStr: String = ""): String = {
// when parsing a top level class or module, use the (module-)class itself to look up variable definitions
val site1 = if ((sym.isModule || sym.isClass) && site.hasPackageFlag) sym
else site
expandVariables(cookedDocComment(sym, docStr), sym, site1)
}
/** The list of use cases of doc comment of symbol `sym` seen as a member of class
* `site`. Each use case consists of a synthetic symbol (which is entered nowhere else),
* of an expanded doc comment string, and of its position.
*
* @param sym The symbol for which use cases are returned
* @param site The class for which doc comments are generated
* @throws ExpansionLimitExceeded when more than 10 successive expansions
* of the same string are done, which is
* interpreted as a recursive variable definition.
*/
def useCases(sym: Symbol, site: Symbol): List[(Symbol, String, Position)] = {
def getUseCases(dc: DocComment) = {
val fullSigComment = cookedDocComment(sym)
for (uc <- dc.useCases; defn <- uc.expandedDefs(sym, site)) yield {
// use cases comments go through a series of transformations:
// 1 - filling in missing sections from the full signature
// 2 - expanding explicit inheritance @inheritdoc tags
// 3 - expanding variables like $COLL
val useCaseCommentRaw = uc.comment.raw
val useCaseCommentMerged = merge(fullSigComment, useCaseCommentRaw, defn)
val useCaseCommentInheritdoc = expandInheritdoc(fullSigComment, useCaseCommentMerged, sym)
val useCaseCommentVariables = expandVariables(useCaseCommentInheritdoc, sym, site)
(defn, useCaseCommentVariables, uc.pos)
}
}
getDocComment(sym) map getUseCases getOrElse List()
}
// The doc comment of `sym` itself, or failing that, of the closest overridden
// symbol that has one.
private def getDocComment(sym: Symbol): Option[DocComment] =
mapFind(sym :: allInheritedOverriddenSymbols(sym))(docComments get _)
/** The cooked doc comment of an overridden symbol */
protected def superComment(sym: Symbol): Option[String] = {
allInheritedOverriddenSymbols(sym).iterator
.map(cookedDocComment(_))
.find(_ != "")
}
/** Applies `f` to each element of `xs` in order and returns the first defined result. */
private def mapFind[A, B](xs: Iterable[A])(f: A => Option[B]): Option[B] =
  xs.iterator.map(f).collectFirst { case Some(b) => b }
/** True if the section `sec` of `str` is one of the tag kinds (@param, @tparam,
 *  @return) that `merge` is allowed to copy over from a parent comment.
 */
private def isMovable(str: String, sec: (Int, Int)): Boolean =
  Seq("@param", "@tparam", "@return").exists(tag => startsWithTag(str, sec, tag))
/** Merge elements of doccomment `src` into doc comment `dst` for symbol `sym`.
* In detail:
* 1. If `copyFirstPara` is true, copy first paragraph
* 2. For all parameters of `sym` if there is no @param section
* in `dst` for that parameter name, but there is one on `src`, copy that section.
* 3. If there is no @return section in `dst` but there is one in `src`, copy it.
*/
def merge(src: String, dst: String, sym: Symbol, copyFirstPara: Boolean = false): String = {
val srcSections = tagIndex(src)
val dstSections = tagIndex(dst)
val srcParams = paramDocs(src, "@param", srcSections)
val dstParams = paramDocs(dst, "@param", dstSections)
val srcTParams = paramDocs(src, "@tparam", srcSections)
val dstTParams = paramDocs(dst, "@tparam", dstSections)
val out = new StringBuilder
// `copied` tracks how much of `dst` has already been emitted to `out`; `tocopy` is
// the index in `dst` up to which text is copied verbatim before merged sections.
var copied = 0
var tocopy = startTag(dst, dstSections dropWhile (!isMovable(dst, _)))
if (copyFirstPara) {
val eop = // end of comment body (first para), which is delimited by blank line, or tag, or end of comment
(findNext(src, 0)(src.charAt(_) == '\\n')) min startTag(src, srcSections)
out append src.substring(0, eop).trim
// Start past the opening "/**" of dst when appending the remainder later.
copied = 3
tocopy = 3
}
// When dst lacks a section that src has, flush dst up to `tocopy` and splice in the
// src section; when dst has its own section, just extend the verbatim-copy window.
def mergeSection(srcSec: Option[(Int, Int)], dstSec: Option[(Int, Int)]) = dstSec match {
case Some((start, end)) =>
if (end > tocopy) tocopy = end
case None =>
srcSec match {
case Some((start1, end1)) => {
out append dst.substring(copied, tocopy).trim
out append "\\n"
copied = tocopy
out append src.substring(start1, end1).trim
}
case None =>
}
}
for (params <- sym.paramss; param <- params)
mergeSection(srcParams get param.name.toString, dstParams get param.name.toString)
for (tparam <- sym.typeParams)
mergeSection(srcTParams get tparam.name.toString, dstTParams get tparam.name.toString)
mergeSection(returnDoc(src, srcSections), returnDoc(dst, dstSections))
mergeSection(groupDoc(src, srcSections), groupDoc(dst, dstSections))
// If nothing was merged, return dst unchanged; otherwise append the remaining tail of dst.
if (out.length == 0) dst
else {
out append dst.substring(copied)
out.toString
}
}
/**
* Expand inheritdoc tags
* - for the main comment we transform the inheritdoc into the super variable,
* and the variable expansion can expand it further
* - for the param, tparam and throws sections we must replace comments on the spot
*
* This is done separately, for two reasons:
* 1. It takes longer to run compared to merge
* 2. The inheritdoc annotation should not be used very often, as building the comment from pieces severely
* impacts performance
*
* @param parent The source (or parent) comment
* @param child The child (overriding member or usecase) comment
* @param sym The child symbol
* @return The child comment with the inheritdoc sections expanded
*/
def expandInheritdoc(parent: String, child: String, sym: Symbol): String =
if (child.indexOf("@inheritdoc") == -1)
child
else {
val parentSections = tagIndex(parent)
val childSections = tagIndex(child)
val parentTagMap = sectionTagMap(parent, parentSections)
// Named sections are keyed twice: first by tag kind, then by the parameter name.
val parentNamedParams = Map() +
("@param" -> paramDocs(parent, "@param", parentSections)) +
("@tparam" -> paramDocs(parent, "@tparam", parentSections)) +
("@throws" -> paramDocs(parent, "@throws", parentSections))
val out = new StringBuilder
// Replaces any @inheritdoc marker in `childSection` with the matching parent text;
// `parentSection` is by-name so it is only computed when a marker is present.
def replaceInheritdoc(childSection: String, parentSection: => String) =
if (childSection.indexOf("@inheritdoc") == -1)
childSection
else
childSection.replace("@inheritdoc", parentSection)
// Finds the parent's text corresponding to the child section delimited by `section`.
def getParentSection(section: (Int, Int)): String = {
def getSectionHeader = extractSectionTag(child, section) match {
case param@("@param"|"@tparam"|"@throws") => param + " " + extractSectionParam(child, section)
case other => other
}
def sectionString(param: String, paramMap: Map[String, (Int, Int)]): String =
paramMap.get(param) match {
case Some(section) =>
// Cleanup the section tag and parameter
val sectionTextBounds = extractSectionText(parent, section)
cleanupSectionText(parent.substring(sectionTextBounds._1, sectionTextBounds._2))
case None =>
reporter.echo(sym.pos, "The \\"" + getSectionHeader + "\\" annotation of the " + sym +
" comment contains @inheritdoc, but the corresponding section in the parent is not defined.")
"<invalid inheritdoc annotation>"
}
// The first 7 characters suffice to distinguish the named sections
// ("@param ", "@tparam", "@throws") from plain tag sections.
child.substring(section._1, section._1 + 7) match {
case param@("@param "|"@tparam"|"@throws") =>
sectionString(extractSectionParam(child, section), parentNamedParams(param.trim))
case _ =>
sectionString(extractSectionTag(child, section), parentTagMap)
}
}
// The comment body before the first tag section, without the leading "/**".
def mainComment(str: String, sections: List[(Int, Int)]): String =
if (str.trim.length > 3)
str.trim.substring(3, startTag(str, sections))
else
""
// Append main comment
out.append("/**")
out.append(replaceInheritdoc(mainComment(child, childSections), mainComment(parent, parentSections)))
// Append sections
for (section <- childSections)
out.append(replaceInheritdoc(child.substring(section._1, section._2), getParentSection(section)))
out.append("*/")
out.toString
}
/** Maps symbols to the variable -> replacement maps that are defined
* in their doc comments
*/
private val defs = mutable.HashMap[Symbol, Map[String, String]]() withDefaultValue Map()
/** Lookup definition of variable.
*
* @param vble The variable for which a definition is searched
* @param site The class for which doc comments are generated
*/
@tailrec
final def lookupVariable(vble: String, site: Symbol): Option[String] = site match {
case NoSymbol => None
case _ =>
// Search the site itself (for modules) plus all its base classes; if the variable
// is not found there, retry in the enclosing owner.
val searchList =
if (site.isModule) site :: site.info.baseClasses
else site.info.baseClasses
searchList collectFirst { case x if defs(x) contains vble => defs(x)(vble) } match {
// A definition starting with "$" is an alias for another variable; follow it.
case Some(str) if str startsWith "$" => lookupVariable(str.tail, site)
case s @ Some(str) => s
case None => lookupVariable(vble, site.owner)
}
}
/** Expand variable occurrences in string `str`, until a fix point is reached or
* an expandLimit is exceeded.
*
* @param initialStr The string to be expanded
* @param sym The symbol for which doc comments are generated
* @param site The class for which doc comments are generated
* @return Expanded string
*/
protected def expandVariables(initialStr: String, sym: Symbol, site: Symbol): String = {
val expandLimit = 10
@tailrec
def expandInternal(str: String, depth: Int): String = {
if (depth >= expandLimit)
throw new ExpansionLimitExceeded(str)
val out = new StringBuilder
// `copied` marks how much of `str` has been flushed to `out`; `idx` is the scan cursor.
var copied, idx = 0
// excluding variables written as \\$foo so we can use them when
// necessary to document things like Symbol#decode
def isEscaped = idx > 0 && str.charAt(idx - 1) == '\\\\'
while (idx < str.length) {
if ((str charAt idx) != '$' || isEscaped)
idx += 1
else {
val vstart = idx
idx = skipVariable(str, idx + 1)
// Flushes the text before the variable and substitutes `repl` for it.
def replaceWith(repl: String): Unit = {
out append str.substring(copied, vstart)
out append repl
copied = idx
}
variableName(str.substring(vstart + 1, idx)) match {
case "super" =>
// $super splices in the parent comment's body plus its non-movable sections.
superComment(sym) foreach { sc =>
val superSections = tagIndex(sc)
replaceWith(sc.substring(3, startTag(sc, superSections)))
for (sec @ (start, end) <- superSections)
if (!isMovable(sc, sec)) out append sc.substring(start, end)
}
case "" => idx += 1
case vname =>
lookupVariable(vname, site) match {
case Some(replacement) => replaceWith(replacement)
case None =>
val pos = docCommentPos(sym)
val loc = pos withPoint (pos.start + vstart + 1)
runReporting.warning(loc, s"Variable $vname undefined in comment for $sym in $site", WarningCategory.Scaladoc, sym)
}
}
}
}
// Nothing was replaced in this pass: the fix point has been reached.
if (out.length == 0) str
else {
out append str.substring(copied)
expandInternal(out.toString, depth + 1)
}
}
// We suppressed expanding \\$ throughout the recursion, and now we
// need to replace \\$ with $ so it looks as intended.
expandInternal(initialStr, 0).replace("""\\$""", "$")
}
// !!! todo: inherit from Comment?
case class DocComment(raw: String, pos: Position = NoPosition, codePos: Position = NoPosition) {
/** Returns:
* template: the doc comment minus all @define and @usecase sections
* defines : all define sections (as strings)
* useCases: all usecase sections (as instances of class UseCase)
*/
lazy val (template, defines, useCases) = {
val sections = tagIndex(raw)
val defines = sections filter { startsWithTag(raw, _, "@define") }
val usecases = sections filter { startsWithTag(raw, _, "@usecase") }
val end = startTag(raw, (defines ::: usecases).sortBy(_._1))
// If no @define/@usecase section exists the comment is kept as is; otherwise it is
// truncated before the first such section and re-terminated with "*/".
(if (end == raw.length - 2) raw else raw.substring(0, end) + "*/",
defines map { case (start, end) => raw.substring(start, end) },
usecases map { case (start, end) => decomposeUseCase(start, end) })
}
// Parses one @usecase section into its code line (the synthetic signature) and its
// attached comment, translating offsets into positions in the enclosing source.
private def decomposeUseCase(start: Int, end: Int): UseCase = {
val codeStart = skipWhitespace(raw, start + "@usecase".length)
val codeEnd = skipToEol(raw, codeStart)
val code = raw.substring(codeStart, codeEnd)
val codePos = subPos(codeStart, codeEnd)
val commentStart = skipLineLead(raw, codeEnd + 1) min end
val comment = "/** " + raw.substring(commentStart, end) + "*/"
val commentPos = subPos(commentStart, end)
runReporting.deprecationWarning(codePos, "The @usecase tag is deprecated, instead use the @example tag to document the usage of your API", "2.13.0", site = "", origin = "")
UseCase(DocComment(comment, commentPos, codePos), code, codePos)
}
// Translates an offset range within this comment into a Position in the source file,
// or NoPosition when the comment itself has no position.
private def subPos(start: Int, end: Int) =
if (pos == NoPosition) NoPosition
else {
val start1 = pos.start + start
val end1 = pos.start + end
pos withStart start1 withPoint start1 withEnd end1
}
// Parses the @define sections and records the variable -> replacement map for `sym`
// in the trait-level `defs` cache.
def defineVariables(sym: Symbol) = {
// Strips leading horizontal whitespace and trailing whitespace from a definition body.
val Trim = "(?s)^[\\\\s&&[^\\n\\r]]*(.*?)\\\\s*$".r
defs(sym) ++= defines.map {
str => {
val start = skipWhitespace(str, "@define".length)
val (key, value) = str.splitAt(skipVariable(str, start))
key.drop(start) -> value
}
} map {
case (key, Trim(value)) => variableName(key) -> value.replaceAll("\\\\s+\\\\*+$", "")
case x => throw new MatchError(x)
}
}
}
case class UseCase(comment: DocComment, body: String, pos: Position) {
var defined: List[Symbol] = List() // initialized by Typer
var aliases: List[Symbol] = List() // initialized by Typer
// Clones the `defined` symbols for `sym` at `site`, substituting the usecase alias
// types (e.g. $Coll) with the types their doc variables expand to.
def expandedDefs(sym: Symbol, site: Symbol): List[Symbol] = {
// Selects member `name` on `site`, falling back to `orElse` when absent.
def select(site: Type, name: Name, orElse: => Type): Type = {
val member = site.nonPrivateMember(name)
if (member.isTerm) singleType(site, member)
else if (member.isType) site.memberType(member)
else orElse
}
// Resolves the first identifier of a dotted path by searching the owner chain.
def getSite(name: Name): Type = {
def findIn(sites: List[Symbol]): Type = sites match {
case List() => NoType
case site :: sites1 => select(site.thisType, name, findIn(sites1))
}
// Previously, searching was taking place *only* in the current package and in the root package
// now we're looking for it everywhere in the hierarchy, so we'll be able to link variable expansions like
// immutable.Seq in package immutable
//val (classes, pkgs) = site.ownerChain.span(!_.isPackageClass)
//val sites = (classes ::: List(pkgs.head, rootMirror.RootClass)))
//findIn(sites)
findIn(site.ownerChain ::: List(rootMirror.EmptyPackage))
}
// Parses the dotted path in `str` (a variable replacement) and resolves it to a Type.
def getType(str: String, variable: String): Type = {
def getParts(start: Int): List[String] = {
val end = skipIdent(str, start)
if (end == start) List()
else str.substring (start, end) :: {
if (end < str.length && (str charAt end) == '.') getParts(end + 1)
else List()
}
}
val parts = getParts(0)
if (parts.isEmpty) {
reporter.error(comment.codePos, "Incorrect variable expansion for " + variable + " in use case. Does the " +
"variable expand to wiki syntax when documenting " + site + "?")
return ErrorType
}
// All intermediate path elements are terms; only the last one names a type.
val partnames = (parts.init map newTermName) :+ newTypeName(parts.last)
val (start, rest) = parts match {
case "this" :: _ => (site.thisType, partnames.tail)
case _ :: "this" :: _ =>
site.ownerChain.find(_.name == partnames.head) match {
case Some(clazz) => (clazz.thisType, partnames drop 2)
case _ => (NoType, Nil)
}
case _ =>
(getSite(partnames.head), partnames.tail)
}
val result = rest.foldLeft(start)(select(_, _, NoType))
if (result == NoType)
runReporting.warning(
comment.codePos,
s"""Could not find the type $variable points to while expanding it for the usecase signature of $sym in $site. In this context, $variable = "$str".""",
WarningCategory.Scaladoc,
site)
result
}
/*
* work around the backticks issue suggested by Simon in
* https://groups.google.com/forum/?hl=en&fromgroups#!topic/scala-internals/z7s1CCRCz74
* ideally, we'd have a removeWikiSyntax method in the CommentFactory to completely eliminate the wiki markup
*/
def cleanupVariable(str: String) = {
val tstr = str.trim
if (tstr.length >= 2 && tstr.startsWith("`") && tstr.endsWith("`"))
tstr.substring(1, tstr.length - 1)
else
tstr
}
// the Boolean tells us whether we can normalize: if we found an actual type, then yes, we can normalize, else no,
// use the synthetic alias created for the variable
val aliasExpansions: List[(Type, Boolean)] =
for (alias <- aliases) yield
lookupVariable(alias.name.toString.substring(1), site) match {
case Some(repl) =>
val repl2 = cleanupVariable(repl)
val tpe = getType(repl2, alias.name.toString)
if (tpe != NoType) (tpe, true)
else {
val alias1 = alias.cloneSymbol(rootMirror.RootClass, alias.rawflags, newTypeName(repl2))
(typeRef(NoPrefix, alias1, Nil), false)
}
case None =>
(typeRef(NoPrefix, alias, Nil), false)
}
// Finds the expansion paired with `sym` in the parallel lists `from`/`to`.
@tailrec
def subst(sym: Symbol, from: List[Symbol], to: List[(Type, Boolean)]): (Type, Boolean) =
if (from.isEmpty) (sym.tpe, false)
else if (from.head == sym) to.head
else subst(sym, from.tail, to.tail)
// Rewrites $-prefixed alias type refs to their expansions throughout a type.
val substAliases = new TypeMap {
def apply(tp: Type) = mapOver(tp) match {
case tp1 @ TypeRef(pre, sym, args) if (sym.name.length > 1 && sym.name.startChar == '$') =>
subst(sym, aliases, aliasExpansions) match {
case (TypeRef(pre1, sym1, _), canNormalize) =>
val tpe = typeRef(pre1, sym1, args)
if (canNormalize) tpe.normalize else tpe
case _ =>
tp1
}
case tp1 =>
tp1
}
}
for (defn <- defined) yield {
defn.cloneSymbol(sym.owner, sym.flags | Flags.SYNTHETIC) modifyInfo (info =>
substAliases(info).asSeenFrom(site.thisType, sym.owner)
)
}
}
}
/** Signals a runaway (probably cyclic) sequence of doc variable expansions.
 *  The offending string is passed to `Exception` so that `getMessage` reports it
 *  instead of returning null.
 */
class ExpansionLimitExceeded(str: String) extends Exception(str)
}
| scala/scala | src/compiler/scala/tools/nsc/ast/DocComments.scala | Scala | apache-2.0 | 23,411 |
package com.yetu.oauth2provider.signature.models
// Raised when a signature is syntactically malformed. `msg` is forwarded to the
// Exception constructor so getMessage (and stack traces/logs) report it instead of null.
case class SignatureSyntaxException(msg: String) extends Exception(msg)
// Raised when signature verification fails. `msg` is forwarded to the Exception
// constructor so getMessage (and stack traces/logs) report it instead of null.
case class SignatureException(msg: String) extends Exception(msg)
| yetu/oauth2-provider | app/com/yetu/oauth2provider/signature/models/Exceptions.scala | Scala | mit | 178 |
package org.catapult.sa.fulgurite.geotiff
import javax.imageio.metadata.IIOMetadata
import com.github.jaiimageio.impl.plugins.tiff.{TIFFIFD, TIFFImageMetadata}
import com.github.jaiimageio.plugins.tiff.{BaselineTIFFTagSet, GeoTIFFTagSet, TIFFField, TIFFTag}
import collection.JavaConversions._
/**
* Helper class to extract fields from GeoTIFF metadata
*
* Wraps up an instance of IOMetadata and provides readonly accessors to the required fields
*
* Implementation of metadata helper using jaiimageio rather than the GeoTiffIOMetadataAdapter
*/
class GeoTiffMetaHelper(baseMeta : IIOMetadata) {

  // Root IFD of the TIFF directory tree; every accessor reads its fields from here.
  private val meta = baseMeta.asInstanceOf[TIFFImageMetadata].getRootIFD

  // Shared lookup: extracts the field's value, or yields `default` when the tag is
  // absent from the IFD (getTIFFField returns null in that case).
  private def baseGet[T](field : Int, default : T, extractor : TIFFField => T) : T =
    Option(meta.getTIFFField(field)).fold(default)(extractor)

  private def getIntField(field : Int, offset : Int = 0) = baseGet(field, -1, _.getAsInt(offset))
  private def getIntsField(field : Int) = baseGet(field, Array.empty[Int], _.getAsInts())
  private def getLongField(field : Int, offset : Int = 0) = baseGet(field, -1L, _.getAsLong(offset))
  private def getLongsField(field : Int) = baseGet(field, Array.empty[Long], _.getAsLongs())
  private def getDoublesField(field : Int) = baseGet(field, Array.empty[Double], _.getAsDoubles())
  private def getStringField(field : Int) = baseGet(field, "", _.getAsString(0))
  private def getRationalField(field : Int, offset : Int = 0) = baseGet(field, Array(0L, 0L), _.getAsRational(offset))

  def width = getIntField(BaselineTIFFTagSet.TAG_IMAGE_WIDTH)
  def height = getIntField(BaselineTIFFTagSet.TAG_IMAGE_LENGTH)
  def samplesPerPixel = getIntField(BaselineTIFFTagSet.TAG_SAMPLES_PER_PIXEL)
  def bitsPerSample = getIntsField(BaselineTIFFTagSet.TAG_BITS_PER_SAMPLE)
  def firstOffset = getLongField(BaselineTIFFTagSet.TAG_STRIP_OFFSETS)
  // End of the pixel data: last strip offset plus that strip's byte count.
  // NOTE(review): this throws on an image without strip fields (empty arrays) — confirm
  // callers only use it on strip-organised files.
  def endOffset = getLongsField(BaselineTIFFTagSet.TAG_STRIP_OFFSETS).last + getLongsField(BaselineTIFFTagSet.TAG_STRIP_BYTE_COUNTS).last
  def modelTiePoints = getDoublesField(GeoTIFFTagSet.TAG_MODEL_TIE_POINT)
  def pixelScales = getDoublesField(GeoTIFFTagSet.TAG_MODEL_PIXEL_SCALE)
  def photometricInterpretation = getIntField(BaselineTIFFTagSet.TAG_PHOTOMETRIC_INTERPRETATION)
  def planarConfiguration = getIntField(BaselineTIFFTagSet.TAG_PLANAR_CONFIGURATION)
  def extraSamples = getIntsField(BaselineTIFFTagSet.TAG_EXTRA_SAMPLES)
  def sampleFormats = getIntsField(BaselineTIFFTagSet.TAG_SAMPLE_FORMAT)
  def geoAsciiParams = getStringField(GeoTIFFTagSet.TAG_GEO_ASCII_PARAMS)
  def xResolution = getRationalField(BaselineTIFFTagSet.TAG_X_RESOLUTION)
  def yResolution = getRationalField(BaselineTIFFTagSet.TAG_Y_RESOLUTION)
  def compression = getIntField(BaselineTIFFTagSet.TAG_COMPRESSION)
  def geoKeyDirectory = getIntsField(GeoTIFFTagSet.TAG_GEO_KEY_DIRECTORY)
}
object GeoTiffMetaHelper {

  private val base = BaselineTIFFTagSet.getInstance
  private val geoTiffBase = GeoTIFFTagSet.getInstance

  /** Builds a TIFFImageMetadata tree describing `meta`, ready to hand to an image writer.
   *  Strip offsets and byte counts are reserved but left empty: they can only be filled
   *  in once the pixel data has actually been written.
   */
  def createImageMetaData(meta : GeoTiffMeta) : TIFFImageMetadata = {
    val baseMeta = new TIFFImageMetadata(List(geoTiffBase, base))
    // baseMeta is already statically typed as TIFFImageMetadata, so no cast is needed.
    val rootIFD = baseMeta.getRootIFD
    // update the ifd and make sure it matches the meta object.
    // Use .toInt consistently for the numeric narrowing (instead of asInstanceOf[Int]).
    val numRows = meta.height.toInt * meta.samplesPerPixel
    //val numColumns = meta.width * meta.bytesPerSample.sum
    setInt(rootIFD, BaselineTIFFTagSet.TAG_IMAGE_WIDTH, meta.width.toInt)
    setInt(rootIFD, BaselineTIFFTagSet.TAG_IMAGE_LENGTH, meta.height.toInt)
    setInt(rootIFD, BaselineTIFFTagSet.TAG_SAMPLES_PER_PIXEL, meta.samplesPerPixel)
    setShorts(rootIFD, BaselineTIFFTagSet.TAG_BITS_PER_SAMPLE, meta.bitsPerSample.map(_.toChar))
    setGeoDoubles(rootIFD, GeoTIFFTagSet.TAG_MODEL_PIXEL_SCALE, meta.pixelScales)
    setInt(rootIFD, BaselineTIFFTagSet.TAG_PLANAR_CONFIGURATION, meta.planarConfiguration)
    setInt(rootIFD, BaselineTIFFTagSet.TAG_PHOTOMETRIC_INTERPRETATION, meta.photometricInterpretation)
    // Chunky data is written as one strip per image row; planar data as one strip per band.
    val rowsPerStrip = meta.planarConfiguration match {
      case BaselineTIFFTagSet.PLANAR_CONFIGURATION_CHUNKY => meta.height.toInt
      case BaselineTIFFTagSet.PLANAR_CONFIGURATION_PLANAR => meta.samplesPerPixel
      case _ => throw new IllegalArgumentException("Unknown planar configuration")
    }
    setInt(rootIFD, BaselineTIFFTagSet.TAG_ROWS_PER_STRIP, rowsPerStrip)
    setInt(rootIFD, BaselineTIFFTagSet.TAG_COMPRESSION, meta.compression)
    setRational(rootIFD, BaselineTIFFTagSet.TAG_X_RESOLUTION, meta.xResolution)
    setRational(rootIFD, BaselineTIFFTagSet.TAG_Y_RESOLUTION, meta.yResolution)
    rootIFD.addTIFFField(new TIFFField(geoTiffBase.getTag(GeoTIFFTagSet.TAG_GEO_ASCII_PARAMS), TIFFTag.TIFF_ASCII, 1, Array(meta.geoAsciiParams)))
    setGeoShorts(rootIFD, GeoTIFFTagSet.TAG_GEO_KEY_DIRECTORY, meta.geoKeyDirectory.map(_.toChar))
    // given we have just updated the size we should also update the number of offset and byte count places to be filled in later
    rootIFD.removeTIFFField(BaselineTIFFTagSet.TAG_STRIP_OFFSETS)
    setEmptyLongs(rootIFD, BaselineTIFFTagSet.TAG_STRIP_OFFSETS, numRows)
    rootIFD.removeTIFFField(BaselineTIFFTagSet.TAG_STRIP_BYTE_COUNTS)
    setEmptyLongs(rootIFD, BaselineTIFFTagSet.TAG_STRIP_BYTE_COUNTS, numRows)
    // Optional fields: when we have no data for them they must not be present at all.
    if (meta.extraSamples.isEmpty) {
      rootIFD.removeTIFFField(BaselineTIFFTagSet.TAG_EXTRA_SAMPLES)
    } else {
      setShorts(rootIFD, BaselineTIFFTagSet.TAG_EXTRA_SAMPLES, meta.extraSamples.map(_.toChar))
    }
    if (meta.sampleFormat.isEmpty) {
      rootIFD.removeTIFFField(BaselineTIFFTagSet.TAG_SAMPLE_FORMAT)
    } else {
      setShorts(rootIFD, BaselineTIFFTagSet.TAG_SAMPLE_FORMAT, meta.sampleFormat.map(_.toChar))
    }
    baseMeta
  }

  // Small wrappers around TIFFField construction, one per payload kind.
  private def setInt(rootIFD : TIFFIFD, field : Int, value : Int) =
    rootIFD.addTIFFField(new TIFFField(base.getTag(field), value))
  private def setRational(rootIFD : TIFFIFD, field : Int, value : Array[Long]) =
    rootIFD.addTIFFField(new TIFFField(base.getTag(field), TIFFTag.TIFF_RATIONAL, 1, Array(value)))
  // Reserves `numRows` TIFF_LONG slots whose values are filled in after writing.
  private def setEmptyLongs(rootIFD : TIFFIFD, field : Int, numRows : Int) =
    rootIFD.addTIFFField(new TIFFField(base.getTag(field), TIFFTag.TIFF_LONG, numRows))
  private def setShorts(rootIFD : TIFFIFD, field : Int, value : Array[Char]) =
    rootIFD.addTIFFField(new TIFFField(base.getTag(field), TIFFTag.TIFF_SHORT, value.length, value))
  private def setGeoShorts(rootIFD : TIFFIFD, field : Int, value : Array[Char]) =
    rootIFD.addTIFFField(new TIFFField(geoTiffBase.getTag(field), TIFFTag.TIFF_SHORT, value.length, value))
  private def setGeoDoubles(rootIFD : TIFFIFD, field : Int, value : Array[Double]) =
    rootIFD.addTIFFField(new TIFFField(geoTiffBase.getTag(field), TIFFTag.TIFF_DOUBLE, value.length, value))
} | SatelliteApplicationsCatapult/fulgurite | fulgurite-core/src/main/scala/org/catapult/sa/fulgurite/geotiff/GeoTiffMetaHelper.scala | Scala | lgpl-3.0 | 7,054 |
package spire.laws.shadows
import cats.kernel.Comparison
import spire.algebra.Order
trait ShadowOrder[A, S] extends ShadowPartialOrder[A, S] with Order[Shadow[A, S]] {
  implicit def A: Order[A]
  implicit def S: Order[S]

  /** Evaluates the same query against the checked value and its shadow, asserts that
   *  the two results agree, and returns the value-side result. The parameters are
   *  by-name so the value side is still evaluated first, as before.
   */
  private def agree[R](onValue: => R, onShadow: => R): R = {
    val fromValue = onValue
    val fromShadow = onShadow
    assert(fromValue == fromShadow)
    fromValue
  }

  // eqv/partialCompare are already shadow-checked by ShadowPartialOrder.
  override def eqv(x: Shadow[A, S], y: Shadow[A, S]): Boolean = super[ShadowPartialOrder].eqv(x, y)
  override def partialCompare(x: Shadow[A, S], y: Shadow[A, S]): Double = super[ShadowPartialOrder].partialCompare(x, y)

  def compare(x: Shadow[A, S], y: Shadow[A, S]): Int =
    agree(A.compare(x.a, y.a), S.compare(x.s, y.s))

  override def comparison(x: Shadow[A, S], y: Shadow[A, S]): Comparison =
    agree(A.comparison(x.a, y.a), S.comparison(x.s, y.s))

  // min/max are rebuilt componentwise; agreement is implied by the checked compare.
  override def min(x: Shadow[A, S], y: Shadow[A, S]): Shadow[A, S] =
    Shadow(A.min(x.a, y.a), S.min(x.s, y.s))

  override def max(x: Shadow[A, S], y: Shadow[A, S]): Shadow[A, S] =
    Shadow(A.max(x.a, y.a), S.max(x.s, y.s))

  override def neqv(x: Shadow[A, S], y: Shadow[A, S]): Boolean =
    agree(A.neqv(x.a, y.a), S.neqv(x.s, y.s))

  override def lteqv(x: Shadow[A, S], y: Shadow[A, S]): Boolean =
    agree(A.lteqv(x.a, y.a), S.lteqv(x.s, y.s))

  override def lt(x: Shadow[A, S], y: Shadow[A, S]): Boolean =
    agree(A.lt(x.a, y.a), S.lt(x.s, y.s))

  override def gteqv(x: Shadow[A, S], y: Shadow[A, S]): Boolean =
    agree(A.gteqv(x.a, y.a), S.gteqv(x.s, y.s))

  override def gt(x: Shadow[A, S], y: Shadow[A, S]): Boolean =
    agree(A.gt(x.a, y.a), S.gt(x.s, y.s))
}
| non/spire | laws/src/main/scala/spire/laws/shadows/ShadowOrder.scala | Scala | mit | 1,775 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons
import scala.util.control.NonFatal
import com.typesafe.scalalogging.StrictLogging
package object validation extends StrictLogging {

  // Pre-built validations for values that occur very frequently.
  val TrueSuccess = true.success
  val FalseSuccess = false.success
  val NoneSuccess = None.success
  val NullStringSuccess = "null".success

  /** Evaluates `f`, turning any non-fatal exception into a Failure whose message is
   *  derived from the exception's class name and message via `errorMapper`.
   *  Fatal throwables (OutOfMemoryError, etc.) are deliberately left to propagate.
   */
  def safely[T](errorMapper: String => String = identity)(f: => Validation[T]): Validation[T] =
    try f
    catch {
      case NonFatal(e) =>
        val message = errorMapper(s"${e.getClass.getSimpleName}: ${e.getMessage}")
        logger.info(message, e)
        message.failure
    }

  /** Enriches any value with a `success` constructor for Validation. */
  implicit class SuccessWrapper[T](val value: T) extends AnyVal {
    def success: Validation[T] = Success(value)
  }

  /** Enriches strings with a `failure` constructor for Validation. */
  implicit class FailureWrapper(val message: String) extends AnyVal {
    def failure: Failure = Failure(message)
  }
}
| GabrielPlassard/gatling | gatling-commons/src/main/scala/io/gatling/commons/validation/package.scala | Scala | apache-2.0 | 1,447 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import org.scalatestplus.mockito.MockitoSugar
import org.scalatestplus.play.PlaySpec
import org.scalatestplus.play.guice.GuiceOneAppPerSuite
import scala.concurrent.{Await, Future}
import generators.AmlsReferenceNumberGenerator
import generators.submission.SubscriptionResponseGenerator
import models.ResponseType.SubscriptionResponseType
import models.{FeeResponse, ResponseType}
import org.joda.time.DateTime
import org.mockito.Matchers.{any, eq => eqTo}
import org.mockito.Mockito._
import org.scalatest.concurrent.ScalaFutures
import services.{AuthEnrolmentsService, FeeResponseService}
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.ExecutionContext.Implicits.global
/**
 * Unit tests for `FeeHelper.retrieveFeeResponse`, covering both the case
 * where fee information exists for the user and the case where it does not.
 */
class FeeHelperSpec extends PlaySpec with MockitoSugar
  with ScalaFutures
  with GuiceOneAppPerSuite
  with AmlsReferenceNumberGenerator
  with SubscriptionResponseGenerator {

  "FeeHelper" when {

    implicit val hc = HeaderCarrier()

    // Subject under test, wired with mocked collaborators.
    val feeHelper = new FeeHelper(
      feeResponseService = mock[FeeResponseService],
      enrolmentService = mock[AuthEnrolmentsService]
    )

    // Builds a representative FeeResponse for the stubbed service to return.
    def feeResponse(responseType: ResponseType) = FeeResponse(
      responseType = responseType,
      amlsReferenceNumber = amlsRegistrationNumber,
      registrationFee = 100,
      fpFee = None,
      approvalCheckFee = None,
      premiseFee = 0,
      totalFees = 200,
      paymentReference = Some(paymentReferenceNumber),
      difference = Some(115),
      createdAt = DateTime.now
    )

    val subscriptionFees = feeResponse(SubscriptionResponseType)

    "the user has fees" must {
      "fetch fees" in {
        when(feeHelper.enrolmentService.amlsRegistrationNumber(any(), any())(any(), any()))
          .thenReturn(Future.successful(Some(amlsRegistrationNumber)))

        when(feeHelper.feeResponseService.getFeeResponse(eqTo(amlsRegistrationNumber), any[(String, String)]())(any(), any()))
          .thenReturn(Future.successful(Some(subscriptionFees)))

        val result = Await.result(
          feeHelper.retrieveFeeResponse(Some(amlsRegistrationNumber), ("foo", "bar"), None, "feeHelper"), 5.seconds
        )

        result.isDefined mustBe true
      }
    }

    "the user has no fees" must {
      "fetch fees" in {
        when(feeHelper.enrolmentService.amlsRegistrationNumber(any(), any())(any(), any()))
          .thenReturn(Future.successful(Some(amlsRegistrationNumber)))

        when(feeHelper.feeResponseService.getFeeResponse(eqTo(amlsRegistrationNumber), any[(String, String)]())(any(), any()))
          .thenReturn(Future.successful(None))

        val result = Await.result(
          feeHelper.retrieveFeeResponse(Some(amlsRegistrationNumber), ("foo", "bar"), None, "feeHelper"), 5.seconds
        )

        result.isDefined mustBe false
      }
    }
  }
}
| hmrc/amls-frontend | test/utils/FeeHelperSpec.scala | Scala | apache-2.0 | 3,473 |
/*
* Copyright (c) 2017. Yuriy Stul
*/
package com.stulsoft.scala.math.assessment.integration
import com.stulsoft.scala.math.integration.{Integration,IntegrationMethod}
/**
* Assessments for interpolation methods
*
* @author Yuriy Stul
*/
/**
 * Manual assessment for the numeric integration methods: integrates sin(x)
 * over [0, Pi] (whose exact value is 2.0) with each available method and
 * prints the absolute error. Output format is unchanged from the original
 * per-method implementations.
 */
private object IntegrationAssessment extends App {

  /**
   * Prints entry/exit markers around a computation and reports |result - expected|.
   * `compute` is by-name so the entry marker is printed before the work starts.
   */
  private def report(name: String, expected: Double = 2.0)(compute: => Double): Unit = {
    println(s"==>$name")
    val dif = Math.abs(compute - expected)
    println(s"Difference is $dif")
    println(s"<==$name")
  }

  // One assessment per integration method; all share the same integrand and bounds.
  private def sinRectangleFunction(): Unit =
    report("sinRectangleFunction")(Integration.s(Math.sin, 0.0, Math.PI, 40, IntegrationMethod.Rectangle))

  private def sinTrapezoidalFunction(): Unit =
    report("sinTrapezoidalFunction")(Integration.s(Math.sin, 0.0, Math.PI, 40, IntegrationMethod.Trapezoidal))

  private def sinSimpsonFunction(): Unit =
    report("sinSimpsonFunction")(Integration.s(Math.sin, 0.0, Math.PI, 40, IntegrationMethod.Simpson))

  println("==>main")
  sinRectangleFunction()
  sinTrapezoidalFunction()
  sinSimpsonFunction()
  println("<==main")
}
| ysden123/ys-scala-math | src/test/scala/com/stulsoft/scala/math/assessment/integration/IntegrationAssessment.scala | Scala | mit | 1,334 |
package actors
import java.net.URI
import akka.actor.{Actor, ActorLogging, AddressFromURIString}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.cluster.pubsub.{DistributedPubSub, DistributedPubSubMediator}
import mousio.etcd4j.EtcdClient
import mousio.etcd4j.responses.EtcdException
import play.api.Application
import play.twirl.api.TemplateMagic.javaCollectionToScala
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
/**
 * Actor that reacts to Akka cluster membership events and, when an etcd
 * endpoint is configured via the ETCDCTL_PEERS system property, periodically
 * registers this node as a seed under /jetchat/seeds and participates in a
 * simple TTL-based master election under /jetchat/master.
 *
 * NOTE(review): the etcd calls below are blocking (.send().get()) and run on
 * the actor's dispatcher — consider a dedicated dispatcher; verify with the team.
 */
class ClusterListener(application: Application) extends Actor with ActorLogging {
  // Intervals/TTLs in milliseconds; Play config wins, else system property, else default.
  val NODES_DISCOVER_INTERVAL = application.configuration.getLong("cluster.seed-nodes.discover-interval").getOrElse(System.getProperty("cluster.seed-nodes.discover-interval", "6000").toLong)
  val NODES_TTL = application.configuration.getLong("cluster.seed-nodes.ttl").getOrElse(System.getProperty("cluster.seed-nodes.ttl", "12000").toLong)
  val MASTER_TTL = application.configuration.getLong("cluster.seed-nodes.master-ttl").getOrElse(System.getProperty("cluster.seed-nodes.master-ttl", "24000").toLong)
  import DistributedPubSubMediator.Subscribe
  val mediator = DistributedPubSub(context.system).mediator
  val cluster = Cluster(context.system)
  // etcd endpoints, comma-separated; null means "no etcd", i.e. standalone mode.
  val etcdPeers = System.getProperty("ETCDCTL_PEERS")
  val client = if (etcdPeers != null) {
    // Normalize addresses so bare host:port entries get an http:// scheme.
    val addrs = etcdPeers.split(",").toList.map(addr => URI.create(if (addr.startsWith("http://")) addr else "http://" + addr))
    new EtcdClient(addrs: _*)
  } else {
    null
  }
  // Currently known seed addresses ("host:port"), refreshed on each DiscoveryEvent.
  var seeds = new collection.mutable.HashSet[String]()
  // Without etcd there is no election, so a standalone node is always master.
  var isMaster = client == null
  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
    mediator ! Subscribe("cluster-events", self)
    context.system.eventStream.subscribe(self, classOf[MasterStateInquiry])
    // Only schedule etcd-based discovery when an etcd client exists.
    if (client != null) {
      context.system.scheduler.schedule(0.seconds, NODES_DISCOVER_INTERVAL.millisecond, self, DiscoveryEvent())
    }
  }
  override def postStop(): Unit = cluster.unsubscribe(self)
  def receive = {
    // Answer master-state inquiries with the current election state.
    case inquiry: MasterStateInquiry =>
      context.system.eventStream.publish(if (isMaster) MasterEvent else SlaveEvent )
    case MemberUp(member) =>
      log.info("Member is Up: {}", member.address)
    case UnreachableMember(member) =>
      log.info("Member detected as unreachable: {}", member)
    case MemberRemoved(member, previousStatus) =>
      log.info("Member is Removed: {} after {}",
        member.address, previousStatus)
    // Application-level cluster events are forwarded to actors matched by userMask.
    case event: ClusterEvent =>
      log.info("Received a cluster event: " + event)
      context.system.actorSelection(s"/user/${event.userMask}.*") ! event.message
    // Periodic tick: refresh our seed registration, re-read all seeds, run election.
    case event: DiscoveryEvent =>
      val selfHost = cluster.selfAddress.host.get
      val selfPort = cluster.selfAddress.port.get
      log.info(s"Updating cluster seed information: '$selfHost:$selfPort'")
      // Re-register ourselves with a TTL so dead nodes expire automatically.
      client.put(s"/jetchat/seeds/$selfHost:$selfPort", "up").ttl((NODES_TTL / 1000).toInt).send().get()
      val newSeeds = new collection.mutable.HashSet[String]()
      val seedInfoNodes = client.get("/jetchat/seeds").send().get.node.nodes.toList
      seedInfoNodes.foreach { case i =>
        if (i.key.contains(":")) {
          // Keys look like "/jetchat/seeds/host:port"; 15 chars strips the prefix.
          val address = i.key.substring(15)
          newSeeds.add(address)
          if (!seeds.contains(address)) {
            log.info(s"A cluster seed found: '$address'")
          }
        }
      }
      // Re-join the seed nodes only when the seed set actually changed.
      if (!seeds.equals(newSeeds)) {
        (seeds -- newSeeds).foreach(s => log.info(s"A cluster seed lost: '$s'"))
        seeds = newSeeds
        cluster.joinSeedNodes(seeds.map(address => AddressFromURIString(s"akka.tcp://application@$address")).toList)
      }
      // Master election: the key holder is master; prevExist(false) makes the
      // claim atomic, and the TTL lets the role fail over if the master dies.
      val master = try { Some(client.get("/jetchat/master").send().get().node.value) } catch { case e:EtcdException => None}
      if (master.isEmpty) {
        try {
          client.put("/jetchat/master", s"$selfHost:$selfPort").ttl((MASTER_TTL / 1000).toInt).prevExist(false).send().get()
          log.info(s"The cluster master is self-elected: '$selfHost:$selfPort'")
          if (!isMaster) {
            isMaster = true
            context.system.eventStream.publish(MasterEvent)
          }
        } catch { case e:EtcdException =>
          // Lost the race to another node claiming mastership; stay slave.
        }
      } else if (master.get.equals(s"$selfHost:$selfPort")) {
        // We are the master: refresh the TTL, guarded by prevValue for atomicity.
        client.put("/jetchat/master", s"$selfHost:$selfPort").ttl((MASTER_TTL / 1000).toInt).prevValue(s"$selfHost:$selfPort").send().get()
      } else {
        isMaster = false
        context.system.eventStream.publish(SlaveEvent)
      }
    case _: MemberEvent => // ignore
  }
} | JetChat/JetChat | app/actors/ClusterListener.scala | Scala | apache-2.0 | 4,633 |
/*
This file is part of Intake24.
Copyright 2015, 2016 Newcastle University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package uk.ac.ncl.openlab.intake24.foodxml.scripts
import java.io.FileReader
import scala.collection.JavaConversions.asScalaBuffer
import au.com.bytecode.opencsv.CSVReader
import scala.xml.XML
import java.io.File
import java.io.FileFilter
// NOTE(review): the entire body of this one-off migration script is commented
// out and has no entry point; it is retained verbatim for reference only.
// Consider deleting it or moving it to version-control history.
object ParseNewPhotosGuide {
  /* import ParseNewPhotos._
  def main(args: Array[String]): Unit = {
    val sourcePath = "D:\\\\SCRAN24\\\\Notes\\\\new_guides.csv"
    val foodsSrc = "D:\\\\SCRAN24\\\\Data\\\\foods.xml"
    val foodsDst = "D:\\\\SCRAN24\\\\Data\\\\foods-newguide.xml"
    val guideSrc = "D:\\\\SCRAN24\\\\Data\\\\guide.xml"
    val guideDst = "D:\\\\SCRAN24\\\\Data\\\\guide-new.xml"
    val photosBase = "D:\\\\SCRAN24\\\\Photos_new"
    // FOOD (for reference) PHOTO TYPE WEIGHT (g) photo code food code(s) linked to
    val rows = new CSVReader(new FileReader(sourcePath)).readAll().toSeq.map(_.toSeq)
    val parsedRows = rows.tail.map(r => Row(r(0), r(1), r(2), r(3), r.drop(4).filterNot(_.isEmpty).filter(_.length == 4)))
    val grouped = parsedRows.groupBy(r => r.foods)
    val oldGuideImages = GuideImageDef.parseXml(XML.load(guideSrc))
    val newGuideImages = grouped.values.toSeq.map(rows => {
      val id = mkid(rows.map(_.id))
      println(id)
      rows.foreach(println)
      val weights = rows.map(row => GuideImageWeightRecord(row.description, row.id.substring(id.length).toInt, row.weight.toDouble))
      (id, GuideImage(id, "No description", weights))
    }).toMap
    Util.writeXml(GuideImageDef.toXml((oldGuideImages ++ newGuideImages).values.toSeq.sortBy(_.id)), guideDst)
    val newGuideImageAssoc = grouped.mapValues(rows => mkid(rows.map(_.id))).toSeq
    val foods = FoodDef.parseXml(XML.load(foodsSrc)).map(f => (f.code, f)).toMap
    val updatedFoods = newGuideImageAssoc.foldLeft(foods) {
      case (foods, (assocFoods, guideId)) => assocFoods.foldLeft(foods) {
        case (foods, code) => foods.get(code) match {
          case Some(food) => {
            println("Updating " + foods(code))
            foods + (code -> foods(code).copy(portionSize = foods(code).portionSize :+ PortionSizeMethod("guide-image", "No description", "portion/placeholder.jpg", Seq(("guide-image-id", guideId)))))
          }
          case None => {
            println("Missing food: " + code)
            foods
          }
        }
      }
    }
    Util.writeXml(FoodDef.toXml(updatedFoods.values.toSeq.sortBy(_.description)), foodsDst)
  }*/
} | digitalinteraction/intake24 | FoodDataXML/src/main/scala/uk/ac/ncl/openlab/intake24/foodxml/scripts/ParseNewPhotosGuide.scala | Scala | apache-2.0 | 3,026 |
package endpoints.akkahttp.server
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives
import endpoints.algebra
trait OptionalResponses extends algebra.OptionalResponses with Endpoints {

  /**
   * Interprets an optional response: `None` is rendered as an empty HTTP
   * result with status 404, while `Some` is delegated to the wrapped response.
   */
  def option[A](response: Response[A]): Response[Option[A]] = {
    case None    => Directives.complete(HttpResponse(StatusCodes.NotFound))
    case Some(a) => response(a)
  }
}
| Krever/endpoints | akka-http/server/src/main/scala/endpoints/akkahttp/server/OptionalResponses.scala | Scala | mit | 512 |
package dawn.flow.trajectory
import dawn.flow._
import io.circe.generic.JsonCodec
import breeze.linalg.{max => _, min => _, _ => _}
import spire.math.{Real => _, _ => _}
import spire.implicits._
case class TrajInit(a: Acceleration, v: Velocity, p: Position, q: Quat)
/**
 * A time-parameterised trajectory model providing position, velocity,
 * acceleration, jerk, attitude and body-rate queries over [0, tf].
 */
trait Trajectory extends Model {
  // Total duration of the trajectory.
  def tf: Timeframe
  // Waypoints with the time spent reaching each from the previous one.
  def keypoints: List[(Keypoint, Timeframe)]
  def gravity: Vec3
  def init: Init
  def initQ: Quat
  // Bundles the initial state into a single value.
  def trajInit =
    TrajInit(init.a, init.v, init.p, initQ)
  // Converts relative keypoint durations into absolute timestamps.
  def getKeypoints = {
    var t = 0.0
    keypoints.map(x => { t += x._2; Timestamped(t, x._1) })
  }
  def getPosition(t: Time): Position
  def getVelocity(t: Time): Velocity
  def getAcceleration(t: Time): Acceleration
  def isFeasible: Boolean
  //Acceleration + G
  // def getFullAcceleration(t: Time): Acceleration =
  //  getAcceleration(t) + gravity
  // TODO(review): confirm getAcceleration is expressed in the world frame as
  // assumed by the rotation below.
  //Acceleration in the local referential of the drone
  def getLocalAcceleration(t: Time) = {
    // Rotate the world-frame acceleration into the body frame via the inverse attitude.
    getOrientationQuaternion(t).reciprocal
      .rotate(getAcceleration(t))
  }
  def getJerk(t: Time): Jerk
  //NormalVector is oriented toward acceleration - G
  //because it is assumed that acceleration direction is the goal
  //and you need to retrieve it after applying G
  def getNormalVector(t: Time): NormalVector
  def getThrust(t: Time): Thrust
  //The quaternion is not unique but we choose the "shortest arc".
  def getOrientationQuaternion(t: Time) = {
    // Clamp t to tf (with a warning) so out-of-range queries stay defined.
    val rt =
      if (t > tf) {
        println("[Warning] time over tf")
        tf
      } else
        t
    Quat.getQuaternion(
      Vec3(0, 0, 1),
      getNormalVector(rt)
    )
  }
  // Angular velocity approximated by finite differences of the attitude over dt.
  def getOmega(t: Time, dt: Timestep): Vec3 = {
    Quat.quatToAxisAngle(getOrientationQuaternion(Math.max(0, t-dt)),
                         getOrientationQuaternion(t)) / dt
  }
  // Samples the complete trajectory state at time t.
  def getPoint(t: Time, dt: Timestep = 1e-3): TrajectoryPoint =
    TrajectoryPoint(getPosition(t),
                    getVelocity(t),
                    getLocalAcceleration(t),
                    getJerk(t),
                    getNormalVector(t),
                    getOrientationQuaternion(t),
                    getThrust(t),
                    getOmega(t, dt))
}
/** Clock source ticking every `dt`, stopping once the trajectory's end time `tf` is reached. */
class TrajectoryClock(dt: Timestep)(
    implicit val modelHook: ModelHook[Trajectory],
    val schedulerHook: SchedulerHook,
    val nodeHook: NodeHook
) extends Block0[Time]
    with RequireModel[Trajectory] {

  def name = "TrajectoryClock " + dt

  // Emits timestamps strictly below the model's total duration.
  lazy val out = (new Clock(dt)).takeWhile(_ < model.get.tf, "< tf")
}
// Full kinematic sample of a trajectory at one instant: position, velocity,
// (body-frame) acceleration, jerk, normal vector, attitude quaternion,
// thrust and body rates. JSON-serialisable via circe's @JsonCodec.
@JsonCodec
case class TrajectoryPoint(p: Vec3,
                           v: Vec3,
                           a: Vec3,
                           j: Vec3,
                           nv: Vec3,
                           q: Quaternion[Real],
                           t: Thrust,
                           br: Vec3)
// Fully-specified initial condition (position, velocity, acceleration).
case class Init(p: Vec3, v: Vec3, a: Vec3) {
  // Projects the initial condition onto axis i.
  def apply(i: Int) = SingleAxisInit(p(i), v(i), a(i))
}

object Init {
  // All-zero initial condition.
  def zero = Init(Vec3.zero, Vec3.zero, Vec3.zero)
}
// A waypoint constraint; each of position/velocity/acceleration may be left
// unconstrained (None). JSON-serialisable via circe's @JsonCodec.
@JsonCodec
case class Keypoint(p: Option[Vec3], v: Option[Vec3], a: Option[Vec3]) {
  // Projects the constraint onto axis i, preserving which components are set.
  def apply(i: Int) = SingleAxisGoal(p.map(_(i)), v.map(_(i)), a.map(_(i)))
}

object Keypoint {
  // Position-only constraint.
  def apply(p: Vec3): Keypoint = Keypoint(Some(p), None, None)
  def one = Keypoint(Some(Vec3.one), Some(Vec3.one), Some(Vec3.one))
}
// Single-axis projections of Init and Keypoint respectively.
case class SingleAxisInit(p: Real, v: Real, a: Real)
case class SingleAxisGoal(p: Option[Real], v: Option[Real], a: Option[Real])
| rubenfiszel/scala-flow | core/src/main/scala/trajectory/Trajectory.scala | Scala | mit | 3,462 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.entity
import spray.json.{deserializationError, DefaultJsonProtocol, JsNumber, JsObject, JsString, JsValue, RootJsonFormat}
import spray.json._
import scala.collection.mutable.ListBuffer
import scala.util.Try
/**
* An instance id representing an invoker
*
* @param instance a numeric value used for the load balancing and Kafka topic creation
* @param uniqueName an identifier required for dynamic instance assignment by Zookeeper
* @param displayedName an identifier that is required for the health protocol to correlate Kafka topics with invoker container names
*/
case class InvokerInstanceId(val instance: Int,
                             uniqueName: Option[String] = None,
                             displayedName: Option[String] = None,
                             val userMemory: ByteSize)
    extends InstanceId {
  def toInt: Int = instance

  override val instanceType = "invoker"
  override val source = s"$instanceType$instance"
  // Human-readable form: "invokerN[/uniqueName][/displayedName]".
  override val toString: String =
    ("invoker" + instance :: uniqueName.toList ::: displayedName.toList).mkString("/")
  override val toJson: JsValue = InvokerInstanceId.serdes.write(this)
}
// Instance id of a controller; the id string must be Kafka-topic-safe
// (length and character set are checked by InstanceId.validate).
case class ControllerInstanceId(asString: String) extends InstanceId {
  validate(asString)
  override val instanceType = "controller"
  override val source = s"$instanceType$asString"
  override val toString: String = source
  override val toJson: JsValue = ControllerInstanceId.serdes.write(this)
}
// Instance id of a scheduler; the id string must be Kafka-topic-safe
// (length and character set are checked by InstanceId.validate).
case class SchedulerInstanceId(val asString: String) extends InstanceId {
  validate(asString)
  override val instanceType = "scheduler"
  override val source = s"$instanceType$asString"
  override val toString: String = source
  override val toJson: JsValue = SchedulerInstanceId.serdes.write(this)
}
object InvokerInstanceId extends DefaultJsonProtocol {
  /** Parses a JSON string into an InvokerInstanceId, capturing failures in Try. */
  def parse(c: String): Try[InvokerInstanceId] = Try(serdes.read(c.parseJson))

  implicit val serdes = new RootJsonFormat[InvokerInstanceId] {
    override def write(i: InvokerInstanceId): JsValue = {
      // Mandatory fields first, then the optional ones only when present,
      // keeping the same field order as before.
      val mandatory = List(
        "instance" -> JsNumber(i.instance),
        "userMemory" -> JsString(i.userMemory.toString),
        "instanceType" -> JsString(i.instanceType))
      val optional =
        i.uniqueName.map(n => "uniqueName" -> JsString(n)).toList :::
          i.displayedName.map(n => "displayedName" -> JsString(n)).toList
      JsObject((mandatory ::: optional): _*)
    }

    override def read(json: JsValue): InvokerInstanceId = {
      val instance = fromField[Int](json, "instance")
      val uniqueName = fromField[Option[String]](json, "uniqueName")
      val displayedName = fromField[Option[String]](json, "displayedName")
      val userMemory = fromField[String](json, "userMemory")
      fromField[String](json, "instanceType") match {
        case "invoker" =>
          new InvokerInstanceId(instance, uniqueName, displayedName, ByteSize.fromString(userMemory))
        case _ =>
          deserializationError("could not read InvokerInstanceId")
      }
    }
  }
}
object ControllerInstanceId extends DefaultJsonProtocol {
  /** Parses a JSON string into a ControllerInstanceId, capturing failures in Try. */
  def parse(c: String): Try[ControllerInstanceId] = Try(serdes.read(c.parseJson))

  implicit val serdes = new RootJsonFormat[ControllerInstanceId] {
    override def write(c: ControllerInstanceId): JsValue =
      JsObject("asString" -> JsString(c.asString), "instanceType" -> JsString(c.instanceType))

    override def read(json: JsValue): ControllerInstanceId =
      json.asJsObject.getFields("asString", "instanceType") match {
        // Two-field form: only the "controller" instance type is accepted.
        case Seq(JsString(asString), JsString("controller")) =>
          new ControllerInstanceId(asString)
        case Seq(JsString(_), JsString(_)) =>
          deserializationError("could not read ControllerInstanceId")
        // Legacy single-field form without an instance type.
        case Seq(JsString(asString)) =>
          new ControllerInstanceId(asString)
        case _ =>
          deserializationError("could not read ControllerInstanceId")
      }
  }
}
object SchedulerInstanceId extends DefaultJsonProtocol {
  // Serializes the single "asString" field via spray-json's derived format.
  implicit val serdes = jsonFormat(SchedulerInstanceId.apply _, "asString")
}
/** Common contract for invoker/controller/scheduler instance ids. */
trait InstanceId {

  // controller ids become part of a kafka topic, hence allow only certain characters
  // see https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/internals/Topic.java#L29
  private val LEGAL_CHARS = "[a-zA-Z0-9._-]+"

  // reserve some number of characters as the prefix to be added to topic names
  private val MAX_NAME_LENGTH = 249 - 121

  // Compact JSON rendering of this id.
  def serialize: String = InstanceId.serdes.write(this).compactPrint

  // Fails with IllegalArgumentException when the id is not Kafka-topic-safe.
  def validate(asString: String): Unit =
    require(
      asString.length <= MAX_NAME_LENGTH && asString.matches(LEGAL_CHARS),
      s"$instanceType instance id contains invalid characters")

  val instanceType: String

  val source: String

  val toJson: JsValue
}
object InstanceId extends DefaultJsonProtocol {
  /** Parses a JSON string into a concrete InstanceId, capturing failures in Try. */
  def parse(i: String): Try[InstanceId] = Try(serdes.read(i.parseJson))

  implicit val serdes = new RootJsonFormat[InstanceId] {
    override def write(i: InstanceId): JsValue = i.toJson

    override def read(json: JsValue): InstanceId = {
      // Dispatch on the "instanceType" discriminator to the concrete format.
      val JsObject(members) = json
      members.get("instanceType").map(_.convertTo[String]) match {
        case Some("invoker")    => json.convertTo[InvokerInstanceId]
        case Some("controller") => json.convertTo[ControllerInstanceId]
        case Some(_) | None     => deserializationError("could not read InstanceId")
      }
    }
  }
}
| akrabat/openwhisk | common/scala/src/main/scala/org/apache/openwhisk/core/entity/InstanceId.scala | Scala | apache-2.0 | 6,523 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.factories.utils
import java.util
import org.apache.flink.api.common.serialization.{DeserializationSchema, SerializationSchema}
import org.apache.flink.table.descriptors.{DescriptorProperties, FormatDescriptorValidator, SchemaValidator}
import org.apache.flink.table.factories.{DeserializationSchemaFactory, SerializationSchemaFactory, TableFormatFactoryServiceTest}
import org.apache.flink.types.Row
/**
* Table format factory for testing.
*
* It has the same context as [[TestAmbiguousTableFormatFactory]] and both support COMMON_PATH.
* This format does not support SPECIAL_PATH but supports schema derivation.
*/
class TestTableFormatFactory
  extends DeserializationSchemaFactory[Row]
  with SerializationSchemaFactory[Row] {

  // Matches descriptors whose format type/property-version equal the test constants.
  override def requiredContext(): util.Map[String, String] = {
    val context = new util.HashMap[String, String]()
    context.put(
      FormatDescriptorValidator.FORMAT_TYPE,
      TableFormatFactoryServiceTest.TEST_FORMAT_TYPE)
    context.put(FormatDescriptorValidator.FORMAT_PROPERTY_VERSION, "1")
    context
  }

  // This factory can derive its format schema from the table schema.
  override def supportsSchemaDerivation(): Boolean = true

  // Declares every property key this factory understands; unknown keys cause
  // the factory service to reject the descriptor.
  override def supportedProperties(): util.List[String] = {
    val properties = new util.ArrayList[String]()
    properties.add(TableFormatFactoryServiceTest.UNIQUE_PROPERTY)
    properties.add(TableFormatFactoryServiceTest.COMMON_PATH)
    properties.add(FormatDescriptorValidator.FORMAT_DERIVE_SCHEMA)
    properties.addAll(SchemaValidator.getSchemaDerivationKeys)
    properties
  }

  // Builds a test deserializer whose row type is derived from the schema properties.
  override def createDeserializationSchema(
      properties: util.Map[String, String])
    : DeserializationSchema[Row] = {
    val props = new DescriptorProperties(true)
    props.putProperties(properties)
    val schema = SchemaValidator.deriveFormatFields(props)
    new TestDeserializationSchema(schema.toRowType)
  }

  // Builds a test serializer whose row type is derived from the schema properties.
  override def createSerializationSchema(
      properties: util.Map[String, String])
    : SerializationSchema[Row] = {
    val props = new DescriptorProperties(true)
    props.putProperties(properties)
    val schema = SchemaValidator.deriveFormatFields(props)
    new TestSerializationSchema(schema.toRowType)
  }
}
| yew1eb/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/factories/utils/TestTableFormatFactory.scala | Scala | apache-2.0 | 2,995 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.batch.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.utils.TableTestBase
import org.apache.flink.table.utils.TableTestUtil._
import org.junit.Test
/**
 * Plan tests for SQL GROUPING SETS / CUBE / ROLLUP on batch tables: each test
 * asserts that the query translates into the expected DataSet operator tree.
 */
class GroupingSetsTest extends TableTestBase {

  // GROUPING SETS (b, c): a single aggregate per grouping set, projected with GROUP_ID.
  @Test
  def testGroupingSets(): Unit = {
    val util = batchTestUtil()
    val table = util.addTable[(Int, Long, Int)]("MyTable", 'a, 'b, 'c)

    val sqlQuery = "SELECT b, c, avg(a) as a, GROUP_ID() as g FROM MyTable " +
      "GROUP BY GROUPING SETS (b, c)"

    val aggregate = unaryNode(
      "DataSetCalc",
      unaryNode(
        "DataSetAggregate",
        batchTableNode(table),
        term("groupBy", "b", "c"),
        term("select", "b", "c", "AVG(a) AS a")
      ),
      term("select", "b", "c", "a", "0:BIGINT AS g")
    )

    util.verifySql(sqlQuery, aggregate)
  }

  // CUBE (b, c) expands into four grouping sets: (b,c), (b), (c), () — one
  // aggregate subtree each, unioned together with GROUPING/GROUPING_ID markers.
  @Test
  def testCube(): Unit = {
    val util = batchTestUtil()
    val table = util.addTable[(Int, Long, Int)]("MyTable", 'a, 'b, 'c)

    val sqlQuery = "SELECT b, c, avg(a) as a, GROUP_ID() as g, " +
      "GROUPING(b) as gb, GROUPING(c) as gc, " +
      "GROUPING_ID(b) as gib, GROUPING_ID(c) as gic, " +
      "GROUPING_ID(b, c) as gid " +
      "FROM MyTable " +
      "GROUP BY CUBE (b, c)"

    // Grouping set (b, c).
    val group1 = unaryNode(
      "DataSetCalc",
      unaryNode(
        "DataSetAggregate",
        batchTableNode(table),
        term("groupBy", "b", "c"),
        term("select", "b", "c", "AVG(a) AS a")
      ),
      term("select", "b", "c", "a", "0:BIGINT AS g", "1:BIGINT AS gb", "1:BIGINT AS gc",
        "1:BIGINT AS gib", "1:BIGINT AS gic", "3:BIGINT AS gid")
    )

    // Grouping set (b): c is null-padded.
    val group2 = unaryNode(
      "DataSetCalc",
      unaryNode(
        "DataSetAggregate",
        unaryNode(
          "DataSetCalc",
          batchTableNode(table),
          term("select", "b", "a")
        ),
        term("groupBy", "b"),
        term("select", "b", "AVG(a) AS a")
      ),
      term("select", "b", "null:INTEGER AS c", "a", "0:BIGINT AS g", "1:BIGINT AS gb",
        "0:BIGINT AS gc", "1:BIGINT AS gib", "0:BIGINT AS gic", "2:BIGINT AS gid")
    )

    // Grouping set (c): b is null-padded.
    val group3 = unaryNode(
      "DataSetCalc",
      unaryNode(
        "DataSetAggregate",
        unaryNode(
          "DataSetCalc",
          batchTableNode(table),
          term("select", "c", "a")
        ),
        term("groupBy", "c"),
        term("select", "c", "AVG(a) AS a")
      ),
      term("select", "null:BIGINT AS b", "c", "a", "0:BIGINT AS g", "0:BIGINT AS gb",
        "1:BIGINT AS gc", "0:BIGINT AS gib", "1:BIGINT AS gic", "1:BIGINT AS gid")
    )

    // Grand total (): both keys null-padded.
    val group4 = unaryNode(
      "DataSetCalc",
      unaryNode(
        "DataSetAggregate",
        unaryNode(
          "DataSetCalc",
          batchTableNode(table),
          term("select", "a")
        ),
        term("select", "AVG(a) AS a")
      ),
      term(
        "select", "null:BIGINT AS b", "null:INTEGER AS c", "a", "0:BIGINT AS g", "0:BIGINT AS gb",
        "0:BIGINT AS gc", "0:BIGINT AS gib", "0:BIGINT AS gic", "0:BIGINT AS gid")
    )

    // All four grouping sets are combined with UNION ALL.
    val union = binaryNode(
      "DataSetUnion",
      binaryNode(
        "DataSetUnion",
        binaryNode(
          "DataSetUnion",
          group1,
          group2,
          term("all", "true"),
          term("union", "b", "c", "a", "g", "gb", "gc", "gib", "gic", "gid")
        ),
        group3,
        term("all", "true"),
        term("union", "b", "c", "a", "g", "gb", "gc", "gib", "gic", "gid")
      ),
      group4,
      term("all", "true"),
      term("union", "b", "c", "a", "g", "gb", "gc", "gib", "gic", "gid")
    )

    util.verifySql(sqlQuery, union)
  }

  // ROLLUP (b, c) expands into three grouping sets: (b,c), (b), () — unlike
  // CUBE, the (c)-only set is not produced.
  @Test
  def testRollup(): Unit = {
    val util = batchTestUtil()
    val table = util.addTable[(Int, Long, Int)]("MyTable", 'a, 'b, 'c)

    val sqlQuery = "SELECT b, c, avg(a) as a, GROUP_ID() as g, " +
      "GROUPING(b) as gb, GROUPING(c) as gc, " +
      "GROUPING_ID(b) as gib, GROUPING_ID(c) as gic, " +
      "GROUPING_ID(b, c) as gid " + " FROM MyTable " +
      "GROUP BY ROLLUP (b, c)"

    // Grouping set (b, c).
    val group1 = unaryNode(
      "DataSetCalc",
      unaryNode(
        "DataSetAggregate",
        batchTableNode(table),
        term("groupBy", "b", "c"),
        term("select", "b", "c", "AVG(a) AS a")
      ),
      term("select", "b", "c", "a", "0:BIGINT AS g", "1:BIGINT AS gb", "1:BIGINT AS gc",
        "1:BIGINT AS gib", "1:BIGINT AS gic", "3:BIGINT AS gid")
    )

    // Grouping set (b): c is null-padded.
    val group2 = unaryNode(
      "DataSetCalc",
      unaryNode(
        "DataSetAggregate",
        unaryNode(
          "DataSetCalc",
          batchTableNode(table),
          term("select", "b", "a")
        ),
        term("groupBy", "b"),
        term("select", "b", "AVG(a) AS a")
      ),
      term("select", "b", "null:INTEGER AS c", "a", "0:BIGINT AS g", "1:BIGINT AS gb",
        "0:BIGINT AS gc", "1:BIGINT AS gib", "0:BIGINT AS gic", "2:BIGINT AS gid")
    )

    // Grand total (): both keys null-padded.
    val group3 = unaryNode(
      "DataSetCalc",
      unaryNode(
        "DataSetAggregate",
        unaryNode(
          "DataSetCalc",
          batchTableNode(table),
          term("select", "a")
        ),
        term("select", "AVG(a) AS a")
      ),
      term(
        "select", "null:BIGINT AS b", "null:INTEGER AS c", "a", "0:BIGINT AS g", "0:BIGINT AS gb",
        "0:BIGINT AS gc", "0:BIGINT AS gib", "0:BIGINT AS gic", "0:BIGINT AS gid")
    )

    // All three grouping sets are combined with UNION ALL.
    val union = binaryNode(
      "DataSetUnion",
      binaryNode(
        "DataSetUnion",
        group1,
        group2,
        term("all", "true"),
        term("union", "b", "c", "a", "g", "gb", "gc", "gib", "gic", "gid")
      ),
      group3,
      term("all", "true"),
      term("union", "b", "c", "a", "g", "gb", "gc", "gib", "gic", "gid")
    )

    util.verifySql(sqlQuery, union)
  }
}
| jinglining/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/batch/sql/GroupingSetsTest.scala | Scala | apache-2.0 | 6,628 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.table
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.planner.plan.utils.JavaUserDefinedAggFunctions.WeightedAvgWithMerge
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit.Test
import java.sql.Timestamp
class GroupWindowTest extends TableTestBase {

  //===============================================================================================
  // Common tests
  //===============================================================================================

  @Test(expected = classOf[TableException])
  def testEventTimeTumblingGroupWindowOverCount(): Unit = {
    val testUtil = batchTestUtil()
    val source = testUtil.addTableSource[(Long, Int, String)]('long, 'int, 'string)
    // Count-based (row) tumbling windows are expected to be rejected with TableException.
    testUtil.verifyPlan(
      source
        .window(Tumble over 2.rows on 'long as 'w)
        .groupBy('w, 'string)
        .select('string, 'int.count))
  }

  @Test
  def testEventTimeTumblingGroupWindowOverTimeWithUdAgg(): Unit = {
    val testUtil = batchTestUtil()
    val source = testUtil.addTableSource[(Long, Int, String)]('long, 'int, 'string)
    // Time-based tumbling window aggregated with a user-defined (mergeable) aggregate.
    val weightedAvg = new WeightedAvgWithMerge
    testUtil.verifyPlan(
      source
        .window(Tumble over 5.millis on 'long as 'w)
        .groupBy('w, 'string)
        .select('string, weightedAvg('long, 'int)))
  }

  @Test
  def testEventTimeTumblingGroupWindowOverTime(): Unit = {
    val testUtil = batchTestUtil()
    val source = testUtil.addTableSource[(Long, Int, String)]('long, 'int, 'string)
    testUtil.verifyPlan(
      source
        .window(Tumble over 5.millis on 'long as 'w)
        .groupBy('w, 'string)
        .select('string, 'int.count))
  }

  @Test
  def testAllEventTimeTumblingGroupWindowOverTime(): Unit = {
    val testUtil = batchTestUtil()
    val source = testUtil.addTableSource[(Long, Int, String)]('long, 'int, 'string)
    // Grouping only by the window itself (no extra key columns).
    testUtil.verifyPlan(
      source
        .window(Tumble over 5.millis on 'long as 'w)
        .groupBy('w)
        .select('int.count))
  }

  @Test(expected = classOf[TableException])
  def testAllEventTimeTumblingGroupWindowOverCount(): Unit = {
    val testUtil = batchTestUtil()
    val source = testUtil.addTableSource[(Long, Int, String)]('long, 'int, 'string)
    // Count-based tumbling windows without a key are expected to fail as well.
    testUtil.verifyPlan(
      source
        .window(Tumble over 2.rows on 'long as 'w)
        .groupBy('w)
        .select('int.count))
  }

  @Test
  def testLongEventTimeTumblingGroupWindowWithProperties(): Unit = {
    val testUtil = batchTestUtil()
    val source = testUtil.addTableSource[(Long, Int, String)]('ts, 'int, 'string)
    // Window properties (start/end/rowtime) selected from a Long rowtime column.
    testUtil.verifyPlan(
      source
        .window(Tumble over 2.hours on 'ts as 'w)
        .groupBy('w, 'string)
        .select('string, 'int.count, 'w.start, 'w.end, 'w.rowtime))
  }

  @Test
  def testTimestampEventTimeTumblingGroupWindowWithProperties(): Unit = {
    val testUtil = batchTestUtil()
    val source = testUtil.addTableSource[(Timestamp, Int, String)]('ts, 'int, 'string)
    // Same as above but with a java.sql.Timestamp rowtime column.
    testUtil.verifyPlan(
      source
        .window(Tumble over 2.hours on 'ts as 'w)
        .groupBy('w, 'string)
        .select('string, 'int.count, 'w.start, 'w.end, 'w.rowtime))
  }

  //===============================================================================================
  // Sliding Windows
  //===============================================================================================

  @Test
  def testEventTimeSlidingGroupWindowOverTime(): Unit = {
    val testUtil = batchTestUtil()
    val source = testUtil.addTableSource[(Long, Int, String)]('long, 'int, 'string)
    testUtil.verifyPlan(
      source
        .window(Slide over 8.millis every 10.millis on 'long as 'w)
        .groupBy('w, 'string)
        .select('string, 'int.count))
  }

  @Test(expected = classOf[TableException])
  def testEventTimeSlidingGroupWindowOverCount(): Unit = {
    val testUtil = batchTestUtil()
    val source = testUtil.addTableSource[(Long, Int, String)]('long, 'int, 'string)
    // Count-based sliding windows are expected to be rejected with TableException.
    testUtil.verifyPlan(
      source
        .window(Slide over 2.rows every 1.rows on 'long as 'w)
        .groupBy('w, 'string)
        .select('string, 'int.count))
  }
}
| GJL/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/batch/table/GroupWindowTest.scala | Scala | apache-2.0 | 5,152 |
//https://www.hackerrank.com/contests/lambda-calculi-apr14/challenges/jumping-bunnies
import java.io._;
import scala.collection.mutable._;
object Solution {

  /** Reads the bunny count and jump distances from stdin and prints the first
    * point where all bunnies meet again: the least common multiple (LCM) of
    * the jump distances, computed via prime factorizations.
    */
  def main(args: Array[String]): Unit = {
    // First line: number of bunnies. Second line: their jump distances.
    val N: Byte = readLine().toByte
    val S: Array[String] = readLine().split(" ")
    // Take exactly N distances (same behaviour as the original fixed-size array fill).
    val J: Array[Int] = Array.tabulate(N)(i => S(i).toInt)

    // Accumulate the LCM as a map from prime factor to its maximum exponent seen so far.
    val nearestPoint: Map[Int, Int] = Map[Int, Int]()
    for (j <- J) {
      val jump = getPrimePowers(j)
      // Remove from `jump` the exponents already covered, then add the excess,
      // so nearestPoint(p) ends up as max(previous exponent, jump's exponent).
      subtractCommonPowers(jump, nearestPoint)
      addPowers(nearestPoint, jump)
    }

    // Multiply the prime powers back together with exact integer arithmetic.
    // (math.pow works in Double and can silently lose precision for large powers.)
    var point: Long = 1L
    for ((factor, power) <- nearestPoint) {
      var i = 0
      while (i < power) {
        point *= factor
        i += 1
      }
    }
    println(point)
  }

  /** Adds every exponent in `q` onto the matching factor in `n`. Mutates `n` in place. */
  def addPowers(n: Map[Int, Int], q: Map[Int, Int]): Unit = {
    for ((factor, power) <- q) {
      n(factor) = n.getOrElse(factor, 0) + power
    }
  }

  /** For each factor present in both maps, subtracts `q`'s exponent from `n`'s,
    * removing the factor entirely when nothing positive remains. Mutates `n`.
    * Iterates over a snapshot so removal during traversal is safe.
    */
  def subtractCommonPowers(n: Map[Int, Int], q: Map[Int, Int]): Unit = {
    for ((factor, power) <- n.toList) {
      q.get(factor) match {
        case Some(qPower) =>
          val remaining = power - qPower
          if (remaining <= 0) n -= factor
          else n(factor) = remaining
        case None => // factor not shared; leave untouched
      }
    }
  }

  /** Returns the prime factorization of `input` as a map prime -> exponent.
    * An input of 1 yields an empty map.
    */
  def getPrimePowers(input: Int): Map[Int, Int] = {
    val map: Map[Int, Int] = Map[Int, Int]()
    // Strip all factors of 2 first so only odd candidates remain.
    var n = getPower(input, 2, map)
    var factor = 3
    // Trial-divide by odd candidates up to sqrt of the *remaining* value
    // (toLong avoids Int overflow in factor * factor).
    while (factor.toLong * factor <= n) {
      n = getPower(n, factor, map)
      factor += 2
    }
    // Anything left above 1 is itself a prime larger than sqrt(input).
    if (n > 1) {
      map(n) = 1
    }
    map
  }

  /** Divides `num` by `divisor` as many times as it divides evenly, records the
    * count in `map` (only when positive), and returns the reduced quotient.
    */
  def getPower(num: Int, divisor: Int, map: Map[Int, Int]): Int = {
    var n = num
    var count = 0
    while (n % divisor == 0) {
      count += 1
      n /= divisor
    }
    if (count > 0) {
      map(divisor) = count
    }
    n
  }
}
package core
import com.datastax.driver.core.{ProtocolVersion, Row}
import util.ReflectionUtil
import scala.reflect.runtime.universe._
/**
* @author instcode
*/
class Mapper[T: TypeTag] {

  // Column names derived from the constructor parameters of T, in declaration order.
  val columnNames = ReflectionUtil.constructorParams[T].map(_._1)

  /** Deserializes a Cassandra row into an instance of T.
    *
    * For each constructor parameter of T, the raw bytes are fetched from the row
    * by column name and decoded using the column type at the same index.
    * NOTE(review): the type is looked up by position while the bytes are fetched
    * by name — this assumes the row's column order matches T's constructor order;
    * confirm against the queries that produce these rows.
    */
  def map(r: Row)(implicit protocolVersion: ProtocolVersion): T = {
    val columnDefinitions = r.getColumnDefinitions
    val data = new Array[Object](columnNames.size)
    // Removed leftover per-cell println debugging: it ran for every column of
    // every row and polluted stdout in what is otherwise a pure mapping routine.
    for (i <- columnNames.indices) {
      val name = columnNames(i)
      data(i) = columnDefinitions.getType(i).deserialize(r.getBytesUnsafe(name), protocolVersion)
    }
    ReflectionUtil.create[T](data)
  }
}
| instcode/cassandra-mapper | src/main/scala/core/Mapper.scala | Scala | apache-2.0 | 703 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.akkastream.task
import java.time.Instant
import java.util.concurrent.TimeUnit
import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.TaskContext
import scala.concurrent.duration.FiniteDuration
// Self-addressed control message signalling that the drop interval has elapsed.
case object DropWithinTimeout
class DropWithinTask[T](context: TaskContext, userConf : UserConfig)
  extends GraphTask(context, userConf) {

  // Duration of the drop window; defaults to zero (i.e. effectively no dropping).
  val timeout = userConf.getValue[FiniteDuration](DropWithinTask.TIMEOUT).
    getOrElse(FiniteDuration(0, TimeUnit.MINUTES))
  // True while we are still inside the drop window and messages are discarded.
  var timeoutActive = true

  override def onStart(startTime: Instant): Unit = {
    // Schedule a one-shot control message to ourselves when the window elapses.
    context.scheduleOnce(timeout)(
      self ! Message(DropWithinTimeout, Instant.now())
    )
  }

  override def onNext(msg: Message) : Unit = {
    msg.value match {
      case DropWithinTimeout =>
        // Window is over: start forwarding from this message onwards.
        timeoutActive = false
      case _ =>
    }
    // Replaces the original `timeoutActive match { case true => ... }` — matching
    // on a Boolean is a plain conditional. Behaviour is unchanged: the control
    // message itself is also forwarded, since the flag is cleared before this check.
    if (!timeoutActive) {
      context.output(msg)
    }
  }
}
object DropWithinTask {
  // UserConfig key under which the drop-window duration is stored.
  val TIMEOUT = "TIMEOUT"
}
| manuzhang/incubator-gearpump | experiments/akkastream/src/main/scala/org/apache/gearpump/akkastream/task/DropWithinTask.scala | Scala | apache-2.0 | 1,863 |
package com.karasiq.shadowcloud.config
import com.typesafe.config.Config
// Buffer size limits in bytes; `rootConfig` retains the raw Config per the WrappedConfig contract.
case class BuffersConfig(rootConfig: Config, readChunks: Long, repair: Long) extends WrappedConfig
object BuffersConfig extends WrappedConfigFactory[BuffersConfig] {

  /** Builds a [[BuffersConfig]] by reading each limit as a memory size
    * (e.g. "8M") and normalizing it to a byte count.
    */
  override def apply(config: Config): BuffersConfig = {
    def bytesOf(path: String): Long = config.getMemorySize(path).toBytes
    BuffersConfig(config, bytesOf("read-chunks"), bytesOf("repair"))
  }
}
| Karasiq/shadowcloud | core/src/main/scala/com/karasiq/shadowcloud/config/BuffersConfig.scala | Scala | apache-2.0 | 439 |
package com.peterpotts.common.util
import scala.concurrent.{ExecutionContext, Future}
object FutureDecorator {

  /** Adds option-unwrapping helpers to futures of optional values. */
  implicit class DecoratedFutureOption[T](futureOption: Future[Option[T]]) {

    /** Unwraps the optional result, failing the future with `exception` when it is empty.
      * `exception` is by-name, so it is only constructed on the empty path.
      */
    def mapGetOrThrow(exception: => Exception)(implicit executionContext: ExecutionContext): Future[T] =
      futureOption.map(_.fold[T](throw exception)(identity))
  }
}
| peterpotts/mobius | src/main/scala/com/peterpotts/common/util/FutureDecorator.scala | Scala | mit | 356 |
package com.datastax.spark.connector.rdd.partitioner
import java.net.InetAddress
import com.datastax.spark.connector.ColumnSelector
import com.datastax.spark.connector.cql.{CassandraConnector, Schema}
import com.datastax.spark.connector.writer.RowWriterFactory
import org.apache.spark.{Partition, Partitioner}
import scala.reflect.ClassTag
import scala.collection.JavaConversions._
// A Spark partition pinned to a specific set of Cassandra replica endpoints.
case class ReplicaPartition(index: Int, endpoints: Set[InetAddress]) extends EndpointPartition
/**
* The replica partitioner will work on an RDD which is keyed on sets of InetAddresses representing Cassandra
* Hosts . It will group keys which share a common IP address into partitionsPerReplicaSet Partitions.
* @param partitionsPerReplicaSet The number of Spark Partitions to make Per Unique Endpoint
*/
class ReplicaPartitioner[T](
    table: String,
    keyspace: String,
    partitionsPerReplicaSet: Int,
    partitionKeyMapper: ColumnSelector,
    val connector: CassandraConnector)(
  implicit
    currentType: ClassTag[T],
    @transient rwf: RowWriterFactory[T]) extends Partitioner {

  val tableDef = Schema.tableFromCassandra(connector, keyspace, table)
  val rowWriter = implicitly[RowWriterFactory[T]].rowWriter(
    tableDef,
    partitionKeyMapper.selectFrom(tableDef)
  )

  // @transient lazy vals: these hold non-serializable driver/cluster handles, so
  // they are rebuilt on each executor after deserialization.
  @transient lazy private val tokenGenerator = new TokenGenerator[T](connector, tableDef, rowWriter)
  @transient lazy private val metadata = connector.withClusterDo(_.getMetadata)
  @transient lazy private val protocolVersion = connector
    .withClusterDo(_.getConfiguration.getProtocolOptions.getProtocolVersion)
  @transient lazy private val clazz = implicitly[ClassTag[T]].runtimeClass

  private val hosts = connector.hosts.toVector
  private val hostSet = connector.hosts
  private val numHosts = hosts.size
  // Assign each host a contiguous range of partition indexes.
  private val partitionIndexes = (0 until partitionsPerReplicaSet * numHosts)
    .grouped(partitionsPerReplicaSet)
    .toList
  private val hostMap = (hosts zip partitionIndexes).toMap
  // Ip1 -> (0,1,2,..), Ip2 -> (11,12,13...)
  private val indexMap = for ((ip, partitions) <- hostMap; partition <- partitions) yield (partition, ip)
  // 0->IP1, 1-> IP1, ...

  private def randomHost(index: Int): InetAddress = hosts(index % hosts.length)

  /**
   * Given a set of endpoints, pick a random endpoint, and then a random partition owned by that
   * endpoint. If the requested host doesn't exist chose another random host. Only uses valid hosts
   * from the connected datacenter.
   * @param key A Set[InetAddress] of replicas for this Cassandra Partition
   * @return An integer between 0 and numPartitions
   */
  override def getPartition(key: Any): Int = {
    key match {
      case key: T if clazz.isInstance(key) =>
        //Only use ReplicaEndpoints in the connected DC
        val token = tokenGenerator.getTokenFor(key)
        // Math.abs(Int.MinValue) is still negative (two's complement); clamp to 0
        // so the modulo indexing below can never go out of bounds.
        val tokenHash = math.max(Math.abs(token.hashCode()), 0)
        val replicas = metadata
          .getReplicas(keyspace, token.serialize(protocolVersion))
          .map(_.getBroadcastAddress)
        val replicaSetInDC = (hostSet & replicas).toVector
        if (replicaSetInDC.nonEmpty) {
          // Prefer a partition owned by one of this key's in-DC replicas.
          val endpoint = replicaSetInDC(tokenHash % replicaSetInDC.size)
          hostMap(endpoint)(tokenHash % partitionsPerReplicaSet)
        } else {
          // No in-DC replica known: fall back to an arbitrary connected host.
          hostMap(randomHost(tokenHash))(tokenHash % partitionsPerReplicaSet)
        }
      case _ => throw new IllegalArgumentException(
        "ReplicaPartitioner can only determine the partition of a tuple whose key is a non-empty Set[InetAddress]. " +
          s"Invalid key: $key")
    }
  }

  override def numPartitions: Int = partitionsPerReplicaSet * numHosts

  /** Resolves the replica endpoint that owns the given Spark partition index. */
  def getEndpointPartition(partition: Partition): ReplicaPartition = {
    val endpoints = indexMap.getOrElse(partition.index,
      // Fixed interpolation: the original `$partition.index` rendered the whole
      // Partition's toString followed by the literal text ".index".
      throw new RuntimeException(s"$indexMap : Can't get an endpoint for Partition ${partition.index}"))
    new ReplicaPartition(index = partition.index, endpoints = Set(endpoints))
  }
}
| ponkin/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/rdd/partitioner/ReplicaPartitioner.scala | Scala | apache-2.0 | 3,949 |
package scala.collection
import scala.language.higherKinds
/**
* Trait that overrides operations on sequences in order
* to take advantage of strict builders.
*/
trait StrictOptimizedSeqOps [+A, +CC[_], +C]
  extends Any
    with SeqOps[A, CC, C]
    with StrictOptimizedIterableOps[A, CC, C] {

  // Eagerly builds the result, keeping only the first element for each distinct f-image.
  override def distinctBy[B](f: A => B): C = {
    val builder = newSpecificBuilder()
    // Set of f-images seen so far; the first occurrence of each image wins.
    val seen = mutable.HashSet.empty[B]
    for (x <- this) {
      val y = f(x)
      if (!seen.contains(y)) {
        seen += y
        builder += x
      }
    }
    builder.result()
  }

  // Strict prepend: one pass through this collection into a fresh builder.
  override def prepended[B >: A](elem: B): CC[B] = {
    val b = iterableFactory.newBuilder[B]()
    // Only hint the size when it is actually known (knownSize is negative otherwise).
    if (knownSize >= 0) {
      b.sizeHint(size + 1)
    }
    b += elem
    b ++= this
    b.result()
  }

  // Strict append: mirror image of prepended (element added after the existing ones).
  override def appended[B >: A](elem: B): CC[B] = {
    val b = iterableFactory.newBuilder[B]()
    if (knownSize >= 0) {
      b.sizeHint(size + 1)
    }
    b ++= this
    b += elem
    b.result()
  }

  // Strict concatenation with suffix after this collection's elements.
  override def appendedAll[B >: A](suffix: Iterable[B]): CC[B] = {
    val b = iterableFactory.newBuilder[B]()
    b ++= this
    b ++= suffix
    b.result()
  }

  // Strict concatenation with prefix before this collection's elements.
  override def prependedAll[B >: A](prefix: Iterable[B]): CC[B] = {
    val b = iterableFactory.newBuilder[B]()
    b ++= prefix
    b ++= this
    b.result()
  }

  // Pads to length `len` by appending copies of `elem`; if the collection is
  // already at least `len` long, no padding is added (diff starts non-positive).
  override def padTo[B >: A](len: Int, elem: B): CC[B] = {
    val b = iterableFactory.newBuilder[B]()
    val L = size
    b.sizeHint(math.max(L, len))
    var diff = len - L
    b ++= this
    while (diff > 0) {
      b += elem
      diff -= 1
    }
    b.result()
  }
}
| rorygraves/perf_tester | corpus/scala-library/src/main/scala/collection/StrictOptimizedSeqOps.scala | Scala | apache-2.0 | 1,592 |
package root
/**
* Created by adixith on 5/20/15.
*/
// Integer status codes reported by bot actions. Names describe the outcome;
// exact caller semantics are not visible from this file — verify at call sites.
object ActionReturnValues {
  // Negative value reserved for failures on the server side.
  val serverError:Integer = -1
  val oneBitcoinSold:Integer = 1
  val bitcoinBought:Integer = 2
  val takingNoAction:Integer = 3
  val multipleBitcoinsSold:Integer = 4
}
| anoopdixith/SmartBCT | src/main/scala/ActionReturnValues.scala | Scala | mit | 263 |
package com.twitter.finagle.loadbalancer
/**
* The behavior the load balancer should take when none
* of its nodes have a [[com.twitter.finagle.Status]] of
* [[com.twitter.finagle.Status.Open Open]].
*
* The default behavior is [[WhenNoNodesOpen.PickOne]]
* and can be customized on a client through [[LoadBalancerFactory.WhenNoNodesOpenParam]]:
* {{{
* import com.twitter.finagle.loadbalancer.LoadBalancerFactory.WhenNoNodesOpenParam
* import com.twitter.finagle.loadbalancer.WhenNoNodesOpen
* import com.twitter.finagle.Http
*
* Http.client
* .configured(WhenNoNodesOpenParam(WhenNoNodesOpen.FailFast))
* }}}
*
* @see the [[https://twitter.github.io/finagle/guide/Clients.html#behavior-when-no-nodes-are-available user guide]].
* @see `WhenNoNodesOpens` for Java friendly API.
*/
// Sealed so matches over the two companion cases (PickOne, FailFast) are checked exhaustively.
sealed trait WhenNoNodesOpen
// All implementations of the sealed trait live here, making them part of the
// trait's implicit companion scope and easy to pattern match on.
object WhenNoNodesOpen {

  /**
   * Picks one node, usually at random. This is an optimistic
   * decision that the balancer's view of the nodes may be out-of-date.
   *
   * This is the default load balancer behavior.
   *
   * For a Java friendly API, use `WhenNoNodesOpens.PICK_ONE`.
   *
   * @see [[WhenNoNodesOpen.FailFast]] for a more conservative approach.
   */
  case object PickOne extends WhenNoNodesOpen

  /**
   * Fail the request with a [[NoNodesOpenException]].
   *
   * For a Java friendly API, use `WhenNoNodesOpens.FAIL_FAST`.
   *
   * @see [[WhenNoNodesOpen.PickOne]] for a more optimistic approach.
   */
  case object FailFast extends WhenNoNodesOpen
}
| luciferous/finagle | finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/WhenNoNodesOpen.scala | Scala | apache-2.0 | 1,512 |
package au.com.dius.pact.consumer
import java.io.{File, PrintWriter}
import au.com.dius.pact.model.Pact
import au.com.dius.pact.model.Pact.{MergeConflict, MergeSuccess}
import au.com.dius.pact.com.typesafe.scalalogging.StrictLogging
/**
* Globally accumulates Pacts, merges by destination file, and allows writing to File.
*
* This must be mutable, since there is otherwise no way to thread the state through
* whatever testing framework is in use.
*
* Ideally writing would happen only at the end of the full test suite, but it may be necessary
* to write each time, and synchronise on disk, such that the file read and write can not be done concurrently
* with another running test.
*
* This code has a way to go before it is fit for purpose.
*/
object PactGenerator {

  // Default pact file name: "<consumer>-<provider>.json".
  def defaultFilename(pact: Pact): String =
    s"${pact.consumer.name}-${pact.provider.name}.json"

  def destinationFileForPact(pact: Pact): File = destinationFile(defaultFilename(pact))

  // Resolves a file name under the configured pact root directory.
  def destinationFile(filename: String): File = new File(s"${PactConsumerConfig.pactRootDir}/$filename")

  // Folds a pact into the single global accumulator and returns the new state.
  // Guarded by `synchronized` so concurrent merges see a consistent `pactGen`.
  def merge(pact: Pact): PactGenerator = synchronized {
    pactGen = pactGen merge pact
    pactGen
  }

  // Process-wide mutable accumulator (see the class scaladoc for why this is a var).
  // Declared after `merge`, which forward-references it — legal inside an object.
  private var pactGen = new PactGenerator(Map(), Nil)
}
case class PactGenerator(pacts: Map[String, Pact], conflicts: List[MergeConflict]) extends StrictLogging {
  import PactGenerator._

  /** True when at least one merge conflict has been recorded. */
  def failed: Boolean = conflicts.nonEmpty

  /** True when no pacts have been accumulated yet. */
  def isEmpty: Boolean = pacts.isEmpty

  /** Returns a new generator with `pact` merged into the pact destined for the same file. */
  def merge(pact: Pact): PactGenerator = {
    val pactFileName = defaultFilename(pact)

    // Replace (or insert) the pact for this file, keeping conflicts unchanged.
    def withPact(p: Pact): PactGenerator =
      PactGenerator(pacts + (pactFileName -> p), conflicts)

    pacts.get(pactFileName) match {
      case None => withPact(pact)
      case Some(existing) =>
        pact.merge(existing) match {
          case MergeSuccess(merged) => withPact(merged)
          case conflict @ MergeConflict(_) => PactGenerator(pacts, conflict :: conflicts)
        }
    }
  }

  /** Writes every accumulated pact to its destination file under the pact root dir. */
  def writeAllToFile(): Unit = {
    require(!isEmpty, "Cannot write to file; no pacts have been recorded")
    require(!failed, "The following merge conflicts occurred: \\n" + conflicts.mkString("\\n - "))
    new File(PactConsumerConfig.pactRootDir).mkdirs()
    for ((_, pact) <- pacts) {
      val file = destinationFileForPact(pact)
      logger.debug(s"Writing pact ${pact.consumer.name} -> ${pact.provider.name} to file $file")
      val writer = new PrintWriter(file)
      try pact.sortInteractions.serialize(writer)
      finally writer.close()
    }
  }
}
| sangohan/pact-jvm | pact-jvm-consumer/src/main/scala/au/com/dius/pact/consumer/PactGenerator.scala | Scala | apache-2.0 | 2,719 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert2.transforms
/** Factory trait exposing a set of transformer functions. */
trait TransformerFunctionFactory {

  // All transformer functions this factory provides.
  def functions: Seq[TransformerFunction]
}
| aheyne/geomesa | geomesa-convert/geomesa-convert-common/src/main/scala/org/locationtech/geomesa/convert2/transforms/TransformerFunctionFactory.scala | Scala | apache-2.0 | 598 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.Properties
import scala.collection.JavaConverters._
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.Partition
import org.apache.spark.annotation.InterfaceStability
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.json.{CreateJacksonParser, JacksonParser, JSONOptions}
import org.apache.spark.sql.execution.LogicalRDD
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.execution.datasources.jdbc._
import org.apache.spark.sql.execution.datasources.json.InferSchema
import org.apache.spark.sql.types.StructType
import org.apache.spark.unsafe.types.UTF8String
/**
* Interface used to load a [[Dataset]] from external storage systems (e.g. file systems,
* key-value stores, etc). Use `SparkSession.read` to access this.
*
* @since 1.4.0
*/
@InterfaceStability.Stable
class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
/**
* Specifies the input data source format.
*
* @since 1.4.0
*/
  def format(source: String): DataFrameReader = {
    this.source = source
    this // builder pattern: return the reader itself for call chaining
  }
/**
* Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema
* automatically from data. By specifying the schema here, the underlying data source can
* skip the schema inference step, and thus speed up data loading.
*
* @since 1.4.0
*/
  def schema(schema: StructType): DataFrameReader = {
    // Option(...) maps a null schema (possible from Java callers) to None.
    this.userSpecifiedSchema = Option(schema)
    this
  }
/**
* Adds an input option for the underlying data source.
*
* @since 1.4.0
*/
  def option(key: String, value: String): DataFrameReader = {
    // Later calls with the same key overwrite earlier values.
    this.extraOptions += (key -> value)
    this
  }
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
  // Convenience overload: stores the boolean as its string form via the String overload.
  def option(key: String, value: Boolean): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
  // Convenience overload: stores the long as its string form via the String overload.
  def option(key: String, value: Long): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
  // Convenience overload: stores the double as its string form via the String overload.
  def option(key: String, value: Double): DataFrameReader = option(key, value.toString)
/**
* (Scala-specific) Adds input options for the underlying data source.
*
* @since 1.4.0
*/
  def options(options: scala.collection.Map[String, String]): DataFrameReader = {
    // Bulk-add; keys already present in extraOptions are overwritten.
    this.extraOptions ++= options
    this
  }
/**
* Adds input options for the underlying data source.
*
* @since 1.4.0
*/
def options(options: java.util.Map[String, String]): DataFrameReader = {
this.options(options.asScala)
this
}
/**
* Loads input in as a `DataFrame`, for data sources that don't require a path (e.g. external
* key-value stores).
*
* @since 1.4.0
*/
  def load(): DataFrame = {
    load(Seq.empty: _*) // force invocation of `load(...varargs...)`
  }
/**
* Loads input in as a `DataFrame`, for data sources that require a path (e.g. data backed by
* a local or distributed file system).
*
* @since 1.4.0
*/
  def load(path: String): DataFrame = {
    // The single path is carried via the "path" option rather than the varargs list.
    option("path", path).load(Seq.empty: _*) // force invocation of `load(...varargs...)`
  }
/**
* Loads input in as a `DataFrame`, for data sources that support multiple paths.
* Only works if the source is a HadoopFsRelationProvider.
*
* @since 1.6.0
*/
@scala.annotation.varargs
def load(paths: String*): DataFrame = {
sparkSession.baseRelationToDataFrame(
DataSource.apply(
sparkSession,
paths = paths,
userSpecifiedSchema = userSpecifiedSchema,
className = source,
options = extraOptions.toMap).resolveRelation())
}
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table and connection properties.
*
* @since 1.4.0
*/
  def jdbc(url: String, table: String, properties: Properties): DataFrame = {
    // properties should override settings in extraOptions.
    this.extraOptions = this.extraOptions ++ properties.asScala
    // explicit url and dbtable should override all (added last, so they win)
    this.extraOptions += (JDBCOptions.JDBC_URL -> url, JDBCOptions.JDBC_TABLE_NAME -> table)
    format("jdbc").load()
  }
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table. Partitions of the table will be retrieved in parallel based on the parameters
* passed to this function.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
* @param url JDBC database url of the form `jdbc:subprotocol:subname`.
* @param table Name of the table in the external database.
* @param columnName the name of a column of integral type that will be used for partitioning.
* @param lowerBound the minimum value of `columnName` used to decide partition stride.
* @param upperBound the maximum value of `columnName` used to decide partition stride.
* @param numPartitions the number of partitions. This, along with `lowerBound` (inclusive),
* `upperBound` (exclusive), form partition strides for generated WHERE
* clause expressions used to split the column `columnName` evenly. When
* the input is less than 1, the number is set to 1.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
* tag/value. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch.
* @since 1.4.0
*/
  def jdbc(
      url: String,
      table: String,
      columnName: String,
      lowerBound: Long,
      upperBound: Long,
      numPartitions: Int,
      connectionProperties: Properties): DataFrame = {
    // columnName, lowerBound, upperBound and numPartitions override settings in extraOptions,
    // then the work is delegated to the (url, table, properties) overload.
    this.extraOptions ++= Map(
      JDBCOptions.JDBC_PARTITION_COLUMN -> columnName,
      JDBCOptions.JDBC_LOWER_BOUND -> lowerBound.toString,
      JDBCOptions.JDBC_UPPER_BOUND -> upperBound.toString,
      JDBCOptions.JDBC_NUM_PARTITIONS -> numPartitions.toString)
    jdbc(url, table, connectionProperties)
  }
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table using connection properties. The `predicates` parameter gives a list
* expressions suitable for inclusion in WHERE clauses; each one defines one partition
* of the `DataFrame`.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
* @param url JDBC database url of the form `jdbc:subprotocol:subname`
* @param table Name of the table in the external database.
* @param predicates Condition in the where clause for each partition.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
* tag/value. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch.
* @since 1.4.0
*/
def jdbc(
url: String,
table: String,
predicates: Array[String],
connectionProperties: Properties): DataFrame = {
// connectionProperties should override settings in extraOptions.
val params = extraOptions.toMap ++ connectionProperties.asScala.toMap
val options = new JDBCOptions(url, table, params)
val parts: Array[Partition] = predicates.zipWithIndex.map { case (part, i) =>
JDBCPartition(part, i) : Partition
}
val relation = JDBCRelation(parts, options)(sparkSession)
sparkSession.baseRelationToDataFrame(relation)
}
/**
* Loads a JSON file (<a href="http://jsonlines.org/">JSON Lines text format or
* newline-delimited JSON</a>) and returns the result as a `DataFrame`.
* See the documentation on the overloaded `json()` method with varargs for more details.
*
* @since 1.4.0
*/
  def json(path: String): DataFrame = {
    // This method ensures that calls that explicit need single argument works, see SPARK-16009
    json(Seq(path): _*)
  }
/**
* Loads a JSON file and returns the results as a `DataFrame`.
*
* Both JSON (one record per file) and <a href="http://jsonlines.org/">JSON Lines</a>
* (newline-delimited JSON) are supported and can be selected with the `wholeFile` option.
*
* This function goes through the input once to determine the input schema. If you know the
* schema in advance, use the version that specifies the schema to avoid the extra scan.
*
* You can set the following JSON-specific options to deal with non-standard JSON files:
* <ul>
* <li>`primitivesAsString` (default `false`): infers all primitive values as a string type</li>
* <li>`prefersDecimal` (default `false`): infers all floating-point values as a decimal
* type. If the values do not fit in decimal, then it infers them as doubles.</li>
* <li>`allowComments` (default `false`): ignores Java/C++ style comment in JSON records</li>
* <li>`allowUnquotedFieldNames` (default `false`): allows unquoted JSON field names</li>
* <li>`allowSingleQuotes` (default `true`): allows single quotes in addition to double quotes
* </li>
* <li>`allowNumericLeadingZeros` (default `false`): allows leading zeros in numbers
* (e.g. 00012)</li>
* <li>`allowBackslashEscapingAnyCharacter` (default `false`): allows accepting quoting of all
* character using backslash quoting mechanism</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing.
* <ul>
* <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record, and puts
* the malformed string into a new field configured by `columnNameOfCorruptRecord`. When
* a schema is set by user, it sets `null` for extra fields.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* <li>`columnNameOfCorruptRecord` (default is the value specified in
* `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
* created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to
* date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps.</li>
* <li>`wholeFile` (default `false`): parse one record, which may span multiple lines,
* per file</li>
* </ul>
*
* @since 2.0.0
*/
  // Delegates to the generic load path with the built-in "json" source.
  @scala.annotation.varargs
  def json(paths: String*): DataFrame = format("json").load(paths : _*)
/**
* Loads a `JavaRDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON
* Lines text format or newline-delimited JSON</a>) and returns the result as
* a `DataFrame`.
*
* Unless the schema is specified using [[schema]] function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
  // Java-friendly overload: unwraps the JavaRDD and reuses the Scala RDD variant.
  def json(jsonRDD: JavaRDD[String]): DataFrame = json(jsonRDD.rdd)
/**
* Loads an `RDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON Lines
* text format or newline-delimited JSON</a>) and returns the result as a `DataFrame`.
*
* Unless the schema is specified using [[schema]] function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
def json(jsonRDD: RDD[String]): DataFrame = {
  // Resolve parsing options, falling back to the session-wide corrupt-record column name.
  val jsonOptions = new JSONOptions(
    extraOptions.toMap,
    sparkSession.sessionState.conf.columnNameOfCorruptRecord)
  val toParser = CreateJacksonParser.string _
  // Prefer the caller-supplied schema; otherwise make one inference pass over the data.
  val dataSchema = userSpecifiedSchema.getOrElse {
    InferSchema.infer(jsonRDD, jsonOptions, toParser)
  }
  // Parse each partition with its own JacksonParser instance (parsers are not shared).
  val rows = jsonRDD.mapPartitions { records =>
    val parser = new JacksonParser(dataSchema, jsonOptions)
    records.flatMap(record => parser.parse(record, toParser, UTF8String.fromString))
  }
  Dataset.ofRows(sparkSession, LogicalRDD(dataSchema.toAttributes, rows)(sparkSession))
}
/**
* Loads a CSV file and returns the result as a `DataFrame`. See the documentation on the
* other overloaded `csv()` method for more details.
*
* @since 2.0.0
*/
def csv(path: String): DataFrame = {
  // Explicit single-argument overload; see SPARK-16009 for why this is needed.
  val paths = Seq(path)
  csv(paths: _*)
}
/**
* Loads a CSV file and returns the result as a `DataFrame`.
*
* This function will go through the input once to determine the input schema if `inferSchema`
* is enabled. To avoid going through the entire data once, disable `inferSchema` option or
* specify the schema explicitly using [[schema]].
*
* You can set the following CSV-specific options to deal with CSV files:
* <ul>
* <li>`sep` (default `,`): sets the single character as a separator for each
* field and value.</li>
* <li>`encoding` (default `UTF-8`): decodes the CSV files by the given encoding
* type.</li>
* <li>`quote` (default `"`): sets the single character used for escaping quoted values where
* the separator can be part of the value. If you would like to turn off quotations, you need to
* set not `null` but an empty string. This behaviour is different from
* `com.databricks.spark.csv`.</li>
* <li>`escape` (default `\\`): sets the single character used for escaping quotes inside
* an already quoted value.</li>
* <li>`comment` (default empty string): sets the single character used for skipping lines
* beginning with this character. By default, it is disabled.</li>
* <li>`header` (default `false`): uses the first line as names of columns.</li>
* <li>`inferSchema` (default `false`): infers the input schema automatically from data. It
* requires one extra pass over the data.</li>
* <li>`ignoreLeadingWhiteSpace` (default `false`): defines whether or not leading whitespaces
* from values being read should be skipped.</li>
* <li>`ignoreTrailingWhiteSpace` (default `false`): defines whether or not trailing
* whitespaces from values being read should be skipped.</li>
* <li>`nullValue` (default empty string): sets the string representation of a null value. Since
* 2.0.1, this applies to all supported types including the string type.</li>
* <li>`nanValue` (default `NaN`): sets the string representation of a non-number value.</li>
* <li>`positiveInf` (default `Inf`): sets the string representation of a positive infinity
* value.</li>
* <li>`negativeInf` (default `-Inf`): sets the string representation of a negative infinity
* value.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to
* date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
* <li>`maxColumns` (default `20480`): defines a hard limit of how many columns
* a record can have.</li>
* <li>`maxCharsPerColumn` (default `-1`): defines the maximum number of characters allowed
* for any given value being read. By default, it is -1 meaning unlimited length</li>
* <li>`maxMalformedLogPerPartition` (default `10`): sets the maximum number of malformed rows
* Spark will log for each partition. Malformed records beyond this number will be ignored.</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing.
* <ul>
* <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record. When
* a schema is set by user, it sets `null` for extra fields.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* </ul>
* @since 2.0.0
*/
@scala.annotation.varargs
def csv(paths: String*): DataFrame = {
  // Thin delegation: select the built-in "csv" source and load the given paths.
  format("csv").load(paths: _*)
}
/**
* Loads a Parquet file, returning the result as a `DataFrame`. See the documentation
* on the other overloaded `parquet()` method for more details.
*
* @since 2.0.0
*/
def parquet(path: String): DataFrame = {
  // Explicit single-argument overload; see SPARK-16009 for why this is needed.
  val paths = Seq(path)
  parquet(paths: _*)
}
/**
* Loads a Parquet file, returning the result as a `DataFrame`.
*
* You can set the following Parquet-specific option(s) for reading Parquet files:
* <ul>
* <li>`mergeSchema` (default is the value specified in `spark.sql.parquet.mergeSchema`): sets
* whether we should merge schemas collected from all Parquet part-files. This will override
* `spark.sql.parquet.mergeSchema`.</li>
* </ul>
* @since 1.4.0
*/
// Thin delegation: select the built-in "parquet" source and load the given paths.
@scala.annotation.varargs
def parquet(paths: String*): DataFrame = format("parquet").load(paths: _*)
/**
* Loads an ORC file and returns the result as a `DataFrame`.
*
* @param path input path
* @since 1.5.0
* @note Currently, this method can only be used after enabling Hive support.
*/
def orc(path: String): DataFrame = {
  // Explicit single-argument overload; see SPARK-16009 for why this is needed.
  val paths = Seq(path)
  orc(paths: _*)
}
/**
* Loads an ORC file and returns the result as a `DataFrame`.
*
* @param paths input paths
* @since 2.0.0
* @note Currently, this method can only be used after enabling Hive support.
*/
@scala.annotation.varargs
def orc(paths: String*): DataFrame = {
  // Thin delegation to the "orc" source (per the scaladoc, requires Hive support).
  format("orc").load(paths: _*)
}
/**
* Returns the specified table as a `DataFrame`.
*
* @since 1.4.0
*/
def table(tableName: String): DataFrame = {
  // Parse the name into an identifier, resolve it through the catalog, and wrap the plan.
  val identifier = sparkSession.sessionState.sqlParser.parseTableIdentifier(tableName)
  val relation = sparkSession.sessionState.catalog.lookupRelation(identifier)
  Dataset.ofRows(sparkSession, relation)
}
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any. See the documentation on
* the other overloaded `text()` method for more details.
*
* @since 2.0.0
*/
def text(path: String): DataFrame = {
  // Explicit single-argument overload; see SPARK-16009 for why this is needed.
  val paths = Seq(path)
  text(paths: _*)
}
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any.
*
* Each line in the text files is a new row in the resulting DataFrame. For example:
* {{{
* // Scala:
* spark.read.text("/path/to/spark/README.md")
*
* // Java:
* spark.read().text("/path/to/spark/README.md")
* }}}
*
* @param paths input paths
* @since 1.6.0
*/
@scala.annotation.varargs
def text(paths: String*): DataFrame = {
  // Thin delegation: select the built-in "text" source and load the given paths.
  format("text").load(paths: _*)
}
/**
* Loads text files and returns a [[Dataset]] of String. See the documentation on the
* other overloaded `textFile()` method for more details.
* @since 2.0.0
*/
def textFile(path: String): Dataset[String] = {
  // Explicit single-argument overload; see SPARK-16009 for why this is needed.
  val paths = Seq(path)
  textFile(paths: _*)
}
/**
* Loads text files and returns a [[Dataset]] of String. The underlying schema of the Dataset
* contains a single string column named "value".
*
* If the directory structure of the text files contains partitioning information, those are
* ignored in the resulting Dataset. To include partitioning information as columns, use `text`.
*
* Each line in the text files is a new element in the resulting Dataset. For example:
* {{{
* // Scala:
* spark.read.textFile("/path/to/spark/README.md")
*
* // Java:
* spark.read().textFile("/path/to/spark/README.md")
* }}}
*
* @param paths input path
* @since 2.0.0
*/
@scala.annotation.varargs
def textFile(paths: String*): Dataset[String] = {
  // `textFile` ignores partition columns and produces a fixed single-column schema,
  // so a user-specified schema is rejected up front.
  if (userSpecifiedSchema.isDefined) {
    throw new AnalysisException("User specified schema not supported with `textFile`")
  }
  val values = text(paths: _*).select("value")
  values.as[String](sparkSession.implicits.newStringEncoder)
}
///////////////////////////////////////////////////////////////////////////////////////
// Builder pattern config options
///////////////////////////////////////////////////////////////////////////////////////
// Data source name, seeded from the session conf default (presumably overwritten by
// `format(...)`, whose definition is outside this chunk -- verify).
private var source: String = sparkSession.sessionState.conf.defaultDataSourceName
// Schema explicitly supplied by the caller; None means the source infers it
// (consumed via getOrElse in json(RDD), rejected by textFile).
private var userSpecifiedSchema: Option[StructType] = None
// Accumulated reader options; passed to the underlying source (e.g. extraOptions.toMap in json(RDD)).
private var extraOptions = new scala.collection.mutable.HashMap[String, String]
}
| SnappyDataInc/spark | sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala | Scala | apache-2.0 | 22,846 |
package org.improving.scalify
import Scalify._
import org.eclipse.jdt.core._
import org.eclipse.jdt.core.dom
// import scalaz.OptionW._
import org.eclipse.jdt.core.dom.Modifier.ModifierKeyword._
import scala.collection.mutable.HashMap
// Programming in Scala p129:
//
// In Scala, every auxiliary constructor must invoke another constructor of
// the same class as its first action. In other words, the first statement in every
// auxiliary constructor in every Scala class will have the form “this(. . . ).”
// The invoked constructor is either the primary constructor (as in the Rational
// example), or another auxiliary constructor that comes textually before the
// calling constructor. The net effect of this rule is that every constructor invo-
// cation in Scala will end up eventually calling the primary constructor of the
// class. The primary constructor is thus the single point of entry of a class.
//
// So: only the primary constructor can call the superclass constructor.
// This means that though there are gyrations that could be done to accomodate
// multiple distinct independent constructors, it's impossible to preserve
// semantics if different supers are called in a built-in class. So for
// simplicity, anytime a class has 2 or more indpendent constructors, we switch
// to using factory methods and place each primary constructor in its own class.
/**
 * Translates a Java type declaration (class or interface) into its Scala emission.
 *
 * Splits the body declarations into static vs. instance members (statics are emitted
 * as a companion object), tracks constructors (see the file-header comment for why
 * multiple independent constructors force a factory approach), and supplies the
 * shared header/body/mixin emission used by the concrete subclasses.
 * `emitInstancePart` is the abstract hook subclasses must implement.
 */
abstract class TypeDeclaration(override val node: dom.TypeDeclaration) extends AbstractTypeDeclaration(node)
with HasTypes
{
// destructure the JDT node into its components via the extractor
lazy val TypeDeclaration(_, _, interface, _, typeParams, superType, superIntTypes, bodyDecls) = node
override def toString: String =
name.toString + (if (superType.isDefined) " (extends " + superType.get + ")" else "")
// separate into static/instance fields/methods, plus constructors/non and types
lazy val fields: List[dom.FieldDeclaration] = node.getFields.toList
lazy val methods: List[dom.MethodDeclaration] = node.getMethods.toList
lazy val types: List[dom.TypeDeclaration] = node.getTypes.toList
lazy val stds = types
lazy val inits: List[dom.Initializer] = bodyDecls.flatMap { case x: dom.Initializer => List(x) ; case _ => Nil }
// what's left should be (?) EnumDeclaration, EnumConstantDeclaration, AnnotationTypeDeclaration
// and AnnotationTypeMemberDeclaration
lazy val otherBody: List[dom.BodyDeclaration] = bodyDecls --
(fields ::: methods ::: types ::: inits).map(_.asInstanceOf[dom.BodyDeclaration])
// every named declaration made directly in this type: field fragments, methods, nested types
lazy val allDeclaredIdentifiers: List[NamedDecl] =
(fields.flatMap(_.allFragments).map(_.snode) ::: methods.map(_.snode) ::: types.map(_.snode)) flatMap {
case x: NamedDecl => List(x) ; case _ => Nil
}
// split everything into static and instance
lazy val (sfields, ifields) = fields.partition(_.isStatic)
lazy val (smethods, imethods) = methods.partition(_.isStatic)
lazy val (stypes, itypes) = types.partition(_.isStatic)
lazy val (sinits, iinits) = inits.partition(_.isStatic)
// independent means they don't call this(...) in the original java
lazy val constructors: List[Constructor] = imethods.filter(_.isConstructor).map(_.snode.asInstanceOf[Constructor])
lazy val nonConstructors = imethods.filter(!_.isConstructor)
lazy val independentConstructors: List[IndependentConstructor] =
constructors flatMap { case x: IndependentConstructor => List(x) ; case _ => Nil }
// dependent constructors, ordered by PosetOps.tsort (presumably along the this(...) call chain)
lazy val dependentConstructors: List[DependentConstructor] =
PosetOps.tsort(constructors)
. flatMap { case x: DependentConstructor => List(x) ; case _ => Nil }
// presumably: whether any instantiation of this type was recorded -- verify against Global
lazy val isEverCreated = Global.lookupCreation(tb.getKey)
// abstract
def emitInstancePart: Emission
// umbrella methods
override def emitDirect: Emission = emitInstancePart ~ emitStaticPart ~ emitMainProxy ~ NL
override def allFragments = fields.flatMap(_.allFragments)
// customized in the subclasses
def emitTypeParameters: Emission = TYPEARGS(typeParams)
// statics become a companion object of the same name; skipped entirely when empty
def emitStaticPart: Emission =
if (isStaticPartEmpty) Nil
else NL ~ OBJECT ~ name ~ BRACES(emitStaticsImport(false) ~ REP(sfields) ~ REP(sinits) ~ REP(smethods) ~ REP(stypes))
// this means emit the "extends SuperClass(a, b)" expression for THIS class
// overridden in subclasses
def emitSuperExpr: Emission = superType match {
case None => Nil
case Some(x) => EXTENDS ~ x.emitExprWhenSuper(None)
}
// given a particular supercall pointed at this type, returns correct emission
// the level of complication here is due to factory types having variable names for the superclass
def emitTypeNameWhenSuper: Emission = emitTypeNameWhenSuper(None)
def emitTypeNameWhenSuper(sc: Option[dom.SuperConstructorInvocation]): Emission =
INVOKE(ROOTPKG, INVOKE(emitString(pkgName), name))
// NOTE(review): itype.get throws if the type binding is unresolved -- confirm callers guarantee resolution
def findAllSupertypesWithSelf: List[IType] = itype.get :: findAllSupertypes
def findAllSupertypes: List[IType] = {
val xs: Option[Array[IType]] = for (it <- itype) yield hierarchy.getAllSuperclasses(it)
if (xs.isEmpty || xs.get == null) Nil else xs.get.toList
}
// traits and different sorts of classes have much in common
def emitSTDHeader(typeArgs: Emission, conArgs: Emission): Emission =
emitSTDHeader(typeArgs, conArgs, superType.map(x => EXTENDS ~ x.emit) getOrElse Nil)
def emitSTDHeader(typeArgs: Emission, conArgs: Emission, superArgs: Emission): Emission = {
emitFactoryImports ~
emitModifierList ~
emitAbstract ~
emitClassType ~ name ~
typeArgs ~ conArgs ~ superArgs ~
emitMixins(superIntTypes)
}
// body: statics import, then instance fields, initializers, nested types, then the caller's part
def emitSTDBody(instancePart: Emission): Emission =
BRACES(
emitStaticsImport(true) ~
emitListNL(ifields) ~
emitListNL(iinits) ~
emitListNL(itypes) ~
instancePart
)
// given a list of ASTNodes representing superclasses and/or interfaces, assembles into scala form
def emitMixins(xs: List[ASTNode]): Emission = {
def withList(ints: List[ASTNode]): Emission = REP(ints, NL ~ WITH ~ _)
if (xs.isEmpty) Nil
else if (superType.isEmpty) EXTENDS ~ xs.head ~ withList(xs.tail)
else withList(xs)
}
// emits import declarations so unqualified references to inherited statics still resolve
def emitStaticsImport(includeSelf: Boolean): Emission = {
def allSupertypes = if (includeSelf) findAllSupertypesWithSelf else findAllSupertypes
// if we are an inner class inheriting from our enclosing class, we need to avoid double importing
val typeList: List[IType] =
if (tb.isTopLevel) allSupertypes
else {
val etype = findEnclosingType.flatMap(_.itype) | abort("error")
val okTypes = allSupertypes.takeWhile(t => t.getKey != etype.getKey)
if (okTypes.isEmpty) List(itype.get) else okTypes
}
// stop at binary (non-source) supertypes, and only import from types that have static members
val emits = typeList
. takeWhile(!_.isBinary)
. map(it => if (it.hasStaticMembers) cu.emitImportDeclaration(it.getFullyQualifiedName('.'), true) else Nil)
. filter(_ != Nil)
emitListNL(emits)
}
// main
def findMainMethod: Option[dom.MethodDeclaration] = smethods.find(_.isMainMethod)
def hasMainProxy: Boolean = findMainMethod.isDefined && !isInstancePartEmpty
def mainProxyName: String = origName + "Main" // XXX
// emits `object XMain { def main(args: Array[String]) { X.main(args) } }` delegating to the companion
def emitMainProxy: Emission =
if (hasMainProxy) {
NL ~ OBJECT ~ emitString(mainProxyName) ~ BRACES(
DEF ~ Emit("main") ~ PARENS(Emit("args") <~> COLON ~ ARRAY <~> BRACKETS(Emit("String"))) ~
BRACES(INVOKE(name, Emit("main")) <~> PARENS(Emit("args")))
)
} else Nil
// an instance part is only considered empty if the type is never instantiated anywhere
def isInstancePartEmpty: Boolean =
if (isEverCreated) false
else ifields.isEmpty && imethods.isEmpty && iinits.isEmpty && itypes.isEmpty && !isAbstract
def isStaticPartEmpty: Boolean = sfields.isEmpty && smethods.isEmpty && sinits.isEmpty && stypes.isEmpty
}
| mbana/scalify | src/main/ast/STD.scala | Scala | isc | 7,526 |
package web
import java.util.concurrent.TimeUnit
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import akka.stream.Materializer
import akka.util.Timeout
import dataset.DatasetActor.Active
import dataset.DsInfo
import play.api.libs.json.Json
import play.api.libs.streams.ActorFlow
import play.api.mvc.{Controller, WebSocket}
import scala.concurrent.ExecutionContext
class WebSocketController(implicit actorSystem: ActorSystem,
                          mat: Materializer,
                          ec: ExecutionContext)
  extends Controller {

  // Ask-pattern timeout available to actor interactions from this controller.
  implicit val timeout = Timeout(500, TimeUnit.MILLISECONDS)

  object DatasetSocketActor {
    def props(out: ActorRef) = Props(new DatasetSocketActor(out))
  }

  /**
   * Bridges dataset state messages to the browser: Active and DsInfo messages are
   * serialized to JSON and pushed over the socket; inbound text is only logged.
   */
  class DatasetSocketActor(val out: ActorRef) extends Actor with ActorLogging {

    // Serialize a message with its implicit Writes and push it to the browser.
    private def push[A](message: A)(implicit writes: play.api.libs.json.Writes[A]): Unit =
      out ! Json.stringify(Json.toJson(message))

    def receive = {
      case active: Active => push(active)
      case idle: DsInfo => push(idle)
      case fromClient: String => log.debug(s"Message from browser: $fromClient")
    }
  }

  // WebSocket endpoint: one DatasetSocketActor per connection.
  def dataset: WebSocket = WebSocket.accept[String, String] { _ =>
    ActorFlow.actorRef(DatasetSocketActor.props)
  }
}
| delving/narthex | app/web/WebSocketController.scala | Scala | apache-2.0 | 1,204 |
package com.karasiq.shadowcloud.index.utils
import com.karasiq.shadowcloud.model.File
import com.karasiq.shadowcloud.utils.MergeUtil.SplitDecider
/**
 * Bundles the two merge decisions used when reconciling folder contents:
 * one [[SplitDecider]] for the files and one for the subfolder names.
 */
final case class FolderDecider(files: SplitDecider[File], folders: SplitDecider[String])
object FolderDecider {
// the left side wins for both files and folders
val createWins = FolderDecider(SplitDecider.keepLeft, SplitDecider.keepLeft)
// the right side wins for both files and folders
val deleteWins = FolderDecider(SplitDecider.keepRight, SplitDecider.keepRight)
// entries present on both sides are dropped (SplitDecider.dropDuplicates) for files and folders
val mutualExclude = FolderDecider(SplitDecider.dropDuplicates, SplitDecider.dropDuplicates)
}
| Karasiq/shadowcloud | model/src/main/scala/com/karasiq/shadowcloud/index/utils/FolderDecider.scala | Scala | apache-2.0 | 519 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.index
import org.apache.accumulo.core.security.ColumnVisibility
import org.geotools.factory.Hints
import org.junit.runner.RunWith
import org.locationtech.geomesa.core.data.DEFAULT_ENCODING
import org.locationtech.geomesa.core.data.tables.{AttributeIndexRow, AttributeTable}
import org.locationtech.geomesa.feature.AvroSimpleFeatureFactory
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes._
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
import scala.util.Success
@RunWith(classOf[JUnitRunner])
class AttributeTableTest extends Specification {

  val sftName = "mutableType"
  val spec = s"name:String:$OPT_INDEX=true,age:Integer:$OPT_INDEX=true,*geom:Geometry:srid=4326,dtg:Date:$OPT_INDEX=true"
  val sft = SimpleFeatureTypes.createType(sftName, spec)
  sft.getUserData.put(SF_PROPERTY_START_TIME, "dtg")

  // Attribute descriptors paired with their positions, shared by the mutation tests.
  // (Previously duplicated per test, with an inconsistent .toSeq in only one of them.)
  val descriptors = (0 until sft.getAttributeCount).zip(sft.getAttributeDescriptors).toSeq

  // Builds the canonical test feature (point geometry, name=fred, age=50, provided fid).
  private def newTestFeature() = {
    val feature = AvroSimpleFeatureFactory.buildAvroFeature(sft, List(), "id1")
    feature.setDefaultGeometry(WKTUtils.read("POINT(45.0 49.0)"))
    feature.setAttribute("name", "fred")
    feature.setAttribute("age", 50.asInstanceOf[Any])
    feature.getUserData()(Hints.USE_PROVIDED_FID) = java.lang.Boolean.TRUE
    feature
  }

  "AttributeTable" should {

    "encode mutations for attribute index" in {
      val mutations = AttributeTable.getAttributeIndexMutations(newTestFeature(), DEFAULT_ENCODING,
        descriptors, new ColumnVisibility(), "")
      // one mutation per indexed attribute, each with a single (non-delete) update
      mutations.size mustEqual descriptors.length
      mutations.map(_.getUpdates.size()) must contain(beEqualTo(1)).foreach
      mutations.map(_.getUpdates.get(0).isDeleted) must contain(beEqualTo(false)).foreach
    }

    "encode mutations for delete attribute index" in {
      val mutations = AttributeTable.getAttributeIndexMutations(newTestFeature(), DEFAULT_ENCODING,
        descriptors, new ColumnVisibility(), "", delete = true)
      // one mutation per indexed attribute, each flagged as a delete
      mutations.size mustEqual descriptors.length
      mutations.map(_.getUpdates.size()) must contain(beEqualTo(1)).foreach
      mutations.map(_.getUpdates.get(0).isDeleted) must contain(beEqualTo(true)).foreach
    }

    "decode attribute index rows" in {
      // round-trip: encode a row for age=23, then decode it back to the same attribute/value pair
      val row = AttributeTable.getAttributeIndexRows("prefix", sft.getDescriptor("age"), Some(23)).head
      val decoded = AttributeTable.decodeAttributeIndexRow("prefix", sft.getDescriptor("age"), row)
      decoded mustEqual Success(AttributeIndexRow("age", 23))
    }
  }
}
| kevinwheeler/geomesa | geomesa-core/src/test/scala/org/locationtech/geomesa/core/index/AttributeTableTest.scala | Scala | apache-2.0 | 3,789 |
package de.leanovate.swaggercheck.generators
import org.scalacheck.Prop.forAllNoShrink
import org.scalacheck.Properties
/**
 * Property checks for `Generators.regexMatch`: every generated string must be fully
 * matched by the regex it was generated from. Regex patterns are checked individually
 * (simple, email-like, UUID-like, URL-like, escapes, unicode classes) and then against
 * arbitrary regexes produced by `Generators.regex`.
 */
object GenRegexMatchSpecification extends Properties("GenRegexMatch") {
property("Any match") = checkRegex(".*")
property("Email like match") = checkRegex("[a-zA-Z0-9\\\\.]+@[a-z]+\\\\.[a-z]+")
property("Strict email match") = checkRegex("^[-a-z0-9~!$%^&*_=+}{\\\\'?]+(\\\\.[-a-z0-9~!$%^&*_=+}{\\\\'?]+)*@([a-z0-9_][-a-z0-9_]*(\\\\.[-a-z0-9_]+)*\\\\.(aero|arpa|biz|com|coop|edu|gov|info|int|mil|museum|name|net|org|pro|travel|mobi|[a-z][a-z])|([0-9]{1,3}\\\\.[0-9]{1,3}\\\\.[0-9]{1,3}\\\\.[0-9]{1,3}))(:[0-9]{1,5})?$")
property("UUID like match") = checkRegex("[0-9a-f]{8}(\\\\-[0-9a-f]{4}){3}\\\\-[0-9a-f]{12}")
property("URL like match") = checkRegex("(https?|ftp)://[^\\\\s/$\\\\.?#].[^\\\\s]*")
property("Escapes") = checkRegex("\\\\d\\\\D\\\\s\\\\S\\\\w\\\\W")
property("Strange 1") = checkRegex("[1-v5P-d sv-wO-jdLaEIG-a4-duK4-fj-rt-yh1-s;M8EV-rE-w,:\\\\&\\\\&]+[oR2];?")
property("Unicode 1") = checkRegex("(^[a-zA-ZàÀâÂäÄáÁéÅåÉèÈêÊëËìÌîÎïÏòÒôÔöÖøØùÙûÛüÜçÇñœŒæÆíóúÍÓÚĄąĆćĘꣳŃńŚśŻżŹź]'?[- a-zA-ZàÀâÂäÄáÁéÅåÉèÈêÊëËìÌîÎïÏòÒôÔöÖøØùÙûÛüÜçÇñœŒæÆßíóúÍÓÚĄąĆćĘꣳŃńŚśŻżŹź]+$)")
property("Unicode 2") = checkRegex("(^[a-zA-ZàÀâÂäÄáÁåÅéÉèÈêÊëËìÌîÎïÏòÒôÔöÖøØùÙûÛüÜçÇñœŒæÆíóúÍÓÚĄąĆćĘꣳŃńŚśŻżŹź]'?[-,;()' 0-9a-zA-ZàÀâÂäÄáÁéÅåÉèÈêÊëËìÌîÎïÏòÒôÔöÖøØùÙûÛüÜçÇñœŒæÆßíóúÍÓÚĄąĆćĘꣳŃńŚśŻżŹź]+$)")
property("toplevel") = checkRegex("^ALL$|^20[0-9]{2}$")
// strings generated for a randomly generated regex must also fully match it
property("Any regex") = forAllNoShrink(Generators.regex) {
regex =>
checkRegex(regex)
}
// the property: the first match found in a generated string is the entire string
def checkRegex(regex: String) = forAllNoShrink(Generators.regexMatch(regex)) {
(str: String) =>
val matches = regex.r.findFirstIn(str)
matches.contains(str)
}
}
| leanovate/swagger-check | json-schema-gen/src/test/scala/de/leanovate/swaggercheck/generators/GenRegexMatchSpecification.scala | Scala | mit | 2,003 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index.legacy
import org.locationtech.geomesa.accumulo.data.AccumuloWritableFeature
import org.locationtech.geomesa.accumulo.index.AccumuloJoinIndex
import org.locationtech.geomesa.index.api.ShardStrategy.AttributeShardStrategy
import org.locationtech.geomesa.index.api.{RowKeyValue, WritableFeature}
import org.locationtech.geomesa.index.geotools.GeoMesaDataStore
import org.locationtech.geomesa.index.index.attribute.legacy.AttributeIndexV6
import org.locationtech.geomesa.index.index.attribute.legacy.AttributeIndexV6.AttributeIndexKeySpaceV6
import org.locationtech.geomesa.index.index.attribute.legacy.AttributeIndexV7.AttributeIndexKeySpaceV7
import org.locationtech.geomesa.index.index.attribute.{AttributeIndexKey, AttributeIndexKeySpace}
import org.locationtech.geomesa.utils.index.IndexMode.IndexMode
import org.opengis.feature.simple.SimpleFeatureType
/**
 * Version 6 of the Accumulo attribute join index.
 *
 * Overrides the key space so that the values written for each index key come from
 * [[AccumuloWritableFeature.indexValues]] (the reduced "join" values) instead of the
 * default full values produced by the parent key space.
 */
class JoinIndexV6(ds: GeoMesaDataStore[_],
sft: SimpleFeatureType,
attribute: String,
secondaries: Seq[String],
mode: IndexMode)
extends AttributeIndexV6(ds, sft, attribute, secondaries, mode) with AccumuloJoinIndex {
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
// NOTE(review): the two branches below differ only in the parent key space class
// (V6 when sharded, V7 otherwise); the toIndexKey override is identical in both.
override val keySpace: AttributeIndexKeySpace = {
val sharding = AttributeShardStrategy(sft)
if (sharding.shards.nonEmpty) {
// if sharding, we need to swap the shard bytes with the idx bytes
new AttributeIndexKeySpaceV6(sft, sft.getTableSharingBytes, sharding, attribute) {
override def toIndexKey(writable: WritableFeature,
tier: Array[Byte],
id: Array[Byte],
lenient: Boolean): RowKeyValue[AttributeIndexKey] = {
// keep the parent's key but substitute the join index values
val kv = super.toIndexKey(writable, tier, id, lenient)
kv.copy(values = writable.asInstanceOf[AccumuloWritableFeature].indexValues)
}
}
} else {
// otherwise we can skip the swap and use the parent class
new AttributeIndexKeySpaceV7(sft, sft.getTableSharingBytes, sharding, attribute) {
override def toIndexKey(writable: WritableFeature,
tier: Array[Byte],
id: Array[Byte],
lenient: Boolean): RowKeyValue[AttributeIndexKey] = {
// keep the parent's key but substitute the join index values
val kv = super.toIndexKey(writable, tier, id, lenient)
kv.copy(values = writable.asInstanceOf[AccumuloWritableFeature].indexValues)
}
}
}
}
}
| elahrvivaz/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/index/legacy/JoinIndexV6.scala | Scala | apache-2.0 | 3,066 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.filters
import play.api.mvc.{Filter, RequestHeader, Result}
import play.mvc.Http.HeaderNames
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
* This filter adds Cache-Control: no-cache,no-store,max-age=0 headers
* to any responses that do not already have a Cache-Control header.
*/
object DefaultToNoCacheFilter extends Filter with MicroserviceFilterSupport {
  def apply(next: (RequestHeader) => Future[Result])(rh: RequestHeader): Future[Result] =
    next(rh).map { result =>
      // Preserve any explicit Cache-Control header; otherwise default to no caching.
      if (result.header.headers.contains(HeaderNames.CACHE_CONTROL)) result
      else result.withHeaders(CommonHeaders.NoCacheHeader)
    }
}
| cjwebb/play-filters | src/main/scala/uk/gov/hmrc/play/filters/DefaultToNoCacheFilter.scala | Scala | apache-2.0 | 1,330 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.rdd
import java.util
import java.util.concurrent._
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import scala.util.Random
import scala.util.control.Breaks._
import org.apache.hadoop.conf.{Configurable, Configuration}
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat, FileSplit}
import org.apache.spark.{util => _, _}
import org.apache.spark.sql.{CarbonEnv, DataFrame, SQLContext}
import org.apache.spark.sql.execution.command.{AlterTableModel, CompactionCallableModel, CompactionModel, Partitioner}
import org.apache.spark.sql.hive.{DistributionUtil}
import org.apache.spark.util.{FileUtils, SplitUtils}
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.carbon.{CarbonDataLoadSchema, CarbonTableIdentifier}
import org.apache.carbondata.core.carbon.datastore.block.{Distributable, TableBlockInfo}
import org.apache.carbondata.core.carbon.metadata.CarbonMetadata
import org.apache.carbondata.core.carbon.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.load.{BlockDetails, LoadMetadataDetails}
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.integration.spark.merger.{CarbonCompactionUtil, CompactionCallable, CompactionType}
import org.apache.carbondata.lcm.locks.{CarbonLockFactory, CarbonLockUtil, ICarbonLock, LockUsage}
import org.apache.carbondata.lcm.status.SegmentStatusManager
import org.apache.carbondata.processing.etl.DataLoadingException
import org.apache.carbondata.spark._
import org.apache.carbondata.spark.load._
import org.apache.carbondata.spark.merger.CarbonDataMergerUtil
import org.apache.carbondata.spark.splits.TableSplit
import org.apache.carbondata.spark.util.{CarbonQueryUtil, LoadMetadataUtil}
/**
* This is the factory class which can create different RDD depends on user needs.
*
*/
object CarbonDataRDDFactory extends Logging {
val logger = LogServiceFactory.getLogService(CarbonDataRDDFactory.getClass.getName)
/**
 * NOTE(review): this method only resolves the carbon table and its metadata path and
 * then returns -- none of its parameters are used and nothing is merged. It appears
 * to be an unfinished stub; confirm whether it should be implemented or removed.
 */
def mergeCarbonData(
sqlContext: SQLContext,
carbonLoadModel: CarbonLoadModel,
storeLocation: String,
hdfsStoreLocation: String,
partitioner: Partitioner) {
// look up the already-loaded table by its "<database>_<table>" key
val table = CarbonMetadata.getInstance()
.getCarbonTable(carbonLoadModel.getDatabaseName + "_" + carbonLoadModel.getTableName)
// computed but never used -- see the stub note above
val metaDataPath: String = table.getMetaDataFilepath
}
/**
 * Marks loads matching the given date as deleted/updated in the table status file.
 *
 * The date matching itself runs distributed via CarbonDeleteLoadByDateRDD; the
 * driver then rewrites the table status entries under the metadata lock.
 *
 * @param dateField           user-visible name of the date column
 * @param dateFieldActualName actual (stored) name of the date column
 * @param dateValue           the date value whose loads should be removed
 */
def deleteLoadByDate(
    sqlContext: SQLContext,
    schema: CarbonDataLoadSchema,
    databaseName: String,
    tableName: String,
    hdfsStoreLocation: String,
    dateField: String,
    dateFieldActualName: String,
    dateValue: String,
    partitioner: Partitioner) {
  val sc = sqlContext
  // Delete the records based on data
  val table = org.apache.carbondata.core.carbon.metadata.CarbonMetadata.getInstance
    .getCarbonTable(databaseName + "_" + tableName)
  val segmentStatusManager = new SegmentStatusManager(table.getAbsoluteTableIdentifier)
  val loadMetadataDetailsArray =
    segmentStatusManager.readLoadMetadata(table.getMetaDataFilepath()).toList
  // Run the delete over the cluster; collected results are grouped by load name.
  // NOTE(review): the fourth argument passes table.getDatabaseName — confirm against
  // CarbonDeleteLoadByDateRDD's constructor whether a table name was intended here.
  val resultMap = new CarbonDeleteLoadByDateRDD(
    sc.sparkContext,
    new DeletedLoadResultImpl(),
    databaseName,
    table.getDatabaseName,
    dateField,
    dateFieldActualName,
    dateValue,
    partitioner,
    table.getFactTableName,
    tableName,
    hdfsStoreLocation,
    loadMetadataDetailsArray).collect.groupBy(_._1)
  var updatedLoadMetadataDetailsList = new ListBuffer[LoadMetadataDetails]()
  if (resultMap.nonEmpty) {
    // A single result keyed by the empty string signals a global failure
    // (empty store or an invalid column type) — abort with an error.
    if (resultMap.size == 1) {
      if (resultMap.contains("")) {
        logError("Delete by Date request is failed")
        sys.error("Delete by Date request is failed, potential causes " +
          "Empty store or Invalid column type, For more details please refer logs.")
      }
    }
    // Re-stamp each affected load entry with the new status.
    val updatedloadMetadataDetails = loadMetadataDetailsArray.map { elem => {
      var statusList = resultMap.get(elem.getLoadName)
      // check for the merged load folder.
      if (statusList.isEmpty && null != elem.getMergedLoadName) {
        statusList = resultMap.get(elem.getMergedLoadName)
      }
      if (statusList.isDefined) {
        elem.setModificationOrdeletionTimesStamp(CarbonLoaderUtil.readCurrentTime())
        // if at least one CarbonCommonConstants.MARKED_FOR_UPDATE status exists,
        // use MARKED_FOR_UPDATE; only when ALL partitions report delete is the
        // whole load marked for delete.
        if (statusList.get
          .forall(status => status._2 == CarbonCommonConstants.MARKED_FOR_DELETE)) {
          elem.setLoadStatus(CarbonCommonConstants.MARKED_FOR_DELETE)
        } else {
          elem.setLoadStatus(CarbonCommonConstants.MARKED_FOR_UPDATE)
          updatedLoadMetadataDetailsList += elem
        }
        elem
      } else {
        elem
      }
    }
    }
    // Save the load metadata — only under the table metadata lock.
    val carbonLock = CarbonLockFactory
      .getCarbonLockObj(table.getAbsoluteTableIdentifier.getCarbonTableIdentifier,
        LockUsage.METADATA_LOCK
      )
    try {
      if (carbonLock.lockWithRetries()) {
        logInfo("Successfully got the table metadata file lock")
        if (updatedLoadMetadataDetailsList.nonEmpty) {
          // TODO: Load Aggregate tables after retention.
        }
        // write
        // NOTE(review): third argument is table.getDatabaseName where
        // writeLoadMetadata may expect the table name — verify.
        CarbonLoaderUtil.writeLoadMetadata(
          schema,
          databaseName,
          table.getDatabaseName,
          updatedloadMetadataDetails.asJava
        )
      }
    } finally {
      if (carbonLock.unlock()) {
        logInfo("unlock the table metadata file successfully")
      } else {
        logError("Unable to unlock the metadata lock")
      }
    }
  } else {
    logError("Delete by Date request is failed")
    logger.audit(s"The delete load by date is failed for $databaseName.$tableName")
    sys.error("Delete by Date request is failed, potential causes " +
      "Empty store or Invalid column type, For more details please refer logs.")
  }
}
/**
 * Lowers the input-split max size when the total input is smaller than
 * (parallelism * block size), so that every available core receives a split.
 * The computed size is clamped to a 16MB floor and written into the given
 * Hadoop configuration.
 */
def configSplitMaxSize(context: SparkContext, filePaths: String,
    hadoopConfiguration: Configuration): Unit = {
  val parallelism = math.max(context.defaultParallelism, 1)
  val spaceConsumed = FileUtils.getSpaceOccupied(filePaths)
  val blockSize =
    hadoopConfiguration.getLongBytes("dfs.blocksize", CarbonCommonConstants.CARBON_256MB)
  logInfo("[Block Distribution]")
  // Only shrink splits when default blocks would leave some cores idle.
  if (spaceConsumed < parallelism * blockSize) {
    val newSplitSize: Long =
      math.max(spaceConsumed / parallelism, CarbonCommonConstants.CARBON_16MB)
    hadoopConfiguration.set(FileInputFormat.SPLIT_MAXSIZE, newSplitSize.toString)
    logInfo("totalInputSpaceConsumed : " + spaceConsumed +
      " , defaultParallelism : " + parallelism)
    logInfo("mapreduce.input.fileinputformat.split.maxsize : " + newSplitSize.toString)
  }
}
/**
 * Handles an ALTER TABLE ... COMPACT request (DDL-triggered compaction).
 *
 * Determines the compaction type/size from the DDL, then either routes through
 * the system-level compaction lock (when concurrent compaction is disabled) or
 * takes the per-table compaction lock and starts the compaction threads.
 *
 * @throws RuntimeException via sys.error when the per-table lock cannot be acquired
 */
def alterTableForCompaction(sqlContext: SQLContext,
    alterTableModel: AlterTableModel,
    carbonLoadModel: CarbonLoadModel, partitioner: Partitioner, hdfsStoreLocation: String,
    kettleHomePath: String, storeLocation: String): Unit = {
  var compactionSize: Long = 0
  var compactionType: CompactionType = CompactionType.MINOR_COMPACTION
  // Major compaction carries a size threshold; minor does not.
  if (alterTableModel.compactionType.equalsIgnoreCase("major")) {
    compactionSize = CarbonDataMergerUtil.getCompactionSize(CompactionType.MAJOR_COMPACTION)
    compactionType = CompactionType.MAJOR_COMPACTION
  }
  else {
    compactionType = CompactionType.MINOR_COMPACTION
  }
  logger
    .audit(s"Compaction request received for table " +
      s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}"
    )
  val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
  val tableCreationTime = CarbonEnv.getInstance(sqlContext).carbonCatalog
    .getTableCreationTime(carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName)
  // Ensure the model carries the current segment list before compacting.
  if (null == carbonLoadModel.getLoadMetadataDetails) {
    readLoadMetadataDetails(carbonLoadModel, hdfsStoreLocation)
  }
  // reading the start time of data load.
  val loadStartTime = CarbonLoaderUtil.readCurrentTime()
  carbonLoadModel.setFactTimeStamp(loadStartTime)
  val isCompactionTriggerByDDl = true
  val compactionModel = CompactionModel(compactionSize,
    compactionType,
    carbonTable,
    tableCreationTime,
    isCompactionTriggerByDDl
  )
  val isConcurrentCompactionAllowed = CarbonProperties.getInstance()
    .getProperty(CarbonCommonConstants.ENABLE_CONCURRENT_COMPACTION,
      CarbonCommonConstants.DEFAULT_ENABLE_CONCURRENT_COMPACTION
    )
    .equalsIgnoreCase("true")
  // if system level compaction is enabled then only one compaction can run in the system
  // if any other request comes at this time then it will create a compaction request file.
  // so that this will be taken up by the compaction process which is executing.
  if (!isConcurrentCompactionAllowed) {
    logger
      .info("System level compaction lock is enabled."
      )
    handleCompactionForSystemLocking(sqlContext,
      carbonLoadModel,
      partitioner,
      hdfsStoreLocation,
      kettleHomePath,
      storeLocation,
      compactionType,
      carbonTable,
      compactionModel
    )
  }
  else {
    // normal flow of compaction: guard with the per-table compaction lock.
    val lock = CarbonLockFactory
      .getCarbonLockObj(carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier,
        LockUsage.COMPACTION_LOCK
      )
    if (lock.lockWithRetries()) {
      logger
        .info("Acquired the compaction lock for table " + carbonLoadModel
          .getDatabaseName + "." + carbonLoadModel.getTableName
        )
      try {
        startCompactionThreads(sqlContext,
          carbonLoadModel,
          partitioner,
          hdfsStoreLocation,
          kettleHomePath,
          storeLocation,
          compactionModel,
          lock
        )
      }
      catch {
        // Release the lock only on failure to start; on success the
        // compaction thread is responsible for unlocking.
        case e : Exception =>
          logger.error("Exception in start compaction thread. " + e.getMessage)
          lock.unlock()
      }
    }
    else {
      logger
        .audit("Not able to acquire the compaction lock for table " +
          s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}"
        )
      logger
        .error("Not able to acquire the compaction lock for table " + carbonLoadModel
          .getDatabaseName + "." + carbonLoadModel.getTableName
        )
      sys.error("Table is already locked for compaction. Please try after some time.")
    }
  }
}
/**
 * Runs compaction under the SYSTEM-level lock (used when concurrent compaction
 * is disabled): at most one compaction may run in the whole system.
 *
 * If the system lock cannot be acquired, a "compaction required" marker file is
 * written so the currently running compaction picks this table up later; for a
 * DDL-triggered request this is surfaced to the user as an error.
 */
def handleCompactionForSystemLocking(sqlContext: SQLContext,
    carbonLoadModel: CarbonLoadModel,
    partitioner: Partitioner,
    hdfsStoreLocation: String,
    kettleHomePath: String,
    storeLocation: String,
    compactionType: CompactionType,
    carbonTable: CarbonTable,
    compactionModel: CompactionModel): Unit = {
  val lock = CarbonLockFactory
    .getCarbonLockObj(CarbonCommonConstants.SYSTEM_LEVEL_COMPACTION_LOCK_FOLDER,
      LockUsage.SYSTEMLEVEL_COMPACTION_LOCK
    )
  if (lock.lockWithRetries()) {
    logger
      .info("Acquired the compaction lock for table " + carbonLoadModel
        .getDatabaseName + "." + carbonLoadModel.getTableName
      )
    try {
      startCompactionThreads(sqlContext,
        carbonLoadModel,
        partitioner,
        hdfsStoreLocation,
        kettleHomePath,
        storeLocation,
        compactionModel,
        lock
      )
    }
    catch {
      case e : Exception =>
        logger.error("Exception in start compaction thread. " + e.getMessage)
        lock.unlock()
        // if the compaction is a blocking call then only need to throw the exception.
        if (compactionModel.isDDLTrigger) {
          throw e
        }
    }
  }
  else {
    logger
      .audit("Not able to acquire the system level compaction lock for table " +
        s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}"
      )
    logger
      .error("Not able to acquire the compaction lock for table " + carbonLoadModel
        .getDatabaseName + "." + carbonLoadModel.getTableName
      )
    // Queue this table: the running compaction will find the marker file.
    CarbonCompactionUtil
      .createCompactionRequiredFile(carbonTable.getMetaDataFilepath, compactionType)
    // do sys error only in case of DDL trigger.
    if(compactionModel.isDDLTrigger) {
      sys.error("Compaction is in progress, compaction request for table " + carbonLoadModel
        .getDatabaseName + "." + carbonLoadModel.getTableName + " is in queue.")
    }
    else {
      logger
        .error("Compaction is in progress, compaction request for table " + carbonLoadModel
          .getDatabaseName + "." + carbonLoadModel.getTableName + " is in queue."
        )
    }
  }
}
/**
 * Repeatedly identifies mergeable segments and submits merge jobs until fewer
 * than two segments qualify.
 *
 * After each round the table status is re-read, so segments created by the
 * merge itself become candidates for the next round (cascading minor
 * compaction). For major compaction, segments loaded after the compaction
 * started are filtered out so it runs exactly one scan over the old loads.
 *
 * @throws Exception rethrown from any failed merge job
 */
def executeCompaction(carbonLoadModel: CarbonLoadModel,
    hdfsStoreLocation: String,
    compactionModel: CompactionModel,
    partitioner: Partitioner,
    executor: ExecutorService,
    sqlContext: SQLContext,
    kettleHomePath: String,
    storeLocation: String): Unit = {
  // Sorted copy used only to remember the newest segment at loop start.
  val sortedSegments: util.List[LoadMetadataDetails] = new util.ArrayList[LoadMetadataDetails](
    carbonLoadModel.getLoadMetadataDetails
  )
  CarbonDataMergerUtil.sortSegments(sortedSegments)
  var segList = carbonLoadModel.getLoadMetadataDetails
  var loadsToMerge = CarbonDataMergerUtil.identifySegmentsToBeMerged(
    hdfsStoreLocation,
    carbonLoadModel,
    partitioner.partitionCount,
    compactionModel.compactionSize,
    segList,
    compactionModel.compactionType
  )
  while (loadsToMerge.size() > 1) {
    val lastSegment = sortedSegments.get(sortedSegments.size() - 1)
    // Best-effort removal of stale/partial segment folders before merging.
    deletePartialLoadsInCompaction(carbonLoadModel)
    val futureList: util.List[Future[Void]] = new util.ArrayList[Future[Void]](
      CarbonCommonConstants
        .DEFAULT_COLLECTION_SIZE
    )
    scanSegmentsAndSubmitJob(futureList,
      loadsToMerge,
      executor,
      hdfsStoreLocation,
      sqlContext,
      compactionModel,
      kettleHomePath,
      carbonLoadModel,
      partitioner,
      storeLocation
    )
    // Block until every submitted merge job of this round completes.
    try {
      futureList.asScala.foreach(future => {
        future.get
      }
      )
    }
    catch {
      case e: Exception =>
        logger.error("Exception in compaction thread " + e.getMessage)
        throw e
    }
    // scan again and determine if anything is there to merge again.
    readLoadMetadataDetails(carbonLoadModel, hdfsStoreLocation)
    segList = carbonLoadModel.getLoadMetadataDetails
    // in case of major compaction we will scan only once and come out as it will keep
    // on doing major for the new loads also.
    // excluding the newly added segments.
    if (compactionModel.compactionType == CompactionType.MAJOR_COMPACTION) {
      segList = CarbonDataMergerUtil
        .filterOutNewlyAddedSegments(carbonLoadModel.getLoadMetadataDetails, lastSegment)
    }
    loadsToMerge = CarbonDataMergerUtil.identifySegmentsToBeMerged(
      hdfsStoreLocation,
      carbonLoadModel,
      partitioner.partitionCount,
      compactionModel.compactionSize,
      segList,
      compactionModel.compactionType
    )
  }
}
/**
 * Logs the segments chosen for this merge round and submits them to the
 * executor as a single compaction task.
 *
 * @param futureList receives the Future of the submitted compaction job so the
 *                   caller can block on its completion
 */
def scanSegmentsAndSubmitJob(futureList: util.List[Future[Void]],
    loadsToMerge: util.List[LoadMetadataDetails],
    executor: ExecutorService,
    hdfsStoreLocation: String,
    sqlContext: SQLContext,
    compactionModel: CompactionModel,
    kettleHomePath: String,
    carbonLoadModel: CarbonLoadModel,
    partitioner: Partitioner,
    storeLocation: String): Unit = {
  // Trace which segments participate in this merge.
  for (seg <- loadsToMerge.asScala) {
    logger.info("loads identified for merge is " + seg.getLoadName)
  }
  // Bundle everything the compaction callable needs into one model object.
  val callableModel = CompactionCallableModel(hdfsStoreLocation,
    carbonLoadModel,
    partitioner,
    storeLocation,
    compactionModel.carbonTable,
    kettleHomePath,
    compactionModel.tableCreationTime,
    loadsToMerge,
    sqlContext,
    compactionModel.compactionType)
  // Submit the job and expose its future to the caller.
  futureList.add(executor.submit(new CompactionCallable(callableModel)))
}
/**
 * Drives compaction for the triggering table and, when system-level locking is
 * in force, for any other tables that queued a "compaction required" file.
 *
 * The work is wrapped in a Thread whose run() is invoked directly (NOT
 * start()), so the call is deliberately blocking — see the trailing comment.
 * The executor is shut down and the given compactionLock released in the
 * thread's finally block.
 */
def startCompactionThreads(sqlContext: SQLContext,
    carbonLoadModel: CarbonLoadModel,
    partitioner: Partitioner,
    hdfsStoreLocation: String,
    kettleHomePath: String,
    storeLocation: String,
    compactionModel: CompactionModel,
    compactionLock: ICarbonLock): Unit = {
  val executor: ExecutorService = Executors.newFixedThreadPool(1)
  // update the updated table status.
  readLoadMetadataDetails(carbonLoadModel, hdfsStoreLocation)
  // NOTE(review): segList appears to be unused in this method — candidate for removal.
  var segList: util.List[LoadMetadataDetails] = carbonLoadModel.getLoadMetadataDetails
  // clean up of the stale segments.
  try {
    CarbonLoaderUtil.deletePartialLoadDataIfExist(carbonLoadModel, true)
  }
  catch {
    case e: Exception =>
      logger
        .error("Exception in compaction thread while clean up of stale segments " + e
          .getMessage
        )
  }
  val compactionThread = new Thread {
    override def run(): Unit = {
      try {
        // compaction status of the table which is triggered by the user.
        var triggeredCompactionStatus = false
        var exception : Exception = null
        try {
          executeCompaction(carbonLoadModel: CarbonLoadModel,
            hdfsStoreLocation: String,
            compactionModel: CompactionModel,
            partitioner: Partitioner,
            executor, sqlContext, kettleHomePath, storeLocation
          )
          triggeredCompactionStatus = true
        }
        catch {
          case e: Exception =>
            logger.error("Exception in compaction thread " + e.getMessage)
            exception = e
        }
        // continue in case of exception also, check for all the tables.
        val isConcurrentCompactionAllowed = CarbonProperties.getInstance()
          .getProperty(CarbonCommonConstants.ENABLE_CONCURRENT_COMPACTION,
            CarbonCommonConstants.DEFAULT_ENABLE_CONCURRENT_COMPACTION
          ).equalsIgnoreCase("true")
        if (!isConcurrentCompactionAllowed) {
          logger.info("System level compaction lock is enabled.")
          // Tables whose request file could not be deleted are skipped to
          // avoid re-processing them in an endless loop.
          val skipCompactionTables = ListBuffer[CarbonTableIdentifier]()
          var tableForCompaction = CarbonCompactionUtil
            .getNextTableToCompact(CarbonEnv.getInstance(sqlContext).carbonCatalog.metadata
              .tablesMeta.toArray, skipCompactionTables.toList.asJava
            )
          while (null != tableForCompaction) {
            logger
              .info("Compaction request has been identified for table " + tableForCompaction
                .carbonTable.getDatabaseName + "." + tableForCompaction.carbonTableIdentifier
                .getTableName
              )
            val table: CarbonTable = tableForCompaction.carbonTable
            val metadataPath = table.getMetaDataFilepath
            val compactionType = CarbonCompactionUtil.determineCompactionType(metadataPath)
            val newCarbonLoadModel = new CarbonLoadModel()
            prepareCarbonLoadModel(hdfsStoreLocation, table, newCarbonLoadModel)
            val tableCreationTime = CarbonEnv.getInstance(sqlContext).carbonCatalog
              .getTableCreationTime(newCarbonLoadModel.getDatabaseName,
                newCarbonLoadModel.getTableName
              )
            val compactionSize = CarbonDataMergerUtil
              .getCompactionSize(CompactionType.MAJOR_COMPACTION)
            val newcompactionModel = CompactionModel(compactionSize,
              compactionType,
              table,
              tableCreationTime,
              compactionModel.isDDLTrigger
            )
            // proceed for compaction
            try {
              executeCompaction(newCarbonLoadModel,
                newCarbonLoadModel.getStorePath,
                newcompactionModel,
                partitioner,
                executor, sqlContext, kettleHomePath, storeLocation
              )
            }
            catch {
              case e: Exception =>
                logger.error("Exception in compaction thread for table " + tableForCompaction
                  .carbonTable.getDatabaseName + "." +
                  tableForCompaction.carbonTableIdentifier
                    .getTableName)
              // not handling the exception. only logging as this is not the table triggered
              // by user.
            }
            finally {
              // delete the compaction required file in case of failure or success also.
              if (!CarbonCompactionUtil
                .deleteCompactionRequiredFile(metadataPath, compactionType)) {
                // if the compaction request file is not been able to delete then
                // add those tables details to the skip list so that it wont be considered next.
                skipCompactionTables.+=:(tableForCompaction.carbonTableIdentifier)
                logger
                  .error("Compaction request file can not be deleted for table " +
                    tableForCompaction
                      .carbonTable.getDatabaseName + "." + tableForCompaction
                      .carbonTableIdentifier
                      .getTableName
                  )
              }
            }
            // ********* check again for all the tables.
            tableForCompaction = CarbonCompactionUtil
              .getNextTableToCompact(CarbonEnv.getInstance(sqlContext).carbonCatalog.metadata
                .tablesMeta.toArray, skipCompactionTables.asJava
              )
          }
          // giving the user his error for telling in the beeline if his triggered table
          // compaction is failed.
          // NOTE(review): this rethrow only happens in the system-lock branch; when
          // concurrent compaction is allowed a failed user compaction is not rethrown
          // here — confirm whether that is intentional.
          if (!triggeredCompactionStatus) {
            throw new Exception("Exception in compaction " + exception.getMessage)
          }
        }
      }
      finally {
        executor.shutdownNow()
        deletePartialLoadsInCompaction(carbonLoadModel)
        compactionLock.unlock()
      }
    }
  }
  // calling the run method of a thread to make the call as blocking call.
  // in the future we may make this as concurrent.
  compactionThread.run()
}
/**
 * Populates a fresh CarbonLoadModel for a background compaction of the given
 * table: aggregate tables, schema, names, store path, the current segment
 * metadata and a new fact timestamp.
 *
 * @param hdfsStoreLocation  store location forwarded to readLoadMetadataDetails
 * @param table              the table to be compacted
 * @param newCarbonLoadModel model instance to populate (mutated in place)
 */
def prepareCarbonLoadModel(hdfsStoreLocation: String,
    table: CarbonTable,
    newCarbonLoadModel: CarbonLoadModel): Unit = {
  newCarbonLoadModel.setAggTables(table.getAggregateTablesName.asScala.toArray)
  val dataLoadSchema = new CarbonDataLoadSchema(table)
  // Need to fill dimension relation
  newCarbonLoadModel.setCarbonDataLoadSchema(dataLoadSchema)
  // Fix: the table name was previously set twice (first to the fact table name,
  // then immediately overwritten with the identifier's table name); only the
  // final, effective assignment is kept.
  newCarbonLoadModel.setTableName(table.getCarbonTableIdentifier.getTableName)
  newCarbonLoadModel.setDatabaseName(table.getCarbonTableIdentifier.getDatabaseName)
  newCarbonLoadModel.setStorePath(table.getStorePath)
  // Seed the model with the existing segments so compaction can pick them up.
  readLoadMetadataDetails(newCarbonLoadModel, hdfsStoreLocation)
  // reading the start time of data load.
  val loadStartTime = CarbonLoaderUtil.readCurrentTime()
  newCarbonLoadModel.setFactTimeStamp(loadStartTime)
}
/**
 * Best-effort removal of partially loaded data before compaction.
 *
 * A segment folder can exist in the store without an entry in the table status
 * file (e.g. after an aborted load); such folders are deleted here. Failures
 * are logged and swallowed so cleanup never aborts the compaction flow.
 */
def deletePartialLoadsInCompaction(carbonLoadModel: CarbonLoadModel): Unit = {
  try {
    CarbonLoaderUtil.deletePartialLoadDataIfExist(carbonLoadModel, true)
  } catch {
    case e: Exception =>
      logger.error(
        "Exception in compaction thread while clean up of stale segments " + e.getMessage)
  }
}
/**
 * Loads data into the given carbon table, from either CSV files or a Spark
 * DataFrame, then records the load in the table status file and triggers
 * auto-compaction when required.
 *
 * High-level flow: delete expired loads -> determine next segment id -> clean
 * partial loads -> run the distributed load (file- or DataFrame-based) ->
 * aggregate per-partition statuses -> on failure delete the new segment and
 * throw; on success write load metadata and attempt segment merging.
 *
 * @param columinar       whether to write in columnar format
 * @param isAgg           true when loading an aggregate table (skips status recording)
 * @param partitionStatus status propagated from the partitioning step
 * @param dataFrame       optional DataFrame source; when empty, CSV files are loaded
 * @throws Exception when the load fails or the table status cannot be updated
 */
def loadCarbonData(sqlContext: SQLContext,
    carbonLoadModel: CarbonLoadModel,
    storeLocation: String,
    hdfsStoreLocation: String,
    kettleHomePath: String,
    partitioner: Partitioner,
    columinar: Boolean,
    isAgg: Boolean,
    partitionStatus: String = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS,
    dataFrame: Option[DataFrame] = None): Unit = {
  val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
  // for handling of the segment Merging (auto-compaction after a successful load).
  def handleSegmentMerging(tableCreationTime: Long): Unit = {
    logger
      .info("compaction need status is " + CarbonDataMergerUtil.checkIfAutoLoadMergingRequired())
    if (CarbonDataMergerUtil.checkIfAutoLoadMergingRequired()) {
      logger
        .audit("Compaction request received for table " + carbonLoadModel
          .getDatabaseName + "." + carbonLoadModel.getTableName
        )
      val compactionSize = 0
      val isCompactionTriggerByDDl = false
      val compactionModel = CompactionModel(compactionSize,
        CompactionType.MINOR_COMPACTION,
        carbonTable,
        tableCreationTime,
        isCompactionTriggerByDDl
      )
      // Pick a local temp dir for compaction scratch space; shadows the
      // outer storeLocation parameter on purpose.
      var storeLocation = ""
      val configuredStore = CarbonLoaderUtil.getConfiguredLocalDirs(SparkEnv.get.conf)
      if (null != configuredStore && configuredStore.nonEmpty) {
        storeLocation = configuredStore(Random.nextInt(configuredStore.length))
      }
      if (storeLocation == null) {
        storeLocation = System.getProperty("java.io.tmpdir")
      }
      storeLocation = storeLocation + "/carbonstore/" + System.nanoTime()
      val isConcurrentCompactionAllowed = CarbonProperties.getInstance()
        .getProperty(CarbonCommonConstants.ENABLE_CONCURRENT_COMPACTION,
          CarbonCommonConstants.DEFAULT_ENABLE_CONCURRENT_COMPACTION
        )
        .equalsIgnoreCase("true")
      if (!isConcurrentCompactionAllowed) {
        handleCompactionForSystemLocking(sqlContext,
          carbonLoadModel,
          partitioner,
          hdfsStoreLocation,
          kettleHomePath,
          storeLocation,
          CompactionType.MINOR_COMPACTION,
          carbonTable,
          compactionModel
        )
      }
      else {
        val lock = CarbonLockFactory
          .getCarbonLockObj(carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier,
            LockUsage.COMPACTION_LOCK
          )
        if (lock.lockWithRetries()) {
          logger.info("Acquired the compaction lock.")
          try {
            startCompactionThreads(sqlContext,
              carbonLoadModel,
              partitioner,
              hdfsStoreLocation,
              kettleHomePath,
              storeLocation,
              compactionModel,
              lock
            )
          }
          catch {
            case e : Exception =>
              logger.error("Exception in start compaction thread. " + e.getMessage)
              lock.unlock()
              throw e
          }
        }
        else {
          logger
            .audit("Not able to acquire the compaction lock for table " + carbonLoadModel
              .getDatabaseName + "." + carbonLoadModel.getTableName
            )
          logger
            .error("Not able to acquire the compaction lock for table " + carbonLoadModel
              .getDatabaseName + "." + carbonLoadModel.getTableName
            )
        }
      }
    }
  }
  // NOTE(review): this try has no catch/finally — it behaves like a plain
  // block and any exception propagates to the caller.
  try {
    logger
      .audit("Data load request has been received for table " + carbonLoadModel
        .getDatabaseName + "." + carbonLoadModel.getTableName
      )
    // Check if any load need to be deleted before loading new data
    deleteLoadsAndUpdateMetadata(carbonLoadModel, carbonTable, partitioner, hdfsStoreLocation,
      isForceDeletion = false)
    if (null == carbonLoadModel.getLoadMetadataDetails) {
      readLoadMetadataDetails(carbonLoadModel, hdfsStoreLocation)
    }
    var currentLoadCount = -1
    val convLoadDetails = carbonLoadModel.getLoadMetadataDetails.asScala
    // taking the latest segment ID present.
    // so that any other segments above this will be deleted.
    if (convLoadDetails.nonEmpty) {
      convLoadDetails.foreach { l =>
        var loadCount = 0
        breakable {
          try {
            loadCount = Integer.parseInt(l.getLoadName)
          } catch {
            case e: NumberFormatException => // case of merge folder. ignore it.
              break
          }
          if (currentLoadCount < loadCount) {
            currentLoadCount = loadCount
          }
        }
      }
    }
    // The new segment id is one past the highest numeric load name seen.
    currentLoadCount += 1
    // Deleting the any partially loaded data if present.
    // in some case the segment folder which is present in store will not have entry in status.
    // so deleting those folders.
    try {
      CarbonLoaderUtil.deletePartialLoadDataIfExist(carbonLoadModel, false)
    }
    catch {
      case e: Exception =>
        logger
          .error("Exception in data load while clean up of stale segments " + e
            .getMessage
          )
    }
    // reading the start time of data load.
    val loadStartTime = CarbonLoaderUtil.readCurrentTime()
    carbonLoadModel.setFactTimeStamp(loadStartTime)
    val tableCreationTime = CarbonEnv.getInstance(sqlContext).carbonCatalog
      .getTableCreationTime(carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName)
    val schemaLastUpdatedTime = CarbonEnv.getInstance(sqlContext).carbonCatalog
      .getSchemaLastUpdatedTime(carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName)
    // get partition way from configuration
    // val isTableSplitPartition = CarbonProperties.getInstance().getProperty(
    // CarbonCommonConstants.TABLE_SPLIT_PARTITION,
    // CarbonCommonConstants.TABLE_SPLIT_PARTITION_DEFAULT_VALUE).toBoolean
    val isTableSplitPartition = false
    // Shared state filled by loadDataFile/loadDataFrame below.
    var blocksGroupBy: Array[(String, Array[BlockDetails])] = null
    var status: Array[(String, LoadMetadataDetails)] = null
    // Loads CSV files: groups input blocks per partition/node, then runs the
    // distributed DataFileLoaderRDD and collects per-partition statuses.
    def loadDataFile(): Unit = { isTableSplitPartition match {
      case true =>
        /*
         * when data handle by table split partition
         * 1) get partition files, direct load or not will get the different files path
         * 2) get files blocks by using SplitUtils
         * 3) output Array[(partitionID,Array[BlockDetails])] to blocksGroupBy
         */
        var splits = Array[TableSplit]()
        if (carbonLoadModel.isDirectLoad) {
          // get all table Splits, this part means files were divide to different partitions
          splits = CarbonQueryUtil.getTableSplitsForDirectLoad(carbonLoadModel.getFactFilePath,
            partitioner.nodeList, partitioner.partitionCount
          )
          // get all partition blocks from file list
          blocksGroupBy = splits.map {
            split =>
              val pathBuilder = new StringBuilder()
              for (path <- split.getPartition.getFilesPath.asScala) {
                pathBuilder.append(path).append(",")
              }
              if (pathBuilder.nonEmpty) {
                pathBuilder.substring(0, pathBuilder.size - 1)
              }
              (split.getPartition.getUniqueID, SplitUtils.getSplits(pathBuilder.toString(),
                sqlContext.sparkContext
              ))
          }
        } else {
          // get all table Splits,when come to this, means data have been partition
          splits = CarbonQueryUtil.getTableSplits(carbonLoadModel.getDatabaseName,
            carbonLoadModel.getTableName, null, partitioner
          )
          // get all partition blocks from factFilePath/uniqueID/
          blocksGroupBy = splits.map {
            split =>
              val pathBuilder = new StringBuilder()
              pathBuilder.append(carbonLoadModel.getFactFilePath)
              if (!carbonLoadModel.getFactFilePath.endsWith("/")
                && !carbonLoadModel.getFactFilePath.endsWith("\\\\")) {
                pathBuilder.append("/")
              }
              pathBuilder.append(split.getPartition.getUniqueID).append("/")
              (split.getPartition.getUniqueID,
                SplitUtils.getSplits(pathBuilder.toString, sqlContext.sparkContext))
          }
        }
      case false =>
        /*
         * when data load handle by node partition
         * 1)clone the hadoop configuration,and set the file path to the configuration
         * 2)use NewHadoopRDD to get split,size:Math.max(minSize, Math.min(maxSize, blockSize))
         * 3)use DummyLoadRDD to group blocks by host,and let spark balance the block location
         * 4)DummyLoadRDD output (host,Array[BlockDetails])as the parameter to CarbonDataLoadRDD
         * which parititon by host
         */
        val hadoopConfiguration = new Configuration(sqlContext.sparkContext.hadoopConfiguration)
        // FileUtils will skip file which is no csv, and return all file path which split by ','
        val filePaths = carbonLoadModel.getFactFilePath
        hadoopConfiguration.set(FileInputFormat.INPUT_DIR, filePaths)
        hadoopConfiguration.set(FileInputFormat.INPUT_DIR_RECURSIVE, "true")
        // NOTE(review): stripMargin has no effect here (no '|' margin chars).
        hadoopConfiguration.set("io.compression.codecs",
          """org.apache.hadoop.io.compress.GzipCodec,
             org.apache.hadoop.io.compress.DefaultCodec,
             org.apache.hadoop.io.compress.BZip2Codec""".stripMargin)
        configSplitMaxSize(sqlContext.sparkContext, filePaths, hadoopConfiguration)
        val inputFormat = new org.apache.hadoop.mapreduce.lib.input.TextInputFormat
        inputFormat match {
          case configurable: Configurable =>
            configurable.setConf(hadoopConfiguration)
          case _ =>
        }
        val jobContext = new Job(hadoopConfiguration)
        val rawSplits = inputFormat.getSplits(jobContext).toArray
        // NOTE(review): 'result' appears unused — candidate for removal.
        val result = new Array[Partition](rawSplits.size)
        val blockList = rawSplits.map(inputSplit => {
          val fileSplit = inputSplit.asInstanceOf[FileSplit]
          new TableBlockInfo(fileSplit.getPath.toString,
            fileSplit.getStart, "1",
            fileSplit.getLocations, fileSplit.getLength
          ).asInstanceOf[Distributable]
        }
        )
        // group blocks to nodes, tasks
        val startTime = System.currentTimeMillis
        val activeNodes = DistributionUtil
          .ensureExecutorsAndGetNodeList(blockList, sqlContext.sparkContext)
        val nodeBlockMapping =
          CarbonLoaderUtil
            .nodeBlockMapping(blockList.toSeq.asJava, -1, activeNodes.toList.asJava).asScala
            .toSeq
        val timeElapsed: Long = System.currentTimeMillis - startTime
        logInfo("Total Time taken in block allocation : " + timeElapsed)
        logInfo("Total no of blocks : " + blockList.size
          + ", No.of Nodes : " + nodeBlockMapping.size
        )
        // Build a human-readable summary of block->node assignment, flagging
        // blocks whose replica locations do not include the assigned node.
        var str = ""
        nodeBlockMapping.foreach(entry => {
          val tableBlock = entry._2
          str = str + "#Node: " + entry._1 + " no.of.blocks: " + tableBlock.size()
          tableBlock.asScala.foreach(tableBlockInfo =>
            if (!tableBlockInfo.getLocations.exists(hostentry =>
              hostentry.equalsIgnoreCase(entry._1)
            )) {
              str = str + " , mismatch locations: " + tableBlockInfo.getLocations
                .foldLeft("")((a, b) => a + "," + b)
            }
          )
          str = str + "\\n"
        }
        )
        logInfo(str)
        blocksGroupBy = nodeBlockMapping.map(entry => {
          val blockDetailsList =
            entry._2.asScala.map(distributable => {
              val tableBlock = distributable.asInstanceOf[TableBlockInfo]
              new BlockDetails(tableBlock.getFilePath,
                tableBlock.getBlockOffset, tableBlock.getBlockLength, tableBlock.getLocations
              )
            }).toArray
          (entry._1, blockDetailsList)
        }
        ).toArray
    }
      status = new DataFileLoaderRDD(sqlContext.sparkContext,
        new DataLoadResultImpl(),
        carbonLoadModel,
        storeLocation,
        hdfsStoreLocation,
        kettleHomePath,
        partitioner,
        columinar,
        currentLoadCount,
        tableCreationTime,
        schemaLastUpdatedTime,
        blocksGroupBy,
        isTableSplitPartition
      ).collect()
    }
    // Loads from a DataFrame: coalesce to the available nodes, then run the
    // distributed DataFrameLoaderRDD and collect per-partition statuses.
    def loadDataFrame(): Unit = {
      var rdd = dataFrame.get.rdd
      var numPartitions = DistributionUtil.getNodeList(sqlContext.sparkContext).length
      numPartitions = Math.max(1, Math.min(numPartitions, rdd.partitions.length))
      rdd = rdd.coalesce(numPartitions, false)
      status = new DataFrameLoaderRDD(sqlContext.sparkContext,
        new DataLoadResultImpl(),
        carbonLoadModel,
        storeLocation,
        hdfsStoreLocation,
        kettleHomePath,
        columinar,
        currentLoadCount,
        tableCreationTime,
        schemaLastUpdatedTime,
        rdd).collect()
    }
    CarbonLoaderUtil.checkAndCreateCarbonDataLocation(hdfsStoreLocation,
      carbonLoadModel.getDatabaseName, carbonLoadModel.getTableName,
      partitioner.partitionCount, currentLoadCount.toString)
    var loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS
    var errorMessage: String = "DataLoad failure"
    var executorMessage: String = ""
    try {
      if (dataFrame.isDefined) {
        loadDataFrame()
      } else {
        loadDataFile()
      }
      // Reduce per-partition statuses: FAILURE dominates, then PARTIAL_SUCCESS.
      val newStatusMap = scala.collection.mutable.Map.empty[String, String]
      status.foreach { eachLoadStatus =>
        val state = newStatusMap.get(eachLoadStatus._1)
        state match {
          case Some(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) =>
            newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
          case Some(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)
            if eachLoadStatus._2.getLoadStatus ==
              CarbonCommonConstants.STORE_LOADSTATUS_SUCCESS =>
            newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
          case _ =>
            newStatusMap.put(eachLoadStatus._1, eachLoadStatus._2.getLoadStatus)
        }
      }
      newStatusMap.foreach {
        case (key, value) =>
          if (value == CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) {
            loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_FAILURE
          } else if (value == CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS &&
            !loadStatus.equals(CarbonCommonConstants.STORE_LOADSTATUS_FAILURE)) {
            loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS
          }
      }
      // A partial partitioning step downgrades an otherwise successful load.
      if (loadStatus != CarbonCommonConstants.STORE_LOADSTATUS_FAILURE &&
        partitionStatus == CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS) {
        loadStatus = partitionStatus
      }
    } catch {
      case ex: Throwable =>
        loadStatus = CarbonCommonConstants.STORE_LOADSTATUS_FAILURE
        ex match {
          case sparkException: SparkException =>
            if (sparkException.getCause.isInstanceOf[DataLoadingException]) {
              executorMessage = sparkException.getCause.getMessage
              errorMessage = errorMessage + ": " + executorMessage
            }
          case _ =>
            executorMessage = ex.getCause.getMessage
            errorMessage = errorMessage + ": " + executorMessage
        }
        logInfo(errorMessage)
        logger.error(ex)
    }
    if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) {
      // Failed load: remove the half-written segment and propagate the error.
      logInfo("********starting clean up**********")
      CarbonLoaderUtil.deleteSegment(carbonLoadModel, currentLoadCount)
      logInfo("********clean up done**********")
      logger.audit(s"Data load is failed for " +
        s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}")
      logWarning("Cannot write load metadata file as data load failed")
      throw new Exception(errorMessage)
    } else {
      val metadataDetails = status(0)._2
      if (!isAgg) {
        // Record the new segment in the table status file.
        val status = CarbonLoaderUtil
          .recordLoadMetadata(currentLoadCount,
            metadataDetails,
            carbonLoadModel,
            loadStatus,
            loadStartTime
          )
        if (!status) {
          val errorMessage = "Dataload failed due to failure in table status updation."
          logger.audit("Data load is failed for " +
            s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}")
          logger.error("Dataload failed due to failure in table status updation.")
          throw new Exception(errorMessage)
        }
      } else if (!carbonLoadModel.isRetentionRequest) {
        // TODO : Handle it
        logInfo("********Database updated**********")
      }
      logger.audit("Data load is successful for " +
        s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}")
      try {
        // compaction handling
        handleSegmentMerging(tableCreationTime)
      }
      catch {
        case e: Exception =>
          throw new Exception(
            "Dataload is success. Auto-Compaction has failed. Please check logs.")
      }
    }
  }
}
/**
 * Refreshes the load model with the segment entries recorded in the table
 * status file of its carbon table.
 *
 * NOTE(review): hdfsStoreLocation is currently unused — the metadata path is
 * resolved from the table itself; kept for interface compatibility.
 */
def readLoadMetadataDetails(model: CarbonLoadModel, hdfsStoreLocation: String): Unit = {
  val carbonTable = model.getCarbonDataLoadSchema.getCarbonTable
  val statusManager = new SegmentStatusManager(carbonTable.getAbsoluteTableIdentifier)
  val details = statusManager.readLoadMetadata(carbonTable.getMetaDataFilepath)
  model.setLoadMetadataDetails(details.toList.asJava)
}
/**
 * Physically deletes load folders marked for deletion (or all, when forced)
 * and, if anything was removed, rewrites the table status file under the
 * table-status lock.
 *
 * The status file is re-read after acquiring the lock and merged with the
 * pre-deletion snapshot so concurrent status changes are not lost.
 *
 * @param isForceDeletion when true, delete regardless of retention state
 * @throws Exception when the table status lock cannot be acquired
 */
def deleteLoadsAndUpdateMetadata(
    carbonLoadModel: CarbonLoadModel,
    table: CarbonTable, partitioner: Partitioner,
    hdfsStoreLocation: String,
    isForceDeletion: Boolean) {
  if (LoadMetadataUtil.isLoadDeletionRequired(carbonLoadModel)) {
    val loadMetadataFilePath = CarbonLoaderUtil
      .extractLoadMetadataFileLocation(carbonLoadModel)
    val segmentStatusManager = new SegmentStatusManager(table.getAbsoluteTableIdentifier)
    val details = segmentStatusManager
      .readLoadMetadata(loadMetadataFilePath)
    val carbonTableStatusLock = CarbonLockFactory
      .getCarbonLockObj(table.getAbsoluteTableIdentifier.getCarbonTableIdentifier,
        LockUsage.TABLE_STATUS_LOCK)
    // Delete marked loads
    val isUpdationRequired = DeleteLoadFolders
      .deleteLoadFoldersFromFileSystem(carbonLoadModel, hdfsStoreLocation,
        partitioner.partitionCount, isForceDeletion, details)
    if (isUpdationRequired) {
      try {
        // Update load metadata file after cleaning deleted nodes
        if (carbonTableStatusLock.lockWithRetries()) {
          logger.info("Table status lock has been successfully acquired.")
          // read latest table status again.
          val latestMetadata = segmentStatusManager
            .readLoadMetadata(loadMetadataFilePath)
          // update the metadata details from old to new status.
          val latestStatus = CarbonLoaderUtil
            .updateLoadMetadataFromOldToNew(details, latestMetadata)
          CarbonLoaderUtil.writeLoadMetadata(
            carbonLoadModel.getCarbonDataLoadSchema,
            carbonLoadModel.getDatabaseName,
            carbonLoadModel.getTableName, latestStatus
          )
        }
        else {
          val errorMsg = "Clean files request is failed for " + carbonLoadModel.getDatabaseName +
            "." + carbonLoadModel.getTableName +
            ". Not able to acquire the table status lock due to other operation " +
            "running in the background."
          logger.audit(errorMsg)
          logger.error(errorMsg)
          throw new Exception(errorMsg + " Please try after some time.")
        }
      } finally {
        CarbonLockUtil.fileUnlock(carbonTableStatusLock, LockUsage.TABLE_STATUS_LOCK)
      }
    }
  }
}
/**
 * Drops the given table by running a [[CarbonDropTableRDD]] across the cluster
 * and materializing it with collect.
 */
def dropTable(
    sc: SparkContext,
    schema: String,
    table: String,
    partitioner: Partitioner) {
  // Holder for per-partition results; the RDD only performs the drop as a side effect.
  val resultHolder: Value[Array[Object]] = new ValueImpl()
  new CarbonDropTableRDD(sc, resultHolder, schema, table, partitioner).collect
}
/**
 * Physically removes deleted/compacted load folders for a table and rewrites its
 * metadata, guarded by the CLEAN_FILES lock so only one clean-up runs at a time.
 *
 * @throws Exception when the clean files lock cannot be acquired (another clean
 *                   files operation is already running)
 */
def cleanFiles(
    sc: SparkContext,
    carbonLoadModel: CarbonLoadModel,
    hdfsStoreLocation: String,
    partitioner: Partitioner) {
  val table = org.apache.carbondata.core.carbon.metadata.CarbonMetadata.getInstance
    .getCarbonTable(carbonLoadModel.getDatabaseName + "_" + carbonLoadModel.getTableName)
  val carbonCleanFilesLock = CarbonLockFactory
    .getCarbonLockObj(table.getAbsoluteTableIdentifier.getCarbonTableIdentifier,
      LockUsage.CLEAN_FILES_LOCK
    )
  try {
    if (carbonCleanFilesLock.lockWithRetries()) {
      logger.info("Clean files lock has been successfully acquired.")
      // Force deletion: physically remove the folders rather than only marking them.
      deleteLoadsAndUpdateMetadata(carbonLoadModel,
        table,
        partitioner,
        hdfsStoreLocation,
        isForceDeletion = true)
    }
    else {
      val errorMsg = "Clean files request is failed for " + carbonLoadModel.getDatabaseName +
        "." + carbonLoadModel.getTableName +
        ". Not able to acquire the clean files lock due to another clean files " +
        "operation is running in the background."
      logger.audit(errorMsg)
      logger.error(errorMsg)
      throw new Exception(errorMsg + " Please try after some time.")
    }
  }
  finally {
    // Always release the lock, whether deletion succeeded or failed.
    CarbonLockUtil.fileUnlock(carbonCleanFilesLock, LockUsage.CLEAN_FILES_LOCK)
  }
}
}
| foryou2030/incubator-carbondata | integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala | Scala | apache-2.0 | 48,121 |
/*
* Author: Manish Gupta
*/
package com.guptam.spark.dba.hdfsutil
import com.guptam.spark.dba.common._
/** String helpers for building HDFS paths and unique staging names. */
trait HDFSStringUtil {

  /**
   * Strips a single trailing slash from a path, e.g. "/a/b/" -> "/a/b".
   * Paths without a trailing slash are returned unchanged.
   */
  def removeLastSlash(url: String): String =
    if (url.endsWith("/")) url.dropRight(1) else url

  /**
   * Generates an identifier of the form "<millis>_<hashCode>_<random>".
   * The millisecond-timestamp prefix makes identifiers sortable by creation time.
   */
  def getUniqueHashAndTime(): String = {
    // Draw from [0, Int.MaxValue) instead of calling nextInt().abs:
    // Int.MinValue.abs is still negative and would leak a '-' into the id.
    System.currentTimeMillis().toString() +
      Constants.UNDERSCORE +
      hashCode().toString() +
      Constants.UNDERSCORE +
      util.Random.nextInt(Int.MaxValue).toString()
  }
}
| guptam/spark-dba | src/com/guptam/spark/dba/hdfsutil/HDFSStringUtil.scala | Scala | apache-2.0 | 598 |
/*
* ScalaRay - Ray tracer based on pbrt (see http://pbrt.org) written in Scala
* Copyright (C) 2009, 2010, 2011 Jesper de Jong
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.jesperdj.scalaray
import org.jesperdj.scalaray.common._
package object spectrum {
  // Enables left-hand scaling such as `2.0 * spectrum` by lifting any numeric
  // value (viewable as Double) into a multiplier over Spectrum.
  // NOTE(review): the `<%` view bound is deprecated in later Scala versions;
  // consider an implicit-parameter form when upgrading.
  implicit def implicitScaleSpectrum[@specialized(Int, Double) N <% Double](f: N) = new MultipliableSame[Spectrum] {
    @inline def *(s: Spectrum): Spectrum = s * f
  }

  // Lets Spectrum values be used with the generic interpolate() helper by
  // exposing the scaling and addition operations it requires.
  implicit def spectrumToInterpolatable(s: Spectrum) = new Interpolatable[Spectrum] {
    @inline def *(f: Double): Spectrum = s * f
    @inline def +(s2: Spectrum): Spectrum = s + s2
  }
}
| jesperdj/scalaray | src/main/scala/org/jesperdj/scalaray/spectrum/package.scala | Scala | gpl-3.0 | 1,404 |
package spatutorial.shared
import upickle.default._
/** Priority a todo item can carry. Sealed so pattern matches are exhaustive. */
sealed trait TodoPriority
// Low urgency.
case object TodoLow extends TodoPriority
// Default urgency.
case object TodoNormal extends TodoPriority
// High urgency.
case object TodoHigh extends TodoPriority

/** A single todo entry shared between client and server. */
case class TodoItem(id: String, timeStamp: Int, content: String, priority: TodoPriority, completed: Boolean)

object TodoItem {
  // uPickle serializer derived at compile time.
  implicit val readWriter: ReadWriter[TodoItem] = macroRW[TodoItem]
}

object TodoPriority {
  // note: may need macroRW[TodoLow] merge ... SI-7046
  implicit val readWriter: ReadWriter[TodoPriority] = macroRW[TodoPriority]
}
| drdozer/shoppinglist | web-shared/src/main/scala/spatutorial/shared/TodoItem.scala | Scala | apache-2.0 | 565 |
/**
* Copyright (c) 2013, The National Archives <digitalpreservation@nationalarchives.gov.uk>
* http://www.nationalarchives.gov.uk
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package uk.gov.nationalarchives.dri.jassh
import org.scalatest.{Matchers, FlatSpec}
import org.specs2.mutable.Specification
import org.json4s._
import org.json4s.jackson.JsonMethods._
/**
* Created by dev on 6/9/14.
*/
/**
 * Exploratory spec for json4s extraction of nested case classes and the
 * preingest-loader ClientAction JSON format.
 */
object testJSON extends FlatSpec with Matchers{

  // Brings in default date formats etc. needed by extract[...].
  implicit val formats = DefaultFormats

  case class Child(name: String, age: Int, birthdate: Option[java.util.Date])
  case class Address(street: String, city: String)
  case class Person(name: String, address: Address, children: List[Child])

  // Sample person document with a nested address and children list.
  val json = parse("""
    { "name": "joe",
      "address": {
        "street": "Bulevard",
        "city": "Helsinki"
      },
      "children": [
        {
          "name": "Mary",
          "age": 5,
          "birthdate": "2004-09-04T18:06:22Z"
        },
        {
          "name": "Mazy",
          "age": 3
        }
      ]
    }
  """)

  // Sample "Load" client action with two parts targeting the Holding area.
  val jsonLoad = parse(
    """
    { "actions" : [
        {
          "action": "Load",
          "loadUnit" :
          {
            "uid": "383a118b271b7abff0ebaa62465a8f1cfab9ee20022294dbb58a272896637786",
            "parts" : [
              {
                "unit": "/dri-upload/parts2.zip.gpg",
                "series": "PART1",
                "destination": "Holding"
              },
              {
                "unit": "/dri-upload/parts2.zip.gpg",
                "series": "PART2",
                "destination": "Holding"
              }
            ]
          },
          "certificate": "myprivate.key",
          "passphrase": "passphrase"
        }
      ]
    }
    """)
  println("json" + jsonLoad)

  import uk.gov.nationalarchives.dri.preingest.loader.ClientAction.Actions
  val clientActions = json.extract[Actions]
  println("Client actions" + clientActions)

  val person = json.extract[Person]
  println("person " + person)

  "json parse test" should "check name is joe" in {
    // Fixed: the previous `person.name === "joe"` produced a Boolean that was
    // discarded, so the test could never fail. shouldBe actually asserts.
    person.name shouldBe "joe"
  }
}
| digital-preservation/dali | src/test/scala/uk/gov/tna/dri/jassh/testJSON.scala | Scala | mpl-2.0 | 2,455 |
package amailp.intellij.robot.findUsage
import com.intellij.usages.impl.rules.UsageType
/** Usage types used to group results in IntelliJ's Find Usages view. */
object UsageTypes {
  // Label under which Robot Framework keyword usages are grouped.
  val KeywordUsage = new UsageType(() => "Keyword usage")
}
| AmailP/robot-plugin | src/main/scala/amailp/intellij/robot/findUsage/UsageTypes.scala | Scala | gpl-3.0 | 170 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.util.Collections
import java.util.concurrent.TimeUnit
import org.apache.spark.api.java.JavaFutureAction
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler.{JobFailed, JobSucceeded, JobWaiter}
import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.util.{Failure, Try}
/**
* A future for the result of an action to support cancellation. This is an extension of the
* Scala Future interface to support cancellation.
* future的行动结果支持取消,这是Scala Future界面的扩展,以支持取消。
*/
/**
 * A future for the result of an action to support cancellation. This is an extension of the
 * Scala Future interface to support cancellation.
 */
trait FutureAction[T] extends Future[T] {
  // Note that we redefine methods of the Future trait here explicitly so we can specify a different
  // documentation (with reference to the word "action").

  /**
   * Cancels the execution of this action.
   */
  def cancel()

  /**
   * Blocks until this action completes.
   * @param atMost maximum wait time, which may be negative (no waiting is done), Duration.Inf
   *               for unbounded waiting, or a finite positive duration
   * @return this FutureAction
   */
  override def ready(atMost: Duration)(implicit permit: CanAwait): FutureAction.this.type

  /**
   * Awaits and returns the result (of type T) of this action.
   * @param atMost maximum wait time, which may be negative (no waiting is done), Duration.Inf
   *               for unbounded waiting, or a finite positive duration
   * @throws Exception exception during action execution
   * @return the result value if the action is completed within the specific maximum wait time
   */
  @throws(classOf[Exception])
  override def result(atMost: Duration)(implicit permit: CanAwait): T

  /**
   * When this action is completed, either through an exception or a value, applies the provided
   * function. The Try argument is a Success if the computation succeeded, or a Failure carrying
   * the thrown exception.
   */
  def onComplete[U](func: (Try[T]) => U)(implicit executor: ExecutionContext)

  /**
   * Returns whether the action has already been completed with a value or an exception.
   */
  override def isCompleted: Boolean

  /**
   * Returns whether the action has been cancelled.
   */
  def isCancelled: Boolean

  /**
   * The value of this Future.
   * If the future is not completed the returned value will be None. If the future is completed
   * the value will be Some(Success(t)) if it contains a valid result, or Some(Failure(error)) if
   * it contains an exception.
   */
  override def value: Option[Try[T]]

  /**
   * Blocks and returns the result of this job.
   * Note: Await.result blocks the calling thread until the future completes.
   */
  @throws(classOf[Exception])
  def get(): T = Await.result(this, Duration.Inf)

  /**
   * Returns the job IDs run by the underlying async operation.
   *
   * This returns the current snapshot of the job list. Certain operations may run multiple
   * jobs, so multiple calls to this method may return different lists.
   */
  def jobIds: Seq[Int]
}
/**
* A [[FutureAction]] holding the result of an action that triggers a single job. Examples include
* count, collect, reduce.
* FutureAction 持有触发单个作业的操作的结果,示例包括计数,收集,减少
*/
/**
 * A [[FutureAction]] holding the result of an action that triggers a single job. Examples include
 * count, collect, reduce.
 */
class SimpleFutureAction[T] private[spark](jobWaiter: JobWaiter[_], resultFunc: => T)
  extends FutureAction[T] {

  // Set when cancel() is called; the actual cancellation is delegated to the JobWaiter.
  @volatile private var _cancelled: Boolean = false

  override def cancel() {
    _cancelled = true
    jobWaiter.cancel()
  }

  override def ready(atMost: Duration)(implicit permit: CanAwait): SimpleFutureAction.this.type = {
    // Infinite timeout: simply block until the job has finished.
    if (!atMost.isFinite()) {
      awaitResult()
    } else jobWaiter.synchronized {
      // Finite timeout: wait on the JobWaiter monitor in a loop (guards against
      // spurious wake-ups) until the job completes or the deadline passes.
      val finishTime = System.currentTimeMillis() + atMost.toMillis
      while (!isCompleted) {
        val time = System.currentTimeMillis()
        if (time >= finishTime) {
          throw new TimeoutException
        } else {
          jobWaiter.wait(finishTime - time)
        }
      }
    }
    this
  }

  @throws(classOf[Exception])
  override def result(atMost: Duration)(implicit permit: CanAwait): T = {
    ready(atMost)(permit)
    // ready() above guarantees the outcome is available; rethrow a failure as-is.
    awaitResult() match {
      case scala.util.Success(res) => res
      case scala.util.Failure(e) => throw e
    }
  }

  override def onComplete[U](func: (Try[T]) => U)(implicit executor: ExecutionContext) {
    // The callback task blocks (inside awaitResult) on the supplied executor
    // until the job has finished, then invokes func with the outcome.
    executor.execute(new Runnable {
      override def run() {
        func(awaitResult())
      }
    })
  }

  override def isCompleted: Boolean = jobWaiter.jobFinished

  override def isCancelled: Boolean = _cancelled

  override def value: Option[Try[T]] = {
    if (jobWaiter.jobFinished) {
      Some(awaitResult())
    } else {
      None
    }
  }

  // Blocks until the job finishes and wraps the outcome in a Try:
  // Success(resultFunc) if the job succeeded, Failure(e) if it failed.
  private def awaitResult(): Try[T] = {
    jobWaiter.awaitResult() match {
      case JobSucceeded => scala.util.Success(resultFunc)
      case JobFailed(e: Exception) => scala.util.Failure(e)
    }
  }

  def jobIds: Seq[Int] = Seq(jobWaiter.jobId)
}
/**
* A [[FutureAction]] for actions that could trigger multiple Spark jobs. Examples include take,
* takeSample. Cancellation works by setting the cancelled flag to true and interrupting the
* action thread if it is being blocked by a job.
* A [[FutureAction]]可以触发多个Spark作业的操作,示例包括take,takeSample,
* 通过将取消的标志设置为true并中断操作线程(如果被作业阻止),取消工作。
*/
/**
 * A [[FutureAction]] for actions that could trigger multiple Spark jobs. Examples include take,
 * takeSample. Cancellation works by setting the cancelled flag to true and interrupting the
 * action thread if it is being blocked by a job.
 */
class ComplexFutureAction[T] extends FutureAction[T] {

  // Pointer to the thread that is executing the action. It is set when the action is run.
  @volatile private var thread: Thread = _

  // A flag indicating whether the future has been cancelled. This is used in case the future
  // is cancelled before the action was even run (and thus we have no thread to interrupt).
  @volatile private var _cancelled: Boolean = false

  // Ids of all jobs submitted through runJob so far.
  @volatile private var jobs: Seq[Int] = Nil

  // A promise used to signal the future.
  private val p = promise[T]()

  override def cancel(): Unit = this.synchronized {
    _cancelled = true
    if (thread != null) {
      thread.interrupt()
    }
  }

  /**
   * Executes some action enclosed in the closure. To properly enable cancellation, the closure
   * should use runJob implementation in this promise. See takeAsync for example.
   */
  def run(func: => T)(implicit executor: ExecutionContext): this.type = {
    scala.concurrent.future {
      thread = Thread.currentThread
      try {
        p.success(func)
      } catch {
        case e: Exception => p.failure(e)
      } finally {
        // This lock guarantees when calling `thread.interrupt()` in `cancel`,
        // thread won't be set to null.
        ComplexFutureAction.this.synchronized {
          thread = null
        }
      }
    }
    this
  }

  /**
   * Runs a Spark job. This is a wrapper around the same functionality provided by SparkContext
   * to enable cancellation.
   */
  def runJob[T, U, R](
      rdd: RDD[T],
      processPartition: Iterator[T] => U,
      partitions: Seq[Int],
      resultHandler: (Int, U) => Unit,
      resultFunc: => R) {
    // If the action hasn't been cancelled yet, submit the job. The check and the submitJob
    // command need to be in an atomic block.
    val job = this.synchronized {
      if (!isCancelled) {
        rdd.context.submitJob(rdd, processPartition, partitions, resultHandler, resultFunc)
      } else {
        throw new SparkException("Action has been cancelled")
      }
    }

    this.jobs = jobs ++ job.jobIds

    // Wait for the job to complete. If the action is cancelled (with an interrupt),
    // cancel the job and stop the execution. This is not in a synchronized block because
    // Await.ready eventually waits on the monitor in FutureJob.jobWaiter.
    try {
      Await.ready(job, Duration.Inf)
    } catch {
      case e: InterruptedException =>
        job.cancel()
        throw new SparkException("Action has been cancelled")
    }
  }

  override def isCancelled: Boolean = _cancelled

  @throws(classOf[InterruptedException])
  @throws(classOf[scala.concurrent.TimeoutException])
  override def ready(atMost: Duration)(implicit permit: CanAwait): this.type = {
    p.future.ready(atMost)(permit)
    this
  }

  @throws(classOf[Exception])
  override def result(atMost: Duration)(implicit permit: CanAwait): T = {
    p.future.result(atMost)(permit)
  }

  override def onComplete[U](func: (Try[T]) => U)(implicit executor: ExecutionContext): Unit = {
    p.future.onComplete(func)(executor)
  }

  override def isCompleted: Boolean = p.isCompleted

  override def value: Option[Try[T]] = p.future.value

  def jobIds: Seq[Int] = jobs
}
/**
 * Adapts a Scala [[FutureAction]] to the Java-facing JavaFutureAction interface,
 * converting results with the supplied converter function.
 */
private[spark]
class JavaFutureActionWrapper[S, T](futureAction: FutureAction[S], converter: S => T)
  extends JavaFutureAction[T] {

  import scala.collection.JavaConverters._

  override def isCancelled: Boolean = futureAction.isCancelled

  override def isDone: Boolean = {
    // According to java.util.Future's Javadoc, this returns True if the task was completed,
    // whether that completion was due to successful execution, an exception, or a cancellation.
    futureAction.isCancelled || futureAction.isCompleted
  }

  override def jobIds(): java.util.List[java.lang.Integer] = {
    // Expose the job ids as an unmodifiable Java list.
    Collections.unmodifiableList(futureAction.jobIds.map(Integer.valueOf).asJava)
  }

  // Shared implementation for both get() overloads.
  private def getImpl(timeout: Duration): T = {
    // This will throw TimeoutException on timeout:
    Await.ready(futureAction, timeout)
    futureAction.value.get match {
      case scala.util.Success(value) => converter(value)
      case Failure(exception) =>
        if (isCancelled) {
          throw new CancellationException("Job cancelled").initCause(exception)
        } else {
          // java.util.Future.get() wraps exceptions in ExecutionException
          throw new ExecutionException("Exception thrown by job", exception)
        }
    }
  }

  override def get(): T = getImpl(Duration.Inf)

  override def get(timeout: Long, unit: TimeUnit): T =
    getImpl(Duration.fromNanos(unit.toNanos(timeout)))

  override def cancel(mayInterruptIfRunning: Boolean): Boolean = synchronized {
    if (isDone) {
      // According to java.util.Future's Javadoc, this should return false if the task is completed.
      false
    } else {
      // We're limited in terms of the semantics we can provide here; our cancellation is
      // asynchronous and doesn't provide a mechanism to not cancel if the job is running.
      futureAction.cancel()
      true
    }
  }

}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/FutureAction.scala | Scala | apache-2.0 | 14,266 |
/*
* Part of NDLA learningpath-api.
* Copyright (C) 2016 NDLA
*
* See LICENSE
*
*/
package no.ndla.learningpathapi.controller
import java.util.UUID
import javax.servlet.http.HttpServletRequest
import no.ndla.learningpathapi.LearningpathApiProperties
import no.ndla.learningpathapi.model.api.ImportReport
import no.ndla.learningpathapi.model.domain._
import no.ndla.learningpathapi.model.domain
import no.ndla.learningpathapi.repository.LearningPathRepositoryComponent
import no.ndla.learningpathapi.service.{ReadService, UpdateService}
import no.ndla.learningpathapi.service.search.{SearchIndexService, SearchService}
import no.ndla.network.AuthUser
import org.json4s.Formats
import org.json4s.ext.EnumNameSerializer
import org.scalatra._
import scala.util.{Failure, Success}
/**
 * Internal (non-public) endpoints used by other NDLA services and operations
 * tooling: external-id lookup, search-index maintenance, and database dumps.
 */
trait InternController {
  this: SearchIndexService
    with SearchService
    with LearningPathRepositoryComponent
    with ReadService
    with UpdateService =>
  val internController: InternController

  class InternController extends NdlaController {

    // Serialize the domain enumerations by name in JSON payloads.
    protected implicit override val jsonFormats: Formats =
      org.json4s.DefaultFormats +
        new EnumNameSerializer(LearningPathStatus) +
        new EnumNameSerializer(LearningPathVerificationStatus) +
        new EnumNameSerializer(StepType) +
        new EnumNameSerializer(StepStatus) +
        new EnumNameSerializer(EmbedType)

    /**
     * Returns the authenticated caller's client id, or fails the request with
     * an AccessDeniedException when no client id is present.
     */
    def requireClientId(implicit request: HttpServletRequest): String = {
      AuthUser.getClientId match {
        case Some(clientId) => clientId
        case None => {
          logger.warn(s"Request made to ${request.getRequestURI} without clientId")
          throw new AccessDeniedException("You do not have access to the requested resource.")
        }
      }
    }

    // Maps an external (import) id to the internal learning-path id.
    get("/id/:external_id") {
      val externalId = params("external_id")
      learningPathRepository.getIdFromExternalId(externalId) match {
        case Some(id) => id.toString
        case None => NotFound()
      }
    }

    // Rebuilds the search index from the database.
    post("/index") {
      searchIndexService.indexDocuments match {
        case Success(reindexResult) =>
          val result =
            s"Completed indexing of ${reindexResult.totalIndexed} documents in ${reindexResult.millisUsed} ms."
          logger.info(result)
          Ok(result)
        case Failure(f) =>
          logger.warn(f.getMessage, f)
          InternalServerError(f.getMessage)
      }
    }

    // Deletes every search index; responds 500 if listing or any deletion fails.
    delete("/index") {
      def pluralIndex(n: Int) = if (n == 1) "1 index" else s"$n indexes"
      val deleteResults = searchIndexService.findAllIndexes(LearningpathApiProperties.SearchIndex) match {
        case Failure(f) => halt(status = 500, body = f.getMessage)
        case Success(indexes) =>
          indexes.map(index => {
            logger.info(s"Deleting index $index")
            searchIndexService.deleteIndexWithName(Option(index))
          })
      }
      val (errors, successes) = deleteResults.partition(_.isFailure)
      if (errors.nonEmpty) {
        val message = s"Failed to delete ${pluralIndex(errors.length)}: " +
          s"${errors.map(_.failed.get.getMessage).mkString(", ")}. " +
          s"${pluralIndex(successes.length)} were deleted successfully."
        halt(status = 500, body = message)
      } else {
        Ok(body = s"Deleted ${pluralIndex(successes.length)}")
      }
    }

    // Pages through learning paths as raw domain objects (defaults: page 1,
    // 250 per page, published paths only).
    get("/dump/learningpath/?") {
      val pageNo = intOrDefault("page", 1)
      val pageSize = intOrDefault("page-size", 250)
      val onlyIncludePublished = booleanOrDefault("only-published", true)
      readService.getLearningPathDomainDump(pageNo, pageSize, onlyIncludePublished)
    }

    // Inserts a raw domain learning path (used when restoring dumps).
    post("/dump/learningpath/?") {
      val dumpToInsert = extract[domain.LearningPath](request.body)
      updateService.insertDump(dumpToInsert)
    }

    // Checks which of the given paths occur in indexed learning paths
    // (presumably article paths referenced by steps — see SearchService.containsPath).
    get("/containsArticle") {
      val paths = paramAsListOfString("paths")
      searchService.containsPath(paths) match {
        case Success(result) => result.results
        case Failure(ex) => errorHandler(ex)
      }
    }
  }
}
| NDLANO/learningpath-api | src/main/scala/no/ndla/learningpathapi/controller/InternController.scala | Scala | gpl-3.0 | 4,031 |
package controllers
import javax.inject.{Inject, Singleton}
import constraints.FormConstraints
import controllers.NeedLogin.Authenticated
import helpers.ItemInquiryMail
import models.{LocaleInfoRepo, SiteItemRepo, ShoppingCartItemRepo}
import play.api.db.Database
import play.api.mvc.MessagesControllerComponents
/**
 * Concrete, injectable item-inquiry controller. All behaviour lives in
 * [[ItemInquiryReserveBase]]; this subclass only wires the DI dependencies.
 */
@Singleton
class ItemInquiryReserve @Inject() (
  cc: MessagesControllerComponents,
  fc: FormConstraints,
  authenticated: Authenticated,
  db: Database,
  itemInquiryMail: ItemInquiryMail,
  localeInfoRepo: LocaleInfoRepo,
  siteItemRepo: SiteItemRepo,
  shoppingCartItemRepo: ShoppingCartItemRepo
) extends ItemInquiryReserveBase(cc, fc, authenticated, db, itemInquiryMail, localeInfoRepo, siteItemRepo, shoppingCartItemRepo)
| ruimo/store2 | app/controllers/ItemInquiryReserve.scala | Scala | apache-2.0 | 745 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.workbench.lift.snippet
import xml.NodeSeq
import net.liftweb.http.SHtml
import de.fuberlin.wiwiss.silk.workbench.learning._
import de.fuberlin.wiwiss.silk.workbench.lift.util.JS
/** Lift snippet rendering a button that resets the current learning session. */
class LearnResetButton {

  /** Binds the snippet: an AJAX "Reset" button that triggers [[reset]]. */
  def render(xhtml: NodeSeq): NodeSeq = {
    SHtml.ajaxButton("Reset", () => reset())
  }

  // Clears cached learning state, cancels any running learning tasks, resets
  // the task holders, and finally reloads the page.
  private def reset() = {
    CurrentPool.reset()
    CurrentPopulation.reset()
    CurrentValidationLinks.reset()
    CurrentActiveLearningTask().cancel()
    CurrentLearningTask().cancel()
    CurrentActiveLearningTask.reset()
    CurrentLearningTask.reset()
    JS.Reload
  }
} | fusepoolP3/p3-silk | silk-workbench-outdated/src/main/scala/de/fuberlin/wiwiss/silk/workbench/lift/snippet/LearnResetButton.scala | Scala | apache-2.0 | 1,175 |
package lr2
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import classification._
import classification.OptimizerType._
import classification.RegularizerType._
import utilities.SparseMatrix
import utilities.SparseVector
import Optimizers._
/**
 * Logistic-regression model variant built on top of [[AVGM]], sharing its
 * weights, per-block local models, and feature counts.
 */
class MEM (
    override val weights: Array[Double],
    override val localModels: RDD[(Int, LocalModel)],
    override val featureCount: Array[Int])
  extends AVGM(weights, localModels, featureCount)
  with Serializable{

  // NOTE(review): the optType argument is ignored — training always passes CD
  // (coordinate descent) to AVGM.train. Presumably intentional for MEM; confirm.
  override def train(
      trainingData: RDD[(Int, (Array[Byte], SparseMatrix))],
      maxNumIter: Int, optType: OptimizerType,
      regPara: Double, regType: RegularizerType): MEM = {
    val avgm =
      super.train(trainingData, maxNumIter, CD, regPara, regType)
    // Re-wrap the trained AVGM state as a MEM.
    new MEM(avgm.weights, avgm.localModels, avgm.featureCount)
  }
} | XianXing/bdl | src/main/scala/bdl/lr2/MEM.scala | Scala | apache-2.0 | 870 |
package io.scalac.slack.common
import akka.actor.{Actor, ActorLogging}
import io.scalac.slack.api.Ok
import io.scalac.slack.models.{DirectChannel, Presence, SlackUser}
import scala.language.{implicitConversions, postfixOps}
/**
* Maintainer: @marioosh
*/
/**
 * Actor that caches Slack users and direct-message (IM) channels and answers
 * lookup requests by id or name.
 */
class UsersStorage extends Actor with ActorLogging {

  // Catalogs are replaced wholesale on registration; newest entry wins.
  var userCatalog = List.empty[UserInfo]
  var channelCatalog = List.empty[DirectChannel]

  // Converts the wire-level SlackUser into the trimmed internal representation.
  implicit def convertUsers(su: SlackUser): UserInfo = UserInfo(su.id.trim, su.name.trim, su.presence)

  override def receive: Receive = {
    case RegisterUsers(users@_*) =>
      // Deleted accounts are skipped; SlackUser -> UserInfo via the implicit above.
      users.filterNot(u => u.deleted).foreach(addUser(_))
      sender ! Ok
    case FindUser(key) => sender ! userCatalog.find { user =>
      // Key may be a user id or a user name.
      // NOTE(review): the key is lowercased but stored ids/names keep their
      // original case — uppercase Slack ids may never match; confirm intent.
      val matcher = key.trim.toLowerCase
      matcher == user.id || matcher == user.name
    }
    case RegisterDirectChannels(channels@_*) =>
      channels foreach addDirectChannel
      sender ! Ok
    case FindChannel(key) =>
      // Resolve a user name to its id first, then look up the IM channel by
      // either channel id or owning user id.
      val id = userCatalog.find(u => u.name == key.trim.toLowerCase) match {
        case Some(user) => user.id
        case None => key
      }
      sender ! channelCatalog.find(c => c.id == id || c.userId == id).map(_.id)
  }

  // Replaces any previously registered channel for the same user.
  def addDirectChannel(channel: DirectChannel): Unit = {
    channelCatalog = channel :: channelCatalog.filterNot(_.userId == channel.userId)
  }

  // Replaces any previous catalog entry with the same user id.
  private def addUser(user: UserInfo): Unit = {
    userCatalog = user :: userCatalog.filterNot(_.id == user.id)
  }
}
/** Trimmed internal representation of a Slack user. */
case class UserInfo(id: String, name: String, presence: Presence) {
  /**
   * Slack mention link for this user, e.g. `<@U123|bob>`.
   * Fixed: the name part previously emitted the literal word "name" because
   * the interpolator `$` was missing.
   */
  def userLink() = s"""<@$id|$name>"""
}
// Registers (or refreshes) the given users in the catalog; the actor replies with Ok.
case class RegisterUsers(slackUsers: SlackUser*)
// Registers (or refreshes) direct-message channels; the actor replies with Ok.
case class RegisterDirectChannels(ims: DirectChannel*)
// Asks for a user by id or name; the actor replies with an Option[UserInfo].
case class FindUser(key: String)
case class FindChannel(key: String) | ScalaConsultants/scala-slack-bot-core | src/main/scala/io/scalac/slack/common/UsersStorage.scala | Scala | mit | 1,737 |
//package service
//
//import scala.slick.session.Database
//import util.ControlUtil._
//import java.sql.DriverManager
//import org.apache.commons.io.FileUtils
//import scala.util.Random
//import java.io.File
//
//trait ServiceSpecBase {
//
// def withTestDB[A](action: => A): A = {
// util.FileUtil.withTmpDir(new File(FileUtils.getTempDirectory(), Random.alphanumeric.take(10).mkString)){ dir =>
// val (url, user, pass) = (s"jdbc:h2:${dir}", "sa", "sa")
// org.h2.Driver.load()
// using(DriverManager.getConnection(url, user, pass)){ conn =>
// servlet.AutoUpdate.versions.reverse.foreach(_.update(conn))
// }
// Database.forURL(url, user, pass).withSession {
// action
// }
// }
// }
//
//}
| campolake/gitbucketV2.1 | src/test/scala/service/ServiceSpecBase.scala | Scala | apache-2.0 | 747 |
/*
Copyright 2016 Douglas Myers-Turnbull
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.github.dmyersturnbull.atomtin.core
import java.io._
import java.net.URL
import java.util.zip.GZIPInputStream
import org.slf4j.LoggerFactory
import scala.io.Source
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration
import com.github.dmyersturnbull.atomtin.core.model.PdbAtom
import scalacache._
/**
* Go-to class for atom-tin.
* Provides functionality to download, parse, and cache PDB files from the RCSB PDB, and to delete PDB files from the cache.
* @author Douglas Myers-Turnbull
*/
/**
 * Go-to class for atom-tin.
 * Downloads, parses, and caches PDB files from the RCSB PDB, and deletes entries
 * from the cache.
 *
 * @param cache  backing scalacache cache holding parsed atoms keyed by PDB id
 * @param source how to fetch atoms for a PDB id (defaults to downloading from the RCSB)
 * @author Douglas Myers-Turnbull
 */
class AtomTin(cache: ScalaCache, source: String => TraversableOnce[PdbAtom] = pdbId => AtomTin.download(pdbId, warn = true))
             (implicit ec: ExecutionContext) extends Object with Closeable {

  implicit val scalaCache = cache
  protected val logger = LoggerFactory.getLogger(classOf[AtomTin])

  /**
   * Loads the atoms for a PDB id, blocking until they are available.
   * Throws TimeoutException if `duration` elapses first, or the load's failure
   * cause if it failed — same behavior as the previous ready + value.get.get,
   * expressed directly with Await.result.
   */
  def loadAndWait(pdbId: String, duration: Duration = Duration.Inf): TraversableOnce[PdbAtom] =
    Await.result(load(pdbId), duration)

  /** Loads atoms from the cache, falling back to `source` (and caching the result) on a miss. */
  def load(pdbId: String): Future[TraversableOnce[PdbAtom]] = {
    // TODO Why can't I just use caching()?
    get(pdbId).asInstanceOf[Future[Option[TraversableOnce[PdbAtom]]]] map {
      case Some(atoms) => atoms
      case None =>
        logger.debug("Retrieving atoms for {} from source...", pdbId)
        val r = source(pdbId)
        logger.debug("Done. Adding atoms for {} to cache.", pdbId)
        put(pdbId)(r) // ditto
        r
    }
  }

  /** Closes the underlying cache. */
  override def close() {
    cache.cache.close()
  }

  /** Removes a single PDB id from the cache. */
  def delete(pdbId: String) {
    remove(pdbId)
    logger.debug("Deleted {} from cache", pdbId)
  }

  /** Empties the cache entirely. */
  def deleteAll() {
    removeAll()
    logger.debug("Deleted all entries in cache")
  }
}
/** Companion: download helpers for fetching gzipped PDB files from the RCSB. */
object AtomTin {
  protected val logger = LoggerFactory.getLogger(classOf[AtomTin])

  /** URL of the gzipped PDB file for the given id on the RCSB server. */
  def urlFor(pdbId: String) = new URL(s"http://www.rcsb.org/pdb/files/${pdbId.toUpperCase}.pdb.gz")

  /** Downloads and parses a PDB entry with parser warnings enabled. */
  def download(pdbId: String): TraversableOnce[PdbAtom] = download(pdbId, warn = true)

  /** Downloads the gzipped PDB file and streams its lines through the parser. */
  def download(pdbId: String, warn: Boolean): TraversableOnce[PdbAtom] = {
    val stream = new GZIPInputStream(urlFor(pdbId).openStream())
    // Close the underlying stream once the Source is closed/consumed.
    val lines = Source.fromInputStream(stream).withClose(() => stream.close()).getLines()
    new PdbParser(warn = warn).parse(lines)
  }
} | dmyersturnbull/atom-tin | core/src/main/scala/com/github/dmyersturnbull/atomtin/core/AtomTin.scala | Scala | apache-2.0 | 2,806 |
package io.abacus.pipeline
import java.util.concurrent.atomic.AtomicLong
import scala.{specialized => spec}
/** Pipeline stage that simply tallies how many elements pass through it. */
class CountingPipeline[@spec (Int) T]() extends Pipeline[T, Long, Long] {

  // Thread-safe running total of processed elements (public, as before).
  val count = new AtomicLong(0)

  /** Number of elements seen so far. */
  override def results: Long = count.get

  /** Records one element and returns the updated total. */
  override def process(elem: T): Long = count.incrementAndGet()
}
| socrata-platform/palamedes | src/main/scala/io/abacus/pipeline/CountingPipeline.scala | Scala | mit | 335 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.mapreduce.TaskAttemptContext
import org.apache.orc.OrcFile
import org.apache.orc.mapred.{OrcOutputFormat => OrcMapRedOutputFormat, OrcStruct}
import org.apache.orc.mapreduce.{OrcMapreduceRecordWriter, OrcOutputFormat}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.OutputWriter
import org.apache.spark.sql.types._
/**
 * An [[OutputWriter]] that writes Spark [[InternalRow]]s to a single ORC file at `path`.
 *
 * Rows are converted with an [[OrcSerializer]] and appended through the ORC
 * mapreduce record writer; the writer is created eagerly on construction.
 */
private[sql] class OrcOutputWriter(
    val path: String,
    dataSchema: StructType,
    context: TaskAttemptContext)
  extends OutputWriter {

  // Converts InternalRows into OrcStructs matching dataSchema.
  private[this] val serializer = new OrcSerializer(dataSchema)

  private val recordWriter = {
    // Override the work file so output lands exactly at `path` rather than a
    // task-attempt-derived location.
    val format = new OrcOutputFormat[OrcStruct]() {
      override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path =
        new Path(path)
    }
    val outputFile = format.getDefaultWorkFile(context, ".orc")
    val writerOptions = OrcMapRedOutputFormat.buildOptions(context.getConfiguration)
    writerOptions.setSchema(OrcUtils.orcTypeDescription(dataSchema))
    val fileWriter = OrcFile.createWriter(outputFile, writerOptions)
    val mapreduceWriter = new OrcMapreduceRecordWriter[OrcStruct](fileWriter)
    // Stamp the Spark version into the ORC file metadata before first use.
    OrcUtils.addSparkVersionMetadata(fileWriter)
    mapreduceWriter
  }

  /** Serializes and appends a single row. */
  override def write(row: InternalRow): Unit =
    recordWriter.write(NullWritable.get(), serializer.serialize(row))

  /** Flushes and closes the underlying ORC writer. */
  override def close(): Unit = recordWriter.close(context)
}
| vinodkc/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcOutputWriter.scala | Scala | apache-2.0 | 2,362 |
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.routing
import scala.collection.immutable
import akka.actor.ActorContext
import akka.actor.Props
import akka.dispatch.Dispatchers
import com.typesafe.config.Config
import akka.actor.SupervisorStrategy
import akka.japi.Util.immutableSeq
import akka.actor.Address
import akka.actor.ExtendedActorSystem
import akka.actor.ActorSystem
import java.util.concurrent.atomic.AtomicReference
import akka.actor.ActorRef
import akka.serialization.SerializationExtension
import scala.util.control.NonFatal
import akka.event.Logging
import akka.actor.ActorPath
object ConsistentHashingRouter {
  /**
   * If you don't define the `hashMapping` when
   * constructing the [[akka.routing.ConsistentHashingRouter]]
   * the messages need to implement this interface to define what
   * data to use for the consistent hash key. Note that it's not
   * the hash, but the data to be hashed.
   *
   * If returning an `Array[Byte]` or String it will be used as is,
   * otherwise the configured [[akka.serialization.Serializer]]
   * will be applied to the returned data.
   *
   * If messages can't implement this interface themselves,
   * it's possible to wrap the messages in
   * [[akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope]],
   * or define the hash key via the router's `hashMapping` / `withHashMapper`.
   */
  trait ConsistentHashable {
    def consistentHashKey: Any
  }
  /**
   * If you don't define the `hashMapping` when
   * constructing the [[akka.routing.ConsistentHashingRouter]]
   * and messages can't implement [[akka.routing.ConsistentHashingRouter.ConsistentHashable]]
   * themselves they can be wrapped by this envelope instead. The
   * router will only send the wrapped message to the destination,
   * i.e. the envelope will be stripped off.
   */
  @SerialVersionUID(1L)
  final case class ConsistentHashableEnvelope(message: Any, hashKey: Any)
    extends ConsistentHashable with RouterEnvelope {
    override def consistentHashKey: Any = hashKey
  }
  /**
   * Partial function from message to the data to
   * use for the consistent hash key. Note that it's not
   * the hash that is to be returned, but the data to be hashed.
   *
   * If returning an `Array[Byte]` or String it will be used as is,
   * otherwise the configured [[akka.serialization.Serializer]]
   * will be applied to the returned data.
   */
  type ConsistentHashMapping = PartialFunction[Any, Any]
  /** Shared "defined nowhere" mapping, used as the default `hashMapping`. */
  @SerialVersionUID(1L)
  object emptyConsistentHashMapping extends ConsistentHashMapping {
    def isDefinedAt(x: Any) = false
    def apply(x: Any) = throw new UnsupportedOperationException("Empty ConsistentHashMapping apply()")
  }
  /**
   * JAVA API
   * Mapping from message to the data to use for the consistent hash key.
   * Note that it's not the hash that is to be returned, but the data to be
   * hashed.
   *
   * May return `null` to indicate that the message is not handled by
   * this mapping.
   *
   * If returning an `Array[Byte]` or String it will be used as is,
   * otherwise the configured [[akka.serialization.Serializer]]
   * will be applied to the returned data.
   */
  trait ConsistentHashMapper {
    def hashKey(message: Any): Any
  }
  /**
   * INTERNAL API
   *
   * Adapts a Java [[ConsistentHashMapper]] to the Scala `ConsistentHashMapping`
   * partial function. NOTE: `mapper.hashKey` is invoked twice per message
   * (once in the guard, once to produce the key), so implementations should be cheap.
   */
  private[akka] def hashMappingAdapter(mapper: ConsistentHashMapper): ConsistentHashMapping = {
    case message if (mapper.hashKey(message).asInstanceOf[AnyRef] ne null) ⇒
      mapper.hashKey(message)
  }
  /**
   * Creates a new ConsistentHashingRouter, routing to the specified routees
   */
  @deprecated("Use ConsistentHashingGroup", "2.3")
  def apply(routees: immutable.Iterable[ActorRef]): ConsistentHashingRouter =
    new ConsistentHashingRouter(routees = routees map (_.path.toString))
  /**
   * Java API to create router with the supplied 'routees' actors.
   */
  @deprecated("Use ConsistentHashingGroup", "2.3")
  def create(routees: java.lang.Iterable[ActorRef]): ConsistentHashingRouter = apply(immutableSeq(routees))
}
object ConsistentHashingRoutingLogic {
  /**
   * Returns the [[akka.actor.Address]] to use for the `selfAddress` parameter:
   * the default address of the given actor system's provider.
   */
  def defaultAddress(system: ActorSystem): Address = {
    val extendedSystem = system.asInstanceOf[ExtendedActorSystem]
    extendedSystem.provider.getDefaultAddress
  }
}
/**
* Uses consistent hashing to select a routee based on the sent message.
*
 * There are three ways to define what data to use for the consistent hash key.
*
* 1. You can define `hashMapping` / `withHashMapper`
* of the router to map incoming messages to their consistent hash key.
* This makes the decision transparent for the sender.
*
* 2. The messages may implement [[akka.routing.ConsistentHashingRouter.ConsistentHashable]].
* The key is part of the message and it's convenient to define it together
* with the message definition.
*
 * 3. The messages can be wrapped in a [[akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope]]
* to define what data to use for the consistent hash key. The sender knows
* the key to use.
*
 * These ways to define the consistent hash key can be used together and at
* the same time for one router. The `hashMapping` is tried first.
*
* @param virtualNodesFactor number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
*
* @param hashMapping partial function from message to the data to
* use for the consistent hash key
*
* @param system the actor system hosting this router
*
*/
@SerialVersionUID(1L)
final case class ConsistentHashingRoutingLogic(
  system: ActorSystem,
  virtualNodesFactor: Int = 0,
  hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
  extends RoutingLogic {
  import ConsistentHashingRouter._
  /**
   * Java API
   * @param system the actor system hosting this router
   */
  def this(system: ActorSystem) =
    this(system, virtualNodesFactor = 0, hashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
  // Full default address of this node, so the hash ring renders identically on every node.
  private val selfAddress = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
  // A virtualNodesFactor of 0 means "use the system-configured default".
  val vnodes =
    if (virtualNodesFactor == 0) system.settings.DefaultVirtualNodesFactor
    else virtualNodesFactor
  private lazy val log = Logging(system, getClass)
  /**
   * Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
   */
  def withVirtualNodesFactor(vnodes: Int): ConsistentHashingRoutingLogic = copy(virtualNodesFactor = vnodes)
  /**
   * Java API: Setting the mapping from message to the data to use for the consistent hash key.
   */
  def withHashMapper(mapper: ConsistentHashingRouter.ConsistentHashMapper): ConsistentHashingRoutingLogic =
    copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapper))
  // tuple of routees and the ConsistentHash, updated together in updateConsistentHash
  private val consistentHashRef = new AtomicReference[(immutable.IndexedSeq[Routee], ConsistentHash[ConsistentRoutee])]((null, null))
  override def select(message: Any, routees: immutable.IndexedSeq[Routee]): Routee =
    if (routees.isEmpty) NoRoutee
    else {
      // update consistentHash when routees has changed
      // changes to routees are rare and when no changes this is a quick operation
      def updateConsistentHash(): ConsistentHash[ConsistentRoutee] = {
        val oldConsistentHashTuple = consistentHashRef.get
        val (oldRoutees, oldConsistentHash) = oldConsistentHashTuple
        if (routees ne oldRoutees) {
          // when other instance, same content, no need to re-hash, but try to set routees
          val consistentHash =
            if (routees == oldRoutees) oldConsistentHash
            else ConsistentHash(routees.map(ConsistentRoutee(_, selfAddress)), vnodes) // re-hash
          // ignore, don't update, in case of CAS failure; a later call will retry
          consistentHashRef.compareAndSet(oldConsistentHashTuple, (routees, consistentHash))
          consistentHash
        } else oldConsistentHash
      }
      // Resolves the routee for the given key data; bytes/strings are hashed as-is,
      // any other object is run through the configured serializer first.
      def target(hashData: Any): Routee = try {
        val currentConsistenHash = updateConsistentHash()
        if (currentConsistenHash.isEmpty) NoRoutee
        else hashData match {
          case bytes: Array[Byte] ⇒ currentConsistenHash.nodeFor(bytes).routee
          case str: String ⇒ currentConsistenHash.nodeFor(str).routee
          case x: AnyRef ⇒ currentConsistenHash.nodeFor(SerializationExtension(system).serialize(x).get).routee
        }
      } catch {
        case NonFatal(e) ⇒
          // serialization failed; drop the message to NoRoutee rather than fail the router
          log.warning("Couldn't route message with consistent hash key [{}] due to [{}]", hashData, e.getMessage)
          NoRoutee
      }
      // hashMapping takes precedence over messages implementing ConsistentHashable.
      message match {
        case _ if hashMapping.isDefinedAt(message) ⇒ target(hashMapping(message))
        case hashable: ConsistentHashable ⇒ target(hashable.consistentHashKey)
        case other ⇒
          log.warning("Message [{}] must be handled by hashMapping, or implement [{}] or be wrapped in [{}]",
            message.getClass.getName, classOf[ConsistentHashable].getName,
            classOf[ConsistentHashableEnvelope].getName)
          NoRoutee
      }
    }
}
/**
* A router pool that uses consistent hashing to select a routee based on the
* sent message. The selection is described in [[akka.routing.ConsistentHashingRoutingLogic]].
*
* The configuration parameter trumps the constructor arguments. This means that
* if you provide `nrOfInstances` during instantiation they will be ignored if
* the router is defined in the configuration file for the actor being used.
*
* <h1>Supervision Setup</h1>
*
* Any routees that are created by a router will be created as the router's children.
* The router is therefore also the children's supervisor.
*
* The supervision strategy of the router actor can be configured with
* [[#withSupervisorStrategy]]. If no strategy is provided, routers default to
* a strategy of “always escalate”. This means that errors are passed up to the
* router's supervisor for handling.
*
* The router's supervisor will treat the error as an error with the router itself.
* Therefore a directive to stop or restart will cause the router itself to stop or
* restart. The router, in turn, will cause its children to stop and restart.
*
* @param nrOfInstances initial number of routees in the pool
*
* @param resizer optional resizer that dynamically adjust the pool size
*
* @param virtualNodesFactor number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
*
* @param hashMapping partial function from message to the data to
* use for the consistent hash key
*
* @param supervisorStrategy strategy for supervising the routees, see 'Supervision Setup'
*
* @param routerDispatcher dispatcher to use for the router head actor, which handles
* supervision, death watch and router management messages
*/
@SerialVersionUID(1L)
final case class ConsistentHashingPool(
  override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
  val virtualNodesFactor: Int = 0,
  val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
  override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
  override val usePoolDispatcher: Boolean = false)
  extends Pool with PoolOverrideUnsetConfig[ConsistentHashingPool] {
  /** Constructor reading nr-of-instances, resizer and pool-dispatcher from configuration. */
  def this(config: Config) =
    this(
      nrOfInstances = config.getInt("nr-of-instances"),
      resizer = DefaultResizer.fromConfig(config),
      usePoolDispatcher = config.hasPath("pool-dispatcher"))
  /**
   * Java API
   * @param nr initial number of routees in the pool
   */
  def this(nr: Int) = this(nrOfInstances = nr)
  override def createRouter(system: ActorSystem): Router =
    new Router(ConsistentHashingRoutingLogic(system, virtualNodesFactor, hashMapping))
  /**
   * Setting the supervisor strategy to be used for the “head” Router actor.
   */
  def withSupervisorStrategy(strategy: SupervisorStrategy): ConsistentHashingPool = copy(supervisorStrategy = strategy)
  /**
   * Setting the resizer to be used.
   */
  def withResizer(resizer: Resizer): ConsistentHashingPool = copy(resizer = Some(resizer))
  /**
   * Setting the dispatcher to be used for the router head actor, which handles
   * supervision, death watch and router management messages.
   */
  def withDispatcher(dispatcherId: String): ConsistentHashingPool = copy(routerDispatcher = dispatcherId)
  /**
   * Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
   */
  def withVirtualNodesFactor(vnodes: Int): ConsistentHashingPool = copy(virtualNodesFactor = vnodes)
  /**
   * Java API: Setting the mapping from message to the data to use for the consistent hash key.
   */
  def withHashMapper(mapper: ConsistentHashingRouter.ConsistentHashMapper): ConsistentHashingPool =
    copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapper))
  /**
   * Uses the resizer and/or the supervisor strategy of the given RouterConfig
   * if this RouterConfig doesn't have one, i.e. the resizer defined in code is used if
   * resizer was not defined in config.
   * Uses the `hashMapping` defined in code, since that can't be defined in configuration.
   */
  override def withFallback(other: RouterConfig): RouterConfig = other match {
    case _: FromConfig | _: NoRouter ⇒ this.overrideUnsetConfig(other)
    case otherRouter: ConsistentHashingPool ⇒ (copy(hashMapping = otherRouter.hashMapping)).overrideUnsetConfig(other)
    case otherRouter: ConsistentHashingRouter ⇒ (copy(hashMapping = otherRouter.hashMapping)).overrideUnsetConfig(other)
    case _ ⇒ throw new IllegalArgumentException("Expected ConsistentHashingPool, got [%s]".format(other))
  }
}
/**
* A router group that uses consistent hashing to select a routee based on the
* sent message. The selection is described in [[akka.routing.ConsistentHashingRoutingLogic]].
*
* The configuration parameter trumps the constructor arguments. This means that
* if you provide `paths` during instantiation they will be ignored if
* the router is defined in the configuration file for the actor being used.
*
* @param paths string representation of the actor paths of the routees, messages are
* sent with [[akka.actor.ActorSelection]] to these paths
*
* @param virtualNodesFactor number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
*
* @param hashMapping partial function from message to the data to
* use for the consistent hash key
*
* @param routerDispatcher dispatcher to use for the router head actor, which handles
* router management messages
*/
@SerialVersionUID(1L)
final case class ConsistentHashingGroup(
  override val paths: immutable.Iterable[String],
  val virtualNodesFactor: Int = 0,
  val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
  extends Group {
  /** Constructor reading the routee paths from configuration. */
  def this(config: Config) =
    this(paths = immutableSeq(config.getStringList("routees.paths")))
  /**
   * Java API
   * @param routeePaths string representation of the actor paths of the routees, messages are
   *   sent with [[akka.actor.ActorSelection]] to these paths
   */
  def this(routeePaths: java.lang.Iterable[String]) = this(paths = immutableSeq(routeePaths))
  override def createRouter(system: ActorSystem): Router =
    new Router(ConsistentHashingRoutingLogic(system, virtualNodesFactor, hashMapping))
  /**
   * Setting the dispatcher to be used for the router head actor, which handles
   * router management messages
   */
  def withDispatcher(dispatcherId: String): ConsistentHashingGroup = copy(routerDispatcher = dispatcherId)
  /**
   * Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
   */
  def withVirtualNodesFactor(vnodes: Int): ConsistentHashingGroup = copy(virtualNodesFactor = vnodes)
  /**
   * Java API: Setting the mapping from message to the data to use for the consistent hash key.
   */
  def withHashMapper(mapper: ConsistentHashingRouter.ConsistentHashMapper): ConsistentHashingGroup =
    copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapper))
  /**
   * Uses the `hashMapping` defined in code, since that can't be defined in configuration.
   */
  override def withFallback(other: RouterConfig): RouterConfig = other match {
    case _: FromConfig | _: NoRouter ⇒ super.withFallback(other)
    case otherRouter: ConsistentHashingGroup ⇒ copy(hashMapping = otherRouter.hashMapping)
    case _ ⇒ throw new IllegalArgumentException("Expected ConsistentHashingGroup, got [%s]".format(other))
  }
}
/**
* INTERNAL API
* Important to use ActorRef with full address, with host and port, in the hash ring,
* so that same ring is produced on different nodes.
* The ConsistentHash uses toString of the ring nodes, and the ActorRef itself
* isn't a good representation, because LocalActorRef doesn't include the
* host and port.
*/
private[akka] case class ConsistentRoutee(routee: Routee, selfAddress: Address) {
  // The string form is what ConsistentHash hashes, so it must contain the full
  // host:port address to produce the same ring on every node.
  override def toString: String = routee match {
    case ActorRefRoutee(ref)       ⇒ toStringWithFullAddress(ref.path)
    case ActorSelectionRoutee(sel) ⇒ toStringWithFullAddress(sel.anchorPath) + sel.pathString
    case other                     ⇒ other.toString
  }
  // Renders the path with an explicit address: a local path carries no host/port
  // of its own (Address(_, _, None, None)), so substitute selfAddress for it.
  // (Renamed from the camelCase-violating `toStringWithfullAddress`; private, so
  // no external callers are affected.)
  private def toStringWithFullAddress(path: ActorPath): String = {
    path.address match {
      case Address(_, _, None, None) ⇒ path.toStringWithAddress(selfAddress)
      case _                         ⇒ path.toString
    }
  }
}
/**
* A Router that uses consistent hashing to select a connection based on the
* sent message.
*
 * There are three ways to define what data to use for the consistent hash key.
*
* 1. You can define `hashMapping` / `withHashMapper`
* of the router to map incoming messages to their consistent hash key.
* This makes the decision transparent for the sender.
*
* 2. The messages may implement [[akka.routing.ConsistentHashingRouter.ConsistentHashable]].
* The key is part of the message and it's convenient to define it together
* with the message definition.
*
 * 3. The messages can be wrapped in a [[akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope]]
* to define what data to use for the consistent hash key. The sender knows
* the key to use.
*
 * These ways to define the consistent hash key can be used together and at
* the same time for one router. The `hashMapping` is tried first.
*
* Please note that providing both 'nrOfInstances' and 'routees' does not make logical
* sense as this means that the router should both create new actors and use the 'routees'
* actor(s). In this case the 'nrOfInstances' will be ignored and the 'routees' will be used.
* <br>
* <b>The</b> configuration parameter trumps the constructor arguments. This means that
* if you provide either 'nrOfInstances' or 'routees' during instantiation they will
* be ignored if the router is defined in the configuration file for the actor being used.
*
* <h1>Supervision Setup</h1>
*
* Any routees that are created by a router will be created as the router's children.
* The router is therefore also the children's supervisor.
*
* The supervision strategy of the router actor can be configured with
* [[#withSupervisorStrategy]]. If no strategy is provided, routers default to
* a strategy of “always escalate”. This means that errors are passed up to the
* router's supervisor for handling.
*
* The router's supervisor will treat the error as an error with the router itself.
* Therefore a directive to stop or restart will cause the router itself to stop or
* restart. The router, in turn, will cause its children to stop and restart.
*
* @param routees string representation of the actor paths of the routees that will be looked up
* using `actorFor` in [[akka.actor.ActorRefProvider]]
* @param virtualNodesFactor number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
* @param hashMapping partial function from message to the data to
* use for the consistent hash key
*/
@SerialVersionUID(1L)
@deprecated("Use ConsistentHashingPool or ConsistentHashingGroup", "2.3")
case class ConsistentHashingRouter(
  nrOfInstances: Int = 0, routees: immutable.Iterable[String] = Nil, override val resizer: Option[Resizer] = None,
  val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
  val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
  val virtualNodesFactor: Int = 0,
  val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
  extends DeprecatedRouterConfig with PoolOverrideUnsetConfig[ConsistentHashingRouter] {
  /**
   * Java API: Constructor that sets nrOfInstances to be created.
   */
  def this(nr: Int) = this(nrOfInstances = nr)
  /**
   * Java API: Constructor that sets the routees to be used.
   *
   * @param routeePaths string representation of the actor paths of the routees that will be looked up
   *   using `actorFor` in [[akka.actor.ActorRefProvider]]
   */
  def this(routeePaths: java.lang.Iterable[String]) = this(routees = immutableSeq(routeePaths))
  /**
   * Java API: Constructor that sets the resizer to be used.
   */
  def this(resizer: Resizer) = this(resizer = Some(resizer))
  override def paths: immutable.Iterable[String] = routees
  /**
   * Java API for setting routerDispatcher
   */
  def withDispatcher(dispatcherId: String): ConsistentHashingRouter = copy(routerDispatcher = dispatcherId)
  /**
   * Java API for setting the supervisor strategy to be used for the “head”
   * Router actor.
   */
  def withSupervisorStrategy(strategy: SupervisorStrategy): ConsistentHashingRouter = copy(supervisorStrategy = strategy)
  /**
   * Java API for setting the resizer to be used.
   */
  def withResizer(resizer: Resizer): ConsistentHashingRouter = copy(resizer = Some(resizer))
  /**
   * Java API for setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]]
   */
  def withVirtualNodesFactor(vnodes: Int): ConsistentHashingRouter = copy(virtualNodesFactor = vnodes)
  /**
   * Java API for setting the mapping from message to the data to use for the consistent hash key.
   */
  def withHashMapper(mapping: ConsistentHashingRouter.ConsistentHashMapper) =
    copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapping))
  /**
   * Uses the resizer and/or the supervisor strategy of the given RouterConfig
   * if this RouterConfig doesn't have one, i.e. the resizer defined in code is used if
   * resizer was not defined in config.
   * Uses the `hashMapping` defined in code, since that can't be defined in configuration.
   */
  override def withFallback(other: RouterConfig): RouterConfig = other match {
    case _: FromConfig | _: NoRouter ⇒ this.overrideUnsetConfig(other)
    case otherRouter: ConsistentHashingRouter ⇒ (copy(hashMapping = otherRouter.hashMapping)).overrideUnsetConfig(other)
    case _ ⇒ throw new IllegalArgumentException("Expected ConsistentHashingRouter, got [%s]".format(other))
  }
  override def createRouter(system: ActorSystem): Router =
    new Router(ConsistentHashingRoutingLogic(system, virtualNodesFactor, hashMapping))
}
/**
* INTERNAL API
* Important to use ActorRef with full address, with host and port, in the hash ring,
* so that same ring is produced on different nodes.
* The ConsistentHash uses toString of the ring nodes, and the ActorRef itself
* isn't a good representation, because LocalActorRef doesn't include the
* host and port.
*/
@deprecated("Replaced by ConsistentRoutee", "2.3")
private[akka] case class ConsistentActorRef(actorRef: ActorRef, selfAddress: Address) {
  /**
   * Renders the actor path with its full address; a local path has no
   * host/port of its own, so `selfAddress` is substituted for it.
   */
  override def toString: String = {
    val path = actorRef.path
    path.address match {
      case Address(_, _, None, None) ⇒ path.toStringWithAddress(selfAddress)
      case _                         ⇒ path.toString
    }
  }
}
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package collection
import java.lang.Math.{max, min}
import java.util.Arrays
import scala.Predef.{ // unimport all array-related implicit conversions to avoid triggering them accidentally
genericArrayOps => _,
booleanArrayOps => _,
byteArrayOps => _,
charArrayOps => _,
doubleArrayOps => _,
floatArrayOps => _,
intArrayOps => _,
longArrayOps => _,
refArrayOps => _,
shortArrayOps => _,
unitArrayOps => _,
genericWrapArray => _,
wrapRefArray => _,
wrapIntArray => _,
wrapDoubleArray => _,
wrapLongArray => _,
wrapFloatArray => _,
wrapCharArray => _,
wrapByteArray => _,
wrapShortArray => _,
wrapBooleanArray => _,
wrapUnitArray => _,
wrapString => _,
copyArrayToImmutableIndexedSeq => _,
_
}
import scala.collection.Stepper.EfficientSplit
import scala.collection.immutable.Range
import scala.collection.mutable.ArrayBuilder
import scala.math.Ordering
import scala.reflect.ClassTag
import scala.util.Sorting
object ArrayOps {
  /** A serializable indexed view over an array. */
  @SerialVersionUID(3L)
  private class ArrayView[A](xs: Array[A]) extends AbstractIndexedSeqView[A] {
    def length = xs.length
    def apply(n: Int) = xs(n)
    // Wrap without copying just to reuse mkString for the rendering.
    override def toString: String = immutable.ArraySeq.unsafeWrapArray(xs).mkString("ArrayView(", ", ", ")")
  }
  /** A lazy filtered array. No filtering is applied until one of `foreach`, `map` or `flatMap` is called.
   *
   * @param p the predicate; only elements satisfying it are visited/transformed.
   * @param xs the underlying (unfiltered) array.
   */
  class WithFilter[A](p: A => Boolean, xs: Array[A]) {
    /** Apply `f` to each element for its side effects.
     * Note: [U] parameter needed to help scalac's type inference.
     */
    def foreach[U](f: A => U): Unit = {
      val len = xs.length
      var i = 0
      while(i < len) {
        val x = xs(i)
        if(p(x)) f(x)
        i += 1
      }
    }
    /** Builds a new array by applying a function to all elements of this array.
     *
     * @param f the function to apply to each element.
     * @tparam B the element type of the returned array.
     * @return a new array resulting from applying the given function
     * `f` to each element of this array and collecting the results.
     */
    def map[B: ClassTag](f: A => B): Array[B] = {
      val b = ArrayBuilder.make[B]
      var i = 0
      while (i < xs.length) {
        val x = xs(i)
        if(p(x)) b += f(x)
        i = i + 1
      }
      b.result()
    }
    /** Builds a new array by applying a function to all elements of this array
     * and using the elements of the resulting collections.
     *
     * @param f the function to apply to each element.
     * @tparam B the element type of the returned array.
     * @return a new array resulting from applying the given collection-valued function
     * `f` to each element of this array and concatenating the results.
     */
    def flatMap[B: ClassTag](f: A => IterableOnce[B]): Array[B] = {
      val b = ArrayBuilder.make[B]
      var i = 0
      while(i < xs.length) {
        val x = xs(i)
        if(p(x)) b ++= f(xs(i)) // xs(i) is the same element as x here
        i += 1
      }
      b.result()
    }
    /** Variant of `flatMap` for functions whose results are convertible to `Iterable`. */
    def flatMap[BS, B](f: A => BS)(implicit asIterable: BS => Iterable[B], m: ClassTag[B]): Array[B] =
      flatMap[B](x => asIterable(f(x)))
    /** Creates a new non-strict filter which combines this filter with the given predicate. */
    def withFilter(q: A => Boolean): WithFilter[A] = new WithFilter[A](a => p(a) && q(a), xs)
  }
  /** Forward iterator over an array, specialized to avoid boxing primitives. */
  @SerialVersionUID(3L)
  private[collection] final class ArrayIterator[@specialized(Specializable.Everything) A](xs: Array[A]) extends AbstractIterator[A] with Serializable {
    private[this] var pos = 0
    private[this] val len = xs.length
    override def knownSize: Int = len - pos
    def hasNext: Boolean = pos < len
    // Relies on the array's own bounds check rather than testing hasNext:
    // an out-of-bounds access is turned into the standard empty-iterator error.
    def next(): A = try {
      val r = xs(pos)
      pos += 1
      r
    } catch { case _: ArrayIndexOutOfBoundsException => Iterator.empty.next() }
    override def drop(n: Int): Iterator[A] = {
      if (n > 0) {
        val newPos = pos + n
        // A negative sum means pos + n overflowed Int; treat it as "past the end".
        pos =
          if (newPos < 0 /* overflow */) len
          else Math.min(len, newPos)
      }
      this
    }
  }
  /** Backward (last-to-first) iterator over an array, specialized to avoid boxing. */
  @SerialVersionUID(3L)
  private final class ReverseIterator[@specialized(Specializable.Everything) A](xs: Array[A]) extends AbstractIterator[A] with Serializable {
    private[this] var pos = xs.length-1
    def hasNext: Boolean = pos >= 0
    // Relies on the array's own bounds check: index -1 raises
    // ArrayIndexOutOfBoundsException, converted to the empty-iterator error.
    def next(): A = try {
      val r = xs(pos)
      pos -= 1
      r
    } catch { case _: ArrayIndexOutOfBoundsException => Iterator.empty.next() }
    override def drop(n: Int): Iterator[A] = {
      // Clamp at -1 ("exhausted") so repeated drops cannot wrap around.
      if (n > 0) pos = Math.max( -1, pos - n)
      this
    }
  }
  /** Iterator of fixed-size chunks of an array, backing `grouped`. */
  @SerialVersionUID(3L)
  private final class GroupedIterator[A](xs: Array[A], groupSize: Int) extends AbstractIterator[Array[A]] with Serializable {
    private[this] var pos = 0
    def hasNext: Boolean = pos < xs.length
    def next(): Array[A] = {
      if(pos >= xs.length) throw new NoSuchElementException
      // slice clamps at the end of the array, so the final group may be
      // shorter than groupSize.
      val r = new ArrayOps(xs).slice(pos, pos+groupSize)
      pos += groupSize
      r
    }
  }
/** The cut-off point for the array size after which we switch from `Sorting.stableSort` to
* an implementation that copies the data to a boxed representation for use with `Arrays.sort`.
*/
private final val MaxStableSortLength = 300
}
/** This class serves as a wrapper for `Array`s with many of the operations found in
* indexed sequences. Where needed, instances of arrays are implicitly converted
* into this class. There is generally no reason to create an instance explicitly or use
* an `ArrayOps` type. It is better to work with plain `Array` types instead and rely on
* the implicit conversion to `ArrayOps` when calling a method (which does not actually
* allocate an instance of `ArrayOps` because it is a value class).
*
* Neither `Array` nor `ArrayOps` are proper collection types
* (i.e. they do not extend `Iterable` or even `IterableOnce`). `mutable.ArraySeq` and
* `immutable.ArraySeq` serve this purpose.
*
* The difference between this class and `ArraySeq`s is that calling transformer methods such as
* `filter` and `map` will yield an array, whereas an `ArraySeq` will remain an `ArraySeq`.
*
* @tparam A type of the elements contained in this array.
*/
final class ArrayOps[A](private val xs: Array[A]) extends AnyVal {
@`inline` private[this] implicit def elemTag: ClassTag[A] = ClassTag(xs.getClass.getComponentType)
/** The size of this array.
*
* @return the number of elements in this array.
*/
@`inline` def size: Int = xs.length
/** The size of this array.
*
* @return the number of elements in this array.
*/
@`inline` def knownSize: Int = xs.length
/** Tests whether the array is empty.
*
* @return `true` if the array contains no elements, `false` otherwise.
*/
@`inline` def isEmpty: Boolean = xs.length == 0
/** Tests whether the array is not empty.
*
* @return `true` if the array contains at least one element, `false` otherwise.
*/
@`inline` def nonEmpty: Boolean = xs.length != 0
/** Selects the first element of this array.
*
* @return the first element of this array.
* @throws NoSuchElementException if the array is empty.
*/
def head: A = try xs.apply(0) catch { case _: ArrayIndexOutOfBoundsException => throw new NoSuchElementException("head of empty array") }
/** Selects the last element.
*
* @return The last element of this array.
* @throws NoSuchElementException If the array is empty.
*/
def last: A = try xs.apply(xs.length-1) catch { case _: ArrayIndexOutOfBoundsException => throw new NoSuchElementException("last of empty array") }
/** Optionally selects the first element.
*
* @return the first element of this array if it is nonempty,
* `None` if it is empty.
*/
def headOption: Option[A] = if(isEmpty) None else Some(head)
/** Optionally selects the last element.
*
* @return the last element of this array$ if it is nonempty,
* `None` if it is empty.
*/
def lastOption: Option[A] = if(isEmpty) None else Some(last)
/** Compares the size of this array to a test value.
  *
  * Constant time, unlike the linked-list based `SeqOps.sizeCompare`.
  *
  * @param otherSize the test value that gets compared with the size.
  * @return A value `x` where
  *         {{{
  *             x <  0       if this.size <  otherSize
  *             x == 0       if this.size == otherSize
  *             x >  0       if this.size >  otherSize
  *         }}}
  */
def sizeCompare(otherSize: Int): Int = Integer.compare(xs.length, otherSize)
/** Compares the length of this array to a test value.
  *
  * Constant time, unlike the linked-list based `SeqOps.lengthCompare`.
  *
  * @param   len   the test value that gets compared with the length.
  * @return  A value `x` where
  *          {{{
  *             x <  0       if this.length <  len
  *             x == 0       if this.length == len
  *             x >  0       if this.length >  len
  *          }}}
  */
def lengthCompare(len: Int): Int = Integer.compare(xs.length, len)
/** Method mirroring [[SeqOps.sizeIs]] for consistency, except it returns an `Int`
  * because `size` is known and comparison is constant-time.
  *
  * These operations are equivalent to [[sizeCompare(Int) `sizeCompare(Int)`]], and
  * allow the following more readable usages:
  *
  * {{{
  * this.sizeIs < size     // this.sizeCompare(size) < 0
  * this.sizeIs <= size    // this.sizeCompare(size) <= 0
  * this.sizeIs == size    // this.sizeCompare(size) == 0
  * this.sizeIs != size    // this.sizeCompare(size) != 0
  * this.sizeIs >= size    // this.sizeCompare(size) >= 0
  * this.sizeIs > size     // this.sizeCompare(size) > 0
  * }}}
  */
def sizeIs: Int = xs.length
/** Method mirroring [[SeqOps.lengthIs]] for consistency, except it returns an `Int`
  * because `length` is known and comparison is constant-time.
  *
  * These operations are equivalent to [[lengthCompare(Int) `lengthCompare(Int)`]], and
  * allow the following more readable usages:
  *
  * {{{
  * this.lengthIs < len     // this.lengthCompare(len) < 0
  * this.lengthIs <= len    // this.lengthCompare(len) <= 0
  * this.lengthIs == len    // this.lengthCompare(len) == 0
  * this.lengthIs != len    // this.lengthCompare(len) != 0
  * this.lengthIs >= len    // this.lengthCompare(len) >= 0
  * this.lengthIs > len     // this.lengthCompare(len) > 0
  * }}}
  */
def lengthIs: Int = xs.length
/** Selects an interval of elements. The returned array is made up
  * of all elements `x` which satisfy the invariant:
  * {{{
  * from <= indexOf(x) < until
  * }}}
  *
  * Out-of-range bounds are clamped rather than rejected: `from` is clamped
  * to 0 and `until` to `xs.length`, so the method never throws.
  *
  * @param  from   the lowest index to include from this array.
  * @param  until  the lowest index to EXCLUDE from this array.
  * @return an array containing the elements greater than or equal to
  *         index `from` extending up to (but not including) index `until`
  *         of this array.
  */
def slice(from: Int, until: Int): Array[A] = {
  import java.util.Arrays.copyOfRange
  val lo = max(from, 0)
  val hi = min(until, xs.length)
  if (hi > lo) {
    // Dispatch on the runtime element type so the statically-typed
    // java.util.Arrays.copyOfRange overload for that primitive (or AnyRef)
    // is selected; the result is cast back to Array[A], which is sound
    // because the component type is unchanged.
    (((xs: Array[_]): @unchecked) match {
      case x: Array[AnyRef]  => copyOfRange(x, lo, hi)
      case x: Array[Int]     => copyOfRange(x, lo, hi)
      case x: Array[Double]  => copyOfRange(x, lo, hi)
      case x: Array[Long]    => copyOfRange(x, lo, hi)
      case x: Array[Float]   => copyOfRange(x, lo, hi)
      case x: Array[Char]    => copyOfRange(x, lo, hi)
      case x: Array[Byte]    => copyOfRange(x, lo, hi)
      case x: Array[Short]   => copyOfRange(x, lo, hi)
      case x: Array[Boolean] => copyOfRange(x, lo, hi)
    }).asInstanceOf[Array[A]]
  } else new Array[A](0)
}
/** The rest of the array without its first element.
  *
  * @throws UnsupportedOperationException if the array is empty.
  */
def tail: Array[A] = {
  if (xs.length == 0) throw new UnsupportedOperationException("tail of empty array")
  slice(1, xs.length)
}
/** The initial part of the array without its last element.
  *
  * @throws UnsupportedOperationException if the array is empty.
  */
def init: Array[A] = {
  if (xs.length == 0) throw new UnsupportedOperationException("init of empty array")
  slice(0, xs.length - 1)
}
/** Iterates over the tails of this array. The first value will be this
  * array and the final one will be an empty array, with the intervening
  * values the results of successive applications of `tail`.
  *
  * @return an iterator over all the tails of this array
  */
def tails: Iterator[Array[A]] = iterateUntilEmpty(xs => new ArrayOps(xs).tail)
/** Iterates over the inits of this array. The first value will be this
  * array and the final one will be an empty array, with the intervening
  * values the results of successive applications of `init`.
  *
  * @return an iterator over all the inits of this array
  */
def inits: Iterator[Array[A]] = iterateUntilEmpty(xs => new ArrayOps(xs).init)
// A helper for tails and inits. Lazily applies f repeatedly starting from xs,
// yielding every non-empty intermediate array, then appends a single empty
// array as the final element (takeWhile drops it, so it is re-added).
private[this] def iterateUntilEmpty(f: Array[A] => Array[A]): Iterator[Array[A]] =
  Iterator.iterate(xs)(f).takeWhile(x => x.length != 0) ++ Iterator.single(Array.empty[A])
/** An array containing the first `n` elements of this array.
  * `n` larger than the length (or negative) is handled by `slice`'s clamping. */
def take(n: Int): Array[A] = slice(0, n)
/** The rest of the array without its `n` first elements.
  * `n` larger than the length (or negative) is handled by `slice`'s clamping. */
def drop(n: Int): Array[A] = slice(n, xs.length)
/** An array containing the last `n` elements of this array.
  * Negative `n` is treated as 0 (empty result). */
def takeRight(n: Int): Array[A] = drop(xs.length - max(n, 0))
/** The rest of the array without its `n` last elements.
  * Negative `n` is treated as 0 (whole array kept). */
def dropRight(n: Int): Array[A] = take(xs.length - max(n, 0))
/** Takes longest prefix of elements that satisfy a predicate.
  *
  * @param   p  The predicate used to test elements.
  * @return  the longest prefix of this array whose elements all satisfy
  *          the predicate `p`.
  */
def takeWhile(p: A => Boolean): Array[A] = {
  // Index of the first failing element, or the whole length if none fails.
  val firstMiss = indexWhere(x => !p(x))
  slice(0, if (firstMiss < 0) xs.length else firstMiss)
}
/** Drops longest prefix of elements that satisfy a predicate.
  *
  * @param   p  The predicate used to test elements.
  * @return  the longest suffix of this array whose first element
  *          does not satisfy the predicate `p`.
  */
def dropWhile(p: A => Boolean): Array[A] = {
  // Index of the first failing element, or the whole length if none fails.
  val firstMiss = indexWhere(x => !p(x))
  slice(if (firstMiss < 0) xs.length else firstMiss, xs.length)
}
/** An iterator over the elements of this array.
  *
  * The match dispatches on the runtime array type so a specialized iterator
  * is constructed for each primitive element type, avoiding boxing on `next()`.
  */
def iterator: Iterator[A] =
  ((xs: Any @unchecked) match {
    case xs: Array[AnyRef]  => new ArrayOps.ArrayIterator(xs)
    case xs: Array[Int]     => new ArrayOps.ArrayIterator(xs)
    case xs: Array[Double]  => new ArrayOps.ArrayIterator(xs)
    case xs: Array[Long]    => new ArrayOps.ArrayIterator(xs)
    case xs: Array[Float]   => new ArrayOps.ArrayIterator(xs)
    case xs: Array[Char]    => new ArrayOps.ArrayIterator(xs)
    case xs: Array[Byte]    => new ArrayOps.ArrayIterator(xs)
    case xs: Array[Short]   => new ArrayOps.ArrayIterator(xs)
    case xs: Array[Boolean] => new ArrayOps.ArrayIterator(xs)
    case xs: Array[Unit]    => new ArrayOps.ArrayIterator(xs)
    case null => throw new NullPointerException
  }).asInstanceOf[Iterator[A]]
/** Returns a `Stepper` over this array, used to interoperate with Java streams.
  *
  * The stepper implementation is chosen by the implicit `StepperShape`:
  * primitive shapes get unboxed (possibly widening) array steppers; the
  * reference shape boxes `Boolean` elements and treats everything else as
  * `AnyRef`. All returned steppers support efficient splitting.
  */
def stepper[S <: Stepper[_]](implicit shape: StepperShape[A, S]): S with EfficientSplit = {
  import convert.impl._
  val s = (shape.shape: @unchecked) match {
    case StepperShape.ReferenceShape => (xs: Any) match {
      // Boolean has no dedicated shape, so it must be boxed for the reference shape.
      case bs: Array[Boolean] => new BoxedBooleanArrayStepper(bs, 0, xs.length)
      case _ => new ObjectArrayStepper[AnyRef](xs.asInstanceOf[Array[AnyRef ]], 0, xs.length)
    }
    case StepperShape.IntShape    => new IntArrayStepper           (xs.asInstanceOf[Array[Int   ]], 0, xs.length)
    case StepperShape.LongShape   => new LongArrayStepper          (xs.asInstanceOf[Array[Long  ]], 0, xs.length)
    case StepperShape.DoubleShape => new DoubleArrayStepper        (xs.asInstanceOf[Array[Double]], 0, xs.length)
    // Sub-int primitives are widened to the nearest stream-supported type.
    case StepperShape.ByteShape   => new WidenedByteArrayStepper   (xs.asInstanceOf[Array[Byte  ]], 0, xs.length)
    case StepperShape.ShortShape  => new WidenedShortArrayStepper  (xs.asInstanceOf[Array[Short ]], 0, xs.length)
    case StepperShape.CharShape   => new WidenedCharArrayStepper   (xs.asInstanceOf[Array[Char  ]], 0, xs.length)
    case StepperShape.FloatShape  => new WidenedFloatArrayStepper  (xs.asInstanceOf[Array[Float ]], 0, xs.length)
  }
  s.asInstanceOf[S with EfficientSplit]
}
/** Partitions elements in fixed size arrays.
  * @see [[scala.collection.Iterator]], method `grouped`
  *
  * @param size the number of elements per group
  * @return An iterator producing arrays of size `size`, except the
  *         last will be less than size `size` if the elements don't divide evenly.
  */
def grouped(size: Int): Iterator[Array[A]] = new ArrayOps.GroupedIterator[A](xs, size)
/** Splits this array into a prefix/suffix pair according to a predicate.
  *
  * Note: `c span p` is equivalent to (but more efficient than)
  * `(c takeWhile p, c dropWhile p)`, provided the evaluation of the
  * predicate `p` does not cause any side-effects.
  *
  * @param p the test predicate
  * @return  a pair consisting of the longest prefix of this array whose
  *          elements all satisfy `p`, and the rest of this array.
  */
def span(p: A => Boolean): (Array[A], Array[A]) = {
  // The split point is the first element failing p, or the end of the array.
  val cut = indexWhere(x => !p(x)) match {
    case -1 => xs.length
    case i  => i
  }
  (slice(0, cut), slice(cut, xs.length))
}
/** Splits this array into two at a given position.
  * Note: `c splitAt n` is equivalent to `(c take n, c drop n)`.
  *
  * @param n the position at which to split.
  * @return  a pair of arrays consisting of the first `n`
  *          elements of this array, and the other elements.
  */
def splitAt(n: Int): (Array[A], Array[A]) = {
  val front = take(n)
  val back = drop(n)
  (front, back)
}
/** A pair of, first, all elements that satisfy predicate `p` and, second, all elements that do not.
  *
  * @param p the predicate used to classify elements.
  * @return  `(matching, nonMatching)`, each preserving the original order.
  */
def partition(p: A => Boolean): (Array[A], Array[A]) = {
  val hits, misses = ArrayBuilder.make[A]
  var idx = 0
  while (idx < xs.length) {
    val elem = xs(idx)
    if (p(elem)) hits += elem else misses += elem
    idx += 1
  }
  (hits.result(), misses.result())
}
/** Applies a function `f` to each element of the array and returns a pair of arrays: the first one
  * made of those values returned by `f` that were wrapped in [[scala.util.Left]], and the second
  * one made of those wrapped in [[scala.util.Right]].
  *
  * Example:
  * {{{
  * val xs = Array(1, "one", 2, "two", 3, "three") partitionMap {
  *   case i: Int => Left(i)
  *   case s: String => Right(s)
  * }
  * // xs == (Array(1, 2, 3),
  * //        Array(one, two, three))
  * }}}
  *
  * @tparam A1 the element type of the first resulting collection
  * @tparam A2 the element type of the second resulting collection
  * @param f the 'split function' mapping the elements of this array to an [[scala.util.Either]]
  *
  * @return a pair of arrays: the first one made of those values returned by `f` that were wrapped in [[scala.util.Left]],
  *         and the second one made of those wrapped in [[scala.util.Right]]. */
def partitionMap[A1: ClassTag, A2: ClassTag](f: A => Either[A1, A2]): (Array[A1], Array[A2]) = {
  val lefts = ArrayBuilder.make[A1]
  val rights = ArrayBuilder.make[A2]
  var idx = 0
  while (idx < xs.length) {
    f(xs(idx)) match {
      case Left(l)  => lefts += l
      case Right(r) => rights += r
    }
    idx += 1
  }
  (lefts.result(), rights.result())
}
/** Returns a new array with the elements in reversed order.
  * The original array is left untouched.
  */
@inline def reverse: Array[A] = {
  val n = xs.length
  val out = new Array[A](n)
  var src = n - 1
  var dst = 0
  while (src >= 0) {
    out(dst) = xs(src)
    dst += 1
    src -= 1
  }
  out
}
/** An iterator yielding elements in reversed order.
  *
  * Note: `xs.reverseIterator` is the same as `xs.reverse.iterator` but implemented more efficiently.
  *
  * As with `iterator`, the match dispatches on the runtime array type so a
  * specialized (unboxed) iterator is used for each primitive element type.
  *
  * @return  an iterator yielding the elements of this array in reversed order
  */
def reverseIterator: Iterator[A] =
  ((xs: Any @unchecked) match {
    case xs: Array[AnyRef]  => new ArrayOps.ReverseIterator(xs)
    case xs: Array[Int]     => new ArrayOps.ReverseIterator(xs)
    case xs: Array[Double]  => new ArrayOps.ReverseIterator(xs)
    case xs: Array[Long]    => new ArrayOps.ReverseIterator(xs)
    case xs: Array[Float]   => new ArrayOps.ReverseIterator(xs)
    case xs: Array[Char]    => new ArrayOps.ReverseIterator(xs)
    case xs: Array[Byte]    => new ArrayOps.ReverseIterator(xs)
    case xs: Array[Short]   => new ArrayOps.ReverseIterator(xs)
    case xs: Array[Boolean] => new ArrayOps.ReverseIterator(xs)
    case xs: Array[Unit]    => new ArrayOps.ReverseIterator(xs)
    case null => throw new NullPointerException
  }).asInstanceOf[Iterator[A]]
/** Selects all elements of this array which satisfy a predicate.
  *
  * @param p  the predicate used to test elements.
  * @return   a new array consisting of all elements of this array that satisfy the given predicate `p`.
  */
def filter(p: A => Boolean): Array[A] = {
  val kept = ArrayBuilder.make[A]
  var idx = 0
  while (idx < xs.length) {
    val elem = xs(idx)
    if (p(elem)) kept += elem
    idx += 1
  }
  kept.result()
}
/** Selects all elements of this array which do not satisfy a predicate.
  *
  * @param p  the predicate used to test elements.
  * @return   a new array consisting of all elements of this array that do not satisfy the given predicate `p`.
  */
def filterNot(p: A => Boolean): Array[A] = filter(x => !p(x))
/** Sorts this array according to an Ordering.
  *
  * The sort is stable. That is, elements that are equal (as determined by
  * `lt`) appear in the same order in the sorted sequence as in the original.
  *
  * Implementation notes: primitive arrays sorted with their natural ordering
  * take the fast `java.util.Arrays.sort` path (stability is irrelevant for
  * primitives since equal values are indistinguishable). Any other ordering
  * falls back to `boxed`, which uses an in-place stable sort for short arrays
  * and boxes into an `Array[AnyRef]` for `Arrays.sort`'s stable merge sort
  * otherwise.
  *
  * @see [[scala.math.Ordering]]
  *
  * @param  ord the ordering to be used to compare elements.
  * @return     an array consisting of the elements of this array
  *             sorted according to the ordering `ord`.
  */
def sorted[B >: A](implicit ord: Ordering[B]): Array[A] = {
  val len = xs.length
  def boxed = if(len < ArrayOps.MaxStableSortLength) {
    val a = xs.clone()
    Sorting.stableSort(a)(ord.asInstanceOf[Ordering[A]])
    a
  } else {
    val a = Array.copyAs[AnyRef](xs, len)(ClassTag.AnyRef)
    Arrays.sort(a, ord.asInstanceOf[Ordering[AnyRef]])
    Array.copyAs[A](a, len)
  }
  if(len <= 1) xs.clone()
  else ((xs: Array[_]) match {
    case xs: Array[AnyRef] =>
      val a = Arrays.copyOf(xs, len); Arrays.sort(a, ord.asInstanceOf[Ordering[AnyRef]]); a
    case xs: Array[Int] =>
      if(ord eq Ordering.Int) { val a = Arrays.copyOf(xs, len); Arrays.sort(a); a }
      else boxed
    case xs: Array[Long] =>
      if(ord eq Ordering.Long) { val a = Arrays.copyOf(xs, len); Arrays.sort(a); a }
      else boxed
    case xs: Array[Char] =>
      if(ord eq Ordering.Char) { val a = Arrays.copyOf(xs, len); Arrays.sort(a); a }
      else boxed
    case xs: Array[Byte] =>
      if(ord eq Ordering.Byte) { val a = Arrays.copyOf(xs, len); Arrays.sort(a); a }
      else boxed
    case xs: Array[Short] =>
      if(ord eq Ordering.Short) { val a = Arrays.copyOf(xs, len); Arrays.sort(a); a }
      else boxed
    case xs: Array[Boolean] =>
      // java.util.Arrays has no sort overload for Boolean, so use stableSort.
      if(ord eq Ordering.Boolean) { val a = Arrays.copyOf(xs, len); Sorting.stableSort(a); a }
      else boxed
    case xs => boxed
  }).asInstanceOf[Array[A]]
}
/** Sorts this array according to a comparison function.
  *
  * The sort is stable. That is, elements that are equal (as determined by
  * `lt`) appear in the same order in the sorted sequence as in the original.
  *
  * @param  lt  the comparison function which tests whether
  *             its first argument precedes its second argument in
  *             the desired ordering.
  * @return     an array consisting of the elements of this array
  *             sorted according to the comparison function `lt`.
  */
def sortWith(lt: (A, A) => Boolean): Array[A] = sorted(Ordering.fromLessThan(lt))
/** Sorts this array according to the Ordering which results from transforming
  * an implicitly given Ordering with a transformation function.
  *
  * @see [[scala.math.Ordering]]
  * @param   f the transformation function mapping elements
  *          to some other domain `B`.
  * @param   ord the ordering assumed on domain `B`.
  * @tparam  B the target type of the transformation `f`, and the type where
  *          the ordering `ord` is defined.
  * @return  an array consisting of the elements of this array
  *          sorted according to the ordering where `x < y` if
  *          `ord.lt(f(x), f(y))`.
  */
def sortBy[B](f: A => B)(implicit ord: Ordering[B]): Array[A] = sorted(ord on f)
/** Creates a non-strict filter of this array.
  *
  * Note: the difference between `c filter p` and `c withFilter p` is that
  * the former creates a new array, whereas the latter only
  * restricts the domain of subsequent `map`, `flatMap`, `foreach`,
  * and `withFilter` operations.
  *
  * @param p   the predicate used to test elements.
  * @return    an object of class `ArrayOps.WithFilter`, which supports
  *            `map`, `flatMap`, `foreach`, and `withFilter` operations.
  *            All these operations apply to those elements of this array
  *            which satisfy the predicate `p`.
  */
def withFilter(p: A => Boolean): ArrayOps.WithFilter[A] = new ArrayOps.WithFilter[A](p, xs)
/** Finds index of first occurrence of some value in this array after or at some start index.
  *
  * @param   elem   the element value to search for.
  * @param   from   the start index
  * @return  the index `>= from` of the first element of this array that is equal (as determined by `==`)
  *          to `elem`, or `-1`, if none exists.
  */
def indexOf(elem: A, from: Int = 0): Int = {
  var idx = from
  while (idx < xs.length) {
    // Keep `elem` on the left so a cooperative-equality check sees the same receiver.
    if (elem == xs(idx)) return idx
    idx += 1
  }
  -1
}
/** Finds index of the first element satisfying some predicate after or at some start index.
  *
  * @param   p     the predicate used to test elements.
  * @param   from  the start index
  * @return  the index `>= from` of the first element of this array that satisfies the predicate `p`,
  *          or `-1`, if none exists.
  */
def indexWhere(@deprecatedName("f", "2.13.3") p: A => Boolean, from: Int = 0): Int = {
  var idx = from
  while (idx < xs.length) {
    if (p(xs(idx))) return idx
    idx += 1
  }
  -1
}
/** Finds index of last occurrence of some value in this array before or at a given end index.
  *
  * @param   elem   the element value to search for.
  * @param   end    the end index.
  * @return  the index `<= end` of the last element of this array that is equal (as determined by `==`)
  *          to `elem`, or `-1`, if none exists.
  */
def lastIndexOf(elem: A, end: Int = xs.length - 1): Int = {
  // Start at `end`, clamped into the valid index range.
  var idx = min(end, xs.length - 1)
  while (idx >= 0) {
    if (elem == xs(idx)) return idx
    idx -= 1
  }
  -1
}
/** Finds index of last element satisfying some predicate before or at given end index.
  *
  * @param   p     the predicate used to test elements.
  * @return  the index `<= end` of the last element of this array that satisfies the predicate `p`,
  *          or `-1`, if none exists.
  */
def lastIndexWhere(p: A => Boolean, end: Int = xs.length - 1): Int = {
  // Start at `end`, clamped into the valid index range.
  var idx = min(end, xs.length - 1)
  while (idx >= 0) {
    if (p(xs(idx))) return idx
    idx -= 1
  }
  -1
}
/** Finds the first element of the array satisfying a predicate, if any.
  *
  * @param p       the predicate used to test elements.
  * @return        an option value containing the first element in the array
  *                that satisfies `p`, or `None` if none exists.
  */
def find(@deprecatedName("f", "2.13.3") p: A => Boolean): Option[A] =
  indexWhere(p) match {
    case -1 => None
    case i  => Some(xs(i))
  }
/** Tests whether a predicate holds for at least one element of this array.
  *
  * @param   p     the predicate used to test elements.
  * @return        `true` if the given predicate `p` is satisfied by at least one element of this array, otherwise `false`
  */
def exists(@deprecatedName("f", "2.13.3") p: A => Boolean): Boolean = indexWhere(p) >= 0
/** Tests whether a predicate holds for all elements of this array.
  *
  * @param   p     the predicate used to test elements.
  * @return        `true` if this array is empty or the given predicate `p`
  *                holds for all elements of this array, otherwise `false`.
  */
def forall(@deprecatedName("f", "2.13.3") p: A => Boolean): Boolean =
  // All elements satisfy p iff no element fails it; stops at the first failure.
  indexWhere(x => !p(x)) < 0
/** Applies a binary operator to a start value and all elements of this array,
  * going left to right.
  *
  * The inner helper `f` is `@specialized` and the outer match dispatches on
  * the runtime array type, so element reads are monomorphic per primitive
  * type (the accumulator itself is still handled as `Any`).
  *
  *  @param   z    the start value.
  *  @param   op   the binary operator.
  *  @tparam  B    the result type of the binary operator.
  *  @return  the result of inserting `op` between consecutive elements of this array,
  *           going left to right with the start value `z` on the left:
  *           {{{
  *             op(...op(z, x_1), x_2, ..., x_n)
  *           }}}
  *           where `x,,1,,, ..., x,,n,,` are the elements of this array.
  *           Returns `z` if this array is empty.
  */
def foldLeft[B](z: B)(op: (B, A) => B): B = {
  def f[@specialized(Specializable.Everything) T](xs: Array[T], op: (Any, Any) => Any, z: Any): Any = {
    val length = xs.length
    var v: Any = z
    var i = 0
    while(i < length) {
      v = op(v, xs(i))
      i += 1
    }
    v
  }
  ((xs: Any @unchecked) match {
    case null => throw new NullPointerException // null-check first helps static analysis of instanceOf
    case xs: Array[AnyRef]  => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Int]     => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Double]  => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Long]    => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Float]   => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Char]    => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Byte]    => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Short]   => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Boolean] => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Unit]    => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
  }).asInstanceOf[B]
}
/** Produces an array containing cumulative results of applying the binary
  * operator going left to right.
  *
  *  @param   z    the start value.
  *  @param   op   the binary operator.
  *  @tparam  B    the result type of the binary operator.
  *  @return  array with intermediate values.
  *
  * Example:
  * {{{
  * Array(1, 2, 3, 4).scanLeft(0)(_ + _) == Array(0, 1, 3, 6, 10)
  * }}}
  *
  */
def scanLeft[ B : ClassTag ](z: B)(op: (B, A) => B): Array[B] = {
  // Result has one extra slot: the seed itself occupies index 0.
  val out = new Array[B](xs.length + 1)
  var acc = z
  var idx = 0
  while (idx < xs.length) {
    out(idx) = acc
    acc = op(acc, xs(idx))
    idx += 1
  }
  out(xs.length) = acc
  out
}
/** Computes a prefix scan of the elements of the array.
  *
  * Note: The neutral element `z` may be applied more than once.
  *
  * @tparam B         element type of the resulting array
  * @param z          neutral element for the operator `op`
  * @param op         the associative operator for the scan
  *
  * @return           a new array containing the prefix scan of the elements in this array
  */
def scan[B >: A : ClassTag](z: B)(op: (B, B) => B): Array[B] = scanLeft(z)(op)
/** Produces an array containing cumulative results of applying the binary
  * operator going right to left.
  *
  *  @param   z    the start value.
  *  @param   op   the binary operator.
  *  @tparam  B    the result type of the binary operator.
  *  @return  array with intermediate values.
  *
  * Example:
  * {{{
  * Array(4, 3, 2, 1).scanRight(0)(_ + _) == Array(10, 6, 3, 1, 0)
  * }}}
  *
  */
def scanRight[ B : ClassTag ](z: B)(op: (A, B) => B): Array[B] = {
  // Result has one extra slot: the seed itself occupies the last index.
  val out = new Array[B](xs.length + 1)
  out(xs.length) = z
  var acc = z
  var idx = xs.length - 1
  while (idx >= 0) {
    acc = op(xs(idx), acc)
    out(idx) = acc
    idx -= 1
  }
  out
}
/** Applies a binary operator to all elements of this array and a start value,
  * going right to left.
  *
  * As with `foldLeft`, the `@specialized` inner helper plus the runtime-type
  * dispatch keep element reads unboxed per primitive array type.
  *
  *  @param   z    the start value.
  *  @param   op   the binary operator.
  *  @tparam  B    the result type of the binary operator.
  *  @return  the result of inserting `op` between consecutive elements of this array,
  *           going right to left with the start value `z` on the right:
  *           {{{
  *             op(x_1, op(x_2, ... op(x_n, z)...))
  *           }}}
  *           where `x,,1,,, ..., x,,n,,` are the elements of this array.
  *           Returns `z` if this array is empty.
  */
def foldRight[B](z: B)(op: (A, B) => B): B = {
  def f[@specialized(Specializable.Everything) T](xs: Array[T], op: (Any, Any) => Any, z: Any): Any = {
    var v = z
    var i = xs.length - 1
    while(i >= 0) {
      v = op(xs(i), v)
      i -= 1
    }
    v
  }
  ((xs: Any @unchecked) match {
    case null => throw new NullPointerException
    case xs: Array[AnyRef]  => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Int]     => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Double]  => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Long]    => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Float]   => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Char]    => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Byte]    => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Short]   => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Boolean] => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
    case xs: Array[Unit]    => f(xs, op.asInstanceOf[(Any, Any) => Any], z)
  }).asInstanceOf[B]
}
/** Folds the elements of this array using the specified associative binary operator.
  *
  *  @tparam A1     a type parameter for the binary operator, a supertype of `A`.
  *  @param z       a neutral element for the fold operation; may be added to the result
  *                 an arbitrary number of times, and must not change the result (e.g., `Nil` for list concatenation,
  *                 0 for addition, or 1 for multiplication).
  *  @param op      a binary operator that must be associative.
  *  @return        the result of applying the fold operator `op` between all the elements, or `z` if this array is empty.
  */
def fold[A1 >: A](z: A1)(op: (A1, A1) => A1): A1 = foldLeft(z)(op)
/** Builds a new array by applying a function to all elements of this array.
  *
  * The per-type match duplicates the identical-looking loop so the JIT sees a
  * monomorphic `xs(i)` read for each primitive array type, avoiding megamorphic
  * dispatch; the `asInstanceOf[A]` casts are erased at runtime.
  *
  *  @param f      the function to apply to each element.
  *  @tparam B     the element type of the returned array.
  *  @return       a new array resulting from applying the given function
  *                `f` to each element of this array and collecting the results.
  */
def map[B](f: A => B)(implicit ct: ClassTag[B]): Array[B] = {
  val len = xs.length
  val ys = new Array[B](len)
  if(len > 0) {
    var i = 0
    (xs: Any @unchecked) match {
      case xs: Array[AnyRef]  => while (i < len) { ys(i) = f(xs(i).asInstanceOf[A]); i = i+1 }
      case xs: Array[Int]     => while (i < len) { ys(i) = f(xs(i).asInstanceOf[A]); i = i+1 }
      case xs: Array[Double]  => while (i < len) { ys(i) = f(xs(i).asInstanceOf[A]); i = i+1 }
      case xs: Array[Long]    => while (i < len) { ys(i) = f(xs(i).asInstanceOf[A]); i = i+1 }
      case xs: Array[Float]   => while (i < len) { ys(i) = f(xs(i).asInstanceOf[A]); i = i+1 }
      case xs: Array[Char]    => while (i < len) { ys(i) = f(xs(i).asInstanceOf[A]); i = i+1 }
      case xs: Array[Byte]    => while (i < len) { ys(i) = f(xs(i).asInstanceOf[A]); i = i+1 }
      case xs: Array[Short]   => while (i < len) { ys(i) = f(xs(i).asInstanceOf[A]); i = i+1 }
      case xs: Array[Boolean] => while (i < len) { ys(i) = f(xs(i).asInstanceOf[A]); i = i+1 }
    }
  }
  ys
}
/** Replaces every element of this array with the result of applying `f` to it,
  * mutating the array in place.
  *
  * @param f the transformation applied to each element.
  * @return  this array, after mutation.
  */
def mapInPlace(f: A => A): Array[A] = {
  var idx = 0
  while (idx < xs.length) {
    xs(idx) = f(xs(idx))
    idx += 1
  }
  xs
}
/** Builds a new array by applying a function to all elements of this array
  * and using the elements of the resulting collections.
  *
  *  @param f      the function to apply to each element.
  *  @tparam B     the element type of the returned array.
  *  @return       a new array resulting from applying the given collection-valued function
  *                `f` to each element of this array and concatenating the results.
  */
def flatMap[B : ClassTag](f: A => IterableOnce[B]): Array[B] = {
  val builder = ArrayBuilder.make[B]
  var idx = 0
  while (idx < xs.length) {
    builder ++= f(xs(idx))
    idx += 1
  }
  builder.result()
}
/** Overload of `flatMap` accepting any result type `BS` that is implicitly
  * viewable as an `Iterable[B]` (e.g. `String` or `Array`). */
def flatMap[BS, B](f: A => BS)(implicit asIterable: BS => Iterable[B], m: ClassTag[B]): Array[B] =
  flatMap[B](x => asIterable(f(x)))
/** Flattens a two-dimensional array by concatenating all its rows
  * into a single array.
  *
  *  @tparam B         Type of row elements.
  *  @param asIterable A function that converts elements of this array to rows - Iterables of type `B`.
  *  @return           An array obtained by concatenating rows of this array.
  */
def flatten[B](implicit asIterable: A => IterableOnce[B], m: ClassTag[B]): Array[B] = {
  val b = ArrayBuilder.make[B]
  val len = xs.length
  var size = 0
  var i = 0
  // First pass: sum up the sizes that are cheaply known (knownSize >= 0, or
  // raw array lengths) so the builder can pre-allocate. Rows with unknown
  // size contribute nothing to the hint; it is only a lower bound.
  while(i < len) {
    xs(i) match {
      case it: IterableOnce[_] =>
        val k = it.knownSize
        if(k > 0) size += k
      case a: Array[_] => size += a.length
      case _ =>
    }
    i += 1
  }
  if(size > 0) b.sizeHint(size)
  // Second pass: actually copy the rows.
  i = 0
  while(i < len) {
    b ++= asIterable(xs(i))
    i += 1
  }
  b.result()
}
/** Builds a new array by applying a partial function to all elements of this array
  * on which the function is defined.
  *
  * @param pf     the partial function which filters and maps the array.
  * @tparam B     the element type of the returned array.
  * @return       a new array resulting from applying the given partial function
  *               `pf` to each element on which it is defined and collecting the results.
  *               The order of the elements is preserved.
  */
def collect[B : ClassTag](pf: PartialFunction[A, B]): Array[B] = {
  var i = 0
  var matched = true
  // Sentinel default: applyOrElse calls `d` only when pf is not defined at x,
  // so `matched` records definedness without a separate isDefinedAt check
  // (pf is evaluated once per element instead of twice).
  def d(x: A): B = {
    matched = false
    null.asInstanceOf[B]
  }
  val b = ArrayBuilder.make[B]
  while(i < xs.length) {
    matched = true
    val v = pf.applyOrElse(xs(i), d)
    if(matched) b += v
    i += 1
  }
  b.result()
}
/** Finds the first element of the array for which the given partial function is defined, and applies the
  * partial function to it. */
def collectFirst[B](f: PartialFunction[A, B]): Option[B] = {
  var i = 0
  var matched = true
  // Same sentinel-default trick as `collect`: `d` runs only when f is not
  // defined at x, flagging the miss without a second evaluation of f.
  def d(x: A): B = {
    matched = false
    null.asInstanceOf[B]
  }
  while(i < xs.length) {
    matched = true
    val v = f.applyOrElse(xs(i), d)
    if(matched) return Some(v)
    i += 1
  }
  None
}
/** Returns an array formed from this array and another iterable collection
  * by combining corresponding elements in pairs.
  * If one of the two collections is longer than the other, its remaining elements are ignored.
  *
  * @param   that  The iterable providing the second half of each result pair
  * @tparam  B     the type of the second half of the returned pairs
  * @return        a new array containing pairs consisting of corresponding elements of this array and `that`.
  *                The length of the returned array is the minimum of the lengths of this array and `that`.
  */
def zip[B](that: IterableOnce[B]): Array[(A, B)] = {
  val builder = new ArrayBuilder.ofRef[(A, B)]()
  val thatSize = that.knownSize
  // Result length is at most min(this, that); fall back to this length if
  // that's size is unknown.
  builder.sizeHint(if (thatSize >= 0) min(thatSize, xs.length) else xs.length)
  val it = that.iterator
  var idx = 0
  while (idx < xs.length && it.hasNext) {
    builder += ((xs(idx), it.next()))
    idx += 1
  }
  builder.result()
}
/** Analogous to `zip` except that the elements in each collection are not consumed until a strict operation is
  * invoked on the returned `LazyZip2` decorator.
  *
  * Calls to `lazyZip` can be chained to support higher arities (up to 4) without incurring the expense of
  * constructing and deconstructing intermediary tuples.
  *
  * {{{
  *    val xs = List(1, 2, 3)
  *    val res = (xs lazyZip xs lazyZip xs lazyZip xs).map((a, b, c, d) => a + b + c + d)
  *    // res == List(4, 8, 12)
  * }}}
  *
  * @param that the iterable providing the second element of each eventual pair
  * @tparam B   the type of the second element in each eventual pair
  * @return a decorator `LazyZip2` that allows strict operations to be performed on the lazily evaluated pairs
  *         or chained calls to `lazyZip`. Implicit conversion to `Iterable[(A, B)]` is also supported.
  */
def lazyZip[B](that: Iterable[B]): LazyZip2[A, B, Array[A]] = new LazyZip2(xs, immutable.ArraySeq.unsafeWrapArray(xs), that)
/** Returns an array formed from this array and another iterable collection
  * by combining corresponding elements in pairs.
  * If one of the two collections is shorter than the other,
  * placeholder elements are used to extend the shorter collection to the length of the longer.
  *
  * @param that     the iterable providing the second half of each result pair
  * @param thisElem the element to be used to fill up the result if this array is shorter than `that`.
  * @param thatElem the element to be used to fill up the result if `that` is shorter than this array.
  * @return        a new array containing pairs consisting of corresponding elements of this array and `that`.
  *                The length of the returned array is the maximum of the lengths of this array and `that`.
  *                If this array is shorter than `that`, `thisElem` values are used to pad the result.
  *                If `that` is shorter than this array, `thatElem` values are used to pad the result.
  */
def zipAll[A1 >: A, B](that: Iterable[B], thisElem: A1, thatElem: B): Array[(A1, B)] = {
  val b = new ArrayBuilder.ofRef[(A1, B)]()
  val k = that.knownSize
  b.sizeHint(max(k, xs.length))
  var i = 0
  val it = that.iterator
  // Phase 1: pair up elements while both sides have them.
  while(i < xs.length && it.hasNext) {
    b += ((xs(i), it.next()))
    i += 1
  }
  // Phase 2: `that` is longer — pad the left side with thisElem.
  while(it.hasNext) {
    b += ((thisElem, it.next()))
    i += 1
  }
  // Phase 3: this array is longer — pad the right side with thatElem.
  while(i < xs.length) {
    b += ((xs(i), thatElem))
    i += 1
  }
  b.result()
}
/** Zips this array with its indices.
  *
  * @return A new array containing pairs consisting of all elements of this array paired with their index.
  *         Indices start at `0`.
  */
def zipWithIndex: Array[(A, Int)] = {
  val out = new Array[(A, Int)](xs.length)
  var idx = 0
  while (idx < xs.length) {
    out(idx) = (xs(idx), idx)
    idx += 1
  }
  out
}
/** A copy of this array with an element appended.
  * The element type may widen to any supertype `B` of `A`; the copy is
  * allocated with `B`'s runtime class. */
def appended[B >: A : ClassTag](x: B): Array[B] = {
  val dest = Array.copyAs[B](xs, xs.length+1)
  dest(xs.length) = x
  dest
}
// Symbolic alias for `appended`.
@`inline` final def :+ [B >: A : ClassTag](x: B): Array[B] = appended(x)
/** A copy of this array with an element prepended.
  * The element type may widen to any supertype `B` of `A`; the copy is
  * allocated with `B`'s runtime class. */
def prepended[B >: A : ClassTag](x: B): Array[B] = {
  val dest = new Array[B](xs.length + 1)
  dest(0) = x
  Array.copy(xs, 0, dest, 1, xs.length)
  dest
}
// Symbolic alias for `prepended`.
@`inline` final def +: [B >: A : ClassTag](x: B): Array[B] = prepended(x)
/** A copy of this array with all elements of a collection prepended. */
def prependedAll[B >: A : ClassTag](prefix: IterableOnce[B]): Array[B] = {
  val b = ArrayBuilder.make[B]
  val k = prefix.knownSize
  // If the prefix size is known, hint the final size up front; otherwise
  // hint after the prefix has been added, when its actual length is known.
  if(k >= 0) b.sizeHint(k + xs.length)
  b.addAll(prefix)
  if(k < 0) b.sizeHint(b.length + xs.length)
  b.addAll(xs)
  b.result()
}
/** A copy of this array with all elements of an array prepended.
  * Single allocation: the prefix is copied (extended) first, then this
  * array's elements are copied in after it. */
def prependedAll[B >: A : ClassTag](prefix: Array[_ <: B]): Array[B] = {
  val dest = Array.copyAs[B](prefix, prefix.length+xs.length)
  Array.copy(xs, 0, dest, prefix.length, xs.length)
  dest
}
// Symbolic aliases for the two `prependedAll` overloads.
@`inline` final def ++: [B >: A : ClassTag](prefix: IterableOnce[B]): Array[B] = prependedAll(prefix)
@`inline` final def ++: [B >: A : ClassTag](prefix: Array[_ <: B]): Array[B] = prependedAll(prefix)
/** A copy of this array with all elements of a collection appended. */
def appendedAll[B >: A : ClassTag](suffix: IterableOnce[B]): Array[B] = {
  val builder = ArrayBuilder.make[B]
  val suffixSize = suffix.knownSize
  // Pre-size the builder only when the suffix length is known.
  if (suffixSize >= 0) builder.sizeHint(suffixSize + xs.length)
  builder.addAll(xs)
  builder.addAll(suffix)
  builder.result()
}
/** A copy of this array with all elements of an array appended. */
def appendedAll[B >: A : ClassTag](suffix: Array[_ <: B]): Array[B] = {
  // Widen-copy this array into a larger one, then copy the suffix into the tail slots.
  val result = Array.copyAs[B](xs, xs.length + suffix.length)
  Array.copy(suffix, 0, result, xs.length, suffix.length)
  result
}
/** Alias for `appendedAll`. */
@`inline` final def :++ [B >: A : ClassTag](suffix: IterableOnce[B]): Array[B] = appendedAll(suffix)
/** Alias for `appendedAll`. */
@`inline` final def :++ [B >: A : ClassTag](suffix: Array[_ <: B]): Array[B] = appendedAll(suffix)
/** Alias for `appendedAll`. */
@`inline` final def concat[B >: A : ClassTag](suffix: IterableOnce[B]): Array[B] = appendedAll(suffix)
/** Alias for `appendedAll`. */
@`inline` final def concat[B >: A : ClassTag](suffix: Array[_ <: B]): Array[B] = appendedAll(suffix)
/** Alias for `appendedAll`. */
@`inline` final def ++[B >: A : ClassTag](xs: IterableOnce[B]): Array[B] = appendedAll(xs)
/** Alias for `appendedAll`. */
@`inline` final def ++[B >: A : ClassTag](xs: Array[_ <: B]): Array[B] = appendedAll(xs)
/** Tests whether this array contains a given value as an element.
 *
 *  @param elem the element to test.
 *  @return `true` if some element of this array is equal (as determined
 *          by `==`) to `elem`, `false` otherwise.
 */
def contains(elem: A): Boolean = {
  var found = false
  var i = 0
  while (!found && i < xs.length) {
    found = xs(i) == elem
    i += 1
  }
  found
}
/** Returns a copy of this array with patched values.
 *  Patching at negative indices is the same as patching starting at 0.
 *  Patching at indices at or larger than the length of the original array appends the patch to the end.
 *  If more values are replaced than actually exist, the excess is ignored.
 *
 *  @param from The start index from which to patch
 *  @param other The patch values
 *  @param replaced The number of values in the original array that are replaced by the patch.
 */
def patch[B >: A : ClassTag](from: Int, other: IterableOnce[B], replaced: Int): Array[B] = {
  val builder = ArrayBuilder.make[B]
  val patchSize = other.knownSize
  // Negative replacement counts behave like zero.
  val replacedCount = if (replaced < 0) 0 else replaced
  if (patchSize >= 0) builder.sizeHint(xs.length + patchSize - replacedCount)
  // Portion of this array preceding the patch point, clamped to valid indices.
  val prefixLen = if (from > 0) min(from, xs.length) else 0
  if (prefixLen > 0) builder.addAll(xs, 0, prefixLen)
  builder ++= other
  // Portion of this array after the replaced section, if anything survives.
  val suffixLen = xs.length - prefixLen - replacedCount
  if (suffixLen > 0) builder.addAll(xs, xs.length - suffixLen, suffixLen)
  builder.result()
}
/** Converts an array of pairs into an array of first elements and an array of second elements.
 *
 *  @tparam A1 the type of the first half of the element pairs
 *  @tparam A2 the type of the second half of the element pairs
 *  @param asPair an implicit conversion which asserts that the element type
 *                of this Array is a pair.
 *  @param ct1 a class tag for the `A1` type parameter that is required to create an instance
 *             of `Array[A1]`
 *  @param ct2 a class tag for the `A2` type parameter that is required to create an instance
 *             of `Array[A2]`
 *  @return a pair of Arrays, containing, respectively, the first and second half
 *          of each element pair of this Array.
 */
def unzip[A1, A2](implicit asPair: A => (A1, A2), ct1: ClassTag[A1], ct2: ClassTag[A2]): (Array[A1], Array[A2]) = {
  val firsts = new Array[A1](xs.length)
  val seconds = new Array[A2](xs.length)
  var idx = 0
  while (idx < xs.length) {
    // Destructure each element into its two halves and write them in parallel.
    val (a, b) = asPair(xs(idx))
    firsts(idx) = a
    seconds(idx) = b
    idx += 1
  }
  (firsts, seconds)
}
/** Converts an array of triples into three arrays, one containing the elements from each position of the triple.
 *
 *  @tparam A1 the type of the first of three elements in the triple
 *  @tparam A2 the type of the second of three elements in the triple
 *  @tparam A3 the type of the third of three elements in the triple
 *  @param asTriple an implicit conversion which asserts that the element type
 *                  of this Array is a triple.
 *  @param ct1 a class tag for T1 type parameter that is required to create an instance
 *             of Array[T1]
 *  @param ct2 a class tag for T2 type parameter that is required to create an instance
 *             of Array[T2]
 *  @param ct3 a class tag for T3 type parameter that is required to create an instance
 *             of Array[T3]
 *  @return a triple of Arrays, containing, respectively, the first, second, and third
 *          elements from each element triple of this Array.
 */
def unzip3[A1, A2, A3](implicit asTriple: A => (A1, A2, A3), ct1: ClassTag[A1], ct2: ClassTag[A2],
                       ct3: ClassTag[A3]): (Array[A1], Array[A2], Array[A3]) = {
  val firsts = new Array[A1](xs.length)
  val seconds = new Array[A2](xs.length)
  val thirds = new Array[A3](xs.length)
  var idx = 0
  while (idx < xs.length) {
    // Destructure each triple and write its parts into the three result arrays.
    val (a, b, c) = asTriple(xs(idx))
    firsts(idx) = a
    seconds(idx) = b
    thirds(idx) = c
    idx += 1
  }
  (firsts, seconds, thirds)
}
/** Transposes a two dimensional array.
 *
 *  @tparam B Type of row elements.
 *  @param asArray A function that converts elements of this array to rows - arrays of type `B`.
 *  @return An array obtained by replacing elements of this array with the rows they represent.
 */
def transpose[B](implicit asArray: A => Array[B]): Array[Array[B]] = {
  // Runtime class of a row (Array[B]), recovered from this array's component type;
  // used to fabricate the ClassTags needed for the result arrays.
  val aClass = xs.getClass.getComponentType
  val bb = new ArrayBuilder.ofRef[Array[B]]()(ClassTag[Array[B]](aClass))
  if (xs.length == 0) bb.result()
  else {
    def mkRowBuilder() = ArrayBuilder.make[B](ClassTag[B](aClass.getComponentType))
    // One builder per output row; the count is taken from the width of the first input row.
    val bs = new ArrayOps(asArray(xs(0))).map((x: B) => mkRowBuilder())
    // Distribute each input row element-by-element across the column builders.
    for (xs <- this) {
      var i = 0
      for (x <- new ArrayOps(asArray(xs))) {
        bs(i) += x
        i += 1
      }
    }
    for (b <- new ArrayOps(bs)) bb += b.result()
    bb.result()
  }
}
/** Apply `f` to each element for its side effects.
 *  Note: [U] parameter needed to help scalac's type inference.
 */
def foreach[U](f: A => U): Unit = {
  val len = xs.length
  var i = 0
  // Dispatch once on the runtime element type so that each loop body indexes
  // into an array whose element type is statically known to the JIT,
  // instead of going through a generic array access per element.
  (xs: Any @unchecked) match {
    case xs: Array[AnyRef]  => while (i < len) { f(xs(i).asInstanceOf[A]); i = i+1 }
    case xs: Array[Int]     => while (i < len) { f(xs(i).asInstanceOf[A]); i = i+1 }
    case xs: Array[Double]  => while (i < len) { f(xs(i).asInstanceOf[A]); i = i+1 }
    case xs: Array[Long]    => while (i < len) { f(xs(i).asInstanceOf[A]); i = i+1 }
    case xs: Array[Float]   => while (i < len) { f(xs(i).asInstanceOf[A]); i = i+1 }
    case xs: Array[Char]    => while (i < len) { f(xs(i).asInstanceOf[A]); i = i+1 }
    case xs: Array[Byte]    => while (i < len) { f(xs(i).asInstanceOf[A]); i = i+1 }
    case xs: Array[Short]   => while (i < len) { f(xs(i).asInstanceOf[A]); i = i+1 }
    case xs: Array[Boolean] => while (i < len) { f(xs(i).asInstanceOf[A]); i = i+1 }
  }
}
/** Selects all the elements of this array ignoring the duplicates.
 *
 *  @return a new array consisting of all the elements of this array without duplicates.
 */
def distinct: Array[A] = distinctBy(identity)
/** Selects all the elements of this array ignoring the duplicates as determined by `==` after applying
 *  the transforming function `f`.
 *
 *  Delegates to `iterator.distinctBy` and collects the surviving elements back into an array.
 *
 *  @param f The transforming function whose result is used to determine the uniqueness of each element
 *  @tparam B the type of the elements after being transformed by `f`
 *  @return a new array consisting of all the elements of this array without duplicates.
 */
def distinctBy[B](f: A => B): Array[A] =
  ArrayBuilder.make[A].addAll(iterator.distinctBy(f)).result()
/** A copy of this array with an element value appended until a given target length is reached.
 *
 *  @param len the target length
 *  @param elem the padding value
 *  @tparam B the element type of the returned array.
 *  @return a new array consisting of
 *          all elements of this array followed by the minimal number of occurrences of `elem` so
 *          that the resulting collection has a length of at least `len`.
 */
def padTo[B >: A : ClassTag](len: Int, elem: B): Array[B] = {
  // Never shrink: an already-long-enough array is simply copied.
  val targetLen = max(xs.length, len)
  val result = Array.copyAs[B](xs, targetLen)
  var idx = xs.length
  while (idx < targetLen) {
    result(idx) = elem
    idx += 1
  }
  result
}
/** Produces the range of all indices of this sequence.
 *
 *  @return a `Range` value from `0` to one less than the length of this array.
 */
def indices: Range = 0 until xs.length
/** Partitions this array into a map of arrays according to some discriminator function.
 *
 *  @param f the discriminator function.
 *  @tparam K the type of keys returned by the discriminator function.
 *  @return A map from keys to arrays such that the following invariant holds:
 *          {{{
 *            (xs groupBy f)(k) = xs filter (x => f(x) == k)
 *          }}}
 *          That is, every key `k` is bound to an array of those elements `x`
 *          for which `f(x)` equals `k`.
 */
def groupBy[K](f: A => K): immutable.Map[K, Array[A]] = {
  // Accumulate each group in a mutable builder keyed by its discriminator value.
  val groups = mutable.Map.empty[K, ArrayBuilder[A]]
  var idx = 0
  while (idx < xs.length) {
    val elem = xs(idx)
    groups.getOrElseUpdate(f(elem), ArrayBuilder.make[A]) += elem
    idx += 1
  }
  // Freeze every builder into an array and the whole thing into an immutable map.
  groups.view.mapValues(_.result()).toMap
}
/**
 * Partitions this array into a map of arrays according to a discriminator function `key`.
 * Each element in a group is transformed into a value of type `B` using the `value` function.
 *
 * It is equivalent to `groupBy(key).mapValues(_.map(f))`, but more efficient.
 *
 * {{{
 *   case class User(name: String, age: Int)
 *
 *   def namesByAge(users: Array[User]): Map[Int, Array[String]] =
 *     users.groupMap(_.age)(_.name)
 * }}}
 *
 * @param key the discriminator function
 * @param f the element transformation function
 * @tparam K the type of keys returned by the discriminator function
 * @tparam B the type of values returned by the transformation function
 */
def groupMap[K, B : ClassTag](key: A => K)(f: A => B): immutable.Map[K, Array[B]] = {
  // Like groupBy, but each element is transformed by `f` before being stored,
  // avoiding an intermediate per-group array.
  val groups = mutable.Map.empty[K, ArrayBuilder[B]]
  var idx = 0
  while (idx < xs.length) {
    val elem = xs(idx)
    groups.getOrElseUpdate(key(elem), ArrayBuilder.make[B]) += f(elem)
    idx += 1
  }
  groups.view.mapValues(_.result()).toMap
}
/** Alias for `toIndexedSeq`. */
@`inline` final def toSeq: immutable.Seq[A] = toIndexedSeq
/** An immutable `IndexedSeq` containing the elements of this array.
 *  The array is copied first, so later mutation of it is not visible
 *  through the returned sequence.
 */
def toIndexedSeq: immutable.IndexedSeq[A] =
  immutable.ArraySeq.unsafeWrapArray(Array.copyOf(xs, xs.length))
/** Copy elements of this array to another array.
 *  Fills the given array `xs` starting at index 0.
 *  Copying will stop once either all the elements of this array have been copied,
 *  or the end of the array is reached.
 *
 *  @param xs the array to fill.
 *  @tparam B the type of the elements of the array.
 *  @return the number of elements copied.
 */
def copyToArray[B >: A](xs: Array[B]): Int = copyToArray(xs, 0)
/** Copy elements of this array to another array.
 *  Fills the given array `xs` starting at index `start`.
 *  Copying will stop once either all the elements of this array have been copied,
 *  or the end of the array is reached.
 *
 *  @param xs the array to fill.
 *  @param start the starting index within the destination array.
 *  @tparam B the type of the elements of the array.
 *  @return the number of elements copied.
 */
def copyToArray[B >: A](xs: Array[B], start: Int): Int = copyToArray(xs, start, Int.MaxValue)
/** Copy elements of this array to another array.
 *  Fills the given array `xs` starting at index `start` with at most `len` values.
 *  Copying will stop once either all the elements of this array have been copied,
 *  or the end of the array is reached, or `len` elements have been copied.
 *
 *  @param xs the array to fill.
 *  @param start the starting index within the destination array.
 *  @param len the maximal number of elements to copy.
 *  @tparam B the type of the elements of the array.
 *  @return the number of elements copied.
 */
def copyToArray[B >: A](xs: Array[B], start: Int, len: Int): Int = {
  // Clamp the copy size to source length, destination capacity and `len`.
  val copied = IterableOnce.elemsToCopyToArray(this.xs.length, xs.length, start, len)
  if (copied > 0) Array.copy(this.xs, 0, xs, start, copied)
  copied
}
/** Create a copy of this array with the specified element type. */
def toArray[B >: A: ClassTag]: Array[B] = {
  val result = new Array[B](xs.length)
  Array.copy(xs, 0, result, 0, xs.length)
  result
}
/** Counts the number of elements in this array which satisfy a predicate. */
def count(p: A => Boolean): Int = {
  val len = xs.length
  var matched = 0
  var idx = 0
  while (idx < len) {
    if (p(xs(idx))) matched += 1
    idx += 1
  }
  matched
}
// can't use a default arg because we already have another overload with a default arg
/** Tests whether this array starts with the given array.
 *  Equivalent to `startsWith(that, 0)`.
 */
@`inline` def startsWith[B >: A](that: Array[B]): Boolean = startsWith(that, 0)
/** Tests whether this array contains the given array at a given index.
 *
 *  @param that the array to test
 *  @param offset the index where the array is searched.
 *  @return `true` if the array `that` is contained in this array at
 *          index `offset`, otherwise `false`.
 */
def startsWith[B >: A](that: Array[B], offset: Int): Boolean = {
  // Negative offsets are treated as 0.
  val from = if (offset < 0) 0 else offset
  val patLen = that.length
  // If the pattern cannot fit, only an empty pattern matches.
  if (patLen > xs.length - from) patLen == 0
  else {
    var i = 0
    var matched = true
    while (matched && i < patLen) {
      if (xs(from + i) != that(i)) matched = false
      i += 1
    }
    matched
  }
}
/** Tests whether this array ends with the given array.
 *
 *  @param that the array to test
 *  @return `true` if this array has `that` as a suffix, `false` otherwise.
 */
def endsWith[B >: A](that: Array[B]): Boolean = {
  val patLen = that.length
  // Index in this array where a matching suffix would have to begin.
  val suffixStart = xs.length - patLen
  if (suffixStart < 0) false
  else {
    var i = 0
    var matched = true
    while (matched && i < patLen) {
      if (xs(suffixStart + i) != that(i)) matched = false
      i += 1
    }
    matched
  }
}
/** A copy of this array with one single replaced element.
 *  @param index the position of the replacement
 *  @param elem the replacing element
 *  @return a new array which is a copy of this array with the element at position `index` replaced by `elem`.
 *  @throws IndexOutOfBoundsException if `index` does not satisfy `0 <= index < length`.
 */
def updated[B >: A : ClassTag](index: Int, elem: B): Array[B] = {
  if (index < 0 || index >= xs.length)
    throw new IndexOutOfBoundsException(s"$index is out of bounds (min 0, max ${xs.length-1})")
  // Widen-copy the whole array, then overwrite the single slot.
  val result = Array.copyAs[B](xs, xs.length)
  result(index) = elem
  result
}
/** A lazy `IndexedSeqView` over the elements of this array. */
@`inline` def view: IndexedSeqView[A] = new ArrayOps.ArrayView[A](xs)
/* ************************************************************************************************************
The remaining methods are provided for completeness but they delegate to mutable.ArraySeq implementations which
may not provide the best possible performance. We need them in `ArrayOps` because their return type
mentions `C` (which is `Array[A]` in `StringOps` and `mutable.ArraySeq[A]` in `mutable.ArraySeq`).
************************************************************************************************************ */
/** Computes the multiset difference between this array and another sequence.
 *
 *  Delegates to `mutable.ArraySeq`; see the performance note preceding this group of methods.
 *
 *  @param that the sequence of elements to remove
 *  @return a new array which contains all elements of this array
 *          except some of occurrences of elements that also appear in `that`.
 *          If an element value `x` appears
 *          ''n'' times in `that`, then the first ''n'' occurrences of `x` will not form
 *          part of the result, but any following occurrences will.
 */
def diff[B >: A](that: Seq[B]): Array[A] = mutable.ArraySeq.make(xs).diff(that).toArray[A]
/** Computes the multiset intersection between this array and another sequence.
 *
 *  Delegates to `mutable.ArraySeq`; see the performance note preceding this group of methods.
 *
 *  @param that the sequence of elements to intersect with.
 *  @return a new array which contains all elements of this array
 *          which also appear in `that`.
 *          If an element value `x` appears
 *          ''n'' times in `that`, then the first ''n'' occurrences of `x` will be retained
 *          in the result, but any following occurrences will be omitted.
 */
def intersect[B >: A](that: Seq[B]): Array[A] = mutable.ArraySeq.make(xs).intersect(that).toArray[A]
/** Groups elements in fixed size blocks by passing a "sliding window"
 *  over them (as opposed to partitioning them, as is done in grouped.)
 *
 *  Delegates to `mutable.ArraySeq`; see the performance note preceding this group of methods.
 *
 *  @see [[scala.collection.Iterator]], method `sliding`
 *
 *  @param size the number of elements per group
 *  @param step the distance between the first elements of successive groups
 *  @return An iterator producing arrays of size `size`, except the
 *          last element (which may be the only element) will be truncated
 *          if there are fewer than `size` elements remaining to be grouped.
 */
def sliding(size: Int, step: Int = 1): Iterator[Array[A]] = mutable.ArraySeq.make(xs).sliding(size, step).map(_.toArray[A])
/** Iterates over combinations of elements.
 *
 *  Delegates to `mutable.ArraySeq`; see the performance note preceding this group of methods.
 *
 *  A '''combination''' of length `n` is a sequence of `n` elements selected in order of their first index in this sequence.
 *
 *  For example, `"xyx"` has two combinations of length 2. The `x` is selected first: `"xx"`, `"xy"`.
 *  The sequence `"yx"` is not returned as a combination because it is subsumed by `"xy"`.
 *
 *  If there is more than one way to generate the same combination, only one will be returned.
 *
 *  For example, the result `"xy"` arbitrarily selected one of the `x` elements.
 *
 *  As a further illustration, `"xyxx"` has three different ways to generate `"xy"` because there are three elements `x`
 *  to choose from. Moreover, there are three unordered pairs `"xx"` but only one is returned.
 *
 *  It is not specified which of these equal combinations is returned. It is an implementation detail
 *  that should not be relied on. For example, the combination `"xx"` does not necessarily contain
 *  the first `x` in this sequence. This behavior is observable if the elements compare equal
 *  but are not identical.
 *
 *  As a consequence, `"xyx".combinations(3).next()` is `"xxy"`: the combination does not reflect the order
 *  of the original sequence, but the order in which elements were selected, by "first index";
 *  the order of each `x` element is also arbitrary.
 *
 *  @return An Iterator which traverses the n-element combinations of this array
 *  @example {{{
 *    Array('a', 'b', 'b', 'b', 'c').combinations(2).map(runtime.ScalaRunTime.stringOf).foreach(println)
 *    // Array(a, b)
 *    // Array(a, c)
 *    // Array(b, b)
 *    // Array(b, c)
 *    Array('b', 'a', 'b').combinations(2).map(runtime.ScalaRunTime.stringOf).foreach(println)
 *    // Array(b, b)
 *    // Array(b, a)
 *  }}}
 */
def combinations(n: Int): Iterator[Array[A]] = mutable.ArraySeq.make(xs).combinations(n).map(_.toArray[A])
/** Iterates over distinct permutations of elements.
 *
 *  Delegates to `mutable.ArraySeq`; see the performance note preceding this group of methods.
 *
 *  @return An Iterator which traverses the distinct permutations of this array.
 *  @example {{{
 *    Array('a', 'b', 'b').permutations.map(runtime.ScalaRunTime.stringOf).foreach(println)
 *    // Array(a, b, b)
 *    // Array(b, a, b)
 *    // Array(b, b, a)
 *  }}}
 */
def permutations: Iterator[Array[A]] = mutable.ArraySeq.make(xs).permutations.map(_.toArray[A])
// we have another overload here, so we need to duplicate this method
/** Tests whether this array contains the given sequence at a given index.
 *
 *  Delegates to `mutable.ArraySeq`; see the performance note preceding this group of methods.
 *
 *  @param that the sequence to test
 *  @param offset the index where the sequence is searched.
 *  @return `true` if the sequence `that` is contained in this array at
 *          index `offset`, otherwise `false`.
 */
def startsWith[B >: A](that: IterableOnce[B], offset: Int = 0): Boolean = mutable.ArraySeq.make(xs).startsWith(that, offset)
// we have another overload here, so we need to duplicate this method
/** Tests whether this array ends with the given sequence.
 *
 *  Delegates to `mutable.ArraySeq`; see the performance note preceding this group of methods.
 *
 *  @param that the sequence to test
 *  @return `true` if this array has `that` as a suffix, `false` otherwise.
 */
def endsWith[B >: A](that: Iterable[B]): Boolean = mutable.ArraySeq.make(xs).endsWith(that)
}
| scala/scala | src/library/scala/collection/ArrayOps.scala | Scala | apache-2.0 | 67,010 |
import stainless.lang._
// Stainless verification benchmark exercising nested polymorphic methods:
// an inner generic function (`bar`) whose result type mentions the outer
// method's type parameter (`Toto`).
object TypeParams3 {
  // Abstract interface parameterised on the value it exposes.
  abstract class Test[A] {
    def something: A
  }
  // Carries three independently-typed fields but implements Test for the middle one.
  case class FooBar[Foo, Bar, Baz](foo: Foo, bar: Bar, baz: Baz) extends Test[Bar] {
    def something: Bar = bar
  }
  // Builds a Test[Toto] through the nested generic function `bar`, which mixes
  // its own type parameter (`Str`) with the enclosing `Toto` in one FooBar.
  def foo[Toto](toto: Toto, y: BigInt): Test[Toto] = {
    require(y == 0)
    def bar[Str](fooStr: Str): Test[Toto] = {
      FooBar(fooStr, toto, y)
    }
    bar("FooStr")
  }
  // Verified property: extracting `something` yields the value originally passed in.
  def test = {
    foo(Some("Test"), 0).something == Some("Test")
  }.holds
}
| epfl-lara/stainless | frontends/benchmarks/extraction/valid/TypeParams3.scala | Scala | apache-2.0 | 476 |
/*
* Copyright 2015 Databricks Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.sql.perf.tpcds
import com.databricks.spark.sql.perf.{Benchmark, ExecutionMode, Query}
/**
* This implements the official TPCDS v1.4 queries with only cosmetic modifications
* (noted for each query).
* Don't modify this except for these kind of modifications.
*/
trait Tpcds_1_4_Queries extends Benchmark {
import ExecutionMode._
// should be random generated based on scale
// RC=ulist(random(1, rowcount("store_sales")/5,uniform),5);
// Fixed placeholder values are used here instead of the random list described above.
val rc = Array(1000000, 1000000, 1000000, 1000000, 1000000)
// Queries the TPCDS 1.4 queries using the qualifcations values in the templates.
val tpcds1_4Queries = Seq(
("q1", """
| WITH customer_total_return AS
| (SELECT sr_customer_sk AS ctr_customer_sk, sr_store_sk AS ctr_store_sk,
| sum(sr_return_amt) AS ctr_total_return
| FROM store_returns, date_dim
| WHERE sr_returned_date_sk = d_date_sk AND d_year = 2000
| GROUP BY sr_customer_sk, sr_store_sk)
| SELECT c_customer_id
| FROM customer_total_return ctr1, store, customer
| WHERE ctr1.ctr_total_return >
| (SELECT avg(ctr_total_return)*1.2
| FROM customer_total_return ctr2
| WHERE ctr1.ctr_store_sk = ctr2.ctr_store_sk)
| AND s_store_sk = ctr1.ctr_store_sk
| AND s_state = 'TN'
| AND ctr1.ctr_customer_sk = c_customer_sk
| ORDER BY c_customer_id LIMIT 100
""".stripMargin),
("q2", """
| WITH wscs as
| (SELECT sold_date_sk, sales_price
| FROM (SELECT ws_sold_date_sk sold_date_sk, ws_ext_sales_price sales_price
| FROM web_sales) x
| UNION ALL
| (SELECT cs_sold_date_sk sold_date_sk, cs_ext_sales_price sales_price
| FROM catalog_sales)),
| wswscs AS
| (SELECT d_week_seq,
| sum(case when (d_day_name='Sunday') then sales_price else null end) sun_sales,
| sum(case when (d_day_name='Monday') then sales_price else null end) mon_sales,
| sum(case when (d_day_name='Tuesday') then sales_price else null end) tue_sales,
| sum(case when (d_day_name='Wednesday') then sales_price else null end) wed_sales,
| sum(case when (d_day_name='Thursday') then sales_price else null end) thu_sales,
| sum(case when (d_day_name='Friday') then sales_price else null end) fri_sales,
| sum(case when (d_day_name='Saturday') then sales_price else null end) sat_sales
| FROM wscs, date_dim
| WHERE d_date_sk = sold_date_sk
| GROUP BY d_week_seq)
| SELECT d_week_seq1
| ,round(sun_sales1/sun_sales2,2)
| ,round(mon_sales1/mon_sales2,2)
| ,round(tue_sales1/tue_sales2,2)
| ,round(wed_sales1/wed_sales2,2)
| ,round(thu_sales1/thu_sales2,2)
| ,round(fri_sales1/fri_sales2,2)
| ,round(sat_sales1/sat_sales2,2)
| FROM
| (SELECT wswscs.d_week_seq d_week_seq1
| ,sun_sales sun_sales1
| ,mon_sales mon_sales1
| ,tue_sales tue_sales1
| ,wed_sales wed_sales1
| ,thu_sales thu_sales1
| ,fri_sales fri_sales1
| ,sat_sales sat_sales1
| FROM wswscs,date_dim
| WHERE date_dim.d_week_seq = wswscs.d_week_seq AND d_year = 2001) y,
| (SELECT wswscs.d_week_seq d_week_seq2
| ,sun_sales sun_sales2
| ,mon_sales mon_sales2
| ,tue_sales tue_sales2
| ,wed_sales wed_sales2
| ,thu_sales thu_sales2
| ,fri_sales fri_sales2
| ,sat_sales sat_sales2
| FROM wswscs, date_dim
| WHERE date_dim.d_week_seq = wswscs.d_week_seq AND d_year = 2001 + 1) z
| WHERE d_week_seq1=d_week_seq2-53
| ORDER BY d_week_seq1
""".stripMargin),
("q3", """
| SELECT dt.d_year, item.i_brand_id brand_id, item.i_brand brand,SUM(ss_ext_sales_price) sum_agg
| FROM date_dim dt, store_sales, item
| WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
| AND store_sales.ss_item_sk = item.i_item_sk
| AND item.i_manufact_id = 128
| AND dt.d_moy=11
| GROUP BY dt.d_year, item.i_brand, item.i_brand_id
| ORDER BY dt.d_year, sum_agg desc, brand_id
| LIMIT 100
""".stripMargin),
("q4", """
|WITH year_total AS (
| SELECT c_customer_id customer_id,
| c_first_name customer_first_name,
| c_last_name customer_last_name,
| c_preferred_cust_flag customer_preferred_cust_flag,
| c_birth_country customer_birth_country,
| c_login customer_login,
| c_email_address customer_email_address,
| d_year dyear,
| sum(((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2) year_total,
| 's' sale_type
| FROM customer, store_sales, date_dim
| WHERE c_customer_sk = ss_customer_sk AND ss_sold_date_sk = d_date_sk
| GROUP BY c_customer_id,
| c_first_name,
| c_last_name,
| c_preferred_cust_flag,
| c_birth_country,
| c_login,
| c_email_address,
| d_year
| UNION ALL
| SELECT c_customer_id customer_id,
| c_first_name customer_first_name,
| c_last_name customer_last_name,
| c_preferred_cust_flag customer_preferred_cust_flag,
| c_birth_country customer_birth_country,
| c_login customer_login,
| c_email_address customer_email_address,
| d_year dyear,
| sum((((cs_ext_list_price-cs_ext_wholesale_cost-cs_ext_discount_amt)+cs_ext_sales_price)/2) ) year_total,
| 'c' sale_type
| FROM customer, catalog_sales, date_dim
| WHERE c_customer_sk = cs_bill_customer_sk AND cs_sold_date_sk = d_date_sk
| GROUP BY c_customer_id,
| c_first_name,
| c_last_name,
| c_preferred_cust_flag,
| c_birth_country,
| c_login,
| c_email_address,
| d_year
| UNION ALL
| SELECT c_customer_id customer_id
| ,c_first_name customer_first_name
| ,c_last_name customer_last_name
| ,c_preferred_cust_flag customer_preferred_cust_flag
| ,c_birth_country customer_birth_country
| ,c_login customer_login
| ,c_email_address customer_email_address
| ,d_year dyear
| ,sum((((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt)+ws_ext_sales_price)/2) ) year_total
| ,'w' sale_type
| FROM customer, web_sales, date_dim
| WHERE c_customer_sk = ws_bill_customer_sk AND ws_sold_date_sk = d_date_sk
| GROUP BY c_customer_id,
| c_first_name,
| c_last_name,
| c_preferred_cust_flag,
| c_birth_country,
| c_login,
| c_email_address,
| d_year)
| SELECT
| t_s_secyear.customer_id,
| t_s_secyear.customer_first_name,
| t_s_secyear.customer_last_name,
| t_s_secyear.customer_preferred_cust_flag,
| t_s_secyear.customer_birth_country,
| t_s_secyear.customer_login,
| t_s_secyear.customer_email_address
| FROM year_total t_s_firstyear, year_total t_s_secyear, year_total t_c_firstyear,
| year_total t_c_secyear, year_total t_w_firstyear, year_total t_w_secyear
| WHERE t_s_secyear.customer_id = t_s_firstyear.customer_id
| and t_s_firstyear.customer_id = t_c_secyear.customer_id
| and t_s_firstyear.customer_id = t_c_firstyear.customer_id
| and t_s_firstyear.customer_id = t_w_firstyear.customer_id
| and t_s_firstyear.customer_id = t_w_secyear.customer_id
| and t_s_firstyear.sale_type = 's'
| and t_c_firstyear.sale_type = 'c'
| and t_w_firstyear.sale_type = 'w'
| and t_s_secyear.sale_type = 's'
| and t_c_secyear.sale_type = 'c'
| and t_w_secyear.sale_type = 'w'
| and t_s_firstyear.dyear = 2001
| and t_s_secyear.dyear = 2001+1
| and t_c_firstyear.dyear = 2001
| and t_c_secyear.dyear = 2001+1
| and t_w_firstyear.dyear = 2001
| and t_w_secyear.dyear = 2001+1
| and t_s_firstyear.year_total > 0
| and t_c_firstyear.year_total > 0
| and t_w_firstyear.year_total > 0
| and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total / t_c_firstyear.year_total else null end
| > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else null end
| and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total / t_c_firstyear.year_total else null end
| > case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else null end
| ORDER BY
| t_s_secyear.customer_id,
| t_s_secyear.customer_first_name,
| t_s_secyear.customer_last_name,
| t_s_secyear.customer_preferred_cust_flag,
| t_s_secyear.customer_birth_country,
| t_s_secyear.customer_login,
| t_s_secyear.customer_email_address
| LIMIT 100
""".stripMargin),
// Modifications: date arithmetic expressed as "+ interval N days"
// Modifications: "||" -> concat
("q5", """
| WITH ssr AS
| (SELECT s_store_id,
| sum(sales_price) as sales,
| sum(profit) as profit,
| sum(return_amt) as returns,
| sum(net_loss) as profit_loss
| FROM
| (SELECT ss_store_sk as store_sk,
| ss_sold_date_sk as date_sk,
| ss_ext_sales_price as sales_price,
| ss_net_profit as profit,
| cast(0 as decimal(7,2)) as return_amt,
| cast(0 as decimal(7,2)) as net_loss
| FROM store_sales
| UNION ALL
| SELECT sr_store_sk as store_sk,
| sr_returned_date_sk as date_sk,
| cast(0 as decimal(7,2)) as sales_price,
| cast(0 as decimal(7,2)) as profit,
| sr_return_amt as return_amt,
| sr_net_loss as net_loss
| FROM store_returns)
| salesreturns, date_dim, store
| WHERE date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and ((cast('2000-08-23' as date) + interval 14 days))
| and store_sk = s_store_sk
| GROUP BY s_store_id),
| csr AS
| (SELECT cp_catalog_page_id,
| sum(sales_price) as sales,
| sum(profit) as profit,
| sum(return_amt) as returns,
| sum(net_loss) as profit_loss
| FROM
| (SELECT cs_catalog_page_sk as page_sk,
| cs_sold_date_sk as date_sk,
| cs_ext_sales_price as sales_price,
| cs_net_profit as profit,
| cast(0 as decimal(7,2)) as return_amt,
| cast(0 as decimal(7,2)) as net_loss
| FROM catalog_sales
| UNION ALL
| SELECT cr_catalog_page_sk as page_sk,
| cr_returned_date_sk as date_sk,
| cast(0 as decimal(7,2)) as sales_price,
| cast(0 as decimal(7,2)) as profit,
| cr_return_amount as return_amt,
| cr_net_loss as net_loss
| from catalog_returns
| ) salesreturns, date_dim, catalog_page
| WHERE date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and ((cast('2000-08-23' as date) + interval 14 days))
| and page_sk = cp_catalog_page_sk
| GROUP BY cp_catalog_page_id)
| ,
| wsr AS
| (SELECT web_site_id,
| sum(sales_price) as sales,
| sum(profit) as profit,
| sum(return_amt) as returns,
| sum(net_loss) as profit_loss
| from
| (select ws_web_site_sk as wsr_web_site_sk,
| ws_sold_date_sk as date_sk,
| ws_ext_sales_price as sales_price,
| ws_net_profit as profit,
| cast(0 as decimal(7,2)) as return_amt,
| cast(0 as decimal(7,2)) as net_loss
| from web_sales
| union all
| select ws_web_site_sk as wsr_web_site_sk,
| wr_returned_date_sk as date_sk,
| cast(0 as decimal(7,2)) as sales_price,
| cast(0 as decimal(7,2)) as profit,
| wr_return_amt as return_amt,
| wr_net_loss as net_loss
| FROM web_returns LEFT OUTER JOIN web_sales on
| ( wr_item_sk = ws_item_sk
| and wr_order_number = ws_order_number)
| ) salesreturns, date_dim, web_site
| WHERE date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and ((cast('2000-08-23' as date) + interval 14 days))
| and wsr_web_site_sk = web_site_sk
| GROUP BY web_site_id)
| SELECT channel,
| id,
| sum(sales) as sales,
| sum(returns) as returns,
| sum(profit) as profit
| from
| (select 'store channel' as channel,
| concat('store', s_store_id) as id,
| sales,
| returns,
| (profit - profit_loss) as profit
| FROM ssr
| UNION ALL
| select 'catalog channel' as channel,
| concat('catalog_page', cp_catalog_page_id) as id,
| sales,
| returns,
| (profit - profit_loss) as profit
| FROM csr
| UNION ALL
| SELECT 'web channel' as channel,
| concat('web_site', web_site_id) as id,
| sales,
| returns,
| (profit - profit_loss) as profit
| FROM wsr
| ) x
| GROUP BY ROLLUP (channel, id)
| ORDER BY channel, id
| LIMIT 100
""".stripMargin),
("q6", """
| SELECT a.ca_state state, count(*) cnt
| FROM
| customer_address a, customer c, store_sales s, date_dim d, item i
| WHERE a.ca_address_sk = c.c_current_addr_sk
| AND c.c_customer_sk = s.ss_customer_sk
| AND s.ss_sold_date_sk = d.d_date_sk
| AND s.ss_item_sk = i.i_item_sk
| AND d.d_month_seq =
| (SELECT distinct (d_month_seq) FROM date_dim
| WHERE d_year = 2000 AND d_moy = 1)
| AND i.i_current_price > 1.2 *
| (SELECT avg(j.i_current_price) FROM item j
| WHERE j.i_category = i.i_category)
| GROUP BY a.ca_state
| HAVING count(*) >= 10
| ORDER BY cnt LIMIT 100
""".stripMargin),
("q7", """
| SELECT i_item_id,
| avg(ss_quantity) agg1,
| avg(ss_list_price) agg2,
| avg(ss_coupon_amt) agg3,
| avg(ss_sales_price) agg4
| FROM store_sales, customer_demographics, date_dim, item, promotion
| WHERE ss_sold_date_sk = d_date_sk AND
| ss_item_sk = i_item_sk AND
| ss_cdemo_sk = cd_demo_sk AND
| ss_promo_sk = p_promo_sk AND
| cd_gender = 'M' AND
| cd_marital_status = 'S' AND
| cd_education_status = 'College' AND
| (p_channel_email = 'N' or p_channel_event = 'N') AND
| d_year = 2000
| GROUP BY i_item_id
| ORDER BY i_item_id LIMIT 100
""".stripMargin),
("q8", """
| select s_store_name, sum(ss_net_profit)
| from store_sales, date_dim, store,
| (SELECT ca_zip
| from (
| (SELECT substr(ca_zip,1,5) ca_zip FROM customer_address
| WHERE substr(ca_zip,1,5) IN (
| '24128','76232','65084','87816','83926','77556','20548',
| '26231','43848','15126','91137','61265','98294','25782',
| '17920','18426','98235','40081','84093','28577','55565',
| '17183','54601','67897','22752','86284','18376','38607',
| '45200','21756','29741','96765','23932','89360','29839',
| '25989','28898','91068','72550','10390','18845','47770',
| '82636','41367','76638','86198','81312','37126','39192',
| '88424','72175','81426','53672','10445','42666','66864',
| '66708','41248','48583','82276','18842','78890','49448',
| '14089','38122','34425','79077','19849','43285','39861',
| '66162','77610','13695','99543','83444','83041','12305',
| '57665','68341','25003','57834','62878','49130','81096',
| '18840','27700','23470','50412','21195','16021','76107',
| '71954','68309','18119','98359','64544','10336','86379',
| '27068','39736','98569','28915','24206','56529','57647',
| '54917','42961','91110','63981','14922','36420','23006',
| '67467','32754','30903','20260','31671','51798','72325',
| '85816','68621','13955','36446','41766','68806','16725',
| '15146','22744','35850','88086','51649','18270','52867',
| '39972','96976','63792','11376','94898','13595','10516',
| '90225','58943','39371','94945','28587','96576','57855',
| '28488','26105','83933','25858','34322','44438','73171',
| '30122','34102','22685','71256','78451','54364','13354',
| '45375','40558','56458','28286','45266','47305','69399',
| '83921','26233','11101','15371','69913','35942','15882',
| '25631','24610','44165','99076','33786','70738','26653',
| '14328','72305','62496','22152','10144','64147','48425',
| '14663','21076','18799','30450','63089','81019','68893',
| '24996','51200','51211','45692','92712','70466','79994',
| '22437','25280','38935','71791','73134','56571','14060',
| '19505','72425','56575','74351','68786','51650','20004',
| '18383','76614','11634','18906','15765','41368','73241',
| '76698','78567','97189','28545','76231','75691','22246',
| '51061','90578','56691','68014','51103','94167','57047',
| '14867','73520','15734','63435','25733','35474','24676',
| '94627','53535','17879','15559','53268','59166','11928',
| '59402','33282','45721','43933','68101','33515','36634',
| '71286','19736','58058','55253','67473','41918','19515',
| '36495','19430','22351','77191','91393','49156','50298',
| '87501','18652','53179','18767','63193','23968','65164',
| '68880','21286','72823','58470','67301','13394','31016',
| '70372','67030','40604','24317','45748','39127','26065',
| '77721','31029','31880','60576','24671','45549','13376',
| '50016','33123','19769','22927','97789','46081','72151',
| '15723','46136','51949','68100','96888','64528','14171',
| '79777','28709','11489','25103','32213','78668','22245',
| '15798','27156','37930','62971','21337','51622','67853',
| '10567','38415','15455','58263','42029','60279','37125',
| '56240','88190','50308','26859','64457','89091','82136',
| '62377','36233','63837','58078','17043','30010','60099',
| '28810','98025','29178','87343','73273','30469','64034',
| '39516','86057','21309','90257','67875','40162','11356',
| '73650','61810','72013','30431','22461','19512','13375',
| '55307','30625','83849','68908','26689','96451','38193',
| '46820','88885','84935','69035','83144','47537','56616',
| '94983','48033','69952','25486','61547','27385','61860',
| '58048','56910','16807','17871','35258','31387','35458',
| '35576'))
| INTERSECT
| (select ca_zip
| FROM
| (SELECT substr(ca_zip,1,5) ca_zip,count(*) cnt
| FROM customer_address, customer
| WHERE ca_address_sk = c_current_addr_sk and
| c_preferred_cust_flag='Y'
| group by ca_zip
| having count(*) > 10) A1)
| ) A2
| ) V1
| where ss_store_sk = s_store_sk
| and ss_sold_date_sk = d_date_sk
| and d_qoy = 2 and d_year = 1998
| and (substr(s_zip,1,2) = substr(V1.ca_zip,1,2))
| group by s_store_name
| order by s_store_name LIMIT 100
""".stripMargin),
("q9", s"""
|select case when (select count(*) from store_sales
| where ss_quantity between 1 and 20) > ${rc(0)}
| then (select avg(ss_ext_discount_amt) from store_sales
| where ss_quantity between 1 and 20)
| else (select avg(ss_net_paid) from store_sales
| where ss_quantity between 1 and 20) end bucket1 ,
| case when (select count(*) from store_sales
| where ss_quantity between 21 and 40) > ${rc(1)}
| then (select avg(ss_ext_discount_amt) from store_sales
| where ss_quantity between 21 and 40)
| else (select avg(ss_net_paid) from store_sales
| where ss_quantity between 21 and 40) end bucket2,
| case when (select count(*) from store_sales
| where ss_quantity between 41 and 60) > ${rc(2)}
| then (select avg(ss_ext_discount_amt) from store_sales
| where ss_quantity between 41 and 60)
| else (select avg(ss_net_paid) from store_sales
| where ss_quantity between 41 and 60) end bucket3,
| case when (select count(*) from store_sales
| where ss_quantity between 61 and 80) > ${rc(3)}
| then (select avg(ss_ext_discount_amt) from store_sales
| where ss_quantity between 61 and 80)
| else (select avg(ss_net_paid) from store_sales
| where ss_quantity between 61 and 80) end bucket4,
| case when (select count(*) from store_sales
| where ss_quantity between 81 and 100) > ${rc(4)}
| then (select avg(ss_ext_discount_amt) from store_sales
| where ss_quantity between 81 and 100)
| else (select avg(ss_net_paid) from store_sales
| where ss_quantity between 81 and 100) end bucket5
|from reason
|where r_reason_sk = 1
""".stripMargin),
("q10", """
| select
| cd_gender, cd_marital_status, cd_education_status, count(*) cnt1,
| cd_purchase_estimate, count(*) cnt2, cd_credit_rating, count(*) cnt3,
| cd_dep_count, count(*) cnt4, cd_dep_employed_count, count(*) cnt5,
| cd_dep_college_count, count(*) cnt6
| from
| customer c, customer_address ca, customer_demographics
| where
| c.c_current_addr_sk = ca.ca_address_sk and
| ca_county in ('Rush County','Toole County','Jefferson County',
| 'Dona Ana County','La Porte County') and
| cd_demo_sk = c.c_current_cdemo_sk AND
| exists (select * from store_sales, date_dim
| where c.c_customer_sk = ss_customer_sk AND
| ss_sold_date_sk = d_date_sk AND
| d_year = 2002 AND
| d_moy between 1 AND 1+3) AND
| (exists (select * from web_sales, date_dim
| where c.c_customer_sk = ws_bill_customer_sk AND
| ws_sold_date_sk = d_date_sk AND
| d_year = 2002 AND
| d_moy between 1 AND 1+3) or
| exists (select * from catalog_sales, date_dim
| where c.c_customer_sk = cs_ship_customer_sk AND
| cs_sold_date_sk = d_date_sk AND
| d_year = 2002 AND
| d_moy between 1 AND 1+3))
| group by cd_gender,
| cd_marital_status,
| cd_education_status,
| cd_purchase_estimate,
| cd_credit_rating,
| cd_dep_count,
| cd_dep_employed_count,
| cd_dep_college_count
| order by cd_gender,
| cd_marital_status,
| cd_education_status,
| cd_purchase_estimate,
| cd_credit_rating,
| cd_dep_count,
| cd_dep_employed_count,
| cd_dep_college_count
|LIMIT 100
""".stripMargin),
("q11", """
| with year_total as (
| select c_customer_id customer_id
| ,c_first_name customer_first_name
| ,c_last_name customer_last_name
| ,c_preferred_cust_flag customer_preferred_cust_flag
| ,c_birth_country customer_birth_country
| ,c_login customer_login
| ,c_email_address customer_email_address
| ,d_year dyear
| ,sum(ss_ext_list_price-ss_ext_discount_amt) year_total
| ,'s' sale_type
| from customer, store_sales, date_dim
| where c_customer_sk = ss_customer_sk
| and ss_sold_date_sk = d_date_sk
| group by c_customer_id
| ,c_first_name
| ,c_last_name
| ,d_year
| ,c_preferred_cust_flag
| ,c_birth_country
| ,c_login
| ,c_email_address
| ,d_year
| union all
| select c_customer_id customer_id
| ,c_first_name customer_first_name
| ,c_last_name customer_last_name
| ,c_preferred_cust_flag customer_preferred_cust_flag
| ,c_birth_country customer_birth_country
| ,c_login customer_login
| ,c_email_address customer_email_address
| ,d_year dyear
| ,sum(ws_ext_list_price-ws_ext_discount_amt) year_total
| ,'w' sale_type
| from customer, web_sales, date_dim
| where c_customer_sk = ws_bill_customer_sk
| and ws_sold_date_sk = d_date_sk
| group by
| c_customer_id, c_first_name, c_last_name, c_preferred_cust_flag, c_birth_country,
| c_login, c_email_address, d_year)
| select
| t_s_secyear.customer_preferred_cust_flag
| from year_total t_s_firstyear
| ,year_total t_s_secyear
| ,year_total t_w_firstyear
| ,year_total t_w_secyear
| where t_s_secyear.customer_id = t_s_firstyear.customer_id
| and t_s_firstyear.customer_id = t_w_secyear.customer_id
| and t_s_firstyear.customer_id = t_w_firstyear.customer_id
| and t_s_firstyear.sale_type = 's'
| and t_w_firstyear.sale_type = 'w'
| and t_s_secyear.sale_type = 's'
| and t_w_secyear.sale_type = 'w'
| and t_s_firstyear.dyear = 2001
| and t_s_secyear.dyear = 2001+1
| and t_w_firstyear.dyear = 2001
| and t_w_secyear.dyear = 2001+1
| and t_s_firstyear.year_total > 0
| and t_w_firstyear.year_total > 0
| and case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else null end
| > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else null end
| order by t_s_secyear.customer_preferred_cust_flag
| LIMIT 100
""".stripMargin),
// Modifications: date arithmetic expressed as "+ interval N days"
("q12", """
| select
| i_item_desc, i_category, i_class, i_current_price,
| sum(ws_ext_sales_price) as itemrevenue,
| sum(ws_ext_sales_price)*100/sum(sum(ws_ext_sales_price)) over
| (partition by i_class) as revenueratio
| from
| web_sales, item, date_dim
| where
| ws_item_sk = i_item_sk
| and i_category in ('Sports', 'Books', 'Home')
| and ws_sold_date_sk = d_date_sk
| and d_date between cast('1999-02-22' as date)
| and (cast('1999-02-22' as date) + interval 30 days)
| group by
| i_item_id, i_item_desc, i_category, i_class, i_current_price
| order by
| i_category, i_class, i_item_id, i_item_desc, revenueratio
| LIMIT 100
""".stripMargin),
("q13", """
| select avg(ss_quantity)
| ,avg(ss_ext_sales_price)
| ,avg(ss_ext_wholesale_cost)
| ,sum(ss_ext_wholesale_cost)
| from store_sales
| ,store
| ,customer_demographics
| ,household_demographics
| ,customer_address
| ,date_dim
| where s_store_sk = ss_store_sk
| and ss_sold_date_sk = d_date_sk and d_year = 2001
| and((ss_hdemo_sk=hd_demo_sk
| and cd_demo_sk = ss_cdemo_sk
| and cd_marital_status = 'M'
| and cd_education_status = 'Advanced Degree'
| and ss_sales_price between 100.00 and 150.00
| and hd_dep_count = 3
| )or
| (ss_hdemo_sk=hd_demo_sk
| and cd_demo_sk = ss_cdemo_sk
| and cd_marital_status = 'S'
| and cd_education_status = 'College'
| and ss_sales_price between 50.00 and 100.00
| and hd_dep_count = 1
| ) or
| (ss_hdemo_sk=hd_demo_sk
| and cd_demo_sk = ss_cdemo_sk
| and cd_marital_status = 'W'
| and cd_education_status = '2 yr Degree'
| and ss_sales_price between 150.00 and 200.00
| and hd_dep_count = 1
| ))
| and((ss_addr_sk = ca_address_sk
| and ca_country = 'United States'
| and ca_state in ('TX', 'OH', 'TX')
| and ss_net_profit between 100 and 200
| ) or
| (ss_addr_sk = ca_address_sk
| and ca_country = 'United States'
| and ca_state in ('OR', 'NM', 'KY')
| and ss_net_profit between 150 and 300
| ) or
| (ss_addr_sk = ca_address_sk
| and ca_country = 'United States'
| and ca_state in ('VA', 'TX', 'MS')
| and ss_net_profit between 50 and 250
| ))
""".stripMargin),
// Modifications: restored missing stripMargin pipe ("|") on the ss_sold_date_sk join line,
// so the whole query strips its margin consistently with every other query in this file.
("q14a", """
|with cross_items as
| (select i_item_sk ss_item_sk
| from item,
| (select iss.i_brand_id brand_id, iss.i_class_id class_id, iss.i_category_id category_id
| from store_sales, item iss, date_dim d1
| where ss_item_sk = iss.i_item_sk
| and ss_sold_date_sk = d1.d_date_sk
| and d1.d_year between 1999 AND 1999 + 2
| intersect
| select ics.i_brand_id, ics.i_class_id, ics.i_category_id
| from catalog_sales, item ics, date_dim d2
| where cs_item_sk = ics.i_item_sk
| and cs_sold_date_sk = d2.d_date_sk
| and d2.d_year between 1999 AND 1999 + 2
| intersect
| select iws.i_brand_id, iws.i_class_id, iws.i_category_id
| from web_sales, item iws, date_dim d3
| where ws_item_sk = iws.i_item_sk
| and ws_sold_date_sk = d3.d_date_sk
| and d3.d_year between 1999 AND 1999 + 2) x
| where i_brand_id = brand_id
| and i_class_id = class_id
| and i_category_id = category_id
|),
| avg_sales as
| (select avg(quantity*list_price) average_sales
| from (
| select ss_quantity quantity, ss_list_price list_price
| from store_sales, date_dim
| where ss_sold_date_sk = d_date_sk
| and d_year between 1999 and 2001
| union all
| select cs_quantity quantity, cs_list_price list_price
| from catalog_sales, date_dim
| where cs_sold_date_sk = d_date_sk
| and d_year between 1999 and 1999 + 2
| union all
| select ws_quantity quantity, ws_list_price list_price
| from web_sales, date_dim
| where ws_sold_date_sk = d_date_sk
| and d_year between 1999 and 1999 + 2) x)
| select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales)
| from(
| select 'store' channel, i_brand_id,i_class_id
| ,i_category_id,sum(ss_quantity*ss_list_price) sales
| , count(*) number_sales
| from store_sales, item, date_dim
| where ss_item_sk in (select ss_item_sk from cross_items)
| and ss_item_sk = i_item_sk
| and ss_sold_date_sk = d_date_sk
| and d_year = 1999+2
| and d_moy = 11
| group by i_brand_id,i_class_id,i_category_id
| having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)
| union all
| select 'catalog' channel, i_brand_id,i_class_id,i_category_id, sum(cs_quantity*cs_list_price) sales, count(*) number_sales
| from catalog_sales, item, date_dim
| where cs_item_sk in (select ss_item_sk from cross_items)
| and cs_item_sk = i_item_sk
| and cs_sold_date_sk = d_date_sk
| and d_year = 1999+2
| and d_moy = 11
| group by i_brand_id,i_class_id,i_category_id
| having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales)
| union all
| select 'web' channel, i_brand_id,i_class_id,i_category_id, sum(ws_quantity*ws_list_price) sales , count(*) number_sales
| from web_sales, item, date_dim
| where ws_item_sk in (select ss_item_sk from cross_items)
| and ws_item_sk = i_item_sk
| and ws_sold_date_sk = d_date_sk
| and d_year = 1999+2
| and d_moy = 11
| group by i_brand_id,i_class_id,i_category_id
| having sum(ws_quantity*ws_list_price) > (select average_sales from avg_sales)
| ) y
| group by rollup (channel, i_brand_id,i_class_id,i_category_id)
| order by channel,i_brand_id,i_class_id,i_category_id
| limit 100
""".stripMargin),
("q14b", """
| with cross_items as
| (select i_item_sk ss_item_sk
| from item,
| (select iss.i_brand_id brand_id, iss.i_class_id class_id, iss.i_category_id category_id
| from store_sales, item iss, date_dim d1
| where ss_item_sk = iss.i_item_sk
| and ss_sold_date_sk = d1.d_date_sk
| and d1.d_year between 1999 AND 1999 + 2
| intersect
| select ics.i_brand_id, ics.i_class_id, ics.i_category_id
| from catalog_sales, item ics, date_dim d2
| where cs_item_sk = ics.i_item_sk
| and cs_sold_date_sk = d2.d_date_sk
| and d2.d_year between 1999 AND 1999 + 2
| intersect
| select iws.i_brand_id, iws.i_class_id, iws.i_category_id
| from web_sales, item iws, date_dim d3
| where ws_item_sk = iws.i_item_sk
| and ws_sold_date_sk = d3.d_date_sk
| and d3.d_year between 1999 AND 1999 + 2) x
| where i_brand_id = brand_id
| and i_class_id = class_id
| and i_category_id = category_id
| ),
| avg_sales as
| (select avg(quantity*list_price) average_sales
| from (select ss_quantity quantity, ss_list_price list_price
| from store_sales, date_dim
| where ss_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2
| union all
| select cs_quantity quantity, cs_list_price list_price
| from catalog_sales, date_dim
| where cs_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2
| union all
| select ws_quantity quantity, ws_list_price list_price
| from web_sales, date_dim
| where ws_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2) x)
| select * from
| (select 'store' channel, i_brand_id,i_class_id,i_category_id
| ,sum(ss_quantity*ss_list_price) sales, count(*) number_sales
| from store_sales, item, date_dim
| where ss_item_sk in (select ss_item_sk from cross_items)
| and ss_item_sk = i_item_sk
| and ss_sold_date_sk = d_date_sk
| and d_week_seq = (select d_week_seq from date_dim
| where d_year = 1999 + 1 and d_moy = 12 and d_dom = 11)
| group by i_brand_id,i_class_id,i_category_id
| having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) this_year,
| (select 'store' channel, i_brand_id,i_class_id
| ,i_category_id, sum(ss_quantity*ss_list_price) sales, count(*) number_sales
| from store_sales, item, date_dim
| where ss_item_sk in (select ss_item_sk from cross_items)
| and ss_item_sk = i_item_sk
| and ss_sold_date_sk = d_date_sk
| and d_week_seq = (select d_week_seq from date_dim
| where d_year = 1999 and d_moy = 12 and d_dom = 11)
| group by i_brand_id,i_class_id,i_category_id
| having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) last_year
| where this_year.i_brand_id= last_year.i_brand_id
| and this_year.i_class_id = last_year.i_class_id
| and this_year.i_category_id = last_year.i_category_id
| order by this_year.channel, this_year.i_brand_id, this_year.i_class_id, this_year.i_category_id
| limit 100
""".stripMargin),
("q15", """
| select ca_zip, sum(cs_sales_price)
| from catalog_sales, customer, customer_address, date_dim
| where cs_bill_customer_sk = c_customer_sk
| and c_current_addr_sk = ca_address_sk
| and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475',
| '85392', '85460', '80348', '81792')
| or ca_state in ('CA','WA','GA')
| or cs_sales_price > 500)
| and cs_sold_date_sk = d_date_sk
| and d_qoy = 2 and d_year = 2001
| group by ca_zip
| order by ca_zip
| limit 100
""".stripMargin),
// Modifications: double-quote (") identifier quoting -> backquote (`) identifier quoting
("q16", """
| select
| count(distinct cs_order_number) as `order count`,
| sum(cs_ext_ship_cost) as `total shipping cost`,
| sum(cs_net_profit) as `total net profit`
| from
| catalog_sales cs1, date_dim, customer_address, call_center
| where
| d_date between '2002-02-01' and (cast('2002-02-01' as date) + interval 60 days)
| and cs1.cs_ship_date_sk = d_date_sk
| and cs1.cs_ship_addr_sk = ca_address_sk
| and ca_state = 'GA'
| and cs1.cs_call_center_sk = cc_call_center_sk
| and cc_county in ('Williamson County','Williamson County','Williamson County','Williamson County', 'Williamson County')
| and exists (select *
| from catalog_sales cs2
| where cs1.cs_order_number = cs2.cs_order_number
| and cs1.cs_warehouse_sk <> cs2.cs_warehouse_sk)
| and not exists(select *
| from catalog_returns cr1
| where cs1.cs_order_number = cr1.cr_order_number)
| order by count(distinct cs_order_number)
| limit 100
""".stripMargin),
("q17", """
| select i_item_id
| ,i_item_desc
| ,s_state
| ,count(ss_quantity) as store_sales_quantitycount
| ,avg(ss_quantity) as store_sales_quantityave
| ,stddev_samp(ss_quantity) as store_sales_quantitystdev
| ,stddev_samp(ss_quantity)/avg(ss_quantity) as store_sales_quantitycov
| ,count(sr_return_quantity) as_store_returns_quantitycount
| ,avg(sr_return_quantity) as_store_returns_quantityave
| ,stddev_samp(sr_return_quantity) as_store_returns_quantitystdev
| ,stddev_samp(sr_return_quantity)/avg(sr_return_quantity) as store_returns_quantitycov
| ,count(cs_quantity) as catalog_sales_quantitycount ,avg(cs_quantity) as catalog_sales_quantityave
| ,stddev_samp(cs_quantity)/avg(cs_quantity) as catalog_sales_quantitystdev
| ,stddev_samp(cs_quantity)/avg(cs_quantity) as catalog_sales_quantitycov
| from store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2, date_dim d3, store, item
| where d1.d_quarter_name = '2001Q1'
| and d1.d_date_sk = ss_sold_date_sk
| and i_item_sk = ss_item_sk
| and s_store_sk = ss_store_sk
| and ss_customer_sk = sr_customer_sk
| and ss_item_sk = sr_item_sk
| and ss_ticket_number = sr_ticket_number
| and sr_returned_date_sk = d2.d_date_sk
| and d2.d_quarter_name in ('2001Q1','2001Q2','2001Q3')
| and sr_customer_sk = cs_bill_customer_sk
| and sr_item_sk = cs_item_sk
| and cs_sold_date_sk = d3.d_date_sk
| and d3.d_quarter_name in ('2001Q1','2001Q2','2001Q3')
| group by i_item_id, i_item_desc, s_state
| order by i_item_id, i_item_desc, s_state
| limit 100
""".stripMargin),
// Modifications: "numeric" -> "decimal"
("q18", """
| select i_item_id,
| ca_country,
| ca_state,
| ca_county,
| avg( cast(cs_quantity as decimal(12,2))) agg1,
| avg( cast(cs_list_price as decimal(12,2))) agg2,
| avg( cast(cs_coupon_amt as decimal(12,2))) agg3,
| avg( cast(cs_sales_price as decimal(12,2))) agg4,
| avg( cast(cs_net_profit as decimal(12,2))) agg5,
| avg( cast(c_birth_year as decimal(12,2))) agg6,
| avg( cast(cd1.cd_dep_count as decimal(12,2))) agg7
| from catalog_sales, customer_demographics cd1,
| customer_demographics cd2, customer, customer_address, date_dim, item
| where cs_sold_date_sk = d_date_sk and
| cs_item_sk = i_item_sk and
| cs_bill_cdemo_sk = cd1.cd_demo_sk and
| cs_bill_customer_sk = c_customer_sk and
| cd1.cd_gender = 'F' and
| cd1.cd_education_status = 'Unknown' and
| c_current_cdemo_sk = cd2.cd_demo_sk and
| c_current_addr_sk = ca_address_sk and
| c_birth_month in (1,6,8,9,12,2) and
| d_year = 1998 and
| ca_state in ('MS','IN','ND','OK','NM','VA','MS')
| group by rollup (i_item_id, ca_country, ca_state, ca_county)
| order by ca_country, ca_state, ca_county, i_item_id
| LIMIT 100
""".stripMargin),
("q19", """
| select i_brand_id brand_id, i_brand brand, i_manufact_id, i_manufact,
| sum(ss_ext_sales_price) ext_price
| from date_dim, store_sales, item,customer,customer_address,store
| where d_date_sk = ss_sold_date_sk
| and ss_item_sk = i_item_sk
| and i_manager_id = 8
| and d_moy = 11
| and d_year = 1998
| and ss_customer_sk = c_customer_sk
| and c_current_addr_sk = ca_address_sk
| and substr(ca_zip,1,5) <> substr(s_zip,1,5)
| and ss_store_sk = s_store_sk
| group by i_brand, i_brand_id, i_manufact_id, i_manufact
| order by ext_price desc, brand, brand_id, i_manufact_id, i_manufact
| limit 100
""".stripMargin),
("q20", """
|select i_item_desc
| ,i_category
| ,i_class
| ,i_current_price
| ,sum(cs_ext_sales_price) as itemrevenue
| ,sum(cs_ext_sales_price)*100/sum(sum(cs_ext_sales_price)) over
| (partition by i_class) as revenueratio
| from catalog_sales, item, date_dim
| where cs_item_sk = i_item_sk
| and i_category in ('Sports', 'Books', 'Home')
| and cs_sold_date_sk = d_date_sk
| and d_date between cast('1999-02-22' as date)
| and (cast('1999-02-22' as date) + interval 30 days)
| group by i_item_id, i_item_desc, i_category, i_class, i_current_price
| order by i_category, i_class, i_item_id, i_item_desc, revenueratio
| limit 100
""".stripMargin),
// Modifications: "+ N days" date arithmetic -> "+ interval N days"
("q21", """
| select * from(
| select w_warehouse_name, i_item_id,
| sum(case when (cast(d_date as date) < cast ('2000-03-11' as date))
| then inv_quantity_on_hand
| else 0 end) as inv_before,
| sum(case when (cast(d_date as date) >= cast ('2000-03-11' as date))
| then inv_quantity_on_hand
| else 0 end) as inv_after
| from inventory, warehouse, item, date_dim
| where i_current_price between 0.99 and 1.49
| and i_item_sk = inv_item_sk
| and inv_warehouse_sk = w_warehouse_sk
| and inv_date_sk = d_date_sk
| and d_date between (cast('2000-03-11' as date) - interval 30 days)
| and (cast('2000-03-11' as date) + interval 30 days)
| group by w_warehouse_name, i_item_id) x
| where (case when inv_before > 0
| then inv_after / inv_before
| else null
| end) between 2.0/3.0 and 3.0/2.0
| order by w_warehouse_name, i_item_id
| limit 100
""".stripMargin),
("q22", """
| select i_product_name, i_brand, i_class, i_category, avg(inv_quantity_on_hand) qoh
| from inventory, date_dim, item, warehouse
| where inv_date_sk=d_date_sk
| and inv_item_sk=i_item_sk
| and inv_warehouse_sk = w_warehouse_sk
| and d_month_seq between 1200 and 1200 + 11
| group by rollup(i_product_name, i_brand, i_class, i_category)
| order by qoh, i_product_name, i_brand, i_class, i_category
| limit 100
""".stripMargin),
("q23a", """
| with frequent_ss_items as
| (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt
| from store_sales, date_dim, item
| where ss_sold_date_sk = d_date_sk
| and ss_item_sk = i_item_sk
| and d_year in (2000, 2000+1, 2000+2,2000+3)
| group by substr(i_item_desc,1,30),i_item_sk,d_date
| having count(*) >4),
| max_store_sales as
| (select max(csales) tpcds_cmax
| from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales
| from store_sales, customer, date_dim
| where ss_customer_sk = c_customer_sk
| and ss_sold_date_sk = d_date_sk
| and d_year in (2000, 2000+1, 2000+2,2000+3)
| group by c_customer_sk) x),
| best_ss_customer as
| (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales
| from store_sales, customer
| where ss_customer_sk = c_customer_sk
| group by c_customer_sk
| having sum(ss_quantity*ss_sales_price) > (50/100.0) *
| (select * from max_store_sales))
| select sum(sales)
| from ((select cs_quantity*cs_list_price sales
| from catalog_sales, date_dim
| where d_year = 2000
| and d_moy = 2
| and cs_sold_date_sk = d_date_sk
| and cs_item_sk in (select item_sk from frequent_ss_items)
| and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer))
| union all
| (select ws_quantity*ws_list_price sales
| from web_sales, date_dim
| where d_year = 2000
| and d_moy = 2
| and ws_sold_date_sk = d_date_sk
| and ws_item_sk in (select item_sk from frequent_ss_items)
| and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer))) y
| limit 100
""".stripMargin),
("q23b", """
|
| with frequent_ss_items as
| (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt
| from store_sales, date_dim, item
| where ss_sold_date_sk = d_date_sk
| and ss_item_sk = i_item_sk
| and d_year in (2000, 2000+1, 2000+2,2000+3)
| group by substr(i_item_desc,1,30),i_item_sk,d_date
| having count(*) > 4),
| max_store_sales as
| (select max(csales) tpcds_cmax
| from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales
| from store_sales, customer, date_dim
| where ss_customer_sk = c_customer_sk
| and ss_sold_date_sk = d_date_sk
| and d_year in (2000, 2000+1, 2000+2,2000+3)
| group by c_customer_sk) x),
| best_ss_customer as
| (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales
| from store_sales
| ,customer
| where ss_customer_sk = c_customer_sk
| group by c_customer_sk
| having sum(ss_quantity*ss_sales_price) > (50/100.0) *
| (select * from max_store_sales))
| select c_last_name,c_first_name,sales
| from ((select c_last_name,c_first_name,sum(cs_quantity*cs_list_price) sales
| from catalog_sales, customer, date_dim
| where d_year = 2000
| and d_moy = 2
| and cs_sold_date_sk = d_date_sk
| and cs_item_sk in (select item_sk from frequent_ss_items)
| and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer)
| and cs_bill_customer_sk = c_customer_sk
| group by c_last_name,c_first_name)
| union all
| (select c_last_name,c_first_name,sum(ws_quantity*ws_list_price) sales
| from web_sales, customer, date_dim
| where d_year = 2000
| and d_moy = 2
| and ws_sold_date_sk = d_date_sk
| and ws_item_sk in (select item_sk from frequent_ss_items)
| and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer)
| and ws_bill_customer_sk = c_customer_sk
| group by c_last_name,c_first_name)) y
| order by c_last_name,c_first_name,sales
| limit 100
""".stripMargin),
("q24a", """
| with ssales as
| (select c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color,
| i_current_price, i_manager_id, i_units, i_size, sum(ss_net_paid) netpaid
| from store_sales, store_returns, store, item, customer, customer_address
| where ss_ticket_number = sr_ticket_number
| and ss_item_sk = sr_item_sk
| and ss_customer_sk = c_customer_sk
| and ss_item_sk = i_item_sk
| and ss_store_sk = s_store_sk
| and c_birth_country = upper(ca_country)
| and s_zip = ca_zip
| and s_market_id = 8
| group by c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color,
| i_current_price, i_manager_id, i_units, i_size)
| select c_last_name, c_first_name, s_store_name, sum(netpaid) paid
| from ssales
| where i_color = 'pale'
| group by c_last_name, c_first_name, s_store_name
| having sum(netpaid) > (select 0.05*avg(netpaid) from ssales)
""".stripMargin),
("q24b", """
| with ssales as
| (select c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color,
| i_current_price, i_manager_id, i_units, i_size, sum(ss_net_paid) netpaid
| from store_sales, store_returns, store, item, customer, customer_address
| where ss_ticket_number = sr_ticket_number
| and ss_item_sk = sr_item_sk
| and ss_customer_sk = c_customer_sk
| and ss_item_sk = i_item_sk
| and ss_store_sk = s_store_sk
| and c_birth_country = upper(ca_country)
| and s_zip = ca_zip
| and s_market_id = 8
| group by c_last_name, c_first_name, s_store_name, ca_state, s_state,
| i_color, i_current_price, i_manager_id, i_units, i_size)
| select c_last_name, c_first_name, s_store_name, sum(netpaid) paid
| from ssales
| where i_color = 'chiffon'
| group by c_last_name, c_first_name, s_store_name
| having sum(netpaid) > (select 0.05*avg(netpaid) from ssales)
""".stripMargin),
("q25", """
| select i_item_id, i_item_desc, s_store_id, s_store_name,
| sum(ss_net_profit) as store_sales_profit,
| sum(sr_net_loss) as store_returns_loss,
| sum(cs_net_profit) as catalog_sales_profit
| from
| store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2, date_dim d3,
| store, item
| where
| d1.d_moy = 4
| and d1.d_year = 2001
| and d1.d_date_sk = ss_sold_date_sk
| and i_item_sk = ss_item_sk
| and s_store_sk = ss_store_sk
| and ss_customer_sk = sr_customer_sk
| and ss_item_sk = sr_item_sk
| and ss_ticket_number = sr_ticket_number
| and sr_returned_date_sk = d2.d_date_sk
| and d2.d_moy between 4 and 10
| and d2.d_year = 2001
| and sr_customer_sk = cs_bill_customer_sk
| and sr_item_sk = cs_item_sk
| and cs_sold_date_sk = d3.d_date_sk
| and d3.d_moy between 4 and 10
| and d3.d_year = 2001
| group by
| i_item_id, i_item_desc, s_store_id, s_store_name
| order by
| i_item_id, i_item_desc, s_store_id, s_store_name
| limit 100
""".stripMargin),
("q26", """
| select i_item_id,
| avg(cs_quantity) agg1,
| avg(cs_list_price) agg2,
| avg(cs_coupon_amt) agg3,
| avg(cs_sales_price) agg4
| from catalog_sales, customer_demographics, date_dim, item, promotion
| where cs_sold_date_sk = d_date_sk and
| cs_item_sk = i_item_sk and
| cs_bill_cdemo_sk = cd_demo_sk and
| cs_promo_sk = p_promo_sk and
| cd_gender = 'M' and
| cd_marital_status = 'S' and
| cd_education_status = 'College' and
| (p_channel_email = 'N' or p_channel_event = 'N') and
| d_year = 2000
| group by i_item_id
| order by i_item_id
| limit 100
""".stripMargin),
("q27", """
| select i_item_id,
| s_state, grouping(s_state) g_state,
| avg(ss_quantity) agg1,
| avg(ss_list_price) agg2,
| avg(ss_coupon_amt) agg3,
| avg(ss_sales_price) agg4
| from store_sales, customer_demographics, date_dim, store, item
| where ss_sold_date_sk = d_date_sk and
| ss_item_sk = i_item_sk and
| ss_store_sk = s_store_sk and
| ss_cdemo_sk = cd_demo_sk and
| cd_gender = 'M' and
| cd_marital_status = 'S' and
| cd_education_status = 'College' and
| d_year = 2002 and
| s_state in ('TN','TN', 'TN', 'TN', 'TN', 'TN')
| group by rollup (i_item_id, s_state)
| order by i_item_id, s_state
| limit 100
""".stripMargin),
("q28", """
| select *
| from (select avg(ss_list_price) B1_LP
| ,count(ss_list_price) B1_CNT
| ,count(distinct ss_list_price) B1_CNTD
| from store_sales
| where ss_quantity between 0 and 5
| and (ss_list_price between 8 and 8+10
| or ss_coupon_amt between 459 and 459+1000
| or ss_wholesale_cost between 57 and 57+20)) B1,
| (select avg(ss_list_price) B2_LP
| ,count(ss_list_price) B2_CNT
| ,count(distinct ss_list_price) B2_CNTD
| from store_sales
| where ss_quantity between 6 and 10
| and (ss_list_price between 90 and 90+10
| or ss_coupon_amt between 2323 and 2323+1000
| or ss_wholesale_cost between 31 and 31+20)) B2,
| (select avg(ss_list_price) B3_LP
| ,count(ss_list_price) B3_CNT
| ,count(distinct ss_list_price) B3_CNTD
| from store_sales
| where ss_quantity between 11 and 15
| and (ss_list_price between 142 and 142+10
| or ss_coupon_amt between 12214 and 12214+1000
| or ss_wholesale_cost between 79 and 79+20)) B3,
| (select avg(ss_list_price) B4_LP
| ,count(ss_list_price) B4_CNT
| ,count(distinct ss_list_price) B4_CNTD
| from store_sales
| where ss_quantity between 16 and 20
| and (ss_list_price between 135 and 135+10
| or ss_coupon_amt between 6071 and 6071+1000
| or ss_wholesale_cost between 38 and 38+20)) B4,
| (select avg(ss_list_price) B5_LP
| ,count(ss_list_price) B5_CNT
| ,count(distinct ss_list_price) B5_CNTD
| from store_sales
| where ss_quantity between 21 and 25
| and (ss_list_price between 122 and 122+10
| or ss_coupon_amt between 836 and 836+1000
| or ss_wholesale_cost between 17 and 17+20)) B5,
| (select avg(ss_list_price) B6_LP
| ,count(ss_list_price) B6_CNT
| ,count(distinct ss_list_price) B6_CNTD
| from store_sales
| where ss_quantity between 26 and 30
| and (ss_list_price between 154 and 154+10
| or ss_coupon_amt between 7326 and 7326+1000
| or ss_wholesale_cost between 7 and 7+20)) B6
| limit 100
""".stripMargin),
("q29", """
| select
| i_item_id
| ,i_item_desc
| ,s_store_id
| ,s_store_name
| ,sum(ss_quantity) as store_sales_quantity
| ,sum(sr_return_quantity) as store_returns_quantity
| ,sum(cs_quantity) as catalog_sales_quantity
| from
| store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2,
| date_dim d3, store, item
| where
| d1.d_moy = 9
| and d1.d_year = 1999
| and d1.d_date_sk = ss_sold_date_sk
| and i_item_sk = ss_item_sk
| and s_store_sk = ss_store_sk
| and ss_customer_sk = sr_customer_sk
| and ss_item_sk = sr_item_sk
| and ss_ticket_number = sr_ticket_number
| and sr_returned_date_sk = d2.d_date_sk
| and d2.d_moy between 9 and 9 + 3
| and d2.d_year = 1999
| and sr_customer_sk = cs_bill_customer_sk
| and sr_item_sk = cs_item_sk
| and cs_sold_date_sk = d3.d_date_sk
| and d3.d_year in (1999,1999+1,1999+2)
| group by
| i_item_id, i_item_desc, s_store_id, s_store_name
| order by
| i_item_id, i_item_desc, s_store_id, s_store_name
| limit 100
""".stripMargin),
("q30", """
| with customer_total_return as
| (select wr_returning_customer_sk as ctr_customer_sk
| ,ca_state as ctr_state,
| sum(wr_return_amt) as ctr_total_return
| from web_returns, date_dim, customer_address
| where wr_returned_date_sk = d_date_sk
| and d_year = 2002
| and wr_returning_addr_sk = ca_address_sk
| group by wr_returning_customer_sk,ca_state)
| select c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag
| ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login,c_email_address
| ,c_last_review_date,ctr_total_return
| from customer_total_return ctr1, customer_address, customer
| where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2
| from customer_total_return ctr2
| where ctr1.ctr_state = ctr2.ctr_state)
| and ca_address_sk = c_current_addr_sk
| and ca_state = 'GA'
| and ctr1.ctr_customer_sk = c_customer_sk
| order by c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag
| ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login,c_email_address
| ,c_last_review_date,ctr_total_return
| limit 100
""".stripMargin),
("q31", """
| with ss as
| (select ca_county,d_qoy, d_year,sum(ss_ext_sales_price) as store_sales
| from store_sales,date_dim,customer_address
| where ss_sold_date_sk = d_date_sk
| and ss_addr_sk=ca_address_sk
| group by ca_county,d_qoy, d_year),
| ws as
| (select ca_county,d_qoy, d_year,sum(ws_ext_sales_price) as web_sales
| from web_sales,date_dim,customer_address
| where ws_sold_date_sk = d_date_sk
| and ws_bill_addr_sk=ca_address_sk
| group by ca_county,d_qoy, d_year)
| select
| ss1.ca_county
| ,ss1.d_year
| ,ws2.web_sales/ws1.web_sales web_q1_q2_increase
| ,ss2.store_sales/ss1.store_sales store_q1_q2_increase
| ,ws3.web_sales/ws2.web_sales web_q2_q3_increase
| ,ss3.store_sales/ss2.store_sales store_q2_q3_increase
| from
| ss ss1, ss ss2, ss ss3, ws ws1, ws ws2, ws ws3
| where
| ss1.d_qoy = 1
| and ss1.d_year = 2000
| and ss1.ca_county = ss2.ca_county
| and ss2.d_qoy = 2
| and ss2.d_year = 2000
| and ss2.ca_county = ss3.ca_county
| and ss3.d_qoy = 3
| and ss3.d_year = 2000
| and ss1.ca_county = ws1.ca_county
| and ws1.d_qoy = 1
| and ws1.d_year = 2000
| and ws1.ca_county = ws2.ca_county
| and ws2.d_qoy = 2
| and ws2.d_year = 2000
| and ws1.ca_county = ws3.ca_county
| and ws3.d_qoy = 3
| and ws3.d_year = 2000
| and case when ws1.web_sales > 0 then ws2.web_sales/ws1.web_sales else null end
| > case when ss1.store_sales > 0 then ss2.store_sales/ss1.store_sales else null end
| and case when ws2.web_sales > 0 then ws3.web_sales/ws2.web_sales else null end
| > case when ss2.store_sales > 0 then ss3.store_sales/ss2.store_sales else null end
| order by ss1.ca_county
""".stripMargin),
// Modifications: " -> `
// TPC-DS q32: total "excess" catalog discount amount for one manufacturer's
// items sold in a 90-day window, counting only discounts greater than 1.3x
// the average discount for the same item over the same window.
// Fix: removed a stray ']' from the second '2000-01-27' date literal (was
// '2000-01-27]'), which corrupted the correlated subquery's BETWEEN range
// so it no longer matched the outer query's window.
("q32", """
 | select sum(cs_ext_discount_amt) as `excess discount amount`
 | from
 |    catalog_sales, item, date_dim
 | where
 |   i_manufact_id = 977
 |   and i_item_sk = cs_item_sk
 |   and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days)
 |   and d_date_sk = cs_sold_date_sk
 |   and cs_ext_discount_amt > (
 |     select 1.3 * avg(cs_ext_discount_amt)
 |     from catalog_sales, date_dim
 |     where cs_item_sk = i_item_sk
 |      and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days)
 |      and d_date_sk = cs_sold_date_sk)
 |limit 100
 """.stripMargin),
("q33", """
| with ss as (
| select
| i_manufact_id,sum(ss_ext_sales_price) total_sales
| from
| store_sales, date_dim, customer_address, item
| where
| i_manufact_id in (select i_manufact_id
| from item
| where i_category in ('Electronics'))
| and ss_item_sk = i_item_sk
| and ss_sold_date_sk = d_date_sk
| and d_year = 1998
| and d_moy = 5
| and ss_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_manufact_id), cs as
| (select i_manufact_id, sum(cs_ext_sales_price) total_sales
| from catalog_sales, date_dim, customer_address, item
| where
| i_manufact_id in (
| select i_manufact_id from item
| where
| i_category in ('Electronics'))
| and cs_item_sk = i_item_sk
| and cs_sold_date_sk = d_date_sk
| and d_year = 1998
| and d_moy = 5
| and cs_bill_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_manufact_id),
| ws as (
| select i_manufact_id,sum(ws_ext_sales_price) total_sales
| from
| web_sales, date_dim, customer_address, item
| where
| i_manufact_id in (select i_manufact_id from item
| where i_category in ('Electronics'))
| and ws_item_sk = i_item_sk
| and ws_sold_date_sk = d_date_sk
| and d_year = 1998
| and d_moy = 5
| and ws_bill_addr_sk = ca_address_sk
| and ca_gmt_offset = -5
| group by i_manufact_id)
| select i_manufact_id ,sum(total_sales) total_sales
| from (select * from ss
| union all
| select * from cs
| union all
| select * from ws) tmp1
| group by i_manufact_id
| order by total_sales
|limit 100
""".stripMargin),
("q34", """
| select c_last_name, c_first_name, c_salutation, c_preferred_cust_flag, ss_ticket_number,
| cnt
| FROM
| (select ss_ticket_number, ss_customer_sk, count(*) cnt
| from store_sales,date_dim,store,household_demographics
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_store_sk = store.s_store_sk
| and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
| and (date_dim.d_dom between 1 and 3 or date_dim.d_dom between 25 and 28)
| and (household_demographics.hd_buy_potential = '>10000' or
| household_demographics.hd_buy_potential = 'unknown')
| and household_demographics.hd_vehicle_count > 0
| and (case when household_demographics.hd_vehicle_count > 0
| then household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count
| else null
| end) > 1.2
| and date_dim.d_year in (1999, 1999+1, 1999+2)
| and store.s_county in ('Williamson County','Williamson County','Williamson County','Williamson County',
| 'Williamson County','Williamson County','Williamson County','Williamson County')
| group by ss_ticket_number,ss_customer_sk) dn,customer
| where ss_customer_sk = c_customer_sk
| and cnt between 15 and 20
| order by c_last_name,c_first_name,c_salutation,c_preferred_cust_flag desc
""".stripMargin),
("q35", """
| select
| ca_state,
| cd_gender,
| cd_marital_status,
| count(*) cnt1,
| min(cd_dep_count),
| max(cd_dep_count),
| avg(cd_dep_count),
| cd_dep_employed_count,
| count(*) cnt2,
| min(cd_dep_employed_count),
| max(cd_dep_employed_count),
| avg(cd_dep_employed_count),
| cd_dep_college_count,
| count(*) cnt3,
| min(cd_dep_college_count),
| max(cd_dep_college_count),
| avg(cd_dep_college_count)
| from
| customer c,customer_address ca,customer_demographics
| where
| c.c_current_addr_sk = ca.ca_address_sk and
| cd_demo_sk = c.c_current_cdemo_sk and
| exists (select * from store_sales, date_dim
| where c.c_customer_sk = ss_customer_sk and
| ss_sold_date_sk = d_date_sk and
| d_year = 2002 and
| d_qoy < 4) and
| (exists (select * from web_sales, date_dim
| where c.c_customer_sk = ws_bill_customer_sk and
| ws_sold_date_sk = d_date_sk and
| d_year = 2002 and
| d_qoy < 4) or
| exists (select * from catalog_sales, date_dim
| where c.c_customer_sk = cs_ship_customer_sk and
| cs_sold_date_sk = d_date_sk and
| d_year = 2002 and
| d_qoy < 4))
| group by ca_state, cd_gender, cd_marital_status, cd_dep_count,
| cd_dep_employed_count, cd_dep_college_count
| order by ca_state, cd_gender, cd_marital_status, cd_dep_count,
| cd_dep_employed_count, cd_dep_college_count
| limit 100
""".stripMargin),
("q36", """
| select
| sum(ss_net_profit)/sum(ss_ext_sales_price) as gross_margin
| ,i_category
| ,i_class
| ,grouping(i_category)+grouping(i_class) as lochierarchy
| ,rank() over (
| partition by grouping(i_category)+grouping(i_class),
| case when grouping(i_class) = 0 then i_category end
| order by sum(ss_net_profit)/sum(ss_ext_sales_price) asc) as rank_within_parent
| from
| store_sales, date_dim d1, item, store
| where
| d1.d_year = 2001
| and d1.d_date_sk = ss_sold_date_sk
| and i_item_sk = ss_item_sk
| and s_store_sk = ss_store_sk
| and s_state in ('TN','TN','TN','TN','TN','TN','TN','TN')
| group by rollup(i_category,i_class)
| order by
| lochierarchy desc
| ,case when lochierarchy = 0 then i_category end
| ,rank_within_parent
| limit 100
""".stripMargin),
// Modifications: "+ N days" date arithmetic -> "+ interval N days"
("q37", """
| select i_item_id, i_item_desc, i_current_price
| from item, inventory, date_dim, catalog_sales
| where i_current_price between 68 and 68 + 30
| and inv_item_sk = i_item_sk
| and d_date_sk=inv_date_sk
| and d_date between cast('2000-02-01' as date) and (cast('2000-02-01' as date) + interval 60 days)
| and i_manufact_id in (677,940,694,808)
| and inv_quantity_on_hand between 100 and 500
| and cs_item_sk = i_item_sk
| group by i_item_id,i_item_desc,i_current_price
| order by i_item_id
| limit 100
""".stripMargin),
("q38", """
| select count(*) from (
| select distinct c_last_name, c_first_name, d_date
| from store_sales, date_dim, customer
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200 + 11
| intersect
| select distinct c_last_name, c_first_name, d_date
| from catalog_sales, date_dim, customer
| where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk
| and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200 + 11
| intersect
| select distinct c_last_name, c_first_name, d_date
| from web_sales, date_dim, customer
| where web_sales.ws_sold_date_sk = date_dim.d_date_sk
| and web_sales.ws_bill_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200 + 11
| ) hot_cust
| limit 100
""".stripMargin),
("q39a", """
| with inv as
| (select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
| ,stdev,mean, case mean when 0 then null else stdev/mean end cov
| from(select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
| ,stddev_samp(inv_quantity_on_hand) stdev,avg(inv_quantity_on_hand) mean
| from inventory, item, warehouse, date_dim
| where inv_item_sk = i_item_sk
| and inv_warehouse_sk = w_warehouse_sk
| and inv_date_sk = d_date_sk
| and d_year = 2001
| group by w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy) foo
| where case mean when 0 then 0 else stdev/mean end > 1)
| select inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean, inv1.cov
| ,inv2.w_warehouse_sk,inv2.i_item_sk,inv2.d_moy,inv2.mean, inv2.cov
| from inv inv1,inv inv2
| where inv1.i_item_sk = inv2.i_item_sk
| and inv1.w_warehouse_sk = inv2.w_warehouse_sk
| and inv1.d_moy=1
| and inv2.d_moy=1+1
| order by inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean,inv1.cov
| ,inv2.d_moy,inv2.mean, inv2.cov
""".stripMargin),
("q39b", """
| with inv as
| (select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
| ,stdev,mean, case mean when 0 then null else stdev/mean end cov
| from(select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
| ,stddev_samp(inv_quantity_on_hand) stdev,avg(inv_quantity_on_hand) mean
| from inventory, item, warehouse, date_dim
| where inv_item_sk = i_item_sk
| and inv_warehouse_sk = w_warehouse_sk
| and inv_date_sk = d_date_sk
| and d_year = 2001
| group by w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy) foo
| where case mean when 0 then 0 else stdev/mean end > 1)
| select inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean, inv1.cov
| ,inv2.w_warehouse_sk,inv2.i_item_sk,inv2.d_moy,inv2.mean, inv2.cov
| from inv inv1,inv inv2
| where inv1.i_item_sk = inv2.i_item_sk
| and inv1.w_warehouse_sk = inv2.w_warehouse_sk
| and inv1.d_moy=1
| and inv2.d_moy=1+1
| and inv1.cov > 1.5
| order by inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean,inv1.cov
| ,inv2.d_moy,inv2.mean, inv2.cov
""".stripMargin),
// Modifications: "+/- N days" date arithmetic -> "+/- interval N days"
("q40", """
| select
| w_state
| ,i_item_id
| ,sum(case when (cast(d_date as date) < cast('2000-03-11' as date))
| then cs_sales_price - coalesce(cr_refunded_cash,0) else 0 end) as sales_before
| ,sum(case when (cast(d_date as date) >= cast('2000-03-11' as date))
| then cs_sales_price - coalesce(cr_refunded_cash,0) else 0 end) as sales_after
| from
| catalog_sales left outer join catalog_returns on
| (cs_order_number = cr_order_number
| and cs_item_sk = cr_item_sk)
| ,warehouse, item, date_dim
| where
| i_current_price between 0.99 and 1.49
| and i_item_sk = cs_item_sk
| and cs_warehouse_sk = w_warehouse_sk
| and cs_sold_date_sk = d_date_sk
| and d_date between (cast('2000-03-11' as date) - interval 30 days)
| and (cast('2000-03-11' as date) + interval 30 days)
| group by w_state,i_item_id
| order by w_state,i_item_id
| limit 100
""".stripMargin),
("q41", """
| select distinct(i_product_name)
| from item i1
| where i_manufact_id between 738 and 738+40
| and (select count(*) as item_cnt
| from item
| where (i_manufact = i1.i_manufact and
| ((i_category = 'Women' and
| (i_color = 'powder' or i_color = 'khaki') and
| (i_units = 'Ounce' or i_units = 'Oz') and
| (i_size = 'medium' or i_size = 'extra large')
| ) or
| (i_category = 'Women' and
| (i_color = 'brown' or i_color = 'honeydew') and
| (i_units = 'Bunch' or i_units = 'Ton') and
| (i_size = 'N/A' or i_size = 'small')
| ) or
| (i_category = 'Men' and
| (i_color = 'floral' or i_color = 'deep') and
| (i_units = 'N/A' or i_units = 'Dozen') and
| (i_size = 'petite' or i_size = 'large')
| ) or
| (i_category = 'Men' and
| (i_color = 'light' or i_color = 'cornflower') and
| (i_units = 'Box' or i_units = 'Pound') and
| (i_size = 'medium' or i_size = 'extra large')
| ))
| or
| ((i_category = 'Women' and
| (i_color = 'midnight' or i_color = 'snow') and
| (i_units = 'Pallet' or i_units = 'Gross') and
| (i_size = 'medium' or i_size = 'extra large')
| ) or
| (i_category = 'Women' and
| (i_color = 'cyan' or i_color = 'papaya') and
| (i_units = 'Cup' or i_units = 'Dram') and
| (i_size = 'N/A' or i_size = 'small')
| ) or
| (i_category = 'Men' and
| (i_color = 'orange' or i_color = 'frosted') and
| (i_units = 'Each' or i_units = 'Tbl') and
| (i_size = 'petite' or i_size = 'large')
| ) or
| (i_category = 'Men' and
| (i_color = 'forest' or i_color = 'ghost') and
| (i_units = 'Lb' or i_units = 'Bundle') and
| (i_size = 'medium' or i_size = 'extra large')
| )))) > 0
| order by i_product_name
| limit 100
""".stripMargin),
("q42", """
| select dt.d_year, item.i_category_id, item.i_category, sum(ss_ext_sales_price)
| from date_dim dt, store_sales, item
| where dt.d_date_sk = store_sales.ss_sold_date_sk
| and store_sales.ss_item_sk = item.i_item_sk
| and item.i_manager_id = 1
| and dt.d_moy=11
| and dt.d_year=2000
| group by dt.d_year
| ,item.i_category_id
| ,item.i_category
| order by sum(ss_ext_sales_price) desc,dt.d_year
| ,item.i_category_id
| ,item.i_category
| limit 100
""".stripMargin),
("q43", """
| select s_store_name, s_store_id,
| sum(case when (d_day_name='Sunday') then ss_sales_price else null end) sun_sales,
| sum(case when (d_day_name='Monday') then ss_sales_price else null end) mon_sales,
| sum(case when (d_day_name='Tuesday') then ss_sales_price else null end) tue_sales,
| sum(case when (d_day_name='Wednesday') then ss_sales_price else null end) wed_sales,
| sum(case when (d_day_name='Thursday') then ss_sales_price else null end) thu_sales,
| sum(case when (d_day_name='Friday') then ss_sales_price else null end) fri_sales,
| sum(case when (d_day_name='Saturday') then ss_sales_price else null end) sat_sales
| from date_dim, store_sales, store
| where d_date_sk = ss_sold_date_sk and
| s_store_sk = ss_store_sk and
| s_gmt_offset = -5 and
| d_year = 2000
| group by s_store_name, s_store_id
| order by s_store_name, s_store_id,sun_sales,mon_sales,tue_sales,wed_sales,
| thu_sales,fri_sales,sat_sales
| limit 100
""".stripMargin),
("q44", """
| select asceding.rnk, i1.i_product_name best_performing, i2.i_product_name worst_performing
| from(select *
| from (select item_sk,rank() over (order by rank_col asc) rnk
| from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col
| from store_sales ss1
| where ss_store_sk = 4
| group by ss_item_sk
| having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col
| from store_sales
| where ss_store_sk = 4
| and ss_addr_sk is null
| group by ss_store_sk))V1)V11
| where rnk < 11) asceding,
| (select *
| from (select item_sk,rank() over (order by rank_col desc) rnk
| from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col
| from store_sales ss1
| where ss_store_sk = 4
| group by ss_item_sk
| having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col
| from store_sales
| where ss_store_sk = 4
| and ss_addr_sk is null
| group by ss_store_sk))V2)V21
| where rnk < 11) descending,
| item i1, item i2
| where asceding.rnk = descending.rnk
| and i1.i_item_sk=asceding.item_sk
| and i2.i_item_sk=descending.item_sk
| order by asceding.rnk
| limit 100
""".stripMargin),
("q45", """
| select ca_zip, ca_city, sum(ws_sales_price)
| from web_sales, customer, customer_address, date_dim, item
| where ws_bill_customer_sk = c_customer_sk
| and c_current_addr_sk = ca_address_sk
| and ws_item_sk = i_item_sk
| and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475', '85392', '85460', '80348', '81792')
| or
| i_item_id in (select i_item_id
| from item
| where i_item_sk in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29)
| )
| )
| and ws_sold_date_sk = d_date_sk
| and d_qoy = 2 and d_year = 2001
| group by ca_zip, ca_city
| order by ca_zip, ca_city
| limit 100
""".stripMargin),
("q46", """
| select c_last_name, c_first_name, ca_city, bought_city, ss_ticket_number, amt,profit
| from
| (select ss_ticket_number
| ,ss_customer_sk
| ,ca_city bought_city
| ,sum(ss_coupon_amt) amt
| ,sum(ss_net_profit) profit
| from store_sales, date_dim, store, household_demographics, customer_address
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_store_sk = store.s_store_sk
| and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
| and store_sales.ss_addr_sk = customer_address.ca_address_sk
| and (household_demographics.hd_dep_count = 4 or
| household_demographics.hd_vehicle_count= 3)
| and date_dim.d_dow in (6,0)
| and date_dim.d_year in (1999,1999+1,1999+2)
| and store.s_city in ('Fairview','Midway','Fairview','Fairview','Fairview')
| group by ss_ticket_number,ss_customer_sk,ss_addr_sk,ca_city) dn,customer,customer_address current_addr
| where ss_customer_sk = c_customer_sk
| and customer.c_current_addr_sk = current_addr.ca_address_sk
| and current_addr.ca_city <> bought_city
| order by c_last_name, c_first_name, ca_city, bought_city, ss_ticket_number
| limit 100
""".stripMargin),
// TPC-DS q47: monthly store/brand sales compared against the yearly average
// and against the previous/next month (self-joins on the rank rn), reporting
// months that deviate from the yearly average by more than 10%.
// Fix: added the missing stripMargin '|' prefix on the two continuation lines
// of v2's select list; without it, the Scala source indentation of those lines
// leaked into the generated query string (every other line uses '|').
("q47", """
 | with v1 as(
 | select i_category, i_brand,
 |        s_store_name, s_company_name,
 |        d_year, d_moy,
 |        sum(ss_sales_price) sum_sales,
 |        avg(sum(ss_sales_price)) over
 |          (partition by i_category, i_brand,
 |                     s_store_name, s_company_name, d_year)
 |          avg_monthly_sales,
 |        rank() over
 |          (partition by i_category, i_brand,
 |                     s_store_name, s_company_name
 |           order by d_year, d_moy) rn
 | from item, store_sales, date_dim, store
 | where ss_item_sk = i_item_sk and
 |       ss_sold_date_sk = d_date_sk and
 |       ss_store_sk = s_store_sk and
 |       (
 |         d_year = 1999 or
 |         ( d_year = 1999-1 and d_moy =12) or
 |         ( d_year = 1999+1 and d_moy =1)
 |       )
 | group by i_category, i_brand,
 |          s_store_name, s_company_name,
 |          d_year, d_moy),
 | v2 as(
 | select v1.i_category, v1.i_brand, v1.s_store_name, v1.s_company_name, v1.d_year,
 |        v1.d_moy, v1.avg_monthly_sales ,v1.sum_sales, v1_lag.sum_sales psum,
 |        v1_lead.sum_sales nsum
 | from v1, v1 v1_lag, v1 v1_lead
 | where v1.i_category = v1_lag.i_category and
 |       v1.i_category = v1_lead.i_category and
 |       v1.i_brand = v1_lag.i_brand and
 |       v1.i_brand = v1_lead.i_brand and
 |       v1.s_store_name = v1_lag.s_store_name and
 |       v1.s_store_name = v1_lead.s_store_name and
 |       v1.s_company_name = v1_lag.s_company_name and
 |       v1.s_company_name = v1_lead.s_company_name and
 |       v1.rn = v1_lag.rn + 1 and
 |       v1.rn = v1_lead.rn - 1)
 | select * from v2
 | where d_year = 1999 and
 |       avg_monthly_sales > 0 and
 |       case when avg_monthly_sales > 0 then abs(sum_sales - avg_monthly_sales) / avg_monthly_sales else null end > 0.1
 | order by sum_sales - avg_monthly_sales, 3
 | limit 100
 """.stripMargin),
("q48", """
| select sum (ss_quantity)
| from store_sales, store, customer_demographics, customer_address, date_dim
| where s_store_sk = ss_store_sk
| and ss_sold_date_sk = d_date_sk and d_year = 2001
| and
| (
| (
| cd_demo_sk = ss_cdemo_sk
| and
| cd_marital_status = 'M'
| and
| cd_education_status = '4 yr Degree'
| and
| ss_sales_price between 100.00 and 150.00
| )
| or
| (
| cd_demo_sk = ss_cdemo_sk
| and
| cd_marital_status = 'D'
| and
| cd_education_status = '2 yr Degree'
| and
| ss_sales_price between 50.00 and 100.00
| )
| or
| (
| cd_demo_sk = ss_cdemo_sk
| and
| cd_marital_status = 'S'
| and
| cd_education_status = 'College'
| and
| ss_sales_price between 150.00 and 200.00
| )
| )
| and
| (
| (
| ss_addr_sk = ca_address_sk
| and
| ca_country = 'United States'
| and
| ca_state in ('CO', 'OH', 'TX')
| and ss_net_profit between 0 and 2000
| )
| or
| (ss_addr_sk = ca_address_sk
| and
| ca_country = 'United States'
| and
| ca_state in ('OR', 'MN', 'KY')
| and ss_net_profit between 150 and 3000
| )
| or
| (ss_addr_sk = ca_address_sk
| and
| ca_country = 'United States'
| and
| ca_state in ('VA', 'CA', 'MS')
| and ss_net_profit between 50 and 25000
| )
| )
""".stripMargin),
    // Modifications from the original template: the "dec" type keyword was replaced with "decimal".
    // TPC-DS q49: for each channel (web / catalog / store), items ranked in the top 10 either by
    // return-quantity ratio or by returned-amount ("currency") ratio, for December 2001; the
    // three channel results are combined with UNION.
    ("q49", """
      | select 'web' as channel, web.item, web.return_ratio, web.return_rank, web.currency_rank
      | from (
      |   select
      |    item, return_ratio, currency_ratio,
      |    rank() over (order by return_ratio) as return_rank,
      |    rank() over (order by currency_ratio) as currency_rank
      |   from
      |   ( select ws.ws_item_sk as item
      |     ,(cast(sum(coalesce(wr.wr_return_quantity,0)) as decimal(15,4))/
      |     cast(sum(coalesce(ws.ws_quantity,0)) as decimal(15,4) )) as return_ratio
      |     ,(cast(sum(coalesce(wr.wr_return_amt,0)) as decimal(15,4))/
      |     cast(sum(coalesce(ws.ws_net_paid,0)) as decimal(15,4) )) as currency_ratio
      |     from
      |     web_sales ws left outer join web_returns wr
      |       on (ws.ws_order_number = wr.wr_order_number and
      |       ws.ws_item_sk = wr.wr_item_sk)
      |     ,date_dim
      |     where
      |       wr.wr_return_amt > 10000
      |       and ws.ws_net_profit > 1
      |       and ws.ws_net_paid > 0
      |       and ws.ws_quantity > 0
      |       and ws_sold_date_sk = d_date_sk
      |       and d_year = 2001
      |       and d_moy = 12
      |     group by ws.ws_item_sk
      |   ) in_web
      | ) web
      | where (web.return_rank <= 10 or web.currency_rank <= 10)
      | union
      | select
      |    'catalog' as channel, catalog.item, catalog.return_ratio,
      |    catalog.return_rank, catalog.currency_rank
      | from (
      |   select
      |    item, return_ratio, currency_ratio,
      |    rank() over (order by return_ratio) as return_rank,
      |    rank() over (order by currency_ratio) as currency_rank
      |   from
      |   ( select
      |     cs.cs_item_sk as item
      |     ,(cast(sum(coalesce(cr.cr_return_quantity,0)) as decimal(15,4))/
      |     cast(sum(coalesce(cs.cs_quantity,0)) as decimal(15,4) )) as return_ratio
      |     ,(cast(sum(coalesce(cr.cr_return_amount,0)) as decimal(15,4))/
      |     cast(sum(coalesce(cs.cs_net_paid,0)) as decimal(15,4) )) as currency_ratio
      |     from
      |     catalog_sales cs left outer join catalog_returns cr
      |       on (cs.cs_order_number = cr.cr_order_number and
      |       cs.cs_item_sk = cr.cr_item_sk)
      |     ,date_dim
      |     where
      |       cr.cr_return_amount > 10000
      |       and cs.cs_net_profit > 1
      |       and cs.cs_net_paid > 0
      |       and cs.cs_quantity > 0
      |       and cs_sold_date_sk = d_date_sk
      |       and d_year = 2001
      |       and d_moy = 12
      |     group by cs.cs_item_sk
      |   ) in_cat
      | ) catalog
      | where (catalog.return_rank <= 10 or catalog.currency_rank <=10)
      | union
      | select
      |   'store' as channel, store.item, store.return_ratio,
      |   store.return_rank, store.currency_rank
      | from (
      |   select
      |    item, return_ratio, currency_ratio,
      |    rank() over (order by return_ratio) as return_rank,
      |    rank() over (order by currency_ratio) as currency_rank
      |   from
      |   ( select sts.ss_item_sk as item
      |     ,(cast(sum(coalesce(sr.sr_return_quantity,0)) as decimal(15,4))/
      |       cast(sum(coalesce(sts.ss_quantity,0)) as decimal(15,4) )) as return_ratio
      |     ,(cast(sum(coalesce(sr.sr_return_amt,0)) as decimal(15,4))/
      |       cast(sum(coalesce(sts.ss_net_paid,0)) as decimal(15,4) )) as currency_ratio
      |     from
      |     store_sales sts left outer join store_returns sr
      |       on (sts.ss_ticket_number = sr.sr_ticket_number and sts.ss_item_sk = sr.sr_item_sk)
      |     ,date_dim
      |     where
      |       sr.sr_return_amt > 10000
      |       and sts.ss_net_profit > 1
      |       and sts.ss_net_paid > 0
      |       and sts.ss_quantity > 0
      |       and ss_sold_date_sk = d_date_sk
      |       and d_year = 2001
      |       and d_moy = 12
      |     group by sts.ss_item_sk
      |   ) in_store
      | ) store
      | where (store.return_rank <= 10 or store.currency_rank <= 10)
      | order by 1,4,5
      | limit 100
    """.stripMargin),
    // Modifications from the original template: double-quote identifier delimiters (") were replaced with backticks (`).
    // TPC-DS q50: per store (full address breakdown), counts of store returns bucketed by elapsed
    // days between sale and return (<=30, 31-60, 61-90, 91-120, >120) for returns in August 2001.
    // Bucket aliases contain spaces/dashes, hence the backtick quoting.
    ("q50", """
      | select
      |   s_store_name, s_company_id, s_street_number, s_street_name, s_street_type,
      |   s_suite_number, s_city, s_county, s_state, s_zip
      |   ,sum(case when (sr_returned_date_sk - ss_sold_date_sk <= 30 ) then 1 else 0 end) as `30 days`
      |   ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 30) and
      |     (sr_returned_date_sk - ss_sold_date_sk <= 60) then 1 else 0 end ) as `31-60 days`
      |   ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 60) and
      |     (sr_returned_date_sk - ss_sold_date_sk <= 90) then 1 else 0 end) as `61-90 days`
      |   ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 90) and
      |     (sr_returned_date_sk - ss_sold_date_sk <= 120) then 1 else 0 end) as `91-120 days`
      |   ,sum(case when (sr_returned_date_sk - ss_sold_date_sk > 120) then 1 else 0 end) as `>120 days`
      | from
      |   store_sales, store_returns, store, date_dim d1, date_dim d2
      | where
      |   d2.d_year = 2001
      |   and d2.d_moy = 8
      |   and ss_ticket_number = sr_ticket_number
      |   and ss_item_sk = sr_item_sk
      |   and ss_sold_date_sk = d1.d_date_sk
      |   and sr_returned_date_sk = d2.d_date_sk
      |   and ss_customer_sk = sr_customer_sk
      |   and ss_store_sk = s_store_sk
      | group by
      |   s_store_name, s_company_id, s_street_number, s_street_name, s_street_type,
      |   s_suite_number, s_city, s_county, s_state, s_zip
      | order by
      |   s_store_name, s_company_id, s_street_number, s_street_name, s_street_type,
      |   s_suite_number, s_city, s_county, s_state, s_zip
      | limit 100
    """.stripMargin),
    // TPC-DS q51: builds per-item daily cumulative sales for web and store channels over month
    // sequence 1200..1211 (windowed sum over a full-outer join), then returns item/date rows
    // where the running web maximum exceeds the running store maximum.
    ("q51", """
      | WITH web_v1 as (
      | select
      |   ws_item_sk item_sk, d_date,
      |   sum(sum(ws_sales_price))
      |     over (partition by ws_item_sk order by d_date rows between unbounded preceding and current row) cume_sales
      | from web_sales, date_dim
      | where ws_sold_date_sk=d_date_sk
      |   and d_month_seq between 1200 and 1200+11
      |   and ws_item_sk is not NULL
      | group by ws_item_sk, d_date),
      | store_v1 as (
      | select
      |   ss_item_sk item_sk, d_date,
      |   sum(sum(ss_sales_price))
      |     over (partition by ss_item_sk order by d_date rows between unbounded preceding and current row) cume_sales
      | from store_sales, date_dim
      | where ss_sold_date_sk=d_date_sk
      |   and d_month_seq between 1200 and 1200+11
      |   and ss_item_sk is not NULL
      | group by ss_item_sk, d_date)
      | select *
      | from (select item_sk, d_date, web_sales, store_sales
      |   ,max(web_sales)
      |     over (partition by item_sk order by d_date rows between unbounded preceding and current row) web_cumulative
      |   ,max(store_sales)
      |     over (partition by item_sk order by d_date rows between unbounded preceding and current row) store_cumulative
      | from (select case when web.item_sk is not null then web.item_sk else store.item_sk end item_sk
      |   ,case when web.d_date is not null then web.d_date else store.d_date end d_date
      |   ,web.cume_sales web_sales
      |   ,store.cume_sales store_sales
      | from web_v1 web full outer join store_v1 store on (web.item_sk = store.item_sk
      |   and web.d_date = store.d_date)
      |  )x )y
      | where web_cumulative > store_cumulative
      | order by item_sk, d_date
      | limit 100
    """.stripMargin),
    // TPC-DS q52: total extended sales price by brand for items with i_manager_id = 1,
    // sold in November 2000.
    ("q52", """
      | select dt.d_year
      | ,item.i_brand_id brand_id
      | ,item.i_brand brand
      | ,sum(ss_ext_sales_price) ext_price
      | from date_dim dt, store_sales, item
      | where dt.d_date_sk = store_sales.ss_sold_date_sk
      |   and store_sales.ss_item_sk = item.i_item_sk
      |   and item.i_manager_id = 1
      |   and dt.d_moy=11
      |   and dt.d_year=2000
      | group by dt.d_year, item.i_brand, item.i_brand_id
      | order by dt.d_year, ext_price desc, brand_id
      |limit 100
    """.stripMargin),
    // TPC-DS q53: manufacturers (for two fixed category/class/brand sets) whose quarterly sales
    // deviate more than 10% from their average quarterly sales over month sequence 1200..1211.
    ("q53", """
      | select * from
      |   (select i_manufact_id,
      |     sum(ss_sales_price) sum_sales,
      |     avg(sum(ss_sales_price)) over (partition by i_manufact_id) avg_quarterly_sales
      |   from item, store_sales, date_dim, store
      |   where ss_item_sk = i_item_sk and
      |     ss_sold_date_sk = d_date_sk and
      |     ss_store_sk = s_store_sk and
      |     d_month_seq in (1200,1200+1,1200+2,1200+3,1200+4,1200+5,1200+6,
      |                     1200+7,1200+8,1200+9,1200+10,1200+11) and
      |     ((i_category in ('Books','Children','Electronics') and
      |       i_class in ('personal','portable','reference','self-help') and
      |       i_brand in ('scholaramalgamalg #14','scholaramalgamalg #7',
      |                   'exportiunivamalg #9','scholaramalgamalg #9'))
      |     or
      |     (i_category in ('Women','Music','Men') and
      |      i_class in ('accessories','classical','fragrances','pants') and
      |      i_brand in ('amalgimporto #1','edu packscholar #1','exportiimporto #1',
      |                  'importoamalg #1')))
      |   group by i_manufact_id, d_qoy ) tmp1
      | where case when avg_quarterly_sales > 0
      |   then abs (sum_sales - avg_quarterly_sales)/ avg_quarterly_sales
      |   else null end > 0.1
      | order by avg_quarterly_sales,
      |   sum_sales,
      |   i_manufact_id
      | limit 100
    """.stripMargin),
    // TPC-DS q54: customers who bought 'Women'/'maternity' items via catalog or web in Dec 1998;
    // measures their store-sales revenue at stores in their own county/state over the next three
    // months, then histograms customers into revenue/50 segments.
    ("q54", """
      | with my_customers as (
      |  select distinct c_customer_sk
      |    , c_current_addr_sk
      |  from
      |   ( select cs_sold_date_sk sold_date_sk,
      |      cs_bill_customer_sk customer_sk,
      |      cs_item_sk item_sk
      |     from catalog_sales
      |     union all
      |     select ws_sold_date_sk sold_date_sk,
      |      ws_bill_customer_sk customer_sk,
      |      ws_item_sk item_sk
      |     from web_sales
      |    ) cs_or_ws_sales,
      |    item,
      |    date_dim,
      |    customer
      |  where sold_date_sk = d_date_sk
      |    and item_sk = i_item_sk
      |    and i_category = 'Women'
      |    and i_class = 'maternity'
      |    and c_customer_sk = cs_or_ws_sales.customer_sk
      |    and d_moy = 12
      |    and d_year = 1998
      | )
      | , my_revenue as (
      |  select c_customer_sk,
      |    sum(ss_ext_sales_price) as revenue
      |  from my_customers,
      |    store_sales,
      |    customer_address,
      |    store,
      |    date_dim
      |  where c_current_addr_sk = ca_address_sk
      |    and ca_county = s_county
      |    and ca_state = s_state
      |    and ss_sold_date_sk = d_date_sk
      |    and c_customer_sk = ss_customer_sk
      |    and d_month_seq between (select distinct d_month_seq+1
      |      from date_dim where d_year = 1998 and d_moy = 12)
      |    and (select distinct d_month_seq+3
      |      from date_dim where d_year = 1998 and d_moy = 12)
      |  group by c_customer_sk
      | )
      | , segments as
      | (select cast((revenue/50) as int) as segment from my_revenue)
      | select segment, count(*) as num_customers, segment*50 as segment_base
      | from segments
      | group by segment
      | order by segment, num_customers
      | limit 100
    """.stripMargin),
    // TPC-DS q55: total extended sales price by brand for items with i_manager_id = 28,
    // sold in November 1999.
    ("q55", """
      |select i_brand_id brand_id, i_brand brand,
      |   sum(ss_ext_sales_price) ext_price
      | from date_dim, store_sales, item
      | where d_date_sk = ss_sold_date_sk
      |   and ss_item_sk = i_item_sk
      |   and i_manager_id=28
      |   and d_moy=11
      |   and d_year=1999
      | group by i_brand, i_brand_id
      | order by ext_price desc, brand_id
      | limit 100
    """.stripMargin),
    // TPC-DS q56: per-item total sales across the three channels (store/catalog/web) for items in
    // colors slate/blanched/burnished, sold Feb 2001 to customers at GMT offset -5; channel CTEs
    // are combined with UNION ALL and re-aggregated by item id.
    ("q56", """
      | with ss as (
      |  select i_item_id,sum(ss_ext_sales_price) total_sales
      |  from
      | 	 store_sales, date_dim, customer_address, item
      |  where
      |    i_item_id in (select i_item_id from item where i_color in ('slate','blanched','burnished'))
      |    and ss_item_sk = i_item_sk
      |    and ss_sold_date_sk = d_date_sk
      |    and d_year = 2001
      |    and d_moy = 2
      |    and ss_addr_sk = ca_address_sk
      |    and ca_gmt_offset = -5
      |  group by i_item_id),
      | cs as (
      |  select i_item_id,sum(cs_ext_sales_price) total_sales
      |  from
      | 	 catalog_sales, date_dim, customer_address, item
      |  where
      |    i_item_id in (select i_item_id from item where i_color in ('slate','blanched','burnished'))
      |    and cs_item_sk = i_item_sk
      |    and cs_sold_date_sk = d_date_sk
      |    and d_year = 2001
      |    and d_moy = 2
      |    and cs_bill_addr_sk = ca_address_sk
      |    and ca_gmt_offset = -5
      |  group by i_item_id),
      | ws as (
      |  select i_item_id,sum(ws_ext_sales_price) total_sales
      |  from
      | 	 web_sales, date_dim, customer_address, item
      |  where
      |    i_item_id in (select i_item_id from item where i_color in ('slate','blanched','burnished'))
      |    and ws_item_sk = i_item_sk
      |    and ws_sold_date_sk = d_date_sk
      |    and d_year = 2001
      |    and d_moy = 2
      |    and ws_bill_addr_sk = ca_address_sk
      |    and ca_gmt_offset = -5
      |  group by i_item_id)
      | select i_item_id ,sum(total_sales) total_sales
      | from (select * from ss
      |       union all
      |       select * from cs
      |       union all
      |       select * from ws) tmp1
      | group by i_item_id
      | order by total_sales
      | limit 100
    """.stripMargin),
    // TPC-DS q57: monthly catalog sales per (category, brand, call center) around 1999, with the
    // yearly average and a month rank per partition; self-joins on rank+/-1 to attach previous and
    // next month sums, and keeps 1999 months deviating more than 10% from the yearly average.
    ("q57", """
      | with v1 as(
      |  select i_category, i_brand,
      |     cc_name,
      |     d_year, d_moy,
      |     sum(cs_sales_price) sum_sales,
      |     avg(sum(cs_sales_price)) over
      |       (partition by i_category, i_brand, cc_name, d_year)
      |       avg_monthly_sales,
      |     rank() over
      |       (partition by i_category, i_brand, cc_name
      |        order by d_year, d_moy) rn
      |  from item, catalog_sales, date_dim, call_center
      |  where cs_item_sk = i_item_sk and
      |    cs_sold_date_sk = d_date_sk and
      |    cc_call_center_sk= cs_call_center_sk and
      |    (
      |      d_year = 1999 or
      |      ( d_year = 1999-1 and d_moy =12) or
      |      ( d_year = 1999+1 and d_moy =1)
      |    )
      |  group by i_category, i_brand,
      |    cc_name , d_year, d_moy),
      | v2 as(
      |  select v1.i_category, v1.i_brand, v1.cc_name, v1.d_year, v1.d_moy
      |    ,v1.avg_monthly_sales
      |    ,v1.sum_sales, v1_lag.sum_sales psum, v1_lead.sum_sales nsum
      |  from v1, v1 v1_lag, v1 v1_lead
      |  where v1.i_category = v1_lag.i_category and
      |    v1.i_category = v1_lead.i_category and
      |    v1.i_brand = v1_lag.i_brand and
      |    v1.i_brand = v1_lead.i_brand and
      |    v1. cc_name = v1_lag. cc_name and
      |    v1. cc_name = v1_lead. cc_name and
      |    v1.rn = v1_lag.rn + 1 and
      |    v1.rn = v1_lead.rn - 1)
      | select * from v2
      | where d_year = 1999 and
      |   avg_monthly_sales > 0 and
      |   case when avg_monthly_sales > 0 then abs(sum_sales - avg_monthly_sales) / avg_monthly_sales else null end > 0.1
      | order by sum_sales - avg_monthly_sales, 3
      | limit 100
    """.stripMargin),
    // TPC-DS q58: per-item revenue in each of the three channels for the week containing
    // 2000-01-03; keeps items whose revenues are mutually within +/-10% across all channels,
    // reporting each channel's share of the three-channel average.
    ("q58", """
      | with ss_items as
      |  (select i_item_id item_id, sum(ss_ext_sales_price) ss_item_rev
      |   from store_sales, item, date_dim
      |   where ss_item_sk = i_item_sk
      |     and d_date in (select d_date
      |                    from date_dim
      |                    where d_week_seq = (select d_week_seq
      |                                        from date_dim
      |                                        where d_date = '2000-01-03'))
      |     and ss_sold_date_sk = d_date_sk
      |   group by i_item_id),
      | cs_items as
      |  (select i_item_id item_id
      |     ,sum(cs_ext_sales_price) cs_item_rev
      |   from catalog_sales, item, date_dim
      |   where cs_item_sk = i_item_sk
      |     and d_date in (select d_date
      |                    from date_dim
      |                    where d_week_seq = (select d_week_seq
      |                                        from date_dim
      |                                        where d_date = '2000-01-03'))
      |     and cs_sold_date_sk = d_date_sk
      |   group by i_item_id),
      | ws_items as
      |  (select i_item_id item_id, sum(ws_ext_sales_price) ws_item_rev
      |   from web_sales, item, date_dim
      |   where ws_item_sk = i_item_sk
      |     and d_date in (select d_date
      |                    from date_dim
      |                    where d_week_seq =(select d_week_seq
      |                                       from date_dim
      |                                       where d_date = '2000-01-03'))
      |     and ws_sold_date_sk = d_date_sk
      |   group by i_item_id)
      | select ss_items.item_id
      |   ,ss_item_rev
      |   ,ss_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 ss_dev
      |   ,cs_item_rev
      |   ,cs_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 cs_dev
      |   ,ws_item_rev
      |   ,ws_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 ws_dev
      |   ,(ss_item_rev+cs_item_rev+ws_item_rev)/3 average
      | from ss_items,cs_items,ws_items
      | where ss_items.item_id=cs_items.item_id
      |   and ss_items.item_id=ws_items.item_id
      |   and ss_item_rev between 0.9 * cs_item_rev and 1.1 * cs_item_rev
      |   and ss_item_rev between 0.9 * ws_item_rev and 1.1 * ws_item_rev
      |   and cs_item_rev between 0.9 * ss_item_rev and 1.1 * ss_item_rev
      |   and cs_item_rev between 0.9 * ws_item_rev and 1.1 * ws_item_rev
      |   and ws_item_rev between 0.9 * ss_item_rev and 1.1 * ss_item_rev
      |   and ws_item_rev between 0.9 * cs_item_rev and 1.1 * cs_item_rev
      | order by item_id, ss_item_rev
      | limit 100
    """.stripMargin),
    // TPC-DS q59: pivots store sales by day-of-week per (week, store), then joins each week in
    // month sequence 1212..1223 against the week 52 sequence-numbers later (1224..1235) for the
    // same store, reporting the per-weekday year-over-year sales ratios.
    ("q59", """
      | with wss as
      |  (select d_week_seq,
      |     ss_store_sk,
      |     sum(case when (d_day_name='Sunday') then ss_sales_price else null end) sun_sales,
      |     sum(case when (d_day_name='Monday') then ss_sales_price else null end) mon_sales,
      |     sum(case when (d_day_name='Tuesday') then ss_sales_price else null end) tue_sales,
      |     sum(case when (d_day_name='Wednesday') then ss_sales_price else null end) wed_sales,
      |     sum(case when (d_day_name='Thursday') then ss_sales_price else null end) thu_sales,
      |     sum(case when (d_day_name='Friday') then ss_sales_price else null end) fri_sales,
      |     sum(case when (d_day_name='Saturday') then ss_sales_price else null end) sat_sales
      |  from store_sales,date_dim
      |  where d_date_sk = ss_sold_date_sk
      |  group by d_week_seq,ss_store_sk
      |  )
      | select s_store_name1,s_store_id1,d_week_seq1
      |   ,sun_sales1/sun_sales2,mon_sales1/mon_sales2
      |   ,tue_sales1/tue_sales2,wed_sales1/wed_sales2,thu_sales1/thu_sales2
      |   ,fri_sales1/fri_sales2,sat_sales1/sat_sales2
      | from
      |  (select s_store_name s_store_name1,wss.d_week_seq d_week_seq1
      |     ,s_store_id s_store_id1,sun_sales sun_sales1
      |     ,mon_sales mon_sales1,tue_sales tue_sales1
      |     ,wed_sales wed_sales1,thu_sales thu_sales1
      |     ,fri_sales fri_sales1,sat_sales sat_sales1
      |   from wss,store,date_dim d
      |   where d.d_week_seq = wss.d_week_seq and
      |     ss_store_sk = s_store_sk and
      |     d_month_seq between 1212 and 1212 + 11) y,
      |  (select s_store_name s_store_name2,wss.d_week_seq d_week_seq2
      |     ,s_store_id s_store_id2,sun_sales sun_sales2
      |     ,mon_sales mon_sales2,tue_sales tue_sales2
      |     ,wed_sales wed_sales2,thu_sales thu_sales2
      |     ,fri_sales fri_sales2,sat_sales sat_sales2
      |   from wss,store,date_dim d
      |   where d.d_week_seq = wss.d_week_seq and
      |     ss_store_sk = s_store_sk and
      |     d_month_seq between 1212+ 12 and 1212 + 23) x
      | where s_store_id1=s_store_id2
      |   and d_week_seq1=d_week_seq2-52
      | order by s_store_name1,s_store_id1,d_week_seq1
      | limit 100
    """.stripMargin),
    // TPC-DS q60: per-item total sales across the three channels for items in category 'Music',
    // sold September 1998 to customers at GMT offset -5; same UNION ALL + re-aggregate shape
    // as q56, differing only in the item filter and date.
    ("q60", """
      | with ss as (
      |  select i_item_id,sum(ss_ext_sales_price) total_sales
      |  from store_sales, date_dim, customer_address, item
      |  where
      |    i_item_id in (select i_item_id from item where i_category in ('Music'))
      |    and ss_item_sk = i_item_sk
      |    and ss_sold_date_sk = d_date_sk
      |    and d_year = 1998
      |    and d_moy = 9
      |    and ss_addr_sk = ca_address_sk
      |    and ca_gmt_offset = -5
      |  group by i_item_id),
      | cs as (
      |  select i_item_id,sum(cs_ext_sales_price) total_sales
      |  from catalog_sales, date_dim, customer_address, item
      |  where
      |    i_item_id in (select i_item_id from item where i_category in ('Music'))
      |    and cs_item_sk = i_item_sk
      |    and cs_sold_date_sk = d_date_sk
      |    and d_year = 1998
      |    and d_moy = 9
      |    and cs_bill_addr_sk = ca_address_sk
      |    and ca_gmt_offset = -5
      |  group by i_item_id),
      | ws as (
      |  select i_item_id,sum(ws_ext_sales_price) total_sales
      |  from web_sales, date_dim, customer_address, item
      |  where
      |    i_item_id in (select i_item_id from item where i_category in ('Music'))
      |    and ws_item_sk = i_item_sk
      |    and ws_sold_date_sk = d_date_sk
      |    and d_year = 1998
      |    and d_moy = 9
      |    and ws_bill_addr_sk = ca_address_sk
      |    and ca_gmt_offset = -5
      |  group by i_item_id)
      | select i_item_id, sum(total_sales) total_sales
      | from (select * from ss
      |       union all
      |       select * from cs
      |       union all
      |       select * from ws) tmp1
      | group by i_item_id
      | order by i_item_id, total_sales
      | limit 100
    """.stripMargin),
    // TPC-DS q61: percentage of promoted store sales (dmail/email/TV promotion channels) out of
    // all store sales for category 'Jewelry', November 1998, customers and stores at GMT -5.
    // NOTE(review): declared with the s-interpolator (s""") though no $-substitutions are visible
    // in the text — presumably intentional; confirm no interpolation is expected here.
    ("q61", s"""
      | select promotions,total,cast(promotions as decimal(15,4))/cast(total as decimal(15,4))*100
      | from
      |   (select sum(ss_ext_sales_price) promotions
      |    from store_sales, store, promotion, date_dim, customer, customer_address, item
      |    where ss_sold_date_sk = d_date_sk
      |      and ss_store_sk = s_store_sk
      |      and ss_promo_sk = p_promo_sk
      |      and ss_customer_sk= c_customer_sk
      |      and ca_address_sk = c_current_addr_sk
      |      and ss_item_sk = i_item_sk
      |      and ca_gmt_offset = -5
      |      and i_category = 'Jewelry'
      |      and (p_channel_dmail = 'Y' or p_channel_email = 'Y' or p_channel_tv = 'Y')
      |      and s_gmt_offset = -5
      |      and d_year = 1998
      |      and d_moy = 11) promotional_sales,
      |   (select sum(ss_ext_sales_price) total
      |    from store_sales, store, date_dim, customer, customer_address, item
      |    where ss_sold_date_sk = d_date_sk
      |      and ss_store_sk = s_store_sk
      |      and ss_customer_sk= c_customer_sk
      |      and ca_address_sk = c_current_addr_sk
      |      and ss_item_sk = i_item_sk
      |      and ca_gmt_offset = -5
      |      and i_category = 'Jewelry'
      |      and s_gmt_offset = -5
      |      and d_year = 1998
      |      and d_moy = 11) all_sales
      | order by promotions, total
      | limit 100
    """.stripMargin),
    // Modifications from the original template: double-quote identifier delimiters (") were replaced with backticks (`).
    // TPC-DS q62: web sales counted into shipping-delay buckets (days from sale to ship date:
    // <=30, 31-60, 61-90, 91-120, >120) by warehouse, ship mode, and web site, over month
    // sequence 1200..1211. Bucket aliases use backtick quoting.
    ("q62", """
      | select
      |   substr(w_warehouse_name,1,20)
      |   ,sm_type
      |   ,web_name
      |   ,sum(case when (ws_ship_date_sk - ws_sold_date_sk <= 30 ) then 1 else 0 end) as `30 days`
      |   ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 30) and
      |     (ws_ship_date_sk - ws_sold_date_sk <= 60) then 1 else 0 end ) as `31-60 days`
      |   ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 60) and
      |     (ws_ship_date_sk - ws_sold_date_sk <= 90) then 1 else 0 end) as `61-90 days`
      |   ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 90) and
      |     (ws_ship_date_sk - ws_sold_date_sk <= 120) then 1 else 0 end) as `91-120 days`
      |   ,sum(case when (ws_ship_date_sk - ws_sold_date_sk > 120) then 1 else 0 end) as `>120 days`
      | from
      |   web_sales, warehouse, ship_mode, web_site, date_dim
      | where
      |   d_month_seq between 1200 and 1200 + 11
      |   and ws_ship_date_sk = d_date_sk
      |   and ws_warehouse_sk = w_warehouse_sk
      |   and ws_ship_mode_sk = sm_ship_mode_sk
      |   and ws_web_site_sk = web_site_sk
      | group by
      |   substr(w_warehouse_name,1,20), sm_type, web_name
      | order by
      |   substr(w_warehouse_name,1,20), sm_type, web_name
      | limit 100
    """.stripMargin),
    // TPC-DS q63: managers (for two fixed category/class/brand sets) whose monthly sales deviate
    // more than 10% from their average monthly sales over month sequence 1200..1211.
    // (Note: 'refernece' is the benchmark's own literal value, not a typo to fix.)
    ("q63", """
      | select *
      | from (select i_manager_id
      |         ,sum(ss_sales_price) sum_sales
      |         ,avg(sum(ss_sales_price)) over (partition by i_manager_id) avg_monthly_sales
      |       from item
      |         ,store_sales
      |         ,date_dim
      |         ,store
      |       where ss_item_sk = i_item_sk
      |         and ss_sold_date_sk = d_date_sk
      |         and ss_store_sk = s_store_sk
      |         and d_month_seq in (1200,1200+1,1200+2,1200+3,1200+4,1200+5,1200+6,1200+7,
      |                             1200+8,1200+9,1200+10,1200+11)
      |         and (( i_category in ('Books','Children','Electronics')
      |                 and i_class in ('personal','portable','refernece','self-help')
      |                 and i_brand in ('scholaramalgamalg #14','scholaramalgamalg #7',
      |                                 'exportiunivamalg #9','scholaramalgamalg #9'))
      |             or( i_category in ('Women','Music','Men')
      |                 and i_class in ('accessories','classical','fragrances','pants')
      |                 and i_brand in ('amalgimporto #1','edu packscholar #1','exportiimporto #1',
      |                                 'importoamalg #1')))
      | group by i_manager_id, d_moy) tmp1
      | where case when avg_monthly_sales > 0 then abs (sum_sales - avg_monthly_sales) / avg_monthly_sales else null end > 0.1
      | order by i_manager_id
      |   ,avg_monthly_sales
      |   ,sum_sales
      | limit 100
    """.stripMargin),
    // TPC-DS q64: for items that were sold in stores and returned, and whose catalog list-price
    // total exceeded twice the catalog refund total (cs_ui CTE), compares per-item per-store
    // sales statistics between 1999 and 2000 across a wide customer/demographics/address join.
    ("q64", """
      | with cs_ui as
      |  (select cs_item_sk
      |     ,sum(cs_ext_list_price) as sale,sum(cr_refunded_cash+cr_reversed_charge+cr_store_credit) as refund
      |   from catalog_sales
      |     ,catalog_returns
      |   where cs_item_sk = cr_item_sk
      |     and cs_order_number = cr_order_number
      |   group by cs_item_sk
      |   having sum(cs_ext_list_price)>2*sum(cr_refunded_cash+cr_reversed_charge+cr_store_credit)),
      | cross_sales as
      |  (select i_product_name product_name, i_item_sk item_sk, s_store_name store_name, s_zip store_zip,
      |     ad1.ca_street_number b_street_number, ad1.ca_street_name b_streen_name, ad1.ca_city b_city,
      |     ad1.ca_zip b_zip, ad2.ca_street_number c_street_number, ad2.ca_street_name c_street_name,
      |     ad2.ca_city c_city, ad2.ca_zip c_zip, d1.d_year as syear, d2.d_year as fsyear, d3.d_year s2year,
      |     count(*) cnt, sum(ss_wholesale_cost) s1, sum(ss_list_price) s2, sum(ss_coupon_amt) s3
      |   FROM store_sales, store_returns, cs_ui, date_dim d1, date_dim d2, date_dim d3,
      |     store, customer, customer_demographics cd1, customer_demographics cd2,
      |     promotion, household_demographics hd1, household_demographics hd2,
      |     customer_address ad1, customer_address ad2, income_band ib1, income_band ib2, item
      |   WHERE ss_store_sk = s_store_sk AND
      |     ss_sold_date_sk = d1.d_date_sk AND
      |     ss_customer_sk = c_customer_sk AND
      |     ss_cdemo_sk= cd1.cd_demo_sk AND
      |     ss_hdemo_sk = hd1.hd_demo_sk AND
      |     ss_addr_sk = ad1.ca_address_sk and
      |     ss_item_sk = i_item_sk and
      |     ss_item_sk = sr_item_sk and
      |     ss_ticket_number = sr_ticket_number and
      |     ss_item_sk = cs_ui.cs_item_sk and
      |     c_current_cdemo_sk = cd2.cd_demo_sk AND
      |     c_current_hdemo_sk = hd2.hd_demo_sk AND
      |     c_current_addr_sk = ad2.ca_address_sk and
      |     c_first_sales_date_sk = d2.d_date_sk and
      |     c_first_shipto_date_sk = d3.d_date_sk and
      |     ss_promo_sk = p_promo_sk and
      |     hd1.hd_income_band_sk = ib1.ib_income_band_sk and
      |     hd2.hd_income_band_sk = ib2.ib_income_band_sk and
      |     cd1.cd_marital_status <> cd2.cd_marital_status and
      |     i_color in ('purple','burlywood','indian','spring','floral','medium') and
      |     i_current_price between 64 and 64 + 10 and
      |     i_current_price between 64 + 1 and 64 + 15
      |   group by i_product_name, i_item_sk, s_store_name, s_zip, ad1.ca_street_number,
      |     ad1.ca_street_name, ad1.ca_city, ad1.ca_zip, ad2.ca_street_number,
      |     ad2.ca_street_name, ad2.ca_city, ad2.ca_zip, d1.d_year, d2.d_year, d3.d_year
      |   )
      | select cs1.product_name, cs1.store_name, cs1.store_zip, cs1.b_street_number,
      |   cs1.b_streen_name, cs1.b_city, cs1.b_zip, cs1.c_street_number, cs1.c_street_name,
      |   cs1.c_city, cs1.c_zip, cs1.syear, cs1.cnt, cs1.s1, cs1.s2, cs1.s3, cs2.s1,
      |   cs2.s2, cs2.s3, cs2.syear, cs2.cnt
      | from cross_sales cs1,cross_sales cs2
      | where cs1.item_sk=cs2.item_sk and
      |   cs1.syear = 1999 and
      |   cs2.syear = 1999 + 1 and
      |   cs2.cnt <= cs1.cnt and
      |   cs1.store_name = cs2.store_name and
      |   cs1.store_zip = cs2.store_zip
      | order by cs1.product_name, cs1.store_name, cs2.cnt
    """.stripMargin),
    // TPC-DS q65: store/item pairs whose revenue over month sequence 1176..1187 is at most 10%
    // of that store's average per-item revenue for the same period.
    ("q65", """
      | select
      |   s_store_name, i_item_desc, sc.revenue, i_current_price, i_wholesale_cost, i_brand
      | from store, item,
      |   (select ss_store_sk, avg(revenue) as ave
      |   from
      |     (select ss_store_sk, ss_item_sk,
      |       sum(ss_sales_price) as revenue
      |     from store_sales, date_dim
      |     where ss_sold_date_sk = d_date_sk and d_month_seq between 1176 and 1176+11
      |     group by ss_store_sk, ss_item_sk) sa
      |   group by ss_store_sk) sb,
      |   (select ss_store_sk, ss_item_sk, sum(ss_sales_price) as revenue
      |   from store_sales, date_dim
      |   where ss_sold_date_sk = d_date_sk and d_month_seq between 1176 and 1176+11
      |   group by ss_store_sk, ss_item_sk) sc
      | where sb.ss_store_sk = sc.ss_store_sk and
      |   sc.revenue <= 0.1 * sb.ave and
      |   s_store_sk = sc.ss_store_sk and
      |   i_item_sk = sc.ss_item_sk
      | order by s_store_name, i_item_desc
      | limit 100
    """.stripMargin),
// Modifications: "||" -> concat
// q66: per-warehouse monthly sales and net-paid totals (Jan..Dec, plus per-square-foot
// variants) for year 2001, restricted to a time-of-day window (t_time 30838..30838+28800)
// and carriers DHL/BARIAN; web_sales and catalog_sales branches are UNION ALLed then
// re-aggregated per warehouse/carrier/year.
("q66", """
| select w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country,
| ship_carriers, year
| ,sum(jan_sales) as jan_sales
| ,sum(feb_sales) as feb_sales
| ,sum(mar_sales) as mar_sales
| ,sum(apr_sales) as apr_sales
| ,sum(may_sales) as may_sales
| ,sum(jun_sales) as jun_sales
| ,sum(jul_sales) as jul_sales
| ,sum(aug_sales) as aug_sales
| ,sum(sep_sales) as sep_sales
| ,sum(oct_sales) as oct_sales
| ,sum(nov_sales) as nov_sales
| ,sum(dec_sales) as dec_sales
| ,sum(jan_sales/w_warehouse_sq_ft) as jan_sales_per_sq_foot
| ,sum(feb_sales/w_warehouse_sq_ft) as feb_sales_per_sq_foot
| ,sum(mar_sales/w_warehouse_sq_ft) as mar_sales_per_sq_foot
| ,sum(apr_sales/w_warehouse_sq_ft) as apr_sales_per_sq_foot
| ,sum(may_sales/w_warehouse_sq_ft) as may_sales_per_sq_foot
| ,sum(jun_sales/w_warehouse_sq_ft) as jun_sales_per_sq_foot
| ,sum(jul_sales/w_warehouse_sq_ft) as jul_sales_per_sq_foot
| ,sum(aug_sales/w_warehouse_sq_ft) as aug_sales_per_sq_foot
| ,sum(sep_sales/w_warehouse_sq_ft) as sep_sales_per_sq_foot
| ,sum(oct_sales/w_warehouse_sq_ft) as oct_sales_per_sq_foot
| ,sum(nov_sales/w_warehouse_sq_ft) as nov_sales_per_sq_foot
| ,sum(dec_sales/w_warehouse_sq_ft) as dec_sales_per_sq_foot
| ,sum(jan_net) as jan_net
| ,sum(feb_net) as feb_net
| ,sum(mar_net) as mar_net
| ,sum(apr_net) as apr_net
| ,sum(may_net) as may_net
| ,sum(jun_net) as jun_net
| ,sum(jul_net) as jul_net
| ,sum(aug_net) as aug_net
| ,sum(sep_net) as sep_net
| ,sum(oct_net) as oct_net
| ,sum(nov_net) as nov_net
| ,sum(dec_net) as dec_net
| from (
| (select
| w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country
| ,concat('DHL', ',', 'BARIAN') as ship_carriers
| ,d_year as year
| ,sum(case when d_moy = 1 then ws_ext_sales_price * ws_quantity else 0 end) as jan_sales
| ,sum(case when d_moy = 2 then ws_ext_sales_price * ws_quantity else 0 end) as feb_sales
| ,sum(case when d_moy = 3 then ws_ext_sales_price * ws_quantity else 0 end) as mar_sales
| ,sum(case when d_moy = 4 then ws_ext_sales_price * ws_quantity else 0 end) as apr_sales
| ,sum(case when d_moy = 5 then ws_ext_sales_price * ws_quantity else 0 end) as may_sales
| ,sum(case when d_moy = 6 then ws_ext_sales_price * ws_quantity else 0 end) as jun_sales
| ,sum(case when d_moy = 7 then ws_ext_sales_price * ws_quantity else 0 end) as jul_sales
| ,sum(case when d_moy = 8 then ws_ext_sales_price * ws_quantity else 0 end) as aug_sales
| ,sum(case when d_moy = 9 then ws_ext_sales_price * ws_quantity else 0 end) as sep_sales
| ,sum(case when d_moy = 10 then ws_ext_sales_price * ws_quantity else 0 end) as oct_sales
| ,sum(case when d_moy = 11 then ws_ext_sales_price * ws_quantity else 0 end) as nov_sales
| ,sum(case when d_moy = 12 then ws_ext_sales_price * ws_quantity else 0 end) as dec_sales
| ,sum(case when d_moy = 1 then ws_net_paid * ws_quantity else 0 end) as jan_net
| ,sum(case when d_moy = 2 then ws_net_paid * ws_quantity else 0 end) as feb_net
| ,sum(case when d_moy = 3 then ws_net_paid * ws_quantity else 0 end) as mar_net
| ,sum(case when d_moy = 4 then ws_net_paid * ws_quantity else 0 end) as apr_net
| ,sum(case when d_moy = 5 then ws_net_paid * ws_quantity else 0 end) as may_net
| ,sum(case when d_moy = 6 then ws_net_paid * ws_quantity else 0 end) as jun_net
| ,sum(case when d_moy = 7 then ws_net_paid * ws_quantity else 0 end) as jul_net
| ,sum(case when d_moy = 8 then ws_net_paid * ws_quantity else 0 end) as aug_net
| ,sum(case when d_moy = 9 then ws_net_paid * ws_quantity else 0 end) as sep_net
| ,sum(case when d_moy = 10 then ws_net_paid * ws_quantity else 0 end) as oct_net
| ,sum(case when d_moy = 11 then ws_net_paid * ws_quantity else 0 end) as nov_net
| ,sum(case when d_moy = 12 then ws_net_paid * ws_quantity else 0 end) as dec_net
| from
| web_sales, warehouse, date_dim, time_dim, ship_mode
| where
| ws_warehouse_sk = w_warehouse_sk
| and ws_sold_date_sk = d_date_sk
| and ws_sold_time_sk = t_time_sk
| and ws_ship_mode_sk = sm_ship_mode_sk
| and d_year = 2001
| and t_time between 30838 and 30838+28800
| and sm_carrier in ('DHL','BARIAN')
| group by
| w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country, d_year)
| union all
| (select w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country
| ,concat('DHL', ',', 'BARIAN') as ship_carriers
| ,d_year as year
| ,sum(case when d_moy = 1 then cs_sales_price * cs_quantity else 0 end) as jan_sales
| ,sum(case when d_moy = 2 then cs_sales_price * cs_quantity else 0 end) as feb_sales
| ,sum(case when d_moy = 3 then cs_sales_price * cs_quantity else 0 end) as mar_sales
| ,sum(case when d_moy = 4 then cs_sales_price * cs_quantity else 0 end) as apr_sales
| ,sum(case when d_moy = 5 then cs_sales_price * cs_quantity else 0 end) as may_sales
| ,sum(case when d_moy = 6 then cs_sales_price * cs_quantity else 0 end) as jun_sales
| ,sum(case when d_moy = 7 then cs_sales_price * cs_quantity else 0 end) as jul_sales
| ,sum(case when d_moy = 8 then cs_sales_price * cs_quantity else 0 end) as aug_sales
| ,sum(case when d_moy = 9 then cs_sales_price * cs_quantity else 0 end) as sep_sales
| ,sum(case when d_moy = 10 then cs_sales_price * cs_quantity else 0 end) as oct_sales
| ,sum(case when d_moy = 11 then cs_sales_price * cs_quantity else 0 end) as nov_sales
| ,sum(case when d_moy = 12 then cs_sales_price * cs_quantity else 0 end) as dec_sales
| ,sum(case when d_moy = 1 then cs_net_paid_inc_tax * cs_quantity else 0 end) as jan_net
| ,sum(case when d_moy = 2 then cs_net_paid_inc_tax * cs_quantity else 0 end) as feb_net
| ,sum(case when d_moy = 3 then cs_net_paid_inc_tax * cs_quantity else 0 end) as mar_net
| ,sum(case when d_moy = 4 then cs_net_paid_inc_tax * cs_quantity else 0 end) as apr_net
| ,sum(case when d_moy = 5 then cs_net_paid_inc_tax * cs_quantity else 0 end) as may_net
| ,sum(case when d_moy = 6 then cs_net_paid_inc_tax * cs_quantity else 0 end) as jun_net
| ,sum(case when d_moy = 7 then cs_net_paid_inc_tax * cs_quantity else 0 end) as jul_net
| ,sum(case when d_moy = 8 then cs_net_paid_inc_tax * cs_quantity else 0 end) as aug_net
| ,sum(case when d_moy = 9 then cs_net_paid_inc_tax * cs_quantity else 0 end) as sep_net
| ,sum(case when d_moy = 10 then cs_net_paid_inc_tax * cs_quantity else 0 end) as oct_net
| ,sum(case when d_moy = 11 then cs_net_paid_inc_tax * cs_quantity else 0 end) as nov_net
| ,sum(case when d_moy = 12 then cs_net_paid_inc_tax * cs_quantity else 0 end) as dec_net
| from
| catalog_sales, warehouse, date_dim, time_dim, ship_mode
| where
| cs_warehouse_sk = w_warehouse_sk
| and cs_sold_date_sk = d_date_sk
| and cs_sold_time_sk = t_time_sk
| and cs_ship_mode_sk = sm_ship_mode_sk
| and d_year = 2001
| and t_time between 30838 AND 30838+28800
| and sm_carrier in ('DHL','BARIAN')
| group by
| w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country, d_year
| )
| ) x
| group by
| w_warehouse_name, w_warehouse_sq_ft, w_city, w_county, w_state, w_country,
| ship_carriers, year
| order by w_warehouse_name
| limit 100
""".stripMargin),
// q67: store-sales totals over a ROLLUP of (category, class, brand, product, year,
// quarter, month, store) for d_month_seq 1200..1200+11, keeping the top 100 rows
// per category by summed sales (rank() over partition by i_category).
("q67", """
| select * from
| (select i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy, s_store_id,
| sumsales, rank() over (partition by i_category order by sumsales desc) rk
| from
| (select i_category, i_class, i_brand, i_product_name, d_year, d_qoy, d_moy,
| s_store_id, sum(coalesce(ss_sales_price*ss_quantity,0)) sumsales
| from store_sales, date_dim, store, item
| where ss_sold_date_sk=d_date_sk
| and ss_item_sk=i_item_sk
| and ss_store_sk = s_store_sk
| and d_month_seq between 1200 and 1200+11
| group by rollup(i_category, i_class, i_brand, i_product_name, d_year, d_qoy,
| d_moy,s_store_id))dw1) dw2
| where rk <= 100
| order by
| i_category, i_class, i_brand, i_product_name, d_year,
| d_qoy, d_moy, s_store_id, sumsales, rk
| limit 100
""".stripMargin),
// q68: per-ticket store-sales totals (extended price / tax / list price) in
// Midway/Fairview stores, early-month days (d_dom 1..2) of 1999-2001, for
// customers whose current-address city differs from the city they bought in.
("q68", """
| select
| c_last_name, c_first_name, ca_city, bought_city, ss_ticket_number, extended_price,
| extended_tax, list_price
| from (select
| ss_ticket_number, ss_customer_sk, ca_city bought_city,
| sum(ss_ext_sales_price) extended_price,
| sum(ss_ext_list_price) list_price,
| sum(ss_ext_tax) extended_tax
| from store_sales, date_dim, store, household_demographics, customer_address
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_store_sk = store.s_store_sk
| and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
| and store_sales.ss_addr_sk = customer_address.ca_address_sk
| and date_dim.d_dom between 1 and 2
| and (household_demographics.hd_dep_count = 4 or
| household_demographics.hd_vehicle_count = 3)
| and date_dim.d_year in (1999,1999+1,1999+2)
| and store.s_city in ('Midway','Fairview')
| group by ss_ticket_number, ss_customer_sk, ss_addr_sk,ca_city) dn,
| customer,
| customer_address current_addr
| where ss_customer_sk = c_customer_sk
| and customer.c_current_addr_sk = current_addr.ca_address_sk
| and current_addr.ca_city <> bought_city
| order by c_last_name, ss_ticket_number
| limit 100
""".stripMargin),
// q69: demographic profile (gender/marital/education/purchase-estimate/credit-rating)
// of KY/GA/NM customers who made store purchases in months 4..6 of 2001 but have
// NO web or catalog purchases in the same window (EXISTS / NOT EXISTS anti-joins).
("q69", """
| select
| cd_gender, cd_marital_status, cd_education_status, count(*) cnt1,
| cd_purchase_estimate, count(*) cnt2, cd_credit_rating, count(*) cnt3
| from
| customer c,customer_address ca,customer_demographics
| where
| c.c_current_addr_sk = ca.ca_address_sk and
| ca_state in ('KY', 'GA', 'NM') and
| cd_demo_sk = c.c_current_cdemo_sk and
| exists (select * from store_sales, date_dim
| where c.c_customer_sk = ss_customer_sk and
| ss_sold_date_sk = d_date_sk and
| d_year = 2001 and
| d_moy between 4 and 4+2) and
| (not exists (select * from web_sales, date_dim
| where c.c_customer_sk = ws_bill_customer_sk and
| ws_sold_date_sk = d_date_sk and
| d_year = 2001 and
| d_moy between 4 and 4+2) and
| not exists (select * from catalog_sales, date_dim
| where c.c_customer_sk = cs_ship_customer_sk and
| cs_sold_date_sk = d_date_sk and
| d_year = 2001 and
| d_moy between 4 and 4+2))
| group by cd_gender, cd_marital_status, cd_education_status,
| cd_purchase_estimate, cd_credit_rating
| order by cd_gender, cd_marital_status, cd_education_status,
| cd_purchase_estimate, cd_credit_rating
| limit 100
""".stripMargin),
// q70: store net profit rolled up by (state, county) with grouping()-based hierarchy
// level and within-parent ranking, restricted to the top-5 states by net profit
// (inner ranked subquery), over d_month_seq 1200..1200+11.
("q70", """
| select
| sum(ss_net_profit) as total_sum, s_state, s_county
| ,grouping(s_state)+grouping(s_county) as lochierarchy
| ,rank() over (
| partition by grouping(s_state)+grouping(s_county),
| case when grouping(s_county) = 0 then s_state end
| order by sum(ss_net_profit) desc) as rank_within_parent
| from
| store_sales, date_dim d1, store
| where
| d1.d_month_seq between 1200 and 1200+11
| and d1.d_date_sk = ss_sold_date_sk
| and s_store_sk = ss_store_sk
| and s_state in
| (select s_state from
| (select s_state as s_state,
| rank() over ( partition by s_state order by sum(ss_net_profit) desc) as ranking
| from store_sales, store, date_dim
| where d_month_seq between 1200 and 1200+11
| and d_date_sk = ss_sold_date_sk
| and s_store_sk = ss_store_sk
| group by s_state) tmp1
| where ranking <= 5)
| group by rollup(s_state,s_county)
| order by
| lochierarchy desc
| ,case when lochierarchy = 0 then s_state end
| ,rank_within_parent
| limit 100
""".stripMargin),
// q71: brand-level extended sales price by hour/minute for breakfast and dinner
// meal times in November 1999, unioning web, catalog and store channels,
// restricted to items of manager 1.
("q71", """
| select i_brand_id brand_id, i_brand brand,t_hour,t_minute,
| sum(ext_price) ext_price
| from item,
| (select
| ws_ext_sales_price as ext_price,
| ws_sold_date_sk as sold_date_sk,
| ws_item_sk as sold_item_sk,
| ws_sold_time_sk as time_sk
| from web_sales, date_dim
| where d_date_sk = ws_sold_date_sk
| and d_moy=11
| and d_year=1999
| union all
| select
| cs_ext_sales_price as ext_price,
| cs_sold_date_sk as sold_date_sk,
| cs_item_sk as sold_item_sk,
| cs_sold_time_sk as time_sk
| from catalog_sales, date_dim
| where d_date_sk = cs_sold_date_sk
| and d_moy=11
| and d_year=1999
| union all
| select
| ss_ext_sales_price as ext_price,
| ss_sold_date_sk as sold_date_sk,
| ss_item_sk as sold_item_sk,
| ss_sold_time_sk as time_sk
| from store_sales,date_dim
| where d_date_sk = ss_sold_date_sk
| and d_moy=11
| and d_year=1999
| ) as tmp, time_dim
| where
| sold_item_sk = i_item_sk
| and i_manager_id=1
| and time_sk = t_time_sk
| and (t_meal_time = 'breakfast' or t_meal_time = 'dinner')
| group by i_brand, i_brand_id,t_hour,t_minute
| order by ext_price desc, brand_id
""".stripMargin),
// Modifications: "+ days" -> "+ interval N days"
// q72: catalog orders that could not be filled from same-week inventory
// (inv_quantity_on_hand < cs_quantity) and shipped more than 5 days after the
// sale, for '>10000' buy-potential, 'D' marital-status customers in 1999,
// counted per item/warehouse/week.
// NOTE(review): count(case ... then 1 else 0 end) counts every row (both branches
// are non-null), so no_promo == promo == total_cnt; this mirrors the official
// TPC-DS q72 text, so it is deliberately left unchanged — confirm intent.
("q72", """
| select i_item_desc
| ,w_warehouse_name
| ,d1.d_week_seq
| ,count(case when p_promo_sk is null then 1 else 0 end) no_promo
| ,count(case when p_promo_sk is not null then 1 else 0 end) promo
| ,count(*) total_cnt
| from catalog_sales
| join inventory on (cs_item_sk = inv_item_sk)
| join warehouse on (w_warehouse_sk=inv_warehouse_sk)
| join item on (i_item_sk = cs_item_sk)
| join customer_demographics on (cs_bill_cdemo_sk = cd_demo_sk)
| join household_demographics on (cs_bill_hdemo_sk = hd_demo_sk)
| join date_dim d1 on (cs_sold_date_sk = d1.d_date_sk)
| join date_dim d2 on (inv_date_sk = d2.d_date_sk)
| join date_dim d3 on (cs_ship_date_sk = d3.d_date_sk)
| left outer join promotion on (cs_promo_sk=p_promo_sk)
| left outer join catalog_returns on (cr_item_sk = cs_item_sk and cr_order_number = cs_order_number)
| where d1.d_week_seq = d2.d_week_seq
| and inv_quantity_on_hand < cs_quantity
| and d3.d_date > (cast(d1.d_date AS DATE) + interval 5 days)
| and hd_buy_potential = '>10000'
| and d1.d_year = 1999
| and hd_buy_potential = '>10000'
| and cd_marital_status = 'D'
| and d1.d_year = 1999
| group by i_item_desc,w_warehouse_name,d1.d_week_seq
| order by total_cnt desc, i_item_desc, w_warehouse_name, d_week_seq
| limit 100
""".stripMargin),
// q73: customers with between 1 and 5 store tickets in specific counties, early-month
// days of 1999-2001, filtered on household buy potential ('>10000' or 'unknown') and
// dependents-per-vehicle ratio > 1.
("q73", """
| select
| c_last_name, c_first_name, c_salutation, c_preferred_cust_flag,
| ss_ticket_number, cnt from
| (select ss_ticket_number, ss_customer_sk, count(*) cnt
| from store_sales,date_dim,store,household_demographics
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_store_sk = store.s_store_sk
| and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
| and date_dim.d_dom between 1 and 2
| and (household_demographics.hd_buy_potential = '>10000' or
| household_demographics.hd_buy_potential = 'unknown')
| and household_demographics.hd_vehicle_count > 0
| and case when household_demographics.hd_vehicle_count > 0 then
| household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count else null end > 1
| and date_dim.d_year in (1999,1999+1,1999+2)
| and store.s_county in ('Williamson County','Franklin Parish','Bronx County','Orange County')
| group by ss_ticket_number,ss_customer_sk) dj,customer
| where ss_customer_sk = c_customer_sk
| and cnt between 1 and 5
| order by cnt desc
""".stripMargin),
// q74: customers whose web-channel year-over-year net-paid growth (2002/2001)
// exceeds their store-channel growth, using a year_total CTE unioning store
// and web totals tagged with sale_type 's'/'w'.
("q74", """
| with year_total as (
| select
| c_customer_id customer_id, c_first_name customer_first_name,
| c_last_name customer_last_name, d_year as year,
| sum(ss_net_paid) year_total, 's' sale_type
| from
| customer, store_sales, date_dim
| where c_customer_sk = ss_customer_sk
| and ss_sold_date_sk = d_date_sk
| and d_year in (2001,2001+1)
| group by
| c_customer_id, c_first_name, c_last_name, d_year
| union all
| select
| c_customer_id customer_id, c_first_name customer_first_name,
| c_last_name customer_last_name, d_year as year,
| sum(ws_net_paid) year_total, 'w' sale_type
| from
| customer, web_sales, date_dim
| where c_customer_sk = ws_bill_customer_sk
| and ws_sold_date_sk = d_date_sk
| and d_year in (2001,2001+1)
| group by
| c_customer_id, c_first_name, c_last_name, d_year)
| select
| t_s_secyear.customer_id, t_s_secyear.customer_first_name, t_s_secyear.customer_last_name
| from
| year_total t_s_firstyear, year_total t_s_secyear,
| year_total t_w_firstyear, year_total t_w_secyear
| where t_s_secyear.customer_id = t_s_firstyear.customer_id
| and t_s_firstyear.customer_id = t_w_secyear.customer_id
| and t_s_firstyear.customer_id = t_w_firstyear.customer_id
| and t_s_firstyear.sale_type = 's'
| and t_w_firstyear.sale_type = 'w'
| and t_s_secyear.sale_type = 's'
| and t_w_secyear.sale_type = 'w'
| and t_s_firstyear.year = 2001
| and t_s_secyear.year = 2001+1
| and t_w_firstyear.year = 2001
| and t_w_secyear.year = 2001+1
| and t_s_firstyear.year_total > 0
| and t_w_firstyear.year_total > 0
| and case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else null end
| > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else null end
| order by 1, 1, 1
| limit 100
""".stripMargin),
// q75: 'Books' items whose unit sales count dropped below 90% year-over-year
// (2002 vs 2001), comparing net-of-returns counts and amounts aggregated across
// catalog, store and web channels (UNION deduplicates the per-channel rows).
("q75", """
| WITH all_sales AS (
| SELECT
| d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id,
| SUM(sales_cnt) AS sales_cnt, SUM(sales_amt) AS sales_amt
| FROM (
| SELECT
| d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id,
| cs_quantity - COALESCE(cr_return_quantity,0) AS sales_cnt,
| cs_ext_sales_price - COALESCE(cr_return_amount,0.0) AS sales_amt
| FROM catalog_sales
| JOIN item ON i_item_sk=cs_item_sk
| JOIN date_dim ON d_date_sk=cs_sold_date_sk
| LEFT JOIN catalog_returns ON (cs_order_number=cr_order_number
| AND cs_item_sk=cr_item_sk)
| WHERE i_category='Books'
| UNION
| SELECT
| d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id,
| ss_quantity - COALESCE(sr_return_quantity,0) AS sales_cnt,
| ss_ext_sales_price - COALESCE(sr_return_amt,0.0) AS sales_amt
| FROM store_sales
| JOIN item ON i_item_sk=ss_item_sk
| JOIN date_dim ON d_date_sk=ss_sold_date_sk
| LEFT JOIN store_returns ON (ss_ticket_number=sr_ticket_number
| AND ss_item_sk=sr_item_sk)
| WHERE i_category='Books'
| UNION
| SELECT
| d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id,
| ws_quantity - COALESCE(wr_return_quantity,0) AS sales_cnt,
| ws_ext_sales_price - COALESCE(wr_return_amt,0.0) AS sales_amt
| FROM web_sales
| JOIN item ON i_item_sk=ws_item_sk
| JOIN date_dim ON d_date_sk=ws_sold_date_sk
| LEFT JOIN web_returns ON (ws_order_number=wr_order_number
| AND ws_item_sk=wr_item_sk)
| WHERE i_category='Books') sales_detail
| GROUP BY d_year, i_brand_id, i_class_id, i_category_id, i_manufact_id)
| SELECT
| prev_yr.d_year AS prev_year, curr_yr.d_year AS year, curr_yr.i_brand_id,
| curr_yr.i_class_id, curr_yr.i_category_id, curr_yr.i_manufact_id,
| prev_yr.sales_cnt AS prev_yr_cnt, curr_yr.sales_cnt AS curr_yr_cnt,
| curr_yr.sales_cnt-prev_yr.sales_cnt AS sales_cnt_diff,
| curr_yr.sales_amt-prev_yr.sales_amt AS sales_amt_diff
| FROM all_sales curr_yr, all_sales prev_yr
| WHERE curr_yr.i_brand_id=prev_yr.i_brand_id
| AND curr_yr.i_class_id=prev_yr.i_class_id
| AND curr_yr.i_category_id=prev_yr.i_category_id
| AND curr_yr.i_manufact_id=prev_yr.i_manufact_id
| AND curr_yr.d_year=2002
| AND prev_yr.d_year=2002-1
| AND CAST(curr_yr.sales_cnt AS DECIMAL(17,2))/CAST(prev_yr.sales_cnt AS DECIMAL(17,2))<0.9
| ORDER BY sales_cnt_diff
| LIMIT 100
""".stripMargin),
// q76: counts and amounts of sales rows carrying a NULL foreign key per channel
// (store: ss_store_sk, web: ws_ship_customer_sk, catalog: cs_ship_addr_sk),
// broken down by year/quarter/category.
("q76", """
| SELECT
| channel, col_name, d_year, d_qoy, i_category, COUNT(*) sales_cnt,
| SUM(ext_sales_price) sales_amt
| FROM(
| SELECT
| 'store' as channel, ss_store_sk col_name, d_year, d_qoy, i_category,
| ss_ext_sales_price ext_sales_price
| FROM store_sales, item, date_dim
| WHERE ss_store_sk IS NULL
| AND ss_sold_date_sk=d_date_sk
| AND ss_item_sk=i_item_sk
| UNION ALL
| SELECT
| 'web' as channel, ws_ship_customer_sk col_name, d_year, d_qoy, i_category,
| ws_ext_sales_price ext_sales_price
| FROM web_sales, item, date_dim
| WHERE ws_ship_customer_sk IS NULL
| AND ws_sold_date_sk=d_date_sk
| AND ws_item_sk=i_item_sk
| UNION ALL
| SELECT
| 'catalog' as channel, cs_ship_addr_sk col_name, d_year, d_qoy, i_category,
| cs_ext_sales_price ext_sales_price
| FROM catalog_sales, item, date_dim
| WHERE cs_ship_addr_sk IS NULL
| AND cs_sold_date_sk=d_date_sk
| AND cs_item_sk=i_item_sk) foo
| GROUP BY channel, col_name, d_year, d_qoy, i_category
| ORDER BY channel, col_name, d_year, d_qoy, i_category
| limit 100
""".stripMargin),
// Modifications: "+ days" -> "+ interval N days"
// q77: per-channel (store / catalog / web) sales, returns and profit over the 30-day
// window starting 2000-08-03, combined with a rollup over (channel, id).
// Fix: the cr CTE's lower bound read cast('2000-08-03]' as date) — the stray ']'
// made the literal an invalid date (null in Spark SQL), emptying the catalog-returns
// window filter. Every other CTE in this query uses '2000-08-03'; corrected to match.
// NOTE(review): the cr CTE says "interval 30 day" where the others say "30 days";
// both spellings are accepted by Spark SQL, so it is left untouched.
("q77", """
| with ss as
| (select s_store_sk, sum(ss_ext_sales_price) as sales, sum(ss_net_profit) as profit
| from store_sales, date_dim, store
| where ss_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
| (cast('2000-08-03' as date) + interval 30 days)
| and ss_store_sk = s_store_sk
| group by s_store_sk),
| sr as
| (select s_store_sk, sum(sr_return_amt) as returns, sum(sr_net_loss) as profit_loss
| from store_returns, date_dim, store
| where sr_returned_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
| (cast('2000-08-03' as date) + interval 30 days)
| and sr_store_sk = s_store_sk
| group by s_store_sk),
| cs as
| (select cs_call_center_sk, sum(cs_ext_sales_price) as sales, sum(cs_net_profit) as profit
| from catalog_sales, date_dim
| where cs_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
| (cast('2000-08-03' as date) + interval 30 days)
| group by cs_call_center_sk),
| cr as
| (select sum(cr_return_amount) as returns, sum(cr_net_loss) as profit_loss
| from catalog_returns, date_dim
| where cr_returned_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
| (cast('2000-08-03' as date) + interval 30 day)),
| ws as
| (select wp_web_page_sk, sum(ws_ext_sales_price) as sales, sum(ws_net_profit) as profit
| from web_sales, date_dim, web_page
| where ws_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
| (cast('2000-08-03' as date) + interval 30 days)
| and ws_web_page_sk = wp_web_page_sk
| group by wp_web_page_sk),
| wr as
| (select wp_web_page_sk, sum(wr_return_amt) as returns, sum(wr_net_loss) as profit_loss
| from web_returns, date_dim, web_page
| where wr_returned_date_sk = d_date_sk
| and d_date between cast('2000-08-03' as date) and
| (cast('2000-08-03' as date) + interval 30 days)
| and wr_web_page_sk = wp_web_page_sk
| group by wp_web_page_sk)
| select channel, id, sum(sales) as sales, sum(returns) as returns, sum(profit) as profit
| from
| (select
| 'store channel' as channel, ss.s_store_sk as id, sales,
| coalesce(returns, 0) as returns, (profit - coalesce(profit_loss,0)) as profit
| from ss left join sr
| on ss.s_store_sk = sr.s_store_sk
| union all
| select
| 'catalog channel' as channel, cs_call_center_sk as id, sales,
| returns, (profit - profit_loss) as profit
| from cs, cr
| union all
| select
| 'web channel' as channel, ws.wp_web_page_sk as id, sales,
| coalesce(returns, 0) returns, (profit - coalesce(profit_loss,0)) as profit
| from ws left join wr
| on ws.wp_web_page_sk = wr.wp_web_page_sk
| ) x
| group by rollup(channel, id)
| order by channel, id
| limit 100
""".stripMargin),
// q78: for year-2000 store sales with no return (sr_ticket_number is null), compare
// store quantity/cost/price against the same item+customer's unreturned web and
// catalog totals; keeps rows where both other channels have quantity > 0 and
// reports the store-to-other-channels quantity ratio.
("q78", """
| with ws as
| (select d_year AS ws_sold_year, ws_item_sk,
| ws_bill_customer_sk ws_customer_sk,
| sum(ws_quantity) ws_qty,
| sum(ws_wholesale_cost) ws_wc,
| sum(ws_sales_price) ws_sp
| from web_sales
| left join web_returns on wr_order_number=ws_order_number and ws_item_sk=wr_item_sk
| join date_dim on ws_sold_date_sk = d_date_sk
| where wr_order_number is null
| group by d_year, ws_item_sk, ws_bill_customer_sk
| ),
| cs as
| (select d_year AS cs_sold_year, cs_item_sk,
| cs_bill_customer_sk cs_customer_sk,
| sum(cs_quantity) cs_qty,
| sum(cs_wholesale_cost) cs_wc,
| sum(cs_sales_price) cs_sp
| from catalog_sales
| left join catalog_returns on cr_order_number=cs_order_number and cs_item_sk=cr_item_sk
| join date_dim on cs_sold_date_sk = d_date_sk
| where cr_order_number is null
| group by d_year, cs_item_sk, cs_bill_customer_sk
| ),
| ss as
| (select d_year AS ss_sold_year, ss_item_sk,
| ss_customer_sk,
| sum(ss_quantity) ss_qty,
| sum(ss_wholesale_cost) ss_wc,
| sum(ss_sales_price) ss_sp
| from store_sales
| left join store_returns on sr_ticket_number=ss_ticket_number and ss_item_sk=sr_item_sk
| join date_dim on ss_sold_date_sk = d_date_sk
| where sr_ticket_number is null
| group by d_year, ss_item_sk, ss_customer_sk
| )
| select
| round(ss_qty/(coalesce(ws_qty+cs_qty,1)),2) ratio,
| ss_qty store_qty, ss_wc store_wholesale_cost, ss_sp store_sales_price,
| coalesce(ws_qty,0)+coalesce(cs_qty,0) other_chan_qty,
| coalesce(ws_wc,0)+coalesce(cs_wc,0) other_chan_wholesale_cost,
| coalesce(ws_sp,0)+coalesce(cs_sp,0) other_chan_sales_price
| from ss
| left join ws on (ws_sold_year=ss_sold_year and ws_item_sk=ss_item_sk and ws_customer_sk=ss_customer_sk)
| left join cs on (cs_sold_year=ss_sold_year and ss_item_sk=cs_item_sk and cs_customer_sk=ss_customer_sk)
| where coalesce(ws_qty,0)>0 and coalesce(cs_qty, 0)>0 and ss_sold_year=2000
| order by
| ratio,
| ss_qty desc, ss_wc desc, ss_sp desc,
| other_chan_qty,
| other_chan_wholesale_cost,
| other_chan_sales_price,
| round(ss_qty/(coalesce(ws_qty+cs_qty,1)),2)
| limit 100
""".stripMargin),
// q79: per-ticket coupon amount and net profit for Monday (d_dow = 1) store sales
// in 1999-2001 at stores with 200-295 employees, for households with 6 dependents
// or more than 2 vehicles, joined back to the customer for name output.
("q79", """
| select
| c_last_name,c_first_name,substr(s_city,1,30),ss_ticket_number,amt,profit
| from
| (select ss_ticket_number
| ,ss_customer_sk
| ,store.s_city
| ,sum(ss_coupon_amt) amt
| ,sum(ss_net_profit) profit
| from store_sales,date_dim,store,household_demographics
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_store_sk = store.s_store_sk
| and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
| and (household_demographics.hd_dep_count = 6 or
| household_demographics.hd_vehicle_count > 2)
| and date_dim.d_dow = 1
| and date_dim.d_year in (1999,1999+1,1999+2)
| and store.s_number_employees between 200 and 295
| group by ss_ticket_number,ss_customer_sk,ss_addr_sk,store.s_city) ms,customer
| where ss_customer_sk = c_customer_sk
| order by c_last_name,c_first_name,substr(s_city,1,30), profit
| limit 100
""".stripMargin),
// Modifications: "+ days" -> "+ interval N days"
// Modifications: "||" -> "concat"
// q80: sales, returns and net profit per channel entity (store id / catalog page id /
// web site id) over the 30-day window from 2000-08-23, for items priced over $50
// sold under non-TV promotions, with a rollup over (channel, id).
("q80", """
| with ssr as
| (select s_store_id as store_id,
| sum(ss_ext_sales_price) as sales,
| sum(coalesce(sr_return_amt, 0)) as returns,
| sum(ss_net_profit - coalesce(sr_net_loss, 0)) as profit
| from store_sales left outer join store_returns on
| (ss_item_sk = sr_item_sk and ss_ticket_number = sr_ticket_number),
| date_dim, store, item, promotion
| where ss_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and (cast('2000-08-23' as date) + interval 30 days)
| and ss_store_sk = s_store_sk
| and ss_item_sk = i_item_sk
| and i_current_price > 50
| and ss_promo_sk = p_promo_sk
| and p_channel_tv = 'N'
| group by s_store_id),
| csr as
| (select cp_catalog_page_id as catalog_page_id,
| sum(cs_ext_sales_price) as sales,
| sum(coalesce(cr_return_amount, 0)) as returns,
| sum(cs_net_profit - coalesce(cr_net_loss, 0)) as profit
| from catalog_sales left outer join catalog_returns on
| (cs_item_sk = cr_item_sk and cs_order_number = cr_order_number),
| date_dim, catalog_page, item, promotion
| where cs_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and (cast('2000-08-23' as date) + interval 30 days)
| and cs_catalog_page_sk = cp_catalog_page_sk
| and cs_item_sk = i_item_sk
| and i_current_price > 50
| and cs_promo_sk = p_promo_sk
| and p_channel_tv = 'N'
| group by cp_catalog_page_id),
| wsr as
| (select web_site_id,
| sum(ws_ext_sales_price) as sales,
| sum(coalesce(wr_return_amt, 0)) as returns,
| sum(ws_net_profit - coalesce(wr_net_loss, 0)) as profit
| from web_sales left outer join web_returns on
| (ws_item_sk = wr_item_sk and ws_order_number = wr_order_number),
| date_dim, web_site, item, promotion
| where ws_sold_date_sk = d_date_sk
| and d_date between cast('2000-08-23' as date)
| and (cast('2000-08-23' as date) + interval 30 days)
| and ws_web_site_sk = web_site_sk
| and ws_item_sk = i_item_sk
| and i_current_price > 50
| and ws_promo_sk = p_promo_sk
| and p_channel_tv = 'N'
| group by web_site_id)
| select channel, id, sum(sales) as sales, sum(returns) as returns, sum(profit) as profit
| from (select
| 'store channel' as channel, concat('store', store_id) as id, sales, returns, profit
| from ssr
| union all
| select
| 'catalog channel' as channel, concat('catalog_page', catalog_page_id) as id,
| sales, returns, profit
| from csr
| union all
| select
| 'web channel' as channel, concat('web_site', web_site_id) as id, sales, returns, profit
| from wsr) x
| group by rollup (channel, id)
| order by channel, id
| limit 100
""".stripMargin),
// q81: Georgia customers whose year-2000 catalog return total exceeds 1.2x the
// average return total of their state (correlated subquery over the
// customer_total_return CTE), with full address output.
("q81", """
| with customer_total_return as
| (select
| cr_returning_customer_sk as ctr_customer_sk, ca_state as ctr_state,
| sum(cr_return_amt_inc_tax) as ctr_total_return
| from catalog_returns, date_dim, customer_address
| where cr_returned_date_sk = d_date_sk
| and d_year = 2000
| and cr_returning_addr_sk = ca_address_sk
| group by cr_returning_customer_sk, ca_state )
| select
| c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name,
| ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,
| ca_gmt_offset,ca_location_type,ctr_total_return
| from customer_total_return ctr1, customer_address, customer
| where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2
| from customer_total_return ctr2
| where ctr1.ctr_state = ctr2.ctr_state)
| and ca_address_sk = c_current_addr_sk
| and ca_state = 'GA'
| and ctr1.ctr_customer_sk = c_customer_sk
| order by c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name
| ,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset
| ,ca_location_type,ctr_total_return
| limit 100
""".stripMargin),
// q82: items from four manufacturers, priced 62..92, that were both in inventory
// (100-500 on hand during a 60-day window from 2000-05-25) and sold in stores;
// group by deduplicates the item attributes.
("q82", """
| select i_item_id, i_item_desc, i_current_price
| from item, inventory, date_dim, store_sales
| where i_current_price between 62 and 62+30
| and inv_item_sk = i_item_sk
| and d_date_sk=inv_date_sk
| and d_date between cast('2000-05-25' as date) and (cast('2000-05-25' as date) + interval 60 days)
| and i_manufact_id in (129, 270, 821, 423)
| and inv_quantity_on_hand between 100 and 500
| and ss_item_sk = i_item_sk
| group by i_item_id,i_item_desc,i_current_price
| order by i_item_id
| limit 100
""".stripMargin),
// q83: per-item return quantities for store, catalog and web channels during the
// weeks containing three given dates, with each channel's share of the combined
// total (the x/(s+c+w)/3*100 expressions) and the three-channel average.
("q83", """
| with sr_items as
| (select i_item_id item_id, sum(sr_return_quantity) sr_item_qty
| from store_returns, item, date_dim
| where sr_item_sk = i_item_sk
| and d_date in (select d_date from date_dim where d_week_seq in
| (select d_week_seq from date_dim where d_date in ('2000-06-30','2000-09-27','2000-11-17')))
| and sr_returned_date_sk = d_date_sk
| group by i_item_id),
| cr_items as
| (select i_item_id item_id, sum(cr_return_quantity) cr_item_qty
| from catalog_returns, item, date_dim
| where cr_item_sk = i_item_sk
| and d_date in (select d_date from date_dim where d_week_seq in
| (select d_week_seq from date_dim where d_date in ('2000-06-30','2000-09-27','2000-11-17')))
| and cr_returned_date_sk = d_date_sk
| group by i_item_id),
| wr_items as
| (select i_item_id item_id, sum(wr_return_quantity) wr_item_qty
| from web_returns, item, date_dim
| where wr_item_sk = i_item_sk and d_date in
| (select d_date from date_dim where d_week_seq in
| (select d_week_seq from date_dim where d_date in ('2000-06-30','2000-09-27','2000-11-17')))
| and wr_returned_date_sk = d_date_sk
| group by i_item_id)
| select sr_items.item_id
| ,sr_item_qty
| ,sr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 * 100 sr_dev
| ,cr_item_qty
| ,cr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 * 100 cr_dev
| ,wr_item_qty
| ,wr_item_qty/(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 * 100 wr_dev
| ,(sr_item_qty+cr_item_qty+wr_item_qty)/3.0 average
| from sr_items, cr_items, wr_items
| where sr_items.item_id=cr_items.item_id
| and sr_items.item_id=wr_items.item_id
| order by sr_items.item_id, sr_item_qty
| limit 100
""".stripMargin),
// Modifications: "||" -> concat
("q84", """
| select c_customer_id as customer_id
| ,concat(c_last_name, ', ', c_first_name) as customername
| from customer
| ,customer_address
| ,customer_demographics
| ,household_demographics
| ,income_band
| ,store_returns
| where ca_city = 'Edgewood'
| and c_current_addr_sk = ca_address_sk
| and ib_lower_bound >= 38128
| and ib_upper_bound <= 38128 + 50000
| and ib_income_band_sk = hd_income_band_sk
| and cd_demo_sk = c_current_cdemo_sk
| and hd_demo_sk = c_current_hdemo_sk
| and sr_cdemo_sk = cd_demo_sk
| order by c_customer_id
| limit 100
""".stripMargin),
("q85", """
| select
| substr(r_reason_desc,1,20), avg(ws_quantity), avg(wr_refunded_cash), avg(wr_fee)
| from web_sales, web_returns, web_page, customer_demographics cd1,
| customer_demographics cd2, customer_address, date_dim, reason
| where ws_web_page_sk = wp_web_page_sk
| and ws_item_sk = wr_item_sk
| and ws_order_number = wr_order_number
| and ws_sold_date_sk = d_date_sk and d_year = 2000
| and cd1.cd_demo_sk = wr_refunded_cdemo_sk
| and cd2.cd_demo_sk = wr_returning_cdemo_sk
| and ca_address_sk = wr_refunded_addr_sk
| and r_reason_sk = wr_reason_sk
| and
| (
| (
| cd1.cd_marital_status = 'M'
| and
| cd1.cd_marital_status = cd2.cd_marital_status
| and
| cd1.cd_education_status = 'Advanced Degree'
| and
| cd1.cd_education_status = cd2.cd_education_status
| and
| ws_sales_price between 100.00 and 150.00
| )
| or
| (
| cd1.cd_marital_status = 'S'
| and
| cd1.cd_marital_status = cd2.cd_marital_status
| and
| cd1.cd_education_status = 'College'
| and
| cd1.cd_education_status = cd2.cd_education_status
| and
| ws_sales_price between 50.00 and 100.00
| )
| or
| (
| cd1.cd_marital_status = 'W'
| and
| cd1.cd_marital_status = cd2.cd_marital_status
| and
| cd1.cd_education_status = '2 yr Degree'
| and
| cd1.cd_education_status = cd2.cd_education_status
| and
| ws_sales_price between 150.00 and 200.00
| )
| )
| and
| (
| (
| ca_country = 'United States'
| and
| ca_state in ('IN', 'OH', 'NJ')
| and ws_net_profit between 100 and 200
| )
| or
| (
| ca_country = 'United States'
| and
| ca_state in ('WI', 'CT', 'KY')
| and ws_net_profit between 150 and 300
| )
| or
| (
| ca_country = 'United States'
| and
| ca_state in ('LA', 'IA', 'AR')
| and ws_net_profit between 50 and 250
| )
| )
| group by r_reason_desc
| order by substr(r_reason_desc,1,20)
| ,avg(ws_quantity)
| ,avg(wr_refunded_cash)
| ,avg(wr_fee)
| limit 100
""".stripMargin),
("q86", """
| select sum(ws_net_paid) as total_sum, i_category, i_class,
| grouping(i_category)+grouping(i_class) as lochierarchy,
| rank() over (
| partition by grouping(i_category)+grouping(i_class),
| case when grouping(i_class) = 0 then i_category end
| order by sum(ws_net_paid) desc) as rank_within_parent
| from
| web_sales, date_dim d1, item
| where
| d1.d_month_seq between 1200 and 1200+11
| and d1.d_date_sk = ws_sold_date_sk
| and i_item_sk = ws_item_sk
| group by rollup(i_category,i_class)
| order by
| lochierarchy desc,
| case when lochierarchy = 0 then i_category end,
| rank_within_parent
| limit 100
""".stripMargin),
("q87", """
| select count(*)
| from ((select distinct c_last_name, c_first_name, d_date
| from store_sales, date_dim, customer
| where store_sales.ss_sold_date_sk = date_dim.d_date_sk
| and store_sales.ss_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200+11)
| except
| (select distinct c_last_name, c_first_name, d_date
| from catalog_sales, date_dim, customer
| where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk
| and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200+11)
| except
| (select distinct c_last_name, c_first_name, d_date
| from web_sales, date_dim, customer
| where web_sales.ws_sold_date_sk = date_dim.d_date_sk
| and web_sales.ws_bill_customer_sk = customer.c_customer_sk
| and d_month_seq between 1200 and 1200+11)
|) cool_cust
""".stripMargin),
("q88", """
| select *
| from
| (select count(*) h8_30_to_9
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 8
| and time_dim.t_minute >= 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s1,
| (select count(*) h9_to_9_30
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 9
| and time_dim.t_minute < 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s2,
| (select count(*) h9_30_to_10
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 9
| and time_dim.t_minute >= 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s3,
| (select count(*) h10_to_10_30
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 10
| and time_dim.t_minute < 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s4,
| (select count(*) h10_30_to_11
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 10
| and time_dim.t_minute >= 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s5,
| (select count(*) h11_to_11_30
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 11
| and time_dim.t_minute < 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s6,
| (select count(*) h11_30_to_12
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 11
| and time_dim.t_minute >= 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s7,
| (select count(*) h12_to_12_30
| from store_sales, household_demographics , time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 12
| and time_dim.t_minute < 30
| and ((household_demographics.hd_dep_count = 4 and household_demographics.hd_vehicle_count<=4+2) or
| (household_demographics.hd_dep_count = 2 and household_demographics.hd_vehicle_count<=2+2) or
| (household_demographics.hd_dep_count = 0 and household_demographics.hd_vehicle_count<=0+2))
| and store.s_store_name = 'ese') s8
""".stripMargin),
("q89", """
| select *
| from(
| select i_category, i_class, i_brand,
| s_store_name, s_company_name,
| d_moy,
| sum(ss_sales_price) sum_sales,
| avg(sum(ss_sales_price)) over
| (partition by i_category, i_brand, s_store_name, s_company_name)
| avg_monthly_sales
| from item, store_sales, date_dim, store
| where ss_item_sk = i_item_sk and
| ss_sold_date_sk = d_date_sk and
| ss_store_sk = s_store_sk and
| d_year in (1999) and
| ((i_category in ('Books','Electronics','Sports') and
| i_class in ('computers','stereo','football'))
| or (i_category in ('Men','Jewelry','Women') and
| i_class in ('shirts','birdal','dresses')))
| group by i_category, i_class, i_brand,
| s_store_name, s_company_name, d_moy) tmp1
| where case when (avg_monthly_sales <> 0) then (abs(sum_sales - avg_monthly_sales) / avg_monthly_sales) else null end > 0.1
| order by sum_sales - avg_monthly_sales, s_store_name
| limit 100
""".stripMargin),
("q90", """
| select cast(amc as decimal(15,4))/cast(pmc as decimal(15,4)) am_pm_ratio
| from ( select count(*) amc
| from web_sales, household_demographics , time_dim, web_page
| where ws_sold_time_sk = time_dim.t_time_sk
| and ws_ship_hdemo_sk = household_demographics.hd_demo_sk
| and ws_web_page_sk = web_page.wp_web_page_sk
| and time_dim.t_hour between 8 and 8+1
| and household_demographics.hd_dep_count = 6
| and web_page.wp_char_count between 5000 and 5200) at,
| ( select count(*) pmc
| from web_sales, household_demographics , time_dim, web_page
| where ws_sold_time_sk = time_dim.t_time_sk
| and ws_ship_hdemo_sk = household_demographics.hd_demo_sk
| and ws_web_page_sk = web_page.wp_web_page_sk
| and time_dim.t_hour between 19 and 19+1
| and household_demographics.hd_dep_count = 6
| and web_page.wp_char_count between 5000 and 5200) pt
| order by am_pm_ratio
| limit 100
""".stripMargin),
("q91", """
| select
| cc_call_center_id Call_Center, cc_name Call_Center_Name, cc_manager Manager,
| sum(cr_net_loss) Returns_Loss
| from
| call_center, catalog_returns, date_dim, customer, customer_address,
| customer_demographics, household_demographics
| where
| cr_call_center_sk = cc_call_center_sk
| and cr_returned_date_sk = d_date_sk
| and cr_returning_customer_sk = c_customer_sk
| and cd_demo_sk = c_current_cdemo_sk
| and hd_demo_sk = c_current_hdemo_sk
| and ca_address_sk = c_current_addr_sk
| and d_year = 1998
| and d_moy = 11
| and ( (cd_marital_status = 'M' and cd_education_status = 'Unknown')
| or(cd_marital_status = 'W' and cd_education_status = 'Advanced Degree'))
| and hd_buy_potential like 'Unknown%'
| and ca_gmt_offset = -7
| group by cc_call_center_id,cc_name,cc_manager,cd_marital_status,cd_education_status
| order by sum(cr_net_loss) desc
""".stripMargin),
// Modifications: "+ days" -> date_add
// Modifications: " -> `
("q92", """
| select sum(ws_ext_discount_amt) as `Excess Discount Amount`
| from web_sales, item, date_dim
| where i_manufact_id = 350
| and i_item_sk = ws_item_sk
| and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days)
| and d_date_sk = ws_sold_date_sk
| and ws_ext_discount_amt >
| (
| SELECT 1.3 * avg(ws_ext_discount_amt)
| FROM web_sales, date_dim
| WHERE ws_item_sk = i_item_sk
| and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days)
| and d_date_sk = ws_sold_date_sk
| )
| order by sum(ws_ext_discount_amt)
| limit 100
""".stripMargin),
("q93", """
| select ss_customer_sk, sum(act_sales) sumsales
| from (select
| ss_item_sk, ss_ticket_number, ss_customer_sk,
| case when sr_return_quantity is not null then (ss_quantity-sr_return_quantity)*ss_sales_price
| else (ss_quantity*ss_sales_price) end act_sales
| from store_sales
| left outer join store_returns
| on (sr_item_sk = ss_item_sk and sr_ticket_number = ss_ticket_number),
| reason
| where sr_reason_sk = r_reason_sk and r_reason_desc = 'reason 28') t
| group by ss_customer_sk
| order by sumsales, ss_customer_sk
| limit 100
""".stripMargin),
// Modifications: "+ days" -> date_add
// Modifications: " -> `
("q94", """
| select
| count(distinct ws_order_number) as `order count`
| ,sum(ws_ext_ship_cost) as `total shipping cost`
| ,sum(ws_net_profit) as `total net profit`
| from
| web_sales ws1, date_dim, customer_address, web_site
| where
| d_date between '1999-02-01' and
| (cast('1999-02-01' as date) + interval 60 days)
| and ws1.ws_ship_date_sk = d_date_sk
| and ws1.ws_ship_addr_sk = ca_address_sk
| and ca_state = 'IL'
| and ws1.ws_web_site_sk = web_site_sk
| and web_company_name = 'pri'
| and exists (select *
| from web_sales ws2
| where ws1.ws_order_number = ws2.ws_order_number
| and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk)
| and not exists(select *
| from web_returns wr1
| where ws1.ws_order_number = wr1.wr_order_number)
| order by count(distinct ws_order_number)
| limit 100
""".stripMargin),
// Modifications: "+ days" -> date_add
("q95", """
| with ws_wh as
| (select ws1.ws_order_number,ws1.ws_warehouse_sk wh1,ws2.ws_warehouse_sk wh2
| from web_sales ws1,web_sales ws2
| where ws1.ws_order_number = ws2.ws_order_number
| and ws1.ws_warehouse_sk <> ws2.ws_warehouse_sk)
| select
| count(distinct ws_order_number) as `order count`
| ,sum(ws_ext_ship_cost) as `total shipping cost`
| ,sum(ws_net_profit) as `total net profit`
| from
| web_sales ws1, date_dim, customer_address, web_site
| where
| d_date between '1999-02-01' and
| (cast('1999-02-01' as date) + interval 60 days)
| and ws1.ws_ship_date_sk = d_date_sk
| and ws1.ws_ship_addr_sk = ca_address_sk
| and ca_state = 'IL'
| and ws1.ws_web_site_sk = web_site_sk
| and web_company_name = 'pri'
| and ws1.ws_order_number in (select ws_order_number
| from ws_wh)
| and ws1.ws_order_number in (select wr_order_number
| from web_returns,ws_wh
| where wr_order_number = ws_wh.ws_order_number)
| order by count(distinct ws_order_number)
| limit 100
""".stripMargin),
("q96", """
| select count(*)
| from store_sales, household_demographics, time_dim, store
| where ss_sold_time_sk = time_dim.t_time_sk
| and ss_hdemo_sk = household_demographics.hd_demo_sk
| and ss_store_sk = s_store_sk
| and time_dim.t_hour = 20
| and time_dim.t_minute >= 30
| and household_demographics.hd_dep_count = 7
| and store.s_store_name = 'ese'
| order by count(*)
| limit 100
""".stripMargin),
("q97", """
| with ssci as (
| select ss_customer_sk customer_sk, ss_item_sk item_sk
| from store_sales,date_dim
| where ss_sold_date_sk = d_date_sk
| and d_month_seq between 1200 and 1200 + 11
| group by ss_customer_sk, ss_item_sk),
| csci as(
| select cs_bill_customer_sk customer_sk, cs_item_sk item_sk
| from catalog_sales,date_dim
| where cs_sold_date_sk = d_date_sk
| and d_month_seq between 1200 and 1200 + 11
| group by cs_bill_customer_sk, cs_item_sk)
| select sum(case when ssci.customer_sk is not null and csci.customer_sk is null then 1 else 0 end) store_only
| ,sum(case when ssci.customer_sk is null and csci.customer_sk is not null then 1 else 0 end) catalog_only
| ,sum(case when ssci.customer_sk is not null and csci.customer_sk is not null then 1 else 0 end) store_and_catalog
| from ssci full outer join csci on (ssci.customer_sk=csci.customer_sk
| and ssci.item_sk = csci.item_sk)
| limit 100
""".stripMargin),
// Modifications: "+ days" -> date_add
("q98", """
|select i_item_desc, i_category, i_class, i_current_price
| ,sum(ss_ext_sales_price) as itemrevenue
| ,sum(ss_ext_sales_price)*100/sum(sum(ss_ext_sales_price)) over
| (partition by i_class) as revenueratio
|from
| store_sales, item, date_dim
|where
| ss_item_sk = i_item_sk
| and i_category in ('Sports', 'Books', 'Home')
| and ss_sold_date_sk = d_date_sk
| and d_date between cast('1999-02-22' as date)
| and (cast('1999-02-22' as date) + interval 30 days)
|group by
| i_item_id, i_item_desc, i_category, i_class, i_current_price
|order by
| i_category, i_class, i_item_id, i_item_desc, revenueratio
""".stripMargin),
// Modifications: " -> `
("q99", """
| select
| substr(w_warehouse_name,1,20), sm_type, cc_name
| ,sum(case when (cs_ship_date_sk - cs_sold_date_sk <= 30 ) then 1 else 0 end) as `30 days`
| ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 30) and
| (cs_ship_date_sk - cs_sold_date_sk <= 60) then 1 else 0 end ) as `31-60 days`
| ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 60) and
| (cs_ship_date_sk - cs_sold_date_sk <= 90) then 1 else 0 end) as `61-90 days`
| ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 90) and
| (cs_ship_date_sk - cs_sold_date_sk <= 120) then 1 else 0 end) as `91-120 days`
| ,sum(case when (cs_ship_date_sk - cs_sold_date_sk > 120) then 1 else 0 end) as `>120 days`
| from
| catalog_sales, warehouse, ship_mode, call_center, date_dim
| where
| d_month_seq between 1200 and 1200 + 11
| and cs_ship_date_sk = d_date_sk
| and cs_warehouse_sk = w_warehouse_sk
| and cs_ship_mode_sk = sm_ship_mode_sk
| and cs_call_center_sk = cc_call_center_sk
| group by
| substr(w_warehouse_name,1,20), sm_type, cc_name
| order by substr(w_warehouse_name,1,20), sm_type, cc_name
| limit 100
""".stripMargin),
("qSsMax",
"""
|select
| count(*) as total,
| count(ss_sold_date_sk) as not_null_total,
| count(distinct ss_sold_date_sk) as unique_days,
| max(ss_sold_date_sk) as max_ss_sold_date_sk,
| max(ss_sold_time_sk) as max_ss_sold_time_sk,
| max(ss_item_sk) as max_ss_item_sk,
| max(ss_customer_sk) as max_ss_customer_sk,
| max(ss_cdemo_sk) as max_ss_cdemo_sk,
| max(ss_hdemo_sk) as max_ss_hdemo_sk,
| max(ss_addr_sk) as max_ss_addr_sk,
| max(ss_store_sk) as max_ss_store_sk,
| max(ss_promo_sk) as max_ss_promo_sk
|from store_sales
""".stripMargin)
).map { case (name, sqlText) =>
Query(name + "-v1.4", sqlText, description = "TPCDS 1.4 Query", executionMode = CollectResults)
}
val tpcds1_4QueriesMap = tpcds1_4Queries.map(q => q.name.split("-").get(0) -> q).toMap
val runnable: Seq[Query] = Seq(
"q1", "q2", "q3", "q4", "q5", "q7", "q8", "q9",
"q11", "q12", "q13", "q15", "q17", "q18", "q19",
"q20", "q21", "q22", "q25", "q26", "q27", "q28", "q29",
"q31", "q34", "q36", "q37", "q38", "q39a", "q39b",
"q40", "q42", "q43", "q44", "q46", "q47", "q48", "q49",
"q50", "q51", "q52", "q53", "q54", "q55", "q57", "q59",
"q61", "q62", "q63", "q64", "q65", "q66", "q67", "q68",
"q71", "q72", "q73", "q74", "q75", "q76", "q77", "q78", "q79",
"q80", "q82", "q84", "q85", "q86", "q87", "q88", "q89",
"q90", "q91", "q93", "q96", "q97", "q98", "q99", "qSsMax").map(tpcds1_4QueriesMap)
val all: Seq[Query] = tpcds1_4QueriesMap.values.toSeq
}
| levyx/spark-sql-perf | src/main/scala/com/databricks/spark/sql/perf/tpcds/TPCDS_1_4_Queries.scala | Scala | apache-2.0 | 206,322 |
package org.http4s.client.blaze
import scalaz.concurrent.Task
import org.http4s._
import org.specs2.mutable.After
// TODO: this should have a more comprehensive test suite
class ExternalBlazeHttp1ClientSpec extends Http4sSpec {
  // Both clients fetch the same public page; a non-empty body proves success.
  private def target = uri("https://github.com/")

  private val simpleClient = SimpleHttp1Client()

  "Blaze Simple Http1 Client" should {
    "Make simple https requests" in {
      val body = simpleClient.expect[String](target).run
      body.length mustNotEqual 0
    }
  }

  // Release the simple client's resources once its examples have run.
  step {
    simpleClient.shutdown.run
  }

  private val pooledClient = PooledHttp1Client()

  "RecyclingHttp1Client" should {
    def fetchBody = pooledClient.toService(_.as[String]).local { uri: Uri => Request(uri = uri) }

    "Make simple https requests" in {
      val body = fetchBody.run(target).run
      body.length mustNotEqual 0
    }

    "Repeat a simple https request" in {
      // Fire ten concurrent fetches and check every response body is non-empty.
      val lengths = (0 until 10).map { _ =>
        Task.fork(fetchBody.run(target).map(_.length))
      }
      foreach(Task.gatherUnordered(lengths).run) { length =>
        length mustNotEqual 0
      }
    }
  }

  // Release the pooled client's resources once its examples have run.
  step {
    pooledClient.shutdown.run
  }
}
| hvesalai/http4s | blaze-client/src/test/scala/org/http4s/client/blaze/ExternalBlazeHttp1ClientSpec.scala | Scala | apache-2.0 | 1,185 |
package keystoneml.nodes.stats
import breeze.linalg._
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import keystoneml.pipelines.Logging
class RandomSignNodeSuite extends FunSuite with Logging with ShouldMatchers {

  test("RandomSignNode") {
    // A fixed sign vector flips only the middle element of the input.
    val signVector = DenseVector(1.0, -1.0, 1.0)
    val node = RandomSignNode(signVector)
    val input: DenseVector[Double] = DenseVector(1.0, 2.0, 3.0)
    val output = node(input)
    Seq(output) should equal (Seq(DenseVector(1.0, -2.0, 3.0)))
  }

  test("RandomSignNode.create") {
    val node = RandomSignNode(1000)
    // Every randomly generated sign must be exactly +1.0 or -1.0.
    node.signs.foreach(sign => assert(sign == -1.0 || sign == 1.0))
  }
}
| amplab/keystone | src/test/scala/keystoneml/nodes/stats/RandomSignNodeSuite.scala | Scala | apache-2.0 | 660 |
/*
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.compiler
package operator
package core
import scala.collection.JavaConversions._
import org.objectweb.asm.Type
import org.objectweb.asm.signature.SignatureVisitor
import com.asakusafw.lang.compiler.analyzer.util.ProjectionOperatorUtil
import com.asakusafw.lang.compiler.model.graph.CoreOperator
import com.asakusafw.lang.compiler.model.graph.CoreOperator.CoreOperatorKind
import com.asakusafw.runtime.model.DataModel
import com.asakusafw.spark.compiler.spi.{ OperatorCompiler, OperatorType }
import com.asakusafw.spark.runtime.fragment.core.ProjectionOperatorsFragment
import com.asakusafw.spark.runtime.util.ValueOptionOps
import com.asakusafw.spark.tools.asm._
import com.asakusafw.spark.tools.asm.MethodBuilder._
import com.asakusafw.spark.tools.asm4s._
/** Compiles the PROJECT / EXTEND / RESTRUCTURE core operators into fragment classes. */
class ProjectionOperatorsCompiler extends CoreOperatorCompiler {

  override def support(
    operator: CoreOperator)(
    implicit context: OperatorCompiler.Context): Boolean = {
    operator.getCoreOperatorKind match {
      case CoreOperatorKind.PROJECT
        | CoreOperatorKind.EXTEND
        | CoreOperatorKind.RESTRUCTURE => true
      case _ => false
    }
  }

  override def operatorType: OperatorType = OperatorType.ExtractType

  override def compile(
    operator: CoreOperator)(
    implicit context: OperatorCompiler.Context): Type = {
    // Projection-style operators always map exactly one input to one output.
    assert(support(operator),
      s"The operator type is not supported: ${operator.getCoreOperatorKind} [${operator}]")
    assert(operator.inputs.size == 1,
      s"The size of inputs should be 1: ${operator.inputs.size} [${operator}]")
    assert(operator.outputs.size == 1,
      s"The size of outputs should be 1: ${operator.outputs.size} [${operator}]")
    context.addClass(new ProjectionOperatorsFragmentClassBuilder(operator))
  }
}
// Emits (via ASM) a concrete subclass of ProjectionOperatorsFragment whose
// generic signature binds the operator's input/output data model types, and
// whose "project" method copies each mapped property from source to destination.
private class ProjectionOperatorsFragmentClassBuilder(
  operator: CoreOperator)(
  implicit context: OperatorCompiler.Context)
  extends CoreOperatorFragmentClassBuilder(
    operator.inputs.head.dataModelType,
    operator.outputs.head.dataModelType)(
    Option(
      new ClassSignatureBuilder()
        .newSuperclass {
          _.newClassType(classOf[ProjectionOperatorsFragment[_, _]].asType) {
            _.newTypeArgument(SignatureVisitor.INSTANCEOF, operator.inputs.head.dataModelType)
              .newTypeArgument(SignatureVisitor.INSTANCEOF, operator.outputs.head.dataModelType)
          }
        }),
    classOf[ProjectionOperatorsFragment[_, _]].asType) {
  // Property mappings (source port/property -> destination port/property)
  // derived from the operator by the compiler's analysis utility.
  val mappings =
    ProjectionOperatorUtil.getPropertyMappings(context.dataModelLoader, operator)
      .toSeq
  // Constructor: calls the superclass constructor with a fresh instance of the
  // output data model plus the downstream fragment passed as the third argument.
  override def defCtor()(implicit mb: MethodBuilder): Unit = {
    val thisVar :: _ :: childVar :: _ = mb.argVars
    thisVar.push().invokeInit(
      superType,
      pushNew0(childDataModelType).asType(classOf[DataModel[_]].asType),
      childVar.push())
  }
  override def defMethods(methodDef: MethodDef): Unit = {
    super.defMethods(methodDef)
    // Bridge method: project(DataModel, DataModel) casts both arguments to the
    // concrete data model types and delegates to the typed overload below.
    methodDef.newMethod(
      "project",
      Seq(classOf[DataModel[_]].asType, classOf[DataModel[_]].asType),
      new MethodSignatureBuilder()
        .newParameterType {
          _.newClassType(classOf[DataModel[_]].asType) {
            _.newTypeArgument()
          }
        }
        .newParameterType {
          _.newClassType(classOf[DataModel[_]].asType) {
            _.newTypeArgument()
          }
        }
        .newVoidReturnType()) { implicit mb =>
      val thisVar :: srcVar :: destVar :: _ = mb.argVars
      thisVar.push().invokeV(
        "project",
        srcVar.push().cast(dataModelType),
        destVar.push().cast(childDataModelType))
      `return`()
    }
    // Typed overload: for every property mapping, read the source accessor and
    // copy its value option into the destination accessor via ValueOptionOps.copy.
    methodDef.newMethod(
      "project",
      Seq(dataModelType, childDataModelType)) { implicit mb =>
      val thisVar :: srcVar :: destVar :: _ = mb.argVars
      mappings.foreach { mapping =>
        val srcProperty = mapping.getSourcePort.dataModelRef
          .findProperty(mapping.getSourceProperty)
        val destProperty = mapping.getDestinationPort.dataModelRef
          .findProperty(mapping.getDestinationProperty)
        // Copy is only valid between identically-typed properties.
        assert(srcProperty.getType.asType == destProperty.getType.asType,
          "The source and destination types should be the same: " +
            s"(${srcProperty.getType}, ${destProperty.getType} [${operator}]")
        pushObject(ValueOptionOps)
          .invokeV(
            "copy",
            srcVar.push()
              .invokeV(srcProperty.getDeclaration.getName, srcProperty.getType.asType),
            destVar.push()
              .invokeV(destProperty.getDeclaration.getName, destProperty.getType.asType))
      }
      `return`()
    }
  }
}
| asakusafw/asakusafw-spark | compiler/src/main/scala/com/asakusafw/spark/compiler/operator/core/ProjectionOperatorsCompiler.scala | Scala | apache-2.0 | 5,333 |
/*
* Copyright 2014 Philip L. McMahon
*
* Philip L. McMahon licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package rascql.postgresql
import java.nio.ByteOrder
import java.nio.charset.Charset
import java.security.MessageDigest
import scala.annotation.switch
import scala.collection.immutable
import scala.util.control.NoStackTrace
import scala.util.Try
import akka.util._
package object protocol {
  // Aliases for PostgreSQL identifier values carried in backend messages.
  type OID = Int
  type ProcessID = Int
  type SecretKey = Int
  // Type aliases to simplify `isInstanceOf[T]` checks
  type BindComplete = BindComplete.type
  type CloseComplete = CloseComplete.type
  type CopyDone = CopyDone.type
  type EmptyQueryResponse = EmptyQueryResponse.type
  type Flush = Flush.type
  type NoData = NoData.type
  type ParseComplete = ParseComplete.type
  type PortalSuspended = PortalSuspended.type
  type SSLRequest = SSLRequest.type
  type Sync = Sync.type
  type Terminate = Terminate.type
  // All multi-byte integers in this module are encoded big-endian (network order).
  private[protocol] implicit val order = ByteOrder.BIG_ENDIAN
  // NUL terminator byte for C-style strings.
  private[protocol] val NUL = 0x0.toByte
  // Precomputed two-character lowercase hex encodings for byte values 0x00-0xff.
  private[protocol] val HexChunks = 0x0.to(0xFF).map("%02x".format(_).toLowerCase).map(ByteString(_))
  // Extension: render one byte as its two-char lowercase hex ByteString.
  private[protocol] implicit class RichByte(val b: Byte) extends AnyVal {
    // Mask to 0-255 so negative bytes index correctly into the lookup table.
    @inline def asHex: ByteString = HexChunks(b & 0xFF)
  }
  // Extension: hex-encode an entire array by concatenating per-byte chunks.
  private[protocol] implicit class RichArrayOfByte(val a: Array[Byte]) extends AnyVal {
    @inline def asHex: ByteString = a.foldLeft(ByteString.empty)(_ ++ _.asHex)
  }
  // Extension methods for consuming protocol fields from a ByteIterator.
  // Note: these methods mutate `b` (advance its position) like the underlying
  // akka ByteIterator API does; `clone` is used to peek without consuming.
  private[protocol] implicit class RichByteIterator(val b: ByteIterator) extends AnyVal {
    // Reads a NUL-terminated string in the given charset, consuming the NUL.
    def getCString(c: Charset): String = {
      val iter = b.clone
      val result = iter.takeWhile(_ != NUL) // FIXME Throw error if no NUL found (result length equals iter length)
      b.drop(result.len + 1) // Consume up to and including NUL byte
      new String(result.toArray[Byte], c)
    }
    // Since take/slice both truncate the iterator and we want to return a sub-iterator for a given range, we do this instead.
    def nextBytes(n: Int): ByteIterator = {
      val iter = b.clone
      b.drop(n)
      iter.take(n)
    }
    // Returns (first n bytes, remainder); advances `b` past the first n bytes.
    def splitAt(n: Int): (ByteIterator, ByteIterator) =
      b.clone.take(n) -> b.drop(n)
  }
  private[protocol] implicit class RichByteStringBuilder(val b: ByteStringBuilder) extends AnyVal {
    // Appends the string's bytes followed by a NUL terminator.
    @inline def putCString(content: String, charset: Charset): ByteStringBuilder =
      b.putBytes(content.getBytes(charset)).putNUL
    @inline def putNUL: ByteStringBuilder = b.putByte(NUL)
    // Prefixes the accumulated bytes with a 4-byte length that includes itself.
    def prependLength: ByteString =
      ByteString.newBuilder.putInt(b.length + 4).result ++ b.result // Include length of int
  }
  private[protocol] implicit class RichByteString(val b: ByteString) extends AnyVal {
    // Prefixes with a 4-byte length of the payload only (excludes the prefix itself),
    // unlike RichByteStringBuilder.prependLength above.
    def prependLength: ByteString =
      ByteString.newBuilder.putInt(b.length).result ++ b
  }
  private[protocol] implicit class RichOptionOfFieldFormats(val f: Option[FieldFormats]) extends AnyVal {
    // Encodes the formats, falling back to the default encoding when absent.
    def encoded: ByteString = f.fold(FieldFormats.Default)(_.encoded)
  }
  private[protocol] implicit class RichSeqOfParameter(val s: Seq[Parameter]) extends AnyVal {
    // Encodes the parameter section of a Bind message:
    // 16-bit count, per-parameter format codes, then the same 16-bit count
    // again followed by the encoded parameter values (both counts equal s.size).
    def encode(c: Charset): ByteString = {
      val size = ByteString.newBuilder.putShort(s.size).result
      val (formats, values) = s.unzip { p => p.format.toShort -> p.encode(c) }
      size ++
        formats.foldLeft(ByteString.newBuilder)(_.putShort(_)).result ++
        values.foldLeft(size)(_ ++ _)
    }
  }
  // A message sent from the client (frontend) to the PostgreSQL server.
  sealed trait FrontendMessage {
    // Serializes the full message, including type byte and length, for the wire.
    def encode(charset: Charset): ByteString
  }
  object FrontendMessage {
    import scala.language.implicitConversions
    // A message whose bytes never vary, so they can be precomputed once.
    sealed abstract class Fixed(private val bytes: ByteString) extends FrontendMessage {
      def encode(c: Charset) = bytes
    }
    object Fixed {
      implicit def toByteString(f: Fixed): ByteString = f.bytes
    }
    // A fixed message with a type byte and no body (length field of 4 only).
    sealed abstract class Empty(typ: Char)
      extends FrontendMessage.Fixed(ByteString.newBuilder.putByte(typ.toByte).putInt(4).result)
    // A message with a body: type byte, then self-inclusive length, then content.
    sealed abstract class NonEmpty(typ: Byte) extends FrontendMessage {
      final def encode(c: Charset) = ByteString(typ) ++
        ByteString.newBuilder.append(encodeContent(c)).prependLength
      // Subclasses provide only the body; framing is handled above.
      protected def encodeContent(c: Charset): ByteString
    }
    object NonEmpty {
      implicit def toByteString(m: NonEmpty)(implicit c: Charset): ByteString =
        m.encode(c)
    }
  }
  // A message received from the PostgreSQL server (backend).
  sealed trait BackendMessage
  object BackendMessage {
    // A backend message with no body; the singleton doubles as its own decoder.
    sealed trait Empty extends BackendMessage with Decoder {
      protected def decodeContent(c: Charset, b: ByteIterator) = this
    }
  }
  // Marker for backend messages that may arrive outside a request/response cycle.
  sealed trait AsyncOperation { _: BackendMessage => }
  // Decodes one length-prefixed backend message body from a byte stream.
  sealed trait Decoder {
    import Decoder._
    // Attempts to decode a message from `bytes`. Returns NeedBytes when fewer
    // bytes are buffered than the (self-inclusive) length header requires,
    // otherwise MessageDecoded with the parsed message and the unconsumed tail.
    def decode(charset: Charset, bytes: ByteString): Result =
      if (bytes.length < 4) NeedBytes(4 - bytes.length)
      else {
        val iter = bytes.iterator
        val length = iter.getInt - 4 // Drop four bytes for int
        if (iter.len < length) NeedBytes(length - iter.len) // TODO This could be a "LengthKnown" intermediate state
        else {
          val (content, rest) = iter.splitAt(length)
          MessageDecoded(decodeContent(charset, content), rest.toByteString)
        }
      }
    // Parses the message body (length header already stripped).
    protected def decodeContent(c: Charset, b: ByteIterator): BackendMessage
  }
  object Decoder {
    // Result of a decode attempt: either more input is needed or one message
    // was parsed along with the remaining buffered bytes.
    sealed trait Result
    case class NeedBytes(count: Int) extends Result
    case class MessageDecoded(message: BackendMessage, tail: ByteString) extends Result
    // Maps a backend message type byte to its decoder (one decoder per message
    // type); unknown codes raise UnsupportedMessageType.
    def apply(code: Byte): Decoder = (code: @switch) match {
      case 'R' => AuthenticationRequest
      case 'K' => BackendKeyData
      case '2' => BindComplete
      case '3' => CloseComplete
      case 'C' => CommandComplete
      case 'd' => CopyData
      case 'c' => CopyDone
      case 'G' => CopyInResponse
      case 'H' => CopyOutResponse
      case 'W' => CopyBothResponse
      case 'D' => DataRow
      case 'I' => EmptyQueryResponse
      case 'E' => ErrorResponse
      case 'V' => FunctionCallResponse
      case 'n' => NoData
      case 'N' => NoticeResponse
      case 'A' => NotificationResponse
      case 't' => ParameterDescription
      case 'S' => ParameterStatus
      case '1' => ParseComplete
      case 's' => PortalSuspended
      case 'Z' => ReadyForQuery
      case 'T' => RowDescription
      case _ => throw UnsupportedMessageType(code)
    }
  }
  // Backend authentication requests ('R' messages); the first int of the body
  // selects the authentication method the server demands.
  sealed trait AuthenticationRequest extends BackendMessage
  object AuthenticationRequest extends Decoder {
    protected def decodeContent(c: Charset, b: ByteIterator) = {
      (b.getInt: @switch) match {
        case 0 => AuthenticationOk
        case 2 => AuthenticationKerberosV5
        case 3 => AuthenticationCleartextPassword
        case 5 => AuthenticationMD5Password(b.toByteString) // TODO compact?
        case 6 => AuthenticationSCMCredential
        case 7 => AuthenticationGSS
        case 8 => AuthenticationGSSContinue(b.toByteString) // TODO compact?
        case 9 => AuthenticationSSPI
        case m => throw UnsupportedAuthenticationMethod(m)
      }
    }
  }
  case object AuthenticationOk extends AuthenticationRequest
  case object AuthenticationKerberosV5 extends AuthenticationRequest
  case object AuthenticationCleartextPassword extends AuthenticationRequest
  // Carries the 4-byte salt to mix into the MD5 password hash.
  case class AuthenticationMD5Password(salt: ByteString) extends AuthenticationRequest
  case object AuthenticationSCMCredential extends AuthenticationRequest
  case object AuthenticationGSS extends AuthenticationRequest
  // Carries opaque GSSAPI/SSPI continuation data from the server.
  case class AuthenticationGSSContinue(data: ByteString) extends AuthenticationRequest
  case object AuthenticationSSPI extends AuthenticationRequest
// BackendKeyData ('K'): the backend's process ID and secret key, used later
// to issue a CancelRequest for this connection.
case class BackendKeyData(processId: ProcessID, secretKey: SecretKey) extends BackendMessage
object BackendKeyData extends Decoder {
  protected def decodeContent(c: Charset, b: ByteIterator) = {
    // Read the two Int32 fields in wire order: process ID, then secret key.
    val pid = b.getInt
    val key = b.getInt
    BackendKeyData(pid, key)
  }
}
// Bind ('B'): binds parameter values to a prepared statement, producing a
// portal ready for execution. Empty names select the unnamed portal/statement.
case class Bind(parameters: Seq[Parameter],
                destination: Portal = Portal.Unnamed,
                source: PreparedStatement = PreparedStatement.Unnamed,
                resultFormats: Option[FieldFormats] = None) extends FrontendMessage.NonEmpty('B') {
  protected def encodeContent(c: Charset) =
    ByteString.newBuilder.
      putCString(destination.name, c).
      putCString(source.name, c).
      append(parameters.encode(c)).
      append(resultFormats.encoded).
      result
}
case object BindComplete extends BackendMessage.Empty
// CancelRequest: has no type byte; its fixed content is the message length,
// the cancel request code, and the key data from BackendKeyData.
case class CancelRequest(processId: ProcessID, secretKey: SecretKey)
  extends FrontendMessage.Fixed(CancelRequest.Prefix ++
    ByteString.newBuilder.putInt(processId).putInt(secretKey).result)
object CancelRequest {
  // Message length (16) followed by the cancel request code 80877102.
  private[protocol] val Prefix =
    ByteString.newBuilder.putInt(16).putInt(80877102).result
}
// Close ('C'): disposes of a portal or prepared statement on the server.
case class Close(target: Closable) extends FrontendMessage.NonEmpty('C') {
  protected def encodeContent(c: Charset) = target.encode(c)
}
case object CloseComplete extends BackendMessage.Empty
// CommandComplete ('C'): reports the textual command tag for a finished
// command, e.g. "INSERT 0 5", "UPDATE 3", "BEGIN" or "CREATE TABLE".
case class CommandComplete(tag: CommandTag) extends BackendMessage
object CommandComplete extends Decoder {
  import CommandTag._
  // True for a non-empty, all-ASCII-digit token (a safely parseable count).
  private def isNum(s: String) = s.nonEmpty && s.forall(_.isDigit)
  protected def decodeContent(c: Charset, b: ByteIterator) = {
    val tag = b.getCString(c)
    CommandComplete(tag.split(' ') match {
      // TODO Verify large unsigned OID parses properly
      case Array(name, oid, rows) if isNum(oid) && isNum(rows) =>
        OIDWithRows(name, oid.toInt, rows.toInt)
      case Array(name, rows) if isNum(rows) =>
        RowsAffected(name, rows.toInt)
      case _ =>
        // Single-word tags ("BEGIN") and multi-word tags without counts
        // ("CREATE TABLE"); the unguarded version previously threw
        // NumberFormatException on the latter. Keep the full tag as the name.
        NameOnly(tag)
    })
  }
}
// CopyData ('d'): one chunk of COPY payload; used in both directions.
case class CopyData(value: ByteString) extends FrontendMessage.NonEmpty('d') with BackendMessage {
  protected def encodeContent(c: Charset) = value
}
object CopyData extends Decoder {
  protected def decodeContent(c: Charset, b: ByteIterator) =
    CopyData(b.toByteString) // TODO compact?
}
// CopyDone ('c'): marks the end of COPY data in either direction.
case object CopyDone extends FrontendMessage.Empty('c') with BackendMessage.Empty
// CopyFail ('f'): aborts a COPY with a human-readable error message.
case class CopyFail(error: String) extends FrontendMessage.NonEmpty('f') {
  protected def encodeContent(c: Charset) =
    ByteString.newBuilder.
      putCString(error, c).
      result
}
// Common shape of the CopyIn/CopyOut/CopyBoth responses: the per-column
// formats the server expects/produces for the COPY operation.
abstract class CopyResponse extends BackendMessage {
  def format: FieldFormats
}
// Shared decoder: one overall-format byte, an Int16 column count, then one
// format code per column.
sealed abstract class CopyResponseDecoder(typ: Byte) extends Decoder {
  import Format._
  import FieldFormats._
  protected def decodeContent(c: Charset, b: ByteIterator) = {
    val format = Format.decode(b.getByte)
    val size = b.getShort
    val types = Vector.fill(size)(b.getByte).map(Format.decode(_))
    apply(format match {
      case Text =>
        // All columns must have format text
        types.zipWithIndex.
          collect {
            case (Format.Binary, idx) => idx
          } match {
            case Vector() =>
              Matched(format, size)
            case indices =>
              throw UnexpectedBinaryColumnFormat(indices)
          }
      case Binary =>
        // Binary copy: keep the individual per-column format codes.
        Mixed(types)
    })
  }
  // Wraps the decoded formats in the concrete response type.
  def apply(format: FieldFormats): CopyResponse
}
case class CopyInResponse(format: FieldFormats) extends CopyResponse
object CopyInResponse extends CopyResponseDecoder('G')
case class CopyOutResponse(format: FieldFormats) extends CopyResponse
object CopyOutResponse extends CopyResponseDecoder('H')
case class CopyBothResponse(format: FieldFormats) extends CopyResponse
object CopyBothResponse extends CopyResponseDecoder('W')
// DataRow ('D'): one row of query results.
case class DataRow(columns: DataRow.Columns) extends BackendMessage
object DataRow extends Decoder {
  // A single column value; None represents SQL NULL.
  case class Column(value: Option[ByteString], charset: Charset)
  type Columns = immutable.IndexedSeq[Column]
  // Wire layout: Int16 column count, then per column an Int32 length
  // (-1 signals NULL) followed by that many data bytes.
  protected def decodeContent(c: Charset, b: ByteIterator) =
    DataRow((0 until b.getShort) map { _ =>
      Column(
        Option(b.getInt).
          filterNot(_ < 0). // negative length means NULL; zero is an empty value
          map(b.nextBytes(_).toByteString), // TODO compact?
        c
      )
    })
}
// Describe ('D'): requests metadata for a portal or prepared statement.
case class Describe(target: Closable) extends FrontendMessage.NonEmpty('D') {
  protected def encodeContent(c: Charset) = target.encode(c)
}
// EmptyQueryResponse ('I'): the submitted query string was empty.
case object EmptyQueryResponse extends BackendMessage.Empty
// ErrorResponse ('E'): an error, carried as typed fields (see ResponseFields).
case class ErrorResponse(fields: ErrorResponse.Fields) extends BackendMessage
object ErrorResponse extends Decoder with ResponseFields {
  protected def decodeContent(c: Charset, b: ByteIterator) =
    ErrorResponse(decodeAll(c, b))
}
// Execute ('E'): runs a bound portal; maxRows of None (encoded as 0) means
// "no limit".
case class Execute(portal: Portal, maxRows: Option[Int] = None) extends FrontendMessage.NonEmpty('E') {
  protected def encodeContent(c: Charset) = {
    val limit = maxRows.getOrElse(0)
    val builder = ByteString.newBuilder
    builder.putCString(portal.name, c)
    builder.putInt(limit)
    builder.result
  }
}
// Flush ('H'): asks the backend to deliver any pending output.
case object Flush extends FrontendMessage.Empty('H')
// FunctionCall ('F'): invokes a server-side function by OID; `result` selects
// the wire format of the returned value.
case class FunctionCall(target: OID,
                        arguments: immutable.Seq[Parameter],
                        result: Format) extends FrontendMessage.NonEmpty('F') {
  protected def encodeContent(c: Charset) =
    ByteString.newBuilder.
      putInt(target).
      append(arguments.encode(c)).
      putShort(result.toShort).
      result
}
// FunctionCallResponse ('V'): the function's result value; None means SQL NULL.
case class FunctionCallResponse(value: Option[ByteString]) extends BackendMessage
object FunctionCallResponse extends Decoder {
  // Wire layout: Int32 length (-1 for NULL) followed by that many value bytes.
  protected def decodeContent(c: Charset, b: ByteIterator) =
    FunctionCallResponse(
      // Only a *negative* length signals NULL; a zero length is a valid,
      // empty (non-NULL) result. Mirrors DataRow's filterNot(_ < 0) -- the
      // previous filter(_ > 0) incorrectly decoded empty results as NULL.
      Option(b.getInt).
        filterNot(_ < 0).
        map(b.nextBytes(_).toByteString) // TODO compact?
    )
}
// NoData ('n'): the described statement or portal returns no rows.
case object NoData extends BackendMessage.Empty
// NoticeResponse ('N'): a warning/notice; may arrive asynchronously.
case class NoticeResponse(fields: NoticeResponse.Fields) extends BackendMessage with AsyncOperation
object NoticeResponse extends Decoder with ResponseFields {
  protected def decodeContent(c: Charset, b: ByteIterator) =
    NoticeResponse(decodeAll(c, b))
}
// NotificationResponse ('A'): an asynchronous notification with the notifying
// backend's process ID, channel name, and payload string.
case class NotificationResponse(processId: Int, channel: String, payload: String) extends BackendMessage with AsyncOperation
object NotificationResponse extends Decoder {
  protected def decodeContent(c: Charset, b: ByteIterator) =
    NotificationResponse(b.getInt, b.getCString(c), b.getCString(c))
}
// ParameterDescription ('t'): type OIDs for a prepared statement's parameters.
case class ParameterDescription(types: immutable.IndexedSeq[OID]) extends BackendMessage
object ParameterDescription extends Decoder {
  // Int16 parameter count followed by one Int32 OID per parameter.
  protected def decodeContent(c: Charset, b: ByteIterator) =
    ParameterDescription(Vector.fill(b.getShort)(b.getInt))
}
// ParameterStatus ('S'): reports a run-time parameter setting; asynchronous.
case class ParameterStatus(key: String, value: String) extends BackendMessage with AsyncOperation
object ParameterStatus extends Decoder {
  protected def decodeContent(c: Charset, b: ByteIterator) =
    ParameterStatus(b.getCString(c), b.getCString(c))
}
// Parse ('P'): creates a prepared statement from `query`, optionally
// pre-declaring parameter type OIDs.
case class Parse(query: String,
                 types: immutable.Seq[OID] = Nil,
                 destination: PreparedStatement = PreparedStatement.Unnamed) extends FrontendMessage.NonEmpty('P') {
  protected def encodeContent(c: Charset) =
    ByteString.newBuilder.
      putCString(destination.name, c).
      putCString(query, c).
      putShort(types.size).
      putInts(types.toArray).
      result
}
case object ParseComplete extends BackendMessage.Empty
// PasswordMessage ('p'): the client's reply to an authentication request.
case class PasswordMessage(password: Password) extends FrontendMessage.NonEmpty('p') {
  protected def encodeContent(c: Charset) = password.encode(c)
}
// PortalSuspended ('s'): Execute hit its row limit; the portal has more rows.
case object PortalSuspended extends BackendMessage.Empty
// Query ('Q'): a simple-protocol query, sent as one NUL-terminated string.
case class Query(str: String) extends FrontendMessage.NonEmpty('Q') {
  protected def encodeContent(c: Charset) = {
    val builder = ByteString.newBuilder
    builder.putCString(str, c)
    builder.result
  }
}
// ReadyForQuery ('Z'): the backend is ready for a new command and reports
// the current transaction state.
case class ReadyForQuery(status: TransactionStatus) extends BackendMessage
object ReadyForQuery extends Decoder {
  import TransactionStatus._
  protected def decodeContent(c: Charset, b: ByteIterator) =
    ReadyForQuery(
      // 'I' = idle, 'T' = inside a transaction block, 'E' = failed transaction.
      (b.getByte: @switch) match {
        case 'I' => Idle
        case 'T' => Open
        case 'E' => Failed
        case s => throw UnsupportedTransactionStatus(s)
      }
    )
}
// RowDescription ('T'): metadata describing each column of a result set.
case class RowDescription(fields: RowDescription.Fields) extends BackendMessage
object RowDescription extends Decoder {
  // Metadata for a single result column.
  case class Field(name: String,
                   tableOid: OID,
                   column: Int,
                   dataType: DataType,
                   format: Format)
  type Fields = immutable.IndexedSeq[Field]
  // Type OID plus the size and modifier reported by the server.
  case class DataType(oid: OID, size: Long, modifier: Int)
  protected def decodeContent(c: Charset, b: ByteIterator) = {
    // Int16 field count; then per field: name, table OID, column attribute
    // number, type OID, type size, type modifier, and format code.
    RowDescription(
      (0 until b.getShort).map { index =>
        Field(
          name = b.getCString(c),
          tableOid = b.getInt,
          column = b.getShort,
          dataType = DataType(
            oid = b.getInt,
            size = b.getShort,
            modifier = b.getInt
          ),
          format = Format.decode(b.getShort) // FIXME When returned after describe, will always be zero for "unknown"
        )
      }
    )
  }
}
// Like StartupMessage, this has no type byte
// SSLRequest: asks the server to switch to SSL before startup; the server's
// answer is a single raw byte rather than a regular message, decoded below.
case object SSLRequest
  extends FrontendMessage.Fixed(ByteString.newBuilder.putInt(80877103).prependLength) {
  sealed trait Reply
  case object Accepted extends Reply
  case object Rejected extends Reply
  object Reply {
    // 'S' accepts SSL, 'N' rejects it; anything else is a protocol violation.
    def decode(b: Byte): Reply = (b: @switch) match {
      case 'S' => Accepted
      case 'N' => Rejected
      case _ =>
        throw UnsupportedSSLReply(b)
    }
  }
}
// Can't extend FrontentMessage.NonEmpty since there is no message type byte
// StartupMessage: protocol version followed by NUL-terminated key/value
// parameter pairs, closed by an extra NUL; whole body is length-prefixed.
case class StartupMessage(user: String, parameters: immutable.Map[String, String]) extends FrontendMessage {
  import StartupMessage._
  def encode(c: Charset) =
    parameters.+(User -> user). // Override any "user" parameter
      foldLeft(ByteString.newBuilder.append(Version)) {
        case (b, (k, v)) =>
          b.putCString(k, c).
            putCString(v, c)
      }.
      putNUL.
      prependLength
}
object StartupMessage {
  private val Version = ByteString.newBuilder.putInt(196608).result // 3.0
  private val User = "user"
}
// Sync ('S') and Terminate ('X') carry no content.
case object Sync extends FrontendMessage.Empty('S')
case object Terminate extends FrontendMessage.Empty('X')
// Note that the PostgreSQL documentation recommends only encoding parameters
// using the text format, since it is portable across versions.
// A single bind/function-call parameter value plus its wire format.
sealed trait Parameter {
  def format: Format
  // Fully encode parameter including 4-byte length prefix
  def encode(c: Charset): ByteString
}
// TODO Make inner class of Bind/FunctionCall companion objects?
object Parameter {
  // Pre-encoded bytes; the length prefix is already part of `bytes`.
  private case class Raw(format: Format, bytes: ByteString) extends Parameter {
    def encode(c: Charset) = bytes
  }
  // Charset-dependent encoding deferred until encode time.
  private case class Dynamic(format: Format)(fn: Charset => ByteString) extends Parameter {
    def encode(c: Charset) = fn(c).prependLength
  }
  // SQL NULL: a length field of -1 with no data bytes.
  val NULL: Parameter = Raw(Format.Text, ByteString.newBuilder.putInt(-1).result)
  def apply(f: Format)(fn: Charset => ByteString): Parameter = Dynamic(f)(fn)
  def apply(fn: Charset => ByteString): Parameter = Dynamic(Format.Text)(fn)
  def apply(bytes: ByteString, f: Format = Format.Text): Parameter =
    Raw(f, bytes.prependLength)
}
// Parsed form of the CommandComplete tag string.
sealed trait CommandTag
object CommandTag {
  // FIXME Should the rows count be a long?
  // Tag with both an OID and a row count (e.g. "INSERT <oid> <rows>").
  case class OIDWithRows(name: String, oid: OID, rows: Int) extends CommandTag
  // Tag with only a row count (e.g. "UPDATE <rows>").
  case class RowsAffected(name: String, rows: Int) extends CommandTag
  // Tag with no counts at all (e.g. "BEGIN").
  case class NameOnly(name: String) extends CommandTag
}
// A server-side object (portal or prepared statement) nameable in
// Close/Describe; encodes as a type byte plus a NUL-terminated name.
sealed abstract class Closable(typ: Byte) {
  def name: String
  def encode(c: Charset): ByteString =
    ByteString.newBuilder.
      putByte(typ).
      putCString(name, c).
      result
}
sealed abstract class Portal extends Closable('P')
object Portal {
  case class Named(name: String) extends Portal {
    require(name.nonEmpty)
  }
  // The unnamed portal, denoted by an empty name on the wire.
  case object Unnamed extends Portal {
    val name = ""
  }
  def apply(name: String): Portal =
    if (name.isEmpty) Unnamed else Named(name)
  def unapply(p: Portal): Option[String] = Some(p.name)
}
// TODO Duplicates format of Portal -- DRY using macro?
sealed abstract class PreparedStatement extends Closable('S')
object PreparedStatement {
  case class Named(name: String) extends PreparedStatement {
    require(name.nonEmpty)
  }
  // The unnamed prepared statement, denoted by an empty name on the wire.
  case object Unnamed extends PreparedStatement {
    val name = ""
  }
  def apply(name: String): PreparedStatement =
    if (name.isEmpty) Unnamed else Named(name)
  def unapply(p: PreparedStatement): Option[String] = Some(p.name)
}
// Wire format of a value: 0 = text, 1 = binary.
sealed abstract class Format(typ: Byte) {
  def toShort = typ.toShort
  def toByte = typ
}
object Format {
  case object Text extends Format(0)
  case object Binary extends Format(1)
  def decode(typ: Short) = (typ: @switch) match {
    case 0 => Text
    case 1 => Binary
    case _ => throw UnsupportedFormatType(typ)
  }
}
// The per-column format codes carried by Bind and Copy* messages.
sealed trait FieldFormats {
  // Wire encoding: Int16 count followed by that many Int16 format codes.
  def encoded: ByteString
  def apply(index: Short): Format
}
object FieldFormats {
  // Zero format codes on the wire.
  private[protocol] val Default = ByteString.newBuilder.putShort(0).result
  // A single format code applying uniformly to all `count` columns.
  case class Matched(format: Format, count: Short) extends FieldFormats {
    def apply(index: Short) =
      if (index < count) format
      else throw new IndexOutOfBoundsException
    def encoded = ByteString.newBuilder.putShort(1).putShort(format.toShort).result
  }
  // One explicit format code per column.
  case class Mixed(types: immutable.IndexedSeq[Format]) extends FieldFormats {
    def apply(index: Short) = types(index)
    def encoded = ByteString.newBuilder.putShort(types.size).putShorts(types.map(_.toShort).toArray).result
  }
  // TODO Try to detect if Matched can be used
  def apply(types: Iterable[Format]): FieldFormats = Mixed(types.toVector)
}
// A client password credential, encoded into a PasswordMessage body.
sealed trait Password {
  def encode(c: Charset): ByteString
}
object Password {
  // Plain-text password, sent as a single NUL-terminated string.
  case class ClearText(value: String) extends Password {
    def encode(c: Charset) =
      ByteString.newBuilder.
        putCString(value, c).
        result
  }
  // MD5 password: "md5" + hex(md5(hex(md5(password + username)) + salt)),
  // NUL-terminated. Note MessageDigest.digest() resets the digest, so the
  // second update/digest round hashes hex-of-first-hash plus the salt.
  case class MD5(username: String, password: String, salt: ByteString) extends Password {
    import MD5._
    // Encoded bytes must be in lower case
    def encode(c: Charset) = {
      val md = MessageDigest.getInstance("MD5")
      md.update(password.getBytes(c))
      md.update(username.getBytes(c))
      md.update(md.digest().asHex.toArray)
      md.update(salt.toArray)
      Prefix ++ md.digest().asHex ++ Suffix
    }
  }
  object MD5 {
    private val Prefix = ByteString("md5")
    private val Suffix = ByteString(NUL)
  }
}
// TODO Make this an inner class of ReadyForQuery object?
// Backend transaction state as reported by ReadyForQuery.
sealed trait TransactionStatus
object TransactionStatus {
  case object Idle extends TransactionStatus
  case object Open extends TransactionStatus
  case object Failed extends TransactionStatus
}
// Typed field decoding shared by ErrorResponse and NoticeResponse: the body
// is a sequence of (type byte, NUL-terminated string) pairs ending in a NUL.
sealed trait ResponseFields {
  sealed trait Field
  case class Severity(level: String) extends Field
  case class SQLState(code: String) extends Field
  case class Message(text: String) extends Field
  case class Detail(text: String) extends Field
  case class Hint(text: String) extends Field
  case class Position(index: Int) extends Field
  case class InternalPosition(index: Int) extends Field
  case class InternalQuery(text: String) extends Field
  case class Where(trace: immutable.IndexedSeq[String]) extends Field
  case class Schema(name: String) extends Field
  case class Table(name: String) extends Field
  case class Column(name: String) extends Field
  case class DataType(name: String) extends Field
  case class Constraint(name: String) extends Field
  case class File(path: String) extends Field
  case class Line(index: Int) extends Field
  case class Routine(name: String) extends Field
  type Fields = immutable.Seq[Field]
  // Reads fields until the terminating NUL byte; unrecognized type bytes are
  // skipped, per the protocol documentation's recommendation.
  protected def decodeAll(c: Charset, b: ByteIterator): Fields =
    Iterator.continually(b.getByte).
      takeWhile(_ != NUL).
      foldLeft(Vector.empty[Field]) { (fields, typ) =>
        val value = b.getCString(c)
        (typ: @switch) match {
          case 'S' => fields :+ Severity(value)
          case 'C' => fields :+ SQLState(value)
          case 'M' => fields :+ Message(value)
          case 'D' => fields :+ Detail(value)
          case 'H' => fields :+ Hint(value)
          case 'P' => fields :+ Position(value.toInt)
          case 'p' => fields :+ InternalPosition(value.toInt)
          case 'q' => fields :+ InternalQuery(value)
          // Split the context ("where") field on newline characters; the
          // char literal here was garbled as '\\n', which is not valid Scala.
          case 'W' => fields :+ Where(value.split('\n').toVector)
          case 's' => fields :+ Schema(value)
          case 't' => fields :+ Table(value)
          case 'c' => fields :+ Column(value)
          case 'd' => fields :+ DataType(value)
          case 'n' => fields :+ Constraint(value)
          case 'F' => fields :+ File(value)
          case 'L' => fields :+ Line(value.toInt)
          case 'R' => fields :+ Routine(value)
          case _ => fields // Ignore, per documentation recommendation
        }
      }
}
// Raised when the peer sends data this protocol implementation cannot handle.
// Stack traces are suppressed since these reflect remote input, not local bugs.
sealed abstract class DecoderException(msg: String)
  extends RuntimeException(msg) with NoStackTrace
@SerialVersionUID(1)
case class UnsupportedMessageType(typ: Byte)
  extends DecoderException(s"Message type ${Integer.toHexString(typ)} is not supported")
@SerialVersionUID(1)
case class UnsupportedAuthenticationMethod(method: Int)
  extends DecoderException(s"Authentication method $method is not supported")
@SerialVersionUID(1)
case class UnsupportedSSLReply(typ: Byte)
  extends DecoderException(s"SSL reply ${Integer.toHexString(typ)} is not supported")
@SerialVersionUID(1)
case class UnsupportedFormatType(typ: Short)
  extends DecoderException(s"Format type ${Integer.toHexString(typ)} is not supported")
@SerialVersionUID(1)
case class UnexpectedBinaryColumnFormat(columns: Iterable[Int])
  extends DecoderException(s"Text COPY format does not allow binary column types in columns ${columns.mkString(", ")}")
@SerialVersionUID(1)
case class UnsupportedTransactionStatus(typ: Byte)
  extends DecoderException(s"Transaction status ${Integer.toHexString(typ)} is not supported")
}
| rascql/rascql | src/main/scala/rascql/postgresql/protocol/package.scala | Scala | apache-2.0 | 26,892 |
/**
* Copyright (C) 2012 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.kernel.hooks
import org.hibernate.SessionFactory
import org.hibernate.cfg.Configuration
import org.hibernate.dialect.{Oracle10gDialect, Oracle8iDialect, Dialect}
import net.lshift.hibernate.migrations.dialects.{OracleDialectExtension, DialectExtensionSelector, DialectExtension}
/**
 * Factory for constructing hooks based upon session configuration.
 */
class HookManager {
  // Resolved lazily from the Hibernate configuration via applyConfiguration;
  // null until that has been called.
  private var dialect:Dialect = null
  private var dialectExtension: DialectExtension = null

  def this(config:Configuration) = {
    this()
    applyConfiguration(config)
  }

  /** Resolves the Hibernate dialect (and its matching extension) for `config`. */
  def applyConfiguration(config:Configuration) {
    dialect = Dialect.getDialect(config.getProperties)
    dialectExtension = DialectExtensionSelector.select(dialect)
  }

  /**
   * Returns a difference-partitioning hook for the configured database:
   * a real implementation on Oracle, a no-op everywhere else.
   */
  def createDifferencePartitioningHook(sessionFactory:SessionFactory) = {
    // Ideally, this would go in something like the DialectExtension;
    // however, that would create a circular dependency between the two modules.
    // Pattern match rather than isInstanceOf; a null extension (i.e.
    // applyConfiguration never called) falls through to the no-op hook,
    // matching the previous isInstanceOf behavior.
    dialectExtension match {
      case _: OracleDialectExtension => new OracleDifferencePartitioningHook(sessionFactory)
      case _ => new EmptyDifferencePartitioningHook
    }
  }
}
// No-op hook used when the configured database does not support
// difference partitioning.
class EmptyDifferencePartitioningHook extends DifferencePartitioningHook {
  def pairCreated(domain: String, key: String) {}
  def pairRemoved(domain: String, key: String) {}
  // Nothing to remove, so report that no differences were deleted.
  def removeAllPairDifferences(domain: String, key: String) = false
  def isDifferencePartitioningEnabled = false
}
package org.clulab.twitter4food.struct
/**
 * Stores information about an individual tweet
 * User: mihais
 * Date: 12/14/15
 */
class Tweet (val text: String,
             val id: Long,
             val lang: String,
             val createdAt: java.util.Date,
             val handle: String,
             val urls: Seq[String] = Nil) { // TODO: Add image locations

  override def toString = s"$handle: $text [$createdAt]"

  /**
   * Returns a copy of the tweet, with optionally altered arguments
   */
  def copy(
    text: String = this.text,
    id: Long = this.id,
    lang: String = this.lang,
    createdAt: java.util.Date = this.createdAt,
    handle: String = this.handle,
    urls: Seq[String] = this.urls): Tweet = {
    // Pass `urls` through to the constructor -- previously it was accepted
    // but silently dropped, so copy(urls = ...) (and therefore merge) always
    // reset urls to the default Nil.
    new Tweet(text, id, lang, createdAt, handle, urls)
  }

  /**
   * Returns a new [[Tweet]] with the urls of both copies of the tweet. The argument tweet's other info is discarded.
   */
  def merge(that: Tweet): Tweet = {
    assert(this.id == that.id, "Merged tweets must have the same ID!")
    val allUrls = (this.urls ++ that.urls).distinct
    // val allImages = (this.images ++ that.images).distinct
    // this.copy(urls = allUrls, images = allImages)
    this.copy(urls = allUrls)
  }

  /**
   * Returns true if the tweet is a retweet. Assumes pre-tokenized text
   */
  def isRetweet: Boolean = this.text.startsWith("RT ")

  /**
   * Returns true if the tweet is addressed using (at least one) @mention. Assumes pre-tokenized text
   */
  def isAddressed: Boolean = this.text.startsWith("<@MENTION>")

  /**
   * Returns true if the tweet is "normal", i.e. not a retweet or addressed to other accounts
   */
  def isNormal: Boolean = !this.isAddressed && !this.isRetweet
}
}
| clulab/twitter4food | src/main/scala/org/clulab/twitter4food/struct/Tweet.scala | Scala | apache-2.0 | 1,730 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import scala.language.implicitConversions
import java.io._
import java.lang.reflect.Constructor
import java.net.URI
import java.util.{Arrays, Properties, UUID}
import java.util.concurrent.atomic.{AtomicReference, AtomicBoolean, AtomicInteger}
import java.util.UUID.randomUUID
import scala.collection.{Map, Set}
import scala.collection.JavaConversions._
import scala.collection.generic.Growable
import scala.collection.mutable.HashMap
import scala.reflect.{ClassTag, classTag}
import scala.util.control.NonFatal
import org.apache.commons.lang.SerializationUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable,
FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}
import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat,
TextInputFormat}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
import org.apache.mesos.MesosNativeLibrary
import org.apache.spark.annotation.{DeveloperApi, Experimental}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
import org.apache.spark.executor.{ExecutorEndpoint, TriggerThreadDump}
import org.apache.spark.input.{StreamInputFormat, PortableDataStream, WholeTextFileInputFormat,
FixedLengthBinaryInputFormat}
import org.apache.spark.io.CompressionCodec
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
import org.apache.spark.rdd._
import org.apache.spark.rpc.{RpcAddress, RpcEndpointRef}
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend,
SparkDeploySchedulerBackend, SimrSchedulerBackend}
import org.apache.spark.scheduler.cluster.mesos.{CoarseMesosSchedulerBackend, MesosSchedulerBackend}
import org.apache.spark.scheduler.local.LocalBackend
import org.apache.spark.storage._
import org.apache.spark.ui.{SparkUI, ConsoleProgressBar}
import org.apache.spark.ui.jobs.JobProgressListener
import org.apache.spark.util._
/**
* Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
* Spark功能的主要入口点,一个SparkContext代表连接Spark集群
* cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
* 可以用来创建RDDS,在集群可以创建累加器和广播变量
* Only one SparkContext may be active per JVM. You must `stop()` the active SparkContext before
* 每个JVM只有一个sparkcontext可能激活,创建一个新的SparkContext之前必须'stop()'.这种限制最终可能会被删除
* creating a new one. This limitation may eventually be removed; see SPARK-2243 for more details.
*
* SparkContext的初始化步骤如下:
* 1)创建Spark执行环境SparkEnv
* 2)创建RDD清理器metadataCleaner
* 3)创建并初始化SparkUI
* 4)Hadoop相关配置及Executor环境变量的设置
* 5)创建任务调度TaskScheduler
* 6)创建和启动DAGScheduler
* 7)TashScheduler的启动
* 8)初始化块管理器BlockManager(BlockManager是存储体系的主要组件之一)
* 9)启动测量系统MetricsSystem
* 10)创建和启动Executor分配 管理器ExecutorAllocationManager
* 11)ContextCleaner的创建与启动
* 12)Spark环境更新
* 13)创建DAGSchedulerSource和BlockManagerSource
* 14)将SparkContext标记为激活
* @param config a Spark Config object describing the application configuration. Any settings in
* this config overrides the default configs as well as system properties.
*
*/
class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationClient {
// The call site where this SparkContext was constructed.
//存储了线程栈中最靠近栈顶的用户类及最近栈底的scala或者Spark核心信息
private val creationSite: CallSite = Utils.getCallSite()
// If true, log warnings instead of throwing exceptions when multiple SparkContexts are active
// 如果为true,多个激活sparkcontexts日志警告,而不是抛出异常
//SparkContext默认只有一个实例
private val allowMultipleContexts: Boolean =
config.getBoolean("spark.driver.allowMultipleContexts", false)
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having started construction.
//为了防止同时激活多个sparkcontexts,标记当前上下文正在构建中
// NOTE: this must be placed at the beginning of the SparkContext constructor.
//注意:这必须放在sparkcontext构造函数的开始
//用来确保实例的唯一性,并将当前Spark标记为正在构建中
SparkContext.markPartiallyConstructed(this, allowMultipleContexts)
// This is used only by YARN for now, but should be relevant to other cluster types (Mesos,
// etc) too. This is typically generated from InputFormatInfo.computePreferredLocations.
//现在只用于的YARN,但也可以是其他相关的类型集群,这通常来自computePreferredLocations(计算的首选地点)
// It contains a map from hostname to a list of input format splits on the host.
//Map的Key主机的输入格式,value
//它包含主机名的列表
private[spark] var preferredNodeLocationData: Map[String, Set[SplitInfo]] = Map()
//系统开始运行时间
val startTime = System.currentTimeMillis()
//暂停标示
private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false)
//如果已经停止抛出异常
private def assertNotStopped(): Unit = {
if (stopped.get()) {
throw new IllegalStateException("Cannot call methods on a stopped SparkContext")
}
}
/**
* Create a SparkContext that loads settings from system properties (for instance, when
* launching with ./bin/spark-submit).
* 创建一个sparkcontext加载设置系统属性(例如,当启动./bin/spark-submit)
*/
def this() = this(new SparkConf())
/**
* :: DeveloperApi ::
* Alternative constructor for setting preferred locations where Spark will create executors.
* 用于设置Spark将创建执行程序的首选位置的替代构造函数
* @param config a [[org.apache.spark.SparkConf]] object specifying other Spark parameters
* 指定其他Spark参数的对象
* @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on.
* 用于在YARN模式选择节点来启动容器
* Can be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
* from a list of input files or InputFormats for the application.
* 可以从应用程序的输入文件或InputFormat列表中使用
* [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]生成
*/
@deprecated("Passing in preferred locations has no effect at all, see SPARK-8949", "1.5.0")
@DeveloperApi
def this(config: SparkConf, preferredNodeLocationData: Map[String, Set[SplitInfo]]) = {
this(config)
logWarning("Passing in preferred locations has no effect at all, see SPARK-8949")
this.preferredNodeLocationData = preferredNodeLocationData
}
/**
* Alternative constructor that allows setting common Spark properties directly
* 允许直接设置公共Spark属性的替代构造函数
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI
* 你的应用程序的名称,在群集Web用户界面上显示
* @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters
* 指定其他Spark参数的对象
*/
def this(master: String, appName: String, conf: SparkConf) =
this(SparkContext.updatedConf(conf, master, appName))
/**
* Alternative constructor that allows setting common Spark properties directly
* 允许直接设置公共Spark属性的替代构造函数
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* 你的应用程序的名称,在群集Web用户界面上显示
* @param sparkHome Location where Spark is installed on cluster nodes.
* Spark安装在群集节点上的位置
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
* jar的集合发送到集群,这些可以是本地文件系统或HDFS,HTTP,HTTPS或FTP URL的路径
* @param environment Environment variables to set on worker nodes.
* 在工作节点上设置的环境变量
* @param preferredNodeLocationData used in YARN mode to select nodes to launch containers on.
* 用于在YARN模式选择节点来启动容器
* Can be generated using [[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]
* from a list of input files or InputFormats for the application.
*
* 可以从应用程序的输入文件或InputFormat列表中使用[[org.apache.spark.scheduler.InputFormatInfo.computePreferredLocations]]生成
*/
def this(
master: String,
appName: String,
sparkHome: String = null,
jars: Seq[String] = Nil,//列表结尾为Nil
environment: Map[String, String] = Map(),
preferredNodeLocationData: Map[String, Set[SplitInfo]] = Map()) =
{
this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment))
//nonEmpty 非空
if (preferredNodeLocationData.nonEmpty) {
//在优先位置传递完全没有影响
logWarning("Passing in preferred locations has no effect at all, see SPARK-8949")
}
this.preferredNodeLocationData = preferredNodeLocationData
}
// NOTE: The below constructors could be consolidated using default arguments. Due to
// 注意:下面的构造函数可以使用默认参数合并,但是,这会导致编译步骤在生成文档时失败,直到我们有一个很好的解决方法为该bug,构造函数仍然被破坏。
// Scala bug SI-8479, however, this causes the compile step to fail when generating docs.
// Until we have a good workaround for that bug the constructors remain broken out.
/**
* Alternative constructor that allows setting common Spark properties directly
* 允许直接设置公共Spark属性的替代构造函数
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* 应用程序的名称,以在集群Web UI上显示
*/
private[spark] def this(master: String, appName: String) =
this(master, appName, null, Nil, Map(), Map())//列表结尾为Nil
/**
* Alternative constructor that allows setting common Spark properties directly
* 允许直接设置公共Spark属性的替代构造函数
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.Spark安装在群集节点上的位置
*/
private[spark] def this(master: String, appName: String, sparkHome: String) =
this(master, appName, sparkHome, Nil, Map(), Map())//列表结尾为Nil
/**
* Alternative constructor that allows setting common Spark properties directly
* 允许直接设置公共Spark属性的替代构造函数
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.Spark安装在群集节点上的位置
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
* jar的集合发送到集群,这些可以是本地文件系统或HDFS,HTTP,HTTPS或FTP URL的路径
*/
private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) =
this(master, appName, sparkHome, jars, Map(), Map())
// Log the Spark version in the driver log as the very first constructor action.
logInfo(s"Running Spark version $SPARK_VERSION")
/* ------------------------------------------------------------------------------------- *
 | Private variables. These variables keep the internal state of the context, and are    |
 | not accessible by the outside world. They're mutable since we want to initialize all  |
 | of them to some neutral value ahead of time, so that calling "stop()" while the       |
 | constructor is still running is safe.                                                 |
 * ------------------------------------------------------------------------------------- */
private var _conf: SparkConf = _
private var _eventLogDir: Option[URI] = None
private var _eventLogCodec: Option[String] = None
private var _env: SparkEnv = _
private var _metadataCleaner: MetadataCleaner = _
private var _jobProgressListener: JobProgressListener = _
private var _statusTracker: SparkStatusTracker = _
private var _progressBar: Option[ConsoleProgressBar] = None
private var _ui: Option[SparkUI] = None
private var _hadoopConfiguration: Configuration = _
private var _executorMemory: Int = _
private var _schedulerBackend: SchedulerBackend = _
private var _taskScheduler: TaskScheduler = _
private var _heartbeatReceiver: RpcEndpointRef = _
// Volatile: the DAG scheduler is read from other threads (e.g. during stop()).
@volatile private var _dagScheduler: DAGScheduler = _
private var _applicationId: String = _
private var _applicationAttemptId: Option[String] = None
private var _eventLogger: Option[EventLoggingListener] = None
private var _executorAllocationManager: Option[ExecutorAllocationManager] = None
private var _cleaner: Option[ContextCleaner] = None
private var _listenerBusStarted: Boolean = false
private var _jars: Seq[String] = _
private var _files: Seq[String] = _
private var _shutdownHookRef: AnyRef = _
/* ------------------------------------------------------------------------------------- *
 | Accessors and public fields. These provide access to the internal state of the        |
 | context.                                                                              |
 * ------------------------------------------------------------------------------------- */
private[spark] def conf: SparkConf = _conf
/**
 * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be
 * changed at runtime (hence the defensive clone).
 */
def getConf: SparkConf = conf.clone()
def jars: Seq[String] = _jars
def files: Seq[String] = _files
// Master URL of the cluster this context connects to.
def master: String = _conf.get("spark.master")
// Application name shown in the cluster web UI.
def appName: String = _conf.get("spark.app.name")
// Whether Spark event logging is enabled (defaults to off).
private[spark] def isEventLogEnabled: Boolean = _conf.getBoolean("spark.eventLog.enabled", false)
private[spark] def eventLogDir: Option[URI] = _eventLogDir
private[spark] def eventLogCodec: Option[String] = _eventLogCodec
// Random name for a temp folder in the external block store.
// NOTE(review): the suffix is a random UUID, not a timestamp as an earlier comment claimed.
val externalBlockStoreFolderName = "spark-" + randomUUID.toString()
@deprecated("Use externalBlockStoreFolderName instead.", "1.4.0")
val tachyonFolderName = externalBlockStoreFolderName
// True when running in local mode ("local" or "local[N]" master URLs).
def isLocal: Boolean = (master == "local" || master.startsWith("local["))
// An asynchronous listener bus for Spark events. UI and other listeners subscribe to it;
// posting events here drives their state updates.
private[spark] val listenerBus = new LiveListenerBus
// This function allows components created by SparkEnv to be mocked in unit tests:
// it simply delegates to SparkEnv.createDriverEnv with this context's core count.
private[spark] def createSparkEnv(
conf: SparkConf,
isLocal: Boolean,
listenerBus: LiveListenerBus): SparkEnv = {
SparkEnv.createDriverEnv(conf, isLocal, listenerBus, SparkContext.numDriverCores(master))
}
private[spark] def env: SparkEnv = _env
// Used to store a URL for each static file/jar together with the file's local timestamp.
private[spark] val addedFiles = HashMap[String, Long]()
private[spark] val addedJars = HashMap[String, Long]()
// Keeps track of all persisted RDDs; weak values let unreferenced RDDs be collected.
private[spark] val persistentRdds = new TimeStampedWeakValueHashMap[Int, RDD[_]]
// Cleaner that expires stale persisted-RDD metadata.
private[spark] def metadataCleaner: MetadataCleaner = _metadataCleaner
// Listener that tracks job/stage progress for the UI.
private[spark] def jobProgressListener: JobProgressListener = _jobProgressListener
// Public status tracker for jobs and stages.
def statusTracker: SparkStatusTracker = _statusTracker
// Console progress bar (only present when enabled and INFO logging is off).
private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar
private[spark] def ui: Option[SparkUI] = _ui
/**
 * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse.
 * '''Note:''' As it will be reused in all Hadoop RDDs, it's better not to modify it unless you
 * plan to set some global configurations for all Hadoop RDDs.
 */
def hadoopConfiguration: Configuration = _hadoopConfiguration
private[spark] def executorMemory: Int = _executorMemory
// Environment variables to pass to our executors.
private[spark] val executorEnvs = HashMap[String, String]()
// Set SPARK_USER for user who is running SparkContext (the currently logged-in user).
val sparkUser = Utils.getCurrentUserName()
// Getter/setter pairs for the scheduling components; setters exist so tests can inject mocks.
private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend
private[spark] def schedulerBackend_=(sb: SchedulerBackend): Unit = {
_schedulerBackend = sb
}
private[spark] def taskScheduler: TaskScheduler = _taskScheduler
private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = {
_taskScheduler = ts
}
private[spark] def dagScheduler: DAGScheduler = _dagScheduler
private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = {
_dagScheduler = ds
}
/**
 * A unique identifier for the Spark application.
 * Its format depends on the scheduler implementation.
 * (i.e.
 *  in case of local spark app something like 'local-1433865536131'
 *  in case of YARN something like 'application_1433865536131_34483'
 * )
 */
def applicationId: String = _applicationId
def applicationAttemptId: Option[String] = _applicationAttemptId
// NOTE(review): returns null before the environment is initialized — callers must guard.
def metricsSystem: MetricsSystem = if (_env != null) _env.metricsSystem else null
private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger
private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] =
_executorAllocationManager
private[spark] def cleaner: Option[ContextCleaner] = _cleaner
// Directory used for RDD checkpointing, if one has been set.
private[spark] var checkpointDir: Option[String] = None
// Thread-local variable that can be used by users to pass information down the stack.
// Child threads inherit (a view or clone of) the parent's properties.
protected[spark] val localProperties = new InheritableThreadLocal[Properties] {
override protected def childValue(parent: Properties): Properties = {
// Note: make a clone such that changes in the parent properties aren't reflected in
// those of the children threads, which has confusing semantics (SPARK-10563).
// Cloning is opt-in via spark.localProperties.clone; the default chains to the parent.
if (conf.get("spark.localProperties.clone", "false").toBoolean) {
SerializationUtils.clone(parent).asInstanceOf[Properties]
} else {
new Properties(parent)
}
}
override protected def initialValue(): Properties = new Properties()
}
/* ------------------------------------------------------------------------------------- *
 | Initialization. This code initializes the context in a manner that is exception-safe. |
 | All internal fields holding state are initialized here, and any error prompts the     |
 | stop() method to be called.                                                           |
 * ------------------------------------------------------------------------------------- */
// Warn about the deprecated SPARK_MEM env var, then pass the value through unchanged.
private def warnSparkMem(value: String): String = {
logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " +
"deprecated, please use spark.executor.memory instead.")
value
}
/**
 * Control our logLevel. This overrides any user-defined log settings.
 * @param logLevel The desired log level as a string.
 * Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
 * @throws IllegalArgumentException if the supplied level is not one of the valid names
 */
def setLogLevel(logLevel: String) {
// Whitelist of accepted log4j level names.
val validLevels = Seq("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN")
if (validLevels.contains(logLevel)) {
// Translate the name into a log4j Level and apply it globally.
Utils.setLogLevel(org.apache.log4j.Level.toLevel(logLevel))
} else {
throw new IllegalArgumentException(
s"Supplied level $logLevel did not match one of: ${validLevels.mkString(",")}")
}
}
// Primary-constructor initialization block. Any failure triggers stop() and rethrows.
try {
// Clone the caller's SparkConf so later external mutations don't affect this context.
_conf = config.clone()
// Validate all configured settings up front.
_conf.validateSettings()
// spark.master and spark.app.name are mandatory; fail fast if either is missing.
if (!_conf.contains("spark.master")) {
throw new SparkException("A master URL must be set in your configuration")
}
if (!_conf.contains("spark.app.name")) {
throw new SparkException("An application name must be set in your configuration")
}
// System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster
// yarn-standalone is deprecated, but still supported
if ((master == "yarn-cluster" || master == "yarn-standalone") &&
!_conf.contains("spark.yarn.app.id")) {
throw new SparkException("Detected yarn-cluster mode, but isn't running on a cluster. " +
"Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.")
}
// Optionally log the effective SparkConf at startup.
if (_conf.getBoolean("spark.logConf", false)) {
logInfo("Spark configuration:\n" + _conf.toDebugString)
}
// Set Spark driver host and port system properties
_conf.setIfMissing("spark.driver.host", Utils.localHostName())
// Port 0 means "pick a random free port" for the driver to listen on.
_conf.setIfMissing("spark.driver.port", "0")
// The driver always runs with the reserved driver executor id.
_conf.set("spark.executor.id", SparkContext.DRIVER_IDENTIFIER)
_jars = _conf.getOption("spark.jars").map(_.split(",")).map(_.filter(_.size != 0)).toSeq.flatten// split comma list, drop empties
_files = _conf.getOption("spark.files").map(_.split(",")).map(_.filter(_.size != 0)).toSeq.flatten
// Directory for event logs; may be an hdfs:// or file:// URI and must already exist.
_eventLogDir =
if (isEventLogEnabled) {
val unresolvedDir = conf.get("spark.eventLog.dir", EventLoggingListener.DEFAULT_LOG_DIR)
// stripSuffix removes a trailing "/" before resolving the URI.
.stripSuffix("/")
Some(Utils.resolveURI(unresolvedDir))
} else {
None
}
_eventLogCodec = {
// Compress event logs only when both compression and event logging are enabled.
val compress = _conf.getBoolean("spark.eventLog.compress", false)
if (compress && isEventLogEnabled) {
Some(CompressionCodec.getCodecName(_conf)).map(CompressionCodec.getShortName)
} else {
None
}
}
_conf.set("spark.externalBlockStore.folderName", externalBlockStoreFolderName)
if (master == "yarn-client") System.setProperty("SPARK_YARN_MODE", "true")
// "_jobProgressListener" should be set up before creating SparkEnv because when creating
// "SparkEnv", some messages will be posted to "listenerBus" and we should not miss them.
// JobProgressListener tracks job/stage state (active, completed, failed, skipped) and the
// stage-to-job mapping by listening to events on the bus.
_jobProgressListener = new JobProgressListener(_conf)// keeps task progress current via listenerBus events
// Subscribe the progress listener so it receives all events.
listenerBus.addListener(jobProgressListener)
// Create the Spark execution environment (cache, map output tracker, etc)
_env = createSparkEnv(_conf, isLocal, listenerBus)
SparkEnv.set(_env)
// Periodically expire stale persisted RDDs; the cleanup callback purges persistentRdds.
_metadataCleaner = new MetadataCleaner(MetadataCleanerType.SPARK_CONTEXT, this.cleanup, _conf)
_statusTracker = new SparkStatusTracker(this)
_progressBar =
if (_conf.getBoolean("spark.ui.showConsoleProgress", true) && !log.isInfoEnabled) {
Some(new ConsoleProgressBar(this))
} else {
None
}
_ui =
if (conf.getBoolean("spark.ui.enabled", true)) {
Some(SparkUI.createLiveUI(this, _conf, listenerBus, _jobProgressListener,
_env.securityManager, appName, startTime = startTime))
} else {
// For tests, do not enable the UI
None
}
// Bind the UI before starting the task scheduler to communicate
// the bound port to the cluster manager properly
_ui.foreach(_.bind())
// Hadoop configuration, used by default for HDFS-backed file systems.
_hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf)
// Add each JAR given through the constructor
if (jars != null) {
jars.foreach(addJar)
}
if (files != null) {
files.foreach(addFile)
}
// Resolve per-executor memory from spark.executor.memory, then the SPARK_EXECUTOR_MEMORY
// env var, then the deprecated SPARK_MEM (with a warning), defaulting to 1024 MB.
_executorMemory = _conf.getOption("spark.executor.memory")// memory per executor process
.orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY")))
.orElse(Option(System.getenv("SPARK_MEM"))
.map(warnSparkMem))// deprecated fallback
.map(Utils.memoryStringToMb)
.getOrElse(1024)// default: 1024 MB
// Convert java options to env vars as a work around
// since we can't set env vars directly in sbt.
for { (envKey, propKey) <- Seq(("SPARK_TESTING", "spark.testing"))
value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} {
executorEnvs(envKey) = value
}
Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v =>
executorEnvs("SPARK_PREPEND_CLASSES") = v
}
// The Mesos scheduler backend relies on this environment variable to set executor memory.
// TODO: Set this only in the Mesos scheduler.
executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m"
executorEnvs ++= _conf.getExecutorEnv
executorEnvs("SPARK_USER") = sparkUser // propagate the launching user to executors
// We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will
// retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640)
_heartbeatReceiver = env.rpcEnv.setupEndpoint(
HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this))
// Create and start the scheduler. createTaskScheduler matches the master URL against the
// supported deploy modes and returns the matching SchedulerBackend/TaskScheduler pair.
val (sched, ts) = SparkContext.createTaskScheduler(this, master)
_schedulerBackend = sched
_taskScheduler = ts
// DAGScheduler prepares work before handing it to the TaskScheduler: it creates jobs,
// splits the RDD DAG into stages, and submits stages.
_dagScheduler = new DAGScheduler(this)
// Tell the HeartbeatReceiver that the task scheduler now exists (it sets
// scheduler = sc.taskScheduler and starts dead-host expiry checks).
_heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet)
// start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's
// constructor
_taskScheduler.start() // start the task scheduler
_applicationId = _taskScheduler.applicationId()// scheduler assigns the application ID
_applicationAttemptId = taskScheduler.applicationAttemptId()
_conf.set("spark.app.id", _applicationId)
_env.blockManager.initialize(_applicationId)
// The metrics system for Driver need to be set spark.app.id to app ID.
// So it should start after we get app ID from the task scheduler and set spark.app.id.
metricsSystem.start()// start the metrics system
// Attach the driver metrics servlet handler to the web ui after the metrics system is started.
metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler)))
_eventLogger =
if (isEventLogEnabled) {
val logger =
new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get,
_conf, _hadoopConfiguration)
logger.start()
listenerBus.addListener(logger)
Some(logger)
} else {
None
}
// Optionally scale number of executors dynamically based on workload. Exposed for testing.
val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf)
if (!dynamicAllocationEnabled && _conf.getBoolean("spark.dynamicAllocation.enabled", false)) {
logInfo("Dynamic Allocation and num executors both set, thus dynamic allocation disabled.")
}
// Create and start the ExecutorAllocationManager when dynamic allocation is on.
_executorAllocationManager =
if (dynamicAllocationEnabled) {
// Manages min/max executor counts and per-executor task slots.
Some(new ExecutorAllocationManager(this, listenerBus, _conf))
} else {
None
}
// Kick off dynamic allocation if configured.
_executorAllocationManager.foreach(_.start())
// ContextCleaner garbage-collects out-of-scope RDDs, ShuffleDependencies and Broadcasts.
// Enabled by default.
_cleaner =
if (_conf.getBoolean("spark.cleaner.referenceTracking", true)) {
Some(new ContextCleaner(this))
} else {
None
}
_cleaner.foreach(_.start())
setupAndStartListenerBus()
// Publish the environment and application-start events.
postEnvironmentUpdate()
postApplicationStart()
// Post init
_taskScheduler.postStartHook()
// Register metrics sources for the block manager and (if present) dynamic allocation.
_env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager))
_executorAllocationManager.foreach { e =>
_env.metricsSystem.registerSource(e.executorAllocationManagerSource)
}
// Make sure the context is stopped if the user forgets about it. This avoids leaving
// unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM
// is killed, though.
_shutdownHookRef = ShutdownHookManager.addShutdownHook(
ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () =>
logInfo("Invoking stop() from shutdown hook")
stop()
}
} catch {
case NonFatal(e) =>
logError("Error initializing SparkContext.", e)
try {
stop()
} catch {
case NonFatal(inner) =>
logError("Error stopping SparkContext after init error.", inner)
} finally {
throw e
}
}
/**
 * Called by the web UI to obtain executor thread dumps. This method may be expensive.
 * Logs an error and returns None if we failed to obtain a thread dump, which could occur due
 * to an executor being dead or unresponsive or due to network issues while sending the thread
 * dump message back to the driver.
 */
private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = {
try {
if (executorId == SparkContext.DRIVER_IDENTIFIER) {
// The "executor" is actually the driver itself — dump our own threads locally.
Some(Utils.getThreadDump())
} else {
// Look up the executor's RPC address and ask it to dump its threads remotely.
val (host, port) = env.blockManager.master.getRpcHostPortForExecutor(executorId).get
val endpointRef = env.rpcEnv.setupEndpointRef(
SparkEnv.executorActorSystemName,
RpcAddress(host, port),
ExecutorEndpoint.EXECUTOR_ENDPOINT_NAME)
Some(endpointRef.askWithRetry[Array[ThreadStackTrace]](TriggerThreadDump))
}
} catch {
case e: Exception =>
logError(s"Exception getting thread dump from executor $executorId", e)
None
}
}
// Raw access to this thread's local properties (see localProperties above).
private[spark] def getLocalProperties: Properties = localProperties.get()
private[spark] def setLocalProperties(props: Properties) {
localProperties.set(props)
}
// Deprecated no-op-style initializer kept for source compatibility.
@deprecated("Properties no longer need to be explicitly initialized.", "1.0.0")
def initLocalProperties() {
localProperties.set(new Properties())
}
/**
 * Set a local property that affects jobs submitted from this thread, such as the
 * Spark fair scheduler pool. Passing a null value removes the key instead of storing null.
 */
def setLocalProperty(key: String, value: String) {
val props = localProperties.get
// null is the "clear" sentinel: remove rather than store it.
Option(value) match {
case Some(v) => props.setProperty(key, v)
case None => props.remove(key)
}
}
/**
 * Get a local property set in this thread, or null if it is missing. See
 * [[org.apache.spark.SparkContext.setLocalProperty]].
 */
def getLocalProperty(key: String): String = {
val props = localProperties.get
// Guard against a null thread-local; Properties.getProperty already yields null when absent.
if (props == null) null else props.getProperty(key)
}
/**
 * Set a human readable description of the current job, shown in the web UI.
 */
def setJobDescription(value: String) {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value)
}
/**
 * Assigns a group ID to all the jobs started by this thread until the group ID is set to a
 * different value or cleared.
 *
 * Often, a unit of execution in an application consists of multiple Spark actions or jobs.
 * Application programmers can use this method to group all those jobs together and give a
 * group description. Once set, the Spark web UI will associate such jobs with this group.
 *
 * The application can also use [[org.apache.spark.SparkContext.cancelJobGroup]] to cancel all
 * running jobs in this group. For example,
 * {{{
 * // In the main thread:
 * sc.setJobGroup("some_job_to_cancel", "some job description")
 * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count()
 *
 * // In a separate thread:
 * sc.cancelJobGroup("some_job_to_cancel")
 * }}}
 *
 * If interruptOnCancel is set to true for the job group, then job cancellation will result
 * in Thread.interrupt() being called on the job's executor threads. This is useful to help ensure
 * that the tasks are actually stopped in a timely manner, but is off by default due to HDFS-1208,
 * where HDFS may respond to Thread.interrupt() by marking nodes as dead.
 */
def setJobGroup(groupId: String, description: String, interruptOnCancel: Boolean = false) {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId)
// Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids
// changing several public APIs and allows Spark cancellations outside of the cancelJobGroup
// APIs to also take advantage of this property (e.g., internal job failures or canceling from
// JobProgressTab UI) on a per-job basis.
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString)
}
/**
 * Clear the current thread's job group ID and its description.
 */
def clearJobGroup() {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null)
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null)
}
/**
 * Execute a block of code in a scope such that all new RDDs created in this body will
 * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}.
 *
 * Note: Return statements are NOT allowed in the given body.
 */
private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body)
// Methods for creating RDDs
/**
 * Distribute a local Scala collection to form an RDD.
 * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call
 * to parallelize and before the first action on the RDD, the resultant RDD will reflect the
 * modified collection. Pass a copy of the argument to avoid this.
 * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an
 * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions.
 *
 * The ClassTag context bound lets the (erased) element type T be recovered at runtime.
 */
def parallelize[T: ClassTag](
seq: Seq[T],
numSlices: Int = defaultParallelism): RDD[T] = withScope {
assertNotStopped()// fail fast if this context has been stopped
// No explicit location preferences: pass an empty index->hosts map.
new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]())
}
/**
 * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by
 * `step` every element.
 * @note if we need to cache this RDD, we should make sure each partition does not exceed limit.
 *
 * @param start the start value.
 * @param end the end value.
 * @param step the incremental step
 * @param numSlices the partition number of the new RDD.
 * @return an RDD of Longs covering the requested range
 */
def range(
start: Long,
end: Long,
step: Long = 1,
numSlices: Int = defaultParallelism): RDD[Long] = withScope {
assertNotStopped()
// when step is 0, range will run infinitely
require(step != 0, "step cannot be 0")
// Element count is computed with BigInt so huge ranges don't overflow Long.
val numElements: BigInt = {
val safeStart = BigInt(start)
val safeEnd = BigInt(end)
if ((safeEnd - safeStart) % step == 0 || safeEnd > safeStart ^ step > 0) {
(safeEnd - safeStart) / step
} else {
// the remainder has the same sign with range, could add 1 more
(safeEnd - safeStart) / step + 1
}
}
// Each partition i lazily generates its own sub-range [partitionStart, partitionEnd).
parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex((i, _) => {
val partitionStart = (i * numElements) / numSlices * step + start
val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start
// Clamp a BigInt boundary back into the Long domain.
def getSafeMargin(bi: BigInt): Long =
if (bi.isValidLong) {
bi.toLong
} else if (bi > 0) {
Long.MaxValue
} else {
Long.MinValue
}
val safePartitionStart = getSafeMargin(partitionStart)
val safePartitionEnd = getSafeMargin(partitionEnd)
// Hand-rolled iterator with explicit Long-overflow detection.
new Iterator[Long] {
private[this] var number: Long = safePartitionStart
private[this] var overflow: Boolean = false
override def hasNext =
if (!overflow) {
if (step > 0) {
number < safePartitionEnd
} else {
number > safePartitionEnd
}
} else false
override def next() = {
val ret = number
number += step
if (number < ret ^ step < 0) {
// we have Long.MaxValue + Long.MaxValue < Long.MaxValue
// and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step
// back, we are pretty sure that we have an overflow.
overflow = true
}
ret
}
}
})
}
/** Distribute a local Scala collection to form an RDD.
 * This method is identical to `parallelize`.
 */
def makeRDD[T: ClassTag](
seq: Seq[T],
numSlices: Int = defaultParallelism): RDD[T] = withScope {
parallelize(seq, numSlices)
}
/**
 * Distribute a local Scala collection to form an RDD, with one or more
 * location preferences (hostnames of Spark nodes) for each object.
 * Create a new partition for each collection item.
 */
def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope {
assertNotStopped()
// Partition i holds element i, so map each index to that element's preferred hosts.
val indexToPrefs = seq.zipWithIndex.map { case ((_, prefs), i) => (i, prefs) }.toMap
// One partition per element, each carrying its location preferences.
new ParallelCollectionRDD[T](this, seq.map(_._1), seq.size, indexToPrefs)
}
/**
 * Read a text file from HDFS, a local file system (available on all nodes), or any
 * Hadoop-supported file system URI, and return it as an RDD of Strings.
 */
def textFile(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[String] = withScope {
assertNotStopped()
// Delegate to hadoopFile with TextInputFormat, then keep only the line text
hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text],
// (the LongWritable key is the byte offset, which callers don't need).
minPartitions).map(pair => pair._2.toString)
}
/**
 * Read a directory of text files from HDFS, a local file system (available on all nodes), or any
 * Hadoop-supported file system URI. Each file is read as a single record and returned in a
 * key-value pair, where the key is the path of each file, the value is the content of each file.
 * <p> For example, if you have the following files:
 * {{{
 *   hdfs://a-hdfs-path/part-00000
 *   hdfs://a-hdfs-path/part-00001
 *   ...
 *   hdfs://a-hdfs-path/part-nnnnn
 * }}}
 *
 * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`,
 *
 * <p> then `rdd` contains
 * {{{
 *   (a-hdfs-path/part-00000, its content)
 *   (a-hdfs-path/part-00001, its content)
 *   ...
 *   (a-hdfs-path/part-nnnnn, its content)
 * }}}
 *
 * @note Small files are preferred, large file is also allowable, but may cause bad performance.
 * @note On some filesystems, `.../path/*` can be a more efficient way to read all files
 *       in a directory rather than `.../path/` or `.../path`
 *
 * @param path Directory to the input data files, the path can be comma separated paths as the
 *             list of inputs.
 * @param minPartitions A suggestion value of the minimal splitting number for input data.
 */
def wholeTextFiles(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope {
assertNotStopped()
val job = new NewHadoopJob(hadoopConfiguration)
// Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updateConf = job.getConfiguration
new WholeTextFileRDD(
this,
classOf[WholeTextFileInputFormat],
classOf[String],
classOf[String],
updateConf,
minPartitions).setName(path)
}
/**
 * :: Experimental ::
 *
 * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file
 * (useful for binary data). Each file becomes one (path, stream) record.
 * For example, if you have the following files:
 * {{{
 *   hdfs://a-hdfs-path/part-00000
 *   hdfs://a-hdfs-path/part-00001
 *   ...
 *   hdfs://a-hdfs-path/part-nnnnn
 * }}}
 *
 * Do
 * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`,
 *
 * then `rdd` contains
 * {{{
 *   (a-hdfs-path/part-00000, its content)
 *   (a-hdfs-path/part-00001, its content)
 *   ...
 *   (a-hdfs-path/part-nnnnn, its content)
 * }}}
 *
 * @note Small files are preferred; very large files may cause bad performance.
 * @note On some filesystems, `.../path/*` can be a more efficient way to read all files
 *       in a directory rather than `.../path/` or `.../path`
 *
 * @param path Directory to the input data files, the path can be comma separated paths as the
 *             list of inputs.
 * @param minPartitions A suggestion value of the minimal splitting number for input data.
 */
@Experimental
def binaryFiles(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope {
assertNotStopped()
val job = new NewHadoopJob(hadoopConfiguration)
// Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updateConf = job.getConfiguration
new BinaryFileRDD(
this,
classOf[StreamInputFormat],
classOf[String],
classOf[PortableDataStream],
updateConf,
minPartitions).setName(path)
}
/**
 * :: Experimental ::
 *
 * Load data from a flat binary file, assuming the length of each record is constant.
 * '''Note:''' We ensure that the byte array for each record in the resulting RDD
 * has the provided record length.
 *
 * @param path Directory to the input data files, the path can be comma separated paths as the
 *             list of inputs.
 * @param recordLength The length at which to split the records
 * @param conf Configuration for setting up the dataset.
 *
 * @return An RDD of data with values, represented as byte arrays
 */
@Experimental
def binaryRecords(
path: String,
recordLength: Int,
conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope {
assertNotStopped()
// Tell the fixed-length input format how long each record is.
conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path,
classOf[FixedLengthBinaryInputFormat],
classOf[LongWritable],
classOf[BytesWritable],
conf = conf)
// Keys are record offsets, which callers don't need — keep only the raw bytes,
// asserting each record really has the requested length.
br.map { case (_, v) =>
val bytes = v.getBytes
assert(bytes.length == recordLength, "Byte array does not have correct length")
bytes
}
}
/**
 * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other
 * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable),
 * using the older MapReduce API (`org.apache.hadoop.mapred`).
 *
 * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast.
 *             Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
 *             sure you won't modify the conf. A safe approach is always creating a new conf for
 *             a new RDD.
 * @param inputFormatClass Class of the InputFormat
 * @param keyClass Class of the keys
 * @param valueClass Class of the values
 * @param minPartitions Minimum number of Hadoop Splits to generate.
 *
 * '''Note:''' Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def hadoopRDD[K, V](
    conf: JobConf,
    inputFormatClass: Class[_ <: InputFormat[K, V]],
    keyClass: Class[K],
    valueClass: Class[V],
    minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
  assertNotStopped()
  // Add necessary security credentials to the JobConf before broadcasting it.
  SparkHadoopUtil.get.addCredentials(conf)
  new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions)
}
/** Get an RDD for a Hadoop file with an arbitrary InputFormat.
 *
 * '''Note:''' Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def hadoopFile[K, V](
    path: String,
    inputFormatClass: Class[_ <: InputFormat[K, V]],
    keyClass: Class[K],
    valueClass: Class[V],
    minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
  assertNotStopped()
  // A Hadoop configuration can be about 10 KB, which is pretty big, so broadcast it once
  // (wrapped in SerializableConfiguration for serialization) instead of shipping it per task.
  val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration))
  // Function that sets the input path on a JobConf; applied later, on the executor side.
  val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path)
  // Construct the HadoopRDD.
  new HadoopRDD(
    this, // `this` is the SparkContext
    confBroadcast,
    Some(setInputPathsFunc),
    inputFormatClass,
    keyClass,
    valueClass,
    minPartitions).setName(path)
}
/**
 * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
 * values and the InputFormat so that users don't need to pass them directly. Instead, callers
 * can just write, for example,
 * {{{
 * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions)
 * }}}
 *
 * '''Note:''' Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def hadoopFile[K, V, F <: InputFormat[K, V]]
    (path: String, minPartitions: Int)
    (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
  // Recover the runtime Class objects from the implicit ClassTags and delegate.
  hadoopFile(path,
    fm.runtimeClass.asInstanceOf[Class[F]],
    km.runtimeClass.asInstanceOf[Class[K]],
    vm.runtimeClass.asInstanceOf[Class[V]],
    minPartitions)
}
/**
 * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
 * values and the InputFormat so that users don't need to pass them directly. Instead, callers
 * can just write, for example,
 * {{{
 * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path)
 * }}}
 *
 * '''Note:''' Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def hadoopFile[K, V, F <: InputFormat[K, V]](path: String)
    (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
  hadoopFile[K, V, F](path, defaultMinPartitions)
}
/**
 * Get an RDD for a Hadoop file with an arbitrary new API InputFormat.
 */
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]
    (path: String)
    (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
  // Recover the runtime Class objects from the implicit ClassTags and delegate.
  newAPIHadoopFile(
    path,
    fm.runtimeClass.asInstanceOf[Class[F]],
    km.runtimeClass.asInstanceOf[Class[K]],
    vm.runtimeClass.asInstanceOf[Class[V]])
}
/**
 * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
 * and extra configuration options to pass to the input format.
 *
 * '''Note:''' Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
    path: String,
    fClass: Class[F],
    kClass: Class[K],
    vClass: Class[V],
    conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope {
  assertNotStopped()
  // The call to new NewHadoopJob automatically adds security credentials to conf,
  // so we don't need to explicitly add them ourselves.
  val job = new NewHadoopJob(conf)
  // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking
  // comma separated files as input. (see SPARK-7155)
  NewFileInputFormat.setInputPaths(job, path)
  val updatedConf = job.getConfiguration
  new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path)
}
/**
 * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
 * and extra configuration options to pass to the input format.
 *
 * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
 *             Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
 *             sure you won't modify the conf. A safe approach is always creating a new conf for
 *             a new RDD.
 * @param fClass Class of the InputFormat
 * @param kClass Class of the keys
 * @param vClass Class of the values
 *
 * '''Note:''' Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
    conf: Configuration = hadoopConfiguration,
    fClass: Class[F],
    kClass: Class[K],
    vClass: Class[V]): RDD[(K, V)] = withScope {
  assertNotStopped()
  // Add necessary security credentials to the JobConf. Required to access secure HDFS.
  val jconf = new JobConf(conf)
  SparkHadoopUtil.get.addCredentials(jconf)
  new NewHadoopRDD(this, fClass, kClass, vClass, jconf)
}
/** Get an RDD for a Hadoop SequenceFile with given key and value types.
 *
 * '''Note:''' Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def sequenceFile[K, V](path: String,
    keyClass: Class[K],
    valueClass: Class[V],
    minPartitions: Int
    ): RDD[(K, V)] = withScope {
  assertNotStopped()
  // SequenceFiles are read via the old-API SequenceFileInputFormat.
  val inputFormatClass = classOf[SequenceFileInputFormat[K, V]]
  hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions)
}
/** Get an RDD for a Hadoop SequenceFile with given key and value types,
 * using the default minimum number of partitions.
 *
 * '''Note:''' Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def sequenceFile[K, V](
    path: String,
    keyClass: Class[K],
    valueClass: Class[V]): RDD[(K, V)] = withScope {
  assertNotStopped()
  sequenceFile(path, keyClass, valueClass, defaultMinPartitions)
}
/**
 * Version of sequenceFile() for types implicitly convertible to Writables through a
 * WritableConverter. For example, to access a SequenceFile where the keys are Text and the
 * values are IntWritable, you could simply write
 * {{{
 * sparkContext.sequenceFile[String, Int](path, ...)
 * }}}
 *
 * WritableConverters are provided in a somewhat strange way (by an implicit function) to support
 * both subclasses of Writable and types for which we define a converter (e.g. Int to
 * IntWritable). The most natural thing would've been to have implicit objects for the
 * converters, but then we couldn't have an object for every subclass of Writable (you can't
 * have a parameterized singleton object). We use functions instead to create a new converter
 * for the appropriate type. In addition, we pass the converter a ClassTag of its type to
 * allow it to figure out the Writable class to use in the subclass case.
 *
 * '''Note:''' Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def sequenceFile[K, V]
    (path: String, minPartitions: Int = defaultMinPartitions)
    (implicit km: ClassTag[K], vm: ClassTag[V],
     kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = {
  withScope {
    assertNotStopped()
    // Clean the converter factories so their closures are serializable, then build converters.
    val kc = clean(kcf)()
    val vc = clean(vcf)()
    val format = classOf[SequenceFileInputFormat[Writable, Writable]]
    // Read raw Writables, then convert each pair into the user-visible K/V types.
    val writables = hadoopFile(path, format,
      kc.writableClass(km).asInstanceOf[Class[Writable]],
      vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions)
    writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) }
  }
}
/**
 * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and
 * BytesWritable values that contain a serialized partition. This is still an experimental
 * storage format and may not be supported exactly as is in future Spark releases. It will also
 * be pretty slow if you use the default serializer (Java serialization),
 * though the nice thing about it is that there's very little effort required to save arbitrary
 * objects.
 */
def objectFile[T: ClassTag](
    path: String,
    minPartitions: Int = defaultMinPartitions): RDD[T] = withScope {
  assertNotStopped()
  // Each BytesWritable value holds a serialized Array[T]; deserialize and flatten them.
  sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions)
    .flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader))
}
/** Rebuild an RDD from checkpoint files at `path` (backed by ReliableCheckpointRDD). */
protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope {
  new ReliableCheckpointRDD[T](this, path)
}
/**
 * Build the union of a list of RDDs.
 */
def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope {
  val partitioners = rdds.flatMap(_.partitioner).toSet
  // If every input RDD has a partitioner and it is the same one, use the
  // partitioner-aware union so the result keeps that partitioner; otherwise
  // fall back to a plain UnionRDD.
  if (rdds.forall(_.partitioner.isDefined) && partitioners.size == 1) {
    new PartitionerAwareUnionRDD(this, rdds)
  } else {
    new UnionRDD(this, rdds)
  }
}
/**
 * Build the union of a list of RDDs passed as variable-length arguments.
 */
def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope {
  // Prepend the mandatory first RDD to the varargs and delegate to the Seq overload.
  union(first +: rest)
}
/**
 * Get an RDD that has no partitions or elements.
 */
def emptyRDD[T: ClassTag]: EmptyRDD[T] = new EmptyRDD[T](this)
// Methods for creating shared variables
/**
 * Create an [[org.apache.spark.Accumulator]] variable of a given type, which tasks can "add"
 * values to using the `+=` method. Only the driver can access the accumulator's `value`.
 */
def accumulator[T](initialValue: T)(implicit param: AccumulatorParam[T]): Accumulator[T] =
{
  val acc = new Accumulator(initialValue, param)
  // Register with the ContextCleaner (if enabled) so the accumulator is cleaned up later.
  cleaner.foreach(_.registerAccumulatorForCleanup(acc))
  acc
}
/**
 * Create an [[org.apache.spark.Accumulator]] variable of a given type, with a name for display
 * in the Spark UI. Tasks can "add" values to the accumulator using the `+=` method. Only the
 * driver can access the accumulator's `value`.
 */
def accumulator[T](initialValue: T, name: String)(implicit param: AccumulatorParam[T])
  : Accumulator[T] = {
  val acc = new Accumulator(initialValue, param, Some(name))
  cleaner.foreach(_.registerAccumulatorForCleanup(acc))
  acc
}
/**
 * Create an [[org.apache.spark.Accumulable]] shared variable, to which tasks can add values
 * with `+=`. Only the driver can access the accumuable's `value`.
 *
 * @tparam R accumulator result type
 * @tparam T type that can be added to the accumulator
 */
def accumulable[R, T](initialValue: R)(implicit param: AccumulableParam[R, T])
  : Accumulable[R, T] = {
  val acc = new Accumulable(initialValue, param)
  cleaner.foreach(_.registerAccumulatorForCleanup(acc))
  acc
}
/**
 * Create an [[org.apache.spark.Accumulable]] shared variable, with a name for display in the
 * Spark UI. Tasks can add values to the accumuable using the `+=` operator. Only the driver can
 * access the accumuable's `value`.
 *
 * @tparam R accumulator result type
 * @tparam T type that can be added to the accumulator
 */
def accumulable[R, T](initialValue: R, name: String)(implicit param: AccumulableParam[R, T])
  : Accumulable[R, T] = {
  val acc = new Accumulable(initialValue, param, Some(name))
  cleaner.foreach(_.registerAccumulatorForCleanup(acc))
  acc
}
/**
 * Create an accumulator from a "mutable collection" type.
 *
 * Growable and TraversableOnce are the standard APIs that guarantee += and ++=, implemented by
 * standard mutable collections. So you can use this with mutable Map, Set, etc.
 */
def accumulableCollection[R <% Growable[T] with TraversableOnce[T] with Serializable: ClassTag, T]
    (initialValue: R): Accumulable[R, T] = {
  val param = new GrowableAccumulableParam[R, T]
  val acc = new Accumulable(initialValue, param)
  cleaner.foreach(_.registerAccumulatorForCleanup(acc))
  acc
}
/**
 * Broadcast a read-only variable to the cluster, returning a
 * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions.
 * The variable will be sent to each cluster only once.
 */
def broadcast[T: ClassTag](value: T): Broadcast[T] = {
  assertNotStopped()
  if (classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass)) {
    // This is a warning instead of an exception in order to avoid breaking user programs that
    // might have created RDD broadcast variables but not used them:
    logWarning("Can not directly broadcast RDDs; instead, call collect() and "
      + "broadcast the result (see SPARK-5063)")
  }
  val bc = env.broadcastManager.newBroadcast[T](value, isLocal)
  val callSite = getCallSite
  logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm)
  // Register the broadcast with the ContextCleaner (if enabled) so it can be cleaned up later.
  cleaner.foreach(_.registerBroadcastForCleanup(bc))
  bc
}
/**
 * Add a file to be downloaded with this Spark job on every node.
 * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
 * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
 * use `SparkFiles.get(fileName)` to find its download location.
 */
def addFile(path: String): Unit = {
  // Delegate to the two-argument overload; directories are not supported here.
  addFile(path, recursive = false)
}
/**
 * Add a file to be downloaded with this Spark job on every node.
 * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
 * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
 * use `SparkFiles.get(fileName)` to find its download location.
 *
 * A directory can be given if the recursive option is set to true. Currently directories are only
 * supported for Hadoop-supported filesystems.
 */
def addFile(path: String, recursive: Boolean): Unit = {
  val uri = new URI(path)
  val schemeCorrectedPath = uri.getScheme match {
    // No scheme (or "local") means a driver-local path; resolve it to a canonical file: URI
    // (getCanonicalFile also resolves "." and ".." segments).
    case null | "local" => new File(path).getCanonicalFile.toURI.toString
    case _ => path
  }
  val hadoopPath = new Path(schemeCorrectedPath)
  val scheme = new URI(schemeCorrectedPath).getScheme
  // For non-HTTP(S)/FTP schemes, validate the path through the Hadoop filesystem API.
  if (!Array("http", "https", "ftp").contains(scheme)) {
    val fs = hadoopPath.getFileSystem(hadoopConfiguration)
    if (!fs.exists(hadoopPath)) {
      throw new FileNotFoundException(s"Added file $hadoopPath does not exist.")
    }
    val isDir = fs.getFileStatus(hadoopPath).isDir
    if (!isLocal && scheme == "file" && isDir) {
      throw new SparkException(s"addFile does not support local directories when not running " +
        "local mode.")
    }
    if (!recursive && isDir) {
      throw new SparkException(s"Added file $hadoopPath is a directory and recursive is not " +
        "turned on.")
    }
  }
  // Driver-local files in cluster mode are served via the HTTP file server; anything else
  // is referenced by its (corrected) path directly.
  val key = if (!isLocal && scheme == "file") {
    env.httpFileServer.addFile(new File(uri.getPath))
  } else {
    schemeCorrectedPath
  }
  val timestamp = System.currentTimeMillis
  addedFiles(key) = timestamp
  // Fetch the file locally in case a job is executed using DAGScheduler.runLocally().
  Utils.fetchFile(path, new File(SparkFiles.getRootDirectory()), conf, env.securityManager,
    hadoopConfiguration, timestamp, useCache = false)
  logInfo("Added file " + path + " at " + key + " with timestamp " + addedFiles(key))
  postEnvironmentUpdate()
}
/**
 * :: DeveloperApi ::
 * Register a listener to receive up-calls from events that happen during execution.
 */
@DeveloperApi
def addSparkListener(listener: SparkListener) {
  listenerBus.addListener(listener)
}
/**
 * Update the cluster manager on our scheduling needs. Three bits of information are included
 * to help it make decisions.
 *
 * @param numExecutors The total number of executors we'd like to have. The cluster manager
 *                     shouldn't kill any running executor to reach this number, but,
 *                     if all existing executors were to die, this is the number of executors
 *                     we'd want to be allocated.
 * @param localityAwareTasks The number of tasks in all active stages that have a locality
 *                           preferences. This includes running, pending, and completed tasks.
 * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
 *                             that would like to like to run on that host.
 *                             This includes running, pending, and completed tasks.
 *
 * @return whether the request is acknowledged by the cluster manager.
 */
private[spark] override def requestTotalExecutors(
    numExecutors: Int,
    localityAwareTasks: Int,
    hostToLocalTaskCount: scala.collection.immutable.Map[String, Int]
  ): Boolean = {
  schedulerBackend match {
    case b: CoarseGrainedSchedulerBackend =>
      b.requestTotalExecutors(numExecutors, localityAwareTasks, hostToLocalTaskCount)
    case _ =>
      // Fine-grained backends (e.g. local mode) cannot resize; report failure.
      logWarning("Requesting executors is only supported in coarse-grained mode")
      false
  }
}
/**
 * :: DeveloperApi ::
 * Request an additional number of executors from the cluster manager.
 *
 * @return whether the request is received.
 */
@DeveloperApi
override def requestExecutors(numAdditionalExecutors: Int): Boolean = {
  schedulerBackend match {
    case b: CoarseGrainedSchedulerBackend =>
      b.requestExecutors(numAdditionalExecutors)
    case _ =>
      logWarning("Requesting executors is only supported in coarse-grained mode")
      false
  }
}
/**
 * :: DeveloperApi ::
 * Request that the cluster manager kill the specified executors.
 *
 * Note: This is an indication to the cluster manager that the application wishes to adjust
 * its resource usage downwards. If the application wishes to replace the executors it kills
 * through this method with new ones, it should follow up explicitly with a call to
 * {{SparkContext#requestExecutors}}.
 *
 * @return whether the request is received.
 */
@DeveloperApi
override def killExecutors(executorIds: Seq[String]): Boolean = {
  schedulerBackend match {
    case b: CoarseGrainedSchedulerBackend =>
      b.killExecutors(executorIds)
    case _ =>
      logWarning("Killing executors is only supported in coarse-grained mode")
      false
  }
}
/**
 * :: DeveloperApi ::
 * Request that the cluster manager kill the specified executor.
 *
 * Note: This is an indication to the cluster manager that the application wishes to adjust
 * its resource usage downwards. If the application wishes to replace the executor it kills
 * through this method with a new one, it should follow up explicitly with a call to
 * {{SparkContext#requestExecutors}}.
 *
 * @return whether the request is received.
 */
@DeveloperApi
override def killExecutor(executorId: String): Boolean = super.killExecutor(executorId)
/**
 * Request that the cluster manager kill the specified executor without adjusting the
 * application resource requirements.
 *
 * The effect is that a new executor will be launched in place of the one killed by
 * this request. This assumes the cluster manager will automatically and eventually
 * fulfill all missing application resource requests.
 *
 * Note: The replace is by no means guaranteed; another application on the same cluster
 * can steal the window of opportunity and acquire this application's resources in the
 * mean time.
 *
 * @return whether the request is received.
 */
private[spark] def killAndReplaceExecutor(executorId: String): Boolean = {
  schedulerBackend match {
    case b: CoarseGrainedSchedulerBackend =>
      // `replace = true` keeps the executor target count unchanged so a replacement is launched.
      b.killExecutors(Seq(executorId), replace = true)
    case _ =>
      logWarning("Killing executors is only supported in coarse-grained mode")
      false
  }
}
/**
 * The version of Spark on which this application is running.
 */
def version: String = SPARK_VERSION
/**
 * Return a map from the slave to the max memory available for caching and the remaining
 * memory available for caching.
 */
def getExecutorMemoryStatus: Map[String, (Long, Long)] = {
  assertNotStopped()
  // Key each entry by "host:port" of the block manager that reported it.
  env.blockManager.master.getMemoryStatus.map { case (blockManagerId, mem) =>
    (s"${blockManagerId.host}:${blockManagerId.port}", mem)
  }
}
/**
 * :: DeveloperApi ::
 * Return information about what RDDs are cached, if they are in mem or on disk, how much space
 * they take, etc.
 */
@DeveloperApi
def getRDDStorageInfo: Array[RDDInfo] = {
  assertNotStopped()
  val rddInfos = persistentRdds.values.map(RDDInfo.fromRdd).toArray
  // Fill in current storage usage from the executors, then keep only cached RDDs.
  StorageUtils.updateRddInfo(rddInfos, getExecutorStorageStatus)
  rddInfos.filter(_.isCached)
}
/**
 * Returns an immutable map of RDDs that have marked themselves as persistent via cache() call.
 * Note that this does not necessarily mean the caching or computation was successful.
 */
def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap
/**
 * :: DeveloperApi ::
 * Return information about blocks stored in all of the slaves
 */
@DeveloperApi
def getExecutorStorageStatus: Array[StorageStatus] = {
  assertNotStopped()
  env.blockManager.master.getStorageStatus
}
/**
 * :: DeveloperApi ::
 * Return pools for fair scheduler
 */
@DeveloperApi
def getAllPools: Seq[Schedulable] = {
  assertNotStopped()
  // TODO(xiajunluan): We should take nested pools into account
  taskScheduler.rootPool.schedulableQueue.toSeq
}
/**
 * :: DeveloperApi ::
 * Return the pool associated with the given name, if one exists
 */
@DeveloperApi
def getPoolForName(pool: String): Option[Schedulable] = {
  assertNotStopped()
  // Wrap in Option: the lookup returns null when no pool has this name.
  Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool))
}
/**
 * Return current scheduling mode
 */
def getSchedulingMode: SchedulingMode.SchedulingMode = {
  assertNotStopped()
  taskScheduler.schedulingMode
}
/**
 * Clear the job's list of files added by `addFile` so that they do not get downloaded to
 * any new nodes.
 */
@deprecated("adding files no longer creates local copies that need to be deleted", "1.0.0")
def clearFiles() {
  addedFiles.clear()
}
/**
 * Gets the locality information associated with the partition in a particular rdd
 *
 * @param rdd of interest
 * @param partition to be looked up for locality
 * @return list of preferred locations for the partition
 */
private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = {
  dagScheduler.getPreferredLocs(rdd, partition)
}
/**
 * Register an RDD to be persisted in memory and/or disk storage
 */
private[spark] def persistRDD(rdd: RDD[_]) {
  persistentRdds(rdd.id) = rdd
}
/**
 * Unpersist an RDD from memory and/or disk storage
 *
 * @param blocking whether to block until all blocks are removed
 */
private[spark] def unpersistRDD(rddId: Int, blocking: Boolean = true) {
  env.blockManager.master.removeRdd(rddId, blocking)
  persistentRdds.remove(rddId) // drop our reference so the RDD can be garbage collected
  listenerBus.post(SparkListenerUnpersistRDD(rddId)) // notify listeners
}
/**
 * Adds a JAR dependency for all tasks to be executed on this SparkContext in the future.
 * The `path` passed can be either a local file, a file in HDFS (or other Hadoop-supported
 * filesystems), an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node.
 */
def addJar(path: String) {
  if (path == null) {
    logWarning("null specified as parameter to addJar")
  } else {
    var key = ""
    if (path.contains("\\")) {
      // For local paths with backslashes on Windows, URI throws an exception
      key = env.httpFileServer.addJar(new File(path))
    } else {
      val uri = new URI(path)
      key = uri.getScheme match {
        // A JAR file which exists only on the driver node
        case null | "file" =>
          // yarn-standalone is deprecated, but still supported
          if (SparkHadoopUtil.get.isYarnMode() &&
              (master == "yarn-standalone" || master == "yarn-cluster")) {
            // In order for this to work in yarn-cluster mode the user must specify the
            // --addJars option to the client to upload the file into the distributed cache
            // of the AM to make it show up in the current working directory.
            val fileName = new Path(uri.getPath).getName()
            try {
              env.httpFileServer.addJar(new File(fileName))
            } catch {
              case e: Exception =>
                // For now just log an error but allow to go through so spark examples work.
                // The spark examples don't really need the jar distributed since its also
                // the app jar.
                logError("Error adding jar (" + e + "), was the --addJars option used?")
                null
            }
          } else {
            try {
              env.httpFileServer.addJar(new File(uri.getPath))
            } catch {
              case exc: FileNotFoundException =>
                logError(s"Jar not found at $path")
                null
              case e: Exception =>
                // For now just log an error but allow to go through so spark examples work.
                // The spark examples don't really need the jar distributed since its also
                // the app jar.
                logError("Error adding jar (" + e + "), was the --addJars option used?")
                null
            }
          }
        // A JAR file which exists locally on every worker node
        case "local" =>
          "file:" + uri.getPath
        case _ =>
          path
      }
    }
    // key is null when serving the jar failed above; in that case the jar is not registered.
    if (key != null) {
      addedJars(key) = System.currentTimeMillis
      logInfo("Added JAR " + path + " at " + key + " with timestamp " + addedJars(key))
    }
  }
  postEnvironmentUpdate()
}
/**
 * Clear the job's list of JARs added by `addJar` so that they do not get downloaded to
 * any new nodes.
 */
@deprecated("adding jars no longer creates local copies that need to be deleted", "1.0.0")
def clearJars() {
  addedJars.clear()
}
// Shut down the SparkContext.
def stop() {
  // Use the stopping variable to ensure no contention for the stop scenario.
  // Still track the stopped variable for use elsewhere in the code.
  // compareAndSet guarantees only the first caller performs the shutdown sequence.
  if (!stopped.compareAndSet(false, true)) {
    logInfo("SparkContext already stopped.")
    return
  }
  if (_shutdownHookRef != null) {
    ShutdownHookManager.removeShutdownHook(_shutdownHookRef)
  }
  // Each step below is wrapped in tryLogNonFatalError so one failing component
  // does not prevent the rest of the shutdown from running.
  Utils.tryLogNonFatalError {
    postApplicationEnd()
  }
  Utils.tryLogNonFatalError {
    _ui.foreach(_.stop())
  }
  if (env != null) {
    Utils.tryLogNonFatalError {
      env.metricsSystem.report()
    }
  }
  if (metadataCleaner != null) {
    Utils.tryLogNonFatalError {
      metadataCleaner.cancel()
    }
  }
  Utils.tryLogNonFatalError {
    _cleaner.foreach(_.stop())
  }
  Utils.tryLogNonFatalError {
    _executorAllocationManager.foreach(_.stop())
  }
  if (_dagScheduler != null) {
    Utils.tryLogNonFatalError {
      _dagScheduler.stop()
    }
    _dagScheduler = null
  }
  if (_listenerBusStarted) {
    Utils.tryLogNonFatalError {
      listenerBus.stop()
      _listenerBusStarted = false
    }
  }
  Utils.tryLogNonFatalError {
    _eventLogger.foreach(_.stop())
  }
  if (env != null && _heartbeatReceiver != null) {
    Utils.tryLogNonFatalError {
      env.rpcEnv.stop(_heartbeatReceiver)
    }
  }
  Utils.tryLogNonFatalError {
    _progressBar.foreach(_.stop())
  }
  _taskScheduler = null
  // TODO: Cache.stop()?
  if (_env != null) {
    Utils.tryLogNonFatalError {
      _env.stop()
    }
    SparkEnv.set(null)
  }
  // Unset YARN mode system env variable, to allow switching between cluster types.
  System.clearProperty("SPARK_YARN_MODE")
  SparkContext.clearActiveContext()
  logInfo("Successfully stopped SparkContext")
}
/**
 * Get Spark's home location, preferring the `spark.home` configuration entry and
 * falling back to the `SPARK_HOME` environment variable (in that order of
 * preference). Returns `None` when neither source is set.
 */
private[spark] def getSparkHome(): Option[String] = {
  conf.getOption("spark.home") match {
    case configured @ Some(_) => configured
    // Option(...) maps a null environment lookup to None.
    case None => Option(System.getenv("SPARK_HOME"))
  }
}
/**
 * Set the thread-local property for overriding the call sites
 * of actions and RDDs. Only the short form is overridden here.
 *
 * @param shortCallSite short description shown in the UI for subsequent actions
 */
def setCallSite(shortCallSite: String) {
setLocalProperty(CallSite.SHORT_FORM, shortCallSite)
}
/**
 * Set the thread-local property for overriding the call sites
 * of actions and RDDs. Installs both the short and the long form.
 *
 * @param callSite call site whose short and long forms are stored thread-locally
 */
private[spark] def setCallSite(callSite: CallSite) {
setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm)
setLocalProperty(CallSite.LONG_FORM, callSite.longForm)
}
/**
 * Clear the thread-local call-site overrides (both the short and the long form)
 * previously installed via `setCallSite`.
 */
def clearCallSite() {
  // Setting a local property to null removes it for the current thread.
  Seq(CallSite.SHORT_FORM, CallSite.LONG_FORM).foreach(setLocalProperty(_, null))
}
/**
 * Capture the current user callsite and return a formatted version for printing. If the user
 * has overridden the call site using `setCallSite()`, this will return the user's version.
 */
private[spark] def getCallSite(): CallSite = {
  getLocalProperty(CallSite.SHORT_FORM) match {
    case null =>
      // No user override on this thread: derive the call site from the stack.
      Utils.getCallSite()
    case shortForm =>
      // Long form is optional; default to an empty string when absent.
      val longForm = Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse("")
      CallSite(shortForm, longForm)
  }
}
/**
 * Run a function on a given set of partitions in an RDD and pass the results to the given
 * handler function. This is the main entry point for all actions in Spark.
 *
 * @param rdd           target RDD to run the function on
 * @param func          function executed against each requested partition
 * @param partitions    indices of the partitions to run on
 * @param resultHandler callback invoked with (partition index, result) per partition
 * @throws IllegalStateException if this SparkContext has already been stopped
 */
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
resultHandler: (Int, U) => Unit): Unit = {
if (stopped.get()) {
throw new IllegalStateException("SparkContext has been shutdown")
}
val callSite = getCallSite
// Clean the closure eagerly so serialization problems surface on the driver.
val cleanedFunc = clean(func)
logInfo("Starting job: " + callSite.shortForm)
if (conf.getBoolean("spark.logLineage", false)) {
logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString)
}
dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get)
progressBar.foreach(_.finishAll())
// Persist any pending checkpoints now that the job has finished.
rdd.doCheckpoint()
}
/**
 * Run a function on a given set of partitions in an RDD and return the results as an
 * array, one element per requested partition (positionally matching `partitions`).
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    func: (TaskContext, Iterator[T]) => U,
    partitions: Seq[Int]): Array[U] = {
  // Preallocate one slot per requested partition and let the handler fill it in.
  val collected = new Array[U](partitions.size)
  runJob[T, U](rdd, func, partitions, (slot, value) => collected(slot) = value)
  collected
}
/**
 * Run a job on a given set of partitions of an RDD, taking a function of type
 * `Iterator[T] => U` instead of `(TaskContext, Iterator[T]) => U`.
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    func: Iterator[T] => U,
    partitions: Seq[Int]): Array[U] = {
  // Clean once up front, then adapt to the (TaskContext, Iterator) signature,
  // discarding the task context.
  val cleaned = clean(func)
  runJob(rdd, (_: TaskContext, iter: Iterator[T]) => cleaned(iter), partitions)
}
/**
 * Run a function on a given set of partitions in an RDD and pass the results to the given
 * handler function. This is the main entry point for all actions in Spark.
 *
 * The allowLocal flag is deprecated as of Spark 1.5.0+.
 */
@deprecated("use the version of runJob without the allowLocal parameter", "1.5.0")
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
allowLocal: Boolean,
resultHandler: (Int, U) => Unit): Unit = {
// allowLocal is ignored apart from this warning; delegate to the supported overload.
if (allowLocal) {
logWarning("sc.runJob with allowLocal=true is deprecated in Spark 1.5.0+")
}
runJob(rdd, func, partitions, resultHandler)
}
/**
 * Run a function on a given set of partitions in an RDD and return the results as an array.
 *
 * The allowLocal flag is deprecated as of Spark 1.5.0+.
 */
@deprecated("use the version of runJob without the allowLocal parameter", "1.5.0")
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
allowLocal: Boolean
): Array[U] = {
// allowLocal is ignored apart from this warning; delegate to the supported overload.
if (allowLocal) {
logWarning("sc.runJob with allowLocal=true is deprecated in Spark 1.5.0+")
}
runJob(rdd, func, partitions)
}
/**
 * Run a job on a given set of partitions of an RDD, but take a function of type
 * `Iterator[T] => U` instead of `(TaskContext, Iterator[T]) => U`.
 *
 * The allowLocal argument is deprecated as of Spark 1.5.0+.
 */
@deprecated("use the version of runJob without the allowLocal parameter", "1.5.0")
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: Iterator[T] => U,
partitions: Seq[Int],
allowLocal: Boolean
): Array[U] = {
// allowLocal is ignored apart from this warning; delegate to the supported overload.
if (allowLocal) {
logWarning("sc.runJob with allowLocal=true is deprecated in Spark 1.5.0+")
}
runJob(rdd, func, partitions)
}
/**
 * Run a job on every partition of an RDD and return the results in an array.
 */
def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = {
  // `indices` is exactly 0 until rdd.partitions.length.
  runJob(rdd, func, rdd.partitions.indices)
}
/**
 * Run a job on every partition of an RDD and return the results in an array.
 * The partition function does not receive a `TaskContext`.
 */
def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = {
  // `indices` is exactly 0 until rdd.partitions.length.
  runJob(rdd, func, rdd.partitions.indices)
}
/**
 * Run a job on every partition of an RDD and pass each partition's result to a
 * handler function.
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    processPartition: (TaskContext, Iterator[T]) => U,
    resultHandler: (Int, U) => Unit) {
  runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler)
}
/**
 * Run a job on every partition of an RDD and pass each partition's result to a
 * handler function. The partition function does not receive a `TaskContext`.
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    processPartition: Iterator[T] => U,
    resultHandler: (Int, U) => Unit) {
  // Adapt the context-free function to the (TaskContext, Iterator) shape inline.
  runJob[T, U](rdd, (_: TaskContext, iter: Iterator[T]) => processPartition(iter),
    rdd.partitions.indices, resultHandler)
}
/**
 * :: DeveloperApi ::
 * Run a job that can return approximate results.
 *
 * @param rdd       target RDD
 * @param func      function run against each partition
 * @param evaluator combines per-partition results into an (approximate) answer
 * @param timeout   how long to wait for the job (presumably milliseconds — confirm
 *                  against DAGScheduler.runApproximateJob)
 * @return a [[PartialResult]] holding the (possibly partial) answer
 */
@DeveloperApi
def runApproximateJob[T, U, R](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
evaluator: ApproximateEvaluator[U, R],
timeout: Long): PartialResult[R] = {
assertNotStopped()
val callSite = getCallSite
logInfo("Starting job: " + callSite.shortForm)
// Time the job with nanoTime so the reported duration is monotonic.
val start = System.nanoTime
val cleanedFunc = clean(func)
val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout,
localProperties.get)
logInfo(
"Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s")
result
}
/**
 * :: Experimental ::
 * Submit a job for execution and return a FutureJob holding the result.
 *
 * @param rdd              target RDD
 * @param processPartition function run against each selected partition
 * @param partitions       indices of the partitions to run on
 * @param resultHandler    callback invoked with (partition index, result)
 * @param resultFunc       by-name value produced for the future once the job succeeds
 */
@Experimental
def submitJob[T, U, R](
rdd: RDD[T],
processPartition: Iterator[T] => U,
partitions: Seq[Int],
resultHandler: (Int, U) => Unit,
resultFunc: => R): SimpleFutureAction[R] =
{
assertNotStopped()
// Clean eagerly so serialization problems surface before submission.
val cleanF = clean(processPartition)
val callSite = getCallSite
val waiter = dagScheduler.submitJob(
rdd,
(context: TaskContext, iter: Iterator[T]) => cleanF(iter),
partitions,
callSite,
resultHandler,
localProperties.get)
new SimpleFutureAction(waiter, resultFunc)
}
/**
 * Cancel active jobs for the specified group. See [[org.apache.spark.SparkContext.setJobGroup]]
 * for more information.
 */
def cancelJobGroup(groupId: String) {
assertNotStopped()
dagScheduler.cancelJobGroup(groupId)
}
/**
 * Cancel all jobs that have been scheduled or are running.
 */
def cancelAllJobs() {
assertNotStopped()
dagScheduler.cancelAllJobs()
}
/**
 * Cancel a given job if it's scheduled or running.
 */
private[spark] def cancelJob(jobId: Int) {
dagScheduler.cancelJob(jobId)
}
/**
 * Cancel a given stage and all jobs associated with it.
 */
private[spark] def cancelStage(stageId: Int) {
dagScheduler.cancelStage(stageId)
}
/**
 * Clean a closure to make it ready to be serialized and sent to tasks
 * (removes unreferenced variables in $outer's, updates REPL variables).
 * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively
 * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt>
 * if not.
 *
 * @param f the closure to clean
 * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability
 * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not
 *                        serializable
 * @return the same instance `f`, mutated in place by the cleaner
 */
private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = {
// ClosureCleaner strips references that would otherwise make the closure fail to
// deserialize when shipped to executors.
ClosureCleaner.clean(f, checkSerializable)
f
}
/**
 * Set the directory under which RDDs are going to be checkpointed. The directory must
 * be an HDFS path if running on a cluster.
 */
def setCheckpointDir(directory: String) {
// If we are running on a cluster, log a warning if the directory is local.
// Otherwise, the driver may attempt to reconstruct the checkpointed RDD from
// its own local file system, which is incorrect because the checkpoint files
// are actually on the executor machines.
if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) {
logWarning("Checkpoint directory must be non-local " +
"if Spark is running on a cluster: " + directory)
}
checkpointDir = Option(directory).map { dir =>
// Use a random UUID subdirectory so concurrent applications do not collide.
val path = new Path(dir, UUID.randomUUID().toString)
val fs = path.getFileSystem(hadoopConfiguration)
fs.mkdirs(path)
// Store the fully-qualified path as reported back by the file system.
fs.getFileStatus(path).getPath.toString
}
}
/** Return the directory RDD checkpoints are written to, if one has been set. */
def getCheckpointDir: Option[String] = checkpointDir
/**
 * Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD).
 * Delegates to the task scheduler, so the value depends on the deployment mode.
 */
def defaultParallelism: Int = {
assertNotStopped()
taskScheduler.defaultParallelism
}
/** Default min number of partitions for Hadoop RDDs when not given by user. */
@deprecated("use defaultMinPartitions", "1.0.0")
def defaultMinSplits: Int = math.min(defaultParallelism, 2)
/**
 * Default min number of partitions for Hadoop RDDs when not given by user.
 * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2.
 * The reasons for this are discussed in https://github.com/mesos/spark/pull/718
 */
def defaultMinPartitions: Int = math.min(defaultParallelism, 2)
// Monotonically increasing source of unique shuffle IDs for this context.
private val nextShuffleId = new AtomicInteger(0)
/** Allocate the next unique shuffle ID. */
private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement()
// Monotonically increasing source of unique RDD IDs. AtomicInteger makes the
// increment thread-safe without explicit synchronization (plain ++ would not be).
private val nextRddId = new AtomicInteger(0)
/**
 * Register a new RDD, returning its RDD ID.
 */
private[spark] def newRddId(): Int = nextRddId.getAndIncrement()
/**
 * Registers listeners specified in spark.extraListeners, then starts the listener bus.
 * This should be called after all internal listeners have been registered with the listener bus
 * (e.g. after the web UI and event logging listeners have been registered).
 */
private def setupAndStartListenerBus(): Unit = {
// Use reflection to instantiate listeners specified via `spark.extraListeners`.
try {
val listenerClassNames: Seq[String] =
conf.get("spark.extraListeners", "").split(',').map(_.trim).filter(_ != "")
for (className <- listenerClassNames) {
// Use reflection to find the right constructor: prefer one taking a SparkConf,
// otherwise fall back to a zero-argument constructor.
val constructors = {
val listenerClass = Utils.classForName(className)
listenerClass.getConstructors.asInstanceOf[Array[Constructor[_ <: SparkListener]]]
}
val constructorTakingSparkConf = constructors.find { c =>
c.getParameterTypes.sameElements(Array(classOf[SparkConf]))
}
// lazy: only searched if no SparkConf constructor was found.
lazy val zeroArgumentConstructor = constructors.find { c =>
c.getParameterTypes.isEmpty
}
val listener: SparkListener = {
if (constructorTakingSparkConf.isDefined) {
constructorTakingSparkConf.get.newInstance(conf)
} else if (zeroArgumentConstructor.isDefined) {
zeroArgumentConstructor.get.newInstance()
} else {
throw new SparkException(
s"$className did not have a zero-argument constructor or a" +
" single-argument constructor that accepts SparkConf. Note: if the class is" +
" defined inside of another Scala class, then its constructors may accept an" +
" implicit parameter that references the enclosing class; in this case, you must" +
" define the listener as a top-level class in order to prevent this extra" +
" parameter from breaking Spark's ability to find a valid constructor.")
}
}
listenerBus.addListener(listener)
logInfo(s"Registered listener $className")
}
} catch {
case e: Exception =>
// A bad user-supplied listener is fatal: stop the context, then surface the failure.
try {
stop()
} finally {
throw new SparkException(s"Exception when registering SparkListener", e)
}
}
listenerBus.start(this)
_listenerBusStarted = true
}
/**
 * Post the application start event to the listener bus.
 */
private def postApplicationStart() {
// Note: this code assumes that the task scheduler has been initialized and has contacted
// the cluster manager to get an application ID (in case the cluster manager provides one).
listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId),
startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls))
}
/**
 * Post the application end event, stamped with the current wall-clock time.
 */
private def postApplicationEnd() {
listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis))
}
/**
 * Post an environment-update event once the task scheduler is ready. A no-op while
 * `taskScheduler` is still null (i.e. before scheduler initialization completes).
 */
private def postEnvironmentUpdate() {
  if (taskScheduler != null) {
    // Snapshot the scheduling mode plus the paths of all jars/files added so far.
    val details = SparkEnv.environmentDetails(
      conf,
      getSchedulingMode.toString,
      addedJars.keys.toSeq,
      addedFiles.keys.toSeq)
    listenerBus.post(SparkListenerEnvironmentUpdate(details))
  }
}
/** Called by MetadataCleaner to clean up the persistentRdds map periodically. */
private[spark] def cleanup(cleanupTime: Long) {
// Drop cached-RDD bookkeeping entries older than the given timestamp.
persistentRdds.clearOldValues(cleanupTime)
}
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having finished construction (transition from "being constructed" to "active").
// NOTE: this must be placed at the end of the SparkContext constructor.
SparkContext.setActiveContext(this, allowMultipleContexts)
}
/**
* The SparkContext object contains a number of implicit conversions and parameters for use with
* various Spark features.
 * SparkContext对象包含用于各种Spark特性的隐式转换和参数。
*/
object SparkContext extends Logging {
/**
 * Lock that guards access to global variables that track SparkContext construction.
 */
private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object()
/**
 * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`.
 * Access to this field is guarded by SPARK_CONTEXT_CONSTRUCTOR_LOCK; the AtomicReference
 * additionally makes the reference itself safe to publish across threads.
 */
private val activeContext: AtomicReference[SparkContext] =
new AtomicReference[SparkContext](null)
/**
 * Points to a partially-constructed SparkContext if some thread is in the SparkContext
 * constructor, or `None` if no SparkContext is being constructed.
 *
 * Access to this field is guarded by SPARK_CONTEXT_CONSTRUCTOR_LOCK.
 */
private var contextBeingConstructed: Option[SparkContext] = None
/**
 * Called to ensure that no other SparkContext is running in this JVM.
 * Throws an exception if a running context is detected and logs a warning if another thread is
 * constructing a SparkContext. This warning is necessary because the current locking scheme
 * prevents us from reliably distinguishing between cases where another context is being
 * constructed and cases where another constructor threw an exception.
 */
private def assertNoOtherContextIsRunning(
sc: SparkContext,
allowMultipleContexts: Boolean): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
contextBeingConstructed.foreach { otherContext =>
if (otherContext ne sc) { // checks for reference equality
// Since otherContext might point to a partially-constructed context, guard against
// its creationSite field being null:
val otherContextCreationSite =
Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location")
val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" +
" constructor). This may indicate an error, since only one SparkContext may be" +
" running in this JVM (see SPARK-2243)." +
s" The other SparkContext was created at:\n$otherContextCreationSite"
logWarning(warnMsg)
}
// A fully-active context is a hard error unless multiple contexts are explicitly allowed.
if (activeContext.get() != null) {
val ctx = activeContext.get()
val errMsg = "Only one SparkContext may be running in this JVM (see SPARK-2243)." +
" To ignore this error, set spark.driver.allowMultipleContexts = true. " +
s"The currently running SparkContext was created at:\n${ctx.creationSite.longForm}"
val exception = new SparkException(errMsg)
if (allowMultipleContexts) {
logWarning("Multiple running SparkContexts detected in the same JVM!", exception)
} else {
throw exception
}
}
}
}
}
/**
 * This function may be used to get or instantiate a SparkContext and register it as a
 * singleton object. Because we can only have one active SparkContext per JVM,
 * this is useful when applications may wish to share a SparkContext.
 *
 * Note: This function cannot be used to create multiple SparkContext instances
 * even if multiple contexts are allowed.
 *
 * @param config configuration used only when no context is active yet
 */
def getOrCreate(config: SparkConf): SparkContext = {
// Synchronize to ensure that multiple create requests don't trigger an exception
// from assertNoOtherContextIsRunning within setActiveContext
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
if (activeContext.get() == null) {
setActiveContext(new SparkContext(config), allowMultipleContexts = false)
}
activeContext.get()
}
}
/**
 * This function may be used to get or instantiate a SparkContext and register it as a
 * singleton object. Because we can only have one active SparkContext per JVM,
 * this is useful when applications may wish to share a SparkContext.
 *
 * This method allows not passing a SparkConf (useful if just retrieving).
 *
 * Note: This function cannot be used to create multiple SparkContext instances
 * even if multiple contexts are allowed.
 */
def getOrCreate(): SparkContext = {
getOrCreate(new SparkConf())
}
/**
 * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is
 * running. Throws an exception if a running context is detected and logs a warning if another
 * thread is constructing a SparkContext. This warning is necessary because the current locking
 * scheme prevents us from reliably distinguishing between cases where another context is being
 * constructed and cases where another constructor threw an exception.
 */
private[spark] def markPartiallyConstructed(
sc: SparkContext,
allowMultipleContexts: Boolean): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
assertNoOtherContextIsRunning(sc, allowMultipleContexts)
contextBeingConstructed = Some(sc) // flag this context as under construction
}
}
/**
 * Called at the end of the SparkContext constructor to ensure that no other SparkContext has
 * raced with this constructor and started. Transitions the given context from
 * "being constructed" to "active".
 */
private[spark] def setActiveContext(
sc: SparkContext,
allowMultipleContexts: Boolean): Unit = {
// Both tracking variables are guarded by the constructor lock.
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
// Ensure no other SparkContext became active while we were constructing.
assertNoOtherContextIsRunning(sc, allowMultipleContexts)
// Construction is finished: clear the in-progress marker...
contextBeingConstructed = None
// ...and publish this context as the single active one.
activeContext.set(sc)
}
}
/**
 * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's
 * also called in unit tests to prevent a flood of warnings from test suites that don't / can't
 * properly clean up their SparkContexts.
 */
private[spark] def clearActiveContext(): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
activeContext.set(null)
}
}
// Keys for thread-local job properties understood by the scheduler.
private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description"
private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id"
private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel"
private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope"
private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride"
/**
 * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was
 * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see
 * SPARK-6716 for more details).
 */
private[spark] val DRIVER_IDENTIFIER = "driver"
/**
 * Legacy version of DRIVER_IDENTIFIER, retained for backwards-compatibility.
 */
private[spark] val LEGACY_DRIVER_IDENTIFIER = "<driver>"
// The following deprecated objects have already been copied to `object AccumulatorParam` to
// make the compiler find them automatically. They are duplicate codes only for backward
// compatibility, please update `object AccumulatorParam` accordingly if you plan to modify the
// following ones.
// Note: every `zero` below ignores its initialValue and returns the additive identity.
@deprecated("Replaced by implicit objects in AccumulatorParam. This is kept here only for " +
"backward compatibility.", "1.3.0")
object DoubleAccumulatorParam extends AccumulatorParam[Double] {
def addInPlace(t1: Double, t2: Double): Double = t1 + t2
def zero(initialValue: Double): Double = 0.0
}
@deprecated("Replaced by implicit objects in AccumulatorParam. This is kept here only for " +
"backward compatibility.", "1.3.0")
object IntAccumulatorParam extends AccumulatorParam[Int] {
def addInPlace(t1: Int, t2: Int): Int = t1 + t2
def zero(initialValue: Int): Int = 0
}
@deprecated("Replaced by implicit objects in AccumulatorParam. This is kept here only for " +
"backward compatibility.", "1.3.0")
object LongAccumulatorParam extends AccumulatorParam[Long] {
def addInPlace(t1: Long, t2: Long): Long = t1 + t2
def zero(initialValue: Long): Long = 0L
}
@deprecated("Replaced by implicit objects in AccumulatorParam. This is kept here only for " +
"backward compatibility.", "1.3.0")
object FloatAccumulatorParam extends AccumulatorParam[Float] {
def addInPlace(t1: Float, t2: Float): Float = t1 + t2
def zero(initialValue: Float): Float = 0f
}
// The following deprecated functions have already been moved to `object RDD` to
// make the compiler find them automatically. They are still kept here for backward compatibility
// and are thin forwarders that just call the corresponding functions in `object RDD`.
@deprecated("Replaced by implicit functions in the RDD companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
def rddToPairRDDFunctions[K, V](rdd: RDD[(K, V)])
(implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null): PairRDDFunctions[K, V] =
RDD.rddToPairRDDFunctions(rdd)
@deprecated("Replaced by implicit functions in the RDD companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
def rddToAsyncRDDActions[T: ClassTag](rdd: RDD[T]): AsyncRDDActions[T] =
RDD.rddToAsyncRDDActions(rdd)
@deprecated("Replaced by implicit functions in the RDD companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
def rddToSequenceFileRDDFunctions[K <% Writable: ClassTag, V <% Writable: ClassTag](
rdd: RDD[(K, V)]): SequenceFileRDDFunctions[K, V] = {
val kf = implicitly[K => Writable]
val vf = implicitly[V => Writable]
// Set the Writable class to null and `SequenceFileRDDFunctions` will use Reflection to get it
implicit val keyWritableFactory = new WritableFactory[K](_ => null, kf)
implicit val valueWritableFactory = new WritableFactory[V](_ => null, vf)
RDD.rddToSequenceFileRDDFunctions(rdd)
}
@deprecated("Replaced by implicit functions in the RDD companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
def rddToOrderedRDDFunctions[K : Ordering : ClassTag, V: ClassTag](
rdd: RDD[(K, V)]): OrderedRDDFunctions[K, V, (K, V)] =
RDD.rddToOrderedRDDFunctions(rdd)
@deprecated("Replaced by implicit functions in the RDD companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
def doubleRDDToDoubleRDDFunctions(rdd: RDD[Double]): DoubleRDDFunctions =
RDD.doubleRDDToDoubleRDDFunctions(rdd)
@deprecated("Replaced by implicit functions in the RDD companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
def numericRDDToDoubleRDDFunctions[T](rdd: RDD[T])(implicit num: Numeric[T]): DoubleRDDFunctions =
RDD.numericRDDToDoubleRDDFunctions(rdd)
// The following deprecated functions have already been moved to `object WritableFactory` to
// make the compiler find them automatically. They are still kept here for backward compatibility.
// Each one wraps a Scala primitive/string/array in the corresponding Hadoop Writable.
@deprecated("Replaced by implicit functions in the WritableFactory companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
implicit def intToIntWritable(i: Int): IntWritable = new IntWritable(i)
@deprecated("Replaced by implicit functions in the WritableFactory companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
implicit def longToLongWritable(l: Long): LongWritable = new LongWritable(l)
@deprecated("Replaced by implicit functions in the WritableFactory companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
implicit def floatToFloatWritable(f: Float): FloatWritable = new FloatWritable(f)
@deprecated("Replaced by implicit functions in the WritableFactory companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
implicit def doubleToDoubleWritable(d: Double): DoubleWritable = new DoubleWritable(d)
@deprecated("Replaced by implicit functions in the WritableFactory companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
implicit def boolToBoolWritable (b: Boolean): BooleanWritable = new BooleanWritable(b)
@deprecated("Replaced by implicit functions in the WritableFactory companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
implicit def bytesToBytesWritable (aob: Array[Byte]): BytesWritable = new BytesWritable(aob)
@deprecated("Replaced by implicit functions in the WritableFactory companion object. This is " +
"kept here only for backward compatibility.", "1.3.0")
implicit def stringToText(s: String): Text = new Text(s)
// Convert a collection of view-convertible-to-Writable elements into an ArrayWritable.
private implicit def arrayToArrayWritable[T <% Writable: ClassTag](arr: Traversable[T])
: ArrayWritable = {
def anyToWritable[U <% Writable](u: U): Writable = u
new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]],
arr.map(x => anyToWritable(x)).toArray)
}
// The following deprecated functions have already been moved to `object WritableConverter` to
// make the compiler find them automatically. They are still kept here for backward compatibility
// and just call the corresponding functions in `object WritableConverter`.
@deprecated("Replaced by implicit functions in WritableConverter. This is kept here only for " +
"backward compatibility.", "1.3.0")
def intWritableConverter(): WritableConverter[Int] =
WritableConverter.intWritableConverter()
@deprecated("Replaced by implicit functions in WritableConverter. This is kept here only for " +
"backward compatibility.", "1.3.0")
def longWritableConverter(): WritableConverter[Long] =
WritableConverter.longWritableConverter()
@deprecated("Replaced by implicit functions in WritableConverter. This is kept here only for " +
"backward compatibility.", "1.3.0")
def doubleWritableConverter(): WritableConverter[Double] =
WritableConverter.doubleWritableConverter()
@deprecated("Replaced by implicit functions in WritableConverter. This is kept here only for " +
"backward compatibility.", "1.3.0")
def floatWritableConverter(): WritableConverter[Float] =
WritableConverter.floatWritableConverter()
@deprecated("Replaced by implicit functions in WritableConverter. This is kept here only for " +
"backward compatibility.", "1.3.0")
def booleanWritableConverter(): WritableConverter[Boolean] =
WritableConverter.booleanWritableConverter()
@deprecated("Replaced by implicit functions in WritableConverter. This is kept here only for " +
"backward compatibility.", "1.3.0")
def bytesWritableConverter(): WritableConverter[Array[Byte]] =
WritableConverter.bytesWritableConverter()
@deprecated("Replaced by implicit functions in WritableConverter. This is kept here only for " +
"backward compatibility.", "1.3.0")
def stringWritableConverter(): WritableConverter[String] =
WritableConverter.stringWritableConverter()
@deprecated("Replaced by implicit functions in WritableConverter. This is kept here only for " +
"backward compatibility.", "1.3.0")
def writableWritableConverter[T <: Writable](): WritableConverter[T] =
WritableConverter.writableWritableConverter()
/**
 * Find the JAR from which a given class was loaded, to make it easy for users to pass
 * their JARs to SparkContext.
 *
 * Returns None when the class has no resource URL (e.g. generated classes) or when it
 * was not loaded from a local jar file.
 */
def jarOfClass(cls: Class[_]): Option[String] = {
  val resource = cls.getResource("/" + cls.getName.replace('.', '/') + ".class")
  Option(resource)
    .map(_.toString)
    // A jar-loaded class yields "jar:file:/path/foo.jar!/package/cls.class";
    // anything else (file:, jrt:, ...) means we cannot name a jar.
    .filter(_.startsWith("jar:file:"))
    // Pull out the "/path/foo.jar" part between the scheme prefix and the '!'.
    .map(url => url.substring("jar:file:".length, url.indexOf('!')))
}
/**
 * Find the JAR that contains the class of a particular object, to make it easy for users
 * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in
 * your driver program.
 *
 * Delegates to [[jarOfClass]] on the object's runtime class.
 */
def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass)
/**
 * Creates a modified version of a SparkConf with the parameters that can be passed separately
 * to SparkContext, to make it easier to write SparkContext's constructors. This ignores
 * parameters that are passed as the default value of null, instead of throwing an exception
 * like SparkConf would.
 */
private[spark] def updatedConf(
    conf: SparkConf,
    master: String,
    appName: String,
    sparkHome: String = null,
    jars: Seq[String] = Nil,
    environment: Map[String, String] = Map()): SparkConf =
{
  val updated = conf.clone()
  updated.setMaster(master)
  updated.setAppName(appName)
  // A null sparkHome is silently ignored rather than rejected.
  Option(sparkHome).foreach(updated.setSparkHome)
  // Guard against both a null reference and an empty jar list.
  if (jars != null && jars.nonEmpty) {
    updated.setJars(jars)
  }
  updated.setExecutorEnv(environment.toSeq)
  updated
}
/**
 * The number of driver cores to use for execution in local mode, 0 otherwise.
 */
private[spark] def numDriverCores(master: String): Int = {
  // "*" means one thread per processor available to the JVM.
  def parseThreadCount(spec: String): Int = spec match {
    case "*" => Runtime.getRuntime.availableProcessors()
    case n => n.toInt
  }
  master match {
    case "local" => 1
    case SparkMasterRegex.LOCAL_N_REGEX(threads) => parseThreadCount(threads)
    case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => parseThreadCount(threads)
    case _ => 0 // The driver is not used for task execution in non-local modes.
  }
}
/**
 * Create a task scheduler based on a given master URL.
 * Return a 2-tuple of the scheduler backend and the task scheduler.
 *
 * Matches the master string against the supported deployment modes (local variants,
 * standalone, local-cluster, YARN, Mesos, SIMR) and wires a TaskSchedulerImpl to the
 * matching SchedulerBackend. The YARN schedulers/backends are instantiated reflectively
 * because the YARN classes are only present when Spark is built with YARN support.
 */
private def createTaskScheduler(
    sc: SparkContext,
    master: String): (SchedulerBackend, TaskScheduler) = {
  import SparkMasterRegex._
  // When running locally, don't try to re-execute tasks on failure.
  val MAX_LOCAL_TASK_FAILURES = 1

  master match {
    case "local" =>
      val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
      // Single-threaded local execution.
      val backend = new LocalBackend(sc.getConf, scheduler, 1)
      scheduler.initialize(backend)
      (backend, scheduler)

    case LOCAL_N_REGEX(threads) =>
      def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
      // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads.
      val threadCount = if (threads == "*") localCpuCount else threads.toInt
      if (threadCount <= 0) {
        throw new SparkException(s"Asked to run locally with $threadCount threads")
      }
      val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
      val backend = new LocalBackend(sc.getConf, scheduler, threadCount)
      scheduler.initialize(backend)
      (backend, scheduler)

    case LOCAL_N_FAILURES_REGEX(threads, maxFailures) =>
      def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
      // local[*, M] means the number of cores on the computer with M failures
      // local[N, M] means exactly N threads with M failures
      val threadCount = if (threads == "*") localCpuCount else threads.toInt
      val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true)
      val backend = new LocalBackend(sc.getConf, scheduler, threadCount)
      scheduler.initialize(backend)
      (backend, scheduler)

    case SPARK_REGEX(sparkUrl) =>
      // Standalone mode; the master URL may list several masters separated by commas.
      val scheduler = new TaskSchedulerImpl(sc)
      val masterUrls = sparkUrl.split(",").map("spark://" + _)
      val backend = new SparkDeploySchedulerBackend(scheduler, sc, masterUrls)
      scheduler.initialize(backend)
      (backend, scheduler)

    // local-cluster[numSlaves, coresPerSlave, memoryPerSlave]: an in-process
    // simulated cluster, mainly used for testing.
    case LOCAL_CLUSTER_REGEX(numSlaves, coresPerSlave, memoryPerSlave) =>
      // Check to make sure memory requested <= memoryPerSlave. Otherwise Spark will just hang.
      val memoryPerSlaveInt = memoryPerSlave.toInt
      if (sc.executorMemory > memoryPerSlaveInt) {
        throw new SparkException(
          "Asked to launch cluster with %d MB RAM / worker but requested %d MB/worker".format(
            memoryPerSlaveInt, sc.executorMemory))
      }
      val scheduler = new TaskSchedulerImpl(sc)
      val localCluster = new LocalSparkCluster(
        numSlaves.toInt, coresPerSlave.toInt, memoryPerSlaveInt, sc.conf)
      val masterUrls = localCluster.start()
      val backend = new SparkDeploySchedulerBackend(scheduler, sc, masterUrls)
      scheduler.initialize(backend)
      // Tear the simulated cluster down when the backend shuts down (local-cluster only).
      backend.shutdownCallback = (backend: SparkDeploySchedulerBackend) => {
        localCluster.stop()
      }
      (backend, scheduler)

    case "yarn-standalone" | "yarn-cluster" =>
      if (master == "yarn-standalone") {
        logWarning(
          "\"yarn-standalone\" is deprecated as of Spark 1.0. Use \"yarn-cluster\" instead.")
      }
      // Instantiated reflectively: the YARN classes live in a separate module that
      // is only on the classpath for YARN-enabled builds.
      val scheduler = try {
        val clazz = Utils.classForName("org.apache.spark.scheduler.cluster.YarnClusterScheduler")
        val cons = clazz.getConstructor(classOf[SparkContext])
        cons.newInstance(sc).asInstanceOf[TaskSchedulerImpl]
      } catch {
        // TODO: Enumerate the exact reasons why it can fail
        // But irrespective of it, it means we cannot proceed !
        case e: Exception => {
          throw new SparkException("YARN mode not available ?", e)
        }
      }
      val backend = try {
        val clazz =
          Utils.classForName("org.apache.spark.scheduler.cluster.YarnClusterSchedulerBackend")
        val cons = clazz.getConstructor(classOf[TaskSchedulerImpl], classOf[SparkContext])
        cons.newInstance(scheduler, sc).asInstanceOf[CoarseGrainedSchedulerBackend]
      } catch {
        case e: Exception => {
          throw new SparkException("YARN mode not available ?", e)
        }
      }
      scheduler.initialize(backend)
      (backend, scheduler)

    case "yarn-client" =>
      // Same reflective pattern as yarn-cluster, with the client-mode classes.
      val scheduler = try {
        val clazz = Utils.classForName("org.apache.spark.scheduler.cluster.YarnScheduler")
        val cons = clazz.getConstructor(classOf[SparkContext])
        cons.newInstance(sc).asInstanceOf[TaskSchedulerImpl]
      } catch {
        case e: Exception => {
          throw new SparkException("YARN mode not available ?", e)
        }
      }
      val backend = try {
        val clazz =
          Utils.classForName("org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend")
        val cons = clazz.getConstructor(classOf[TaskSchedulerImpl], classOf[SparkContext])
        cons.newInstance(scheduler, sc).asInstanceOf[CoarseGrainedSchedulerBackend]
      } catch {
        case e: Exception => {
          throw new SparkException("YARN mode not available ?", e)
        }
      }
      scheduler.initialize(backend)
      (backend, scheduler)

    case mesosUrl @ MESOS_REGEX(_) =>
      MesosNativeLibrary.load()
      val scheduler = new TaskSchedulerImpl(sc)
      // Coarse-grained mode holds executors for the app's lifetime; fine-grained
      // launches one Mesos task per Spark task.
      val coarseGrained = sc.conf.getBoolean("spark.mesos.coarse", false)
      val url = mesosUrl.stripPrefix("mesos://") // strip scheme from raw Mesos URLs
      val backend = if (coarseGrained) {
        new CoarseMesosSchedulerBackend(scheduler, sc, url, sc.env.securityManager)
      } else {
        new MesosSchedulerBackend(scheduler, sc, url)
      }
      scheduler.initialize(backend)
      (backend, scheduler)

    case SIMR_REGEX(simrUrl) =>
      val scheduler = new TaskSchedulerImpl(sc)
      val backend = new SimrSchedulerBackend(scheduler, sc, simrUrl)
      scheduler.initialize(backend)
      (backend, scheduler)

    case _ =>
      throw new SparkException("Could not parse Master URL: '" + master + "'")
  }
}
}
/**
 * A collection of regexes for extracting information from the master string.
 */
private object SparkMasterRegex {
  // Regular expression used for local[N] and local[*] master formats
  val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r
  // Regular expression for local[N, maxRetries], used in tests with failing tasks
  val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r
  // Regular expression for simulating a Spark cluster of [N, cores, memory] locally
  val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r
  // Regular expression for connecting to Spark deploy clusters
  val SPARK_REGEX = """spark://(.*)""".r
  // Regular expression for connection to Mesos cluster by mesos:// or zk:// url
  val MESOS_REGEX = """(mesos|zk)://.*""".r
  // Regular expression for connection to Simr (Spark-in-MapReduce) cluster
  val SIMR_REGEX = """simr://(.*)""".r
}
/**
 * A class encapsulating how to convert some type T to Writable. It stores both the Writable class
 * corresponding to T (e.g. IntWritable for Int) and a function for doing the conversion.
 * The getter for the writable class takes a ClassTag[T] in case this is a generic object
 * that doesn't know the type of T when it is created. This sounds strange but is necessary to
 * support converting subclasses of Writable to themselves (writableWritableConverter).
 */
private[spark] class WritableConverter[T](
    val writableClass: ClassTag[T] => Class[_ <: Writable],
    val convert: Writable => T)
  extends Serializable
object WritableConverter {

  // Helper for building a converter from a common type: captures the concrete
  // Writable class W and wraps the conversion with a downcast from Writable to W.
  private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T)
    : WritableConverter[T] = {
    val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]]
    new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W]))
  }

  // The following implicit functions were in SparkContext before 1.3 and users had to
  // `import SparkContext._` to enable them. Now we move them here to make the compiler find
  // them automatically. However, we still keep the old functions in SparkContext for backward
  // compatibility and forward to the following functions directly.

  implicit def intWritableConverter(): WritableConverter[Int] =
    simpleWritableConverter[Int, IntWritable](_.get)

  implicit def longWritableConverter(): WritableConverter[Long] =
    simpleWritableConverter[Long, LongWritable](_.get)

  implicit def doubleWritableConverter(): WritableConverter[Double] =
    simpleWritableConverter[Double, DoubleWritable](_.get)

  implicit def floatWritableConverter(): WritableConverter[Float] =
    simpleWritableConverter[Float, FloatWritable](_.get)

  implicit def booleanWritableConverter(): WritableConverter[Boolean] =
    simpleWritableConverter[Boolean, BooleanWritable](_.get)

  implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = {
    simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes method returns array which is longer than the data to be returned,
      // so copy only the first getLength bytes.
      Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
    }
  }

  implicit def stringWritableConverter(): WritableConverter[String] =
    simpleWritableConverter[String, Text](_.toString)

  // A Writable subtype is converted to itself; the ClassTag supplies the runtime class.
  implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] =
    new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
}
/**
 * A class encapsulating how to convert some type T to Writable. It stores both the Writable class
 * corresponding to T (e.g. IntWritable for Int) and a function for doing the conversion.
 * The Writable class will be used in `SequenceFileRDDFunctions`.
 */
private[spark] class WritableFactory[T](
    val writableClass: ClassTag[T] => Class[_ <: Writable],
    val convert: T => Writable) extends Serializable
object WritableFactory {

  /**
   * Builds a WritableFactory from a plain T => W conversion, where W is the concrete
   * Writable type whose runtime class is captured from its ClassTag.
   */
  private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W)
    : WritableFactory[T] = {
    val wClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]]
    new WritableFactory[T](_ => wClass, convert)
  }

  // One implicit instance per commonly serialized type.
  implicit def intWritableFactory: WritableFactory[Int] =
    simpleWritableFactory(x => new IntWritable(x))

  implicit def longWritableFactory: WritableFactory[Long] =
    simpleWritableFactory(x => new LongWritable(x))

  implicit def floatWritableFactory: WritableFactory[Float] =
    simpleWritableFactory(x => new FloatWritable(x))

  implicit def doubleWritableFactory: WritableFactory[Double] =
    simpleWritableFactory(x => new DoubleWritable(x))

  implicit def booleanWritableFactory: WritableFactory[Boolean] =
    simpleWritableFactory(x => new BooleanWritable(x))

  implicit def bytesWritableFactory: WritableFactory[Array[Byte]] =
    simpleWritableFactory(x => new BytesWritable(x))

  implicit def stringWritableFactory: WritableFactory[String] =
    simpleWritableFactory(x => new Text(x))

  // A Writable subtype maps to itself unchanged.
  implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] =
    simpleWritableFactory(w => w)
}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/SparkContext.scala | Scala | apache-2.0 | 152,192 |
package com.atanana
import java.io.PrintWriter
import scala.io.Source
import scala.util.Try
/** Minimal filesystem helper for reading and writing whole text files. */
class FsHandler {

  /**
   * Reads the given file and returns its lines concatenated into a single string.
   * Note: line separators are NOT preserved (`mkString` with no separator).
   * Any I/O failure is captured in the returned Try; the source is always closed.
   */
  def readFile(filename: String): Try[String] = Try {
    val source = Source.fromFile(filename)
    try source.getLines().mkString
    finally source.close()
  }

  /**
   * Writes `contents` to `filename`, replacing any existing file.
   * I/O errors propagate as exceptions; the writer is always closed.
   */
  def writeFile(contents: String, filename: String): Unit = {
    val writer = new PrintWriter(filename)
    try writer.print(contents)
    finally writer.close()
  }
}
| atanana/rating-bot | src/main/scala/com/atanana/FsHandler.scala | Scala | mit | 518 |
package com.landoop.avro4sui
import java.util.Scanner
import com.sksamuel.avro4s.json.JsonToAvroConverter
import javax.servlet.http.{HttpServletResponse, HttpServletRequest, HttpServlet}
import com.sksamuel.avro4s.{TemplateGenerator, ModuleGenerator}
import org.eclipse.jetty.server.Server
import org.eclipse.jetty.servlet.{ServletHolder, ServletContextHandler}
/** Boots an embedded Jetty server exposing the avro4s conversion servlet at /avro4s. */
trait Avro4sEndpoint {
  // Starts Jetty on the given port with a single servlet mounted at /avro4s.
  // Side effects only: the server keeps running after this method returns.
  def server(port: Int): Unit = {
    println(s"Starting alerting on $port")
    val handler = new ServletContextHandler
    handler.setContextPath("/")
    // We try all possible transformations before failing
    val servletAvro4s = new ServletHolder(new Avro4sServlet)
    servletAvro4s.setAsyncSupported(true)
    handler.addServlet(servletAvro4s, "/avro4s")
    val server = new Server(port)
    server.setHandler(handler)
    server.start()
    println(s"Servlet for 'avro4s' convertions listening on port: $port")
  }
}
/**
 * Servlet backing the /avro4s endpoint. For a POST it reads a single-line payload and
 * tries, in order, to interpret it as (1) an Avro schema to be turned into Scala case
 * classes and (2) a JSON document to be turned into an Avro schema. Each conversion
 * that succeeds is appended to the response as "<label>###<result>###"; failed
 * attempts are logged and skipped. OPTIONS requests get a CORS preflight reply.
 */
class Avro4sServlet extends HttpServlet {
  override def service(req: HttpServletRequest, resp: HttpServletResponse): Unit = {
    println("Servicing")
    if (req.getMethod.equalsIgnoreCase("OPTIONS")) {
      // CORS preflight: advertise allowed methods/headers and return an empty body.
      println("OPTIONS")
      resp.setContentType("text/plain")
      resp.setHeader("Access-Control-Allow-Origin", "*")
      resp.setHeader("Access-Control-Allow-Methods", "POST, OPTIONS")
      resp.setHeader("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Key")
      resp.getWriter.write("")
    } else
    if (req.getMethod.equalsIgnoreCase("POST")) {
      println("POST")
      // NOTE(review): only the first line of the request body is read, so multi-line
      // payloads are silently truncated — confirm callers always send one line.
      val postRequestObject = new Scanner(req.getInputStream, "UTF-8").nextLine
      println(postRequestObject)
      println("Detected POST with payload:" + postRequestObject)
      var response = ""
      // Attempt 1: treat the payload as an Avro schema and generate Scala sources.
      try {
        val modules = ModuleGenerator(postRequestObject)
        val templates = TemplateGenerator(modules)
        templates.foreach(t => println(t.definition + " ---- " + t.file))
        response+="scala###" + templates.head.definition + "###"
        println("[Avro] detected !!");
      } catch {
        case (e:Throwable) => println("Avro [NOT] Detected"); println(e)
      }
      // Attempt 2: treat the payload as JSON and derive an Avro schema from it.
      try {
        val schema = new JsonToAvroConverter("com.test.avro").convert("MyClass", postRequestObject)
        val schemaString = schema.toString(true)
        println(schemaString)
        response+="avro###" + schemaString + "###"
        println("[JSon] detected !!");
      } catch {
        case (e:Throwable) => println("JSon [NOT] Detected"); println(e)
      }
      resp.setContentType("text/plain")
      resp.setHeader("Access-Control-Allow-Origin", "*")
      resp.setHeader("Access-Control-Allow-Methods", "POST, OPTIONS")
      resp.setHeader("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, Key")
      resp.getWriter.write(response)
    }
  }
}
/**
 * Entry point: prints curl usage examples and starts the conversion server on port 1082.
 * NOTE(review): the `App` trait runs this body via delayed initialization, which has
 * known ordering pitfalls; a plain `def main` would be more robust.
 */
object App extends Avro4sEndpoint with App {
  println("Test Avro => Scala with:")
  println(""" curl -X POST --data '{ "type" : "record", "name" : "MyClass", "namespace" : "com.test.avro", "fields" : [ { "name" : "foo", "type" : { "type" : "array", "items" : "boolean" } } ] }' http://localhost:1082/avro4s""")
  println("Test Json => Avro with:")
  // Strip control characters (anything below space) so the example fits on one line.
  val exampleJson = """ {"menu": { "id": "file", "value": "File", "popup": { "menuitem": [ {"value": "New" }, {"value": "Open" }, {"value": "Close"} ] } }} """.filter(_ >= ' ')
  println(s""" curl -X POST --data '$exampleJson' http://localhost:1082/avro4s""")
  // https://www.landoop.com/avro4s/avro4s
  server(1082)
}
| Landoop/avro4s-ui | src/main/scala/com.landoop.avro4sui/App.scala | Scala | mit | 3,513 |
package org.openmole.plugin.method.evolution
import org.openmole.core.dsl._
import org.openmole.core.dsl.extension._
import org.openmole.tool.types.ToDouble
object DeltaTask {

  /**
   * Builds a task that replaces each listed variable's value in the context with the
   * absolute difference between that value and its target objective. The variables are
   * declared as both inputs and outputs, so downstream tasks see the deltas under the
   * original variable names.
   */
  def apply(objective: Delta*)(implicit name: sourcecode.Name, definitionScope: DefinitionScope) =
    Task("DeltaTask") { p ⇒
      import p._
      context ++ objective.map {
        case DeltaDouble(v, o) ⇒ Variable(v, math.abs(context(v) - o))
        case DeltaInt(v, o) ⇒ Variable(v, math.abs(context(v) - o))
        case DeltaLong(v, o) ⇒ Variable(v, math.abs(context(v) - o))
      }
    } set (
      (inputs, outputs) += (objective.map(Delta.v): _*)
    )

  // One case per supported numeric prototype type.
  sealed trait Delta
  case class DeltaDouble(v: Val[Double], objective: Double) extends Delta
  case class DeltaInt(v: Val[Int], objective: Int) extends Delta
  case class DeltaLong(v: Val[Long], objective: Long) extends Delta

  object Delta {
    // Implicit conversions so users can write `(val -> target)` tuples directly.
    implicit def fromTupleDouble[T](t: (Val[Double], T))(implicit toDouble: ToDouble[T]) = DeltaDouble(t._1, toDouble(t._2))
    implicit def fromTupleInt(t: (Val[Int], Int)) = DeltaInt(t._1, t._2)
    implicit def fromTupleLong(t: (Val[Long], Long)) = DeltaLong(t._1, t._2)

    // Extracts the prototype (Val) from any Delta variant.
    def v(delta: Delta) =
      delta match {
        case DeltaDouble(v, _) ⇒ v
        case DeltaInt(v, _) ⇒ v
        case DeltaLong(v, _) ⇒ v
      }
  }
}
object Delta {
  import org.openmole.core.workflow.builder.DefinitionScope

  /** Appends a DeltaTask computing the given objectives after the supplied workflow. */
  def apply(dsl: DSL, objective: DeltaTask.Delta*)(implicit definitionScope: DefinitionScope) =
    dsl -- DeltaTask(objective: _*)
}
| openmole/openmole | openmole/plugins/org.openmole.plugin.method.evolution/src/main/scala/org/openmole/plugin/method/evolution/DeltaTask.scala | Scala | agpl-3.0 | 1,566 |
package lore.compiler.poem
import lore.compiler.semantics.NamePath
/**
 * A concrete instantiation of a poem function: the function's name together with the
 * type arguments it is applied to. Rendered as `name[T1, T2, ...]`.
 */
case class PoemFunctionInstance(name: NamePath, typeArguments: Vector[PoemType]) {
  override def toString: String = {
    val renderedArguments = typeArguments.mkString(", ")
    s"$name[$renderedArguments]"
  }
}
| marcopennekamp/lore | compiler/src/lore/compiler/poem/PoemFunctionInstance.scala | Scala | mit | 230 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.tailhq.dynaml.models.lm
//Breeze Imports
import breeze.linalg.DenseVector
import breeze.numerics.sigmoid
import breeze.stats.distributions.Gaussian
import io.github.tailhq.dynaml.optimization.ProbitGradient
import org.apache.spark.mllib.linalg.Vectors
//DynaML Imports
import io.github.tailhq.dynaml.optimization.{
GradientDescentSpark, LogisticGradient,
RegularizedOptimizer, SquaredL2Updater}
//Spark Imports
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
/**
 * @author tailhq date: 25/01/2017.
 *
 * Implementation of logistic regression model trained on Apache Spark [[RDD]]
 * */
class SparkLogisticGLM(
  data: RDD[(DenseVector[Double], Double)], numPoints: Long,
  map: (DenseVector[Double]) => DenseVector[Double] =
  identity[DenseVector[Double]]) extends GenericGLM[
  RDD[(DenseVector[Double], Double)],
  RDD[LabeledPoint]](data, numPoints, map) {

  // Converts each (features, label) pair into a Spark LabeledPoint, applying the
  // feature map and appending a constant 1.0 component for the intercept term.
  override def prepareData(d: RDD[(DenseVector[Double], Double)]) = d.map(pattern =>
    new LabeledPoint(
      pattern._2,
      Vectors.dense(featureMap(pattern._1).toArray ++ Array(1.0)))
  )

  // First feature vector of the data set, used only to measure dimensionality.
  // NOTE(review): `g.first()` triggers a Spark action — presumably `g` is the
  // training RDD held by the superclass; confirm.
  private lazy val sample_input = g.first()._1

  /**
   * The link function: the logistic sigmoid.
   * (Earlier comment claimed "identity map", which did not match the code.)
   */
  override val h: (Double) => Double = sigmoid(_)

  override protected var params: DenseVector[Double] = initParams()

  // Gradient descent with a logistic gradient and L2 regularization.
  override protected val optimizer: RegularizedOptimizer[
    DenseVector[Double], DenseVector[Double],
    Double, RDD[LabeledPoint]] = new GradientDescentSpark(new LogisticGradient, new SquaredL2Updater)

  // Dimensionality of the mapped feature space (before the appended intercept).
  def dimensions = featureMap(sample_input).length

  // One weight per mapped feature plus one for the intercept.
  override def initParams() = DenseVector.zeros[Double](dimensions + 1)
}
/**
 * @author tailhq date: 25/01/2017
 *
 * Implementation of probit GLM model trained on Apache Spark [[RDD]]
 * */
class SparkProbitGLM(
  data: RDD[(DenseVector[Double], Double)], numPoints: Long,
  map: (DenseVector[Double]) => DenseVector[Double] =
  identity[DenseVector[Double]]) extends SparkLogisticGLM(data, numPoints, map) {

  private val standardGaussian = new Gaussian(0, 1.0)

  // Probit link: the standard normal CDF replaces the logistic sigmoid.
  override val h: (Double) => Double = (x: Double) => standardGaussian.cdf(x)

  // Same optimizer family as the parent, but with the probit gradient.
  override protected val optimizer: RegularizedOptimizer[
    DenseVector[Double], DenseVector[Double],
    Double, RDD[LabeledPoint]] = new GradientDescentSpark(new ProbitGradient, new SquaredL2Updater)
}
| mandar2812/DynaML | dynaml-core/src/main/scala/io/github/tailhq/dynaml/models/lm/SparkLogisticGLM.scala | Scala | apache-2.0 | 3,159 |
package com.ponkotuy.proxy
import io.netty.buffer.{ByteBuf, CompositeByteBuf}
import io.netty.channel.ChannelHandlerContext
import io.netty.handler.codec.http.{HttpContent, HttpObject, HttpRequest, HttpResponse, HttpResponseStatus}
import org.littleshoot.proxy.HttpFiltersAdapter
/**
 * LittleProxy filter that accumulates the full HTTP request and response bodies and
 * hands them to `finished` once the server response has been completely received.
 *
 * Aggregation uses Netty composite buffers: each chunk is retained when added (the
 * composite takes ownership) and both composites are released exactly once, tracked
 * by the `released` flag, to avoid reference-count leaks.
 */
abstract class AggregateContentFilters(_originalRequest: HttpRequest, _ctx: ChannelHandlerContext)
  extends HttpFiltersAdapter(_originalRequest, _ctx) {

  /** Callback invoked at most once with the aggregated request and response bodies. */
  def finished(requestContent: ByteBuf, responseContent: ByteBuf): Unit

  // Guards against double-release and against appending to released buffers.
  private[this] var released = false
  private[this] val requestBuf = ctx.alloc().compositeBuffer()
  private[this] val responseBuf = ctx.alloc().compositeBuffer()

  // Collects outgoing request chunks on their way to the server.
  override def proxyToServerRequest(httpObject: HttpObject): HttpResponse = {
    if (!released) {
      addContentTo(requestBuf, httpObject)
    }
    super.proxyToServerRequest(httpObject)
  }

  // Collects incoming response chunks from the server.
  override def serverToProxyResponse(httpObject: HttpObject): HttpObject = {
    if (!released) {
      addContentTo(responseBuf, httpObject)
    }
    super.serverToProxyResponse(httpObject)
  }

  // Appends a chunk's readable content to the composite. The chunk is retained because
  // addComponent takes ownership; the writer index must be advanced manually so the
  // appended bytes become readable.
  private def addContentTo(buf: CompositeByteBuf, httpObject: HttpObject): Unit = {
    httpObject match {
      case httpContent: HttpContent =>
        val c = httpContent.content()
        if (c.isReadable) {
          buf.addComponent(c.retain())
          buf.writerIndex(buf.writerIndex() + c.readableBytes())
        }
      case _ =>
    }
  }

  // Non-200 responses are not aggregated: drop the buffers as soon as the status is known.
  override def proxyToClientResponse(httpObject: HttpObject): HttpObject = {
    httpObject match {
      case res: HttpResponse if res.getStatus != HttpResponseStatus.OK => releaseBuffers()
      case _ =>
    }
    super.proxyToClientResponse(httpObject)
  }

  // Full response received: deliver the aggregate, then release even if `finished` throws.
  override def serverToProxyResponseReceived(): Unit = {
    if (!released) {
      try
        finished(requestBuf, responseBuf)
      finally
        releaseBuffers()
    }
  }

  // Releases both composites exactly once.
  private def releaseBuffers(): Unit = {
    if (!released) {
      released = true
      requestBuf.release()
      responseBuf.release()
    }
  }
}
| ttdoda/MyFleetGirls | client/src/main/scala/com/ponkotuy/proxy/AggregateContentFilters.scala | Scala | mit | 2,015 |
//############################################################################
// Lisp interpreter (revived as an optimizer test.)
//############################################################################
//############################################################################
// Lisp Scanner
/**
 * Tokenizes a Lisp source string into parentheses and atom tokens.
 *
 * A delimiter is any whitespace/control character or a parenthesis; parentheses are
 * single-character tokens, and any maximal run of non-delimiter characters is an atom.
 */
class LispTokenizer(s: String) extends Iterator[String] {
  private var i = 0;
  private def isDelimiter(ch: Char) = ch <= ' ' || ch == '(' || ch == ')'
  // Skips leading whitespace, then reports whether another token remains.
  def hasNext: Boolean = {
    while (i < s.length() && s.charAt(i) <= ' ') i += 1
    i < s.length()
  }
  def next: String =
    if (hasNext) {
      val start = i
      if (isDelimiter(s charAt i)) i += 1
      else {
        // Bug fix: bound the scan by the string length. The previous do-while style
        // loop called s.charAt(i) unconditionally and threw StringIndexOutOfBounds
        // when the final token was not followed by a delimiter (e.g. input "abc").
        i += 1
        while (i < s.length() && !isDelimiter(s.charAt(i))) i += 1
      }
      s.substring(start, i)
    } else sys.error("premature end of string")
}
//############################################################################
// Lisp Interface
trait Lisp {
type Data
def string2lisp(s: String): Data
def lisp2string(s: Data): String
def evaluate(d: Data): Data
// !!! def evaluate(s: String): Data = evaluate(string2lisp(s))
def evaluate(s: String): Data
}
//############################################################################
// Lisp Implementation Using Case Classes
object LispCaseClasses extends Lisp {
import List.range
trait Data {
def elemsToString(): String = toString();
}
case class CONS(car: Data, cdr: Data) extends Data {
override def toString() = "(" + elemsToString() + ")";
override def elemsToString() = car.toString() + (cdr match {
case NIL() => ""
case _ => " " + cdr.elemsToString();
})
}
case class NIL() extends Data { // !!! use case object
override def toString() = "()";
}
case class SYM(name: String) extends Data {
override def toString() = name;
}
case class NUM(x: Int) extends Data {
override def toString() = x.toString();
}
case class STR(x: String) extends Data {
override def toString() = "\\"" + x + "\\"";
}
case class FUN(f: List[Data] => Data) extends Data {
override def toString() = "<fn>";
}
def list(): Data =
NIL();
def list(x0: Data): Data =
CONS(x0, NIL());
def list(x0: Data, x1: Data): Data =
CONS(x0, list(x1));
def list(x0: Data, x1: Data, x2: Data): Data =
CONS(x0, list(x1, x2));
def list(x0: Data, x1: Data, x2: Data, x3: Data): Data =
CONS(x0, list(x1, x2, x3));
def list(x0: Data, x1: Data, x2: Data, x3: Data, x4: Data): Data =
CONS(x0, list(x1, x2, x3, x4));
def list(x0: Data, x1: Data, x2: Data, x3: Data, x4: Data, x5: Data): Data =
CONS(x0, list(x1, x2, x3, x4, x5));
def list(x0: Data, x1: Data, x2: Data, x3: Data, x4: Data, x5: Data,
x6: Data): Data =
CONS(x0, list(x1, x2, x3, x4, x5, x6));
def list(x0: Data, x1: Data, x2: Data, x3: Data, x4: Data, x5: Data,
x6: Data, x7: Data): Data =
CONS(x0, list(x1, x2, x3, x4, x5, x6, x7));
def list(x0: Data, x1: Data, x2: Data, x3: Data, x4: Data, x5: Data,
x6: Data, x7: Data, x8: Data): Data =
CONS(x0, list(x1, x2, x3, x4, x5, x6, x7, x8));
def list(x0: Data, x1: Data, x2: Data, x3: Data, x4: Data, x5: Data,
x6: Data, x7: Data, x8: Data, x9: Data): Data =
CONS(x0, list(x1, x2, x3, x4, x5, x6, x7, x8, x9));
var curexp: Data = null
var trace: Boolean = false
var indent: Int = 0
def lispError[a](msg: String): a =
sys.error("error: " + msg + "\\n" + curexp);
trait Environment {
def lookup(n: String): Data;
def extendRec(name: String, expr: Environment => Data) =
new Environment {
def lookup(n: String): Data =
if (n == name) expr(this) else Environment.this.lookup(n);
}
def extend(name: String, v: Data) = extendRec(name, (env1 => v));
}
val EmptyEnvironment = new Environment {
def lookup(n: String): Data = lispError("undefined: " + n);
}
def toList(x: Data): List[Data] = x match {
case NIL() => List()
case CONS(y, ys) => y :: toList(ys)
case _ => lispError("malformed list: " + x);
}
def toBoolean(x: Data) = x match {
case NUM(0) => false
case _ => true
}
def normalize(x: Data): Data = x match {
case CONS(SYM("def"),
CONS(CONS(SYM(name), args), CONS(body, CONS(expr, NIL())))) =>
normalize(list(SYM("def"),
SYM(name), list(SYM("lambda"), args, body), expr))
case CONS(SYM("cond"), CONS(CONS(SYM("else"), CONS(expr, NIL())),NIL())) =>
normalize(expr)
case CONS(SYM("cond"), CONS(CONS(test, CONS(expr, NIL())), rest)) =>
normalize(list(SYM("if"), test, expr, CONS(SYM("cond"), rest)))
case CONS(h, t) => CONS(normalize(h), normalize(t))
case _ => x
}
def eval(x: Data, env: Environment): Data = {
val prevexp = curexp;
curexp = x;
if (trace) {
for (x <- range(1, indent)) Console.print(" ");
Console.println("===> " + x);
indent = indent + 1;
}
val result = eval1(x, env);
if (trace) {
indent = indent - 1;
for (x <- range(1, indent)) Console.print(" ");
Console.println("<=== " + result);
}
curexp = prevexp;
result
}
def eval1(x: Data, env: Environment): Data = x match {
case SYM(name) =>
env lookup name
case CONS(SYM("def"), CONS(SYM(name), CONS(y, CONS(z, NIL())))) =>
eval(z, env.extendRec(name, (env1 => eval(y, env1))))
case CONS(SYM("val"), CONS(SYM(name), CONS(y, CONS(z, NIL())))) =>
eval(z, env.extend(name, eval(y, env)))
case CONS(SYM("lambda"), CONS(params, CONS(y, NIL()))) =>
mkLambda(params, y, env)
case CONS(SYM("if"), CONS(c, CONS(t, CONS(e, NIL())))) =>
if (toBoolean(eval(c, env))) eval(t, env) else eval(e, env)
case CONS(SYM("quote"), CONS(x, NIL())) =>
x
case CONS(y, xs) =>
apply(eval(y, env), toList(xs) map (x => eval(x, env)))
case NUM(_) => x
case STR(_) => x
case FUN(_) => x
case _ =>
lispError("illegal term")
}
def apply(fn: Data, args: List[Data]): Data = fn match {
case FUN(f) => f(args);
case _ => lispError("application of non-function: " + fn);
}
def mkLambda(params: Data, expr: Data, env: Environment): Data = {
def extendEnv(env: Environment,
ps: List[String], args: List[Data]): Environment =
(ps, args) match {
case (List(), List()) =>
env
case (p :: ps1, arg :: args1) =>
extendEnv(env.extend(p, arg), ps1, args1)
case _ =>
lispError("wrong number of arguments")
}
val ps: List[String] = toList(params) map {
case SYM(name) => name
case _ => sys.error("illegal parameter list");
}
FUN(args => eval(expr, extendEnv(env, ps, args)))
}
val globalEnv = EmptyEnvironment
.extend("=", FUN({
case List(NUM(arg1),NUM(arg2)) => NUM(if (arg1 == arg2) 1 else 0)
case List(STR(arg1),STR(arg2)) => NUM(if (arg1 == arg2) 1 else 0)}))
.extend("+", FUN({
case List(NUM(arg1),NUM(arg2)) => NUM(arg1 + arg2)
case List(STR(arg1),STR(arg2)) => STR(arg1 + arg2)}))
.extend("-", FUN({
case List(NUM(arg1),NUM(arg2)) => NUM(arg1 - arg2)}))
.extend("*", FUN({
case List(NUM(arg1),NUM(arg2)) => NUM(arg1 * arg2)}))
.extend("/", FUN({
case List(NUM(arg1),NUM(arg2)) => NUM(arg1 / arg2)}))
.extend("car", FUN({
case List(CONS(x, xs)) => x}))
.extend("cdr", FUN({
case List(CONS(x, xs)) => xs}))
.extend("null?", FUN({
case List(NIL()) => NUM(1)
case _ => NUM(0)}))
.extend("cons", FUN({
case List(x, y) => CONS(x, y)}));
/** Normalizes and evaluates a parsed Lisp term in the global environment. */
def evaluate(x: Data): Data = eval(normalize(x), globalEnv);
/** Parses a Lisp source string and evaluates the resulting term. */
def evaluate(s: String): Data = evaluate(string2lisp(s));
/** Parses a Lisp source string into the case-class term representation.
 *
 *  Tokens starting with a digit become NUM, quoted tokens become STR with
 *  the quotes stripped, "(" starts a CONS list terminated by ")", and
 *  everything else is a SYM. Unbalanced ")" is fatal.
 */
def string2lisp(s: String): Data = {
  val it = new LispTokenizer(s);
  def parse(token: String): Data = {
    if (token == "(") parseList
    else if (token == ")") sys.error("unbalanced parentheses")
    else if ('0' <= token.charAt(0) && token.charAt(0) <= '9')
      NUM(token.toInt)
    else if (token.charAt(0) == '\\"' && token.charAt(token.length()-1)=='\\"')
      STR(token.substring(1,token.length() - 1))
    else SYM(token)
  }
  // Reads tokens until ")" and builds a right-nested CONS chain.
  def parseList: Data = {
    val token = it.next;
    if (token == ")") NIL() else CONS(parse(token), parseList)
  }
  parse(it.next)
}
/** Renders a term using the case classes' derived toString representation. */
def lisp2string(d: Data): String = d.toString();
}
//############################################################################
// Lisp Implementation Using Any
/** Lisp interpreter whose term representation is plain `Any`:
 *  Int and String literals stay unwrapped, lists are Scala `List[Data]`,
 *  symbols are `scala.Symbol`, and closures are wrapped in `Lambda`.
 */
object LispAny extends Lisp {
  import List._;

  type Data = Any;

  /** Wrapper marking a value as callable. */
  case class Lambda(f: List[Data] => Data);

  // Expression currently under evaluation (for error reporting) plus
  // mutable tracing state used by eval.
  var curexp: Data = null;
  var trace: Boolean = false;
  var indent: Int = 0;

  /** Aborts evaluation, reporting the expression currently being evaluated. */
  def lispError[a](msg: String): a =
    sys.error("error: " + msg + "\\n" + curexp);

  /** Immutable environments as lookup chains. `extendRec` lets the bound
   *  value refer back to the extended environment itself, which is what
   *  makes recursive `def` bindings work.
   */
  trait Environment {
    def lookup(n: String): Data;
    def extendRec(name: String, expr: Environment => Data) =
      new Environment {
        def lookup(n: String): Data =
          if (n == name) expr(this) else Environment.this.lookup(n);
      }
    // Non-recursive binding: ignore the environment argument.
    def extend(name: String, v: Data) = extendRec(name, (env1 => v));
  }
  val EmptyEnvironment = new Environment {
    def lookup(n: String): Data = lispError("undefined: " + n);
  }

  // Checked downcasts from the untyped Data representation.
  def asList(x: Data): List[Data] = x match {
    case y: List[_] => y
    case _ => lispError("malformed list: " + x)
  }
  def asInt(x: Data): Int = x match {
    case y: Int => y
    case _ => lispError("not an integer: " + x)
  }
  def asString(x: Data): String = x match {
    case y: String => y
    case _ => lispError("not a string: " + x)
  }
  // Lisp truthiness: any value other than the integer 0 is true.
  def asBoolean(x: Data): Boolean = x != 0

  /** Desugars derived forms (and/or/def-with-args/cond) into the core
   *  language understood by eval1, recursing into subterms.
   */
  def normalize(x: Data): Data = x match {
    case Symbol("and") :: x :: y :: Nil =>
      normalize(Symbol("if") :: x :: y :: 0 :: Nil)
    case Symbol("or") :: x :: y :: Nil =>
      normalize(Symbol("if") :: x :: 1 :: y :: Nil)
    case Symbol("def") :: (name :: args) :: body :: expr :: Nil =>
      normalize(Symbol("def") :: name :: (Symbol("lambda") :: args :: body :: Nil) :: expr :: Nil)
    case Symbol("cond") :: (Symbol("else") :: expr :: Nil) :: rest =>
      normalize(expr);
    case Symbol("cond") :: (test :: expr :: Nil) :: rest =>
      normalize(Symbol("if") :: test :: expr :: (Symbol("cond") :: rest) :: Nil)
    case Symbol("cond") :: Symbol("else") :: expr :: Nil =>
      normalize(expr)
    case h :: t =>
      normalize(h) :: asList(normalize(t))
    case _ =>
      x
  }

  /** Evaluates a term, maintaining `curexp` for error messages and
   *  printing an indented call trace when `trace` is enabled.
   */
  def eval(x: Data, env: Environment): Data = {
    val prevexp = curexp;
    curexp = x;
    if (trace) {
      for (x <- range(1, indent)) Console.print(" ");
      Console.println("===> " + x);
      indent += 1;
    }
    val result = eval1(x, env);
    if (trace) {
      indent -= 1;
      for (x <- range(1, indent)) Console.print(" ");
      Console.println("<=== " + result);
    }
    curexp = prevexp;
    result
  }

  /** The core evaluator: symbols are looked up, the special forms
   *  def/val/lambda/if/quote are handled structurally, any other list is a
   *  function application, and Int/String/Lambda literals are self-evaluating.
   */
  def eval1(x: Data, env: Environment): Data = x match {
    case Symbol(name) =>
      env lookup name
    case Symbol("def") :: Symbol(name) :: y :: z :: Nil =>
      // Recursive binding: y is re-evaluated in the extended environment.
      eval(z, env.extendRec(name, (env1 => eval(y, env1))))
    case Symbol("val") :: Symbol(name) :: y :: z :: Nil =>
      eval(z, env.extend(name, eval(y, env)))
    case Symbol("lambda") :: params :: y :: Nil =>
      mkLambda(params, y, env)
    case Symbol("if") :: c :: y :: z :: Nil =>
      if (asBoolean(eval(c, env))) eval(y, env) else eval(z, env)
    case Symbol("quote") :: y :: Nil =>
      y
    case y :: z =>
      apply(eval(y, env), z map (x => eval(x, env)))
    case Lambda(_) => x
    case y: String => x
    case y: Int => x
    case y => lispError("illegal term")
  }

  /** Renders a term in S-expression syntax; strings are re-quoted. */
  def lisp2string(x: Data): String = x match {
    case Symbol(name) => name
    case Nil => "()"
    case y :: ys =>
      def list2string(xs: List[Data]): String = xs match {
        case List() => ""
        case y :: ys => " " + lisp2string(y) + list2string(ys)
      }
      "(" + lisp2string(y) + list2string(ys) + ")"
    case _ => if (x.isInstanceOf[String]) "\\"" + x + "\\""; else x.toString()
  }

  /** Applies an evaluated operator; only Lambda values are callable. */
  def apply(fn: Data, args: List[Data]): Data = fn match {
    case Lambda(f) => f(args);
    case _ => lispError("application of non-function: " + fn + " to " + args);
  }

  /** Builds a Lambda closure; parameter names are checked eagerly, the
   *  argument count only when the closure is applied.
   */
  def mkLambda(params: Data, expr: Data, env: Environment): Data = {
    def extendEnv(env: Environment,
                  ps: List[String], args: List[Data]): Environment =
      (ps, args) match {
        case (List(), List()) =>
          env
        case (p :: ps1, arg :: args1) =>
          extendEnv(env.extend(p, arg), ps1, args1)
        case _ =>
          lispError("wrong number of arguments")
      }
    val ps: List[String] = asList(params) map {
      case Symbol(name) => name
      case _ => sys.error("illegal parameter list");
    }
    Lambda(args => eval(expr, extendEnv(env, ps, args)))
  }

  // Primitive environment; each primitive is a partial function, so applying
  // one to arguments of the wrong shape raises a MatchError.
  val globalEnv = EmptyEnvironment
    .extend("=", Lambda{
      case List(arg1, arg2) => if (arg1 == arg2) 1 else 0})
    .extend("+", Lambda{
      case List(arg1: Int, arg2: Int) => arg1 + arg2
      case List(arg1: String, arg2: String) => arg1 + arg2})
    .extend("-", Lambda{
      case List(arg1: Int, arg2: Int) => arg1 - arg2})
    .extend("*", Lambda{
      case List(arg1: Int, arg2: Int) => arg1 * arg2})
    .extend("/", Lambda{
      case List(arg1: Int, arg2: Int) => arg1 / arg2})
    .extend("nil", Nil)
    .extend("cons", Lambda{
      case List(arg1, arg2) => arg1 :: asList(arg2)})
    .extend("car", Lambda{
      case List(x :: xs) => x})
    .extend("cdr", Lambda{
      case List(x :: xs) => xs})
    .extend("null?", Lambda{
      case List(Nil) => 1
      case _ => 0});

  /** Normalizes and evaluates a parsed term in the global environment. */
  def evaluate(x: Data): Data = eval(normalize(x), globalEnv);
  /** Parses a source string and evaluates the resulting term. */
  def evaluate(s: String): Data = evaluate(string2lisp(s));

  /** Parses a source string into the `Any`-based term representation. */
  def string2lisp(s: String): Data = {
    val it = new LispTokenizer(s);
    def parse(token: String): Data = {
      if (token == "(") parseList
      else if (token == ")") sys.error("unbalanced parentheses")
      //else if (Character.isDigit(token.charAt(0)))
      else if (token.charAt(0).isDigit)
        token.toInt
      else if (token.charAt(0) == '\\"' && token.charAt(token.length()-1)=='\\"')
        token.substring(1,token.length() - 1)
      else Symbol(token)
    }
    def parseList: List[Data] = {
      val token = it.next;
      if (token == ")") Nil else parse(token) :: parseList
    }
    parse(it.next)
  }
}
//############################################################################
// List User
/** Drives a given Lisp implementation through a fixed demo program:
 *  parsing/printing round-trips, list primitives, and recursive/conditional
 *  function definitions, printing each result to the console.
 */
class LispUser(lisp: Lisp) {
  import lisp._;

  /** Evaluates a source string and renders the result. */
  def evaluate(s: String) = lisp2string(lisp.evaluate(s));

  def run = {
    // Show both the raw parsed representation and the pretty-printed form.
    Console.println(string2lisp("(lambda (x) (+ (* x x) 1))").asInstanceOf[AnyRef]);
    Console.println(lisp2string(string2lisp("(lambda (x) (+ (* x x) 1))")));
    Console.println();
    // List primitives.
    Console.println("( '(1 2 3)) = " + evaluate(" (quote(1 2 3))"));
    Console.println("(car '(1 2 3)) = " + evaluate("(car (quote(1 2 3)))"));
    Console.println("(cdr '(1 2 3)) = " + evaluate("(cdr (quote(1 2 3)))"));
    Console.println("(null? '(2 3)) = " + evaluate("(null? (quote(2 3)))"));
    Console.println("(null? '()) = " + evaluate("(null? (quote()))"));
    Console.println();
    // Recursive factorial via `if` and via `cond`.
    Console.println("faculty(10) = " + evaluate(
      "(def (faculty n) " +
      "(if (= n 0) " +
      "1 " +
      "(* n (faculty (- n 1)))) " +
      "(faculty 10))"));
    Console.println("faculty(10) = " + evaluate(
      "(def (faculty n) " +
      "(cond " +
      "((= n 0) 1) " +
      "(else (* n (faculty (- n 1))))) " +
      "(faculty 10))"));
    // Nested defs, nested conds, and val bindings combined.
    Console.println("foobar = " + evaluate(
      "(def (foo n) " +
      "(cond " +
      "((= n 0) \\"a\\")" +
      "((= n 1) \\"b\\")" +
      "((= (/ n 2) 1) " +
      "(cond " +
      "((= n 2) \\"c\\")" +
      "(else \\"d\\")))" +
      "(else " +
      "(def (bar m) " +
      "(cond " +
      "((= m 0) \\"e\\")" +
      "((= m 1) \\"f\\")" +
      "(else \\"z\\"))" +
      "(bar (- n 4)))))" +
      "(val nil (quote ())" +
      "(val v1 (foo 0) " +
      "(val v2 (+ (foo 1) (foo 2)) " +
      "(val v3 (+ (+ (foo 3) (foo 4)) (foo 5)) " +
      "(val v4 (foo 6) " +
      "(cons v1 (cons v2 (cons v3 (cons v4 nil))))))))))"));
    Console.println();
  }
}
//############################################################################
// Main
object Test {
  /** Runs the demo program against both Lisp implementations. */
  def main(args: Array[String]): Unit = {
    val withCaseClasses = new LispUser(LispCaseClasses)
    withCaseClasses.run
    val withAny = new LispUser(LispAny)
    withAny.run
    ()
  }
}
//############################################################################
| som-snytt/dotty | tests/pos/t4579.scala | Scala | apache-2.0 | 16,698 |
package com.typesafe.slick.testkit.tests
import slick.driver.H2Driver
import com.typesafe.slick.testkit.util.{RelationalTestDB, AsyncTest}
class NewQuerySemanticsTest extends AsyncTest[RelationalTestDB] {
import tdb.profile.api._
/** End-to-end exercise of the query compiler: tables with re-mapped/computed
 *  projections, implicit and explicit joins, lifted scalars, self-joins,
 *  unions and outer joins, each checked against hard-coded result sets.
 *  Note the two table pairs map the SAME database tables ("SUPPLIERS",
 *  "COFFEES"): the *Std variants carry the full row and are used for schema
 *  creation and data loading, while Suppliers/Coffees use narrowed or
 *  computed default projections (e.g. `total * 10`).
 */
def testNewComposition = {

  class SuppliersStd(tag: Tag) extends Table[(Int, String, String, String, String, String)](tag, "SUPPLIERS") {
    def id = column[Int]("SUP_ID", O.PrimaryKey) // This is the primary key column
    def name = column[String]("SUP_NAME")
    def street = column[String]("STREET")
    def city = column[String]("CITY")
    def state = column[String]("STATE")
    def zip = column[String]("ZIP")
    def * = (id, name, street, city, state, zip)
  }
  val suppliersStd = TableQuery[SuppliersStd]

  class CoffeesStd(tag: Tag) extends Table[(String, Int, Int, Int, Int)](tag, "COFFEES") {
    def name = column[String]("COF_NAME", O.PrimaryKey, O.Length(254))
    def supID = column[Int]("SUP_ID")
    def price = column[Int]("PRICE")
    def sales = column[Int]("SALES")
    def total = column[Int]("TOTAL")
    def * = (name, supID, price, sales, total)
    def supplier = foreignKey("SUP_FK", supID, suppliersStd)(_.id)
  }
  val coffeesStd = TableQuery[CoffeesStd]

  class Suppliers(tag: Tag) extends Table[(Int, String, String)](tag, "SUPPLIERS") {
    def id = column[Int]("SUP_ID", O.PrimaryKey) // This is the primary key column
    def name = column[String]("SUP_NAME")
    def street = column[String]("STREET")
    def city = column[String]("CITY")
    def state = column[String]("STATE")
    def zip = column[String]("ZIP")
    def * = (id, name, street)
  }
  val suppliers = TableQuery[Suppliers]

  class Coffees(tag: Tag) extends Table[(String, Int, Int, Int, Int)](tag, "COFFEES") {
    def name = column[String]("COF_NAME", O.PrimaryKey)
    def supID = column[Int]("SUP_ID")
    def price = column[Int]("PRICE")
    def sales = column[Int]("SALES")
    def total = column[Int]("TOTAL")
    // Computed default projection: total is scaled on read.
    def * = (name, supID, price, sales, (total * 10))
    def totalComputed = sales * price
    def supplier = foreignKey("SUP_FK", supID, suppliers)(_.id)
  }
  val coffees = TableQuery[Coffees]

  // Create the schema and load fixture data through the *Std tables.
  val setup = seq(
    (suppliersStd.schema ++ coffeesStd.schema).create,
    suppliersStd += (101, "Acme, Inc.", "99 Market Street", "Groundsville", "CA", "95199"),
    suppliersStd += ( 49, "Superior Coffee", "1 Party Place", "Mendocino", "CA", "95460"),
    suppliersStd += (150, "The High Ground", "100 Coffee Lane", "Meadows", "CA", "93966"),
    coffeesStd ++= Seq(
      ("Colombian", 101, 799, 1, 0),
      ("French_Roast", 49, 799, 2, 0),
      ("Espresso", 150, 999, 3, 0),
      ("Colombian_Decaf", 101, 849, 4, 0),
      ("French_Roast_Decaf", 49, 999, 5, 0)
    )
  ).named("setup")

  // take/map compositions over unsorted queries.
  val qa = for {
    c <- coffees.take(3)
  } yield (c.supID, (c.name, 42))
  val qa2 = coffees.take(3).map(_.name).take(2)
  val qb = qa.take(2).map(_._2)
  val qb2 = qa.map(n => n).take(2).map(_._2)
  val qc = qa.map(_._2).take(2)
  val a1 = seq(
    mark("qa", qa.result).map(_.toSet).map { ra =>
      ra.size shouldBe 3
      // No sorting, so result contents can vary
      ra shouldAllMatch { case (s: Int, (i: String, 42)) => () }
    },
    mark("qa2", qa2.result).map(_.toSet).map(_.size shouldBe 2),
    mark("qb", qb.result).map(_.toSet).map { rb =>
      rb.size shouldBe 2
      // No sorting, so result contents can vary
      rb shouldAllMatch { case (i: String, 42) => () }
    },
    mark("qb2", qb2.result).map(_.toSet).map { rb2 =>
      rb2.size shouldBe 2
      // No sorting, so result contents can vary
      rb2 shouldAllMatch { case (i: String, 42) => () }
    },
    mark("qc", qc.result).map(_.toSet).map { rc =>
      rc.size shouldBe 2
      // No sorting, so result contents can vary
      rc shouldAllMatch { case (i: String, 42) => () }
    }
  )

  // Plain table
  val q0 = coffees
  // Plain implicit join
  val q1 = for {
    c <- coffees.sortBy(c => (c.name, c.price.desc)).take(2)
    s <- suppliers
  } yield ((c.name, (s.city ++ ":")), c, s, c.totalComputed)
  // Explicit join with condition
  val q1b_0 = coffees.sortBy(_.price).take(3) join suppliers on (_.supID === _.id)
  def q1b = for {
    (c, s) <- q1b_0.sortBy(_._1.price).take(2).filter(_._1.name =!= "Colombian")
    (c2, s2) <- q1b_0
  } yield (c.name, s.city, c2.name)
  def a2 = seq(
    mark("q0", q0.result).named("q0: Plain table").map(_.toSet).map { r0 =>
      r0 shouldBe Set(
        ("Colombian", 101, 799, 1, 0),
        ("French_Roast", 49, 799, 2, 0),
        ("Espresso", 150, 999, 3, 0),
        ("Colombian_Decaf", 101, 849, 4, 0),
        ("French_Roast_Decaf", 49, 999, 5, 0)
      )
    },
    mark("q1", q1.result).named("q1: Plain implicit join").map(_.toSet).map { r1 =>
      r1 shouldBe Set(
        (("Colombian","Groundsville:"),("Colombian",101,799,1,0),(101,"Acme, Inc.","99 Market Street"),799),
        (("Colombian","Mendocino:"),("Colombian",101,799,1,0),(49,"Superior Coffee","1 Party Place"),799),
        (("Colombian","Meadows:"),("Colombian",101,799,1,0),(150,"The High Ground","100 Coffee Lane"),799),
        (("Colombian_Decaf","Groundsville:"),("Colombian_Decaf",101,849,4,0),(101,"Acme, Inc.","99 Market Street"),3396),
        (("Colombian_Decaf","Mendocino:"),("Colombian_Decaf",101,849,4,0),(49,"Superior Coffee","1 Party Place"),3396),
        (("Colombian_Decaf","Meadows:"),("Colombian_Decaf",101,849,4,0),(150,"The High Ground","100 Coffee Lane"),3396)
      )
    },
    ifCap(rcap.pagingNested) {
      mark("q1b", q1b.result).named("q1b: Explicit join with condition").map { r1b =>
        r1b.toSet shouldBe Set(
          ("French_Roast","Mendocino","Colombian"),
          ("French_Roast","Mendocino","French_Roast"),
          ("French_Roast","Mendocino","Colombian_Decaf")
        )
      }
    }
  )

  // More elaborate query
  val q2 = for {
    c <- coffees.filter(_.price < 900).map(_.*)
    s <- suppliers if s.id === c._2
  } yield (c._1, s.name)
  // Lifting scalar values
  val q3 = coffees.flatMap { c =>
    val cf = Query(c).filter(_.price === 849)
    cf.flatMap { cf =>
      suppliers.filter(_.id === c.supID).map { s =>
        (c.name, s.name, cf.name, cf.total, cf.totalComputed)
      }
    }
  }
  // Lifting scalar values, with extra tuple
  val q3b = coffees.flatMap { c =>
    val cf = Query((c, 42)).filter(_._1.price < 900)
    cf.flatMap { case (cf, num) =>
      suppliers.filter(_.id === c.supID).map { s =>
        (c.name, s.name, cf.name, cf.total, cf.totalComputed, num)
      }
    }
  }
  // Map to tuple, then filter
  def q4 = for {
    c <- coffees.map(c => (c.name, c.price, 42)).sortBy(_._1).take(2).filter(_._2 < 800)
  } yield (c._1, c._3)
  // Map to tuple, then filter, with self-join
  def q4b_0 = coffees.map(c => (c.name, c.price, 42)).filter(_._2 < 800)
  def q4b = for {
    c <- q4b_0
    d <- q4b_0
  } yield (c,d)
  def a3 = for {
    _ <- q2.result.named("More elaborate query").map(_.toSet).map { r2 =>
      r2 shouldBe Set(
        ("Colombian","Acme, Inc."),
        ("French_Roast","Superior Coffee"),
        ("Colombian_Decaf","Acme, Inc.")
      )
    }
    _ <- q3.result.named("Lifting scalar values").map(_.toSet).map { r3 =>
      r3 shouldBe Set(("Colombian_Decaf","Acme, Inc.","Colombian_Decaf",0,3396))
    }
    _ <- q3b.result.named("Lifting scalar values, with extra tuple").map(_.toSet).map { r3b =>
      r3b shouldBe Set(
        ("Colombian","Acme, Inc.","Colombian",0,799,42),
        ("French_Roast","Superior Coffee","French_Roast",0,1598,42),
        ("Colombian_Decaf","Acme, Inc.","Colombian_Decaf",0,3396,42)
      )
    }
    _ <- ifCap(rcap.pagingNested) {
      mark("q4", q4.result).named("q4: Map to tuple, then filter").map(_.toSet shouldBe Set(("Colombian",42)))
    }
    _ <- mark("q4b", q4b.result).map(_.toSet shouldBe Set(
      (("Colombian",799,42),("Colombian",799,42)),
      (("Colombian",799,42),("French_Roast",799,42)),
      (("French_Roast",799,42),("Colombian",799,42)),
      (("French_Roast",799,42),("French_Roast",799,42))
    ))
  } yield ()

  // Implicit self-join
  val q5_0 = coffees.sortBy(_.price).take(2)
  val q5 = for {
    c1 <- q5_0
    c2 <- q5_0
  } yield (c1, c2)
  // Explicit self-join with condition
  val q5b = for {
    t <- q5_0 join q5_0 on (_.name === _.name)
  } yield (t._1, t._2)
  // Unused outer query result, unbound TableQuery
  val q6 = coffees.flatMap(c => suppliers)
  def a4 = seq(
    mark("q5", q5.result).named("q5: Implicit self-join").map(_.toSet).map { r5 =>
      r5 shouldBe Set(
        (("Colombian",101,799,1,0),("Colombian",101,799,1,0)),
        (("Colombian",101,799,1,0),("French_Roast",49,799,2,0)),
        (("French_Roast",49,799,2,0),("Colombian",101,799,1,0)),
        (("French_Roast",49,799,2,0),("French_Roast",49,799,2,0))
      )
    },
    mark("q5b", q5b.result).named("q5b: Explicit self-join with condition").map(_.toSet).map { r5b =>
      r5b shouldBe Set(
        (("Colombian",101,799,1,0),("Colombian",101,799,1,0)),
        (("French_Roast",49,799,2,0),("French_Roast",49,799,2,0))
      )
    },
    mark("q6", q6.result).named("q6: Unused outer query result, unbound TableQuery").map(_.toSet).map { r6 =>
      r6 shouldBe Set(
        (101,"Acme, Inc.","99 Market Street"),
        (49,"Superior Coffee","1 Party Place"),
        (150,"The High Ground","100 Coffee Lane")
      )
    }
  )

  // Simple union
  val q7a = for {
    c <- coffees.filter(_.price < 800) union coffees.filter(_.price > 950)
  } yield (c.name, c.supID, c.total)
  // Union
  val q7 = for {
    c <- coffees.filter(_.price < 800).map((_, 1)) union coffees.filter(_.price > 950).map((_, 2))
  } yield (c._1.name, c._1.supID, c._2)
  // Transitive push-down without union
  val q71 = for {
    c <- coffees.filter(_.price < 800).map((_, 1))
  } yield (c._1.name, c._1.supID, c._2)
  def a5 = seq(
    q7a.result.named("Simple union").map(_.toSet).map { r7a =>
      r7a shouldBe Set(
        ("Colombian",101,0),
        ("French_Roast",49,0),
        ("Espresso",150,0),
        ("French_Roast_Decaf",49,0)
      )
    },
    q7.result.named("Union").map(_.toSet).map { r7 =>
      r7 shouldBe Set(
        ("Colombian",101,1),
        ("French_Roast",49,1),
        ("Espresso",150,2),
        ("French_Roast_Decaf",49,2)
      )
    },
    q71.result.named("Transitive push-down without union").map(_.toSet).map { r71 =>
      r71 shouldBe Set(
        ("Colombian",101,1),
        ("French_Roast",49,1)
      )
    }
  )

  // Union with filter on the outside
  val q7b = q7 filter (_._1 =!= "Colombian")
  // Outer join
  val q8 = for {
    (c1, c2) <- coffees.filter(_.price < 900) joinLeft coffees.filter(_.price < 800) on (_.name === _.name)
  } yield (c1.name, c2.map(_.name))
  // Nested outer join
  val q8b = for {
    t <- coffees.sortBy(_.sales).take(1) joinLeft coffees.sortBy(_.sales).take(2) on (_.name === _.name) joinLeft coffees.sortBy(_.sales).take(4) on (_._1.supID === _.supID)
  } yield (t._1, t._2)
  def a6 = seq(
    q7b.result.named("Union with filter on the outside").map(_.toSet).map { r7b =>
      r7b shouldBe Set(
        ("French_Roast",49,1),
        ("Espresso",150,2),
        ("French_Roast_Decaf",49,2)
      )
    },
    q8.result.named("Outer join").map(_.toSet).map { r8 =>
      r8 shouldBe Set(
        ("Colombian",Some("Colombian")),
        ("French_Roast",Some("French_Roast")),
        ("Colombian_Decaf",None)
      )
    },
    q8b.result.named("Nested outer join").map(_.toSet).map { r8b =>
      r8b shouldBe Set(
        ((("Colombian",101,799,1,0),Some(("Colombian",101,799,1,0))),Some(("Colombian",101,799,1,0))),
        ((("Colombian",101,799,1,0),Some(("Colombian",101,799,1,0))),Some(("Colombian_Decaf",101,849,4,0)))
      )
    }
  )

  // Run all stages in order against the shared fixture data.
  seq(setup, a1, a2, a3, a4, a5, a6)
}
/** Legacy-style query composition using TupleMethods (`~` projections),
 *  correlated aggregate sub-queries and sorted joins. Only checks that the
 *  queries compile and run; no result assertions.
 */
def testOldComposition = {
  import TupleMethods._

  class Users(tag: Tag) extends Table[(Int, String, String)](tag, "users") {
    def id = column[Int]("id")
    def first = column[String]("first")
    def last = column[String]("last")
    def * = id ~ first ~ last
  }
  val users = TableQuery[Users]

  class Orders(tag: Tag) extends Table[(Int, Int)](tag, "orders") {
    def userID = column[Int]("userID")
    def orderID = column[Int]("orderID")
    def * = userID ~ orderID
  }
  val orders = TableQuery[Orders]

  // Implicit join with a compound sort on the outer table.
  val q2 = for {
    u <- users.sortBy(u => (u.first, u.last.desc))
    o <- orders filter { o => u.id === o.userID }
  } yield u.first ~ u.last ~ o.orderID
  val q3 = for (u <- users filter (_.id === 42)) yield u.first ~ u.last
  val q4 = (for {
    (u, o) <- users join orders on (_.id === _.userID)
  } yield (u.last, u.first ~ o.orderID)).sortBy(_._1).map(_._2)
  // Three variants of "latest order per user" via a correlated max sub-query.
  val q6a =
    (for (o <- orders if o.orderID === (for {o2 <- orders if o.userID === o2.userID} yield o2.orderID).max) yield o.orderID).sorted
  val q6b =
    (for (o <- orders if o.orderID === (for {o2 <- orders if o.userID === o2.userID} yield o2.orderID).max) yield o.orderID ~ o.userID).sortBy(_._1)
  val q6c =
    (for (o <- orders if o.orderID === (for {o2 <- orders if o.userID === o2.userID} yield o2.orderID).max) yield o).sortBy(_.orderID).map(o => o.orderID ~ o.userID)

  // Create, run everything against the empty tables, then drop.
  seq(
    (users.schema ++ orders.schema).create,
    q3.result,
    q4.result,
    q6a.result,
    q6b.result,
    q6c.result,
    (users.schema ++ orders.schema).drop
  )
}
/** Regression test for query fusion: a comprehension with an intermediate
 *  computed value (`start = a.id + 1`) flowing into a later join condition
 *  must compile and run (no result assertions).
 */
def testAdvancedFusion = {
  class TableA(tag: Tag) extends Table[Int](tag, "TableA") {
    def id = column[Int]("id")
    def * = id
  }
  val tableA = TableQuery[TableA]

  class TableB(tag: Tag) extends Table[(Int, Int)](tag, "TableB") {
    def id = column[Int]("id")
    def start = column[Int]("start")
    def * = (id, start)
  }
  val tableB = TableQuery[TableB]

  class TableC(tag: Tag) extends Table[Int](tag, "TableC") {
    def start = column[Int]("start")
    def * = start
  }
  val tableC = TableQuery[TableC]

  val queryErr2 = for {
    a <- tableA
    b <- tableB if b.id === a.id
    start = a.id + 1
    c <- tableC if c.start <= start
  } yield (b, c)

  (tableA.schema ++ tableB.schema ++ tableC.schema).create >> queryErr2.result
}
/** Sub-query handling: a bound-parameter count, a compiled parameterized
 *  query yielding an aggregate sub-query, and an `in` sub-select.
 */
def testSubquery = {
  class A(tag: Tag) extends Table[Int](tag, "A_subquery") {
    def id = column[Int]("id")
    def * = id
  }
  val as = TableQuery[A]

  for {
    _ <- as.schema.create
    _ <- as += 42
    // Count with a bound parameter.
    q0 = as.filter(_.id === 42.bind).length
    _ <- q0.result.named("q0").map(_ shouldBe 1)
    // Pre-compiled query whose projection is itself an aggregate sub-query.
    q1 = Compiled { (n: Rep[Int]) =>
      as.filter(_.id === n).map(a => as.length)
    }
    _ <- q1(42).result.named("q1(42)").map(_ shouldBe List(1))
    // Membership test against a sorted sub-select.
    q2 = as.filter(_.id in as.sortBy(_.id).map(_.id))
    _ <- q2.result.named("q2").map(_ shouldBe Vector(42))
  } yield ()
}
/** Reference expansion: column `b` is excluded from the default projection
 *  but included in `create_*`, so it exists in the table and can be filtered
 *  on even though it never appears in results.
 */
def testExpansion = {
  class A(tag: Tag) extends Table[(Int, String)](tag, "A_refexp") {
    def id = column[Int]("id")
    def a = column[String]("a")
    def b = column[String]("b")
    def * = (id, a)
    // Make the DDL include b although it is not part of *.
    override def create_* = collectFieldSymbols((id, a, b).shaped.toNode)
  }
  val as = TableQuery[A]

  for {
    _ <- as.schema.create
    // Insert through an explicit projection that covers the hidden column.
    _ <- as.map(a => (a.id, a.a, a.b)) ++= Seq(
      (1, "a1", "b1"),
      (2, "a2", "b2"),
      (3, "a3", "b3")
    )
    // Filtering on the non-projected column must still work.
    q1 = as.map(identity).filter(_.b === "b3")
    _ <- q1.result.named("q1").map(r1 => r1.toSet shouldBe Set((3, "a3")))
    q2a = as.sortBy(_.a) join as on (_.b === _.b)
    q2 = for {
      (c, s) <- q2a
      c2 <- as
    } yield (c.id, c2.a)
    r2 <- q2.result.named("q2").map(_.toSet)
    _ = r2 shouldBe Set((1, "a1"), (1, "a2"), (1, "a3"), (2, "a1"), (2, "a2"), (2, "a3"), (3, "a1"), (3, "a2"), (3, "a3"))
  } yield ()
}
/** Fusion quality test: a battery of joins, groupings, sorts, paging, unions
 *  and zips. On H2 the expected sub-query nesting depth of the generated SQL
 *  is asserted for each query (via assertNesting), then all queries are run
 *  and their results checked. The nesting counts are tied to the exact shape
 *  of each query, so changing a query here invalidates its assertNesting.
 */
def testNewFusion = {
  class A(tag: Tag) extends Table[(Int, String, String)](tag, "A_NEWFUSION") {
    def id = column[Int]("id", O.PrimaryKey)
    def a = column[String]("a")
    def b = column[String]("b")
    def * = (id, a, b)
  }
  val as = TableQuery[A]
  val data = Set((1, "a", "a"), (2, "a", "b"), (3, "c", "b"))

  val q1 = (as join as on (_.id === _.id))
  val q2 = (as join as on (_.id === _.id) join as on (_._1.id === _.id))
  val q3 = q2.map { case ((a1, a2), a3) => (a1.id, a2.a, a3.b) }
  val q4 = as.map(a => (a.id, a.a, a.b, a)).filter(_._3 === "b").map { case (id, a1, b, a2) => (id, a2) }
  // The same query with to[Set] applied at three different points.
  val q5a = as.to[Set].filter(_.b === "b").map(_.id)
  val q5b = as.filter(_.b === "b").to[Set].map(_.id)
  val q5c = as.filter(_.b === "b").map(_.id).to[Set]
  val q6 = (as join as).groupBy(j => (j._1.a, j._1.b)).map { case (ab, rs) => (ab, rs.length, rs.map(_._1).length, rs.map(_._2).length, rs.map(_._1.id).max, rs.map(_._1.id).length) }
  val q7 = q6.filter(_._1._1 === "a").map(_._5.getOrElse(0))
  val q8 = as.sortBy(_.id.desc).map(_.a)
  // Equivalent sort orderings expressed as chained sortBy vs. a tuple.
  val q9a = as.sortBy(_.b).sortBy(_.a.desc).map(_.id)
  val q9b = as.sortBy(a => (a.a.desc, a.b)).map(_.id)
  val q10 = (as join as).map { case (a1, a2) => a1.id * 3 + a2.id - 3 }.sorted
  // take/drop combinations over the sorted cross join.
  val q11a = q10.take(5)
  val q11b = q10.take(5).take(3)
  val q11c = q10.take(5).take(3).drop(1)
  val q11d = q10.take(5).drop(1).take(3)
  val q11e = q10.drop(7)
  val q11f = q10.take(6).drop(2).filter(_ =!= 5)
  val q12 = as.filter(_.id <= as.map(_.id).max-1).map(_.a)
  val q13 = (as.filter(_.id < 2) union as.filter(_.id > 2)).map(_.id)
  val q14 = q13.to[Set]
  val q15 = (as.map(a => a.id.?).filter(_ < 2) unionAll as.map(a => a.id.?).filter(_ > 2)).map(_.get).to[Set]
  val q16 = (as.map(a => a.id.?).filter(_ < 2) unionAll as.map(a => a.id.?).filter(_ > 2)).map(_.getOrElse(-1)).to[Set].filter(_ =!= 42)
  val q17 = as.sortBy(_.id).zipWithIndex.filter(_._2 < 2L).map { case (a, i) => (a.id, i) }
  val q18 = as.joinLeft(as).on { case (a1, a2) => a1.id === a2.id }.filter { case (a1, a2) => a1.id === 3 }.map { case (a1, a2) => a2 }
  val q19 = as.joinLeft(as).on { case (a1, a2) => a1.id === a2.id }.joinLeft(as).on { case ((_, a2), a3) => a2.map(_.b) === a3.b }.map(_._2)
  // Same as q19 but forced into a sub-query; expected nesting differs.
  val q19b = as.joinLeft(as).on { case (a1, a2) => a1.id === a2.id }.joinLeft(as).on { case ((_, a2), a3) => a2.map(_.b) === a3.b }.subquery.map(_._2)

  // SQL nesting depth is only stable on H2, so gate the structural checks.
  if(tdb.driver == H2Driver) {
    assertNesting(q1, 1)
    assertNesting(q2, 1)
    assertNesting(q3, 1)
    assertNesting(q4, 1)
    assertNesting(q5a, 1)
    assertNesting(q5b, 1)
    assertNesting(q5c, 1)
    assertNesting(q6, 1)
    assertNesting(q7, 1)
    assertNesting(q8, 1)
    assertNesting(q9a, 1)
    assertNesting(q9b, 1)
    assertNesting(q10, 1)
    assertNesting(q11a, 1)
    assertNesting(q11b, 1)
    assertNesting(q11c, 1)
    assertNesting(q11d, 1)
    assertNesting(q11e, 1)
    assertNesting(q11f, 2)
    assertNesting(q12, 2)
    assertNesting(q13, 2)
    assertNesting(q14, 2)
    assertNesting(q15, 2)
    assertNesting(q16, 2)
    assertNesting(q17, 2)
    assertNesting(q18, 1)
    assertNesting(q19, 1)
    assertNesting(q19b, 2)
  }

  for {
    _ <- as.schema.create
    _ <- as ++= data
    _ <- mark("as", as.result).map(_.toSet shouldBe data)
    _ <- mark("q1", q1.result).map(_.toSet shouldBe data.zip(data))
    _ <- mark("q2", q2.result).map(_.toSet shouldBe data.zip(data).zip(data))
    _ <- mark("q3", q3.result).map(_.toSet shouldBe data)
    _ <- mark("q4", q4.result).map(_.toSet shouldBe data.filter(_._3 == "b").map { case t @ (id, _, _) => (id, t) })
    _ <- mark("q5a", q5a.result).map(_ shouldBe Set(2, 3))
    _ <- mark("q5b", q5b.result).map(_ shouldBe Set(2, 3))
    _ <- mark("q5c", q5c.result).map(_ shouldBe Set(2, 3))
    _ <- mark("q6", q6.result).map(_.toSet shouldBe Set((("c","b"),3,3,3,Some(3),3), (("a","a"),3,3,3,Some(1),3), (("a","b"),3,3,3,Some(2),3)))
    _ <- mark("q7", q7.result).map(_.toSet shouldBe Set(1, 2))
    _ <- mark("q8", q8.result).map(_ shouldBe Seq("c", "a", "a"))
    _ <- mark("q9a", q9a.result).map(_ shouldBe Seq(3, 1, 2))
    _ <- mark("q9b", q9b.result).map(_ shouldBe Seq(3, 1, 2))
    _ <- mark("q10", q10.result).map(_ shouldBe Seq(1, 2, 3, 4, 5, 6, 7, 8, 9))
    _ <- mark("q11a", q11a.result).map(_ shouldBe Seq(1, 2, 3, 4, 5))
    _ <- mark("q11b", q11b.result).map(_ shouldBe Seq(1, 2, 3))
    _ <- mark("q11c", q11c.result).map(_ shouldBe Seq(2, 3))
    _ <- mark("q11d", q11d.result).map(_ shouldBe Seq(2, 3, 4))
    _ <- mark("q11e", q11e.result).map(_ shouldBe Seq(8, 9))
    _ <- mark("q11f", q11f.result).map(_ shouldBe Seq(3, 4, 6))
    _ <- mark("q12", q12.result).map(_ shouldBe Seq("a", "a"))
    _ <- mark("q13", q13.result).map(_.toSet shouldBe Set(1, 3))
    _ <- mark("q14", q14.result).map(_ shouldBe Set(1, 3))
    _ <- mark("q15", q15.result).map(_ shouldBe Set(1, 3))
    _ <- mark("q16", q16.result).map(_ shouldBe Set(1, 3))
    _ <- ifCap(rcap.zip)(mark("q17", q17.result).map(_ shouldBe Seq((1,0), (2,1))))
    _ <- mark("q18", q18.result).map(_ shouldBe Seq(Some((3, "c", "b"))))
    _ <- mark("q19", q19.result).map(_.toSet shouldBe Set(Some((1,"a","a")), Some((2,"a","b")), Some((3,"c","b"))))
    _ <- mark("q19b", q19b.result).map(_.toSet shouldBe Set(Some((1,"a","a")), Some((2,"a","b")), Some((3,"c","b"))))
  } yield ()
}
}
| seebcioo/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/NewQuerySemanticsTest.scala | Scala | bsd-2-clause | 22,027 |
package utils.pageobjects.save_for_later
import utils.WithJsBrowser
import utils.pageobjects._
/** Page object for the save-for-later "/save" claim page. */
final class GSaveForLaterSavePage(ctx:PageObjectsContext) extends ClaimPage(ctx, GSaveForLaterSavePage.url) {
}
object GSaveForLaterSavePage {
  /** Relative URL of the save-for-later page. */
  val url = "/save"

  /** Creates the page object bound to the given browser context.
   *  Explicit result type added: public factory methods should not rely on
   *  inference for their API type.
   */
  def apply(ctx: PageObjectsContext): GSaveForLaterSavePage = new GSaveForLaterSavePage(ctx)
}
/** Mix-in context that wires the save-for-later page object into Specs
 *  tests; requires a JS-capable browser from the enclosing specification.
 */
trait GSaveForLaterSavePageContext extends PageContext {
  this: WithJsBrowser[_] =>
  // Shared page object built from the spec's browser.
  val page = GSaveForLaterSavePage (PageObjectsContext(browser))
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/utils/pageobjects/save_for_later/GSaveForLaterSavePage.scala | Scala | mit | 523 |
package org.jetbrains.plugins.scala
package codeInsight.intentions.booleans
import codeInsight.intentions.ScalaIntentionTestBase
import codeInsight.intention.booleans.FlipComparisonInMethodCallExprIntention
/**
 * Tests for the flip-comparison intention on explicit method-call syntax:
 * each case invokes the intention at the {@code <caret>} position and checks
 * that receiver and argument are swapped (with the operator direction
 * reversed where the comparison is ordered).
 *
 * @author Ksenia.Sautina
 * @since 4/20/12
 */
class FlipComparisonInMethodCallExprIntentionTest extends ScalaIntentionTestBase {
  def familyName = FlipComparisonInMethodCallExprIntention.familyName

  // Symmetric operators keep their spelling when flipped.
  def testFlip(): Unit =
    doTest("if (f.=<caret>=(false)) return",
           "if (false.=<caret>=(f)) return")

  def testFlip2(): Unit =
    doTest("if (a.equal<caret>s(b)) return",
           "if (b.equal<caret>s(a)) return")

  // Ordered operators are reversed together with the operands.
  def testFlip3(): Unit =
    doTest("if (a.><caret>(b)) return",
           "if (b.<<caret>(a)) return")

  def testFlip4(): Unit =
    doTest("if (a.<<caret>(b)) return",
           "if (b.><caret>(a)) return")

  def testFlip5(): Unit =
    doTest("if (a.<=<caret>(b)) return",
           "if (b.>=<caret>(a)) return")

  def testFlip6(): Unit =
    doTest("if (a.>=<caret>(b)) return",
           "if (b.<=<caret>(a)) return")

  def testFlip7(): Unit =
    doTest("if (a.!<caret>=(b)) return",
           "if (b.!<caret>=(a)) return")

  // Compound argument expressions are parenthesized as needed.
  def testFlip8(): Unit =
    doTest("if (7.<<caret>(7 + 8)) return",
           "if ((7 + 8).><caret>(7)) return")

  def testFlip9(): Unit =
    doTest("if ((7 + 8).<<caret>(7)) return",
           "if (7.><caret>(7 + 8)) return")

  // Reference-identity comparisons are symmetric as well.
  def testFlip10(): Unit =
    doTest("if (sourceClass == null || sourceClass.e<caret>q(clazz)) return null",
           "if (sourceClass == null || clazz.e<caret>q(sourceClass)) return null")

  def testFlip11(): Unit =
    doTest("if (aClass.n<caret>e(b)) return",
           "if (b.n<caret>e(aClass)) return")
}
| consulo/consulo-scala | test/org/jetbrains/plugins/scala/codeInsight/intentions/booleans/FlipComparisonInMethodCallExprIntentionTest.scala | Scala | apache-2.0 | 2,180 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import java.io.{ InputStream, OutputStream }
import java.util.{ UUID, Properties }
import cascading.scheme.Scheme
import cascading.scheme.local.{ TextLine => CLTextLine, TextDelimited => CLTextDelimited }
import cascading.scheme.hadoop.{
TextLine => CHTextLine,
TextDelimited => CHTextDelimited,
SequenceFile => CHSequenceFile
}
import cascading.tap.hadoop.Hfs
import cascading.tap.MultiSourceTap
import cascading.tap.SinkMode
import cascading.tap.Tap
import cascading.tap.local.FileTap
import cascading.tuple.Fields
import com.etsy.cascading.tap.local.LocalTap
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{ FileStatus, PathFilter, Path }
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapred.OutputCollector
import org.apache.hadoop.mapred.RecordReader
import scala.util.{ Try, Success, Failure }
/**
 * A base class for sources that take a scheme trait.
 * Subclasses override the scheme for each execution mode they support; the
 * defaults fail fast with a ModeException so unsupported modes are reported
 * clearly rather than producing a broken tap.
 */
abstract class SchemedSource extends Source {

  /** The scheme to use if the source is local. */
  def localScheme: Scheme[Properties, InputStream, OutputStream, _, _] =
    throw ModeException("Cascading local mode not supported for: " + toString)

  /** The scheme to use if the source is on hdfs. */
  def hdfsScheme: Scheme[JobConf, RecordReader[_, _], OutputCollector[_, _], _, _] =
    throw ModeException("Cascading Hadoop mode not supported for: " + toString)

  // The mode to use for output taps determining how conflicts with existing output are handled.
  val sinkMode: SinkMode = SinkMode.REPLACE
}
/**
 * A trait which provides a method to create a local tap.
 */
trait LocalSourceOverride extends SchemedSource {
  /** A path to use for the local tap. */
  def localPath: String

  /**
   * Creates a local tap over [[localPath]] using [[localScheme]].
   *
   * @param sinkMode The mode for handling output conflicts.
   * @return A tap.
   */
  def createLocalTap(sinkMode: SinkMode): Tap[_, _, _] = new FileTap(localScheme, localPath, sinkMode)
}
/** Accepts only paths whose final component does not start with "_" or "." (skips hidden/system files). */
object HiddenFileFilter extends PathFilter {
  def accept(p: Path) = {
    val name = p.getName
    !name.startsWith("_") && !name.startsWith(".")
  }
}
/** Accepts only the Hadoop job-completion marker file "_SUCCESS". */
object SuccessFileFilter extends PathFilter {
  def accept(p: Path) = { p.getName == "_SUCCESS" }
}
/** Accepts every path unconditionally. */
object AcceptAllPathFilter extends PathFilter {
  def accept(p: Path) = true
}
object FileSource {
  /**
   * Expands a glob against the filesystem of the given configuration,
   * applying the supplied path filter (all paths accepted by default).
   * Returns empty when the glob matches nothing (globStatus may return null).
   */
  def glob(glob: String, conf: Configuration, filter: PathFilter = AcceptAllPathFilter): Iterable[FileStatus] = {
    val path = new Path(glob)
    Option(path.getFileSystem(conf).globStatus(path, filter)).map {
      _.toIterable // convert java Array to scala Iterable
    } getOrElse {
      Iterable.empty
    }
  }
  /**
   * @return whether globPath contains non hidden files
   */
  def globHasNonHiddenPaths(globPath: String, conf: Configuration): Boolean = {
    !glob(globPath, conf, HiddenFileFilter).isEmpty
  }
  /**
   * @return whether globPath contains a _SUCCESS file
   */
  def globHasSuccessFile(globPath: String, conf: Configuration): Boolean = {
    !glob(globPath, conf, SuccessFileFilter).isEmpty
  }
}
/**
* This is a base class for File-based sources
*/
abstract class FileSource extends SchemedSource with LocalSourceOverride {
  /**
   * Determines if a path is 'valid' for this source. In strict mode all paths must be valid.
   * In non-strict mode, all invalid paths will be filtered out.
   *
   * Subclasses can override this to validate paths.
   *
   * The default implementation is a quick sanity check to look for missing or empty directories.
   * It is necessary but not sufficient -- there are cases where this will return true but there is
   * in fact missing data.
   *
   * TODO: consider writing a more in-depth version of this method in [[TimePathedSource]] that looks for
   * TODO: missing days / hours etc.
   */
  protected def pathIsGood(p: String, conf: Configuration) = FileSource.globHasNonHiddenPaths(p, conf)
  /** All candidate HDFS paths (possibly globs) this source may read from. */
  def hdfsPaths: Iterable[String]
  // By default, we write to the LAST path returned by hdfsPaths
  def hdfsWritePath = hdfsPaths.last
  /**
   * Creates the Cascading tap for this source, dispatching on the run mode:
   * a local file tap, an HDFS tap (read or write), or a test tap for test modes.
   */
  override def createTap(readOrWrite: AccessMode)(implicit mode: Mode): Tap[_, _, _] = {
    mode match {
      // TODO support strict in Local
      case Local(_) => {
        createLocalTap(sinkMode)
      }
      case hdfsMode @ Hdfs(_, _) => readOrWrite match {
        case Read => createHdfsReadTap(hdfsMode)
        case Write => CastHfsTap(new Hfs(hdfsScheme, hdfsWritePath, sinkMode))
      }
      case _ => {
        // Any other mode: try to build a test tap from the hdfs scheme first,
        // falling back to the local scheme's source fields.
        val tryTtp = Try(TestTapFactory(this, hdfsScheme, sinkMode)).map {
          // these java types are invariant, so we cast here
          _.createTap(readOrWrite)
            .asInstanceOf[Tap[Any, Any, Any]]
        }.orElse {
          Try(TestTapFactory(this, localScheme.getSourceFields, sinkMode)).map {
            _.createTap(readOrWrite)
              .asInstanceOf[Tap[Any, Any, Any]]
          }
        }
        tryTtp match {
          case Success(s) => s
          case Failure(e) => throw new java.lang.IllegalArgumentException(s"Failed to create tap for: $toString, with error: ${e.getMessage}", e)
        }
      }
    }
  }
  // This is only called when Mode.sourceStrictness is true
  protected def hdfsReadPathsAreGood(conf: Configuration) = {
    hdfsPaths.forall { pathIsGood(_, conf) }
  }
  /*
   * This throws InvalidSourceException if:
   * 1) we are in sourceStrictness mode and all sources are not present.
   * 2) we are not in the above, but some source has no input whatsoever
   * TODO this only does something for HDFS now. Maybe we should do the same for LocalMode
   */
  override def validateTaps(mode: Mode): Unit = {
    mode match {
      case Hdfs(strict, conf) => {
        if (strict && (!hdfsReadPathsAreGood(conf))) {
          throw new InvalidSourceException(
            "[" + this.toString + "] Data is missing from one or more paths in: " +
              hdfsPaths.toString)
        } else if (!hdfsPaths.exists { pathIsGood(_, conf) }) {
          //Check that there is at least one good path:
          throw new InvalidSourceException(
            "[" + this.toString + "] No good paths in: " + hdfsPaths.toString)
        }
      }
      case _ => ()
    }
  }
  /*
   * Get all the set of valid paths based on source strictness.
   */
  protected def goodHdfsPaths(hdfsMode: Hdfs) = {
    hdfsMode match {
      //we check later that all the paths are good
      case Hdfs(true, _) => hdfsPaths
      // If there are no matching paths, this is still an error, we need at least something:
      case Hdfs(false, conf) => hdfsPaths.filter{ pathIsGood(_, conf) }
    }
  }
  /** Builds the read tap over the good paths: one Hfs tap, or a multi-source tap for several paths. */
  protected def createHdfsReadTap(hdfsMode: Hdfs): Tap[JobConf, _, _] = {
    val taps: List[Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]]] =
      goodHdfsPaths(hdfsMode)
        .toList.map { path => CastHfsTap(new Hfs(hdfsScheme, path, sinkMode)) }
    taps.size match {
      case 0 => {
        // This case is going to result in an error, but we don't want to throw until
        // validateTaps, so we just put a dummy path to return something so the
        // Job constructor does not fail.
        CastHfsTap(new Hfs(hdfsScheme, hdfsPaths.head, sinkMode))
      }
      case 1 => taps.head
      case _ => new ScaldingMultiSourceTap(taps)
    }
  }
}
/**
 * A MultiSourceTap that reads several HDFS taps as one logical source.
 * A random UUID is used as the identifier so distinct instances never collide within a flow.
 */
class ScaldingMultiSourceTap(taps: Seq[Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]]])
  extends MultiSourceTap[Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]], JobConf, RecordReader[_, _]](taps: _*) {
  // Stable for the lifetime of this instance, unique across instances.
  private final val randomId = UUID.randomUUID.toString
  override def getIdentifier() = randomId
  // NOTE(review): hashCode is overridden here without a matching equals override;
  // presumably the inherited MultiSourceTap equality is compatible — confirm.
  override def hashCode: Int = randomId.hashCode
}
/**
* The fields here are ('offset, 'line)
*/
trait TextSourceScheme extends SchemedSource {
  // The text-encoding to use when writing out the lines (default is UTF-8).
  val textEncoding: String = CHTextLine.DEFAULT_CHARSET
  override def localScheme = new CLTextLine(new Fields("offset", "line"), Fields.ALL, textEncoding)
  override def hdfsScheme = HadoopSchemeInstance(new CHTextLine(CHTextLine.DEFAULT_SOURCE_FIELDS, textEncoding))
}
trait TextLineScheme extends TextSourceScheme with SingleMappable[String] {
  //In textline, 0 is the byte position, the actual text string is in column 1
  override def sourceFields = Dsl.intFields(Seq(1))
}
/**
 * Mix this in for delimited schemes such as TSV or one-separated values
 * By default, TSV is given
 */
trait DelimitedScheme extends SchemedSource {
  //override these as needed:
  val fields = Fields.ALL
  //This is passed directly to cascading where null is interpreted as string
  val types: Array[Class[_]] = null
  val separator = "\\t"
  val skipHeader = false
  val writeHeader = false
  val quote: String = null
  // Whether to throw an exception or not if the number of fields does not match an expected number.
  // If set to false, missing fields will be set to null.
  val strict = true
  // Whether to throw an exception if a field cannot be coerced to the right type.
  // If set to false, then fields that cannot be coerced will be set to null.
  val safe = true
  //These should not be changed:
  override def localScheme = new CLTextDelimited(fields, skipHeader, writeHeader, separator, strict, quote, types, safe)
  override def hdfsScheme = {
    // Guard against a mismatch between the declared fields and the types array before building the scheme.
    assert(
      types == null || fields.size == types.size,
      "Fields [" + fields + "] of different size than types array [" + types.mkString(",") + "]")
    HadoopSchemeInstance(new CHTextDelimited(fields, null, skipHeader, writeHeader, separator, strict, quote, types, safe))
  }
}
trait SequenceFileScheme extends SchemedSource {
  //override these as needed:
  val fields = Fields.ALL
  // TODO Cascading doesn't support local mode yet
  override def hdfsScheme = HadoopSchemeInstance(new CHSequenceFile(fields))
}
/**
* Ensures that a _SUCCESS file is present in the Source path, which must be a glob,
* as well as the requirements of [[FileSource.pathIsGood]]
*/
trait SuccessFileSource extends FileSource {
  // A path is good only if it has visible (non-hidden) files AND the _SUCCESS marker.
  override protected def pathIsGood(p: String, conf: Configuration) =
    FileSource.globHasNonHiddenPaths(p, conf) && FileSource.globHasSuccessFile(p, conf)
}
/**
 * Use this class to add support for Cascading local mode via the Hadoop tap.
 * Put another way, this runs a Hadoop tap outside of Hadoop in the Cascading local mode
 */
trait LocalTapSource extends LocalSourceOverride {
  override def createLocalTap(sinkMode: SinkMode) = new LocalTap(localPath, hdfsScheme, sinkMode).asInstanceOf[Tap[_, _, _]]
}
/** A file source over a fixed list of paths; local mode supports exactly one path. */
abstract class FixedPathSource(path: String*) extends FileSource {
  def localPath = { assert(path.size == 1, "Cannot use multiple input files on local mode"); path(0) }
  def hdfsPaths = path.toList
  // toString embeds the concrete class name plus the path list.
  override def toString = getClass.getName + path
  override def hashCode = toString.hashCode
  // NOTE(review): equality is toString-based and accepts ANY object whose toString matches;
  // consider matching on FixedPathSource for a stricter, symmetric equals.
  override def equals(that: Any): Boolean = (that != null) && (that.toString == toString)
}
/**
* Tab separated value source
*/
case class Tsv(p: String, override val fields: Fields = Fields.ALL,
  override val skipHeader: Boolean = false, override val writeHeader: Boolean = false,
  override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p) with DelimitedScheme
/**
 * Allows the use of multiple Tsv input paths. The Tsv files will
 * be process through your flow as if they are a single pipe. Tsv
 * files must have the same schema.
 * For more details on how multiple files are handled check the
 * cascading docs.
 */
case class MultipleTsvFiles(p: Seq[String], override val fields: Fields = Fields.ALL,
  override val skipHeader: Boolean = false, override val writeHeader: Boolean = false) extends FixedPathSource(p: _*)
  with DelimitedScheme
/**
 * Csv value source
 * separated by commas and quotes wrapping all fields
 */
case class Csv(p: String,
  override val separator: String = ",",
  override val fields: Fields = Fields.ALL,
  override val skipHeader: Boolean = false,
  override val writeHeader: Boolean = false,
  override val quote: String = "\\"",
  override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p) with DelimitedScheme
/**
 * One separated value (commonly used by Pig)
 */
case class Osv(p: String, f: Fields = Fields.ALL,
  override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p)
  with DelimitedScheme {
  override val fields = f
  // Pig's "one separated values" use the \1 control character as the field delimiter.
  override val separator = "\\1"
}
object TextLine {
  // Default encoding is UTF-8
  val defaultTextEncoding: String = CHTextLine.DEFAULT_CHARSET
  val defaultSinkMode: SinkMode = SinkMode.REPLACE
  def apply(p: String, sm: SinkMode = defaultSinkMode, textEncoding: String = defaultTextEncoding): TextLine =
    new TextLine(p, sm, textEncoding)
}
/** Plain text source exposing only the 'line field (see [[TextLineScheme]]). */
class TextLine(p: String, override val sinkMode: SinkMode, override val textEncoding: String) extends FixedPathSource(p) with TextLineScheme {
  // For some Java interop
  def this(p: String) = this(p, TextLine.defaultSinkMode, TextLine.defaultTextEncoding)
}
/**
 * Alternate typed TextLine source that keeps both 'offset and 'line fields.
 */
class OffsetTextLine(filepath: String,
  override val sinkMode: SinkMode,
  override val textEncoding: String)
  extends FixedPathSource(filepath) with Mappable[(Long, String)] with TextSourceScheme {
  override def converter[U >: (Long, String)] =
    TupleConverter.asSuperConverter[(Long, String), U](TupleConverter.of[(Long, String)])
}
/**
 * Companion with construction defaults for [[OffsetTextLine]].
 */
object OffsetTextLine {
  // Default encoding is UTF-8
  val defaultTextEncoding: String = CHTextLine.DEFAULT_CHARSET
  val defaultSinkMode: SinkMode = SinkMode.REPLACE
  def apply(p: String, sm: SinkMode = defaultSinkMode, textEncoding: String = defaultTextEncoding): OffsetTextLine =
    new OffsetTextLine(p, sm, textEncoding)
}
case class SequenceFile(p: String, f: Fields = Fields.ALL, override val sinkMode: SinkMode = SinkMode.REPLACE)
  extends FixedPathSource(p) with SequenceFileScheme with LocalTapSource {
  override val fields = f
}
case class MultipleSequenceFiles(p: String*) extends FixedPathSource(p: _*) with SequenceFileScheme with LocalTapSource
case class MultipleTextLineFiles(p: String*) extends FixedPathSource(p: _*) with TextLineScheme
/**
* Delimited files source
* allowing to override separator and quotation characters and header configuration
*/
case class MultipleDelimitedFiles(f: Fields,
  override val separator: String,
  override val quote: String,
  override val skipHeader: Boolean,
  override val writeHeader: Boolean,
  p: String*) extends FixedPathSource(p: _*) with DelimitedScheme {
  // Bind the caller-supplied field schema for this source.
  override val fields = f
}
| oeddyo/scalding | scalding-core/src/main/scala/com/twitter/scalding/FileSource.scala | Scala | apache-2.0 | 15,182 |
package io.buoyant.linkerd
package examples
import io.buoyant.config.Parser
import java.io.{FilenameFilter, File}
import org.scalatest.FunSuite
import scala.io.Source
/** Parses every example YAML config under linkerd/examples and checks it loads and re-serializes. */
class ExamplesTest extends FunSuite {
  val examplesDir = new File("linkerd/examples")
  // NOTE(review): listFiles returns null if the directory does not exist — the for loop below would NPE.
  val files = examplesDir.listFiles(new FilenameFilter {
    override def accept(dir: File, name: String): Boolean = name.endsWith(".yaml")
  })
  val mapper = Parser.jsonObjectMapper(Linker.LoadedInitializers.iter)
  for (file <- files) {
    // Example tests are running out of memory in CI and so have been temporarily been disabled to
    // unblock CI. This needs to be investigated and fixed.
    // NOTE(review): the comment above says "disabled", but test(...) is still registered here.
    test(file.getName) {
      val source = Source.fromFile(file)
      try {
        val lines = source.getLines().toSeq
        // Files whose first line is "#notest" are deliberately skipped.
        val firstLine = lines.headOption
        if (!firstLine.contains("#notest")) {
          val yaml = lines.mkString("\\n")
          val parsed = Linker.parse(yaml)
          // mk() exercises instantiation; its result is unused beyond not throwing.
          val loaded = parsed.mk()
          assert(mapper.writeValueAsString(parsed).nonEmpty)
        }
      } finally source.close()
    }
  }
}
| linkerd/linkerd | linkerd/examples/src/test/scala/io/buoyant/linkerd/examples/ExamplesTest.scala | Scala | apache-2.0 | 1,101 |
package se.citerus.dddsample.domain.shared
;
/**
* AND specification, used to create a new specifcation that is the AND of two other specifications.
*/
class AndSpecification[T](val spec1: Specification[T], val spec2: Specification[T]) extends AbstractSpecification[T] {
  /**
   * True when both wrapped specifications are satisfied by `t`.
   *
   * @param t the candidate object to test
   * @return true iff both `spec1` and `spec2` are satisfied by `t`
   */
  def isSatisfiedBy(t: T): Boolean =
    spec1.isSatisfiedBy(t) && spec2.isSatisfiedBy(t)
}
| oluies/ddd-sample-scala | src/main/scala/se/citerus/dddsample/domain/shared/AndSpecification.scala | Scala | mit | 432 |
/*
* @author Philip Stutz
*
* Copyright 2014 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.triplerush.sparql
import com.signalcollect.triplerush.util.ResultBindingsHashSet
/** Wraps an iterator of encoded result bindings, yielding each distinct binding array exactly once. */
class DistinctIterator(encodedResultIterator: Iterator[Array[Int]]) extends Iterator[Array[Int]] {
  // Set of bindings already emitted; used to suppress duplicates from the underlying iterator.
  val alreadyReportedBindings = new ResultBindingsHashSet(128)
  // One-element lookahead buffer: the next distinct binding to report, or null when exhausted.
  var distinctNext: Array[Int] = if (encodedResultIterator.hasNext) {
    val next = encodedResultIterator.next
    alreadyReportedBindings.add(next)
    next
  } else {
    null.asInstanceOf[Array[Int]]
  }
  // Exhausted exactly when the lookahead buffer is empty.
  def hasNext: Boolean = {
    distinctNext != null
  }
def next: Array[Int] = {
val nextThatWeWillReport = distinctNext
distinctNext = null.asInstanceOf[Array[Int]]
// Refill the next slot.
while (distinctNext == null && encodedResultIterator.hasNext) {
val next = encodedResultIterator.next
val alreadyReported = alreadyReportedBindings.add(next)
if (!alreadyReported) {
distinctNext = next
}
}
nextThatWeWillReport
}
} | jacqueslk/triplerush-filter | src/main/scala/com/signalcollect/triplerush/sparql/DistinctIterator.scala | Scala | apache-2.0 | 1,627 |
import scala.collection.mutable.Stack
object Solution {
  /**
   * HackerRank "Simple Text Editor": replay q operations against an undo stack.
   * The stack holds full editor states; the top is the current text and popping it is undo.
   * Ops: "1 s" append s, "2 n" delete last n chars, "3 k" print k-th char (1-based), "4" undo.
   */
  def main(args: Array[String]): Unit = {
    val it = scala.io.Source.stdin.getLines
    val q = it.next.toInt
    val state = Stack[String]()
    var i = 0
    while(i < q) {
      val op = it.next.split(" ")
      op.length match {
        case 1 => state.pop() // "4": undo by discarding the latest state
        case 2 => op match {
          case Array("1",str) => state.push(state.headOption.getOrElse("") + str) // append
          case Array("2",n) => state.push(state.top.dropRight(n.toInt)) // delete last n chars
          case Array("3",n) => println(state.top.charAt(n.toInt - 1)) // print; state unchanged
        }
      }
      // Bug fix: the counter was never incremented, so the loop could only terminate
      // by exhausting stdin with a NoSuchElementException after the q-th operation.
      i += 1
    }
  }
}
| marcos-sb/hacker-rank | data-structures/stacks/simple-text-editor/Solution.scala | Scala | apache-2.0 | 625 |
package com.guanghua.kafka.input
import com.guanghua.Tool.kafkaProductorTool
/**
* Created by admin2 on 2017/10/16.
*/
/** Runnable that publishes the extraction SQL for task appointment results to its Kafka topic. */
object YWHBASE_TASK_APPOINTMENT_RESULT extends Runnable{
  override def run(): Unit ={
    val topic = "YWHBASE_TASK_APPOINTMENT_RESULT_TOPIC"
    // The SQL below is a runtime payload consumed downstream; do not reformat or translate it.
    val sql =
      """
        |select t1.RESULT_ID||t1.APPOINTMENT_ID||t1.FIRST_OPEN as rowkey,
        | t3.REASON,
        | t4.CAPACITY,
        | t4.CATEGORY,
        | t4.DISTRICT,
        | t4.DOOR_NO,
        | t4.FILE_ID,
        | t4.FLOOR,
        | t4.ISDELETE,
        | t4.ISZHONGXIN,
        | t4.MANNED,
        | t4.PIC,
        | t4.PIC_CONTACT,
        | t4.PROPERTY_RIGHT,
        | --达标、未达标(0达标/1未达标)
        | case t4.QUALIFIED_LEVEL
        | when '0' then '达标'
        | when '1' then '未达标'
        | else ''
        | end as QUALIFIED_LEVEL,
        | t4.REGION_ID,
        | t4.ROOM_NO,
        | t4.ROOM_STATE,
        | t4.ROOMDAIWEI,
        | t4.ROOMLEIXING,
        | t4.SQUARE_MEASURE,
        | t4.STATE,
        | t4.STRUCTURE,
        | t4.SUB_DISTRICT,
        | t4.USAGE,
        | t2.AFFACT_ID,
        | t2.APPLYER_ID,
        | t2.APPOINTMENT_END_DATE,
        | t2.APPOINTMENT_NO,
        | t2.CANCLE_REASON,
        | t2.CONSTRUCTION_NAME,
        | t2.CONSTRUCTOR,
        | t2.CONSTRUCTOR_ID,
        | t2.DELETED,
        | t2.END_TIME,
        | t2.EXPERIENCE_USE_TIME,
        | t2.REASON_ID,
        | t2.ROOM_ID,
        | t2.SCORE,
        | t2.SKILL_REQUIRE,
        | t2.START_TIME,
        | case t2.STATUS
        | when 0 then '待执行'
        | when 1 then '已执行'
        | when 2 then '已取消'
        | when 3 then '开始执行'
        | when 4 then '执行中'
        | else ''
        | end as STATUS,
        | t2.TASK_APPOINTMENT_SERIAL_NO,
        | t2.TASK_COMPLEXITY_LEVEL,
        | t2.TASK_CONTENT,
        | t2.TASK_INFO,
        | t2.TASK_NAME,
        | t2.TASK_NO,
        | t2.TASK_POSITION_LATITUDE,
        | t2.TASK_POSITION_LONGITUDE,
        | case t2.TYPE_ID
        | when 1 then '远程开门预约'
        | when 2 then '现场随工预约'
        | else ''
        | end TYPE_ID,
        | t2.VERIFICATIONCODE,
        | t1.APPOINTMENT_ID,
        | t1.DOOR_STATUS,
        | t1.FIRST_OPEN,
        | t1.LOCATION_TIME,
        | t1.RESULT_ID,
        | t1.SECOND_OPEN
        | from ywgl.TASK_APPOINTMENT_RESULT t1
        | left join ywgl.TASK_APPOINTMENT t2
        | on t1.appointment_id = t2.appointment_id
        | left join ywgl.D_TASK_REASON t3
        | on t3.reason_id = t2.reason_id
        | left join tlmanage.ROOM t4
        | on t4.room_id = t2.room_id
        |where to_date(t1.first_open,'YYYY-MM-DD HH24:mi:ss')>date'2014-01-01'
      """.stripMargin
    (new kafkaProductorTool).sendMsg(topic,sql,Array[AnyRef]())
  }
}
| androidbestcoder/hadoop_own_exercise | src/main/scala/com/guanghua/kafka/input/YWHBASE_TASK_APPOINTMENT_RESULT.scala | Scala | apache-2.0 | 3,300 |
/**
* Copyright 2015 Zaradai
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.zaradai.snowy
import org.scalatest.{FeatureSpec, Matchers, OptionValues}
class SolverTest extends FeatureSpec with Matchers with OptionValues {
  // 2x2 fixture grid with values 10, 20, 30, 40 laid out left-to-right, top-to-bottom.
  private val nodeGrid = new NodeGrid(2, 2, Array(10, 20, 30, 40))
  private val topLeft = nodeGrid.nodeAt(0, 0).get
  private val topRight = nodeGrid.nodeAt(0, 1).get
  private val bottomLeft = nodeGrid.nodeAt(1, 0).get
  private val bottomRight = nodeGrid.nodeAt(1, 1).get
  feature("Node Grid contains nodes") {
    scenario("Can't build invalid grid") {
      // Value array shorter than rows*cols must be rejected.
      intercept[IllegalArgumentException] {
        new NodeGrid(2, 2, Array(0, 1, 2))
      }
    }
    scenario("Should only return valid nodes") {
      nodeGrid.nodeAt(0, 0) should be ('defined)
      nodeGrid.nodeAt(0, 1) should be ('defined)
      nodeGrid.nodeAt(1, 0) should be ('defined)
      nodeGrid.nodeAt(1, 1) should be ('defined)
      nodeGrid.nodeAt(-1, -1) should be (None)
      nodeGrid.nodeAt(-1, 0) should be (None)
      nodeGrid.nodeAt(0, 2) should be (None)
      nodeGrid.nodeAt(2, 2) should be (None)
    }
    scenario("Should be based on value list LTR") {
      nodeGrid.nodeAt(0, 0).get.value should be (10)
      nodeGrid.nodeAt(0, 1).get.value should be (20)
      nodeGrid.nodeAt(1, 0).get.value should be (30)
      nodeGrid.nodeAt(1, 1).get.value should be (40)
    }
    scenario("Should Filter valid destination Nodes") {
      topLeft.Paths should (contain(topRight) and contain(bottomLeft) and have size 2)
      topRight.Paths should (contain(topLeft) and contain(bottomRight) and have size 2)
      bottomLeft.Paths should (contain(topLeft) and contain(bottomRight) and have size 2)
      bottomRight.Paths should (contain(bottomLeft) and contain(topRight) and have size 2)
    }
    scenario("Seed with create a path for all nodes") {
      val paths = nodeGrid.seedPath
      // Only node(1,1) can be a root node on a path
      paths should have size 1
      paths.filter(p => p.length == 1 && p.drop == 0) should have size 1
    }
  }
  feature("Paths") {
    scenario("Cannot create path with no nodes") {
      intercept[IllegalArgumentException] {
        Path(List())
      }
    }
    scenario("Single Path has size 1") {
      Path(List(topLeft)).length should be (1)
      Path(List(topLeft)).drop should be (0)
    }
    scenario("Should navigate valid nodes") {
      Path(List(topLeft)).traverse should have size 0
      Path(List(topRight)).traverse should have size 1
      Path(List(bottomLeft)).traverse should have size 1
      Path(List(bottomRight)).traverse should have size 2
    }
    scenario("Navigated path should have attributes") {
      var path = Path(List(topRight)).traverse.head
      path.drop should be (10)
      path.length should be (2)
      path = Path(List(bottomLeft)).traverse.head
      path.drop should be (20)
      path.length should be (2)
      val paths = Path(List(bottomRight)).traverse.sortBy(_.drop)
      paths.head.drop should be (10)
      paths.head.length should be (2)
      paths(1).drop should be (20)
      paths(1).length should be (2)
    }
  }
  feature("Solver") {
    scenario("Can solve given spec") {
      val grid = GridFactory.from(4, 4, Array(4, 8, 7, 3, 2, 5, 9, 3, 6, 3, 2, 5, 4, 4, 1, 6))
      val res = Solver.solve(grid)
      res.length should be (5)
      res.drop should be (8)
      printResult(res)
    }
    // NOTE(review): this scenario fetches over the network and will fail offline;
    // consider bundling the map as a local fixture.
    scenario("Can solve from web source") {
      val grid = GridFactory.from("http://s3-ap-southeast-1.amazonaws.com/geeks.redmart.com/coding-problems/map.txt")
      val res = Solver.solve(grid)
      res.length should be (15)
      res.drop should be (1422)
      printResult(res)
    }
  }
  // Prints a one-line summary of a solved path for manual inspection.
  private def printResult(res: Path): Unit = {
    println(s"Solution Solved: Length = ${res.length}, Drop = ${res.drop} ")
  }
}
| zaradai/snowy | src/test/scala/com/zaradai/snowy/SolverTest.scala | Scala | apache-2.0 | 4,387 |
package uk.ac.ncl.openlab.intake24.services.systemdb.pairwiseAssociations
import uk.ac.ncl.openlab.intake24.errors.{UnexpectedDatabaseError, UpdateError}
import uk.ac.ncl.openlab.intake24.pairwiseAssociationRules.PairwiseAssociationRules
import scala.concurrent.{Future, Promise}
/**
* Created by Tim Osadchiy on 02/10/2017.
*/
trait PairwiseAssociationsDataService {
  /** Loads the stored association rules, keyed by locale id. */
  def getAssociations(): Future[Map[String, PairwiseAssociationRules]]
  /** Replaces the stored rules for every locale in the given map. */
  def writeAssociations(localeAssociations: Map[String, PairwiseAssociationRules]): Future[Either[UpdateError, Unit]]
  /** Records transaction item lists for one locale; synchronous, surfacing any database error. */
  def addTransactions(locale: String, transactions: Seq[Seq[String]]): Either[UnexpectedDatabaseError, Unit]
}
| digitalinteraction/intake24 | SystemDataServices/src/main/scala/uk/ac/ncl/openlab/intake24/services/systemdb/pairwiseAssociations/PairwiseAssociationsDataService.scala | Scala | apache-2.0 | 680 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import kafka.api.TopicMetadata
import org.junit.Assert._
import kafka.zk.ZooKeeperTestHarness
import kafka.utils.TestUtils._
import kafka.utils.TestUtils
import kafka.cluster.Broker
import kafka.client.ClientUtils
import kafka.server.{KafkaConfig, KafkaServer}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.InvalidReplicaAssignmentException
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.junit.{After, Before, Test}
/** Integration tests for adding partitions to existing topics via AdminZkClient on a 4-broker cluster. */
class AddPartitionsTest extends ZooKeeperTestHarness {
  var configs: Seq[KafkaConfig] = null
  var servers: Seq[KafkaServer] = Seq.empty[KafkaServer]
  var brokers: Seq[Broker] = Seq.empty[Broker]
  val partitionId = 0
  // Fixture topics with their initial partition -> replica assignments.
  val topic1 = "new-topic1"
  val topic1Assignment = Map(0->Seq(0,1))
  val topic2 = "new-topic2"
  val topic2Assignment = Map(0->Seq(1,2))
  val topic3 = "new-topic3"
  val topic3Assignment = Map(0->Seq(2,3,0,1))
  val topic4 = "new-topic4"
  val topic4Assignment = Map(0->Seq(0,3))
  val topic5 = "new-topic5"
  // Deliberately has no partition 0, to exercise the missing-partition error path.
  val topic5Assignment = Map(1->Seq(0,1))
  @Before
  override def setUp() {
    super.setUp()
    configs = (0 until 4).map(i => KafkaConfig.fromProps(TestUtils.createBrokerConfig(i, zkConnect, enableControlledShutdown = false)))
    // start all the servers
    servers = configs.map(c => TestUtils.createServer(c))
    brokers = servers.map(s => TestUtils.createBroker(s.config.brokerId, s.config.hostName, TestUtils.boundPort(s)))
    // create topics first
    createTopic(zkClient, topic1, partitionReplicaAssignment = topic1Assignment, servers = servers)
    createTopic(zkClient, topic2, partitionReplicaAssignment = topic2Assignment, servers = servers)
    createTopic(zkClient, topic3, partitionReplicaAssignment = topic3Assignment, servers = servers)
    createTopic(zkClient, topic4, partitionReplicaAssignment = topic4Assignment, servers = servers)
  }
  @After
  override def tearDown() {
    TestUtils.shutdownServers(servers)
    super.tearDown()
  }
  // A manual assignment whose replication factor differs from the existing one must be rejected.
  @Test
  def testWrongReplicaCount(): Unit = {
    try {
      adminZkClient.addPartitions(topic1, topic1Assignment, adminZkClient.getBrokerMetadatas(), 2,
        Some(Map(0 -> Seq(0, 1), 1 -> Seq(0, 1, 2))))
      fail("Add partitions should fail")
    } catch {
      case _: InvalidReplicaAssignmentException => //this is good
    }
  }
  // An existing assignment that lacks partition 0 must be reported as inconsistent.
  @Test
  def testMissingPartition0(): Unit = {
    try {
      adminZkClient.addPartitions(topic5, topic5Assignment, adminZkClient.getBrokerMetadatas(), 2,
        Some(Map(1 -> Seq(0, 1), 2 -> Seq(0, 1, 2))))
      fail("Add partitions should fail")
    } catch {
      case e: AdminOperationException => //this is good
        assertTrue(e.getMessage.contains("Unexpected existing replica assignment for topic 'new-topic5', partition id 0 is missing"))
    }
  }
  // Growing topic1 from 1 to 3 partitions: leaders elected and metadata propagated.
  @Test
  def testIncrementPartitions(): Unit = {
    adminZkClient.addPartitions(topic1, topic1Assignment, adminZkClient.getBrokerMetadatas(), 3)
    // wait until leader is elected
    val leader1 = waitUntilLeaderIsElectedOrChanged(zkClient, topic1, 1)
    val leader2 = waitUntilLeaderIsElectedOrChanged(zkClient, topic1, 2)
    val leader1FromZk = zkClient.getLeaderForPartition(new TopicPartition(topic1, 1)).get
    val leader2FromZk = zkClient.getLeaderForPartition(new TopicPartition(topic1, 2)).get
    assertEquals(leader1, leader1FromZk)
    assertEquals(leader2, leader2FromZk)
    // read metadata from a broker and verify the new topic partitions exist
    TestUtils.waitUntilMetadataIsPropagated(servers, topic1, 1)
    TestUtils.waitUntilMetadataIsPropagated(servers, topic1, 2)
    val listenerName = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)
    val metadata = ClientUtils.fetchTopicMetadata(Set(topic1), brokers.map(_.brokerEndPoint(listenerName)),
      "AddPartitionsTest-testIncrementPartitions", 2000, 0).topicsMetadata
    val metaDataForTopic1 = metadata.filter(p => p.topic.equals(topic1))
    val partitionDataForTopic1 = metaDataForTopic1.head.partitionsMetadata.sortBy(_.partitionId)
    assertEquals(partitionDataForTopic1.size, 3)
    assertEquals(partitionDataForTopic1(1).partitionId, 1)
    assertEquals(partitionDataForTopic1(2).partitionId, 2)
    val replicas = partitionDataForTopic1(1).replicas
    assertEquals(replicas.size, 2)
    assert(replicas.contains(partitionDataForTopic1(1).leader.get))
  }
  // Explicit replica placement for the added partitions is honored.
  @Test
  def testManualAssignmentOfReplicas(): Unit = {
    // Add 2 partitions
    adminZkClient.addPartitions(topic2, topic2Assignment, adminZkClient.getBrokerMetadatas(), 3,
      Some(Map(0 -> Seq(1, 2), 1 -> Seq(0, 1), 2 -> Seq(2, 3))))
    // wait until leader is elected
    val leader1 = waitUntilLeaderIsElectedOrChanged(zkClient, topic2, 1)
    val leader2 = waitUntilLeaderIsElectedOrChanged(zkClient, topic2, 2)
    val leader1FromZk = zkClient.getLeaderForPartition(new TopicPartition(topic2, 1)).get
    val leader2FromZk = zkClient.getLeaderForPartition(new TopicPartition(topic2, 2)).get
    assertEquals(leader1, leader1FromZk)
    assertEquals(leader2, leader2FromZk)
    // read metadata from a broker and verify the new topic partitions exist
    TestUtils.waitUntilMetadataIsPropagated(servers, topic2, 1)
    TestUtils.waitUntilMetadataIsPropagated(servers, topic2, 2)
    val metadata = ClientUtils.fetchTopicMetadata(Set(topic2),
      brokers.map(_.brokerEndPoint(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT))),
      "AddPartitionsTest-testManualAssignmentOfReplicas", 2000, 0).topicsMetadata
    val metaDataForTopic2 = metadata.filter(_.topic == topic2)
    val partitionDataForTopic2 = metaDataForTopic2.head.partitionsMetadata.sortBy(_.partitionId)
    assertEquals(3, partitionDataForTopic2.size)
    assertEquals(1, partitionDataForTopic2(1).partitionId)
    assertEquals(2, partitionDataForTopic2(2).partitionId)
    val replicas = partitionDataForTopic2(1).replicas
    assertEquals(2, replicas.size)
    assertTrue(replicas.head.id == 0 || replicas.head.id == 1)
    assertTrue(replicas(1).id == 0 || replicas(1).id == 1)
  }
  // Automatic placement spreads leaders/replicas across all 4 brokers.
  @Test
  def testReplicaPlacementAllServers(): Unit = {
    adminZkClient.addPartitions(topic3, topic3Assignment, adminZkClient.getBrokerMetadatas(), 7)
    // read metadata from a broker and verify the new topic partitions exist
    TestUtils.waitUntilMetadataIsPropagated(servers, topic3, 1)
    TestUtils.waitUntilMetadataIsPropagated(servers, topic3, 2)
    TestUtils.waitUntilMetadataIsPropagated(servers, topic3, 3)
    TestUtils.waitUntilMetadataIsPropagated(servers, topic3, 4)
    TestUtils.waitUntilMetadataIsPropagated(servers, topic3, 5)
    TestUtils.waitUntilMetadataIsPropagated(servers, topic3, 6)
    val metadata = ClientUtils.fetchTopicMetadata(Set(topic3),
      brokers.map(_.brokerEndPoint(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT))),
      "AddPartitionsTest-testReplicaPlacementAllServers", 2000, 0).topicsMetadata
    val metaDataForTopic3 = metadata.find(p => p.topic == topic3).get
    validateLeaderAndReplicas(metaDataForTopic3, 0, 2, Set(2, 3, 0, 1))
    validateLeaderAndReplicas(metaDataForTopic3, 1, 3, Set(3, 2, 0, 1))
    validateLeaderAndReplicas(metaDataForTopic3, 2, 0, Set(0, 3, 1, 2))
    validateLeaderAndReplicas(metaDataForTopic3, 3, 1, Set(1, 0, 2, 3))
    validateLeaderAndReplicas(metaDataForTopic3, 4, 2, Set(2, 3, 0, 1))
    validateLeaderAndReplicas(metaDataForTopic3, 5, 3, Set(3, 0, 1, 2))
    validateLeaderAndReplicas(metaDataForTopic3, 6, 0, Set(0, 1, 2, 3))
  }
  // Automatic placement with replication factor 2 over a subset of brokers.
  @Test
  def testReplicaPlacementPartialServers(): Unit = {
    adminZkClient.addPartitions(topic2, topic2Assignment, adminZkClient.getBrokerMetadatas(), 3)
    // read metadata from a broker and verify the new topic partitions exist
    TestUtils.waitUntilMetadataIsPropagated(servers, topic2, 1)
    TestUtils.waitUntilMetadataIsPropagated(servers, topic2, 2)
    val metadata = ClientUtils.fetchTopicMetadata(Set(topic2),
      brokers.map(_.brokerEndPoint(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT))),
      "AddPartitionsTest-testReplicaPlacementPartialServers", 2000, 0).topicsMetadata
    val metaDataForTopic2 = metadata.find(p => p.topic == topic2).get
    validateLeaderAndReplicas(metaDataForTopic2, 0, 1, Set(1, 2))
    validateLeaderAndReplicas(metaDataForTopic2, 1, 2, Set(0, 2))
    validateLeaderAndReplicas(metaDataForTopic2, 2, 3, Set(1, 3))
  }
  // Shared assertion helper: the partition exists with the expected leader id and replica set.
  def validateLeaderAndReplicas(metadata: TopicMetadata, partitionId: Int, expectedLeaderId: Int, expectedReplicas: Set[Int]) = {
    val partitionOpt = metadata.partitionsMetadata.find(_.partitionId == partitionId)
    assertTrue(s"Partition $partitionId should exist", partitionOpt.isDefined)
    val partition = partitionOpt.get
    assertTrue("Partition leader should exist", partition.leader.isDefined)
    assertEquals("Partition leader id should match", expectedLeaderId, partition.leader.get.id)
    assertEquals("Replica set should match", expectedReplicas, partition.replicas.map(_.id).toSet)
  }
}
| MyPureCloud/kafka | core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala | Scala | apache-2.0 | 9,857 |
package controllers
import javax.inject._
import play.api._
import play.api.mvc._
import play.api.libs.json._
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import com.github.tototoshi.play2.scalate._
import models.JsonFormatsTemplate._
import models._
import daos._
import services.RenderService
class TemplateController @Inject() (
    cc: ControllerComponents,
    render: RenderService,
    navigationDAO: NavigationDAO
) extends AbstractController(cc) {

  /** Body parser that reads the request body as `A`, answering 400 with the JSON validation errors on failure. */
  def validateJson[A: Reads] = BodyParsers.parse.json.validate(_.validate[A].asEither.left.map(e => BadRequest(JsError.toJson(e))))

  /** Renders the simple template, looking the navigation up by its configured name. */
  def getSimpleTemplate = Action.async(validateJson[Template]) { request =>
    val template = request.body
    // Both the Some and None branches render the same view, so the Option
    // returned by the DAO can be threaded straight through.
    navigationDAO.find(template.navigationData.navigationName).map { navigationOpt =>
      Ok(render.buildSimpleHtml(navigationOpt, template.templateData, template.navigationData.active))
    }
  }

  /** Renders the template variant that embeds the navigation widget. */
  def getTemplateWithNavigationWidget = Action.async(validateJson[Template]) { request =>
    val html = render.buildWithNavigationWidget(request.body.templateData)
    Future.successful(Ok(html))
  }

  /** Renders the error page for the posted template data. */
  def getErrorTemplate = Action.async(validateJson[TemplateData]) { request =>
    val html = render.buildErrorHtml(request.body)
    Future.successful(Ok(html))
  }

  /** Renders the simple template, looking the navigation up by the active path. */
  def getTemplate = Action.async(validateJson[Template]) { request =>
    val template = request.body
    val path = template.navigationData.active
    Logger.debug(path)
    navigationDAO.findByPath(path).map { navigationOpt =>
      Ok(render.buildSimpleHtml(navigationOpt, template.templateData, path))
    }
  }
}
| Viva-con-Agua/dispenser | app/controllers/TemplateController.scala | Scala | gpl-3.0 | 1,975 |
/**
* Copyright (C) 2019 Inera AB (http://www.inera.se)
*
* This file is part of statistik (https://github.com/sklintyg/statistik).
*
* statistik is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* statistik is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package se.inera.statistics.gatling
import io.gatling.core.Predef._
object InloggadSjukskrivningsgrad {

  /** Gatling call fetching degree-of-sick-leave statistics for the user's care provider. */
  def exec(user: Login.User) = {
    val vardgivare = user.vardgivare
    RestCall.get(
      s"getDegreeOfSickLeaveStatistics: $vardgivare",
      s"${Conf.uri}/api/verksamhet/getDegreeOfSickLeaveStatistics?vgid=$vardgivare")
  }
}
| sklintyg/statistik | gatling/src/test/scala/se/inera/statistics/gatling/InloggadSjukskrivningsgrad.scala | Scala | lgpl-3.0 | 1,084 |
package com.twitter.finagle.netty4.channel
import com.twitter.finagle.Stack
import com.twitter.finagle.decoder.Decoder
import com.twitter.finagle.netty4.codec.BufCodec
import com.twitter.finagle.netty4.decoder.DecoderHandler
import io.netty.channel._
private[netty4] object Netty4ClientChannelInitializer {
  // Names used to register and later look up handlers in the Netty pipeline.
  val BufCodecKey = "bufCodec"
  val DecoderKey = "decoder"
  val WriteTimeoutHandlerKey = "writeTimeout"
  val ReadTimeoutHandlerKey = "readTimeout"
  val ConnectionHandlerKey = "connectionHandler"
  val ChannelStatsHandlerKey = "channelStats"
  val ChannelRequestStatsHandlerKey = "channelRequestStats"
  val ChannelLoggerHandlerKey = "channelLogger"
}
/**
* Client channel initialization logic.
*
* @param params configuration parameters.
* @param decoderFactory initialize per-channel decoder for emitting messages.
*/
/**
 * Client channel initialization logic.
 *
 * @param params configuration parameters.
 * @param decoderFactory initialize per-channel decoder for emitting messages.
 */
private[netty4] class Netty4ClientChannelInitializer[T](
  params: Stack.Params,
  decoderFactory: Option[() => Decoder[T]] = None
) extends AbstractNetty4ClientChannelInitializer(params) {
  import Netty4ClientChannelInitializer._

  override def initChannel(ch: Channel): Unit = {
    super.initChannel(ch)

    // Handlers appended after those installed by the superclass.
    // first => last: requests travel last to first, responses first to last,
    // i.e. [pipeline from super.initChannel] => bufCodec => decoder.
    val pipeline = ch.pipeline
    pipeline.addLast(BufCodecKey, BufCodec)
    decoderFactory match {
      case Some(mkDecoder) => pipeline.addLast(DecoderKey, new DecoderHandler(mkDecoder()))
      case None            => // no framing decoder configured for this client
    }
  }
}
| mkhq/finagle | finagle-netty4/src/main/scala/com/twitter/finagle/netty4/channel/Netty4ClientChannelInitializer.scala | Scala | apache-2.0 | 1,518 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package entities.characters.player.perks.abilities.potions
import com.anathema_roguelike.entities.characters.perks.PerkGroup
import com.anathema_roguelike.entities.characters.player.perks.abilities.Ability
// NOTE(review): empty perk passing null name/description to PerkGroup(null, null) —
// looks like a placeholder; confirm the null constructor arguments are intended.
class SlowingPoison() extends PerkGroup(null, null) with Ability {
} | carlminden/anathema-roguelike | src/com/anathema_roguelike/entities/characters/player/perks/abilities/potions/SlowingPoison.scala | Scala | gpl-3.0 | 1,154 |
package edu.gemini.model.p1.immutable
/** Common base for Michelle blueprints; fixes the instrument to Michelle. */
trait MichelleBlueprintBase extends GeminiBlueprintBase {
  def instrument = Instrument.Michelle
} | arturog8m/ocs | bundle/edu.gemini.model.p1/src/main/scala/edu/gemini/model/p1/immutable/MichelleBlueprintBase.scala | Scala | bsd-3-clause | 137 |
package beam.experiment
import java.io.IOException
import java.nio.file.{Files, Path, Paths}
import beam.sim.BeamHelper
/**
 * Runs BEAM once for every `.conf` file found (up to two directory levels deep)
 * under the directory given as the first program argument.
 */
class RunExperiments extends App with BeamHelper {
  // Root directory to scan for experiment configuration files.
  val dirPath: String = args(0)
  val maxDepth = 2
  var stream: java.util.stream.Stream[Path] = _

  try {
    // BUG FIX: Path#endsWith compares whole path-name elements, so the previous
    // predicate `path.endsWith(".conf")` only matched a file literally named
    // ".conf" and silently skipped every real config such as "beamville.conf".
    // Compare the textual suffix instead.
    stream = Files.find(Paths.get(dirPath), maxDepth, (path: Path, _) => path.toString.endsWith(".conf"))
    stream.forEach(fileName => {
      logger.info(s"Going to run config $fileName")
      runBeamUsing(Array("--config", fileName.toString))
    })
  } catch {
    case e: IOException =>
      logger.error("Error while executing experiment.", e)
  } finally if (stream != null) stream.close()
}
| colinsheppard/beam | src/main/scala/beam/experiment/RunExperiments.scala | Scala | gpl-3.0 | 676 |
package out
object Out {
  /** Writes `string` followed by a newline to standard output. */
  def println(string: String): Unit = System.out.println(string)
}
| wiiam/IrcBot | src/main/scala/out/Out.scala | Scala | agpl-3.0 | 97 |
package kofre.dotbased
import kofre.IdUtil.Id
import kofre.dotbased.DotStore.*
import kofre.causality.{CausalContext, Dot}
import kofre.dotbased.AddWinsSet
import kofre.{IdUtil, Lattice}
/**
 * Add-wins set CRDT: each element is tagged with the dots of its surviving adds,
 * and `context` records all dots ever observed. Concurrent add/remove resolves
 * in favour of the add.
 *
 * Methods suffixed with Δ return a delta; the unsuffixed variants merge the
 * delta into `this` via the lattice.
 */
case class AddWinsSet[A](store: Map[A, Set[Dot]], context: CausalContext) {
  // (updatesCurrent[Set[(id, dot)], knownPast[Set[dot]], newData[Set[(id,data)])
  // a delta always includes new (id,dot) pairs, the known causal context for the modified ids as well as the new data elements

  /** Adds a value conceptually from a new random replica */
  // TODO: this … is probably not a good idea
  def addRandom(e: A): AddWinsSet[A] = {
    val id = IdUtil.genId()
    addΔ(e, id)
  }

  def add(element: A, replicaID: Id): AddWinsSet[A] = Lattice.merge(this, addΔ(element, replicaID))

  /** Adding an element adds it to the current dot store as well as to the causal context (past). */
  def addΔ(element: A, replicaID: Id): AddWinsSet[A] = {
    // Fresh dot for this add; the delta's context covers the new dot plus any
    // dots previously associated with `element`, so earlier adds are superseded.
    val dot = DotStore.next(replicaID, context)
    val onlyDot = Set(dot)
    AddWinsSet(Map(element -> onlyDot), CausalContext.fromSet(store.get(element).fold(onlyDot)(_ + dot)))
    // TODO: potential optimization
    // this is what the paper does:
    // (Set((id, dot)), past.getOrElse(id, Set()) + dot, Set((id, e)))
    // this is sufficient in my opinion:
    // for adds we don't have to know (in the CC) conflicting adds or removes for this element because adds win anyway
    // AddWinsSet(Map(id -> Set(dot)), Set(dot), Map(e -> id))
  }

  /** Merging removes all elements the other side should known (based on the causal context),
    * but does not contain.
    * Thus, the delta for removal is the empty map,
    * with the dot of the removed element in the context.
    */
  def removeΔ(e: A): AddWinsSet[A] = AddWinsSet[A](Map.empty, CausalContext.fromSet(store.getOrElse(e, Set.empty)))

  def remove(element: A): AddWinsSet[A] = Lattice.merge(this, removeΔ(element))

  // Removal delta covering every dot currently in the store: merging it
  // eliminates all present elements.
  def clear: AddWinsSet[A] = AddWinsSet[A](Map(), CausalContext.fromSet(DotStore[Map[A, Set[Dot]]].dots(store)))

  /** Elements currently present. */
  def toSet: Set[A] = store.keySet

  def contains(e: A): Boolean = store.contains(e)
}
//trait CausalCRDT[TCausal, TDotStore] {
// def apply(causal: Causal[TDotStore]): TCausal
//
// def dotStore(a: TCausal): TDotStore
//
// def causalContext(a: TCausal): Set[Dot]
//
// def merge(left: TCausal, right: TCausal)(implicit ev: DotStore[TDotStore]): TCausal = {
// def mkCausal(v: TCausal): Causal[TDotStore] = Causal(dotStore(v), causalContext(v))
// apply(DotStore[TDotStore].merge(mkCausal(left), mkCausal(right)))
// }
//}
object AddWinsSet {

  /** Empty set: no elements, empty causal context. */
  def empty[A]: AddWinsSet[A] = AddWinsSet[A](Map.empty[A, Set[Dot]], CausalContext.empty)

  /* AddWinsSet is isomorphic to the corresponding Causal */
  implicit def toCausal[A](addWinsSet: AddWinsSet[A]): CausalStore[Map[A, Set[Dot]]] =
    CausalStore(addWinsSet.store, addWinsSet.context)

  implicit def fromCausal[A](causal: CausalStore[Map[A, Set[Dot]]]): AddWinsSet[A] =
    AddWinsSet(causal.store, causal.context)

  // Lattice instance: merging delegates to the dot-map merge, which implements
  // the add-wins resolution via the causal contexts.
  implicit def addWinsSetLattice[A]: Lattice[AddWinsSet[A]] =
    new Lattice[AddWinsSet[A]] {
      override def merge(left: AddWinsSet[A], right: AddWinsSet[A]): AddWinsSet[A] =
        DotStore.DotMapInstance[A, Set[Dot]].merge(left, right)
    }
}
| guidosalva/REScala | Code/Extensions/Kofre/src/main/scala/kofre/dotbased/AddWinsSet.scala | Scala | apache-2.0 | 3,290 |
package org.orbeon.oxf.xml
import javax.xml.transform.{Result, Source}
import org.orbeon.saxon.event._
import org.orbeon.saxon.trans.XPathException
import org.orbeon.saxon.{Configuration, Controller}
import org.xml.sax.SAXParseException
// Custom version of Saxon's IdentityTransformer which hooks up a `ComplexContentOutputter`
/**
 * Identity transform that, unlike Saxon's stock IdentityTransformer, routes
 * events through a `ComplexContentOutputter` (namespace fixup) and a
 * `NamespaceReducer` (duplicate namespace declaration removal) before the
 * serializer receiver.
 */
class IdentityTransformerWithFixup(config: Configuration) extends Controller(config) {
  override def transform(source: Source, result: Result): Unit =
    try {
      val pipelineConfig = makePipelineConfiguration
      val receiver =
        getConfiguration.getSerializerFactory.getReceiver(result, pipelineConfig, getOutputProperties)
      // To remove duplicate namespace declarations
      val reducer = new NamespaceReducer
      reducer.setUnderlyingReceiver(receiver)
      reducer.setPipelineConfiguration(pipelineConfig)
      // To fixup namespaces
      val cco = new ComplexContentOutputter
      cco.setHostLanguage(pipelineConfig.getHostLanguage)
      cco.setPipelineConfiguration(pipelineConfig)
      cco.setReceiver(reducer)
      // Event flow: source -> cco -> reducer -> receiver.
      new Sender(pipelineConfig).send(source, cco, true)
    } catch {
      case xpe: XPathException =>
        // SAX parse errors not wrapping a RuntimeException have presumably been
        // reported already, so they are not reported again here; everything
        // else is reported as fatal. The exception is rethrown either way.
        xpe.getException match {
          case spe: SAXParseException if ! spe.getException.isInstanceOf[RuntimeException] => // NOP
          case _ => reportFatalError(xpe)
        }
        throw xpe
    }
}
//
// The contents of this file are subject to the Mozilla Public License Version 1.0 (the "License");
// you may not use this file except in compliance with the License. You may obtain a copy of the
// License at http://www.mozilla.org/MPL/
//
// Software distributed under the License is distributed on an "AS IS" basis,
// WITHOUT WARRANTY OF ANY KIND, either express or implied.
// See the License for the specific language governing rights and limitations under the License.
//
// The Original Code is: all this file.
//
// The Initial Developer of the Original Code is Michael H. Kay
//
// Portions created by (your name) are Copyright (C) (your legal entity). All Rights Reserved.
//
// Contributor(s): None
//
| orbeon/orbeon-forms | src/main/scala/org/orbeon/oxf/xml/IdentityTransformerWithFixup.scala | Scala | lgpl-2.1 | 2,119 |
package part2
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
// Exercise scaffold: every test is `pending` until the student implements
// IntList (pair/IntNil), after which the commented fixtures and assertions
// below are meant to be uncommented.
class Exercise11IntListSpec extends AnyFlatSpec with Matchers {

  // def pair(h: Int, t: IntList): IntPair =
  //   IntPair(h, t)

  // val nil: IntList =
  //   IntNil()

  // val numbers1: IntList =
  //   pair(1, pair(2, pair(3, nil)))

  // val numbers2: IntList =
  //   pair(4, pair(5, pair(6, nil)))

  "contains" should "return true and false appropriately" in {
    pending
    // numbers1.contains(1) should equal(true)
    // numbers1.contains(5) should equal(false)
    // numbers2.contains(5) should equal(true)
    // numbers2.contains(1) should equal(false)
    // nil.contains(1) should equal(false)
  }

  "addToEach" should "increment every element in the list" in {
    pending
    // numbers1.addToEach(1) should equal(pair(2, pair(3, pair(4, nil))))
    // numbers2.addToEach(3) should equal(pair(7, pair(8, pair(9, nil))))
    // nil.addToEach(10) should equal(nil)
  }

  "total" should "total all elements" in {
    pending
    // numbers1.total should equal(1 + 2 + 3)
    // numbers2.total should equal(4 + 5 + 6)
    // nil.total should equal(0)
  }

  "append" should "append lists" in {
    pending
    // nil.append(nil) should equal(nil)
    // nil.append(numbers2) should equal(numbers2)
    // numbers1.append(nil) should equal(numbers1)
    // numbers1.append(numbers2) should equal(pair(1, pair(2, pair(3, pair(4, pair(5, pair(6, nil)))))))
  }

  "evensOnly" should "filter the list" in {
    pending
    // numbers1.evensOnly should equal(pair(2, nil))
    // numbers2.evensOnly should equal(pair(4, pair(6, nil)))
    // numbers1.append(numbers2).evensOnly should equal(pair(2, pair(4, pair(6, nil))))
    // nil.evensOnly should equal(nil)
  }
}
| underscoreio/essential-scala-code | src/test/scala/part2/Exercise11IntListSpec.scala | Scala | apache-2.0 | 1,789 |
package chandu0101.scalajs.react.components.popovers
import chandu0101.scalajs.react.components.all._
import chandu0101.scalajs.react.components.util.DomUtil
import chandu0101.scalajs.react.components.util.DomUtil.ClientRect
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import org.scalajs.dom.html
import scala.scalajs.js
/**
 * scalajs-react popover component styled after the Bootstrap popover:
 * absolutely positioned panel with an arrow, optional title and arbitrary
 * child content, placed on one side ("top"/"left"/"right"/"bottom") of a
 * trigger node.
 */
object ReactPopOver {

  // Inline-style definitions (TagMod sequences); override members to re-skin.
  trait Style {

    def popover : TagMod = Seq(positionAbsolute,
      ^.top := 0,
      ^.left := "-100%",
      ^.zIndex := 1060,
      ^.maxWidth := "500px",
      ^.padding := "1px",
      ^.fontSize := "15px",
      ^.fontWeight := "normal",
      ^.lineHeight := 1.42857143,
      textAlignLeft,
      ^.backgroundColor := "white",
      ^.borderRadius := "6px",
      ^.border := "1px solid #cccccc",
      ^.whiteSpace := "normal").++(backgroundClipPreFixer("padding-box"))
      .++(boxShadowPreFixer("0 5px 10px rgba(0, 0, 0, 0.2)"))

    // Offsets applied per placement so the panel clears the trigger node.
    def popoverTop = Seq(^.marginTop := "-10px")
    def popoverLeft = Seq(^.marginLeft := "-10px")
    def popoverRight = Seq(^.marginLeft := "10px")
    def popoverBottom = Seq(^.marginTop := "10px")

    def popoverTitle = Seq(^.margin := 0, ^.padding := "8px 14px",
      ^.fontSize := "15px",
      ^.backgroundColor := "#f7f7f7",
      ^.borderBottom := "1px solid #ebebeb",
      ^.borderRadius := "5px 5px 0 0")

    def popoverContent = Seq(^.padding := "9px 14px")

    // Arrow outline (11px border) and the inner "after" arrow (10px border)
    // that overlays it to produce the bordered-triangle effect.
    def popoverArrow = Seq(positionAbsolute,
      displayBlock,
      ^.width := 0,
      ^.height := 0,
      ^.border := "solid transparent",
      ^.borderWidth := "11px"
    )

    def popoverArrowAfter = Seq(positionAbsolute,
      displayBlock,
      ^.width := 0,
      ^.height := 0,
      ^.border := "solid transparent",
      ^.borderWidth := "10px"
    )

    def popoverTopArrow = Seq(^.left := "50%",
      ^.marginLeft := "-11px",
      ^.borderBottomWidth := 0,
      ^.borderTopColor := "#999999",
      ^.bottom := "-11px"
    )

    def popoverTopArrowAfter = Seq(
      ^.marginLeft := "-10px",
      ^.borderBottomWidth := 0,
      ^.borderTopColor := "#ffffff",
      ^.bottom := "1px"
    )

    def popoverRightArrow = Seq(^.top := "50%",
      ^.left := "-11px",
      ^.marginTop := "-11px",
      ^.borderLeftWidth := 0,
      ^.borderRightColor := "#999999",
      ^.bottom := "-11px"
    )

    def popoverRightArrowAfter = Seq(
      ^.left := "1px",
      ^.borderLeftWidth := 0,
      ^.borderRightColor := "#ffffff",
      ^.bottom := "-10px"
    )

    def popoverBottomArrow = Seq(^.left := "50%",
      ^.marginLeft := "-11px",
      ^.borderTopWidth := 0,
      ^.borderBottomColor := "#999999",
      ^.top := "-11px"
    )

    def popoverBottomArrowAfter = Seq(^.top := "1px",
      ^.marginLeft := "-10px",
      ^.borderTopWidth := 0,
      ^.borderBottomColor := "#ffffff"
    )

    def popoverLeftArrow = Seq(^.top := "50%",
      ^.right := "-11px",
      ^.marginTop := "-11px",
      ^.borderRightWidth := 0,
      ^.borderLeftColor := "#999999"
    )

    def popoverLeftArrowAfter = Seq(
      ^.right := "1px",
      ^.borderRightWidth := 0,
      ^.borderLeftColor := "#ffffff",
      ^.bottom := "-10px"
    )
  }

  // open: whether the popover is visible; top/left: computed absolute position.
  case class State(open: Boolean ,top : Double = 0,left : Double = 0)

  class Backend(t: BackendScope[Props, State]) {

    private def show(position: ClientRect) = t.modState(_.copy(open = true, top = position.top, left = position.left))

    private def hide() = t.modState(_.copy(open = false))

    /** Shows the popover anchored to `node`, or hides it if already open. */
    def toggle(node: html.Element) = {
      if(t.state.open) hide()
      else {
        val position = getPosition(node)
        show(position)
      }
    }

    /**
     * Computes the absolute top/left for the popover so it sits on the
     * configured side of `node`, centered along the adjacent edge.
     * Throws for any placement other than "top"/"left"/"right"/"bottom".
     */
    def getPosition(node: html.Element): ClientRect = {
      val offset = DomUtil.offset(node)
      val height = node.offsetHeight
      val width = node.offsetWidth
      val popoverHeight = t.getDOMNode().asInstanceOf[html.Element].offsetHeight
      val popoverWidth = t.getDOMNode().asInstanceOf[html.Element].offsetWidth
      t.props.placement match {
        case "right" => {
          val top = offset.top + height/2 - popoverHeight/2
          val left = offset.left + width
          ClientRect(top,left)
        }
        case "left" => {
          val top = offset.top + height/2 - popoverHeight/2
          val left = offset.left - popoverWidth
          ClientRect(top,left)
        }
        case "top" => {
          val top = offset.top - popoverHeight
          val left = offset.left + width/2 - popoverWidth/2
          ClientRect(top,left)
        }
        case "bottom" => {
          val top = offset.top + height
          val left = offset.left + width/2 - popoverWidth/2
          ClientRect(top,left)
        }
        case _ => throw new Exception(s"unsupported placement : ${t.props.placement}")
      }
    }

    // Inner arrow span matching the configured placement; empty for unknown placements.
    def arrowAfter : TagMod = {
      val P = t.props
      if (P.placement == "top") <.span(P.style.popoverArrowAfter,P.style.popoverTopArrowAfter," ")
      else if (P.placement == "left") <.span(P.style.popoverArrowAfter,P.style.popoverLeftArrowAfter," ")
      else if (P.placement == "right") <.span(P.style.popoverArrowAfter,P.style.popoverRightArrowAfter," ")
      else if (P.placement == "bottom") <.span(P.style.popoverArrowAfter,P.style.popoverBottomArrowAfter," ")
      else ""
    }
  }

  // Renders the panel (arrow, optional <h3> title when non-empty, content div);
  // the left coordinate is only applied while the popover is open.
  val component = ReactComponentB[Props]("ReactPopover")
    .initialState(State(open = false))
    .backend(new Backend(_))
    .render((P, C,S, B) => {
      <.div(P.style.popover,
        (P.placement == "top") ?= P.style.popoverTop,
        (P.placement == "left") ?= P.style.popoverLeft,
        (P.placement == "right") ?= P.style.popoverRight,
        (P.placement == "bottom") ?= P.style.popoverBottom,
        ^.top := S.top ,S.open ?= (^.left := S.left))(
        <.div(P.style.popoverArrow,
          (P.placement == "top") ?= P.style.popoverTopArrow,
          (P.placement == "left") ?= P.style.popoverLeftArrow,
          (P.placement == "right") ?= P.style.popoverRightArrow,
          (P.placement == "bottom") ?= P.style.popoverBottomArrow,B.arrowAfter),
        !P.title.isEmpty ?= <.h3(P.style.popoverTitle)(P.title),
        <.div(P.style.popoverContent)(
          C
        )
      )
    })
    .build

  case class Props(title : String,placement : String ,style : Style)

  /** Creates a popover instance; placement defaults to "right", title to none. */
  def apply(title : String = "",placement : String = "right",ref: js.UndefOr[String] = "", key: js.Any = {} ,style : Style = new Style {})(children : ReactNode*) = component.set(key, ref)(Props(title,placement,style),children)
}
| mproch/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/popovers/ReactPopOver.scala | Scala | apache-2.0 | 6,427 |
package gateway
import configuration.Environment
import infrastructure.{BadRequestDownstreamException, InternalServerErrorDownstreamException}
import models.{Authorization, Token}
import play.api.Logger
import play.api.libs.json.Json
import play.api.libs.ws.WS
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
trait AuthGateway {

  /** Base URL of the downstream authorization service. */
  val authUrl: String

  /**
   * POSTs the authorization payload to the auth service and parses the token
   * from a 201 Created response.
   *
   * The returned future fails with [[BadRequestDownstreamException]] on 4xx
   * and [[InternalServerErrorDownstreamException]] on 5xx or any other
   * unexpected status. (Previously a status outside 201/4xx/5xx — e.g. a 200
   * or a redirect — crashed with an undocumented MatchError.)
   */
  def createToken(authorization: Authorization): Future[Token] =
    WS.url(s"$authUrl/authorize")
      .withHeaders("Content-type" -> "application/json; charset=UTF-8")
      .post(Json.toJson(authorization)).map { res =>
      res.status match {
        case 201 => res.json.as[Token]
        case x if x >= 400 && x <= 499 =>
          Logger.error(s"Status code $x from downstream")
          throw BadRequestDownstreamException
        case x if x >= 500 && x <= 599 =>
          Logger.error(s"Status code $x from downstream")
          throw InternalServerErrorDownstreamException
        case x =>
          // Any other status is still a downstream failure we cannot recover from.
          Logger.error(s"Unexpected status code $x from downstream")
          throw InternalServerErrorDownstreamException
      }
    }

  /** Returns true iff the auth service answers 200 for the username/token pair. */
  def findToken(username: String, token: String): Future[Boolean] = {
    WS.url(s"$authUrl/authorize/$username/$token")
      .withHeaders("Content-type" -> "application/json; charset=UTF-8")
      .get.map { res =>
      res.status match {
        case 200 => true
        case _ => false
      }
    }
  }
}
// Production instance wired to the auth service URL from the environment configuration.
object AuthGateway extends AuthGateway {
  override val authUrl: String = Environment.auth
}
| tvlive/tv-api | app/gateway/AuthGateway.scala | Scala | apache-2.0 | 1,407 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.examples
import java.io.File
import org.apache.spark.sql.SparkSession
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
/**
 * For alter table relative syntax, you can refer to DDL operation
 * document (ddl-operation-on-carbondata.md)
 */
object AlterTableExample {

  def main(args: Array[String]): Unit = {

    // Local target directories for the Carbon store, warehouse and metastore.
    val rootPath = new File(this.getClass.getResource("/").getPath
                            + "../../../..").getCanonicalPath
    val storeLocation = s"$rootPath/examples/spark2/target/store"
    val warehouse = s"$rootPath/examples/spark2/target/warehouse"
    val metastoredb = s"$rootPath/examples/spark2/target/metastore_db"

    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")

    import org.apache.spark.sql.CarbonSession._

    // Local CarbonSession used to run the DDL statements below.
    val spark = SparkSession
      .builder()
      .master("local")
      .appName("AlterTableExample")
      .config("spark.sql.warehouse.dir", warehouse)
      .getOrCreateCarbonSession(storeLocation, metastoredb)

    spark.sparkContext.setLogLevel("WARN")

    spark.sql("DROP TABLE IF EXISTS carbon_table")
    spark.sql("DROP TABLE IF EXISTS new_carbon_table")

    spark.sql(
      s"""
         | CREATE TABLE carbon_table(
         |   shortField SHORT,
         |   intField INT,
         |   bigintField LONG,
         |   doubleField DOUBLE,
         |   stringField STRING,
         |   timestampField TIMESTAMP,
         |   decimalField DECIMAL(18,2),
         |   dateField DATE,
         |   charField CHAR(5),
         |   floatField FLOAT,
         |   complexData ARRAY<STRING>
         | )
         | STORED BY 'carbondata'
         | TBLPROPERTIES('DICTIONARY_INCLUDE'='dateField, charField')
       """.stripMargin)

    // Alter table change data type
    spark.sql("DESCRIBE FORMATTED carbon_table").show()
    spark.sql("ALTER TABLE carbon_table CHANGE intField intField BIGINT").show()

    // Alter table add columns
    spark.sql("DESCRIBE FORMATTED carbon_table").show()
    spark.sql("ALTER TABLE carbon_table ADD COLUMNS (newField STRING) " +
              "TBLPROPERTIES ('DEFAULT.VALUE.newField'='def')").show()

    // Alter table drop columns
    spark.sql("DESCRIBE FORMATTED carbon_table").show()
    spark.sql("ALTER TABLE carbon_table DROP COLUMNS (newField)").show()
    spark.sql("DESCRIBE FORMATTED carbon_table").show()

    // Alter table rename table name
    spark.sql("SHOW TABLES").show()
    spark.sql("ALTER TABLE carbon_table RENAME TO new_carbon_table").show()
    spark.sql("SHOW TABLES").show()

    // Clean up both possible table names before exiting.
    spark.sql("DROP TABLE IF EXISTS carbon_table")
    spark.sql("DROP TABLE IF EXISTS new_carbon_table")

    spark.stop()
  }
}
| nehabhardwaj01/incubator-carbondata | examples/spark2/src/main/scala/org/apache/carbondata/examples/AlterTableExample.scala | Scala | apache-2.0 | 3,579 |
package controllers
import com.mohiva.play.silhouette.api.{ LoginEvent, LoginInfo, SignUpEvent }
import com.mohiva.play.silhouette.impl.providers.{ CommonSocialProfile, CredentialsProvider }
import models.user.{ RegistrationData, UserForms }
import play.api.i18n.{ Messages, MessagesApi }
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.mvc.AnyContent
import services.user.AuthenticationEnvironment
import scala.concurrent.Future
@javax.inject.Singleton
class RegistrationController @javax.inject.Inject() (
  override val messagesApi: MessagesApi,
  override val env: AuthenticationEnvironment
) extends BaseController {

  /** Displays the empty registration form. */
  def registrationForm = withSession { implicit request =>
    Future.successful(Ok(views.html.register(request.identity, UserForms.registrationForm)))
  }

  /**
   * Handles the registration form submit: rejects invalid input, then rejects
   * already-taken email addresses and usernames, otherwise creates the profile.
   */
  def register = withSession { implicit request =>
    UserForms.registrationForm.bindFromRequest.fold(
      form => Future.successful(BadRequest(views.html.register(request.identity, form))),
      data => {
        env.identityService.retrieve(LoginInfo(CredentialsProvider.ID, data.email)).flatMap {
          case Some(user) => Future.successful {
            Ok(views.html.register(request.identity, UserForms.registrationForm.fill(data))).flashing("error" -> "That email address is already taken.")
          }
          case None => env.identityService.retrieve(data.username) flatMap {
            case Some(user) => Future.successful {
              Ok(views.html.register(request.identity, UserForms.registrationForm.fill(data))).flashing("error" -> "That username is already taken.")
            }
            case None => saveProfile(data)
          }
        }
      }
    )
  }

  // Creates the credentials profile for the current identity, persists the
  // user/auth info, initializes an authenticator cookie/session, publishes the
  // sign-up and login events and redirects home.
  private[this] def saveProfile(data: RegistrationData)(implicit request: SecuredRequest[AnyContent]) = {
    if (request.identity.profiles.exists(_.providerID == "credentials")) {
      throw new IllegalStateException("You're already registered.") // TODO Fix?
    }
    val loginInfo = LoginInfo(CredentialsProvider.ID, data.email)
    val authInfo = env.hasher.hash(data.password)
    val user = request.identity.copy(
      username = if (data.username.isEmpty) { request.identity.username } else { Some(data.username) },
      profiles = request.identity.profiles :+ loginInfo
    )
    val profile = CommonSocialProfile(
      loginInfo = loginInfo,
      email = Some(data.email)
    )
    val r = Redirect(controllers.routes.HomeController.index())
    // NOTE: the steps below run sequentially; each depends on the previous one.
    for {
      avatar <- env.avatarService.retrieveURL(data.email)
      profile <- env.userService.create(user, profile.copy(avatarURL = avatar.orElse(Some("default"))))
      u <- env.userService.save(user, update = true)
      authInfo <- env.authInfoService.save(loginInfo, authInfo)
      authenticator <- env.authenticatorService.create(loginInfo)
      value <- env.authenticatorService.init(authenticator)
      result <- env.authenticatorService.embed(value, r)
    } yield {
      env.eventBus.publish(SignUpEvent(u, request, request2Messages))
      env.eventBus.publish(LoginEvent(u, request, request2Messages))
      result
    }
  }
}
| rynmccrmck/thunderdome | app/controllers/RegistrationController.scala | Scala | apache-2.0 | 3,124 |
/* __ *\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2002-2013, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\___/_/ |_/____/_/ | | **
** |/ **
\* */
// GENERATED CODE: DO NOT EDIT. See scala.Function0 for timestamp.
package scala
/** A tuple of 5 elements; the canonical representation of a [[scala.Product5]].
*
* @constructor Create a new tuple with 5 elements. Note that it is more idiomatic to create a Tuple5 via `(t1, t2, t3, t4, t5)`
* @param _1 Element 1 of this Tuple5
* @param _2 Element 2 of this Tuple5
* @param _3 Element 3 of this Tuple5
* @param _4 Element 4 of this Tuple5
* @param _5 Element 5 of this Tuple5
*/
// NOTE: generated code (see header) — only this explanatory comment was added.
final case class Tuple5[+T1, +T2, +T3, +T4, +T5](_1: T1, _2: T2, _3: T3, _4: T4, _5: T5)
  extends Product5[T1, T2, T3, T4, T5]
{
  // Renders as "(a,b,c,d,e)" instead of the default Product toString.
  override def toString() = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + ")"
}
| felixmulder/scala | src/library/scala/Tuple5.scala | Scala | bsd-3-clause | 1,260 |
/**
*
*/
package org.duffqiu.rest.test.actor
import scala.actors.Actor
import scala.actors.Actor.State.Terminated
import scala.actors.Exit
import scala.actors.TIMEOUT
/**
* @author macbook
*
* Jun 7, 2014
*/
object RestClientMasterActor {
  // Default receiveWithin timeout (milliseconds) for the master's message loop.
  private final val DEFAULT_INTERVAL = 5000
}
/**
 * Master actor that round-robins REST test tasks over a pool of registered
 * worker actors, collects exceptions they report, and coordinates shutdown
 * (exits once every linked worker has terminated).
 *
 * NOTE(review): built on the deprecated scala.actors library.
 */
class RestClientMasterActor(interval: Int = RestClientMasterActor.DEFAULT_INTERVAL) extends Actor {

  // Registered workers; tasks are distributed round-robin via workIndex.
  var workers: List[RestClientWorkActor] = List[RestClientWorkActor]()
  var workIndex = 0
  // Number of linked workers that have exited so far.
  var exitConfirmCount = 0
  var isExit = false
  // Exceptions forwarded by workers, rethrown in stop().
  var exceptionList: List[RestClientExceptionMessage] = List[RestClientExceptionMessage]()

  override def act(): Unit = {
    // Receive Exit notifications from linked workers instead of dying with them.
    trapExit = true
    loopWhile(!isExit) {
      receiveWithin(interval) {
        case BYE => {
          // println("[debug]server receive bye")
          // Propagate shutdown to every worker; each will reply with Exit.
          workers.foreach(_ ! CLIENT_BYE)
          // println("finish to send bye to all clients")
        }
        case RestTestTaskMessage(resource, req, operation, resp, expectResult) => {
          getWorker ! RestTestTaskMessage(resource, req, operation, resp, expectResult)
          // println("[Client Master Actor] send from master to worker(" + worker.name + "), operation: " + operation + ", expect result: " + expectResult)
        }
        case RestTestTaskBatchMsg(resource, operation, reqRespMap, expectResult) =>
          //can't use par since getWorker is not thread safe
          // println("[Client Master Actor] receive batch messsage and spit them to send to worker actors")
          reqRespMap.foreach {
            t =>
              getWorker ! RestTestTaskMessage(resource, t._1, operation, t._2, expectResult)
          }
        case TIMEOUT =>
          // println("master actor timeout")
        case worker: RestClientWorkActor => {
          // Worker registration: add to the pool and start it.
          workers = worker :: workers
          worker.start
          // println("add worker: " + worker.name)
        }
        case except: RestClientExceptionMessage => {
          // println("[Client Master Actor] got exception: " + except.exception + " from " + except.name)
          exceptionList = except :: exceptionList
        }
        case Exit(linked, reason) =>
          exitConfirmCount = exitConfirmCount + 1
          // println("client exit because " + reason)
          if (exitConfirmCount >= workers.length) {
            // println("master exit since all client workers are closed")
            isExit = true
          }
        case _ =>
          println("[Client Master Actor]receive unknown message in master worker")
      }
    }
  }

  // Round-robin selection; not thread safe (only called from the actor loop).
  private[this] def getWorker = {
    workIndex = (workIndex + 1) % workers.length
    workers(workIndex)
  }

  // Rethrows the first worker exception recorded, if any.
  private[this] def shouldNoClientException = {
    if (!exceptionList.isEmpty) {
      exceptionList.foreach(println(_))
      exceptionList.foreach(e => throw e.exception)
    }
  }

  /** Requests shutdown, blocks until the actor terminates, then rethrows any worker exception. */
  def stop: Unit = {
    this ! BYE
    while (this.getState != Terminated) {
      Thread.sleep(interval)
    }
    shouldNoClientException
  }
}
| duffqiu/rest-test-dsl | src/main/scala/org/duffqiu/rest/test/actor/RestClientMasterActor.scala | Scala | apache-2.0 | 3,495 |
package org.scaladebugger.api.lowlevel.watchpoints
/**
 * Signals that a watchpoint request could not be created because the
 * requested field does not exist on the class in the remote JVM.
 *
 * @param className Name of the class that was expected to contain the field
 * @param fieldName Name of the field that could not be found
 */
case class NoFieldFound(className: String, fieldName: String)
  extends Throwable("No field for " + className + "." + fieldName + " was found!")
| ensime/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/lowlevel/watchpoints/NoFieldFound.scala | Scala | apache-2.0 | 464 |
package agni
import cats.syntax.either._
import com.datastax.oss.driver.api.core.ProtocolVersion
import com.datastax.oss.driver.api.core.cql.Row
import com.datastax.oss.driver.api.core.data.{TupleValue, UdtValue}
/** Type class for decoding a single column of a Cassandra [[Row]] into an `A`. */
trait RowDeserializer[A] {
  // Decode the column at index `i`.
  def apply(row: Row, i: Int, version: ProtocolVersion): Either[Throwable, A]
  // Decode the column named `name`.
  def apply(row: Row, name: String, version: ProtocolVersion): Either[Throwable, A]
}
/** Instances and summoner for [[RowDeserializer]]. */
object RowDeserializer {

  /** Summons an implicit instance: `RowDeserializer[A]`. */
  def apply[A](implicit A: RowDeserializer[A]): RowDeserializer[A] = A

  /** Derives an instance from a byte-level [[Deserializer]]: the raw column
    * bytes are fetched with `getBytesUnsafe` (wrapped, since the driver call
    * may throw) and then decoded by `A`.
    */
  implicit def builtIn[A](implicit A: Deserializer[A]): RowDeserializer[A] = new RowDeserializer[A] {
    override def apply(row: Row, i: Int, version: ProtocolVersion): Either[Throwable, A] =
      for {
        v <- Either.catchNonFatal(row.getBytesUnsafe(i))
        r <- A(v, version)
      } yield r
    override def apply(row: Row, name: String, version: ProtocolVersion): Either[Throwable, A] =
      for {
        v <- Either.catchNonFatal(row.getBytesUnsafe(name))
        r <- A(v, version)
      } yield r
  }

  /** Reads a [[TupleValue]] column. The driver call is wrapped in
    * `Either.catchNonFatal` — previously this instance used `.asRight`
    * directly, so a driver exception escaped as a throw instead of a `Left`,
    * unlike the sibling `builtIn` and `udtValue` instances.
    */
  implicit val tupleValue: RowDeserializer[TupleValue] = new RowDeserializer[TupleValue] {
    override def apply(row: Row, i: Int, version: ProtocolVersion): Either[Throwable, TupleValue] =
      Either.catchNonFatal(row.getTupleValue(i))
    override def apply(row: Row, name: String, version: ProtocolVersion): Either[Throwable, TupleValue] =
      Either.catchNonFatal(row.getTupleValue(name))
  }

  /** Reads a [[UdtValue]] column, capturing driver exceptions as `Left`. */
  implicit val udtValue: RowDeserializer[UdtValue] = new RowDeserializer[UdtValue] {
    override def apply(row: Row, i: Int, version: ProtocolVersion): Either[Throwable, UdtValue] =
      Either.catchNonFatal(row.getUdtValue(i))
    override def apply(row: Row, name: String, version: ProtocolVersion): Either[Throwable, UdtValue] =
      Either.catchNonFatal(row.getUdtValue(name))
  }
}
| tkrs/agni | core/src/main/scala/agni/RowDeserializer.scala | Scala | mit | 1,795 |
import scala.language.implicitConversions
object Test {
  // Unit-of-measure marker types; NoUnit tags a bare, unit-less scalar.
  abstract class Unit
  object NoUnit extends Unit
  object Hour extends Unit { override def toString = "Hour" }

  /** A scalar tagged with a unit; `*` re-tags the scalar with a new unit. */
  case class Measure(scalar: Double, unit: Unit) {
    def *(newUnit: Unit) = Measure(scalar, newUnit)
  }

  // Lifts a bare Double into a unit-less Measure so `3.0 * Hour` resolves
  // via the Measure.* method after implicit conversion.
  implicit def double2Measure(scalar: Double): Test.Measure =
    Measure(scalar, NoUnit)

  def main(args: Array[String]): scala.Unit = {
    val withUnit = 3.0 * Hour
    Console.println("3.0 * Hour = " + withUnit)
  }
}
| lampepfl/dotty | tests/run/impconvtimes.scala | Scala | apache-2.0 | 477 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a surface-level overview of the dataset's contents without deeper analysis.