code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
package elegans import TimerLogic._ object Cells { def neighbors(cell: Cell): Set[Cell] = { var ns = Set[Cell]() for (n <- cell.N) { for (op <- n.outputPorts) { for (Edge(_, dst, _) <- op.E) { val neighbor = dst.parent.parent assert(neighbor != null) ns += neighbor } } } ns } class Cell(val name: String) { private var _nodes = Set[Node]() def N = _nodes /** Registers given node in cell and returns it */ def node[N <: Node](n: N): N = { addNode(n) n.setParent(this) n } /** Adds a new node */ private def addNode(n: Node) { _nodes += n } /** Orders nodes such that execution order is correct for nodes linked with * non-delay edges. Computed only once, lazily, therefore nodes should not * be added dynamically with this implementation */ lazy val orderedN: List[Node] = { import scala.collection.mutable.{Set=>MutableSet, ListBuffer} val result = ListBuffer[Node]() val haveNonDelayedOutput = MutableSet[Node]() haveNonDelayedOutput ++= N.filter(n => n.outputPorts.exists(p => !p.nonDelayedEdges.isEmpty)) while(!haveNonDelayedOutput.isEmpty) { val noPred = haveNonDelayedOutput.find{ n1 => !haveNonDelayedOutput.exists{ n2 => val dpset = n2.outputPorts.map(op => op.nonDelayedEdges.map(e => e.dest)) val dps = dpset.reduce(_ union _) ! (dps intersect (n1.inputPorts)).isEmpty } }.get result append noPred haveNonDelayedOutput -= noPred } val toReturn = result.toList ::: ((N -- result.toSet).toList) toReturn } def updateValues() { for (n <- N) n.updateValues() } override def toString = name /** Checks that - there is a logic for each output port - each port is connected to at least one other. 
*/ def sanityCheck() { for (n <- N) n.sanityCheck() } def intercellularInputPorts = { for (n <- N) { for (p <- n.inputPorts) { // todo save back edges in ports and check whether that input port is // in same cell val neighborOutputs = p.incomingEdges map (e => e.source) } } } def outcomeBundles: Map[String, Set[PortBundle]] = { var toRet = Map[String, Set[PortBundle]]() for (n <- N) { for ((bundle, value) <- n.outcomeBundles) { toRet.get(value) match { case Some(set) => toRet += ((value, set + bundle)) case None => toRet += ((value, Set(bundle))) } } } toRet.toMap } } abstract class Node(val name: String) { private var _inputPorts = Set[Port]() private var _outputPorts = Set[Port]() private var _logics = Set[Logic]() private var _parent: Cell = null private var _inputBundles = Set[PortBundle]() private var _outputBundles = Set[PortBundle]() private var _outcomeBundles = Map[PortBundle, String]() // specific to TimerSemantics, gives a unique id to each ?? in the node private var holeCounter = 0 private var logicCounter = 0 def P: Set[Port] = _inputPorts ++ _outputPorts def inputPorts: Set[Port] = _inputPorts def outputPorts: Set[Port] = _outputPorts def logics = _logics def inputBundles = _inputBundles def outputBundles = _outputBundles def bundles = inputBundles ++ outputBundles def outcomeBundles = _outcomeBundles def parent = _parent def setParent(c: Cell) { _parent = c } def input(name: String) (levels: String*) (implicit a: Assumption = null): PortBundle = { val ports = levels map (l => new Port(name + "_" + l)) ports foreach addInput ports foreach (_.setParent(this)) val bundle = PortBundle(name, ports) if (a != null) bundle.assumptions += a addInputBundle(bundle) bundle } def output(name: String) (levels: String*) (implicit a: Assumption = null): PortBundle = { val ports = levels map (l => new Port(name + "_" + l)) ports foreach addOutput ports foreach (_.setParent(this)) val bundle = PortBundle(name, ports) if (a != null) bundle.assumptions += a 
addOutputBundle(bundle) bundle } def outcome(bundle: PortBundle, value: String) { _outcomeBundles += ((bundle, value)) } /** Ensures that all of the same hole expressions in different * instantiations of each cell get the same identifier string, by using the * holeCounter */ def ??(ps: Port*): HolePredicate = { holeCounter += 1 HolePredicate(ps.toList, this.name + "#" + (ps.map(_.name).mkString(":")) + "_" + holeCounter) } def logic(l: Logic): DeclaredLogic = { logicCounter += 1 l.id = this.name + "#" + logicCounter.toString l.setNode(this) DeclaredLogic(l) } case class DeclaredLogic(l: Logic) def register(dl: DeclaredLogic) = dl match { case DeclaredLogic(l) => _logics += l l match { case TimerLogic(out, trigger, preds) => out.setLogic(l) case _ => // doesn't need to be registered here // TODO refactor } } /** Adds a new port */ private def addInput(p: Port) { _inputPorts += p } /** Adds a new port */ private def addOutput(p: Port) { _outputPorts += p } private def addInputBundle(b: PortBundle) { _inputBundles += b } private def addOutputBundle(b: PortBundle) { _outputBundles += b } def updateValues() { for (p <- P) p.updateValue() } def sanityCheck() { for (op <- outputPorts) { // check for existence of a logic for each output if (op.logic.isEmpty) { // TODO commented because we don't have it for new semantics // logWarning("No logic for output port " + op.name + " in " + this.name) } // check whether it is connected to anything // if (op.E.isEmpty) // logWarning("Output " + op + " is not connected to any other port") } } override def toString = parent.toString + "_" + name } sealed trait Assumption case object Monotonic extends Assumption case object Constant extends Assumption case class PortBundle(id: String, ports: Seq[Port]) { var assumptions = Set[Assumption]() def -->(b: PortBundle) { if (this.ports.size != b.ports.size) terminate("Bundle sizes don't match for " + this.id + " --> " + b.id) this.ports zip b.ports foreach { case (p1, p2) => 
p1.connectDelayed(p2, Activating) } } def ==>(b: PortBundle) { if (this.ports.size != b.ports.size) terminate("Bundle sizes don't match for " + this.id + " --> " + b.id) this.ports zip b.ports foreach { case (p1, p2) => p1.connectNonDelayed(p2, Activating) } } def --|(b: PortBundle) { if (this.ports.size != b.ports.size) terminate("Bundle sizes don't match for " + this.id + " --> " + b.id) this.ports zip b.ports foreach { case (p1, p2) => p1.connectDelayed(p2, Inhibiting) } } def ==|(b: PortBundle) { if (this.ports.size != b.ports.size) terminate("Bundle sizes don't match for " + this.id + " --> " + b.id) this.ports zip b.ports foreach { case (p1, p2) => p1.connectNonDelayed(p2, Inhibiting) } } } class Port(val name: String) { private var _delayedEdges = Set[Edge]() private var _nonDelayedEdges = Set[Edge]() private var _incomingDelayedEdges = Set[Edge]() private var _incomingNonDelayedEdges = Set[Edge]() private var _parent: Node = null private var _logic: Option[Logic] = None private var _enabledNow = false private var _enabledNext = false def delayedEdges = _delayedEdges def nonDelayedEdges = _nonDelayedEdges def E = nonDelayedEdges ++ delayedEdges def incomingDelayedEdges = _incomingDelayedEdges def incomingNonDelayedEdges = _incomingNonDelayedEdges def incomingEdges = incomingDelayedEdges ++ incomingNonDelayedEdges def parent = _parent def setParent(n: Node) { _parent = n } def logic = _logic def setLogic(l: Logic) { _logic = Some(l) } def enabledNow = _enabledNow def enabledNext = _enabledNext def forceEnable() { _enabledNow = true } def forceDisable() { _enabledNow = false } def setNextValue(v: Boolean) { _enabledNext = v } private def propagateActivation() { for (e <- delayedEdges) { e.dest.activateNext() } for (e <- nonDelayedEdges) { e.dest.activateNow() } } def activateNext() { _enabledNext = true propagateActivation() } def activateNow() { _enabledNow = true _enabledNext = true assert(E.isEmpty) } def updateValue() { _enabledNow = _enabledNext 
_enabledNext = false } def connectDelayed(p: Port, r: EdgeRole) { val e = Edge(this, p, r) _delayedEdges += e p._incomingDelayedEdges += e } def connectNonDelayed(p: Port, r: EdgeRole) { val e = Edge(this, p, r) _nonDelayedEdges += e p._incomingNonDelayedEdges += e } def -->(p: Port) { connectDelayed(p, Activating) } def ==>(p: Port) { connectNonDelayed(p, Activating) } def --|(p: Port) { connectDelayed(p, Inhibiting) } def ==|(p: Port) { connectNonDelayed(p, Inhibiting) } override def toString = parent.toString + "_" + name } sealed trait EdgeRole case object Activating extends EdgeRole case object Inhibiting extends EdgeRole case class Edge(source: Port, dest: Port, role: EdgeRole) { override def toString = { val arrowString = role match { case Activating => " -> " case Inhibiting => " -| " } source.name + arrowString + dest.name } } object CellPrinter { def apply(c: Cell): String = { "cell " + c.name + " {" + "\\n" + indent( (for (n <- c.N) yield apply(n)).mkString("\\n\\n") ) + "\\n" + "}" } def apply(n: Node): String = { "node " + n.name + " {" + "\\n" + indent( (for (l <- n.logics) yield l.toString).mkString("\\n\\n") ) + "\\n" + "}" } } object CellDotPrinter { def apply(c: Cell): Seq[String] = { (for (n <- c.N) yield apply(n)).toList.flatten } def apply(n: Node): Seq[String] = { // println(n.name) (for (l <- n.logics) yield { l.toDotString }).toList } } }
koksal/elegans-popl2013-code
src/main/scala/elegans/Cells.scala
Scala
bsd-2-clause
10,958
class A { class B } class C(val a: A, val b: a.B) @main def Test = val a = A() val b = a.B() val c = C(a, b) val d = c.b val d1: c.a.B = d
som-snytt/dotty
tests/pos/i8069.scala
Scala
apache-2.0
153
import collection.mutable.ListBuffer import org.apache.commons.lang3.StringEscapeUtils object GradingFeedback { private val feedbackSummary = new ListBuffer[String]() private val feedbackDetails = new ListBuffer[String]() private def addSummary(msg: String) { feedbackSummary += msg; feedbackSummary += "\\n\\n" } private def addDetails(msg: String) { feedbackDetails += msg; feedbackDetails += "\\n\\n" } /** * Converts the string to HTML - coursera displays the feedback in an html page. */ def feedbackString(uuid: String, html: Boolean = true) = { val total = totalGradeMessage(totalScore) + "\\n\\n" // trim removes the newlines at the end val s = (total + feedbackSummary.mkString + feedbackDetails.mkString + uniqueGradeId(uuid) + "\\n\\n").trim if (html) "<pre>"+ StringEscapeUtils.escapeHtml4(s) +"</pre>" else s } private var vTestScore: Double = 0d private var vStyleScore: Double = 0d def totalScore = vTestScore + vStyleScore private var vMaxTestScore: Double = 0d private var vMaxStyleScore: Double = 0d def maxTestScore = vMaxTestScore def maxStyleScore = vMaxStyleScore // a string obtained from coursera when downloading an assignment. it has to be // used again when uploading the grade. var apiState: String = "" /** * `failed` means that there was an unexpected error during grading. This includes * - student's code does not compile * - our tests don't compile (against the student's code) * - crash while executing ScalaTest (not test failures, but problems trying to run the tests!) * - crash while executing the style checker (again, not finding style problems!) * * When failed is `true`, later grading stages will not be executed: this is handled automatically * by SBT, tasks depending on a failed one are not run. * * However, these dependent tasks still fail (i.e. mapR on them is invoked). The variable below * allows us to know if something failed before. In this case, we don't add any more things to * the log. 
(see `ProgFunBuild.handleFailure`) */ private var failed = false def isFailed = failed def initialize() { feedbackSummary.clear() feedbackDetails.clear() vTestScore = 0d vStyleScore = 0d apiState = "" failed = false } def setMaxScore(maxScore: Double, styleScoreRatio: Double) { vMaxTestScore = maxScore * (1-styleScoreRatio) vMaxStyleScore = maxScore * styleScoreRatio } /* Methods to build up the feedback log */ def downloadUnpackFailed(log: String) { failed = true addSummary(downloadUnpackFailedMessage) addDetails("======== FAILURES WHILE DOWNLOADING OR EXTRACTING THE SUBMISSION ========") addDetails(log) } def compileFailed(log: String) { failed = true addSummary(compileFailedMessage) addDetails("======== COMPILATION FAILURES ========") addDetails(log) } def testCompileFailed(log: String) { failed = true addSummary(testCompileFailedMessage) addDetails("======== TEST COMPILATION FAILURES ========") addDetails(log) } def allTestsPassed() { addSummary(allTestsPassedMessage) vTestScore = maxTestScore } def testsFailed(log: String, score: Double) { addSummary(testsFailedMessage(score)) vTestScore = score addDetails("======== LOG OF FAILED TESTS ========") addDetails(log) } def testExecutionFailed(log: String) { failed = true addSummary(testExecutionFailedMessage) addDetails("======== ERROR LOG OF TESTING TOOL ========") addDetails(log) } def testExecutionDebugLog(log: String) { addDetails("======== DEBUG OUTPUT OF TESTING TOOL ========") addDetails(log) } def perfectStyle() { addSummary(perfectStyleMessage) vStyleScore = maxStyleScore } def styleProblems(log: String, score: Double) { addSummary(styleProblemsMessage(score)) vStyleScore = score addDetails("======== CODING STYLE ISSUES ========") addDetails(log) } /* Feedback Messages */ private val downloadUnpackFailedMessage = """We were not able to download your submission from the coursera servers, or extracting the |archive containing your source code failed. 
| |If you see this error message as your grade feedback, please contact one of the teaching |assistants. See below for a detailed error log.""".stripMargin private val compileFailedMessage = """We were not able to compile the source code you submitted. This is not expected to happen, |because the `submit` command in SBT can only be executed if your source code compiles. | |Please verify the following points: | - You should use the `submit` command in SBT to upload your solution | - You should not perform any changes to the SBT project definition files, i.e. the *.sbt | files, and the files in the `project/` directory | |Take a careful look at the compiler output below - maybe you can find out what the problem is. | |If you cannot find a solution, ask for help on the discussion forums on the course website.""".stripMargin private val testCompileFailedMessage = """We were not able to compile our tests, and therefore we could not correct your submission. | |The most likely reason for this problem is that your submitted code uses different names |for methods, classes, objects or different types than expected. | |In principle, this can only arise if you changed some names or types in the code that we |provide, for instance a method name or a parameter type. | |To diagnose your problem, perform the following steps: | - Run the tests that we provide with our hand-out. These tests verify that all names and | types are correct. In case these tests pass, but you still see this message, please post | a report on the forums [1]. | - Take a careful look at the error messages from the Scala compiler below. They should give | you a hint where your code has an unexpected shape. | |If you cannot find a solution, ask for help on the discussion forums on the course website.""".stripMargin private def testsFailedMessage(score: Double) = """The code you submitted did not pass all of our tests: your submission achieved a score of |%.2f out of %.2f in our tests. 
| |In order to find bugs in your code, we advise to perform the following steps: | - Take a close look at the test output that you can find below: it should point you to | the part of your code that has bugs. | - Run the tests that we provide with the handout on your code. | - The tests we provide do not test your code in depth: they are very incomplete. In order | to test more aspects of your code, write your own unit tests. | - Take another very careful look at the assignment description. Try to find out if you | misunderstood parts of it. While reading through the assignment, write more tests. | |Below you can find a short feedback for every individual test that failed.""".stripMargin.format(score, vMaxTestScore) // def so that we read the right value of vMaxTestScore (initialize modifies it) private def allTestsPassedMessage = """Your solution passed all of our tests, congratulations! You obtained the maximal test |score of %.2f.""".stripMargin.format(vMaxTestScore) private val testExecutionFailedMessage = """An error occurred while running our tests on your submission. | |In order for us to help you, please contact one of the teaching assistants and send |them the entire feedback message that you received.""".stripMargin // def so that we read the right value of vMaxStyleScore (initialize modifies it) private def perfectStyleMessage = """Our automated style checker tool could not find any issues with your code. You obtained the maximal |style score of %.2f.""".stripMargin.format(vMaxStyleScore) private def styleProblemsMessage(score: Double) = """Our automated style checker tool found issues in your code with respect to coding style: it |computed a style score of %.2f out of %.2f for your submission. 
See below for detailed feedback.""".stripMargin.format(score, vMaxStyleScore) private def totalGradeMessage(score: Double) = """Your overall score for this assignment is %.2f out of %.2f""".format(score, vMaxTestScore + vMaxStyleScore) // This is added because the feedback is getting overwritten by someone. private def uniqueGradeId(s: String) = """Unique identifier of this grade is %s. This identifier will uniquely identify your assignment throughout the grading system.""".format(s) }
dreikanter/progfun
week2/funsets/project/GradingFeedback.scala
Scala
cc0-1.0
8,801
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package services.testdata.candidate.onlinetests.phase3 import common.FutureEx import javax.inject.{ Inject, Singleton } import model.exchange.testdata.CreateCandidateResponse.CreateCandidateResponse import model.testdata.candidate.CreateCandidateData.CreateCandidateData import org.joda.time.DateTime import play.api.mvc.RequestHeader import repositories.onlinetesting.Phase3TestRepository import services.onlinetesting.phase3.Phase3TestService import services.testdata.candidate.ConstructiveGenerator import uk.gov.hmrc.http.HeaderCarrier import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future @Singleton class Phase3TestsStartedStatusGenerator @Inject() (val previousStatusGenerator: Phase3TestsInvitedStatusGenerator, otRepository: Phase3TestRepository, otService: Phase3TestService ) extends ConstructiveGenerator { // val otRepository: Phase3TestRepository // val otService: Phase3TestService def generate(generationId: Int, generatorConfig: CreateCandidateData) (implicit hc: HeaderCarrier, rh: RequestHeader): Future[CreateCandidateResponse] = { for { candidate <- previousStatusGenerator.generate(generationId, generatorConfig) _ <- FutureEx.traverseSerial(candidate.phase3TestGroup.get.tests.map(_.token))(token => otService.markAsStarted(token, generatorConfig.phase3TestData.flatMap(_.start).getOrElse(DateTime.now)) ) } yield candidate } }
hmrc/fset-faststream
app/services/testdata/candidate/onlinetests/phase3/Phase3TestsStartedStatusGenerator.scala
Scala
apache-2.0
2,201
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.plan import com.google.common.collect.ImmutableList import org.apache.calcite.plan.RelOptPlanner.CannotPlanException import org.apache.calcite.plan.hep.{HepMatchOrder, HepPlanner, HepProgram, HepProgramBuilder} import org.apache.calcite.plan.{Context, Convention, RelOptPlanner, RelOptUtil, RelTraitSet} import org.apache.calcite.rel.RelNode import org.apache.calcite.tools.{Programs, RuleSet, RuleSets} import org.apache.flink.table.api.{TableConfig, TableException} import org.apache.flink.table.api.internal.TableEnvImpl import org.apache.flink.table.calcite.CalciteConfig import org.apache.flink.table.plan.nodes.FlinkConventions import org.apache.flink.table.plan.rules.FlinkRuleSets import org.apache.flink.table.planner.PlanningConfigurationBuilder import scala.collection.JavaConverters._ /** * Common functionalities for both [[StreamOptimizer]] and [[BatchOptimizer]]. An [[Optimizer]] * should be used to create an optimized tree from a logical input tree. * See [[StreamOptimizer.optimize]] and [[BatchOptimizer.optimize]] * * @param calciteConfig provider for [[CalciteConfig]]. It is a provider because the * [[TableConfig]] in a [[TableEnvImpl]] is mutable. 
* @param planningConfigurationBuilder provider for [[RelOptPlanner]] and [[Context]] */ abstract class Optimizer( calciteConfig: () => CalciteConfig, planningConfigurationBuilder: PlanningConfigurationBuilder) { protected def materializedConfig: CalciteConfig = calciteConfig.apply() /** * Returns the normalization rule set for this optimizer * including a custom RuleSet configuration. */ protected def getNormRuleSet: RuleSet = { materializedConfig.normRuleSet match { case None => getBuiltInNormRuleSet case Some(ruleSet) => if (materializedConfig.replacesNormRuleSet) { ruleSet } else { RuleSets.ofList((getBuiltInNormRuleSet.asScala ++ ruleSet.asScala).asJava) } } } /** * Returns the logical optimization rule set for this optimizer * including a custom RuleSet configuration. */ protected def getLogicalOptRuleSet: RuleSet = { materializedConfig.logicalOptRuleSet match { case None => getBuiltInLogicalOptRuleSet case Some(ruleSet) => if (materializedConfig.replacesLogicalOptRuleSet) { ruleSet } else { RuleSets.ofList((getBuiltInLogicalOptRuleSet.asScala ++ ruleSet.asScala).asJava) } } } /** * Returns the logical rewrite rule set for this optimizer * including a custom RuleSet configuration. */ protected def getLogicalRewriteRuleSet: RuleSet = { materializedConfig.logicalRewriteRuleSet match { case None => getBuiltInLogicalRewriteRuleSet case Some(ruleSet) => if (materializedConfig.replacesLogicalRewriteRuleSet) { ruleSet } else { RuleSets.ofList((getBuiltInLogicalRewriteRuleSet.asScala ++ ruleSet.asScala).asJava) } } } /** * Returns the physical optimization rule set for this optimizer * including a custom RuleSet configuration. 
*/ protected def getPhysicalOptRuleSet: RuleSet = { materializedConfig.physicalOptRuleSet match { case None => getBuiltInPhysicalOptRuleSet case Some(ruleSet) => if (materializedConfig.replacesPhysicalOptRuleSet) { ruleSet } else { RuleSets.ofList((getBuiltInPhysicalOptRuleSet.asScala ++ ruleSet.asScala).asJava) } } } /** * Returns the built-in normalization rules that are defined by the optimizer. */ protected def getBuiltInNormRuleSet: RuleSet /** * Returns the built-in logical optimization rules that are defined by the optimizer. */ protected def getBuiltInLogicalOptRuleSet: RuleSet = { FlinkRuleSets.LOGICAL_OPT_RULES } /** * Returns the built-in logical rewrite rules that are defined by the optimizer. */ protected def getBuiltInLogicalRewriteRuleSet: RuleSet = { FlinkRuleSets.LOGICAL_REWRITE_RULES } /** * Returns the built-in physical optimization rules that are defined by the optimizer. */ protected def getBuiltInPhysicalOptRuleSet: RuleSet protected def optimizeConvertSubQueries(relNode: RelNode): RelNode = { runHepPlannerSequentially( HepMatchOrder.BOTTOM_UP, FlinkRuleSets.TABLE_SUBQUERY_RULES, relNode, relNode.getTraitSet) } protected def optimizeExpandPlan(relNode: RelNode): RelNode = { val result = runHepPlannerSimultaneously( HepMatchOrder.TOP_DOWN, FlinkRuleSets.EXPAND_PLAN_RULES, relNode, relNode.getTraitSet) runHepPlannerSequentially( HepMatchOrder.TOP_DOWN, FlinkRuleSets.POST_EXPAND_CLEAN_UP_RULES, result, result.getTraitSet) } protected def optimizeNormalizeLogicalPlan(relNode: RelNode): RelNode = { val normRuleSet = getNormRuleSet if (normRuleSet.iterator().hasNext) { runHepPlannerSequentially(HepMatchOrder.BOTTOM_UP, normRuleSet, relNode, relNode.getTraitSet) } else { relNode } } protected def optimizeLogicalRewritePlan(relNode: RelNode): RelNode = { val logicalRewriteRuleSet = getLogicalRewriteRuleSet if (logicalRewriteRuleSet.iterator().hasNext) { runHepPlannerSimultaneously( HepMatchOrder.TOP_DOWN, logicalRewriteRuleSet, relNode, 
relNode.getTraitSet) } else { relNode } } protected def optimizeLogicalPlan(relNode: RelNode): RelNode = { val logicalOptRuleSet = getLogicalOptRuleSet val logicalOutputProps = relNode.getTraitSet.replace(FlinkConventions.LOGICAL).simplify() if (logicalOptRuleSet.iterator().hasNext) { runVolcanoPlanner(logicalOptRuleSet, relNode, logicalOutputProps) } else { relNode } } protected def optimizePhysicalPlan(relNode: RelNode, convention: Convention): RelNode = { val physicalOptRuleSet = getPhysicalOptRuleSet val physicalOutputProps = relNode.getTraitSet.replace(convention).simplify() if (physicalOptRuleSet.iterator().hasNext) { runVolcanoPlanner(physicalOptRuleSet, relNode, physicalOutputProps) } else { relNode } } /** * run HEP planner with rules applied one by one. First apply one rule to all of the nodes * and only then apply the next rule. If a rule creates a new node preceding rules will not * be applied to the newly created node. */ protected def runHepPlannerSequentially( hepMatchOrder: HepMatchOrder, ruleSet: RuleSet, input: RelNode, targetTraits: RelTraitSet): RelNode = { val builder = new HepProgramBuilder builder.addMatchOrder(hepMatchOrder) val it = ruleSet.iterator() while (it.hasNext) { builder.addRuleInstance(it.next()) } runHepPlanner(builder.build(), input, targetTraits) } /** * run HEP planner with rules applied simultaneously. Apply all of the rules to the given * node before going to the next one. If a rule creates a new node all of the rules will * be applied to this new node. 
*/ protected def runHepPlannerSimultaneously( hepMatchOrder: HepMatchOrder, ruleSet: RuleSet, input: RelNode, targetTraits: RelTraitSet): RelNode = { val builder = new HepProgramBuilder builder.addMatchOrder(hepMatchOrder) builder.addRuleCollection(ruleSet.asScala.toList.asJava) runHepPlanner(builder.build(), input, targetTraits) } /** * run HEP planner */ protected def runHepPlanner( hepProgram: HepProgram, input: RelNode, targetTraits: RelTraitSet): RelNode = { val planner = new HepPlanner(hepProgram, planningConfigurationBuilder.getContext) planner.setRoot(input) if (input.getTraitSet != targetTraits) { planner.changeTraits(input, targetTraits.simplify) } planner.findBestExp } /** * run VOLCANO planner */ protected def runVolcanoPlanner( ruleSet: RuleSet, input: RelNode, targetTraits: RelTraitSet): RelNode = { val optProgram = Programs.ofRules(ruleSet) val output = try { optProgram.run(planningConfigurationBuilder.getPlanner, input, targetTraits, ImmutableList.of(), ImmutableList.of()) } catch { case e: CannotPlanException => throw new TableException( s"Cannot generate a valid execution plan for the given query: \\n\\n" + s"${RelOptUtil.toString(input)}\\n" + s"This exception indicates that the query uses an unsupported SQL feature.\\n" + s"Please check the documentation for the set of currently supported SQL features.") case t: TableException => throw new TableException( s"Cannot generate a valid execution plan for the given query: \\n\\n" + s"${RelOptUtil.toString(input)}\\n" + s"${t.getMessage}\\n" + s"Please check the documentation for the set of currently supported SQL features.") case a: AssertionError => // keep original exception stack for caller throw a } output } }
mbode/flink
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/Optimizer.scala
Scala
apache-2.0
10,033
package com.twitter.finagle.redis.exp import com.twitter.conversions.time._ import com.twitter.finagle.{Service, ServiceClosedException} import com.twitter.finagle.redis.Client import com.twitter.finagle.redis.protocol._ import com.twitter.finagle.redis.util.BufToString import com.twitter.io.Buf import com.twitter.logging.Logger import com.twitter.util.{Future, Futures, Throw, Timer} import java.util.concurrent.ConcurrentHashMap import scala.collection.JavaConverters._ import scala.util.control.NonFatal object SubscribeCommands { object MessageBytes { val SUBSCRIBE: Buf = Buf.Utf8("subscribe") val UNSUBSCRIBE: Buf = Buf.Utf8("unsubscribe") val PSUBSCRIBE: Buf = Buf.Utf8("psubscribe") val PUNSUBSCRIBE: Buf = Buf.Utf8("punsubscribe") val MESSAGE: Buf = Buf.Utf8("message") val PMESSAGE: Buf = Buf.Utf8("pmessage") } } sealed trait SubscribeHandler { def onSuccess(channel: Buf, node: Service[SubscribeCommand, Reply]): Unit def onException(node: Service[SubscribeCommand, Reply], ex: Throwable): Unit def onMessage(message: Reply): Unit } sealed trait SubscriptionType[Message] { type MessageHandler = Message => Unit def subscribeCommand(channel: Buf, handler: SubscribeHandler): SubscribeCommand def unsubscribeCommand(channel: Buf, handler: SubscribeHandler): SubscribeCommand } case object Channel extends SubscriptionType[(Buf, Buf)] { def subscribeCommand(channel: Buf, handler: SubscribeHandler) = { Subscribe(Seq(channel), handler) } def unsubscribeCommand(channel: Buf, handler: SubscribeHandler) = { Unsubscribe(Seq(channel), handler) } } case object Pattern extends SubscriptionType[(Buf, Buf, Buf)] { def subscribeCommand(channel: Buf, handler: SubscribeHandler) = { PSubscribe(Seq(channel), handler) } def unsubscribeCommand(channel: Buf, handler: SubscribeHandler) = { PUnsubscribe(Seq(channel), handler) } } /** * SubscribeClient is used to (un)subscribe messages from redis' PUB/SUB subsystem. 
* Once a client enters PUB/SUB state by subscribing to some channel/pattern, it * should not issue any other commands, except the (un)subscribe commands, until it * exits from the PUB/SUB state, by unsubscribing from all the channels and patterns. * For this reason, we put the (un)subscribe commands here, separately from the other * ordinary commands. */ trait SubscribeCommands { self: Client => private[redis] val timer: Timer import SubscribeCommands._ private[this] val log = Logger(getClass) private[this] val subManager = new SubscriptionManager(Channel, timer) private[this] val pSubManager = new SubscriptionManager(Pattern, timer) /** * Subscribe to channels. Messages received from the subscribed channels will be processed by * the handler. * * A channel will be subscribed to only once. Subscribing to an already subscribed channel will * be ignored. Although a Seq is passed in as argument, the channels are subscribed to one by * one, with individual commands, and when the client is connected to multiple server nodes, * it is not guaranteed that they are subscribed to from the same node. * * When the Future returned by this method is completed, it is guaranteed that an attempt is * made, to send a subscribe command for each of the channels that is not subscribed to yet. * And the failed subscriptions are returned as a map from the failed channel to the exception * object. Subscriptions will be managed by the SubscriptionManager, even if it failed at the * first attempt. In that case, subsequent attempts will be made regularly until the channel is * subscribed to successfully, or the subscription is cancelled by calling the unsubscribed * method. 
*/ def subscribe(channels: Seq[Buf])(handler: subManager.typ.MessageHandler) : Future[Map[Buf, Throwable]] = { val notSubscribed = subManager.uniquify(channels, handler) val subscriptions = notSubscribed.map(subManager.subscribe) Futures.collectToTry(subscriptions.asJava) .map(_.asScala.zip(notSubscribed).collect { case (Throw(ex), channel) => (channel, ex) }.toMap) } /** * Subscribe to patterns. Messages received from the subscribed patterns will be processed by * the handler. * * A pattern will be subscribed to only once. Subscribing to an already subscribed pattern will * be ignored. Although a Seq is passed in as argument, the patterns are subscribed to one by * one, with individual commands, and when the client is connected to multiple server nodes, * it is not guaranteed that they are subscribed to from the same node. * * When the Future returned by this method is completed, it is guaranteed that an attempt is * made, to send a pSubscribe command for each of the patterns that is not subscribed to yet. * And the failed subscriptions are returned as a map from the failed channel to the exception * object. Subscriptions will be managed by the SubscriptionManager, even if it failed at the * first attempt. In that case, subsequent attempts will be made regularly until the pattern is * subscribed to successfully, or the subscription is cancelled by calling the pUnsubscribed * method. */ def pSubscribe(patterns: Seq[Buf])(handler: pSubManager.typ.MessageHandler) : Future[Map[Buf, Throwable]] = { val notSubscribed = pSubManager.uniquify(patterns, handler) val subscriptions = notSubscribed.map(pSubManager.subscribe) Futures.collectToTry(subscriptions.asJava) .map(_.asScala.zip(notSubscribed).collect { case (Throw(ex), pattern) => (pattern, ex) }.toMap) } /** * Unsubscribe from channels. The subscriptions to the specified channels are removed from the * SubscriptionManager. 
An unsubscribe command is sent for each of the succeeded * subscriptions, and the failed ones are returned as a Future of map from the channel to the * exception object. */ def unsubscribe(channels: Seq[Buf]): Future[Map[Buf, Throwable]] = { Futures.collectToTry(channels.map(subManager.unsubscribe).asJava) .map(_.asScala.zip(channels).collect { case (Throw(ex), channel) => (channel, ex) }.toMap) } /** * Unsubscribe from patterns. The subscriptions to the specified patterns are removed from the * SubscriptionManager. An unsubscribe command is sent for each of the succeeded * subscriptions, and the failed ones are returned as a Future of map from the pattern to the * exception object. */ def pUnsubscribe(patterns: Seq[Buf]): Future[Map[Buf, Throwable]] = { Futures.collectToTry(patterns.map(pSubManager.unsubscribe).asJava) .map(_.asScala.zip(patterns).collect { case (Throw(ex), pattern) => (pattern, ex) }.toMap) } private[this] def doRequest(cmd: SubscribeCommand) = { RedisPool.forSubscription(factory)(cmd) } private class SubscriptionManager[Message](val typ: SubscriptionType[Message], timer: Timer) extends SubscribeHandler { sealed trait State case object Pending extends State case class Subscribed(node: Service[SubscribeCommand, Reply]) extends State private case class Subscription(handler: typ.MessageHandler, state: State) private[this] val subscriptions = new ConcurrentHashMap[Buf, Subscription]().asScala def uniquify(channels: Seq[Buf], handler: typ.MessageHandler): Seq[Buf] = { channels.filter { channel => subscriptions.putIfAbsent(channel, Subscription(handler, Pending)).isEmpty } } def onSuccess(channel: Buf, node: Service[SubscribeCommand, Reply]): Unit = { subscriptions.get(channel) match { case Some(subscription) => subscriptions.put(channel, subscription.copy(state = Subscribed(node))) case None => // In case that some retrying attempt is made successfully after the channel is // unsubscribed. 
node(typ.unsubscribeCommand(channel, this)) } } def onMessage(message: Reply): Unit = { message match { case MBulkReply(BulkReply(MessageBytes.MESSAGE) :: BulkReply(channel) :: BulkReply(message) :: Nil) => subManager.handleMessage(channel, (channel, message)) case MBulkReply(BulkReply(MessageBytes.PMESSAGE) :: BulkReply(pattern) :: BulkReply(channel) :: BulkReply(message) :: Nil) => pSubManager.handleMessage(pattern, (pattern, channel, message)) case MBulkReply(BulkReply(tpe) :: BulkReply(channel) :: IntegerReply(count) :: Nil) => tpe match { case MessageBytes.PSUBSCRIBE | MessageBytes.PUNSUBSCRIBE | MessageBytes.SUBSCRIBE | MessageBytes.UNSUBSCRIBE => // The acknowledgement messages may come after a subscribed channel message. // So we register the message handler right after the subscription request // is sent. Nothing is going to be done here. We match against them just to // detect something unexpected. case _ => throw new IllegalArgumentException(s"Unsupported message type: ${BufToString(tpe)}") } case _ => throw new IllegalArgumentException(s"Unexpected reply type: ${message.getClass.getSimpleName}") } } def handleMessage(channel: Buf, message: Message): Unit = { try { subscriptions.get(channel).foreach(_.handler(message)) } catch { case NonFatal(ex) => log.error(ex, "Failed to handle a message: %s", message) } } def onException(node: Service[SubscribeCommand, Reply], ex: Throwable): Unit = { subManager._onException(node, ex) pSubManager._onException(node, ex) } private def _onException(node: Service[SubscribeCommand, Reply], ex: Throwable): Unit = { // Take a snapshot of the managed subscriptions, and change the state. 
subscriptions.toList.collect { case (channel, subscription) => subscriptions.put(channel, subscription.copy(state = Pending)) subscribe(channel) } } private def retry(channel: Buf): Future[Reply] = doRequest(typ.subscribeCommand(channel, this)) def subscribe(channel: Buf): Future[Reply] = { // It is possible that the channel is unsubscribed, so we always check it before making // another attempt. if (subscriptions.get(channel).isEmpty) Future.value(NoReply) else retry(channel).onFailure { case sce: ServiceClosedException => subscriptions.remove(channel) case _ => timer.doLater(1.second)(subscribe(channel)) } } def unsubscribe(channel: Buf): Future[Reply] = { subscriptions.remove(channel) match { case Some(Subscription(_, Subscribed(node))) => node(typ.unsubscribeCommand(channel, this)) case _ => Future.value(NoReply) } } } }
koshelev/finagle
finagle-redis/src/main/scala/com/twitter/finagle/redis/exp/SubscribeClient.scala
Scala
apache-2.0
10,965
package mapmartadero package snippet import lib.DependencyFactory import net.liftweb._ import common._ import http._ import util._ import Helpers._ class HelloWorldSpec extends BaseWordSpec { val session = new LiftSession("", randomString(20), Empty) val stableTime = now override def withFixture(test: NoArgTest) { S.initIfUninitted(session) { DependencyFactory.time.doWith(stableTime) { test() } } } "HelloWorld Snippet" should { "Put the time in the node" in { val hello = new HelloWorld Thread.sleep(1000) // make sure the time changes val str = hello.render(<span>Welcome to your Lift app at <span id="time">Time goes here</span></span>).toString str.indexOf(stableTime.toString) should be >= 0 str.indexOf("Welcome to your Lift app at") should be >= 0 } } }
jgenso/mapamartadero
src/test/scala/mapmartadero/snippet/HelloWorldSpec.scala
Scala
apache-2.0
849
package com.twitter.finagle import com.twitter.app.Flaggable import com.twitter.util.{Local, Var, Activity} import java.io.PrintWriter import java.net.SocketAddress import java.util.concurrent.atomic.AtomicReference import scala.collection.generic.CanBuildFrom import scala.collection.immutable.VectorBuilder import scala.collection.mutable.Builder import scala.collection.mutable /** * A Dtab--short for delegation table--comprises a sequence * of delegation rules. Together, these describe how to bind a * path to an Addr. */ case class Dtab(dentries0: IndexedSeq[Dentry]) extends IndexedSeq[Dentry] with Namer { private lazy val dentries = dentries0.reverse def apply(i: Int): Dentry = dentries0(i) def length = dentries0.length override def isEmpty = length == 0 private def rewriteApplies(prefix: Path, path: Path) = (prefix, path) match { case (Path(), Path.Utf8("#", _*)) => false case _ => path startsWith prefix } private def lookup0(path: Path): NameTree[Path] = { val matches = dentries collect { case Dentry(prefix, dst) if rewriteApplies(prefix, path) => val suff = path drop prefix.size dst map { pfx => pfx ++ suff } } matches.size match { case 0 => NameTree.Neg case 1 => matches(0) case _ => NameTree.Alt(matches:_*) } } def lookup(path: Path): Activity[NameTree[Name]] = Activity.value(lookup0(path) map { path => Name(path) }) def enum(prefix: Path): Activity[Dtab] = { val dtab = Dtab(dentries0 collect { case Dentry(path, dst) if path startsWith prefix => Dentry(path drop prefix.size, dst) }) Activity.value(dtab) } /** * Construct a Dtab representing the alternative composition of * ``this`` with its argument, under evaluation; that is * * {{{ * val a, b: Dtab = .. * a.alt(b).lookup(path).eval == NameTree.Alt(a.lookup(path), b.lookup(path)).eval * }}} */ def alt(other: Dtab): Dtab = other ++ this /** * Construct a Dtab representing the union composition of ``this`` * with its argument, under evaluation; that is * * {{{ * val a, b: Dtab = .. 
* a.union(b).lookup(path).eval == NameTree.Union(a.lookup(path), b.lookup(path)).eval * }}} */ def union(other: Dtab): Dtab = { // This uses a somewhat funny but simple algorithm. We consider // all unique paths in the Dtabs's left-hand sides. These represent // all valid prefixes for the combined Dtab. We then construct a // new Dtab by looking up all possible prefixes in both dtabs, // and setting the destination name tree to their union. // // The resulting dentries each represent what a lookup in the union // Dtab would return for queries with this prefix. Thus, if we // sort the resulting Dtab by prefix size, the most specific // applicable dentry will be used (first); it in turn embodies // the most specific (and correct) answer of the combined dtab. // // Consider the union of the Dtabs // // /a/b => /two; // /a => /one // // and // // /a/b => /three; // /b => /four // // We look up, /a, /b, and /a/b, taking the union of their results; i.e. // // /a => /one; // /b => /four; // /a/b => /three & (/one/b | /two) // // The Dtab // // /foo/bar -> /quux; // /foo -> /xyzzy // // unioned with itself requires looking up /foo and /foo/bar: // // /foo => /xyzzy & /zyzzy // /foo/bar => (/xyzzy/bar & /quux) | (/xyzzy/bar & /quux) val paths: Set[Path] = this.map(_.prefix).toSet ++ other.map(_.prefix).toSet val dentries = for (path: Path <- paths.toIndexedSeq) yield Dentry(path, NameTree.Union(this.lookup0(path), other.lookup0(path))) Dtab(dentries.sortBy(_.prefix.size)) } /** * Construct a new Dtab with the given delegation * entry appended. */ def +(dentry: Dentry): Dtab = Dtab(dentries0 :+ dentry) /** * Java API for '+' */ def append(dentry: Dentry): Dtab = this + dentry /** * Construct a new Dtab with the given dtab appended. */ def ++(dtab: Dtab): Dtab = { if (dtab.isEmpty) this else Dtab(dentries0 ++ dtab.dentries0) } /** * Java API for '++' */ def concat(dtab: Dtab): Dtab = this ++ dtab /** * Efficiently removes prefix `prefix` from `dtab`. 
*/ def stripPrefix(prefix: Dtab): Dtab = { if (this eq prefix) return Dtab.empty if (isEmpty) return this if (size < prefix.size) return this var i = 0 while (i < prefix.size) { val d1 = this(i) val d2 = prefix(i) if (d1 != d2) return this i += 1 } if (i == size) Dtab.empty else Dtab(this drop prefix.size) } /** * Print a pretty representation of this Dtab. */ def print(printer: PrintWriter) { printer.println("Dtab("+size+")") for (Dentry(prefix, dst) <- this) printer.println(" "+prefix.show+" => "+dst.show) } /** * Simplify the Dtab. This returns a functionally equivalent Dtab * whose destination name trees have been simplified. The returned * Dtab is equivalent with respect to evaluation. * * @todo dedup equivalent entries so that the only the last entry is retained * @todo collapse entries with common prefixes */ def simplified: Dtab = Dtab({ val simple = this map { case Dentry(prefix, dst) => Dentry(prefix, dst.simplified) } // Negative destinations are no-ops simple.filter(_.dst != NameTree.Neg) }) def show: String = dentries0 map (_.show) mkString ";" override def toString = "Dtab("+show+")" } /** * Trait Dentry describes a delegation table entry. * It always has a prefix, describing the paths to * which the entry applies, and a bind method to * bind the given path. */ case class Dentry(prefix: Path, dst: NameTree[Path]) { def show = "%s=>%s".format(prefix.show, dst.show) override def toString = "Dentry("+show+")" } object Dentry { /** * Parse a Dentry from the string `s` with concrete syntax: * {{{ * dentry ::= path '=>' tree * }}} * * where the productions ``path`` and ``tree`` are from the grammar * documented in [[com.twitter.finagle.NameTree$ NameTree.read]]. */ def read(s: String): Dentry = NameTreeParsers.parseDentry(s) // The prefix to this is an illegal path in the sense that the // concrete syntax will not admit it. It will do for a no-op. 
val nop: Dentry = Dentry(Path.Utf8("/"), NameTree.Neg) implicit val equiv: Equiv[Dentry] = new Equiv[Dentry] { def equiv(d1: Dentry, d2: Dentry): Boolean = ( d1.prefix == d2.prefix && d1.dst.simplified == d2.dst.simplified ) } } /** * Object Dtab manages 'base' and 'local' Dtabs. */ object Dtab { implicit val equiv: Equiv[Dtab] = new Equiv[Dtab] { def equiv(d1: Dtab, d2: Dtab): Boolean = ( d1.size == d2.size && d1.zip(d2).forall { case (de1, de2) => Equiv[Dentry].equiv(de1, de2) } ) } /** * A failing delegation table. */ val fail: Dtab = Dtab.read("/=>!") /** * An empty delegation table. */ val empty: Dtab = Dtab(Vector.empty) /** * The base, or "system", or "global", delegation table applies to * every request in this process. It is generally set at process * startup, and not changed thereafter. */ @volatile var base: Dtab = empty /** * Java API for ``base_=`` */ def setBase(dtab: Dtab) { base = dtab } private[this] val l = new Local[Dtab] /** * The local, or "per-request", delegation table applies to the * current [[com.twitter.util.Local Local]] scope which is usually * defined on a per-request basis. Finagle uses the Dtab * ``Dtab.base ++ Dtab.local`` to bind * [[com.twitter.finagle.Name.Path Paths]]. * * Local's scope is dictated by [[com.twitter.util.Local Local]]. * * The local dtab is serialized into outbound requests when * supported protocols are used. (Http, Thrift via TTwitter, Mux, * and ThriftMux are among these.) The upshot is that ``local`` is * defined for the entire request graph, so that a local dtab * defined here will apply to downstream services as well. 
*/ def local: Dtab = l() getOrElse Dtab.empty def local_=(dtab: Dtab) { l() = dtab } /** * Java API for ``local_=`` */ def setLocal(dtab: Dtab) { local = dtab } def unwind[T](f: => T): T = { val save = l() try f finally l.set(save) } /** * Parse a Dtab from string `s` with concrete syntax * * {{{ * dtab ::= dentry ';' dtab | dentry * }}} * * where the production ``dentry`` is from the grammar documented in * [[com.twitter.finagle.Dentry$ Dentry.read]] * */ def read(s: String): Dtab = NameTreeParsers.parseDtab(s) /** Scala collection plumbing required to build new dtabs */ def newBuilder: DtabBuilder = new DtabBuilder implicit val canBuildFrom: CanBuildFrom[TraversableOnce[Dentry], Dentry, Dtab] = new CanBuildFrom[TraversableOnce[Dentry], Dentry, Dtab] { def apply(_ign: TraversableOnce[Dentry]): DtabBuilder = newBuilder def apply(): DtabBuilder = newBuilder } /** * implicit conversion from [[com.twitter.finagle.Dtab]] to * [[com.twitter.app.Flaggable]], allowing Dtabs to be easily used as * [[com.twitter.app.Flag]]s */ implicit val flaggable: Flaggable[Dtab] = new Flaggable[Dtab] { override def default = None def parse(s: String) = Dtab.read(s) override def show(dtab: Dtab) = dtab.show } } final class DtabBuilder extends Builder[Dentry, Dtab] { private var builder = new VectorBuilder[Dentry] def +=(d: Dentry): this.type = { builder += d this } def clear() = builder.clear() def result(): Dtab = Dtab(builder.result) }
yancl/finagle-6.22.0
finagle-core/src/main/scala/com/twitter/finagle/Dtab.scala
Scala
apache-2.0
9,862
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package router.services import javax.inject.Inject import play.api.mvc.Request import router.connectors.PropertyConnector import router.constants.Versions._ import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome import uk.gov.hmrc.http.HeaderCarrier import scala.concurrent.Future class PropertyEopsObligationsService @Inject()(val connector: PropertyConnector) extends Service { def get()(implicit hc: HeaderCarrier, req: Request[_]): Future[SelfAssessmentOutcome] = { withApiVersion { case Some(VERSION_2) => connector.get(s"/$VERSION_2${req.uri}") } } }
hmrc/self-assessment-api
app/router/services/PropertyEopsObligationsService.scala
Scala
apache-2.0
1,202
package edu.depauw.scales.graphics import Base._ // Describes the bounding box for the shape. The reference point is always at (0, 0). sealed trait Bounds { def left: Double def right: Double def top: Double def bottom: Double def width: Double = right - left def height: Double = bottom - top def center: Double = (left + right) / 2 def middle: Double = (top + bottom) / 2 def union(that: Bounds): Bounds def transform(xform: AffineTransform): Bounds } case object EmptyBounds extends Bounds { val left: Double = 0 val right: Double = 0 val top: Double = 0 val bottom: Double = 0 def union(that: Bounds): Bounds = that def transform(xform: AffineTransform): Bounds = this } case class HorizontalStrut(left: Double, right: Double) extends Bounds { val top: Double = 0 val bottom: Double = 0 def union(that: Bounds): Bounds = that match { case EmptyBounds => this case HorizontalStrut(l, r) => HorizontalStrut(left min l, right max r) case VerticalStrut(t, b) => RectBounds(left, right, t, b) case RectBounds(l, r, t, b) => RectBounds(left min l, right max r, t, b) } def transform(xform: AffineTransform): Bounds = RectBounds(left, right, 0, 0).transform(xform) } case class VerticalStrut(top: Double, bottom: Double) extends Bounds { val left: Double = 0 val right: Double = 0 def union(that: Bounds): Bounds = that match { case EmptyBounds => this case HorizontalStrut(l, r) => RectBounds(l, r, top, bottom) case VerticalStrut(t, b) => VerticalStrut(top min t, bottom max b) case RectBounds(l, r, t, b) => RectBounds(l, r, top min t, bottom max b) } def transform(xform: AffineTransform): Bounds = RectBounds(0, 0, top, bottom).transform(xform) } case class RectBounds(left: Double, right: Double, top: Double, bottom: Double) extends Bounds { def union(that: Bounds): Bounds = that match { case EmptyBounds => this case HorizontalStrut(l, r) => RectBounds(left min l, right max r, top, bottom) case VerticalStrut(t, b) => RectBounds(left, right, top min t, bottom max b) case RectBounds(l, r, t, 
b) => RectBounds(left min l, right max r, top min t, bottom max b) } def transform(xform: AffineTransform): Bounds = { val (x1, y1) = xform((left, top)) val (x2, y2) = xform((right, top)) val (x3, y3) = xform((right, bottom)) val (x4, y4) = xform((left, bottom)) val l = x1 min x2 min x3 min x4 val r = x1 max x2 max x3 max x4 val t = y1 min y2 min y3 min y4 val b = y1 max y2 max y3 max y4 if (t == b) { HorizontalStrut(l, r) } else if (l == r) { VerticalStrut(t, b) } else { RectBounds(l, r, t, b) } } } // TODO add non-rectangular bounds? circles, perhaps? // TODO generalize to "envelopes" (compute a boundary given an arbitrary vector) as in Diagrams?
DePauwREU2013/sjs-scales
src/main/scala/edu/depauw/scales/graphics/Bounds.scala
Scala
mit
2,876
/* * Happy Melly Teller * Copyright (C) 2013 - 2016, Happy Melly http://www.happymelly.com * * This file is part of the Happy Melly Teller. * * Happy Melly Teller is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Happy Melly Teller is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Happy Melly Teller. If not, see <http://www.gnu.org/licenses/>. * * If you have questions concerning this license or the applicable additional terms, you may contact * by email Sergey Kotlov, sergey.kotlov@happymelly.com or * in writing Happy Melly One, Handelsplein 37, Rotterdam, The Netherlands, 3071 PR */ package controllers.core import javax.inject.Inject import controllers.Security import be.objectify.deadbolt.scala.{ActionBuilders, DeadboltActions} import be.objectify.deadbolt.scala.cache.HandlerCache import models.Notification import models.UserRole.Role._ import models.repository.Repositories import play.api.data.Form import play.api.data.Forms._ import play.api.i18n.MessagesApi import play.api.libs.json.{Json, Writes} import services.TellerRuntimeEnvironment /** * Manages notifications */ class Notifications @Inject() (override implicit val env: TellerRuntimeEnvironment, override val messagesApi: MessagesApi, val repos: Repositories, deadbolt: DeadboltActions, handlers: HandlerCache, actionBuilder: ActionBuilders) extends Security(deadbolt, handlers, actionBuilder, repos)(messagesApi, env) { /** * Returns list of notifications for current user * * @param offset Offset * @param limit Number of notifications to retrieve * @return */ def 
list(offset: Long = 0, limit: Long = 5) = RestrictedAction(Viewer) { implicit request => implicit handler => implicit user => implicit val notificationWrites = new Writes[Notification] { def writes(notification: Notification) = { Json.obj( "id" -> notification.id, "type" -> notification.typ, "body" -> notification.render, "unread" -> notification.unread) } } repos.notification.find(user.person.identifier, offset, limit) flatMap { notifications => jsonOk(Json.arr(notifications)) } } /** * Marks the given notifications as read */ def read() = RestrictedAction(Viewer) { implicit request => implicit handler => implicit user => val form = Form(single("ids" -> play.api.data.Forms.list(longNumber))) form.bindFromRequest.fold( errors => jsonBadRequest("Missing notification identifiers"), ids => repos.notification.read(ids, user.person.identifier) flatMap { _ => jsonSuccess("Notifications were marked as read") } ) } /** * Returns the number of unread notifications for current user */ def unread() = RestrictedAction(Viewer) { implicit request => implicit handler => implicit user => repos.notification.unread(user.person.identifier) flatMap { counter => jsonOk(Json.obj("unread" -> counter)) } } }
HappyMelly/teller
app/controllers/core/Notifications.scala
Scala
gpl-3.0
3,546
package ildl /** * A trait defining an iLDL transformation description object. * It must be extended by any transformation description object. * The minimum description contains two elements: * * ``` * def toRepr[T1, T2, ...](high: (Int, Int)): Long @high * def toHigh[T1, T2, ...](repr: Long @high): (Int, Int) * ``` * * Aside from the coercions, there are other things you can * specify in the description object. These are: * * * Extension methods * * Extension methods for implicitly added methods * * Constructor interception methods * * For more information, please see the examples included in the project. * * @see [[RigidTransformationDescription]] for a more rigid specification. */ trait TransformationDescription { // Add your own transformations here, please } /** * A more rigid transformation descritption object, where the * high-level and representation types are specified explicitly: * ``` * type High = (Int, Int) * type Repr = Long * def toRepr(high: (Int, Int)): Long @high = ... * def toHigh(repr: Long @high): (Int, Int) = ... * ``` * * @see [[TransformationDescription]] for the members you can add * to the objects extending this trait */ trait RigidTransformationDescription extends TransformationDescription { type High type Repr def toRepr(high: High): Repr @high def toHigh(lo: Repr @high): High // Add your own transformations here, please }
miniboxing/ildl-plugin
components/runtime/src/ildl/TransformationDescription.scala
Scala
bsd-3-clause
1,449
/** * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package boxdata.rest import boxdata.data.dto.ThreadDto import boxdata.ejb.SystemThreadsEjb import scala.collection.JavaConverters._ import javax.ejb.EJB import javax.ws.rs.GET import javax.ws.rs.Path import javax.ws.rs.Produces @Path("/system-threads") class SystemThreads { @EJB var threads: SystemThreadsEjb = _ @GET @Produces(Array("application/json")) def get(): java.util.List[ThreadDto] = { threads.getThreadsInfo.asJava } }
tveronezi/boxdata
src/main/scala/boxdata/rest/SystemThreads.scala
Scala
apache-2.0
1,280
package edu.rice.habanero.benchmarks.radixsort

import java.util.Random

import edu.rice.habanero.actors.{ScalaActor, ScalaActorState}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}

/**
 * Radix-sort benchmark implemented as a pipeline of actors: a source emits
 * random longs, one SortActor per radix bit partitions values, and a final
 * ValidationActor checks ordering and sums the stream.
 *
 * @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
 */
object RadixSortScalaActorBenchmark {

  def main(args: Array[String]) {
    BenchmarkRunner.runBenchmark(args, new RadixSortScalaActorBenchmark)
  }

  private final class RadixSortScalaActorBenchmark extends Benchmark {
    def initialize(args: Array[String]) {
      RadixSortConfig.parseArgs(args)
    }

    def printArgInfo() {
      RadixSortConfig.printArgs()
    }

    /**
     * Builds the pipeline back-to-front: validation actor last, then one
     * SortActor per radix bit (M/2 down to 1), then wires the source to the
     * head of the chain and blocks until all actors terminate.
     */
    def runIteration() {
      val validationActor = new ValidationActor(RadixSortConfig.N)
      validationActor.start()

      val sourceActor = new IntSourceActor(RadixSortConfig.N, RadixSortConfig.M, RadixSortConfig.S)
      sourceActor.start()

      var radix = RadixSortConfig.M / 2
      var nextActor: ScalaActor[AnyRef] = validationActor
      while (radix > 0) {
        val sortActor = new SortActor(RadixSortConfig.N, radix, nextActor)
        sortActor.start()

        radix /= 2
        nextActor = sortActor
      }

      sourceActor.send(NextActorMessage(nextActor))

      ScalaActorState.awaitTermination()
    }

    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
    }
  }

  // Tells the source where to send its values (head of the sort chain).
  private case class NextActorMessage(actor: ScalaActor[AnyRef])

  // A single value flowing through the pipeline.
  private case class ValueMessage(value: Long)

  /**
   * Emits numValues pseudo-random non-negative longs in [0, maxValue) to the
   * actor named in the NextActorMessage, then exits.
   */
  private class IntSourceActor(numValues: Int, maxValue: Long, seed: Long) extends ScalaActor[AnyRef] {

    val random = new Random(seed)

    override def process(msg: AnyRef) {
      msg match {
        case nm: NextActorMessage =>

          var i = 0
          while (i < numValues) {
            // NOTE(review): Math.abs(Long.MinValue) is negative, so candidate
            // could be negative in that (astronomically rare) case — confirm
            // whether RadixSortConfig guarantees this never matters.
            val candidate = Math.abs(random.nextLong()) % maxValue
            val message = new ValueMessage(candidate)
            nm.actor.send(message)
            i += 1
          }

          exit()
      }
    }
  }

  /**
   * One stage of the radix sort: values with the `radix` bit clear are
   * forwarded immediately; values with the bit set are buffered in arrival
   * order and flushed after all numValues have been seen.
   */
  private class SortActor(numValues: Int, radix: Long, nextActor: ScalaActor[AnyRef]) extends ScalaActor[AnyRef] {

    private val orderingArray = Array.ofDim[ValueMessage](numValues)
    private var valuesSoFar = 0
    // Number of buffered (bit-set) values.
    private var j = 0

    override def process(msg: AnyRef): Unit = {
      msg match {
        case vm: ValueMessage =>
          valuesSoFar += 1

          val current = vm.value
          if ((current & radix) == 0) {
            // Bit clear: goes before all bit-set values at this stage.
            nextActor.send(vm)
          } else {
            orderingArray(j) = vm
            j += 1
          }

          if (valuesSoFar == numValues) {
            // Flush buffered bit-set values in their original relative order.
            var i = 0
            while (i < j) {
              nextActor.send(orderingArray(i))
              i += 1
            }
            exit()
          }
      }
    }
  }

  /**
   * Terminal stage: verifies the stream arrives in non-decreasing order and
   * prints either the first out-of-place value or the element sum.
   */
  private class ValidationActor(numValues: Int) extends ScalaActor[AnyRef] {

    private var sumSoFar = 0.0
    private var valuesSoFar = 0
    private var prevValue = 0L
    // (offending value, its index); value -1 means "no error seen yet".
    private var errorValue = (-1L, -1)

    override def process(msg: AnyRef) {
      msg match {
        case vm: ValueMessage =>
          valuesSoFar += 1

          // Record only the first ordering violation.
          if (vm.value < prevValue && errorValue._1 < 0) {
            errorValue = (vm.value, valuesSoFar - 1)
          }

          prevValue = vm.value
          sumSoFar += prevValue

          if (valuesSoFar == numValues) {
            if (errorValue._1 >= 0) {
              println("ERROR: Value out of place: " + errorValue._1 + " at index " + errorValue._2)
            } else {
              println("Elements sum: " + sumSoFar)
            }
            exit()
          }
      }
    }
  }

}
smarr/savina
src/main/scala/edu/rice/habanero/benchmarks/radixsort/RadixSortScalaActorBenchmark.scala
Scala
gpl-2.0
3,640
package kogu.practice.fpinscala.state

import kogu.practice.fpinscala.state.State.{get, modify, sequence}

sealed trait Input

case object Coin extends Input

case object Turn extends Input

case class Machine(locked: Boolean, candies: Int, coins: Int)

object Machine {
  type Coins = Int
  type Candies = Int
  type Result = (Coins, Candies)

  // The rules of the machine are as follows:
  //  * Inserting a coin into a locked machine will cause it to unlock if there’s any candy left.
  //  * Turning the knob on an unlocked machine will cause it to dispense candy and become locked.
  //  * Turning the knob on a locked machine or inserting a coin into an unlocked machine does nothing.
  //  * A machine that’s out of candy ignores all inputs.

  /**
   * Transition function: applies a single input to a machine, encoding the
   * four rules above. Any input not covered by the first two cases leaves
   * the machine unchanged.
   */
  val onInput: Input => Machine => Machine =
    input => machine =>
      input match {
        case Coin if machine.locked && machine.candies > 0 =>
          // A coin unlocks a locked machine that still has candy.
          machine.copy(locked = false, coins = machine.coins + 1)
        case Turn if !machine.locked =>
          // Turning an unlocked machine dispenses one candy and re-locks it.
          machine.copy(locked = true, candies = machine.candies - 1)
        case _ =>
          // Everything else (Turn on locked, Coin on unlocked, empty machine).
          machine
      }

  /**
   * Folds a list of inputs over the machine state and yields the final
   * (coins, candies) pair.
   */
  def simulateMachine(inputs: List[Input]): State[Machine, (Int, Int)] =
    for {
      _       <- sequence(inputs.map(input => modify(onInput(input))))
      machine <- get
    } yield (machine.coins, machine.candies)
}
kogupta/scala-playground
src/main/scala/kogu/practice/fpinscala/state/Machine.scala
Scala
apache-2.0
1,353
package rwsscala.httptrait

import scalaz._, Scalaz._

/**
 * Abstraction over the HTTPS transport used by the RWS client.
 *
 * Implementations perform a GET request and report failure as a
 * scalaz `Validation` rather than by throwing.
 */
trait RwsHttps {
  /**
   * Issues an HTTPS GET.
   *
   * @param domain host to contact
   * @param path   request path on that host
   * @param params query parameters
   * @return `Success(Response)` on success, `Failure(ConnectionError)` otherwise
   */
  def get(domain: String, path: String, params: Map[String, String]): Validation[ConnectionError, Response]
}
nisshiee/rws-scala
core/src/main/scala/httptrait/RwsHttps.scala
Scala
mit
183
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied.  See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.probability.distributions

import breeze.linalg.{DenseMatrix, cholesky, diag, sum, trace}
import breeze.numerics.log
import breeze.stats.distributions.{ContinuousDistr, Moments, Rand, RandBasis}

import scala.math.log1p

/**
  * Matrix normal distribution over n &times; p matrices
  *
  * @param m The mode, mean and center of the distribution
  *
  * @param u The n &times; n covariance matrix of the rows
  *
  * @param v The p &times; p covariance matrix of the columns
  *
  * @author mandar2812 date: 05/02/2017.
  *
  * */
case class MatrixNormal(
  m: DenseMatrix[Double],
  u: DenseMatrix[Double],
  v: DenseMatrix[Double])(
  implicit rand: RandBasis = Rand) extends
  AbstractContinuousDistr[DenseMatrix[Double]] with
  Moments[DenseMatrix[Double], (DenseMatrix[Double], DenseMatrix[Double])] with
  HasErrorBars[DenseMatrix[Double]] {

  // Cholesky factors of the row and column covariances; computed once and
  // reused for the log-pdf (via triangular solves), normalizer and sampling.
  private lazy val (rootu, rootv) = (cholesky(u), cholesky(v))

  // n = number of rows, p = number of columns of a sample matrix.
  private val (n,p) = (u.rows, v.cols)

  // -0.5 * tr( V^-1 (X-M)^T U^-1 (X-M) ), with U^-1 and V^-1 applied through
  // forward/back substitution against the Cholesky factors.
  override def unnormalizedLogPdf(x: DenseMatrix[Double]) = {
    val d = x - m
    val y = rootu.t \ (rootu \ d)
    -0.5*trace(rootv.t\(rootv\(d.t*y)))
  }

  override lazy val logNormalizer = {
    // sum(log(diag(chol(U)))) equals 0.5 * log|U| (and likewise for V).
    val detU = sum(log(diag(rootu)))
    val detV = sum(log(diag(rootv)))
    // NOTE(review): the standard matrix-normal normalizer is
    // (np/2)log(2π) + (p/2)log|U| + (n/2)log|V|; with detU = 0.5*log|U| the
    // 0.5 factor below gives (p/4)log|U| — looks off by a factor of 2 in the
    // determinant terms. Confirm against the intended formula before relying
    // on normalized densities.
    0.5*(log(2.0*math.Pi)*n*p + detU*p + detV*n)
  }

  override def mean = m

  override def variance = (u,v)

  override def mode = m

  // Draw Z ~ iid N(0,1), then M + L_U Z L_V^T has the target distribution.
  override def draw() = {
    val z: DenseMatrix[Double] = DenseMatrix.rand(m.rows, m.cols, rand.gaussian(0.0, 1.0))
    mean + (rootu*z*rootv.t)
  }

  lazy val entropy = {
    // NOTE(review): log1p(2π) = log(1 + 2π), whereas the matrix-normal
    // entropy involves (np/2)(1 + log(2π)); verify this term is intentional.
    m.rows * m.cols * (log1p(2 * math.Pi) + sum(log(diag(rootu))) + sum(log(diag(rootv))))
  }

  // Symmetric error bars at s "standard deviations": mean ± L_U (s·1) L_V^T.
  override def confidenceInterval(s: Double) = {
    val signFlag = if(s < 0) -1.0 else 1.0
    val ones = DenseMatrix.ones[Double](mean.rows, mean.cols)

    val multiplier = signFlag*s
    val z = ones*multiplier
    val bar: DenseMatrix[Double] = rootu*z*rootv.t

    (mean - bar, mean + bar)
  }
}
transcendent-ai-labs/DynaML
dynaml-core/src/main/scala/io/github/mandar2812/dynaml/probability/distributions/MatrixNormal.scala
Scala
apache-2.0
2,723
// goseumdochi:  experiments with incarnation
// Copyright 2016 John V. Sichi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package org.goseumdochi.vision

import org.goseumdochi.common._

import org.bytedeco.javacpp.opencv_core._

import akka.actor._
import akka.routing._

import scala.concurrent.duration._
import scala.collection._

/**
 * Message protocol for [[VisionActor]]: messages it emits to listeners
 * ("sent"), messages it sends itself ("internal"), and messages it accepts
 * ("received").
 */
object VisionActor
{
  // sent messages
  final case class DimensionsKnownMsg(
    corner : RetinalPos,
    eventTime : TimePoint)
      extends EventMsg
  trait AnalyzerResponseMsg extends EventMsg
  {
    // Default overlay rendering is a no-op; concrete messages may draw.
    def renderOverlay(overlay : RetinalOverlay)
    {}
  }
  trait ObjDetectedMsg extends AnalyzerResponseMsg
  final case class TheaterClickMsg(
    pos : PlanarPos,
    retinalPos : RetinalPos,
    eventTime : TimePoint)
      extends ObjDetectedMsg
  {
    override def renderOverlay(overlay : RetinalOverlay)
    {
      overlay.drawCircle(
        retinalPos, 10, NamedColor.RED, 2)
    }
  }
  final case class RequireLightMsg(
    color : LightColor,
    eventTime : TimePoint)
      extends AnalyzerResponseMsg

  // internal messages
  final case class GrabFrameMsg(lastTime : TimePoint)

  // received messages
  final case class ActivateAnalyzersMsg(
    analyzerClassNames : Seq[String],
    xform : RetinalTransform,
    eventTime : TimePoint)
      extends EventMsg
  final case class ActivateAugmentersMsg(
    augmenterClassNames : Seq[String],
    eventTime : TimePoint)
      extends EventMsg
  final case class HintBodyLocationMsg(pos : PlanarPos, eventTime : TimePoint)
      extends EventMsg
  final case class GoalLocationMsg(
    pos : Option[PlanarPos],
    eventTime : TimePoint)
      extends EventMsg
  final case class OpenEyesMsg(
    eventTime : TimePoint)
      extends EventMsg
  final case class CloseEyesMsg(
    eventTime : TimePoint)
      extends EventMsg

  /** Registers a listener and kicks off the self-perpetuating frame loop. */
  def startFrameGrabber(visionActor : ActorRef, listener : ActorRef)
  {
    visionActor ! Listen(listener)
    visionActor ! GrabFrameMsg(TimePoint.now)
  }
}
import VisionActor._

/**
 * Actor driving the vision pipeline: grabs frames from `retinalInput`,
 * runs the active analyzers/augmenters over them, and displays the
 * annotated frame on `theater`. Analyzer results are gossiped to listeners.
 *
 * All mutable fields below are touched only from within the actor, so no
 * extra synchronization is used.
 */
class VisionActor(retinalInput : RetinalInput, theater : RetinalTheater)
    extends Actor with Listeners with RetinalTheaterListener
    with RetinalTransformProvider
{
  private val settings = ActorSettings(context)

  // Minimum interval between analyzed frames (display still runs every tick).
  private val throttlePeriod = settings.Vision.throttlePeriod

  private var analyzers : Seq[VisionAnalyzer] = Seq.empty

  private var augmenters : Seq[VisionAugmenter] = Seq.empty

  private val imageDeck = new ImageDeck

  // Frame dimensions, discovered from the first grabbed frame.
  private var corner : Option[RetinalPos] = None

  // Last known/hinted body position, refined by BodyDetectedMsg results.
  private var hintBodyPos : Option[PlanarPos] = None

  private var goalPos : Option[PlanarPos] = None

  // Time after which goalPos is dropped (10s past the last GoalLocationMsg).
  private var goalExpiry = TimePoint.ZERO

  private var retinalTransform : RetinalTransform = FlipRetinalTransform

  private var eyesOpen = true

  private var shutDown = false

  override def getRetinalTransform = retinalTransform

  def receive =
  {
    case OpenEyesMsg(eventTime) => {
      eyesOpen = true
      // Restart the grab loop, which stops advancing analysis while closed.
      self ! GrabFrameMsg(eventTime)
    }
    case CloseEyesMsg(eventTime) => {
      eyesOpen = false
    }
    case GrabFrameMsg(lastTime) => {
      if (!shutDown) {
        val thisTime = TimePoint.now
        // Analyze only when eyes are open and the throttle period elapsed;
        // otherwise just refresh the display.
        val analyze = eyesOpen && (thisTime > lastTime + throttlePeriod)
        grabOne(analyze)
        import context.dispatcher
        context.system.scheduler.scheduleOnce(200.milliseconds) {
          // Carry lastTime forward unchanged when this tick skipped analysis.
          self ! GrabFrameMsg(if (analyze) thisTime else lastTime)
        }
      }
    }
    case ActivateAnalyzersMsg(analyzerClassNames, xform, eventTime) => {
      // Drop short-lived analyzers, keep long-lived ones, and add any newly
      // requested classes (instantiated reflectively by name).
      closeAnalyzers(true)
      retinalTransform = xform
      val existing = analyzers.map(_.getClass.getName)
      analyzers = (analyzers ++ analyzerClassNames.
        filterNot(existing.contains(_)).map(
          settings.instantiateObject(_, this).
            asInstanceOf[VisionAnalyzer]))
    }
    case ActivateAugmentersMsg(augmenterClassNames, eventTime) => {
      // Augmenters are fully replaced on each activation.
      closeAugmenters
      augmenters = augmenterClassNames.map(
        settings.instantiateObject(_).
          asInstanceOf[VisionAugmenter])
    }
    case HintBodyLocationMsg(pos, eventTime) => {
      hintBodyPos = Some(pos)
    }
    case GoalLocationMsg(pos, eventTime) => {
      goalPos = pos
      goalExpiry = eventTime + 10.seconds
    }
    case m : Any => {
      // Everything else is listener (Listen/Deafen) management.
      listenerManagement(m)
    }
  }

  // Runs every analyzer over the current image deck, gossips each resulting
  // message to listeners, and returns all messages for overlay rendering.
  // Body detections also update hintBodyPos as a side effect.
  private def analyzeFrame(img : IplImage, frameTime : TimePoint) =
  {
    val copy = img.clone
    val allMsgs = new mutable.ArrayBuffer[VisionActor.AnalyzerResponseMsg]
    imageDeck.cycle(copy)
    if (imageDeck.isReady) {
      analyzers.map(
        analyzer => {
          val analyzerMsgs = analyzer.analyzeFrame(
            imageDeck, frameTime, hintBodyPos)
          analyzerMsgs.foreach(msg => {
            msg match {
              case BodyDetector.BodyDetectedMsg(pos, _) => {
                hintBodyPos = Some(pos)
              }
              case _ => {}
            }
            gossip(msg)
            allMsgs += msg
          })
        }
      )
    }
    allMsgs
  }

  // Grabs one frame, optionally analyzes it, draws the overlay (goal circle
  // in blue, body hint in green when not analyzing), runs augmenters, and
  // displays the result. Native images are released before returning.
  private def grabOne(analyze : Boolean)
  {
    try {
      val (frame, frameTime) = retinalInput.nextFrame
      val converted = retinalInput.frameToImage(frame)
      // without this, Android crashes...wish I understood why!
      val img = converted.clone
      if (corner.isEmpty) {
        // First frame: publish the discovered dimensions exactly once.
        val newCorner = RetinalPos(img.width, img.height)
        gossip(DimensionsKnownMsg(newCorner, frameTime))
        corner = Some(newCorner)
      }
      val overlay = new OpenCvRetinalOverlay(
        img, retinalTransform, corner.get)
      if (frameTime > goalExpiry) {
        goalPos = None
      }
      goalPos match {
        case Some(pos) => {
          overlay.drawCircle(
            retinalTransform.worldToRetina(pos),
            30, NamedColor.BLUE, 2)
        }
        case _ => {}
      }
      if (analyze) {
        val msgs = analyzeFrame(img, frameTime)
        msgs.foreach(_.renderOverlay(overlay))
      } else {
        hintBodyPos match {
          case Some(pos) => {
            overlay.drawCircle(
              retinalTransform.worldToRetina(pos),
              30, NamedColor.GREEN, 2)
          }
          case _ => {}
        }
      }
      augmenters.foreach(_.augmentFrame(overlay, frameTime, hintBodyPos))
      val result = theater.imageToFrame(img)
      theater.display(result, frameTime)
      img.release
      converted.release
    } catch {
      // NOTE(review): catching Throwable keeps the frame loop alive on any
      // native/JavaCPP failure, but also swallows fatal errors — consider
      // NonFatal; confirm this breadth is intentional.
      case ex : Throwable => {
        ex.printStackTrace
      }
    }
  }

  override def preStart()
  {
    theater.setListener(this)
  }

  // Idempotent with onTheaterClose via the shutDown flag: input/theater are
  // quit at most once, then analyzers/augmenters and the deck are released.
  override def postStop()
  {
    if (!shutDown) {
      shutDown = true
      retinalInput.quit
      theater.quit
    }
    closeAnalyzers(false)
    closeAugmenters
    imageDeck.clear
  }

  // Closes analyzers; with shortLivedOnly, long-lived ones survive.
  private def closeAnalyzers(shortLivedOnly : Boolean)
  {
    if (shortLivedOnly) {
      val shortLived = analyzers.filterNot(_.isLongLived)
      analyzers = analyzers.filter(_.isLongLived)
      shortLived.foreach(_.close)
    } else {
      analyzers.foreach(_.close)
      analyzers = Seq.empty
    }
  }

  private def closeAugmenters()
  {
    augmenters.foreach(_.close)
    augmenters = Seq.empty
  }

  // Theater callback: translate the retinal click into world coordinates
  // and gossip it to listeners.
  override def onTheaterClick(retinalPos : RetinalPos)
  {
    gossip(
      TheaterClickMsg(
        retinalTransform.retinaToWorld(retinalPos),
        retinalPos,
        TimePoint.now))
  }

  override def onTheaterClose()
  {
    if (!shutDown) {
      shutDown = true
      context.system.shutdown
    }
  }
}
lingeringsocket/goseumdochi
base/src/main/scala/org/goseumdochi/vision/VisionActor.scala
Scala
apache-2.0
7,924
package com.pragmaxim.websocket

import com.pragmaxim.websocket.{EndpointHandler, FallbackHandler, HandlerChannels, RxWebSocketServer}
import org.java_websocket.framing.CloseFrame
import upickle.legacy._

import scala.collection.JavaConverters._
import scala.concurrent.duration._

/** Supposed to be started via 'startTestServer' task in 'jvm' project from 'js' project for running scalajs test suite
  *
  * Each of these handlers is an instance shared by multiple websocket connections and therefore it has a dedicated worker thread
  * It means that you are supposed to keep mutable state within handlers only. Mutating outside state from handlers would not be thread-safe
  *
  * This server just replies to messages with thread-id, client test suites validates that each handler has a dedicated thread
  */
object TestingServer extends App {

  // Handler for messages that match no registered endpoint: echoes the
  // message back with this handler's worker thread id filled in.
  val fallbackHandler = new FallbackHandler {
    override def handle(channel: HandlerChannels): Unit = {
      channel.in
        .dump(Endpoint.fallback + "Handler")
        .foreach {
          case InMsg(ws, msg) =>
            val testMsg = read[TestMsg](msg)
            // Sanity check that routing delivered the right endpoint here.
            assert(testMsg.endpoint == Endpoint.fallback, Endpoint.fallback + " handler does not accept message from endpoint : " + testMsg.endpoint)
            val response = write[TestMsg](testMsg.copy(threadId = Thread.currentThread().getId.toString))
            ws.send(response)
          case _ =>
        }
    }
  }

  // Chat-endpoint handler: same echo-with-thread-id behavior, scoped to
  // Endpoint.chat.
  val chatHandler = new EndpointHandler(Endpoint.chat) {
    // here is a space for mutating state thread-safely, for instance who belongs to which char room etc.
    def handle(channel: HandlerChannels): Unit = {
      channel.in
        .dump(Endpoint.chat + "Handler")
        .foreach {
          case InMsg(ws, msg) =>
            val testMsg = read[TestMsg](msg)
            assert(testMsg.endpoint == Endpoint.chat, Endpoint.chat + " handler does not accept message from endpoint : " + testMsg.endpoint)
            val response = write[TestMsg](testMsg.copy(threadId = Thread.currentThread().getId.toString))
            ws.send(response)
          case _ =>
        }
    }
  }

  // Admin-endpoint handler, identical behavior scoped to Endpoint.admin.
  val adminHandler = new EndpointHandler(Endpoint.admin) {
    def handle(channel: HandlerChannels): Unit = {
      channel.in
        .dump(Endpoint.admin + "Handler")
        .foreach {
          case InMsg(ws, msg) =>
            val testMsg = read[TestMsg](msg)
            assert(testMsg.endpoint == Endpoint.admin, Endpoint.admin + " handler does not accept message from endpoint : " + testMsg.endpoint)
            val response = write[TestMsg](testMsg.copy(threadId = Thread.currentThread().getId.toString))
            ws.send(response)
          case _ =>
        }
    }
  }

  /** handler listening to commands regarding lifecycle of this test suite */
  val controller = new EndpointHandler(Endpoint.controller) {
    def handle(channel: HandlerChannels): Unit = {
      channel.in
        .dump(Endpoint.controller + "Handler")
        .foreach {
          case InMsg(ws, msg) =>
            read[ControlMsg](msg).cmd match {
              case ControlMsg.closeAll =>
                // Delay shutdown slightly so the triggering client's reply
                // can flush, then close every open connection.
                globalScheduler.scheduleOnce(500.millis) {
                  println(Console.CYAN + "STOPPING SERVER" + Console.RESET)
                  val connections = channel.server.connections()
                  connections.synchronized {
                    connections.asScala.toList.foreach(_.close(CloseFrame.GOING_AWAY))
                  }
                }
            }
          case _ =>
        }
    }
  }

  // Start the server with the fallback plus the three endpoint handlers.
  RxWebSocketServer(fallbackHandler, List(adminHandler, chatHandler, controller)).start()
}
viagraphs/reactive-websocket
jvm/src/test/scala/com/pragmaxim/websocket/TestingServer.scala
Scala
mit
3,565
package com.ponkotuy.data

import org.json4s._

/**
 * Equipment slot item owned by the player.
 *
 * @param alv carrier-based aircraft proficiency level (optional)
 * @author ponkotuy
 * Date: 14/03/26.
 */
case class SlotItem(id: Int, slotitemId: Int, locked: Boolean, level: Int, alv: Option[Int])

object SlotItem {
  implicit val format = DefaultFormats

  /**
   * Parses a JSON array of raw API slot items into [[SlotItem]]s.
   * Elements that fail extraction are silently dropped (extractOpt).
   *
   * NOTE(review): the refutable pattern `val JArray(xs) = obj` throws a
   * MatchError when `obj` is not a JSON array — confirm callers always pass
   * an array here.
   */
  def fromJson(obj: JValue): List[SlotItem] = {
    val JArray(xs) = obj
    xs.flatMap { x =>
      x.extractOpt[RawSlotItem].map(_.build)
    }
  }

  // Mirror of the raw API payload field names; `build` converts to the
  // domain type (api_locked is an int flag: non-zero means locked).
  case class RawSlotItem(api_id: Int, api_slotitem_id: Int, api_locked: Int, api_level: Int, api_alv: Option[Int]) {
    def build: SlotItem = SlotItem(api_id, api_slotitem_id, api_locked != 0, api_level, api_alv)
  }
}
ttdoda/MyFleetGirls
library/src/main/scala/com/ponkotuy/data/SlotItem.scala
Scala
mit
654
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions import java.nio.charset.StandardCharsets import java.sql.{Date, Timestamp} import org.scalatest.Matchers import org.apache.spark.SparkFunSuite import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.plans.PlanTestBase import org.apache.spark.sql.catalyst.util._ import org.apache.spark.sql.types.{IntegerType, LongType, _} import org.apache.spark.unsafe.array.ByteArrayMethods import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String} class UnsafeRowConverterSuite extends SparkFunSuite with Matchers with PlanTestBase with ExpressionEvalHelper { private def roundedSize(size: Int) = ByteArrayMethods.roundNumberOfBytesToNearestWord(size) testBothCodegenAndInterpreted("basic conversion with only primitive types") { val factory = UnsafeProjection val fieldTypes: Array[DataType] = Array(LongType, LongType, IntegerType) val converter = factory.create(fieldTypes) val row = new SpecificInternalRow(fieldTypes) row.setLong(0, 0) row.setLong(1, 1) row.setInt(2, 2) val unsafeRow: UnsafeRow = converter.apply(row) assert(unsafeRow.getSizeInBytes === 8 + (3 * 8)) assert(unsafeRow.getLong(0) === 0) assert(unsafeRow.getLong(1) === 
1) assert(unsafeRow.getInt(2) === 2) val unsafeRowCopy = unsafeRow.copy() assert(unsafeRowCopy.getLong(0) === 0) assert(unsafeRowCopy.getLong(1) === 1) assert(unsafeRowCopy.getInt(2) === 2) unsafeRow.setLong(1, 3) assert(unsafeRow.getLong(1) === 3) unsafeRow.setInt(2, 4) assert(unsafeRow.getInt(2) === 4) // Mutating the original row should not have changed the copy assert(unsafeRowCopy.getLong(0) === 0) assert(unsafeRowCopy.getLong(1) === 1) assert(unsafeRowCopy.getInt(2) === 2) // Make sure the converter can be reused, i.e. we correctly reset all states. val unsafeRow2: UnsafeRow = converter.apply(row) assert(unsafeRow2.getSizeInBytes === 8 + (3 * 8)) assert(unsafeRow2.getLong(0) === 0) assert(unsafeRow2.getLong(1) === 1) assert(unsafeRow2.getInt(2) === 2) } testBothCodegenAndInterpreted("basic conversion with primitive, string and binary types") { val factory = UnsafeProjection val fieldTypes: Array[DataType] = Array(LongType, StringType, BinaryType) val converter = factory.create(fieldTypes) val row = new SpecificInternalRow(fieldTypes) row.setLong(0, 0) row.update(1, UTF8String.fromString("Hello")) row.update(2, "World".getBytes(StandardCharsets.UTF_8)) val unsafeRow: UnsafeRow = converter.apply(row) assert(unsafeRow.getSizeInBytes === 8 + (8 * 3) + roundedSize("Hello".getBytes(StandardCharsets.UTF_8).length) + roundedSize("World".getBytes(StandardCharsets.UTF_8).length)) assert(unsafeRow.getLong(0) === 0) assert(unsafeRow.getString(1) === "Hello") assert(unsafeRow.getBinary(2) === "World".getBytes(StandardCharsets.UTF_8)) } testBothCodegenAndInterpreted( "basic conversion with primitive, string, date and timestamp types") { val factory = UnsafeProjection val fieldTypes: Array[DataType] = Array(LongType, StringType, DateType, TimestampType) val converter = factory.create(fieldTypes) val row = new SpecificInternalRow(fieldTypes) row.setLong(0, 0) row.update(1, UTF8String.fromString("Hello")) row.update(2, DateTimeUtils.fromJavaDate(Date.valueOf("1970-01-01"))) 
row.update(3, DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf("2015-05-08 08:10:25"))) val unsafeRow: UnsafeRow = converter.apply(row) assert(unsafeRow.getSizeInBytes === 8 + (8 * 4) + roundedSize("Hello".getBytes(StandardCharsets.UTF_8).length)) assert(unsafeRow.getLong(0) === 0) assert(unsafeRow.getString(1) === "Hello") // Date is represented as Int in unsafeRow assert(DateTimeUtils.toJavaDate(unsafeRow.getInt(2)) === Date.valueOf("1970-01-01")) // Timestamp is represented as Long in unsafeRow DateTimeUtils.toJavaTimestamp(unsafeRow.getLong(3)) should be (Timestamp.valueOf("2015-05-08 08:10:25")) unsafeRow.setInt(2, DateTimeUtils.fromJavaDate(Date.valueOf("2015-06-22"))) assert(DateTimeUtils.toJavaDate(unsafeRow.getInt(2)) === Date.valueOf("2015-06-22")) unsafeRow.setLong(3, DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf("2015-06-22 08:10:25"))) DateTimeUtils.toJavaTimestamp(unsafeRow.getLong(3)) should be (Timestamp.valueOf("2015-06-22 08:10:25")) } testBothCodegenAndInterpreted("null handling") { val factory = UnsafeProjection val fieldTypes: Array[DataType] = Array( NullType, BooleanType, ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType, StringType, BinaryType, DecimalType.USER_DEFAULT, DecimalType.SYSTEM_DEFAULT // ArrayType(IntegerType) ) val converter = factory.create(fieldTypes) val rowWithAllNullColumns: InternalRow = { val r = new SpecificInternalRow(fieldTypes) for (i <- fieldTypes.indices) { r.setNullAt(i) } r } val createdFromNull: UnsafeRow = converter.apply(rowWithAllNullColumns) for (i <- fieldTypes.indices) { assert(createdFromNull.isNullAt(i)) } assert(createdFromNull.getBoolean(1) === false) assert(createdFromNull.getByte(2) === 0) assert(createdFromNull.getShort(3) === 0) assert(createdFromNull.getInt(4) === 0) assert(createdFromNull.getLong(5) === 0) assert(createdFromNull.getFloat(6) === 0.0f) assert(createdFromNull.getDouble(7) === 0.0d) assert(createdFromNull.getUTF8String(8) === null) 
assert(createdFromNull.getBinary(9) === null) assert(createdFromNull.getDecimal(10, 10, 0) === null) assert(createdFromNull.getDecimal(11, 38, 18) === null) // assert(createdFromNull.get(11) === null) // If we have an UnsafeRow with columns that are initially non-null and we null out those // columns, then the serialized row representation should be identical to what we would get by // creating an entirely null row via the converter val rowWithNoNullColumns: InternalRow = { val r = new SpecificInternalRow(fieldTypes) r.setNullAt(0) r.setBoolean(1, false) r.setByte(2, 20) r.setShort(3, 30) r.setInt(4, 400) r.setLong(5, 500) r.setFloat(6, 600) r.setDouble(7, 700) r.update(8, UTF8String.fromString("hello")) r.update(9, "world".getBytes(StandardCharsets.UTF_8)) r.setDecimal(10, Decimal(10), 10) r.setDecimal(11, Decimal(10.00, 38, 18), 38) // r.update(11, Array(11)) r } val setToNullAfterCreation = converter.apply(rowWithNoNullColumns) assert(setToNullAfterCreation.isNullAt(0) === rowWithNoNullColumns.isNullAt(0)) assert(setToNullAfterCreation.getBoolean(1) === rowWithNoNullColumns.getBoolean(1)) assert(setToNullAfterCreation.getByte(2) === rowWithNoNullColumns.getByte(2)) assert(setToNullAfterCreation.getShort(3) === rowWithNoNullColumns.getShort(3)) assert(setToNullAfterCreation.getInt(4) === rowWithNoNullColumns.getInt(4)) assert(setToNullAfterCreation.getLong(5) === rowWithNoNullColumns.getLong(5)) assert(setToNullAfterCreation.getFloat(6) === rowWithNoNullColumns.getFloat(6)) assert(setToNullAfterCreation.getDouble(7) === rowWithNoNullColumns.getDouble(7)) assert(setToNullAfterCreation.getString(8) === rowWithNoNullColumns.getString(8)) assert(setToNullAfterCreation.getBinary(9) === rowWithNoNullColumns.getBinary(9)) assert(setToNullAfterCreation.getDecimal(10, 10, 0) === rowWithNoNullColumns.getDecimal(10, 10, 0)) assert(setToNullAfterCreation.getDecimal(11, 38, 18) === rowWithNoNullColumns.getDecimal(11, 38, 18)) for (i <- fieldTypes.indices) { // Cann't call 
setNullAt() on DecimalType if (i == 11) { setToNullAfterCreation.setDecimal(11, null, 38) } else { setToNullAfterCreation.setNullAt(i) } } setToNullAfterCreation.setNullAt(0) setToNullAfterCreation.setBoolean(1, false) setToNullAfterCreation.setByte(2, 20) setToNullAfterCreation.setShort(3, 30) setToNullAfterCreation.setInt(4, 400) setToNullAfterCreation.setLong(5, 500) setToNullAfterCreation.setFloat(6, 600) setToNullAfterCreation.setDouble(7, 700) // setToNullAfterCreation.update(8, UTF8String.fromString("hello")) // setToNullAfterCreation.update(9, "world".getBytes) setToNullAfterCreation.setDecimal(10, Decimal(10), 10) setToNullAfterCreation.setDecimal(11, Decimal(10.00, 38, 18), 38) // setToNullAfterCreation.update(11, Array(11)) assert(setToNullAfterCreation.isNullAt(0) === rowWithNoNullColumns.isNullAt(0)) assert(setToNullAfterCreation.getBoolean(1) === rowWithNoNullColumns.getBoolean(1)) assert(setToNullAfterCreation.getByte(2) === rowWithNoNullColumns.getByte(2)) assert(setToNullAfterCreation.getShort(3) === rowWithNoNullColumns.getShort(3)) assert(setToNullAfterCreation.getInt(4) === rowWithNoNullColumns.getInt(4)) assert(setToNullAfterCreation.getLong(5) === rowWithNoNullColumns.getLong(5)) assert(setToNullAfterCreation.getFloat(6) === rowWithNoNullColumns.getFloat(6)) assert(setToNullAfterCreation.getDouble(7) === rowWithNoNullColumns.getDouble(7)) // assert(setToNullAfterCreation.getString(8) === rowWithNoNullColumns.getString(8)) // assert(setToNullAfterCreation.get(9) === rowWithNoNullColumns.get(9)) assert(setToNullAfterCreation.getDecimal(10, 10, 0) === rowWithNoNullColumns.getDecimal(10, 10, 0)) assert(setToNullAfterCreation.getDecimal(11, 38, 18) === rowWithNoNullColumns.getDecimal(11, 38, 18)) // assert(setToNullAfterCreation.get(11) === rowWithNoNullColumns.get(11)) } testBothCodegenAndInterpreted("NaN canonicalization") { val factory = UnsafeProjection val fieldTypes: Array[DataType] = Array(FloatType, DoubleType) val row1 = new 
SpecificInternalRow(fieldTypes) row1.setFloat(0, java.lang.Float.intBitsToFloat(0x7f800001)) row1.setDouble(1, java.lang.Double.longBitsToDouble(0x7ff0000000000001L)) val row2 = new SpecificInternalRow(fieldTypes) row2.setFloat(0, java.lang.Float.intBitsToFloat(0x7fffffff)) row2.setDouble(1, java.lang.Double.longBitsToDouble(0x7fffffffffffffffL)) val converter = factory.create(fieldTypes) assert(converter.apply(row1).getBytes === converter.apply(row2).getBytes) } testBothCodegenAndInterpreted("basic conversion with struct type") { val factory = UnsafeProjection val fieldTypes: Array[DataType] = Array( new StructType().add("i", IntegerType), new StructType().add("nest", new StructType().add("l", LongType)) ) val converter = factory.create(fieldTypes) val row = new GenericInternalRow(fieldTypes.length) row.update(0, InternalRow(1)) row.update(1, InternalRow(InternalRow(2L))) val unsafeRow: UnsafeRow = converter.apply(row) assert(unsafeRow.numFields == 2) val row1 = unsafeRow.getStruct(0, 1) assert(row1.getSizeInBytes == 8 + 1 * 8) assert(row1.numFields == 1) assert(row1.getInt(0) == 1) val row2 = unsafeRow.getStruct(1, 1) assert(row2.numFields() == 1) val innerRow = row2.getStruct(0, 1) { assert(innerRow.getSizeInBytes == 8 + 1 * 8) assert(innerRow.numFields == 1) assert(innerRow.getLong(0) == 2L) } assert(row2.getSizeInBytes == 8 + 1 * 8 + innerRow.getSizeInBytes) assert(unsafeRow.getSizeInBytes == 8 + 2 * 8 + row1.getSizeInBytes + row2.getSizeInBytes) } private def createArray(values: Any*): ArrayData = new GenericArrayData(values.toArray) private def createMap(keys: Any*)(values: Any*): MapData = { assert(keys.length == values.length) new ArrayBasedMapData(createArray(keys: _*), createArray(values: _*)) } private def testArrayInt(array: UnsafeArrayData, values: Seq[Int]): Unit = { assert(array.numElements == values.length) assert(array.getSizeInBytes == 8 + scala.math.ceil(values.length / 64.toDouble) * 8 + roundedSize(4 * values.length)) 
values.zipWithIndex.foreach { case (value, index) => assert(array.getInt(index) == value) } } private def testMapInt(map: UnsafeMapData, keys: Seq[Int], values: Seq[Int]): Unit = { assert(keys.length == values.length) assert(map.numElements == keys.length) testArrayInt(map.keyArray, keys) testArrayInt(map.valueArray, values) assert(map.getSizeInBytes == 8 + map.keyArray.getSizeInBytes + map.valueArray.getSizeInBytes) } testBothCodegenAndInterpreted("basic conversion with array type") { val factory = UnsafeProjection val fieldTypes: Array[DataType] = Array( ArrayType(IntegerType), ArrayType(ArrayType(IntegerType)) ) val converter = factory.create(fieldTypes) val row = new GenericInternalRow(fieldTypes.length) row.update(0, createArray(1, 2)) row.update(1, createArray(createArray(3, 4))) val unsafeRow: UnsafeRow = converter.apply(row) assert(unsafeRow.numFields() == 2) val unsafeArray1 = unsafeRow.getArray(0) testArrayInt(unsafeArray1, Seq(1, 2)) val unsafeArray2 = unsafeRow.getArray(1) assert(unsafeArray2.numElements == 1) val nestedArray = unsafeArray2.getArray(0) testArrayInt(nestedArray, Seq(3, 4)) assert(unsafeArray2.getSizeInBytes == 8 + 8 + 8 + nestedArray.getSizeInBytes) val array1Size = roundedSize(unsafeArray1.getSizeInBytes) val array2Size = roundedSize(unsafeArray2.getSizeInBytes) assert(unsafeRow.getSizeInBytes == 8 + 8 * 2 + array1Size + array2Size) } testBothCodegenAndInterpreted("basic conversion with map type") { val factory = UnsafeProjection val fieldTypes: Array[DataType] = Array( MapType(IntegerType, IntegerType), MapType(IntegerType, MapType(IntegerType, IntegerType)) ) val converter = factory.create(fieldTypes) val map1 = createMap(1, 2)(3, 4) val innerMap = createMap(5, 6)(7, 8) val map2 = createMap(9)(innerMap) val row = new GenericInternalRow(fieldTypes.length) row.update(0, map1) row.update(1, map2) val unsafeRow: UnsafeRow = converter.apply(row) assert(unsafeRow.numFields == 2) val unsafeMap1 = unsafeRow.getMap(0) testMapInt(unsafeMap1, 
Seq(1, 2), Seq(3, 4)) val unsafeMap2 = unsafeRow.getMap(1) assert(unsafeMap2.numElements == 1) val keyArray = unsafeMap2.keyArray testArrayInt(keyArray, Seq(9)) val valueArray = unsafeMap2.valueArray { assert(valueArray.numElements == 1) val nestedMap = valueArray.getMap(0) testMapInt(nestedMap, Seq(5, 6), Seq(7, 8)) assert(valueArray.getSizeInBytes == 8 + 8 + 8 + roundedSize(nestedMap.getSizeInBytes)) } assert(unsafeMap2.getSizeInBytes == 8 + keyArray.getSizeInBytes + valueArray.getSizeInBytes) val map1Size = roundedSize(unsafeMap1.getSizeInBytes) val map2Size = roundedSize(unsafeMap2.getSizeInBytes) assert(unsafeRow.getSizeInBytes == 8 + 8 * 2 + map1Size + map2Size) } testBothCodegenAndInterpreted("basic conversion with struct and array") { val factory = UnsafeProjection val fieldTypes: Array[DataType] = Array( new StructType().add("arr", ArrayType(IntegerType)), ArrayType(new StructType().add("l", LongType)) ) val converter = factory.create(fieldTypes) val row = new GenericInternalRow(fieldTypes.length) row.update(0, InternalRow(createArray(1))) row.update(1, createArray(InternalRow(2L))) val unsafeRow: UnsafeRow = converter.apply(row) assert(unsafeRow.numFields() == 2) val field1 = unsafeRow.getStruct(0, 1) assert(field1.numFields == 1) val innerArray = field1.getArray(0) testArrayInt(innerArray, Seq(1)) assert(field1.getSizeInBytes == 8 + 8 + roundedSize(innerArray.getSizeInBytes)) val field2 = unsafeRow.getArray(1) assert(field2.numElements == 1) val innerStruct = field2.getStruct(0, 1) { assert(innerStruct.numFields == 1) assert(innerStruct.getSizeInBytes == 8 + 8) assert(innerStruct.getLong(0) == 2L) } assert(field2.getSizeInBytes == 8 + 8 + 8 + innerStruct.getSizeInBytes) assert(unsafeRow.getSizeInBytes == 8 + 8 * 2 + field1.getSizeInBytes + roundedSize(field2.getSizeInBytes)) } testBothCodegenAndInterpreted("basic conversion with struct and map") { val factory = UnsafeProjection val fieldTypes: Array[DataType] = Array( new StructType().add("map", 
MapType(IntegerType, IntegerType)), MapType(IntegerType, new StructType().add("l", LongType)) ) val converter = factory.create(fieldTypes) val row = new GenericInternalRow(fieldTypes.length) row.update(0, InternalRow(createMap(1)(2))) row.update(1, createMap(3)(InternalRow(4L))) val unsafeRow: UnsafeRow = converter.apply(row) assert(unsafeRow.numFields() == 2) val field1 = unsafeRow.getStruct(0, 1) assert(field1.numFields == 1) val innerMap = field1.getMap(0) testMapInt(innerMap, Seq(1), Seq(2)) assert(field1.getSizeInBytes == 8 + 8 + roundedSize(innerMap.getSizeInBytes)) val field2 = unsafeRow.getMap(1) val keyArray = field2.keyArray testArrayInt(keyArray, Seq(3)) val valueArray = field2.valueArray { assert(valueArray.numElements == 1) val innerStruct = valueArray.getStruct(0, 1) assert(innerStruct.numFields == 1) assert(innerStruct.getSizeInBytes == 8 + 8) assert(innerStruct.getLong(0) == 4L) assert(valueArray.getSizeInBytes == 8 + 8 + 8 + innerStruct.getSizeInBytes) } assert(field2.getSizeInBytes == 8 + keyArray.getSizeInBytes + valueArray.getSizeInBytes) assert(unsafeRow.getSizeInBytes == 8 + 8 * 2 + field1.getSizeInBytes + roundedSize(field2.getSizeInBytes)) } testBothCodegenAndInterpreted("basic conversion with array and map") { val factory = UnsafeProjection val fieldTypes: Array[DataType] = Array( ArrayType(MapType(IntegerType, IntegerType)), MapType(IntegerType, ArrayType(IntegerType)) ) val converter = factory.create(fieldTypes) val row = new GenericInternalRow(fieldTypes.length) row.update(0, createArray(createMap(1)(2))) row.update(1, createMap(3)(createArray(4))) val unsafeRow: UnsafeRow = converter.apply(row) assert(unsafeRow.numFields() == 2) val field1 = unsafeRow.getArray(0) assert(field1.numElements == 1) val innerMap = field1.getMap(0) testMapInt(innerMap, Seq(1), Seq(2)) assert(field1.getSizeInBytes == 8 + 8 + 8 + roundedSize(innerMap.getSizeInBytes)) val field2 = unsafeRow.getMap(1) assert(field2.numElements == 1) val keyArray = field2.keyArray 
testArrayInt(keyArray, Seq(3)) val valueArray = field2.valueArray { assert(valueArray.numElements == 1) val innerArray = valueArray.getArray(0) testArrayInt(innerArray, Seq(4)) assert(valueArray.getSizeInBytes == 8 + 8 + 8 + innerArray.getSizeInBytes) } assert(field2.getSizeInBytes == 8 + keyArray.getSizeInBytes + valueArray.getSizeInBytes) assert(unsafeRow.getSizeInBytes == 8 + 8 * 2 + roundedSize(field1.getSizeInBytes) + roundedSize(field2.getSizeInBytes)) } testBothCodegenAndInterpreted("SPARK-25374 converts back into safe representation") { def convertBackToInternalRow(inputRow: InternalRow, fields: Array[DataType]): InternalRow = { val unsafeProj = UnsafeProjection.create(fields) val unsafeRow = unsafeProj(inputRow) val safeProj = SafeProjection.create(fields) safeProj(unsafeRow) } // Simple tests val inputRow = InternalRow.fromSeq(Seq( false, 3.toByte, 15.toShort, -83, 129L, 1.0f, 8.0, UTF8String.fromString("test"), Decimal(255), CalendarInterval.fromString("interval 1 day"), Array[Byte](1, 2) )) val fields1 = Array( BooleanType, ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType, StringType, DecimalType.defaultConcreteType, CalendarIntervalType, BinaryType) assert(convertBackToInternalRow(inputRow, fields1) === inputRow) // Array tests val arrayRow = InternalRow.fromSeq(Seq( createArray(1, 2, 3), createArray( createArray(Seq("a", "b", "c").map(UTF8String.fromString): _*), createArray(Seq("d").map(UTF8String.fromString): _*)) )) val fields2 = Array[DataType]( ArrayType(IntegerType), ArrayType(ArrayType(StringType))) assert(convertBackToInternalRow(arrayRow, fields2) === arrayRow) // Struct tests val structRow = InternalRow.fromSeq(Seq( InternalRow.fromSeq(Seq[Any](1, 4.0)), InternalRow.fromSeq(Seq( UTF8String.fromString("test"), InternalRow.fromSeq(Seq( 1, createArray(Seq("2", "3").map(UTF8String.fromString): _*) )) )) )) val fields3 = Array[DataType]( StructType( StructField("c0", IntegerType) :: StructField("c1", DoubleType) :: Nil), 
StructType( StructField("c2", StringType) :: StructField("c3", StructType( StructField("c4", IntegerType) :: StructField("c5", ArrayType(StringType)) :: Nil)) :: Nil)) assert(convertBackToInternalRow(structRow, fields3) === structRow) // Map tests val mapRow = InternalRow.fromSeq(Seq( createMap(Seq("k1", "k2").map(UTF8String.fromString): _*)(1, 2), createMap( createMap(3, 5)(Seq("v1", "v2").map(UTF8String.fromString): _*), createMap(7, 9)(Seq("v3", "v4").map(UTF8String.fromString): _*) )( createMap(Seq("k3", "k4").map(UTF8String.fromString): _*)(3.toShort, 4.toShort), createMap(Seq("k5", "k6").map(UTF8String.fromString): _*)(5.toShort, 6.toShort) ))) val fields4 = Array[DataType]( MapType(StringType, IntegerType), MapType(MapType(IntegerType, StringType), MapType(StringType, ShortType))) val mapResultRow = convertBackToInternalRow(mapRow, fields4) val mapExpectedRow = mapRow checkResult(mapExpectedRow, mapResultRow, exprDataType = StructType(fields4.zipWithIndex.map(f => StructField(s"c${f._2}", f._1))), exprNullable = false) // UDT tests val vector = new TestUDT.MyDenseVector(Array(1.0, 3.0, 5.0, 7.0, 9.0)) val udt = new TestUDT.MyDenseVectorUDT() val udtRow = InternalRow.fromSeq(Seq(udt.serialize(vector))) val fields5 = Array[DataType](udt) assert(convertBackToInternalRow(udtRow, fields5) === udtRow) } }
guoxiaolongzte/spark
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/UnsafeRowConverterSuite.scala
Scala
apache-2.0
23,520
/* * Copyright 2012 Twitter Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.twitter.zipkin.storage.cassandra import org.specs.Specification import com.twitter.zipkin.gen import com.twitter.zipkin.common.{Span, Endpoint, Annotation} import collection.mutable.ArrayBuffer class SnappyCodecSpec extends Specification { val thriftCodec = new ScroogeThriftCodec[gen.Span](gen.Span) val snappyCodec = new SnappyCodec(thriftCodec) "SnappyCodec" should { "compress and decompress" in { val expected = Span(123, "boo", 456, None, List( new Annotation(1, "bah", Some(Endpoint(23567, 345, "service"))), new Annotation(2, gen.Constants.CLIENT_SEND, Some(Endpoint(23567, 345, "service"))), new Annotation(3, gen.Constants.CLIENT_RECV, Some(Endpoint(23567, 345, "service")))), ArrayBuffer()) val actual = Span.fromThrift(snappyCodec.decode(snappyCodec.encode(expected.toThrift))) expected mustEqual actual } } }
lanrion/zipkin
zipkin-server/src/test/scala/com/twitter/zipkin/storage/cassandra/SnappyCodecSpec.scala
Scala
apache-2.0
1,506
package com.programmaticallyspeaking.ncd.nashorn import java.util import com.programmaticallyspeaking.ncd.host._ import com.programmaticallyspeaking.ncd.host.types.{ExceptionData, ObjectPropertyDescriptor, PropertyDescriptorType, Undefined} import jdk.nashorn.api.scripting.AbstractJSObject import org.scalactic.Equality import org.scalatest.Inside import org.scalatest.prop.TableDrivenPropertyChecks import scala.collection.{GenMap, mutable} import scala.language.higherKinds import scala.util.Try class RealMarshallerTest extends RealMarshallerTestFixture with Inside with TableDrivenPropertyChecks { import RealMarshallerTest._ val simpleValues = Table( ("desc", "expression", "expected"), ("string", "'hello world'", SimpleValue("hello world")), ("concatenated string", "'hello ' + 'world'", SimpleValue("hello world")), ("concatenated string via argument", "(function (arg) { return 'hello ' + arg; }).call(null, 'world')", SimpleValue("hello world")), ("integer value", "42", SimpleValue(42)), ("floating-point value", "42.5", SimpleValue(42.5d)), ("actual Java char", "java.lang.Character.valueOf('f')", SimpleValue('f')), ("actual Java short", "java.lang.Short.valueOf(42)", SimpleValue(42.asInstanceOf[Short])), ("actual Java byte", "java.lang.Byte.valueOf(42)", SimpleValue(42.asInstanceOf[Byte])), ("boolean value", "true", SimpleValue(true)), ("null", "null", EmptyNode), ("undefined", "undefined", SimpleValue(Undefined)), ("NaN", "NaN", SimpleValue(Double.NaN)) ) def evalArray(expr: String)(handler: (ArrayNode) => Unit): Unit = { evaluateExpression(expr) { (host, actual) => inside(actual) { case an: ArrayNode => handler(an) } } } def evalObject(expr: String)(handler: (ObjectNode) => Unit): Unit = { evaluateExpression(expr) { (_, actual) => inside(actual) { case on: ObjectNode => handler(on) } } } "Marshalling to ValueNode works for" - { forAll(simpleValues) { (desc, expr, expected) => desc in { evaluateExpression(expr) { (_, actual) => actual should equal (expected) } } } 
"Date" in { evaluateExpression("new Date(2017,0,21)") { (_, actual) => inside(actual) { case DateNode(str, _) => str should fullyMatch regex "Sat Jan 21 2017 00:00:00 [A-Z]{3}[0-9+]{5} (.*)" } } } "actual Java float" in { evaluateExpression("java.lang.Float.valueOf(42.5)") { (_, actual) => // Java 8 and Java 9 behave differently. inside(actual) { case SimpleValue(f: Float) => f should be (42.5f) case SimpleValue(d: Double) => d should be (42.5d) } } } "Error" in { evaluateExpression("new TypeError('oops')") { (_, actual) => inside(actual) { case ErrorValue(data, isBasedOnThrowable, _, _) => val stack = "TypeError: oops\\n\\tat <program> (<eval>:1)" data should be (ExceptionData("TypeError", "oops", 1, -1, "<eval>", Some(stack))) isBasedOnThrowable should be (false) } } } "thrown Error with 0-based column number" in { val expr = "(function(){try{\\r\\nthrow new Error('error');}catch(e){return e;}})()" evaluateExpression(expr) { (_, actual) => inside(actual) { case ErrorValue(data, _, _, _) => data.columnNumberBase0 should be (0) } } } "Java Exception" - { val expr = "(function(){try{throw new java.lang.IllegalArgumentException('oops');}catch(e){return e;}})()" def evalException(handler: (ErrorValue) => Unit): Unit = { evaluateExpression(expr) { (_, actual) => inside(actual) { case err: ErrorValue => handler(err) } } } "with appropriate exception data" in { evalException { err => err.data should be (ExceptionData("java.lang.IllegalArgumentException", "oops", 3, -1, "<eval>", Some("java.lang.IllegalArgumentException: oops"))) } } } "RegExp" - { // Note: flags 'u' and 'y' are not supported by Nashorn // See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp val expr = "/.*/gim" def evalRegexp(handler: (RegExpNode) => Unit): Unit = { evaluateExpression(expr) { (_, actual) => inside(actual) { case re: RegExpNode => handler(re) } } } "with a string representation" in { evalRegexp { re => re.stringRepresentation should be ("/.*/gim") } 
} } "JSObject-based array" - { val testCases = Table( ("description", "class"), ("with proper class name", classOf[ClassNameBasedArrayJSObject]), ("with isArray==true", classOf[IsArrayBasedArrayJSObject]) ) forAll(testCases) { (description, clazz) => description in { val expr = s"createInstance('${clazz.getName}')" evalArray(expr) { an => an.size should be (2) } } } } "plain array" - { "should not get a class name" in { val expr = "[1,2]" evalArray(expr) { an => an.typedClassName should be (None) } } } "typed array" - { val typedArrayCases = Table( ("className"), ("Int8Array"), ("Uint8Array"), ("Uint8ClampedArray"), ("Int16Array"), ("Uint16Array"), ("Int32Array"), ("Uint32Array"), ("Float32Array"), ("Float64Array") ) forAll(typedArrayCases) { (className) => s"gets a class name for $className" in { val expr = s"new $className([1,2])" evalArray(expr) { an => an.typedClassName should be (Some(className)) } } } } "object class name" - { val classNameCases = Table( ("desc", "expr", "className"), ("plain object", "{foo:42}", "Object"), ("object with type", "new ArrayBuffer()", "ArrayBuffer"), ("Java object", "new java.util.ArrayList()", "java.util.ArrayList"), ("JSObject object", s"createInstance('${classOf[ObjectLikeJSObject].getName}')", "Object"), ("JS 'class'", s"(function() { return new MyClass(); function MyClass() {} })()", "MyClass"), ("DataView", "new DataView(new ArrayBuffer(10))", "DataView"), ("Global", "this", "global") ) forAll(classNameCases) { (desc, expr, className) => s"is set for $desc" in { evalObject(expr) { obj => obj.className should be (className) } } } } "Java array" - { "gets correct size" in { val expr = """(function() { |var StringArray = Java.type("java.lang.String[]"); |var arr = new StringArray(2); |arr[0] = "testing"; |arr[1] = "foobar"; |return arr; |})() """.stripMargin evalArray(expr) { an => an.size should be (2) } } } "JSObject-based function" - { def evalFunction(expr: String)(handler: (FunctionNode) => Unit): Unit = { 
evaluateExpression(expr) { (_, actual) => inside(actual) { case fn: FunctionNode => handler(fn) } } } val testCases = Table( ("description", "class", "tester"), ("with proper class name", classOf[ClassNameBasedFunctionJSObject], (fn: FunctionNode) => {fn.name should be ("")}), ("with isFunction==true", classOf[IsFunctionBasedFunctionJSObject], (fn: FunctionNode) => {fn.name should be ("")}), ("with a name", classOf[WithNameFunctionJSObject], (fn: FunctionNode) => {fn.copy(objectId = null) should be (FunctionNode("fun", "function fun() {}", null))}) ) forAll(testCases) { (description, clazz, tester) => description in { val expr = s"createInstance('${clazz.getName}')" evalFunction(expr) { fn => tester(fn) } } } } } } object RealMarshallerTest { case class Cycle(objectId: ObjectId) def expand(host: ScriptHost, node: ValueNode, includeInherited: Boolean = false, onlyAccessors: Boolean = false, expandProto: Boolean = false): Any = { val seenObjectIds = mutable.Set[ObjectId]() // Remove the 'class' JavaBean getter because it's everywhere so it's noise. def removeProps: ((String, ObjectPropertyDescriptor)) => Boolean = e => { if (e._1 == "class") false else if (e._1 == "__proto__" && !expandProto) false else true } def recurse(node: ValueNode): Any = node match { case complex: ComplexNode if seenObjectIds.contains(complex.objectId) => // In Nashorn, apparently the constructor of the prototype of a function is the function itself... Cycle(complex.objectId) case scope: ScopeObject => Map("scope" -> true, "name" -> scope.name, "type" -> scope.scopeType.toString) case complex: ComplexNode => seenObjectIds += complex.objectId host.getObjectProperties(complex.objectId, !includeInherited, onlyAccessors).filter(removeProps).map(e => e._2.descriptorType match { case PropertyDescriptorType.Generic => e._1 -> "???" 
case PropertyDescriptorType.Data => e._2.value match { case Some(value) => e._1 -> recurse(value) case None => throw new RuntimeException("Incorrect data descriptor, no value") } case PropertyDescriptorType.Accessor => val props = e._2.getter.map(_ => "get").toSeq ++ e._2.setter.map(_ => "set") e._1 -> props.map(p => p -> "<function>").toMap }).toMap case EmptyNode => null case SimpleValue(simple) => simple case other => throw new Exception("Unhandled: " + other) } recurse(node) } implicit val valueNodeEq: Equality[ValueNode] = (a: ValueNode, b: Any) => b match { case vn: ValueNode => a match { case SimpleValue(d: Double) => vn match { case SimpleValue(ad: Double) => // Required for NaN comparison java.lang.Double.compare(d, ad) == 0 case other => false } case _ => a == vn } case other => false } /** * If [[anyEqWithMapSupport]] is used as implicit `Equality`, this object can appear as a `Map` value (possibly in * a nested `Map`) to match any actual value. */ case object AnyObject def anyEqWithMapSupport: Equality[Any] = (a: Any, b: Any) => compareAny(a, b) private def compareAny(a: Any, b: Any): Boolean = a match { case aMap: Map[Any, _] => b match { case bMap: Map[Any, _] => compareMaps(aMap, bMap) case _ => a == b } case _ => a == b } private def compareMaps(a: Map[Any, _], b: Map[Any, _]): Boolean = { if (a.size != b.size) return false // Go through entries in a and require equality for values val bIsOk = a forall { entry => b.get(entry._1) match { case Some(AnyObject) => true case Some(value) => compareAny(entry._2, value) case None => false } } // Return false if b contains an entry that is not in a val bHasKeyNotInA = !b.forall(e => a.contains(e._1)) bIsOk && !bHasKeyNotInA } } abstract class BaseArrayJSObject(items: Seq[AnyRef]) extends AbstractJSObject { import scala.collection.JavaConverters._ override def hasSlot(slot: Int): Boolean = slot >= 0 && slot < items.size override def getSlot(index: Int): AnyRef = items(index) override def hasMember(name: 
String): Boolean = Try(name.toInt).map(hasSlot).getOrElse(name == "length") override def getMember(name: String): AnyRef = Try(name.toInt).map(getSlot).getOrElse(if (name == "length") items.size.asInstanceOf[AnyRef] else null) override def keySet(): util.Set[String] = (items.indices.map(_.toString) :+ "length").toSet.asJava override def values(): util.Collection[AnyRef] = items.asJava } class ClassNameBasedArrayJSObject extends BaseArrayJSObject(Seq("a", "b")) { override def getClassName: String = "Array" } class IsArrayBasedArrayJSObject extends BaseArrayJSObject(Seq("a", "b")) { override def isArray: Boolean = true } class OnlySlotBasedArrayJSObject extends IsArrayBasedArrayJSObject { override def getMember(name: String): AnyRef = if (name == "length") super.getMember("length") else null } class SlotBasedArrayJSObjectThatMisbehavesForGetMember extends IsArrayBasedArrayJSObject { override def getMember(name: String): AnyRef = { if (name == "length") super.getMember("length") else throw new RuntimeException("getMember not supported for: " + name) } } class ObjectLikeJSObject extends AbstractJSObject { import scala.collection.JavaConverters._ val data: Map[String, AnyRef] = Map("a" -> 42.asInstanceOf[AnyRef], "b" -> 43.asInstanceOf[AnyRef]) override def values(): util.Collection[AnyRef] = data.values.toList.asJava override def hasMember(name: String): Boolean = data.contains(name) override def getMember(name: String): AnyRef = data(name) override def getClassName: String = "Object" override def keySet(): util.Set[String] = data.keySet.asJava } abstract class BaseFunctionJSObject extends AbstractJSObject { override def call(thiz: scala.Any, args: AnyRef*): AnyRef = "ok" } class ClassNameBasedFunctionJSObject extends BaseFunctionJSObject { override def getClassName: String = "Function" override def call(thiz: scala.Any, args: AnyRef*): AnyRef = "ok" } class IsFunctionBasedFunctionJSObject extends BaseFunctionJSObject { override def call(thiz: scala.Any, args: 
AnyRef*): AnyRef = "ok" override def isFunction: Boolean = true } class WithNameFunctionJSObject extends ClassNameBasedFunctionJSObject { override def getMember(name: String): AnyRef = { if (name == "name") "fun" else super.getMember(name) } }
provegard/ncdbg
src/test/scala/com/programmaticallyspeaking/ncd/nashorn/RealMarshallerTest.scala
Scala
bsd-3-clause
14,221
package controllers import com.mohiva.play.silhouette.api.{Environment, Silhouette} import com.mohiva.play.silhouette.impl.authenticators.SessionAuthenticator import com.vividsolutions.jts.geom._ import com.vividsolutions.jts.index.kdtree.{KdNode, KdTree} import controllers.headers.ProvidesHeader import java.sql.Timestamp import java.time.Instant import javax.inject.Inject import models.attribute.{GlobalAttributeForAPI, GlobalAttributeTable, GlobalAttributeWithLabelForAPI} import org.locationtech.jts.geom.{Coordinate => JTSCoordinate} import math._ import models.region._ import models.daos.slick.DBTableDefinitions.{DBUser, UserTable} import models.label.{LabelLocation, LabelTable} import models.street.{OsmWayStreetEdge, OsmWayStreetEdgeTable} import models.street.{StreetEdge, StreetEdgeTable} import models.user.{User, WebpageActivity, WebpageActivityTable} import play.api.Play.current import play.api.libs.json._ import play.api.libs.json.Json._ import play.extras.geojson.{LatLng => JsonLatLng, LineString => JsonLineString, MultiPolygon => JsonMultiPolygon, Point => JsonPoint} import scala.collection.JavaConversions._ import scala.collection.mutable.{ArrayBuffer, Buffer} import scala.concurrent.Future import helper.ShapefilesCreatorHelper import models.region.RegionTable.MultiPolygonUtils case class NeighborhoodAttributeSignificance (val name: String, val geometry: Array[JTSCoordinate], val regionID: Int, val coverage: Double, val score: Double, val attributeScores: Array[Double], val significanceScores: Array[Double]) case class StreetAttributeSignificance (val geometry: Array[JTSCoordinate], val streetID: Int, val osmID: Int, val score: Double, val attributeScores: Array[Double], val significanceScores: Array[Double]) /** * Holds the HTTP requests associated with API. * * @param env The Silhouette environment. 
*/ class ProjectSidewalkAPIController @Inject()(implicit val env: Environment[User, SessionAuthenticator]) extends Silhouette[User, SessionAuthenticator] with ProvidesHeader { case class AttributeForAccessScore(lat: Float, lng: Float, labelType: String) case class AccessScoreStreet(streetEdge: StreetEdge, osmId: Int, score: Double, attributes: Array[Double], significance: Array[Double]) { def toJSON: JsObject = { val latlngs: List[JsonLatLng] = streetEdge.geom.getCoordinates.map(coord => JsonLatLng(coord.y, coord.x)).toList val linestring: JsonLineString[JsonLatLng] = JsonLineString(latlngs) val properties = Json.obj( "street_edge_id" -> streetEdge.streetEdgeId, "osm_id" -> osmId, "score" -> score, "significance" -> Json.obj( "CurbRamp" -> significance(0), "NoCurbRamp" -> significance(1), "Obstacle" -> significance(2), "SurfaceProblem" -> significance(3) ), "feature" -> Json.obj( "CurbRamp" -> attributes(0), "NoCurbRamp" -> attributes(1), "Obstacle" -> attributes(2), "SurfaceProblem" -> attributes(3) ) ) Json.obj("type" -> "Feature", "geometry" -> linestring, "properties" -> properties) } } /** * Adds an entry to the webpage_activity table with the endpoint used. * * @param remoteAddress The remote address that made the API call * @param identity The user that made the API call, if the user is signed in. 
If no user is signed in, the value is None * @param requestStr The full request sent by the API call */ def apiLogging(remoteAddress: String, identity: Option[User], requestStr: String) = { if (remoteAddress != "0:0:0:0:0:0:0:1") { val timestamp: Timestamp = new Timestamp(Instant.now.toEpochMilli) val ipAddress: String = remoteAddress identity match { case Some(user) => WebpageActivityTable.save(WebpageActivity(0, user.userId.toString, ipAddress, requestStr, timestamp)) case None => val anonymousUser: DBUser = UserTable.find("anonymous").get WebpageActivityTable.save(WebpageActivity(0, anonymousUser.userId.toString, ipAddress, requestStr, timestamp)) } } } /** * Returns all the global attributes within the bounding box and the labels that make up those attributes in geojson. * * @param lat1 * @param lng1 * @param lat2 * @param lng2 * @param severity * @param filetype * @return */ def getAccessAttributesWithLabelsV2(lat1: Double, lng1: Double, lat2: Double, lng2: Double, severity: Option[String], filetype: Option[String]) = UserAwareAction.async { implicit request => apiLogging(request.remoteAddress, request.identity, request.toString) val minLat:Float = min(lat1, lat2).toFloat val maxLat:Float = max(lat1, lat2).toFloat val minLng:Float = min(lng1, lng2).toFloat val maxLng:Float = max(lng1, lng2).toFloat // In CSV format. if (filetype.isDefined && filetype.get == "csv") { val file = new java.io.File("access_attributes_with_labels.csv") val writer = new java.io.PrintStream(file) val header: String = "Attribute ID,Label Type,Attribute Severity,Attribute Temporary,Street ID," + "OSM Street ID,Neighborhood Name,Label ID,Panorama ID,Attribute Latitude," + "Attribute Longitude,Label Latitude,Label Longitude,Heading,Pitch,Zoom,Canvas X,Canvas Y," + "Canvas Width,Canvas Height,GSV URL,Label Severity,Label Temporary,Agree Count,Disagree Count,Not Sure Count" // Write column headers. writer.println(header) // Write each row in the CSV. 
for (current <- GlobalAttributeTable.getGlobalAttributesWithLabelsInBoundingBox(minLat, minLng, maxLat, maxLng, severity)) { writer.println(current.attributesToArray.mkString(",")) } writer.close() Future.successful(Ok.sendFile(content = file, onClose = () => file.delete())) } else if (filetype.isDefined && filetype.get == "shapefile") { val attributeList: Buffer[GlobalAttributeForAPI] = GlobalAttributeTable.getGlobalAttributesInBoundingBox(minLat, minLng, maxLat, maxLng, severity).to[ArrayBuffer] ShapefilesCreatorHelper.createAttributeShapeFile("attributes", attributeList) val labelList: Buffer[GlobalAttributeWithLabelForAPI] = GlobalAttributeTable.getGlobalAttributesWithLabelsInBoundingBox(minLat, minLng, maxLat, maxLng, severity).to[ArrayBuffer] ShapefilesCreatorHelper.createLabelShapeFile("labels", labelList) val shapefile: java.io.File = ShapefilesCreatorHelper.zipShapeFiles("attributeWithLabels", Array("attributes", "labels")) Future.successful(Ok.sendFile(content = shapefile, onClose = () => shapefile.delete())) } else { // In GeoJSON format. val features: List[JsObject] = GlobalAttributeTable.getGlobalAttributesWithLabelsInBoundingBox(minLat, minLng, maxLat, maxLng, severity).map(_.toJSON) Future.successful(Ok(Json.obj("type" -> "FeatureCollection", "features" -> features))) } } /** * Returns all the global attributes within the bounding box in geoJson. * * @param lat1 * @param lng1 * @param lat2 * @param lng2 * @param severity * @param filetype * @return */ def getAccessAttributesV2(lat1: Double, lng1: Double, lat2: Double, lng2: Double, severity: Option[String], filetype: Option[String]) = UserAwareAction.async { implicit request => apiLogging(request.remoteAddress, request.identity, request.toString) val minLat:Float = min(lat1, lat2).toFloat val maxLat:Float = max(lat1, lat2).toFloat val minLng:Float = min(lng1, lng2).toFloat val maxLng:Float = max(lng1, lng2).toFloat // In CSV format. 
if (filetype != None && filetype.get == "csv") { val accessAttributesfile = new java.io.File("access_attributes.csv") val writer = new java.io.PrintStream(accessAttributesfile) // Write column headers. writer.println("Attribute ID,Label Type,Street ID,OSM Street ID,Neighborhood Name,Attribute Latitude,Attribute Longitude,Severity,Temporary,Agree Count,Disagree Count,Not Sure Count") // Write each rown in the CSV. for (current <- GlobalAttributeTable.getGlobalAttributesInBoundingBox(minLat, minLng, maxLat, maxLng, severity)) { writer.println(current.attributesToArray.mkString(",")) } writer.close() Future.successful(Ok.sendFile(content = accessAttributesfile, onClose = () => accessAttributesfile.delete())) } else if (filetype.isDefined && filetype.get == "shapefile") { val attributeList: Buffer[GlobalAttributeForAPI] = GlobalAttributeTable.getGlobalAttributesInBoundingBox(minLat, minLng, maxLat, maxLng, severity).to[ArrayBuffer] ShapefilesCreatorHelper.createAttributeShapeFile("attributes", attributeList) val shapefile: java.io.File = ShapefilesCreatorHelper.zipShapeFiles("accessAttributes", Array("attributes")); Future.successful(Ok.sendFile(content = shapefile, onClose = () => shapefile.delete())) } else { // In GeoJSON format. val features: List[JsObject] = GlobalAttributeTable.getGlobalAttributesInBoundingBox(minLat, minLng, maxLat, maxLng, severity).map(_.toJSON) Future.successful(Ok(Json.obj("type" -> "FeatureCollection", "features" -> features))) } } /** * Returns all the global attributes within the bounding box in geoJson. 
* * @param lat1 First latttude value for the bounding box * @param lng1 First longitude value for the bounding box * @param lat2 Second latitude value for the bounding box * @param lng2 Second longitude value for the bounding box * @param severity The severity of the attributes that should be added in the geojson */ def getAccessAttributesV1(lat1: Double, lng1: Double, lat2: Double, lng2: Double) = UserAwareAction.async { implicit request => apiLogging(request.remoteAddress, request.identity, request.toString) val minLat = min(lat1, lat2) val maxLat = max(lat1, lat2) val minLng = min(lng1, lng2) val maxLng = max(lng1, lng2) def featureCollection = { // Retrieve data and cluster them by location and label type. val labelLocations: List[LabelLocation] = LabelTable.selectLocationsOfLabelsIn(minLat, minLng, maxLat, maxLng) val clustered: List[LabelLocation] = clusterLabelLocations(labelLocations) val features: List[JsObject] = clustered.map { label => val latlng = JsonLatLng(label.lat.toDouble, label.lng.toDouble) val point = JsonPoint(latlng) val labelType = label.labelType val labelId = label.labelId val panoramaId = label.gsvPanoramaId val properties = Json.obj( "label_type" -> labelType, // Todo. Actually calculate the access score, "panorama_id" -> panoramaId ) Json.obj("type" -> "Feature", "geometry" -> point, "properties" -> properties) } Json.obj("type" -> "FeatureCollection", "features" -> features) } Future.successful(Ok(featureCollection)) } /** * E.g. 
/v1/access/score/neighborhood?lng1=-77.01098442077637&lat1=38.89035159350444&lng2=-76.97793960571289&lat2=38.91851800248647 * @param lat1 First latitude value for the bounding box * @param lng1 First longitude value for the bounding box * @param lat2 Second latitude value for the bounding box * @param lng2 Second longitude value for the bounding box * @return The access score for the given neighborhood */ def getAccessScoreNeighborhoodsV1(lat1: Double, lng1: Double, lat2: Double, lng2: Double) = UserAwareAction.async { implicit request => apiLogging(request.remoteAddress, request.identity, request.toString) val coordinates = Array(min(lat1, lat2), max(lat1, lat2), min(lng1, lng2), max(lng1, lng2)) Future.successful(Ok(getAccessScoreNeighborhoodsJson(version = 1, coordinates))) } /** * E.g. /v2/access/score/neighborhood?lng1=-77.01098442077637&lat1=38.89035159350444&lng2=-76.97793960571289&lat2=38.91851800248647 * @param lat1 * @param lng1 * @param lat2 * @param lng2 * @param filetype * @return */ def getAccessScoreNeighborhoodsV2(lat1: Double, lng1: Double, lat2: Double, lng2: Double, filetype: Option[String]) = UserAwareAction.async { implicit request => apiLogging(request.remoteAddress, request.identity, request.toString) val coordinates = Array(min(lat1, lat2), max(lat1, lat2), min(lng1, lng2), max(lng1, lng2)) // In CSV format. if (filetype.isDefined && filetype.get == "csv") { val file: java.io.File = getAccessScoreNeighborhoodsCSV(version = 2, coordinates) Future.successful(Ok.sendFile(content = file, onClose = () => file.delete())) } else if(filetype.isDefined && filetype.get == "shapefile"){ val file: java.io.File = getAccessScoreNeighborhoodsShapefile(coordinates) Future.successful(Ok.sendFile(content = file, onClose = () => file.delete())) } else { // In GeoJSON format. Future.successful(Ok(getAccessScoreNeighborhoodsJson(version = 2, coordinates))) } } /** * Gets the Access Score of the neighborhoods within the coordinates in a shapefile format. 
* * @param coordinates: A coordinate representation of the bounding box for the query. Every neighborhood * within this bounding box will have their access score calculated and returned. * @return A shapefile representation of the access scores within the given coordinates. */ def getAccessScoreNeighborhoodsShapefile(coordinates: Array[Double]): java.io.File = { // Gather all of the data that will be written to the Shapefile. val labelsForScore: List[AttributeForAccessScore] = getLabelsForScore(version = 2, coordinates) val allStreetEdges: List[StreetEdge] = StreetEdgeTable.selectStreetsIntersecting(coordinates(0), coordinates(2), coordinates(1), coordinates(3)) val auditedStreetEdges: List[StreetEdge] = StreetEdgeTable.selectAuditedStreetsIntersecting(coordinates(0), coordinates(2), coordinates(1), coordinates(3)) val neighborhoods: List[NamedRegion] = RegionTable.selectNamedNeighborhoodsWithin(coordinates(0), coordinates(2), coordinates(1), coordinates(3)) val significance: Array[Double] = Array(0.75, -1.0, -1.0, -1.0) // Create a list of NeighborhoodAttributeSignificance objects to pass to the helper class. val neighborhoodList: Buffer[NeighborhoodAttributeSignificance] = new ArrayBuffer[NeighborhoodAttributeSignificance] // Populate every object in the list. for (neighborhood <- neighborhoods) { val coordinates: Array[JTSCoordinate] = neighborhood.geom.getCoordinates.map(c => new JTSCoordinate(c.x, c.y)) val auditedStreetsIntersectingTheNeighborhood = auditedStreetEdges.filter(_.geom.intersects(neighborhood.geom)) // set default values for everything to 0, so null values will be 0 as well. 
var coverage: Double = 0.0 var accessScore: Double = 0.0 var averagedStreetFeatures: Array[Double] = Array(0.0,0.0,0.0,0.0,0.0) if (auditedStreetsIntersectingTheNeighborhood.nonEmpty) { val streetAccessScores: List[AccessScoreStreet] = computeAccessScoresForStreets(auditedStreetsIntersectingTheNeighborhood, labelsForScore) // I'm just interested in getting the attributes averagedStreetFeatures = streetAccessScores.map(_.attributes).transpose.map(_.sum / streetAccessScores.size).toArray accessScore = computeAccessScore(averagedStreetFeatures, significance) val allStreetsIntersectingTheNeighborhood = allStreetEdges.filter(_.geom.intersects(neighborhood.geom)) coverage = auditedStreetsIntersectingTheNeighborhood.size.toDouble / allStreetsIntersectingTheNeighborhood.size assert(coverage <= 1.0) } neighborhoodList.add(new NeighborhoodAttributeSignificance(neighborhood.name, coordinates, neighborhood.regionId, coverage, accessScore, averagedStreetFeatures, significance)) } // Send the list of objects to the helper class. ShapefilesCreatorHelper.createNeighborhoodShapefile("neighborhood", neighborhoodList) val shapefile: java.io.File = ShapefilesCreatorHelper.zipShapeFiles("neighborhoodScore", Array("neighborhood")) shapefile } /** * Generic version of getAccessScoreNeighborHood, makes changes for v1 vs v2, for CSV file format only. * * @param version * @param coordinates * @return */ def getAccessScoreNeighborhoodsCSV(version: Int, coordinates: Array[Double]): java.io.File = { val file = new java.io.File("access_score_neighborhoods.csv") val writer = new java.io.PrintStream(file) val header: String = "Neighborhood Name,Region ID,Access Score,Coordinates,Coverage,Average Curb Ramp Score," + "Average No Curb Ramp Score,Average Obstacle Score,Average Surface Problem Score," + "Curb Ramp Significance,No Curb Ramp Significance,Obstacle Significance," + "Surface Problem Significance" // Write the column headers. 
writer.println(header) val labelsForScore: List[AttributeForAccessScore] = getLabelsForScore(version, coordinates) val allStreetEdges: List[StreetEdge] = StreetEdgeTable.selectStreetsIntersecting(coordinates(0), coordinates(2), coordinates(1), coordinates(3)) val auditedStreetEdges: List[StreetEdge] = StreetEdgeTable.selectAuditedStreetsIntersecting(coordinates(0), coordinates(2), coordinates(1), coordinates(3)) val neighborhoods: List[NamedRegion] = RegionTable.selectNamedNeighborhoodsWithin(coordinates(0), coordinates(2), coordinates(1), coordinates(3)) val significance = Array(0.75, -1.0, -1.0, -1.0) // Write each row in the CSV. for (neighborhood <- neighborhoods) { val coordinates: Array[Coordinate] = neighborhood.geom.getCoordinates val auditedStreetsIntersectingTheNeighborhood = auditedStreetEdges.filter(_.geom.intersects(neighborhood.geom)) val coordStr: String = "\"[" + coordinates.map(c => "(" + c.x + "," + c.y + ")").mkString(",") + "]\"" if (auditedStreetsIntersectingTheNeighborhood.nonEmpty) { val streetAccessScores: List[AccessScoreStreet] = computeAccessScoresForStreets(auditedStreetsIntersectingTheNeighborhood, labelsForScore) // I'm just interested in getting the attributes val averagedStreetFeatures = streetAccessScores.map(_.attributes).transpose.map(_.sum / streetAccessScores.size).toArray val accessScore: Double = computeAccessScore(averagedStreetFeatures, significance) val allStreetsIntersectingTheNeighborhood = allStreetEdges.filter(_.geom.intersects(neighborhood.geom)) val coverage: Double = auditedStreetsIntersectingTheNeighborhood.size.toDouble / allStreetsIntersectingTheNeighborhood.size assert(coverage <= 1.0) writer.println(neighborhood.name + "," + neighborhood.regionId + "," + accessScore + "," + coordStr + "," + coverage + "," + averagedStreetFeatures(0) + "," + averagedStreetFeatures(1) + "," + averagedStreetFeatures(2) + "," + averagedStreetFeatures(3) + "," + significance(0) + "," + significance(1) + "," + significance(2) + "," + 
significance(3)) } else { writer.println(neighborhood.name + "," + neighborhood.regionId + "," + "NA" + "," + coordStr + "," + 0.0 + "," + "NA" + "," + "NA" + "," + "NA" + "," + "NA" + "," + significance(0) + "," + significance(1) + "," + significance(2) + "," + significance(3)) } } writer.close() file } /** * Gets list of clustered attributes within a bounding box. * * @param version * @param coordinates * @return */ def getLabelsForScore(version: Int, coordinates: Array[Double]): List[AttributeForAccessScore] = { val labelsForScore: List[AttributeForAccessScore] = version match { case 1 => val labelLocations: List[LabelLocation] = LabelTable.selectLocationsOfLabelsIn(coordinates(0), coordinates(2), coordinates(1), coordinates(3)) val clusteredLabelLocations: List[LabelLocation] = clusterLabelLocations(labelLocations) clusteredLabelLocations.map(l => AttributeForAccessScore(l.lat, l.lng, l.labelType)) case 2 => val globalAttributes: List[GlobalAttributeForAPI] = GlobalAttributeTable.getGlobalAttributesInBoundingBox(coordinates(0).toFloat, coordinates(2).toFloat, coordinates(1).toFloat, coordinates(3).toFloat, None) globalAttributes.map(l => AttributeForAccessScore(l.lat, l.lng, l.labelType)) } labelsForScore } /** * Generic version of getAccessScoreNeighborhood, makes changes for v1 vs v2, for GeoJSON file format only. * * @param version * @param coordinates * @return */ def getAccessScoreNeighborhoodsJson(version: Int, coordinates: Array[Double]): JsObject = { // Retrieve data and cluster them by location and label type. 
def featureCollection = { val labelsForScore: List[AttributeForAccessScore] = getLabelsForScore(version, coordinates) val allStreetEdges: List[StreetEdge] = StreetEdgeTable.selectStreetsIntersecting(coordinates(0), coordinates(2), coordinates(1), coordinates(3)) val auditedStreetEdges: List[StreetEdge] = StreetEdgeTable.selectAuditedStreetsIntersecting(coordinates(0), coordinates(2), coordinates(1), coordinates(3)) val neighborhoods: List[NamedRegion] = RegionTable.selectNamedNeighborhoodsWithin(coordinates(0), coordinates(2), coordinates(1), coordinates(3)) val neighborhoodsJson = for (neighborhood <- neighborhoods) yield { val neighborhoodJson: JsonMultiPolygon[JsonLatLng] = neighborhood.geom.toJSON // Get access score // Element-wise sum of arrays: http://stackoverflow.com/questions/32878818/how-to-sum-up-every-column-of-a-scala-array val auditedStreetsIntersectingTheNeighborhood = auditedStreetEdges.filter(_.geom.intersects(neighborhood.geom)) if (auditedStreetsIntersectingTheNeighborhood.nonEmpty) { val streetAccessScores: List[AccessScoreStreet] = computeAccessScoresForStreets(auditedStreetsIntersectingTheNeighborhood, labelsForScore) // I'm just interested in getting the attributes val averagedStreetFeatures = streetAccessScores.map(_.attributes).transpose.map(_.sum / streetAccessScores.size).toArray val significance = Array(0.75, -1.0, -1.0, -1.0) val accessScore: Double = computeAccessScore(averagedStreetFeatures, significance) val allStreetsIntersectingTheNeighborhood = allStreetEdges.filter(_.geom.intersects(neighborhood.geom)) val coverage: Double = auditedStreetsIntersectingTheNeighborhood.size.toDouble / allStreetsIntersectingTheNeighborhood.size assert(coverage <= 1.0) val properties = Json.obj( "coverage" -> coverage, "region_id" -> neighborhood.regionId, "region_name" -> neighborhood.name, "score" -> accessScore, "significance" -> Json.obj( "CurbRamp" -> 0.75, "NoCurbRamp" -> -1.0, "Obstacle" -> -1.0, "SurfaceProblem" -> -1.0 ), "feature" -> 
Json.obj( "CurbRamp" -> averagedStreetFeatures(0), "NoCurbRamp" -> averagedStreetFeatures(1), "Obstacle" -> averagedStreetFeatures(2), "SurfaceProblem" -> averagedStreetFeatures(3) ) ) Json.obj("type" -> "Feature", "geometry" -> neighborhoodJson, "properties" -> properties) } else { val properties = Json.obj( "coverage" -> 0.0, "region_id" -> neighborhood.regionId, "region_name" -> neighborhood.name, "score" -> None.asInstanceOf[Option[Double]], "significance" -> Json.obj( "CurbRamp" -> 0.75, "NoCurbRamp" -> -1.0, "Obstacle" -> -1.0, "SurfaceProblem" -> -1.0 ), "feature" -> None.asInstanceOf[Option[Array[Double]]] ) Json.obj("type" -> "Feature", "geometry" -> neighborhoodJson, "properties" -> properties) } } Json.obj("type" -> "FeatureCollection", "features" -> neighborhoodsJson) } featureCollection } /** * AccessScore:Street * * E.g., /v1/access/score/streets?lng1=-76.9975519180&lat1=38.910286924&lng2=-76.9920158386&lat2=38.90793262720 * @param lat1 First latttude value for the bounding box * @param lng1 First longitude value for the bounding box * @param lat2 Second latitude value for the bounding box * @param lng2 Second longitude value for the bounding box * @return The access score for the given neighborhood */ def getAccessScoreStreetsV1(lat1: Double, lng1: Double, lat2: Double, lng2: Double) = UserAwareAction.async { implicit request => apiLogging(request.remoteAddress, request.identity, request.toString) val features: List[JsObject] = getAccessScoreStreetsGeneric(lat1, lng1, lat2, lng2, version = 1).map(_.toJSON) Future.successful(Ok(Json.obj("type" -> "FeatureCollection", "features" -> features))) } /** * AccessScore:Street V2 (using new clustering methods) * * E.g., /v2/access/score/streets?lng1=-76.9975519180&lat1=38.910286924&lng2=-76.9920158386&lat2=38.90793262720 * @param lat1 First latitude value for the bounding box * @param lng1 First longitude value for the bounding box * @param lat2 Second latitude value for the bounding box * @param lng2 Second 
longitude value for the bounding box * @return The access score for the given neighborhood */ def getAccessScoreStreetsV2(lat1: Double, lng1: Double, lat2: Double, lng2: Double, filetype: Option[String]) = UserAwareAction.async { implicit request => apiLogging(request.remoteAddress, request.identity, request.toString) val streetAccessScores: List[AccessScoreStreet] = getAccessScoreStreetsGeneric(lat1, lng1, lat2, lng2, version = 2) // In CSV format. if (filetype.isDefined && filetype.get == "csv") { val file = new java.io.File("access_score_streets.csv") val writer = new java.io.PrintStream(file) val header: String = "Region ID,OSM ID,Access Score,Coordinates,Average Curb Ramp Score," + "Average No Curb Ramp Score,Average Obstacle Score,Average Surface Problem Score," + "Curb Ramp Significance,No Curb Ramp Significance,Obstacle Significance," + "Surface Problem Significance" // Write column headers. writer.println(header) // Write each row in the CSV. for (streetAccessScore <- streetAccessScores) { val coordStr: String = "\"[" + streetAccessScore.streetEdge.geom.getCoordinates.map(c => "(" + c.x + "," + c.y + ")").mkString(",") + "]\"" writer.println(streetAccessScore.streetEdge.streetEdgeId + "," + streetAccessScore.osmId + "," + streetAccessScore.score + "," + coordStr + "," + streetAccessScore.attributes(0) + "," + streetAccessScore.attributes(1) + "," + streetAccessScore.attributes(2) + "," + streetAccessScore.attributes(3) + "," + streetAccessScore.significance(0) + "," + streetAccessScore.significance(1) + "," + streetAccessScore.significance(2) + "," + streetAccessScore.significance(3)) } writer.close() Future.successful(Ok.sendFile(content = file, onClose = () => file.delete)) } else if (filetype.isDefined && filetype.get == "shapefile"){ val streetBuffer: Buffer[StreetAttributeSignificance] = new ArrayBuffer[StreetAttributeSignificance] for(streetAccessScore <- streetAccessScores){ streetBuffer.add( StreetAttributeSignificance( 
streetAccessScore.streetEdge.geom.getCoordinates().map(c => new JTSCoordinate(c.x, c.y)), streetAccessScore.streetEdge.streetEdgeId, streetAccessScore.osmId, streetAccessScore.score, streetAccessScore.attributes, streetAccessScore.significance)) } ShapefilesCreatorHelper.createStreetShapefile("streetValues", streetBuffer) val shapefile: java.io.File = ShapefilesCreatorHelper.zipShapeFiles("streetScore", Array.apply("streetValues")) Future.successful(Ok.sendFile(content = shapefile, onClose = () => shapefile.delete())) } else { // In GeoJSON format. val features: List[JsObject] = streetAccessScores.map(_.toJSON) Future.successful(Ok(Json.obj("type" -> "FeatureCollection", "features" -> features))) } } /** * Generic version of getAccessScoreStreets, makes appropriate changes for v1 vs. v2. * * @param lat1 * @param lng1 * @param lat2 * @param lng2 * @param version * @return */ def getAccessScoreStreetsGeneric(lat1: Double, lng1: Double, lat2: Double, lng2: Double, version: Int): List[AccessScoreStreet] = { val coordinates = Array(min(lat1, lat2), max(lat1, lat2), min(lng1, lng2), max(lng1, lng2)) // Retrieve data and cluster them by location and label type. 
val streetEdges: List[StreetEdge] = StreetEdgeTable.selectAuditedStreetsWithin(coordinates(0), coordinates(2), coordinates(1), coordinates(3)) computeAccessScoresForStreets(streetEdges, getLabelsForScore(version, coordinates)) } // Helper methods def clusterLabelLocations(labelLocations: List[LabelLocation]): List[LabelLocation] = { // Cluster together the labelLocations var clusterIndex = 1 val radius = 5.78E-5 // Approximately 5 meters val group = labelLocations.groupBy(l => l.labelType) val clustered = for ((labelType, groupedLabels) <- group) yield { val tree: KdTree = new KdTree(0.0) groupedLabels.foreach { label => tree.insert(new Coordinate(label.lng.toDouble, label.lat.toDouble), label) } val clusters = new scala.collection.mutable.HashMap[LabelLocation, Int] for (label <- groupedLabels) { val (x, y) = (label.lng.toDouble, label.lat.toDouble) val (xMin, xMax, yMin, yMax) = (x - radius, x + radius, y - radius, y + radius) val envelope = new Envelope(xMin, xMax, yMin, yMax) val nearbyLabels = tree.query(envelope).toArray.map { node => node.asInstanceOf[KdNode].getData.asInstanceOf[LabelLocation] } // Group the labels into a cluster if (!clusters.contains(label)) { clusters.put(label, clusterIndex) nearbyLabels.foreach { nearbyLabel => if (!clusters.contains(nearbyLabel)) { clusters.put(nearbyLabel, clusterIndex) } } clusterIndex += 1 } } val swapped = clusters.groupBy(_._2).mapValues(_.keys) val clusteredLabelLocations = for ((ci, ll) <- swapped) yield { val labels = ll.toSeq val xmean = labels.map(_.lng).sum / labels.size val ymean = labels.map(_.lat).sum / labels.size LabelLocation(0, 0, labels.head.gsvPanoramaId, labelType, ymean, xmean) } clusteredLabelLocations } clustered.flatten.toList } /** * Retrieve streets in the given bounding box and corresponding labels for each street. 
* * References: * - http://www.vividsolutions.com/jts/javadoc/com/vividsolutions/jts/geom/Geometry.html * * @param streets List of streets that should be scored * @param labelLocations List of AttributeForAccessScore * */ def computeAccessScoresForStreets(streets: List[StreetEdge], labelLocations: List[AttributeForAccessScore]): List[AccessScoreStreet] = { val radius = 3.0E-4 // Approximately 10 meters val pm = new PrecisionModel() val srid = 4326 val factory: GeometryFactory = new GeometryFactory(pm, srid) val streetsWithOsmWayIds: List[(StreetEdge, OsmWayStreetEdge)] = OsmWayStreetEdgeTable.selectOsmWayIdsForStreets(streets) val streetAccessScores = streetsWithOsmWayIds.map { item => val (edge: StreetEdge, osmStreetId: OsmWayStreetEdge) = item; // Expand each edge a little bit and count the number of accessibility attributes. val buffer: Geometry = edge.geom.buffer(radius) // Increment a value in Map: http://stackoverflow.com/questions/15505048/access-initialize-and-update-values-in-a-mutable-map val labelCounter = collection.mutable.Map[String, Int]( "CurbRamp" -> 0, "NoCurbRamp" -> 0, "Obstacle" -> 0, "SurfaceProblem" -> 0 ).withDefaultValue(0) labelLocations.foreach { ll => val p: Point = factory.createPoint(new Coordinate(ll.lng.toDouble, ll.lat.toDouble)) if (p.within(buffer)) { labelCounter(ll.labelType) += 1 } } // Compute an access score. 
val attributes = Array(labelCounter("CurbRamp"), labelCounter("NoCurbRamp"), labelCounter("Obstacle"), labelCounter("SurfaceProblem")).map(_.toDouble) val significance = Array(0.75, -1.0, -1.0, -1.0) val accessScore: Double = computeAccessScore(attributes, significance) AccessScoreStreet(edge, osmStreetId.osmWayId, accessScore, attributes, significance) } streetAccessScores } def computeAccessScore(attributes: Array[Double], significance: Array[Double]): Double = { val t = (for ( (f, s) <- (attributes zip significance) ) yield f * s).sum // dot product val s = 1 / (1 + math.exp(-t)) // sigmoid function s } /** * Compute distance between two latlng coordinates using the Haversine formula * References: * https://rosettacode.org/wiki/Haversine_formula#Scala * * @param lat1 * @param lon1 * @param lat2 * @param lon2 * @return Distance in meters */ def haversine(lat1:Double, lon1:Double, lat2:Double, lon2:Double): Double = { val R = 6372800.0 //radius in m val dLat=(lat2 - lat1).toRadians val dLon=(lon2 - lon1).toRadians val a = pow(sin(dLat/2),2) + pow(sin(dLon/2),2) * cos(lat1.toRadians) * cos(lat2.toRadians) val c = 2 * asin(sqrt(a)) R * c } /** * Compute distance between two latlng coordinates using the Haversine formula * @param latLng1 * @param latLng2 * @return Distance in meters */ def haversine(latLng1: JsonLatLng, latLng2: JsonLatLng): Double = haversine(latLng1.lat, latLng1.lng, latLng2.lat, latLng2.lng) /** * Make a grid of latlng coordinates in a bounding box specified by a pair of latlng coordinates * @param latLng1 A latlng coordinate * @param latLng2 A latlng coordinate * @param stepSize A step size in meters * @return A list of latlng grid */ def makeALatLngGrid(latLng1: JsonLatLng, latLng2: JsonLatLng, stepSize: Double): List[JsonLatLng] = { val minLat: Double = min(latLng1.lat, latLng2.lat) val maxLat: Double = max(latLng1.lat, latLng2.lat) val minLng: Double = min(latLng1.lng, latLng2.lng) val maxLng: Double = max(latLng1.lng, latLng2.lng) val 
distance = haversine(minLat, minLng, maxLat, maxLng) val stepRatio: Double = stepSize / distance val dLat = maxLat - minLat val dLng = maxLng - minLng val stepSizeLng = dLng * stepRatio val stepSizeLat = dLat * stepRatio val lngRange = minLng to maxLng by stepSizeLng val latRange = minLat to maxLat by stepSizeLat val latLngs = for { lat <- latRange lng <- lngRange } yield JsonLatLng(lat, lng) latLngs.toList } /** * Make a grid of latlng coordinates in a bounding box specified by a pair of latlng coordinates * @param lat1 Latitude * @param lng1 Longitude * @param lat2 Latitude * @param lng2 Longitude * @param stepSize A step size in meters * @return A list of latlng grid */ def makeALatLngGrid(lat1: Double, lng1: Double, lat2: Double, lng2: Double, stepSize: Double): List[JsonLatLng] = makeALatLngGrid(JsonLatLng(lat1, lng1), JsonLatLng(lat2, lng2), stepSize) }
ProjectSidewalk/SidewalkWebpage
app/controllers/ProjectSidewalkAPIController.scala
Scala
mit
37,155
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs // Licence: http://www.gnu.org/licenses/gpl-3.0.en.html package org.ensime.indexer import scala.collection.immutable.Queue import akka.event.slf4j.SLF4JLogging import org.apache.commons.vfs2.FileObject import org.objectweb.asm._ import org.objectweb.asm.Opcodes._ trait ClassfileIndexer { this: SLF4JLogging => /** * @param file to index * @return the parsed version of the classfile and FQNs referenced within */ def indexClassfile(file: FileObject): (RawClassfile, Set[FullyQualifiedName]) = { val name = file.getName require(file.exists(), s"$name does not exist") require(name.getBaseName.endsWith(".class"), s"$name is not a class file") val in = file.getContent.getInputStream val raw = try { val reader = new ClassReader(in) val receiver = new AsmCallback reader.accept(receiver, ClassReader.SKIP_FRAMES) receiver } finally in.close() (raw.clazz, raw.refs) } // extracts all the classnames from a descriptor private def classesInDescriptor(desc: String): List[ClassName] = DescriptorParser.parse(desc) match { case Descriptor(params, ret) => (ret :: params).map { case c: ClassName => c case a: ArrayDescriptor => a.reifier } } private class AsmCallback extends ClassVisitor(ASM5) with ReferenceInClassHunter { // updated every time we get more info @volatile var clazz: RawClassfile = _ override def visit( version: Int, access: Int, name: String, signature: String, superName: String, interfaces: Array[String] ): Unit = { clazz = RawClassfile( ClassName.fromInternal(name), Option(signature), Option(superName).map(ClassName.fromInternal), interfaces.toList.map(ClassName.fromInternal), Access(access), (ACC_DEPRECATED & access) > 0, Queue.empty, Queue.empty, RawSource(None, None) ) } override def visitSource(filename: String, debug: String): Unit = { clazz = clazz.copy(source = RawSource(Option(filename), None)) } override def visitField(access: Int, name: String, desc: String, signature: String, value: 
AnyRef): FieldVisitor = { val field = RawField( FieldName(clazz.name, name), DescriptorParser.parseType(desc), Option(signature), Access(access) ) clazz = clazz.copy(fields = clazz.fields enqueue field) super.visitField(access, name, desc, signature, value) } override def visitMethod(access: Int, region: String, desc: String, signature: String, exceptions: Array[String]): MethodVisitor = { super.visitMethod(access, region, desc, signature, exceptions) new MethodVisitor(ASM5) with ReferenceInMethodHunter { var firstLine: Option[Int] = None override def visitLineNumber(line: Int, start: Label): Unit = { val isEarliestLineSeen = firstLine.map(_ < line).getOrElse(true) if (isEarliestLineSeen) firstLine = Some(line) } override def visitEnd(): Unit = { addRefs(internalRefs) region match { case "<init>" | "<clinit>" => (clazz.source.line, firstLine) match { case (_, None) => case (Some(existing), Some(latest)) if existing <= latest => case _ => clazz = clazz.copy(source = clazz.source.copy(line = firstLine)) } case name => val descriptor = DescriptorParser.parse(desc) val method = RawMethod(MethodName(clazz.name, name, descriptor), Access(access), Option(signature), firstLine) clazz = clazz.copy(methods = clazz.methods enqueue method) } } } } } // factors out much of the verbose code that looks for references to members private trait ReferenceInClassHunter { this: ClassVisitor => def clazz: RawClassfile // NOTE: only mutate via addRefs var refs = Set.empty[FullyQualifiedName] protected def addRefs(seen: Seq[FullyQualifiedName]): Unit = { refs ++= seen.filterNot(_.contains(clazz.name)) } protected def addRef(seen: FullyQualifiedName): Unit = addRefs(seen :: Nil) private val fieldVisitor = new FieldVisitor(ASM5) { override def visitAnnotation(desc: String, visible: Boolean) = handleAnn(desc) override def visitTypeAnnotation( typeRef: Int, typePath: TypePath, desc: String, visible: Boolean ) = handleAnn(desc) } override def visitField(access: Int, name: String, desc: String, 
signature: String, value: AnyRef): FieldVisitor = { addRef(ClassName.fromDescriptor(desc)) fieldVisitor } override def visitMethod(access: Int, region: String, desc: String, signature: String, exceptions: Array[String]): MethodVisitor = { addRefs(classesInDescriptor(desc)) if (exceptions != null) addRefs(exceptions.map(ClassName.fromInternal)) null } override def visitInnerClass(name: String, outerName: String, innerName: String, access: Int): Unit = { addRef(ClassName.fromInternal(name)) } override def visitOuterClass(owner: String, name: String, desc: String): Unit = { addRef(ClassName.fromInternal(owner)) } private val annVisitor: AnnotationVisitor = new AnnotationVisitor(ASM5) { override def visitAnnotation(name: String, desc: String) = handleAnn(desc) override def visitEnum( name: String, desc: String, value: String ): Unit = handleAnn(desc) } private def handleAnn(desc: String): AnnotationVisitor = { addRef(ClassName.fromDescriptor(desc)) annVisitor } override def visitAnnotation(desc: String, visible: Boolean) = handleAnn(desc) override def visitTypeAnnotation( typeRef: Int, typePath: TypePath, desc: String, visible: Boolean ) = handleAnn(desc) } private trait ReferenceInMethodHunter { this: MethodVisitor => // NOTE: :+ and :+= are really slow (scala 2.10), prefer "enqueue" protected var internalRefs = Queue.empty[FullyQualifiedName] // doesn't disambiguate FQNs of methods, so storing as FieldName references private def memberOrInit(owner: String, name: String): FullyQualifiedName = name match { case "<init>" | "<clinit>" => ClassName.fromInternal(owner) case member => FieldName(ClassName.fromInternal(owner), member) } override def visitLocalVariable( name: String, desc: String, signature: String, start: Label, end: Label, index: Int ): Unit = { internalRefs = internalRefs enqueue ClassName.fromDescriptor(desc) } override def visitMultiANewArrayInsn(desc: String, dims: Int): Unit = { internalRefs = internalRefs enqueue ClassName.fromDescriptor(desc) } 
override def visitTypeInsn(opcode: Int, desc: String): Unit = { internalRefs = internalRefs enqueue ClassName.fromInternal(desc) } override def visitFieldInsn( opcode: Int, owner: String, name: String, desc: String ): Unit = { internalRefs = internalRefs enqueue memberOrInit(owner, name) internalRefs = internalRefs enqueue ClassName.fromDescriptor(desc) } override def visitMethodInsn( opcode: Int, owner: String, name: String, desc: String, itf: Boolean ): Unit = { internalRefs :+= memberOrInit(owner, name) internalRefs = internalRefs.enqueue(classesInDescriptor(desc)) } override def visitInvokeDynamicInsn(name: String, desc: String, bsm: Handle, bsmArgs: AnyRef*): Unit = { internalRefs :+= memberOrInit(bsm.getOwner, bsm.getName) internalRefs = internalRefs.enqueue(classesInDescriptor(bsm.getDesc)) } private val annVisitor: AnnotationVisitor = new AnnotationVisitor(ASM5) { override def visitAnnotation(name: String, desc: String) = handleAnn(desc) override def visitEnum(name: String, desc: String, value: String): Unit = handleAnn(desc) } private def handleAnn(desc: String): AnnotationVisitor = { internalRefs = internalRefs enqueue ClassName.fromDescriptor(desc) annVisitor } override def visitAnnotation(desc: String, visible: Boolean) = handleAnn(desc) override def visitAnnotationDefault() = annVisitor override def visitInsnAnnotation( typeRef: Int, typePath: TypePath, desc: String, visible: Boolean ) = handleAnn(desc) override def visitLocalVariableAnnotation( typeRef: Int, typePath: TypePath, start: Array[Label], end: Array[Label], index: Array[Int], desc: String, visible: Boolean ) = handleAnn(desc) override def visitParameterAnnotation( parameter: Int, desc: String, visible: Boolean ) = handleAnn(desc) override def visitTryCatchAnnotation( typeRef: Int, typePath: TypePath, desc: String, visible: Boolean ) = handleAnn(desc) override def visitTypeAnnotation( typeRef: Int, typePath: TypePath, desc: String, visible: Boolean ) = handleAnn(desc) } }
d1egoaz/ensime-sbt
src/sbt-test/sbt-ensime/ensime-server/core/src/main/scala/org/ensime/indexer/ClassfileIndexer.scala
Scala
apache-2.0
9,114
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import java.util.{Locale, Properties, UUID} import scala.collection.JavaConverters._ import org.apache.spark.annotation.Stable import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.{EliminateSubqueryAliases, UnresolvedRelation} import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.expressions.Literal import org.apache.spark.sql.catalyst.plans.logical.{AppendData, InsertIntoTable, LogicalPlan, OverwriteByExpression} import org.apache.spark.sql.execution.SQLExecution import org.apache.spark.sql.execution.command.DDLUtils import org.apache.spark.sql.execution.datasources.{CreateTable, DataSource, LogicalRelation} import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, DataSourceV2Utils, FileDataSourceV2, WriteToDataSourceV2} import org.apache.spark.sql.sources.BaseRelation import org.apache.spark.sql.sources.v2._ import org.apache.spark.sql.sources.v2.TableCapability._ import org.apache.spark.sql.sources.v2.writer.SupportsSaveMode import org.apache.spark.sql.types.StructType import org.apache.spark.sql.util.CaseInsensitiveStringMap /** * Interface used to write a [[Dataset]] to 
external storage systems (e.g. file systems, * key-value stores, etc). Use `Dataset.write` to access this. * * @since 1.4.0 */ @Stable final class DataFrameWriter[T] private[sql](ds: Dataset[T]) { private val df = ds.toDF() /** * Specifies the behavior when data or table already exists. Options include: * <ul> * <li>`SaveMode.Overwrite`: overwrite the existing data.</li> * <li>`SaveMode.Append`: append the data.</li> * <li>`SaveMode.Ignore`: ignore the operation (i.e. no-op).</li> * <li>`SaveMode.ErrorIfExists`: default option, throw an exception at runtime.</li> * </ul> * * @since 1.4.0 */ def mode(saveMode: SaveMode): DataFrameWriter[T] = { this.mode = saveMode this } /** * Specifies the behavior when data or table already exists. Options include: * <ul> * <li>`overwrite`: overwrite the existing data.</li> * <li>`append`: append the data.</li> * <li>`ignore`: ignore the operation (i.e. no-op).</li> * <li>`error` or `errorifexists`: default option, throw an exception at runtime.</li> * </ul> * * @since 1.4.0 */ def mode(saveMode: String): DataFrameWriter[T] = { this.mode = saveMode.toLowerCase(Locale.ROOT) match { case "overwrite" => SaveMode.Overwrite case "append" => SaveMode.Append case "ignore" => SaveMode.Ignore case "error" | "errorifexists" | "default" => SaveMode.ErrorIfExists case _ => throw new IllegalArgumentException(s"Unknown save mode: $saveMode. " + "Accepted save modes are 'overwrite', 'append', 'ignore', 'error', 'errorifexists'.") } this } /** * Specifies the underlying output data source. Built-in options include "parquet", "json", etc. * * @since 1.4.0 */ def format(source: String): DataFrameWriter[T] = { this.source = source this } /** * Adds an output option for the underlying data source. 
* * You can set the following option(s): * <ul> * <li>`timeZone` (default session local timezone): sets the string that indicates a timezone * to be used to format timestamps in the JSON/CSV datasources or partition values.</li> * </ul> * * @since 1.4.0 */ def option(key: String, value: String): DataFrameWriter[T] = { this.extraOptions += (key -> value) this } /** * Adds an output option for the underlying data source. * * @since 2.0.0 */ def option(key: String, value: Boolean): DataFrameWriter[T] = option(key, value.toString) /** * Adds an output option for the underlying data source. * * @since 2.0.0 */ def option(key: String, value: Long): DataFrameWriter[T] = option(key, value.toString) /** * Adds an output option for the underlying data source. * * @since 2.0.0 */ def option(key: String, value: Double): DataFrameWriter[T] = option(key, value.toString) /** * (Scala-specific) Adds output options for the underlying data source. * * You can set the following option(s): * <ul> * <li>`timeZone` (default session local timezone): sets the string that indicates a timezone * to be used to format timestamps in the JSON/CSV datasources or partition values.</li> * </ul> * * @since 1.4.0 */ def options(options: scala.collection.Map[String, String]): DataFrameWriter[T] = { this.extraOptions ++= options this } /** * Adds output options for the underlying data source. * * You can set the following option(s): * <ul> * <li>`timeZone` (default session local timezone): sets the string that indicates a timezone * to be used to format timestamps in the JSON/CSV datasources or partition values.</li> * </ul> * * @since 1.4.0 */ def options(options: java.util.Map[String, String]): DataFrameWriter[T] = { this.options(options.asScala) this } /** * Partitions the output by the given columns on the file system. If specified, the output is * laid out on the file system similar to Hive's partitioning scheme. 
As an example, when we
   * partition a dataset by year and then month, the directory layout would look like:
   * <ul>
   * <li>year=2016/month=01/</li>
   * <li>year=2016/month=02/</li>
   * </ul>
   *
   * Partitioning is one of the most widely used techniques to optimize physical data layout.
   * It provides a coarse-grained index for skipping unnecessary data reads when queries have
   * predicates on the partitioned columns. In order for partitioning to work well, the number
   * of distinct values in each column should typically be less than tens of thousands.
   *
   * This is applicable for all file-based data sources (e.g. Parquet, JSON) starting with Spark
   * 2.1.0.
   *
   * @since 1.4.0
   */
  @scala.annotation.varargs
  def partitionBy(colNames: String*): DataFrameWriter[T] = {
    this.partitioningColumns = Option(colNames)
    this
  }

  /**
   * Buckets the output by the given columns. If specified, the output is laid out on the file
   * system similar to Hive's bucketing scheme.
   *
   * This is applicable for all file-based data sources (e.g. Parquet, JSON) starting with Spark
   * 2.1.0.
   *
   * @since 2.0.0
   */
  @scala.annotation.varargs
  def bucketBy(numBuckets: Int, colName: String, colNames: String*): DataFrameWriter[T] = {
    this.numBuckets = Option(numBuckets)
    // The first bucketing column is a separate parameter so at least one is required.
    this.bucketColumnNames = Option(colName +: colNames)
    this
  }

  /**
   * Sorts the output in each bucket by the given columns. Only valid in combination with
   * `bucketBy` (enforced lazily in `getBucketSpec`).
   *
   * This is applicable for all file-based data sources (e.g. Parquet, JSON) starting with Spark
   * 2.1.0.
   *
   * @since 2.0.0
   */
  @scala.annotation.varargs
  def sortBy(colName: String, colNames: String*): DataFrameWriter[T] = {
    this.sortColumnNames = Option(colName +: colNames)
    this
  }

  /**
   * Saves the content of the `DataFrame` at the specified path.
   *
   * @since 1.4.0
   */
  def save(path: String): Unit = {
    // The path is threaded through to the data source as a regular option.
    this.extraOptions += ("path" -> path)
    save()
  }

  /**
   * Saves the content of the `DataFrame` as the specified table.
*
   * @since 1.4.0
   */
  def save(): Unit = {
    if (source.toLowerCase(Locale.ROOT) == DDLUtils.HIVE_PROVIDER) {
      throw new AnalysisException("Hive data source can only be used with tables, you can not " +
        "write files of Hive data source directly.")
    }

    assertNotBucketed("save")

    val session = df.sparkSession
    // Data sources the user has explicitly forced onto the V1 write path (comma-separated).
    // NOTE(review): the conf accessor is spelled `userV1SourceWriterList` while the local
    // val is `useV1Sources` — confirm the accessor name against SQLConf.
    val useV1Sources =
      session.sessionState.conf.userV1SourceWriterList.toLowerCase(Locale.ROOT).split(",")
    val lookupCls = DataSource.lookupDataSource(source, session.sessionState.conf)
    // NOTE(review): `Class.newInstance` is deprecated in recent JDKs; the V2 branch below
    // already uses `getConstructor().newInstance()` — consider unifying.
    val cls = lookupCls.newInstance() match {
      // File sources fall back to V1 when listed by short name or fully-qualified class name.
      case f: FileDataSourceV2 if useV1Sources.contains(f.shortName()) ||
        useV1Sources.contains(lookupCls.getCanonicalName.toLowerCase(Locale.ROOT)) =>
        f.fallBackFileFormat
      case _ => lookupCls
    }
    // In Data Source V2 project, partitioning is still under development.
    // Here we fallback to V1 if partitioning columns are specified.
    // TODO(SPARK-26778): use V2 implementations when partitioning feature is supported.
    if (classOf[TableProvider].isAssignableFrom(cls) && partitioningColumns.isEmpty) {
      val provider = cls.getConstructor().newInstance().asInstanceOf[TableProvider]
      val sessionOptions = DataSourceV2Utils.extractSessionConfigs(
        provider, session.sessionState.conf)
      // Explicit writer options take precedence over session-derived ones.
      val options = sessionOptions ++ extraOptions
      val dsOptions = new CaseInsensitiveStringMap(options.asJava)

      import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Implicits._
      provider.getTable(dsOptions) match {
        case table: SupportsWrite if table.supports(BATCH_WRITE) =>
          // `lazy` so the relation is only built for the branches that need it.
          lazy val relation = DataSourceV2Relation.create(table, dsOptions)
          mode match {
            case SaveMode.Append =>
              runCommand(df.sparkSession, "save") {
                AppendData.byName(relation, df.logicalPlan)
              }

            case SaveMode.Overwrite if table.supportsAny(TRUNCATE, OVERWRITE_BY_FILTER) =>
              // truncate the table
              runCommand(df.sparkSession, "save") {
                OverwriteByExpression.byName(relation, df.logicalPlan, Literal(true))
              }

            case _ =>
              table.newWriteBuilder(dsOptions) match {
                case writeBuilder: SupportsSaveMode =>
                  val write = writeBuilder.mode(mode)
                    .withQueryId(UUID.randomUUID().toString)
                    .withInputDataSchema(df.logicalPlan.schema)
                    .buildForBatch()
                  // It can only return null with `SupportsSaveMode`. We can clean it up after
                  // removing `SupportsSaveMode`.
                  if (write != null) {
                    runCommand(df.sparkSession, "save") {
                      WriteToDataSourceV2(write, df.logicalPlan)
                    }
                  }

                case _ =>
                  throw new AnalysisException(
                    s"data source ${table.name} does not support SaveMode $mode")
              }
          }

        // Streaming also uses the data source V2 API. So it may be that the data source implements
        // v2, but has no v2 implementation for batch writes. In that case, we fall back to saving
        // as though it's a V1 source.
        case _ => saveToV1Source()
      }
    } else {
      saveToV1Source()
    }
  }

  // V1 write path: plan the write through the legacy DataSource machinery.
  private def saveToV1Source(): Unit = {
    // Code path for data source v1.
    runCommand(df.sparkSession, "save") {
      DataSource(
        sparkSession = df.sparkSession,
        className = source,
        partitionColumns = partitioningColumns.getOrElse(Nil),
        options = extraOptions.toMap).planForWriting(mode, df.logicalPlan)
    }
  }

  /**
   * Inserts the content of the `DataFrame` to the specified table. It requires that
   * the schema of the `DataFrame` is the same as the schema of the table.
   *
   * @note Unlike `saveAsTable`, `insertInto` ignores the column names and just uses position-based
   * resolution. For example:
   *
   * {{{
   *    scala> Seq((1, 2)).toDF("i", "j").write.mode("overwrite").saveAsTable("t1")
   *    scala> Seq((3, 4)).toDF("j", "i").write.insertInto("t1")
   *    scala> Seq((5, 6)).toDF("a", "b").write.insertInto("t1")
   *    scala> sql("select * from t1").show
   *    +---+---+
   *    |  i|  j|
   *    +---+---+
   *    |  5|  6|
   *    |  3|  4|
   *    |  1|  2|
   *    +---+---+
   * }}}
   *
   * Because it inserts data to an existing table, format or options will be ignored.
*
   * @since 1.4.0
   */
  def insertInto(tableName: String): Unit = {
    insertInto(df.sparkSession.sessionState.sqlParser.parseTableIdentifier(tableName))
  }

  private def insertInto(tableIdent: TableIdentifier): Unit = {
    assertNotBucketed("insertInto")

    // Partitioning comes from the existing table's definition, not from this writer.
    if (partitioningColumns.isDefined) {
      throw new AnalysisException(
        "insertInto() can't be used together with partitionBy(). " +
          "Partition columns have already been defined for the table. " +
          "It is not necessary to use partitionBy()."
      )
    }

    runCommand(df.sparkSession, "insertInto") {
      InsertIntoTable(
        table = UnresolvedRelation(tableIdent),
        partition = Map.empty[String, Option[String]],
        query = df.logicalPlan,
        overwrite = mode == SaveMode.Overwrite,
        ifPartitionNotExists = false)
    }
  }

  // Derives the bucketing spec from builder state. sortBy alone is rejected because
  // sort columns are only meaningful within buckets.
  private def getBucketSpec: Option[BucketSpec] = {
    if (sortColumnNames.isDefined && numBuckets.isEmpty) {
      throw new AnalysisException("sortBy must be used together with bucketBy")
    }

    numBuckets.map { n =>
      BucketSpec(n, bucketColumnNames.get, sortColumnNames.getOrElse(Nil))
    }
  }

  // Rejects operations that cannot write bucketed output (save/insertInto/jdbc).
  private def assertNotBucketed(operation: String): Unit = {
    if (getBucketSpec.isDefined) {
      if (sortColumnNames.isEmpty) {
        throw new AnalysisException(s"'$operation' does not support bucketBy right now")
      } else {
        throw new AnalysisException(s"'$operation' does not support bucketBy and sortBy right now")
      }
    }
  }

  // Rejects operations that cannot write partitioned output (jdbc).
  private def assertNotPartitioned(operation: String): Unit = {
    if (partitioningColumns.isDefined) {
      throw new AnalysisException(s"'$operation' does not support partitioning")
    }
  }

  /**
   * Saves the content of the `DataFrame` as the specified table.
   *
   * In the case the table already exists, behavior of this function depends on the
   * save mode, specified by the `mode` function (default to throwing an exception).
   * When `mode` is `Overwrite`, the schema of the `DataFrame` does not need to be
   * the same as that of the existing table.
   *
   * When `mode` is `Append`, if there is an existing table, we will use the format and options of
   * the existing table.
The column order in the schema of the `DataFrame` doesn't need to be same * as that of the existing table. Unlike `insertInto`, `saveAsTable` will use the column names to * find the correct column positions. For example: * * {{{ * scala> Seq((1, 2)).toDF("i", "j").write.mode("overwrite").saveAsTable("t1") * scala> Seq((3, 4)).toDF("j", "i").write.mode("append").saveAsTable("t1") * scala> sql("select * from t1").show * +---+---+ * | i| j| * +---+---+ * | 1| 2| * | 4| 3| * +---+---+ * }}} * * In this method, save mode is used to determine the behavior if the data source table exists in * Spark catalog. We will always overwrite the underlying data of data source (e.g. a table in * JDBC data source) if the table doesn't exist in Spark catalog, and will always append to the * underlying data of data source if the table already exists. * * When the DataFrame is created from a non-partitioned `HadoopFsRelation` with a single input * path, and the data source provider can be mapped to an existing Hive builtin SerDe (i.e. ORC * and Parquet), the table is persisted in a Hive compatible format, which means other systems * like Hive will be able to read this table. Otherwise, the table is persisted in a Spark SQL * specific format. 
*
   * @since 1.4.0
   */
  def saveAsTable(tableName: String): Unit = {
    saveAsTable(df.sparkSession.sessionState.sqlParser.parseTableIdentifier(tableName))
  }

  private def saveAsTable(tableIdent: TableIdentifier): Unit = {
    val catalog = df.sparkSession.sessionState.catalog
    val tableExists = catalog.tableExists(tableIdent)
    val db = tableIdent.database.getOrElse(catalog.getCurrentDatabase)
    val tableIdentWithDB = tableIdent.copy(database = Some(db))
    val tableName = tableIdentWithDB.unquotedString

    (tableExists, mode) match {
      case (true, SaveMode.Ignore) =>
        // Do nothing

      case (true, SaveMode.ErrorIfExists) =>
        // NOTE(review): other messages in this method use the database-qualified
        // $tableName; this one uses the raw $tableIdent — consider making them consistent.
        throw new AnalysisException(s"Table $tableIdent already exists.")

      case (true, SaveMode.Overwrite) =>
        // Get all input data source or hive relations of the query.
        val srcRelations = df.logicalPlan.collect {
          case LogicalRelation(src: BaseRelation, _, _, _) => src
          case relation: HiveTableRelation => relation.tableMeta.identifier
        }

        val tableRelation = df.sparkSession.table(tableIdentWithDB).queryExecution.analyzed
        EliminateSubqueryAliases(tableRelation) match {
          // check if the table is a data source table (the relation is a BaseRelation).
          case LogicalRelation(dest: BaseRelation, _, _, _) if srcRelations.contains(dest) =>
            throw new AnalysisException(
              s"Cannot overwrite table $tableName that is also being read from")
          // check hive table relation when overwrite mode
          case relation: HiveTableRelation
            if srcRelations.contains(relation.tableMeta.identifier) =>
            throw new AnalysisException(
              s"Cannot overwrite table $tableName that is also being read from")
          case _ => // OK
        }

        // Drop the existing table
        catalog.dropTable(tableIdentWithDB, ignoreIfNotExists = true, purge = false)
        createTable(tableIdentWithDB)
        // Refresh the cache of the table in the catalog.
        catalog.refreshTable(tableIdentWithDB)

      case _ => createTable(tableIdent)
    }
  }

  // Builds the catalog entry for the target table and plans a CreateTable command that also
  // writes the query result. The table is EXTERNAL when an explicit location was supplied
  // via options, MANAGED otherwise.
  private def createTable(tableIdent: TableIdentifier): Unit = {
    val storage = DataSource.buildStorageFormatFromOptions(extraOptions.toMap)
    val tableType = if (storage.locationUri.isDefined) {
      CatalogTableType.EXTERNAL
    } else {
      CatalogTableType.MANAGED
    }

    val tableDesc = CatalogTable(
      identifier = tableIdent,
      tableType = tableType,
      storage = storage,
      schema = new StructType,
      provider = Some(source),
      partitionColumnNames = partitioningColumns.getOrElse(Nil),
      bucketSpec = getBucketSpec)

    runCommand(df.sparkSession, "saveAsTable")(CreateTable(tableDesc, mode, Some(df.logicalPlan)))
  }

  /**
   * Saves the content of the `DataFrame` to an external database table via JDBC. In the case the
   * table already exists in the external database, behavior of this function depends on the
   * save mode, specified by the `mode` function (default to throwing an exception).
   *
   * Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
   * your external database systems.
   *
   * You can set the following JDBC-specific option(s) for storing JDBC:
   * <ul>
   * <li>`truncate` (default `false`): use `TRUNCATE TABLE` instead of `DROP TABLE`.</li>
   * </ul>
   *
   * In case of failures, users should turn off `truncate` option to use `DROP TABLE` again. Also,
   * due to the different behavior of `TRUNCATE TABLE` among DBMS, it's not always safe to use this.
   * MySQLDialect, DB2Dialect, MsSqlServerDialect, DerbyDialect, and OracleDialect support this
   * while PostgresDialect and default JDBCDialect doesn't. For unknown and unsupported JDBCDialect,
   * the user option `truncate` is ignored.
   *
   * @param url JDBC database url of the form `jdbc:subprotocol:subname`
   * @param table Name of the table in the external database.
   * @param connectionProperties JDBC database connection arguments, a list of arbitrary string
   *                             tag/value. Normally at least a "user" and "password" property
   *                             should be included.
"batchsize" can be used to control the
   *                             number of rows per insert. "isolationLevel" can be one of
   *                             "NONE", "READ_COMMITTED", "READ_UNCOMMITTED", "REPEATABLE_READ",
   *                             or "SERIALIZABLE", corresponding to standard transaction
   *                             isolation levels defined by JDBC's Connection object, with default
   *                             of "READ_UNCOMMITTED".
   * @since 1.4.0
   */
  def jdbc(url: String, table: String, connectionProperties: Properties): Unit = {
    // JDBC writes support neither partitioning nor bucketing.
    assertNotPartitioned("jdbc")
    assertNotBucketed("jdbc")
    // connectionProperties should override settings in extraOptions.
    this.extraOptions ++= connectionProperties.asScala
    // explicit url and dbtable should override all
    this.extraOptions += ("url" -> url, "dbtable" -> table)
    format("jdbc").save()
  }

  /**
   * Saves the content of the `DataFrame` in JSON format (<a href="http://jsonlines.org/">
   * JSON Lines text format or newline-delimited JSON</a>) at the specified path.
   * This is equivalent to:
   * {{{
   *   format("json").save(path)
   * }}}
   *
   * You can set the following JSON-specific option(s) for writing JSON files:
   * <ul>
   * <li>`compression` (default `null`): compression codec to use when saving to file. This can be
   * one of the known case-insensitive shorten names (`none`, `bzip2`, `gzip`, `lz4`,
   * `snappy` and `deflate`). </li>
   * <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
   * Custom date formats follow the formats at `java.time.format.DateTimeFormatter`.
   * This applies to date type.</li>
   * <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that
   * indicates a timestamp format. Custom date formats follow the formats at
   * `java.time.format.DateTimeFormatter`. This applies to timestamp type.</li>
   * <li>`encoding` (by default it is not set): specifies encoding (charset) of saved json
   * files. If it is not set, the UTF-8 charset will be used.
</li>
   * <li>`lineSep` (default `\n`): defines the line separator that should be used for writing.</li>
   * </ul>
   *
   * @since 1.4.0
   */
  def json(path: String): Unit = {
    format("json").save(path)
  }

  /**
   * Saves the content of the `DataFrame` in Parquet format at the specified path.
   * This is equivalent to:
   * {{{
   *   format("parquet").save(path)
   * }}}
   *
   * You can set the following Parquet-specific option(s) for writing Parquet files:
   * <ul>
   * <li>`compression` (default is the value specified in `spark.sql.parquet.compression.codec`):
   * compression codec to use when saving to file. This can be one of the known case-insensitive
   * shorten names(`none`, `uncompressed`, `snappy`, `gzip`, `lzo`, `brotli`, `lz4`, and `zstd`).
   * This will override `spark.sql.parquet.compression.codec`.</li>
   * </ul>
   *
   * @since 1.4.0
   */
  def parquet(path: String): Unit = {
    format("parquet").save(path)
  }

  /**
   * Saves the content of the `DataFrame` in ORC format at the specified path.
   * This is equivalent to:
   * {{{
   *   format("orc").save(path)
   * }}}
   *
   * You can set the following ORC-specific option(s) for writing ORC files:
   * <ul>
   * <li>`compression` (default is the value specified in `spark.sql.orc.compression.codec`):
   * compression codec to use when saving to file. This can be one of the known case-insensitive
   * shorten names(`none`, `snappy`, `zlib`, and `lzo`). This will override
   * `orc.compress` and `spark.sql.orc.compression.codec`. If `orc.compress` is given,
   * it overrides `spark.sql.orc.compression.codec`.</li>
   * </ul>
   *
   * @since 1.5.0
   * @note Currently, this method can only be used after enabling Hive support
   */
  def orc(path: String): Unit = {
    format("orc").save(path)
  }

  /**
   * Saves the content of the `DataFrame` in a text file at the specified path.
   * The DataFrame must have only one column that is of string type.
   * Each row becomes a new line in the output file.
For example:
   * {{{
   *   // Scala:
   *   df.write.text("/path/to/output")
   *
   *   // Java:
   *   df.write().text("/path/to/output")
   * }}}
   * The text files will be encoded as UTF-8.
   *
   * You can set the following option(s) for writing text files:
   * <ul>
   * <li>`compression` (default `null`): compression codec to use when saving to file. This can be
   * one of the known case-insensitive shorten names (`none`, `bzip2`, `gzip`, `lz4`,
   * `snappy` and `deflate`). </li>
   * <li>`lineSep` (default `\n`): defines the line separator that should be used for writing.</li>
   * </ul>
   *
   * @since 1.6.0
   */
  def text(path: String): Unit = {
    format("text").save(path)
  }

  /**
   * Saves the content of the `DataFrame` in CSV format at the specified path.
   * This is equivalent to:
   * {{{
   *   format("csv").save(path)
   * }}}
   *
   * You can set the following CSV-specific option(s) for writing CSV files:
   * <ul>
   * <li>`sep` (default `,`): sets a single character as a separator for each
   * field and value.</li>
   * <li>`quote` (default `"`): sets a single character used for escaping quoted values where
   * the separator can be part of the value. If an empty string is set, it uses `u0000`
   * (null character).</li>
   * <li>`escape` (default `\`): sets a single character used for escaping quotes inside
   * an already quoted value.</li>
   * <li>`charToEscapeQuoteEscaping` (default `escape` or `\0`): sets a single character used for
   * escaping the escape for the quote character. The default value is escape character when escape
   * and quote characters are different, `\0` otherwise.</li>
   * <li>`escapeQuotes` (default `true`): a flag indicating whether values containing
   * quotes should always be enclosed in quotes. Default is to escape all values containing
   * a quote character.</li>
   * <li>`quoteAll` (default `false`): a flag indicating whether all values should always be
   * enclosed in quotes.
Default is to only escape values containing a quote character.</li>
   * <li>`header` (default `false`): writes the names of columns as the first line.</li>
   * <li>`nullValue` (default empty string): sets the string representation of a null value.</li>
   * <li>`emptyValue` (default `""`): sets the string representation of an empty value.</li>
   * <li>`encoding` (by default it is not set): specifies encoding (charset) of saved csv
   * files. If it is not set, the UTF-8 charset will be used.</li>
   * <li>`compression` (default `null`): compression codec to use when saving to file. This can be
   * one of the known case-insensitive shorten names (`none`, `bzip2`, `gzip`, `lz4`,
   * `snappy` and `deflate`). </li>
   * <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
   * Custom date formats follow the formats at `java.time.format.DateTimeFormatter`.
   * This applies to date type.</li>
   * <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that
   * indicates a timestamp format. Custom date formats follow the formats at
   * `java.time.format.DateTimeFormatter`. This applies to timestamp type.</li>
   * <li>`ignoreLeadingWhiteSpace` (default `true`): a flag indicating whether or not leading
   * whitespaces from values being written should be skipped.</li>
   * <li>`ignoreTrailingWhiteSpace` (default `true`): a flag indicating whether or not
   * trailing whitespaces from values being written should be skipped.</li>
   * <li>`lineSep` (default `\n`): defines the line separator that should be used for writing.
   * Maximum length is 1 character.</li>
   * </ul>
   *
   * @since 2.0.0
   */
  def csv(path: String): Unit = {
    format("csv").save(path)
  }

  /**
   * Wrap a DataFrameWriter action to track the QueryExecution and time cost, then report to the
   * user-registered callback functions.
*/
  private def runCommand(session: SparkSession, name: String)(command: LogicalPlan): Unit = {
    val qe = session.sessionState.executePlan(command)
    // call `QueryExecution.toRDD` to trigger the execution of commands.
    SQLExecution.withNewExecutionId(session, qe, Some(name))(qe.toRdd)
  }

  ///////////////////////////////////////////////////////////////////////////////////////
  // Builder pattern config options
  ///////////////////////////////////////////////////////////////////////////////////////

  // Output data source name; defaults to the session's configured default source.
  private var source: String = df.sparkSession.sessionState.conf.defaultDataSourceName

  // What to do when the target data/table already exists.
  private var mode: SaveMode = SaveMode.ErrorIfExists

  // Free-form options forwarded to the underlying data source (also carries
  // "path", "url" and "dbtable" set by save(path)/jdbc()).
  private val extraOptions = new scala.collection.mutable.HashMap[String, String]

  // Set by partitionBy(); None means no partitioning.
  private var partitioningColumns: Option[Seq[String]] = None

  // Set by bucketBy(); both fields must be present for a valid bucket spec.
  private var bucketColumnNames: Option[Seq[String]] = None

  private var numBuckets: Option[Int] = None

  // Set by sortBy(); only valid together with bucketBy (see getBucketSpec).
  private var sortColumnNames: Option[Seq[String]] = None
}
Aegeaner/spark
sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
Scala
apache-2.0
29,466
/*
 * BeliefPropagation.scala
 * A belief propagation algorithm.
 *
 * Created By:      Brian Ruttenberg (bruttenberg@cra.com)
 * Creation Date:   Jan 15, 2014
 *
 * Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
 * See http://www.cra.com or email figaro@cra.com for information.
 *
 * See http://www.github.com/p2t2/figaro for a copy of the software license.
 */

package com.cra.figaro.algorithm.factored.beliefpropagation

import scala.Option.option2Iterable
import com.cra.figaro.algorithm._
import com.cra.figaro.algorithm.sampling._
import com.cra.figaro.language._
import com.cra.figaro.util._
import annotation.tailrec
import com.cra.figaro.algorithm.OneTimeProbQuery
import com.cra.figaro.algorithm.ProbQueryAlgorithm
import com.cra.figaro.algorithm.factored._
import com.cra.figaro.algorithm.factored.Variable
import com.cra.figaro.algorithm.sampling.ProbEvidenceSampler
import com.cra.figaro.language.Element
import com.cra.figaro.language.Universe
import com.cra.figaro.algorithm.lazyfactored.LazyValues
import com.cra.figaro.algorithm.lazyfactored.BoundedProbFactor
import scala.collection.mutable.Map

/**
 * Trait for performing belief propagation.
 *
 * @tparam T The type of entries in the factors.
 */
trait BeliefPropagation[T] extends FactoredAlgorithm[T] {

  /**
   * By default, implementations that inherit this trait have no debug information.
   * Override this if you want a debugging option.
   */
  val debug: Boolean = false

  /**
   * The universe on which this belief propagation algorithm should be applied.
   */
  val universe: Universe

  /**
   * Target elements that should not be eliminated but should be available for querying.
   */
  val targetElements: List[Element[_]]

  /**
   * Since BP uses division to compute messages, the semiring has to have a division function defined
   */
  override val semiring: DivideableSemiRing[T]

  /**
   * Elements towards which queries are directed. By default, these are the target elements.
   * This is overridden by DecisionVariableElimination, where it also includes utility variables.
   */
  def starterElements: List[Element[_]] = targetElements

  /* The factor graph for this BP object */
  protected[figaro] val factorGraph: FactorGraph[T]

  /* The beliefs associated with each node in the factor graph. The belief is the product
   * of all messages to the node times any factor at the node */
  private[figaro] val beliefMap: Map[Node, Factor[T]] = Map()

  /*
   * Returns a new message from a source node to a target node.
   */
  protected[figaro] def newMessage(source: Node, target: Node): Factor[T] = {
    val message: Factor[T] = (source, target) match {
      case (f: FactorNode, v: VariableNode) => getNewMessageFactorToVar(f, v)
      case (v: VariableNode, f: FactorNode) => getNewMessageVarToFactor(v, f)
      // Messages only flow between factor and variable nodes, never between
      // two nodes of the same kind (the graph is bipartite).
      case _ => throw new UnsupportedOperationException()
    }

    if (debug) {
      println("message: " + source + " to " + target)
      println(message.toReadableString)
    }
    message
  }

  /*
   * A message from a factor Node to a variable Node is the product of the factor with
   * messages from all other Nodes (except the destination node),
   * marginalized over all variables except the variable:
   */
  private def getNewMessageFactorToVar(fn: FactorNode, vn: VariableNode) = {
    val vnFactor = factorGraph.getLastMessage(vn, fn)

    // Dividing the cached belief by the recipient's last message is equivalent to
    // (and cheaper than) multiplying the messages from all OTHER neighbors.
    val total = beliefMap(fn).combination(vnFactor, semiring.divide)
    total.marginalizeTo(semiring, vn.variable)
  }

  /*
   * A message from a variable Node to a factor Node is the product of the messages from
   * all other neighboring factor Nodes (except the recipient; alternatively one can say the
   * recipient sends the message "1"):
   */
  private def getNewMessageVarToFactor(vn: VariableNode, fn: FactorNode) = {
    val fnFactor = factorGraph.getLastMessage(fn, vn)

    // Same divide-out-the-recipient trick as in getNewMessageFactorToVar.
    val total = beliefMap(vn).combination(fnFactor, semiring.divide)
    total
  }

  /**
   * Returns the product of all messages from a source node's neighbors to itself.
   */
  def belief(source: Node) = {
    val messageList = factorGraph.getNeighbors(source) map (factorGraph.getLastMessage(_, source))

    val f = if (messageList.isEmpty) {
      // No incoming messages yet: fall back to a uniform factor over the node's variables.
      source match {
        case fn: FactorNode => factorGraph.uniformFactor(fn.variables)
        case vn: VariableNode => factorGraph.uniformFactor(List(vn.variable))
      }
    } else {
      val messageBelief = messageList.reduceLeft(_.product(_, semiring))
      // A factor node's own factor is part of its belief; variable nodes carry no local factor.
      source match {
        case fn: FactorNode => messageBelief.product(factorGraph.getFactorForNode(fn), semiring)
        case vn: VariableNode => messageBelief
      }
    }
    f
  }

  /*
   * This is intended to perform an asynchronous update of the factor graph.
   * It is unclear if this is the correct implementation since messages
   * are updating in the factor graph immediately
   */
  private def asynchronousUpdate(): Unit = {
    factorGraph.getNodes.foreach { node1 =>
      factorGraph.getNeighbors(node1).foreach { node2 =>
        factorGraph.update(node1, node2, newMessage(node1, node2))
      }
    }
    // Update the beliefs of each node
    factorGraph.getNodes.foreach(n => beliefMap.update(n, belief(n)))
  }

  /*
   * Propagates one set of synchronous message in the graph
   */
  private def synchronousUpdate(): Unit = {
    // Compute every message from the PREVIOUS iteration's state before applying any of them,
    // so updates within an iteration do not influence each other.
    val updates = factorGraph.getNodes.flatMap { node1 =>
      factorGraph.getNeighbors(node1).map { node2 =>
        (node1, node2, newMessage(node1, node2))
      }
    }
    updates.foreach { u => factorGraph.update(u._1, u._2, u._3) }
    // Update the beliefs of each node
    factorGraph.getNodes.foreach(n => beliefMap.update(n, belief(n)))
  }

  /**
   * Runs this belief propagation algorithm for one iteration. An iteration
   * consists of each node of the factor graph sending a message to each of its neighbors.
   */
  def runStep() {
    if (debug) {
      println("Factor graph: ")
      println(factorGraph.getNodes.map(n => n -> factorGraph.getNeighbors(n)).toMap.mkString("\\n"))
      println()
    }
    synchronousUpdate()
  }

  override def initialize() = {
    // Seed the belief map so the divisions in newMessage have a defined denominator.
    factorGraph.getNodes.foreach(n => beliefMap.update(n, belief(n)))
  }

}

/**
 * Trait for probabilistic BP algorithms
 */
trait ProbabilisticBeliefPropagation extends BeliefPropagation[Double] {

  /**
   * Normalize a factor
   */
  def normalize(factor: Factor[Double]): Factor[Double] = {
    //val z = factor.foldLeft(semiring.zero, _ + _)
    val z = semiring.sumMany(factor.contents.values)
    //val normedFactor = /*new Factor[Double](factor.variables)*/Factory.make[Double](factor.variables)
    // Since we're in log space, d - z = log(exp(d)/exp(z))
    factor.mapTo((d: Double) => if (z != semiring.zero) d - z else semiring.zero, factor.variables)
    //normedFactor
  }

  /*
   * Overrides newMessage in the BP with normalization at the end
   */
  override protected[figaro] def newMessage(source: Node, target: Node): Factor[Double] = {
    val newMessage = super.newMessage(source, target)
    normalize(newMessage)
    //newMessage
  }

  /**
   * Returns the factors needed for BP. Since BP operates on a complete factor graph, factors are created
   * for all elements in the universe.
*/
  def getFactors(neededElements: List[Element[_]], targetElements: List[Element[_]],
    upperBounds: Boolean = false): List[Factor[Double]] = {
    // Clear any cached factors so we rebuild from the current universe state.
    Factory.removeFactors()
    val thisUniverseFactors = (neededElements flatMap (BoundedProbFactor.make(_, upperBounds))).filterNot(_.isEmpty)
    val dependentUniverseFactors =
      for { (dependentUniverse, evidence) <- dependentUniverses }
        yield Factory.makeDependentFactor(universe, dependentUniverse, dependentAlgorithm(dependentUniverse, evidence))
    val factors = dependentUniverseFactors ::: thisUniverseFactors
    // To prevent underflow, we do all computation in log space
    factors.map(makeLogarithmic(_))
  }

  private def makeLogarithmic(factor: Factor[Double]): Factor[Double] = {
    //val result = Factory.make[Double](factor.variables)
    factor.mapTo((d: Double) => Math.log(d), factor.variables)
    //result
  }

  /**
   * Get the belief for an element
   */
  protected[figaro] def getBeliefsForElement[T](target: Element[T]): List[(Double, T)] = {
    val finalFactor = getFinalFactorForElement(target)
    if (finalFactor.isEmpty) {
      List[(Double, T)]()
    } else {
      val factor = normalize(finalFactor)
      val factorVariable = Variable(target)
      // Since all computations have been in log space, we get out of log space here to provide the final beliefs
      factorVariable.range.zipWithIndex.map(pair => (Math.exp(factor.get(List(pair._2))), pair._1.value))
    }
  }

  /**
   * Get the final factor for an element
   */
  def getFinalFactorForElement[T](target: Element[T]): Factor[Double] = {
    val targetVar = Variable(target)
    val targetNode = factorGraph.getNodes.find { node =>
      node match {
        case vn: VariableNode => vn.variable == targetVar
        case _ => false
      }
    }
    // NOTE(review): `targetNode.get` throws if the target has no node in the graph;
    // callers are expected to query only elements included in the factor graph.
    beliefMap(targetNode.get)
  }

}

/**
 * Trait for One Time BP algorithms
 */
trait OneTimeProbabilisticBeliefPropagation extends ProbabilisticBeliefPropagation with OneTime {
  val iterations: Int
  def run() = {
    if (debug) {
      val varNodes = factorGraph.getNodes.filter(_.isInstanceOf[VariableNode])
      // Collect every variable mentioned by any node in the graph.
      val allVars = (Set[Variable[_]]() /: factorGraph.getNodes)((s:
Set[Variable[_]], n: Node) => {
        val a = (n match {
          case vn: VariableNode => Set(vn.variable)
          case fn: FactorNode => fn.variables
        })
        s ++ a
      })

      println("*****************\\nElement ids:")
      for { variable <- allVars } {
        variable match {
          case elemVar: /*Extended*/ ElementVariable[_] =>
            println(variable.id + "(" + elemVar.element.name.string + ")" + "@" + elemVar.element.hashCode + ": " + elemVar.element)
          case _ =>
            println(variable.id + ": not an element variable")
        }
      }
      println("*****************\\nOriginal Factors:")
      factorGraph.getNodes.foreach { n =>
        n match {
          case fn: FactorNode => println(factorGraph.getFactorForNode(fn).toReadableString)
          case _ =>
        }
      }
      println("*****************")
    }
    for { i <- 1 to iterations } { runStep() }
  }
}

/**
 * Trait for Anytime BP algorithms
 */
trait AnytimeProbabilisticBeliefPropagation extends ProbabilisticBeliefPropagation with Anytime

/**
 * Class to implement a probability query BP algorithm
 */
abstract class ProbQueryBeliefPropagation(override val universe: Universe, targets: Element[_]*)(
  val dependentUniverses: List[(Universe, List[NamedEvidence[_]])],
  val dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double,
  depth: Int = Int.MaxValue, upperBounds: Boolean = false)
  extends ProbQueryAlgorithm
  with ProbabilisticBeliefPropagation with ProbEvidenceBeliefPropagation {

  val targetElements = targets.toList

  val queryTargets = targetElements

  // All computation is done in log space to avoid numerical underflow.
  val semiring = LogSumProductSemiring

  val (neededElements, needsBounds) = getNeededElements(starterElements, depth)

  // Depth < MaxValue implies we are using bounds
  val factors = if (depth < Int.MaxValue && needsBounds) {
    getFactors(neededElements, targetElements, upperBounds)
  } else {
    getFactors(neededElements, targetElements)
  }

  val factorGraph = new BasicFactorGraph(factors, semiring)

  def computeDistribution[T](target: Element[T]): Stream[(Double, T)] = getBeliefsForElement(target).toStream

  def computeExpectation[T](target: Element[T], function: T => Double): Double = {
    // Expectation = probability-weighted sum of the function over the target's support.
    computeDistribution(target).map((pair: (Double, T)) => pair._1 * function(pair._2)).sum
  }
}

trait ProbEvidenceBeliefPropagation extends ProbabilisticBeliefPropagation {

  // Identity in log space; otherwise take logs, mapping the semiring zero to -infinity.
  def logFcn: (Double => Double) =
    semiring match {
      case LogSumProductSemiring => (d: Double) => d
      case SumProductSemiring => (d: Double) => if (d == semiring.zero) Double.NegativeInfinity else math.log(d)
    }

  // Inverse of logFcn: recover probabilities from the semiring's representation.
  def probFcn: (Double => Double) =
    semiring match {
      case LogSumProductSemiring => (d: Double) => if (d == semiring.zero) 0 else math.exp(d)
      case SumProductSemiring => (d: Double) => d
    }

  def entropy(probFactor: Factor[Double], logFactor: Factor[Double]): Double = {
    //println("probfactor: " + probFactor.toReadableString)
    //println("logfactor: " + logFactor.toReadableString)

    // Even though the variables in each factor are the same, the order of the vars might be different
    val logFactorMapping = probFactor.variables.map(v => logFactor.variables.indexOf(v))
    def remap(l: List[Int]) = l.zipWithIndex.map(s => (s._1, logFactorMapping(s._2))).sortBy(_._2).unzip._1

    val e = (0.0 /: probFactor.allIndices)((c: Double, i: List[Int]) => {
      val p = probFcn(probFactor.get(i))
      // Entropy convention: 0 * log(0) contributes 0.
      if (p == 0) c else c + p * logFcn(logFactor.get(remap(i)))
    })
    e
  }

  /* Not true mutual information for > 2 factors, but standard for computing Bethe approximation */
  def mutualInformation(joint: Factor[Double], marginals: Iterable[Factor[Double]]) = {
    println(joint.toReadableString)
    marginals foreach (f => println(f.toReadableString))
    // Divide the joint by each marginal, then weight by the joint's probabilities.
    val newFactor = (joint /: marginals)((c: Factor[Double], n: Factor[Double]) => c.combination(n, semiring.divide))
    val mi = (0.0 /: newFactor.allIndices)((c: Double, i: List[Int]) => {
      val p = probFcn(joint.get(i))
      if (p == 0) c else c + p * logFcn(newFactor.get(i))
    })
    mi
  }

  def computeEvidence(): Double = {
    //println("Computing P(Evidence)")
    val factorNodes = factorGraph.getNodes.filter(_.isInstanceOf[FactorNode]).toList
    val varNodes = factorGraph.getNodes.filter(_.isInstanceOf[VariableNode]).toList
    val
nonZeroEvidence = factorNodes.exists(p => beliefMap(p).contents.exists(_._2 != Double.NegativeInfinity)) if (nonZeroEvidence) { //println("Computing energy") val betheEnergy = -1 * factorNodes.map(f => { entropy(normalize(beliefMap(f)), factorGraph.getFactorForNode(f.asInstanceOf[FactorNode])) }).sum //println("Computing entropy") val betheEntropy = { val factorEntropy = -1 * factorNodes.map(f => { entropy(normalize(beliefMap(f)), normalize(beliefMap(f))) }).sum val varEntropy = varNodes.map(v => { (factorGraph.getNeighbors(v).size - 1) * entropy(normalize(beliefMap(v)), normalize(beliefMap(v))) }).sum factorEntropy + varEntropy } //println("energy: " + betheEnergy + ", entropy: " + betheEntropy) math.exp(-1 * (betheEnergy - betheEntropy)) } else { 0.0 } } } object BeliefPropagation { /** * Creates a One Time belief propagation computer in the current default universe. */ def apply(myIterations: Int, targets: Element[_]*)(implicit universe: Universe) = new ProbQueryBeliefPropagation(universe, targets: _*)( List(), (u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with OneTimeProbabilisticBeliefPropagation with OneTimeProbQuery { val iterations = myIterations } /** * Creates a Anytime belief propagation computer in the current default universe. */ def apply(targets: Element[_]*)(implicit universe: Universe) = new ProbQueryBeliefPropagation(universe, targets: _*)( List(), (u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with AnytimeProbabilisticBeliefPropagation with AnytimeProbQuery /** * Create a One Time belief propagation computer current default universe, with debug information enabled. 
*/
  def debugged(myIterations: Int, targets: Element[_]*)(implicit universe: Universe) =
    new ProbQueryBeliefPropagation(universe, targets: _*)(
      List(),
      (u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with OneTimeProbabilisticBeliefPropagation with OneTimeProbQuery { val iterations = myIterations; override val debug = true }

  /**
   * Creates a one-time belief propagation computer using the given dependent universes in the
   * current default universe.
   */
  def apply(dependentUniverses: List[(Universe, List[NamedEvidence[_]])], myIterations: Int, targets: Element[_]*)(implicit universe: Universe) =
    new ProbQueryBeliefPropagation(universe, targets: _*)(
      dependentUniverses,
      (u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with OneTimeProbabilisticBeliefPropagation with OneTimeProbQuery { val iterations = myIterations }

  /**
   * Creates an anytime belief propagation computer using the given dependent universes in the
   * current default universe.
   */
  def apply(dependentUniverses: List[(Universe, List[NamedEvidence[_]])], targets: Element[_]*)(implicit universe: Universe) =
    new ProbQueryBeliefPropagation(universe, targets: _*)(
      dependentUniverses,
      (u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u)) with AnytimeProbabilisticBeliefPropagation with AnytimeProbQuery

  /**
   * Creates a one-time belief propagation computer using the given dependent universes in the
   * current default universe. Uses the given dependent algorithm function to determine the
   * algorithm to use to compute probability of evidence in each dependent universe.
   */
  def apply(
    dependentUniverses: List[(Universe, List[NamedEvidence[_]])],
    dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double,
    myIterations: Int, targets: Element[_]*)(implicit universe: Universe) =
    new ProbQueryBeliefPropagation(universe, targets: _*)(
      dependentUniverses,
      dependentAlgorithm) with OneTimeProbabilisticBeliefPropagation with OneTimeProbQuery { val iterations = myIterations }

  /**
   * Creates an anytime belief propagation computer using the given dependent universes in the
   * current default universe. Uses the given dependent algorithm function to determine the
   * algorithm to use to compute probability of evidence in each dependent universe.
   */
  def apply(
    dependentUniverses: List[(Universe, List[NamedEvidence[_]])],
    dependentAlgorithm: (Universe, List[NamedEvidence[_]]) => () => Double,
    targets: Element[_]*)(implicit universe: Universe) =
    new ProbQueryBeliefPropagation(universe, targets: _*)(
      dependentUniverses,
      dependentAlgorithm) with AnytimeProbabilisticBeliefPropagation with AnytimeProbQuery

  /**
   * Use BP to compute the probability that the given element has the given value.
   * Runs a one-time algorithm for numIterations iterations, then cleans it up.
   */
  def probability[T](target: Element[T], value: T, numIterations: Int = 10): Double = {
    val alg = BeliefPropagation(numIterations, target)
    alg.start()
    val result = alg.probability(target, value)
    alg.kill()
    result
  }

  /**
   * Lazy version of BP that operates only on bounds.
   * Expands the model only to the given depth; upperBounds selects which bound is computed.
   */
  def lazyBP(myIterations: Int, depth: Int, upperBounds: Boolean, targets: Element[_]*)(implicit universe: Universe) =
    new ProbQueryBeliefPropagation(universe, targets: _*)(
      List(),
      (u: Universe, e: List[NamedEvidence[_]]) => () => ProbEvidenceSampler.computeProbEvidence(10000, e)(u),
      depth, upperBounds) with OneTimeProbabilisticBeliefPropagation with OneTimeProbQuery { val iterations = myIterations; override val debug = false }
}
bruttenberg/figaro
Figaro/src/main/scala/com/cra/figaro/algorithm/factored/beliefpropagation/BeliefPropagation.scala
Scala
bsd-3-clause
19,960
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package de.fuberlin.wiwiss.silk.learning.active.linkselector

import de.fuberlin.wiwiss.silk.entity._
import de.fuberlin.wiwiss.silk.evaluation.ReferenceEntities
import de.fuberlin.wiwiss.silk.linkagerule.LinkageRule
import de.fuberlin.wiwiss.silk.linkagerule.input.PathInput
import de.fuberlin.wiwiss.silk.linkagerule.similarity.{Aggregation, Comparison}
import de.fuberlin.wiwiss.silk.plugins.aggegrator.MinimumAggregator
import de.fuberlin.wiwiss.silk.plugins.distance.equality.EqualityMetric
import de.fuberlin.wiwiss.silk.util.DPair
import scala.Some

/**
 * Manual test driver for [[JensenShannonDivergenceSelector]].
 *
 * Builds a tiny movie-like data set (label + date paths), a set of reference links
 * and candidate rules, then prints which unlabeled links each selector would pick.
 * Run as a plain application; results are printed to stdout.
 */
object LinkSelectorTest extends App {
  // Same selector with both settings of fulfilledOnly, to compare their choices.
  val selector1: LinkSelector = JensenShannonDivergenceSelector(fulfilledOnly = true)
  val selector2: LinkSelector = JensenShannonDivergenceSelector(fulfilledOnly = false)

  val referenceLinks = ReferenceEntities.fromEntities(
    positiveEntities = entities("Frankenstein", "2000", "Frankenstein", "2000") :: Nil,
    negativeEntities = entities("Frankenstein", "2000", "Rambo", "1900") :: entities("Frankenstein", "2000", "Matrix", "2000") :: Nil
  )

  val unlabeledLinks = Seq(
    link("Frankenstein", "2000", "Rambo", "1900"),
    link("Frankenstein", "2000", "Frankenstein", "1900"),
    link("Frankenstein", "2000", "Matrix", "2000"),
    link("Frankenstein", "2000", "Frankenstein", "2000")
  )

  // NOTE(review): rule(false, true) appears twice — presumably intentional to weight the
  // date-only rule in the committee; confirm before deduplicating.
  val rules =
    rule(true, true) ::
    rule(false, true) ::
    rule(false, true) ::
    rule(true, false) :: Nil

  println(selector1(rules, unlabeledLinks, referenceLinks))
  println(selector2(rules, unlabeledLinks, referenceLinks))

  /** Builds a rule comparing label and/or date by equality; both => minimum aggregation. */
  def rule(matchLabel: Boolean, matchDate: Boolean) = {
    def labelComparison = Comparison(
      metric = EqualityMetric(),
      inputs = DPair(PathInput(path = Path.parse("?a/<label>")), PathInput(path = Path.parse("?b/<label>")))
    )

    def dateComparison = Comparison(
      metric = EqualityMetric(),
      inputs = DPair(PathInput(path = Path.parse("?a/<date>")), PathInput(path = Path.parse("?b/<date>")))
    )

    val operator = (matchLabel, matchDate) match {
      case (false, false) => None
      case (true, false) => Some(labelComparison)
      case (false, true) => Some(dateComparison)
      case (true, true) => {
        Some(
          Aggregation(
            aggregator = MinimumAggregator(),
            operators = Seq(labelComparison, dateComparison)
          )
        )
      }
    }

    new WeightedLinkageRule(operator, 0.0)
  }

  /** Builds a link between two synthetic entities identified by label+date. */
  def link(label1: String, date1: String, label2: String, date2: String) = {
    new Link(
      source = label1 + date1,
      target = label2 + date2,
      entities = Some(entities(label1, date1, label2, date2))
    )
  }

  /** Builds the source/target entity pair carrying label and date values. */
  def entities(label1: String, date1: String, label2: String, date2: String) = {
    val sourceEntityDesc = EntityDescription("a", SparqlRestriction.empty, IndexedSeq(Path.parse("?a/<label>"), Path.parse("?a/<date>")))
    val targetEntityDesc = EntityDescription("b", SparqlRestriction.empty, IndexedSeq(Path.parse("?b/<label>"), Path.parse("?b/<date>")))

    DPair(
      source = new Entity(label1 + date1, IndexedSeq(Set(label1), Set(date1)), sourceEntityDesc),
      target = new Entity(label2 + date2, IndexedSeq(Set(label2), Set(date2)), targetEntityDesc)
    )
  }
}
fusepoolP3/p3-silk
silk-learning/src/test/scala/de/fuberlin/wiwiss/silk/learning/active/linkselector/LinkSelectorTest.scala
Scala
apache-2.0
3,924
/**
 * Copyright 2012-2013 StackMob
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.stackmob.scaliak.tests.util

import com.basho.riak.client.cap.VClock
import com.basho.riak.client.raw.RiakResponse
import org.specs2._
import mock._
import com.basho.riak.client.{RiakLink, IRiakObject}
import scala.collection.JavaConverters._
import com.basho.riak.client.query.indexes.{IntIndex, BinIndex}

/**
 * Mockito helpers for building stubbed Riak client objects in specs2 tests.
 * Mix into a Specification that also mixes in Mockito.
 */
trait MockRiakUtils { this: Specification with Mockito =>

  /**
   * Builds a mocked IRiakObject whose getters return the supplied values.
   *
   * @param bucket       bucket name returned by getBucket
   * @param key          key returned by getKey
   * @param value        raw bytes returned by getValue
   * @param contentType  value returned by getContentType
   * @param vClockStr    string form of the vector clock; getBytes returns its UTF bytes
   * @param links        Riak links, converted to a Java list
   * @param metadata     user metadata, converted to a Java map
   * @param vTag         vtag returned by getVtag
   * @param lastModified timestamp returned by getLastModified (defaults to "now")
   * @param binIndexes   binary secondary indexes, keyed by index name
   * @param intIndexes   integer secondary indexes, keyed by index name
   */
  def mockRiakObj(bucket: String,
                  key: String,
                  value: Array[Byte],
                  contentType: String,
                  vClockStr: String,
                  links: List[RiakLink] = List(),
                  metadata: Map[String, String] = Map(),
                  vTag: String = "",
                  lastModified: java.util.Date = new java.util.Date(System.currentTimeMillis),
                  binIndexes: Map[String, Set[String]] = Map(),
                  intIndexes: Map[String, Set[Int]] = Map()): IRiakObject = {
    val mocked = mock[IRiakObject]
    val mockedVClock = mock[VClock]
    mockedVClock.asString returns vClockStr
    mockedVClock.getBytes returns vClockStr.getBytes

    mocked.getKey returns key
    mocked.getValue returns value
    mocked.getBucket returns bucket
    mocked.getVClock returns mockedVClock
    mocked.getContentType returns contentType
    mocked.getLinks returns links.asJava
    mocked.getVtag returns vTag
    mocked.getLastModified returns lastModified
    mocked.getMeta returns metadata.asJava
    mocked.allBinIndexes returns ((for { (k,v) <- binIndexes } yield (BinIndex.named(k), v.asJava)).toMap.asJava)
    // Long.valueOf instead of the deprecated boxing constructor `new java.lang.Long(_)`;
    // behavior is identical (and valueOf may reuse cached instances).
    mocked.allIntIndexesV2 returns ((for { (k,v) <- intIndexes } yield (IntIndex.named(k), v.map(java.lang.Long.valueOf(_)).asJava)).toMap.asJava)
    mocked
  }

  /**
   * Builds a mocked RiakResponse wrapping the given objects;
   * numberOfValues mirrors the array length.
   */
  def mockRiakResponse(objects: Array[IRiakObject]) = {
    val mocked = mock[RiakResponse]
    mocked.getRiakObjects returns objects
    mocked.numberOfValues returns objects.length
    mocked
  }
}
stackmob/scaliak
src/test/scala/com/stackmob/scaliak/tests/util/MockRiakUtils.scala
Scala
apache-2.0
2,536
package org.jetbrains.sbt package project.template import com.intellij.platform.ProjectTemplate /** * User: Dmitry.Naydanov, Pavel Fatin * Date: 11.03.14. */ class SbtProjectTemplate extends ProjectTemplate { override def getName = "SBT" override def getDescription = "SBT-based Scala project" override def getIcon = Sbt.Icon override def createModuleBuilder() = new SbtModuleBuilder() override def validateSettings() = null }
LPTK/intellij-scala
src/org/jetbrains/sbt/project/template/SbtProjectTemplate.scala
Scala
apache-2.0
446
/*
 * Copyright (c) 2014-2021 by The Monix Project Developers.
 * See the project homepage at: https://monix.io
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package monix.eval.internal

import cats.effect.CancelToken
import monix.catnap.CancelableF
import monix.execution.Callback
import monix.eval.Task
import monix.execution.atomic.{Atomic, PaddingStrategy}

private[eval] object TaskRaceList {
  /**
   * Implementation for `Task.raceList`: starts every task, completes with the
   * outcome of the first one to finish, and cancels all the others.
   */
  def apply[A](tasks: Iterable[Task[A]]): Task[A] =
    Task.Async(new Register(tasks), trampolineBefore = true, trampolineAfter = true)

  // Implementing Async's "start" via `ForkedStart` in order to signal
  // that this is a task that forks on evaluation.
  //
  // N.B. the contract is that the injected callback gets called after
  // a full async boundary!
  private final class Register[A](tasks: Iterable[Task[A]]) extends ForkedRegister[A] {

    def apply(context: Task.Context, callback: Callback[Throwable, A]): Unit = {
      implicit val s = context.scheduler
      val conn = context.connection

      // "First finisher wins" flag: getAndSet(false) succeeds for exactly one
      // competitor. Cache-line padded to reduce false sharing under contention.
      val isActive = Atomic.withPadding(true, PaddingStrategy.LeftRight128)

      val taskArray = tasks.toArray
      // One connection per task so each competitor can be cancelled individually;
      // all of them are registered on the parent connection.
      val cancelableArray = buildCancelableArray(taskArray.length)
      conn.pushConnections(cancelableArray.toIndexedSeq: _*)

      var index = 0
      while (index < taskArray.length) {
        val task = taskArray(index)
        val taskCancelable = cancelableArray(index)
        val taskContext = context.withConnection(taskCancelable)
        index += 1

        Task.unsafeStartEnsureAsync(
          task,
          taskContext,
          new Callback[Throwable, A] {
            // Pops this race's frame off the parent connection and cancels every
            // competitor except the task whose callback is firing.
            private def popAndCancelRest(): CancelToken[Task] = {
              conn.pop()
              val arr2 = cancelableArray.collect { case cc if cc ne taskCancelable => cc.cancel }
              CancelableF.cancelAllTokens[Task](arr2.toIndexedSeq: _*)
            }

            def onSuccess(value: A): Unit =
              // Only the first completion signals the downstream callback;
              // later successes are simply dropped.
              if (isActive.getAndSet(false)) {
                popAndCancelRest().map(_ => callback.onSuccess(value)).runAsyncAndForget
              }

            def onError(ex: Throwable): Unit =
              if (isActive.getAndSet(false)) {
                popAndCancelRest().map(_ => callback.onError(ex)).runAsyncAndForget
              } else {
                // Race already decided: report the late error instead of losing it.
                s.reportFailure(ex)
              }
          }
        )
      }
    }
  }

  // Allocates one fresh TaskConnection per racing task.
  private def buildCancelableArray(n: Int): Array[TaskConnection] = {
    val array = new Array[TaskConnection](n)
    var i = 0
    while (i < n) { array(i) = TaskConnection(); i += 1 }
    array
  }
}
monixio/monix
monix-eval/shared/src/main/scala/monix/eval/internal/TaskRaceList.scala
Scala
apache-2.0
3,188
package cdmuhlb.dgview.color

import math.{pow, rint}

/**
 * sRGB transfer function (IEC 61966-2-1) and 8-bit encode/decode helpers.
 * Double and Float overloads are provided for each operation.
 */
object SRgbUtils {
  /** Linear light -> nonlinear sRGB value; both nominally in [0, 1]. */
  def forwardTransfer(v: Double): Double = {
    if (v <= 0.0031308) 12.92*v
    else 1.055*pow(v, 1.0/2.4) - 0.055
  }

  def forwardTransfer(v: Float): Float = {
    if (v <= 0.0031308f) 12.92f*v
    else 1.055f*pow(v, 1.0/2.4).toFloat - 0.055f
  }

  /** Nonlinear sRGB value -> linear light; inverse of `forwardTransfer`. */
  def reverseTransfer(v: Double): Double = {
    if (v <= 0.04045) v/12.92
    else pow((v + 0.055)/1.055, 2.4)
  }

  def reverseTransfer(v: Float): Float = {
    if (v <= 0.04045f) v/12.92f
    else pow((v + 0.055f)/1.055f, 2.4).toFloat
  }

  /** Rounds a nominal [0, 1] value to an 8-bit code, clamping into [0, 255]. */
  def encode(v: Double): Int = {
    rint(255.0*v).toInt.min(255).max(0)
  }

  def encode(v: Float): Int = {
    rint(255.0f*v).toInt.min(255).max(0)
  }

  /** Decodes an 8-bit code to [0, 1]; b must lie in [0, 255]. */
  def decode(b: Int): Double = {
    require((b >= 0) && (b <= 255))
    b / 255.0
  }

  def decodeFloat(b: Int): Float = {
    require((b >= 0) && (b <= 255))
    b / 255.0f
  }
}

/**
 * ITU-R BT.709 opto-electronic transfer function, luma/chroma derivation and
 * the standard limited-range (16-235 / 16-240) digital encodings.
 */
object Bt709Utils {
  /** Linear component -> nonlinear BT.709 signal. */
  def forwardTransfer(c: Double): Double = {
    if (c <= 0.018) 4.5*c
    else 1.099*pow(c, 0.45) - 0.099
  }

  def forwardTransfer(c: Float): Float = {
    if (c <= 0.018f) 4.5f*c
    else 1.099f*pow(c, 0.45).toFloat - 0.099f
  }

  /** Nonlinear BT.709 signal -> linear component; inverse of `forwardTransfer`. */
  def reverseTransfer(c: Double): Double = {
    if (c <= 0.081) c/4.5
    else pow((c + 0.099)/1.099, 1.0/0.45)
  }

  def reverseTransfer(c: Float): Float = {
    if (c <= 0.081f) c/4.5f
    else pow((c + 0.099f)/1.099f, 1.0/0.45).toFloat
  }

  /** Luma from nonlinear R'G'B' using the BT.709 coefficients. */
  def eY(r: Double, g: Double, b: Double): Double = {
    0.2126*r + 0.7152*g + 0.0722*b
  }

  def eY(r: Float, g: Float, b: Float): Float = {
    0.2126f*r + 0.7152f*g + 0.0722f*b
  }

  /** Blue-difference chroma component. */
  def ePb(b: Double, eY: Double): Double = {
    0.5*(b - eY) / 0.9278
  }

  def ePb(b: Float, eY: Float): Float = {
    0.5f*(b - eY) / 0.9278f
  }

  /** Red-difference chroma component. */
  def ePr(r: Double, eY: Double): Double = {
    0.5*(r - eY) / 0.7874
  }

  def ePr(r: Float, eY: Float): Float = {
    0.5f*(r - eY) / 0.7874f
  }

  /** Encodes luma into the limited digital range, clamped to [1, 254]. */
  def encodeY(eY: Double): Int = {
    rint(219.0*eY + 16.0).toInt.min(254).max(1)
  }

  def encodeY(eY: Float): Int = {
    rint(219.0f*eY + 16.0f).toInt.min(254).max(1)
  }

  /** Encodes a chroma component into the limited digital range, clamped to [1, 254]. */
  def encodeC(eP: Double): Int = {
    rint(224.0*eP + 128.0).toInt.min(254).max(1)
  }

  def encodeC(eP: Float): Int = {
    rint(224.0f*eP + 128.0f).toInt.min(254).max(1)
  }

  /** Decodes a limited-range luma code back to a nominal [0, 1] value. */
  def decodeY(d: Int): Double = {
    require((d >= 1) && (d <= 254))
    (d - 16)/219.0
  }
}

/**
 * CIE L*a*b* helper functions (the `f` function and its inverse).
 */
object LabUtils {
  // CIE XYZ normalization for 6504 K
  val Xn = 0.95047
  val Yn = 1.0
  val Zn = 1.0883

  /** The Lab `f` function: cube root above the linear-segment threshold. */
  def pauliF(q: Double): Double = {
    if (q > 216.0/24389.0) pow(q, 1.0/3.0)
    else (841.0/108.0)*q + 4.0/29.0
  }

  // `.toFloat` replaces the original `.asInstanceOf[Float]` casts: for numeric
  // primitives both compile to the same d2f conversion, but toFloat is the
  // idiomatic (and intention-revealing) form.
  def pauliF(q: Float): Float = {
    if (q > (216.0/24389.0).toFloat) pow(q, 1.0/3.0).toFloat
    else (841.0/108.0).toFloat*q + (4.0/29.0).toFloat
  }

  /** Inverse of `pauliF`. */
  def pauliFInverse(t: Double): Double = {
    if (t > 6.0/29.0) t*t*t
    else (108.0/841.0)*(t - 4.0/29.0)
  }

  def pauliFInverse(t: Float): Float = {
    if (t > (6.0/29.0).toFloat) t*t*t
    else (108.0/841.0).toFloat*(t - (4.0/29.0).toFloat)
  }
}

object ColorUtils {
  /**
   * Lightness is defined to range from 0 to 100.
   * Output ranges from 0 to 1.
   */
  def lightnessToSRgbValue(labL: Double): Double = {
    require((labL >= 0.0) && (labL <= 100.0))
    val t = (labL + 16.0)/116.0
    val y = LabUtils.Yn*LabUtils.pauliFInverse(t)
    SRgbUtils.forwardTransfer(y)
  }
}
cdmuhlb/DgView
src/main/scala/cdmuhlb/dgview/color/ColorUtils.scala
Scala
mit
3,398
package filodb.core.store

import com.typesafe.scalalogging.slf4j.StrictLogging
import java.nio.ByteBuffer
import org.velvia.filo.RowReader
import org.velvia.filo.RowReader.TypedFieldExtractor
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.language.existentials
import spray.caching._

import filodb.core._
import filodb.core.metadata.{Column, Projection, RichProjection}

// Describes what portion of a dataset a scan should cover.
sealed trait ScanMethod
case class SinglePartitionScan(partition: Any) extends ScanMethod
case class SinglePartitionRangeScan(keyRange: KeyRange[_, _]) extends ScanMethod
case class FilteredPartitionScan(split: ScanSplit,
                                 filter: Any => Boolean = (a: Any) => true) extends ScanMethod
case class FilteredPartitionRangeScan(split: ScanSplit,
                                      start: Any, end: Any,
                                      filter: Any => Boolean = (a: Any) => true) extends ScanMethod

trait ScanSplit {
  // Should return a set of hostnames or IP addresses describing the preferred hosts for that scan split
  def hostnames: Set[String]
}

/**
 * High-level interface of a column store.  Writes and reads segments, which are pretty high level.
 * Most implementations will probably want to be based on something like the CachedMergingColumnStore
 * below, which implements much of the business logic and gives lower level primitives.  This trait
 * exists though to allow special implementations that want to use different lower level primitives.
 */
trait ColumnStore {
  import filodb.core.Types._
  import RowReaderSegment._

  def ec: ExecutionContext
  implicit val execContext = ec

  /**
   * Initializes the column store for a given dataset projection.  Must be called once before appending
   * segments to that projection.
   */
  def initializeProjection(projection: Projection): Future[Response]

  /**
   * Clears all data from the column store for that given projection, for all versions.
   * NOTE: please make sure there are no reprojections or writes going on before calling this
   */
  def clearProjectionData(projection: Projection): Future[Response]

  /**
   * Appends the segment to the column store.  The passed in segment must be somehow merged with an existing
   * segment to produce a new segment that has combined data such that rows with new unique primary keys
   * are appended and rows with existing primary keys will overwrite.  Also, the sort order must somehow be
   * preserved such that the chunk/row# in the ChunkRowMap can be read out in sort key order.
   * @param segment the partial Segment to write / merge to the columnar store
   * @param version the version # to write the segment to
   * @return Success. Future.failure(exception) otherwise.
   */
  def appendSegment(projection: RichProjection,
                    segment: Segment,
                    version: Int): Future[Response]

  /**
   * Scans segments from a dataset.  ScanMethod determines what gets scanned.
   *
   * @param projection the Projection to read from
   * @param columns the set of columns to read back.  Order determines the order of columns read back
   *                in each row
   * @param version the version # to read from
   * @param method ScanMethod determining what to read from
   * @return An iterator over RowReaderSegment's
   */
  def scanSegments(projection: RichProjection,
                   columns: Seq[Column],
                   version: Int,
                   method: ScanMethod): Future[Iterator[Segment]]

  /**
   * Scans over segments, just like scanSegments, but returns an iterator of RowReader
   * for all of those row-oriented applications.  Contains a high performance iterator
   * implementation, probably faster than trying to do it yourself.  :)
   */
  def scanRows(projection: RichProjection,
               columns: Seq[Column],
               version: Int,
               method: ScanMethod,
               readerFactory: RowReaderFactory = DefaultReaderFactory): Future[Iterator[RowReader]] = {
    for { segmentIt <- scanSegments(projection, columns, version, method) }
    yield {
      if (segmentIt.hasNext) {
        // TODO: fork this kind of code into a macro, called fastFlatMap.
        // That's what we really need... :-p
        new Iterator[RowReader] {
          // Pulls the row iterator out of the next segment.
          final def getNextRowIt: Iterator[RowReader] = {
            val readerSeg = segmentIt.next.asInstanceOf[RowReaderSegment]
            readerSeg.rowIterator(readerFactory)
          }

          var rowIt: Iterator[RowReader] = getNextRowIt

          // Skips over empty segments until a row is found or segments run out.
          final def hasNext: Boolean = {
            var _hasNext = rowIt.hasNext
            while (!_hasNext) {
              if (segmentIt.hasNext) {
                rowIt = getNextRowIt
                _hasNext = rowIt.hasNext
              } else {
                // all done. No more segments.
                return false
              }
            }
            _hasNext
          }

          final def next: RowReader = rowIt.next.asInstanceOf[RowReader]
        }
      } else {
        Iterator.empty
      }
    }
  }

  /**
   * Determines how to split the scanning of a dataset across a columnstore.
   * @param dataset the name of the dataset to determine splits for
   * @param splitsPerNode the number of splits to target per node.  May not actually be possible.
   * @return a Seq[ScanSplit]
   */
  def getScanSplits(dataset: TableName, splitsPerNode: Int = 1): Seq[ScanSplit]

  /**
   * Shuts down the ColumnStore, including any threads that might be hanging around
   */
  def shutdown(): Unit
}

// A column's worth of chunks: (segmentId, chunkId, chunk bytes) triples.
case class ChunkedData(column: Types.ColumnId, chunks: Seq[(Types.SegmentId, Types.ChunkID, ByteBuffer)])

/**
 * A partial implementation of a ColumnStore, based on separating storage of chunks and ChunkRowMaps,
 * use of a segment cache to speed up merging, and a ChunkMergingStrategy to merge segments.  It defines
 * lower level primitives and implements the ColumnStore methods in terms of these primitives.
 */
trait CachedMergingColumnStore extends ColumnStore with ColumnStoreScanner with StrictLogging {
  import filodb.core.Types._
  import filodb.core.Iterators._

  def segmentCache: Cache[Segment]

  def mergingStrategy: ChunkMergingStrategy

  // This ExecutionContext is the default used for writing, it should have bounds set
  implicit val ec: ExecutionContext

  def clearProjectionData(projection: Projection): Future[Response] = {
    // Clear out any entries from segmentCache first
    logger.info(s"Clearing out segment cache for dataset ${projection.dataset}")
    segmentCache.keys.foreach { case key @ (dataset, _, _, _) =>
      if (dataset == projection.dataset) segmentCache.remove(key)
    }
    logger.info(s"Clearing all columnar projection data for dataset ${projection.dataset}")
    clearProjectionDataInner(projection)
  }

  def clearProjectionDataInner(projection: Projection): Future[Response]

  /**
   * == Lower level storage engine primitives ==
   */

  /**
   * Writes chunks to underlying storage.
   * @param chunks an Iterator over triples of (columnName, chunkId, chunk bytes)
   * @return Success. Future.failure(exception) otherwise.
   */
  def writeChunks(dataset: TableName,
                  partition: BinaryPartition,
                  version: Int,
                  segmentId: SegmentId,
                  chunks: Iterator[(ColumnId, ChunkID, ByteBuffer)]): Future[Response]

  // Persists the ChunkRowMap index for a segment.
  def writeChunkRowMap(dataset: TableName,
                       partition: BinaryPartition,
                       version: Int,
                       segmentId: SegmentId,
                       chunkRowMap: ChunkRowMap): Future[Response]

  /**
   * == Caching and merging implementation of the high level functions ==
   */

  def clearSegmentCache(): Unit = { segmentCache.clear() }

  def appendSegment(projection: RichProjection,
                    segment: Segment,
                    version: Int): Future[Response] = {
    // Nothing to merge or write for an empty segment.
    if (segment.isEmpty) return(Future.successful(NotApplied))
    for { oldSegment <- getSegFromCache(projection.toRowKeyOnlyProjection, segment, version)
          mergedSegment = mergingStrategy.mergeSegments(oldSegment, segment)
          writeChunksResp <- writeBatchedChunks(projection.datasetName, version, mergedSegment)
          writeCRMapResp <- writeChunkRowMap(projection.datasetName, segment.binaryPartition, version,
                                             segment.segmentId, mergedSegment.index)
          if writeChunksResp == Success }
    yield {
      // Important! Update the cache with the new merged segment.
      updateCache(projection, version, mergedSegment)
      writeCRMapResp
    }
  }

  def chunkBatchSize: Int

  // Writes a segment's chunks in batches of chunkBatchSize; returns the first batch's response.
  private def writeBatchedChunks(dataset: TableName, version: Int, segment: Segment): Future[Response] = {
    val binPartition = segment.binaryPartition
    Future.traverse(segment.getChunks.grouped(chunkBatchSize).toSeq) { chunks =>
      writeChunks(dataset, binPartition, version, segment.segmentId, chunks.toIterator)
    }.map { responses => responses.head }
  }

  // Fetches the existing segment for merging, populating the cache on a miss.
  private def getSegFromCache(projection: RichProjection,
                              segment: Segment,
                              version: Int): Future[Segment] = {
    segmentCache((projection.datasetName, segment.binaryPartition, version, segment.segmentId)) {
      val newSegInfo = segment.segInfo.basedOn(projection)
      mergingStrategy.readSegmentForCache(projection, version)(newSegInfo)(readEc)
    }
  }

  private def updateCache(projection: RichProjection,
                          version: Int,
                          newSegment: Segment): Unit = {
    // NOTE: Spray caching doesn't have an update() method, so we have to delete and then repopulate. :(
    // TODO: consider if we need to lock the code below.  Probably not since ColumnStore is single-writer but
    // we might want to update the spray-caching API to have an update method.
    val key = (projection.datasetName, newSegment.binaryPartition, version, newSegment.segmentId)
    segmentCache.remove(key)
    segmentCache(key)(mergingStrategy.pruneForCache(newSegment))
  }
}
adeandrade/FiloDB
core/src/main/scala/filodb.core/store/ColumnStore.scala
Scala
apache-2.0
10,144
package com.twitter.finatra.http import com.twitter.finagle.http.{Method => HttpMethod} import com.twitter.finatra.http.internal.marshalling.CallbackConverter import com.twitter.finatra.http.internal.routing.Route import com.twitter.finatra.http.routing.AdminIndexInfo import com.twitter.inject.Injector private[http] class RouteBuilder[RequestType: Manifest, ResponseType: Manifest]( method: HttpMethod, route: String, name: String, admin: Boolean, adminIndexInfo: Option[AdminIndexInfo], callback: RequestType => ResponseType, routeDsl: RouteDSL) { def build(callbackConverter: CallbackConverter, injector: Injector) = Route( name, method, route, admin, adminIndexInfo, callbackConverter.convertToFutureResponse(callback), routeDsl.annotations, manifest[RequestType].runtimeClass, manifest[ResponseType].runtimeClass, routeDsl.buildFilter(injector)) }
syamantm/finatra
http/src/main/scala/com/twitter/finatra/http/RouteBuilder.scala
Scala
apache-2.0
913
package io.github.finagle.smtp.filter import com.twitter.finagle.{Filter, Service} import com.twitter.util.Future import io.github.finagle.smtp._ import io.github.finagle.smtp.extension.{BodyEncoding, ExtendedMailingSession} /** * Sends [[io.github.finagle.smtp.EmailMessage]], transforming it to a sequence of SMTP commands. */ object MailFilter extends Filter[EmailMessage, Unit, Request, Reply]{ override def apply(msg: EmailMessage, send: Service[Request, Reply]): Future[Unit] = { val body = msg.body val bodyEnc = body.contentTransferEncoding match { case "8bit" => BodyEncoding.EightBit case "binary" => BodyEncoding.Binary case _ => BodyEncoding.SevenBit } val envelope: Seq[Request] = Seq(ExtendedMailingSession(msg.sender) .messageSize(body.size) .bodyEncoding(bodyEnc)) ++ msg.to.map(Request.AddRecipient(_)) ++ msg.cc.map(Request.AddRecipient(_)) ++ msg.bcc.map(Request.AddRecipient(_)) val data: Seq[Request] = body match { case mimepart: MimePart => Seq(Request.MimeData(mimepart)) case multipart: MimeMultipart => Seq(Request.TextData(multipart.getMimeHeaders)) ++ { for (part <- multipart.parts) yield Seq ( Request.TextData(Seq(multipart.delimiter)), Request.MimeData(part) ) }.flatten ++ Seq(Request.TextData(Seq(multipart.closingDelimiter))) } val reqs: Seq[Request] = Seq(Request.Reset) ++ envelope ++ Seq(Request.BeginData) ++ data val freqs = for (req <- reqs) yield send(req) Future.join(freqs) } }
finagle/finagle-smtp
src/main/scala/io/github/finagle/smtp/filter/MailFilter.scala
Scala
apache-2.0
1,659
package examples.formvalidation import java.net.URL import com.twitter.finagle.Http import com.twitter.finagle.http.filter.Cors import com.twitter.finagle.http.filter.Cors.HttpFilter import com.twitter.finagle.http.path.Root import com.twitter.util.Await import io.fintrospect.RouteModule import io.fintrospect.formats.Html import io.fintrospect.renderers.SiteMapModuleRenderer import io.fintrospect.templating.{MustacheTemplates, RenderView} /** * This example shows how to use Body.webform() and a templating engine to construct a validating form, with custom messages * for each field. */ object FormValidationApp extends App { val renderView = new RenderView(Html.ResponseBuilder, MustacheTemplates.HotReload("src/main/resources")) val module = RouteModule(Root, new SiteMapModuleRenderer(new URL("http://my.cool.app")), renderView) .withDescriptionPath(_ / "sitemap.xml") .withRoutes(new ReportAge(new GreetingDatabase)) println("See the validating form at: http://localhost:8181") Await.ready( Http.serve(":8181", new HttpFilter(Cors.UnsafePermissivePolicy).andThen(module.toService)) ) }
daviddenton/fintrospect
src/main/scala/examples/formvalidation/FormValidationApp.scala
Scala
apache-2.0
1,131
package org.http4s.server.middleware.authentication import java.util.LinkedHashMap import scala.annotation.tailrec private[authentication] object NonceKeeper { sealed abstract class Reply case object StaleReply extends Reply case object OKReply extends Reply case object BadNCReply extends Reply } /** * A thread-safe class used to manage a database of nonces. * * @param staleTimeout Amount of time (in milliseconds) after which a nonce * is considered stale (i.e. not used for authentication * purposes anymore). * @param bits The number of random bits a nonce should consist of. */ private[authentication] class NonceKeeper( staleTimeout: Long, nonceCleanupInterval: Long, bits: Int) { require(bits > 0, "Please supply a positive integer for bits.") private val nonces = new LinkedHashMap[String, Nonce] private var lastCleanup = System.currentTimeMillis() /** * Removes nonces that are older than staleTimeout * Note: this _MUST_ be executed inside a block synchronized on `nonces` */ private def checkStale() = { val d = System.currentTimeMillis() if (d - lastCleanup > nonceCleanupInterval) { lastCleanup = d // Because we are using an LinkedHashMap, the keys will be returned in the order they were // inserted. Therefor, once we reach a non-stale value, the remaining values are also not stale. val it = nonces.values().iterator() @tailrec def dropStale(): Unit = if (it.hasNext && staleTimeout > d - it.next().created.getTime) { it.remove() dropStale() } dropStale() } } /** * Get a fresh nonce in form of a {@link String}. * @return A fresh nonce. */ def newNonce(): String = { var n: Nonce = null nonces.synchronized { checkStale() do { n = Nonce.gen(bits) } while (nonces.get(n.data) != null) nonces.put(n.data, n) } n.data } /** * Checks if the nonce {@link data} is known and the {@link nc} value is * correct. If this is so, the nc value associated with the nonce is increased * and the appropriate status is returned. * @param data The nonce. * @param nc The nonce counter. 
* @return A reply indicating the status of (data, nc). */ def receiveNonce(data: String, nc: Int): NonceKeeper.Reply = nonces.synchronized { checkStale() nonces.get(data) match { case null => NonceKeeper.StaleReply case n: Nonce => { if (nc > n.nc) { n.nc = n.nc + 1 NonceKeeper.OKReply } else NonceKeeper.BadNCReply } } } }
reactormonk/http4s
server/src/main/scala/org/http4s/server/middleware/authentication/NonceKeeper.scala
Scala
apache-2.0
2,719
package org.jetbrains.plugins.scala package testingSupport import java.util.concurrent.atomic.AtomicReference import com.intellij.execution.configurations.RunnerSettings import com.intellij.execution.executors.DefaultRunExecutor import com.intellij.execution.impl.DefaultJavaProgramRunner import com.intellij.execution.process.{ProcessAdapter, ProcessEvent, ProcessHandler, ProcessListener} import com.intellij.execution.runners.{ExecutionEnvironmentBuilder, ProgramRunner} import com.intellij.execution.testframework.AbstractTestProxy import com.intellij.execution.testframework.sm.runner.ui.SMTRunnerConsoleView import com.intellij.execution.ui.RunContentDescriptor import com.intellij.execution.{Executor, PsiLocation, RunnerAndConfigurationSettings} import com.intellij.ide.structureView.newStructureView.StructureViewComponent import com.intellij.ide.util.treeView.smartTree.{NodeProvider, TreeElement, TreeElementWrapper} import com.intellij.openapi.Disposable import com.intellij.openapi.fileEditor.FileDocumentManager import com.intellij.openapi.util.Key import com.intellij.psi.impl.file.PsiDirectoryFactory import com.intellij.psi.{PsiElement, PsiManager} import com.intellij.testFramework.UsefulTestCase import com.intellij.util.concurrency.Semaphore import org.jetbrains.plugins.scala.debugger.{ScalaDebuggerTestBase, ScalaVersion_2_11} import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile import org.jetbrains.plugins.scala.lang.psi.impl.{ScalaPsiManager, ScPackageImpl} import org.jetbrains.plugins.scala.lang.structureView.ScalaStructureViewModel import org.jetbrains.plugins.scala.lang.structureView.elements.impl.TestStructureViewElement import org.jetbrains.plugins.scala.testingSupport.test.structureView.TestNodeProvider import org.jetbrains.plugins.scala.testingSupport.test.{AbstractTestConfigurationProducer, AbstractTestRunConfiguration} /** * @author Roman.Shein * Date: 03.03.14 */ abstract class ScalaTestingTestCase(private val configurationProducer: 
AbstractTestConfigurationProducer) extends ScalaDebuggerTestBase with IntegrationTest with ScalaVersion_2_11 { override val testDataBasePrefix = "testingSupport" protected val useDynamicClassPath = false override protected def runFileStructureViewTest(testClassName: String, status: Int, tests: String*) = { val structureViewRoot = buildFileStructure(testClassName + ".scala") for (test <- tests) { assert(checkTestNodeInFileStructure(structureViewRoot, test, None, status)) } } override protected def runFileStructureViewTest(testClassName: String, testName: String, parentTestName: Option[String], testStatus: Int = TestStructureViewElement.normalStatusId) = { val structureViewRoot = buildFileStructure(testClassName + ".scala") assert(checkTestNodeInFileStructure(structureViewRoot, testName, parentTestName, testStatus)) } override protected def buildFileStructure(fileName: String): TreeElementWrapper = { val ioFile = new java.io.File(srcDir, fileName) val file = PsiManager.getInstance(getProject).findFile(getVirtualFile(ioFile)) val treeViewModel = new ScalaStructureViewModel(file.asInstanceOf[ScalaFile]) { override def isEnabled(provider: NodeProvider[_ <: TreeElement]): Boolean = provider.isInstanceOf[TestNodeProvider] } val wrapper = new StructureViewComponent.StructureViewTreeElementWrapper(getProject, treeViewModel.getRoot, treeViewModel) def initTree(wrapper: StructureViewComponent.StructureViewTreeElementWrapper) { import scala.collection.JavaConversions._ wrapper.initChildren() wrapper.getChildren.toList.foreach(node => initTree(node.asInstanceOf[StructureViewComponent.StructureViewTreeElementWrapper])) } initTree(wrapper) wrapper } override protected def createLocation(lineNumber: Int, offset: Int, fileName: String): PsiLocation[PsiElement] = { val ioFile = new java.io.File(srcDir, fileName) val file = getVirtualFile(ioFile) val project = getProject val myManager = PsiManager.getInstance(project) val psiFile = 
myManager.findViewProvider(file).getPsi(ScalaFileType.SCALA_LANGUAGE) val psiElement = psiFile.findElementAt(FileDocumentManager.getInstance().getDocument(file). getLineStartOffset(lineNumber) + offset) new PsiLocation(project, myModule, psiElement) } private def failedConfigMessage(fileName: String, lineNumber: Int, offset: Int) = "Failed to create run configuration for test from file " + fileName + " from line " + lineNumber + " at offset " + offset private def failedConfigMessage(packageName: String) = "Failed to create run configuration for test from package " + packageName override protected def createTestFromLocation(lineNumber: Int, offset: Int, fileName: String): RunnerAndConfigurationSettings = configurationProducer.createConfigurationByLocation(createLocation(lineNumber, offset, fileName)).map(_._2) match { case Some(testConfig) => testConfig case _ => throw new RuntimeException(failedConfigMessage(fileName, lineNumber, offset)) } override protected def createTestFromPackage(packageName: String): RunnerAndConfigurationSettings = configurationProducer.createConfigurationByLocation( new PsiLocation(getProject, PsiDirectoryFactory.getInstance(getProject).createDirectory(getProject.getBaseDir.findChild("src").findChild(packageName))) ).map(_._2) match { case Some(testConfig) => testConfig case _ => throw new RuntimeException(failedConfigMessage(packageName)) } override protected def runTestFromConfig(configurationCheck: RunnerAndConfigurationSettings => Boolean, runConfig: RunnerAndConfigurationSettings, checkOutputs: Boolean = false, duration: Int = 3000, debug: Boolean = false ): (String, Option[AbstractTestProxy]) = { assert(configurationCheck(runConfig)) assert(runConfig.getConfiguration.isInstanceOf[AbstractTestRunConfiguration]) runConfig.getConfiguration.asInstanceOf[AbstractTestRunConfiguration].setupIntegrationTestClassPath() val testResultListener = new TestResultListener(runConfig.getName) var testTreeRoot: Option[AbstractTestProxy] = None 
UsefulTestCase.edt(new Runnable { def run() { if (needMake) { make() saveChecksums() } val runner = ProgramRunner.PROGRAM_RUNNER_EP.getExtensions.find { _.getClass == classOf[DefaultJavaProgramRunner] }.get val (handler, runContentDescriptor) = runProcess(runConfig, classOf[DefaultRunExecutor], new ProcessAdapter { override def onTextAvailable(event: ProcessEvent, outputType: Key[_]) { val text = event.getText if (debug) print(text) } }, runner) runContentDescriptor.getExecutionConsole match { case descriptor: SMTRunnerConsoleView => testTreeRoot = Some(descriptor.getResultsViewer.getRoot) case _ => } handler.addProcessListener(testResultListener) } }) val res = testResultListener.waitForTestEnd(duration) (res, testTreeRoot) } private def runProcess(runConfiguration: RunnerAndConfigurationSettings, executorClass: Class[_ <: Executor], listener: ProcessListener, runner: ProgramRunner[_ <: RunnerSettings]): (ProcessHandler, RunContentDescriptor) = { val configuration = runConfiguration.getConfiguration val executor: Executor = Executor.EXECUTOR_EXTENSION_NAME.findExtension(executorClass) val executionEnvironmentBuilder: ExecutionEnvironmentBuilder = new ExecutionEnvironmentBuilder(configuration.getProject, executor) executionEnvironmentBuilder.runProfile(configuration) val semaphore: Semaphore = new Semaphore semaphore.down() val processHandler: AtomicReference[ProcessHandler] = new AtomicReference[ProcessHandler] val contentDescriptor: AtomicReference[RunContentDescriptor] = new AtomicReference[RunContentDescriptor] runner.execute(executionEnvironmentBuilder.build, new ProgramRunner.Callback { def processStarted(descriptor: RunContentDescriptor) { System.setProperty("idea.dynamic.classpath", useDynamicClassPath.toString) disposeOnTearDown(new Disposable { def dispose() { descriptor.dispose() } }) val handler: ProcessHandler = descriptor.getProcessHandler assert(handler != null) handler.addProcessListener(listener) processHandler.set(handler) 
contentDescriptor.set(descriptor) semaphore.up() } }) semaphore.waitFor() (processHandler.get, contentDescriptor.get) } }
advancedxy/intellij-scala
test/org/jetbrains/plugins/scala/testingSupport/ScalaTestingTestCase.scala
Scala
apache-2.0
9,003
package com.fitbit.model.activities sealed trait TimeSeriesType { def name: String = getClass.getSimpleName.toLowerCase() def endpoint: String override def toString = endpoint } case object Calories extends TimeSeriesType { val endpoint = "/activities/calories" } case object Distance extends TimeSeriesType { val endpoint = "/activities/distance" } case object Elevation extends TimeSeriesType { val endpoint = "/activities/elevation" } case object Floors extends TimeSeriesType { val endpoint = "/activities/floors" } case object Steps extends TimeSeriesType { val endpoint = "/activities/steps" } case object Heart extends TimeSeriesType { val endpoint = "/activities/heart" } object TimeSeriesType { def typeForName(name: String):TimeSeriesType = { name match { case "calories" => Calories case "distance" => Distance case "elevation" => Elevation case "floors" => Floors case "steps" => Steps case "heart" => Heart } } }
richardE353/fitbit_rest
src/main/scala/com/fitbit/model/activities/TimeSeriesTypes.scala
Scala
apache-2.0
1,000
package play.modules.reactivemongo import scala.util.{ Failure, Success } import scala.util.control.NonFatal import scala.reflect.ClassTag import play.api.data.FormError import play.api.data.format.Formatter import reactivemongo.api.bson._ import reactivemongo.play.json.compat.ValueConverters /** Instances of [[https://www.playframework.com/documentation/latest/api/scala/index.html#play.api.data.format.Formatter Play Formatter]] for the ReactiveMongo types. */ object Formatters { self => import play.api.libs.json.Json type Result[T] = Either[Seq[FormError], T] private def bind[T]( key: String, data: Map[String, String] )(f: String => Result[T] ): Result[T] = data .get(key) .fold[Result[T]](Left(Seq(FormError(key, "error.required", Nil))))(f) /** Formats BSON value as JSON. */ implicit def bsonFormatter[T <: BSONValue]( implicit cls: ClassTag[T] ): Formatter[T] = new Formatter[T] { def bind(key: String, data: Map[String, String]): Result[T] = self.bind[T](key, data) { str => try { ValueConverters.toValue(Json parse str) match { case `cls`(v) => Right(v) case unexpected => Left( Seq(FormError(key, s"Unexpected BSONValue: $unexpected", Nil)) ) } } catch { case NonFatal(cause) => Left( Seq( FormError( key, s"fails to parse the JSON representation: $cause", Nil ) ) ) } } def unbind(key: String, value: T): Map[String, String] = Map(key -> Json.stringify(ValueConverters.fromValue(value))) } implicit object NumberLikeFormatter extends Formatter[BSONNumberLike] { def bind(key: String, data: Map[String, String]): Result[BSONNumberLike] = self.bind[BSONNumberLike](key, data) { str => try { ValueConverters.toValue(Json parse str) match { case n: BSONNumberLike => Right(n) case _ => Left(Seq(FormError(key, "error.jsnumber.expected", str))) } } catch { case NonFatal(_) => Left(Seq(FormError(key, "error.jsnumber.expected", str))) } } @SuppressWarnings(Array("BigDecimalDoubleConstructor")) def unbind(key: String, value: BSONNumberLike): Map[String, String] = value.toDouble match { 
case Success(d) => { val n = BigDecimal(d) val json = { if (!n.ulp.isWhole) Json.toJson(d) else if (n.isValidInt) Json.toJson(d.toInt) else Json.toJson(d.toLong) } Map(key -> Json.stringify(json)) } case _ => value.toLong match { case Success(l) => { val json = { if (l.isValidInt) Json.toJson(l.toInt) else Json.toJson(l) } Map(key -> Json.stringify(json)) } case Failure(cause) => throw cause } } } implicit object BooleanLikeFormatter extends Formatter[BSONBooleanLike] { def bind(key: String, data: Map[String, String]): Result[BSONBooleanLike] = self.bind[BSONBooleanLike](key, data) { str => try { ValueConverters.toValue(Json parse str) match { case b: BSONBooleanLike => Right(b) case _ => Left(Seq(FormError(key, "error.jsboolean.expected", str))) } } catch { case NonFatal(_) => Left(Seq(FormError(key, "error.jsboolean.expected", str))) } } def unbind(key: String, value: BSONBooleanLike): Map[String, String] = value.toBoolean match { case Success(b) => Map(key -> Json.stringify(Json toJson b)) case Failure(cause) => throw cause } } }
ReactiveMongo/Play-ReactiveMongo
src/main/scala/play/modules/reactivemongo/Formatters.scala
Scala
apache-2.0
4,023
/** * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package kafka.api import java.io.{DataInputStream, DataOutputStream} import java.net.Socket import java.nio.ByteBuffer import java.util.concurrent.ExecutionException import java.util.{ArrayList, Collections, Properties} import kafka.cluster.EndPoint import kafka.common.{ErrorMapping, TopicAndPartition} import kafka.coordinator.GroupCoordinator import kafka.integration.KafkaServerTestHarness import kafka.security.auth._ import kafka.server.KafkaConfig import kafka.utils.TestUtils import org.apache.kafka.clients.consumer.{OffsetAndMetadata, Consumer, ConsumerRecord, KafkaConsumer} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} import org.apache.kafka.common.errors._ import org.apache.kafka.common.protocol.{ApiKeys, Errors, SecurityProtocol} import org.apache.kafka.common.requests._ import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.{TopicPartition, requests} import org.junit.Assert._ import org.junit.{After, Assert, Before, Test} import scala.collection.JavaConverters._ import scala.collection.mutable import scala.collection.mutable.Buffer class AuthorizerIntegrationTest extends KafkaServerTestHarness { val topic = "topic" val part = 0 val 
brokerId: Integer = 0 val correlationId = 0 val clientId = "client-Id" val tp = new TopicPartition(topic, part) val topicAndPartition = new TopicAndPartition(topic, part) val group = "my-group" val topicResource = new Resource(Topic, topic) val groupResource = new Resource(Group, group) val GroupReadAcl = Map(groupResource -> Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read))) val ClusterAcl = Map(Resource.ClusterResource -> Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, ClusterAction))) val TopicReadAcl = Map(topicResource -> Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read))) val TopicWriteAcl = Map(topicResource -> Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write))) val TopicDescribeAcl = Map(topicResource -> Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Describe))) val consumers = Buffer[KafkaConsumer[Array[Byte], Array[Byte]]]() val producers = Buffer[KafkaProducer[Array[Byte], Array[Byte]]]() val numServers = 1 val producerCount = 1 val consumerCount = 2 val producerConfig = new Properties val numRecords = 1 val overridingProps = new Properties() overridingProps.put(KafkaConfig.AuthorizerClassNameProp, classOf[SimpleAclAuthorizer].getName) overridingProps.put(KafkaConfig.BrokerIdProp, brokerId.toString) overridingProps.put(KafkaConfig.OffsetsTopicPartitionsProp, "1") val endPoint = new EndPoint("localhost", 0, SecurityProtocol.PLAINTEXT) val RequestKeyToResponseDeserializer: Map[Short, Class[_ <: Any]] = Map(ApiKeys.METADATA.id -> classOf[requests.MetadataResponse], ApiKeys.PRODUCE.id -> classOf[requests.ProduceResponse], ApiKeys.FETCH.id -> classOf[requests.FetchResponse], ApiKeys.LIST_OFFSETS.id -> classOf[requests.ListOffsetResponse], ApiKeys.OFFSET_COMMIT.id -> classOf[requests.OffsetCommitResponse], ApiKeys.OFFSET_FETCH.id -> classOf[requests.OffsetFetchResponse], ApiKeys.GROUP_COORDINATOR.id -> classOf[requests.GroupCoordinatorResponse], 
ApiKeys.UPDATE_METADATA_KEY.id -> classOf[requests.UpdateMetadataResponse], ApiKeys.JOIN_GROUP.id -> classOf[JoinGroupResponse], ApiKeys.SYNC_GROUP.id -> classOf[SyncGroupResponse], ApiKeys.HEARTBEAT.id -> classOf[HeartbeatResponse], ApiKeys.LEAVE_GROUP.id -> classOf[LeaveGroupResponse], ApiKeys.LEADER_AND_ISR.id -> classOf[requests.LeaderAndIsrResponse], ApiKeys.STOP_REPLICA.id -> classOf[requests.StopReplicaResponse], ApiKeys.CONTROLLED_SHUTDOWN_KEY.id -> classOf[requests.ControlledShutdownResponse] ) val RequestKeyToErrorCode = Map[Short, (Nothing) => Short]( ApiKeys.METADATA.id -> ((resp: requests.MetadataResponse) => resp.errors().asScala.find(_._1 == topic).getOrElse(("test", Errors.NONE))._2.code()), ApiKeys.PRODUCE.id -> ((resp: requests.ProduceResponse) => resp.responses().asScala.find(_._1 == tp).get._2.errorCode), ApiKeys.FETCH.id -> ((resp: requests.FetchResponse) => resp.responseData().asScala.find(_._1 == tp).get._2.errorCode), ApiKeys.LIST_OFFSETS.id -> ((resp: requests.ListOffsetResponse) => resp.responseData().asScala.find(_._1 == tp).get._2.errorCode), ApiKeys.OFFSET_COMMIT.id -> ((resp: requests.OffsetCommitResponse) => resp.responseData().asScala.find(_._1 == tp).get._2), ApiKeys.OFFSET_FETCH.id -> ((resp: requests.OffsetFetchResponse) => resp.responseData().asScala.find(_._1 == tp).get._2.errorCode), ApiKeys.GROUP_COORDINATOR.id -> ((resp: requests.GroupCoordinatorResponse) => resp.errorCode()), ApiKeys.UPDATE_METADATA_KEY.id -> ((resp: requests.UpdateMetadataResponse) => resp.errorCode()), ApiKeys.JOIN_GROUP.id -> ((resp: JoinGroupResponse) => resp.errorCode()), ApiKeys.SYNC_GROUP.id -> ((resp: SyncGroupResponse) => resp.errorCode()), ApiKeys.HEARTBEAT.id -> ((resp: HeartbeatResponse) => resp.errorCode()), ApiKeys.LEAVE_GROUP.id -> ((resp: LeaveGroupResponse) => resp.errorCode()), ApiKeys.LEADER_AND_ISR.id -> ((resp: requests.LeaderAndIsrResponse) => resp.responses().asScala.find(_._1 == tp).get._2), ApiKeys.STOP_REPLICA.id -> ((resp: 
requests.StopReplicaResponse) => resp.responses().asScala.find(_._1 == tp).get._2), ApiKeys.CONTROLLED_SHUTDOWN_KEY.id -> ((resp: requests.ControlledShutdownResponse) => resp.errorCode()) ) val RequestKeysToAcls = Map[Short, Map[Resource, Set[Acl]]]( ApiKeys.METADATA.id -> TopicDescribeAcl, ApiKeys.PRODUCE.id -> TopicWriteAcl, ApiKeys.FETCH.id -> TopicReadAcl, ApiKeys.LIST_OFFSETS.id -> TopicDescribeAcl, ApiKeys.OFFSET_COMMIT.id -> (TopicReadAcl ++ GroupReadAcl), ApiKeys.OFFSET_FETCH.id -> (TopicReadAcl ++ GroupReadAcl), ApiKeys.GROUP_COORDINATOR.id -> (TopicReadAcl ++ GroupReadAcl), ApiKeys.UPDATE_METADATA_KEY.id -> ClusterAcl, ApiKeys.JOIN_GROUP.id -> GroupReadAcl, ApiKeys.SYNC_GROUP.id -> GroupReadAcl, ApiKeys.HEARTBEAT.id -> GroupReadAcl, ApiKeys.LEAVE_GROUP.id -> GroupReadAcl, ApiKeys.LEADER_AND_ISR.id -> ClusterAcl, ApiKeys.STOP_REPLICA.id -> ClusterAcl, ApiKeys.CONTROLLED_SHUTDOWN_KEY.id -> ClusterAcl ) // configure the servers and clients override def generateConfigs() = TestUtils.createBrokerConfigs(1, zkConnect, enableControlledShutdown = false).map(KafkaConfig.fromProps(_, overridingProps)) @Before override def setUp() { super.setUp() addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, ClusterAction)), Resource.ClusterResource) for (i <- 0 until producerCount) producers += TestUtils.createNewProducer(TestUtils.getBrokerListStrFromServers(servers), acks = 1) for (i <- 0 until consumerCount) consumers += TestUtils.createNewConsumer(TestUtils.getBrokerListStrFromServers(servers), groupId = group, securityProtocol = SecurityProtocol.PLAINTEXT) // create the consumer offset topic TestUtils.createTopic(zkUtils, GroupCoordinator.GroupMetadataTopicName, 1, 1, servers, servers.head.consumerCoordinator.offsetsTopicConfigs) // create the test topic with all the brokers as replicas TestUtils.createTopic(zkUtils, topic, 1, 1, this.servers) } @After override def tearDown() = { removeAllAcls super.tearDown() } private def 
createMetadataRequest = { new requests.MetadataRequest(List(topic).asJava) } private def createProduceRequest = { new requests.ProduceRequest(1, 5000, collection.mutable.Map(tp -> ByteBuffer.wrap("test".getBytes)).asJava) } private def createFetchRequest = { new requests.FetchRequest(5000, 100, Map(tp -> new requests.FetchRequest.PartitionData(0, 100)).asJava) } private def createListOffsetsRequest = { new requests.ListOffsetRequest(Map(tp -> new ListOffsetRequest.PartitionData(0, 100)).asJava) } private def createOffsetFetchRequest = { new requests.OffsetFetchRequest(group, List(tp).asJava) } private def createGroupCoordinatorRequest = { new requests.GroupCoordinatorRequest(group) } private def createUpdateMetadataRequest = { val partitionState = Map(tp -> new requests.UpdateMetadataRequest.PartitionState(Int.MaxValue, brokerId, Int.MaxValue, List(brokerId).asJava, 2, Set(brokerId).asJava)).asJava val brokers = Set(new requests.UpdateMetadataRequest.Broker(brokerId, Map(SecurityProtocol.PLAINTEXT -> new requests.UpdateMetadataRequest.EndPoint("localhost", 0)).asJava)).asJava new requests.UpdateMetadataRequest(brokerId, Int.MaxValue, partitionState, brokers) } private def createJoinGroupRequest = { new JoinGroupRequest(group, 30000, "", "consumer", List( new JoinGroupRequest.ProtocolMetadata("consumer-range",ByteBuffer.wrap("test".getBytes()))).asJava) } private def createSyncGroupRequest = { new SyncGroupRequest(group, 1, "", Map[String, ByteBuffer]().asJava) } private def createOffsetCommitRequest = { new requests.OffsetCommitRequest(group, 1, "", 1000, Map(tp -> new requests.OffsetCommitRequest.PartitionData(0, "metadata")).asJava) } private def createHeartbeatRequest = { new HeartbeatRequest(group, 1, "") } private def createLeaveGroupRequest = { new LeaveGroupRequest(group, "") } private def createLeaderAndIsrRequest = { new requests.LeaderAndIsrRequest(brokerId, Int.MaxValue, Map(tp -> new requests.LeaderAndIsrRequest.PartitionState(Int.MaxValue, brokerId, 
Int.MaxValue, List(brokerId).asJava, 2, Set(brokerId).asJava)).asJava, Set(new requests.LeaderAndIsrRequest.EndPoint(brokerId,"localhost", 0)).asJava) } private def createStopReplicaRequest = { new requests.StopReplicaRequest(brokerId, Int.MaxValue, true, Set(tp).asJava) } private def createControlledShutdownRequest = { new requests.ControlledShutdownRequest(brokerId) } @Test def testAuthorization() { val requestKeyToRequest = mutable.LinkedHashMap[Short, AbstractRequest]( ApiKeys.METADATA.id -> createMetadataRequest, ApiKeys.PRODUCE.id -> createProduceRequest, ApiKeys.FETCH.id -> createFetchRequest, ApiKeys.LIST_OFFSETS.id -> createListOffsetsRequest, ApiKeys.OFFSET_FETCH.id -> createOffsetFetchRequest, ApiKeys.GROUP_COORDINATOR.id -> createGroupCoordinatorRequest, ApiKeys.UPDATE_METADATA_KEY.id -> createUpdateMetadataRequest, ApiKeys.JOIN_GROUP.id -> createJoinGroupRequest, ApiKeys.SYNC_GROUP.id -> createSyncGroupRequest, ApiKeys.OFFSET_COMMIT.id -> createOffsetCommitRequest, ApiKeys.HEARTBEAT.id -> createHeartbeatRequest, ApiKeys.LEAVE_GROUP.id -> createLeaveGroupRequest, ApiKeys.LEADER_AND_ISR.id -> createLeaderAndIsrRequest, ApiKeys.STOP_REPLICA.id -> createStopReplicaRequest, ApiKeys.CONTROLLED_SHUTDOWN_KEY.id -> createControlledShutdownRequest ) val socket = new Socket("localhost", servers.head.boundPort()) for ((key, request) <- requestKeyToRequest) { removeAllAcls val resources = RequestKeysToAcls(key).map(_._1.resourceType).toSet sendRequestAndVerifyResponseErrorCode(socket, key, request, resources, isAuthorized = false) for ((resource, acls) <- RequestKeysToAcls(key)) addAndVerifyAcls(acls, resource) sendRequestAndVerifyResponseErrorCode(socket, key, request, resources, isAuthorized = true) } } @Test def testProduceWithNoTopicAccess() { try { sendRecords(numRecords, tp) fail("sendRecords should have thrown") } catch { case e: TopicAuthorizationException => assertEquals(Collections.singleton(topic), e.unauthorizedTopics()) } } @Test def 
testProduceWithTopicDescribe() { addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Describe)), topicResource) try { sendRecords(numRecords, tp) fail("sendRecords should have thrown") } catch { case e: TopicAuthorizationException => assertEquals(Collections.singleton(topic), e.unauthorizedTopics()) } } @Test def testProduceWithTopicRead() { addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), topicResource) try { sendRecords(numRecords, tp) fail("sendRecords should have thrown") } catch { case e: TopicAuthorizationException => assertEquals(Collections.singleton(topic), e.unauthorizedTopics()) } } @Test def testProduceWithTopicWrite() { addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write)), topicResource) sendRecords(numRecords, tp) } @Test def testCreatePermissionNeededForWritingToNonExistentTopic() { val newTopic = "newTopic" val topicPartition = new TopicPartition(newTopic, 0) addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write)), new Resource(Topic, newTopic)) try { sendRecords(numRecords, topicPartition) Assert.fail("should have thrown exception") } catch { case e: TopicAuthorizationException => assertEquals(Collections.singleton(newTopic), e.unauthorizedTopics()) } addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Create)), Resource.ClusterResource) sendRecords(numRecords, topicPartition) } @Test(expected = classOf[AuthorizationException]) def testConsumeWithNoAccess(): Unit = { addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write)), topicResource) sendRecords(1, tp) removeAllAcls() this.consumers.head.assign(List(tp).asJava) consumeRecords(this.consumers.head) } @Test def testConsumeWithNoGroupAccess(): Unit = { addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write)), topicResource) sendRecords(1, tp) removeAllAcls() 
addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), topicResource) try { this.consumers.head.assign(List(tp).asJava) consumeRecords(this.consumers.head) Assert.fail("should have thrown exception") } catch { case e: GroupAuthorizationException => assertEquals(group, e.groupId()) } } @Test def testConsumeWithNoTopicAccess() { addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write)), topicResource) sendRecords(1, tp) removeAllAcls() addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), groupResource) try { this.consumers.head.assign(List(tp).asJava) consumeRecords(this.consumers.head) Assert.fail("should have thrown exception") } catch { case e: TopicAuthorizationException => assertEquals(Collections.singleton(topic), e.unauthorizedTopics()); } } @Test def testConsumeWithTopicDescribe() { addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write)), topicResource) sendRecords(1, tp) removeAllAcls() addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Describe)), topicResource) addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), groupResource) try { this.consumers.head.assign(List(tp).asJava) consumeRecords(this.consumers.head) Assert.fail("should have thrown exception") } catch { case e: TopicAuthorizationException => assertEquals(Collections.singleton(topic), e.unauthorizedTopics()); } } @Test def testConsumeWithTopicWrite() { addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write)), topicResource) sendRecords(1, tp) removeAllAcls() addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write)), topicResource) addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), groupResource) try { this.consumers.head.assign(List(tp).asJava) consumeRecords(this.consumers.head) Assert.fail("should have 
thrown exception") } catch { case e: TopicAuthorizationException => assertEquals(Collections.singleton(topic), e.unauthorizedTopics()); } } @Test def testConsumeWithTopicAndGroupRead() { addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write)), topicResource) sendRecords(1, tp) removeAllAcls() addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), topicResource) addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), groupResource) this.consumers.head.assign(List(tp).asJava) consumeRecords(this.consumers.head) } @Test def testCreatePermissionNeededToReadFromNonExistentTopic() { val newTopic = "newTopic" val topicPartition = new TopicPartition(newTopic, 0) val newTopicResource = new Resource(Topic, newTopic) addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), newTopicResource) addAndVerifyAcls(GroupReadAcl(groupResource), groupResource) addAndVerifyAcls(ClusterAcl(Resource.ClusterResource), Resource.ClusterResource) try { this.consumers(0).assign(List(topicPartition).asJava) consumeRecords(this.consumers(0)) Assert.fail("should have thrown exception") } catch { case e: TopicAuthorizationException => assertEquals(Collections.singleton(newTopic), e.unauthorizedTopics()); } addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write)), newTopicResource) addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Create)), Resource.ClusterResource) sendRecords(numRecords, topicPartition) consumeRecords(this.consumers(0), topic = newTopic, part = 0) } @Test(expected = classOf[AuthorizationException]) def testCommitWithNoAccess() { this.consumers.head.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava) } @Test(expected = classOf[TopicAuthorizationException]) def testCommitWithNoTopicAccess() { addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), groupResource) 
this.consumers.head.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)
  }

  // Write on the topic does not imply Read: committing offsets still fails.
  @Test(expected = classOf[TopicAuthorizationException])
  def testCommitWithTopicWrite() {
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), groupResource)
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Write)), topicResource)
    this.consumers.head.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)
  }

  // Describe on the topic is likewise insufficient for committing offsets.
  @Test(expected = classOf[TopicAuthorizationException])
  def testCommitWithTopicDescribe() {
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), groupResource)
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Describe)), topicResource)
    this.consumers.head.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)
  }

  // Topic Read without group Read fails with a group authorization error.
  @Test(expected = classOf[GroupAuthorizationException])
  def testCommitWithNoGroupAccess() {
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), topicResource)
    this.consumers.head.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)
  }

  // Read on both the group and the topic is the minimal ACL set for commitSync.
  @Test
  def testCommitWithTopicAndGroupRead() {
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), groupResource)
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), topicResource)
    this.consumers.head.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)
  }

  // position() triggers an offset fetch; with no ACLs it is rejected.
  @Test(expected = classOf[AuthorizationException])
  def testOffsetFetchWithNoAccess() {
    this.consumers.head.assign(List(tp).asJava)
    this.consumers.head.position(tp)
  }

  @Test(expected = classOf[GroupAuthorizationException])
  def testOffsetFetchWithNoGroupAccess() {
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), topicResource)
    this.consumers.head.assign(List(tp).asJava)
    this.consumers.head.position(tp)
  }

  @Test(expected = classOf[TopicAuthorizationException])
  def testOffsetFetchWithNoTopicAccess() {
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), groupResource)
    this.consumers.head.assign(List(tp).asJava)
    this.consumers.head.position(tp)
  }

  // Describe on the topic (plus group Read) is enough to fetch offsets.
  @Test
  def testOffsetFetchTopicDescribe() {
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), groupResource)
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Describe)), topicResource)
    this.consumers.head.assign(List(tp).asJava)
    this.consumers.head.position(tp)
  }

  @Test
  def testOffsetFetchWithTopicAndGroupRead() {
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), groupResource)
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Read)), topicResource)
    this.consumers.head.assign(List(tp).asJava)
    this.consumers.head.position(tp)
  }

  // partitionsFor() reports the unauthorized topic when no topic ACL exists.
  @Test
  def testListOffsetsWithNoTopicAccess() {
    val e = intercept[TopicAuthorizationException] {
      this.consumers.head.partitionsFor(topic)
    }
    assertEquals(Set(topic), e.unauthorizedTopics().asScala)
  }

  // Renamed from testListOfsetsWithTopicDescribe: fixed the "Ofsets" typo.
  // JUnit discovers test methods reflectively, so no other code refers to the name.
  @Test
  def testListOffsetsWithTopicDescribe() {
    addAndVerifyAcls(Set(new Acl(KafkaPrincipal.ANONYMOUS, Allow, Acl.WildCardHost, Describe)), topicResource)
    this.consumers.head.partitionsFor(topic)
  }

  /** Removes every ACL known to the broker's authorizer and waits until each removal is visible. */
  def removeAllAcls() = {
    servers.head.apis.authorizer.get.getAcls().keys.foreach { resource =>
      servers.head.apis.authorizer.get.removeAcls(resource)
      TestUtils.waitAndVerifyAcls(Set.empty[Acl], servers.head.apis.authorizer.get, resource)
    }
  }

  /**
   * Serializes `request`, sends it over the raw `socket`, and asserts that the response's error
   * code is (when `isAuthorized` is false) or is not (when true) one of the authorization error
   * codes of `resources`. Returns the parsed response.
   */
  def sendRequestAndVerifyResponseErrorCode(socket: Socket,
                                            key: Short,
                                            request: AbstractRequest,
                                            resources: Set[ResourceType],
                                            isAuthorized: Boolean): AbstractRequestResponse = {
    val header = new RequestHeader(key, "client", 1)
    val body = request.toStruct
    val buffer = ByteBuffer.allocate(header.sizeOf() + body.sizeOf())
    header.writeTo(buffer)
    body.writeTo(buffer)
    buffer.rewind()
    val requestBytes = buffer.array()
    sendRequest(socket, key, requestBytes)
    val resp = receiveResponse(socket)
    ResponseHeader.parse(resp)
    // Each response type exposes a static parse(ByteBuffer); look it up reflectively.
    val response =
RequestKeyToResponseDeserializer(key).getMethod("parse", classOf[ByteBuffer]).invoke(null, resp).asInstanceOf[AbstractRequestResponse]
    // Extract the per-API error code and compare it against the authorization
    // error codes of the resources under test.
    val errorCode = RequestKeyToErrorCode(key).asInstanceOf[(AbstractRequestResponse) => Short](response)
    val possibleErrorCodes = resources.map(_.errorCode)
    if (isAuthorized)
      assertFalse(s"${ApiKeys.forId(key)} should be allowed", possibleErrorCodes.contains(errorCode))
    else
      assertTrue(s"${ApiKeys.forId(key)} should be forbidden", possibleErrorCodes.contains(errorCode))
    response
  }

  // Writes a size-prefixed request payload to the raw socket.
  private def sendRequest(socket: Socket, id: Short, request: Array[Byte]) {
    val outgoing = new DataOutputStream(socket.getOutputStream)
    outgoing.writeInt(request.length)
    outgoing.write(request)
    outgoing.flush()
  }

  // Reads one size-prefixed response payload from the raw socket.
  private def receiveResponse(socket: Socket): ByteBuffer = {
    val incoming = new DataInputStream(socket.getInputStream)
    val len = incoming.readInt()
    val response = new Array[Byte](len)
    incoming.readFully(response)
    ByteBuffer.wrap(response)
  }

  // Produces numRecords records to tp, rethrowing the underlying cause on failure.
  private def sendRecords(numRecords: Int, tp: TopicPartition) {
    val futures = (0 until numRecords).map { i =>
      this.producers.head.send(new ProducerRecord(tp.topic(), tp.partition(), i.toString.getBytes, i.toString.getBytes))
    }
    try {
      futures.foreach(_.get)
    } catch {
      case e: ExecutionException => throw e.getCause
    }
  }

  // Adds the ACLs and blocks until the authorizer reflects them.
  private def addAndVerifyAcls(acls: Set[Acl], resource: Resource) = {
    servers.head.apis.authorizer.get.addAcls(acls, resource)
    TestUtils.waitAndVerifyAcls(servers.head.apis.authorizer.get.getAcls(resource) ++ acls,
      servers.head.apis.authorizer.get, resource)
  }

  // Removes the ACLs and blocks until the authorizer reflects the removal.
  private def removeAndVerifyAcls(acls: Set[Acl], resource: Resource) = {
    servers.head.apis.authorizer.get.removeAcls(acls, resource)
    TestUtils.waitAndVerifyAcls(servers.head.apis.authorizer.get.getAcls(resource) -- acls,
      servers.head.apis.authorizer.get, resource)
  }

  // Polls until numRecords records arrive (bounded by maxIters poll rounds),
  // then checks topic/partition/offset of the first numRecords records.
  private def consumeRecords(consumer: Consumer[Array[Byte], Array[Byte]],
                             numRecords: Int = 1,
                             startingOffset: Int = 0,
                             topic: String = topic,
                             part: Int = part) {
    val records = new ArrayList[ConsumerRecord[Array[Byte], Array[Byte]]]()
    val maxIters = numRecords * 50
    var iters = 0
    while (records.size < numRecords) {
      for (record <- consumer.poll(50).asScala) {
        records.add(record)
      }
      if (iters > maxIters)
        throw new IllegalStateException("Failed to consume the expected records after " + iters + " iterations.")
      iters += 1
    }
    for (i <- 0 until numRecords) {
      val record = records.get(i)
      val offset = startingOffset + i
      assertEquals(topic, record.topic())
      assertEquals(part, record.partition())
      assertEquals(offset.toLong, record.offset())
    }
  }
}
prashantbh/kafka
core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala
Scala
apache-2.0
26,642
/*
 * Copyright 2017 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.accounts.frs10x.boxes

import org.mockito.Mockito._
import org.scalatest.BeforeAndAfterEach
import uk.gov.hmrc.ct.accounts.frs10x.retriever.Frs10xAccountsBoxRetriever
import uk.gov.hmrc.ct.accounts.{AccountStatementValidationFixture, MockFrs10xAccountsRetriever}

// Validation tests for the AC8081 accounts-statement box (FRS 10x).
class AC8081Spec extends AccountStatementValidationFixture[Frs10xAccountsBoxRetriever] with BeforeAndAfterEach with MockFrs10xAccountsRetriever{

  // Default mock state: ACQ8999 (the dormancy box) is unset, so the shared
  // statement-validation tests below run with validation active.
  override def setupMocks = {
    when(boxRetriever.acq8999()).thenReturn(ACQ8999(None))
  }

  // Shared fixture: runs the standard statement-validation suite for AC8081.
  doStatementValidationTests("AC8081", AC8081.apply)

  // When the company is dormant (ACQ8999 = Some(true)) AC8081 is not validated
  // at all, whatever its value.
  "validation disabled if dormant" in {
    when(boxRetriever.acq8999()).thenReturn(ACQ8999(Some(true)))
    AC8081(None).validate(boxRetriever) shouldBe Set.empty
    AC8081(Some(true)).validate(boxRetriever) shouldBe Set.empty
  }
}
liquidarmour/ct-calculations
src/test/scala/uk/gov/hmrc/ct/accounts/frs10x/boxes/AC8081Spec.scala
Scala
apache-2.0
1,433
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.internal import java.util.{Locale, NoSuchElementException, Properties, TimeZone} import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicReference import scala.collection.JavaConverters._ import scala.collection.immutable import scala.util.matching.Regex import org.apache.hadoop.fs.Path import org.apache.spark.TaskContext import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ import org.apache.spark.network.util.ByteUnit import org.apache.spark.sql.catalyst.analysis.Resolver import org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator import org.apache.spark.util.Utils //////////////////////////////////////////////////////////////////////////////////////////////////// // This file defines the configuration options for Spark SQL. 
//////////////////////////////////////////////////////////////////////////////////////////////////// object SQLConf { private val sqlConfEntries = java.util.Collections.synchronizedMap( new java.util.HashMap[String, ConfigEntry[_]]()) val staticConfKeys: java.util.Set[String] = java.util.Collections.synchronizedSet(new java.util.HashSet[String]()) private def register(entry: ConfigEntry[_]): Unit = sqlConfEntries.synchronized { require(!sqlConfEntries.containsKey(entry.key), s"Duplicate SQLConfigEntry. ${entry.key} has been registered") sqlConfEntries.put(entry.key, entry) } // For testing only private[sql] def unregister(entry: ConfigEntry[_]): Unit = sqlConfEntries.synchronized { sqlConfEntries.remove(entry.key) } def buildConf(key: String): ConfigBuilder = ConfigBuilder(key).onCreate(register) def buildStaticConf(key: String): ConfigBuilder = { ConfigBuilder(key).onCreate { entry => staticConfKeys.add(entry.key) SQLConf.register(entry) } } /** * Default config. Only used when there is no active SparkSession for the thread. * See [[get]] for more information. */ private lazy val fallbackConf = new ThreadLocal[SQLConf] { override def initialValue: SQLConf = new SQLConf } /** See [[get]] for more information. */ def getFallbackConf: SQLConf = fallbackConf.get() /** * Defines a getter that returns the SQLConf within scope. * See [[get]] for more information. */ private val confGetter = new AtomicReference[() => SQLConf](() => fallbackConf.get()) /** * Sets the active config object within the current scope. * See [[get]] for more information. */ def setSQLConfGetter(getter: () => SQLConf): Unit = { confGetter.set(getter) } /** * Returns the active config object within the current scope. If there is an active SparkSession, * the proper SQLConf associated with the thread's session is used. * * The way this works is a little bit convoluted, due to the fact that config was added initially * only for physical plans (and as a result not in sql/catalyst module). 
* * The first time a SparkSession is instantiated, we set the [[confGetter]] to return the * active SparkSession's config. If there is no active SparkSession, it returns using the thread * local [[fallbackConf]]. The reason [[fallbackConf]] is a thread local (rather than just a conf) * is to support setting different config options for different threads so we can potentially * run tests in parallel. At the time this feature was implemented, this was a no-op since we * run unit tests (that does not involve SparkSession) in serial order. */ def get: SQLConf = { if (Utils.isTesting && TaskContext.get != null) { // we're accessing it during task execution, fail. throw new IllegalStateException("SQLConf should only be created and accessed on the driver.") } confGetter.get()() } val OPTIMIZER_MAX_ITERATIONS = buildConf("spark.sql.optimizer.maxIterations") .internal() .doc("The max number of iterations the optimizer and analyzer runs.") .intConf .createWithDefault(100) val OPTIMIZER_INSET_CONVERSION_THRESHOLD = buildConf("spark.sql.optimizer.inSetConversionThreshold") .internal() .doc("The threshold of set size for InSet conversion.") .intConf .createWithDefault(10) val COMPRESS_CACHED = buildConf("spark.sql.inMemoryColumnarStorage.compressed") .doc("When set to true Spark SQL will automatically select a compression codec for each " + "column based on statistics of the data.") .booleanConf .createWithDefault(true) val COLUMN_BATCH_SIZE = buildConf("spark.sql.inMemoryColumnarStorage.batchSize") .doc("Controls the size of batches for columnar caching. 
Larger batch sizes can improve " + "memory utilization and compression, but risk OOMs when caching data.") .intConf .createWithDefault(10000) val IN_MEMORY_PARTITION_PRUNING = buildConf("spark.sql.inMemoryColumnarStorage.partitionPruning") .internal() .doc("When true, enable partition pruning for in-memory columnar tables.") .booleanConf .createWithDefault(true) val CACHE_VECTORIZED_READER_ENABLED = buildConf("spark.sql.inMemoryColumnarStorage.enableVectorizedReader") .doc("Enables vectorized reader for columnar caching.") .booleanConf .createWithDefault(true) val COLUMN_VECTOR_OFFHEAP_ENABLED = buildConf("spark.sql.columnVector.offheap.enabled") .internal() .doc("When true, use OffHeapColumnVector in ColumnarBatch.") .booleanConf .createWithDefault(false) val PREFER_SORTMERGEJOIN = buildConf("spark.sql.join.preferSortMergeJoin") .internal() .doc("When true, prefer sort merge join over shuffle hash join.") .booleanConf .createWithDefault(true) val RADIX_SORT_ENABLED = buildConf("spark.sql.sort.enableRadixSort") .internal() .doc("When true, enable use of radix sort when possible. Radix sort is much faster but " + "requires additional memory to be reserved up-front. The memory overhead may be " + "significant when sorting very small rows (up to 50% more in this case).") .booleanConf .createWithDefault(true) val AUTO_BROADCASTJOIN_THRESHOLD = buildConf("spark.sql.autoBroadcastJoinThreshold") .doc("Configures the maximum size in bytes for a table that will be broadcast to all worker " + "nodes when performing a join. By setting this value to -1 broadcasting can be disabled. 
" + "Note that currently statistics are only supported for Hive Metastore tables where the " + "command <code>ANALYZE TABLE &lt;tableName&gt; COMPUTE STATISTICS noscan</code> has been " + "run, and file-based data source tables where the statistics are computed directly on " + "the files of data.") .longConf .createWithDefault(10L * 1024 * 1024) val LIMIT_SCALE_UP_FACTOR = buildConf("spark.sql.limit.scaleUpFactor") .internal() .doc("Minimal increase rate in number of partitions between attempts when executing a take " + "on a query. Higher values lead to more partitions read. Lower values might lead to " + "longer execution times as more jobs will be run") .intConf .createWithDefault(4) val ADVANCED_PARTITION_PREDICATE_PUSHDOWN = buildConf("spark.sql.hive.advancedPartitionPredicatePushdown.enabled") .internal() .doc("When true, advanced partition predicate pushdown into Hive metastore is enabled.") .booleanConf .createWithDefault(true) val ENABLE_FALL_BACK_TO_HDFS_FOR_STATS = buildConf("spark.sql.statistics.fallBackToHdfs") .doc("If the table statistics are not available from table metadata enable fall back to hdfs." + " This is useful in determining if a table is small enough to use auto broadcast joins.") .booleanConf .createWithDefault(false) val DEFAULT_SIZE_IN_BYTES = buildConf("spark.sql.defaultSizeInBytes") .internal() .doc("The default table size used in query planning. By default, it is set to Long.MaxValue " + "which is larger than `spark.sql.autoBroadcastJoinThreshold` to be more conservative. 
" + "That is to say by default the optimizer will not choose to broadcast a table unless it " + "knows for sure its size is small enough.") .longConf .createWithDefault(Long.MaxValue) val SHUFFLE_PARTITIONS = buildConf("spark.sql.shuffle.partitions") .doc("The default number of partitions to use when shuffling data for joins or aggregations.") .intConf .createWithDefault(200) val SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE = buildConf("spark.sql.adaptive.shuffle.targetPostShuffleInputSize") .doc("The target post-shuffle input size in bytes of a task.") .bytesConf(ByteUnit.BYTE) .createWithDefault(64 * 1024 * 1024) val ADAPTIVE_EXECUTION_ENABLED = buildConf("spark.sql.adaptive.enabled") .doc("When true, enable adaptive query execution.") .booleanConf .createWithDefault(false) val SHUFFLE_MIN_NUM_POSTSHUFFLE_PARTITIONS = buildConf("spark.sql.adaptive.minNumPostShufflePartitions") .internal() .doc("The advisory minimal number of post-shuffle partitions provided to " + "ExchangeCoordinator. This setting is used in our test to make sure we " + "have enough parallelism to expose issues that will not be exposed with a " + "single partition. When the value is a non-positive value, this setting will " + "not be provided to ExchangeCoordinator.") .intConf .createWithDefault(-1) val SUBEXPRESSION_ELIMINATION_ENABLED = buildConf("spark.sql.subexpressionElimination.enabled") .internal() .doc("When true, common subexpressions will be eliminated.") .booleanConf .createWithDefault(true) val CASE_SENSITIVE = buildConf("spark.sql.caseSensitive") .internal() .doc("Whether the query analyzer should be case sensitive or not. " + "Default to case insensitive. It is highly discouraged to turn on case sensitive mode.") .booleanConf .createWithDefault(false) val CONSTRAINT_PROPAGATION_ENABLED = buildConf("spark.sql.constraintPropagation.enabled") .internal() .doc("When true, the query optimizer will infer and propagate data constraints in the query " + "plan to optimize them. 
Constraint propagation can sometimes be computationally expensive " + "for certain kinds of query plans (such as those with a large number of predicates and " + "aliases) which might negatively impact overall runtime.") .booleanConf .createWithDefault(true) val ESCAPED_STRING_LITERALS = buildConf("spark.sql.parser.escapedStringLiterals") .internal() .doc("When true, string literals (including regex patterns) remain escaped in our SQL " + "parser. The default is false since Spark 2.0. Setting it to true can restore the behavior " + "prior to Spark 2.0.") .booleanConf .createWithDefault(false) val FILE_COMRESSION_FACTOR = buildConf("spark.sql.sources.fileCompressionFactor") .internal() .doc("When estimating the output data size of a table scan, multiply the file size with this " + "factor as the estimated data size, in case the data is compressed in the file and lead to" + " a heavily underestimated result.") .doubleConf .checkValue(_ > 0, "the value of fileDataSizeFactor must be larger than 0") .createWithDefault(1.0) val PARQUET_SCHEMA_MERGING_ENABLED = buildConf("spark.sql.parquet.mergeSchema") .doc("When true, the Parquet data source merges schemas collected from all data files, " + "otherwise the schema is picked from the summary file or a random data file " + "if no summary file is available.") .booleanConf .createWithDefault(false) val PARQUET_SCHEMA_RESPECT_SUMMARIES = buildConf("spark.sql.parquet.respectSummaryFiles") .doc("When true, we make assumption that all part-files of Parquet are consistent with " + "summary files and we will ignore them when merging schema. Otherwise, if this is " + "false, which is the default, we will merge all part-files. 
This should be considered " + "as expert-only option, and shouldn't be enabled before knowing what it means exactly.") .booleanConf .createWithDefault(false) val PARQUET_BINARY_AS_STRING = buildConf("spark.sql.parquet.binaryAsString") .doc("Some other Parquet-producing systems, in particular Impala and older versions of " + "Spark SQL, do not differentiate between binary data and strings when writing out the " + "Parquet schema. This flag tells Spark SQL to interpret binary data as a string to provide " + "compatibility with these systems.") .booleanConf .createWithDefault(false) val PARQUET_INT96_AS_TIMESTAMP = buildConf("spark.sql.parquet.int96AsTimestamp") .doc("Some Parquet-producing systems, in particular Impala, store Timestamp into INT96. " + "Spark would also store Timestamp as INT96 because we need to avoid precision lost of the " + "nanoseconds field. This flag tells Spark SQL to interpret INT96 data as a timestamp to " + "provide compatibility with these systems.") .booleanConf .createWithDefault(true) val PARQUET_INT96_TIMESTAMP_CONVERSION = buildConf("spark.sql.parquet.int96TimestampConversion") .doc("This controls whether timestamp adjustments should be applied to INT96 data when " + "converting to timestamps, for data written by Impala. This is necessary because Impala " + "stores INT96 data with a different timezone offset than Hive & Spark.") .booleanConf .createWithDefault(false) object ParquetOutputTimestampType extends Enumeration { val INT96, TIMESTAMP_MICROS, TIMESTAMP_MILLIS = Value } val PARQUET_OUTPUT_TIMESTAMP_TYPE = buildConf("spark.sql.parquet.outputTimestampType") .doc("Sets which Parquet timestamp type to use when Spark writes data to Parquet files. " + "INT96 is a non-standard but commonly used timestamp type in Parquet. TIMESTAMP_MICROS " + "is a standard timestamp type in Parquet, which stores number of microseconds from the " + "Unix epoch. 
TIMESTAMP_MILLIS is also standard, but with millisecond precision, which " + "means Spark has to truncate the microsecond portion of its timestamp value.") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(ParquetOutputTimestampType.values.map(_.toString)) .createWithDefault(ParquetOutputTimestampType.INT96.toString) val PARQUET_INT64_AS_TIMESTAMP_MILLIS = buildConf("spark.sql.parquet.int64AsTimestampMillis") .doc(s"(Deprecated since Spark 2.3, please set ${PARQUET_OUTPUT_TIMESTAMP_TYPE.key}.) " + "When true, timestamp values will be stored as INT64 with TIMESTAMP_MILLIS as the " + "extended type. In this mode, the microsecond portion of the timestamp value will be" + "truncated.") .booleanConf .createWithDefault(false) val PARQUET_COMPRESSION = buildConf("spark.sql.parquet.compression.codec") .doc("Sets the compression codec used when writing Parquet files. If either `compression` or " + "`parquet.compression` is specified in the table-specific options/properties, the " + "precedence would be `compression`, `parquet.compression`, " + "`spark.sql.parquet.compression.codec`. Acceptable values include: none, uncompressed, " + "snappy, gzip, lzo.") .stringConf .transform(_.toLowerCase(Locale.ROOT)) .checkValues(Set("none", "uncompressed", "snappy", "gzip", "lzo", "lz4", "brotli", "zstd")) .createWithDefault("snappy") val PARQUET_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.parquet.filterPushdown") .doc("Enables Parquet filter push-down optimization when set to true.") .booleanConf .createWithDefault(true) val PARQUET_FILTER_PUSHDOWN_DATE_ENABLED = buildConf("spark.sql.parquet.filterPushdown.date") .doc("If true, enables Parquet filter push-down optimization for Date. 
" + "This configuration only has an effect when 'spark.sql.parquet.filterPushdown' is enabled.") .internal() .booleanConf .createWithDefault(true) val PARQUET_WRITE_LEGACY_FORMAT = buildConf("spark.sql.parquet.writeLegacyFormat") .doc("Whether to be compatible with the legacy Parquet format adopted by Spark 1.4 and prior " + "versions, when converting Parquet schema to Spark SQL schema and vice versa.") .booleanConf .createWithDefault(false) val PARQUET_RECORD_FILTER_ENABLED = buildConf("spark.sql.parquet.recordLevelFilter.enabled") .doc("If true, enables Parquet's native record-level filtering using the pushed down " + "filters. This configuration only has an effect when 'spark.sql.parquet.filterPushdown' " + "is enabled.") .booleanConf .createWithDefault(false) val PARQUET_OUTPUT_COMMITTER_CLASS = buildConf("spark.sql.parquet.output.committer.class") .doc("The output committer class used by Parquet. The specified class needs to be a " + "subclass of org.apache.hadoop.mapreduce.OutputCommitter. Typically, it's also a subclass " + "of org.apache.parquet.hadoop.ParquetOutputCommitter. If it is not, then metadata summaries" + "will never be created, irrespective of the value of parquet.enable.summary-metadata") .internal() .stringConf .createWithDefault("org.apache.parquet.hadoop.ParquetOutputCommitter") val PARQUET_VECTORIZED_READER_ENABLED = buildConf("spark.sql.parquet.enableVectorizedReader") .doc("Enables vectorized parquet decoding.") .booleanConf .createWithDefault(true) val PARQUET_VECTORIZED_READER_BATCH_SIZE = buildConf("spark.sql.parquet.columnarReaderBatchSize") .doc("The number of rows to include in a parquet vectorized reader batch. The number should " + "be carefully chosen to minimize overhead and avoid OOMs in reading data.") .intConf .createWithDefault(4096) val ORC_COMPRESSION = buildConf("spark.sql.orc.compression.codec") .doc("Sets the compression codec used when writing ORC files. 
If either `compression` or " + "`orc.compress` is specified in the table-specific options/properties, the precedence " + "would be `compression`, `orc.compress`, `spark.sql.orc.compression.codec`." + "Acceptable values include: none, uncompressed, snappy, zlib, lzo.") .stringConf .transform(_.toLowerCase(Locale.ROOT)) .checkValues(Set("none", "uncompressed", "snappy", "zlib", "lzo")) .createWithDefault("snappy") val ORC_IMPLEMENTATION = buildConf("spark.sql.orc.impl") .doc("When native, use the native version of ORC support instead of the ORC library in Hive " + "1.2.1. It is 'hive' by default prior to Spark 2.4.") .internal() .stringConf .checkValues(Set("hive", "native")) .createWithDefault("native") val ORC_VECTORIZED_READER_ENABLED = buildConf("spark.sql.orc.enableVectorizedReader") .doc("Enables vectorized orc decoding.") .booleanConf .createWithDefault(true) val ORC_VECTORIZED_READER_BATCH_SIZE = buildConf("spark.sql.orc.columnarReaderBatchSize") .doc("The number of rows to include in a orc vectorized reader batch. The number should " + "be carefully chosen to minimize overhead and avoid OOMs in reading data.") .intConf .createWithDefault(4096) val ORC_COPY_BATCH_TO_SPARK = buildConf("spark.sql.orc.copyBatchToSpark") .doc("Whether or not to copy the ORC columnar batch to Spark columnar batch in the " + "vectorized ORC reader.") .internal() .booleanConf .createWithDefault(false) val ORC_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.orc.filterPushdown") .doc("When true, enable filter pushdown for ORC files.") .booleanConf .createWithDefault(true) val HIVE_VERIFY_PARTITION_PATH = buildConf("spark.sql.hive.verifyPartitionPath") .doc("When true, check all the partition paths under the table\'s root directory " + "when reading data stored in HDFS. 
This configuration will be deprecated in the future " + "releases and replaced by spark.files.ignoreMissingFiles.") .booleanConf .createWithDefault(false) val HIVE_METASTORE_PARTITION_PRUNING = buildConf("spark.sql.hive.metastorePartitionPruning") .doc("When true, some predicates will be pushed down into the Hive metastore so that " + "unmatching partitions can be eliminated earlier. This only affects Hive tables " + "not converted to filesource relations (see HiveUtils.CONVERT_METASTORE_PARQUET and " + "HiveUtils.CONVERT_METASTORE_ORC for more information).") .booleanConf .createWithDefault(true) val HIVE_MANAGE_FILESOURCE_PARTITIONS = buildConf("spark.sql.hive.manageFilesourcePartitions") .doc("When true, enable metastore partition management for file source tables as well. " + "This includes both datasource and converted Hive tables. When partition management " + "is enabled, datasource tables store partition in the Hive metastore, and use the " + "metastore to prune partitions during query planning.") .booleanConf .createWithDefault(true) val HIVE_FILESOURCE_PARTITION_FILE_CACHE_SIZE = buildConf("spark.sql.hive.filesourcePartitionFileCacheSize") .doc("When nonzero, enable caching of partition file metadata in memory. All tables share " + "a cache that can use up to specified num bytes for file metadata. This conf only " + "has an effect when hive filesource partition management is enabled.") .longConf .createWithDefault(250 * 1024 * 1024) object HiveCaseSensitiveInferenceMode extends Enumeration { val INFER_AND_SAVE, INFER_ONLY, NEVER_INFER = Value } val HIVE_CASE_SENSITIVE_INFERENCE = buildConf("spark.sql.hive.caseSensitiveInferenceMode") .doc("Sets the action to take when a case-sensitive schema cannot be read from a Hive " + "table's properties. Although Spark SQL itself is not case-sensitive, Hive compatible file " + "formats such as Parquet are. 
Spark SQL must use a case-preserving schema when querying " + "any table backed by files containing case-sensitive field names or queries may not return " + "accurate results. Valid options include INFER_AND_SAVE (the default mode-- infer the " + "case-sensitive schema from the underlying data files and write it back to the table " + "properties), INFER_ONLY (infer the schema but don't attempt to write it to the table " + "properties) and NEVER_INFER (fallback to using the case-insensitive metastore schema " + "instead of inferring).") .stringConf .transform(_.toUpperCase(Locale.ROOT)) .checkValues(HiveCaseSensitiveInferenceMode.values.map(_.toString)) .createWithDefault(HiveCaseSensitiveInferenceMode.INFER_AND_SAVE.toString) val TYPECOERCION_COMPARE_DATE_TIMESTAMP_IN_TIMESTAMP = buildConf("spark.sql.typeCoercion.compareDateTimestampInTimestamp") .internal() .doc("When true (default), compare Date with Timestamp after converting both sides to " + "Timestamp. This behavior is compatible with Hive 2.2 or later. See HIVE-15236. " + "When false, restore the behavior prior to Spark 2.4. Compare Date with Timestamp after " + "converting both sides to string. This config will be removed in spark 3.0") .booleanConf .createWithDefault(true) val OPTIMIZER_METADATA_ONLY = buildConf("spark.sql.optimizer.metadataOnly") .doc("When true, enable the metadata-only query optimization that use the table's metadata " + "to produce the partition columns instead of table scans. 
It applies when all the columns " + "scanned are partition columns and the query has an aggregate operator that satisfies " + "distinct semantics.") .booleanConf .createWithDefault(true) val COLUMN_NAME_OF_CORRUPT_RECORD = buildConf("spark.sql.columnNameOfCorruptRecord") .doc("The name of internal column for storing raw/un-parsed JSON and CSV records that fail " + "to parse.") .stringConf .createWithDefault("_corrupt_record") val FROM_JSON_FORCE_NULLABLE_SCHEMA = buildConf("spark.sql.fromJsonForceNullableSchema") .internal() .doc("When true, force the output schema of the from_json() function to be nullable " + "(including all the fields). Otherwise, the schema might not be compatible with" + "actual data, which leads to curruptions.") .booleanConf .createWithDefault(true) val BROADCAST_TIMEOUT = buildConf("spark.sql.broadcastTimeout") .doc("Timeout in seconds for the broadcast wait time in broadcast joins.") .timeConf(TimeUnit.SECONDS) .createWithDefault(5 * 60) // This is only used for the thriftserver val THRIFTSERVER_POOL = buildConf("spark.sql.thriftserver.scheduler.pool") .doc("Set a Fair Scheduler pool for a JDBC client session.") .stringConf .createOptional val THRIFTSERVER_INCREMENTAL_COLLECT = buildConf("spark.sql.thriftServer.incrementalCollect") .internal() .doc("When true, enable incremental collection for execution in Thrift Server.") .booleanConf .createWithDefault(false) val THRIFTSERVER_UI_STATEMENT_LIMIT = buildConf("spark.sql.thriftserver.ui.retainedStatements") .doc("The number of SQL statements kept in the JDBC/ODBC web UI history.") .intConf .createWithDefault(200) val THRIFTSERVER_UI_SESSION_LIMIT = buildConf("spark.sql.thriftserver.ui.retainedSessions") .doc("The number of SQL client sessions kept in the JDBC/ODBC web UI history.") .intConf .createWithDefault(200) // This is used to set the default data source val DEFAULT_DATA_SOURCE_NAME = buildConf("spark.sql.sources.default") .doc("The default data source to use in input/output.") 
.stringConf
    .createWithDefault("parquet")

  val CONVERT_CTAS = buildConf("spark.sql.hive.convertCTAS")
    .internal()
    .doc("When true, a table created by a Hive CTAS statement (no USING clause) " +
      "without specifying any storage property will be converted to a data source table, " +
      "using the data source set by spark.sql.sources.default.")
    .booleanConf
    .createWithDefault(false)

  val GATHER_FASTSTAT = buildConf("spark.sql.hive.gatherFastStats")
    .internal()
    .doc("When true, fast stats (number of files and total size of all files) will be gathered" +
      " in parallel while repairing table partitions to avoid the sequential listing in Hive" +
      " metastore.")
    .booleanConf
    .createWithDefault(true)

  val PARTITION_COLUMN_TYPE_INFERENCE =
    buildConf("spark.sql.sources.partitionColumnTypeInference.enabled")
      .doc("When true, automatically infer the data types for partitioned columns.")
      .booleanConf
      .createWithDefault(true)

  val BUCKETING_ENABLED = buildConf("spark.sql.sources.bucketing.enabled")
    .doc("When false, we will treat bucketed table as normal table")
    .booleanConf
    .createWithDefault(true)

  val CROSS_JOINS_ENABLED = buildConf("spark.sql.crossJoin.enabled")
    .doc("When false, we will throw an error if a query contains a cartesian product without " +
      "explicit CROSS JOIN syntax.")
    .booleanConf
    .createWithDefault(false)

  // Ordinal resolution in ORDER BY / GROUP BY clauses.
  val ORDER_BY_ORDINAL = buildConf("spark.sql.orderByOrdinal")
    .doc("When true, the ordinal numbers are treated as the position in the select list. " +
      "When false, the ordinal numbers in order/sort by clause are ignored.")
    .booleanConf
    .createWithDefault(true)

  val GROUP_BY_ORDINAL = buildConf("spark.sql.groupByOrdinal")
    .doc("When true, the ordinal numbers in group by clauses are treated as the position " +
      "in the select list. When false, the ordinal numbers are ignored.")
    .booleanConf
    .createWithDefault(true)

  val GROUP_BY_ALIASES = buildConf("spark.sql.groupByAliases")
    .doc("When true, aliases in a select list can be used in group by clauses. When false, " +
      "an analysis exception is thrown in the case.")
    .booleanConf
    .createWithDefault(true)

  // The output committer class used by data sources. The specified class needs to be a
  // subclass of org.apache.hadoop.mapreduce.OutputCommitter.
  val OUTPUT_COMMITTER_CLASS = buildConf("spark.sql.sources.outputCommitterClass")
    .internal()
    .stringConf
    .createOptional

  val FILE_COMMIT_PROTOCOL_CLASS = buildConf("spark.sql.sources.commitProtocolClass")
    .internal()
    .stringConf
    .createWithDefault(
      "org.apache.spark.sql.execution.datasources.SQLHadoopMapReduceCommitProtocol")

  val PARALLEL_PARTITION_DISCOVERY_THRESHOLD =
    buildConf("spark.sql.sources.parallelPartitionDiscovery.threshold")
      .doc("The maximum number of paths allowed for listing files at driver side. If the number " +
        "of detected paths exceeds this value during partition discovery, it tries to list the " +
        "files with another Spark distributed job. This applies to Parquet, ORC, CSV, JSON and " +
        "LibSVM data sources.")
      .intConf
      .checkValue(parallel => parallel >= 0, "The maximum number of paths allowed for listing " +
        "files at driver side must not be negative")
      .createWithDefault(32)

  val PARALLEL_PARTITION_DISCOVERY_PARALLELISM =
    buildConf("spark.sql.sources.parallelPartitionDiscovery.parallelism")
      .doc("The number of parallelism to list a collection of path recursively, Set the " +
        "number to prevent file listing from generating too many tasks.")
      .internal()
      .intConf
      .createWithDefault(10000)

  // Whether to automatically resolve ambiguity in join conditions for self-joins.
  // See SPARK-6231.
  val DATAFRAME_SELF_JOIN_AUTO_RESOLVE_AMBIGUITY =
    buildConf("spark.sql.selfJoinAutoResolveAmbiguity")
      .internal()
      .booleanConf
      .createWithDefault(true)

  // Whether to retain group by columns or not in GroupedData.agg.
  val DATAFRAME_RETAIN_GROUP_COLUMNS = buildConf("spark.sql.retainGroupColumns")
    .internal()
    .booleanConf
    .createWithDefault(true)

  val DATAFRAME_PIVOT_MAX_VALUES = buildConf("spark.sql.pivotMaxValues")
    .doc("When doing a pivot without specifying values for the pivot column this is the maximum " +
      "number of (distinct) values that will be collected without error.")
    .intConf
    .createWithDefault(10000)

  val RUN_SQL_ON_FILES = buildConf("spark.sql.runSQLOnFiles")
    .internal()
    .doc("When true, we could use `datasource`.`path` as table in SQL query.")
    .booleanConf
    .createWithDefault(true)

  // ---- whole-stage codegen ----
  val WHOLESTAGE_CODEGEN_ENABLED = buildConf("spark.sql.codegen.wholeStage")
    .internal()
    .doc("When true, the whole stage (of multiple operators) will be compiled into single java" +
      " method.")
    .booleanConf
    .createWithDefault(true)

  val WHOLESTAGE_CODEGEN_USE_ID_IN_CLASS_NAME =
    buildConf("spark.sql.codegen.useIdInClassName")
      .internal()
      .doc("When true, embed the (whole-stage) codegen stage ID into " +
        "the class name of the generated class as a suffix")
      .booleanConf
      .createWithDefault(true)

  val WHOLESTAGE_MAX_NUM_FIELDS = buildConf("spark.sql.codegen.maxFields")
    .internal()
    .doc("The maximum number of fields (including nested fields) that will be supported before" +
      " deactivating whole-stage codegen.")
    .intConf
    .createWithDefault(100)

  val CODEGEN_FALLBACK = buildConf("spark.sql.codegen.fallback")
    .internal()
    .doc("When true, (whole stage) codegen could be temporary disabled for the part of query that" +
      " fail to compile generated code")
    .booleanConf
    .createWithDefault(true)

  val CODEGEN_LOGGING_MAX_LINES = buildConf("spark.sql.codegen.logging.maxLines")
    .internal()
    .doc("The maximum number of codegen lines to log when errors occur. Use -1 for unlimited.")
    .intConf
    .checkValue(maxLines => maxLines >= -1, "The maximum must be a positive integer, 0 to " +
      "disable logging or -1 to apply no limit.")
    .createWithDefault(1000)

  val WHOLESTAGE_HUGE_METHOD_LIMIT = buildConf("spark.sql.codegen.hugeMethodLimit")
    .internal()
    .doc("The maximum bytecode size of a single compiled Java function generated by whole-stage " +
      "codegen. When the compiled function exceeds this threshold, the whole-stage codegen is " +
      "deactivated for this subtree of the current query plan. The default value is 65535, which " +
      "is the largest bytecode size possible for a valid Java method. When running on HotSpot, " +
      s"it may be preferable to set the value to ${CodeGenerator.DEFAULT_JVM_HUGE_METHOD_LIMIT} " +
      "to match HotSpot's implementation.")
    .intConf
    .createWithDefault(65535)

  val WHOLESTAGE_SPLIT_CONSUME_FUNC_BY_OPERATOR =
    buildConf("spark.sql.codegen.splitConsumeFuncByOperator")
      .internal()
      .doc("When true, whole stage codegen would put the logic of consuming rows of each " +
        "physical operator into individual methods, instead of a single big method. This can be " +
        "used to avoid oversized function that can miss the opportunity of JIT optimization.")
      .booleanConf
      .createWithDefault(true)

  // ---- file-based source tuning ----
  val FILES_MAX_PARTITION_BYTES = buildConf("spark.sql.files.maxPartitionBytes")
    .doc("The maximum number of bytes to pack into a single partition when reading files.")
    .longConf
    .createWithDefault(128 * 1024 * 1024) // parquet.block.size

  val FILES_OPEN_COST_IN_BYTES = buildConf("spark.sql.files.openCostInBytes")
    .internal()
    .doc("The estimated cost to open a file, measured by the number of bytes could be scanned in" +
      " the same time. This is used when putting multiple files into a partition. 
It's better to" +
      // FIX: doc previously read "over estimated", which is not grammatical ("to over
      // estimated"); reworded to "over-estimate".
      " over-estimate, then the partitions with small files will be faster than partitions with" +
      " bigger files (which is scheduled first).")
    .longConf
    .createWithDefault(4 * 1024 * 1024)

  val IGNORE_CORRUPT_FILES = buildConf("spark.sql.files.ignoreCorruptFiles")
    .doc("Whether to ignore corrupt files. If true, the Spark jobs will continue to run when " +
      "encountering corrupted files and the contents that have been read will still be returned.")
    .booleanConf
    .createWithDefault(false)

  val IGNORE_MISSING_FILES = buildConf("spark.sql.files.ignoreMissingFiles")
    .doc("Whether to ignore missing files. If true, the Spark jobs will continue to run when " +
      "encountering missing files and the contents that have been read will still be returned.")
    .booleanConf
    .createWithDefault(false)

  val MAX_RECORDS_PER_FILE = buildConf("spark.sql.files.maxRecordsPerFile")
    .doc("Maximum number of records to write out to a single file. " +
      "If this value is zero or negative, there is no limit.")
    .longConf
    .createWithDefault(0)

  val EXCHANGE_REUSE_ENABLED = buildConf("spark.sql.exchange.reuse")
    .internal()
    .doc("When true, the planner will try to find out duplicated exchanges and re-use them.")
    .booleanConf
    .createWithDefault(true)

  // ---- structured streaming state store ----
  val STATE_STORE_PROVIDER_CLASS =
    buildConf("spark.sql.streaming.stateStore.providerClass")
      .internal()
      .doc(
        "The class used to manage state data in stateful streaming queries. 
This class must " +
        "be a subclass of StateStoreProvider, and must have a zero-arg constructor.")
      .stringConf
      .createWithDefault(
        "org.org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider"
          .stripPrefix("org."))

  val STATE_STORE_MIN_DELTAS_FOR_SNAPSHOT =
    buildConf("spark.sql.streaming.stateStore.minDeltasForSnapshot")
      .internal()
      // FIX: doc previously read "before they consolidated" (missing "are").
      .doc("Minimum number of state store delta files that needs to be generated before they " +
        "are consolidated into snapshots.")
      .intConf
      .createWithDefault(10)

  val CHECKPOINT_LOCATION = buildConf("spark.sql.streaming.checkpointLocation")
    .doc("The default location for storing checkpoint data for streaming queries.")
    .stringConf
    .createOptional

  val MIN_BATCHES_TO_RETAIN = buildConf("spark.sql.streaming.minBatchesToRetain")
    .internal()
    .doc("The minimum number of batches that must be retained and made recoverable.")
    .intConf
    .createWithDefault(100)

  val UNSUPPORTED_OPERATION_CHECK_ENABLED =
    buildConf("spark.sql.streaming.unsupportedOperationCheck")
      .internal()
      .doc("When true, the logical plan for streaming query will be checked for unsupported" +
        " operations.")
      .booleanConf
      .createWithDefault(true)

  val VARIABLE_SUBSTITUTE_ENABLED = buildConf("spark.sql.variable.substitute")
    .doc("This enables substitution using syntax like ${var} ${system:var} and ${env:var}.")
    .booleanConf
    .createWithDefault(true)

  val VARIABLE_SUBSTITUTE_DEPTH = buildConf("spark.sql.variable.substitute.depth")
    .internal()
    .doc("Deprecated: The maximum replacements the substitution engine will do.")
    .intConf
    .createWithDefault(40)

  val ENABLE_TWOLEVEL_AGG_MAP = buildConf("spark.sql.codegen.aggregate.map.twolevel.enabled")
    .internal()
    .doc("Enable two-level aggregate hash map. When enabled, records will first be " +
      "inserted/looked-up at a 1st-level, small, fast map, and then fallback to a " +
      "2nd-level, larger, slower map when 1st level is full or keys cannot be found. " +
      "When disabled, records go directly to the 2nd level. 
Defaults to true.")
    .booleanConf
    .createWithDefault(true)

  val MAX_NESTED_VIEW_DEPTH = buildConf("spark.sql.view.maxNestedViewDepth")
    .internal()
    .doc("The maximum depth of a view reference in a nested view. A nested view may reference " +
      "other nested views, the dependencies are organized in a directed acyclic graph (DAG). " +
      "However the DAG depth may become too large and cause unexpected behavior. This " +
      "configuration puts a limit on this: when the depth of a view exceeds this value during " +
      "analysis, we terminate the resolution to avoid potential errors.")
    .intConf
    .checkValue(depth => depth > 0, "The maximum depth of a view reference in a nested view " +
      "must be positive.")
    .createWithDefault(100)

  val STREAMING_FILE_COMMIT_PROTOCOL_CLASS =
    buildConf("spark.sql.streaming.commitProtocolClass")
      .internal()
      .stringConf
      .createWithDefault("org.apache.spark.sql.execution.streaming.ManifestFileCommitProtocol")

  val OBJECT_AGG_SORT_BASED_FALLBACK_THRESHOLD =
    buildConf("spark.sql.objectHashAggregate.sortBased.fallbackThreshold")
      .internal()
      .doc("In the case of ObjectHashAggregateExec, when the size of the in-memory hash map " +
        "grows too large, we will fall back to sort-based aggregation. This option sets a row " +
        "count threshold for the size of the hash map.")
      .intConf
      // We are trying to be conservative and use a relatively small default count threshold here
      // since the state object of some TypedImperativeAggregate function can be quite large (e.g.
      // percentile_approx).
      .createWithDefault(128)

  val USE_OBJECT_HASH_AGG = buildConf("spark.sql.execution.useObjectHashAggregateExec")
    .internal()
    .doc("Decides if we use ObjectHashAggregateExec")
    .booleanConf
    .createWithDefault(true)

  // ---- file stream sink/source metadata log maintenance ----
  val FILE_SINK_LOG_DELETION = buildConf("spark.sql.streaming.fileSink.log.deletion")
    .internal()
    .doc("Whether to delete the expired log files in file stream sink.")
    .booleanConf
    .createWithDefault(true)

  val FILE_SINK_LOG_COMPACT_INTERVAL =
    buildConf("spark.sql.streaming.fileSink.log.compactInterval")
      .internal()
      .doc("Number of log files after which all the previous files " +
        "are compacted into the next log file.")
      .intConf
      .createWithDefault(10)

  val FILE_SINK_LOG_CLEANUP_DELAY =
    buildConf("spark.sql.streaming.fileSink.log.cleanupDelay")
      .internal()
      .doc("How long that a file is guaranteed to be visible for all readers.")
      .timeConf(TimeUnit.MILLISECONDS)
      .createWithDefault(TimeUnit.MINUTES.toMillis(10)) // 10 minutes

  val FILE_SOURCE_LOG_DELETION = buildConf("spark.sql.streaming.fileSource.log.deletion")
    .internal()
    .doc("Whether to delete the expired log files in file stream source.")
    .booleanConf
    .createWithDefault(true)

  val FILE_SOURCE_LOG_COMPACT_INTERVAL =
    buildConf("spark.sql.streaming.fileSource.log.compactInterval")
      .internal()
      .doc("Number of log files after which all the previous files " +
        "are compacted into the next log file.")
      .intConf
      .createWithDefault(10)

  val FILE_SOURCE_LOG_CLEANUP_DELAY =
    buildConf("spark.sql.streaming.fileSource.log.cleanupDelay")
      .internal()
      .doc("How long in milliseconds a file is guaranteed to be visible for all readers.")
      .timeConf(TimeUnit.MILLISECONDS)
      .createWithDefault(TimeUnit.MINUTES.toMillis(10)) // 10 minutes

  val STREAMING_SCHEMA_INFERENCE = buildConf("spark.sql.streaming.schemaInference")
    .internal()
    .doc("Whether file-based streaming sources will infer its own schema")
    .booleanConf
    .createWithDefault(false)

  val STREAMING_POLLING_DELAY = buildConf("spark.sql.streaming.pollingDelay")
    .internal()
    .doc("How long to delay polling new data when no data is available")
    .timeConf(TimeUnit.MILLISECONDS)
    .createWithDefault(10L)

  val STREAMING_NO_DATA_PROGRESS_EVENT_INTERVAL =
    buildConf("spark.sql.streaming.noDataProgressEventInterval")
      .internal()
      .doc("How long to wait between two progress events when there is no data")
      .timeConf(TimeUnit.MILLISECONDS)
      .createWithDefault(10000L)

  val STREAMING_NO_DATA_MICRO_BATCHES_ENABLED =
    buildConf("spark.sql.streaming.noDataMicroBatchesEnabled")
      .doc(
        "Whether streaming micro-batch engine will execute batches without data " +
        "for eager state management for stateful streaming queries.")
      .booleanConf
      .createWithDefault(true)

  val STREAMING_METRICS_ENABLED = buildConf("spark.sql.streaming.metricsEnabled")
    .doc("Whether Dropwizard/Codahale metrics will be reported for active streaming queries.")
    .booleanConf
    .createWithDefault(false)

  val STREAMING_PROGRESS_RETENTION =
    buildConf("spark.sql.streaming.numRecentProgressUpdates")
      .doc("The number of progress updates to retain for a streaming query")
      .intConf
      .createWithDefault(100)

  // NOTE(review): this val ends at .stringConf with no .create* call, so it is a
  // TypedConfigBuilder rather than a ConfigEntry — confirm this is intentional at its use site.
  val STREAMING_CHECKPOINT_FILE_MANAGER_CLASS =
    buildConf("spark.sql.streaming.checkpointFileManagerClass")
      .doc("The class used to write checkpoint files atomically. This class must be a subclass " +
        "of the interface CheckpointFileManager.")
      .internal()
      .stringConf

  // ---- statistics / CBO ----
  val NDV_MAX_ERROR = buildConf("spark.sql.statistics.ndv.maxError")
    .internal()
    .doc("The maximum estimation error allowed in HyperLogLog++ algorithm when generating " +
      "column level statistics.")
    .doubleConf
    .createWithDefault(0.05)

  val HISTOGRAM_ENABLED = buildConf("spark.sql.statistics.histogram.enabled")
    .doc("Generates histograms when computing column statistics if enabled. Histograms can " +
      "provide better estimation accuracy. Currently, Spark only supports equi-height " +
      "histogram. Note that collecting histograms takes extra cost. For example, collecting " +
      "column statistics usually takes only one table scan, but generating equi-height " +
      "histogram will cause an extra table scan.")
    .booleanConf
    .createWithDefault(false)

  val HISTOGRAM_NUM_BINS = buildConf("spark.sql.statistics.histogram.numBins")
    .internal()
    .doc("The number of bins when generating histograms.")
    .intConf
    .checkValue(num => num > 1, "The number of bins must be larger than 1.")
    .createWithDefault(254)

  val PERCENTILE_ACCURACY = buildConf("spark.sql.statistics.percentile.accuracy")
    .internal()
    .doc("Accuracy of percentile approximation when generating equi-height histograms. " +
      "Larger value means better accuracy. The relative error can be deduced by " +
      "1.0 / PERCENTILE_ACCURACY.")
    .intConf
    .createWithDefault(10000)

  val AUTO_SIZE_UPDATE_ENABLED =
    buildConf("spark.sql.statistics.size.autoUpdate.enabled")
      .doc("Enables automatic update for table size once table's data is changed. Note that if " +
        "the total number of files of the table is very large, this can be expensive and slow " +
        "down data change commands.")
      .booleanConf
      .createWithDefault(false)

  val CBO_ENABLED = buildConf("spark.sql.cbo.enabled")
    .doc("Enables CBO for estimation of plan statistics when set true.")
    .booleanConf
    .createWithDefault(false)

  val JOIN_REORDER_ENABLED = buildConf("spark.sql.cbo.joinReorder.enabled")
    .doc("Enables join reorder in CBO.")
    .booleanConf
    .createWithDefault(false)

  val JOIN_REORDER_DP_THRESHOLD = buildConf("spark.sql.cbo.joinReorder.dp.threshold")
    .doc("The maximum number of joined nodes allowed in the dynamic programming algorithm.")
    .intConf
    .checkValue(number => number > 0, "The maximum number must be a positive integer.")
    .createWithDefault(12)

  val JOIN_REORDER_CARD_WEIGHT = buildConf("spark.sql.cbo.joinReorder.card.weight")
    .internal()
    .doc("The weight of cardinality (number of rows) for plan cost comparison in join reorder: " +
      "rows * weight + size * (1 - weight).")
    .doubleConf
    .checkValue(weight => weight >= 0 && weight <= 1, "The weight value must be in [0, 1].")
    .createWithDefault(0.7)

  val JOIN_REORDER_DP_STAR_FILTER =
    buildConf("spark.sql.cbo.joinReorder.dp.star.filter")
      .doc("Applies star-join filter heuristics to cost based join enumeration.")
      .booleanConf
      .createWithDefault(false)

  val STARSCHEMA_DETECTION = buildConf("spark.sql.cbo.starSchemaDetection")
    .doc("When true, it enables join reordering based on star schema detection. ")
    .booleanConf
    .createWithDefault(false)

  val STARSCHEMA_FACT_TABLE_RATIO = buildConf("spark.sql.cbo.starJoinFTRatio")
    .internal()
    .doc("Specifies the upper limit of the ratio between the largest fact tables" +
      " for a star join to be considered. ")
    .doubleConf
    .createWithDefault(0.9)

  val SESSION_LOCAL_TIMEZONE = buildConf("spark.sql.session.timeZone")
    .doc("""The ID of session local timezone, e.g. "GMT", "America/Los_Angeles", etc.""")
    .stringConf
    .createWithDefaultFunction(() => TimeZone.getDefault.getID)

  // ---- operator spill thresholds ----
  val WINDOW_EXEC_BUFFER_IN_MEMORY_THRESHOLD =
    buildConf("spark.sql.windowExec.buffer.in.memory.threshold")
      .internal()
      .doc("Threshold for number of rows guaranteed to be held in memory by the window operator")
      .intConf
      .createWithDefault(4096)

  val WINDOW_EXEC_BUFFER_SPILL_THRESHOLD =
    buildConf("spark.sql.windowExec.buffer.spill.threshold")
      .internal()
      .doc("Threshold for number of rows to be spilled by window operator")
      .intConf
      .createWithDefault(SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD.defaultValue.get)

  val SORT_MERGE_JOIN_EXEC_BUFFER_IN_MEMORY_THRESHOLD =
    buildConf("spark.sql.sortMergeJoinExec.buffer.in.memory.threshold")
      .internal()
      .doc("Threshold for number of rows guaranteed to be held in memory by the sort merge " +
        "join operator")
      .intConf
      .createWithDefault(Int.MaxValue)

  val SORT_MERGE_JOIN_EXEC_BUFFER_SPILL_THRESHOLD =
    buildConf("spark.sql.sortMergeJoinExec.buffer.spill.threshold")
      .internal()
      .doc("Threshold for number of rows to be spilled by sort merge join operator")
      .intConf
      .createWithDefault(SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD.defaultValue.get)

  val CARTESIAN_PRODUCT_EXEC_BUFFER_IN_MEMORY_THRESHOLD =
    buildConf("spark.sql.cartesianProductExec.buffer.in.memory.threshold")
      .internal()
      .doc("Threshold for number of rows guaranteed to be held in memory by the cartesian " +
        "product operator")
      .intConf
      .createWithDefault(4096)

  val CARTESIAN_PRODUCT_EXEC_BUFFER_SPILL_THRESHOLD =
    buildConf("spark.sql.cartesianProductExec.buffer.spill.threshold")
      .internal()
      .doc("Threshold for number of rows to be spilled by cartesian product operator")
      .intConf
      .createWithDefault(SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD.defaultValue.get)

  val SUPPORT_QUOTED_REGEX_COLUMN_NAME =
    buildConf("spark.sql.parser.quotedRegexColumnNames")
      .doc("When true, quoted Identifiers (using backticks) in SELECT statement are interpreted" +
        " as regular expressions.")
      .booleanConf
      .createWithDefault(false)

  val RANGE_EXCHANGE_SAMPLE_SIZE_PER_PARTITION =
    buildConf("spark.sql.execution.rangeExchange.sampleSizePerPartition")
      .internal()
      .doc("Number of points to sample per partition in order to determine the range boundaries" +
        " for range partitioning, typically used in global sorting (without limit).")
      .intConf
      .createWithDefault(100)

  // ---- Arrow / Pandas interop ----
  val ARROW_EXECUTION_ENABLED = buildConf("spark.sql.execution.arrow.enabled")
    .doc("When true, make use of Apache Arrow for columnar data transfers. Currently available " +
      "for use with pyspark.sql.DataFrame.toPandas, and " +
      "pyspark.sql.SparkSession.createDataFrame when its input is a Pandas DataFrame. " +
      "The following data types are unsupported: " +
      "BinaryType, MapType, ArrayType of TimestampType, and nested StructType.")
    .booleanConf
    .createWithDefault(false)

  val ARROW_FALLBACK_ENABLED = buildConf("spark.sql.execution.arrow.fallback.enabled")
    .doc("When true, optimizations enabled by 'spark.sql.execution.arrow.enabled' will " +
      "fallback automatically to non-optimized implementations if an error occurs.")
    .booleanConf
    .createWithDefault(true)

  val ARROW_EXECUTION_MAX_RECORDS_PER_BATCH =
    buildConf("spark.sql.execution.arrow.maxRecordsPerBatch")
      .doc("When using Apache Arrow, limit the maximum number of records that can be written " +
        "to a single ArrowRecordBatch in memory. If set to zero or negative there is no limit.")
      .intConf
      .createWithDefault(10000)

  val PANDAS_RESPECT_SESSION_LOCAL_TIMEZONE =
    buildConf("spark.sql.execution.pandas.respectSessionTimeZone")
      .internal()
      .doc("When true, make Pandas DataFrame with timestamp type respecting session local " +
        "timezone when converting to/from Pandas DataFrame. This configuration will be " +
        "deprecated in the future releases.")
      .booleanConf
      .createWithDefault(true)

  val REPLACE_EXCEPT_WITH_FILTER =
    buildConf("spark.sql.optimizer.replaceExceptWithFilter")
      .internal()
      .doc("When true, the apply function of the rule verifies whether the right node of the" +
        " except operation is of type Filter or Project followed by Filter. If yes, the rule" +
        " further verifies 1) Excluding the filter operations from the right (as well as the" +
        " left node, if any) on the top, whether both the nodes evaluates to a same result." +
        " 2) The left and right nodes don't contain any SubqueryExpressions. 3) The output" +
        " column names of the left node are distinct. If all the conditions are met, the" +
        " rule will replace the except operation with a Filter by flipping the filter" +
        " condition(s) of the right node.")
      .booleanConf
      .createWithDefault(true)

  val DECIMAL_OPERATIONS_ALLOW_PREC_LOSS =
    buildConf("spark.sql.decimalOperations.allowPrecisionLoss")
      .internal()
      .doc("When true (default), establishing the result type of an arithmetic operation " +
        "happens according to Hive behavior and SQL ANSI 2011 specification, ie. rounding the " +
        "decimal part of the result if an exact representation is not possible. Otherwise, NULL " +
        "is returned in those cases, as previously.")
      .booleanConf
      .createWithDefault(true)

  // Uses ConfigBuilder directly (not buildConf) so it can fall back to the core Spark entry.
  val SQL_STRING_REDACTION_PATTERN =
    ConfigBuilder("spark.sql.redaction.string.regex")
      .doc("Regex to decide which parts of strings produced by Spark contain sensitive " +
        "information. When this regex matches a string part, that string part is replaced by a " +
        "dummy value. This is currently used to redact the output of SQL explain commands. " +
        "When this conf is not set, the value from `spark.redaction.string.regex` is used.")
      .fallbackConf(org.apache.spark.internal.config.STRING_REDACTION_PATTERN)

  val CONCAT_BINARY_AS_STRING = buildConf("spark.sql.function.concatBinaryAsString")
    .doc("When this option is set to false and all inputs are binary, `functions.concat` returns " +
      "an output as binary. Otherwise, it returns as a string. ")
    .booleanConf
    .createWithDefault(false)

  val ELT_OUTPUT_AS_STRING = buildConf("spark.sql.function.eltOutputAsString")
    .doc("When this option is set to false and all inputs are binary, `elt` returns " +
      "an output as binary. Otherwise, it returns as a string. ")
    .booleanConf
    .createWithDefault(false)

  val ALLOW_CREATING_MANAGED_TABLE_USING_NONEMPTY_LOCATION =
    buildConf("spark.sql.allowCreatingManagedTableUsingNonemptyLocation")
      .internal()
      .doc("When this option is set to true, creating managed tables with nonempty location " +
        "is allowed. Otherwise, an analysis exception is thrown. 
")
      .booleanConf
      .createWithDefault(false)

  // ---- continuous processing ----
  val CONTINUOUS_STREAMING_EXECUTOR_QUEUE_SIZE =
    buildConf("spark.sql.streaming.continuous.executorQueueSize")
      .internal()
      .doc("The size (measured in number of rows) of the queue used in continuous execution to" +
        " buffer the results of a ContinuousDataReader.")
      .intConf
      .createWithDefault(1024)

  val CONTINUOUS_STREAMING_EXECUTOR_POLL_INTERVAL_MS =
    buildConf("spark.sql.streaming.continuous.executorPollIntervalMs")
      .internal()
      .doc("The interval at which continuous execution readers will poll to check whether" +
        " the epoch has advanced on the driver.")
      .timeConf(TimeUnit.MILLISECONDS)
      .createWithDefault(100)

  val DISABLED_V2_STREAMING_WRITERS = buildConf("spark.sql.streaming.disabledV2Writers")
    .internal()
    .doc("A comma-separated list of fully qualified data source register class names for which" +
      " StreamWriteSupport is disabled. Writes to these sources will fall back to the V1 Sinks.")
    .stringConf
    .createWithDefault("")

  val DISABLED_V2_STREAMING_MICROBATCH_READERS =
    buildConf("spark.sql.streaming.disabledV2MicroBatchReaders")
      .internal()
      .doc(
        "A comma-separated list of fully qualified data source register class names for which " +
          "MicroBatchReadSupport is disabled. Reads from these sources will fall back to the " +
          "V1 Sources.")
      .stringConf
      .createWithDefault("")

  val REJECT_TIMEZONE_IN_STRING = buildConf("spark.sql.function.rejectTimezoneInString")
    .internal()
    .doc("If true, `to_utc_timestamp` and `from_utc_timestamp` return null if the input string " +
      "contains a timezone part, e.g. `2000-10-10 00:00:00+00:00`.")
    .booleanConf
    .createWithDefault(true)

  // Valid modes for PARTITION_OVERWRITE_MODE below.
  object PartitionOverwriteMode extends Enumeration {
    val STATIC, DYNAMIC = Value
  }

  val PARTITION_OVERWRITE_MODE =
    buildConf("spark.sql.sources.partitionOverwriteMode")
      .doc("When INSERT OVERWRITE a partitioned data source table, we currently support 2 modes: " +
        "static and dynamic. In static mode, Spark deletes all the partitions that match the " +
        "partition specification(e.g. PARTITION(a=1,b)) in the INSERT statement, before " +
        "overwriting. In dynamic mode, Spark doesn't delete partitions ahead, and only overwrite " +
        "those partitions that have data written into it at runtime. By default we use static " +
        "mode to keep the same behavior of Spark prior to 2.3. Note that this config doesn't " +
        "affect Hive serde tables, as they are always overwritten with dynamic mode.")
      .stringConf
      .transform(_.toUpperCase(Locale.ROOT))
      .checkValues(PartitionOverwriteMode.values.map(_.toString))
      .createWithDefault(PartitionOverwriteMode.STATIC.toString)

  val SORT_BEFORE_REPARTITION =
    buildConf("spark.sql.execution.sortBeforeRepartition")
      .internal()
      .doc("When perform a repartition following a shuffle, the output row ordering would be " +
        "nondeterministic. If some downstream stages fail and some tasks of the repartition " +
        "stage retry, these tasks may generate different data, and that can lead to correctness " +
        "issues. Turn on this config to insert a local sort before actually doing repartition " +
        "to generate consistent repartition results. The performance of repartition() may go " +
        "down since we insert extra local sort before it.")
      .booleanConf
      .createWithDefault(true)

  // Deprecated Hadoop property names still accepted for compatibility.
  object Deprecated {
    val MAPRED_REDUCE_TASKS = "mapred.reduce.tasks"
  }

  // Hadoop property names that replaced the deprecated ones above.
  object Replaced {
    val MAPREDUCE_JOB_REDUCES = "mapreduce.job.reduces"
  }
}

/**
 * A class that enables the setting and getting of mutable config parameters/hints.
 *
 * In the presence of a SQLContext, these can be set and queried by passing SET commands
 * into Spark SQL's query functions (i.e. sql()). Otherwise, users of this class can
 * modify the hints by programmatically calling the setters and getters of this class.
 *
 * SQLConf is thread-safe (internally synchronized, so safe to be used in multiple threads).
*/
class SQLConf extends Serializable with Logging {
  import SQLConf._

  /** Only low degree of contention is expected for conf, thus NOT using ConcurrentHashMap. */
  @transient protected[spark] val settings = java.util.Collections.synchronizedMap(
    new java.util.HashMap[String, String]())

  @transient private val reader = new ConfigReader(settings)

  /** ************************ Spark SQL Params/Hints ******************* */

  // The accessors below are thin typed wrappers over getConf for the entries declared in the
  // companion object; they are grouped roughly by feature area.
  def optimizerMaxIterations: Int = getConf(OPTIMIZER_MAX_ITERATIONS)

  def optimizerInSetConversionThreshold: Int = getConf(OPTIMIZER_INSET_CONVERSION_THRESHOLD)

  def stateStoreProviderClass: String = getConf(STATE_STORE_PROVIDER_CLASS)

  def stateStoreMinDeltasForSnapshot: Int = getConf(STATE_STORE_MIN_DELTAS_FOR_SNAPSHOT)

  def checkpointLocation: Option[String] = getConf(CHECKPOINT_LOCATION)

  def isUnsupportedOperationCheckEnabled: Boolean = getConf(UNSUPPORTED_OPERATION_CHECK_ENABLED)

  def streamingFileCommitProtocolClass: String = getConf(STREAMING_FILE_COMMIT_PROTOCOL_CLASS)

  def fileSinkLogDeletion: Boolean = getConf(FILE_SINK_LOG_DELETION)

  def fileSinkLogCompactInterval: Int = getConf(FILE_SINK_LOG_COMPACT_INTERVAL)

  def fileSinkLogCleanupDelay: Long = getConf(FILE_SINK_LOG_CLEANUP_DELAY)

  def fileSourceLogDeletion: Boolean = getConf(FILE_SOURCE_LOG_DELETION)

  def fileSourceLogCompactInterval: Int = getConf(FILE_SOURCE_LOG_COMPACT_INTERVAL)

  def fileSourceLogCleanupDelay: Long = getConf(FILE_SOURCE_LOG_CLEANUP_DELAY)

  def streamingSchemaInference: Boolean = getConf(STREAMING_SCHEMA_INFERENCE)

  def streamingPollingDelay: Long = getConf(STREAMING_POLLING_DELAY)

  def streamingNoDataProgressEventInterval: Long =
    getConf(STREAMING_NO_DATA_PROGRESS_EVENT_INTERVAL)

  def streamingNoDataMicroBatchesEnabled: Boolean =
    getConf(STREAMING_NO_DATA_MICRO_BATCHES_ENABLED)

  def streamingMetricsEnabled: Boolean = getConf(STREAMING_METRICS_ENABLED)

  def streamingProgressRetention: Int = getConf(STREAMING_PROGRESS_RETENTION)

  def filesMaxPartitionBytes: Long = getConf(FILES_MAX_PARTITION_BYTES)

  def filesOpenCostInBytes: Long = getConf(FILES_OPEN_COST_IN_BYTES)

  def ignoreCorruptFiles: Boolean = getConf(IGNORE_CORRUPT_FILES)

  def ignoreMissingFiles: Boolean = getConf(IGNORE_MISSING_FILES)

  def maxRecordsPerFile: Long = getConf(MAX_RECORDS_PER_FILE)

  def useCompression: Boolean = getConf(COMPRESS_CACHED)

  def orcCompressionCodec: String = getConf(ORC_COMPRESSION)

  def orcVectorizedReaderEnabled: Boolean = getConf(ORC_VECTORIZED_READER_ENABLED)

  def orcVectorizedReaderBatchSize: Int = getConf(ORC_VECTORIZED_READER_BATCH_SIZE)

  def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION)

  def parquetVectorizedReaderEnabled: Boolean = getConf(PARQUET_VECTORIZED_READER_ENABLED)

  def parquetVectorizedReaderBatchSize: Int = getConf(PARQUET_VECTORIZED_READER_BATCH_SIZE)

  def columnBatchSize: Int = getConf(COLUMN_BATCH_SIZE)

  def cacheVectorizedReaderEnabled: Boolean = getConf(CACHE_VECTORIZED_READER_ENABLED)

  def numShufflePartitions: Int = getConf(SHUFFLE_PARTITIONS)

  def targetPostShuffleInputSize: Long = getConf(SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE)

  def adaptiveExecutionEnabled: Boolean = getConf(ADAPTIVE_EXECUTION_ENABLED)

  def minNumPostShufflePartitions: Int = getConf(SHUFFLE_MIN_NUM_POSTSHUFFLE_PARTITIONS)

  def minBatchesToRetain: Int = getConf(MIN_BATCHES_TO_RETAIN)

  def parquetFilterPushDown: Boolean = getConf(PARQUET_FILTER_PUSHDOWN_ENABLED)

  def parquetFilterPushDownDate: Boolean = getConf(PARQUET_FILTER_PUSHDOWN_DATE_ENABLED)

  def orcFilterPushDown: Boolean = getConf(ORC_FILTER_PUSHDOWN_ENABLED)

  def verifyPartitionPath: Boolean = getConf(HIVE_VERIFY_PARTITION_PATH)

  def metastorePartitionPruning: Boolean = getConf(HIVE_METASTORE_PARTITION_PRUNING)

  def manageFilesourcePartitions: Boolean = getConf(HIVE_MANAGE_FILESOURCE_PARTITIONS)

  def filesourcePartitionFileCacheSize: Long = getConf(HIVE_FILESOURCE_PARTITION_FILE_CACHE_SIZE)

  def caseSensitiveInferenceMode: HiveCaseSensitiveInferenceMode.Value =
    HiveCaseSensitiveInferenceMode.withName(getConf(HIVE_CASE_SENSITIVE_INFERENCE))

  def compareDateTimestampInTimestamp : Boolean =
    getConf(TYPECOERCION_COMPARE_DATE_TIMESTAMP_IN_TIMESTAMP)

  def gatherFastStats: Boolean = getConf(GATHER_FASTSTAT)

  def optimizerMetadataOnly: Boolean = getConf(OPTIMIZER_METADATA_ONLY)

  def wholeStageEnabled: Boolean = getConf(WHOLESTAGE_CODEGEN_ENABLED)

  def wholeStageUseIdInClassName: Boolean = getConf(WHOLESTAGE_CODEGEN_USE_ID_IN_CLASS_NAME)

  def wholeStageMaxNumFields: Int = getConf(WHOLESTAGE_MAX_NUM_FIELDS)

  def codegenFallback: Boolean = getConf(CODEGEN_FALLBACK)

  def loggingMaxLinesForCodegen: Int = getConf(CODEGEN_LOGGING_MAX_LINES)

  def hugeMethodLimit: Int = getConf(WHOLESTAGE_HUGE_METHOD_LIMIT)

  def wholeStageSplitConsumeFuncByOperator: Boolean =
    getConf(WHOLESTAGE_SPLIT_CONSUME_FUNC_BY_OPERATOR)

  def tableRelationCacheSize: Int = getConf(StaticSQLConf.FILESOURCE_TABLE_RELATION_CACHE_SIZE)

  def exchangeReuseEnabled: Boolean = getConf(EXCHANGE_REUSE_ENABLED)

  def caseSensitiveAnalysis: Boolean = getConf(SQLConf.CASE_SENSITIVE)

  def constraintPropagationEnabled: Boolean = getConf(CONSTRAINT_PROPAGATION_ENABLED)

  def escapedStringLiterals: Boolean = getConf(ESCAPED_STRING_LITERALS)

  // NOTE(review): FILE_COMRESSION_FACTOR is misspelled ("COMRESSION") but is a cross-file
  // reference; renaming it here would break its declaration site.
  def fileCompressionFactor: Double = getConf(FILE_COMRESSION_FACTOR)

  // NOTE(review): "Redation" is a typo in this public method name; renaming would break callers,
  // so it is only flagged here.
  def stringRedationPattern: Option[Regex] = SQL_STRING_REDACTION_PATTERN.readFrom(reader)

  def sortBeforeRepartition: Boolean = getConf(SORT_BEFORE_REPARTITION)

  /**
   * Returns the [[Resolver]] for the current configuration, which can be used to determine if two
   * identifiers are equal.
   */
  def resolver: Resolver = {
    if (caseSensitiveAnalysis) {
      org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution
    } else {
      org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
    }
  }

  def subexpressionEliminationEnabled: Boolean = getConf(SUBEXPRESSION_ELIMINATION_ENABLED)

  def autoBroadcastJoinThreshold: Long = getConf(AUTO_BROADCASTJOIN_THRESHOLD)

  def limitScaleUpFactor: Int = getConf(LIMIT_SCALE_UP_FACTOR)

  def advancedPartitionPredicatePushdownEnabled: Boolean =
    getConf(ADVANCED_PARTITION_PREDICATE_PUSHDOWN)

  def fallBackToHdfsForStatsEnabled: Boolean = getConf(ENABLE_FALL_BACK_TO_HDFS_FOR_STATS)

  def preferSortMergeJoin: Boolean = getConf(PREFER_SORTMERGEJOIN)

  def enableRadixSort: Boolean = getConf(RADIX_SORT_ENABLED)

  def defaultSizeInBytes: Long = getConf(DEFAULT_SIZE_IN_BYTES)

  def isParquetSchemaMergingEnabled: Boolean = getConf(PARQUET_SCHEMA_MERGING_ENABLED)

  def isParquetSchemaRespectSummaries: Boolean = getConf(PARQUET_SCHEMA_RESPECT_SUMMARIES)

  def parquetOutputCommitterClass: String = getConf(PARQUET_OUTPUT_COMMITTER_CLASS)

  def isParquetBinaryAsString: Boolean = getConf(PARQUET_BINARY_AS_STRING)

  def isParquetINT96AsTimestamp: Boolean = getConf(PARQUET_INT96_AS_TIMESTAMP)

  def isParquetINT96TimestampConversion: Boolean = getConf(PARQUET_INT96_TIMESTAMP_CONVERSION)

  def isParquetINT64AsTimestampMillis: Boolean = getConf(PARQUET_INT64_AS_TIMESTAMP_MILLIS)

  // Resolves the effective Parquet timestamp output type, honoring the legacy
  // INT64-as-millis flag only when the newer output-type conf is unset.
  def parquetOutputTimestampType: ParquetOutputTimestampType.Value = {
    val isOutputTimestampTypeSet = settings.containsKey(PARQUET_OUTPUT_TIMESTAMP_TYPE.key)
    if (!isOutputTimestampTypeSet && isParquetINT64AsTimestampMillis) {
      // If PARQUET_OUTPUT_TIMESTAMP_TYPE is not set and PARQUET_INT64_AS_TIMESTAMP_MILLIS is set,
      // respect PARQUET_INT64_AS_TIMESTAMP_MILLIS and use TIMESTAMP_MILLIS. Otherwise,
      // PARQUET_OUTPUT_TIMESTAMP_TYPE has higher priority.
ParquetOutputTimestampType.TIMESTAMP_MILLIS } else { ParquetOutputTimestampType.withName(getConf(PARQUET_OUTPUT_TIMESTAMP_TYPE)) } } def writeLegacyParquetFormat: Boolean = getConf(PARQUET_WRITE_LEGACY_FORMAT) def parquetRecordFilterEnabled: Boolean = getConf(PARQUET_RECORD_FILTER_ENABLED) def inMemoryPartitionPruning: Boolean = getConf(IN_MEMORY_PARTITION_PRUNING) def offHeapColumnVectorEnabled: Boolean = getConf(COLUMN_VECTOR_OFFHEAP_ENABLED) def columnNameOfCorruptRecord: String = getConf(COLUMN_NAME_OF_CORRUPT_RECORD) def broadcastTimeout: Long = getConf(BROADCAST_TIMEOUT) def defaultDataSourceName: String = getConf(DEFAULT_DATA_SOURCE_NAME) def convertCTAS: Boolean = getConf(CONVERT_CTAS) def partitionColumnTypeInferenceEnabled: Boolean = getConf(SQLConf.PARTITION_COLUMN_TYPE_INFERENCE) def fileCommitProtocolClass: String = getConf(SQLConf.FILE_COMMIT_PROTOCOL_CLASS) def parallelPartitionDiscoveryThreshold: Int = getConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD) def parallelPartitionDiscoveryParallelism: Int = getConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_PARALLELISM) def bucketingEnabled: Boolean = getConf(SQLConf.BUCKETING_ENABLED) def dataFrameSelfJoinAutoResolveAmbiguity: Boolean = getConf(DATAFRAME_SELF_JOIN_AUTO_RESOLVE_AMBIGUITY) def dataFrameRetainGroupColumns: Boolean = getConf(DATAFRAME_RETAIN_GROUP_COLUMNS) def dataFramePivotMaxValues: Int = getConf(DATAFRAME_PIVOT_MAX_VALUES) def runSQLonFile: Boolean = getConf(RUN_SQL_ON_FILES) def enableTwoLevelAggMap: Boolean = getConf(ENABLE_TWOLEVEL_AGG_MAP) def useObjectHashAggregation: Boolean = getConf(USE_OBJECT_HASH_AGG) def objectAggSortBasedFallbackThreshold: Int = getConf(OBJECT_AGG_SORT_BASED_FALLBACK_THRESHOLD) def variableSubstituteEnabled: Boolean = getConf(VARIABLE_SUBSTITUTE_ENABLED) def variableSubstituteDepth: Int = getConf(VARIABLE_SUBSTITUTE_DEPTH) def warehousePath: String = new Path(getConf(StaticSQLConf.WAREHOUSE_PATH)).toString def hiveThriftServerSingleSession: Boolean = 
getConf(StaticSQLConf.HIVE_THRIFT_SERVER_SINGLESESSION) def orderByOrdinal: Boolean = getConf(ORDER_BY_ORDINAL) def groupByOrdinal: Boolean = getConf(GROUP_BY_ORDINAL) def groupByAliases: Boolean = getConf(GROUP_BY_ALIASES) def crossJoinEnabled: Boolean = getConf(SQLConf.CROSS_JOINS_ENABLED) def sessionLocalTimeZone: String = getConf(SQLConf.SESSION_LOCAL_TIMEZONE) def ndvMaxError: Double = getConf(NDV_MAX_ERROR) def histogramEnabled: Boolean = getConf(HISTOGRAM_ENABLED) def histogramNumBins: Int = getConf(HISTOGRAM_NUM_BINS) def percentileAccuracy: Int = getConf(PERCENTILE_ACCURACY) def cboEnabled: Boolean = getConf(SQLConf.CBO_ENABLED) def autoSizeUpdateEnabled: Boolean = getConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED) def joinReorderEnabled: Boolean = getConf(SQLConf.JOIN_REORDER_ENABLED) def joinReorderDPThreshold: Int = getConf(SQLConf.JOIN_REORDER_DP_THRESHOLD) def joinReorderCardWeight: Double = getConf(SQLConf.JOIN_REORDER_CARD_WEIGHT) def joinReorderDPStarFilter: Boolean = getConf(SQLConf.JOIN_REORDER_DP_STAR_FILTER) def windowExecBufferInMemoryThreshold: Int = getConf(WINDOW_EXEC_BUFFER_IN_MEMORY_THRESHOLD) def windowExecBufferSpillThreshold: Int = getConf(WINDOW_EXEC_BUFFER_SPILL_THRESHOLD) def sortMergeJoinExecBufferInMemoryThreshold: Int = getConf(SORT_MERGE_JOIN_EXEC_BUFFER_IN_MEMORY_THRESHOLD) def sortMergeJoinExecBufferSpillThreshold: Int = getConf(SORT_MERGE_JOIN_EXEC_BUFFER_SPILL_THRESHOLD) def cartesianProductExecBufferInMemoryThreshold: Int = getConf(CARTESIAN_PRODUCT_EXEC_BUFFER_IN_MEMORY_THRESHOLD) def cartesianProductExecBufferSpillThreshold: Int = getConf(CARTESIAN_PRODUCT_EXEC_BUFFER_SPILL_THRESHOLD) def maxNestedViewDepth: Int = getConf(SQLConf.MAX_NESTED_VIEW_DEPTH) def starSchemaDetection: Boolean = getConf(STARSCHEMA_DETECTION) def starSchemaFTRatio: Double = getConf(STARSCHEMA_FACT_TABLE_RATIO) def supportQuotedRegexColumnName: Boolean = getConf(SUPPORT_QUOTED_REGEX_COLUMN_NAME) def rangeExchangeSampleSizePerPartition: Int = 
getConf(RANGE_EXCHANGE_SAMPLE_SIZE_PER_PARTITION) def arrowEnabled: Boolean = getConf(ARROW_EXECUTION_ENABLED) def arrowFallbackEnabled: Boolean = getConf(ARROW_FALLBACK_ENABLED) def arrowMaxRecordsPerBatch: Int = getConf(ARROW_EXECUTION_MAX_RECORDS_PER_BATCH) def pandasRespectSessionTimeZone: Boolean = getConf(PANDAS_RESPECT_SESSION_LOCAL_TIMEZONE) def replaceExceptWithFilter: Boolean = getConf(REPLACE_EXCEPT_WITH_FILTER) def decimalOperationsAllowPrecisionLoss: Boolean = getConf(DECIMAL_OPERATIONS_ALLOW_PREC_LOSS) def continuousStreamingExecutorQueueSize: Int = getConf(CONTINUOUS_STREAMING_EXECUTOR_QUEUE_SIZE) def continuousStreamingExecutorPollIntervalMs: Long = getConf(CONTINUOUS_STREAMING_EXECUTOR_POLL_INTERVAL_MS) def disabledV2StreamingWriters: String = getConf(DISABLED_V2_STREAMING_WRITERS) def disabledV2StreamingMicroBatchReaders: String = getConf(DISABLED_V2_STREAMING_MICROBATCH_READERS) def concatBinaryAsString: Boolean = getConf(CONCAT_BINARY_AS_STRING) def eltOutputAsString: Boolean = getConf(ELT_OUTPUT_AS_STRING) def allowCreatingManagedTableUsingNonemptyLocation: Boolean = getConf(ALLOW_CREATING_MANAGED_TABLE_USING_NONEMPTY_LOCATION) def partitionOverwriteMode: PartitionOverwriteMode.Value = PartitionOverwriteMode.withName(getConf(PARTITION_OVERWRITE_MODE)) /** ********************** SQLConf functionality methods ************ */ /** Set Spark SQL configuration properties. */ def setConf(props: Properties): Unit = settings.synchronized { props.asScala.foreach { case (k, v) => setConfString(k, v) } } /** Set the given Spark SQL configuration property using a `string` value. */ def setConfString(key: String, value: String): Unit = { require(key != null, "key cannot be null") require(value != null, s"value cannot be null for key: $key") val entry = sqlConfEntries.get(key) if (entry != null) { // Only verify configs in the SQLConf object entry.valueConverter(value) } setConfWithCheck(key, value) } /** Set the given Spark SQL configuration property. 
*/ def setConf[T](entry: ConfigEntry[T], value: T): Unit = { require(entry != null, "entry cannot be null") require(value != null, s"value cannot be null for key: ${entry.key}") require(sqlConfEntries.get(entry.key) == entry, s"$entry is not registered") setConfWithCheck(entry.key, entry.stringConverter(value)) } /** Return the value of Spark SQL configuration property for the given key. */ @throws[NoSuchElementException]("if key is not set") def getConfString(key: String): String = { Option(settings.get(key)). orElse { // Try to use the default value Option(sqlConfEntries.get(key)).map { e => e.stringConverter(e.readFrom(reader)) } }. getOrElse(throw new NoSuchElementException(key)) } /** * Return the value of Spark SQL configuration property for the given key. If the key is not set * yet, return `defaultValue`. This is useful when `defaultValue` in ConfigEntry is not the * desired one. */ def getConf[T](entry: ConfigEntry[T], defaultValue: T): T = { require(sqlConfEntries.get(entry.key) == entry, s"$entry is not registered") Option(settings.get(entry.key)).map(entry.valueConverter).getOrElse(defaultValue) } /** * Return the value of Spark SQL configuration property for the given key. If the key is not set * yet, return `defaultValue` in [[ConfigEntry]]. */ def getConf[T](entry: ConfigEntry[T]): T = { require(sqlConfEntries.get(entry.key) == entry, s"$entry is not registered") entry.readFrom(reader) } /** * Return the value of an optional Spark SQL configuration property for the given key. If the key * is not set yet, returns None. */ def getConf[T](entry: OptionalConfigEntry[T]): Option[T] = { require(sqlConfEntries.get(entry.key) == entry, s"$entry is not registered") entry.readFrom(reader) } /** * Return the `string` value of Spark SQL configuration property for the given key. If the key is * not set yet, return `defaultValue`. 
*/ def getConfString(key: String, defaultValue: String): String = { if (defaultValue != null && defaultValue != ConfigEntry.UNDEFINED) { val entry = sqlConfEntries.get(key) if (entry != null) { // Only verify configs in the SQLConf object entry.valueConverter(defaultValue) } } Option(settings.get(key)).getOrElse { // If the key is not set, need to check whether the config entry is registered and is // a fallback conf, so that we can check its parent. sqlConfEntries.get(key) match { case e: FallbackConfigEntry[_] => getConfString(e.fallback.key, defaultValue) case _ => defaultValue } } } /** * Return all the configuration properties that have been set (i.e. not the default). * This creates a new copy of the config properties in the form of a Map. */ def getAllConfs: immutable.Map[String, String] = settings.synchronized { settings.asScala.toMap } /** * Return all the configuration definitions that have been defined in [[SQLConf]]. Each * definition contains key, defaultValue and doc. */ def getAllDefinedConfs: Seq[(String, String, String)] = sqlConfEntries.synchronized { sqlConfEntries.values.asScala.filter(_.isPublic).map { entry => val displayValue = Option(getConfString(entry.key, null)).getOrElse(entry.defaultValueString) (entry.key, displayValue, entry.doc) }.toSeq } /** * Return whether a given key is set in this [[SQLConf]]. 
*/ def contains(key: String): Boolean = { settings.containsKey(key) } private def setConfWithCheck(key: String, value: String): Unit = { settings.put(key, value) } def unsetConf(key: String): Unit = { settings.remove(key) } def unsetConf(entry: ConfigEntry[_]): Unit = { settings.remove(entry.key) } def clear(): Unit = { settings.clear() } override def clone(): SQLConf = { val result = new SQLConf getAllConfs.foreach { case(k, v) => if (v ne null) result.setConfString(k, v) } result } // For test only def copy(entries: (ConfigEntry[_], Any)*): SQLConf = { val cloned = clone() entries.foreach { case (entry, value) => cloned.setConfString(entry.key, value.toString) } cloned } }
szhem/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
Scala
apache-2.0
77,187
package fix
package v0_7_0

import scalafix.v1._
import scala.meta._

/**
 * Syntactic scalafix rule for the scio 0.7.0 migration: when a source file uses one of the
 * Avro or BigQuery I/O methods that moved into dedicated modules, add the corresponding
 * wildcard import to that file.
 */
class AddMissingImports extends SyntacticRule("AddMissingImports") {

  // (importer syntax, file path) pairs for which an import patch was already produced.
  // A mutable Set gives O(1) membership checks; the original ArrayBuffer.contains scan
  // was O(n) and ran once per matching call site.
  // NOTE(review): this state lives for the lifetime of the rule instance (i.e. across all
  // files of a run); entries are keyed by file path, so per-file dedup stays correct.
  private val emitted = scala.collection.mutable.Set.empty[(String, String)]

  /**
   * Produce `Patch.addGlobalImport(i)` the first time importer `i` is required by the file
   * containing position `p`; return `Patch.empty` on subsequent calls so the same import is
   * added at most once per file.
   */
  def addImport(p: Position, i: Importer): Patch = {
    val Importer(s) = i
    val Input.VirtualFile(path, _) = p.input
    // Set#add returns true only when the element was newly inserted.
    if (emitted.add((s.toString, path))) Patch.addGlobalImport(i)
    else Patch.empty
  }

  /** Method names whose usage requires `com.spotify.scio.avro._`. */
  object Avro {
    val fns = List("objectFile", "avroFile", "typedAvroFile", "protobufFile") ++
      List("saveAsAvroFile", "saveAsTypedAvroFile", "saveAsObjectFile", "saveAsProtobufFile")
    val `import` = importer"com.spotify.scio.avro._"
  }

  /** Method names whose usage requires `com.spotify.scio.bigquery._`. */
  object BQ {
    // The original lists repeated "bigQueryTable", "saveAsBigQuery" and "saveAsTypedBigQuery";
    // only `contains` is ever called on this list, so the duplicates were redundant.
    val fns = List(
      "bigQuerySelect",
      "bigQueryTable",
      "typedBigQuery",
      "tableRowJsonFile"
    ) ++ List(
      "saveAsBigQuery",
      "saveAsTypedBigQuery",
      "saveAsTableRowJsonFile"
    )
    val `import` = importer"com.spotify.scio.bigquery._"
  }

  /** Walk every term name in the document and patch in whichever imports its usages require. */
  override def fix(implicit doc: SyntacticDocument): Patch = doc.tree.collect {
    case t @ Term.Name(n) if Avro.fns.contains(n) => addImport(t.pos, Avro.`import`)
    case t @ Term.Name(n) if BQ.fns.contains(n) => addImport(t.pos, BQ.`import`)
  }.asPatch
}
spotify/scio
scalafix/rules/src/main/scala/fix/AddMissingImports.scala
Scala
apache-2.0
1,529
package controllers

import play.api.mvc.{Action, Controller}
import play.api.libs.json.JsBoolean

/** Liveness endpoint: lets load balancers / monitoring confirm the service is up. */
object StatusController extends Controller {

  /**
   * Responds 200 OK with the JSON body `true`.
   * The incoming request is irrelevant to the result, so the parameterless `Action`
   * block overload is used instead of binding an unused `request =>` parameter.
   */
  def ping = Action {
    Ok(JsBoolean(true))
  }
}
WiredThing/hither
app/controllers/StatusController.scala
Scala
mit
202
package dotty.tools.dotc package ast import core._ import Types._, Names._, Flags._, util.Positions._, Contexts._, Constants._, SymDenotations._, Symbols._ import Denotations._, StdNames._ import annotation.tailrec import language.higherKinds import collection.IndexedSeqOptimized import collection.immutable.IndexedSeq import collection.mutable.ListBuffer import parsing.Tokens.Token import printing.Printer import util.{Stats, Attachment, DotClass} import annotation.unchecked.uncheckedVariance import language.implicitConversions object Trees { // Note: it would be more logical to make Untyped = Nothing. // However, this interacts in a bad way with Scala's current type inference. // In fact, we cannot write something like Select(pre, name), where pre is // of type Tree[Nothing]; type inference will treat the Nothing as an uninstantiated // value and will not infer Nothing as the type parameter for Select. // We should come back to this issue once type inference is changed. type Untyped = Null /** The total number of created tree nodes, maintained if Stats.enabled */ var ntrees = 0 /** Modifiers and annotations for definitions * @param flags The set flags * @param privateWithin If a private or protected has is followed by a * qualifier [q], the name q, "" as a typename otherwise. 
* @param annotations The annotations preceding the modifiers */ case class Modifiers[-T >: Untyped] ( flags: FlagSet = EmptyFlags, privateWithin: TypeName = tpnme.EMPTY, annotations: List[Tree[T]] = Nil) extends Positioned with Cloneable { def is(fs: FlagSet): Boolean = flags is fs def is(fc: FlagConjunction): Boolean = flags is fc def | (fs: FlagSet): Modifiers[T] = withFlags(flags | fs) def & (fs: FlagSet): Modifiers[T] = withFlags(flags & fs) def &~(fs: FlagSet): Modifiers[T] = withFlags(flags &~ fs) def toTypeFlags: Modifiers[T] = withFlags(flags.toTypeFlags) def toTermFlags: Modifiers[T] = withFlags(flags.toTermFlags) private def withFlags(flags: FlagSet) = if (this.flags == flags) this else copy(flags = flags) def withAnnotations[U >: Untyped <: T](annots: List[Tree[U]]): Modifiers[U] = if (annots.isEmpty) this else copy(annotations = annotations ++ annots) def withPrivateWithin(pw: TypeName) = if (pw.isEmpty) this else copy(privateWithin = pw) def hasFlags = flags != EmptyFlags def hasAnnotations = annotations.nonEmpty def hasPrivateWithin = privateWithin != tpnme.EMPTY def tokenPos: Seq[(Token, Position)] = ??? } private var nextId = 0 // for debugging type LazyTree = AnyRef /* really: Tree | Lazy[Tree] */ type LazyTreeList = AnyRef /* really: List[Tree] | Lazy[List[Tree]] */ /** Trees take a parameter indicating what the type of their `tpe` field * is. Two choices: `Type` or `Untyped`. * Untyped trees have type `Tree[Untyped]`. * * Tree typing uses a copy-on-write implementation: * * - You can never observe a `tpe` which is `null` (throws an exception) * - So when creating a typed tree with `withType` we can re-use * the existing tree transparently, assigning its `tpe` field, * provided it was `null` before. * - It is impossible to embed untyped trees in typed ones. * - Typed trees can be embedded untyped ones provided they are rooted * in a TypedSplice node. * - Type checking an untyped tree should remove all embedded `TypedSplice` * nodes. 
*/ abstract class Tree[-T >: Untyped] extends Positioned with Product with Attachment.Container with printing.Showable with Cloneable { if (Stats.enabled) ntrees += 1 /** A unique identifier for this tree. Used for debugging, and potentially * tracking presentation compiler interactions */ val uniqueId = { nextId += 1 //assert(nextId != 214, this) nextId } /** The type constructor at the root of the tree */ type ThisTree[T >: Untyped] <: Tree[T] private[this] var myTpe: T = _ /** Destructively set the type of the tree. This should be called only when it is known that * it is safe under sharing to do so. One use-case is in the withType method below * which implements copy-on-write. Another use-case is in method interpolateAndAdapt in Typer, * where we overwrite with a simplified version of the type itself. */ private[dotc] def overwriteType(tpe: T) = { if (this.isInstanceOf[Template[_]]) assert(tpe.isInstanceOf[WithFixedSym], s"$this <--- $tpe") myTpe = tpe } /** The type of the tree. In case of an untyped tree, * an UnAssignedTypeException is thrown. (Overridden by empty trees) */ def tpe: T @uncheckedVariance = { if (myTpe == null) throw new UnAssignedTypeException(this) myTpe } /** Copy `tpe` attribute from tree `from` into this tree, independently * whether it is null or not. final def copyAttr[U >: Untyped](from: Tree[U]): ThisTree[T] = { val t1 = this.withPos(from.pos) val t2 = if (from.myTpe != null) t1.withType(from.myTpe.asInstanceOf[Type]) else t1 t2.asInstanceOf[ThisTree[T]] } */ /** Return a typed tree that's isomorphic to this tree, but has given * type. 
(Overridden by empty trees) */ def withType(tpe: Type)(implicit ctx: Context): ThisTree[Type] = { if (tpe == ErrorType) assert(ctx.errorsReported) withTypeUnchecked(tpe) } def withTypeUnchecked(tpe: Type): ThisTree[Type] = { val tree = (if (myTpe == null || (myTpe.asInstanceOf[AnyRef] eq tpe.asInstanceOf[AnyRef])) this else clone).asInstanceOf[Tree[Type]] tree overwriteType tpe tree.asInstanceOf[ThisTree[Type]] } /** Does the tree have its type field set? Note: this operation is not * referentially transparent, because it can observe the withType * modifications. Should be used only in special circumstances (we * need it for printing trees with optional type info). */ final def hasType: Boolean = myTpe != null final def typeOpt: Type = myTpe match { case tp: Type => tp case _ => NoType } /** The denotation referred tno by this tree. * Defined for `DenotingTree`s and `ProxyTree`s, NoDenotation for other * kinds of trees */ def denot(implicit ctx: Context): Denotation = NoDenotation /** Shorthand for `denot.symbol`. */ final def symbol(implicit ctx: Context): Symbol = denot.symbol /** Does this tree represent a type? */ def isType: Boolean = false /** Does this tree represent a term? */ def isTerm: Boolean = false /** Is this a legal part of a pattern which is not at the same time a term? */ def isPattern: Boolean = false /** Does this tree define a new symbol that is not defined elsewhere? */ def isDef: Boolean = false /** Is this tree either the empty tree or the empty ValDef? */ def isEmpty: Boolean = false /** Convert tree to a list. Gives a singleton list, except * for thickets which return their element trees. 
*/ def toList: List[Tree[T]] = this :: Nil /** if this tree is the empty tree, the alternative, else this tree */ def orElse[U >: Untyped <: T](that: => Tree[U]): Tree[U] = if (this eq genericEmptyTree) that else this /** The number of nodes in this tree */ def treeSize: Int = { var s = 1 def addSize(elem: Any): Unit = elem match { case t: Tree[_] => s += t.treeSize case ts: List[_] => ts foreach addSize case _ => } productIterator foreach addSize s } /** If this is a thicket, perform `op` on each of its trees * otherwise, perform `op` ion tree itself. */ def foreachInThicket(op: Tree[T] => Unit): Unit = op(this) override def toText(printer: Printer) = printer.toText(this) override def hashCode(): Int = System.identityHashCode(this) override def equals(that: Any) = this eq that.asInstanceOf[AnyRef] } class UnAssignedTypeException[T >: Untyped](tree: Tree[T]) extends RuntimeException { override def getMessage: String = s"type of $tree is not assigned" } // ------ Categories of trees ----------------------------------- /** Instances of this class are trees for which isType is definitely true. * Note that some trees have isType = true without being TypTrees (e.g. Ident, AnnotatedTree) */ trait TypTree[-T >: Untyped] extends Tree[T] { type ThisTree[-T >: Untyped] <: TypTree[T] override def isType = true } /** Instances of this class are trees for which isTerm is definitely true. * Note that some trees have isTerm = true without being TermTrees (e.g. Ident, AnnotatedTree) */ trait TermTree[-T >: Untyped] extends Tree[T] { type ThisTree[-T >: Untyped] <: TermTree[T] override def isTerm = true } /** Instances of this class are trees which are not terms but are legal * parts of patterns. 
*/ trait PatternTree[-T >: Untyped] extends Tree[T] { type ThisTree[-T >: Untyped] <: PatternTree[T] override def isPattern = true } /** Tree's denotation can be derived from its type */ abstract class DenotingTree[-T >: Untyped] extends Tree[T] { type ThisTree[-T >: Untyped] <: DenotingTree[T] override def denot(implicit ctx: Context) = tpe match { case tpe: NamedType => tpe.denot case tpe: ThisType => tpe.cls.denot case tpe: AnnotatedType => tpe.stripAnnots match { case tpe: NamedType => tpe.denot case tpe: ThisType => tpe.cls.denot case _ => NoDenotation } case _ => NoDenotation } } /** Tree's denot/isType/isTerm properties come from a subtree * identified by `forwardTo`. */ abstract class ProxyTree[-T >: Untyped] extends Tree[T] { type ThisTree[-T >: Untyped] <: ProxyTree[T] def forwardTo: Tree[T] override def denot(implicit ctx: Context): Denotation = forwardTo.denot override def isTerm = forwardTo.isTerm override def isType = forwardTo.isType } /** Tree has a name */ abstract class NameTree[-T >: Untyped] extends DenotingTree[T] { type ThisTree[-T >: Untyped] <: NameTree[T] def name: Name } /** Tree refers by name to a denotation */ abstract class RefTree[-T >: Untyped] extends NameTree[T] { type ThisTree[-T >: Untyped] <: RefTree[T] def qualifier: Tree[T] override def isType = name.isTypeName override def isTerm = name.isTermName } /** Tree defines a new symbol */ trait DefTree[-T >: Untyped] extends DenotingTree[T] { type ThisTree[-T >: Untyped] <: DefTree[T] override def isDef = true def namedType = tpe.asInstanceOf[NamedType] } /** Tree defines a new symbol and carries modifiers. * The position of a MemberDef contains only the defined identifier or pattern. * The envelope of a MemberDef contains the whole definition and has its point * on the opening keyword (or the next token after that if keyword is missing). 
*/ abstract class MemberDef[-T >: Untyped] extends NameTree[T] with DefTree[T] { type ThisTree[-T >: Untyped] <: MemberDef[T] private[this] var myMods: Modifiers[T] = null private[ast] def rawMods: Modifiers[T] = if (myMods == null) genericEmptyModifiers else myMods def withMods(mods: Modifiers[Untyped]): ThisTree[Untyped] = { val tree = if (myMods == null || (myMods == mods)) this else clone.asInstanceOf[MemberDef[Untyped]] tree.setMods(mods) tree.asInstanceOf[ThisTree[Untyped]] } def withFlags(flags: FlagSet): ThisTree[Untyped] = withMods(Modifiers(flags)) protected def setMods(mods: Modifiers[T @uncheckedVariance]) = myMods = mods override def envelope: Position = rawMods.pos.union(pos).union(initialPos) } /** A ValDef or DefDef tree */ trait ValOrDefDef[-T >: Untyped] extends MemberDef[T] with WithLazyField[Tree[T]] { def tpt: Tree[T] def unforcedRhs: LazyTree = unforced def rhs(implicit ctx: Context): Tree[T] = forceIfLazy } // ----------- Tree case classes ------------------------------------ /** name */ case class Ident[-T >: Untyped] private[ast] (name: Name) extends RefTree[T] { type ThisTree[-T >: Untyped] = Ident[T] def qualifier: Tree[T] = genericEmptyTree } class BackquotedIdent[-T >: Untyped] private[ast] (name: Name) extends Ident[T](name) { override def toString = s"BackquotedIdent($name)" } /** qualifier.name */ case class Select[-T >: Untyped] private[ast] (qualifier: Tree[T], name: Name) extends RefTree[T] { type ThisTree[-T >: Untyped] = Select[T] } class SelectWithSig[-T >: Untyped] private[ast] (qualifier: Tree[T], name: Name, val sig: Signature) extends Select[T](qualifier, name) { override def toString = s"SelectWithSig($qualifier, $name, $sig)" } /** qual.this */ case class This[-T >: Untyped] private[ast] (qual: TypeName) extends DenotingTree[T] with TermTree[T] { type ThisTree[-T >: Untyped] = This[T] // Denotation of a This tree is always the underlying class; needs correction for modules. 
override def denot(implicit ctx: Context): Denotation = { tpe match { case tpe @ TermRef(pre, _) if tpe.symbol is Module => tpe.symbol.moduleClass.denot.asSeenFrom(pre) case _ => super.denot } } } /** C.super[mix], where qual = C.this */ case class Super[-T >: Untyped] private[ast] (qual: Tree[T], mix: TypeName) extends ProxyTree[T] with TermTree[T] { type ThisTree[-T >: Untyped] = Super[T] def forwardTo = qual } abstract class GenericApply[-T >: Untyped] extends ProxyTree[T] with TermTree[T] { type ThisTree[-T >: Untyped] <: GenericApply[T] val fun: Tree[T] val args: List[Tree[T]] def forwardTo = fun } /** fun(args) */ case class Apply[-T >: Untyped] private[ast] (fun: Tree[T], args: List[Tree[T]]) extends GenericApply[T] { type ThisTree[-T >: Untyped] = Apply[T] } /** fun[args] */ case class TypeApply[-T >: Untyped] private[ast] (fun: Tree[T], args: List[Tree[T]]) extends GenericApply[T] { type ThisTree[-T >: Untyped] = TypeApply[T] } /** const */ case class Literal[-T >: Untyped] private[ast] (const: Constant) extends TermTree[T] { type ThisTree[-T >: Untyped] = Literal[T] } /** new tpt, but no constructor call */ case class New[-T >: Untyped] private[ast] (tpt: Tree[T]) extends TermTree[T] { type ThisTree[-T >: Untyped] = New[T] } /** (left, right) */ case class Pair[-T >: Untyped] private[ast] (left: Tree[T], right: Tree[T]) extends TermTree[T] { type ThisTree[-T >: Untyped] = Pair[T] override def isTerm = left.isTerm && right.isTerm override def isType = left.isType && right.isType override def isPattern = !isTerm && (left.isPattern || left.isTerm) && (right.isPattern || right.isTerm) } /** expr : tpt */ case class Typed[-T >: Untyped] private[ast] (expr: Tree[T], tpt: Tree[T]) extends ProxyTree[T] with TermTree[T] { type ThisTree[-T >: Untyped] = Typed[T] def forwardTo = expr } /** name = arg, in a parameter list */ case class NamedArg[-T >: Untyped] private[ast] (name: Name, arg: Tree[T]) extends Tree[T] { type ThisTree[-T >: Untyped] = NamedArg[T] } /** 
name = arg, outside a parameter list */ case class Assign[-T >: Untyped] private[ast] (lhs: Tree[T], rhs: Tree[T]) extends TermTree[T] { type ThisTree[-T >: Untyped] = Assign[T] } /** { stats; expr } */ case class Block[-T >: Untyped] private[ast] (stats: List[Tree[T]], expr: Tree[T]) extends TermTree[T] { type ThisTree[-T >: Untyped] = Block[T] } /** if cond then thenp else elsep */ case class If[-T >: Untyped] private[ast] (cond: Tree[T], thenp: Tree[T], elsep: Tree[T]) extends TermTree[T] { type ThisTree[-T >: Untyped] = If[T] } /** A closure with an environment and a reference to a method. * @param env The captured parameters of the closure * @param meth A ref tree that refers to the method of the closure. * The first (env.length) parameters of that method are filled * with env values. * @param tpt Either EmptyTree or a TypeTree. If tpt is EmptyTree the type * of the closure is a function type, otherwise it is the type * given in `tpt`, which must be a SAM type. */ case class Closure[-T >: Untyped] private[ast] (env: List[Tree[T]], meth: Tree[T], tpt: Tree[T]) extends TermTree[T] { type ThisTree[-T >: Untyped] = Closure[T] } /** selector match { cases } */ case class Match[-T >: Untyped] private[ast] (selector: Tree[T], cases: List[CaseDef[T]]) extends TermTree[T] { type ThisTree[-T >: Untyped] = Match[T] } /** case pat if guard => body; only appears as child of a Match */ case class CaseDef[-T >: Untyped] private[ast] (pat: Tree[T], guard: Tree[T], body: Tree[T]) extends Tree[T] { type ThisTree[-T >: Untyped] = CaseDef[T] } /** return expr * where `from` refers to the method from which the return takes place * After program transformations this is not necessarily the enclosing method, because * closures can intervene. 
*/ case class Return[-T >: Untyped] private[ast] (expr: Tree[T], from: Tree[T] = genericEmptyTree) extends TermTree[T] { type ThisTree[-T >: Untyped] = Return[T] } /** try block catch handler finally finalizer * * Note: if the handler is a case block CASES of the form * * { case1 ... caseN } * * the parser returns Match(EmptyTree, CASES). Desugaring and typing this yields a closure * node * * { def $anonfun(x: Throwable) = x match CASES; Closure(Nil, $anonfun) } * * At some later stage when we normalize the try we can revert this to * * Match(EmptyTree, CASES) * * or else if stack is non-empty * * Match(EmptyTree, <case x: Throwable => $anonfun(x)>) */ case class Try[-T >: Untyped] private[ast] (expr: Tree[T], cases: List[CaseDef[T]], finalizer: Tree[T]) extends TermTree[T] { type ThisTree[-T >: Untyped] = Try[T] } /** Seq(elems) */ case class SeqLiteral[-T >: Untyped] private[ast] (elems: List[Tree[T]]) extends Tree[T] { type ThisTree[-T >: Untyped] = SeqLiteral[T] } /** Array(elems) */ class JavaSeqLiteral[T >: Untyped] private[ast] (elems: List[Tree[T]]) extends SeqLiteral(elems) { override def toString = s"JavaSeqLiteral($elems)" } /** A type tree that represents an existing or inferred type */ case class TypeTree[-T >: Untyped] private[ast] (original: Tree[T]) extends DenotingTree[T] with TypTree[T] { type ThisTree[-T >: Untyped] = TypeTree[T] override def initialPos = NoPosition override def isEmpty = !hasType && original.isEmpty override def toString = s"TypeTree${if (hasType) s"[$typeOpt]" else s"($original)"}" } /** ref.type */ case class SingletonTypeTree[-T >: Untyped] private[ast] (ref: Tree[T]) extends DenotingTree[T] with TypTree[T] { type ThisTree[-T >: Untyped] = SingletonTypeTree[T] } /** qualifier # name */ case class SelectFromTypeTree[-T >: Untyped] private[ast] (qualifier: Tree[T], name: Name) extends RefTree[T] { type ThisTree[-T >: Untyped] = SelectFromTypeTree[T] } /** left & right */ case class AndTypeTree[-T >: Untyped] private[ast] (left: 
Tree[T], right: Tree[T]) extends TypTree[T] { type ThisTree[-T >: Untyped] = AndTypeTree[T] } /** left | right */ case class OrTypeTree[-T >: Untyped] private[ast] (left: Tree[T], right: Tree[T]) extends TypTree[T] { type ThisTree[-T >: Untyped] = OrTypeTree[T] } /** tpt { refinements } */ case class RefinedTypeTree[-T >: Untyped] private[ast] (tpt: Tree[T], refinements: List[Tree[T]]) extends ProxyTree[T] with TypTree[T] { type ThisTree[-T >: Untyped] = RefinedTypeTree[T] def forwardTo = tpt } /** tpt[args] */ case class AppliedTypeTree[-T >: Untyped] private[ast] (tpt: Tree[T], args: List[Tree[T]]) extends ProxyTree[T] with TypTree[T] { type ThisTree[-T >: Untyped] = AppliedTypeTree[T] def forwardTo = tpt } /** => T */ case class ByNameTypeTree[-T >: Untyped] private[ast] (result: Tree[T]) extends TypTree[T] { type ThisTree[-T >: Untyped] = ByNameTypeTree[T] } /** >: lo <: hi */ case class TypeBoundsTree[-T >: Untyped] private[ast] (lo: Tree[T], hi: Tree[T]) extends TypTree[T] { type ThisTree[-T >: Untyped] = TypeBoundsTree[T] } /** name @ body */ case class Bind[-T >: Untyped] private[ast] (name: Name, body: Tree[T]) extends NameTree[T] with DefTree[T] with PatternTree[T] { type ThisTree[-T >: Untyped] = Bind[T] override def isType = name.isTypeName override def isTerm = name.isTermName override def envelope: Position = pos union initialPos } /** tree_1 | ... | tree_n */ case class Alternative[-T >: Untyped] private[ast] (trees: List[Tree[T]]) extends PatternTree[T] { type ThisTree[-T >: Untyped] = Alternative[T] } /** The typed translation of `extractor(patterns)` in a pattern. The translation has the following * components: * * @param fun is `extractor.unapply` (or, for backwards compatibility, `extractor.unapplySeq`) * possibly with type parameters * @param implicits Any implicit parameters passed to the unapply after the selector * @param patterns The argument patterns in the pattern match. 
* * It is typed with same type as first `fun` argument * Given a match selector `sel` a pattern UnApply(fun, implicits, patterns) is roughly translated as follows * * val result = fun(sel)(implicits) * if (result.isDefined) "match patterns against result" */ case class UnApply[-T >: Untyped] private[ast] (fun: Tree[T], implicits: List[Tree[T]], patterns: List[Tree[T]]) extends PatternTree[T] { type ThisTree[-T >: Untyped] = UnApply[T] } /** mods val name: tpt = rhs */ case class ValDef[-T >: Untyped] private[ast] (name: TermName, tpt: Tree[T], private var preRhs: LazyTree) extends ValOrDefDef[T] { type ThisTree[-T >: Untyped] = ValDef[T] assert(isEmpty || tpt != genericEmptyTree) def unforced = preRhs protected def force(x: AnyRef) = preRhs = x } /** mods def name[tparams](vparams_1)...(vparams_n): tpt = rhs */ case class DefDef[-T >: Untyped] private[ast] (name: TermName, tparams: List[TypeDef[T]], vparamss: List[List[ValDef[T]]], tpt: Tree[T], private var preRhs: LazyTree) extends ValOrDefDef[T] { type ThisTree[-T >: Untyped] = DefDef[T] assert(tpt != genericEmptyTree) def unforced = preRhs protected def force(x: AnyRef) = preRhs = x } /** mods class name template or * mods trait name template or * mods type name = rhs or * mods type name >: lo <: hi, if rhs = TypeBoundsTree(lo, hi) & (lo ne hi) */ case class TypeDef[-T >: Untyped] private[ast] (name: TypeName, rhs: Tree[T]) extends MemberDef[T] { type ThisTree[-T >: Untyped] = TypeDef[T] /** Is this a definition of a class? */ def isClassDef = rhs.isInstanceOf[Template[_]] /** If this a non-class type definition, its type parameters. * Can be different from Nil only for PolyTypeDefs, which are always * untyped and get eliminated during desugaring. 
*/ def tparams: List[untpd.TypeDef] = Nil } /** extends parents { self => body } */ case class Template[-T >: Untyped] private[ast] (constr: DefDef[T], parents: List[Tree[T]], self: ValDef[T], private var preBody: LazyTreeList) extends DefTree[T] with WithLazyField[List[Tree[T]]] { type ThisTree[-T >: Untyped] = Template[T] def unforcedBody = unforced def unforced = preBody protected def force(x: AnyRef) = preBody = x def body(implicit ctx: Context): List[Tree[T]] = forceIfLazy } /** import expr.selectors * where a selector is either an untyped `Ident`, `name` or * an untyped `Pair` `name => rename` */ case class Import[-T >: Untyped] private[ast] (expr: Tree[T], selectors: List[Tree[Untyped]]) extends DenotingTree[T] { type ThisTree[-T >: Untyped] = Import[T] } /** package pid { stats } */ case class PackageDef[-T >: Untyped] private[ast] (pid: RefTree[T], stats: List[Tree[T]]) extends ProxyTree[T] { type ThisTree[-T >: Untyped] = PackageDef[T] def forwardTo = pid } /** arg @annot */ case class Annotated[-T >: Untyped] private[ast] (annot: Tree[T], arg: Tree[T]) extends ProxyTree[T] { type ThisTree[-T >: Untyped] = Annotated[T] def forwardTo = arg } trait WithoutTypeOrPos[-T >: Untyped] extends Tree[T] { override def tpe: T @uncheckedVariance = NoType.asInstanceOf[T] override def withTypeUnchecked(tpe: Type) = this.asInstanceOf[ThisTree[Type]] override def pos = NoPosition override def setPos(pos: Position) = {} } /** Temporary class that results from translation of ModuleDefs * (and possibly other statements). * The contained trees will be integrated when transformed with * a `transform(List[Tree])` call. 
*/ case class Thicket[-T >: Untyped](trees: List[Tree[T]]) extends Tree[T] with WithoutTypeOrPos[T] { type ThisTree[-T >: Untyped] = Thicket[T] override def isEmpty: Boolean = trees.isEmpty override def toList: List[Tree[T]] = flatten(trees) override def toString = if (isEmpty) "EmptyTree" else "Thicket(" + trees.mkString(", ") + ")" override def withPos(pos: Position): this.type = { val newTrees = trees.map(_.withPos(pos)) new Thicket[T](newTrees).asInstanceOf[this.type] } override def foreachInThicket(op: Tree[T] => Unit): Unit = trees foreach (_.foreachInThicket(op)) } class EmptyValDef[T >: Untyped] extends ValDef[T]( nme.WILDCARD, genericEmptyTree[T], genericEmptyTree[T]) with WithoutTypeOrPos[T] { override def isEmpty: Boolean = true setMods(Modifiers[T](PrivateLocal)) } val theEmptyTree: Thicket[Type] = Thicket(Nil) val theEmptyValDef = new EmptyValDef[Type] val theEmptyModifiers = new Modifiers() def genericEmptyValDef[T >: Untyped]: ValDef[T] = theEmptyValDef.asInstanceOf[ValDef[T]] def genericEmptyTree[T >: Untyped]: Thicket[T] = theEmptyTree.asInstanceOf[Thicket[T]] def genericEmptyModifiers[T >: Untyped]: Modifiers[T] = theEmptyModifiers.asInstanceOf[Modifiers[T]] def flatten[T >: Untyped](trees: List[Tree[T]]): List[Tree[T]] = { var buf: ListBuffer[Tree[T]] = null var xs = trees while (xs.nonEmpty) { xs.head match { case Thicket(elems) => if (buf == null) { buf = new ListBuffer var ys = trees while (ys ne xs) { buf += ys.head ys = ys.tail } } for (elem <- elems) { assert(!elem.isInstanceOf[Thicket[_]]) buf += elem } case tree => if (buf != null) buf += tree } xs = xs.tail } if (buf != null) buf.toList else trees } // ----- Lazy trees and tree sequences /** A tree that can have a lazy field * The field is represented by some private `var` which is * proxied `unforced` and `force`. Forcing the field will * set the `var` to the underlying value. 
*/ trait WithLazyField[+T <: AnyRef] { def unforced: AnyRef protected def force(x: AnyRef): Unit def forceIfLazy(implicit ctx: Context): T = unforced match { case lzy: Lazy[T] => val x = lzy.complete force(x) x case x: T @ unchecked => x } } /** A base trait for lazy tree fields. * These can be instantiated with Lazy instances which * can delay tree construction until the field is first demanded. */ trait Lazy[T <: AnyRef] { def complete(implicit ctx: Context): T } // ----- Generic Tree Instances, inherited from `tpt` and `untpd`. abstract class Instance[T >: Untyped <: Type] extends DotClass { inst => type Modifiers = Trees.Modifiers[T] type Tree = Trees.Tree[T] type TypTree = Trees.TypTree[T] type TermTree = Trees.TermTree[T] type PatternTree = Trees.PatternTree[T] type DenotingTree = Trees.DenotingTree[T] type ProxyTree = Trees.ProxyTree[T] type NameTree = Trees.NameTree[T] type RefTree = Trees.RefTree[T] type DefTree = Trees.DefTree[T] type MemberDef = Trees.MemberDef[T] type ValOrDefDef = Trees.ValOrDefDef[T] type Ident = Trees.Ident[T] type BackquotedIdent = Trees.BackquotedIdent[T] type Select = Trees.Select[T] type SelectWithSig = Trees.SelectWithSig[T] type This = Trees.This[T] type Super = Trees.Super[T] type Apply = Trees.Apply[T] type TypeApply = Trees.TypeApply[T] type Literal = Trees.Literal[T] type New = Trees.New[T] type Pair = Trees.Pair[T] type Typed = Trees.Typed[T] type NamedArg = Trees.NamedArg[T] type Assign = Trees.Assign[T] type Block = Trees.Block[T] type If = Trees.If[T] type Closure = Trees.Closure[T] type Match = Trees.Match[T] type CaseDef = Trees.CaseDef[T] type Return = Trees.Return[T] type Try = Trees.Try[T] type SeqLiteral = Trees.SeqLiteral[T] type JavaSeqLiteral = Trees.JavaSeqLiteral[T] type TypeTree = Trees.TypeTree[T] type SingletonTypeTree = Trees.SingletonTypeTree[T] type SelectFromTypeTree = Trees.SelectFromTypeTree[T] type AndTypeTree = Trees.AndTypeTree[T] type OrTypeTree = Trees.OrTypeTree[T] type RefinedTypeTree = 
Trees.RefinedTypeTree[T] type AppliedTypeTree = Trees.AppliedTypeTree[T] type ByNameTypeTree = Trees.ByNameTypeTree[T] type TypeBoundsTree = Trees.TypeBoundsTree[T] type Bind = Trees.Bind[T] type Alternative = Trees.Alternative[T] type UnApply = Trees.UnApply[T] type ValDef = Trees.ValDef[T] type DefDef = Trees.DefDef[T] type TypeDef = Trees.TypeDef[T] type Template = Trees.Template[T] type Import = Trees.Import[T] type PackageDef = Trees.PackageDef[T] type Annotated = Trees.Annotated[T] type Thicket = Trees.Thicket[T] val EmptyTree: Thicket = genericEmptyTree val EmptyValDef: ValDef = genericEmptyValDef val EmptyModifiers: Modifiers = genericEmptyModifiers // ----- Auxiliary creation methods ------------------ def Modifiers(flags: FlagSet = EmptyFlags, privateWithin: TypeName = tpnme.EMPTY, annotations: List[Tree] = Nil) = new Modifiers(flags, privateWithin, annotations) def Thicket(trees: List[Tree]): Thicket = new Thicket(trees) def Thicket(): Thicket = EmptyTree def Thicket(x1: Tree, x2: Tree): Thicket = Thicket(x1 :: x2 :: Nil) def Thicket(x1: Tree, x2: Tree, x3: Tree): Thicket = Thicket(x1 :: x2 :: x3 :: Nil) def flatTree(xs: List[Tree]): Tree = flatten(xs) match { case x :: Nil => x case ys => Thicket(ys) } // ----- Accessing modifiers ---------------------------------------------------- abstract class ModsDeco { def mods: Modifiers } implicit def modsDeco(mdef: MemberDef)(implicit ctx: Context): ModsDeco // ----- Helper classes for copying, transforming, accumulating ----------------- val cpy: TreeCopier /** A class for copying trees. The copy methods avid creating a new tree * If all arguments stay the same. * * Note: Some of the copy methods take a context. * These are exactly those methods that are overridden in TypedTreeCopier * so that they selectively retype themselves. Retyping needs a context. 
*/ abstract class TreeCopier { def postProcess(tree: Tree, copied: untpd.Tree): copied.ThisTree[T] def postProcess(tree: Tree, copied: untpd.MemberDef): copied.ThisTree[T] def finalize(tree: Tree, copied: untpd.Tree): copied.ThisTree[T] = postProcess(tree, copied withPos tree.pos) def finalize(tree: Tree, copied: untpd.MemberDef): copied.ThisTree[T] = postProcess(tree, copied withPos tree.pos) def Ident(tree: Tree)(name: Name): Ident = tree match { case tree: BackquotedIdent => if (name == tree.name) tree else finalize(tree, new BackquotedIdent(name)) case tree: Ident if name == tree.name => tree case _ => finalize(tree, untpd.Ident(name)) } def Select(tree: Tree)(qualifier: Tree, name: Name)(implicit ctx: Context): Select = tree match { case tree: SelectWithSig => if ((qualifier eq tree.qualifier) && (name == tree.name)) tree else finalize(tree, new SelectWithSig(qualifier, name, tree.sig)) case tree: Select if (qualifier eq tree.qualifier) && (name == tree.name) => tree case _ => finalize(tree, untpd.Select(qualifier, name)) } def This(tree: Tree)(qual: TypeName): This = tree match { case tree: This if qual == tree.qual => tree case _ => finalize(tree, untpd.This(qual)) } def Super(tree: Tree)(qual: Tree, mix: TypeName): Super = tree match { case tree: Super if (qual eq tree.qual) && (mix == tree.mix) => tree case _ => finalize(tree, untpd.Super(qual, mix)) } def Apply(tree: Tree)(fun: Tree, args: List[Tree])(implicit ctx: Context): Apply = tree match { case tree: Apply if (fun eq tree.fun) && (args eq tree.args) => tree case _ => finalize(tree, untpd.Apply(fun, args)) } def TypeApply(tree: Tree)(fun: Tree, args: List[Tree])(implicit ctx: Context): TypeApply = tree match { case tree: TypeApply if (fun eq tree.fun) && (args eq tree.args) => tree case _ => finalize(tree, untpd.TypeApply(fun, args)) } def Literal(tree: Tree)(const: Constant)(implicit ctx: Context): Literal = tree match { case tree: Literal if const == tree.const => tree case _ => finalize(tree, 
untpd.Literal(const)) } def New(tree: Tree)(tpt: Tree)(implicit ctx: Context): New = tree match { case tree: New if tpt eq tree.tpt => tree case _ => finalize(tree, untpd.New(tpt)) } def Pair(tree: Tree)(left: Tree, right: Tree)(implicit ctx: Context): Pair = tree match { case tree: Pair if (left eq tree.left) && (right eq tree.right) => tree case _ => finalize(tree, untpd.Pair(left, right)) } def Typed(tree: Tree)(expr: Tree, tpt: Tree)(implicit ctx: Context): Typed = tree match { case tree: Typed if (expr eq tree.expr) && (tpt eq tree.tpt) => tree case _ => finalize(tree, untpd.Typed(expr, tpt)) } def NamedArg(tree: Tree)(name: Name, arg: Tree)(implicit ctx: Context): NamedArg = tree match { case tree: NamedArg if (name == tree.name) && (arg eq tree.arg) => tree case _ => finalize(tree, untpd.NamedArg(name, arg)) } def Assign(tree: Tree)(lhs: Tree, rhs: Tree)(implicit ctx: Context): Assign = tree match { case tree: Assign if (lhs eq tree.lhs) && (rhs eq tree.rhs) => tree case _ => finalize(tree, untpd.Assign(lhs, rhs)) } def Block(tree: Tree)(stats: List[Tree], expr: Tree)(implicit ctx: Context): Block = tree match { case tree: Block if (stats eq tree.stats) && (expr eq tree.expr) => tree case _ => finalize(tree, untpd.Block(stats, expr)) } def If(tree: Tree)(cond: Tree, thenp: Tree, elsep: Tree)(implicit ctx: Context): If = tree match { case tree: If if (cond eq tree.cond) && (thenp eq tree.thenp) && (elsep eq tree.elsep) => tree case _ => finalize(tree, untpd.If(cond, thenp, elsep)) } def Closure(tree: Tree)(env: List[Tree], meth: Tree, tpt: Tree)(implicit ctx: Context): Closure = tree match { case tree: Closure if (env eq tree.env) && (meth eq tree.meth) && (tpt eq tree.tpt) => tree case _ => finalize(tree, untpd.Closure(env, meth, tpt)) } def Match(tree: Tree)(selector: Tree, cases: List[CaseDef])(implicit ctx: Context): Match = tree match { case tree: Match if (selector eq tree.selector) && (cases eq tree.cases) => tree case _ => finalize(tree, 
untpd.Match(selector, cases)) } def CaseDef(tree: Tree)(pat: Tree, guard: Tree, body: Tree)(implicit ctx: Context): CaseDef = tree match { case tree: CaseDef if (pat eq tree.pat) && (guard eq tree.guard) && (body eq tree.body) => tree case _ => finalize(tree, untpd.CaseDef(pat, guard, body)) } def Return(tree: Tree)(expr: Tree, from: Tree)(implicit ctx: Context): Return = tree match { case tree: Return if (expr eq tree.expr) && (from eq tree.from) => tree case _ => finalize(tree, untpd.Return(expr, from)) } def Try(tree: Tree)(expr: Tree, cases: List[CaseDef], finalizer: Tree)(implicit ctx: Context): Try = tree match { case tree: Try if (expr eq tree.expr) && (cases eq tree.cases) && (finalizer eq tree.finalizer) => tree case _ => finalize(tree, untpd.Try(expr, cases, finalizer)) } def SeqLiteral(tree: Tree)(elems: List[Tree])(implicit ctx: Context): SeqLiteral = tree match { case tree: JavaSeqLiteral => if (elems eq tree.elems) tree else finalize(tree, new JavaSeqLiteral(elems)) case tree: SeqLiteral if elems eq tree.elems => tree case _ => finalize(tree, untpd.SeqLiteral(elems)) } def TypeTree(tree: Tree)(original: Tree): TypeTree = tree match { case tree: TypeTree if original eq tree.original => tree case _ => finalize(tree, untpd.TypeTree(original)) } def SingletonTypeTree(tree: Tree)(ref: Tree): SingletonTypeTree = tree match { case tree: SingletonTypeTree if ref eq tree.ref => tree case _ => finalize(tree, untpd.SingletonTypeTree(ref)) } def SelectFromTypeTree(tree: Tree)(qualifier: Tree, name: Name): SelectFromTypeTree = tree match { case tree: SelectFromTypeTree if (qualifier eq tree.qualifier) && (name == tree.name) => tree case _ => finalize(tree, untpd.SelectFromTypeTree(qualifier, name)) } def AndTypeTree(tree: Tree)(left: Tree, right: Tree): AndTypeTree = tree match { case tree: AndTypeTree if (left eq tree.left) && (right eq tree.right) => tree case _ => finalize(tree, untpd.AndTypeTree(left, right)) } def OrTypeTree(tree: Tree)(left: Tree, right: 
Tree): OrTypeTree = tree match { case tree: OrTypeTree if (left eq tree.left) && (right eq tree.right) => tree case _ => finalize(tree, untpd.OrTypeTree(left, right)) } def RefinedTypeTree(tree: Tree)(tpt: Tree, refinements: List[Tree]): RefinedTypeTree = tree match { case tree: RefinedTypeTree if (tpt eq tree.tpt) && (refinements eq tree.refinements) => tree case _ => finalize(tree, untpd.RefinedTypeTree(tpt, refinements)) } def AppliedTypeTree(tree: Tree)(tpt: Tree, args: List[Tree]): AppliedTypeTree = tree match { case tree: AppliedTypeTree if (tpt eq tree.tpt) && (args eq tree.args) => tree case _ => finalize(tree, untpd.AppliedTypeTree(tpt, args)) } def ByNameTypeTree(tree: Tree)(result: Tree): ByNameTypeTree = tree match { case tree: ByNameTypeTree if result eq tree.result => tree case _ => finalize(tree, untpd.ByNameTypeTree(result)) } def TypeBoundsTree(tree: Tree)(lo: Tree, hi: Tree): TypeBoundsTree = tree match { case tree: TypeBoundsTree if (lo eq tree.lo) && (hi eq tree.hi) => tree case _ => finalize(tree, untpd.TypeBoundsTree(lo, hi)) } def Bind(tree: Tree)(name: Name, body: Tree): Bind = tree match { case tree: Bind if (name eq tree.name) && (body eq tree.body) => tree case _ => finalize(tree, untpd.Bind(name, body)) } def Alternative(tree: Tree)(trees: List[Tree]): Alternative = tree match { case tree: Alternative if trees eq tree.trees => tree case _ => finalize(tree, untpd.Alternative(trees)) } def UnApply(tree: Tree)(fun: Tree, implicits: List[Tree], patterns: List[Tree]): UnApply = tree match { case tree: UnApply if (fun eq tree.fun) && (implicits eq tree.implicits) && (patterns eq tree.patterns) => tree case _ => finalize(tree, untpd.UnApply(fun, implicits, patterns)) } def ValDef(tree: Tree)(name: TermName, tpt: Tree, rhs: LazyTree): ValDef = tree match { case tree: ValDef if (name == tree.name) && (tpt eq tree.tpt) && (rhs eq tree.unforcedRhs) => tree case _ => finalize(tree, untpd.ValDef(name, tpt, rhs)) } def DefDef(tree: Tree)(name: 
TermName, tparams: List[TypeDef], vparamss: List[List[ValDef]], tpt: Tree, rhs: LazyTree): DefDef = tree match { case tree: DefDef if (name == tree.name) && (tparams eq tree.tparams) && (vparamss eq tree.vparamss) && (tpt eq tree.tpt) && (rhs eq tree.unforcedRhs) => tree case _ => finalize(tree, untpd.DefDef(name, tparams, vparamss, tpt, rhs)) } def TypeDef(tree: Tree)(name: TypeName, rhs: Tree, tparams: List[untpd.TypeDef]): TypeDef = tree match { case tree: TypeDef if (name == tree.name) && (rhs eq tree.rhs) && (tparams eq tree.tparams) => tree case _ => finalize(tree, untpd.TypeDef(name, tparams, rhs)) } def Template(tree: Tree)(constr: DefDef, parents: List[Tree], self: ValDef, body: LazyTreeList): Template = tree match { case tree: Template if (constr eq tree.constr) && (parents eq tree.parents) && (self eq tree.self) && (body eq tree.unforcedBody) => tree case _ => finalize(tree, untpd.Template(constr, parents, self, body)) } def Import(tree: Tree)(expr: Tree, selectors: List[untpd.Tree]): Import = tree match { case tree: Import if (expr eq tree.expr) && (selectors eq tree.selectors) => tree case _ => finalize(tree, untpd.Import(expr, selectors)) } def PackageDef(tree: Tree)(pid: RefTree, stats: List[Tree]): PackageDef = tree match { case tree: PackageDef if (pid eq tree.pid) && (stats eq tree.stats) => tree case _ => finalize(tree, untpd.PackageDef(pid, stats)) } def Annotated(tree: Tree)(annot: Tree, arg: Tree)(implicit ctx: Context): Annotated = tree match { case tree: Annotated if (annot eq tree.annot) && (arg eq tree.arg) => tree case _ => finalize(tree, untpd.Annotated(annot, arg)) } def Thicket(tree: Tree)(trees: List[Tree]): Thicket = tree match { case tree: Thicket if trees eq tree.trees => tree case _ => finalize(tree, untpd.Thicket(trees)) } // Copier methods with default arguments; these demand that the original tree // is of the same class as the copy. We only include trees with more than 2 elements here. 
def If(tree: If)(cond: Tree = tree.cond, thenp: Tree = tree.thenp, elsep: Tree = tree.elsep)(implicit ctx: Context): If = If(tree: Tree)(cond, thenp, elsep) def Closure(tree: Closure)(env: List[Tree] = tree.env, meth: Tree = tree.meth, tpt: Tree = tree.tpt)(implicit ctx: Context): Closure = Closure(tree: Tree)(env, meth, tpt) def CaseDef(tree: CaseDef)(pat: Tree = tree.pat, guard: Tree = tree.guard, body: Tree = tree.body)(implicit ctx: Context): CaseDef = CaseDef(tree: Tree)(pat, guard, body) def Try(tree: Try)(expr: Tree = tree.expr, cases: List[CaseDef] = tree.cases, finalizer: Tree = tree.finalizer)(implicit ctx: Context): Try = Try(tree: Tree)(expr, cases, finalizer) def UnApply(tree: UnApply)(fun: Tree = tree.fun, implicits: List[Tree] = tree.implicits, patterns: List[Tree] = tree.patterns): UnApply = UnApply(tree: Tree)(fun, implicits, patterns) def ValDef(tree: ValDef)(name: TermName = tree.name, tpt: Tree = tree.tpt, rhs: LazyTree = tree.unforcedRhs): ValDef = ValDef(tree: Tree)(name, tpt, rhs) def DefDef(tree: DefDef)(name: TermName = tree.name, tparams: List[TypeDef] = tree.tparams, vparamss: List[List[ValDef]] = tree.vparamss, tpt: Tree = tree.tpt, rhs: LazyTree = tree.unforcedRhs): DefDef = DefDef(tree: Tree)(name, tparams, vparamss, tpt, rhs) def TypeDef(tree: TypeDef)(name: TypeName = tree.name, rhs: Tree = tree.rhs, tparams: List[untpd.TypeDef] = tree.tparams): TypeDef = TypeDef(tree: Tree)(name, rhs, tparams) def Template(tree: Template)(constr: DefDef = tree.constr, parents: List[Tree] = tree.parents, self: ValDef = tree.self, body: LazyTreeList = tree.unforcedBody): Template = Template(tree: Tree)(constr, parents, self, body) } abstract class TreeMap(val cpy: TreeCopier = inst.cpy) { def transform(tree: Tree)(implicit ctx: Context): Tree = tree match { case Ident(name) => tree case Select(qualifier, name) => cpy.Select(tree)(transform(qualifier), name) case This(qual) => tree case Super(qual, mix) => cpy.Super(tree)(transform(qual), mix) case 
Apply(fun, args) => cpy.Apply(tree)(transform(fun), transform(args)) case TypeApply(fun, args) => cpy.TypeApply(tree)(transform(fun), transform(args)) case Literal(const) => tree case New(tpt) => cpy.New(tree)(transform(tpt)) case Pair(left, right) => cpy.Pair(tree)(transform(left), transform(right)) case Typed(expr, tpt) => cpy.Typed(tree)(transform(expr), transform(tpt)) case NamedArg(name, arg) => cpy.NamedArg(tree)(name, transform(arg)) case Assign(lhs, rhs) => cpy.Assign(tree)(transform(lhs), transform(rhs)) case Block(stats, expr) => cpy.Block(tree)(transformStats(stats), transform(expr)) case If(cond, thenp, elsep) => cpy.If(tree)(transform(cond), transform(thenp), transform(elsep)) case Closure(env, meth, tpt) => cpy.Closure(tree)(transform(env), transform(meth), transform(tpt)) case Match(selector, cases) => cpy.Match(tree)(transform(selector), transformSub(cases)) case CaseDef(pat, guard, body) => cpy.CaseDef(tree)(transform(pat), transform(guard), transform(body)) case Return(expr, from) => cpy.Return(tree)(transform(expr), transformSub(from)) case Try(block, cases, finalizer) => cpy.Try(tree)(transform(block), transformSub(cases), transform(finalizer)) case SeqLiteral(elems) => cpy.SeqLiteral(tree)(transform(elems)) case TypeTree(original) => tree case SingletonTypeTree(ref) => cpy.SingletonTypeTree(tree)(transform(ref)) case SelectFromTypeTree(qualifier, name) => cpy.SelectFromTypeTree(tree)(transform(qualifier), name) case AndTypeTree(left, right) => cpy.AndTypeTree(tree)(transform(left), transform(right)) case OrTypeTree(left, right) => cpy.OrTypeTree(tree)(transform(left), transform(right)) case RefinedTypeTree(tpt, refinements) => cpy.RefinedTypeTree(tree)(transform(tpt), transformSub(refinements)) case AppliedTypeTree(tpt, args) => cpy.AppliedTypeTree(tree)(transform(tpt), transform(args)) case ByNameTypeTree(result) => cpy.ByNameTypeTree(tree)(transform(result)) case TypeBoundsTree(lo, hi) => cpy.TypeBoundsTree(tree)(transform(lo), transform(hi)) 
case Bind(name, body) => cpy.Bind(tree)(name, transform(body)) case Alternative(trees) => cpy.Alternative(tree)(transform(trees)) case UnApply(fun, implicits, patterns) => cpy.UnApply(tree)(transform(fun), transform(implicits), transform(patterns)) case EmptyValDef => tree case tree @ ValDef(name, tpt, _) => val tpt1 = transform(tpt) val rhs1 = transform(tree.rhs) cpy.ValDef(tree)(name, tpt1, rhs1) case tree @ DefDef(name, tparams, vparamss, tpt, _) => cpy.DefDef(tree)(name, transformSub(tparams), vparamss mapConserve (transformSub(_)), transform(tpt), transform(tree.rhs)) case tree @ TypeDef(name, rhs) => cpy.TypeDef(tree)(name, transform(rhs), tree.tparams) case tree @ Template(constr, parents, self, _) => cpy.Template(tree)(transformSub(constr), transform(parents), transformSub(self), transformStats(tree.body)) case Import(expr, selectors) => cpy.Import(tree)(transform(expr), selectors) case PackageDef(pid, stats) => cpy.PackageDef(tree)(transformSub(pid), transformStats(stats)) case Annotated(annot, arg) => cpy.Annotated(tree)(transform(annot), transform(arg)) case Thicket(trees) => val trees1 = transform(trees) if (trees1 eq trees) tree else Thicket(trees1) } def transformStats(trees: List[Tree])(implicit ctx: Context): List[Tree] = transform(trees) def transform(trees: List[Tree])(implicit ctx: Context): List[Tree] = flatten(trees mapConserve (transform(_))) def transformSub[Tr <: Tree](tree: Tr)(implicit ctx: Context): Tr = transform(tree).asInstanceOf[Tr] def transformSub[Tr <: Tree](trees: List[Tr])(implicit ctx: Context): List[Tr] = transform(trees).asInstanceOf[List[Tr]] } abstract class TreeAccumulator[X] { def apply(x: X, tree: Tree)(implicit ctx: Context): X def apply(x: X, trees: Traversable[Tree])(implicit ctx: Context): X = (x /: trees)(apply) def foldOver(x: X, tree: Tree)(implicit ctx: Context): X = { def localCtx = if (tree.hasType && tree.symbol.exists) ctx.withOwner(tree.symbol) else ctx tree match { case Ident(name) => x case 
Select(qualifier, name) => this(x, qualifier) case This(qual) => x case Super(qual, mix) => this(x, qual) case Apply(fun, args) => this(this(x, fun), args) case TypeApply(fun, args) => this(this(x, fun), args) case Literal(const) => x case New(tpt) => this(x, tpt) case Pair(left, right) => this(this(x, left), right) case Typed(expr, tpt) => this(this(x, expr), tpt) case NamedArg(name, arg) => this(x, arg) case Assign(lhs, rhs) => this(this(x, lhs), rhs) case Block(stats, expr) => this(this(x, stats), expr) case If(cond, thenp, elsep) => this(this(this(x, cond), thenp), elsep) case Closure(env, meth, tpt) => this(this(this(x, env), meth), tpt) case Match(selector, cases) => this(this(x, selector), cases) case CaseDef(pat, guard, body) => this(this(this(x, pat), guard), body) case Return(expr, from) => this(this(x, expr), from) case Try(block, handler, finalizer) => this(this(this(x, block), handler), finalizer) case SeqLiteral(elems) => this(x, elems) case TypeTree(original) => x case SingletonTypeTree(ref) => this(x, ref) case SelectFromTypeTree(qualifier, name) => this(x, qualifier) case AndTypeTree(left, right) => this(this(x, left), right) case OrTypeTree(left, right) => this(this(x, left), right) case RefinedTypeTree(tpt, refinements) => this(this(x, tpt), refinements) case AppliedTypeTree(tpt, args) => this(this(x, tpt), args) case ByNameTypeTree(result) => this(x, result) case TypeBoundsTree(lo, hi) => this(this(x, lo), hi) case Bind(name, body) => this(x, body) case Alternative(trees) => this(x, trees) case UnApply(fun, implicits, patterns) => this(this(this(x, fun), implicits), patterns) case tree @ ValDef(name, tpt, _) => implicit val ctx: Context = localCtx this(this(x, tpt), tree.rhs) case tree @ DefDef(name, tparams, vparamss, tpt, _) => implicit val ctx: Context = localCtx this(this((this(x, tparams) /: vparamss)(apply), tpt), tree.rhs) case TypeDef(name, rhs) => implicit val ctx: Context = localCtx this(x, rhs) case tree @ Template(constr, parents, 
self, _) => this(this(this(this(x, constr), parents), self), tree.body) case Import(expr, selectors) => this(x, expr) case PackageDef(pid, stats) => this(this(x, pid), stats)(localCtx) case Annotated(annot, arg) => this(this(x, annot), arg) case Thicket(ts) => this(x, ts) } } } abstract class TreeTraverser extends TreeAccumulator[Unit] { def traverse(tree: Tree)(implicit ctx: Context): Unit def apply(x: Unit, tree: Tree)(implicit ctx: Context) = traverse(tree) protected def traverseChildren(tree: Tree)(implicit ctx: Context) = foldOver((), tree) } /** Fold `f` over all tree nodes, in depth-first, prefix order */ class DeepFolder[X](f: (X, Tree) => X) extends TreeAccumulator[X] { def apply(x: X, tree: Tree)(implicit ctx: Context): X = foldOver(f(x, tree), tree) } /** Fold `f` over all tree nodes, in depth-first, prefix order, but don't visit * subtrees where `f` returns a different result for the root, i.e. `f(x, root) ne x`. */ class ShallowFolder[X](f: (X, Tree) => X) extends TreeAccumulator[X] { def apply(x: X, tree: Tree)(implicit ctx: Context): X = { val x1 = f(x, tree) if (x1.asInstanceOf[AnyRef] ne x1.asInstanceOf[AnyRef]) x1 else foldOver(x1, tree) } } def rename(tree: NameTree, newName: Name)(implicit ctx: Context): tree.ThisTree[T] = { tree match { case tree: Ident => cpy.Ident(tree)(newName) case tree: Select => cpy.Select(tree)(tree.qualifier, newName) case tree: Bind => cpy.Bind(tree)(newName, tree.body) case tree: ValDef => cpy.ValDef(tree)(name = newName.asTermName) case tree: DefDef => cpy.DefDef(tree)(name = newName.asTermName) case tree: untpd.PolyTypeDef => untpd.cpy.PolyTypeDef(tree)(newName.asTypeName, tree.tparams, tree.rhs).withMods(tree.rawMods) case tree: TypeDef => cpy.TypeDef(tree)(name = newName.asTypeName) case tree: SelectFromTypeTree => cpy.SelectFromTypeTree(tree)(tree.qualifier, newName) } }.asInstanceOf[tree.ThisTree[T]] } }
spetz911/dotty
src/dotty/tools/dotc/ast/Trees.scala
Scala
bsd-3-clause
55,356
/** * Copyright 2011-2017 GatlingCorp (http://gatling.io) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.gatling.http.action.async import io.gatling.commons.validation.Validation import io.gatling.core.action.{ Action, RequestAction } import io.gatling.core.session._ import io.gatling.core.stats.StatsEngine abstract class CancelCheckAction( val requestName: Expression[String], actorName: String, val statsEngine: StatsEngine, val next: Action ) extends RequestAction with AsyncProtocolAction { def sendRequest(requestName: String, session: Session): Validation[Unit] = for (actor <- fetchActor(actorName, session)) yield actor ! CancelCheck(requestName, next, session) }
MykolaB/gatling
gatling-http/src/main/scala/io/gatling/http/action/async/CancelCheckAction.scala
Scala
apache-2.0
1,236
// Mongo-evolutions data model. An `Evolution` pairs a revision number with its
// up script and optional down script, plus execution state bookkeeping; the
// companion provides Play-JSON (de)serialization and `fromFile`, which parses a
// numbered `<rev>.js` file by splitting it into "!Ups" / "!Downs" marker
// sections via the `reduceLines` fold.
// NOTE(review): original formatting was lost in this dump (code squashed onto
// single lines) and backslashes appear doubled (e.g. `__ \\ '_id`, `'\\n'`) —
// these look like escaping artifacts of the dump, not the real source; the
// text is kept byte-for-byte, only this header comment was added.
package com.evojam.mongodb.evolutions.model.evolution import java.io.{IOException, File} import org.joda.time.DateTime import play.api.libs.functional.syntax._ import play.api.libs.json._ import com.evojam.mongodb.evolutions.model.evolution.State.State import com.evojam.mongodb.evolutions.util.Resources case class EvolutionException(msg: String) extends Exception(msg) case class Evolution( revision: Int, up: Script, down: Option[Script], state: Option[State], timestamp: Option[DateTime], lastProblem: Option[String]) { require(revision > 0, "revision must be greater than 0") require(up != null, "up cannot be null") require(down != null, "down cannot be null") require(state != null, "state cannot be null") require(timestamp != null, "timestamp cannot be null") require(lastProblem != null, "lastProblem cannot be null") def hash(): String = up.md5 + down.map(_.md5).getOrElse("") } object Evolution { implicit val writes = new Writes[Evolution] { override def writes(evo: Evolution) = Json.obj( "_id" -> evo.revision, "up" -> evo.up, "down" -> evo.down, "state" -> evo.state, "timestamp" -> evo.timestamp, "lastProblem" -> evo.lastProblem, "hash" -> evo.hash) } implicit val reads = ( (__ \\ '_id).read[Int] ~ (__ \\ 'up).read[Script] ~ (__ \\ 'down).readNullable[Script] ~ (__ \\ 'state).readNullable[State] ~ (__ \\ 'timestamp).readNullable[DateTime] ~ (__ \\ 'lastProblem).readNullable[String])( Evolution.apply _) def fromFile(file: File): Evolution = Resources.load(file) .map(content => { val (down, up) = scripts(content) Evolution( revision(file.getName), up, down, None, None, None) }).getOrElse(throw new IOException(s"Cannot read from ${file.getAbsolutePath}")) private def revision(name: String): Int = name match { case EvolutionRevision(rev) => rev.toInt case _ => throw EvolutionException(s"Cannot parse the revision: $name") } private def scripts(content: String): (Option[Script], Script) = content.split('\\n') .filter(_.nonEmpty) .foldLeft((List.empty[String], 
List.empty[String], Marker.Empty))(reduceLines) match { case (downs, ups, _) => (script(downs), script(ups).getOrElse(throw new EvolutionException(s"Up not defined: $content"))) } private type ReductionStep = (List[String], List[String], Marker.Value) private def reduceLines(step: ReductionStep, line: String): ReductionStep = step match { case (downs, ups, marker) => line match { case UpsMarker() => (downs, ups, Marker.Up) case DownsMarker() => (downs, ups, Marker.Down) case _ => marker match { case Marker.Up => (downs, line :: ups, marker) case Marker.Down => (line :: downs, ups, marker) case _ => (downs, ups, marker) } } } private def script(content: List[String]) = content.filter(_.nonEmpty) match { case lines if lines.nonEmpty => Some(Script(lines.reverse.mkString("\\n"))) case _ => None } private val UpsMarker = "^//.*!Ups.*$".r private val DownsMarker = "^//.*!Downs.*$".r private val EvolutionRevision = "^(\\\\d+).js$".r }
evojam/mongodb-evolutions-scala
src/main/scala/com/evojam/mongodb/evolutions/model/evolution/Evolution.scala
Scala
apache-2.0
3,305
// doobie high-level API: module of constructors for `ConnectionIO` actions
// (scalaz-stream era). Provides statement/prepared-statement/callable-statement
// lifecycle helpers (each `liftXxx` pairs acquisition with `ensuring close`),
// transaction control (commit/rollback/savepoints), query/update analysis, and
// `Process`-based streaming over result sets via `liftProcess`.
// NOTE(review): original formatting was lost in this dump (code squashed onto
// single lines); kept byte-for-byte, only this header comment was added.
package doobie.hi import doobie.enum.holdability._ import doobie.enum.resultsettype._ import doobie.enum.resultsetconcurrency._ import doobie.enum.transactionisolation._ import doobie.enum.autogeneratedkeys.AutoGeneratedKeys import doobie.enum.jdbctype.JdbcType import doobie.syntax.catchable._ import doobie.syntax.process._ import doobie.util.analysis.Analysis import doobie.util.composite.Composite import doobie.util.process.resource import doobie.util.capture.Capture import doobie.free.{ connection => C } import doobie.free.{ preparedstatement => PS } import doobie.free.{ callablestatement => CS } import doobie.free.{ resultset => RS } import doobie.free.{ statement => S } import doobie.free.{ databasemetadata => DMD } import doobie.hi.{ preparedstatement => HPS } import doobie.hi.{ resultset => HRS } import java.sql.{ Connection, Savepoint, PreparedStatement, ResultSet } import scala.collection.immutable.Map import scala.collection.JavaConverters._ import scalaz.stream.Process import scalaz.{ Monad, ~>, Catchable, Foldable } import scalaz.syntax.id._ import scalaz.syntax.monad._ /** * Module of high-level constructors for `ConnectionIO` actions. 
* @group Modules */ object connection { /** @group Typeclass Instances */ implicit val MonadConnectionIO = C.MonadConnectionIO /** @group Typeclass Instances */ implicit val CatchableConnectionIO = C.CatchableConnectionIO /** @group Lifting */ def delay[A](a: => A): ConnectionIO[A] = C.delay(a) // TODO: make this public if the API sticks; still iffy private def liftProcess[A: Composite]( create: ConnectionIO[PreparedStatement], prep: PreparedStatementIO[Unit], exec: PreparedStatementIO[ResultSet]): Process[ConnectionIO, A] = { val preparedStatement: Process[ConnectionIO, PreparedStatement] = resource( create)(ps => C.liftPreparedStatement(ps, PS.close))(ps => Option(ps).point[ConnectionIO]).take(1) // note def results(ps: PreparedStatement): Process[ConnectionIO, A] = resource( C.liftPreparedStatement(ps, exec))(rs => C.liftResultSet(rs, RS.close))(rs => C.liftResultSet(rs, resultset.getNext[A])) for { ps <- preparedStatement _ <- Process.eval(C.liftPreparedStatement(ps, prep)) a <- results(ps) } yield a } /** * Construct a prepared statement from the given `sql`, configure it with the given `PreparedStatementIO` * action, and return results via a `Process`. * @group Prepared Statements */ def process[A: Composite](sql: String, prep: PreparedStatementIO[Unit]): Process[ConnectionIO, A] = liftProcess(C.prepareStatement(sql), prep, PS.executeQuery) /** * Construct a prepared update statement with the given return columns (and composite destination * type `A`) and sql source, configure it with the given `PreparedStatementIO` action, and return * the generated key results via a * `Process`. 
* @group Prepared Statements */ def updateWithGeneratedKeys[A: Composite](cols: List[String])(sql: String, prep: PreparedStatementIO[Unit]): Process[ConnectionIO, A] = liftProcess(C.prepareStatement(sql, cols.toArray), prep, PS.executeUpdate >> PS.getGeneratedKeys) /** @group Prepared Statements */ def updateManyWithGeneratedKeys[F[_]: Foldable, A: Composite, B: Composite](cols: List[String])(sql: String, prep: PreparedStatementIO[Unit], fa: F[A]): Process[ConnectionIO, B] = liftProcess[B](C.prepareStatement(sql, cols.toArray), prep, HPS.addBatchesAndExecute(fa) >> PS.getGeneratedKeys) /** @group Transaction Control */ val commit: ConnectionIO[Unit] = C.commit /** * Construct an analysis for the provided `sql` query, given parameter composite type `A` and * resultset row composite `B`. */ def prepareQueryAnalysis[A: Composite, B: Composite](sql: String): ConnectionIO[Analysis] = nativeTypeMap flatMap (m => prepareStatement(sql) { (HPS.getParameterMappings[A] |@| HPS.getColumnMappings[B])(Analysis(sql, m, _, _)) }) def prepareQueryAnalysis0[B: Composite](sql: String): ConnectionIO[Analysis] = nativeTypeMap flatMap (m => prepareStatement(sql) { HPS.getColumnMappings[B] map (cm => Analysis(sql, m, Nil, cm)) }) def prepareUpdateAnalysis[A: Composite](sql: String): ConnectionIO[Analysis] = nativeTypeMap flatMap (m => prepareStatement(sql) { HPS.getParameterMappings[A] map (pm => Analysis(sql, m, pm, Nil)) }) def prepareUpdateAnalysis0(sql: String): ConnectionIO[Analysis] = nativeTypeMap flatMap (m => prepareStatement(sql) { Analysis(sql, m, Nil, Nil).point[PreparedStatementIO] }) /** @group Statements */ def createStatement[A](k: StatementIO[A]): ConnectionIO[A] = C.createStatement.flatMap(s => C.liftStatement(s, k ensuring S.close)) /** @group Statements */ def createStatement[A](rst: ResultSetType, rsc: ResultSetConcurrency)(k: StatementIO[A]): ConnectionIO[A] = C.createStatement(rst.toInt, rsc.toInt).flatMap(s => C.liftStatement(s, k ensuring S.close)) /** @group 
Statements */ def createStatement[A](rst: ResultSetType, rsc: ResultSetConcurrency, rsh: Holdability)(k: StatementIO[A]): ConnectionIO[A] = C.createStatement(rst.toInt, rsc.toInt, rsh.toInt).flatMap(s => C.liftStatement(s, k ensuring S.close)) /** @group Connection Properties */ val getCatalog: ConnectionIO[String] = C.getCatalog /** @group Connection Properties */ def getClientInfo(key: String): ConnectionIO[Option[String]] = C.getClientInfo(key).map(Option(_)) /** @group Connection Properties */ val getClientInfo: ConnectionIO[Map[String, String]] = C.getClientInfo.map(_.asScala.toMap) /** @group Connection Properties */ val getHoldability: ConnectionIO[Holdability] = C.getHoldability.map(Holdability.unsafeFromInt) /** @group Connection Properties */ def getMetaData[A](k: DatabaseMetaDataIO[A]): ConnectionIO[A] = C.getMetaData.flatMap(s => C.liftDatabaseMetaData(s, k)) /** @group Transaction Control */ val getTransactionIsolation: ConnectionIO[TransactionIsolation] = C.getTransactionIsolation.map(TransactionIsolation.unsafeFromInt) /** @group Connection Properties */ val isReadOnly: ConnectionIO[Boolean] = C.isReadOnly /** @group Callable Statements */ def prepareCall[A](sql: String, rst: ResultSetType, rsc: ResultSetConcurrency)(k: CallableStatementIO[A]): ConnectionIO[A] = C.prepareCall(sql, rst.toInt, rsc.toInt).flatMap(s => C.liftCallableStatement(s, k ensuring CS.close)) /** @group Callable Statements */ def prepareCall[A](sql: String)(k: CallableStatementIO[A]): ConnectionIO[A] = C.prepareCall(sql).flatMap(s => C.liftCallableStatement(s, k ensuring CS.close)) /** @group Callable Statements */ def prepareCall[A](sql: String, rst: ResultSetType, rsc: ResultSetConcurrency, rsh: Holdability)(k: CallableStatementIO[A]): ConnectionIO[A] = C.prepareCall(sql, rst.toInt, rsc.toInt, rsh.toInt).flatMap(s => C.liftCallableStatement(s, k ensuring CS.close)) /** @group Prepared Statements */ def prepareStatement[A](sql: String, rst: ResultSetType, rsc: 
ResultSetConcurrency)(k: PreparedStatementIO[A]): ConnectionIO[A] = C.prepareStatement(sql, rst.toInt, rsc.toInt).flatMap(s => C.liftPreparedStatement(s, k ensuring PS.close)) /** @group Prepared Statements */ def prepareStatement[A](sql: String)(k: PreparedStatementIO[A]): ConnectionIO[A] = C.prepareStatement(sql).flatMap(s => C.liftPreparedStatement(s, k ensuring PS.close)) /** @group Prepared Statements */ def prepareStatement[A](sql: String, rst: ResultSetType, rsc: ResultSetConcurrency, rsh: Holdability)(k: PreparedStatementIO[A]): ConnectionIO[A] = C.prepareStatement(sql, rst.toInt, rsc.toInt, rsh.toInt).flatMap(s => C.liftPreparedStatement(s, k ensuring PS.close)) /** @group Prepared Statements */ def prepareStatement[A](sql: String, agk: AutoGeneratedKeys)(k: PreparedStatementIO[A]): ConnectionIO[A] = C.prepareStatement(sql, agk.toInt).flatMap(s => C.liftPreparedStatement(s, k ensuring PS.close)) /** @group Prepared Statements */ def prepareStatementI[A](sql: String, columnIndexes: List[Int])(k: PreparedStatementIO[A]): ConnectionIO[A] = C.prepareStatement(sql, columnIndexes.toArray).flatMap(s => C.liftPreparedStatement(s, k ensuring PS.close)) /** @group Prepared Statements */ def prepareStatementS[A](sql: String, columnNames: List[String])(k: PreparedStatementIO[A]): ConnectionIO[A] = C.prepareStatement(sql, columnNames.toArray).flatMap(s => C.liftPreparedStatement(s, k ensuring PS.close)) /** @group Transaction Control */ def releaseSavepoint(sp: Savepoint): ConnectionIO[Unit] = C.releaseSavepoint(sp) /** @group Transaction Control */ def rollback(sp: Savepoint): ConnectionIO[Unit] = C.rollback(sp) /** @group Transaction Control */ val rollback: ConnectionIO[Unit] = C.rollback /** @group Connection Properties */ def setCatalog(catalog: String): ConnectionIO[Unit] = C.setCatalog(catalog) /** @group Connection Properties */ def setClientInfo(key: String, value: String): ConnectionIO[Unit] = C.setClientInfo(key, value) /** @group Connection Properties */ 
def setClientInfo(info: Map[String, String]): ConnectionIO[Unit] = C.setClientInfo(new java.util.Properties <| (_.putAll(info.asJava))) /** @group Connection Properties */ def setHoldability(h: Holdability): ConnectionIO[Unit] = C.setHoldability(h.toInt) /** @group Connection Properties */ def setReadOnly(readOnly: Boolean): ConnectionIO[Unit] = C.setReadOnly(readOnly) /** @group Transaction Control */ val setSavepoint: ConnectionIO[Savepoint] = C.setSavepoint /** @group Transaction Control */ def setSavepoint(name: String): ConnectionIO[Savepoint] = C.setSavepoint(name) /** @group Transaction Control */ def setTransactionIsolation(ti: TransactionIsolation): ConnectionIO[Unit] = C.setTransactionIsolation(ti.toInt) /** @group Process Syntax */ implicit class ProcessConnectionIOOps[A](pa: Process[ConnectionIO, A]) { def trans[M[_]: Monad: Catchable: Capture](c: Connection): Process[M, A] = pa.translate(new (ConnectionIO ~> M) { def apply[B](ma: ConnectionIO[B]): M[B] = ma.transK[M].run(c) }) } /** * Compute a map from native type to closest-matching JDBC type. * @group MetaData */ val nativeTypeMap: ConnectionIO[Map[String, JdbcType]] = { getMetaData(DMD.getTypeInfo.flatMap(DMD.liftResultSet(_, HRS.process[(String, JdbcType)].list.map(_.toMap)))) } }
beni55/doobie
core/src/main/scala/doobie/hi/connection.scala
Scala
mit
10,680
package zangelo.spray.json.annotation

import scala.annotation.StaticAnnotation

/** Static annotation that overrides the JSON field name used for the
  * annotated case-class member during (de)serialization.
  *
  * @param name the property name to read/emit in JSON instead of the
  *             Scala field name
  */
final case class JsonProperty(name: String) extends StaticAnnotation
zackangelo/spray-json-macros
src/main/scala/zangelo/spray/json/annotation/JsonProperty.scala
Scala
apache-2.0
148
/*
 * Copyright 2022 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package models

/** A benefit-in-kind (BiK) together with its status code and the number of
  * exclusions recorded against it.
  *
  * @param iabdType           IABD type identifier of the benefit
  * @param status             status code supplied by the caller
  * @param numberOfExclusions count of exclusions for this benefit
  */
case class BiKsWithExclusions(
  iabdType: String,
  status: Int,
  numberOfExclusions: Int
)
hmrc/pbik-frontend
app/models/BiKsWithExclusions.scala
Scala
apache-2.0
707
package controllers

import play.api.mvc._

/** Mix-in for controllers that need simple session-based authentication. */
trait Secured {

  /** The logged-in user's identifier, as stored in the session cookie. */
  def username(request: RequestHeader) = request.session.get("session.email")

  /** Applied when no authenticated user is present: redirect to the login page. */
  def onUnauthorized(request: RequestHeader) = Results.Redirect(routes.AuthController.login)

  /** Wraps `f` so it only executes for authenticated requests; anonymous
    * requests are answered by `onUnauthorized` instead.
    */
  def withAuth(f: => String => Request[AnyContent] => Result) =
    Security.Authenticated(username, onUnauthorized) { user =>
      Action { request =>
        f(user)(request)
      }
    }

  // /**
  //  * This method shows how you could wrap the withAuth method to also fetch your user
  //  * You will need to implement UserDAO.findOneByUsername
  //  */
  // def withUser(f: User => Request[AnyContent] => Result) = withAuth { username => implicit request =>
  //   UserDAO.findOneByUsername(username).map { user =>
  //     f(user)(request)
  //   }.getOrElse(onUnauthorized(request))
  // }
}
rysh/scalatrader
scalatrader/app/controllers/Secured.scala
Scala
mit
821
package skinny.nlp

/** Companion exposing a lazily-created default analyzer instance. */
object SkinnyJapaneseAnalyzer {
  // Built once, on first access, by the factory.
  lazy val default: SkinnyJapaneseAnalyzer = SkinnyJapaneseAnalyzerFactory.create()
}

/** Converts Japanese text between writing systems (katakana, hiragana,
  * romaji). The `*Readings` variants return candidate readings; the
  * singular variants return a single converted string.
  */
trait SkinnyJapaneseAnalyzer {

  def toKatakanaReadings(str: String): Seq[String]

  def toHiraganaReadings(str: String): Seq[String]

  def toRomajiReadings(str: String): Seq[String]

  def toRomaji(str: String): String

  def toHiragana(str: String): String

  // NOTE(review): the name looks like a typo for `toKatakana`, but it is part
  // of the public interface — renaming would break existing callers.
  def toKatanaka(str: String): String
}
skinny-framework/skinny-framework
common/src/main/scala/skinny/nlp/SkinnyJapaneseAnalyzer.scala
Scala
mit
443
package com.rasterfoundry.common.color

import io.circe.generic.JsonCodec

/** Base trait for color-correction steps; every step can be toggled on/off. */
sealed trait ColorCorrection {
  val enabled: Boolean
}

/** Per-channel gamma parameters. */
@JsonCodec
final case class BandGamma(
    enabled: Boolean,
    redGamma: Option[Double],
    greenGamma: Option[Double],
    blueGamma: Option[Double]
) extends ColorCorrection

/** Per-channel min/max clipping bounds. */
@JsonCodec
final case class PerBandClipping(
    enabled: Boolean,
    redMax: Option[Int],
    greenMax: Option[Int],
    blueMax: Option[Int],
    redMin: Option[Int],
    greenMin: Option[Int],
    blueMin: Option[Int]
) extends ColorCorrection

/** A single min/max clipping range shared by all bands. */
@JsonCodec
final case class MultiBandClipping(
    enabled: Boolean,
    min: Option[Int],
    max: Option[Int]
) extends ColorCorrection

/** Sigmoidal-contrast parameters. */
@JsonCodec
final case class SigmoidalContrast(
    enabled: Boolean,
    alpha: Option[Double],
    beta: Option[Double]
) extends ColorCorrection

/** Saturation parameter. */
@JsonCodec
final case class Saturation(
    enabled: Boolean,
    saturation: Option[Double]
) extends ColorCorrection

/** Histogram-equalization toggle. */
@JsonCodec
final case class Equalization(enabled: Boolean) extends ColorCorrection

/** Automatic white-balance toggle. */
@JsonCodec
final case class AutoWhiteBalance(enabled: Boolean) extends ColorCorrection
azavea/raster-foundry
app-backend/common/src/main/scala/com/rasterfoundry/common/color/Corrections.scala
Scala
apache-2.0
1,492
package transport

import com.twitter.finatra.request.QueryParam

// Finatra request binding: maps the `id` query parameter onto a typed field.
case class IdRequest(@QueryParam id: Int)
icyJoseph/crowdpath
backend/src/main/scala/transport/IdRequest.scala
Scala
mit
108
package models.query

/** Query-result row: how many people fall into a given schooling level
  * within a given age group.
  *
  * @param schooling schooling-level label
  * @param group     age-group label
  * @param peoples   number of people counted for this combination
  */
case class PeoplesInAgeGroupSchooling(
  schooling: String,
  group: String,
  peoples: Int
)
LeonardoZ/SAEB
app/models/query/PeoplesInAgeGroupSchooling.scala
Scala
mit
111
// Data object for one row of the YIFY cache table: a movie id plus optional
// title/year/quality/size/cover-image strings, each held as an Option backed
// by a private var with a getter and a setter-like method.
// NOTE(review): the mutator names end in `_:` (e.g. `MovieID_:`), not the
// Scala assignment-sugar suffix `_=`, so `obj.MovieID = 5` does NOT compile —
// callers must invoke these as ordinary (right-associative) methods. Looks
// like an accidental deviation from the `def x_=(v)` convention, but renaming
// would break existing callers; kept as-is. Original formatting was lost in
// this dump; code below is byte-for-byte, only this header was added.
/* * YIFYCache.scala * * Copyright (c) 2014 Ronald Kurniawan. All rights reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301 USA */ package net.fluxo.dd.dbo /** * Data Object for representing one entry in the YIFY cache table. * * @author Ronald Kurniawan (viper) * @version 0.4.5, 3/04/14 */ class YIFYCache { private var _movieID: Int = 0 def MovieID: Int = _movieID def MovieID_:(value: Int) { _movieID = value } private var _title: Option[String] = None def MovieTitle: Option[String] = _title def MovieTitle_:(value: String) { _title = Some(value) } private var _year: Option[String] = None def MovieYear: Option[String] = _year def MovieYear_:(value: String) { _year = Some(value) } private var _quality: Option[String] = None def MovieQuality: Option[String] = _quality def MovieQuality_:(value: String) { _quality = Some(value) } private var _size: Option[String] = None def MovieSize: Option[String] = _size def MovieSize_:(value: String) { _size = Some(value) } private var _coverImage: Option[String] = None def MovieCoverImage: Option[String] = _coverImage def MovieCoverImage_:(value: String) { _coverImage = Some(value) } }
fluxodesign/DownloadDaemon
src/main/scala/net/fluxo/dd/dbo/YIFYCache.scala
Scala
gpl-2.0
1,878
// Unit test for SourceLine.splitOnCharacter: splitting "abcd" at index i
// yields (prefix, the character at i, suffix), with out-of-range indices
// degrading to ("abcd", "", "").
// NOTE(review): ScalaTest's `expect` was deprecated in ScalaTest 2.0 in
// favor of `assertResult`; left unchanged to stay compatible with the
// ScalaTest version this repo pins. Original formatting was lost in this
// dump; code below is byte-for-byte, only this header comment was added.
/** * Copyright (C) 2009-2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.fusesource.scalate.console import _root_.org.fusesource.scalate.FunSuiteSupport class SourceLineTest extends FunSuiteSupport { val line = SourceLine(1, "abcd") test("split line") { expect(("", "a", "bcd")) { line.splitOnCharacter(0) } expect(("a", "b", "cd")) { line.splitOnCharacter(1) } expect(("ab", "c", "d")) { line.splitOnCharacter(2) } expect(("abc", "d", "")) { line.splitOnCharacter(3) } expect(("abcd", "", "")) { line.splitOnCharacter(4) } expect(("abcd", "", "")) { line.splitOnCharacter(5) } } }
dnatic09/scalate
scalate-core/src/test/scala/org/fusesource/scalate/console/SourceLineTest.scala
Scala
apache-2.0
1,293
// CultureHub organization-group admin controller: lists groups, renders the
// group edit form, handles create/update submissions (with role/resource
// wiring and ownership-protection checks), removal, and resource-token
// search; plus the GroupViewModel form mapping used by the UI.
// NOTE(review): `submit` uses non-local `return` inside an Action body for
// its error paths and performs security checks inline — statement order is
// load-bearing here, so the code is kept byte-for-byte (original formatting
// was lost in this dump); only this header comment was added.
package controllers.organization import extensions.JJson import com.mongodb.casbah.Imports._ import models._ import models.HubMongoContext._ import play.api.i18n.Messages import controllers.{ OrganizationController, Token } import play.api.mvc.{ Results, AnyContent, RequestHeader, Action } import play.api.data.Forms._ import extensions.Formatters._ import play.api.data.Form import core.CultureHubPlugin import core.access.{ ResourceType, Resource } import collection.JavaConverters._ import play.api.Logger import scala.Some import com.escalatesoft.subcut.inject.BindingModule /** * * @author Gerald de Jong <gerald@delving.eu> */ class Groups(implicit val bindingModule: BindingModule) extends OrganizationController { def list = OrganizationMember { MultitenantAction { implicit request => val groups = Group.dao.list(userName, configuration.orgId).filterNot(_.isSystemGroup).map { group => GroupListModel( id = group._id.toString, name = group.name, description = Role.get(group.roleKey).getDescription(getLang), size = group.users.size ) }.toSeq val groupsData = Map("groups" -> groups) Ok(Template('groups -> JJson.generate(groupsData))) } } def groups(groupId: Option[ObjectId]) = OrganizationMember { MultitenantAction { implicit request => if (groupId != None && !canUpdateGroup(configuration.orgId, groupId.get) || groupId == None && !canCreateGroup(configuration.orgId)) { Forbidden(Messages("hub.YouDoNotHaveAccess")) } else { val group: Option[Group] = groupId.flatMap(Group.dao.findOneById(_)) val usersAsTokens = group match { case None => JJson.generate(List()) case Some(g) => val userTokens = g.users.map(m => Token(m, m)) JJson.generate(userTokens) } Ok(Template( 'id -> groupId, 'data -> load(configuration.orgId, groupId), 'groupForm -> GroupViewModel.groupForm, 'users -> usersAsTokens, 'roles -> Role.allPrimaryRoles(configuration). filterNot(_ == Role.OWN). map(role => role.key -> role.getDescription(getLang)). 
toMap.asJava )) } } } def remove(groupId: Option[ObjectId]) = OrganizationAdmin { implicit request => if (!groupId.isDefined) { Results.BadRequest } else { Group.dao.remove(MongoDBObject("_id" -> groupId, "orgId" -> configuration.orgId)) Ok } } def submit: Action[AnyContent] = OrganizationMember { MultitenantAction { implicit request => GroupViewModel.groupForm.bindFromRequest.fold( formWithErrors => handleValidationError(formWithErrors), groupForm => { Logger("CultureHub").debug("Received group submission: " + groupForm) val groupId = groupForm.id if (groupForm.id != None && !canUpdateGroup(configuration.orgId, groupId.get) || groupId == None && !canCreateGroup(configuration.orgId)) { Forbidden(Messages("hub.YouDoNotHaveAccess")) } else { val role = try { Role.get(groupForm.roleKey) } catch { case t: Throwable => reportSecurity("Attempting to save Group with role " + groupForm.roleKey) return MultitenantAction { BadRequest("Invalid Role " + groupForm.roleKey) } } if (role == Role.OWN && (groupForm.id == None || groupForm.id != None && Group.dao.findOneById(groupForm.id.get) == None)) { reportSecurity("User %s tried to create an owners team!".format(connectedUser)) return MultitenantAction { Forbidden("Your IP has been logged and reported to the police.") } } val persisted = groupForm.id match { case None => Group.dao.insert( Group( name = groupForm.name, orgId = configuration.orgId, roleKey = role.key ) ) match { case None => None case Some(id) => groupForm.users.foreach(u => Group.dao.addUser(configuration.orgId, u.id, id)) groupForm.resources.foreach(r => Group.dao.addResource(configuration.orgId, r.id, role.resourceType.get, id)) Some(groupForm.copy(id = Some(id))) } case Some(id) => Group.dao.findOneById(groupForm.id.get) match { case None => return MultitenantAction { NotFound("Group with ID %s was not found".format(id)) } case Some(g) => g.roleKey match { case Role.OWN.key => // do nothing case _ => val resources: Seq[Resource] = role.resourceType.map { 
resourceType => val lookup = CultureHubPlugin.getResourceLookup(role.resourceType.get).get groupForm.resources.flatMap { resourceToken => lookup.findResourceByKey(configuration.orgId, resourceToken.id) } }.getOrElse { Seq.empty } Group.dao.updateGroupInfo(id, groupForm.name, role, groupForm.users.map(_.id), resources.map(r => PersistedResource(r))) groupForm.users.foreach(u => Group.dao.addUser(configuration.orgId, u.id, id)) } Some(groupForm) } } persisted match { case Some(group) => Json(group) case None => Error(Messages("hub.CouldNotSaveGroup")) } } }) } } def searchResourceTokens(resourceType: String, q: String) = OrganizationMember { MultitenantAction { implicit request => val maybeLookup = CultureHubPlugin.getResourceLookup(ResourceType(resourceType)) maybeLookup.map { lookup => val tokens = lookup.findResources(configuration.orgId, q).map { resource => Token(resource.getResourceKey, resource.getResourceKey, Some(resource.getResourceType.resourceType)) } Json(tokens) }.getOrElse( Json(Seq.empty) ) } } private def load(orgId: String, groupId: Option[ObjectId])(implicit configuration: OrganizationConfiguration): String = { val resourceRoles = Role.allPrimaryRoles(configuration).filterNot(_.resourceType.isEmpty) val defaultGroupViewModel = GroupViewModel( roleKey = Role.allPrimaryRoles(configuration).head.key, rolesWithResources = resourceRoles.map(_.key), rolesWithResourceAdmin = Role.allPrimaryRoles(configuration).filter(_.isResourceAdmin).map(_.key), rolesResourceType = resourceRoles.map(r => RoleResourceType(r.key, r.resourceType.get.resourceType, Messages("accessControl.resourceType." 
+ r.resourceType.get.resourceType))) ) groupId.flatMap(Group.dao.findOneById(_)) match { case None => JJson.generate(defaultGroupViewModel) case Some(group) => JJson.generate( GroupViewModel( id = Some(group._id), name = group.name, roleKey = group.roleKey, canChangeGrantType = group.roleKey != Role.OWN.key, users = group.users.map(u => Token(u, u)), resources = group.resources.map(r => Token(r.getResourceKey, r.getResourceKey, Some(r.getResourceType.resourceType))), rolesWithResources = defaultGroupViewModel.rolesWithResources, rolesWithResourceAdmin = defaultGroupViewModel.rolesWithResourceAdmin, rolesResourceType = defaultGroupViewModel.rolesResourceType ) ) } } private def canUpdateGroup[A](orgId: String, groupId: ObjectId)(implicit request: MultitenantRequest[A]): Boolean = { groupId != null && organizationServiceLocator.byDomain.isAdmin(orgId, userName) } private def canCreateGroup[A](orgId: String)(implicit request: MultitenantRequest[A]): Boolean = organizationServiceLocator.byDomain.isAdmin(orgId, userName) } case class GroupViewModel(id: Option[ObjectId] = None, name: String = "", roleKey: String, canChangeGrantType: Boolean = true, users: Seq[Token] = Seq.empty[Token], resources: Seq[Token] = Seq.empty[Token], rolesWithResources: Seq[String] = Seq.empty, rolesWithResourceAdmin: Seq[String] = Seq.empty, rolesResourceType: Seq[RoleResourceType] = Seq.empty) case class RoleResourceType(roleKey: String, resourceType: String, resourceTypeName: String) object GroupViewModel { // ~~~ Form utilities import extensions.Formatters._ val tokenListMapping = seq( play.api.data.Forms.mapping( "id" -> text, "name" -> text, "tokenType" -> optional(text), "data" -> optional(of[Map[String, String]]) )(Token.apply)(Token.unapply) ) val groupForm: Form[GroupViewModel] = Form( mapping( "id" -> optional(of[ObjectId]), "name" -> nonEmptyText, "roleKey" -> nonEmptyText, "canChangeGrantType" -> boolean, "users" -> tokenListMapping, "resources" -> tokenListMapping, 
"rolesWithResources" -> seq(nonEmptyText), "rolesWithResourceAdmin" -> seq(nonEmptyText), "rolesResourceType" -> seq( mapping( "roleKey" -> nonEmptyText, "resourceType" -> nonEmptyText, "resourceTypeName" -> nonEmptyText )(RoleResourceType.apply)(RoleResourceType.unapply) ) )(GroupViewModel.apply)(GroupViewModel.unapply) ) } case class GroupListModel(id: String, name: String, size: Int, description: String)
delving/culture-hub
app/controllers/organization/Groups.scala
Scala
apache-2.0
10,141
// Dotty run test ("reduce-projections"): checks which constructor-argument
// side effects survive when the compiler reduces a projection like
// `new C(a, b, c, d).x2` inside an `inline def`. The exact sequence of
// println output IS the expected behavior under test — do not reorder or
// simplify any expression here. Original formatting was lost in this dump;
// code below is byte-for-byte, only this header comment was added.
class C(val x1: Int, val x2: Int, val x3: Int, val x4: Int) object Test { class D(n: Int) { println(n) def result = n } object O2 extends D(2) object O2a extends D(2) object O2b extends D(2) object O3 extends D(3) object O3a extends D(3) object O3b extends D(3) inline def f(): Unit = { println(new C( { println(1); 1 }, { println(2); 2 }, { println(3); 3 }, { println(4); 4 } ).x1) println(new C( { println(1); 1 }, { println(2); 2 }, { println(3); 3 }, { println(4); 4 } ).x2) println(new C( { println(1); 1 }, { println(2); 2 }, { println(3); 3 }, { println(4); 4 } ).x3) println(new C( { println(1); 1 }, { println(2); 2 }, { println(3); 3 }, { println(4); 4 } ).x4) println("===") println(new C( { 1 }, { println(2); 2 }, { println(3); 3 }, { println(4); 4 } ).x1) println(new C( { println(1); 1 }, { 2 }, { println(3); 3 }, { println(4); 4 } ).x2) println(new C( { println(1); 1 }, { println(2); 2 }, { 3 }, { println(4); 4 } ).x3) println(new C( { println(1); 1 }, { println(2); 2 }, { println(3); 3 }, { 4 } ).x4) println("===") println(new C( { 1 }, { println(2); 2 }, { println(3); 3 }, { 4 } ).x1) println(new C( { 1 }, { 2 }, { println(3); 3 }, { 4 } ).x2) println(new C( { 1 }, { println(2); 2 }, { 3 }, { 4 } ).x3) println(new C( { 1 }, { println(2); 2 }, { println(3); 3 }, { 4 } ).x4) println("===") println(new C( { 1 }, O2.result, O3.result, { 4 } ).x1) println(new C( { 1 }, { 2 }, O3a.result, { 4 } ).x2) println(new C( { 1 }, O2a.result, { 3 }, { 4 } ).x3) println(new C( { 1 }, O2b.result, O3b.result, { 4 } ).x4) } def main(args: Array[String]): Unit = { f() } }
som-snytt/dotty
tests/run/reduce-projections.scala
Scala
apache-2.0
2,150
package es.upm.fi.oeg.morph.tc

/** R2RML conformance checks for the D011 (many-to-many relations) test group:
  * each case runs a mapping and verifies the default-graph triple count.
  */
class D011Test extends R2RMLTest("D011-M2MRelations") {

  "TC011a" should "gen 19 in DG" in {
    generate("R2RMLTC0011a").getDefaultGraph.size should be (19)
  }

  "TC011b" should "gen 16 in DG" in {
    generate("R2RMLTC0011b").getDefaultGraph.size should be (16)
  }
}
jpcik/morph
morph-r2rml-tc/src/test/scala/es/upm/fi/oeg/morph/tc/D011Test.scala
Scala
apache-2.0
326
package geotrellis.ExampleTwo

import geotrellis._
import geotrellis.process._
import org.scalatest.Spec
import org.scalatest.matchers.MustMatchers

// Crude wall-clock timer: `f` is evaluated eagerly while the instance is
// constructed, so merely creating a Timer performs the work. `result` holds
// f's value and `time` the elapsed milliseconds. The val order below is
// load-bearing (t0 before f(), f() before t1) — do not reorder.
case class Timer[T](f:() => T) {
  val t0 = System.currentTimeMillis()  // start timestamp (ms)
  val result = f()                     // forces the thunk
  val t1 = System.currentTimeMillis()  // end timestamp (ms)
  val time = t1 - t0                   // elapsed wall-clock ms
}

// Intentionally empty placeholder spec.
class ExampleTwoSpec extends Spec with MustMatchers {}
Tjoene/thesis
Case_Programs/geotrellis-0.7.0/src/test/scala/geotrellis/bugs/example2.scala
Scala
gpl-2.0
358
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.deploy.yarn import java.io.{File, FileOutputStream, IOException, OutputStreamWriter} import java.net.{InetAddress, UnknownHostException, URI} import java.nio.ByteBuffer import java.nio.charset.StandardCharsets import java.util.{Properties, UUID} import java.util.zip.{ZipEntry, ZipOutputStream} import scala.collection.JavaConverters._ import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, ListBuffer, Map} import scala.util.control.NonFatal import com.google.common.base.Objects import com.google.common.io.Files import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs._ import org.apache.hadoop.fs.permission.FsPermission import org.apache.hadoop.io.DataOutputBuffer import org.apache.hadoop.mapreduce.MRJobConfig import org.apache.hadoop.security.{Credentials, UserGroupInformation} import org.apache.hadoop.util.StringUtils import org.apache.hadoop.yarn.api._ import org.apache.hadoop.yarn.api.ApplicationConstants.Environment import org.apache.hadoop.yarn.api.protocolrecords._ import org.apache.hadoop.yarn.api.records._ import org.apache.hadoop.yarn.client.api.{YarnClient, YarnClientApplication} import org.apache.hadoop.yarn.conf.YarnConfiguration import 
org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException
import org.apache.hadoop.yarn.util.Records

import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.deploy.yarn.security.ConfigurableCredentialManager
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.launcher.{LauncherBackend, SparkAppHandle, YarnCommandBuilderUtils}
import org.apache.spark.util.{CallerContext, Utils}

/**
 * Client that submits a Spark application to a YARN ResourceManager: it prepares and uploads
 * the application's resources, builds the ApplicationMaster container launch context, and
 * submits/monitors the resulting YARN application.
 */
private[spark] class Client(
    val args: ClientArguments,
    val hadoopConf: Configuration,
    val sparkConf: SparkConf)
  extends Logging {

  import Client._
  import YarnSparkHadoopUtil._

  def this(clientArgs: ClientArguments, spConf: SparkConf) =
    this(clientArgs, SparkHadoopUtil.get.newConfiguration(spConf), spConf)

  private val yarnClient = YarnClient.createYarnClient
  private val yarnConf = new YarnConfiguration(hadoopConf)

  private val isClusterMode = sparkConf.get("spark.submit.deployMode", "client") == "cluster"

  // AM related configurations.
  // In cluster mode the driver runs inside the AM, so the driver's memory/cores settings size
  // the AM container; in client mode the dedicated spark.yarn.am.* settings are used instead.
  private val amMemory = if (isClusterMode) {
    sparkConf.get(DRIVER_MEMORY).toInt
  } else {
    sparkConf.get(AM_MEMORY).toInt
  }
  // Overhead defaults to max(MEMORY_OVERHEAD_FACTOR * amMemory, MEMORY_OVERHEAD_MIN) when the
  // corresponding overhead config is not set explicitly.
  private val amMemoryOverhead = {
    val amMemoryOverheadEntry = if (isClusterMode) DRIVER_MEMORY_OVERHEAD else AM_MEMORY_OVERHEAD
    sparkConf.get(amMemoryOverheadEntry).getOrElse(
      math.max((MEMORY_OVERHEAD_FACTOR * amMemory).toLong, MEMORY_OVERHEAD_MIN)).toInt
  }
  private val amCores = if (isClusterMode) {
    sparkConf.get(DRIVER_CORES)
  } else {
    sparkConf.get(AM_CORES)
  }

  // Executor related configurations (same overhead-defaulting rule as for the AM above).
  private val executorMemory = sparkConf.get(EXECUTOR_MEMORY)
  private val executorMemoryOverhead = sparkConf.get(EXECUTOR_MEMORY_OVERHEAD).getOrElse(
    math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toLong, MEMORY_OVERHEAD_MIN)).toInt

  private val distCacheMgr = new ClientDistributedCacheManager()

  // Kerberos state, populated by setupCredentials() before submission.
  private var loginFromKeytab = false
  private var principal: String = null
  private var keytab: String = null
  private var credentials: Credentials = null
  // Name under which the keytab is placed in the distributed cache for the AM to re-login.
  private var amKeytabFileName: String = null

  // Backend used to report state to (and receive stop requests from) the Spark launcher.
  // On a stop request: in cluster mode kill the YARN app; otherwise just shut this client down.
  private val launcherBackend = new LauncherBackend() {
    override def onStopRequest(): Unit = {
      if (isClusterMode && appId != null) {
        yarnClient.killApplication(appId)
      } else {
        setState(SparkAppHandle.State.KILLED)
        stop()
      }
    }
  }
  private val fireAndForget = isClusterMode && !sparkConf.get(WAIT_FOR_APP_COMPLETION)

  // Set once submitApplication() has obtained an application id from the RM.
  private var appId: ApplicationId = null

  // The app staging dir based on the STAGING_DIR configuration if configured
  // otherwise based on the users home directory.
  private val appStagingBaseDir = sparkConf.get(STAGING_DIR).map { new Path(_) }
    .getOrElse(FileSystem.get(hadoopConf).getHomeDirectory())

  private val credentialManager = new ConfigurableCredentialManager(sparkConf, hadoopConf)

  /** Forward a state change to the launcher backend. */
  def reportLauncherState(state: SparkAppHandle.State): Unit = {
    launcherBackend.setState(state)
  }

  /** Release client-side resources (launcher connection and YARN client). */
  def stop(): Unit = {
    launcherBackend.close()
    yarnClient.stop()
    // Unset YARN mode system env variable, to allow switching between cluster types.
    System.clearProperty("SPARK_YARN_MODE")
  }

  /**
   * Submit an application running our ApplicationMaster to the ResourceManager.
   *
   * The stable Yarn API provides a convenience method (YarnClient#createApplication) for
   * creating applications and setting up the application submission context. This was not
   * available in the alpha API.
   *
   * @return the id of the submitted YARN application
   */
  def submitApplication(): ApplicationId = {
    var appId: ApplicationId = null
    try {
      launcherBackend.connect()
      // Setup the credentials before doing anything else,
      // so we don't have issues at any point.
      setupCredentials()
      yarnClient.init(yarnConf)
      yarnClient.start()

      logInfo("Requesting a new application from cluster with %d NodeManagers"
        .format(yarnClient.getYarnClusterMetrics.getNumNodeManagers))

      // Get a new application from our RM
      val newApp = yarnClient.createApplication()
      val newAppResponse = newApp.getNewApplicationResponse()
      appId = newAppResponse.getApplicationId()

      new CallerContext("CLIENT", sparkConf.get(APP_CALLER_CONTEXT),
        Option(appId.toString)).setCurrentContext()

      // Verify whether the cluster has enough resources for our AM
      verifyClusterResources(newAppResponse)

      // Set up the appropriate contexts to launch our AM
      val containerContext = createContainerLaunchContext(newAppResponse)
      val appContext = createApplicationSubmissionContext(newApp, containerContext)

      // Finally, submit and monitor the application
      logInfo(s"Submitting application $appId to ResourceManager")
      yarnClient.submitApplication(appContext)
      launcherBackend.setAppId(appId.toString)
      reportLauncherState(SparkAppHandle.State.SUBMITTED)

      appId
    } catch {
      case e: Throwable =>
        // Best-effort removal of anything already uploaded to the staging dir, then rethrow.
        if (appId != null) {
          cleanupStagingDir(appId)
        }
        throw e
    }
  }

  /**
   * Cleanup application staging directory.
   * No-op when spark.yarn.preserve.staging.files is set; IO failures are logged, not rethrown.
   */
  private def cleanupStagingDir(appId: ApplicationId): Unit = {
    val stagingDirPath = new Path(appStagingBaseDir, getAppStagingDir(appId))
    try {
      val preserveFiles = sparkConf.get(PRESERVE_STAGING_FILES)
      val fs = stagingDirPath.getFileSystem(hadoopConf)
      if (!preserveFiles && fs.delete(stagingDirPath, true)) {
        logInfo(s"Deleted staging directory $stagingDirPath")
      }
    } catch {
      case ioe: IOException =>
        logWarning("Failed to cleanup staging dir " + stagingDirPath, ioe)
    }
  }

  /**
   * Set up the context for submitting our ApplicationMaster.
   * This uses the YarnClientApplication not available in the Yarn alpha API.
   *
   * @param newApp the application handle returned by YarnClient#createApplication
   * @param containerContext the already-built launch context for the AM container
   * @return the populated submission context ready to pass to YarnClient#submitApplication
   */
  def createApplicationSubmissionContext(
      newApp: YarnClientApplication,
      containerContext: ContainerLaunchContext): ApplicationSubmissionContext = {
    val appContext = newApp.getApplicationSubmissionContext
    appContext.setApplicationName(sparkConf.get("spark.app.name", "Spark"))
    appContext.setQueue(sparkConf.get(QUEUE_NAME))
    appContext.setAMContainerSpec(containerContext)
    appContext.setApplicationType("SPARK")

    sparkConf.get(APPLICATION_TAGS).foreach { tags =>
      appContext.setApplicationTags(new java.util.HashSet[String](tags.asJava))
    }
    // When the user does not configure max attempts, leave it unset so YARN's cluster-wide
    // default applies.
    sparkConf.get(MAX_APP_ATTEMPTS) match {
      case Some(v) => appContext.setMaxAppAttempts(v)
      case None => logDebug(s"${MAX_APP_ATTEMPTS.key} is not set. " +
          "Cluster's default value will be used.")
    }

    sparkConf.get(AM_ATTEMPT_FAILURE_VALIDITY_INTERVAL_MS).foreach { interval =>
      appContext.setAttemptFailuresValidityInterval(interval)
    }

    // Resource ask for the AM container: memory includes the overhead computed above.
    val capability = Records.newRecord(classOf[Resource])
    capability.setMemory(amMemory + amMemoryOverhead)
    capability.setVirtualCores(amCores)

    // If a node label expression is configured for the AM, it must be attached via an explicit
    // ResourceRequest; otherwise the plain resource capability is enough.
    sparkConf.get(AM_NODE_LABEL_EXPRESSION) match {
      case Some(expr) =>
        val amRequest = Records.newRecord(classOf[ResourceRequest])
        amRequest.setResourceName(ResourceRequest.ANY)
        amRequest.setPriority(Priority.newInstance(0))
        amRequest.setCapability(capability)
        amRequest.setNumContainers(1)
        amRequest.setNodeLabelExpression(expr)
        appContext.setAMContainerResourceRequest(amRequest)
      case None =>
        appContext.setResource(capability)
    }

    sparkConf.get(ROLLED_LOG_INCLUDE_PATTERN).foreach { includePattern =>
      try {
        val logAggregationContext = Records.newRecord(classOf[LogAggregationContext])

        // These two methods were added in Hadoop 2.6.4, so we still need to use reflection to
        // avoid compile error when building against Hadoop 2.6.0 ~ 2.6.3.
        val setRolledLogsIncludePatternMethod =
          logAggregationContext.getClass.getMethod("setRolledLogsIncludePattern", classOf[String])
        setRolledLogsIncludePatternMethod.invoke(logAggregationContext, includePattern)

        sparkConf.get(ROLLED_LOG_EXCLUDE_PATTERN).foreach { excludePattern =>
          val setRolledLogsExcludePatternMethod =
            logAggregationContext.getClass.getMethod("setRolledLogsExcludePattern", classOf[String])
          setRolledLogsExcludePatternMethod.invoke(logAggregationContext, excludePattern)
        }

        appContext.setLogAggregationContext(logAggregationContext)
      } catch {
        // Older YARN versions lack the rolled-log methods entirely; degrade gracefully.
        case NonFatal(e) =>
          logWarning(s"Ignoring ${ROLLED_LOG_INCLUDE_PATTERN.key} because the version of YARN " +
            "does not support it", e)
      }
    }

    appContext
  }

  /** Set up security tokens for launching our ApplicationMaster container. */
  private def setupSecurityToken(amContainer: ContainerLaunchContext): Unit = {
    // Serialize the previously-collected credentials into the container's token buffer.
    val dob = new DataOutputBuffer
    credentials.writeTokenStorageToStream(dob)
    amContainer.setTokens(ByteBuffer.wrap(dob.getData))
  }

  /** Get the application report from the ResourceManager for an application we have submitted. */
  def getApplicationReport(appId: ApplicationId): ApplicationReport =
    yarnClient.getApplicationReport(appId)

  /**
   * Return the security token used by this client to communicate with the ApplicationMaster.
   * If no security is enabled, the token returned by the report is null.
   */
  private def getClientToken(report: ApplicationReport): String =
    Option(report.getClientToAMToken).map(_.toString).getOrElse("")

  /**
   * Fail fast if we have requested more resources per container than is available in the cluster.
   *
   * @param newAppResponse response from the RM, carrying the cluster's max container capability
   * @throws IllegalArgumentException if the executor or AM memory request (including overhead)
   *                                  exceeds the cluster's maximum allocation
   */
  private def verifyClusterResources(newAppResponse: GetNewApplicationResponse): Unit = {
    val maxMem = newAppResponse.getMaximumResourceCapability().getMemory()
    logInfo("Verifying our application has not requested more than the maximum " +
      s"memory capability of the cluster ($maxMem MB per container)")
    // Total per-executor ask = heap + off-heap overhead; both must fit in one container.
    val executorMem = executorMemory + executorMemoryOverhead
    if (executorMem > maxMem) {
      throw new IllegalArgumentException(s"Required executor memory ($executorMemory" +
        s"+$executorMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " +
        "Please check the values of 'yarn.scheduler.maximum-allocation-mb' and/or " +
        "'yarn.nodemanager.resource.memory-mb'.")
    }
    val amMem = amMemory + amMemoryOverhead
    if (amMem > maxMem) {
      throw new IllegalArgumentException(s"Required AM memory ($amMemory" +
        s"+$amMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " +
        "Please increase the value of 'yarn.scheduler.maximum-allocation-mb'.")
    }
    logInfo("Will allocate AM container, with %d MB memory including %d MB overhead".format(
      amMem,
      amMemoryOverhead))

    // We could add checks to make sure the entire cluster has enough resources but that involves
    // getting all the node reports and computing ourselves.
  }

  /**
   * Copy the given file to a remote file system (e.g. HDFS) if needed.
   * The file is only copied if the source and destination file systems are different. This is used
   * for preparing resources for launching the ApplicationMaster container. Exposed for testing.
   *
   * @param destDir destination directory (typically the app staging dir)
   * @param srcPath file to upload
   * @param replication HDFS replication factor to set on the uploaded file
   * @param symlinkCache memoizes resolved parent directories to avoid repeated FileContext calls
   * @param force copy even when source and destination file systems match
   * @param destName optional name for the file at the destination; defaults to the source name
   * @return the fully-qualified, symlink-resolved destination path
   */
  private[yarn] def copyFileToRemote(
      destDir: Path,
      srcPath: Path,
      replication: Short,
      symlinkCache: Map[URI, Path],
      force: Boolean = false,
      destName: Option[String] = None): Path = {
    val destFs = destDir.getFileSystem(hadoopConf)
    val srcFs = srcPath.getFileSystem(hadoopConf)
    var destPath = srcPath
    if (force || !compareFs(srcFs, destFs)) {
      destPath = new Path(destDir, destName.getOrElse(srcPath.getName()))
      logInfo(s"Uploading resource $srcPath -> $destPath")
      FileUtil.copy(srcFs, srcPath, destFs, destPath, false, hadoopConf)
      destFs.setReplication(destPath, replication)
      destFs.setPermission(destPath, new FsPermission(APP_FILE_PERMISSION))
    } else {
      logInfo(s"Source and destination file systems are the same. Not copying $srcPath")
    }
    // Resolve any symlinks in the URI path so using a "current" symlink to point to a specific
    // version shows the specific version in the distributed cache configuration
    val qualifiedDestPath = destFs.makeQualified(destPath)
    val qualifiedDestDir = qualifiedDestPath.getParent
    val resolvedDestDir = symlinkCache.getOrElseUpdate(qualifiedDestDir.toUri(), {
      val fc = FileContext.getFileContext(qualifiedDestDir.toUri(), hadoopConf)
      fc.resolvePath(qualifiedDestDir)
    })
    new Path(resolvedDestDir, qualifiedDestPath.getName())
  }

  /**
   * Upload any resources to the distributed cache if needed. If a resource is intended to be
   * consumed locally, set up the appropriate config for downstream code to handle it properly.
   * This is used for setting up a container launch context for our ApplicationMaster.
   * Exposed for testing.
   *
   * @param destDir staging directory to upload resources to
   * @param pySparkArchives PySpark archives to distribute alongside the other resources
   * @return map from resource link name to the registered YARN LocalResource
   */
  def prepareLocalResources(
      destDir: Path,
      pySparkArchives: Seq[String]): HashMap[String, LocalResource] = {
    logInfo("Preparing resources for our AM container")

    // Upload Spark and the application JAR to the remote file system if necessary,
    // and add them as local resources to the application master.
    val fs = destDir.getFileSystem(hadoopConf)

    // Merge credentials obtained from registered providers
    val nearestTimeOfNextRenewal = credentialManager.obtainCredentials(hadoopConf, credentials)

    if (credentials != null) {
      logDebug(YarnSparkHadoopUtil.get.dumpTokens(credentials).mkString("\\n"))
    }

    // If we use principal and keytab to login, also credentials can be renewed some time
    // after current time, we should pass the next renewal and updating time to credential
    // renewer and updater.
    if (loginFromKeytab && nearestTimeOfNextRenewal > System.currentTimeMillis() &&
      nearestTimeOfNextRenewal != Long.MaxValue) {

      // Valid renewal time is 75% of next renewal time, and the valid update time will be
      // slightly later than renewal time (80% of next renewal time). This is to make sure
      // credentials are renewed and updated before expired.
      val currTime = System.currentTimeMillis()
      val renewalTime = (nearestTimeOfNextRenewal - currTime) * 0.75 + currTime
      val updateTime = (nearestTimeOfNextRenewal - currTime) * 0.8 + currTime

      sparkConf.set(CREDENTIALS_RENEWAL_TIME, renewalTime.toLong)
      sparkConf.set(CREDENTIALS_UPDATE_TIME, updateTime.toLong)
    }

    // Used to keep track of URIs added to the distributed cache. If the same URI is added
    // multiple times, YARN will fail to launch containers for the app with an internal
    // error.
    val distributedUris = new HashSet[String]
    // Used to keep track of URIs (files) added to the distributed cache that have the same name.
    // If same-name but different-path files are added multiple times, YARN will fail to launch
    // containers for the app with an internal error.
    val distributedNames = new HashSet[String]

    val replication = sparkConf.get(STAGING_FILE_REPLICATION).map(_.toShort)
      .getOrElse(fs.getDefaultReplication(destDir))
    val localResources = HashMap[String, LocalResource]()
    FileSystem.mkdirs(fs, destDir, new FsPermission(STAGING_DIR_PERMISSION))

    val statCache: Map[URI, FileStatus] = HashMap[URI, FileStatus]()
    val symlinkCache: Map[URI, Path] = HashMap[URI, Path]()

    // Returns true only when neither this exact URI nor another resource with the same file
    // name has been registered yet; both cases would make YARN localization fail.
    def addDistributedUri(uri: URI): Boolean = {
      val uriStr = uri.toString()
      val fileName = new File(uri.getPath).getName
      if (distributedUris.contains(uriStr)) {
        logWarning(s"Same path resource $uri added multiple times to distributed cache.")
        false
      } else if (distributedNames.contains(fileName)) {
        logWarning(s"Same name resource $uri added multiple times to distributed cache")
        false
      } else {
        distributedUris += uriStr
        distributedNames += fileName
        true
      }
    }

    /**
     * Distribute a file to the cluster.
     *
     * If the file's path is a "local:" URI, it's actually not distributed. Other files are copied
     * to HDFS (if not already there) and added to the application's distributed cache.
     *
     * @param path URI of the file to distribute.
     * @param resType Type of resource being distributed.
     * @param destName Name of the file in the distributed cache.
     * @param targetDir Subdirectory where to place the file.
     * @param appMasterOnly Whether to distribute only to the AM.
     * @return A 2-tuple. First item is whether the file is a "local:" URI. Second item is the
     *         localized path for non-local paths, or the input `path` for local paths.
     *         The localized path will be null if the URI has already been added to the cache.
     */
    def distribute(
        path: String,
        resType: LocalResourceType = LocalResourceType.FILE,
        destName: Option[String] = None,
        targetDir: Option[String] = None,
        appMasterOnly: Boolean = false): (Boolean, String) = {
      val trimmedPath = path.trim()
      val localURI = Utils.resolveURI(trimmedPath)
      if (localURI.getScheme != LOCAL_SCHEME) {
        if (addDistributedUri(localURI)) {
          val localPath = getQualifiedLocalPath(localURI, hadoopConf)
          // Link name precedence: explicit destName, then URI fragment, then the file name.
          val linkname = targetDir.map(_ + "/").getOrElse("") +
            destName.orElse(Option(localURI.getFragment())).getOrElse(localPath.getName())
          val destPath = copyFileToRemote(destDir, localPath, replication, symlinkCache)
          val destFs = FileSystem.get(destPath.toUri(), hadoopConf)
          distCacheMgr.addResource(
            destFs, hadoopConf, destPath, localResources, resType, linkname, statCache,
            appMasterOnly = appMasterOnly)
          (false, linkname)
        } else {
          (false, null)
        }
      } else {
        (true, trimmedPath)
      }
    }

    // If we passed in a keytab, make sure we copy the keytab to the staging directory on
    // HDFS, and setup the relevant environment vars, so the AM can login again.
    if (loginFromKeytab) {
      logInfo("To enable the AM to login from keytab, credentials are being copied over to the AM" +
        " via the YARN Secure Distributed Cache.")
      val (_, localizedPath) = distribute(keytab,
        destName = Some(amKeytabFileName),
        appMasterOnly = true)
      require(localizedPath != null, "Keytab file already distributed.")
    }

    /**
     * Add Spark to the cache. There are two settings that control what files to add to the cache:
     * - if a Spark archive is defined, use the archive. The archive is expected to contain
     *   jar files at its root directory.
     * - if a list of jars is provided, filter the non-local ones, resolve globs, and
     *   add the found files to the cache.
     *
     * Note that the archive cannot be a "local" URI. If none of the above settings are found,
     * then upload all files found in $SPARK_HOME/jars.
     */
    val sparkArchive = sparkConf.get(SPARK_ARCHIVE)
    if (sparkArchive.isDefined) {
      val archive = sparkArchive.get
      require(!isLocalUri(archive), s"${SPARK_ARCHIVE.key} cannot be a local URI.")
      distribute(Utils.resolveURI(archive).toString,
        resType = LocalResourceType.ARCHIVE,
        destName = Some(LOCALIZED_LIB_DIR))
    } else {
      sparkConf.get(SPARK_JARS) match {
        case Some(jars) =>
          // Break the list of jars to upload, and resolve globs.
          val localJars = new ArrayBuffer[String]()
          jars.foreach { jar =>
            if (!isLocalUri(jar)) {
              val path = getQualifiedLocalPath(Utils.resolveURI(jar), hadoopConf)
              val pathFs = FileSystem.get(path.toUri(), hadoopConf)
              pathFs.globStatus(path).filter(_.isFile()).foreach { entry =>
                val uri = entry.getPath().toUri()
                statCache.update(uri, entry)
                distribute(uri.toString(), targetDir = Some(LOCALIZED_LIB_DIR))
              }
            } else {
              localJars += jar
            }
          }

          // Propagate the local URIs to the containers using the configuration.
          sparkConf.set(SPARK_JARS, localJars)

        case None =>
          // No configuration, so fall back to uploading local jar files.
          logWarning(s"Neither ${SPARK_JARS.key} nor ${SPARK_ARCHIVE.key} is set, falling back " +
            "to uploading libraries under SPARK_HOME.")
          val jarsDir = new File(YarnCommandBuilderUtils.findJarsDir(
            sparkConf.getenv("SPARK_HOME")))
          val jarsArchive = File.createTempFile(LOCALIZED_LIB_DIR, ".zip",
            new File(Utils.getLocalDir(sparkConf)))
          val jarsStream = new ZipOutputStream(new FileOutputStream(jarsArchive))

          try {
            // Level 0 = store without compression: these jars are already compressed.
            jarsStream.setLevel(0)
            jarsDir.listFiles().foreach { f =>
              if (f.isFile && f.getName.toLowerCase().endsWith(".jar") && f.canRead) {
                jarsStream.putNextEntry(new ZipEntry(f.getName))
                Files.copy(f, jarsStream)
                jarsStream.closeEntry()
              }
            }
          } finally {
            jarsStream.close()
          }

          distribute(jarsArchive.toURI.getPath,
            resType = LocalResourceType.ARCHIVE,
            destName = Some(LOCALIZED_LIB_DIR))
      }
    }

    /**
     * Copy user jar to the distributed cache if their scheme is not "local".
     * Otherwise, set the corresponding key in our SparkConf to handle it downstream.
     */
    Option(args.userJar).filter(_.trim.nonEmpty).foreach { jar =>
      val (isLocal, localizedPath) = distribute(jar, destName = Some(APP_JAR_NAME))
      if (isLocal) {
        require(localizedPath != null, s"Path $jar already distributed")
        // If the resource is intended for local use only, handle this downstream
        // by setting the appropriate property
        sparkConf.set(APP_JAR, localizedPath)
      }
    }

    /**
     * Do the same for any additional resources passed in through ClientArguments.
     * Each resource category is represented by a 3-tuple of:
     * (1) comma separated list of resources in this category,
     * (2) resource type, and
     * (3) whether to add these resources to the classpath
     */
    val cachedSecondaryJarLinks = ListBuffer.empty[String]
    List(
      (sparkConf.get(JARS_TO_DISTRIBUTE), LocalResourceType.FILE, true),
      (sparkConf.get(FILES_TO_DISTRIBUTE), LocalResourceType.FILE, false),
      (sparkConf.get(ARCHIVES_TO_DISTRIBUTE), LocalResourceType.ARCHIVE, false)
    ).foreach { case (flist, resType, addToClasspath) =>
      flist.foreach { file =>
        val (_, localizedPath) = distribute(file, resType = resType)
        // If addToClassPath, we ignore adding jar multiple times to distributed cache.
        if (addToClasspath) {
          if (localizedPath != null) {
            cachedSecondaryJarLinks += localizedPath
          }
        } else {
          if (localizedPath == null) {
            throw new IllegalArgumentException(s"Attempt to add ($file) multiple times" +
              " to the distributed cache.")
          }
        }
      }
    }
    if (cachedSecondaryJarLinks.nonEmpty) {
      sparkConf.set(SECONDARY_JARS, cachedSecondaryJarLinks)
    }

    if (isClusterMode && args.primaryPyFile != null) {
      distribute(args.primaryPyFile, appMasterOnly = true)
    }

    pySparkArchives.foreach { f => distribute(f) }

    // The python files list needs to be treated especially. All files that are not an
    // archive need to be placed in a subdirectory that will be added to PYTHONPATH.
    sparkConf.get(PY_FILES).foreach { f =>
      val targetDir = if (f.endsWith(".py")) Some(LOCALIZED_PYTHON_DIR) else None
      distribute(f, targetDir = targetDir)
    }

    // Update the configuration with all the distributed files, minus the conf archive. The
    // conf archive will be handled by the AM differently so that we avoid having to send
    // this configuration by other means. See SPARK-14602 for one reason of why this is needed.
    distCacheMgr.updateConfiguration(sparkConf)

    // Upload the conf archive to HDFS manually, and record its location in the configuration.
    // This will allow the AM to know where the conf archive is in HDFS, so that it can be
    // distributed to the containers.
    //
    // This code forces the archive to be copied, so that unit tests pass (since in that case both
    // file systems are the same and the archive wouldn't normally be copied). In most (all?)
    // deployments, the archive would be copied anyway, since it's a temp file in the local file
    // system.
    val remoteConfArchivePath = new Path(destDir, LOCALIZED_CONF_ARCHIVE)
    val remoteFs = FileSystem.get(remoteConfArchivePath.toUri(), hadoopConf)
    sparkConf.set(CACHED_CONF_ARCHIVE, remoteConfArchivePath.toString())

    val localConfArchive = new Path(createConfArchive().toURI())
    copyFileToRemote(destDir, localConfArchive, replication, symlinkCache, force = true,
      destName = Some(LOCALIZED_CONF_ARCHIVE))

    // Manually add the config archive to the cache manager so that the AM is launched with
    // the proper files set up.
    distCacheMgr.addResource(
      remoteFs, hadoopConf, remoteConfArchivePath, localResources, LocalResourceType.ARCHIVE,
      LOCALIZED_CONF_DIR, statCache, appMasterOnly = false)

    // Clear the cache-related entries from the configuration to avoid them polluting the
    // UI's environment page. This works for client mode; for cluster mode, this is handled
    // by the AM.
    CACHE_CONFIGS.foreach(sparkConf.remove)

    localResources
  }

  /**
   * Create an archive with the config files for distribution.
   *
   * These will be used by AM and executors. The files are zipped and added to the job as an
   * archive, so that YARN will explode it when distributing to AM and executors. This directory
   * is then added to the classpath of AM and executor process, just to make sure that everybody
   * is using the same default config.
   *
   * This follows the order of precedence set by the startup scripts, in which HADOOP_CONF_DIR
   * shows up in the classpath before YARN_CONF_DIR.
   *
   * Currently this makes a shallow copy of the conf directory. If there are cases where a
   * Hadoop config directory contains subdirectories, this code will have to be fixed.
   *
   * The archive also contains some Spark configuration. Namely, it saves the contents of
   * SparkConf in a file to be loaded by the AM process.
   *
   * @return a local temp zip file containing the Hadoop/Spark config to be distributed
   */
  private def createConfArchive(): File = {
    // Keyed by entry name inside the archive; first writer for a name wins (see below).
    val hadoopConfFiles = new HashMap[String, File]()

    // Uploading $SPARK_CONF_DIR/log4j.properties file to the distributed cache to make sure that
    // the executors will use the latest configurations instead of the default values. This is
    // required when user changes log4j.properties directly to set the log configurations. If
    // configuration file is provided through --files then executors will be taking configurations
    // from --files instead of $SPARK_CONF_DIR/log4j.properties.

    // Also uploading metrics.properties to distributed cache if exists in classpath.
    // If user specify this file using --files then executors will use the one
    // from --files instead.
    for { prop <- Seq("log4j.properties", "metrics.properties")
          url <- Option(Utils.getContextOrSparkClassLoader.getResource(prop))
          if url.getProtocol == "file" } {
      hadoopConfFiles(prop) = new File(url.getPath)
    }

    // Shallow-copy files from HADOOP_CONF_DIR, then YARN_CONF_DIR; entries already present
    // (including the properties files above) are not overwritten, preserving precedence.
    Seq("HADOOP_CONF_DIR", "YARN_CONF_DIR").foreach { envKey =>
      sys.env.get(envKey).foreach { path =>
        val dir = new File(path)
        if (dir.isDirectory()) {
          val files = dir.listFiles()
          if (files == null) {
            logWarning("Failed to list files under directory " + dir)
          } else {
            files.foreach { file =>
              if (file.isFile && !hadoopConfFiles.contains(file.getName())) {
                hadoopConfFiles(file.getName()) = file
              }
            }
          }
        }
      }
    }

    val confArchive = File.createTempFile(LOCALIZED_CONF_DIR, ".zip",
      new File(Utils.getLocalDir(sparkConf)))
    val confStream = new ZipOutputStream(new FileOutputStream(confArchive))
    try {
      // Level 0 = no compression; the archive is small and localized once per container.
      confStream.setLevel(0)
      hadoopConfFiles.foreach { case (name, file) =>
        if (file.canRead()) {
          confStream.putNextEntry(new ZipEntry(name))
          Files.copy(file, confStream)
          confStream.closeEntry()
        }
      }

      // Save Spark configuration to a file in the archive.
      val props = new Properties()
      sparkConf.getAll.foreach { case (k, v) => props.setProperty(k, v) }
      // Override spark.yarn.key to point to the location in distributed cache which will be used
      // by AM.
      Option(amKeytabFileName).foreach { k => props.setProperty(KEYTAB.key, k) }
      confStream.putNextEntry(new ZipEntry(SPARK_CONF_FILE))
      val writer = new OutputStreamWriter(confStream, StandardCharsets.UTF_8)
      props.store(writer, "Spark configuration.")
      writer.flush()
      confStream.closeEntry()
    } finally {
      confStream.close()
    }
    confArchive
  }

  /**
   * Set up the environment for launching our ApplicationMaster container.
*/ private def setupLaunchEnv( stagingDirPath: Path, pySparkArchives: Seq[String]): HashMap[String, String] = { logInfo("Setting up the launch environment for our AM container") val env = new HashMap[String, String]() populateClasspath(args, yarnConf, sparkConf, env, sparkConf.get(DRIVER_CLASS_PATH)) env("SPARK_YARN_MODE") = "true" env("SPARK_YARN_STAGING_DIR") = stagingDirPath.toString env("SPARK_USER") = UserGroupInformation.getCurrentUser().getShortUserName() if (loginFromKeytab) { val credentialsFile = "credentials-" + UUID.randomUUID().toString sparkConf.set(CREDENTIALS_FILE_PATH, new Path(stagingDirPath, credentialsFile).toString) logInfo(s"Credentials file set to: $credentialsFile") } // Pick up any environment variables for the AM provided through spark.yarn.appMasterEnv.* val amEnvPrefix = "spark.yarn.appMasterEnv." sparkConf.getAll .filter { case (k, v) => k.startsWith(amEnvPrefix) } .map { case (k, v) => (k.substring(amEnvPrefix.length), v) } .foreach { case (k, v) => YarnSparkHadoopUtil.addPathToEnvironment(env, k, v) } // If pyFiles contains any .py files, we need to add LOCALIZED_PYTHON_DIR to the PYTHONPATH // of the container processes too. Add all non-.py files directly to PYTHONPATH. // // NOTE: the code currently does not handle .py files defined with a "local:" scheme. val pythonPath = new ListBuffer[String]() val (pyFiles, pyArchives) = sparkConf.get(PY_FILES).partition(_.endsWith(".py")) if (pyFiles.nonEmpty) { pythonPath += buildPath(Environment.PWD.$$(), LOCALIZED_PYTHON_DIR) } (pySparkArchives ++ pyArchives).foreach { path => val uri = Utils.resolveURI(path) if (uri.getScheme != LOCAL_SCHEME) { pythonPath += buildPath(Environment.PWD.$$(), new Path(uri).getName()) } else { pythonPath += uri.getPath() } } // Finally, update the Spark config to propagate PYTHONPATH to the AM and executors. 
if (pythonPath.nonEmpty) { val pythonPathStr = (sys.env.get("PYTHONPATH") ++ pythonPath) .mkString(ApplicationConstants.CLASS_PATH_SEPARATOR) env("PYTHONPATH") = pythonPathStr sparkConf.setExecutorEnv("PYTHONPATH", pythonPathStr) } if (isClusterMode) { // propagate PYSPARK_DRIVER_PYTHON and PYSPARK_PYTHON to driver in cluster mode Seq("PYSPARK_DRIVER_PYTHON", "PYSPARK_PYTHON").foreach { envname => if (!env.contains(envname)) { sys.env.get(envname).foreach(env(envname) = _) } } sys.env.get("PYTHONHASHSEED").foreach(env.put("PYTHONHASHSEED", _)) } sys.env.get(ENV_DIST_CLASSPATH).foreach { dcp => env(ENV_DIST_CLASSPATH) = dcp } env } /** * Set up a ContainerLaunchContext to launch our ApplicationMaster container. * This sets up the launch environment, java options, and the command for launching the AM. */ private def createContainerLaunchContext(newAppResponse: GetNewApplicationResponse) : ContainerLaunchContext = { logInfo("Setting up container launch context for our AM") val appId = newAppResponse.getApplicationId val appStagingDirPath = new Path(appStagingBaseDir, getAppStagingDir(appId)) val pySparkArchives = if (sparkConf.get(IS_PYTHON_APP)) { findPySparkArchives() } else { Nil } val launchEnv = setupLaunchEnv(appStagingDirPath, pySparkArchives) val localResources = prepareLocalResources(appStagingDirPath, pySparkArchives) val amContainer = Records.newRecord(classOf[ContainerLaunchContext]) amContainer.setLocalResources(localResources.asJava) amContainer.setEnvironment(launchEnv.asJava) val javaOpts = ListBuffer[String]() // Set the environment variable through a command prefix // to append to the existing value of the variable var prefixEnv: Option[String] = None // Add Xmx for AM memory javaOpts += "-Xmx" + amMemory + "m" val tmpDir = new Path(Environment.PWD.$$(), YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR) javaOpts += "-Djava.io.tmpdir=" + tmpDir // TODO: Remove once cpuset version is pushed out. 
    // The context is, default gc for server class machines ends up using all cores to do gc -
    // hence if there are multiple containers in same node, Spark GC affects all other containers'
    // performance (which can be that of other Spark containers)
    // Instead of using this, rely on cpusets by YARN to enforce "proper" Spark behavior in
    // multi-tenant environments. Not sure how default Java GC behaves if it is limited to subset
    // of cores on a node.
    val useConcurrentAndIncrementalGC = launchEnv.get("SPARK_USE_CONC_INCR_GC").exists(_.toBoolean)
    if (useConcurrentAndIncrementalGC) {
      // In our expts, using (default) throughput collector has severe perf ramifications in
      // multi-tenant machines
      javaOpts += "-XX:+UseConcMarkSweepGC"
      javaOpts += "-XX:MaxTenuringThreshold=31"
      javaOpts += "-XX:SurvivorRatio=8"
      javaOpts += "-XX:+CMSIncrementalMode"
      javaOpts += "-XX:+CMSIncrementalPacing"
      javaOpts += "-XX:CMSIncrementalDutyCycleMin=0"
      javaOpts += "-XX:CMSIncrementalDutyCycle=10"
    }

    // Include driver-specific java options if we are launching a driver
    if (isClusterMode) {
      sparkConf.get(DRIVER_JAVA_OPTIONS).foreach { opts =>
        javaOpts ++= Utils.splitCommandString(opts).map(YarnSparkHadoopUtil.escapeForShell)
      }
      val libraryPaths = Seq(sparkConf.get(DRIVER_LIBRARY_PATH),
        sys.props.get("spark.driver.libraryPath")).flatten
      if (libraryPaths.nonEmpty) {
        prefixEnv = Some(getClusterPath(sparkConf, Utils.libraryPathEnvPrefix(libraryPaths)))
      }
      if (sparkConf.get(AM_JAVA_OPTIONS).isDefined) {
        // In cluster mode the AM *is* the driver, so only driver options apply.
        logWarning(s"${AM_JAVA_OPTIONS.key} will not take effect in cluster mode")
      }
    } else {
      // Validate and include yarn am specific java options in yarn-client mode.
      sparkConf.get(AM_JAVA_OPTIONS).foreach { opts =>
        if (opts.contains("-Dspark")) {
          val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to set Spark options (was '$opts')."
          throw new SparkException(msg)
        }
        if (opts.contains("-Xmx")) {
          val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to specify max heap memory settings " +
            s"(was '$opts'). Use spark.yarn.am.memory instead."
          throw new SparkException(msg)
        }
        javaOpts ++= Utils.splitCommandString(opts).map(YarnSparkHadoopUtil.escapeForShell)
      }
      sparkConf.get(AM_LIBRARY_PATH).foreach { paths =>
        prefixEnv = Some(getClusterPath(sparkConf, Utils.libraryPathEnvPrefix(Seq(paths))))
      }
    }

    // For log4j configuration to reference
    javaOpts += ("-Dspark.yarn.app.container.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR)

    // Assemble the AM's command-line arguments; each piece is Nil when not applicable.
    val userClass =
      if (isClusterMode) {
        Seq("--class", YarnSparkHadoopUtil.escapeForShell(args.userClass))
      } else {
        Nil
      }
    val userJar =
      if (args.userJar != null) {
        Seq("--jar", args.userJar)
      } else {
        Nil
      }
    val primaryPyFile =
      if (isClusterMode && args.primaryPyFile != null) {
        Seq("--primary-py-file", new Path(args.primaryPyFile).getName())
      } else {
        Nil
      }
    val primaryRFile =
      if (args.primaryRFile != null) {
        Seq("--primary-r-file", args.primaryRFile)
      } else {
        Nil
      }
    val amClass =
      if (isClusterMode) {
        Utils.classForName("org.apache.spark.deploy.yarn.ApplicationMaster").getName
      } else {
        Utils.classForName("org.apache.spark.deploy.yarn.ExecutorLauncher").getName
      }
    if (args.primaryRFile != null && args.primaryRFile.endsWith(".R")) {
      // SparkR scripts expect the script path as the first user argument.
      args.userArgs = ArrayBuffer(args.primaryRFile) ++ args.userArgs
    }
    val userArgs = args.userArgs.flatMap { arg =>
      Seq("--arg", YarnSparkHadoopUtil.escapeForShell(arg))
    }
    val amArgs =
      Seq(amClass) ++ userClass ++ userJar ++ primaryPyFile ++ primaryRFile ++ userArgs ++
      Seq("--properties-file", buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, SPARK_CONF_FILE))

    // Command for the ApplicationMaster
    val commands = prefixEnv ++
      Seq(Environment.JAVA_HOME.$$() + "/bin/java", "-server") ++
      javaOpts ++ amArgs ++
      Seq(
        "1>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout",
        "2>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")

    // TODO: it would be nicer to just make sure there are no null commands here
    val printableCommands = commands.map(s => if (s == null) "null" else s).toList
    amContainer.setCommands(printableCommands.asJava)

    logDebug("===============================================================================")
    logDebug("YARN AM launch context:")
    logDebug(s" user class: ${Option(args.userClass).getOrElse("N/A")}")
    logDebug(" env:")
    launchEnv.foreach { case (k, v) => logDebug(s" $k -> $v") }
    logDebug(" resources:")
    localResources.foreach { case (k, v) => logDebug(s" $k -> $v")}
    logDebug(" command:")
    logDebug(s" ${printableCommands.mkString(" ")}")
    logDebug("===============================================================================")

    // send the acl settings into YARN to control who has access via YARN interfaces
    val securityManager = new SecurityManager(sparkConf)
    amContainer.setApplicationACLs(
      YarnSparkHadoopUtil.getApplicationAclsForYarn(securityManager).asJava)
    setupSecurityToken(amContainer)
    amContainer
  }

  /**
   * Detect whether a Kerberos principal/keytab pair was configured and, if so,
   * record the keytab name to distribute; always takes a defensive copy of the
   * current user's credentials.
   */
  def setupCredentials(): Unit = {
    loginFromKeytab = sparkConf.contains(PRINCIPAL.key)
    if (loginFromKeytab) {
      principal = sparkConf.get(PRINCIPAL).get
      keytab = sparkConf.get(KEYTAB).orNull
      require(keytab != null, "Keytab must be specified when principal is specified.")
      logInfo("Attempting to login to the Kerberos" +
        s" using principal: $principal and keytab: $keytab")
      val f = new File(keytab)
      // Generate a file name that can be used for the keytab file, that does not conflict
      // with any user file.
      amKeytabFileName = f.getName + "-" + UUID.randomUUID().toString
      sparkConf.set(PRINCIPAL.key, principal)
    }
    // Defensive copy of the credentials
    credentials = new Credentials(UserGroupInformation.getCurrentUser.getCredentials)
  }

  /**
   * Report the state of an application until it has exited, either successfully or
   * due to some failure, then return a pair of the yarn application state (FINISHED, FAILED,
   * KILLED, or RUNNING) and the final application state (UNDEFINED, SUCCEEDED, FAILED,
   * or KILLED).
   *
   * @param appId ID of the application to monitor.
   * @param returnOnRunning Whether to also return the application state when it is RUNNING.
   * @param logApplicationReport Whether to log details of the application report every iteration.
   * @return A pair of the yarn application state and the final application state.
   */
  def monitorApplication(
      appId: ApplicationId,
      returnOnRunning: Boolean = false,
      logApplicationReport: Boolean = true): (YarnApplicationState, FinalApplicationStatus) = {
    val interval = sparkConf.get(REPORT_INTERVAL)
    var lastState: YarnApplicationState = null
    while (true) {
      Thread.sleep(interval)
      val report: ApplicationReport =
        try {
          getApplicationReport(appId)
        } catch {
          case e: ApplicationNotFoundException =>
            // The app is gone from the RM; treat as killed and clean up after it.
            logError(s"Application $appId not found.")
            cleanupStagingDir(appId)
            return (YarnApplicationState.KILLED, FinalApplicationStatus.KILLED)
          case NonFatal(e) =>
            logError(s"Failed to contact YARN for application $appId.", e)
            // Don't necessarily clean up staging dir because status is unknown
            return (YarnApplicationState.FAILED, FinalApplicationStatus.FAILED)
        }
      val state = report.getYarnApplicationState

      if (logApplicationReport) {
        logInfo(s"Application report for $appId (state: $state)")

        // If DEBUG is enabled, log report details every iteration
        // Otherwise, log them every time the application changes state
        if (log.isDebugEnabled) {
          logDebug(formatReportDetails(report))
        } else if (lastState != state) {
          logInfo(formatReportDetails(report))
        }
      }

      // Forward state transitions to the launcher backend, once per transition.
      if (lastState != state) {
        state match {
          case YarnApplicationState.RUNNING =>
            reportLauncherState(SparkAppHandle.State.RUNNING)
          case YarnApplicationState.FINISHED =>
            report.getFinalApplicationStatus match {
              case FinalApplicationStatus.FAILED =>
                reportLauncherState(SparkAppHandle.State.FAILED)
              case FinalApplicationStatus.KILLED =>
                reportLauncherState(SparkAppHandle.State.KILLED)
              case _ =>
                reportLauncherState(SparkAppHandle.State.FINISHED)
            }
          case YarnApplicationState.FAILED =>
            reportLauncherState(SparkAppHandle.State.FAILED)
          case YarnApplicationState.KILLED =>
            reportLauncherState(SparkAppHandle.State.KILLED)
          case _ =>
        }
      }

      if (state == YarnApplicationState.FINISHED ||
        state == YarnApplicationState.FAILED ||
        state == YarnApplicationState.KILLED) {
        cleanupStagingDir(appId)
        return (state, report.getFinalApplicationStatus)
      }

      if (returnOnRunning && state == YarnApplicationState.RUNNING) {
        return (state, report.getFinalApplicationStatus)
      }

      lastState = state
    }

    // Never reached, but keeps compiler happy
    throw new SparkException("While loop is depleted! This should never happen...")
  }

  /** Render an application report as one "key: value" entry per line for logging. */
  private def formatReportDetails(report: ApplicationReport): String = {
    val details = Seq[(String, String)](
      ("client token", getClientToken(report)),
      ("diagnostics", report.getDiagnostics),
      ("ApplicationMaster host", report.getHost),
      ("ApplicationMaster RPC port", report.getRpcPort.toString),
      ("queue", report.getQueue),
      ("start time", report.getStartTime.toString),
      ("final status", report.getFinalApplicationStatus.toString),
      ("tracking URL", report.getTrackingUrl),
      ("user", report.getUser)
    )

    // Use more loggable format if value is null or empty
    details.map { case (k, v) =>
      val newValue = Option(v).filter(_.nonEmpty).getOrElse("N/A")
      s"\\n\\t $k: $newValue"
    }.mkString("")
  }

  /**
   * Submit an application to the ResourceManager.
   * If set spark.yarn.submit.waitAppCompletion to true, it will stay alive
   * reporting the application's status until the application has exited for any reason.
   * Otherwise, the client process will exit after submission.
   * If the application finishes with a failed, killed, or undefined status,
   * throw an appropriate SparkException.
*/
  def run(): Unit = {
    this.appId = submitApplication()
    if (!launcherBackend.isConnected() && fireAndForget) {
      // Fire-and-forget: log one report and only fail fast on FAILED/KILLED.
      val report = getApplicationReport(appId)
      val state = report.getYarnApplicationState
      logInfo(s"Application report for $appId (state: $state)")
      logInfo(formatReportDetails(report))
      if (state == YarnApplicationState.FAILED || state == YarnApplicationState.KILLED) {
        throw new SparkException(s"Application $appId finished with status: $state")
      }
    } else {
      // Otherwise block until the application terminates, translating the terminal
      // state into an exception when it did not succeed.
      val (yarnApplicationState, finalApplicationStatus) = monitorApplication(appId)
      if (yarnApplicationState == YarnApplicationState.FAILED ||
        finalApplicationStatus == FinalApplicationStatus.FAILED) {
        throw new SparkException(s"Application $appId finished with failed status")
      }
      if (yarnApplicationState == YarnApplicationState.KILLED ||
        finalApplicationStatus == FinalApplicationStatus.KILLED) {
        throw new SparkException(s"Application $appId is killed")
      }
      if (finalApplicationStatus == FinalApplicationStatus.UNDEFINED) {
        throw new SparkException(s"The final status of application $appId is undefined")
      }
    }
  }

  /**
   * Locate the PySpark archives to ship: either the explicit PYSPARK_ARCHIVES_PATH
   * (comma-separated), or pyspark.zip and the py4j zip under SPARK_HOME/python/lib.
   */
  private def findPySparkArchives(): Seq[String] = {
    sys.env.get("PYSPARK_ARCHIVES_PATH")
      .map(_.split(",").toSeq)
      .getOrElse {
        val pyLibPath = Seq(sys.env("SPARK_HOME"), "python", "lib").mkString(File.separator)
        val pyArchivesFile = new File(pyLibPath, "pyspark.zip")
        require(pyArchivesFile.exists(),
          s"$pyArchivesFile not found; cannot run pyspark application in YARN mode.")
        val py4jFile = new File(pyLibPath, "py4j-0.10.4-src.zip")
        require(py4jFile.exists(),
          s"$py4jFile not found; cannot run pyspark application in YARN mode.")
        Seq(pyArchivesFile.getAbsolutePath(), py4jFile.getAbsolutePath())
      }
  }

}

private object Client extends Logging {

  // Standalone entry point, used when this client is invoked directly rather than
  // through spark-submit (a deprecated path — see the warning below).
  def main(argStrings: Array[String]) {
    if (!sys.props.contains("SPARK_SUBMIT")) {
      logWarning("WARNING: This client is deprecated and will be removed in a " +
        "future version of Spark. Use ./bin/spark-submit with \"--master yarn\"")
    }

    // Set an env variable indicating we are running in YARN mode.
    // Note that any env variable with the SPARK_ prefix gets propagated to all (remote) processes
    System.setProperty("SPARK_YARN_MODE", "true")
    val sparkConf = new SparkConf
    // SparkSubmit would use yarn cache to distribute files & jars in yarn mode,
    // so remove them from sparkConf here for yarn mode.
    sparkConf.remove("spark.jars")
    sparkConf.remove("spark.files")
    val args = new ClientArguments(argStrings)
    new Client(args, sparkConf).run()
  }

  // Alias for the user jar
  val APP_JAR_NAME: String = "__app__.jar"

  // URI scheme that identifies local resources
  val LOCAL_SCHEME = "local"

  // Staging directory for any temporary jars or files
  val SPARK_STAGING: String = ".sparkStaging"

  // Staging directory is private! -> rwx--------
  val STAGING_DIR_PERMISSION: FsPermission =
    FsPermission.createImmutable(Integer.parseInt("700", 8).toShort)

  // App files are world-wide readable and owner writable -> rw-r--r--
  val APP_FILE_PERMISSION: FsPermission =
    FsPermission.createImmutable(Integer.parseInt("644", 8).toShort)

  // Distribution-defined classpath to add to processes
  val ENV_DIST_CLASSPATH = "SPARK_DIST_CLASSPATH"

  // Subdirectory where the user's Spark and Hadoop config files will be placed.
  val LOCALIZED_CONF_DIR = "__spark_conf__"

  // File containing the conf archive in the AM. See prepareLocalResources().
  val LOCALIZED_CONF_ARCHIVE = LOCALIZED_CONF_DIR + ".zip"

  // Name of the file in the conf archive containing Spark configuration.
  val SPARK_CONF_FILE = "__spark_conf__.properties"

  // Subdirectory where the user's python files (not archives) will be placed.
  val LOCALIZED_PYTHON_DIR = "__pyfiles__"

  // Subdirectory where Spark libraries will be placed.
  val LOCALIZED_LIB_DIR = "__spark_libs__"

  /**
   * Return the path to the given application's staging directory.
   */
  private def getAppStagingDir(appId: ApplicationId): String = {
    buildPath(SPARK_STAGING, appId.toString())
  }

  /**
   * Populate the classpath entry in the given environment map with any application
   * classpath specified through the Hadoop and Yarn configurations.
   */
  private[yarn] def populateHadoopClasspath(conf: Configuration, env: HashMap[String, String])
    : Unit = {
    val classPathElementsToAdd = getYarnAppClasspath(conf) ++ getMRAppClasspath(conf)
    classPathElementsToAdd.foreach { c =>
      YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, c.trim)
    }
  }

  // Configured YARN application classpath, or the YARN default when unset.
  private def getYarnAppClasspath(conf: Configuration): Seq[String] =
    Option(conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH)) match {
      case Some(s) => s.toSeq
      case None => getDefaultYarnApplicationClasspath
    }

  // Configured MapReduce application classpath, or the MR default when unset.
  private def getMRAppClasspath(conf: Configuration): Seq[String] =
    Option(conf.getStrings("mapreduce.application.classpath")) match {
      case Some(s) => s.toSeq
      case None => getDefaultMRApplicationClasspath
    }

  private[yarn] def getDefaultYarnApplicationClasspath: Seq[String] =
    YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH.toSeq

  private[yarn] def getDefaultMRApplicationClasspath: Seq[String] =
    StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH).toSeq

  /**
   * Populate the classpath entry in the given environment map.
   *
   * User jars are generally not added to the JVM's system classpath; those are handled by the AM
   * and executor backend. When the deprecated `spark.yarn.user.classpath.first` is used, user jars
   * are included in the system classpath, though. The extra class path and other uploaded files are
   * always made available through the system class path.
   *
   * @param args Client arguments (when starting the AM) or null (when starting executors).
   */
  private[yarn] def populateClasspath(
      args: ClientArguments,
      conf: Configuration,
      sparkConf: SparkConf,
      env: HashMap[String, String],
      extraClassPath: Option[String] = None): Unit = {
    extraClassPath.foreach { cp =>
      addClasspathEntry(getClusterPath(sparkConf, cp), env)
    }

    addClasspathEntry(Environment.PWD.$$(), env)

    addClasspathEntry(Environment.PWD.$$() + Path.SEPARATOR + LOCALIZED_CONF_DIR, env)

    if (sparkConf.get(USER_CLASS_PATH_FIRST)) {
      // in order to properly add the app jar when user classpath is first
      // we have to do the mainJar separate in order to send the right thing
      // into addFileToClasspath
      val mainJar =
        if (args != null) {
          getMainJarUri(Option(args.userJar))
        } else {
          getMainJarUri(sparkConf.get(APP_JAR))
        }
      mainJar.foreach(addFileToClasspath(sparkConf, conf, _, APP_JAR_NAME, env))

      val secondaryJars =
        if (args != null) {
          getSecondaryJarUris(Option(sparkConf.get(JARS_TO_DISTRIBUTE)))
        } else {
          getSecondaryJarUris(sparkConf.get(SECONDARY_JARS))
        }
      secondaryJars.foreach { x =>
        addFileToClasspath(sparkConf, conf, x, null, env)
      }
    }

    // Add the Spark jars to the classpath, depending on how they were distributed.
    addClasspathEntry(buildPath(Environment.PWD.$$(), LOCALIZED_LIB_DIR, "*"), env)
    if (sparkConf.get(SPARK_ARCHIVE).isEmpty) {
      sparkConf.get(SPARK_JARS).foreach { jars =>
        jars.filter(isLocalUri).foreach { jar =>
          addClasspathEntry(getClusterPath(sparkConf, jar), env)
        }
      }
    }

    populateHadoopClasspath(conf, env)
    sys.env.get(ENV_DIST_CLASSPATH).foreach { cp =>
      addClasspathEntry(getClusterPath(sparkConf, cp), env)
    }
  }

  /**
   * Returns a list of URIs representing the user classpath.
   *
   * @param conf Spark configuration.
   */
  def getUserClasspath(conf: SparkConf): Array[URI] = {
    val mainUri = getMainJarUri(conf.get(APP_JAR))
    val secondaryUris = getSecondaryJarUris(conf.get(SECONDARY_JARS))
    (mainUri ++ secondaryUris).toArray
  }

  // "local:" main jars are referenced by their real path; anything else by the
  // localized APP_JAR_NAME alias.
  private def getMainJarUri(mainJar: Option[String]): Option[URI] = {
    mainJar.flatMap { path =>
      val uri = Utils.resolveURI(path)
      if (uri.getScheme == LOCAL_SCHEME) Some(uri) else None
    }.orElse(Some(new URI(APP_JAR_NAME)))
  }

  private def getSecondaryJarUris(secondaryJars: Option[Seq[String]]): Seq[URI] = {
    secondaryJars.getOrElse(Nil).map(new URI(_))
  }

  /**
   * Adds the given path to the classpath, handling "local:" URIs correctly.
   *
   * If an alternate name for the file is given, and it's not a "local:" file, the alternate
   * name will be added to the classpath (relative to the job's work directory).
   *
   * If not a "local:" file and no alternate name, the linkName will be added to the classpath.
   *
   * @param conf Spark configuration.
   * @param hadoopConf Hadoop configuration.
   * @param uri URI to add to classpath (optional).
   * @param fileName Alternate name for the file (optional).
   * @param env Map holding the environment variables.
   */
  private def addFileToClasspath(
      conf: SparkConf,
      hadoopConf: Configuration,
      uri: URI,
      fileName: String,
      env: HashMap[String, String]): Unit = {
    if (uri != null && uri.getScheme == LOCAL_SCHEME) {
      addClasspathEntry(getClusterPath(conf, uri.getPath), env)
    } else if (fileName != null) {
      addClasspathEntry(buildPath(Environment.PWD.$$(), fileName), env)
    } else if (uri != null) {
      val localPath = getQualifiedLocalPath(uri, hadoopConf)
      val linkName = Option(uri.getFragment()).getOrElse(localPath.getName())
      addClasspathEntry(buildPath(Environment.PWD.$$(), linkName), env)
    }
  }

  /**
   * Add the given path to the classpath entry of the given environment map.
   * If the classpath is already set, this appends the new path to the existing classpath.
   */
  private def addClasspathEntry(path: String, env: HashMap[String, String]): Unit =
    YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, path)

  /**
   * Returns the path to be sent to the NM for a path that is valid on the gateway.
   *
   * This method uses two configuration values:
   *
   *  - spark.yarn.config.gatewayPath: a string that identifies a portion of the input path that may
   *    only be valid in the gateway node.
   *  - spark.yarn.config.replacementPath: a string with which to replace the gateway path. This may
   *    contain, for example, env variable references, which will be expanded by the NMs when
   *    starting containers.
   *
   * If either config is not available, the input path is returned.
   */
  def getClusterPath(conf: SparkConf, path: String): String = {
    val localPath = conf.get(GATEWAY_ROOT_PATH)
    val clusterPath = conf.get(REPLACEMENT_ROOT_PATH)
    if (localPath != null && clusterPath != null) {
      path.replace(localPath, clusterPath)
    } else {
      path
    }
  }

  /**
   * Return whether the two file systems are the same.
   */
  private def compareFs(srcFs: FileSystem, destFs: FileSystem): Boolean = {
    val srcUri = srcFs.getUri()
    val dstUri = destFs.getUri()
    if (srcUri.getScheme() == null || srcUri.getScheme() != dstUri.getScheme()) {
      return false
    }

    var srcHost = srcUri.getHost()
    var dstHost = dstUri.getHost()

    // In HA or when using viewfs, the host part of the URI may not actually be a host, but the
    // name of the HDFS namespace. Those names won't resolve, so avoid even trying if they
    // match.
    if (srcHost != null && dstHost != null && srcHost != dstHost) {
      try {
        srcHost = InetAddress.getByName(srcHost).getCanonicalHostName()
        dstHost = InetAddress.getByName(dstHost).getCanonicalHostName()
      } catch {
        case e: UnknownHostException =>
          return false
      }
    }

    Objects.equal(srcHost, dstHost) && srcUri.getPort() == dstUri.getPort()
  }

  /**
   * Given a local URI, resolve it and return a qualified local path that corresponds to the URI.
   * This is used for preparing local resources to be included in the container launch context.
   */
  private def getQualifiedLocalPath(localURI: URI, hadoopConf: Configuration): Path = {
    val qualifiedURI =
      if (localURI.getScheme == null) {
        // If not specified, assume this is in the local filesystem to keep the behavior
        // consistent with that of Hadoop
        new URI(FileSystem.getLocal(hadoopConf).makeQualified(new Path(localURI)).toString)
      } else {
        localURI
      }
    new Path(qualifiedURI)
  }

  /**
   * Whether to consider jars provided by the user to have precedence over the Spark jars when
   * loading user classes.
   */
  def isUserClassPathFirst(conf: SparkConf, isDriver: Boolean): Boolean = {
    if (isDriver) {
      conf.get(DRIVER_USER_CLASS_PATH_FIRST)
    } else {
      conf.get(EXECUTOR_USER_CLASS_PATH_FIRST)
    }
  }

  /**
   * Joins all the path components using Path.SEPARATOR.
   */
  def buildPath(components: String*): String = {
    components.mkString(Path.SEPARATOR)
  }

  /** Returns whether the URI is a "local:" URI. */
  def isLocalUri(uri: String): Boolean = {
    uri.startsWith(s"$LOCAL_SCHEME:")
  }

}
jianran/spark
resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
Scala
apache-2.0
60,205
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.streaming.continuous import java.io.Closeable import java.util.concurrent.{ArrayBlockingQueue, TimeUnit} import scala.util.control.NonFatal import org.apache.spark.{SparkEnv, SparkException, TaskContext} import org.apache.spark.internal.Logging import org.apache.spark.sql.catalyst.expressions.UnsafeRow import org.apache.spark.sql.sources.v2.reader.{InputPartition, InputPartitionReader} import org.apache.spark.sql.sources.v2.reader.streaming.PartitionOffset import org.apache.spark.util.ThreadUtils /** * A wrapper for a continuous processing data reader, including a reading queue and epoch markers. * * This will be instantiated once per partition - successive calls to compute() in the * [[ContinuousDataSourceRDD]] will reuse the same reader. This is required to get continuity of * offsets across epochs. Each compute() should call the next() method here until null is returned. 
*/
class ContinuousQueuedDataReader(
    partition: InputPartition[UnsafeRow],
    context: TaskContext,
    dataQueueSize: Int,
    epochPollIntervalMs: Long) extends Closeable {
  private val reader = partition.createPartitionReader()

  // Important sequencing - we must get our starting point before the provider threads start running
  private var currentOffset: PartitionOffset =
    ContinuousDataSourceRDD.getContinuousReader(reader).getOffset
  private var currentEpoch: Long =
    context.getLocalProperty(ContinuousExecution.START_EPOCH_KEY).toLong

  /**
   * The record types in the read buffer.
   */
  sealed trait ContinuousRecord
  case object EpochMarker extends ContinuousRecord
  case class ContinuousRow(row: UnsafeRow, offset: PartitionOffset) extends ContinuousRecord

  // Bounded buffer shared between the producer threads below and next();
  // dataQueueSize caps how far reading can run ahead of consumption.
  private val queue = new ArrayBlockingQueue[ContinuousRecord](dataQueueSize)

  private val coordinatorId = context.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY)

  private val epochCoordEndpoint = EpochCoordinatorRef.get(
    context.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY), SparkEnv.get)

  // Periodically polls the coordinator for the current epoch and injects markers
  // into the queue; runs on its own daemon scheduler thread.
  private val epochMarkerExecutor = ThreadUtils.newDaemonSingleThreadScheduledExecutor(
    s"epoch-poll--$coordinatorId--${context.partitionId()}")
  private val epochMarkerGenerator = new EpochMarkerGenerator
  epochMarkerExecutor.scheduleWithFixedDelay(
    epochMarkerGenerator, 0, epochPollIntervalMs, TimeUnit.MILLISECONDS)

  // Continuously pulls rows from the underlying reader into the queue.
  private val dataReaderThread = new DataReaderThread
  dataReaderThread.setDaemon(true)
  dataReaderThread.start()

  // Tie the lifetime of the producer threads to the task.
  context.addTaskCompletionListener(_ => {
    this.close()
  })

  private def shouldStop() = {
    context.isInterrupted() || context.isCompleted()
  }

  /**
   * Return the next UnsafeRow to be read in the current epoch, or null if the epoch is done.
   *
   * After returning null, the [[ContinuousDataSourceRDD]] compute() for the following epoch
   * will call next() again to start getting rows.
   */
  def next(): UnsafeRow = {
    val POLL_TIMEOUT_MS = 1000
    var currentEntry: ContinuousRecord = null

    while (currentEntry == null) {
      if (shouldStop()) {
        // Force the epoch to end here. The writer will notice the context is interrupted
        // or completed and not start a new one. This makes it possible to achieve clean
        // shutdown of the streaming query.
        // TODO: The obvious generalization of this logic to multiple stages won't work. It's
        // invalid to send an epoch marker from the bottom of a task if all its child tasks
        // haven't sent one.
        currentEntry = EpochMarker
      } else {
        // Surface any failure captured by the producer threads on the consumer side.
        if (dataReaderThread.failureReason != null) {
          throw new SparkException("Data read failed", dataReaderThread.failureReason)
        }
        if (epochMarkerGenerator.failureReason != null) {
          throw new SparkException(
            "Epoch marker generation failed", epochMarkerGenerator.failureReason)
        }
        // Timed poll so the shouldStop() check above is re-evaluated regularly.
        currentEntry = queue.poll(POLL_TIMEOUT_MS, TimeUnit.MILLISECONDS)
      }
    }

    currentEntry match {
      // epoch boundary marker
      case EpochMarker =>
        epochCoordEndpoint.send(ReportPartitionOffset(
          context.partitionId(), currentEpoch, currentOffset))
        currentEpoch += 1
        null
      // real row
      case ContinuousRow(row, offset) =>
        currentOffset = offset
        row
    }
  }

  override def close(): Unit = {
    dataReaderThread.interrupt()
    epochMarkerExecutor.shutdown()
  }

  /**
   * The data component of [[ContinuousQueuedDataReader]]. Pushes (row, offset) to the queue when
   * a new row arrives to the [[InputPartitionReader]].
   */
  class DataReaderThread extends Thread(
      s"continuous-reader--${context.partitionId()}--" +
        s"${context.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY)}") with Logging {
    // Set before the thread exits abnormally; read by next() on the consumer side.
    @volatile private[continuous] var failureReason: Throwable = _

    override def run(): Unit = {
      TaskContext.setTaskContext(context)
      val baseReader = ContinuousDataSourceRDD.getContinuousReader(reader)
      try {
        while (!shouldStop()) {
          if (!reader.next()) {
            // Check again, since reader.next() might have blocked through an incoming interrupt.
            if (!shouldStop()) {
              throw new IllegalStateException(
                "Continuous reader reported no elements! Reader should have blocked waiting.")
            } else {
              return
            }
          }

          // copy() the row: the reader may reuse its buffer for the next row.
          queue.put(ContinuousRow(reader.get().copy(), baseReader.getOffset))
        }
      } catch {
        case _: InterruptedException =>
          // Continuous shutdown always involves an interrupt; do nothing and shut down quietly.
          logInfo(s"shutting down interrupted data reader thread $getName")

        case NonFatal(t) =>
          failureReason = t
          logWarning("data reader thread failed", t)
          // If we throw from this thread, we may kill the executor. Let the parent thread handle
          // it.

        case t: Throwable =>
          failureReason = t
          throw t
      } finally {
        reader.close()
      }
    }
  }

  /**
   * The epoch marker component of [[ContinuousQueuedDataReader]]. Populates the queue with
   * EpochMarker when a new epoch marker arrives.
   */
  class EpochMarkerGenerator extends Runnable with Logging {
    // Set before the runnable exits abnormally; read by next() on the consumer side.
    @volatile private[continuous] var failureReason: Throwable = _

    private val epochCoordEndpoint = EpochCoordinatorRef.get(
      context.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY), SparkEnv.get)
    // Note that this is *not* the same as the currentEpoch in [[ContinuousDataQueuedReader]]! That
    // field represents the epoch wrt the data being processed. The currentEpoch here is just a
    // counter to ensure we send the appropriate number of markers if we fall behind the driver.
    private var currentEpoch = context.getLocalProperty(ContinuousExecution.START_EPOCH_KEY).toLong

    override def run(): Unit = {
      try {
        val newEpoch = epochCoordEndpoint.askSync[Long](GetCurrentEpoch)
        // It's possible to fall more than 1 epoch behind if a GetCurrentEpoch RPC ends up taking
        // a while. We catch up by injecting enough epoch markers immediately to catch up. This will
        // result in some epochs being empty for this partition, but that's fine.
        for (i <- currentEpoch to newEpoch - 1) {
          queue.put(EpochMarker)
          logDebug(s"Sent marker to start epoch ${i + 1}")
        }
        currentEpoch = newEpoch
      } catch {
        case t: Throwable =>
          failureReason = t
          throw t
      }
    }
  }
}
szhem/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousQueuedDataReader.scala
Scala
apache-2.0
8,425
package formless

/**
 * A path into a nested structure, grown left-to-right with `/`.
 *
 * Rendering convention: the empty path prints as "/", and every appended
 * segment prints as its own text followed by a trailing "/".
 */
sealed trait Path {
  /** Extend this path with a named field segment. */
  def / (field: String): Path = PField(this, field)
  /** Extend this path with a numeric index segment. */
  def / (index: Int ): Path = PIndex(this, index)
}

/** The empty (root) path. */
final case object PNil extends Path {
  override def toString = "/"
}

/** A path extended by a field name. */
final case class PField(root: Path, field: String) extends Path {
  override def toString = root.toString + field + "/"
}

/** A path extended by an index. */
final case class PIndex(root: Path, index: Int) extends Path {
  override def toString = root.toString + index.toString + "/"
}
underscoreio/formless
core/src/main/scala/formless/Path.scala
Scala
apache-2.0
442
/* scala-stm - (c) 2009-2010, Stanford University, PPL */

package scala.concurrent.stm
package impl

import scala.collection.mutable.Builder

/** `RefFactory` is responsible for creating concrete `Ref` instances. */
trait RefFactory {
  // Primitive overloads let implementations avoid boxing for common value types.
  def newRef(v0: Boolean): Ref[Boolean]
  def newRef(v0: Byte): Ref[Byte]
  def newRef(v0: Short): Ref[Short]
  def newRef(v0: Char): Ref[Char]
  def newRef(v0: Int): Ref[Int]
  def newRef(v0: Float): Ref[Float]
  def newRef(v0: Long): Ref[Long]
  def newRef(v0: Double): Ref[Double]
  def newRef(v0: Unit): Ref[Unit]

  /** `A` will not be one of the primitive types (for which a `newRef`
   *  specialization exists). */
  def newRef[A : ClassManifest](v0: A): Ref[A]

  /** Creates a transaction-local variable with the given initializer and
   *  lifecycle callbacks (see `TxnLocal` for the callback semantics). */
  def newTxnLocal[A](init: => A,
                     initialValue: InTxn => A,
                     beforeCommit: InTxn => Unit,
                     whilePreparing: InTxnEnd => Unit,
                     whileCommitting: InTxnEnd => Unit,
                     afterCommit: A => Unit,
                     afterRollback: Txn.Status => Unit,
                     afterCompletion: Txn.Status => Unit): TxnLocal[A]

  // Transactional arrays: sized-and-zeroed, or initialized from a collection.
  def newTArray[A : ClassManifest](length: Int): TArray[A]
  def newTArray[A : ClassManifest](xs: TraversableOnce[A]): TArray[A]

  // Transactional map/set constructors plus builders for bulk construction.
  def newTMap[A, B]: TMap[A, B]
  def newTMapBuilder[A, B]: Builder[(A, B), TMap[A, B]]

  def newTSet[A]: TSet[A]
  def newTSetBuilder[A]: Builder[A, TSet[A]]
}
djspiewak/scala-stm
src/main/scala/scala/concurrent/stm/impl/RefFactory.scala
Scala
bsd-3-clause
1,439
/*
 * Copyright 2014–2017 SlamData Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package quasar.sst

import slamdata.Predef._
import quasar.contrib.matryoshka._
import quasar.ejson.{EJsonArbitrary, TypeTag}
import quasar.pkg.tests.{arbitrary => tarb, _}

import matryoshka.Delay
import scalaz.scalacheck.ScalaCheckBinding._
import scalaz._, Scalaz._

/** An ADT representing the two forms of type tagging that quasar does in EJson.
  *
  * This exists primarily for the arbitrary instance, used by TypedEJson to
  * generate arbitrary EJson that contains the particular `Meta` nodes that
  * represent type tags.
  */
private[sst] sealed abstract class TypeMetadata[A]

object TypeMetadata {
  // A type tag attached to a value.
  final case class Type[A](tag: TypeTag, value: A) extends TypeMetadata[A]
  // A type tag plus a size annotation attached to a value.
  final case class SizedType[A](tag: TypeTag, size: BigInt, value: A) extends TypeMetadata[A]
  // NB: For more control over generation frequency.
  final case class Absent[A](value: A) extends TypeMetadata[A]
  // NB: Used as a termination case in the generator.
  final case class Null[A]() extends TypeMetadata[A]

  // Generator: mostly untagged values, occasionally (sized) type tags,
  // with Null as the sole leaf case; weights control the mix.
  implicit val arbitrary: Delay[Arbitrary, TypeMetadata] =
    new PatternArbitrary[TypeMetadata] {
      import EJsonArbitrary._

      def leafGenerators[A] =
        uniformly(const(Null[A]()))

      def branchGenerators[A: Arbitrary] =
        NonEmptyList(
          (700, tarb[A] ^^ Absent[A]),
          (200, (tarb[TypeTag] ⊛ tarb[A])(Type(_, _))),
          (100, (tarb[TypeTag] ⊛ genBigInt ⊛ tarb[A])(SizedType(_, _, _))))
    }

  implicit val traverse: Traverse[TypeMetadata] =
    new Traverse[TypeMetadata] with Foldable.FromFoldr[TypeMetadata] {
      def traverseImpl[G[_]: Applicative, A, B](fa: TypeMetadata[A])(f: A => G[B]): G[TypeMetadata[B]] =
        fa match {
          case Type(t, a) => f(a) map (Type(t, _))
          case SizedType(t, n, a) => f(a) map (SizedType(t, n, _))
          case Absent(a) => f(a) map (Absent(_))
          case Null() => (Null(): TypeMetadata[B]).point[G]
        }
    }
}
drostron/quasar
frontend/src/test/scala/quasar/sst/TypeMetadata.scala
Scala
apache-2.0
2,576
package com.schnobosoft.pagerank.breeze

import scala.Range
import breeze.linalg.CSCMatrix
import breeze.linalg.DenseVector
import breeze.linalg.Vector
import breeze.linalg.norm
import breeze.linalg.sum

/**
 * Implementation of the PageRank algorithm using Scala and Breeze.
 * @author Carsten Schnober
 */
object PageRank {
  // Convergence threshold: iteration stops once the L1 distance between
  // successive rank vectors drops below this value.
  final val EPSILON = 0.0000001
  // Default damping factor (1 - teleport probability).
  private final val BETA_DEFAULT = 0.8

  // Selects between the two equivalent implementations below.
  object Method extends Enumeration {
    val ITERATIVE, MATRIX = Value
  }

  /**
   * @param location location of the input file
   * @param nPages total number of pages (nodes)
   * @param nLines the maximum number of lines to read from input file
   * @param method the method to use (iterative vs. matrix-based)
   * @param beta one minus the teleport probability
   * @return a vector defining the PageRank value for each page/node
   */
  def pagerank(location: String, nPages: Int, nLines: Int = Int.MaxValue,
    method: Method.Value = PageRank.Method.ITERATIVE, beta: Double = BETA_DEFAULT): DenseVector[Double] = {
    // Column-stochastic adjacency matrix built from the edge list file.
    val m = MatrixUtils.stochasticMatrix(MatrixUtils.adjMatrix(location, nPages, nLines))
    // val m = MatrixUtils.adjMatrix(location, nPages, nLines)
    // Uniform initial distribution: each page starts with rank 1/nPages.
    val rInitial = DenseVector.ones[Double](nPages) :/ nPages.toDouble
    if (method == Method.ITERATIVE) {
      println("Using iterative method.")
      computeRIterative(m, rInitial, beta = beta)
    } else if (method == Method.MATRIX) {
      println("Using matrix-based method.")
      computeRMatrix(m, rInitial, beta = beta)
    } else {
      throw new IllegalArgumentException()
    }
  }

  /**
   * Generate a vector of length n with either beta or 0 for dead ends
   * @param m a CSCMatrix
   * @param beta the default beta value for non-dead ends
   * @return a vector containing beta or 0 for each column of m
   */
  // NOTE(review): not referenced by the other methods in this object --
  // presumably kept for external callers; verify before removing.
  def computeBeta(m: CSCMatrix[Double], beta: Double): Vector[Double] = {
    val v = DenseVector.fill[Double](m.cols, beta)
    // Dead ends (out-degree 0) contribute no outgoing rank: zero them out.
    for (i <- Range(0, m.cols).filter(MatrixUtils.outDegree(_, m) == 0)) {
      v.update(i, 0)
    }
    v
  }

  /**
   * Iterative implementation of PageRank
   *
   * @param m the stochastic adjacency matrix
   * @param r the initial vector R holding the PageRank values for each node/page
   * @param beta one minus the teleport probability
   * @param counter counts the number of iterations
   * @return a vector defining the PageRank value for each page/node
   */
  @deprecated("Use computeRMatrix() instead.")
  def computeRIterative(m: CSCMatrix[Double], r: DenseVector[Double], beta: Double,
    counter: Int = 1): DenseVector[Double] = {
    println("Iteration: " + counter)

    /* compute r': each page j accumulates damped rank shares from its
     * incoming neighbours i, scaled by i's out-degree. */
    val rNew = DenseVector.zeros[Double](r.size);
    for (j <- 0 until r.length) {
      MatrixUtils.incoming(j, m).foreach { i =>
        rNew(j) += beta * (r(i) / MatrixUtils.outDegree(i, m))
      }
    }

    /* re-insert r': redistribute the leaked probability mass (teleport +
     * dead ends) uniformly so the vector sums to 1 again. */
    val S = sum(rNew)
    rNew :+= (1 - S) / rNew.size

    /* recursion: iterate until the L1 change falls below EPSILON.
     * NOTE(review): not tail-recursive as written; deep iteration counts
     * could grow the stack -- presumably convergence is fast in practice. */
    if (manhattanDistance(rNew, r) > PageRank.EPSILON)
      computeRIterative(m, rNew, beta, counter + 1)
    else
      rNew
  }

  /**
   * Matrix-based implementation of PageRank
   *
   * @param m the stochastic adjacency matrix
   * @param r the initial vector R holding the PageRank values for each node/page
   * @param beta one minus the teleport probability
   * @param counter counts the number of iterations
   * @return a vector defining the PageRank value for each page/node
   */
  def computeRMatrix(m: CSCMatrix[Double], r: DenseVector[Double], beta: Double,
    counter: Int = 1): DenseVector[Double] = {
    println("Iteration: " + counter)
    // r' = beta * M * r + (1 - beta) / N  (teleport term added element-wise)
    val rNew = (m * beta) * r :+ ((1 - beta) / m.cols)
    /* recursion */
    if (PageRank.manhattanDistance(rNew, r) > PageRank.EPSILON)
      computeRMatrix(m, rNew, beta, counter + 1)
    else
      rNew
  }

  /**
   * Compute the cosine distance between two vectors, i.e. 1 - the cosine similarity.
   *
   * @param a the first vector
   * @param b the second vector
   * @return the cosine distance between the two vectors
   */
  // NOTE(review): currently unused; convergence is tested with the Manhattan
  // distance below.
  private def cosineDistance(a: Vector[Double], b: Vector[Double]): Double = {
    1 - (a dot b) / (norm(a, 2) * norm(b, 2))
  }

  /**
   * Compute the Manhattan distance between two vectors.
   *
   * @param a the first vector
   * @param b the second vector
   * @return the Manhattan distance between the two vectors
   */
  private def manhattanDistance(a: Vector[Double], b: Vector[Double]): Double = {
    (a - b).norm(1)
  }
}
carschno/pagerank
src/main/scala/com/schnobosoft/pagerank/breeze/PageRank.scala
Scala
gpl-2.0
4,418
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.spark.serialization

import java.util.Collections
import java.util.Date
import java.util.{List => JList}

import scala.collection.JavaConverters.asScalaBufferConverter
import scala.collection.Seq
import scala.collection.mutable.LinkedHashMap
import scala.collection.mutable.Map

import org.elasticsearch.hadoop.cfg.Settings
import org.elasticsearch.hadoop.serialization.FieldType
import org.elasticsearch.hadoop.serialization.FieldType.BINARY
import org.elasticsearch.hadoop.serialization.FieldType.BOOLEAN
import org.elasticsearch.hadoop.serialization.FieldType.BYTE
import org.elasticsearch.hadoop.serialization.FieldType.DATE
import org.elasticsearch.hadoop.serialization.FieldType.DATE_NANOS
import org.elasticsearch.hadoop.serialization.FieldType.DOUBLE
import org.elasticsearch.hadoop.serialization.FieldType.HALF_FLOAT
import org.elasticsearch.hadoop.serialization.FieldType.SCALED_FLOAT
import org.elasticsearch.hadoop.serialization.FieldType.FLOAT
import org.elasticsearch.hadoop.serialization.FieldType.INTEGER
import org.elasticsearch.hadoop.serialization.FieldType.JOIN
import org.elasticsearch.hadoop.serialization.FieldType.KEYWORD
import org.elasticsearch.hadoop.serialization.FieldType.GEO_POINT
import org.elasticsearch.hadoop.serialization.FieldType.GEO_SHAPE
import org.elasticsearch.hadoop.serialization.FieldType.LONG
import org.elasticsearch.hadoop.serialization.FieldType.NULL
import org.elasticsearch.hadoop.serialization.FieldType.SHORT
import org.elasticsearch.hadoop.serialization.FieldType.STRING
import org.elasticsearch.hadoop.serialization.FieldType.TEXT
import org.elasticsearch.hadoop.serialization.FieldType.TOKEN_COUNT
import org.elasticsearch.hadoop.serialization.FieldType.WILDCARD
import org.elasticsearch.hadoop.serialization.Parser
import org.elasticsearch.hadoop.serialization.Parser.Token.VALUE_BOOLEAN
import org.elasticsearch.hadoop.serialization.Parser.Token.VALUE_NULL
import org.elasticsearch.hadoop.serialization.Parser.Token.VALUE_NUMBER
import org.elasticsearch.hadoop.serialization.SettingsAware
import org.elasticsearch.hadoop.serialization.builder.AbstractValueReader
import org.elasticsearch.hadoop.serialization.field.FieldFilter
import org.elasticsearch.hadoop.serialization.field.FieldFilter.NumberedInclude
import org.elasticsearch.hadoop.util.DateUtils
import org.elasticsearch.hadoop.util.SettingsUtils
import org.elasticsearch.hadoop.util.StringUtils
import org.elasticsearch.hadoop.util.unit.Booleans
import java.sql.Timestamp
import java.time.Instant
import java.time.temporal.TemporalAccessor

import scala.annotation.tailrec

/** Materializes Elasticsearch field values as plain Scala values for Spark.
  *
  * Dispatches on the mapped ES `FieldType` (falling back to the current JSON
  * token where a type is ambiguous), and honours two settings: treating empty
  * strings as null (`emptyAsNull`) and returning rich `Date` objects instead
  * of raw longs/strings (`richDate`).
  */
class ScalaValueReader extends AbstractValueReader with SettingsAware {

  // Populated from Settings in setSettings() below.
  var emptyAsNull: Boolean = false
  var richDate: Boolean = false
  var arrayInclude: JList[NumberedInclude] = Collections.emptyList()
  var arrayExclude: JList[String] = Collections.emptyList()

  /** Converts `value` (the raw text) to a Scala value according to `esType`.
    * A null `esType` or a JSON null token always yields null.
    */
  def readValue(parser: Parser, value: String, esType: FieldType) = {
    if (esType == null || parser.currentToken() == VALUE_NULL) {
      nullValue()
    }
    else {
      esType match {
        case NULL => nullValue()
        case STRING => textValue(value, parser)
        case TEXT => textValue(value, parser)
        case KEYWORD => textValue(value, parser)
        case WILDCARD => textValue(value, parser)
        case BYTE => byteValue(value, parser)
        case SHORT => shortValue(value, parser)
        case INTEGER => intValue(value, parser)
        case TOKEN_COUNT => longValue(value, parser)
        case LONG => longValue(value, parser)
        case HALF_FLOAT => floatValue(value, parser)
        case SCALED_FLOAT => doubleValue(value, parser)
        case FLOAT => floatValue(value, parser)
        case DOUBLE => doubleValue(value, parser)
        case BOOLEAN => booleanValue(value, parser)
        // Prefer the parser's decoded bytes; fall back to the raw text bytes.
        case BINARY => binaryValue(Option(parser.binaryValue()).getOrElse(value.getBytes()))
        case DATE => date(value, parser)
        case DATE_NANOS => dateNanos(value, parser)
        // GEO is ambiguous so use the JSON type instead to differentiate between doubles (a lot in GEO_SHAPE) and strings
        case GEO_POINT | GEO_SHAPE => {
          if (parser.currentToken() == VALUE_NUMBER) doubleValue(value, parser)
          else textValue(value, parser)
        }
        // JOIN field is special. Only way that we could have reached here is if the join value we're reading is
        // the short-hand format of the join field for parent documents. Make a container and put the value under it.
        case JOIN => {
          val container = createMap()
          addToMap(container, "name", textValue(value, parser))
          container
        }
        // everything else (IP, GEO) gets translated to strings
        case _ => textValue(value, parser)
      }
    }
  }

  /** Applies `converter` unless the value is null, or is empty while
    * `emptyAsNull` is enabled — both cases produce null instead.
    */
  def checkNull(converter: (String, Parser) => Any, value: String, parser: Parser) = {
    if (value != null) {
      if (!StringUtils.hasText(value) && emptyAsNull) {
        nullValue()
      }
      else {
        converter(value, parser).asInstanceOf[AnyRef]
      }
    }
    else {
      nullValue()
    }
  }

  // Scala null stands in for an ES null value.
  def nullValue() = { null }

  def textValue(value: String, parser: Parser) = { checkNull (parseText, value, parser) }
  protected def parseText(value:String, parser: Parser) = { value }

  // For each numeric type: trust the parser's typed accessor when the current
  // token is a JSON number, otherwise parse the raw string.
  def byteValue(value: String, parser: Parser) = { checkNull (parseByte, value, parser) }
  protected def parseByte(value: String, parser:Parser) = { if (parser.currentToken()== VALUE_NUMBER) parser.intValue().toByte else value.toByte }

  def shortValue(value: String, parser:Parser) = { checkNull (parseShort, value, parser) }
  protected def parseShort(value: String, parser:Parser) = { if (parser.currentToken()== VALUE_NUMBER) parser.shortValue().toShort else value.toShort }

  def intValue(value: String, parser:Parser) = { checkNull(parseInt, value, parser) }
  protected def parseInt(value: String, parser:Parser) = { if (parser.currentToken()== VALUE_NUMBER) parser.intValue().toInt else value.toInt }

  def longValue(value: String, parser:Parser) = { checkNull(parseLong, value, parser) }
  protected def parseLong(value: String, parser:Parser) = { if (parser.currentToken()== VALUE_NUMBER) parser.longValue().toLong else value.toLong }

  def floatValue(value: String, parser:Parser) = { checkNull(parseFloat, value, parser) }
  protected def parseFloat(value: String, parser:Parser) = { if (parser.currentToken()== VALUE_NUMBER) parser.floatValue().toFloat else value.toFloat }

  def doubleValue(value: String, parser:Parser) = { checkNull(parseDouble, value, parser) }
  protected def parseDouble(value: String, parser:Parser) = { if (parser.currentToken()== VALUE_NUMBER) parser.doubleValue().toDouble else value.toDouble }

  def booleanValue(value: String, parser:Parser) = { checkNull(parseBoolean, value, parser) }
  // Booleans may arrive as true JSON booleans, as numbers (non-zero == true)
  // or as text; JSON null maps to null.
  protected def parseBoolean(value: String, parser:Parser) = {
    if (parser.currentToken()== VALUE_NULL) nullValue()
    else if (parser.currentToken()== VALUE_BOOLEAN) parser.booleanValue()
    else if (parser.currentToken()== VALUE_NUMBER) parser.intValue() != 0
    else Booleans.parseBoolean(value)
  }

  // Empty byte arrays are treated like empty strings when emptyAsNull is set.
  def binaryValue(value: Array[Byte]) = {
    Option(value) collect {
      case value: Array[Byte] if !emptyAsNull || !value.isEmpty => parseBinary(value)
    } getOrElse nullValue()
  }
  protected def parseBinary(value: Array[Byte]) = { value }

  def date(value: String, parser: Parser) = { checkNull(parseDate, value, parser) }
  def dateNanos(value: String, parser: Parser) = { checkNull(parseDateNanos, value, parser) }

  // Numeric dates are epoch millis; textual dates are parsed only when
  // richDate is enabled, otherwise returned verbatim.
  protected def parseDate(value: String, parser:Parser) = {
    if (parser.currentToken() == VALUE_NUMBER) {
      if (richDate) createDate(parser.longValue()) else parser.longValue()
    } else {
      if (richDate) createDate(value) else value
    }
  }

  protected def parseDateNanos(value: String, parser:Parser) = {
    if (parser.currentToken() == VALUE_NUMBER) {
      if (richDate) createDate(parser.longValue()) else parser.longValue()
    } else {
      if (richDate) createDateNanos(value) else value
    }
  }

  protected[serialization] def createDate(value: Long): Any = {
    new Date(value)
  }

  protected def createDate(value: String):Any = {
    createDate(DateUtils.parseDate(value).getTimeInMillis())
  }

  protected[serialization] def createDateNanos(value: String) = {
    DateUtils.parseDateNanos(value)
  }

  /** Caches the relevant connector settings on this (mutable) reader. */
  def setSettings(settings: Settings) = {
    emptyAsNull = settings.getReadFieldEmptyAsNull
    richDate = settings.getMappingDateRich
    arrayInclude = SettingsUtils.getFieldArrayFilterInclude(settings);
    arrayExclude = StringUtils.tokenize(settings.getReadFieldAsArrayExclude());
  }

  // Insertion-ordered map so document field order is preserved.
  def createMap(): AnyRef = {
    new LinkedHashMap
  }

  override def addToMap(map: AnyRef, key: AnyRef, value: Any): Unit = {
    map.asInstanceOf[Map[AnyRef, Any]].put(key, value)
  }

  override def wrapString(value: String): AnyRef = {
    value
  }

  // Tracks nesting depth of multi-level arrays on the current field context.
  def createArray(typ: FieldType): AnyRef = {
    val ctx = getCurrentField
    if (ctx != null) {
      ctx.setArrayDepth(ctx.getArrayDepth + 1)
    }
    List.empty
  }

  override def addToArray(array: AnyRef, values: java.util.List[Object]): AnyRef = {
    val ctx = getCurrentField
    if (ctx != null) {
      ctx.setArrayDepth(ctx.getArrayDepth - 1)
    }
    var arr: AnyRef = values.asScala
    // outer most array (a multi level array might be defined)
    if (ctx != null && ctx.getArrayDepth == 0) {
      // If the field was configured with an expected array depth, pad the
      // result with extra wrapping lists to reach that depth.
      val result = FieldFilter.filter(ctx.getFieldName, arrayInclude, arrayExclude, false)
      if (result.matched && result.depth > 1) {
        val extraDepth = result.depth - arrayDepth(arr)
        if (extraDepth > 0) {
          arr = wrapArray(arr, extraDepth)
        }
      }
    }
    arr
  }

  /** Depth of leading nested `Seq`s, probed via the first element only. */
  def arrayDepth(potentialArray: AnyRef): Int = {
    @tailrec
    def _arrayDepth(potentialArray: AnyRef, depth: Int): Int = {
      potentialArray match {
        case Seq(x: AnyRef, _*) => _arrayDepth(x, depth + 1)
        case _ => depth
      }
    }
    _arrayDepth(potentialArray, 0)
  }

  /** Wraps `array` in `extraDepth` additional single-element lists. */
  def wrapArray(array: AnyRef, extraDepth: Int): AnyRef = {
    var arr = array
    for (_ <- 0 until extraDepth) {
      arr = List(arr)
    }
    arr
  }
}
elastic/elasticsearch-hadoop
spark/core/src/main/scala/org/elasticsearch/spark/serialization/ScalaValueReader.scala
Scala
apache-2.0
11,056
import org.apache.spark.SparkConf import org.apache.spark.sql.SparkSession object Job { val AppName = "ECAD_JSON_Converter" val sparkMaster = "local[3]" // val sparkMaster = "spark://node0.local:7077" // val HDFSDataDir = "hdfs://node0.local:9000/ECAD_Data/" // val HDFSNameNode = "hdfs://node0.local:9000" val HDFSDataDir = "/home/Data_KNMI/ECAD/Europe/Done/" def main(args: Array[String]): Unit = { val conf = new SparkConf().setAppName(AppName).setMaster(sparkMaster) val spark = SparkSession .builder() .config(conf) .getOrCreate() val sc = spark.sparkContext val hadoopConf = sc.hadoopConfiguration // hadoopConf.set("fs.defaultFS", HDFSNameNode) val mapper = new Mappers() val sourceDF = mapper.genSourceDF(spark, HDFSDataDir + "sources.txt") sourceDF.show() //RDD with tuple(filepath, *content of file*) val fileList = sc.wholeTextFiles(HDFSDataDir + "RR_SOUID1000*.txt") mapper.precipicationDF(spark, fileList) // println(precipDS.count()) // val precipDF = mapper.precipicationDF(, HDFSDataDir + "RR_SOUID100014.txt") } }
luxinator/RainyDay
Scala_code/src/main/scala/Job.scala
Scala
apache-2.0
1,140
package picasso.frontend.basic

import picasso.utils._
import picasso.math._
import picasso.ast._

/** Accumulates the artifacts of one analysis run (parsed actors, CFAs,
  * transitions, cover, errors) and renders them as a console dump or a
  * standalone HTML page with embedded SVG graphs.
  *
  * Each stage of the pipeline pushes its result in via the matching setter;
  * unset stages are simply omitted from the report.
  */
class Report(name: String) {
  //TODO replace by the report in utils

  // Raw parser output: the actors plus the initial-configuration expression.
  protected var parsed: Option[(Iterable[Actor], Expression)] = None
  def setParsed(p: (Iterable[Actor], Expression)) = {
    parsed = Some(p)
  }

  // Actors after type checking; preferred over `parsed` when present.
  protected var typed: Option[Iterable[Actor]] = None
  def setTyped(t: Iterable[Actor]) = {
    typed = Some(t)
  }

  // Control-flow automata derived from the actors.
  protected var agents: Option[Iterable[AgentDefinition[Actors.PC]]] = None
  def setAgents(a: Iterable[AgentDefinition[Actors.PC]]) = {
    agents = Some(a)
  }

  // Graph-rewriting transitions of the analysis.
  protected var transitions: Option[Iterable[Analysis#DBT]] = None
  def setTransitions(t: Iterable[Analysis#DBT]) = {
    transitions = Some(t)
  }

  // Initial configuration graph.
  protected var initConf: Option[Analysis#DBCC] = None
  def setInitConf(i: Analysis#DBCC) = {
    initConf = Some(i)
  }

  // Basis of the covering set computed by the analysis.
  protected var cover: Option[Iterable[Analysis#DBCC]] = None
  def setCover(c: Iterable[Analysis#DBCC]) = {
    cover = Some(c)
  }

  // Error message, if the analysis failed.
  protected var error: Option[String] = None
  def setError(err: String) = {
    error = Some(err)
  }

  /** Prints every populated section to stdout; `for` over each Option only
    * emits a section when the corresponding stage produced a result.
    */
  def makeConsoleReport = {
    Console.println("Analysis of \"" + name + "\".\n")
    for (p <- typed orElse parsed.map(_._1))
      Console.println("Input Actors:\n\n" + p.mkString("","\n\n","") + "\n")
    for (i <- parsed.map(_._2))
      Console.println("Initial Configuration:\n\n" + i + "\n")
    for (a <- agents)
      Console.println("As CFA:\n\n" + a.mkString("","\n\n","") + "\n")
    for (t <- transitions)
      Console.println("Transitions:\n\n" + t.mkString("","\n\n","") + "\n")
    for (i <- initConf)
      Console.println("Initial Configuration:\n\n" + i + "\n")
    for (c <- cover)
      Console.println("Basis of the covering set:\n\n" + c.mkString("","\n\n","") + "\n")
    for (e <- error)
      Console.println("ERROR:\n\n" + e)
  }

  /** Writes a self-contained HTML report to `fileName`, rendering graphs to
    * inline SVG via Graphviz (dot for CFAs/transitions, fdp for
    * configurations).
    */
  def makeHtmlReport(fileName: String) = {
    // Pre-size the builder: reports with many SVGs get large.
    val buffer = new scala.collection.mutable.StringBuilder(100 * 1024)
    buffer ++= "<!DOCTYPE HTML>\n"
    buffer ++= "<html>\n"
    buffer ++= "<head>\n"
    buffer ++= " <meta charset=\"utf-8\">\n"
    buffer ++= " <title>Analysis report for "+name+"</title>\n"
    buffer ++= "</head>\n"
    buffer ++= "<body>\n"
    buffer ++= "<h1>Analysis report for "+name+"</h1>\n"
    buffer ++= "<h2>Input</h2>\n"
    buffer ++= "<h3>Actors</h3>\n"
    for (p <- typed orElse parsed.map(_._1); a <- p) {
      buffer ++= "<pre>\n" + a + "\n</pre>\n"
    }
    buffer ++= "<h3>Initial Configuration</h3>\n"
    for (i <- parsed.map(_._2))
      buffer ++= "<pre>\n" + i + "\n</pre>\n"
    buffer ++= "<h2>CFA</h2>\n"
    for (as <- agents; a <- as) {
      buffer ++= "<p>" + a.id + a.params.mkString("(",", ",")") +"</p>" + "\n"
      buffer ++= Misc.graphvizToSvgDot(Misc.docToString(a.toGraphviz("agent", "digraph", "agt"))) + "\n"
    }
    buffer ++= "<h2>Graph rewriting rules</h2>\n"
    buffer ++= "<h3>Transitions</h3>\n"
    for (ts <- transitions; t <- ts) {
      buffer ++= "<p>"+t.id+"</p>" + "\n"
      buffer ++= Misc.graphvizToSvgDot(Misc.docToString(t.toGraphviz("trs"))) + "\n"
    }
    buffer ++= "<h3>Initial Configuration</h3>\n"
    for (i <- initConf)
      buffer ++= Misc.graphvizToSvgFdp(i.toGraphviz("init")) + "\n"
    buffer ++= "<h2>Cover</h2>\n"
    // Number the cover elements so they can be referenced individually.
    for (cs <- cover; (c, i) <- cs.zipWithIndex) {
      buffer ++= "<p>("+i+")</p>" + "\n"
      buffer ++= Misc.graphvizToSvgFdp(c.toGraphviz("cover")) + "\n"
    }
    for (e <- error) {
      buffer ++= "<h2>ERROR</h2>\n"
      buffer ++= "<pre>\n" + e + "\n</pre>\n"
    }
    buffer ++= "</body>\n"
    buffer ++= "</html>\n"
    IO.writeInFile(fileName, buffer.toString)
  }
}
dzufferey/picasso
frontend/basic/src/main/scala/picasso/frontend/basic/Report.scala
Scala
bsd-2-clause
3,649
/*
 * Copyright 2001-2013 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalactic.enablers

import org.scalactic.Equality
import org.scalatest.words.ArrayWrapper
import scala.collection.GenTraversable
import org.scalatest.FailureMessages
import scala.annotation.tailrec
import scala.collection.JavaConverters._

/**
 * Supertrait for typeclasses that enable the <code>be readable</code> matcher syntax.
 *
 * <p>
 * A <code>Readability[T]</code> provides access to the "readable nature" of type <code>T</code> in such
 * a way that <code>be readable</code> matcher syntax can be used with type <code>T</code>. A <code>T</code>
 * can be any type for which the concept of being readable makes sense, such as <code>java.io.File</code>.
 * You can enable the <code>be readable</code> matcher syntax on your own type <code>U</code> by defining a
 * <code>Readability[U]</code> for the type and making it available implicitly.
 * </p>
 *
 * <p>
 * ScalaTest provides an implicit <code>Readability</code> instance for <code>java.io.File</code> and arbitary
 * object with <code>isReadable()</code> or <code>isReadable</code> in the <code>Readability</code> companion object.
 * </p>
 */
// Contravariant in T: a Readability for a supertype also serves its subtypes.
trait Readability[-T] {

  /**
   * Determines whether the passed thing is readable, <em>i.e.</em>, the passed file is readable.
   *
   * @param thing the thing to check for readability
   * @return <code>true</code> if the passed thing is readable, <code>false</code> otherwise
   *
   */
  def isReadable(thing: T): Boolean
}

/**
 * Companion object for <code>Readability</code> that provides implicit implementations for the following types:
 *
 * <ul>
 * <li><code>java.io.File</code></li>
 * <li>arbitary object with a <code>isReadable()</code> method that returns <code>Boolean</code></li>
 * <li>arbitary object with a parameterless <code>isReadable</code> method that returns <code>Boolean</code></li>
 * </ul>
 */
object Readability {

  /**
   * Enable <code>Readability</code> implementation for <code>java.io.File</code>.
   *
   * @tparam FILE any subtype of <code>java.io.File</code>
   * @return <code>Readability[FILE]</code> that supports <code>java.io.File</code> in <code>be readable</code> syntax
   */
  implicit def readabilityOfFile[FILE <: java.io.File]: Readability[FILE] =
    new Readability[FILE] {
      def isReadable(file: FILE): Boolean = file.canRead
    }

  // The two instances below use structural types, which are implemented via
  // runtime reflection (hence the reflectiveCalls import) and carry a
  // per-call reflection cost.
  import scala.language.reflectiveCalls

  /**
   * Enable <code>Readability</code> implementation for any arbitrary object with a <code>isReadable()</code> method that returns <code>Boolean</code>
   *
   * @tparam T any type that has a <code>isReadable()</code> method that returns <code>Boolean</code>
   * @return <code>Readability[T]</code> that supports <code>T</code> in <code>be readable</code> syntax
   */
  implicit def readabilityOfAnyRefWithIsReadableMethod[T <: AnyRef { def isReadable(): Boolean}]: Readability[T] =
    new Readability[T] {
      def isReadable(obj: T): Boolean = obj.isReadable
    }

  /**
   * Enable <code>Readability</code> implementation for any arbitrary object with a parameterless <code>isReadable</code> method that returns <code>Boolean</code>
   *
   * @tparam T any type that has a parameterless <code>isReadable</code> method that returns <code>Boolean</code>
   * @return <code>Readability[T]</code> that supports <code>T</code> in <code>be readable</code> syntax
   */
  implicit def readabilityOfAnyRefWithParameterlessIsReadableMethod[T <: AnyRef { def isReadable: Boolean}]: Readability[T] =
    new Readability[T] {
      def isReadable(obj: T): Boolean = obj.isReadable
    }
}
travisbrown/scalatest
src/main/scala/org/scalactic/enablers/Readability.scala
Scala
apache-2.0
4,142
/*
 * Copyright (c) 2012 - 2020 Splice Machine, Inc.
 *
 * This file is part of Splice Machine.
 * Splice Machine is free software: you can redistribute it and/or modify it under the terms of the
 * GNU Affero General Public License as published by the Free Software Foundation, either
 * version 3, or (at your option) any later version.
 * Splice Machine is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU Affero General Public License for more details.
 * You should have received a copy of the GNU Affero General Public License along with Splice Machine.
 * If not, see <http://www.gnu.org/licenses/>.
 *
 */
package com.splicemachine.spark2.splicemachine

import java.util
import java.util.{Properties, UUID}

import com.splicemachine.derby.impl.kryo.KryoSerialization
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord, KafkaConsumer}
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, IntegerDeserializer}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Dataset, Row, SparkSession}
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings

import scala.collection.JavaConverters._

/** Drains a Kafka topic of Kryo-serialized `Row`s and exposes the result as a
  * Spark DataFrame/RDD with the supplied `querySchema`.
  *
  * @param kafkaServers Kafka bootstrap servers
  * @param pollTimeout  initial poll timeout in milliseconds; also the basis
  *                     for the shorter follow-up poll timeout
  * @param querySchema  schema used to decode each deserialized Row's columns
  */
@SuppressFBWarnings(value = Array("NP_ALWAYS_NULL","SE_BAD_FIELD"), justification = "Fields 'row' and 'records' aren't always null, and null checks didn't eliminate Spotbugs error; see DB-9580.|This class isn't serializable, and there's no field named 'outer'.")
class KafkaToDF(kafkaServers: String, pollTimeout: Long, querySchema: StructType) {

  // First poll waits the full timeout; follow-up polls use at most 1s.
  val timeout = java.time.Duration.ofMillis(pollTimeout)
  val shortTimeout = if( pollTimeout <= 1000L ) timeout else java.time.Duration.ofMillis(1000L)
  val kryo = new KryoSerialization()

  // Re-resolved on each use; relies on an active SparkSession in the JVM.
  def spark(): SparkSession = SparkSession.builder.getOrCreate

  /** Consumes the topic and returns its rows as a DataFrame. */
  def df(topicName: String): Dataset[Row] = {
    val (rdd, schema) = rdd_schema(topicName)
    spark.createDataFrame( rdd , schema )
  }

  /** Consumes the topic and returns its rows as an RDD (schema discarded). */
  def rdd(topicName: String): RDD[Row] = rdd_schema(topicName)._1

  /** Polls the topic from the earliest offset until a poll returns no new
    * records, deserializes every record, and returns the rows plus schema.
    *
    * @throws Exception when the first poll yields no records at all
    */
  def rdd_schema(topicName: String): (RDD[Row], StructType) = {
    val props = new Properties()
    val groupId = "spark-consumer-s2s-ktdf"
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers)
    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId)
    // Unique client id per call so concurrent consumers don't collide.
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, groupId +"-"+ UUID.randomUUID())
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[IntegerDeserializer].getName)
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName)
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    val consumer = new KafkaConsumer[Integer, Array[Byte]](props)
    consumer.subscribe(util.Arrays.asList(topicName))

    // Keep polling with the short timeout until a poll comes back empty.
    var records = Iterable.empty[ConsumerRecord[Integer, Array[Byte]]]
    var newRecords = consumer.poll(timeout).asScala // records: Iterable[ConsumerRecord[Integer, Array[Byte]]]
    records = records ++ newRecords
    while( newRecords.nonEmpty ) {
      newRecords = consumer.poll(shortTimeout).asScala // records: Iterable[ConsumerRecord[Integer, Array[Byte]]]
      records = records ++ newRecords
    }
    consumer.close

    if (records.isEmpty) {
      throw new Exception(s"Kafka poll timed out after ${pollTimeout/1000.0} seconds.")
    } else if(records.size == 1) {
      // Single record: deserialize it and drop empty rows.
      kryo.init
      val (row, schema) = rowFrom(records.head)
      kryo.close
      (
        spark.sparkContext.parallelize(
          if(row.size > 0) { Seq(row) } else { Seq[Row]() }
        ),
        schema
      )
    } else {
      // Multiple records: deserialize each; the last record's schema wins.
      val seqBuilder = Seq.newBuilder[Row]
      var schema = new StructType
      kryo.init
      for (record <- records.iterator) {
        val rs = rowFrom(record)
        seqBuilder += rs._1
        schema = rs._2
      }
      kryo.close
      val rows = seqBuilder.result
      val rdd = spark.sparkContext.parallelize(rows)
      (rdd, schema)
    }
  }

  // Needed for SSDS
  /** Kryo-deserializes one record into a Row and re-reads each column with
    * the typed accessor dictated by `querySchema`. NOTE(review): requires
    * `kryo.init` to have been called by the caller.
    */
  def rowFrom(record: ConsumerRecord[Integer, Array[Byte]]): (Row, StructType) = {
    val row = kryo.deserialize(record.value).asInstanceOf[Row]
    val values = for (i <- 0 until row.length) yield { // convert each column of the row
      if( row.isNullAt(i) ) { null }
      else {
        querySchema(i).dataType match {
          case BinaryType => row.getAs[Array[Byte]](i)
          case BooleanType => row.getBoolean(i)
          case ByteType => row.getByte(i)
          case DateType => row.getDate(i)
          case t: DecimalType => row.getDecimal(i)
          case DoubleType => row.getDouble(i)
          case FloatType => row.getFloat(i)
          case IntegerType => row.getInt(i)
          case LongType => row.getLong(i)
          case ShortType => row.getShort(i)
          case StringType => row.getString(i)
          case TimestampType => row.getTimestamp(i)
          // TIME columns travel as timestamps; keep only the time-of-day part.
          case TimeType => java.sql.Time.valueOf( row.getTimestamp(i).toLocalDateTime.toLocalTime )
          case _ => throw new IllegalArgumentException(s"Can't get data for ${row.schema(i).dataType.simpleString}")
        }
      }
    }
    (Row.fromSeq(values), querySchema)
  }
}
splicemachine/spliceengine
splice_spark2/src/main/spark3.1/com/splicemachine/spark2/splicemachine/KafkaToDF.scala
Scala
agpl-3.0
5,196
package org.scaladebugger.api.lowlevel.threads import org.scalamock.scalatest.MockFactory import org.scalatest.{FunSpec, Matchers, ParallelTestExecution} import org.scaladebugger.api.lowlevel.requests.JDIRequestArgument import org.scaladebugger.test.helpers.ParallelMockFunSpec import test.TestThreadStartManager import scala.util.Success class ThreadStartManagerSpec extends ParallelMockFunSpec { private val TestRequestId = java.util.UUID.randomUUID().toString private val mockThreadStartManager = mock[ThreadStartManager] private val testThreadStartManager = new TestThreadStartManager( mockThreadStartManager ) { override protected def newRequestId(): String = TestRequestId } describe("ThreadStartManager") { describe("#createThreadStartRequest") { it("should invoke createThreadStartRequestWithId") { val expected = Success(TestRequestId) val testExtraArguments = Seq(stub[JDIRequestArgument]) (mockThreadStartManager.createThreadStartRequestWithId _) .expects(TestRequestId, testExtraArguments) .returning(expected).once() val actual = testThreadStartManager.createThreadStartRequest( testExtraArguments: _* ) actual should be (expected) } } describe("#createThreadStartRequestFromInfo") { it("should invoke createThreadStartRequestWithId") { val expected = Success(TestRequestId) val testIsPending = false val testExtraArguments = Seq(stub[JDIRequestArgument]) (mockThreadStartManager.createThreadStartRequestWithId _) .expects(TestRequestId, testExtraArguments) .returning(expected).once() val info = ThreadStartRequestInfo( TestRequestId, testIsPending, testExtraArguments ) val actual = testThreadStartManager.createThreadStartRequestFromInfo(info) actual should be(expected) } } } }
chipsenkbeil/scala-debugger
scala-debugger-api/src/test/scala/org/scaladebugger/api/lowlevel/threads/ThreadStartManagerSpec.scala
Scala
apache-2.0
1,960
package com.sstwitterprocessor.config

import com.typesafe.config.{Config, ConfigFactory}

/**
 * Central access point for application configuration.
 *
 * Loads the default configuration via `ConfigFactory.load()` plus a separate
 * `credentials.properties` resource holding the Twitter OAuth keys.
 *
 * Created by simonsaffer on 2015-11-06.
 */
object ApplicationConfig {

  private val config = ConfigFactory.load()
  private val credentials = ConfigFactory.load("credentials.properties")

  /** Twitter OAuth credentials read from `credentials.properties`. */
  object Credentials {
    val consumerKey = credentials.getString("consumerKey")
    val consumerSecret = credentials.getString("consumerSecret")
    val accessToken = credentials.getString("accessToken")
    val accessTokenSecret = credentials.getString("accessTokenSecret")
  }

  /**
   * Kafka connection settings read from the default configuration.
   *
   * Note: the unused (and deprecated) `scala.collection.JavaConversions`
   * wildcard import that previously lived here has been removed; nothing in
   * this object performs Java/Scala collection conversion.
   */
  object Kafka {
    // "topics" is a single config string; it is wrapped in a one-element set.
    val topics: Set[String] = Set(config.getString("topics"))
    val brokers: String = config.getString("brokers")
  }
}
simonsaffer/sstwitterprocessor
src/main/scala/com/sstwitterprocessor/config/ApplicationConfig.scala
Scala
gpl-2.0
757
/*
 * Copyright 2016 The BigDL Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intel.analytics.bigdl.dllib.utils.tf.loaders

import java.nio.ByteOrder

import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.dllib.nn.ops.{SegmentSum => SegmentSumOps}
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.tf.Context
import org.tensorflow.framework.NodeDef

import scala.reflect.ClassTag

/**
 * TensorFlow op loader for `SegmentSum`.
 *
 * Construction needs no information from the node definition, byte order,
 * or context: the loader simply instantiates the BigDL SegmentSum operation.
 */
class SegmentSum extends TensorflowOpsLoader {

  override def build[T: ClassTag](
      nodeDef: NodeDef,
      byteOrder: ByteOrder,
      context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
    SegmentSumOps[T]()
  }
}
intel-analytics/BigDL
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/utils/tf/loaders/SegmentSum.scala
Scala
apache-2.0
1,231
/*
 * This file is part of pelam-scala-csv
 *
 * Copyright © Peter Lamberg 2015 (pelam-scala-csv@pelam.fi)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package fi.pelam.csv.table

import java.nio.charset.Charset
import java.util.Locale

import fi.pelam.csv.CsvConstants

/**
 * Concrete [[LocaleTableMetadata]] implementation bundling the CSV format
 * parameters together with the two locales used during table processing.
 *
 * All parameters default to neutral values (`Locale.ROOT` and the
 * [[CsvConstants]] defaults).
 *
 * @param dataLocale locale associated with the cell data
 *                   (per the [[LocaleTableMetadata]] contract).
 * @param cellTypeLocale locale associated with cell type handling
 *                       (per the [[LocaleTableMetadata]] contract).
 * @param charset character set of the CSV data.
 * @param separator CSV field separator character.
 */
case class LocaleMetadata(override val dataLocale: Locale = Locale.ROOT,
  override val cellTypeLocale: Locale = Locale.ROOT,
  override val charset: Charset = CsvConstants.defaultCharset,
  override val separator: Char = CsvConstants.defaultSeparatorChar)
  extends LocaleTableMetadata[LocaleMetadata] {

  /**
   * This is a polymorphic way of accessing the concrete case class copy.
   */
  override def withFormatParameters(separator: Char,
    charset: Charset,
    cellTypeLocale: Locale,
    dataLocale: Locale): LocaleMetadata = {
    copy(
      separator = separator,
      charset = charset,
      cellTypeLocale = cellTypeLocale,
      dataLocale = dataLocale)
  }
}
pelamfi/pelam-scala-csv
src/main/scala/fi/pelam/csv/table/LocaleMetadata.scala
Scala
apache-2.0
1,521
/*
 * Copyright (C) FuseSource, Inc.
 * http://fusesource.com
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.fusesource.camel.tooling.util

import java.io.File

import org.junit.Assert._

/**
 * Verifies that a blueprint XML file using a namespace prefix parses into
 * the expected number of routes.
 */
class BlueprintWithPrefixTest extends RouteXmlTestSupport {

  test("parses valid XML file") {
    // assertRoutes performs the assertions itself; the previously bound
    // (and never read) `val x` result is now discarded.
    assertRoutes(
      new File(baseDir, "src/test/resources/blueprintWithPrefix.xml"),
      4,
      CamelNamespaces.blueprintNS)
  }
}
janstey/fuse
tooling/camel-tooling-util/src/test/scala/org/fusesource/camel/tooling/util/BlueprintWithPrefixTest.scala
Scala
apache-2.0
937
package webserviceclients.vrmretentionretain

import composition.TestConfig
import helpers.UnitSpec
import org.joda.time.DateTime
import play.api.libs.json.Json
import uk.gov.dvla.vehicles.presentation.common
import common.webserviceclients.common.VssWebEndUserDto
import common.webserviceclients.common.VssWebHeaderDto
import common.webserviceclients.emailservice.EmailServiceSendRequest
import common.webserviceclients.emailservice.From
import webserviceclients.fakes.DateServiceConstants.{DayValid, MonthValid, YearValid}
import webserviceclients.fakes.VrmRetentionRetainWebServiceConstants.ReplacementRegistrationNumberValid
import webserviceclients.paymentsolve.PaymentSolveUpdateRequest

/**
 * Checks the JSON serialisation of a VRMRetentionRetainRequest.
 */
class VRMRetentionRetainRequestSpec extends UnitSpec {

  "format" should {
    "write json with currentVRM" in {
      toJson.toString() should include(ReplacementRegistrationNumberValid)
    }

    "write json with ISO formatted data" in {
      // Transaction timestamp must serialise as an ISO-8601 date-time
      toJson.toString() should include("1970-11-25T00:00:00.000")
    }
  }

  // Midnight on the fixed valid test date
  private def dateTime = new DateTime(
    YearValid.toInt,
    MonthValid.toInt,
    DayValid.toInt,
    0,
    0
  )

  private val config = new TestConfig().build

  // Request fixture with a minimal payment-update payload
  private def request = VRMRetentionRetainRequest(
    buildWebHeader("1234567890"),
    currentVRM = ReplacementRegistrationNumberValid,
    transactionTimestamp = dateTime,
    PaymentSolveUpdateRequest(
      "",
      "",
      "",
      isPrimaryUrl = false,
      List(EmailServiceSendRequest("", "", None, From("", ""), "", None, None))
    ),
    Seq.empty,
    Seq.empty
  )

  private def toJson = Json.toJson(request)

  // Web header carrying the given tracking id plus config-derived codes
  private def buildWebHeader(trackingId: String): VssWebHeaderDto = {
    VssWebHeaderDto(
      transactionId = trackingId,
      originDateTime = new DateTime,
      applicationCode = config.applicationCode,
      serviceTypeCode = config.vssServiceTypeCode,
      buildEndUser()
    )
  }

  private def buildEndUser(): VssWebEndUserDto = {
    VssWebEndUserDto(endUserId = config.orgBusinessUnit, orgBusUnit = config.orgBusinessUnit)
  }
}
dvla/vrm-retention-online
test/webserviceclients/vrmretentionretain/VRMRetentionRetainRequestSpec.scala
Scala
mit
2,053
package im.mange.little.percentage

import org.scalatest.{MustMatchers, WordSpec}

/**
 * Checks that a percentage string and its decimal-fraction equivalent
 * construct the same Percentage value.
 */
class PercentageSpec extends WordSpec with MustMatchers {

  "can round trip from percentage" in {
    // "105.00" percent and the fraction "1.05" must denote the same value
    val fromPercentageString = Percentage.fromPercentage("105.00")
    val fromFractionString = Percentage.fromDecimalFraction("1.05")
    fromPercentageString mustBe fromFractionString
  }
}
alltonp/little
src/test/scala/im/mange/little/percentage/PercentageSpec.scala
Scala
apache-2.0
317
package ohnosequences

/**
 * Type aliases shared across the fastarious code base.
 */
package object fastarious {

  /** A single sequence symbol. */
  type Symbol = Char

  /** An integer score value. */
  type Score = Int

  // Numeric aliases layered on Double, from the underlying representation
  // up to the most specific name (ErrorP presumably denotes an error
  // probability — inferred from the name only).
  type Num = Double
  type Prob = Num
  type ErrorP = Prob
}
ohnosequences/fastarious
src/main/scala/package.scala
Scala
agpl-3.0
169
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.flink.streaming.api.scala.testutils

import org.apache.flink.api.common.functions.RuntimeContext
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.scala.function.RichProcessAllWindowFunction
import org.apache.flink.streaming.api.windowing.windows.Window
import org.apache.flink.util.Collector

/**
 * Identity window function that records, via flags on its companion object,
 * whether the rich-function lifecycle methods were invoked.
 */
class CheckingIdentityRichProcessAllWindowFunction[T, W <: Window]
  extends RichProcessAllWindowFunction[T, T, W] {

  /** Emits every element of the window unchanged. */
  override def process(context: Context, input: Iterable[T], out: Collector[T]): Unit = {
    for (value <- input) {
      out.collect(value)
    }
  }

  override def open(conf: Configuration): Unit = {
    super.open(conf)
    CheckingIdentityRichProcessAllWindowFunction.openCalled = true
  }

  override def close(): Unit = {
    super.close()
    CheckingIdentityRichProcessAllWindowFunction.closeCalled = true
  }

  override def setRuntimeContext(context: RuntimeContext): Unit = {
    super.setRuntimeContext(context)
    CheckingIdentityRichProcessAllWindowFunction.contextSet = true
  }
}

/** Shared flags tracking which lifecycle methods have been observed. */
object CheckingIdentityRichProcessAllWindowFunction {

  @volatile
  private[CheckingIdentityRichProcessAllWindowFunction] var closeCalled = false

  @volatile
  private[CheckingIdentityRichProcessAllWindowFunction] var openCalled = false

  @volatile
  private[CheckingIdentityRichProcessAllWindowFunction] var contextSet = false

  /** Clears all flags, typically before a test run. */
  def reset(): Unit = {
    closeCalled = false
    openCalled = false
    contextSet = false
  }

  /** Throws an AssertionError for any lifecycle method not observed. */
  def checkRichMethodCalls(): Unit = {
    if (!contextSet) {
      throw new AssertionError("context not set")
    }
    if (!openCalled) {
      throw new AssertionError("open() not called")
    }
    if (!closeCalled) {
      throw new AssertionError("close() not called")
    }
  }
}
DieBauer/flink
flink-streaming-scala/src/test/scala/org/apache/flink/streaming/api/scala/testutils/CheckingIdentityRichProcessAllWindowFunction.scala
Scala
apache-2.0
2,598
import _root_.sbt.Keys._ import _root_.sbt.Keys._ import sbt._ import sbt.Classpaths.publishTask import Keys._ import sbtassembly.Plugin._ import AssemblyKeys._ import scala.sys.process._ import scala.util.Properties.{ envOrNone => env } import scala.collection.JavaConversions._ object RootBuild extends Build { //////// Project definitions/configs /////// val OBSELETE_HADOOP_VERSION = "1.0.4" val DEFAULT_HADOOP_VERSION = "2.2.0" val SPARK_VERSION = "1.3.1-adatao" val YARN_ENABLED = env("SPARK_YARN").getOrElse("true").toBoolean // Target JVM version val SCALAC_JVM_VERSION = "jvm-1.6" val JAVAC_JVM_VERSION = "1.6" val theScalaVersion = "2.10.3" val majorScalaVersion = theScalaVersion.split(".[0-9]+$")(0) val targetDir = "target/scala-" + majorScalaVersion // to help mvn and sbt share the same target dir val rootOrganization = "io" val projectName = "ddf" val rootProjectName = projectName val rootVersion = "1.5.0-SNAPSHOT" //val rootVersion = if(YARN_ENABLED) { // "1.2-adatao" //} else { // "1.2-mesos" //} val projectOrganization = rootOrganization + "." 
+ projectName val coreProjectName = "ddf_core" val coreVersion = rootVersion val coreJarName = coreProjectName.toLowerCase + "_" + theScalaVersion + "-" + coreVersion + ".jar" val coreTestJarName = coreProjectName + "-" + coreVersion + "-tests.jar" val sparkProjectName = "ddf_spark" val sparkVersion = rootVersion // val sparkVersion = if(YARN_ENABLED) { // rootVersion // } else { // rootVersion + "-mesos" // } val sparkJarName = sparkProjectName.toLowerCase + "_" + theScalaVersion + "-" + rootVersion + ".jar" val sparkTestJarName = sparkProjectName.toLowerCase + "_" + theScalaVersion + "-" + rootVersion + "-tests.jar" val examplesProjectName = projectName + "_examples" val examplesVersion = rootVersion val examplesJarName = examplesProjectName + "-" + rootVersion + ".jar" val examplesTestJarName = examplesProjectName + "-" + rootVersion + "-tests.jar" // lazy val root = Project("root", file("."), settings = rootSettings) aggregate(core, spark, examples) lazy val root = Project("root", file("."), settings = rootSettings) aggregate(core, spark, examples) lazy val core = Project("core", file("core"), settings = coreSettings) // lazy val spark = Project("spark", file("spark"), settings = sparkSettings) dependsOn (core) lazy val spark = Project("spark", file("spark"), settings = sparkSettings) dependsOn (core) lazy val examples = Project("examples", file("examples"), settings = examplesSettings) dependsOn (spark) dependsOn (core) // A configuration to set an alternative publishLocalConfiguration lazy val MavenCompile = config("m2r") extend(Compile) lazy val publishLocalBoth = TaskKey[Unit]("publish-local", "publish local for m2 and ivy") //////// Variables/flags //////// // Hadoop version to build against. For example, "0.20.2", "0.20.205.0", or // "1.0.4" for Apache releases, or "0.20.2-cdh3u5" for Cloudera Hadoop. 
val HADOOP_VERSION = "1.0.4" val HADOOP_MAJOR_VERSION = "0" // For Hadoop 2 versions such as "2.0.0-mr1-cdh4.1.1", set the HADOOP_MAJOR_VERSION to "2" //val HADOOP_VERSION = "2.0.0-mr1-cdh4.1.1" //val HADOOP_MAJOR_VERSION = "2" val slf4jVersion = "1.7.2" val excludeAvro = ExclusionRule(organization = "org.apache.avro" , name = "avro-ipc") val excludeJacksonCore = ExclusionRule(organization = "org.codehaus.jackson", name = "jackson-core-asl") val excludeJacksonMapper = ExclusionRule(organization = "org.codehaus.jackson", name = "jackson-mapper-asl") val excludeNetty = ExclusionRule(organization = "org.jboss.netty", name = "netty") val excludeScala = ExclusionRule(organization = "org.scala-lang", name = "scala-library") val excludeGuava = ExclusionRule(organization = "com.google.guava", name = "guava-parent") val excludeAsm = ExclusionRule(organization = "asm", name = "asm") val excludeSpark = ExclusionRule(organization = "org.apache.spark", name = "spark-core_2.10") val excludeEverthing = ExclusionRule(organization = "*", name = "*") val excludeEverythingHackForMakePom = ExclusionRule(organization = "_MAKE_POM_EXCLUDE_ALL_", name = "_MAKE_POM_EXCLUDE_ALL_") // We define this explicitly rather than via unmanagedJars, so that make-pom will generate it in pom.xml as well // org % package % version val rforge = Seq( "net.rforge" % "REngine" % "2.1.1.compiled", "net.rforge" % "Rserve" % "1.8.2.compiled" ) val scalaArtifacts = Seq("jline", "scala-compiler", "scala-library", "scala-reflect") val scalaDependencies = scalaArtifacts.map( artifactId => "org.scala-lang" % artifactId % theScalaVersion) val spark_dependencies = Seq( "commons-configuration" % "commons-configuration" % "1.6", "com.google.code.gson"% "gson" % "2.2.2", "com.novocode" % "junit-interface" % "0.10" % "test", "net.sf" % "jsqlparser" % "0.9.8.5", "org.jblas" % "jblas" % "1.2.3", // for fast linear algebra //"org.apache.derby" % "derby" % "10.4.2.0", // "org.apache.spark" % "spark-streaming_2.10" % 
SPARK_VERSION excludeAll(excludeSpark), "org.apache.spark" % "spark-core_2.10" % SPARK_VERSION exclude("net.java.dev.jets3t", "jets3t") exclude("com.google.protobuf", "protobuf-java") exclude("org.jboss.netty", "netty") exclude("org.mortbay.jetty", "jetty"), //"org.apache.spark" % "spark-repl_2.10" % SPARK_VERSION excludeAll(excludeSpark) exclude("com.google.protobuf", "protobuf-java") exclude("io.netty", "netty-all") exclude("org.jboss.netty", "netty"), "org.apache.spark" % "spark-mllib_2.10" % SPARK_VERSION excludeAll(excludeSpark) exclude("io.netty", "netty-all"), "org.apache.spark" % "spark-sql_2.10" % SPARK_VERSION exclude("io.netty", "netty-all") exclude("org.jboss.netty", "netty") exclude("org.mortbay.jetty", "jetty"), "org.apache.spark" % "spark-hive_2.10" % SPARK_VERSION exclude("io.netty", "netty-all") exclude("org.jboss.netty", "netty") exclude("org.mortbay.jetty", "jetty") exclude("org.mortbay.jetty", "servlet-api"), //"org.apache.spark" % "spark-yarn_2.10" % SPARK_VERSION exclude("io.netty", "netty-all") "com.google.protobuf" % "protobuf-java" % "2.5.0" ) /////// Common/Shared project settings /////// def commonSettings = Defaults.defaultSettings ++ Seq( organization := projectOrganization, version := rootVersion, scalaVersion := theScalaVersion, scalacOptions := Seq("-unchecked", "-optimize", "-deprecation"), retrieveManaged := true, // Do create a lib_managed, so we have one place for all the dependency jars to copy to slaves, if needed retrievePattern := "[type]s/[artifact](-[revision])(-[classifier]).[ext]", transitiveClassifiers in Scope.GlobalScope := Seq("sources"), // Fork new JVMs for tests and set Java options for those fork in Test := true, parallelExecution in ThisBuild := false, javaOptions in Test ++= Seq("-Xmx2g"), // Only allow one test at a time, even across projects, since they run in the same JVM concurrentRestrictions in Global += Tags.limit(Tags.Test, 1), conflictManager := ConflictManager.strict, // This goes first for fastest 
resolution. We need this for rforge. // Now, sometimes missing .jars in ~/.m2 can lead to sbt compile errors. // In that case, clean up the ~/.m2 local repository using bin/clean-m2-repository.sh resolvers ++= Seq( "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository", //"Local ivy Repository" at "file://"+Path.userHome.absolutePath+"/.ivy2/local", "Adatao Mvnrepos Snapshots" at "https://raw.github.com/adatao/mvnrepos/master/snapshots", "Adatao Mvnrepos Releases" at "https://raw.github.com/adatao/mvnrepos/master/releases", "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/", "Cloudera Repository" at "https://repository.cloudera.com/artifactory/cloudera-repos/", "Sonatype Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots/" ), publishMavenStyle := true, // generate pom.xml with "sbt make-pom" libraryDependencies ++= Seq( "org.slf4j" % "slf4j-api" % slf4jVersion, "org.slf4j" % "slf4j-log4j12" % slf4jVersion, "commons-configuration" % "commons-configuration" % "1.6", "com.google.guava" % "guava" % "14.0.1", "com.google.code.gson"% "gson" % "2.2.2", "org.scalatest" % "scalatest_2.10" % "2.1.5" % "test", "org.scalacheck" %% "scalacheck" % "1.11.3" % "test", "com.novocode" % "junit-interface" % "0.10" % "test", "org.jblas" % "jblas" % "1.2.3", // for fast linear algebra "com.googlecode.matrix-toolkits-java" % "mtj" % "0.9.14", "net.sf" % "jsqlparser" % "0.9.8.5", "commons-io" % "commons-io" % "1.3.2", "org.easymock" % "easymock" % "3.1" % "test", "mysql" % "mysql-connector-java" % "5.1.25", "org.python" % "jython-standalone" % "2.7.0", "joda-time" % "joda-time" % "2.8.1", "org.joda" % "joda-convert" % "1.7" ), otherResolvers := Seq(Resolver.file("dotM2", file(Path.userHome + "/.m2/repository"))), publishLocalConfiguration in MavenCompile <<= (packagedArtifacts, deliverLocal, ivyLoggingLevel) map { (arts, _, level) => new PublishConfiguration(None, "dotM2", arts, Seq(), level) }, publishMavenStyle in 
MavenCompile := true, publishLocal in MavenCompile <<= publishTask(publishLocalConfiguration in MavenCompile, deliverLocal), publishLocalBoth <<= Seq(publishLocal in MavenCompile, publishLocal).dependOn, dependencyOverrides += "commons-lang" % "commons-lang" % "2.6", dependencyOverrides += "it.unimi.dsi" % "fastutil" % "6.4.4", dependencyOverrides += "log4j" % "log4j" % "1.2.17", dependencyOverrides += "org.slf4j" % "slf4j-api" % slf4jVersion, dependencyOverrides += "org.slf4j" % "slf4j-log4j12" % slf4jVersion, dependencyOverrides += "commons-io" % "commons-io" % "2.4", //tachyon 0.2.1 dependencyOverrides += "org.apache.httpcomponents" % "httpclient" % "4.1.3", //libthrift dependencyOverrides += "com.google.guava" % "guava" % "14.0.1", //spark-core dependencyOverrides += "org.codehaus.jackson" % "jackson-core-asl" % "1.8.8", dependencyOverrides += "org.codehaus.jackson" % "jackson-mapper-asl" % "1.8.8", dependencyOverrides += "org.codehaus.jackson" % "jackson-xc" % "1.8.8", dependencyOverrides += "org.codehaus.jackson" % "jackson-jaxrs" % "1.8.8", dependencyOverrides += "com.fasterxml.jackson.core" % "jackson-databind" % "2.4.4", dependencyOverrides += "com.fasterxml.jackson.core" % "jackson-annotations" % "2.4.4", dependencyOverrides += "com.google.code.findbugs" % "jsr305" % "2.0.1", dependencyOverrides += "com.thoughtworks.paranamer" % "paranamer" % "2.4.1", //net.liftweb conflict with avro dependencyOverrides += "org.xerial.snappy" % "snappy-java" % "1.0.5", //spark-core conflicts with avro dependencyOverrides += "org.apache.httpcomponents" % "httpcore" % "4.1.4", dependencyOverrides += "org.apache.avro" % "avro-ipc" % "1.7.4", dependencyOverrides += "org.apache.avro" % "avro" % "1.7.4", dependencyOverrides += "org.apache.zookeeper" % "zookeeper" % "3.4.5", dependencyOverrides += "org.scala-lang" % "scala-compiler" % "2.10.3", dependencyOverrides += "io.netty" % "netty" % "3.6.6.Final", dependencyOverrides += "org.ow2.asm" % "asm" % "4.0", 
//org.datanucleus#datanucleus-enhancer's dependencyOverrides += "asm" % "asm" % "3.2", dependencyOverrides += "commons-codec" % "commons-codec" % "1.4", dependencyOverrides += "org.scala-lang" % "scala-actors" % "2.10.1", dependencyOverrides += "org.scala-lang" % "scala-library" %"2.10.3", dependencyOverrides += "org.scala-lang" % "scala-reflect" %"2.10.3", dependencyOverrides += "com.sun.jersey" % "jersey-core" % "1.9", dependencyOverrides += "javax.xml.bind" % "jaxb-api" % "2.2.2", dependencyOverrides += "commons-collections" % "commons-collections" % "3.2.1", dependencyOverrides += "commons-logging" % "commons-logging" % "1.1.3", dependencyOverrides += "commons-net" % "commons-net" % "3.1", dependencyOverrides += "org.mockito" % "mockito-all" % "1.8.5", dependencyOverrides += "org.apache.commons" % "commons-math3" % "3.1.1", dependencyOverrides += "commons-httpclient" % "commons-httpclient" % "3.1", dependencyOverrides += "com.sun.jersey" % "jersey-json" % "1.9", dependencyOverrides += "com.sun.jersey" % "jersey-server" % "1.9", dependencyOverrides += "org.scalamacros" % "quasiquotes_2.10" % "2.0.0", dependencyOverrides += "commons-httpclient" % "commons-httpclient" % "3.1", dependencyOverrides += "org.apache.avro" % "avro-mapred" % "1.7.6", dependencyOverrides += "commons-logging" % "commons-logging" % "1.1.3", dependencyOverrides += "net.java.dev.jets3t" % "jets3t" % "0.7.1", dependencyOverrides += "com.google.code.gson"% "gson" % "2.3.1", pomExtra := ( <!-- ************************************************************************************************** IMPORTANT: This file is generated by "sbt make-pom" (bin/make-poms.sh). Edits will be overwritten! 
************************************************************************************************** --> <parent> <groupId>{rootOrganization}</groupId> <artifactId>{rootProjectName}</artifactId> <version>{rootVersion}</version> </parent> <build> <directory>${{basedir}}/{targetDir}</directory> <plugins> <plugin> <!-- Let SureFire know where the jars are --> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <version>2.15</version> <configuration> <reuseForks>false</reuseForks> <enableAssertions>false</enableAssertions> <environmentVariables> <RSERVER_JAR>${{basedir}}/{targetDir}/*.jar,${{basedir}}/{targetDir}/lib/*</RSERVER_JAR> </environmentVariables> <systemPropertyVariables> <spark.serializer>org.apache.spark.serializer.KryoSerializer</spark.serializer> <spark.kryo.registrator>io.ddf.spark.content.KryoRegistrator</spark.kryo.registrator> <spark.ui.port>8085</spark.ui.port> <log4j.configuration>ddf-local-log4j.properties</log4j.configuration> <derby.stream.error.file>${{basedir}}/target/derby.log</derby.stream.error.file> </systemPropertyVariables> <additionalClasspathElements> <additionalClasspathElement>${{basedir}}/conf/</additionalClasspathElement> <additionalClasspathElement>${{basedir}}/conf/local/</additionalClasspathElement> <additionalClasspathElement>${{basedir}}/../lib_managed/jars/*</additionalClasspathElement> </additionalClasspathElements> <includes> <include>**/*.java</include> </includes> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-assembly-plugin</artifactId> <version>2.2.2</version> <configuration> <descriptors> <descriptor>assembly.xml</descriptor> </descriptors> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-jar-plugin</artifactId> <version>2.2</version> <executions> <execution> <goals><goal>test-jar</goal></goals> </execution> </executions> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> 
<artifactId>maven-dependency-plugin</artifactId> <version>2.10</version> </plugin> <plugin> <groupId>net.alchim31.maven</groupId> <artifactId>scala-maven-plugin</artifactId> <version>3.2.0</version> <configuration> <recompileMode>incremental</recompileMode> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-checkstyle-plugin</artifactId> <version>2.6</version> <configuration> <configLocation>${{basedir}}/../src/main/resources/sun_checks.xml</configLocation> <propertyExpansion>checkstyle.conf.dir=${{basedir}}/../src/main/resources</propertyExpansion> <outputFileFormat>xml</outputFileFormat> </configuration> </plugin> </plugins> </build> <profiles> <profile> <id>local</id> <activation><property><name>!dist</name></property> <activeByDefault>true</activeByDefault> </activation> <build> <directory>${{basedir}}/{targetDir}</directory> <plugins> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <version>2.15</version> <configuration> <additionalClasspathElements> <additionalClasspathElement>${{basedir}}/conf/local</additionalClasspathElement> </additionalClasspathElements> </configuration> </plugin> </plugins> </build> </profile> <profile> <id>distributed</id> <activation><property><name>dist</name></property></activation> <build> <directory>${{basedir}}/{targetDir}</directory> <plugins> <plugin> <!-- Let SureFire know where the jars are --> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <version>2.15</version> <configuration> <additionalClasspathElements> <additionalClasspathElement>${{basedir}}/../lib_managed/jars/*</additionalClasspathElement> <additionalClasspathElement>${{basedir}}/conf/distributed/</additionalClasspathElement> <additionalClasspathElement>${{HADOOP_HOME}}/conf/</additionalClasspathElement> <additionalClasspathElement>${{HIVE_HOME}}/conf/</additionalClasspathElement> </additionalClasspathElements> 
</configuration> </plugin> </plugins> </build> </profile> <profile> <id>nospark</id> <activation><property><name>nospark</name></property></activation> <build> <directory>${{basedir}}/{targetDir}</directory> <plugins> <plugin> <!-- Let SureFire know where the jars are --> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <version>2.15</version> <configuration> <additionalClasspathElements> <additionalClasspathElement>${{basedir}}/conf/local</additionalClasspathElement> </additionalClasspathElements> <includes><include>**</include></includes> <excludes><exclude>**/spark/**</exclude></excludes> </configuration> </plugin> </plugins> </build> </profile> <profile> <id>package</id> <activation><property><name>package</name></property></activation> <build> <directory>${{basedir}}/{targetDir}</directory> <plugins> <plugin> <!-- Let SureFire know where the jars are --> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <version>2.15</version> <configuration> <additionalClasspathElements> <additionalClasspathElement>${{basedir}}/conf/local</additionalClasspathElement> </additionalClasspathElements> <includes><include>**/${{path}}/**</include></includes> </configuration> </plugin> </plugins> </build> </profile> </profiles> ) ) // end of commonSettings /////// Individual project settings ////// def rootSettings = commonSettings ++ Seq(publish := {}) def coreSettings = commonSettings ++ Seq( name := coreProjectName, //javaOptions in Test <+= baseDirectory map {dir => "-Dspark.classpath=" + dir + "/../lib_managed/jars/*"}, // Add post-compile activities: touch the maven timestamp files so mvn doesn't have to compile again compile in Compile <<= compile in Compile andFinally { List("sh", "-c", "touch core/" + targetDir + "/*timestamp") }, libraryDependencies += "org.xerial" % "sqlite-jdbc" % "3.7.2", libraryDependencies += "org.apache.hadoop" % "hadoop-common" % "2.2.0" exclude("org.mortbay.jetty", 
"servlet-api") exclude("javax.servlet", "servlet-api"), libraryDependencies += "org.jgrapht" % "jgrapht-core" % "0.9.0", libraryDependencies ++= scalaDependencies, testOptions in Test += Tests.Argument("-oI") ) ++ assemblySettings ++ extraAssemblySettings val java_opts = if(System.getenv("JAVA_OPTS") != null) { System.getenv("JAVA_OPTS").split(" ").filter(x => x.startsWith("-D")).map { s => s.stripPrefix("-D") }.map(x => x.split("=")).filter(x => x.size > 1).map(x => (x(0), x(1))) } else { Array[(String, String)]() } val isLocal = scala.util.Properties.envOrElse("SPARK_MASTER", "local").contains("local") val getEnvCommand = java_opts.map{ case (key, value) => "System.setProperty(\\"%s\\", \\"%s\\")".format(key, value) }.mkString("\\n|") def sparkSettings = commonSettings ++ Seq( name := sparkProjectName, javaOptions in Test <+= baseDirectory map {dir => "-Dspark.classpath=" + dir + "/../lib_managed/jars/*"}, // Add post-compile activities: touch the maven timestamp files so mvn doesn't have to compile again compile in Compile <<= compile in Compile andFinally { List("sh", "-c", "touch spark/" + targetDir + "/*timestamp") }, resolvers ++= Seq( //"JBoss Repository" at "http://repository.jboss.org/nexus/content/repositories/releases/", //"Spray Repository" at "http://repo.spray.cc/", //"Twitter4J Repository" at "http://twitter4j.org/maven2/" //"Cloudera Repository" at "https://repository.cloudera.com/artifactory/cloudera-repos/" ), testOptions in Test += Tests.Argument("-oI"), libraryDependencies ++= rforge, libraryDependencies ++= spark_dependencies, if(isLocal) { initialCommands in console := s""" |$getEnvCommand |import io.ddf.DDFManager |val manager = DDFManager.get("spark") |manager.sql2txt("drop table if exists airline") |manager.sql2txt("create external table airline (Year int,Month int,DayofMonth int,DayOfWeek int, " + |"aDepTime int,CRSDepTime int,ArrTime int,CRSArrTime int,UniqueCarrier string, " + |"FlightNum int, TailNum string, ActualElapsedTime int, 
CRSElapsedTime int, AirTime int, " + |"ArrDelay int, DepDelay int, Origin string, Dest string, Distance int, TaxiIn int, TaxiOut int, " + |"Cancelled int, CancellationCode string, Diverted string, CarrierDelay int, WeatherDelay int, " + |"NASDelay int, SecurityDelay int, LateAircraftDelay int ) " + |"ROW FORMAT DELIMITED FIELDS TERMINATED BY ','") |manager.sql2txt("load data local inpath 'resources/test/airlineBig.csv' into table airline") |println("SparkDDFManager is available as the DDF manager")""".stripMargin } else { initialCommands in console := s""" |$getEnvCommand |import io.ddf.DDFManager |val manager = DDFManager.get("spark") |println("SparkDDFManager is available as the DDF manager") """.stripMargin } ) ++ assemblySettings ++ extraAssemblySettings def examplesSettings = commonSettings ++ Seq( name := examplesProjectName, //javaOptions in Test <+= baseDirectory map {dir => "-Dspark.classpath=" + dir + "/../lib_managed/jars/*"}, // Add post-compile activities: touch the maven timestamp files so mvn doesn't have to compile again compile in Compile <<= compile in Compile andFinally { List("sh", "-c", "touch examples/" + targetDir + "/*timestamp") } ) ++ assemblySettings ++ extraAssemblySettings def extraAssemblySettings() = Seq(test in assembly := {}) ++ Seq( mergeStrategy in assembly := { case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard case m if m.toLowerCase.endsWith("eclipsef.sf") => MergeStrategy.discard case m if m.toLowerCase.endsWith("eclipsef.rsa") => MergeStrategy.discard case "reference.conf" => MergeStrategy.concat case _ => MergeStrategy.first } ) }
ddf-project/DDF
project/RootBuild.scala
Scala
apache-2.0
25,878
/*
 * Copyright 2011-2022 GatlingCorp (https://gatling.io)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.gatling.javaapi.mqtt.internal

import io.gatling.core.{ Predef => CorePredef }
import io.gatling.core.check.CheckBuilder
import io.gatling.core.check.bytes.BodyBytesCheckType
import io.gatling.core.check.jmespath.JmesPathCheckType
import io.gatling.core.check.jsonpath.JsonPathCheckType
import io.gatling.core.check.regex.RegexCheckType
import io.gatling.core.check.string.BodyStringCheckType
import io.gatling.core.check.substring.SubstringCheckType
import io.gatling.javaapi.core.internal.CoreCheckType
import io.gatling.mqtt.{ Predef => MqttPredef }
import io.gatling.mqtt.check.MessageCorrelator

import com.fasterxml.jackson.databind.JsonNode

object MessageCorrelators {

  /**
   * Bridges a Java-API check builder to the Scala MQTT [[MessageCorrelator]].
   *
   * For every supported core check type, the underlying Scala check builder is
   * materialized twice — once against the text-body materializer and once
   * against the buffer-body materializer — and both checks are wrapped in a
   * single [[MessageCorrelator]].
   *
   * @param javaCheck the Java DSL check builder to convert
   * @return the correlator holding the text and buffer variants of the check
   * @throws IllegalArgumentException for check types the MQTT DSL doesn't support
   */
  def toScalaCorrelator(javaCheck: io.gatling.javaapi.core.CheckBuilder): MessageCorrelator = {
    val underlying = javaCheck.asScala
    javaCheck.`type` match {
      case CoreCheckType.BodyBytes =>
        val builder = underlying.asInstanceOf[CheckBuilder[BodyBytesCheckType, Array[Byte]]]
        MessageCorrelator(
          builder.build(MqttPredef.MqttTextBodyBytesCorrelatorMaterializer),
          builder.build(MqttPredef.MqttBufferBodyBytesCorrelatorMaterializer)
        )
      case CoreCheckType.BodyLength =>
        val builder = underlying.asInstanceOf[CheckBuilder[BodyBytesCheckType, Int]]
        MessageCorrelator(
          builder.build(MqttPredef.MqttTextBodyLengthCorrelatorMaterializer),
          builder.build(MqttPredef.MqttBufferBodyLengthCorrelatorMaterializer)
        )
      case CoreCheckType.BodyString =>
        val builder = underlying.asInstanceOf[CheckBuilder[BodyStringCheckType, String]]
        MessageCorrelator(
          builder.build(MqttPredef.MqttTextBodyStringCorrelatorMaterializer),
          builder.build(MqttPredef.MqttBufferBodyStringCorrelatorMaterializer)
        )
      case CoreCheckType.Substring =>
        val builder = underlying.asInstanceOf[CheckBuilder[SubstringCheckType, String]]
        MessageCorrelator(
          builder.build(MqttPredef.MqttTextSubstringCorrelatorMaterializer),
          builder.build(MqttPredef.MqttBufferSubstringCorrelatorMaterializer)
        )
      case CoreCheckType.Regex =>
        val builder = underlying.asInstanceOf[CheckBuilder[RegexCheckType, String]]
        MessageCorrelator(
          builder.build(MqttPredef.MqttTextRegexCorrelatorMaterializer),
          builder.build(MqttPredef.MqttBufferRegexCorrelatorMaterializer)
        )
      case CoreCheckType.JsonPath =>
        val builder = underlying.asInstanceOf[CheckBuilder[JsonPathCheckType, JsonNode]]
        MessageCorrelator(
          builder.build(MqttPredef.mqttTextJsonPathMaterializer(CorePredef.defaultJsonParsers)),
          builder.build(MqttPredef.mqttBufferJsonPathMaterializer(CorePredef.defaultJsonParsers))
        )
      case CoreCheckType.JmesPath =>
        val builder = underlying.asInstanceOf[CheckBuilder[JmesPathCheckType, JsonNode]]
        MessageCorrelator(
          builder.build(MqttPredef.mqttTextJmesPathMaterializer(CorePredef.defaultJsonParsers)),
          builder.build(MqttPredef.mqttBufferJmesPathMaterializer(CorePredef.defaultJsonParsers))
        )
      case unknown => throw new IllegalArgumentException(s"MQTT DSL doesn't support $unknown")
    }
  }
}
gatling/gatling
gatling-mqtt-java/src/main/scala/io/gatling/javaapi/mqtt/internal/MessageCorrelators.scala
Scala
apache-2.0
4,284
package wdl

/**
 * Mixin for scopes that are only valid inside a workflow.
 *
 * Extends the project-declared `Scope`; relies on `ancestry` and
 * `fullyQualifiedName` provided by that trait.
 */
trait WorkflowScoped extends Scope {

  /**
   * The nearest enclosing [[WdlWorkflow]] found by walking `ancestry`.
   *
   * @throws IllegalStateException if no workflow appears among the ancestors —
   *         by grammar constraint this scope must always be nested in one.
   */
  def parentWorkflow: WdlWorkflow = ancestry.collectFirst({ case w: WdlWorkflow => w }).getOrElse(
    throw new IllegalStateException(s"Grammar constraint violation: $fullyQualifiedName should be contained in a workflow")
  )
}
ohsu-comp-bio/cromwell
wdl/src/main/scala/wdl/WorkflowScoped.scala
Scala
bsd-3-clause
279
package uniandes.cupi2.dogBook.mundo

// NOTE(review): this import is completely shadowed by java.util.Properties
// below and is effectively unused; kept only to avoid changing the file's
// import surface in case other tooling relies on it.
import scala.util.Properties
import java.io.FileInputStream
import java.io.File
import java.util.Properties

/**
 * In-memory model for the DogBook exercise: holds the list of dogs loaded
 * from a properties file and tracks which dog is currently displayed.
 */
class DogBookScala {

  // Dogs in display order (file order once cargarPerros has run).
  private var perros: List[PerroScala] = List()

  // Index of the currently displayed dog within `perros`.
  private var perroActual: Int = 0

  /**
   * Finds a dog by name; when found, it also becomes the current dog.
   *
   * NOTE(review): the inferred result type widens to Any — a hit returns the
   * PerroScala itself while a miss returns None. Kept as-is for caller
   * compatibility; consider returning Option[PerroScala] instead.
   */
  def buscarPerroPorNombre(nombrePerro: String) = {
    val elPerro = perros.find(_.nombre.equals(nombrePerro))
    elPerro match {
      case Some(p) =>
        perroActual = perros.indexOf(p)
        p
      case None => None
    }
  }

  /** Number of dogs currently loaded. */
  def darCantidadPerros = perros.length

  /** Count of the named reaction on the current dog, or 0 when it has none. */
  def darCantidadReaccionPerros(nombreReaccion: String) =
    darPerroActual.darReaccion(nombreReaccion).map(_.cantidad).getOrElse(0)

  /**
   * The named reaction of the current dog, or null when absent.
   * NOTE(review): null is kept for caller compatibility; an Option would be safer.
   */
  def darReaccionPerros(nombreReaccion: String) =
    darPerroActual.darReaccion(nombreReaccion).getOrElse(null)

  /** The dog currently displayed. */
  def darPerroActual = perros(perroActual)

  /** Moves to the previous dog; throws when already at the first one. */
  def darPerroAnterior() = {
    if (perroActual == 0) {
      throw new Exception("Se encuentra en el primer perro. ")
    }
    perroActual -= 1
    darPerroActual
  }

  /** Moves to the next dog; throws when already at the last one. */
  def darPerroSiguiente() = {
    if (perroActual == perros.length - 1) {
      throw new Exception("Se encuentra en el último perro. ")
    }
    perroActual += 1
    darPerroActual
  }

  /** Jumps to the first dog; throws when already there. */
  def darPrimerPerro() = {
    if (perroActual == 0) {
      throw new Exception("Se encuentra en el primer perro. ")
    }
    perroActual = 0
    darPerroActual
  }

  /** Jumps to the last dog; throws when already there. */
  def darUltimoPerro() = {
    if (perroActual == perros.length - 1) {
      throw new Exception("Se encuentra en el último perro. ")
    }
    perroActual = perros.length - 1
    darPerroActual
  }

  /**
   * The dog with the most reactions; ties keep the earlier dog.
   * NOTE(review): throws IndexOutOfBoundsException when no dogs are loaded.
   */
  def darPerroMasPopular = {
    perros.foldLeft(perros(0)) { (x: PerroScala, y: PerroScala) =>
      if (x.darCantidadReacciones >= y.darCantidadReacciones) x else y
    }
  }

  /** Registers the named reaction on the current dog. */
  def reaccionar(nombreReaccion: String) = perros(perroActual).reaccionar(nombreReaccion)

  def metodo1 = "Opcion 1"

  def metodo2 = "Opcion 2"

  /**
   * Loads the dogs from a java.util.Properties file.
   *
   * Reads `dogBook.cantidadPerros` entries keyed `dogBook.perro<i>.<field>`
   * and rebuilds `perros` in file order.
   *
   * @param archivo path to the properties file
   */
  def cargarPerros(archivo: String) = {
    // Load the persisted properties; close the stream even if load fails
    // (the original leaked the FileInputStream on a load error).
    val persistencia = new Properties()
    val fis = new FileInputStream(new File(archivo))
    try {
      persistencia.load(fis)
    } finally {
      fis.close()
    }

    // Number of dogs recorded in the file.
    val cantidadPerros = persistencia.getProperty("dogBook.cantidadPerros").toInt

    for (i <- 1 to cantidadPerros) {
      val nombre = persistencia.getProperty("dogBook.perro" + i + ".nombre")
      val edad = persistencia.getProperty("dogBook.perro" + i + ".edad").toInt
      val raza = persistencia.getProperty("dogBook.perro" + i + ".raza")
      val sexo = persistencia.getProperty("dogBook.perro" + i + ".sexo")
      val foto = persistencia.getProperty("dogBook.perro" + i + ".foto")
      val meGusta = persistencia.getProperty("dogBook.perro" + i + ".meGusta")
      val noMeGusta = persistencia.getProperty("dogBook.perro" + i + ".noMeGusta")
      val buscoPareja = persistencia.getProperty("dogBook.perro" + i + ".buscoPareja").equals("SI")

      // Build the dog and prepend; the list is reversed afterwards to
      // restore file order (prepend + reverse is O(n) overall).
      val perro = new PerroScala(nombre, edad, raza, sexo, foto, meGusta, noMeGusta, buscoPareja)
      perros = perro :: perros
    }
    perros = perros.reverse
  }
}
ncardozo/scalakids
DogBook/src/uniandes/cupi2/dogBook/mundo/DogBookScala.scala
Scala
gpl-3.0
3,285
/*
 * Copyright 2017 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.ct.ct600e.v3

import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600e.v3.retriever.CT600EBoxRetriever

/**
 * CT600E (v3) box E85 — "Income Other sources".
 *
 * Optional integer input box; validation only requires the value, when
 * present, to be zero or positive.
 */
case class E85(value: Option[Int])
  extends CtBoxIdentifier("Income Other sources")
    with CtOptionalInteger
    with Input
    with ValidatableBox[CT600EBoxRetriever] {

  // Delegates to the shared zero-or-positive-integer rule from ValidatableBox.
  override def validate(boxRetriever: CT600EBoxRetriever): Set[CtValidation] =
    validateZeroOrPositiveInteger(this)
}
liquidarmour/ct-calculations
src/main/scala/uk/gov/hmrc/ct/ct600e/v3/E85.scala
Scala
apache-2.0
1,007
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.api.r

import java.io._
import java.net.{InetAddress, ServerSocket}
import java.util.Arrays
import java.util.{Map => JMap}

import scala.collection.JavaConverters._
import scala.io.Source
import scala.reflect.ClassTag
import scala.util.Try

import org.apache.spark._
import org.apache.spark.api.java.{JavaPairRDD, JavaRDD, JavaSparkContext}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.util.Utils

/**
 * Base class for RDDs whose partitions are computed by an external R worker
 * process. Subclasses only decide how a single serialized element is read
 * back from the worker (`readData`).
 *
 * @param parent        upstream RDD providing the input elements
 * @param numPartitions number of partitions communicated to the R side
 * @param func          serialized R closure to apply
 * @param deserializer  format the R worker uses to read its input
 * @param serializer    format the R worker uses to write its output
 * @param packageNames  serialized list of R packages to load in the worker
 * @param broadcastVars broadcast variables forwarded to the worker
 */
private abstract class BaseRRDD[T: ClassTag, U: ClassTag](
    parent: RDD[T],
    numPartitions: Int,
    func: Array[Byte],
    deserializer: String,
    serializer: String,
    packageNames: Array[Byte],
    broadcastVars: Array[Broadcast[Object]])
  extends RDD[U](parent) with Logging {

  // Stream carrying the R worker's output back to the JVM; opened in compute().
  protected var dataStream: DataInputStream = _
  // Wall-clock start of compute(), in seconds; used for TIMING_DATA reporting.
  private var bootTime: Double = _

  override def getPartitions: Array[Partition] = parent.partitions

  override def compute(partition: Partition, context: TaskContext): Iterator[U] = {
    // Timing start
    bootTime = System.currentTimeMillis / 1000.0

    // The parent may be also an RRDD, so we should launch it first.
    val parentIterator = firstParent[T].iterator(partition, context)

    // we expect two connections
    val serverSocket = new ServerSocket(0, 2, InetAddress.getByName("localhost"))
    val listenPort = serverSocket.getLocalPort()

    // The stdout/stderr is shared by multiple tasks, because we use one daemon
    // to launch child process as worker.
    val errThread = RRDD.createRWorker(listenPort)

    // We use two sockets to separate input and output, then it's easy to manage
    // the lifecycle of them to avoid deadlock.
    // TODO: optimize it to use one socket

    // the socket used to send out the input of task
    serverSocket.setSoTimeout(10000)
    val inSocket = serverSocket.accept()
    startStdinThread(inSocket.getOutputStream(), parentIterator, partition.index)

    // the socket used to receive the output of task
    val outSocket = serverSocket.accept()
    val inputStream = new BufferedInputStream(outSocket.getInputStream)
    dataStream = new DataInputStream(inputStream)
    serverSocket.close()

    try {
      // The iterator pre-reads one element so hasNext can detect the
      // null end-of-stream marker and close the data stream eagerly.
      new Iterator[U] {
        def next(): U = {
          val obj = _nextObj
          if (hasNext) {
            _nextObj = read()
          }
          obj
        }

        var _nextObj = read()

        def hasNext(): Boolean = {
          val hasMore = (_nextObj != null)
          if (!hasMore) {
            dataStream.close()
          }
          hasMore
        }
      }
    } catch {
      case e: Exception =>
        // Preserve the original failure as the cause (it was dropped before).
        throw new SparkException("R computation failed with\n " + errThread.getLines(), e)
    }
  }

  /**
   * Start a thread to write RDD data to the R process.
   */
  private def startStdinThread[A](  // renamed type param: `T` shadowed the class's T
      output: OutputStream,
      iter: Iterator[A],
      partition: Int): Unit = {
    val env = SparkEnv.get
    val taskContext = TaskContext.get()
    val bufferSize = System.getProperty("spark.buffer.size", "65536").toInt
    val stream = new BufferedOutputStream(output, bufferSize)

    new Thread("writer for R") {
      override def run(): Unit = {
        try {
          SparkEnv.set(env)
          TaskContext.setTaskContext(taskContext)
          val dataOut = new DataOutputStream(stream)
          dataOut.writeInt(partition)

          SerDe.writeString(dataOut, deserializer)
          SerDe.writeString(dataOut, serializer)

          dataOut.writeInt(packageNames.length)
          dataOut.write(packageNames)

          dataOut.writeInt(func.length)
          dataOut.write(func)

          dataOut.writeInt(broadcastVars.length)
          broadcastVars.foreach { broadcast =>
            // TODO(shivaram): Read a Long in R to avoid this cast
            dataOut.writeInt(broadcast.id.toInt)
            // TODO: Pass a byte array from R to avoid this cast ?
            val broadcastByteArr = broadcast.value.asInstanceOf[Array[Byte]]
            dataOut.writeInt(broadcastByteArr.length)
            dataOut.write(broadcastByteArr)
          }

          dataOut.writeInt(numPartitions)

          // 0/1 flag telling the R side whether any elements follow.
          if (!iter.hasNext) {
            dataOut.writeInt(0)
          } else {
            dataOut.writeInt(1)
          }

          val printOut = new PrintStream(stream)

          def writeElem(elem: Any): Unit = {
            if (deserializer == SerializationFormats.BYTE) {
              val elemArr = elem.asInstanceOf[Array[Byte]]
              dataOut.writeInt(elemArr.length)
              dataOut.write(elemArr)
            } else if (deserializer == SerializationFormats.ROW) {
              dataOut.write(elem.asInstanceOf[Array[Byte]])
            } else if (deserializer == SerializationFormats.STRING) {
              // write string(for StringRRDD)
              // scalastyle:off println
              printOut.println(elem)
              // scalastyle:on println
            }
          }

          for (elem <- iter) {
            elem match {
              case (key, value) =>
                writeElem(key)
                writeElem(value)
              case _ =>
                writeElem(elem)
            }
          }
          stream.flush()
        } catch {
          // TODO: We should propagate this error to the task thread
          case e: Exception =>
            logError("R Writer thread got an exception", e)
        } finally {
          Try(output.close())
        }
      }
    }.start()
  }

  /** Reads one serialized element of `length` bytes from `dataStream`. */
  protected def readData(length: Int): U

  /**
   * Reads the next record from the worker. A TIMING_DATA marker is logged and
   * skipped; a non-negative length is delegated to `readData`.
   * NOTE(review): any other negative length raises a MatchError — assumed
   * unreachable by protocol, verify against the R worker implementation.
   */
  protected def read(): U = {
    try {
      val length = dataStream.readInt()

      length match {
        case SpecialLengths.TIMING_DATA =>
          // Timing data from R worker
          val boot = dataStream.readDouble - bootTime
          val init = dataStream.readDouble
          val broadcast = dataStream.readDouble
          val input = dataStream.readDouble
          val compute = dataStream.readDouble
          val output = dataStream.readDouble
          logInfo(
            ("Times: boot = %.3f s, init = %.3f s, broadcast = %.3f s, " +
              "read-input = %.3f s, compute = %.3f s, write-output = %.3f s, " +
              "total = %.3f s").format(
                boot,
                init,
                broadcast,
                input,
                compute,
                output,
                boot + init + broadcast + input + compute + output))
          read()
        case length if length >= 0 =>
          readData(length)
      }
    } catch {
      case eof: EOFException =>
        // Fixed misspelling in the user-visible error message ("cranshed").
        throw new SparkException("R worker exited unexpectedly (crashed)", eof)
    }
  }
}

/**
 * Form an RDD[(Int, Array[Byte])] from key-value pairs returned from R.
 * This is used by SparkR's shuffle operations.
 */
private class PairwiseRRDD[T: ClassTag](
    parent: RDD[T],
    numPartitions: Int,
    hashFunc: Array[Byte],
    deserializer: String,
    packageNames: Array[Byte],
    broadcastVars: Array[Object])
  extends BaseRRDD[T, (Int, Array[Byte])](
    parent, numPartitions, hashFunc, deserializer,
    SerializationFormats.BYTE, packageNames,
    broadcastVars.map(x => x.asInstanceOf[Broadcast[Object]])) {

  override protected def readData(length: Int): (Int, Array[Byte]) = {
    length match {
      // Protocol: a "length" of 2 announces a (hashedKey, bytes) pair.
      case length if length == 2 =>
        val hashedKey = dataStream.readInt()
        val contentPairsLength = dataStream.readInt()
        val contentPairs = new Array[Byte](contentPairsLength)
        dataStream.readFully(contentPairs)
        (hashedKey, contentPairs)
      case _ => null
    }
  }

  lazy val asJavaPairRDD : JavaPairRDD[Int, Array[Byte]] = JavaPairRDD.fromRDD(this)
}

/**
 * An RDD that stores serialized R objects as Array[Byte].
 */
private class RRDD[T: ClassTag](
    parent: RDD[T],
    func: Array[Byte],
    deserializer: String,
    serializer: String,
    packageNames: Array[Byte],
    broadcastVars: Array[Object])
  extends BaseRRDD[T, Array[Byte]](
    parent, -1, func, deserializer, serializer, packageNames,
    broadcastVars.map(x => x.asInstanceOf[Broadcast[Object]])) {

  override protected def readData(length: Int): Array[Byte] = {
    length match {
      case length if length > 0 =>
        val obj = new Array[Byte](length)
        dataStream.readFully(obj)
        obj
      case _ => null
    }
  }

  lazy val asJavaRDD : JavaRDD[Array[Byte]] = JavaRDD.fromRDD(this)
}

/**
 * An RDD that stores R objects as Array[String].
 */
private class StringRRDD[T: ClassTag](
    parent: RDD[T],
    func: Array[Byte],
    deserializer: String,
    packageNames: Array[Byte],
    broadcastVars: Array[Object])
  extends BaseRRDD[T, String](
    parent, -1, func, deserializer, SerializationFormats.STRING, packageNames,
    broadcastVars.map(x => x.asInstanceOf[Broadcast[Object]])) {

  override protected def readData(length: Int): String = {
    length match {
      case length if length > 0 =>
        SerDe.readStringBytes(dataStream, length)
      case _ => null
    }
  }

  lazy val asJavaRDD : JavaRDD[String] = JavaRDD.fromRDD(this)
}

/** Sentinel lengths used on the JVM <-> R wire protocol. */
private object SpecialLengths {
  val TIMING_DATA = -1
}

/**
 * Background thread that drains an InputStream line by line, logging each
 * line and keeping the last `errBufferSize` lines in a ring buffer so they
 * can be attached to error messages via getLines().
 */
private[r] class BufferedStreamThread(
    in: InputStream,
    name: String,
    errBufferSize: Int) extends Thread(name) with Logging {
  val lines = new Array[String](errBufferSize)
  var lineIdx = 0

  override def run() {
    for (line <- Source.fromInputStream(in).getLines) {
      synchronized {
        lines(lineIdx) = line
        lineIdx = (lineIdx + 1) % errBufferSize
      }
      logInfo(line)
    }
  }

  /** Buffered output, oldest first, as a single newline-joined string. */
  def getLines(): String = synchronized {
    (0 until errBufferSize).filter {
      x => lines((x + lineIdx) % errBufferSize) != null
    }.map {
      x => lines((x + lineIdx) % errBufferSize)
    }.mkString("\n")
  }
}

private[r] object RRDD {
  // Because forking processes from Java is expensive, we prefer to launch
  // a single R daemon (daemon.R) and tell it to fork new workers for our tasks.
  // This daemon currently only works on UNIX-based systems now, so we should
  // also fall back to launching workers (worker.R) directly.
  private[this] var errThread: BufferedStreamThread = _
  private[this] var daemonChannel: DataOutputStream = _

  /**
   * Builds a JavaSparkContext for SparkR with the given master/app settings,
   * Spark configuration map, and executor environment, then registers `jars`.
   */
  def createSparkContext(
      master: String,
      appName: String,
      sparkHome: String,
      jars: Array[String],
      sparkEnvirMap: JMap[Object, Object],
      sparkExecutorEnvMap: JMap[Object, Object]): JavaSparkContext = {

    val sparkConf = new SparkConf().setAppName(appName)
      .setSparkHome(sparkHome)

    // Override `master` if we have a user-specified value
    if (master != "") {
      sparkConf.setMaster(master)
    } else {
      // If conf has no master set it to "local" to maintain
      // backwards compatibility
      sparkConf.setIfMissing("spark.master", "local")
    }

    for ((name, value) <- sparkEnvirMap.asScala) {
      sparkConf.set(name.toString, value.toString)
    }
    for ((name, value) <- sparkExecutorEnvMap.asScala) {
      sparkConf.setExecutorEnv(name.toString, value.toString)
    }

    val jsc = new JavaSparkContext(sparkConf)
    jars.foreach { jar =>
      jsc.addJar(jar)
    }
    jsc
  }

  /**
   * Start a thread to print the process's stderr to ours
   * (stderr is merged into stdout via redirectErrorStream in createRProcess).
   */
  private def startStdoutThread(proc: Process): BufferedStreamThread = {
    val BUFFER_SIZE = 100
    val thread = new BufferedStreamThread(proc.getInputStream, "stdout reader for R", BUFFER_SIZE)
    thread.setDaemon(true)
    thread.start()
    thread
  }

  /** Launches `script` via Rscript, telling it to connect back on `port`. */
  private def createRProcess(port: Int, script: String): BufferedStreamThread = {
    // "spark.sparkr.r.command" is deprecated and replaced by "spark.r.command",
    // but kept here for backward compatibility.
    val sparkConf = SparkEnv.get.conf
    var rCommand = sparkConf.get("spark.sparkr.r.command", "Rscript")
    rCommand = sparkConf.get("spark.r.command", rCommand)

    val rOptions = "--vanilla"
    val rLibDir = RUtils.sparkRPackagePath(isDriver = false)
    val rExecScript = rLibDir(0) + "/SparkR/worker/" + script
    val pb = new ProcessBuilder(Arrays.asList(rCommand, rOptions, rExecScript))
    // Unset the R_TESTS environment variable for workers.
    // This is set by R CMD check as startup.Rs
    // (http://svn.r-project.org/R/trunk/src/library/tools/R/testing.R)
    // and confuses worker script which tries to load a non-existent file
    pb.environment().put("R_TESTS", "")
    pb.environment().put("SPARKR_RLIBDIR", rLibDir.mkString(","))
    pb.environment().put("SPARKR_WORKER_PORT", port.toString)
    pb.redirectErrorStream(true)  // redirect stderr into stdout
    val proc = pb.start()
    val errThread = startStdoutThread(proc)
    errThread
  }

  /**
   * ProcessBuilder used to launch worker R processes.
   */
  def createRWorker(port: Int): BufferedStreamThread = {
    val useDaemon = SparkEnv.get.conf.getBoolean("spark.sparkr.use.daemon", true)
    if (!Utils.isWindows && useDaemon) {
      synchronized {
        if (daemonChannel == null) {
          // we expect one connection
          val serverSocket = new ServerSocket(0, 1, InetAddress.getByName("localhost"))
          val daemonPort = serverSocket.getLocalPort
          errThread = createRProcess(daemonPort, "daemon.R")
          // the socket used to send out the input of task
          serverSocket.setSoTimeout(10000)
          val sock = serverSocket.accept()
          daemonChannel = new DataOutputStream(new BufferedOutputStream(sock.getOutputStream))
          serverSocket.close()
        }
        try {
          daemonChannel.writeInt(port)
          daemonChannel.flush()
        } catch {
          case e: IOException =>
            // daemon process died
            daemonChannel.close()
            daemonChannel = null
            errThread = null
            // fail the current task, retry by scheduler
            throw e
        }
        errThread
      }
    } else {
      createRProcess(port, "worker.R")
    }
  }

  /**
   * Create an RRDD given a sequence of byte arrays. Used to create RRDD when `parallelize` is
   * called from R.
   */
  def createRDDFromArray(jsc: JavaSparkContext, arr: Array[Array[Byte]]): JavaRDD[Array[Byte]] = {
    JavaRDD.fromRDD(jsc.sc.parallelize(arr, arr.length))
  }
}
chenc10/Spark-PAF
core/src/main/scala/org/apache/spark/api/r/RRDD.scala
Scala
apache-2.0
15,052
package io.udash.properties.single import com.avsystem.commons._ import io.udash.properties._ import io.udash.properties.seq.{SeqProperty, SeqPropertyFromSingleValue} import io.udash.utils.Registration object Property { /** Creates a blank `DirectProperty[T]`. */ def blank[T: PropertyCreator : Blank]: CastableProperty[T] = PropertyCreator[T].newProperty(null) /** Creates `DirectProperty[T]` with initial value. */ def apply[T: PropertyCreator](init: T): CastableProperty[T] = PropertyCreator[T].newProperty(init, null) } /** Property which can be modified. */ trait Property[A] extends ReadableProperty[A] { /** Changes current property value. Fires value change listeners. * @param t Should not be null! * @param force If true, the value change listeners will be fired even if value didn't change. */ def set(t: A, force: Boolean = false): Unit /** Changes current property value. Does not fire value change listeners. */ def setInitValue(t: A): Unit /** Fires value change listeners with current value. */ def touch(): Unit /** Removes all listeners from property. */ def clearListeners(): Unit /** * Creates Property[B] linked to `this`. Changes will be bidirectionally synchronized between `this` and new property. * * @param transformer Method transforming type A of existing Property to type B of new Property. * @param revert Method transforming type B of new Property to type A of existing Property. * @tparam B Type of new Property. * @return New Property[B], which will be synchronised with original Property[A]. */ def bitransform[B](transformer: A => B)(revert: B => A): Property[B] /** * Creates SeqProperty[B] linked to `this`. Changes will be synchronized with `this` in both directions. * * @param transformer Method transforming type A of existing Property to type Seq[B] of new Property. * @param revert Method transforming type Seq[B] to A. * @tparam B Type of elements in new SeqProperty. 
* @return New ReadableSeqProperty[B], which will be synchronised with original Property[A]. */ def bitransformToSeq[B: PropertyCreator](transformer: A => BSeq[B])(revert: BSeq[B] => A): SeqProperty[B, Property[B]] /** * Bidirectionally synchronizes Property[B] with `this`. The transformed value is synchronized from `this` * to Property[B] on initialization. * * @param p Property to be synchronized with `this`. * @param transformer Method transforming type A of existing Property to type B of new Property. * @param revert Method transforming type B of new Property to type A of existing Property. * @tparam B Type of new Property. * @return Bidirectional registration between existing and new property. */ def sync[B](p: Property[B])(transformer: A => B, revert: B => A): Registration } /** Property which can be modified. */ private[properties] trait AbstractProperty[A] extends AbstractReadableProperty[A] with Property[A] { override def clearListeners(): Unit = { listenersUpdate() listeners.clear() oneTimeListeners.clear() } override def bitransform[B](transformer: A => B)(revert: B => A): Property[B] = new TransformedProperty[A, B](this, transformer, revert) override def bitransformToSeq[B: PropertyCreator](transformer: A => BSeq[B])(revert: BSeq[B] => A): SeqProperty[B, Property[B]] = new SeqPropertyFromSingleValue(this, transformer, revert) override def sync[B](p: Property[B])(transformer: A => B, revert: B => A): Registration = { val transformerRegistration = this.streamTo(p)(transformer) val revertRegistration = p.streamTo(this, initUpdate = false)(revert) new Registration { override def cancel(): Unit = { transformerRegistration.cancel() revertRegistration.cancel() } override def isActive: Boolean = { transformerRegistration.isActive && revertRegistration.isActive } override def restart(): Unit = { transformerRegistration.restart() revertRegistration.restart() touch() } } } }
UdashFramework/udash-core
core/src/main/scala/io/udash/properties/single/Property.scala
Scala
apache-2.0
4,130
package com.github.lstephen.ootp.ai.regression import collection.JavaConversions._ import com.github.lstephen.ootp.ai.player.ratings.{ BattingRatings, PitchingRatings } import com.github.lstephen.ootp.ai.site.{SiteHolder, Version} import com.google.common.base.Optional import com.typesafe.scalalogging.StrictLogging import scala.math.ScalaNumericAnyConversions trait Regressable[-T] { def toInput(t: T): Input def features: Seq[String] } object Regressable { // Note that this is java.lang.Integer def toSomeDouble(i: Integer): Some[Double] = Some(i.doubleValue) val version = SiteHolder.get.getType def s(n: Number): Option[Number] = Some(n) def o[T](o: Optional[T]): Option[T] = if (o.isPresent) Some(o.get) else None implicit object RegressableBattingRatings extends Regressable[BattingRatings[_]] { def toInput(r: BattingRatings[_]): Input = version match { case Version.OOTP5 => Input(s(r.getContact), s(r.getGap), o(r.getTriples), s(r.getPower), s(r.getEye), o(r.getK), o(r.getRunningSpeed)) case Version.OOTP6 => Input(s(r.getContact), s(r.getGap), s(r.getPower), s(r.getEye), o(r.getK), o(r.getRunningSpeed)) } val features = version match { case Version.OOTP5 => Seq("Hits", "Doubles", "Triples", "Homeruns", "Walks", "Strikeouts", "Running Speed") case Version.OOTP6 => Seq("Contact", "Gap", "Power", "Eye", "Avoid K's", "Running Speed") } } implicit object RegressablePitchingRatings extends Regressable[PitchingRatings[_]] { def toInput(r: PitchingRatings[_]) = version match { case Version.OOTP5 => Input(o(r.getRuns), s(r.getHits), s(r.getGap), s(r.getMovement), s(r.getControl), s(r.getStuff), o(r.getGroundBallPct), s(r.getEndurance)) case Version.OOTP6 => Input(s(r.getStuff), s(r.getControl), s(r.getMovement), o(r.getGroundBallPct), s(r.getEndurance)) } val features = version match { case Version.OOTP5 => Seq("Runs", "Hits", "Doubles", "Homeruns", "Walks", "Strikeouts", "Groundball Pct.", "Endurance") case Version.OOTP6 => Seq("Stuff", "Control", "Movement", "Groundball 
Pct.", "Endurance") } } } class Regression(label: String) extends StrictLogging { import Regressable._ var data: DataSet = DataSet() var _regression: Option[RegressionPyModel.Predict] = None val model: RegressionPyModel = new RegressionPyModel def regression: RegressionPyModel.Predict = _regression match { case Some(r) => r case None => logger.info( s"Creating regression for $label, size: ${data.length}, averages: ${data.averages}") val p = model train data _regression = Some(p) p } def addData[T](x: T, y: Double, w: Int)( implicit regressable: Regressable[T]): Unit = { data = data :+ new DataPoint(regressable.toInput(x), y, w) _regression = None } def train: Unit = regression def getN: Long = data.length def predict[T: Regressable](xs: Seq[T]): Seq[Double] = predict(xs.map(implicitly[Regressable[T]].toInput(_))) def predict(xs: Seq[Input]): Seq[Double] = regression(xs) def mse = ((predict(data.map(_.input)), data.map(_.output)).zipped.map { case (p, o) => math.pow(o - p, 2) }.sum) / data.length def rsme = math.pow(mse, 0.5) def format: String = { f"$label%15s | ${rsme}%.3f" } def modelReport = regression.report(label) } object Regression { def predict[T: Regressable](rs: Map[String, Regression], xs: Seq[T]): Map[String, Seq[Double]] = predict(rs, xs.map(implicitly[Regressable[T]].toInput(_))) def predict(rs: Map[String, Regression], xs: Seq[Input]): Map[String, Seq[Double]] = { val models = rs.mapValues(_.regression) RegressionPyModel.predict(models, xs) } }
lstephen/ootp-ai
src/main/scala/com/github/lstephen/ootp/ai/regression/Regression.scala
Scala
apache-2.0
4,330
package models

/** Site-wide constant data used by the views. */
object constants {

  // Each profile link is (display name, URL, icon identifier).
  private val github = ("GitHub", "https://github.com/FlightOfStairs", "github")
  private val linkedIn = ("LinkedIn", "http://www.linkedin.com/in/alistairfsmith", "linkedin")
  private val twitter = ("Twitter", "https://twitter.com/FOfStairs", "twitter")

  /** External profile links rendered in the page footer/header. */
  val links: Seq[(String, String, String)] = Seq(github, linkedIn, twitter)
}
FlightOfStairs/flightofstairs.org
app/models/constants.scala
Scala
mit
259
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.parquet import java.io.File import scala.collection.JavaConverters._ import scala.reflect.ClassTag import scala.reflect.runtime.universe.TypeTag import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.Path import org.apache.parquet.format.converter.ParquetMetadataConverter import org.apache.parquet.hadoop.{Footer, ParquetFileReader, ParquetFileWriter} import org.apache.parquet.hadoop.metadata.{BlockMetaData, FileMetaData, ParquetMetadata} import org.apache.parquet.schema.MessageType import org.apache.spark.sql.DataFrame import org.apache.spark.sql.execution.datasources.FileBasedDataSourceTest import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types.StructType /** * A helper trait that provides convenient facilities for Parquet testing. * * NOTE: Considering classes `Tuple1` ... `Tuple22` all extend `Product`, it would be more * convenient to use tuples rather than special case classes when writing test cases/suites. * Especially, `Tuple1.apply` can be used to easily wrap a single type/value. 
*/ private[sql] trait ParquetTest extends FileBasedDataSourceTest { override protected val dataSourceName: String = "parquet" override protected val vectorizedReaderEnabledKey: String = SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key /** * Reads the parquet file at `path` */ protected def readParquetFile(path: String, testVectorized: Boolean = true) (f: DataFrame => Unit) = readFile(path, testVectorized)(f) /** * Writes `data` to a Parquet file, which is then passed to `f` and will be deleted after `f` * returns. */ protected def withParquetFile[T <: Product: ClassTag: TypeTag] (data: Seq[T]) (f: String => Unit): Unit = withDataSourceFile(data)(f) /** * Writes `data` to a Parquet file and reads it back as a [[DataFrame]], * which is then passed to `f`. The Parquet file will be deleted after `f` returns. */ protected def withParquetDataFrame[T <: Product: ClassTag: TypeTag] (data: Seq[T], testVectorized: Boolean = true) (f: DataFrame => Unit): Unit = withDataSourceDataFrame(data, testVectorized)(f) /** * Writes `data` to a Parquet file, reads it back as a [[DataFrame]] and registers it as a * temporary table named `tableName`, then call `f`. The temporary table together with the * Parquet file will be dropped/deleted after `f` returns. 
*/ protected def withParquetTable[T <: Product: ClassTag: TypeTag] (data: Seq[T], tableName: String, testVectorized: Boolean = true) (f: => Unit): Unit = withDataSourceTable(data, tableName, testVectorized)(f) protected def makeParquetFile[T <: Product: ClassTag: TypeTag]( data: Seq[T], path: File): Unit = makeDataSourceFile(data, path) protected def makeParquetFile[T <: Product: ClassTag: TypeTag]( df: DataFrame, path: File): Unit = makeDataSourceFile(df, path) protected def makePartitionDir( basePath: File, defaultPartitionName: String, partitionCols: (String, Any)*): File = { val partNames = partitionCols.map { case (k, v) => val valueString = if (v == null || v == "") defaultPartitionName else v.toString s"$k=$valueString" } val partDir = partNames.foldLeft(basePath) { (parent, child) => new File(parent, child) } assert(partDir.mkdirs(), s"Couldn't create directory $partDir") partDir } protected def writeMetadata( schema: StructType, path: Path, configuration: Configuration): Unit = { val parquetSchema = new SparkToParquetSchemaConverter().convert(schema) val extraMetadata = Map(ParquetReadSupport.SPARK_METADATA_KEY -> schema.json).asJava val createdBy = s"Apache Spark ${org.apache.spark.SPARK_VERSION}" val fileMetadata = new FileMetaData(parquetSchema, extraMetadata, createdBy) val parquetMetadata = new ParquetMetadata(fileMetadata, Seq.empty[BlockMetaData].asJava) val footer = new Footer(path, parquetMetadata) ParquetFileWriter.writeMetadataFile(configuration, path, Seq(footer).asJava) } /** * This is an overloaded version of `writeMetadata` above to allow writing customized * Parquet schema. 
*/
  protected def writeMetadata(
      parquetSchema: MessageType,
      path: Path,
      configuration: Configuration,
      extraMetadata: Map[String, String] = Map.empty[String, String]): Unit = {
    val extraMetadataAsJava = extraMetadata.asJava
    // Record the Spark version as the file's "created by" writer tag.
    val createdBy = s"Apache Spark ${org.apache.spark.SPARK_VERSION}"
    val fileMetadata = new FileMetaData(parquetSchema, extraMetadataAsJava, createdBy)
    // No row groups are attached: this produces a metadata-only (summary) file.
    val parquetMetadata = new ParquetMetadata(fileMetadata, Seq.empty[BlockMetaData].asJava)
    val footer = new Footer(path, parquetMetadata)
    ParquetFileWriter.writeMetadataFile(configuration, path, Seq(footer).asJava)
  }

  // Reads the footers of the Parquet part-files under `path` in parallel,
  // bypassing any _metadata summary files that may also be present.
  protected def readAllFootersWithoutSummaryFiles(
      path: Path,
      configuration: Configuration): Seq[Footer] = {
    val fs = path.getFileSystem(configuration)
    ParquetFileReader.readAllFootersInParallel(configuration, fs.getFileStatus(path)).asScala.toSeq
  }

  // Reads the footer stored in the _metadata summary file under directory `path`.
  protected def readFooter(path: Path, configuration: Configuration): ParquetMetadata = {
    ParquetFileReader.readFooter(
      configuration,
      new Path(path, ParquetFileWriter.PARQUET_METADATA_FILE),
      ParquetMetadataConverter.NO_FILTER)
  }

  // Registers the same test body twice: once with the standard Parquet write
  // path and once with Spark's legacy format, toggled via
  // spark.sql.parquet.writeLegacyFormat.
  protected def testStandardAndLegacyModes(testName: String)(f: => Unit): Unit = {
    test(s"Standard mode - $testName") {
      withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> "false") { f }
    }

    test(s"Legacy mode - $testName") {
      withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> "true") { f }
    }
  }

  // Loads a Parquet file bundled on the test classpath into a DataFrame.
  protected def readResourceParquetFile(name: String): DataFrame = {
    val url = Thread.currentThread().getContextClassLoader.getResource(name)
    spark.read.parquet(url.toString)
  }
}
pgandhi999/spark
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetTest.scala
Scala
apache-2.0
6,703
package com.tierline.scala.activemodel

import com.tierline.scala.activemodel.singletenant.domain.Cart
import org.scalatest.FunSuite
import com.tierline.scala.activemodel.util.Companion

// Unit tests for small reflection utilities.
class UtilsTest extends FunSuite {

  // Companion.of should locate the companion object of a class via its
  // class manifest; isDefined means the lookup succeeded for Cart.
  test("Get Companion object from class manifest") {
    assert(Companion.of[Cart].isDefined)
  }
}
tierline/scala-activemodel
src/test/scala/com/tierline/scala/activemodel/UtilsTest.scala
Scala
mit
323
/* * MIT License * * Copyright (c) 2016 Ramjet Anvil * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/
package com.ramjetanvil.padrone

import java.io.File

import com.ramjetanvil.padrone.http.client.{itch, oculus, steam}
import com.typesafe.config.{Config, ConfigException, ConfigFactory}

import scala.util.{Failure, Success, Try}

/**
 * Central application configuration. A user-provided `application.conf` in
 * the working directory takes precedence over the bundled default config.
 */
object AppConfig {

  /** Merged configuration: user file first, falling back to the classpath default. */
  val Global = {
    val userConfig = ConfigFactory.parseFile(new File("application.conf"))
    val defaultConfig = ConfigFactory.defaultApplication().resolve()
    userConfig.withFallback(defaultConfig)
  }

  /** Root config section for the master server. */
  val MasterServer = Global.getConfig("com.ramjetanvil.padrone")
  val Server = MasterServer.getConfig("server")

  // Store integrations are optional: a missing section simply disables one.
  val Steam = optionalConfig("steam").map(steam.config)
  val Itch = optionalConfig("itch-io").map(itch.config)
  val Oculus = optionalConfig("oculus").map(oculus.config)

  /**
   * Reads a sub-section of the master-server config, returning None when the
   * path is absent. Any configuration error other than a missing path is
   * rethrown, so malformed (rather than omitted) sections still fail loudly.
   */
  private def optionalConfig(key: String): Option[Config] = {
    Try(MasterServer.getConfig(key)) match {
      case Success(config) => Some(config)
      // Bind with `_`: the Missing exception itself carries no needed detail,
      // and the unused binding triggers a lint warning.
      case Failure(_: ConfigException.Missing) => None
      case Failure(ex) => throw ex
    }
  }
}
RamjetAnvil/padrone
server/src/main/scala/com/ramjetanvil/padrone/AppConfig.scala
Scala
mit
2,135
package me.rexim.issuestant.github.model

// Minimal projection of a GitHub issue comment as deserialized from the API:
// only the numeric comment id and its body text are retained.
case class Comment (
  id: Int,
  body: String
)
tsoding/Issuestant
src/main/scala/me/rexim/issuestant/github/model/Comment.scala
Scala
mit
91
/**
 * Copyright 2011-2017 GatlingCorp (http://gatling.io)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.gatling.recorder.ui.swing.frame

import java.awt.Color

import scala.collection.mutable
import scala.swing._
import scala.swing.Swing.MatteBorder
import scala.util.Try

import io.gatling.commons.util.StringHelper.RichString

/**
 * Per-TextField validation registry for the recorder's Swing UI: each field
 * is paired with a predicate plus success/failure callbacks that adjust its
 * border, and an aggregate status can be queried before submitting a form.
 */
private[swing] object ValidationHelper {

  /**
   * @param condition       predicate on the field's current text
   * @param successCallback applied to the component when the text is valid
   * @param failureCallback applied to the component when the text is invalid
   * @param alwaysValid     when true the field never blocks overall validity,
   *                        regardless of what `condition` returned
   */
  case class Validator(
      condition: String => Boolean,
      successCallback: Component => Unit = setStandardBorder,
      failureCallback: Component => Unit = setErrorBorder,
      alwaysValid: Boolean = false
  )

  // Lazy to avoid unnecessary Swing component creation when they're not
  // needed (e.g. tests)
  private lazy val standardBorder = new TextField().border
  private lazy val errorBorder = MatteBorder(2, 2, 2, 2, Color.red)

  /* Default validators */
  // Valid port numbers are 0..65535; the previous upper bound (65536) was
  // off by one and accepted an impossible port.
  private val portRange = 0 to 65535

  def isValidPort(s: String) = Try(s.toInt).toOption.exists(portRange.contains)

  def isNonEmpty(s: String) = s.trimToOption.isDefined

  // Single backslashes inside a triple-quoted string so the pattern contains
  // the real regex escapes \w, \$ and \. — with the previous doubled
  // backslashes the character classes matched a literal '\' (and a literal
  // 'w') instead, rejecting every normal package name.
  private val validPackageNameRegex = """^[a-z_\$][\w\$]*(?:\.[a-z_\$][\w\$]*)*$"""

  // Empty is allowed: simulations may live in the default package.
  def isValidPackageName(s: String) = s.isEmpty || s.matches(validPackageNameRegex)

  // A valid simple class name: non-blank, no underscores, and every character
  // a legal Java identifier part (first char a legal identifier start).
  def isValidSimpleClassName(s: String) =
    isNonEmpty(s) &&
      !s.contains('_') &&
      Character.isJavaIdentifierStart(s.charAt(0)) &&
      !s.substring(1, s.length).exists(!Character.isJavaIdentifierPart(_))

  /* Default callbacks */
  def setStandardBorder(c: Component): Unit = { c.border = standardBorder }

  def setErrorBorder(c: Component): Unit = { c.border = errorBorder }

  // Registered validators and the last computed validity per field.
  private val validators = mutable.Map.empty[TextField, Validator]
  private val status = mutable.Map.empty[TextField, Boolean]

  def registerValidator(textField: TextField, validator: Validator): Unit = {
    validators += (textField -> validator)
  }

  /** Re-evaluates one field, fires its callback and caches its status. */
  def updateValidationStatus(field: TextField) = validators.get(field) match {
    case Some(validator) =>
      val isValid = validator.condition(field.text)
      val callback = if (isValid) validator.successCallback else validator.failureCallback
      callback(field)
      status += (field -> (validator.alwaysValid || isValid))
    case None =>
      throw new IllegalStateException(s"No validator registered for component : $field")
  }

  /** Re-validates every registered field, then reports overall validity. */
  def allValid = {
    // foreach, not map: updateValidationStatus is called purely for its
    // side effects, so there is no point building a discarded collection.
    validators.keys.foreach(updateValidationStatus)
    validationStatus
  }

  def validationStatus = status.values.forall(identity)
}
MykolaB/gatling
gatling-recorder/src/main/scala/io/gatling/recorder/ui/swing/frame/ValidationHelper.scala
Scala
apache-2.0
2,982
/**
 * License
 * =======
 *
 * The MIT License (MIT)
 *
 *
 * Copyright (c) 2017 Antoine DOERAENE @sherpal
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
package sharednodejsapis

import scala.scalajs.js
import scala.scalajs.js.annotation.JSName
import scala.scalajs.js.typedarray.ArrayBuffer

// Scala.js facade for the Node.js global `Buffer` class (a Uint8Array
// subclass). All members are implemented natively by Node.js.
// NOTE(review): `@JSName` on top-level native definitions is deprecated in
// newer Scala.js versions in favour of `@JSGlobal` — confirm the Scala.js
// version in use before migrating.
@js.native
@JSName("Buffer")
abstract class Buffer extends js.typedarray.Uint8Array(0) {

  // Decodes the bytes in [start, end) with the given encoding; by default
  // the whole buffer is decoded as UTF-8.
  def toString(encoding: String = "utf8", start: Int = 0, end: Int = this.length): String =
    js.native
}

@js.native
object Buffer extends js.Object {

  // Creates a Buffer holding `s` encoded with `encoding` (default UTF-8).
  def from(s: String, encoding: String = "utf8"): Buffer = js.native

  // Creates a Buffer sharing/copying the bytes of an ArrayBuffer.
  def from(msg: ArrayBuffer): Buffer = js.native
}
sherpal/holomorphic-maps
src/main/scala/sharednodejsapis/Buffer.scala
Scala
mit
1,732
package com.flowtomation.akkared.nodes.core import java.time.Instant import akka.actor.{Actor, ActorLogging, Props} import akka.http.scaladsl.model.{ContentTypes, HttpEntity, StatusCodes} import akka.http.scaladsl.server.Directives import com.flowtomation.akkared.{NodeContext, NodeMessage, NodeType, Runtime} import com.flowtomation.akkared.model.{FlowNode, ItemId} import com.flowtomation.akkared.nodes.core.DebugActor.SetActive import play.api.libs.json.{JsObject, Json, Reads} object Debug extends NodeType with Directives{ val name = "debug" def instance(ctx: NodeContext): Props = { Props(new DebugActor(ctx)) } override def routes(runtime: Runtime) = pathPrefix("debug") { pathPrefix(Segment) { nodeId => // if node not found return 404 with plain text body "Not Found" path(Segment) { action => post { // empty body // TODO can we get this more typesafe? action match { case "disable" => runtime.send(ItemId(nodeId), SetActive(false)) complete(StatusCodes.Created, HttpEntity(ContentTypes.`text/plain(UTF-8)`, "Created")) case "enable" => runtime.send(ItemId(nodeId), SetActive(true)) complete(HttpEntity(ContentTypes.`text/plain(UTF-8)`, "OK")) case other => complete(StatusCodes.BadRequest, HttpEntity(ContentTypes.`text/plain(UTF-8)`, "Bad Request")) } } } } } } private object DebugConfig{ implicit val reads: Reads[DebugConfig] = Json.reads[DebugConfig] } private case class DebugConfig( active: Boolean, console: String, // "true" or "false" <- also print to console? 
complete: String // complete message body or only msg property ) object DebugActor{ case class SetActive(active: Boolean) } private class DebugActor(ctx: NodeContext) extends Actor with ActorLogging { private val config = Json.fromJson[DebugConfig](JsObject(ctx.node.otherProperties)).fold( e => throw new RuntimeException(e.toString) , identity ) private var active = config.active //ctx.node.otherProperties.foreach(println) private def sendDebugComplete(msg: NodeMessage){ // { id:node.id, name:node.name, topic:msg.topic, msg:msg, _path:msg._path} debug(msg) } private def sendDebug(msg: NodeMessage) { //{id:node.id, z:node.z, name:node.name, topic:msg.topic, property:property, msg:output, _path:msg._path})} debug(msg) } override def receive: Receive = { case SetActive(a) => this.active = a log.info(s"active = $active") case m:NodeMessage => if (config.complete == "true"){ if(config.console == "true"){ log.info(m.toString) } if(active) { sendDebugComplete(m) } }else{ // var property = "payload"; // var output = msg[property]; // if (this.complete !== "false" && typeof this.complete !== "undefined") { // property = this.complete; // try { // output = RED.util.getMessageProperty(msg,this.complete); // } catch(err) { // output = undefined; // } // } // if (this.console === "true") { // if (typeof output === "string") { // node.log((output.indexOf("\\n") !== -1 ? 
"\\n" : "") + output); // } else if (typeof output === "object") { // node.log("\\n"+util.inspect(output, {colors:useColors, depth:10})); // } else { // node.log(util.inspect(output, {colors:useColors})); // } // } if(config.console == "true"){ log.info(m.toString) } if(active) { sendDebug (m) } } case otherMessage => log.warning(otherMessage.toString) } private def debug(msg: NodeMessage){ val toPublish: NodeMessage = msg.msg match { case number:BigDecimal => NodeMessage(Json.obj( "format" -> "number", "msg" -> number.toString() )) // msg.format = "number" // msg.msg = msg.msg.toString(); case other => msg } // // don't put blank errors in sidebar (but do add to logs) // //if ((msg.msg === "") && (msg.hasOwnProperty("level")) && (msg.level === 20)) { return; } // if (msg.msg instanceof Error) { // msg.format = "error"; // var errorMsg = {}; // if (msg.msg.name) { // errorMsg.name = msg.msg.name; // } // if (msg.msg.hasOwnProperty('message')) { // errorMsg.message = msg.msg.message; // } else { // errorMsg.message = msg.msg.toString(); // } // msg.msg = JSON.stringify(errorMsg); // } else if (msg.msg instanceof Buffer) { // msg.format = "buffer["+msg.msg.length+"]"; // msg.msg = msg.msg.toString('hex'); // if (msg.msg.length > debuglength) { // msg.msg = msg.msg.substring(0,debuglength); // } // } else if (msg.msg && typeof msg.msg === 'object') { // try { // msg.format = msg.msg.constructor.name || "Object"; // // Handle special case of msg.req/res objects from HTTP In node // if (msg.format === "IncomingMessage" || msg.format === "ServerResponse") { // msg.format = "Object"; // } // } catch(err) { // msg.format = "Object"; // } // if (/error/i.test(msg.format)) { // msg.msg = JSON.stringify({ // name: msg.msg.name, // message: msg.msg.message // }); // } else { // var isArray = util.isArray(msg.msg); // if (isArray) { // msg.format = "array["+msg.msg.length+"]"; // if (msg.msg.length > debuglength) { // // msg.msg = msg.msg.slice(0,debuglength); // msg.msg = { // 
__encoded__: true, // type: "array", // data: msg.msg.slice(0,debuglength), // length: msg.msg.length // } // } // } // if (isArray || (msg.format === "Object")) { // msg.msg = safeJSONStringify(msg.msg, function(key, value) { // if (key === '_req' || key === '_res') { // value = "[internal]" // } else if (value instanceof Error) { // value = value.toString() // } else if (util.isArray(value) && value.length > debuglength) { // value = { // __encoded__: true, // type: "array", // data: value.slice(0,debuglength), // length: value.length // } // } else if (typeof value === 'string') { // if (value.length > debuglength) { // value = value.substring(0,debuglength)+"..."; // } // } else if (value && value.constructor) { // if (value.type === "Buffer") { // value.__encoded__ = true; // value.length = value.data.length; // if (value.length > debuglength) { // value.data = value.data.slice(0,debuglength); // } // } else if (value.constructor.name === "ServerResponse") { // value = "[internal]" // } else if (value.constructor.name === "Socket") { // value = "[internal]" // } // } // return value; // }," "); // } else { // try { msg.msg = msg.msg.toString(); } // catch(e) { msg.msg = "[Type not printable]"; } // } // } // } else if (typeof msg.msg === "boolean") { // msg.format = "boolean"; // msg.msg = msg.msg.toString(); // } else if (typeof msg.msg === "number") { // msg.format = "number"; // msg.msg = msg.msg.toString(); // } else if (msg.msg === 0) { // msg.format = "number"; // msg.msg = "0"; // } else if (msg.msg === null || typeof msg.msg === "undefined") { // msg.format = (msg.msg === null)?"null":"undefined"; // msg.msg = "(undefined)"; // } else { // msg.format = "string["+msg.msg.length+"]"; // if (msg.msg.length > debuglength) { // msg.msg = msg.msg.substring(0,debuglength)+"..."; // } // } // // if (msg.msg.length > debuglength) { // // msg.msg = msg.msg.substr(0,debuglength) +" ...."; // // } ctx.publish("debug", toPublish) } }
francisdb/akka-red
src/main/scala/com/flowtomation/akkared/nodes/core/Debug.scala
Scala
apache-2.0
8,385
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.sql.catalyst.analysis

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
import org.apache.spark.sql.types._

/**
 * End-to-end checks that expression type-checking surfaces the expected
 * "data type mismatch" error messages through analysis.
 *
 * Each test builds an expression over [[testRelation]], runs it through
 * [[SimpleAnalyzer.checkAnalysis]], and asserts either success or that the
 * resulting [[AnalysisException]] message contains a specific fragment.
 * Assertions are substring matches, so the literal strings below must track
 * the error text produced by each expression's `checkInputDataTypes`.
 */
class ExpressionTypeCheckingSuite extends SparkFunSuite {

  // One attribute per data type exercised by the tests below.
  val testRelation = LocalRelation(
    'intField.int,
    'stringField.string,
    'booleanField.boolean,
    'decimalField.decimal(8, 0),
    'arrayField.array(StringType),
    'mapField.map(StringType, LongType))

  /**
   * Asserts that analyzing `expr` fails with a type-mismatch error whose
   * message contains both the generic "cannot resolve ... data type mismatch"
   * prefix (built from `expr.sql`) and the caller-supplied `errorMessage`
   * fragment.
   */
  def assertError(expr: Expression, errorMessage: String): Unit = {
    val e = intercept[AnalysisException] {
      assertSuccess(expr)
    }
    assert(e.getMessage.contains(
      s"cannot resolve '${expr.sql}' due to data type mismatch:"))
    assert(e.getMessage.contains(errorMessage))
  }

  /** Asserts that `expr`, selected from [[testRelation]], analyzes cleanly. */
  def assertSuccess(expr: Expression): Unit = {
    val analyzed = testRelation.select(expr.as("c")).analyze
    SimpleAnalyzer.checkAnalysis(analyzed)
  }

  /** Shorthand for the common "differing types" mismatch message. */
  def assertErrorForDifferingTypes(expr: Expression): Unit = {
    assertError(expr, s"differing types in '${expr.sql}'")
  }

  test("check types for unary arithmetic") {
    assertError(BitwiseNot('stringField), "requires integral type")
  }

  test("check types for binary arithmetic") {
    // We will cast String to Double for binary arithmetic
    assertSuccess(Add('intField, 'stringField))
    assertSuccess(Subtract('intField, 'stringField))
    assertSuccess(Multiply('intField, 'stringField))
    assertSuccess(Divide('intField, 'stringField))
    assertSuccess(Remainder('intField, 'stringField))
    // checkAnalysis(BitwiseAnd('intField, 'stringField))

    // int vs boolean: no implicit coercion, so the operands' types differ.
    assertErrorForDifferingTypes(Add('intField, 'booleanField))
    assertErrorForDifferingTypes(Subtract('intField, 'booleanField))
    assertErrorForDifferingTypes(Multiply('intField, 'booleanField))
    assertErrorForDifferingTypes(Divide('intField, 'booleanField))
    assertErrorForDifferingTypes(Remainder('intField, 'booleanField))
    assertErrorForDifferingTypes(BitwiseAnd('intField, 'booleanField))
    assertErrorForDifferingTypes(BitwiseOr('intField, 'booleanField))
    assertErrorForDifferingTypes(BitwiseXor('intField, 'booleanField))

    // Matching-but-unsupported operand types: each operator reports the
    // family of types it accepts.
    assertError(Add('booleanField, 'booleanField),
      "requires (numeric or calendarinterval) type")
    assertError(Subtract('booleanField, 'booleanField),
      "requires (numeric or calendarinterval) type")
    assertError(Multiply('booleanField, 'booleanField), "requires numeric type")
    assertError(Divide('booleanField, 'booleanField),
      "requires (double or decimal) type")
    assertError(Remainder('booleanField, 'booleanField), "requires numeric type")

    assertError(BitwiseAnd('booleanField, 'booleanField), "requires integral type")
    assertError(BitwiseOr('booleanField, 'booleanField), "requires integral type")
    assertError(BitwiseXor('booleanField, 'booleanField), "requires integral type")
  }

  test("check types for predicates") {
    // We will cast String to Double for binary comparison
    assertSuccess(EqualTo('intField, 'stringField))
    assertSuccess(EqualNullSafe('intField, 'stringField))
    assertSuccess(LessThan('intField, 'stringField))
    assertSuccess(LessThanOrEqual('intField, 'stringField))
    assertSuccess(GreaterThan('intField, 'stringField))
    assertSuccess(GreaterThanOrEqual('intField, 'stringField))

    // We will transform EqualTo with numeric and boolean types to CaseKeyWhen
    assertSuccess(EqualTo('intField, 'booleanField))
    assertSuccess(EqualNullSafe('intField, 'booleanField))

    assertErrorForDifferingTypes(EqualTo('intField, 'mapField))
    assertErrorForDifferingTypes(EqualNullSafe('intField, 'mapField))
    assertErrorForDifferingTypes(LessThan('intField, 'booleanField))
    assertErrorForDifferingTypes(LessThanOrEqual('intField, 'booleanField))
    assertErrorForDifferingTypes(GreaterThan('intField, 'booleanField))
    assertErrorForDifferingTypes(GreaterThanOrEqual('intField, 'booleanField))

    // Map types have no ordering, so no comparison operator accepts them.
    assertError(EqualTo('mapField, 'mapField),
      "EqualTo does not support ordering on type map")
    assertError(EqualNullSafe('mapField, 'mapField),
      "EqualNullSafe does not support ordering on type map")
    assertError(LessThan('mapField, 'mapField),
      "LessThan does not support ordering on type map")
    assertError(LessThanOrEqual('mapField, 'mapField),
      "LessThanOrEqual does not support ordering on type map")
    assertError(GreaterThan('mapField, 'mapField),
      "GreaterThan does not support ordering on type map")
    assertError(GreaterThanOrEqual('mapField, 'mapField),
      "GreaterThanOrEqual does not support ordering on type map")

    assertError(If('intField, 'stringField, 'stringField),
      "type of predicate expression in If should be boolean")
    assertErrorForDifferingTypes(If('booleanField, 'intField, 'booleanField))

    assertError(
      CaseWhen(Seq(('booleanField.attr, 'intField.attr), ('booleanField.attr, 'mapField.attr))),
      "THEN and ELSE expressions should all be same type or coercible to a common type")
    assertError(
      CaseKeyWhen('intField, Seq('intField, 'stringField, 'intField, 'mapField)),
      "THEN and ELSE expressions should all be same type or coercible to a common type")
    assertError(
      CaseWhen(Seq(('booleanField.attr, 'intField.attr), ('intField.attr, 'intField.attr))),
      "WHEN expressions in CaseWhen should all be boolean type")
  }

  test("check types for aggregates") {
    // We use AggregateFunction directly at here because the error will be thrown from it
    // instead of from AggregateExpression, which is the wrapper of an AggregateFunction.

    // We will cast String to Double for sum and average
    assertSuccess(Sum('stringField))
    assertSuccess(Average('stringField))
    assertSuccess(Min('arrayField))
    assertSuccess(new EveryAgg('booleanField))
    assertSuccess(new AnyAgg('booleanField))
    assertSuccess(new SomeAgg('booleanField))

    assertError(Min('mapField), "min does not support ordering on type")
    assertError(Max('mapField), "max does not support ordering on type")
    assertError(Sum('booleanField), "function sum requires numeric type")
    assertError(Average('booleanField), "function average requires numeric type")
  }

  test("check types for others") {
    assertError(CreateArray(Seq('intField, 'booleanField)),
      "input to function array should all be the same type")
    assertError(Coalesce(Seq('intField, 'booleanField)),
      "input to function coalesce should all be the same type")
    assertError(Coalesce(Nil), "function coalesce requires at least one argument")
    assertError(new Murmur3Hash(Nil), "function hash requires at least one argument")
    assertError(new XxHash64(Nil), "function xxhash64 requires at least one argument")
    assertError(Explode('intField),
      "input to function explode should be array or map type")
    assertError(PosExplode('intField),
      "input to function explode should be array or map type")
  }

  test("check types for CreateNamedStruct") {
    // Arguments alternate (name, value); odd positions must be foldable strings.
    assertError(
      CreateNamedStruct(Seq("a", "b", 2.0)), "even number of arguments")
    assertError(
      CreateNamedStruct(Seq(1, "a", "b", 2.0)),
      "Only foldable string expressions are allowed to appear at odd position")
    assertError(
      CreateNamedStruct(Seq('a.string.at(0), "a", "b", 2.0)),
      "Only foldable string expressions are allowed to appear at odd position")
    assertError(
      CreateNamedStruct(Seq(Literal.create(null, StringType), "a")),
      "Field name should not be null")
  }

  test("check types for CreateMap") {
    // Arguments alternate (key, value); keys and values must each be homogeneous.
    assertError(CreateMap(Seq("a", "b", 2.0)), "even number of arguments")
    assertError(
      CreateMap(Seq('intField, 'stringField, 'booleanField, 'stringField)),
      "keys of function map should all be the same type")
    assertError(
      CreateMap(Seq('stringField, 'intField, 'stringField, 'booleanField)),
      "values of function map should all be the same type")
  }

  test("check types for ROUND/BROUND") {
    // The scale argument must be a foldable int; the value must be numeric.
    assertSuccess(Round(Literal(null), Literal(null)))
    assertSuccess(Round('intField, Literal(1)))

    assertError(Round('intField, 'intField), "Only foldable Expression is allowed")
    assertError(Round('intField, 'booleanField), "requires int type")
    assertError(Round('intField, 'mapField), "requires int type")
    assertError(Round('booleanField, 'intField), "requires numeric type")

    assertSuccess(BRound(Literal(null), Literal(null)))
    assertSuccess(BRound('intField, Literal(1)))

    assertError(BRound('intField, 'intField), "Only foldable Expression is allowed")
    assertError(BRound('intField, 'booleanField), "requires int type")
    assertError(BRound('intField, 'mapField), "requires int type")
    assertError(BRound('booleanField, 'intField), "requires numeric type")
  }

  test("check types for Greatest/Least") {
    // Both variadic comparators share arity, homogeneity, and ordering checks.
    for (operator <- Seq[(Seq[Expression] => Expression)](Greatest, Least)) {
      assertError(operator(Seq('booleanField)), "requires at least two arguments")
      assertError(operator(Seq('intField, 'stringField)), "should all have the same type")
      assertError(operator(Seq('mapField, 'mapField)), "does not support ordering")
    }
  }
}
aosagie/spark
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ExpressionTypeCheckingSuite.scala
Scala
apache-2.0
10,236
package processes

/** Shared definitions for the `freeMonads` example package. */
package object freeMonads {
  /** Identity type constructor: `Id[A]` is just `A` itself.
    * NOTE(review): presumably used as the target type when interpreting a
    * free-monad program directly (no wrapping effect) — confirm against the
    * interpreters elsewhere in this package.
    */
  type Id[A] = A
}
EECOLOR/scala-clean-code-patterns
src/main/scala/processes/freeMonads/package.scala
Scala
mit
65