code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
/* * Copyright 2021 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package models.businessactivities import jto.validation.{Invalid, Path, Valid, ValidationError} import models.Country import org.scalatestplus.play.PlaySpec import play.api.libs.json.{JsSuccess, Json} class AccountantsAddressSpec extends PlaySpec { val testAddressLine1 = "Default Line 1" val testAddressLine2 = "Default Line 2" val testAddressLine3 = Some("Default Line 3") val testAddressLine4 = Some("Default Line 4") val testPostcode = "AA1 1AA" val testCountry = Country("United Kingdom", "GB") val NewAddressLine1 = "New Line 1" val NewAddressLine2 = "New Line 2" val NewAddressLine3 = Some("New Line 3") val NewAddressLine4 = Some("New Line 4") val NewPostcode = "AA1 1AA" val testUKAddress = UkAccountantsAddress(testAddressLine1, testAddressLine2, testAddressLine3, testAddressLine4, testPostcode) val testNonUKAddress = NonUkAccountantsAddress(testAddressLine1, testAddressLine2, testAddressLine3, testAddressLine4, testCountry) val testUKJson = Json.obj( "accountantsAddressLine1" -> testAddressLine1, "accountantsAddressLine2" -> testAddressLine2, "accountantsAddressLine3" -> testAddressLine3, "accountantsAddressLine4" -> testAddressLine4, "accountantsAddressPostCode" -> testPostcode ) val testNonUKJson = Json.obj( "accountantsAddressLine1" -> testAddressLine1, "accountantsAddressLine2" -> testAddressLine2, "accountantsAddressLine3" -> testAddressLine3, "accountantsAddressLine4" -> 
testAddressLine4, "accountantsAddressCountry" -> testCountry.code ) val testUKModel = Map( "isUK" -> Seq("true"), "addressLine1" -> Seq(testAddressLine1), "addressLine2" -> Seq(testAddressLine2), "addressLine3" -> Seq("Default Line 3"), "addressLine4" -> Seq("Default Line 4"), "postCode" -> Seq(testPostcode) ) val testNonUKModel = Map( "isUK" -> Seq("false"), "addressLineNonUK1" -> Seq(testAddressLine1), "addressLineNonUK2" -> Seq(testAddressLine2), "addressLineNonUK3" -> Seq("Default Line 3"), "addressLineNonUK4" -> Seq("Default Line 4"), "country" -> Seq(testCountry.code) ) "AccountantsAddress" must { "validate toLines for UK address" in { testUKAddress.toLines must be (Seq("Default Line 1", "Default Line 2", "Default Line 3", "Default Line 4", "AA1 1AA")) } "validate toLines for Non UK address" in { testNonUKAddress.toLines must be (Seq("Default Line 1", "Default Line 2", "Default Line 3", "Default Line 4", "United Kingdom")) } "Form validation" must { "pass validation" when { "given valid Uk address data" in { AccountantsAddress.ukFormRule.validate(testUKModel) must be(Valid(testUKAddress)) } "given valid Non-Uk address data" in { AccountantsAddress.nonUkFormRule.validate(testNonUKModel) must be (Valid(testNonUKAddress)) } } "fail validation" when { "country is given invalid data" in { val model = testNonUKModel ++ Map("country" -> Seq("HGHHHH")) AccountantsAddress.nonUkFormRule.validate(model) must be( Invalid(Seq( (Path \\ "country") -> Seq( ValidationError("error.invalid.country") ) ))) } } } "JSON validation" must { "Round trip a UK Address correctly" in { AccountantsAddress.jsonReads.reads( AccountantsAddress.jsonWrites.writes(testUKAddress) ) must be (JsSuccess(testUKAddress)) } "Round trip a Non UK Address correctly" in { AccountantsAddress.jsonReads.reads( AccountantsAddress.jsonWrites.writes(testNonUKAddress) ) must be (JsSuccess(testNonUKAddress)) } "Serialise UK address as expected" in { Json.toJson(testUKAddress.asInstanceOf[AccountantsAddress]) 
must be(testUKJson) } "Serialise non-UK address as expected" in { Json.toJson(testNonUKAddress.asInstanceOf[AccountantsAddress]) must be(testNonUKJson) } "Deserialise UK address as expected" in { testUKJson.as[AccountantsAddress] must be(testUKAddress) } "Deserialise non-UK address as expected" in { testNonUKJson.as[AccountantsAddress] must be(testNonUKAddress) } } } }
hmrc/amls-frontend
test/models/businessactivities/AccountantsAddressSpec.scala
Scala
apache-2.0
5,008
package hello import org.springframework.boot.SpringApplication import org.springframework.boot.autoconfigure.SpringBootApplication object Application extends App { SpringApplication.run(classOf[Application], args: _*) } @SpringBootApplication class Application
shekhargulati/52-technologies-in-2016
37-spring-boot-scala/gs-rest-service/src/main/scala/hello/Application.scala
Scala
mit
269
package org.jetbrains.sbt.editor.documentationProvider import com.intellij.lang.documentation.{AbstractDocumentationProvider, DocumentationMarkup} import com.intellij.psi.PsiElement import com.intellij.psi.util.PsiTreeUtil import org.jetbrains.annotations.{Nls, NonNls} import org.jetbrains.plugins.scala.editor.documentationProvider.ScalaDocumentationProvider import org.jetbrains.plugins.scala.extensions.OptionExt import org.jetbrains.plugins.scala.lang.psi.api.base.ScLiteral import org.jetbrains.plugins.scala.lang.psi.api.base.literals.ScStringLiteral import org.jetbrains.plugins.scala.lang.psi.api.expr._ import org.jetbrains.plugins.scala.lang.psi.api.statements.ScPatternDefinition import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScNamedElement import org.jetbrains.sbt.editor.documentationProvider.SbtDocumentationProvider._ import org.jetbrains.sbt.language.SbtFileType import scala.jdk.CollectionConverters._ /** * Generates documentation from sbt key description.<br> * There are three types of key: SettingKey, TaskKey, InputKey<br> * [[https://www.scala-sbt.org/1.x/docs/Basic-Def.html#Keys]]<br><br> * * For sbt '''0.13.18''' see sbt.Structure.scala: {{{ * object SettingKey { * def apply[T: Manifest](label: String, description: String, ...): SettingKey[T] = ... * def apply[T](akey: AttributeKey[T]): SettingKey[T] = ... 
* } * }}} * example: {{{ * val libraryDependencies = SettingKey[Seq[ModuleID]]("library-dependencies", "Declares managed dependencies.", APlusSetting) * }}} * * For sbt '''1.2.8''' see sbt.BuildSyntax.scala: {{{ * def settingKey[T](description: String): SettingKey[T] = macro std.KeyMacro.settingKeyImpl[T] * }}} * example: {{{ * val libraryDependencies = settingKey[Seq[ModuleID]]("Declares managed dependencies.").withRank(APlusSetting) * }}} */ class SbtDocumentationProvider extends AbstractDocumentationProvider { private val scalaDocProvider = new ScalaDocumentationProvider override def getQuickNavigateInfo(element: PsiElement, originalElement: PsiElement): String = if (!isInSbtFile(originalElement)) null else generateSbtDoc(element, originalElement, scalaDocProvider.getQuickNavigateInfo).orNull override def generateDoc(element: PsiElement, originalElement: PsiElement): String = if (!isInSbtFile(originalElement)) null else generateSbtDoc(element, originalElement, scalaDocProvider.generateDoc).orNull private def generateSbtDoc(element: PsiElement, originalElement: PsiElement, generateScalaDoc: (PsiElement, PsiElement) => String): Option[String] = for { sbtKey <- Option(element).filterByType[ScNamedElement] sbtDoc <- generateSBtDocFromSbtKey(sbtKey) scalaDoc <- Option(generateScalaDoc(element, originalElement)) } yield appendToScalaDoc(scalaDoc, sbtDoc) private def isInSbtFile(element: PsiElement): Boolean = Option(element).safeMap(_.getContainingFile).exists(_.getFileType == SbtFileType) private def generateSBtDocFromSbtKey(key: ScNamedElement): Option[String] = for { keyDefinition <- keyDefinition(key) applyMethodCall <- keyApplyMethodCall(keyDefinition) args = applyMethodCall.argumentExpressions descriptionElement <- descriptionArgument(args) description <- descriptionText(descriptionElement) if description.nonEmpty } yield wrapIntoHtml(description) private def keyDefinition(key: ScNamedElement): Option[ScPatternDefinition] = Option(key.getNavigationElement) 
.safeMap(_.getParent) .safeMap(_.getParent) .collect { case s: ScPatternDefinition => s } private def keyApplyMethodCall(keyDefinition: ScPatternDefinition): Option[ScMethodCall] = { // last found method child will be the left-most method call in chain val methodCalls: Iterable[ScMethodCall] = PsiTreeUtil.findChildrenOfType(keyDefinition, classOf[ScMethodCall]).asScala methodCalls.lastOption.filter(isSbtKeyApplyMethodCall) } private def isSbtKeyApplyMethodCall(call: ScMethodCall): Boolean = Option(call.getInvokedExpr) .filterByType[ScGenericCall] .map(_.referencedExpr.getText.toLowerCase) .exists(SbtKeyTypes.contains) private def descriptionArgument(args: Iterable[ScExpression]): Option[ScExpression] = Some(args.toList).collect { case (_: ScLiteral) :: description :: _ => description //e.g. SettingKey[Unit]("some-key", "Here goes description for some-key", ...) case (ref: ScReferenceExpression) :: _ => ref // e.g. SettingKey(BasicKeys.watch) case description :: Nil => description //e.g. settingKey[Seq[ModuleID]]("Some description").withRank(BSetting) } @NonNls private def descriptionText(element: ScExpression): Option[String] = Some(element).collect { case ScInfixExpr(left, _, right) => Seq(left, right).map(descriptionText).mkString case ScStringLiteral(string) => string case ref: ScReferenceExpression => s"<i>${ref.getText}</i>" } private def wrapIntoHtml(@Nls description: String): String = DocumentationMarkup.CONTENT_START + description + DocumentationMarkup.CONTENT_END private def appendToScalaDoc(@Nls scalaDoc: String, @Nls sbtDoc: String): String = { @NonNls val closingTags = "</body></html>" val withoutClosingTags = scalaDoc.replace(closingTags, "") s"$withoutClosingTags$sbtDoc$closingTags" } } private object SbtDocumentationProvider { @NonNls private val SbtKeyTypes: Set[String] = Set("SettingKey", "TaskKey", "InputKey", "AttributeKey").map(_.toLowerCase) }
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/sbt/editor/documentationProvider/SbtDocumentationProvider.scala
Scala
apache-2.0
5,605
package doodle package image package examples import doodle.core._ import doodle.image._ import doodle.syntax._ object Tree { import PathElement._ def leaf(angle: Angle, length: Double): Image = Image .openPath( Seq( moveTo(Point.zero), lineTo(Point.polar(length, angle)) )) .strokeColor(Color.hsl(angle, .5, .5)) def branch(depth: Int, angle: Angle, length: Double): Image = { if (depth == 0) { leaf(angle, length) } else { val l = branch(depth - 1, angle + 20.degrees, length * 0.8) val r = branch(depth - 1, angle - 20.degrees, length * 0.8) val b = leaf(angle, length) b on ((l on r) at Vec.polar(length, angle)) } } def image = branch(10, 90.degrees, 50) }
underscoreio/doodle
image/shared/src/main/scala/doodle/image/examples/Tree.scala
Scala
apache-2.0
771
p == fst compose m q == snd compose m
hmemcpy/milewski-ctfp-pdf
src/content/1.5/code/scala/snippet27.scala
Scala
gpl-3.0
37
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package spark.mllib.recommendation import scala.collection.mutable.{ArrayBuffer, BitSet} import scala.util.Random import scala.util.Sorting import spark.{HashPartitioner, Partitioner, SparkContext, RDD} import spark.storage.StorageLevel import spark.KryoRegistrator import spark.SparkContext._ import com.esotericsoftware.kryo.Kryo import org.jblas.{DoubleMatrix, SimpleBlas, Solve} /** * Out-link information for a user or product block. This includes the original user/product IDs * of the elements within this block, and the list of destination blocks that each user or * product will need to send its feature vector to. */ private[recommendation] case class OutLinkBlock(elementIds: Array[Int], shouldSend: Array[BitSet]) /** * In-link information for a user (or product) block. This includes the original user/product IDs * of the elements within this block, as well as an array of indices and ratings that specify * which user in the block will be rated by which products from each product block (or vice-versa). 
* Specifically, if this InLinkBlock is for users, ratingsForBlock(b)(i) will contain two arrays, * indices and ratings, for the i'th product that will be sent to us by product block b (call this * P). These arrays represent the users that product P had ratings for (by their index in this * block), as well as the corresponding rating for each one. We can thus use this information when * we get product block b's message to update the corresponding users. */ private[recommendation] case class InLinkBlock( elementIds: Array[Int], ratingsForBlock: Array[Array[(Array[Int], Array[Double])]]) /** * A more compact class to represent a rating than Tuple3[Int, Int, Double]. */ private[recommendation] case class Rating(user: Int, product: Int, rating: Double) /** * Alternating Least Squares matrix factorization. * * This is a blocked implementation of the ALS factorization algorithm that groups the two sets * of factors (referred to as "users" and "products") into blocks and reduces communication by only * sending one copy of each user vector to each product block on each iteration, and only for the * product blocks that need that user's feature vector. This is achieved by precomputing some * information about the ratings matrix to determine the "out-links" of each user (which blocks of * products it will contribute to) and "in-link" information for each product (which of the feature * vectors it receives from each user block it will depend on). This allows us to send only an * array of feature vectors between each user block and product block, and have the product block * find the users' ratings and update the products based on these messages. */ class ALS private (var numBlocks: Int, var rank: Int, var iterations: Int, var lambda: Double) extends Serializable { def this() = this(-1, 10, 10, 0.01) /** * Set the number of blocks to parallelize the computation into; pass -1 for an auto-configured * number of blocks. Default: -1. 
*/ def setBlocks(numBlocks: Int): ALS = { this.numBlocks = numBlocks this } /** Set the rank of the feature matrices computed (number of features). Default: 10. */ def setRank(rank: Int): ALS = { this.rank = rank this } /** Set the number of iterations to run. Default: 10. */ def setIterations(iterations: Int): ALS = { this.iterations = iterations this } /** Set the regularization parameter, lambda. Default: 0.01. */ def setLambda(lambda: Double): ALS = { this.lambda = lambda this } /** * Run ALS with the configured parameters on an input RDD of (user, product, rating) triples. * Returns a MatrixFactorizationModel with feature vectors for each user and product. */ def train(ratings: RDD[(Int, Int, Double)]): MatrixFactorizationModel = { val numBlocks = if (this.numBlocks == -1) { math.max(ratings.context.defaultParallelism, ratings.partitions.size / 2) } else { this.numBlocks } val partitioner = new HashPartitioner(numBlocks) val ratingsByUserBlock = ratings.map{ case (u, p, r) => (u % numBlocks, Rating(u, p, r)) } val ratingsByProductBlock = ratings.map{ case (u, p, r) => (p % numBlocks, Rating(p, u, r)) } val (userInLinks, userOutLinks) = makeLinkRDDs(numBlocks, ratingsByUserBlock) val (productInLinks, productOutLinks) = makeLinkRDDs(numBlocks, ratingsByProductBlock) // Initialize user and product factors randomly val seed = new Random().nextInt() var users = userOutLinks.mapValues(_.elementIds.map(u => randomFactor(rank, seed ^ u))) var products = productOutLinks.mapValues(_.elementIds.map(p => randomFactor(rank, seed ^ ~p))) for (iter <- 0 until iterations) { // perform ALS update products = updateFeatures(users, userOutLinks, productInLinks, partitioner, rank, lambda) users = updateFeatures(products, productOutLinks, userInLinks, partitioner, rank, lambda) } // Flatten and cache the two final RDDs to un-block them val usersOut = users.join(userOutLinks).flatMap { case (b, (factors, outLinkBlock)) => for (i <- 0 until factors.length) yield 
(outLinkBlock.elementIds(i), factors(i)) } val productsOut = products.join(productOutLinks).flatMap { case (b, (factors, outLinkBlock)) => for (i <- 0 until factors.length) yield (outLinkBlock.elementIds(i), factors(i)) } usersOut.persist() productsOut.persist() new MatrixFactorizationModel(rank, usersOut, productsOut) } /** * Make the out-links table for a block of the users (or products) dataset given the list of * (user, product, rating) values for the users in that block (or the opposite for products). */ private def makeOutLinkBlock(numBlocks: Int, ratings: Array[Rating]): OutLinkBlock = { val userIds = ratings.map(_.user).distinct.sorted val numUsers = userIds.length val userIdToPos = userIds.zipWithIndex.toMap val shouldSend = Array.fill(numUsers)(new BitSet(numBlocks)) for (r <- ratings) { shouldSend(userIdToPos(r.user))(r.product % numBlocks) = true } OutLinkBlock(userIds, shouldSend) } /** * Make the in-links table for a block of the users (or products) dataset given a list of * (user, product, rating) values for the users in that block (or the opposite for products). 
*/ private def makeInLinkBlock(numBlocks: Int, ratings: Array[Rating]): InLinkBlock = { val userIds = ratings.map(_.user).distinct.sorted val numUsers = userIds.length val userIdToPos = userIds.zipWithIndex.toMap // Split out our ratings by product block val blockRatings = Array.fill(numBlocks)(new ArrayBuffer[Rating]) for (r <- ratings) { blockRatings(r.product % numBlocks) += r } val ratingsForBlock = new Array[Array[(Array[Int], Array[Double])]](numBlocks) for (productBlock <- 0 until numBlocks) { // Create an array of (product, Seq(Rating)) ratings val groupedRatings = blockRatings(productBlock).groupBy(_.product).toArray // Sort them by product ID val ordering = new Ordering[(Int, ArrayBuffer[Rating])] { def compare(a: (Int, ArrayBuffer[Rating]), b: (Int, ArrayBuffer[Rating])): Int = a._1 - b._1 } Sorting.quickSort(groupedRatings)(ordering) // Translate the user IDs to indices based on userIdToPos ratingsForBlock(productBlock) = groupedRatings.map { case (p, rs) => (rs.view.map(r => userIdToPos(r.user)).toArray, rs.view.map(_.rating).toArray) } } InLinkBlock(userIds, ratingsForBlock) } /** * Make RDDs of InLinkBlocks and OutLinkBlocks given an RDD of (blockId, (u, p, r)) values for * the users (or (blockId, (p, u, r)) for the products). We create these simultaneously to avoid * having to shuffle the (blockId, (u, p, r)) RDD twice, or to cache it. 
*/ private def makeLinkRDDs(numBlocks: Int, ratings: RDD[(Int, Rating)]) : (RDD[(Int, InLinkBlock)], RDD[(Int, OutLinkBlock)]) = { val grouped = ratings.partitionBy(new HashPartitioner(numBlocks)) val links = grouped.mapPartitionsWithIndex((blockId, elements) => { val ratings = elements.map{_._2}.toArray val inLinkBlock = makeInLinkBlock(numBlocks, ratings) val outLinkBlock = makeOutLinkBlock(numBlocks, ratings) Iterator.single((blockId, (inLinkBlock, outLinkBlock))) }, true) links.persist(StorageLevel.MEMORY_AND_DISK) (links.mapValues(_._1), links.mapValues(_._2)) } /** * Make a random factor vector with the given seed. * TODO: Initialize things using mapPartitionsWithIndex to make it faster? */ private def randomFactor(rank: Int, seed: Int): Array[Double] = { val rand = new Random(seed) Array.fill(rank)(rand.nextDouble) } /** * Compute the user feature vectors given the current products (or vice-versa). This first joins * the products with their out-links to generate a set of messages to each destination block * (specifically, the features for the products that user block cares about), then groups these * by destination and joins them with the in-link info to figure out how to update each user. * It returns an RDD of new feature vectors for each user block. 
*/ private def updateFeatures( products: RDD[(Int, Array[Array[Double]])], productOutLinks: RDD[(Int, OutLinkBlock)], userInLinks: RDD[(Int, InLinkBlock)], partitioner: Partitioner, rank: Int, lambda: Double) : RDD[(Int, Array[Array[Double]])] = { val numBlocks = products.partitions.size productOutLinks.join(products).flatMap { case (bid, (outLinkBlock, factors)) => val toSend = Array.fill(numBlocks)(new ArrayBuffer[Array[Double]]) for (p <- 0 until outLinkBlock.elementIds.length; userBlock <- 0 until numBlocks) { if (outLinkBlock.shouldSend(p)(userBlock)) { toSend(userBlock) += factors(p) } } toSend.zipWithIndex.map{ case (buf, idx) => (idx, (bid, buf.toArray)) } }.groupByKey(partitioner) .join(userInLinks) .mapValues{ case (messages, inLinkBlock) => updateBlock(messages, inLinkBlock, rank, lambda) } } /** * Compute the new feature vectors for a block of the users matrix given the list of factors * it received from each product and its InLinkBlock. */ def updateBlock(messages: Seq[(Int, Array[Array[Double]])], inLinkBlock: InLinkBlock, rank: Int, lambda: Double) : Array[Array[Double]] = { // Sort the incoming block factor messages by block ID and make them an array val blockFactors = messages.sortBy(_._1).map(_._2).toArray // Array[Array[Double]] val numBlocks = blockFactors.length val numUsers = inLinkBlock.elementIds.length // We'll sum up the XtXes using vectors that represent only the lower-triangular part, since // the matrices are symmetric val triangleSize = rank * (rank + 1) / 2 val userXtX = Array.fill(numUsers)(DoubleMatrix.zeros(triangleSize)) val userXy = Array.fill(numUsers)(DoubleMatrix.zeros(rank)) // Some temp variables to avoid memory allocation val tempXtX = DoubleMatrix.zeros(triangleSize) val fullXtX = DoubleMatrix.zeros(rank, rank) // Compute the XtX and Xy values for each user by adding products it rated in each product block for (productBlock <- 0 until numBlocks) { for (p <- 0 until blockFactors(productBlock).length) { val x = new 
DoubleMatrix(blockFactors(productBlock)(p)) fillXtX(x, tempXtX) val (us, rs) = inLinkBlock.ratingsForBlock(productBlock)(p) for (i <- 0 until us.length) { userXtX(us(i)).addi(tempXtX) SimpleBlas.axpy(rs(i), x, userXy(us(i))) } } } // Solve the least-squares problem for each user and return the new feature vectors userXtX.zipWithIndex.map{ case (triangularXtX, index) => // Compute the full XtX matrix from the lower-triangular part we got above fillFullMatrix(triangularXtX, fullXtX) // Add regularization (0 until rank).foreach(i => fullXtX.data(i*rank + i) += lambda) // Solve the resulting matrix, which is symmetric and positive-definite Solve.solvePositive(fullXtX, userXy(index)).data } } /** * Set xtxDest to the lower-triangular part of x transpose * x. For efficiency in summing * these matrices, we store xtxDest as only rank * (rank+1) / 2 values, namely the values * at (0,0), (1,0), (1,1), (2,0), (2,1), (2,2), etc in that order. */ private def fillXtX(x: DoubleMatrix, xtxDest: DoubleMatrix) { var i = 0 var pos = 0 while (i < x.length) { var j = 0 while (j <= i) { xtxDest.data(pos) = x.data(i) * x.data(j) pos += 1 j += 1 } i += 1 } } /** * Given a triangular matrix in the order of fillXtX above, compute the full symmetric square * matrix that it represents, storing it into destMatrix. */ private def fillFullMatrix(triangularMatrix: DoubleMatrix, destMatrix: DoubleMatrix) { val rank = destMatrix.rows var i = 0 var pos = 0 while (i < rank) { var j = 0 while (j <= i) { destMatrix.data(i*rank + j) = triangularMatrix.data(pos) destMatrix.data(j*rank + i) = triangularMatrix.data(pos) pos += 1 j += 1 } i += 1 } } } /** * Top-level methods for calling Alternating Least Squares (ALS) matrix factorizaton. */ object ALS { /** * Train a matrix factorization model given an RDD of ratings given by users to some products, * in the form of (userID, productID, rating) pairs. 
We approximate the ratings matrix as the * product of two lower-rank matrices of a given rank (number of features). To solve for these * features, we run a given number of iterations of ALS. This is done using a level of * parallelism given by `blocks`. * * @param ratings RDD of (userID, productID, rating) pairs * @param rank number of features to use * @param iterations number of iterations of ALS (recommended: 10-20) * @param lambda regularization factor (recommended: 0.01) * @param blocks level of parallelism to split computation into */ def train( ratings: RDD[(Int, Int, Double)], rank: Int, iterations: Int, lambda: Double, blocks: Int) : MatrixFactorizationModel = { new ALS(blocks, rank, iterations, lambda).train(ratings) } /** * Train a matrix factorization model given an RDD of ratings given by users to some products, * in the form of (userID, productID, rating) pairs. We approximate the ratings matrix as the * product of two lower-rank matrices of a given rank (number of features). To solve for these * features, we run a given number of iterations of ALS. The level of parallelism is determined * automatically based on the number of partitions in `ratings`. * * @param ratings RDD of (userID, productID, rating) pairs * @param rank number of features to use * @param iterations number of iterations of ALS (recommended: 10-20) * @param lambda regularization factor (recommended: 0.01) */ def train(ratings: RDD[(Int, Int, Double)], rank: Int, iterations: Int, lambda: Double) : MatrixFactorizationModel = { train(ratings, rank, iterations, lambda, -1) } /** * Train a matrix factorization model given an RDD of ratings given by users to some products, * in the form of (userID, productID, rating) pairs. We approximate the ratings matrix as the * product of two lower-rank matrices of a given rank (number of features). To solve for these * features, we run a given number of iterations of ALS. 
The level of parallelism is determined * automatically based on the number of partitions in `ratings`. * * @param ratings RDD of (userID, productID, rating) pairs * @param rank number of features to use * @param iterations number of iterations of ALS (recommended: 10-20) */ def train(ratings: RDD[(Int, Int, Double)], rank: Int, iterations: Int) : MatrixFactorizationModel = { train(ratings, rank, iterations, 0.01, -1) } private class ALSRegistrator extends KryoRegistrator { override def registerClasses(kryo: Kryo) { kryo.register(classOf[Rating]) } } def main(args: Array[String]) { if (args.length != 5 && args.length != 6) { println("Usage: ALS <master> <ratings_file> <rank> <iterations> <output_dir> [<blocks>]") System.exit(1) } val (master, ratingsFile, rank, iters, outputDir) = (args(0), args(1), args(2).toInt, args(3).toInt, args(4)) val blocks = if (args.length == 6) args(5).toInt else -1 System.setProperty("spark.serializer", "spark.KryoSerializer") System.setProperty("spark.kryo.registrator", classOf[ALSRegistrator].getName) System.setProperty("spark.kryo.referenceTracking", "false") System.setProperty("spark.locality.wait", "10000") val sc = new SparkContext(master, "ALS") val ratings = sc.textFile(ratingsFile).map { line => val fields = line.split(',') (fields(0).toInt, fields(1).toInt, fields(2).toDouble) } val model = ALS.train(ratings, rank, iters, 0.01, blocks) model.userFeatures.map{ case (id, vec) => id + "," + vec.mkString(" ") } .saveAsTextFile(outputDir + "/userFeatures") model.productFeatures.map{ case (id, vec) => id + "," + vec.mkString(" ") } .saveAsTextFile(outputDir + "/productFeatures") println("Final user/product features written to " + outputDir) System.exit(0) } }
rjpower/spark
mllib/src/main/scala/spark/mllib/recommendation/ALS.scala
Scala
apache-2.0
18,493
package lila.plan import org.joda.time.DateTime case class PayPalIpnKey(value: String) extends AnyVal case class CustomerId(value: String) extends AnyVal case class ChargeId(value: String) extends AnyVal case class Source(value: String) extends AnyVal sealed abstract class Freq(val renew: Boolean) object Freq { case object Monthly extends Freq(renew = true) case object Onetime extends Freq(renew = false) } case class Usd(value: BigDecimal) extends AnyVal with Ordered[Usd] { def compare(other: Usd) = value compare other.value def cents = Cents((value * 100).toInt) def toFloat = value.toFloat def toInt = value.toInt override def toString = s"$$$value" } object Usd { def apply(value: Double): Usd = Usd(BigDecimal(value)) def apply(value: Int): Usd = Usd(BigDecimal(value)) } case class Cents(value: Int) extends AnyVal with Ordered[Cents] { def compare(other: Cents) = value compare other.value def usd = Usd(value / 100d) override def toString = usd.toString } case class StripeSubscriptions(data: List[StripeSubscription]) case class StripePlan(id: String, name: String, amount: Cents) { def cents = amount def usd = cents.usd } object StripePlan { def make(cents: Cents, freq: Freq): StripePlan = freq match { case Freq.Monthly => StripePlan( id = s"monthly_${cents.value}", name = s"Monthly ${cents.usd}", amount = cents) case Freq.Onetime => StripePlan( id = s"onetime_${cents.value}", name = s"One-time ${cents.usd}", amount = cents) } val defaultAmounts = List(5, 10, 20, 50).map(Usd.apply).map(_.cents) } case class StripeSubscription( id: String, plan: StripePlan, customer: CustomerId, cancel_at_period_end: Boolean) { def renew = !cancel_at_period_end } case class StripeCustomer( id: CustomerId, email: Option[String], subscriptions: StripeSubscriptions) { def firstSubscription = subscriptions.data.headOption def plan = firstSubscription.map(_.plan) def renew = firstSubscription ?? 
(_.renew) } case class StripeCharge(id: ChargeId, amount: Cents, customer: CustomerId) case class StripeInvoice( id: Option[String], amount_due: Int, date: Long, paid: Boolean) { def cents = Cents(amount_due) def usd = cents.usd def dateTime = new DateTime(date * 1000) }
clarkerubber/lila
modules/plan/src/main/model.scala
Scala
agpl-3.0
2,329
package com.epicport.action.game import com.epicport.action.DefaultLayout import com.epicport.action.core.Link import xitrum.SkipCsrfCheck case class ScreenShot(small: String, big: String) abstract class GameDescription extends DefaultLayout with SkipCsrfCheck { def gameName: String def gameDescription: String def linkToPlay: String def links: Seq[Link] def screenshots: Seq[ScreenShot] final def execute() { respondView[GameDescription]() } }
caiiiycuk/epicport
web/src/main/scala/com/epicport/action/game/GameDescription.scala
Scala
gpl-2.0
483
/* * Copyright (C) 2011-2013 org.bayswater * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.bayswater.musicrest.model import org.bayswater.musicrest.abc.AbcMongo import org.bayswater.musicrest.abc.Abc import org.bayswater.musicrest.MusicRestSettings import com.mongodb.casbah.Imports._ import com.mongodb.ServerAddress import scalaz.Validation trait TuneModel { def testConnection(): Boolean /** insert a tune */ def insert(genre: String, abc:Abc): Validation[String, String] /** replace an existing tune (i.e. 
one with an existing tune _id) */ def replace(id: ObjectId, genre: String, abc:Abc): Validation[String, String] /** search by if to discover if the tune exists */ def exists(genre: String, id: String) : Boolean /** delete a tune by id */ def delete(genre: String, id: String) : Validation[String, String] /** delete all the tunes within the genre */ def delete(genre: String) : Validation[String, String] /** get all the currently supported genres */ def getSupportedGenres() : List[String] /** get all the currently supported rhythms for the supplied genre */ def getSupportedRhythmsFor(genre: String): List[String] /** get a page from the complete set of tunes in the genre */ def getTunes(genre: String, sort: String, page: Int, size: Int): Iterator[scala.collection.Map[String, String]] /** get the ABC notes for the supplied tune */ def getNotes(genre: String, id: String): Option[String] /** get the ABC headers for the supplied tune */ def getAbcHeaders(genre: String, id: String): Option[String] /** get the submitter of the tune */ def getSubmitter(genre: String, id: String): Option[String] /** get the tune */ def getTune(genre: String, id: String): Option[AbcMongo] /** get the tune reference (i.e. the _id GUID) */ def getTuneRef(genre: String, id: String): Option[ObjectId] /** add a new title to a tune */ def addAlternativeTitle(genre: String, id: String, title: String) : Validation[String, String] /** generic search */ def search(genre: String, params: Map[String, String], sort: String, page: Int, size: Int): Iterator[scala.collection.Map[String, String]] /** results count for a generic search */ def count(genre: String, params: Map[String, String]) : Long /** add an index on the genre */ def createIndex(genre: String) } object TuneModel { /* Potential change for new DB representation for tunes. * * By default, Mongo automatically provides _id as a GUID which is implicit. i.e. if we use tid as our unique id, then _id is also there. 
* Up to version 1.1.2, we over-ride Mongos's _id with out own value (a concatenation of tune name and rhythm) * From version 1.1.3 we intend to revert to using Mongo's _id and supply our tid key in addition * This is the only line we need to alter to change the DB tune representation */ val tuneKey = "tid" // val tuneKey = "_id" /** MongoConnection is very badly documented in Casbah. Apparently it is in fact a pooled connection and * you can alter the size of the pool with MongoOptions. (This is raw MongoDB behaviour). Let's * experiment with just setting the pool size for the moment. */ private val settings = MusicRestSettings private val mongoClient = MongoCasbahUtil.buildMongoClient( settings.dbHost , settings.dbPort , settings.dbLogin , settings.dbPassword , settings.dbName , Some(settings.dbPoolSize) ) private val casbahTuneModel = new TuneModelCasbahImpl(mongoClient, settings.dbName) def apply(): TuneModel = casbahTuneModel }
newlandsvalley/musicrest
src/main/scala/org/bayswater/musicrest/model/TuneModel.scala
Scala
apache-2.0
4,321
package hello.client import hello.hello.HelloServiceGrpc.{HelloServiceBlockingClient, HelloServiceStub} import hello.hello.{HelloReq, HelloResp, HelloServiceGrpc} import io.grpc.{ManagedChannelBuilder, StatusRuntimeException} import scala.concurrent.{Await, ExecutionContext, Future} import scala.concurrent.duration._ object HelloClient { val goodReq = HelloReq("Euler") val badReq = HelloReq("Leonhard Euler") def main(args: Array[String]): Unit = { val channel = ManagedChannelBuilder .forAddress("localhost", 50051) .usePlaintext(true) .build() val asyncStub = HelloServiceGrpc.stub(channel) val blockingStub: HelloServiceGrpc.HelloServiceBlockingStub = HelloServiceGrpc.blockingStub(channel) implicit val executionContext = ExecutionContext.global blockingExample(blockingStub) asyncExample(asyncStub) } def blockingExample(stub: HelloServiceBlockingClient): Unit = { println("Starting blocking example") val syncResp = stub.sayHello(goodReq) println(syncResp.result) try { stub.sayHelloStrict(badReq) } catch { case rtEx: StatusRuntimeException => println(s"Blocking implementation description ${rtEx.getStatus.getDescription} and code ${rtEx.getStatus.getCode}") } } def asyncExample(stub: HelloServiceStub)(implicit ec: ExecutionContext): Unit = { println("Starting async example") val f1: Future[HelloResp] = stub.sayHello(goodReq) val f2: Future[HelloResp] = stub.sayHelloStrict(badReq) f1.onSuccess { case HelloResp(result: String) => println(result) } f2 onFailure { case rtEx: StatusRuntimeException => println(s"Async implementation description ${rtEx.getStatus.getDescription} and code ${rtEx.getStatus.getCode}") } Await.ready(Future.sequence(Seq(f1, f2)), 1.second) } }
avinassh/grpc-errors
scala/src/main/scala/hello/client/HelloClient.scala
Scala
mit
1,853
import com.amazonaws.services.{simpledb => aws} object DomainMetadata { def apply(r: aws.model.DomainMetadataResult): DomainMetadata = new DomainMetadata( attributeNameCount = r.getAttributeNameCount, attributeNamesSizeBytes = r.getAttributeNamesSizeBytes, attributeValueCount = r.getAttributeValueCount, attributeValuesSizeBytes = r.getAttributeValuesSizeBytes, itemCount = r.getItemCount, itemNamesSizeBytes = r.getItemNamesSizeBytes, timestamp = r.getTimestamp ) } case class DomainMetadata( attributeNameCount: Int, attributeNamesSizeBytes: Long, attributeValueCount: Int, attributeValuesSizeBytes: Long, itemCount: Int, itemNamesSizeBytes: Long, timestamp: Int )
hirokikonishi/awscala
aws/simpledb/src/main/scala/DomainMetadata.scala
Scala
apache-2.0
712
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.samza.system.hdfs.writer import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.hadoop.io.{SequenceFile, Writable, IOUtils} import org.apache.hadoop.io.SequenceFile.Writer import org.apache.hadoop.io.compress.{CompressionCodec, DefaultCodec, GzipCodec, SnappyCodec} import org.apache.samza.system.hdfs.HdfsConfig import org.apache.samza.system.hdfs.HdfsConfig._ import org.apache.samza.system.OutgoingMessageEnvelope abstract class SequenceFileHdfsWriter(dfs: FileSystem, systemName: String, config: HdfsConfig) extends HdfsWriter[SequenceFile.Writer](dfs, systemName, config) { val batchSize = config.getWriteBatchSizeBytes(systemName) val bucketer = Some(Bucketer.getInstance(systemName, config)) var bytesWritten = 0L /** * Generate a key (usually a singleton dummy value) appropriate for the SequenceFile you're writing */ def getKey: Writable /** * Wrap the outgoing message in an appropriate Writable */ def getValue(outgoing: OutgoingMessageEnvelope): Writable /** * Calculate (or estimate) the byte size of the outgoing message. Used internally * by HdfsWriters to decide when to cut a new output file based on max size. 
*/ def getOutputSizeInBytes(writable: Writable): Long /** * The Writable key class for the SequenceFile type */ def keyClass: Class[_ <: Writable] /** * The Writable value class for the SequenceFile type */ def valueClass: Class[_ <: Writable] /** * Accepts a human-readable compression type from the job properties file such as * "gzip", "snappy", or "none" - returns an appropriate SequenceFile CompressionCodec. */ def getCompressionCodec(compressionType: String) = { compressionType match { case "snappy" => new SnappyCodec() case "gzip" => new GzipCodec() case _ => new DefaultCodec() } } override def flush: Unit = writer.map { _.hflush } override def write(outgoing: OutgoingMessageEnvelope): Unit = { if (shouldStartNewOutputFile) { close writer = getNextWriter } writer.map { seq => val writable = getValue(outgoing) bytesWritten += getOutputSizeInBytes(writable) seq.append(getKey, writable) } } override def close: Unit = { writer.map { w => w.hflush ; IOUtils.closeStream(w) } writer = None bytesWritten = 0L } protected def shouldStartNewOutputFile: Boolean = { bytesWritten >= batchSize || bucketer.get.shouldChangeBucket } protected def getNextWriter: Option[SequenceFile.Writer] = { val path = bucketer.get.getNextWritePath(dfs) Some( SequenceFile.createWriter( dfs.getConf, Writer.file(path), Writer.keyClass(keyClass), Writer.valueClass(valueClass), Writer.compression( SequenceFile.CompressionType.BLOCK, getCompressionCodec(config.getCompressionType(systemName)) ) ) ) } }
prateekm/samza
samza-hdfs/src/main/scala/org/apache/samza/system/hdfs/writer/SequenceFileHdfsWriter.scala
Scala
apache-2.0
3,767
package nfn import ccn.packet.{CCNName, Content} import com.typesafe.scalalogging.slf4j.Logging import scala.collection.mutable case class ChunkStore(size: Int, name: List[String]) extends Logging { val cs = Array.fill(size)(Option.empty[Array[Byte]]) def add(chunkNum: Int, chunkData: Array[Byte]): Unit = { if(cs(chunkNum).isEmpty) { logger.debug(s"new chunk with chunkNum: $chunkNum / ${size - 1}") cs(chunkNum) = Some(chunkData) } else { logger.warn(s"Received chunk is already in chunk store, ignoring chunk with num $chunkNum") } } def isComplete: Boolean = cs.forall(_.nonEmpty) def getComplete: Option[Content] = { if(cs.forall(_.nonEmpty)) { val data = cs.foldRight(Array[Byte]()) { case (head, tail) => head.get ++ tail } Some(Content(CCNName(name, None), data)) } else None } def getIncomplete: List[Int] = { cs.zipWithIndex .filter({case (a, i) => a.isEmpty}) .map({case (c, i) => i}).toList } } case class ContentStore() extends Logging { private val contentStore: mutable.Map[List[String], Content] = mutable.Map() private val chunkStore: mutable.Map[List[String], ChunkStore] = mutable.Map() def apply(name: CCNName): Option[Content] = get(name) def get(name: CCNName): Option[Content] = { name.chunkNum match { case Some(chunkNum) => chunkStore.get(name.cmpsList) flatMap { _.getComplete } case None => contentStore.get(name.cmpsList) } } def find(prefix: CCNName): Option[Content] = { val filteredContent = contentStore filter { entry => entry._1.startsWith(prefix.cmps) && CCNName(entry._1, None).requestType != "GIM" } filteredContent.values.toList.headOption } def add(content: Content): Unit = { val name = content.name.cmpsList (content.name.chunkNum, content.metaInfo.chunkNum) match { case (Some(chunkNum), Some(lastChunkNum)) => chunkStore.get(name) match { case Some(cs) => cs.add(chunkNum, content.data) case _ => { logger.debug(s"created chunkstore for name ${content.name}") val cs = ChunkStore(lastChunkNum + 1, name) cs.add(chunkNum, content.data) chunkStore += (name 
-> cs) } } case _ => { if(content.name.chunkNum.isDefined || content.metaInfo.chunkNum.isDefined) logger.warn(s"Found content $content with only one of either chunknum or lastchunknum, treating it as non-chunk content") contentStore += (name -> content) } } } def remove(name: CCNName): Unit = { name.chunkNum match { case Some(_) => logger.warn("remove for a single chunk not implemented") case None => contentStore -= name.cmps } } def getContentCompleteOrIncompletedChunks(name: CCNName): Either[Content, List[Int]] = { contentStore.get(name.cmps) match { case Some(content) => Left(content) case None => chunkStore.get(name.cmps).map({ cs => cs.getComplete match { case Some(completedContent) => { chunkStore -= name.cmps contentStore += (name.cmps -> completedContent) Left(completedContent) } case None => Right(cs.getIncomplete) } }).getOrElse(Right(Nil)) } } }
cn-uofbasel/nfn-scala
src/main/scala/nfn/ContentStore.scala
Scala
isc
3,323
/* * Copyright 2012-2014 Kieron Wilkinson. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package viper.util import java.io.{Reader, FilterReader} /** * Strips characters that would invalid XML in a streaming context. */ class StripXMLFilterReader(in: Reader) extends FilterReader(in) { /** If we are in an XML processing instruction. */ var inPI = false /** If we are in an XML entity reference. */ var inER = false /** PI's can legitimately be contained by CDATA. */ var inCDATA = false //todo <![CDATA[escaped]]> /** * Reads buffers of characters, the does some filtering on that same buffer. 
*/ override def read(cbuf: Array[Char], from: Int, len: Int): Int = { var count = 0 // Loop to avoid returning zero characters while (count == 0) { // Read some characters count = in.read(cbuf, from, len) // Detect and return EOF if (count == -1) { return -1 } // Loop through characters var last = from var i = from while (i < from + count) { if (!inPI && !inER) { // Check for PI if (containsAt(cbuf, "<?", i)) { inPI = true i += 1 } // Check for ER else if (containsAt(cbuf, "<!", i)) { inER = true i += 1 } // A normal character to include else if (isValidChar(cbuf(i))) { cbuf(last) = cbuf(i) last += 1 } // Any other character is invalid and should be stripped else { i += 1 } } // Currently within PI, so check for end else if (inPI && containsAt(cbuf, "?>", i)) { inPI = false i += 1 } // Currently within ER, so check for end else if (inER && containsAt(cbuf, ">", i)) { inER = false } i += 1 } // Calculate how many characters we will be returning (see condition on while loop) count = last - from } count } def containsAt(cbuf: Array[Char], text: String, off: Int): Boolean = { // If the PI is on the boundary of two reads, it will not be detected // This probably not a problem though, as the PI is always at the start of the XML if (cbuf.size - off < text.length) { return false } for (i <- 0 until text.length) { if (cbuf(i+off) != text.charAt(i)) { return false } } return true } def isValidChar(c: Char): Boolean = { // From: http://en.wikipedia.org/wiki/Valid_characters_in_XML // U+0009, U+000A, U+000D: these are the only C0 controls accepted in XML 1.0; // U+0020–U+D7FF, U+E000–U+FFFD: this excludes some (not all) non-characters in the BMP (all surrogates, U+FFFE and U+FFFF are forbidden); // U+10000–U+10FFFF: this includes all code points in supplementary planes, including non-characters. 
(c >= 0x0020 && c <= 0xD7FF) || (c == 0x0009) || (c == 0x000A) || (c == 0x000D) || (c >= 0xE000 && c <= 0xFFFD) || (c >= 0x10000 && c <= 0x10FFFF) } /** * Implemented in terms of the read method above. **/ override def read(): Int = { val buf = new Array[Char](1) val result = read(buf, 0, 1) if (result == -1) { return -1 } else { return buf(0) } } }
vyadh/viper
util/src/main/scala/viper/util/StripXMLFilterReader.scala
Scala
apache-2.0
4,028
import com.github.nscala_time.time.Imports._ import org.scalatest._ import org.scalatest.matchers.ShouldMatchers import com.github.tototoshi.csv._ import java.io._ import java.nio.file._ class LandRegistryPricePaidDataMergerSpec extends FlatSpec with ShouldMatchers { "csv file" should "be merged with empty one" in { // create temp dir val destDir = System.getProperty("java.io.tmpdir") // files val destCsvFile = new File(destDir, "dest.csv" ) val dataCsvFile = new File(getClass.getResource("/data.csv").getFile) val writer = new PrintWriter(destCsvFile) writer.close() // merge with data.csv LandRegistryPricePaidDataMerger.mergeCsv(destCsvFile, dataCsvFile) // compare the two files line by line val readerData = CSVReader.open(dataCsvFile) val readerDest = CSVReader.open(destCsvFile) val mapData = readerData.all.map(l => l(0) -> l).toMap val mapDest = readerDest.all.map(l => l(0) -> l).toMap readerData.close readerDest.close mapData should equal (mapDest) } "csv files" should "be merged" in { // create temp dir val destDir = System.getProperty("java.io.tmpdir") // files val destCsvFile = new File(destDir, "dest.csv" ) val dataCsvFile = new File(getClass.getResource("/data.csv").getFile) val data2CsvFile = new File(getClass.getResource("/data-2.csv").getFile) val dataMergedCsvFile = new File(getClass.getResource("/data-merged.csv").getFile) val writer = new PrintWriter(destCsvFile) writer.close() // merge with data.csv LandRegistryPricePaidDataMerger.mergeCsv(destCsvFile, dataCsvFile) // merge with data-2.csv LandRegistryPricePaidDataMerger.mergeCsv(destCsvFile, data2CsvFile) // compare the two files line by line val readerData = CSVReader.open(dataMergedCsvFile) val readerDest = CSVReader.open(destCsvFile) val mapData = readerData.all.map(l => l(0) -> l).toMap val mapDest = readerDest.all.map(l => l(0) -> l).toMap readerData.close readerDest.close mapData should equal (mapDest) } "shouldGetComplete" should "return false if the file exist" in { val path = 
getClass.getResource("/data.csv").getPath LandRegistryPricePaidDataMerger.shouldGetComplete(path) should be (false) } "shouldGetComplete" should "return true if the file does not exist" in { val path = "unexisting.csv" LandRegistryPricePaidDataMerger.shouldGetComplete(path) should be (true) } "shouldGetMonthly" should "return true if the file does not exist" in { val fileName = "unexisting.csv" LandRegistryPricePaidDataMerger.shouldGetMonthly(fileName) should be (true) } "shouldGetMonthly" should "return true if the file exist but it is from previous month" in { // create temp dir val destDir = System.getProperty("java.io.tmpdir") // create file val path = Paths.get(destDir, "file.csv") if (Files.exists(path)) Files.delete(path) val file = Files.createFile(path).toFile // change last modified time to the last day of the previous month val now = DateTime.now val firstDayOfThisMonth = new DateTime(now.year.get, now.monthOfYear.get, 1, 0, 0) val lastDayPreviousMonth = firstDayOfThisMonth - 1.day file.setLastModified(lastDayPreviousMonth.getMillis) LandRegistryPricePaidDataMerger.shouldGetMonthly(path.toString) should be (true) } "shouldGetMonthly" should "return false if the file exist but it is from this month" in { // create temp dir val destDir = System.getProperty("java.io.tmpdir") // create file val path = Paths.get(destDir, "file.csv") if (Files.exists(path)) Files.delete(path) val file = Files.createFile(path).toFile // change last modified time to the last day of the previous month val now = DateTime.now val firstDayOfThisMonth = new DateTime(now.year.get, now.monthOfYear.get, 1, 0, 0) file.setLastModified(firstDayOfThisMonth.getMillis) LandRegistryPricePaidDataMerger.shouldGetMonthly(path.toString) should be (false) } }
Geovation/housemining
src/test/scala/LandRegistryPricePaidDataMergerSpec.scala
Scala
mit
4,259
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.message import kafka.utils.Logging import java.nio.ByteBuffer import java.nio.channels._ import kafka.utils.IteratorTemplate import kafka.common.{MessageSizeTooLargeException, InvalidMessageSizeException, ErrorMapping} /** * A sequence of messages stored in a byte buffer * * There are two ways to create a ByteBufferMessageSet * * Option 1: From a ByteBuffer which already contains the serialized message set. Consumers will use this method. * * Option 2: Give it a list of messages along with instructions relating to serialization format. Producers will use this method. 
* */ class ByteBufferMessageSet(private val buffer: ByteBuffer, private val initialOffset: Long = 0L, private val errorCode: Int = ErrorMapping.NoError) extends MessageSet with Logging { private var shallowValidByteCount = -1L if(sizeInBytes > Int.MaxValue) throw new InvalidMessageSizeException("Message set cannot be larger than " + Int.MaxValue) def this(compressionCodec: CompressionCodec, messages: Message*) { this(MessageSet.createByteBuffer(compressionCodec, messages:_*), 0L, ErrorMapping.NoError) } def this(messages: Message*) { this(NoCompressionCodec, messages: _*) } def getInitialOffset = initialOffset def getBuffer = buffer def getErrorCode = errorCode def serialized(): ByteBuffer = buffer def validBytes: Long = shallowValidBytes private def shallowValidBytes: Long = { if(shallowValidByteCount < 0) { val iter = this.internalIterator(true) while(iter.hasNext) { val messageAndOffset = iter.next shallowValidByteCount = messageAndOffset.offset } } if(shallowValidByteCount < initialOffset) 0 else (shallowValidByteCount - initialOffset) } /** Write the messages in this set to the given channel */ def writeTo(channel: GatheringByteChannel, offset: Long, size: Long): Long = { buffer.mark() val written = channel.write(buffer) buffer.reset() written } /** default iterator that iterates over decompressed messages */ override def iterator: Iterator[MessageAndOffset] = internalIterator() /** iterator over compressed messages without decompressing */ def shallowIterator: Iterator[MessageAndOffset] = internalIterator(true) def verifyMessageSize(maxMessageSize: Int){ var shallowIter = internalIterator(true) while(shallowIter.hasNext){ var messageAndOffset = shallowIter.next val payloadSize = messageAndOffset.message.payloadSize if ( payloadSize > maxMessageSize) throw new MessageSizeTooLargeException("payload size of " + payloadSize + " larger than " + maxMessageSize) } } /** When flag isShallow is set to be true, we do a shallow iteration: just traverse the first level 
of messages. This is used in verifyMessageSize() function **/ private def internalIterator(isShallow: Boolean = false): Iterator[MessageAndOffset] = { ErrorMapping.maybeThrowException(errorCode) new IteratorTemplate[MessageAndOffset] { var topIter = buffer.slice() var currValidBytes = initialOffset var innerIter:Iterator[MessageAndOffset] = null var lastMessageSize = 0L def innerDone():Boolean = (innerIter==null || !innerIter.hasNext) def makeNextOuter: MessageAndOffset = { if (topIter.remaining < 4) { return allDone() } val size = topIter.getInt() lastMessageSize = size trace("Remaining bytes in iterator = " + topIter.remaining) trace("size of data = " + size) if(size < 0 || topIter.remaining < size) { if (currValidBytes == initialOffset || size < 0) throw new InvalidMessageSizeException("invalid message size: " + size + " only received bytes: " + topIter.remaining + " at " + currValidBytes + "( possible causes (1) a single message larger than " + "the fetch size; (2) log corruption )") return allDone() } val message = topIter.slice() message.limit(size) topIter.position(topIter.position + size) val newMessage = new Message(message) if(!newMessage.isValid) throw new InvalidMessageException("message is invalid, compression codec: " + newMessage.compressionCodec + " size: " + size + " curr offset: " + currValidBytes + " init offset: " + initialOffset) if(isShallow){ currValidBytes += 4 + size trace("shallow iterator currValidBytes = " + currValidBytes) new MessageAndOffset(newMessage, currValidBytes) } else{ newMessage.compressionCodec match { case NoCompressionCodec => debug("Message is uncompressed. Valid byte count = %d".format(currValidBytes)) innerIter = null currValidBytes += 4 + size trace("currValidBytes = " + currValidBytes) new MessageAndOffset(newMessage, currValidBytes) case _ => debug("Message is compressed. 
Valid byte count = %d".format(currValidBytes)) innerIter = CompressionUtils.decompress(newMessage).internalIterator() if (!innerIter.hasNext) { currValidBytes += 4 + lastMessageSize innerIter = null } makeNext() } } } override def makeNext(): MessageAndOffset = { if(isShallow){ makeNextOuter } else{ val isInnerDone = innerDone() debug("makeNext() in internalIterator: innerDone = " + isInnerDone) isInnerDone match { case true => makeNextOuter case false => { val messageAndOffset = innerIter.next if (!innerIter.hasNext) currValidBytes += 4 + lastMessageSize new MessageAndOffset(messageAndOffset.message, currValidBytes) } } } } } } def sizeInBytes: Long = buffer.limit override def toString: String = { val builder = new StringBuilder() builder.append("ByteBufferMessageSet(") for(message <- this) { builder.append(message) builder.append(", ") } builder.append(")") builder.toString } override def equals(other: Any): Boolean = { other match { case that: ByteBufferMessageSet => (that canEqual this) && errorCode == that.errorCode && buffer.equals(that.buffer) && initialOffset == that.initialOffset case _ => false } } override def canEqual(other: Any): Boolean = other.isInstanceOf[ByteBufferMessageSet] override def hashCode: Int = 31 + (17 * errorCode) + buffer.hashCode + initialOffset.hashCode }
griddynamics/kafka
core/src/main/scala/kafka/message/ByteBufferMessageSet.scala
Scala
apache-2.0
7,604
/* * ScalaRay - Ray tracer based on pbrt (see http://pbrt.org) written in Scala * Copyright (C) 2009, 2010, 2011 Jesper de Jong * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.jesperdj.scalaray.shape import org.jesperdj.scalaray.sampler.SampleTransforms import org.jesperdj.scalaray.common._ import org.jesperdj.scalaray.vecmath._ // Sphere (pbrt 3.2) final class Sphere (radius: Double = 1.0, minZ: Double = Double.NegativeInfinity, maxZ: Double = Double.PositiveInfinity, maxPhi: Double = 2.0 * π) extends Quadric { require(radius > 0.0, "radius must be > 0") require(minZ < maxZ, "minZ must be < maxZ") require(maxPhi >= 0.0 && maxPhi <= 2.0 * π, "maxPhi must be >= 0 and <= 2π") // Minimum and maximum θ angle (pbrt 3.2.1) private val minTheta = math.acos(clamp(minZ / radius, -1.0, 1.0)) private val maxTheta = math.acos(clamp(maxZ / radius, -1.0, 1.0)) private val diffTheta = maxTheta - minTheta // Bounding box that contains the shape (pbrt 3.2.2) val boundingBox: BoundingBox = BoundingBox(Point(-radius, -radius, minZ), Point(radius, radius, maxZ)) // Compute quadratic coefficients (pbrt 3.2.3) protected def computeCoefficients(ray: Ray): (Double, Double, Double) = (ray.direction.x * ray.direction.x + ray.direction.y * ray.direction.y + ray.direction.z * ray.direction.z, 2.0 * (ray.direction.x * ray.origin.x + ray.direction.y * ray.origin.y + ray.direction.z * ray.origin.z), 
ray.origin.x * ray.origin.x + ray.origin.y * ray.origin.y + ray.origin.z * ray.origin.z - radius * radius) // Get differential geometry for an intersection point (pbrt 3.2.4, 3.2.5, 3.2.6) protected def differentialGeometry(p: Point): Option[DifferentialGeometry] = { // Check z range if (p.z < minZ || p.z > maxZ) return None // Check against max φ val phi = { val f = math.atan2(p.y, p.x); if (f >= 0.0) f else f + 2.0 * π } if (phi > maxPhi) return None // Initialize differential geometry Some(new DifferentialGeometry { // Intersection point val point: Point = p // Surface normal (better method than what's used in pbrt) lazy val normal: Normal = Normal(p).normalize private lazy val theta = math.acos(clamp(p.z / radius, -1.0, 1.0)) // Surface parameter coordinates (pbrt 3.3.4) lazy val u: Double = phi / maxPhi lazy val v: Double = (theta - minTheta) / diffTheta // Partial derivatives of the surface position and normal lazy val (dpdu: Vector, dpdv: Vector, dndu: Normal, dndv: Normal) = { val radiusZ = math.sqrt(p.x * p.x + p.y * p.y) val (cosPhi, sinPhi) = if (radiusZ > 0.0) (p.x / radiusZ, p.y / radiusZ) else (0.0, 1.0) val dpdv = Vector(cosPhi * p.z, sinPhi * p.z, -radius * math.sin(theta)) * diffTheta val dpdu = if (radiusZ > 0.0) Vector(-maxPhi * p.y, maxPhi * p.x, 0.0) else dpdv ** Vector(p) val d2Pduu = Vector(p.x, p.y, 0.0) * (-maxPhi * maxPhi) val d2Pduv = Vector(-sinPhi, cosPhi, 0.0) * (diffTheta * maxPhi * p.z) val d2Pdvv = Vector(p) * (-diffTheta * diffTheta) val E = dpdu * dpdu val F = dpdu * dpdv val G = dpdv * dpdv val N = (dpdu ** dpdv).normalize val e = N * d2Pduu val f = N * d2Pduv val g = N * d2Pdvv val EGF2 = (E * G - F * F) val dndu = Normal(dpdu * ((f * F - e * G) / EGF2) + dpdv * ((e * F - f * E) / EGF2)) val dndv = Normal(dpdu * ((g * F - f * G) / EGF2) + dpdv * ((f * F - g * E) / EGF2)) (dpdu, dpdv, dndu, dndv) } // Shape which is intersected val shape: Shape = Sphere.this }) } // Surface area (pbrt 3.2.7) val surfaceArea: Double = maxPhi * 
radius * (maxZ - minZ) // Sample a point on the surface using the random variables u1, u2 (pbrt 14.6.3) // Returns a point on the surface, the surface normal at that point and the probability density for this sample def sampleSurface(u1: Double, u2: Double): (Point, Normal, Double) = { val p = SampleTransforms.uniformSampleSphere(u1, u2) (p * radius, Normal(p), 1.0 / surfaceArea) // TODO: We are not taking partial spheres into account (innerRadius and maxPhi). See pbrt exercise 14.3 (page 734). } // Sample a point on the surface with respect to a point from which the shape is visible using the random variables u1, u2 (pbrt 14.6.3) // Returns a point on the surface, the surface normal at that point and the probability density for this sample override def sampleSurface(viewPoint: Point, u1: Double, u2: Double): (Point, Normal, Double) = throw new UnsupportedOperationException("Not yet implemented") // TODO: Implement this; see pbrt 14.6.3 (page 720-722) // Probability density of the direction wi (from viewPoint to a point on the surface) being sampled with respect to the distribution // that sampleSurface(viewPoint: Point, u1: Double, u2: Double) uses to sample points (pbrt 14.6.3) override def pdf(viewPoint: Point, wi: Vector): Double = throw new UnsupportedOperationException("Not yet implemented") // TODO: Implement this; see pbrt 14.6.3 (page 720-722) override def toString = "Sphere(radius=%g, minZ=%g, maxZ=%g, maxPhi=%g)" format (radius, minZ, maxZ, maxPhi) }
jesperdj/scalaray
src/main/scala/org/jesperdj/scalaray/shape/Sphere.scala
Scala
gpl-3.0
5,865
package com.varunvats.practice.queue

import com.varunvats.practice.sorting.UnitSpec

/**
 * Behavioural specification of the mutable max-priority queue:
 *  - `head`/`dequeue` on an empty queue must throw NoSuchElementException,
 *  - `head` peeks without removing,
 *  - `dequeue` always yields the highest-priority element regardless of insertion order,
 *  - `+=` enqueues and returns the queue itself, so enqueues can be chained.
 */
class PriorityQueueSpec extends UnitSpec {

  "The priority queue" when {

    "empty" should {
      "throw an exception when asked for the head element" in {
        intercept[NoSuchElementException] {
          PriorityQueue[Int]().head
        }
      }
      "throw an exception when dequeue'd" in {
        intercept[NoSuchElementException] {
          PriorityQueue[Int]().dequeue
        }
      }
      "have a length of 0" in {
        PriorityQueue[Int]().length shouldBe 0
      }
      "be empty (test isEmpty)" in {
        PriorityQueue[Int]().isEmpty shouldBe true
      }
      "have a length of 1 when another element is enqueued" in {
        val pq = PriorityQueue[Int]()
        pq += 5
        pq.length shouldBe 1
      }
      "not be empty when another element is enqueued" in {
        val pq = PriorityQueue[Int]()
        pq += 5
        pq.isEmpty shouldBe false
      }
    }

    "containing one element" should {
      "not be empty" in {
        PriorityQueue(5).isEmpty shouldBe false
      }
      "have length 1" in {
        PriorityQueue(101).length shouldBe 1
      }
      "have a length of 2 when another element is enqueued" in {
        val pq = PriorityQueue(7)
        pq += 99
        pq.length shouldBe 2
      }
      "become empty when dequeue'd" in {
        val pq = PriorityQueue(57)
        pq.dequeue
        pq.isEmpty shouldBe true
      }
      "have length 0 when dequeue'd" in {
        val pq = PriorityQueue(57)
        pq.dequeue
        pq.length shouldBe 0
      }
      "return the element when asked for the head element without removing it from the queue" in {
        val pq = PriorityQueue(5)
        pq.head shouldBe 5
        pq.length shouldBe 1 // head must not remove
      }
      "remove the element from the queue and return it when dequeue'd and the queue should become empty" in {
        val pq = PriorityQueue(101)
        val elem = pq.dequeue
        elem shouldBe 101
        pq.isEmpty shouldBe true
      }
    }

    "containing multiple elements" should {
      "return the correct length after every enqueue" in {
        val pq = PriorityQueue(12, 1, 37)
        pq.length shouldBe 3
        pq += 101
        pq.length shouldBe 4
        pq += 53 += 77 // += returns the queue, so enqueues chain
        pq.length shouldBe 6
      }
      "return the correct length after every dequeue" in {
        val pq = PriorityQueue(12, 1, 37)
        pq += 101
        pq += 53 += 103
        pq.length shouldBe 6
        pq.dequeue
        pq.length shouldBe 5
        pq.dequeue
        pq.length shouldBe 4
        pq.dequeue
        pq.length shouldBe 3
        pq.dequeue
        pq.length shouldBe 2
        pq.dequeue
        pq.length shouldBe 1
        pq.dequeue
        pq.length shouldBe 0
      }
      "promote element with the highest priority to the top of the queue" in {
        val pq = PriorityQueue(12, 1, 37)
        pq.dequeue shouldBe 37
        pq += 101
        pq.dequeue shouldBe 101
      }
      "dequeue elements in decreasing priority order" in {
        val pq = PriorityQueue(12, 1, 37)
        pq.dequeue shouldBe 37
        pq.dequeue shouldBe 12
        pq += 101 += 0
        pq.dequeue shouldBe 101
        pq.dequeue shouldBe 1
        pq += 53
        pq.dequeue shouldBe 53
        pq.dequeue shouldBe 0
      }
      "become empty when all its elements are dequeue'd" in {
        val pq = PriorityQueue(12, 1, 37)
        pq += 101 += 53 += 103
        for (i <- 1 to 6) pq.dequeue
        pq.isEmpty shouldBe true
      }
      "dequeue elements correctly when they all have the same priority" in {
        val pq = PriorityQueue(5, 5, 5, 5, 5)
        for (i <- 1 to 5) pq.dequeue shouldBe 5
        pq.isEmpty shouldBe true
      }
      "dequeue elements in the correct order when they are inserted in increasing priority order" in {
        val pq = PriorityQueue(1, 2, 3, 4, 5)
        pq.dequeue shouldBe 5
        pq.dequeue shouldBe 4
        pq.dequeue shouldBe 3
        pq.dequeue shouldBe 2
        pq.dequeue shouldBe 1
        pq.isEmpty shouldBe true
      }
      // Fixed test-name typo: "deque" -> "dequeue" (consistent with sibling tests).
      "dequeue elements in the correct order when they are inserted in decreasing priority order" in {
        val pq = PriorityQueue(5, 4, 3, 2, 1)
        pq.dequeue shouldBe 5
        pq.dequeue shouldBe 4
        pq.dequeue shouldBe 3
        pq.dequeue shouldBe 2
        pq.dequeue shouldBe 1
        pq.isEmpty shouldBe true
      }
    }
  }
}
varunvats/practice
jvm/src/test/scala/com/varunvats/practice/queue/PriorityQueueSpec.scala
Scala
mit
4,508
/**
 * Copyright 2011-2017 GatlingCorp (http://gatling.io)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.gatling.commons

// Marker type class pairing a Scope with a type X. An instance exists
// implicitly for EVERY pair (see companion), so presumably its purpose is to
// be used negatively (as an implicit-resolution guard) rather than to carry
// behavior — TODO(review): confirm against call sites, which are not visible here.
trait Exclude[Scope, X]

object Exclude {

  // Catch-all instance: makes Exclude[X, A] implicitly available for any X, A.
  // The shouting name signals that user code must not summon it directly.
  implicit def NOT_FOR_USER_CODE[X, A]: Exclude[X, A] = new Exclude[X, A] {}

  // Explicit construction helper: Exclude.list[X].apply[A] yields the instance.
  // NOTE(review): the result is a structural type (`new { def apply[A] = ... }`),
  // so calls to `apply` go through reflection.
  def list[X] = new {
    def apply[A] = NOT_FOR_USER_CODE[X, A]
  }
}
MykolaB/gatling
gatling-commons/src/main/scala/io/gatling/commons/Exclude.scala
Scala
apache-2.0
836
package com.nico.sparkdemo

/** Entry point: constructs the demo application and hands it to the Spark runner. */
object app extends SparkAppRunner {
  def main(args: Array[String]): Unit =
    executeWithSpark(new MyApp)
}
anicolaspp/spark-workshop
complex_app/src/main/scala/app.scala
Scala
mit
161
package com.thangiee.lolhangouts.ui.settings

import java.util.concurrent.TimeUnit

import android.app.{AlarmManager, PendingIntent}
import android.content.SharedPreferences.OnSharedPreferenceChangeListener
import android.content.{Intent, SharedPreferences}
import android.os.{Bundle, SystemClock}
import android.preference.{PreferenceFragment, PreferenceManager}
import com.thangiee.lolhangouts.R
import com.thangiee.lolhangouts.ui.receivers.DeleteOldMsgReceiver
import com.thangiee.lolhangouts.ui.utils._
import org.scaloid.common.TagUtil

/**
 * Settings screen fragment. Listens for preference changes while resumed and,
 * when the chat-history retention preference changes, (re)schedules or cancels
 * the repeating alarm that purges old messages.
 */
class PreferenceSettings extends PreferenceFragment with OnSharedPreferenceChangeListener with TagUtil {
  implicit private lazy val ctx = getActivity.getBaseContext

  override def onCreate(savedInstanceState: Bundle): Unit = {
    super.onCreate(savedInstanceState)
    addPreferencesFromResource(R.xml.pref_settings)
  }

  override def onResume(): Unit = {
    super.onResume()
    PreferenceManager.getDefaultSharedPreferences(ctx).registerOnSharedPreferenceChangeListener(this)
  }

  override def onPause(): Unit = {
    super.onPause()
    PreferenceManager.getDefaultSharedPreferences(ctx).unregisterOnSharedPreferenceChangeListener(this)
  }

  override def onSharedPreferenceChanged(sharedPreferences: SharedPreferences, key: String): Unit = {
    // Uppercase on purpose: a stable identifier so the match compares against
    // its value instead of binding a fresh variable.
    val HistoryKey = R.string.pref_history.r2String
    key match {
      case HistoryKey => onHistoryChanged(sharedPreferences.getString(key, "3 days"))
      case _          => info(s"[+] setting change $key:${sharedPreferences.getBoolean(key, false)}")
    }
  }

  /**
   * Reacts to a change of the history-retention preference.
   *
   * "never" cancels the purge alarm; any recognised duration (or the 3-day
   * fallback for unknown values) schedules an hourly repeating alarm that
   * deletes messages older than the chosen cut-off.
   */
  private def onHistoryChanged(value: String): Unit = {
    val i = new Intent(ctx, classOf[DeleteOldMsgReceiver])
    val p = PendingIntent.getBroadcast(ctx, 0, i, 0)

    // Age cut-off (ms) used to decide which messages to delete; None = keep forever.
    val cutOffMillis: Option[Long] = value match {
      case "1 day"  => Some(TimeUnit.DAYS.toMillis(1))
      case "3 days" => Some(TimeUnit.DAYS.toMillis(3))
      case "7 days" => Some(TimeUnit.DAYS.toMillis(7))
      case "never"  => None
      case _ =>
        warn("[!] No match for Preference-History. Setting value to 3 days.")
        Some(TimeUnit.DAYS.toMillis(3))
    }

    cutOffMillis match {
      case None =>
        alarmManager.cancel(p)
        info("[*] Preference-History changed to: never")
      case Some(millis) =>
        i.putExtra(DeleteOldMsgReceiver.TIME_KEY, millis)
        // check to delete old message base on the millis every 1 hours
        alarmManager.setRepeating(AlarmManager.ELAPSED_REALTIME, SystemClock.elapsedRealtime(), TimeUnit.HOURS.toMillis(1), p)
        info("[*] Preference-History changed to: " + value)
    }
  }
}
Thangiee/LoL-Hangouts
src/com/thangiee/lolhangouts/ui/settings/PreferenceSettings.scala
Scala
apache-2.0
2,562
/** Copyright 2015 TappingStone, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  *
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */

package io.prediction.data.storage.elasticsearch

import grizzled.slf4j.Logging
import io.prediction.data.storage.Channel
import io.prediction.data.storage.Channels
import io.prediction.data.storage.StorageClientConfig
import org.elasticsearch.ElasticsearchException
import org.elasticsearch.client.Client
import org.elasticsearch.index.query.FilterBuilders.termFilter
import org.json4s.DefaultFormats
import org.json4s.JsonDSL._
import org.json4s.native.JsonMethods._
import org.json4s.native.Serialization.read
import org.json4s.native.Serialization.write

/**
 * Elasticsearch-backed implementation of the Channels store.
 *
 * On construction it ensures the target index exists and that the "channels"
 * mapping is registered (with `name` stored as a not_analyzed string).
 * Channel ids are generated from an ESSequences counter when not supplied.
 */
class ESChannels(client: Client, config: StorageClientConfig, index: String)
    extends Channels with Logging {

  implicit val formats = DefaultFormats.lossless

  private val estype = "channels"
  private val seq = new ESSequences(client, config, index)
  private val seqName = "channels"

  // Index/mapping bootstrap, executed once per instance.
  val indices = client.admin.indices
  val indexExistResponse = indices.prepareExists(index).get
  if (!indexExistResponse.isExists) {
    indices.prepareCreate(index).get
  }
  val typeExistResponse = indices.prepareTypesExists(index).setTypes(estype).get
  if (!typeExistResponse.isExists) {
    val json =
      (estype ->
        ("properties" ->
          ("name" -> ("type" -> "string") ~ ("index" -> "not_analyzed"))))
    indices.preparePutMapping(index).setType(estype).setSource(compact(render(json))).get
  }

  /**
   * Inserts a channel. If `channel.id` is 0, draws ids from the sequence until
   * an unused one is found; otherwise uses the given id.
   *
   * @return the id on success, None if indexing failed.
   */
  def insert(channel: Channel): Option[Int] = {
    val id =
      if (channel.id == 0) {
        var roll = seq.genNext(seqName)
        while (get(roll).isDefined) roll = seq.genNext(seqName) // skip ids already taken
        roll
      } else channel.id
    val realChannel = channel.copy(id = id)
    if (update(realChannel)) Some(id) else None
  }

  /** Fetches a channel by id; None if missing or on error (errors are logged). */
  def get(id: Int): Option[Channel] = {
    try {
      val response = client.prepareGet(index, estype, id.toString).get()
      Some(read[Channel](response.getSourceAsString))
    } catch {
      case e: ElasticsearchException =>
        error(e.getMessage)
        None
      case e: NullPointerException => None // document not found => null source
    }
  }

  /** All channels for an app; empty on error (errors are logged). */
  def getByAppid(appid: Int): Seq[Channel] = {
    try {
      val builder = client.prepareSearch(index).setTypes(estype).
        setPostFilter(termFilter("appid", appid))
      ESUtils.getAll[Channel](client, builder)
    } catch {
      case e: ElasticsearchException =>
        error(e.getMessage)
        Seq[Channel]()
    }
  }

  /** Upserts a channel document keyed by its id. Returns false on failure. */
  def update(channel: Channel): Boolean = {
    try {
      client.prepareIndex(index, estype, channel.id.toString).
        setSource(write(channel)).get()
      true
    } catch {
      case e: ElasticsearchException =>
        error(e.getMessage)
        false
    }
  }

  /** Deletes a channel by id; failures are logged and swallowed (best effort). */
  def delete(id: Int): Unit = {
    try {
      client.prepareDelete(index, estype, id.toString).get
    } catch {
      case e: ElasticsearchException => error(e.getMessage)
    }
  }
}
ch33hau/PredictionIO
data/src/main/scala/io/prediction/data/storage/elasticsearch/ESChannels.scala
Scala
apache-2.0
3,496
package com.ovoenergy.orchestration
package scheduling
package dynamo

import java.time.{Clock, Instant, ZoneId}
import java.util.concurrent.Executors

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal
import scala.collection.JavaConverters._

import cats.effect._
import cats.implicits._
import org.scalacheck.Shapeless._
import org.scalatest.{FlatSpec, Matchers}
import com.amazonaws.handlers.AsyncHandler
import com.amazonaws.services.dynamodbv2._
import com.amazonaws.services.dynamodbv2.model._
import com.ovoenergy.comms.templates.util.Hash
import com.ovoenergy.orchestration.aws.AwsProvider.DbClients
import com.ovoenergy.orchestration.scheduling.Persistence.{AlreadyBeingOrchestrated, Successful}
import com.ovoenergy.orchestration.scheduling._
import com.ovoenergy.orchestration.scheduling.dynamo.DynamoPersistence.Context
import com.ovoenergy.orchestration.util.{ArbGenerator, ArbInstances, LocalDynamoDB}
import com.ovoenergy.orchestration.util.LocalDynamoDB.SecondaryIndexData
import com.ovoenergy.comms.dockertestkit.{DynamoDbKit, ManagedContainers}
import com.ovoenergy.comms.dockertestkit.dynamoDb.DynamoDbClient
import com.ovoenergy.comms.dockertestkit.Model.TableName

// Integration spec for the DynamoDB-backed schedule persistence. Each test
// spins up a fresh table via `testResources` (a cats-effect Resource), runs a
// small IO program against both the async and sync persistence APIs, and
// tears the table down again. The clock is fixed to `now` so expiry/history
// timestamps can be asserted exactly.
class DynamoPersistenceSpec extends IntegrationSpec with ArbInstances with DynamoDbClient with DynamoDbKit {

  implicit val ec = ExecutionContext.global
  implicit val cs = IO.contextShift(ec)

  // Docker container hosting the local DynamoDB used by these tests.
  override def managedContainers = ManagedContainers(dynamoDbContainer)

  behavior of "Persistence"

  it should "store scheduled comm" in {
    val scheduledComm = generate[Schedule].copy(status = ScheduleStatus.Pending)
    val pending = testResources.use {
      case (async, sync, _) =>
        for {
          _ <- async.storeSchedule[IO](scheduledComm)
          pending <- IO(sync.listPendingSchedules())
        } yield pending
    }.unsafeRunSync()
    pending.size shouldBe 1
    pending.head shouldBe scheduledComm
  }

  it should "return pending schedules" in {
    testResources.use {
      case (async, sync, now) =>
        // One schedule per status: only the Pending one must be listed.
        val completedSchedule = generate[Schedule].copy(status = ScheduleStatus.Complete)
        val pendingSchedule = generate[Schedule].copy(status = ScheduleStatus.Pending)
        val expiredOrchestratingSchedule =
          generate[Schedule].copy(status = ScheduleStatus.Orchestrating, orchestrationExpiry = now.minusSeconds(60 * 30))
        val inProgressOrchestratingSchedule =
          generate[Schedule].copy(status = ScheduleStatus.Orchestrating, orchestrationExpiry = now.plusSeconds(60 * 10))
        for {
          _ <- async.storeSchedule[IO](completedSchedule)
          _ <- async.storeSchedule[IO](pendingSchedule)
          _ <- async.storeSchedule[IO](expiredOrchestratingSchedule)
          _ <- async.storeSchedule[IO](inProgressOrchestratingSchedule)
          pending <- IO(sync.listPendingSchedules())
        } yield pending should contain only (pendingSchedule)
    }.unsafeRunSync()
  }

  it should "return expired orchestrating schedules" in {
    testResources.use {
      case (async, sync, now) =>
        // Same four-status setup; only the Orchestrating schedule whose expiry
        // is in the past must be reported as expired.
        val completedSchedule = generate[Schedule].copy(status = ScheduleStatus.Complete)
        val pendingSchedule = generate[Schedule].copy(status = ScheduleStatus.Pending)
        val expiredOrchestratingSchedule =
          generate[Schedule].copy(status = ScheduleStatus.Orchestrating, orchestrationExpiry = now.minusSeconds(60 * 30))
        val inProgressOrchestratingSchedule =
          generate[Schedule].copy(status = ScheduleStatus.Orchestrating, orchestrationExpiry = now.plusSeconds(60 * 10))
        for {
          _ <- async.storeSchedule[IO](completedSchedule)
          _ <- async.storeSchedule[IO](pendingSchedule)
          _ <- async.storeSchedule[IO](expiredOrchestratingSchedule)
          _ <- async.storeSchedule[IO](inProgressOrchestratingSchedule)
          pending <- IO(sync.listExpiredSchedules())
        } yield pending should contain only (expiredOrchestratingSchedule)
    }.unsafeRunSync()
  }

  it should "correctly mark a schedule as orchestrating" in {
    testResources.use {
      case (async, sync, now) =>
        val schedule = generate[Schedule].copy(status = ScheduleStatus.Pending, history = Seq())
        for {
          _ <- async.storeSchedule[IO](schedule)
          result <- IO(sync.attemptSetScheduleAsOrchestrating(schedule.scheduleId.toString))
          orchestratingSchedule <- async
            .retrieveSchedule[IO](schedule.scheduleId)
            .map(_.toRight(new RuntimeException("It is not defined")))
            .flatMap(IO.fromEither _)
        } yield {
          // Expiry is now + 5 minutes because DynamoPersistence is built with a
          // 5-minute TTL and a clock fixed at `now` (see testResources).
          result shouldBe Successful(
            schedule.copy(
              status = ScheduleStatus.Orchestrating,
              history = Seq(Change(now, "Start orchestrating")),
              orchestrationExpiry = now.plusSeconds(60 * 5)))
          orchestratingSchedule.status shouldBe ScheduleStatus.Orchestrating
          orchestratingSchedule.orchestrationExpiry shouldBe now.plusSeconds(60 * 5)
        }
    }.unsafeRunSync()
  }

  it should "not mark a schedule as orchestrating that is already orchestrating" in {
    testResources.use {
      case (async, sync, now) =>
        val orchestrationExpiry = now.plusSeconds(60 * 2)
        val schedule = generate[Schedule].copy(
          status = ScheduleStatus.Orchestrating,
          orchestrationExpiry = orchestrationExpiry
        )
        for {
          _ <- async.storeSchedule[IO](schedule)
          result <- IO(sync.attemptSetScheduleAsOrchestrating(schedule.scheduleId.toString))
          orchestratingSchedule <- async
            .retrieveSchedule[IO](schedule.scheduleId)
            .map(_.toRight(new RuntimeException("It is not defined")))
            .flatMap(IO.fromEither _)
        } yield {
          // The stored schedule must be left untouched (same expiry).
          result shouldBe AlreadyBeingOrchestrated
          orchestratingSchedule.status shouldBe ScheduleStatus.Orchestrating
          orchestratingSchedule.orchestrationExpiry shouldBe orchestrationExpiry
        }
    }.unsafeRunSync()
  }

  it should "mark schedules as failed" in {
    testResources.use {
      case (async, sync, now) =>
        val schedule = generate[Schedule]
        for {
          _ <- async.storeSchedule[IO](schedule)
          _ <- IO(sync.setScheduleAsFailed(schedule.scheduleId.toString, "Invalid profile"))
          result <- async
            .retrieveSchedule[IO](schedule.scheduleId)
            .map(_.toRight(new RuntimeException("It is not defined")))
            .flatMap(IO.fromEither _)
        } yield {
          result.status shouldBe ScheduleStatus.Failed
          result.history should contain(Change(now, "Failed - Invalid profile"))
        }
    }.unsafeRunSync()
  }

  it should "mark schedules as complete" in {
    testResources.use {
      case (async, sync, now) =>
        val schedule = generate[Schedule]
        for {
          _ <- async.storeSchedule[IO](schedule)
          _ <- IO(sync.setScheduleAsComplete(schedule.scheduleId.toString))
          result <- async
            .retrieveSchedule[IO](schedule.scheduleId)
            .map(_.toRight(new RuntimeException("It is not defined")))
            .flatMap(IO.fromEither _)
        } yield {
          result.status shouldBe ScheduleStatus.Complete
          result.history should contain(Change(now, "Orchestration complete"))
        }
    }.unsafeRunSync()
  }

  // Bundles everything a test needs: an async persistence, a sync persistence
  // (5-minute orchestration TTL), and the fixed `now` instant both share via a
  // fixed UTC Clock. Table creation/deletion is tied to the Resource lifecycle.
  val testResources = for {
    blockingEc <- blockingExecutionContextResource
    client <- dynamoDbClientResource[IO]()
    tableName <- scheduledTableResource(client)
    now <- Resource.liftF(IO(Instant.now()))
  } yield {
    val ctx = Context(client, tableName)
    val clock = Clock.fixed(now, ZoneId.of("UTC"))
    (new AsyncPersistence(ctx, blockingEc, clock), new DynamoPersistence(5.minutes, ctx, clock), now)
  }

  // Dedicated cached thread pool for blocking DynamoDB calls, shut down on release.
  def blockingExecutionContextResource: Resource[IO, ExecutionContext] = {
    Resource.make(IO(Executors.newCachedThreadPool()))(threads => IO(threads.shutdown())).map(ExecutionContext.fromExecutor)
  }

  // Creates the "scheduler-test" table (hash key scheduleId, plus a GSI on
  // status/orchestrationExpiry used by the pending/expired queries) and removes
  // it when the Resource is released.
  def scheduledTableResource(client: AmazonDynamoDBAsync): Resource[IO, String] = {
    val tableName = "scheduler-test"
    val globalSecondaryIndices = Seq(
      new GlobalSecondaryIndex()
        .withIndexName("status-orchestrationExpiry-index")
        .withKeySchema(
          new KeySchemaElement("status", KeyType.HASH),
          new KeySchemaElement("orchestrationExpiry", KeyType.RANGE)
        )
        .withProjection(new Projection().withProjectionType(ProjectionType.ALL))
        .withProvisionedThroughput(new ProvisionedThroughput(1L, 1L))
    )

    // Bridges the AWS SDK callback API into IO.async.
    def createTable: IO[CreateTableResult] = {
      IO.async[CreateTableResult] { cb =>
        client.createTableAsync(
          new CreateTableRequest(
            List(
              new AttributeDefinition("scheduleId", ScalarAttributeType.S),
              new AttributeDefinition("status", ScalarAttributeType.S),
              new AttributeDefinition("orchestrationExpiry", ScalarAttributeType.N),
            ).asJava,
            tableName,
            List(new KeySchemaElement("scheduleId", KeyType.HASH)).asJava,
            new ProvisionedThroughput(1L, 1L)
          ).withGlobalSecondaryIndexes(globalSecondaryIndices.asJava),
          new AsyncHandler[CreateTableRequest, CreateTableResult] {
            override def onError(exception: Exception): Unit = cb(exception.asLeft)
            override def onSuccess(request: CreateTableRequest, result: CreateTableResult): Unit = cb(result.asRight)
          }
        )
        ()
      }.onError {
        // NOTE(review): "creting" is a typo in this log message; left as-is
        // here since this edit only adds documentation.
        case NonFatal(e) => IO(println(s"Error creting DynamoDb table: ${e}"))
      }
    }

    def removeTable: IO[DeleteTableResult] = {
      IO.async[DeleteTableResult] { cb =>
        client.deleteTableAsync(
          tableName,
          new AsyncHandler[DeleteTableRequest, DeleteTableResult] {
            override def onError(exception: Exception): Unit = cb(exception.asLeft)
            override def onSuccess(request: DeleteTableRequest, result: DeleteTableResult): Unit = cb(result.asRight)
          }
        )
        ()
      }.onError {
        case NonFatal(e) => IO(println(s"Error deleting DynamoDb table: ${e}"))
      }
    }

    Resource.make(createTable.as(tableName))(_ => removeTable.void)
  }
}
ovotech/comms-orchestration
src/it/scala/orchestrator/scheduling/dynamo/DynamoPersistenceSpec.scala
Scala
mit
10,017
package model

import scala.slick.driver.H2Driver.simple._

// Slick 1.x (lifted embedding) table mapping for the WEB_HOOK table.
// The userName/repositoryName columns and byRepository helper are not defined
// here — presumably provided by the BasicTemplate mixin; confirm there.
object WebHooks extends Table[WebHook]("WEB_HOOK") with BasicTemplate {
  // Callback URL of the webhook.
  def url = column[String]("URL")

  // Default projection: maps a row to/from the WebHook case class.
  def * = userName ~ repositoryName ~ url <> (WebHook, WebHook.unapply _)

  // Predicate selecting a single row by the full key (owner, repository, url).
  def byPrimaryKey(owner: String, repository: String, url: String) =
    byRepository(owner, repository) && (this.url is url.bind)
}

// One webhook registration: the owning repository (owner + name) and the URL
// to be called.
case class WebHook(
  userName: String,
  repositoryName: String,
  url: String
)
denen99/gitbucket
src/main/scala/model/WebHook.scala
Scala
apache-2.0
453
package ch.octo.cffpoc.gtfs.simulator.actors

import akka.actor.{ Actor, ActorLogging, ActorRef }
import ch.octo.cffpoc.gtfs.Trip
import ch.octo.cffpoc.gtfs.simulator.actors.SimulatorMessages.{ EndOfTripSimulation, StartSimultationSchedule, StopSimulation }
import ch.octo.cffpoc.gtfs.simulator.{ SimulatedTripPositions, TimeAccelerator }
import org.joda.time.LocalDate

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

/**
 * Created by alex on 25.10.16.
 *
 * Replays a single trip: on construction it schedules its own start at the
 * trip's (time-accelerated) departure time; once started, every simulated
 * position is scheduled to be sent to `actorSink` at its accelerated time of day.
 */
class ActorDelayedSimulatedTrip(actorSink: ActorRef, timeAccelerator: TimeAccelerator, trip: Trip, date: LocalDate, averagSecondIncrement: Double)(implicit ec: ExecutionContext) extends Actor with ActorLogging {

  // Pre-computed positions along the trip for the given date.
  val stp = SimulatedTripPositions(trip, date, averagSecondIncrement, true)
  val scheduler = context.system.scheduler

  // Delay its autostart: fire StartSimultationSchedule at the accelerated
  // equivalent of the trip's departure second-of-day.
  val scheduled = scheduler.scheduleOnce(timeAccelerator.inMS(trip.startsAt.getSecondOfDay) milliseconds, self, StartSimultationSchedule)

  override def receive: Receive = {
    case StartSimultationSchedule =>
      log.info(s"launching trip ${trip.tripId}")
      // Schedule one message per position, straight to the sink.
      stp.positions.foreach({ sp =>
        val in = timeAccelerator.inMS(sp.secondsOfDay)
        scheduler.scheduleOnce(in milliseconds, actorSink, sp)
      })
    case EndOfTripSimulation =>
      context.stop(self)
    case StopSimulation =>
      // NOTE(review): only the start-up timer is cancelled here. Position
      // messages already scheduled in the Start branch are NOT cancelled and
      // will still reach actorSink — confirm whether that is intended.
      scheduled.cancel()
      context.stop(self)
  }
}
alexmasselot/gtfs-simulation-play
src/main/scala/ch/octo/cffpoc/gtfs/simulator/actors/ActorDelayedSimulatedTrip.scala
Scala
apache-2.0
1,425
package models.daos

import java.util.UUID

import com.mohiva.play.silhouette.api.LoginInfo
import models.User
import models.daos.InMemUserDAOImpl._

import scala.collection.mutable
import scala.concurrent.Future

/**
 * Give access to the user object, backed by an in-memory map (no persistence).
 */
class InMemUserDAOImpl extends UserDAO {

  /**
   * Finds a user by its login info.
   *
   * @param loginInfo The login info of the user to find.
   * @return The found user or None if no user for the given login info could be found.
   */
  def find(loginInfo: LoginInfo) =
    Future.successful(users.values.find(_.loginInfo == loginInfo))

  /**
   * Finds a user by its user ID.
   *
   * @param userID The ID of the user to find.
   * @return The found user or None if no user for the given ID could be found.
   */
  def find(userID: UUID) = Future.successful(users.get(userID))

  /**
   * Saves a user (insert or overwrite by userID).
   *
   * @param user The user to save.
   * @return The saved user.
   */
  def save(user: User) = {
    users += (user.userID -> user)
    Future.successful(user)
  }
}

/**
 * The companion object.
 */
object InMemUserDAOImpl {

  // The backing store, shared across all DAO instances.
  // NOTE(review): mutable.HashMap is not thread-safe; concurrent save/find
  // from different requests can race. Consider a concurrent map if this is
  // used outside tests.
  val users: mutable.HashMap[UUID, User] = mutable.HashMap()
}
devshorts/routy.io
app/models/daos/InMemUserDAOImpl.scala
Scala
apache-2.0
1,257
// Copyright 2017,2018,2019,2020 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package commbank.grimlock.framework

import commbank.grimlock.framework.encoding.{
  BinaryValue,
  BooleanValue,
  DoubleValue,
  FloatValue,
  IntValue,
  LongValue,
  StringValue,
  TimestampValue,
  TypeValue,
  Value
}
import commbank.grimlock.framework.metadata.Type

import java.sql.Timestamp

import shapeless.{ ::, HNil }

package environment {
  // Implicit lifts from plain Scala/Java values into grimlock's Value wrappers,
  // so user code can pass raw literals where a Value[T] is expected.
  package object implicits {
    /** Converts a `Array[Byte]` to a `Value`. */
    implicit def binaryToValue(t: Array[Byte]): Value[Array[Byte]] = BinaryValue(t)

    /** Converts a `Boolean` to a `Value`. */
    implicit def booleanToValue(t: Boolean): Value[Boolean] = BooleanValue(t)

    /** Converts a `Double` to a `Value`. */
    implicit def doubleToValue(t: Double): Value[Double] = DoubleValue(t)

    /** Converts a `Float` to a `Value`. */
    implicit def floatToValue(t: Float): Value[Float] = FloatValue(t)

    /** Converts a `Int` to a `Value`. */
    implicit def intToValue(t: Int): Value[Int] = IntValue(t)

    /** Converts a `Long` to a `Value`. */
    implicit def longToValue(t: Long): Value[Long] = LongValue(t)

    /** Converts a `String` to a `Value`. */
    implicit def stringToValue(t: String): Value[String] = StringValue(t)

    /** Converts a `Timestamp` to a `Value`. */
    implicit def timestampToValue(t: Timestamp): Value[Timestamp] = TimestampValue(t)

    /** Converts a `Type` to a `Value`. */
    implicit def typeToValue(t: Type): Value[Type] = TypeValue(t)
  }
}

// Aliases for positions as shapeless HLists of Value cells, one alias per arity.
package object position {
  /** Short hand for 1 coordinate. */
  type Coordinates1[T1] = Value[T1] :: HNil

  /** Short hand for 2 coordinates. */
  type Coordinates2[T1, T2] = Value[T1] :: Value[T2] :: HNil

  /** Short hand for 3 coordinates. */
  type Coordinates3[T1, T2, T3] = Value[T1] :: Value[T2] :: Value[T3] :: HNil

  /** Short hand for 4 coordinates. */
  type Coordinates4[T1, T2, T3, T4] = Value[T1] :: Value[T2] :: Value[T3] :: Value[T4] :: HNil

  /** Short hand for 5 coordinates. */
  type Coordinates5[T1, T2, T3, T4, T5] = Value[T1] :: Value[T2] :: Value[T3] :: Value[T4] :: Value[T5] :: HNil

  /** Short hand for 6 coordinates. */
  type Coordinates6[T1, T2, T3, T4, T5, T6] = Value[T1] :: Value[T2] :: Value[T3] :: Value[T4] :: Value[T5] :: Value[T6] :: HNil

  /** Short hand for 7 coordinates. */
  type Coordinates7[T1, T2, T3, T4, T5, T6, T7] = Value[T1] :: Value[T2] :: Value[T3] :: Value[T4] :: Value[T5] :: Value[T6] :: Value[T7] :: HNil

  /** Short hand for 8 coordinates. */
  type Coordinates8[T1, T2, T3, T4, T5, T6, T7, T8] = Value[T1] :: Value[T2] :: Value[T3] :: Value[T4] :: Value[T5] :: Value[T6] :: Value[T7] :: Value[T8] :: HNil

  /** Short hand for 9 coordinates. */
  type Coordinates9[T1, T2, T3, T4, T5, T6, T7, T8, T9] = Value[T1] :: Value[T2] :: Value[T3] :: Value[T4] :: Value[T5] :: Value[T6] :: Value[T7] :: Value[T8] :: Value[T9] :: HNil
}
CommBank/grimlock
grimlock-core/src/main/scala/commbank/grimlock/framework/package.scala
Scala
apache-2.0
3,598
package io.makana.hexwar

import io.makana.hexwar.engine.domain.game.{GameEngine, GameState}
import io.makana.hexwar.engine.domain.model.boardmakers.RandomBoard
import io.makana.hexwar.engine.domain.model.{MapBoard, Player}
import io.makana.hexwar.engine.domain.vector.Vectr
import io.makana.hexwar.render.Render

/**
 * Entry point: builds a random 10x10 board with two players, renders the
 * initial state, plays one round and renders the result.
 *
 * Uses an explicit `main` instead of `extends App` to avoid the DelayedInit
 * initialization-order pitfalls of the App trait.
 */
object HexWar {

  def main(args: Array[String]): Unit = {
    val gameEngine = new GameEngine()
    val mapBoard = RandomBoard(Vectr(10, 10))
    val players = Seq(
      Player(1, "Player 1", "USA"),
      Player(2, "Player 2", "GER")
    )
    val gameState = GameState(mapBoard, players)

    Render.draw(gameState)
    val newGameState = gameEngine.playRound(gameState)
    Render.draw(newGameState)
  }
}
brmakana/hexwar
src/main/scala/io/makana/hexwar/HexWar.scala
Scala
apache-2.0
668
package jp.ne.opt.redshiftfake.parse.compat

import net.sf.jsqlparser.expression.Expression
import net.sf.jsqlparser.statement.select.{PlainSelect, FromItem, Join}

import scala.collection.JavaConverters._

/**
 * Fluent, chainable wrappers around jsqlparser's mutable setter-style AST
 * nodes, plus factories for LEFT/RIGHT join nodes.
 */
object Ops {

  /** Chainable setters on [[Join]] (allocation-free via value class). */
  implicit class RichJoin(val self: Join) extends AnyVal {

    /** Sets the ON condition and returns the same (mutated) join. */
    def on(expression: Expression): Join = {
      self.setOnExpression(expression)
      self
    }

    /** Sets the right-hand side of the join and returns the same join. */
    def withRightItem(fromItem: FromItem): Join = {
      self.setRightItem(fromItem)
      self
    }
  }

  /** Chainable setter on [[PlainSelect]]. */
  implicit class RichPlainSelect(val self: PlainSelect) extends AnyVal {

    /** Replaces the select's join list and returns the same select. */
    def withJoins(joins: Join*): PlainSelect = {
      self.setJoins(joins.asJava)
      self
    }
  }

  /** A fresh join node flagged as LEFT. */
  def mkLeftJoin: Join = {
    val j = new Join
    j.setLeft(true)
    j
  }

  /** A fresh join node flagged as RIGHT. */
  def mkRightJoin: Join = {
    val j = new Join
    j.setRight(true)
    j
  }
}
opt-tech/redshift-fake-driver
src/main/scala/jp/ne/opt/redshiftfake/parse/compat/Ops.scala
Scala
apache-2.0
847
/*
 * Copyright 2007-2010 WorldWide Conferencing, LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.liftweb {
package example {
package lib {

import _root_.net.liftweb._
import http._
import common._
import util._
import _root_.net.liftweb.example.model._

// Minimal XML-over-HTTP web services: GET /webservices/all_users lists users,
// GET|POST /webservices/add_user creates one from request parameters.
object WebServices {

  // register the WebServices with the dispatcher
  def init() {
    LiftRules.dispatch.append(NamedPF("Web Services Example") {
      case Req("webservices" :: "all_users" :: Nil, _, GetRequest) =>
        () => Full(all_users())
      case Req("webservices" :: "add_user" :: Nil, _, rt) if rt == GetRequest || rt == PostRequest =>
        () => Full(add_user())
    })
  }

  // List the XML for all users
  def all_users(): XmlResponse =
    XmlResponse(<all_users> { User.findAll.map(_.toXml) } </all_users>)

  // extract the parameters, create a user
  // return the appropriate response.
  // The for-comprehension over Box short-circuits to a Failure carrying the
  // "... parameter missing" message of the first absent parameter (?~).
  def add_user(): LiftResponse =
    (for {
      firstname <- S.param("firstname") ?~ "firstname parameter missing"
      lastname <- S.param("lastname") ?~ "lastname parameter missing"
      email <- S.param("email") ?~ "email parameter missing"
    } yield {
      val u = User.create.firstName(firstname).lastName(lastname).email(email).
        textArea(S.param("textarea") openOr "")
      // Password is optional: only set when supplied.
      S.param("password").map{v => u.password(v)}
      u.save
    }) match {
      case Full(success) => XmlResponse(<add_user success={success.toString}/>)
      case Failure(msg, _, _) => NotAcceptableResponse(msg)
      case _ => NotFoundResponse()
    }
}

}
}
}
jeppenejsum/liftweb
examples/example/src/main/scala/net/liftweb/example/lib/WebServices.scala
Scala
apache-2.0
2,097
package scala.meta
package internal

package object quasiquotes {

  /**
   * Adds a `quasiquote` flag to the scalameta Debug helper: enabled whenever
   * the `quasiquote.debug` JVM system property is set (to any value).
   */
  implicit class XtensionQuasiquoteDebug(debug: org.scalameta.debug.Debug.type) {
    def quasiquote: Boolean = sys.props.get("quasiquote.debug").isDefined
  }
}
beni55/scalameta
scalameta/quasiquotes/src/main/scala/scala/meta/internal/quasiquotes/package.scala
Scala
bsd-3-clause
212
/*
 * Copyright 2016 The BigDL Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.intel.analytics.bigdl.visualization.tensorboard

import com.intel.analytics.bigdl.utils.Engine
import org.tensorflow
import org.tensorflow.util.Event

/**
 * Writes Summary protocol buffers to event files.
 *
 * @param logDirecotry directory the event files are written to; created if missing.
 *                     NOTE(review): the name is misspelled, but it is a public
 *                     constructor parameter — renaming would break callers
 *                     using named arguments.
 * @param flushMillis flush interval handed to the underlying EventWriter.
 */
class FileWriter(val logDirecotry: String, flushMillis: Int = 1000) {

  private val logDir = new java.io.File(logDirecotry)
  require(!logDir.exists() || logDir.isDirectory, s"FileWriter: can not create $logDir")
  if (!logDir.exists()) logDir.mkdirs()

  private val eventWriter = new EventWriter(logDirecotry, flushMillis)
  // Run the writer loop on BigDL's default thread pool.
  Engine.default.invoke(() => eventWriter.run())

  /**
   * Adds a Summary protocol buffer to the event file.
   *
   * @param summary a Summary protobuf String generated by bigdl.utils.Summary's
   *                scalar()/histogram().
   * @param globalStep a consistent global count of the event.
   * @return this writer, for chaining.
   */
  def addSummary(summary: tensorflow.framework.Summary, globalStep: Long): this.type = {
    val event = Event.newBuilder().setSummary(summary).build()
    addEvent(event, globalStep)
    this
  }

  /**
   * Add a event protocol buffer to the event file.
   *
   * @param event A event protobuf contains summary protobuf.
   * @param globalStep a consistent global count of the event.
   * @return this writer, for chaining.
   */
  def addEvent(event: Event, globalStep: Long): this.type = {
    // Wall time is recorded in SECONDS (millis / 1e3), matching the Event proto.
    eventWriter.addEvent(
      event.toBuilder.setWallTime(System.currentTimeMillis() / 1e3).setStep(globalStep).build())
    this
  }

  /**
   * Close file writer.
   *
   * @return
   */
  def close(): Unit = {
    eventWriter.close()
  }
}
psyyz10/BigDL
spark/dl/src/main/scala/com/intel/analytics/bigdl/visualization/tensorboard/FileWriter.scala
Scala
apache-2.0
2,213
package de.htwg.zeta.server.controller

import javax.inject.Inject

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.providers.CredentialsProvider
import de.htwg.zeta.persistence.general.UserRepository
import de.htwg.zeta.server.forms.ForgotPasswordForm
import de.htwg.zeta.server.model.TokenCache
import de.htwg.zeta.server.routing.routes
import de.htwg.zeta.server.silhouette.SilhouetteLoginInfoDao
import play.api.i18n.Messages
import play.api.libs.mailer.Email
import play.api.libs.mailer.MailerClient
import play.api.mvc.AnyContent
import play.api.mvc.BaseController
import play.api.mvc.ControllerComponents
import play.api.mvc.Request
import play.api.mvc.Result

/**
 * The `Forgot Password` controller.
 *
 * @param mailerClient The mailer client.
 */
class ForgotPasswordController @Inject()(
    val controllerComponents: ControllerComponents,
    mailerClient: MailerClient,
    loginInfoRepo: SilhouetteLoginInfoDao,
    userRepo: UserRepository,
    tokenCache: TokenCache,
    implicit val ec: ExecutionContext
) extends BaseController {

  // TODO: New Workflow. See: https://github.com/Zeta-Project/zeta/issues/456

  /** Sends an email with password reset instructions.
   *
   * It sends an email to the given address if it exists in the database. Otherwise we do not
   * show the user a notice for not existing email addresses to prevent the leak of existing
   * email addresses.
   *
   * @param request  The request
   * @param messages The messages
   * @return The result to display.
   */
  def submit(request: Request[AnyContent], messages: Messages): Future[Result] = {
    ForgotPasswordForm.form.bindFromRequest()(request).fold(
      _ => Future.successful(NotAcceptable),
      email => {
        val loginInfo = LoginInfo(CredentialsProvider.ID, email)
        val mailed: Future[Result] = for {
          userId <- loginInfoRepo.read(loginInfo)
          user <- userRepo.read(userId)
          // The token is created but not yet embedded in the URL (see TODO below).
          _ <- tokenCache.create(user.id)
        } yield {
          // TODO: Replace URL
          val url = routes.ScalaRoutes.postPasswordForgot().absoluteURL()(request)
          mailerClient.send(Email(
            subject = messages("email.reset.password.subject"),
            from = messages("email.from"),
            to = Seq(email),
            bodyText = Some(views.txt.silhouette.emails.resetPassword(user, url, messages).body),
            bodyHtml = Some(views.html.silhouette.emails.resetPassword(user, url, messages).body)
          ))
          Ok
        }
        // Any failure (unknown address, repo error, mail error) collapses to the
        // same response so that existing addresses cannot be probed.
        mailed.recover { case _ => NotAcceptable }
      }
    )
  }
}
Zeta-Project/zeta
api/server/app/de/htwg/zeta/server/controller/ForgotPasswordController.scala
Scala
bsd-2-clause
2,747
package com.eevolution.context.dictionary.domain.model

import ai.x.play.json.Jsonx
import com.eevolution.context.dictionary.api.{ActiveEnabled, DomainModel, Identifiable, Traceable}
import org.joda.time.DateTime

/**
 * Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 * Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
 * Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 12/10/2017.
 */

/**
 * Replication Entity
 * @param replicationId Replication ID
 * @param tenantId Tenant ID
 * @param organizationId Organization ID
 * @param isActive Is Active
 * @param created Created
 * @param createdBy Created By
 * @param updated Updated
 * @param updatedBy Updated By
 * @param name Name
 * @param description Description
 * @param help Help
 * @param hostAddress Host Address
 * @param hostPort Host Port
 * @param replicationStrategyId Replication Strategy ID
 * @param isRMIOverHTTP Is RMI Over HTTP
 * @param processing Processing
 * @param idRangeStart Id Range Start
 * @param isRangeEnd Id Range End (NOTE(review): field name looks like a typo for
 *                   idRangeEnd; kept for binary/JSON compatibility)
 * @param remoteClientId Remote Client ID
 * @param remoteOrganizationId Remote Organization ID
 * @param prefix Prefix
 * @param suffix Suffix
 * @param dateLastRun Date Last Run
 * @param uuid UUID
 */
case class Replication(replicationId: Int,
                       tenantId: Int,
                       organizationId: Int,
                       isActive: Boolean = true,
                       created: DateTime = DateTime.now,
                       createdBy: Int,
                       updated: DateTime = DateTime.now,
                       updatedBy: Int,
                       name: String,
                       description: Option[String],
                       help: Option[String],
                       hostAddress: String,
                       hostPort: Int,
                       replicationStrategyId: Int,
                       isRMIOverHTTP: Boolean = true,
                       processing: Option[Boolean],
                       idRangeStart: Option[Int],
                       isRangeEnd: Option[Int],
                       remoteClientId: Option[Int],
                       remoteOrganizationId: Option[Int],
                       prefix: Option[String],
                       suffix: Option[String],
                       dateLastRun: DateTime = new DateTime,
                       uuid: String
                      ) extends DomainModel
  with ActiveEnabled
  with Identifiable
  with Traceable {
  override type ActiveEnabled = this.type
  override type Identifiable = this.type
  override type Traceable = this.type
  override def Id: Int = replicationId
  override val entityName: String = "AD_Replication"
  override val identifier: String = "AD_Replication_ID"
}

object Replication {
  implicit lazy val jsonFormat = Jsonx.formatCaseClass[Replication]

  /**
   * Factory for [[Replication]] from plain (non-Option) values.
   *
   * Fix: the previous implementation accepted `description`, `help`, `processing`,
   * `idRangeStart`, `isRangeEnd`, `remoteClientId`, `remoteOrganizationId`,
   * `prefix` and `suffix` but silently discarded them all, always storing `None`.
   * They are now propagated into the Option-typed fields (null Strings map to
   * None via `Option(...)`).
   */
  def create(replicationId: Int,
             tenantId: Int,
             organizationId: Int,
             isActive: Boolean,
             created: DateTime,
             createdBy: Int,
             updated: DateTime,
             updatedBy: Int,
             name: String,
             description: String,
             help: String,
             hostAddress: String,
             hostPort: Int,
             replicationStrategyId: Int,
             isRMIOverHTTP: Boolean,
             processing: Boolean,
             idRangeStart: Int,
             isRangeEnd: Int,
             remoteClientId: Int,
             remoteOrganizationId: Int,
             prefix: String,
             suffix: String,
             dateLastRun: DateTime,
             uuid: String) = Replication(replicationId, tenantId, organizationId, isActive, created,
    createdBy, updated, updatedBy, name, Option(description), Option(help), hostAddress, hostPort,
    replicationStrategyId, isRMIOverHTTP, Some(processing), Some(idRangeStart), Some(isRangeEnd),
    Some(remoteClientId), Some(remoteOrganizationId), Option(prefix), Option(suffix), dateLastRun,
    uuid)
}
adempiere/ADReactiveSystem
dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/model/Replication.scala
Scala
gpl-3.0
4,641
package org.jetbrains.plugins.scala
package lang
package refactoring
package delete

import java.util

import com.intellij.openapi.diagnostic.Logger
import com.intellij.openapi.util.{Condition, TextRange}
import com.intellij.psi._
import com.intellij.psi.impl.source.javadoc.PsiDocMethodOrFieldRef
import com.intellij.psi.javadoc.PsiDocTag
import com.intellij.psi.search.searches.ReferencesSearch
import com.intellij.psi.util.{MethodSignatureUtil, PsiTreeUtil}
import com.intellij.refactoring.safeDelete._
import com.intellij.refactoring.safeDelete.usageInfo._
import com.intellij.usageView.UsageInfo
import com.intellij.util._
import org.jetbrains.annotations.{NonNls, Nullable}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScNamedElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportStmt
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScObject
import org.jetbrains.plugins.scala.lang.psi.impl.search.ScalaOverridingMemberSearcher

import scala.collection.JavaConverters._

/**
 * This is a port of the static, private methods in JavaSafeDeleteProcessor.
 *
 * Much still needs to be made Scala aware.
 *
 * NOTE(review): several methods below use `return` inside lambdas passed to Java
 * `forEach`; these are nonlocal returns implemented via exceptions and should be
 * kept in mind when restructuring this code.
 *
 * @see com.intellij.refactoring.safeDelete.JavaSafeDeleteProcessor
 */
object SafeDeleteProcessorUtil {

  /** Filter accepting usages that live inside the elements being deleted (excluding whole files). */
  def getUsageInsideDeletedFilter(allElementsToDelete: Array[PsiElement]): Condition[PsiElement] = {
    (usage: PsiElement) => {
      !usage.isInstanceOf[PsiFile] && isInside(usage, allElementsToDelete)
    }
  }

  // All reference searches below are scoped to the element's own use scope.
  private def referenceSearch(element: PsiElement) = ReferencesSearch.search(element, element.getUseScope)

  /**
   * Collects usages of `psiClass` into `usages`, deciding per reference whether it can be
   * deleted without review (imports, references whose only resolve targets are themselves
   * being deleted) or needs review.
   */
  def findClassUsages(psiClass: PsiClass, allElementsToDelete: Array[PsiElement], usages: util.List[UsageInfo]) {
    val justPrivates: Boolean = containsOnlyPrivates(psiClass)
    referenceSearch(psiClass).forEach(new Processor[PsiReference] {
      def process(reference: PsiReference): Boolean = {
        val element: PsiElement = reference.getElement
        if (!isInside(element, allElementsToDelete)) {
          val parent: PsiElement = element.getParent
          if (parent.isInstanceOf[PsiReferenceList]) {
            val pparent: PsiElement = parent.getParent
            pparent match {
              case inheritor: PsiClass =>
                // A class with only private members may be removed from extends/implements lists.
                if (justPrivates) {
                  if (parent.equals(inheritor.getExtendsList) || parent.equals(inheritor.getImplementsList)) {
                    usages.add(new SafeDeleteExtendsClassUsageInfo(element.asInstanceOf[PsiJavaCodeReferenceElement], psiClass, inheritor))
                    return true
                  }
                }
              case _ =>
            }
          }
          LOG.assertTrue(element.getTextRange != null)
          // Scala references are deletable only if every non-synthetic resolve target
          // is itself scheduled for deletion.
          val shouldDelete = element match {
            case ref: ScStableCodeReferenceElement =>
              val results = ref.multiResolveScala(false)
              def isSyntheticObject(e: PsiElement) = e.asOptionOf[ScObject].exists(_.isSyntheticObject)
              val nonSyntheticTargets = results.map(_.getElement).filterNot(isSyntheticObject)
              nonSyntheticTargets.toSet subsetOf allElementsToDelete.toSet
            case _ => true
          }
          val usagesToAdd = if (shouldDelete) {
            val isInImport = PsiTreeUtil.getParentOfType(element, classOf[ScImportStmt]) != null
            if (isInImport) Seq(new SafeDeleteReferenceJavaDeleteUsageInfo(element, psiClass, true)) // delete without review
            else Seq(new SafeDeleteReferenceJavaDeleteUsageInfo(element, psiClass, false)) // delete with review
          } else Seq() // don't delete
          usages.addAll(usagesToAdd.asJava)
        }
        true
      }
    })
  }

  /** Whether the class has only private members. Always false for now. */
  def containsOnlyPrivates(aClass: PsiClass): Boolean = {
    false // TODO
  }

  /**
   * Records usages of a type parameter outside the deleted elements: the matching
   * type argument of every reference to the owner is marked deletable.
   */
  def findTypeParameterExternalUsages(typeParameter: PsiTypeParameter, usages: util.Collection[UsageInfo]) {
    val owner: PsiTypeParameterListOwner = typeParameter.getOwner
    if (owner != null) {
      val index: Int = owner.getTypeParameterList.getTypeParameterIndex(typeParameter)
      referenceSearch(owner).forEach(new Processor[PsiReference] {
        def process(reference: PsiReference): Boolean = {
          reference match {
            case referenceElement: PsiJavaCodeReferenceElement =>
              val typeArgs: Array[PsiTypeElement] = referenceElement.getParameterList.getTypeParameterElements
              if (typeArgs.length > index) {
                usages.add(new SafeDeleteReferenceJavaDeleteUsageInfo(typeArgs(index), typeParameter, true))
              }
            case _ =>
          }
          true
        }
      })
    }
  }

  /**
   * Collects usages of a method (delegating to [[findConstructorUsages]] for constructors)
   * together with its overriding members, and returns a filter for usages considered
   * "inside" the deletion. May return null via the constructor path.
   */
  @Nullable
  def findMethodUsages(psiMethod: PsiMethod, allElementsToDelete: Array[PsiElement], usages: util.List[UsageInfo]): Condition[PsiElement] = {
    val references: util.Collection[PsiReference] = referenceSearch(psiMethod).findAll
    if (psiMethod.isConstructor) {
      return findConstructorUsages(psiMethod, references, usages, allElementsToDelete)
    }
    val overridingElements: Array[PsiNamedElement] = ScalaOverridingMemberSearcher.search(psiMethod)
    val overridingMethods: Array[PsiNamedElement] = overridingElements.filterNot(x => allElementsToDelete.contains(x))
    references.forEach { reference =>
      val element: PsiElement = reference.getElement
      if (!isInside(element, allElementsToDelete) && !isInside(element, overridingMethods.map(x => x: PsiElement))) {
        val isReferenceInImport = PsiTreeUtil.getParentOfType(element, classOf[ScImportStmt]) != null
        usages.add(new SafeDeleteReferenceJavaDeleteUsageInfo(element, psiMethod, isReferenceInImport))
      }
    }
    val methodToReferences: util.HashMap[PsiNamedElement, util.Collection[PsiReference]] = new util.HashMap[PsiNamedElement, util.Collection[PsiReference]]
    for (overridingMethod <- overridingMethods) {
      val overridingReferences: util.Collection[PsiReference] = referenceSearch(overridingMethod).findAll
      methodToReferences.put(overridingMethod, overridingReferences)
    }
    // All overriding members are currently treated as valid for deletion (no validation yet).
    val validOverriding: util.Set[PsiElement] = {
      // TODO
      overridingMethods.toSet[PsiElement].asJava
    }
    validOverriding.forEach {
      case `psiMethod` =>
      case x: PsiMethod => usages.add(new SafeDeleteOverridingMethodUsageInfo(x, psiMethod))
      case x: ScNamedElement =>
        val info = new SafeDeleteUsageInfo(x, psiMethod) // TODO SafeDeleteOverridingMemberUsageInfo
        usages.add(info)
    }
    new Condition[PsiElement] {
      def value(usage: PsiElement): Boolean = {
        if (usage.isInstanceOf[PsiFile]) return false
        isInside(usage, allElementsToDelete) || isInside(usage, validOverriding)
      }
    }
  }

  /** Returns `methods` minus the ones already scheduled for deletion. */
  def removeDeletedMethods(methods: Array[PsiMethod], allElementsToDelete: Array[PsiElement]): Array[PsiMethod] = {
    val list: util.ArrayList[PsiMethod] = new util.ArrayList[PsiMethod]
    for (method <- methods) {
      if (!allElementsToDelete.contains(method)) {
        list.add(method)
      }
    }
    list.toArray(new Array[PsiMethod](list.size))
  }

  /**
   * Collects usages of a constructor, transitively following `super(...)` calls from
   * derived constructors (fixed-point loop), then validates the overriding set.
   * Returns null when the constructor is the class's only empty default constructor.
   */
  @Nullable
  def findConstructorUsages(constructor: PsiMethod, originalReferences: util.Collection[PsiReference], usages: util.List[UsageInfo], allElementsToDelete: Array[PsiElement]): Condition[PsiElement] = {
    val constructorsToRefs: util.HashMap[PsiMethod, util.Collection[PsiReference]] = new util.HashMap[PsiMethod, util.Collection[PsiReference]]
    val newConstructors: util.HashSet[PsiMethod] = new util.HashSet[PsiMethod]
    if (isTheOnlyEmptyDefaultConstructor(constructor)) return null
    newConstructors.add(constructor)
    constructorsToRefs.put(constructor, originalReferences)
    val passConstructors: util.HashSet[PsiMethod] = new util.HashSet[PsiMethod]
    do {
      passConstructors.clear()
      newConstructors.forEach { method =>
        val references: util.Collection[PsiReference] = constructorsToRefs.get(method)
        references.forEach { reference =>
          val overridingConstructor: PsiMethod = getOverridingConstructorOfSuperCall(reference.getElement)
          if (overridingConstructor != null && !constructorsToRefs.containsKey(overridingConstructor)) {
            val overridingConstructorReferences: util.Collection[PsiReference] = referenceSearch(overridingConstructor).findAll
            constructorsToRefs.put(overridingConstructor, overridingConstructorReferences)
            passConstructors.add(overridingConstructor)
          }
        }
      }
      newConstructors.clear()
      newConstructors.addAll(passConstructors)
    } while (!newConstructors.isEmpty)
    val validOverriding: util.Set[PsiMethod] = validateOverridingMethods(constructor, originalReferences, constructorsToRefs.keySet, constructorsToRefs, usages, allElementsToDelete)
    new Condition[PsiElement] {
      def value(usage: PsiElement): Boolean = {
        if (usage.isInstanceOf[PsiFile]) return false
        isInside(usage, allElementsToDelete) || isInside(usage, validOverriding)
      }
    }
  }

  /** True when the constructor is parameterless, has an empty body, and is the only constructor. */
  def isTheOnlyEmptyDefaultConstructor(constructor: PsiMethod): Boolean = {
    if (constructor.parameters.nonEmpty) return false
    val body: PsiCodeBlock = constructor.getBody
    if (body != null && body.getStatements.length > 0) return false
    constructor.containingClass.getConstructors.length == 1
  }

  /**
   * Iteratively removes from the candidate overriding set any method still referenced
   * from outside the deletion (or implementing multiple interfaces), records the
   * resulting usages, and suggests privatizing overrides that can be made private.
   */
  def validateOverridingMethods(originalMethod: PsiMethod, originalReferences: util.Collection[PsiReference], overridingMethods: util.Collection[PsiMethod], methodToReferences: util.HashMap[PsiMethod, util.Collection[PsiReference]], usages: util.List[UsageInfo], allElementsToDelete: Array[PsiElement]): util.Set[PsiMethod] = {
    val validOverriding: util.Set[PsiMethod] = new util.LinkedHashSet[PsiMethod](overridingMethods)
    val multipleInterfaceImplementations: util.Set[PsiMethod] = new util.HashSet[PsiMethod]
    var anyNewBadRefs: Boolean = false
    do {
      anyNewBadRefs = false
      overridingMethods.forEach { overridingMethod =>
        if (validOverriding.contains(overridingMethod)) {
          val overridingReferences: util.Collection[PsiReference] = methodToReferences.get(overridingMethod)
          var anyOverridingRefs: Boolean = false
          import scala.util.control.Breaks._
          breakable {
            overridingReferences.forEach { overridingReference =>
              val element: PsiElement = overridingReference.getElement
              if (!isInside(element, allElementsToDelete) && !isInside(element, validOverriding)) {
                anyOverridingRefs = true
                break()
              }
            }
          }
          if (!anyOverridingRefs && isMultipleInterfacesImplementation(overridingMethod, originalMethod, allElementsToDelete)) {
            anyOverridingRefs = true
            multipleInterfaceImplementations.add(overridingMethod)
          }
          if (anyOverridingRefs) {
            validOverriding.remove(overridingMethod)
            anyNewBadRefs = true
            originalReferences.forEach { reference =>
              val element: PsiElement = reference.getElement
              if (!isInside(element, allElementsToDelete) && !isInside(element, overridingMethods)) {
                usages.add(new SafeDeleteReferenceJavaDeleteUsageInfo(element, originalMethod, false))
                validOverriding.clear()
              }
            }
          }
        }
      }
    } while (anyNewBadRefs && !validOverriding.isEmpty)
    validOverriding.forEach ( method =>
      if (method != originalMethod) usages.add(new SafeDeleteOverridingMethodUsageInfo(method, originalMethod))
    )
    overridingMethods.forEach { method =>
      if (!validOverriding.contains(method) && !multipleInterfaceImplementations.contains(method)) {
        val methodCanBePrivate: Boolean = canBePrivate(method, methodToReferences.get(method), validOverriding, allElementsToDelete)
        if (methodCanBePrivate) {
          usages.add(new SafeDeletePrivatizeMethod(method, originalMethod))
        }
      }
    }
    validOverriding
  }

  /** True when `method` also overrides a super method unrelated to `originalMethod` that is not being deleted. */
  def isMultipleInterfacesImplementation(method: PsiMethod, originalMethod: PsiMethod, allElementsToDelete: Array[PsiElement]): Boolean = {
    val methods: Array[PsiMethod] = method.findSuperMethods
    for (superMethod <- methods) {
      if (ArrayUtilRt.find(allElementsToDelete, superMethod) < 0 && !MethodSignatureUtil.isSuperMethod(originalMethod, superMethod)) {
        return true
      }
    }
    false
  }

  /**
   * If `element` is the `super` reference of a `super(...)` call statement, returns
   * the constructor containing that call; null otherwise.
   */
  @Nullable
  def getOverridingConstructorOfSuperCall(element: PsiElement): PsiMethod = {
    if (element.isInstanceOf[PsiReferenceExpression] && "super".equals(element.getText)) {
      var parent: PsiElement = element.getParent
      if (parent.isInstanceOf[PsiMethodCallExpression]) {
        parent = parent.getParent
        if (parent.isInstanceOf[PsiExpressionStatement]) {
          parent = parent.getParent
          if (parent.isInstanceOf[PsiCodeBlock]) {
            parent = parent.getParent
            parent match {
              case method: PsiMethod if method.isConstructor => return method
              case _ =>
            }
          }
        }
      }
    }
    null
  }

  /**
   * Checks whether `method` could be made private: every remaining reference (outside
   * the deleted/overriding sets) must still be able to access it with a private
   * modifier. The synthetic method "x3" exists only to obtain a private modifier list.
   */
  def canBePrivate(method: PsiMethod, references: util.Collection[PsiReference], deleted: util.Collection[_ <: PsiElement], allElementsToDelete: Array[PsiElement]): Boolean = {
    val containingClass: PsiClass = method.containingClass
    if (containingClass == null) {
      return false
    }
    val manager: PsiManager = method.getManager
    val facade: JavaPsiFacade = JavaPsiFacade.getInstance(manager.getProject)
    val factory: PsiElementFactory = facade.getElementFactory
    var privateModifierList: PsiModifierList = null
    try {
      val newMethod: PsiMethod = factory.createMethod("x3", PsiType.VOID)
      privateModifierList = newMethod.getModifierList
      privateModifierList.setModifierProperty(PsiModifier.PRIVATE, true)
    } catch {
      case _: IncorrectOperationException =>
        LOG.assertTrue(false)
        return false
    }
    references.forEach { reference =>
      val element: PsiElement = reference.getElement
      // Nonlocal return: exits canBePrivate from inside the Java forEach lambda.
      if (!isInside(element, allElementsToDelete) && !isInside(element, deleted) && !facade.getResolveHelper.isAccessible(method, privateModifierList, element, null, null)) {
        return false
      }
    }
    true
  }

  /**
   * Collects usages of a field, distinguishing write references (left side of an
   * assignment) from reads/imports, and returns the inside-deletion filter used.
   */
  def findFieldUsages(psiField: PsiField, usages: util.List[UsageInfo], allElementsToDelete: Array[PsiElement]): Condition[PsiElement] = {
    val isInsideDeleted: Condition[PsiElement] = getUsageInsideDeletedFilter(allElementsToDelete)
    referenceSearch(psiField).forEach(new Processor[PsiReference] {
      def process(reference: PsiReference): Boolean = {
        if (!isInsideDeleted.value(reference.getElement)) {
          val element: PsiElement = reference.getElement
          val parent: PsiElement = element.getParent
          parent match {
            case assignExpr: PsiAssignmentExpression if element == assignExpr.getLExpression =>
              usages.add(new SafeDeleteFieldWriteReference(assignExpr, psiField))
            case _ =>
              val range: TextRange = reference.getRangeInElement
              usages.add(new SafeDeleteReferenceJavaDeleteUsageInfo(reference.getElement, psiField, range.getStartOffset, range.getEndOffset, false, element.parentOfType(classOf[PsiImportStaticStatement]).isDefined))
          }
        }
        true
      }
    })
    isInsideDeleted
  }

  /**
   * Collects usages of a method parameter: the matching argument at every call site
   * (all trailing args for varargs), javadoc @see signatures (rewritten without the
   * parameter), doc tags, and references inside the method body (super-delegating
   * calls are safe to delete).
   */
  def findParameterUsages(parameter: PsiParameter, usages: util.List[UsageInfo]) {
    val method: PsiMethod = parameter.getDeclarationScope.asInstanceOf[PsiMethod]
    val index: Int = method.getParameterList.getParameterIndex(parameter)
    referenceSearch(method).forEach(new Processor[PsiReference] {
      def process(reference: PsiReference): Boolean = {
        val element: PsiElement = reference.getElement
        var call: PsiCall = null
        element match {
          case psiCall: PsiCall => call = psiCall
          case _ => element.getParent match {
            case psiCall: PsiCall => call = psiCall
            case _ =>
          }
        }
        if (call != null) {
          val argList: PsiExpressionList = call.getArgumentList
          if (argList != null) {
            val args: Array[PsiExpression] = argList.getExpressions
            if (index < args.length) {
              if (!parameter.isVarArgs) {
                usages.add(new SafeDeleteReferenceJavaDeleteUsageInfo(args(index), parameter, true))
              } else {
                // Varargs: every argument from `index` onward belongs to this parameter.
                {
                  var i: Int = index
                  while (i < args.length) {
                    usages.add(new SafeDeleteReferenceJavaDeleteUsageInfo(args(i), parameter, true))
                    i += 1
                  }
                }
              }
            }
          }
        } else element match {
          case methodOrFieldRef: PsiDocMethodOrFieldRef =>
            if (methodOrFieldRef.getSignature != null) {
              // Rebuild the @see reference text with the deleted parameter removed.
              @NonNls val newText: StringBuffer = new StringBuffer
              newText.append("/** @see #").append(method.name).append('(')
              val parameters: java.util.List[PsiParameter] = new util.ArrayList[PsiParameter](util.Arrays.asList(method.getParameterList.getParameters: _*))
              parameters.remove(parameter)
              newText.append(parameters.asScala.map(_.getType.getCanonicalText).mkString(","))
              newText.append(")*/")
              usages.add(new SafeDeleteReferenceJavaDeleteUsageInfo(element, parameter, true) {
                override def deleteElement() {
                  val javadocMethodReference: PsiDocMethodOrFieldRef#MyReference = element.getReference.asInstanceOf[PsiDocMethodOrFieldRef#MyReference]
                  if (javadocMethodReference != null) {
                    javadocMethodReference.bindToText(method.containingClass, newText)
                  }
                }
              })
            }
          case _ =>
        }
        true
      }
    })
    referenceSearch(parameter).forEach(new Processor[PsiReference] {
      def process(reference: PsiReference): Boolean = {
        val element: PsiElement = reference.getElement
        val docTag: PsiDocTag = PsiTreeUtil.getParentOfType(element, classOf[PsiDocTag])
        if (docTag != null) {
          usages.add(new SafeDeleteReferenceJavaDeleteUsageInfo(docTag, parameter, true))
          return true
        }
        // References forwarded to super.method(...) are safe to delete with the parameter.
        var isSafeDelete: Boolean = false
        element.getParent.getParent match {
          case call: PsiMethodCallExpression =>
            val methodExpression: PsiReferenceExpression = call.getMethodExpression
            if (methodExpression.getText.equals(PsiKeyword.SUPER)) {
              isSafeDelete = true
            } else if (methodExpression.getQualifierExpression.isInstanceOf[PsiSuperExpression]) {
              val superMethod: PsiMethod = call.resolveMethod
              if (superMethod != null && MethodSignatureUtil.isSuperMethod(superMethod, method)) {
                isSafeDelete = true
              }
            }
          case _ =>
        }
        usages.add(new SafeDeleteReferenceJavaDeleteUsageInfo(element, parameter, isSafeDelete))
        true
      }
    })
  }

  /** Array overload: true if `place` is inside any of `ancestors`. */
  def isInside(place: PsiElement, ancestors: Array[PsiElement]): Boolean = {
    isInside(place, util.Arrays.asList(ancestors : _*))
  }

  /** Collection overload: true if `place` is inside any of `ancestors` (nonlocal return from forEach). */
  def isInside(place: PsiElement, ancestors: util.Collection[_ <: PsiElement]): Boolean = {
    ancestors.forEach { element =>
      if (isInside(place, element)) return true
    }
    false
  }

  /**
   * Single-ancestor check; additionally treats comments in a single-class Java file
   * as "inside" that class.
   */
  def isInside(place: PsiElement, ancestor: PsiElement): Boolean = {
    if (SafeDeleteProcessor.isInside(place, ancestor)) return true
    if (place.isInstanceOf[PsiComment] && ancestor.isInstanceOf[PsiClass]) {
      val aClass: PsiClass = ancestor.asInstanceOf[PsiClass]
      aClass.getParent match {
        case file: PsiJavaFile =>
          if (PsiTreeUtil.isAncestor(file, place, false)) {
            if (file.getClasses.length == 1) {
              return true
            }
          }
        case _ =>
      }
    }
    false
  }

  private val LOG: Logger = Logger.getInstance("#com.intellij.refactoring.safeDelete.JavaSafeDeleteProcessor")
}
jastice/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/refactoring/delete/SafeDeleteProcessorUtil.scala
Scala
apache-2.0
20,501
package scalaDemo

/**
 * Demonstrates by-name parameters: `t: => Long` is NOT evaluated at the call
 * site; it is re-evaluated on every reference inside `delayed`.
 *
 * Created by liush on 17-7-23.
 */
object byNameParameterDemo {

  /** Prints a trace line and returns the current nanosecond timestamp. */
  def nano(): Long = {
    println("Getting nano")
    System.nanoTime
  }

  /**
   * Takes a by-name parameter. Because `t` is referenced twice below, the
   * argument expression runs twice — so "In delayed method" prints before any
   * "Getting nano", and "Getting nano" prints twice when called with `nano()`.
   *
   * @param t lazily evaluated Long expression
   * @return the value of the final evaluation of `t`
   */
  def delayed(t: => Long): Long = { // => indicates a by-name parameter
    println("In delayed method")
    println("Param: " + t)
    t
  }

  /**
   * Explicit entry point instead of `extends App`: avoids the App trait's
   * DelayedInit initialization-order pitfalls while keeping the same output
   * when the object is run.
   */
  def main(args: Array[String]): Unit = {
    println(delayed(nano()))
  }
}
tophua/spark1.52
examples/src/main/scala/scalaDemo/byNameParameterDemo.scala
Scala
apache-2.0
338
package gitbucket.core.controller import gitbucket.core.issues.html import gitbucket.core.model.Account import gitbucket.core.service.IssuesService._ import gitbucket.core.service._ import gitbucket.core.util.Implicits._ import gitbucket.core.util._ import gitbucket.core.view import gitbucket.core.view.Markdown import org.scalatra.forms._ import org.scalatra.{BadRequest, Ok} class IssuesController extends IssuesControllerBase with IssuesService with RepositoryService with AccountService with LabelsService with MilestonesService with ActivityService with HandleCommentService with IssueCreationService with ReadableUsersAuthenticator with ReferrerAuthenticator with WritableUsersAuthenticator with MergeService with PullRequestService with WebHookIssueCommentService with WebHookPullRequestReviewCommentService with CommitsService with PrioritiesService with RequestCache trait IssuesControllerBase extends ControllerBase { self: IssuesService with RepositoryService with AccountService with LabelsService with MilestonesService with ActivityService with HandleCommentService with IssueCreationService with ReadableUsersAuthenticator with ReferrerAuthenticator with WritableUsersAuthenticator with PullRequestService with WebHookIssueCommentService with PrioritiesService => case class IssueCreateForm( title: String, content: Option[String], assignedUserName: Option[String], milestoneId: Option[Int], priorityId: Option[Int], labelNames: Option[String] ) case class CommentForm(issueId: Int, content: String) case class IssueStateForm(issueId: Int, content: Option[String]) val issueCreateForm = mapping( "title" -> trim(label("Title", text(required))), "content" -> trim(optional(text())), "assignedUserName" -> trim(optional(text())), "milestoneId" -> trim(optional(number())), "priorityId" -> trim(optional(number())), "labelNames" -> trim(optional(text())) )(IssueCreateForm.apply) val issueTitleEditForm = mapping( "title" -> trim(label("Title", text(required))) )(x => x) val 
issueEditForm = mapping( "content" -> trim(optional(text())) )(x => x) val commentForm = mapping( "issueId" -> label("Issue Id", number()), "content" -> trim(label("Comment", text(required))) )(CommentForm.apply) val issueStateForm = mapping( "issueId" -> label("Issue Id", number()), "content" -> trim(optional(text())) )(IssueStateForm.apply) get("/:owner/:repository/issues")(referrersOnly { repository => val q = request.getParameter("q") if (Option(q).exists(_.contains("is:pr"))) { redirect(s"/${repository.owner}/${repository.name}/pulls?q=${StringUtil.urlEncode(q)}") } else { searchIssues(repository) } }) get("/:owner/:repository/issues/:id")(referrersOnly { repository => val issueId = params("id") getIssue(repository.owner, repository.name, issueId) map { issue => if (issue.isPullRequest) { redirect(s"/${repository.owner}/${repository.name}/pull/${issueId}") } else { html.issue( issue, getComments(repository.owner, repository.name, issueId.toInt), getIssueLabels(repository.owner, repository.name, issueId.toInt), getAssignableUserNames(repository.owner, repository.name), getMilestonesWithIssueCount(repository.owner, repository.name), getPriorities(repository.owner, repository.name), getLabels(repository.owner, repository.name), isIssueEditable(repository), isIssueManageable(repository), isIssueCommentManageable(repository), repository ) } } getOrElse NotFound() }) get("/:owner/:repository/issues/new")(readableUsersOnly { repository => if (isIssueEditable(repository)) { // TODO Should this check is provided by authenticator? 
html.create( getAssignableUserNames(repository.owner, repository.name), getMilestones(repository.owner, repository.name), getPriorities(repository.owner, repository.name), getDefaultPriority(repository.owner, repository.name), getLabels(repository.owner, repository.name), isIssueManageable(repository), getContentTemplate(repository, "ISSUE_TEMPLATE"), repository ) } else Unauthorized() }) post("/:owner/:repository/issues/new", issueCreateForm)(readableUsersOnly { (form, repository) => context.withLoginAccount { loginAccount => if (isIssueEditable(repository)) { // TODO Should this check is provided by authenticator? val issue = createIssue( repository, form.title, form.content, form.assignedUserName, form.milestoneId, form.priorityId, form.labelNames.toSeq.flatMap(_.split(",")), loginAccount ) redirect(s"/${issue.userName}/${issue.repositoryName}/issues/${issue.issueId}") } else Unauthorized() } }) ajaxPost("/:owner/:repository/issues/edit_title/:id", issueTitleEditForm)(readableUsersOnly { (title, repository) => context.withLoginAccount { loginAccount => getIssue(repository.owner, repository.name, params("id")).map { issue => if (isEditableContent(repository.owner, repository.name, issue.openedUserName, loginAccount)) { if (issue.title != title) { // update issue updateIssue(repository.owner, repository.name, issue.issueId, title, issue.content) // extract references and create refer comment createReferComment(repository.owner, repository.name, issue.copy(title = title), title, loginAccount) createComment( repository.owner, repository.name, loginAccount.userName, issue.issueId, issue.title + "\\r\\n" + title, "change_title" ) } redirect(s"/${repository.owner}/${repository.name}/issues/_data/${issue.issueId}") } else Unauthorized() } getOrElse NotFound() } }) ajaxPost("/:owner/:repository/issues/edit/:id", issueEditForm)(readableUsersOnly { (content, repository) => context.withLoginAccount { loginAccount => getIssue(repository.owner, repository.name, 
params("id")).map { issue => if (isEditableContent(repository.owner, repository.name, issue.openedUserName, loginAccount)) { // update issue updateIssue(repository.owner, repository.name, issue.issueId, issue.title, content) // extract references and create refer comment createReferComment(repository.owner, repository.name, issue, content.getOrElse(""), loginAccount) redirect(s"/${repository.owner}/${repository.name}/issues/_data/${issue.issueId}") } else Unauthorized() } getOrElse NotFound() } }) post("/:owner/:repository/issue_comments/new", commentForm)(readableUsersOnly { (form, repository) => context.withLoginAccount { loginAccount => getIssue(repository.owner, repository.name, form.issueId.toString).flatMap { issue => val actionOpt = params .get("action") .filter(_ => isEditableContent(issue.userName, issue.repositoryName, issue.openedUserName, loginAccount)) handleComment(issue, Some(form.content), repository, actionOpt) map { case (issue, id) => redirect( s"/${repository.owner}/${repository.name}/${if (issue.isPullRequest) "pull" else "issues"}/${form.issueId}#comment-${id}" ) } } getOrElse NotFound() } }) post("/:owner/:repository/issue_comments/state", issueStateForm)(readableUsersOnly { (form, repository) => context.withLoginAccount { loginAccount => getIssue(repository.owner, repository.name, form.issueId.toString).flatMap { issue => val actionOpt = params .get("action") .filter(_ => isEditableContent(issue.userName, issue.repositoryName, issue.openedUserName, loginAccount)) handleComment(issue, form.content, repository, actionOpt) map { case (issue, id) => redirect( s"/${repository.owner}/${repository.name}/${if (issue.isPullRequest) "pull" else "issues"}/${form.issueId}#comment-${id}" ) } } getOrElse NotFound() } }) ajaxPost("/:owner/:repository/issue_comments/edit/:id", commentForm)(readableUsersOnly { (form, repository) => context.withLoginAccount { loginAccount => getComment(repository.owner, repository.name, params("id")).map { comment => if 
(isEditableContent(repository.owner, repository.name, comment.commentedUserName, loginAccount)) { updateComment(comment.issueId, comment.commentId, form.content) redirect(s"/${repository.owner}/${repository.name}/issue_comments/_data/${comment.commentId}") } else Unauthorized() } getOrElse NotFound() } }) ajaxPost("/:owner/:repository/issue_comments/delete/:id")(readableUsersOnly { repository => context.withLoginAccount { loginAccount => getComment(repository.owner, repository.name, params("id")).map { comment => if (isDeletableComment(repository.owner, repository.name, comment.commentedUserName, loginAccount)) { Ok(deleteComment(repository.owner, repository.name, comment.issueId, comment.commentId)) } else Unauthorized() } getOrElse NotFound() } }) ajaxGet("/:owner/:repository/issues/_data/:id")(readableUsersOnly { repository => context.withLoginAccount { loginAccount => getIssue(repository.owner, repository.name, params("id")) map { x => if (isEditableContent(x.userName, x.repositoryName, x.openedUserName, loginAccount)) { params.get("dataType") collect { case t if t == "html" => html.editissue(x.content, x.issueId, repository) } getOrElse { contentType = formats("json") org.json4s.jackson.Serialization.write( Map( "title" -> x.title, "content" -> Markdown.toHtml( markdown = x.content getOrElse "No description given.", repository = repository, branch = repository.repository.defaultBranch, enableWikiLink = false, enableRefsLink = true, enableAnchor = true, enableLineBreaks = true, enableTaskList = true, hasWritePermission = true ) ) ) } } else Unauthorized() } getOrElse NotFound() } }) ajaxGet("/:owner/:repository/issue_comments/_data/:id")(readableUsersOnly { repository => context.withLoginAccount { loginAccount => getComment(repository.owner, repository.name, params("id")) map { x => if (isEditableContent(x.userName, x.repositoryName, x.commentedUserName, loginAccount)) { params.get("dataType") collect { case t if t == "html" => html.editcomment(x.content, 
x.commentId, repository) } getOrElse { contentType = formats("json") org.json4s.jackson.Serialization.write( Map( "content" -> view.Markdown.toHtml( markdown = x.content, repository = repository, branch = repository.repository.defaultBranch, enableWikiLink = false, enableRefsLink = true, enableAnchor = true, enableLineBreaks = true, enableTaskList = true, hasWritePermission = true ) ) ) } } else Unauthorized() } getOrElse NotFound() } }) ajaxPost("/:owner/:repository/issues/new/label")(writableUsersOnly { repository => val labelNames = params("labelNames").split(",") val labels = getLabels(repository.owner, repository.name).filter(x => labelNames.contains(x.labelName)) html.labellist(labels) }) ajaxPost("/:owner/:repository/issues/:id/label/new")(writableUsersOnly { repository => val issueId = params("id").toInt registerIssueLabel(repository.owner, repository.name, issueId, params("labelId").toInt, true) html.labellist(getIssueLabels(repository.owner, repository.name, issueId)) }) ajaxPost("/:owner/:repository/issues/:id/label/delete")(writableUsersOnly { repository => val issueId = params("id").toInt deleteIssueLabel(repository.owner, repository.name, issueId, params("labelId").toInt, true) html.labellist(getIssueLabels(repository.owner, repository.name, issueId)) }) ajaxPost("/:owner/:repository/issues/:id/assign")(writableUsersOnly { repository => updateAssignedUserName( repository.owner, repository.name, params("id").toInt, assignedUserName("assignedUserName"), true ) Ok("updated") }) ajaxPost("/:owner/:repository/issues/:id/milestone")(writableUsersOnly { repository => updateMilestoneId(repository.owner, repository.name, params("id").toInt, milestoneId("milestoneId"), true) milestoneId("milestoneId").map { milestoneId => getMilestonesWithIssueCount(repository.owner, repository.name) .find(_._1.milestoneId == milestoneId) .map { case (_, openCount, closeCount) => gitbucket.core.issues.milestones.html.progress(openCount + closeCount, closeCount) } getOrElse 
NotFound() } getOrElse Ok() }) ajaxPost("/:owner/:repository/issues/:id/priority")(writableUsersOnly { repository => val priority = priorityId("priorityId") updatePriorityId(repository.owner, repository.name, params("id").toInt, priority, true) Ok("updated") }) post("/:owner/:repository/issues/batchedit/state")(writableUsersOnly { repository => val action = params.get("value") action match { case Some("open") => executeBatch(repository) { issueId => getIssue(repository.owner, repository.name, issueId.toString).foreach { issue => handleComment(issue, None, repository, Some("reopen")) } } if (params("uri").nonEmpty) { redirect(params("uri")) } case Some("close") => executeBatch(repository) { issueId => getIssue(repository.owner, repository.name, issueId.toString).foreach { issue => handleComment(issue, None, repository, Some("close")) } } if (params("uri").nonEmpty) { redirect(params("uri")) } case _ => BadRequest() } }) post("/:owner/:repository/issues/batchedit/label")(writableUsersOnly { repository => params("value").toIntOpt.map { labelId => executeBatch(repository) { issueId => getIssueLabel(repository.owner, repository.name, issueId, labelId) getOrElse { registerIssueLabel(repository.owner, repository.name, issueId, labelId, true) if (params("uri").nonEmpty) { redirect(params("uri")) } } } } getOrElse NotFound() }) post("/:owner/:repository/issues/batchedit/assign")(writableUsersOnly { repository => val value = assignedUserName("value") executeBatch(repository) { updateAssignedUserName(repository.owner, repository.name, _, value, true) } if (params("uri").nonEmpty) { redirect(params("uri")) } }) post("/:owner/:repository/issues/batchedit/milestone")(writableUsersOnly { repository => val value = milestoneId("value") executeBatch(repository) { updateMilestoneId(repository.owner, repository.name, _, value, true) } }) post("/:owner/:repository/issues/batchedit/priority")(writableUsersOnly { repository => val value = priorityId("value") executeBatch(repository) { 
updatePriorityId(repository.owner, repository.name, _, value, true) } }) get("/:owner/:repository/_attached/:file")(referrersOnly { repository => (Directory.getAttachedDir(repository.owner, repository.name) match { case dir if (dir.exists && dir.isDirectory) => dir.listFiles.find(_.getName.startsWith(params("file") + ".")).map { file => response.setHeader("Content-Disposition", f"""inline; filename=${file.getName}""") RawData(FileUtil.getSafeMimeType(file.getName), file) } case _ => None }) getOrElse NotFound() }) /** * JSON API for issue and PR completion. */ ajaxGet("/:owner/:repository/_issue/proposals")(writableUsersOnly { repository => contentType = formats("json") org.json4s.jackson.Serialization.write( Map( "options" -> ( getOpenIssues(repository.owner, repository.name) .map { t => Map( "label" -> s"""${if (t.isPullRequest) "<i class='octicon octicon-git-pull-request'></i>" else "<i class='octicon octicon-issue-opened'></i>"}<b> #${StringUtil .escapeHtml(t.issueId.toString)} ${StringUtil .escapeHtml(StringUtil.cutTail(t.title, 50, "..."))}</b>""", "value" -> t.issueId.toString ) } ) ) ) }) val assignedUserName = (key: String) => params.get(key) filter (_.trim != "") val milestoneId: String => Option[Int] = (key: String) => params.get(key).flatMap(_.toIntOpt) val priorityId: String => Option[Int] = (key: String) => params.get(key).flatMap(_.toIntOpt) private def executeBatch(repository: RepositoryService.RepositoryInfo)(execute: Int => Unit) = { params("checked").split(',') map (_.toInt) foreach execute params("from") match { case "issues" => redirect(s"/${repository.owner}/${repository.name}/issues") case "pulls" => redirect(s"/${repository.owner}/${repository.name}/pulls") case _ => } } private def searchIssues(repository: RepositoryService.RepositoryInfo) = { val page = IssueSearchCondition.page(request) // retrieve search condition val condition = IssueSearchCondition(request) // search issues val issues = searchIssue( condition, IssueSearchOption.Issues, 
(page - 1) * IssueLimit, IssueLimit, repository.owner -> repository.name ) html.list( "issues", issues.map(issue => (issue, None)), page, getAssignableUserNames(repository.owner, repository.name), getMilestones(repository.owner, repository.name), getPriorities(repository.owner, repository.name), getLabels(repository.owner, repository.name), countIssue(condition.copy(state = "open"), IssueSearchOption.Issues, repository.owner -> repository.name), countIssue(condition.copy(state = "closed"), IssueSearchOption.Issues, repository.owner -> repository.name), condition, repository, isIssueEditable(repository), isIssueManageable(repository) ) } /** * Tests whether an issue or a comment is editable by a logged-in user. */ private def isEditableContent(owner: String, repository: String, author: String, loginAccount: Account)( implicit context: Context ): Boolean = { hasDeveloperRole(owner, repository, context.loginAccount) || author == loginAccount.userName } /** * Tests whether an issue comment is deletable by a logged-in user. */ private def isDeletableComment(owner: String, repository: String, author: String, loginAccount: Account)( implicit context: Context ): Boolean = { hasOwnerRole(owner, repository, context.loginAccount) || author == loginAccount.userName } }
imeszaros/gitbucket
src/main/scala/gitbucket/core/controller/IssuesController.scala
Scala
apache-2.0
20,366
package org.jetbrains.plugins.scala.lang package completion package postfix package templates import com.intellij.psi.PsiElement import org.jetbrains.plugins.scala.lang.completion.postfix.templates.selector.AncestorSelector.SelectAllAncestors /** * @author Roman.Shein * Date: 24.12.2015 */ final class ScalaSeqPostfixTemplate extends ScalaStringBasedPostfixTemplate( "Seq", "Seq(expr)", SelectAllAncestors() ) { override def getTemplateString(element: PsiElement): String = "Seq($expr$)" }
jastice/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/completion/postfix/templates/ScalaSeqPostfixTemplate.scala
Scala
apache-2.0
514
package org.openurp.edu.eams.base.web.action ExistsException import org.beangle.commons.collection.Collections import org.beangle.data.jpa.dao.OqlBuilder import org.beangle.commons.entity.util.HierarchyEntityUtils import org.beangle.commons.lang.Strings import org.beangle.struts2.helper.Params import org.springframework.dao.DataIntegrityViolationException import org.openurp.base.Department import org.openurp.edu.eams.base.model.DepartmentBean class DepartmentAction extends DepartmentSearchAction { def edit(): String = { builderDepartmentParamForPage(getEntity(classOf[Department], "department")) forward() } private def getMyFamily(depart: Department): List[Department] = { val departs = Collections.newBuffer[Any] departs.add(depart) findChildren(depart, departs) departs } private def findChildren(depart: Department, children: List[Department]) { if (Collections.isEmpty(depart.getChildren)) { return } for (one <- depart.getChildren) { children.add(one) findChildren(one, children) } } def save(): String = { val departmentId = getIntId("department") if (entityDao.duplicate(classOf[Department], departmentId, "code", get("department.code"))) { builderDepartmentParamForPage(populateEntity(classOf[Department], "department")) addError(getText("error.code.existed")) return "edit" } var department: DepartmentBean = null val departParams = Params.sub("department") try { if (null == departmentId) { department = new DepartmentBean() department.setSchool(getSchool) populate(department, departParams) logHelper.info("Create a depart with name:" + department.getName) } else { department = entityDao.get(classOf[DepartmentBean], departmentId) logHelper.info("Update a depart with name:" + department.getName) populate(department, departParams) } department.setSchool(getSchool) val errorForward = saveOrUpdate(department) if (null != errorForward) { return errorForward } } catch { case e: EntityExistsException => { logHelper.info("Failure save or update a depart with name:" + 
department.getName, e) return forwardError(Array("entity.department", "error.model.existed")) } case e: Exception => { logHelper.info("Failure save or update a depart with name:" + department.getName, e) return forwardError("error.occurred") } } redirect("search", "info.save.success") } protected def builderDepartmentParamForPage(department: Department) { if (null == department.id) { department.setTeaching(true) department.setCollege(true) } val departs = entityDao.getAll(classOf[Department]) departs.removeAll(getMyFamily(department)) HierarchyEntityUtils.sort(departs) put("parents", departs) put("department", department) } def remove(): String = { try { entityDao.remove(entityDao.get(classOf[Department], getIntIds("department"))) } catch { case e: DataIntegrityViolationException => { logger.error(e.getMessage) addError(getText("error.remove.beenUsed")) put("departments", entityDao.search(buildDepartmentQuery())) return "search" } } redirect("search", "info.action.success") } protected override def getExportDatas(): Iterable[Department] = { val departmentIds = Strings.splitToInt(get("departmentIds")) if (departmentIds.length > 0) { val builder = OqlBuilder.from(classOf[Department], "department") builder.where("department.id in (:departmentIds)", departmentIds) entityDao.search(builder) } else { entityDao.search(buildDepartmentQuery().limit(null)) } } }
openurp/edu-eams-webapp
web/src/main/scala/org/openurp/edu/eams/base/web/action/DepartmentAction.scala
Scala
gpl-3.0
3,823
package testfile import java.something.com package object SomeObject[A <: B] extends Implicits { type Booger[A] = A => Unit type SomeType = A <: B :> C type SomeOtherType = A ⇒ Thing type Something type Something <: SomethingElse type ParserContext = Context { type PrefixType = Parser } new Something#SomethingElse def x: Something#SomethingElse def hasFunctionType[A, B <: A, Z](f: A => B, g: (A, B) => Z): Unit = { println("Something") } val f: (Int, String) => Unit = (i: Int, s: String) => println(s"$i -- $s") val f: (Int, String) ⇒ Unit = (i: Int, s: String) ⇒ println(s"$i -- $s") } object Test { def test( param1: List[(Int, Int)], param2: List[Int]): List[Int] = { param2 match { case head :: tail => tail } } } case class ACaseClass(param1: Float = 14.23f) case object ACaseObject extends Something def x(): Unit = { case Something(a, b) => case SomethingElse() => case SomethingElseElse => } class ScalaClass(i: Int = 12, b: Trait[A, Trait[B, C]]) extends B with SomeTrait[A, B[String], D] { /** * I forgot comments! We spelcheck them. [[scala.Option]] * * {{{ * scala> This is a REPL line * scala> and this is another one * }}} * * <li></li> * * @param parameter Explanation of the parameter. Speling. * @return TODO */ val thing = "A String" // this is a trailing comment, spelchecked too [TODO] val thing = "A String with a \" in it" val intString = "A string with $stuff // and a comment in it" val intString = s"A string /* a comment and */ with $stuff and ${stuff} in it" val intString = s"""A string /* a comment and */ with $stuff and ${stuff} in it""" val intFString = f"A string with $stuff and ${stuff} and ${eval this}%-2.2f and $stuff%2d in it" val intFString = f"""A string with $stuff and ${stuff} and ${eval this}%-2.2f and $stuff%2d in it""" val otherThings = """|This is a string |that spans multiple lines. 
|""".stripMargin val intString = sql"select * from T where id = $id and name = ${name}" val intString = sql""" select * from T where id = $id and name = ${s"$name Jr"} and age > ${age + 10} """ val notImplemented = ??? implicit val somethingImplicit = true // Ripped off from Scalaz final def foldMap[B: Monoid](f: A => B = (a: A) => A): B = F.foldMap(self)(f) final def foldRight[B](z: => B)(f: (A, => B) => B): B = F.foldRight(self, z)(f) final def foldLeft[B](z: B)(f: (B, A) => B): B = F.foldLeft(self, z)(f) final def foldRightM[G[_], B](z: => B)(f: (A, => B) => G[B])(implicit M: Monad[G]): G[B] = F.foldRightM(self, z)(f) final def foldLeftM[G[_], B](z: B)(f: (B, A) => G[B])(implicit M: Monad[G]): G[B] = F.foldLeftM(self, z)(f) final def foldr[B](z: => B)(f: A => (=> B) => B): B = F.foldr(self, z)(f) final def foldl[B](z: B)(f: B => A => B): B = F.foldl(self, z)(f) final def foldrM[G[_], B](z: => B)(f: A => ( => B) => G[B])(implicit M: Monad[G]): G[B] = F.foldrM(self, z)(f) val aChar = 'a' val anEscapedChar = '\\' val anotherEscapedChar = '\n' val aUnicodeChar = '\u00ab' val aSymbol = 'SomeSymbol def number = 0xAf903adeL def float = 1f def float = 1F def float = 1.1f def float = 1.1F def float = 231.1232f def float = 231.2321F def float = .2f def float = .2F def double = 1d def double = 1D def double = 1.1d def double = 1.1D def double = 231.1232d def double = 231.2321D def double = 231.2321 def double = .2d def double = .2 def double = .2D def exp = 1.2342e-24 def exp = 1e+24 var flarf: Int = 12 def flooger(x: String): Unit = println(42) private val booger = "Hithere" protected[this] def something[A](y: SomeTrait[A])(implicit shoot: Function[Int, String]): Long = 12 private final val do = done someVar match { case Flooger(thing, that, matches) => flender ! 
Message(hi, there, guys) case '"' => Bah } try { whatever } catch { case e: Throwable } finally { at the end } while (a == b) { } for (x <- somecall) { dothing } for { a <- futureCall1 b <- futureCall2 } yield (a, b) protected[package] something = null def receive = super.receive require(something == true) val q"This $is a $string" = something q"""return this $thing""" tq"""return this $thing""" tq"return this $thing" cq"""return this $thing""" cq"return this $thing" pq"""return this $thing""" pq"return this $thing" val something = s"""bar="foo"""" val something = f"""bar="foo"""" val something = """bar="foo"""" val something = s"Interpolatin' fancy expressions ${bar map (_.toString)}" def someFunc[A <: B, X =:= Y] val soManyEscapes = "\\\"\u0031\n\b\r\f\t" // and a comment val soManyEscapes = """\\\"\u0031\n\b\r\f\t""" // and a comment val soManyEscapes = s"\\\"\u0031\n\b\r\f\t" // and a comment val soManyEscapes = f"\\\"\u0031\n\b\r\f\t" // and a comment val soManyEscapes = s"""\\\"\u0031\n\b\r\f\t""" // and a comment val soManyEscapes = f"""\\\"\u0031\n\b\r\f\t""" // and a comment val soManyEscapes = "\\\"\u0031\n\b\r\f\t" // and a comment }
randompearl/vim-scala
syntax/testfile.scala
Scala
apache-2.0
5,190
/* * VESolverTest.scala * Test of variable elimination solver. * * Created By: Avi Pfeffer (apfeffer@cra.com) * Creation Date: March 1, 2015 * * Copyright 2015 Avrom J. Pfeffer and Charles River Analytics, Inc. * See http://www.cra.com or email figaro@cra.com for information. * * See http://www.github.com/p2t2/figaro for a copy of the software license. */ package com.cra.figaro.test.algorithm.structured.solver import org.scalatest.{ WordSpec, Matchers } import com.cra.figaro.language._ import com.cra.figaro.library.compound._ import com.cra.figaro.algorithm.factored.factors.Factor import com.cra.figaro.algorithm.factored.factors.factory.Factory import com.cra.figaro.algorithm.factored.factors.SumProductSemiring import com.cra.figaro.algorithm.lazyfactored.Regular import com.cra.figaro.algorithm.structured.strategy.solve._ import com.cra.figaro.algorithm.structured._ import com.cra.figaro.algorithm.structured.solver._ import com.cra.figaro.algorithm.structured.algorithm.structured.StructuredMPEVE import com.cra.figaro.library.atomic.discrete.Uniform class VESolverTest extends WordSpec with Matchers { "Running VariableElimination without *" when { "given a flat model with no conditions or constraints" should { "produce the correct result over a single element" in { Universe.createNew() val cc = new ComponentCollection val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9) val e2 = Flip(e1) val e3 = Apply(e2, (b: Boolean) => b) val pr = new Problem(cc, List(e2)) pr.add(e1) pr.add(e3) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) c1.generateRange() c2.generateRange() c3.generateRange() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) pr.globals should equal(Set(c2)) pr.solved should equal(true) val result = multiplyAll(pr.solution) result.variables should 
equal(List(c2.variable)) result.size should equal(2) val c2IndexT = c2.variable.range.indexOf(Regular(true)) val c2IndexF = c2.variable.range.indexOf(Regular(false)) result.get(List(c2IndexT)) should be(0.6 +- 0.00000001) result.get(List(c2IndexF)) should be(0.4 +- 0.00000001) } "produce the correct result over multiple elements" in { Universe.createNew() val cc = new ComponentCollection val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9) val e2 = Flip(e1) val e3 = Apply(e2, (b: Boolean) => b) val pr = new Problem(cc, List(e2, e3)) pr.add(e1) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) c1.generateRange() c2.generateRange() c3.generateRange() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) pr.globals should equal(Set(c2, c3)) val result = multiplyAll(pr.solution) result.variables.size should equal(2) val c2IndexT = c2.variable.range.indexOf(Regular(true)) val c2IndexF = c2.variable.range.indexOf(Regular(false)) val c3IndexT = c3.variable.range.indexOf(Regular(true)) val c3IndexF = c3.variable.range.indexOf(Regular(false)) result.size should equal(4) val var0 = result.variables(0) val var1 = result.variables(1) if (var0 == c2.variable) { var1 should equal(c3.variable) result.get(List(c2IndexT, c3IndexT)) should equal(0.6) result.get(List(c2IndexT, c3IndexF)) should equal(0.0) result.get(List(c2IndexF, c3IndexT)) should equal(0.0) result.get(List(c2IndexF, c3IndexF)) should equal(0.4) } else { var0 should equal(c3.variable) var1 should equal(c2.variable) result.get(List(c3IndexT, c2IndexT)) should equal(0.6) result.get(List(c3IndexT, c2IndexF)) should equal(0.0) result.get(List(c3IndexF, c2IndexT)) should equal(0.0) result.get(List(c3IndexF, c2IndexF)) should equal(0.4) } } } "given a condition on a dependent element" should { "produce the result with the correct 
probability" in { Universe.createNew() val cc = new ComponentCollection val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9) val e2 = Flip(e1) val e3 = Apply(e2, (b: Boolean) => b) e3.observe(true) val pr = new Problem(cc, List(e1)) pr.add(e2) pr.add(e3) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) c1.generateRange() c2.generateRange() c3.generateRange() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) pr.globals should equal(Set(c1)) val result = multiplyAll(pr.solution) val c1Index3 = c1.variable.range.indexOf(Regular(0.3)) val c1Index5 = c1.variable.range.indexOf(Regular(0.5)) val c1Index7 = c1.variable.range.indexOf(Regular(0.7)) val c1Index9 = c1.variable.range.indexOf(Regular(0.9)) result.size should equal(4) result.get(List(c1Index3)) should be((0.25 * 0.3) +- 0.000000001) result.get(List(c1Index5)) should be((0.25 * 0.5) +- 0.000000001) result.get(List(c1Index7)) should be((0.25 * 0.7) +- 0.000000001) result.get(List(c1Index9)) should be((0.25 * 0.9) +- 0.000000001) } } "given a constraint on a dependent element" should { "produce the result with the correct probability" in { Universe.createNew() val cc = new ComponentCollection val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9) val e2 = Flip(e1) val e3 = Apply(e2, (b: Boolean) => b) e3.addConstraint((b: Boolean) => if (b) 0.5 else 0.2) val pr = new Problem(cc, List(e1)) pr.add(e2) pr.add(e3) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) c1.generateRange() c2.generateRange() c3.generateRange() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) pr.globals should equal(Set(c1)) val result = multiplyAll(pr.solution) 
val c1Index3 = c1.variable.range.indexOf(Regular(0.3)) val c1Index5 = c1.variable.range.indexOf(Regular(0.5)) val c1Index7 = c1.variable.range.indexOf(Regular(0.7)) val c1Index9 = c1.variable.range.indexOf(Regular(0.9)) result.size should equal(4) result.get(List(c1Index3)) should be((0.25 * (0.3 * 0.5 + 0.7 * 0.2)) +- 0.000000001) result.get(List(c1Index5)) should be((0.25 * (0.5 * 0.5 + 0.5 * 0.2)) +- 0.000000001) result.get(List(c1Index7)) should be((0.25 * (0.7 * 0.5 + 0.3 * 0.2)) +- 0.000000001) result.get(List(c1Index9)) should be((0.25 * (0.9 * 0.5 + 0.1 * 0.2)) +- 0.000000001) } } "given two constraints on a dependent element" should { "produce the result with the correct probability" in { Universe.createNew() val cc = new ComponentCollection val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9) val e2 = Flip(e1) val e3 = Apply(e2, (b: Boolean) => b) e3.addConstraint((b: Boolean) => if (b) 0.5 else 0.2) e3.addConstraint((b: Boolean) => if (b) 0.4 else 0.1) val pr = new Problem(cc, List(e1)) pr.add(e2) pr.add(e3) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) c1.generateRange() c2.generateRange() c3.generateRange() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) pr.globals should equal(Set(c1)) val result = multiplyAll(pr.solution) val c1Index3 = c1.variable.range.indexOf(Regular(0.3)) val c1Index5 = c1.variable.range.indexOf(Regular(0.5)) val c1Index7 = c1.variable.range.indexOf(Regular(0.7)) val c1Index9 = c1.variable.range.indexOf(Regular(0.9)) result.size should equal(4) result.get(List(c1Index3)) should be((0.25 * (0.3 * 0.5 * 0.4 + 0.7 * 0.2 * 0.1)) +- 0.000000001) result.get(List(c1Index5)) should be((0.25 * (0.5 * 0.5 * 0.4 + 0.5 * 0.2 * 0.1)) +- 0.000000001) result.get(List(c1Index7)) should be((0.25 * (0.7 * 0.5 * 0.4 + 0.3 * 0.2 * 0.1)) +- 0.000000001) 
result.get(List(c1Index9)) should be((0.25 * (0.9 * 0.5 * 0.4 + 0.1 * 0.2 * 0.1)) +- 0.000000001) } } "given constraints on two dependent elements" should { "produce the result with the correct probability" in { Universe.createNew() val cc = new ComponentCollection val e1 = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9) val e2 = Flip(e1) val e3 = Apply(e2, (b: Boolean) => b) e2.addConstraint((b: Boolean) => if (b) 0.5 else 0.2) e3.addConstraint((b: Boolean) => if (b) 0.4 else 0.1) val pr = new Problem(cc, List(e1)) pr.add(e2) pr.add(e3) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) c1.generateRange() c2.generateRange() c3.generateRange() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) pr.globals should equal(Set(c1)) val result = multiplyAll(pr.solution) val c1Index3 = c1.variable.range.indexOf(Regular(0.3)) val c1Index5 = c1.variable.range.indexOf(Regular(0.5)) val c1Index7 = c1.variable.range.indexOf(Regular(0.7)) val c1Index9 = c1.variable.range.indexOf(Regular(0.9)) result.size should equal(4) result.get(List(c1Index3)) should be((0.25 * (0.3 * 0.5 * 0.4 + 0.7 * 0.2 * 0.1)) +- 0.000000001) result.get(List(c1Index5)) should be((0.25 * (0.5 * 0.5 * 0.4 + 0.5 * 0.2 * 0.1)) +- 0.000000001) result.get(List(c1Index7)) should be((0.25 * (0.7 * 0.5 * 0.4 + 0.3 * 0.2 * 0.1)) +- 0.000000001) result.get(List(c1Index9)) should be((0.25 * (0.9 * 0.5 * 0.4 + 0.1 * 0.2 * 0.1)) +- 0.000000001) } } "given a contingent condition on an element" should { "produce the result with the correct probability" in { val universe = Universe.createNew() val cc = new ComponentCollection val ec1 = new EC1 val ec2 = new EC1 val e11 = Flip(0.6)("e1", ec1) val e12 = Flip(0.3)("e1", ec2) val e2 = Select(0.8 -> ec1, 0.2 -> ec2)("e2", universe) universe.assertEvidence("e2.e1", Observation(true)) val pr = 
new Problem(cc, List(e2)) pr.add(e11) pr.add(e12) val c11 = cc(e11) val c12 = cc(e12) val c2 = cc(e2) c11.generateRange() c12.generateRange() c2.generateRange() c11.makeNonConstraintFactors() c12.makeNonConstraintFactors() c2.makeNonConstraintFactors() c11.makeConstraintFactors() c12.makeConstraintFactors() c2.makeConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) pr.globals should equal(Set(c2)) val result = multiplyAll(pr.solution) val c2Index1 = c2.variable.range.indexOf(Regular(ec1)) val c2Index2 = c2.variable.range.indexOf(Regular(ec2)) result.size should equal(2) result.get(List(c2Index1)) should be((0.8 * 0.6) +- 0.000000001) result.get(List(c2Index2)) should be((0.2 * 0.3) +- 0.000000001) } } "with an element that uses another element multiple times, " + "always produce the same value for the different uses" in { Universe.createNew() val cc = new ComponentCollection val e1 = Flip(0.5) val e2 = Apply(e1, e1, (b1: Boolean, b2: Boolean) => b1 == b2) val pr = new Problem(cc, List(e2)) pr.add(e1) val c1 = cc(e1) val c2 = cc(e2) c1.generateRange() c2.generateRange() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c1.makeConstraintFactors() c2.makeConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) val result = multiplyAll(pr.solution) val c2IndexT = c2.variable.range.indexOf(Regular(true)) val c2IndexF = c2.variable.range.indexOf(Regular(false)) result.get(List(c2IndexT)) should be(1.0 +- 0.000000001) result.get(List(c2IndexF)) should be(0.0 +- 0.000000001) } "with a constraint on an element that is used multiple times, only factor in the constraint once" in { Universe.createNew() val cc = new ComponentCollection val f1 = Flip(0.5) val f2 = Flip(0.3) val e1 = Apply(f1, f1, (b1: Boolean, b2: Boolean) => b1 == b2) val e2 = Apply(f1, f2, (b1: Boolean, b2: Boolean) => b1 == b2) val d = Dist(0.5 -> e1, 0.5 -> e2) f1.setConstraint((b: Boolean) => if (b) 3.0; else 2.0) val pr = new Problem(cc, List(d)) 
pr.add(f1) pr.add(f2) pr.add(e1) pr.add(e2) val cf1 = cc(f1) val cf2 = cc(f2) val ce1 = cc(e1) val ce2 = cc(e2) val cd = cc(d) cf1.generateRange() cf2.generateRange() ce1.generateRange() ce2.generateRange() cd.generateRange() cf1.makeNonConstraintFactors() cf2.makeNonConstraintFactors() ce1.makeNonConstraintFactors() ce2.makeNonConstraintFactors() cd.makeNonConstraintFactors() cf1.makeConstraintFactors() cf2.makeConstraintFactors() ce1.makeConstraintFactors() ce2.makeConstraintFactors() cd.makeConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) // Probability that f1 is true = 0.6 // Probability that e1 is true = 1.0 // Probability that e2 is true = 0.6 * 0.3 + 0.4 * 0.7 = 0.46 // Probability that d is true = 0.5 * 1 + 0.5 * 0.46 = 0.73 val result = multiplyAll(pr.solution) val dIndexT = cd.variable.range.indexOf(Regular(true)) val dIndexF = cd.variable.range.indexOf(Regular(false)) val pT = result.get(List(dIndexT)) val pF = result.get(List(dIndexF)) (pT / (pT + pF)) should be(0.73 +- 0.000000001) } "with elements that are not used by the query or evidence, produce the correct result" in { val u1 = Universe.createNew() val cc = new ComponentCollection val u = Select(0.25 -> 0.3, 0.25 -> 0.5, 0.25 -> 0.7, 0.25 -> 0.9) val f = Flip(u) val a = If(f, Select(0.3 -> 1, 0.7 -> 2), Constant(2)) val pr = new Problem(cc, List(f)) pr.add(u) pr.add(a) val cu = cc(u) val cf = cc(f) val ca = cc(a) cu.generateRange() cf.generateRange() ca.expand() ca.generateRange() cu.makeNonConstraintFactors() cf.makeNonConstraintFactors() ca.makeNonConstraintFactors() cu.makeConstraintFactors() cf.makeConstraintFactors() ca.makeConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) val result = multiplyAll(pr.solution) val fIndexT = cf.variable.range.indexOf(Regular(true)) val fIndexF = cf.variable.range.indexOf(Regular(false)) val pT = result.get(List(fIndexT)) val pF = result.get(List(fIndexF)) (pT / (pT + pF)) should be(0.6 +- 
0.000000001) } "with a model using chain and no conditions or constraints, when the outcomes are at the top level, produce the correct answer" in { Universe.createNew() val e1 = Flip(0.3) val e2 = Select(0.1 -> 1, 0.9 -> 2) val e3 = Select(0.7 -> 1, 0.2 -> 2, 0.1 -> 3) val e4 = Chain(e1, (b: Boolean) => if (b) e2; else e3) val cc = new ComponentCollection val pr = new Problem(cc, List(e4)) pr.add(e1) pr.add(e2) pr.add(e3) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) val c4 = cc(e4) c1.generateRange() c2.generateRange() c3.generateRange() c4.expand() c4.generateRange() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c4.makeNonConstraintFactors() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() c4.makeConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) val result = multiplyAll(pr.solution) val c4Index1 = c4.variable.range.indexOf(Regular(1)) result.get(List(c4Index1)) should be((0.3 * 0.1 + 0.7 * 0.7) +- 0.000000001) } "with a model using chain and no conditions or constraints, when the outcomes are nested, produce the correct answer" in { Universe.createNew() val e1 = Flip(0.3) val e2 = Select(0.1 -> 1, 0.9 -> 2) val e3 = Select(0.7 -> 1, 0.2 -> 2, 0.1 -> 3) val e4 = Chain(e1, (b: Boolean) => if (b) e2; else e3) val cc = new ComponentCollection val pr = new Problem(cc, List(e4)) pr.add(e1) val c1 = cc(e1) val c4 = cc(e4) c1.generateRange() c4.expand() val c2 = cc(e2) val c3 = cc(e3) c2.generateRange() c3.generateRange() c4.generateRange() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() c4.makeConstraintFactors() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c4.subproblems.values.foreach(_.solve(new ConstantStrategy(marginalVariableElimination))) c4.makeNonConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) val result = multiplyAll(pr.solution) val c4Index1 = 
c4.variable.range.indexOf(Regular(1)) result.get(List(c4Index1)) should be((0.3 * 0.1 + 0.7 * 0.7) +- 0.000000001) } "with a model using chain and a condition on the result, when the outcomes are at the top level, correctly condition the parent" in { Universe.createNew() val e1 = Flip(0.3) val e2 = Select(0.1 -> 1, 0.9 -> 2) val e3 = Select(0.7 -> 1, 0.2 -> 2, 0.1 -> 3) val e4 = Chain(e1, (b: Boolean) => if (b) e2; else e3) e4.observe(1) val cc = new ComponentCollection val pr = new Problem(cc, List(e1)) pr.add(e2) pr.add(e3) pr.add(e4) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) val c4 = cc(e4) c1.generateRange() c2.generateRange() c3.generateRange() c4.expand() c4.generateRange() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() c4.makeConstraintFactors() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c4.makeNonConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) val result = multiplyAll(pr.solution) val c1IndexT = c1.variable.range.indexOf(Regular(true)) val c1IndexF = c1.variable.range.indexOf(Regular(false)) val pT = result.get(List(c1IndexT)) val pF = result.get(List(c1IndexF)) (pT / (pT + pF)) should be((0.3 * 0.1 / (0.3 * 0.1 + 0.7 * 0.7)) +- 0.000000001) } "with a model using chain and a condition on the result, when the outcomes are nested, correctly condition the parent" in { Universe.createNew() val e1 = Flip(0.3) val e2 = Select(0.1 -> 1, 0.9 -> 2) val e3 = Select(0.7 -> 1, 0.2 -> 2, 0.1 -> 3) val e4 = Chain(e1, (b: Boolean) => if (b) e2; else e3) e4.observe(1) val cc = new ComponentCollection val pr = new Problem(cc, List(e1)) pr.add(e4) val c1 = cc(e1) val c4 = cc(e4) c1.generateRange() c4.expand() val c2 = cc(e2) val c3 = cc(e3) c2.generateRange() c3.generateRange() c4.generateRange() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() c4.makeConstraintFactors() c1.makeNonConstraintFactors() 
c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c4.subproblems.values.foreach(_.solve(new ConstantStrategy(marginalVariableElimination))) c4.makeNonConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) val result = multiplyAll(pr.solution) val c1IndexT = c1.variable.range.indexOf(Regular(true)) val c1IndexF = c1.variable.range.indexOf(Regular(false)) val pT = result.get(List(c1IndexT)) val pF = result.get(List(c1IndexF)) (pT / (pT + pF)) should be((0.3 * 0.1 / (0.3 * 0.1 + 0.7 * 0.7)) +- 0.000000001) } "with a model using chain and a condition on one of the outcome elements, when the outcomes are at the top level, correctly condition the result" in { Universe.createNew() val e1 = Flip(0.3) val e2 = Select(0.1 -> 1, 0.9 -> 2) val e3 = Select(0.7 -> 1, 0.2 -> 2, 0.1 -> 3) val e4 = Chain(e1, (b: Boolean) => if (b) e2; else e3) e2.observe(1) val cc = new ComponentCollection val pr = new Problem(cc, List(e4)) pr.add(e1) pr.add(e2) pr.add(e3) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) val c4 = cc(e4) c1.generateRange() c2.generateRange() c3.generateRange() c4.expand() c4.generateRange() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() c4.makeConstraintFactors() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c4.makeNonConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) val result = multiplyAll(pr.solution) val c4Index1 = c4.variable.range.indexOf(Regular(1)) val c4Index2 = c4.variable.range.indexOf(Regular(2)) val c4Index3 = c4.variable.range.indexOf(Regular(3)) val p1 = result.get(List(c4Index1)) val p2 = result.get(List(c4Index2)) val p3 = result.get(List(c4Index3)) (p1 / (p1 + p2 + p3)) should be((0.3 * 1 + 0.7 * 0.7) +- 0.000000001) } "with a model using chain and a condition on one of the outcome elements, when the outcomes are at the top level, " + "not change the belief about the parent" in { Universe.createNew() val e1 = 
Flip(0.3) val e2 = Select(0.1 -> 1, 0.9 -> 2) val e3 = Select(0.7 -> 1, 0.2 -> 2, 0.1 -> 3) val e4 = Chain(e1, (b: Boolean) => if (b) e2; else e3) e2.observe(1) val cc = new ComponentCollection val pr = new Problem(cc, List(e1)) pr.add(e2) pr.add(e3) pr.add(e4) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) val c4 = cc(e4) c1.generateRange() c2.generateRange() c3.generateRange() c4.expand() c4.generateRange() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() c4.makeConstraintFactors() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c4.makeNonConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) val result = multiplyAll(pr.solution) val c1IndexT = c1.variable.range.indexOf(Regular(true)) val c1IndexF = c1.variable.range.indexOf(Regular(false)) val pT = result.get(List(c1IndexT)) val pF = result.get(List(c1IndexF)) (pT / (pT + pF)) should be(0.3 +- 0.000000001) } "with a model using chain and a condition on one of the outcome elements, when the outcomes are nested, correctly condition the result" in { Universe.createNew() val e1 = Flip(0.3) val e2 = Select(0.1 -> 1, 0.9 -> 2) val e3 = Select(0.7 -> 1, 0.2 -> 2, 0.1 -> 3) val e4 = Chain(e1, (b: Boolean) => if (b) e2; else e3) e2.observe(1) val cc = new ComponentCollection val pr = new Problem(cc, List(e4)) pr.add(e1) val c1 = cc(e1) val c4 = cc(e4) c1.generateRange() c4.expand() val c2 = cc(e2) val c3 = cc(e3) c2.generateRange() c3.generateRange() c4.generateRange() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() c4.makeConstraintFactors() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c4.subproblems.values.foreach(_.solve(new ConstantStrategy(marginalVariableElimination))) c4.makeNonConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) val result = multiplyAll(pr.solution) val c4Index1 = 
c4.variable.range.indexOf(Regular(1)) val c4Index2 = c4.variable.range.indexOf(Regular(2)) val c4Index3 = c4.variable.range.indexOf(Regular(3)) val p1 = result.get(List(c4Index1)) val p2 = result.get(List(c4Index2)) val p3 = result.get(List(c4Index3)) (p1 / (p1 + p2 + p3)) should be((0.3 * 1 + 0.7 * 0.7) +- 0.000000001) } "with a model using chain and a condition on one of the outcome elements, when the outcomes are nested, " + "not change the belief about the parent" in { Universe.createNew() val e1 = Flip(0.3) val e2 = Select(0.1 -> 1, 0.9 -> 2) val e3 = Select(0.7 -> 1, 0.2 -> 2, 0.1 -> 3) val e4 = Chain(e1, (b: Boolean) => if (b) e2; else e3) e2.observe(1) val cc = new ComponentCollection val pr = new Problem(cc, List(e1)) pr.add(e4) val c1 = cc(e1) val c4 = cc(e4) c1.generateRange() c4.expand() val c2 = cc(e2) val c3 = cc(e3) c2.generateRange() c3.generateRange() c4.generateRange() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() c4.makeConstraintFactors() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c4.subproblems.values.foreach(_.solve(new ConstantStrategy(marginalVariableElimination))) c4.makeNonConstraintFactors() pr.solve(new ConstantStrategy(marginalVariableElimination)) val result = multiplyAll(pr.solution) val c1IndexT = c1.variable.range.indexOf(Regular(true)) val c1IndexF = c1.variable.range.indexOf(Regular(false)) val pT = result.get(List(c1IndexT)) val pF = result.get(List(c1IndexF)) (pT / (pT + pF)) should be(0.3 +- 0.000000001) } } "Running MPE VariableElimination" when { "given a target" should { "produce the most likely factor over the target" in { Universe.createNew() val cc = new ComponentCollection val e1 = Select(0.75 -> 0.2, 0.25 -> 0.3) val e2 = Flip(e1) val e3 = Flip(e1) val e4 = e2 === e3 val pr = new Problem(cc, List(e1)) pr.add(e1) pr.add(e2) pr.add(e3) pr.add(e4) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) val c4 = cc(e4) c1.generateRange() 
c2.generateRange() c3.generateRange() c4.generateRange() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c4.makeNonConstraintFactors() // p(e1=.2,e2=T,e3=T,e4=T) = 0.75 * 0.2 * 0.2 = .03 // p(e1=.2,e2=F,e3=F,e4=T) = 0.75 * 0.8 * 0.8 = .48 // p(e1=.3,e2=T,e3=T,e4=T) = 0.25 * 0.3 * 0.3 = .0225 // p(e1=.3,e2=F,e3=F,e4=T) = 0.25 * 0.7 * 0.7 = .1225 // p(e1=.2,e2=T,e3=F,e4=F) = 0.75 * 0.2 * 0.8 = .12 // p(e1=.2,e2=F,e3=T,e4=F) = 0.75 * 0.8 * 0.2 = .12 // p(e1=.3,e2=T,e3=F,e4=F) = 0.25 * 0.3 * 0.7 = .0525 // p(e1=.3,e2=F,e3=T,e4=F) = 0.25 * 0.7 * 0.3 = .0525 // MPE: e1=.2,e2=F,e3=F,e4=T // If we leave e1 un-eliminated, we should end up with a factor that has e1=.2 at .48 and e1=.3 at .1225 pr.solve(new ConstantStrategy(mpeVariableElimination)) val f = pr.solution reduceLeft (_.product(_)) f.numVars should equal(1) f.get(List(0)) should be({ if (c1.variable.range(0).value == .2) 0.48 else 0.1225 } +- 0.000000001) f.get(List(1)) should be({ if (c1.variable.range(1).value == .2) 0.48 else 0.1225 } +- 0.000000001) } } "given a flat model" should { "produce the correct most likely values for all elements with no conditions or constraints" in { Universe.createNew() val cc = new ComponentCollection val e1 = Select(0.75 -> 0.2, 0.25 -> 0.3) val e2 = Flip(e1) val e3 = Flip(e1) val e4 = e2 === e3 val pr = new Problem(cc, List()) pr.add(e1) pr.add(e2) pr.add(e3) pr.add(e4) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) val c4 = cc(e4) c1.generateRange() c2.generateRange() c3.generateRange() c4.generateRange() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c4.makeNonConstraintFactors() // p(e1=.2,e2=T,e3=T,e4=T) = 0.75 * 0.2 * 0.2 = .03 // p(e1=.2,e2=F,e3=F,e4=T) = 0.75 * 0.8 * 0.8 = .48 // p(e1=.3,e2=T,e3=T,e4=T) = 0.25 * 0.3 * 0.3 = .0225 // p(e1=.3,e2=F,e3=F,e4=T) = 0.25 * 0.7 * 0.7 = .1225 // p(e1=.2,e2=T,e3=F,e4=F) = 0.75 * 0.2 * 0.8 = .12 // p(e1=.2,e2=F,e3=T,e4=F) = 0.75 * 0.8 * 0.2 = .12 // 
p(e1=.3,e2=T,e3=F,e4=F) = 0.25 * 0.3 * 0.7 = .0525 // p(e1=.3,e2=F,e3=T,e4=F) = 0.25 * 0.7 * 0.3 = .0525 // MPE: e1=.2,e2=F,e3=F,e4=T pr.solve(new ConstantStrategy(mpeVariableElimination)) pr.recordingFactors(c1.variable).get(List()).asInstanceOf[Double] should be(0.2 +- .0000001) pr.recordingFactors(c2.variable).get(List()).asInstanceOf[Boolean] should be(false) pr.recordingFactors(c3.variable).get(List()).asInstanceOf[Boolean] should be(false) pr.recordingFactors(c4.variable).get(List()).asInstanceOf[Boolean] should be(true) } "produce the correct most likely values for all elements with conditions and constraints" in { Universe.createNew() val cc = new ComponentCollection val e1 = Select(0.5 -> 0.2, 0.5 -> 0.3) e1.addConstraint((d: Double) => if (d < 0.25) 3.0 else 1.0) val e2 = Flip(e1) val e3 = Flip(e1) val e4 = e2 === e3 e4.observe(true) val pr = new Problem(cc, List()) pr.add(e1) pr.add(e2) pr.add(e3) pr.add(e4) val c1 = cc(e1) val c2 = cc(e2) val c3 = cc(e3) val c4 = cc(e4) c1.generateRange() c2.generateRange() c3.generateRange() c4.generateRange() c1.makeConstraintFactors() c2.makeConstraintFactors() c3.makeConstraintFactors() c4.makeConstraintFactors() c1.makeNonConstraintFactors() c2.makeNonConstraintFactors() c3.makeNonConstraintFactors() c4.makeNonConstraintFactors() // p(e1=.2,e2=T,e3=T,e4=T) = 0.75 * 0.2 * 0.2 = .03 // p(e1=.2,e2=F,e3=F,e4=T) = 0.75 * 0.8 * 0.8 = .48 // p(e1=.3,e2=T,e3=T,e4=T) = 0.25 * 0.3 * 0.3 = .0225 // p(e1=.3,e2=F,e3=F,e4=T) = 0.25 * 0.7 * 0.7 = .1225 // MPE: e1=.2,e2=F,e3=F,e4=T pr.solve(new ConstantStrategy(mpeVariableElimination)) pr.recordingFactors(c1.variable).get(List()).asInstanceOf[Double] should be(0.2 +- .0000001) pr.recordingFactors(c2.variable).get(List()).asInstanceOf[Boolean] should be(false) pr.recordingFactors(c3.variable).get(List()).asInstanceOf[Boolean] should be(false) pr.recordingFactors(c4.variable).get(List()).asInstanceOf[Boolean] should be(true) } } } def multiplyAll(factors: List[Factor[Double]]): 
Factor[Double] = factors.foldLeft(Factory.unit(SumProductSemiring()))(_.product(_)) class EC1 extends ElementCollection {} }
scottcb/figaro
Figaro/src/test/scala/com/cra/figaro/test/algorithm/structured/solver/VESolverTest.scala
Scala
bsd-3-clause
33,590
package us.feliscat.text.vector

import scala.collection.mutable

/**
  * Generates feature vectors from text, memoizing one vector per sentence id.
  *
  * @author K.Sakamoto
  *         Created on 2016/05/22
  * @tparam V concrete vector type produced by this generator
  */
trait VectorGenerator[V <: Vector] {
  // Memoization cache keyed by sentence id.
  // NOTE(review): the cache is keyed by `id` only — callers must guarantee a
  // stable id -> sentence mapping, otherwise a different sentence passed with
  // an already-cached id silently returns the stale vector. Confirm with callers.
  private val cache = mutable.Map.empty[Long, V]

  /**
    * Returns the cached vector for `id`, computing and caching it from
    * `sentence` on the first request.
    *
    * @param id       stable identifier of the sentence
    * @param sentence sentence text, used only on a cache miss
    * @return the (possibly cached) vector for the sentence
    */
  def getVectorFromCache(id: Long, sentence: String): V =
    // getOrElseUpdate evaluates its second argument lazily, so the vector is
    // computed only on a miss — equivalent to the former contains/update dance.
    cache.getOrElseUpdate(id, getVectorFromSentence(sentence))

  /** Builds a vector from a whole text (granularity is implementation-defined). */
  def getVectorFromText(text: String): V

  /** Builds a vector from a single sentence. */
  def getVectorFromSentence(sentence: String): V
}
ktr-skmt/FelisCatusZero-multilingual
libraries/src/main/scala/us/feliscat/text/vector/VectorGenerator.scala
Scala
apache-2.0
533
package com.softwaremill.react.kafka.commit.native

import com.softwaremill.react.kafka.commit._
import kafka.api.{ConsumerMetadataRequest, ConsumerMetadataResponse}
import kafka.cluster.Broker
import kafka.common.ErrorMapping
import kafka.consumer.KafkaConsumer
import kafka.network.BlockingChannel

import scala.util.{Failure, Success, Try}

/**
 * Builds [[NativeCommitter]] instances backed by Kafka's native
 * offset-management channel.
 */
class NativeCommitterFactory extends CommitterFactory {

  // Lazily constructed so the broker-lookup machinery only exists once a
  // committer is actually requested.
  lazy val offsetManagerResolver = new OffsetManagerResolver

  /**
   * Resolves the offset-manager channel for the given consumer and wraps it
   * in a committer; a failed resolution is propagated unchanged.
   */
  override def create(kafkaConsumer: KafkaConsumer[_]) =
    offsetManagerResolver
      .resolve(kafkaConsumer)
      .map(resolvedChannel => new NativeCommitter(kafkaConsumer, offsetManagerResolver, resolvedChannel))
}
blawlor/reactive-kafka
core/src/main/scala/com/softwaremill/react/kafka/commit/native/NativeCommitterFactory.scala
Scala
apache-2.0
679
package org.scalatest

import org.scalactic.Prettifier._
import org.scalactic.Requirements._
import org.scalatest.Suite.{ formatterForSuiteAborted, formatterForSuiteCompleted, formatterForSuiteStarting, getTopOfClass }
import org.scalatest.events._
import org.scalatest.tools.StandardOutReporter

import scala.util.control.NonFatal

/**
 * This trait lives in the `org.scalatest` package so that it can access
 * protected or package-private members of [[org.scalatest.Suite]].
 */
trait ScriptedScalaTestSuite extends Suite { thisSuite: Suite =>

  /**
   * Copied from [[org.scalatest.Suite.execute(String,org.scalatest.ConfigMap,Boolean,Boolean,Boolean,Boolean,Boolean):Unit]],
   * but modified to return [[org.scalatest.Status]] instead of [[scala.Unit]].
   *
   * @param testName    substring selecting the tests to run, or `null` for all tests
   * @param configMap   configuration passed to the suite; must not be null
   * @param color       whether the stdout reporter uses ANSI color
   * @param durations   whether test durations are reported
   * @param shortstacks whether short stack traces are printed on failure
   * @param fullstacks  whether full stack traces are printed on failure
   * @param stats       whether run-level statistics events are emitted
   * @return [[org.scalatest.FailedStatus]] on error,
   *         else the return from [[org.scalatest.Suite.run(Option[String],org.scalatest.Args)]].
   */
  final def executeScripted(
    testName: String = null,
    configMap: ConfigMap = ConfigMap.empty,
    color: Boolean = true,
    durations: Boolean = false,
    shortstacks: Boolean = false,
    fullstacks: Boolean = false,
    stats: Boolean = false
  ): Status = {
    requireNonNull(configMap)

    val SelectedTag = "Selected"
    val SelectedSet = Set(SelectedTag)

    // A test is "desired" when testName occurs in its raw or decoded name.
    val desiredTests: Set[String] =
      if (testName == null) Set.empty
      else {
        testNames.filter { s =>
          s.indexOf(testName) >= 0 || NameTransformer
            .decode(s)
            .indexOf(testName) >= 0
        }
      }
    if (testName != null && desiredTests.isEmpty)
      throw new IllegalArgumentException(Resources.testNotFound(testName))

    val dispatch = new DispatchReporter(
      List(
        new StandardOutReporter(
          durations,
          color,
          shortstacks,
          fullstacks,
          false,
          false,
          false,
          false,
          false,
          false,
          false
        )
      )
    )
    val tracker = new Tracker

    // When a testName was given, dynamically tag the matching tests and run
    // only those; otherwise run everything.
    val filter =
      if (testName == null) Filter()
      else {
        val taggedTests: Map[String, Set[String]] =
          desiredTests.map(_ -> SelectedSet).toMap
        Filter(
          tagsToInclude = Some(SelectedSet),
          excludeNestedSuites = true,
          dynaTags = DynaTags(Map.empty, Map(suiteId -> taggedTests))
        )
      }

    val runStartTime = System.currentTimeMillis
    if (stats)
      dispatch(
        RunStarting(
          tracker.nextOrdinal(),
          expectedTestCount(filter),
          configMap
        )
      )

    val suiteStartTime = System.currentTimeMillis

    // Emits a SuiteAborted event describing the given throwable.
    def dispatchSuiteAborted(e: Throwable): Unit = {
      val eMessage = e.getMessage
      // FIX: the original condition was inverted — it chose the no-message
      // resource when a message existed and passed a null/empty message to
      // runOnSuiteExceptionWithMessage. This matches upstream Suite.execute.
      val rawString =
        if (eMessage != null && eMessage.length > 0)
          Resources.runOnSuiteExceptionWithMessage(eMessage)
        else
          Resources.runOnSuiteException
      val formatter = formatterForSuiteAborted(thisSuite, rawString)
      val duration = System.currentTimeMillis - suiteStartTime
      dispatch(
        SuiteAborted(
          tracker.nextOrdinal(),
          rawString,
          thisSuite.suiteName,
          thisSuite.suiteId,
          Some(thisSuite.getClass.getName),
          Some(e),
          Some(duration),
          formatter,
          Some(SeeStackDepthException)
        )
      )
    }

    try {
      val formatter = formatterForSuiteStarting(thisSuite)
      dispatch(
        SuiteStarting(
          tracker.nextOrdinal(),
          thisSuite.suiteName,
          thisSuite.suiteId,
          Some(thisSuite.getClass.getName),
          formatter,
          Some(getTopOfClass(thisSuite))
        )
      )
      val status = run(
        None,
        Args(
          dispatch,
          Stopper.default,
          filter,
          configMap,
          None,
          tracker,
          Set.empty
        )
      )
      // Block until the run finishes so the completion events below carry
      // an accurate duration.
      status.waitUntilCompleted()

      val suiteCompletedFormatter = formatterForSuiteCompleted(thisSuite)
      val duration = System.currentTimeMillis - suiteStartTime
      dispatch(
        SuiteCompleted(
          tracker.nextOrdinal(),
          thisSuite.suiteName,
          thisSuite.suiteId,
          Some(thisSuite.getClass.getName),
          Some(duration),
          suiteCompletedFormatter,
          Some(getTopOfClass(thisSuite))
        )
      )
      if (stats) {
        val duration = System.currentTimeMillis - runStartTime
        dispatch(RunCompleted(tracker.nextOrdinal(), Some(duration)))
      }
      status
    } catch {
      case e: InstantiationException =>
        dispatchSuiteAborted(e)
        dispatch(
          RunAborted(
            tracker.nextOrdinal(),
            Resources.cannotInstantiateSuite(e.getMessage),
            Some(e),
            Some(System.currentTimeMillis - runStartTime)
          )
        )
        FailedStatus
      case e: IllegalAccessException =>
        dispatchSuiteAborted(e)
        dispatch(
          RunAborted(
            tracker.nextOrdinal(),
            Resources.cannotInstantiateSuite(e.getMessage),
            Some(e),
            Some(System.currentTimeMillis - runStartTime)
          )
        )
        FailedStatus
      case e: NoClassDefFoundError =>
        dispatchSuiteAborted(e)
        dispatch(
          RunAborted(
            tracker.nextOrdinal(),
            Resources.cannotLoadClass(e.getMessage),
            Some(e),
            Some(System.currentTimeMillis - runStartTime)
          )
        )
        FailedStatus
      case e: Throwable =>
        dispatchSuiteAborted(e)
        dispatch(
          RunAborted(
            tracker.nextOrdinal(),
            Resources.bigProblems(e),
            Some(e),
            Some(System.currentTimeMillis - runStartTime)
          )
        )
        // Fatal errors (OOM, etc.) are reported and then rethrown.
        if (!NonFatal(e)) throw e
        FailedStatus
    } finally {
      // Always tear the reporter down, even when aborting.
      dispatch.dispatchDisposeAndWaitUntilDone()
    }
  }
}
daniel-shuy/scripted-scalatest-sbt-plugin
src/main/scala/org/scalatest/ScriptedScalaTestSuite.scala
Scala
apache-2.0
5,996
package com.githup.voles.inconcert.models
// NOTE(review): package segment "githup" looks like a typo for "github", but
// renaming it would break every consumer of this package — left as-is.

/**
 * A concert category record.
 *
 * All fields are optional, presumably mirroring nullable database columns —
 * TODO confirm against the persistence layer.
 *
 * @param id    primary key, when the row has been persisted
 * @param name  full category name
 * @param short abbreviated category name
 */
case class Category(id: Option[Int], name: Option[String], short: Option[String])
asomov/meet-in-concert
src/main/scala/com/githup/voles/inconcert/models/Category.scala
Scala
apache-2.0
125
/*******************************************************************************
 * Copyright 2017 Capital One Services, LLC and Bitwise, Inc.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/
package hydrograph.engine.spark.datasource.fixedwidth

import java.util.{Locale, TimeZone}

import hydrograph.engine.spark.datasource.utils.{CompressionCodecs, TypeCast}
import org.apache.commons.lang3.time.FastDateFormat
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.sources.{BaseRelation, CreatableRelationProvider, RelationProvider, SchemaRelationProvider}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}
import org.slf4j.{Logger, LoggerFactory}

/**
 * Spark data-source provider for Hydrograph's fixed-width file format,
 * handling both reading (via [[FixedWidthRelation]]) and writing.
 *
 * @author Bitwise
 */
class DefaultSource extends RelationProvider
  with SchemaRelationProvider
  with CreatableRelationProvider
  with Serializable {

  private val LOG: Logger = LoggerFactory.getLogger(classOf[DefaultSource])

  /** Read path without a user-supplied schema: delegates with a null schema. */
  override def createRelation(sqlContext: SQLContext, parameters: Map[String, String]): BaseRelation =
    createRelation(sqlContext, parameters, null)

  /**
   * Builds a [[FastDateFormat]] for the given pattern, or null when the
   * pattern is the literal string "null" (meaning "no date format").
   */
  private def fastDateFormat(dateFormat: String): FastDateFormat =
    if (!dateFormat.equalsIgnoreCase("null"))
      FastDateFormat.getInstance(dateFormat, TimeZone.getDefault, Locale.getDefault)
    else
      null

  /**
   * Maps one tab-separated pattern per field to a (possibly null) formatter.
   * NOTE(review): entries equal to "null" become null formatters; downstream
   * TypeCast.outputValue is assumed to tolerate a null format — confirm.
   */
  private def getDateFormats(dateFormats: List[String]): List[FastDateFormat] =
    dateFormats.map { pattern =>
      if (pattern.equals("null")) null else fastDateFormat(pattern)
    }

  /**
   * Read path with an explicit schema. Required parameters: `path`, `length`
   * (comma-separated field widths) and `charset`; optional: `dateFormats`
   * (tab-separated, "null" for none), `strict`, `safe`, `componentName`.
   */
  override def createRelation(sqlContext: SQLContext, parameters: Map[String, String], schema: StructType): BaseRelation = {
    LOG.trace("In method createRelation for Fixed Width Input File Component")
    val path: String = parameters.getOrElse("path",
      throw new RuntimeException("path option must be specified for Input File Fixed Width Component"))
    val fieldLengths = parameters.getOrElse("length",
      throw new RuntimeException("length option must be specified for Input File Fixed Width Component"))
    val inDateFormats: String = parameters.getOrElse("dateFormats", "null")
    val componentName: String = parameters.getOrElse("componentName", "")
    // FIX: the thrown message previously said "Delimited Input File", copied
    // from the delimited data source; it now matches the logged message.
    if (path == null || path.equals("")) {
      LOG.error("Fixed Width Input File path cannot be null or empty")
      throw new RuntimeException("Fixed Width Input File path cannot be null or empty")
    }
    val dateFormat: List[FastDateFormat] = getDateFormats(inDateFormats.split("\\t").toList)
    // charset is required — fail with an explicit message instead of a bare
    // NoSuchElementException from Option.get (consistent with path/length above).
    val charset = parameters.getOrElse("charset",
      throw new RuntimeException("charset option must be specified for Input File Fixed Width Component"))
    new FixedWidthRelation(componentName, path, charset, fieldLengths,
      parameters.getOrElse("strict", "true").toBoolean,
      parameters.getOrElse("safe", "false").toBoolean,
      dateFormat, schema)(sqlContext)
  }

  /** Parses a comma-separated width list ("3,10,4") into an Int array. */
  private def toIntLength(fieldsLen: String): Array[Int] =
    fieldsLen.split(",").map(_.toInt)

  /**
   * Serializes `dataFrame` as fixed-width text at `path`.
   *
   * Each field is coerced to a string, then truncated or padded to its
   * configured width (space-padded on the right for textual types,
   * zero-padded on the left otherwise).
   */
  def saveAsFW(dataFrame: DataFrame, path: String, parameters: Map[String, String]) = {
    LOG.trace("In method saveAsFW for creating Fixed Width Output File")
    val outDateFormats: String = parameters.getOrElse("dateFormats", "null")
    val strict: Boolean = parameters.getOrElse("strict", "true").toBoolean
    val safe: Boolean = parameters.getOrElse("safe", "false").toBoolean
    val schema = dataFrame.schema
    val fieldlen: Array[Int] = toIntLength(
      parameters.getOrElse("length",
        throw new RuntimeException("length option must be specified for Output File Fixed Width Component")))
    val codec = CompressionCodecs.getCodec(dataFrame.sparkSession.sparkContext, parameters.getOrElse("codec", null))
    val dateFormat: List[FastDateFormat] = getDateFormats(outDateFormats.split("\\t").toList)

    val valueRDD = dataFrame.rdd.map(row => {
      // FIX: the message previously talked about character lengths while the
      // check compares the number of fields; it now describes the real check.
      if (strict && (row.length != fieldlen.length)) {
        val msg = "Input row does not have the same number of fields as the schema. Row has " +
          row.length + " fields; schema specifies " + fieldlen.length + " field lengths.\\nRow being parsed: " + row
        LOG.error(msg)
        throw new RuntimeException(msg)
      }

      // Repeats `filler` |length| times.
      // FIX: the original iterated `0 until (length * -1)`, which is an EMPTY
      // range for positive lengths — so null fields (padded with
      // getFiller(" ", fieldlen(index))) were written with zero width,
      // misaligning every subsequent column of the fixed-width record.
      def getFiller(filler: String, length: Int): String = filler * math.abs(length)

      var index = 0
      val sb = new StringBuilder
      while (index < schema.length) {
        val data = row.get(index)
        if (data == null) {
          if (!safe && !schema(index).nullable) {
            LOG.error("Field " + schema(index).name + " has value null. Field length specified in the schema is " + fieldlen(index) + ". ")
            throw new RuntimeException("Field " + schema(index).name + " has value null. Field length specified in the schema is " + fieldlen(index) + ". ")
          } else {
            // Null allowed: emit a blank field of the configured width.
            sb.append(getFiller(" ", fieldlen(index)))
          }
        } else {
          val coercedVal = TypeCast.outputValue(data, schema.fields(index).dataType, dateFormat(index))
          val lengthDiff = coercedVal.toString.length - fieldlen(index)
          val result =
            if (lengthDiff == 0) coercedVal
            else if (lengthDiff > 0) coercedVal.toString.substring(0, fieldlen(index)) // too long: truncate
            else schema(index).dataType match {
              // Textual-ish types are left-aligned (space-padded on the right)...
              case BooleanType | ByteType | DateType | StringType | TimestampType | CalendarIntervalType =>
                coercedVal + getFiller(" ", lengthDiff)
              // ...numeric types are right-aligned (zero-padded on the left).
              case _ =>
                getFiller("0", lengthDiff) + coercedVal
            }
          sb.append(result)
        }
        index = index + 1
      }
      sb.toString()
    })

    val codecClass = CompressionCodecs.getCodecClass(codec)
    codecClass match {
      case null      => valueRDD.saveAsTextFile(path)
      case codeClass => valueRDD.saveAsTextFile(path, codeClass)
    }
    LOG.info("Fixed Width Output File is successfully created at path : " + path)
  }

  /**
   * Write path. Honors [[SaveMode]]: Overwrite deletes an existing target,
   * ErrorIfExists/Append fail, Ignore skips the write.
   */
  override def createRelation(sqlContext: SQLContext, mode: SaveMode, parameters: Map[String, String], data: DataFrame): BaseRelation = {
    LOG.trace("In method createRelation for creating Fixed Width Output File")
    val path = parameters.getOrElse("path",
      throw new RuntimeException("path option must be specified for Output File Fixed Width Component"))
    // Validate eagerly so a missing width list fails before any file I/O.
    parameters.getOrElse("length",
      throw new RuntimeException("length option must be specified for Output File Fixed Width Component"))
    // FIX: thrown message previously said "Delimited Input File" — it now
    // matches the logged message for this (output, fixed-width) component.
    if (path == null || path.equals("")) {
      LOG.error("Fixed Width Output File path cannot be null or empty")
      throw new RuntimeException("Fixed Width Output File path cannot be null or empty")
    }
    val fsPath = new Path(path)
    val fs = fsPath.getFileSystem(sqlContext.sparkContext.hadoopConfiguration)
    val isSave =
      if (fs.exists(fsPath)) {
        mode match {
          case SaveMode.Append =>
            LOG.error("Output file append operation is not supported")
            throw new RuntimeException("Output file append operation is not supported")
          case SaveMode.Overwrite =>
            if (fs.delete(fsPath, true)) true
            else {
              LOG.error("Output directory path '" + path + "' cannot be deleted")
              throw new RuntimeException("Output directory path '" + path + "' cannot be deleted")
            }
          case SaveMode.ErrorIfExists =>
            LOG.error("Output directory path '" + path + "' already exists")
            throw new RuntimeException("Output directory path '" + path + "' already exists")
          case SaveMode.Ignore => false
        }
      }
      else true
    if (isSave) saveAsFW(data, path, parameters)
    createRelation(sqlContext, parameters, data.schema)
  }
}
capitalone/Hydrograph
hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/datasource/fixedwidth/DefaultSource.scala
Scala
apache-2.0
8,620
package models.sitedata

/** Table-name metadata for [[SiteInfoAccessMail]]. */
object SiteInfoAccessMailDef{
  /** Name of the backing database table. */
  def toTable: String = "SiteInfoAccessMail"
}

/**
 * A mail address associated with a site's access information.
 *
 * @param id              surrogate primary key
 * @param siteid          identifier of the owning site
 * @param mailaddress     the mail address, if one is recorded
 * @param mailaddresstype kind of mail address (semantics defined by the data source)
 */
case class SiteInfoAccessMail (
  id: Long,
  siteid: String,
  mailaddress: Option[String] = None,
  mailaddresstype: String
  // lastmodifiedtime: DateTime,
  // lastmodifier: String,
  // modifiedtimestamp: Timestamp
) extends BaseModel {

  /** Human-readable summary; omits mailaddresstype on purpose (matches legacy output). */
  override def toString: String =
    s"SiteInfoAccessMail {id: $id, siteid: $siteid, mailaddress: $mailaddress}"

  override def getId: Long = id

  // Intentionally a no-op: case-class fields are immutable, so the id cannot
  // be reassigned in place (use copy(id = ...) instead).
  override def setId(id: Long): Unit = { /* this.id = id */ }
}
tnddn/iv-web
portal/rest-portal/app/models/sitedata/SiteInfoAccessMail.scala
Scala
apache-2.0
662
package scrabble

// Specs2 test suite for the Scrabble board model: neighbour lookups, special
// square placement, and tile placement. Relies on fixtures from ScrabbleTest
// (board, crossedWords, pos, toPlace, placeSquares) — presumably a shared test
// harness; confirm against ScrabbleTest for the exact board fixtures used.
class BoardTest extends ScrabbleTest {

  // A board with a single 'a' tile placed at (3, 3).
  val oneLetterPlaced = {
    val place = toPlace("a", true, pos(3, 3))
    placeSquares(board, place)
    // board.placeLetter(Pos.posAt(3, 3).get, Letter('a', 1)).get
  }

  // Asserts that walking from `pos` in `direction` over the crossedWords board
  // yields exactly the letters of `word` (tup._3 is the square; .letter its tile).
  def checkNeighbours(word: String, direction: Pos => List[PosSquare], pos: Option[Pos]) = {
    pos must beSome
    val res = pos flatMap {
      pos =>
        crossedWords.map {
          board =>
            val list = direction(pos)
            list.map(tup => tup._3.letter).mkString
        }
    }
    res must beEqualTo(Some(word))
  }

  "a board" should {

    "find letters above a position" in {
      val v = crossedWords.map {
        case b => checkNeighbours("RES", b.lettersAbove, pos(7, 5))
      }
      v must beSome
    }

    "find letters below a position" in {
      val v = crossedWords.map {
        case b => checkNeighbours("SC", b.lettersBelow, pos(7, 5))
      }
      v must beSome
    }

    "find letters left of a position" in {
      val v = crossedWords.map {
        case b => checkNeighbours("HIST", b.lettersLeft, pos(7, 5))
      }
      v must beSome
    }

    "find letters right of a position" in {
      val v = crossedWords.map {
        case b => checkNeighbours("RY", b.lettersRight, pos(7, 5))
      }
      v must beSome
    }

    // Empty (tile-less) instances of each square kind, used for comparisons below.
    val normal = NormalSquare(None)
    val tripleWord = TripleWordSquare(None)
    val doubleLetter = DoubleLetterSquare(None)
    val doubleWord = DoubleWordSquare(None)
    val tripleLetter = TripleLetterSquare(None)

    // Asserts that the board has `square` at `pos`.
    def checkSpecialSquare(pos: Option[Pos], square: Square) = {
      pos must beSome
      pos foreach {
        pos => board.squares must havePair(pos, square)
      }
    }

    /* Tedious, but important test to make sure all the special squares are positioned correctly */
    "should position special squares correctly" in {
      // Exhaustive map of the 60 premium squares of a standard 15x15 Scrabble
      // layout (plus the centre double-word at (8, 8)).
      val pairs = List(
        pos(1, 1) -> tripleWord, pos(4, 1) -> doubleLetter, pos(8, 1) -> tripleWord,
        pos(12, 1) -> doubleLetter, pos(15, 1) -> tripleWord, pos(2, 2) -> doubleWord,
        pos(6, 2) -> tripleLetter, pos(10, 2) -> tripleLetter, pos(14, 2) -> doubleWord,
        pos(3, 3) -> doubleWord, pos(7, 3) -> doubleLetter, pos(9, 3) -> doubleLetter,
        pos(13, 3) -> doubleWord, pos(1, 4) -> doubleLetter, pos(4, 4) -> doubleWord,
        pos(8, 4) -> doubleLetter, pos(12, 4) -> doubleWord, pos(15, 4) -> doubleLetter,
        pos(5, 5) -> doubleWord, pos(11, 5) -> doubleWord, pos(2, 6) -> tripleLetter,
        pos(6, 6) -> tripleLetter, pos(10, 6) -> tripleLetter, pos(14, 6) -> tripleLetter,
        pos(3, 7) -> doubleLetter, pos(7, 7) -> doubleLetter, pos(9, 7) -> doubleLetter,
        pos(13, 7) -> doubleLetter, pos(1, 8) -> tripleWord, pos(4, 8) -> doubleLetter,
        pos(8, 8) -> doubleWord, pos(12, 8) -> doubleLetter, pos(15, 8) -> tripleWord,
        pos(3, 9) -> doubleLetter, pos(7, 9) -> doubleLetter, pos(9, 9) -> doubleLetter,
        pos(13, 9) -> doubleLetter, pos(2, 10) -> tripleLetter, pos(6, 10) -> tripleLetter,
        pos(10, 10) -> tripleLetter, pos(14, 10) -> tripleLetter, pos(5, 11) -> doubleWord,
        pos(11, 11) -> doubleWord, pos(1, 12) -> doubleLetter, pos(4, 12) -> doubleWord,
        pos(8, 12) -> doubleLetter, pos(12, 12) -> doubleWord, pos(15, 12) -> doubleLetter,
        pos(3, 13) -> doubleWord, pos(7, 13) -> doubleLetter, pos(9, 13) -> doubleLetter,
        pos(13, 13) -> doubleWord, pos(2, 14) -> doubleWord, pos(6, 14) -> tripleLetter,
        pos(10, 14) -> tripleLetter, pos(14, 14) -> doubleWord, pos(1, 15) -> tripleWord,
        pos(4, 15) -> doubleLetter, pos(8, 15) -> tripleWord, pos(12, 15) -> doubleLetter,
        pos(15, 15) -> tripleWord)

      pairs.foreach {
        case (pos, tile) => checkSpecialSquare(pos, tile)
      }
    }

    "have 61 special squares" in {
      board.squares.toTraversable filter (p => p._2 != normal) must have size 61
    }

    "have and 164 normal squares" in {
      board.squares.toTraversable filter (p => p._2 == normal) must have size 164
    }

    "have 225 squares" in {
      board.squares must have size 225
    }

    "place 1 tile" in {
      oneLetterPlaced map (_.squares.toTraversable filter (p => !p._2.isEmpty) must have size 1) must beSome
    }

    val placeAt = pos(3, 3)
    placeAt must beSome

    "place Tile in the correct position" in {
      placeAt foreach {
        p =>
          oneLetterPlaced map (_.squareAt(p) map
            (_.tile must beEqualTo(Some(Letter('A', 1)))))
      }
    }

    "retrieve an occupied square" in {
      placeAt foreach {
        p =>
          oneLetterPlaced map (_.squareAt(p) map
            (_.tile must beEqualTo(Some(Letter('A', 1)))))
      }
    }
  }
}
Happy0/scalascrabble
src/test/scala/BoardTest.scala
Scala
gpl-2.0
5,076
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.predictionio.controller

import org.apache.predictionio.core.BaseDataSource
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

/** Base class of a parallel data source.
  *
  * A parallel data source runs locally within a single machine, or in parallel
  * on a cluster, to return data that is distributed across a cluster.
  *
  * @tparam TD Training data class.
  * @tparam EI Evaluation Info class.
  * @tparam Q Input query class.
  * @tparam A Actual value class.
  * @group Data Source
  */
abstract class PDataSource[TD, EI, Q, A]
  extends BaseDataSource[TD, EI, Q, A] {

  // Framework entry point from BaseDataSource; simply delegates to the
  // user-implemented readTraining.
  override
  def readTrainingBase(sc: SparkContext): TD = readTraining(sc)

  /** Implement this method to only return training data from a data source */
  def readTraining(sc: SparkContext): TD

  // Framework entry point from BaseDataSource; simply delegates to readEval.
  override
  def readEvalBase(sc: SparkContext): Seq[(TD, EI, RDD[(Q, A)])] = readEval(sc)

  /** To provide evaluation feature for your engine, your must override this
    * method to return data for evaluation from a data source. Returned data can
    * optionally include a sequence of query and actual value pairs for
    * evaluation purpose.
    *
    * The default implementation returns an empty sequence as a stub, so that
    * an engine can be compiled without implementing evaluation.
    */
  def readEval(sc: SparkContext): Seq[(TD, EI, RDD[(Q, A)])] =
    Seq[(TD, EI, RDD[(Q, A)])]()

  // Kept for source compatibility with pre-0.9.0 engines.
  @deprecated("Use readEval() instead.", "0.9.0")
  def read(sc: SparkContext): Seq[(TD, EI, RDD[(Q, A)])] = readEval(sc)
}
PredictionIO/PredictionIO
core/src/main/scala/org/apache/predictionio/controller/PDataSource.scala
Scala
apache-2.0
2,340
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.hive.thriftserver

import java.io.File
import java.sql.{DriverManager, Statement, Timestamp}
import java.util.{Locale, MissingFormatArgumentException}

import scala.util.{Random, Try}
import scala.util.control.NonFatal

import org.apache.commons.lang3.exception.ExceptionUtils
import org.apache.hadoop.hive.conf.HiveConf.ConfVars

import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.sql.{AnalysisException, SQLQueryTestSuite}
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
import org.apache.spark.sql.catalyst.util.fileToString
import org.apache.spark.sql.execution.HiveResult
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._

/**
 * Re-run all the tests in SQLQueryTestSuite via Thrift Server.
 * Note that this TestSuite does not support maven.
 *
 * TODO:
 *   1. Support UDF testing.
 *   2. Support DESC command.
 *   3. Support SHOW command.
 */
class ThriftServerQueryTestSuite extends SQLQueryTestSuite {

  private var hiveServer2: HiveThriftServer2 = _

  // Starts a HiveThriftServer2 instance on a random port before any test runs,
  // retrying with different ports on failure.
  override def beforeAll(): Unit = {
    super.beforeAll()
    // Chooses a random port between 10000 and 19999
    var listeningPort = 10000 + Random.nextInt(10000)

    // Retries up to 3 times with different port numbers if the server fails to start
    (1 to 3).foldLeft(Try(startThriftServer(listeningPort, 0))) { case (started, attempt) =>
      started.orElse {
        listeningPort += 1
        Try(startThriftServer(listeningPort, attempt))
      }
    }.recover {
      case cause: Throwable =>
        throw cause
    }.get
    logInfo("HiveThriftServer2 started successfully")
  }

  override def afterAll(): Unit = {
    try {
      hiveServer2.stop()
    } finally {
      super.afterAll()
    }
  }

  override def sparkConf: SparkConf = super.sparkConf
    // Hive Thrift server should not execute SQL queries in an asynchronous way
    // because we may set session configuration.
    .set(HiveUtils.HIVE_THRIFT_SERVER_ASYNC, false)

  override val isTestWithConfigSets = false

  /** List of test cases to ignore, in lower cases. */
  override def blackList: Set[String] = Set(
    "blacklist.sql",   // Do NOT remove this one. It is here to test the blacklist functionality.
    // Missing UDF
    "postgreSQL/boolean.sql",
    "postgreSQL/case.sql",
    // SPARK-28624
    "date.sql",
    // SPARK-28620
    "postgreSQL/float4.sql",
    // SPARK-28636
    "decimalArithmeticOperations.sql",
    "literals.sql",
    "subquery/scalar-subquery/scalar-subquery-predicate.sql",
    "subquery/in-subquery/in-limit.sql",
    "subquery/in-subquery/in-group-by.sql",
    "subquery/in-subquery/simple-in.sql",
    "subquery/in-subquery/in-order-by.sql",
    "subquery/in-subquery/in-set-operations.sql"
  )

  // Executes every query of a test case over JDBC and compares the normalized
  // output against the golden result file produced by SQLQueryTestSuite.
  override def runQueries(
      queries: Seq[String],
      testCase: TestCase,
      configSet: Option[Seq[(String, String)]]): Unit = {
    // We do not test with configSet.
    withJdbcStatement { statement =>

      loadTestData(statement)

      testCase match {
        case _: PgSQLTest =>
          // PostgreSQL enabled cartesian product by default.
          statement.execute(s"SET ${SQLConf.CROSS_JOINS_ENABLED.key} = true")
          statement.execute(s"SET ${SQLConf.ANSI_ENABLED.key} = true")
          statement.execute(s"SET ${SQLConf.DIALECT.key} = ${SQLConf.Dialect.POSTGRESQL.toString}")
        case _ =>
      }

      // Run the SQL queries preparing them for comparison.
      val outputs: Seq[QueryOutput] = queries.map { sql =>
        val output = getNormalizedResult(statement, sql)
        // We might need to do some query canonicalization in the future.
        QueryOutput(
          sql = sql,
          schema = "",
          output = output.mkString("\\n").replaceAll("\\\\s+$", ""))
      }

      // Read back the golden file.
      val expectedOutputs: Seq[QueryOutput] = {
        val goldenOutput = fileToString(new File(testCase.resultFile))
        val segments = goldenOutput.split("-- !query.+\\n")

        // each query has 3 segments, plus the header
        assert(segments.size == outputs.size * 3 + 1,
          s"Expected ${outputs.size * 3 + 1} blocks in result file but got ${segments.size}. " +
            "Try regenerate the result files.")
        Seq.tabulate(outputs.size) { i =>
          val sql = segments(i * 3 + 1).trim
          val originalOut = segments(i * 3 + 3)
          // Sorting makes the comparison order-insensitive for data-retrieving
          // statements, since JDBC row order is not guaranteed.
          val output = if (isNeedSort(sql)) {
            originalOut.split("\\n").sorted.mkString("\\n")
          } else {
            originalOut
          }
          QueryOutput(
            sql = sql,
            schema = "",
            output = output.replaceAll("\\\\s+$", "")
          )
        }
      }

      // Compare results.
      assertResult(expectedOutputs.size, s"Number of queries should be ${expectedOutputs.size}") {
        outputs.size
      }

      outputs.zip(expectedOutputs).zipWithIndex.foreach { case ((output, expected), i) =>
        assertResult(expected.sql, s"SQL query did not match for query #$i\\n${expected.sql}") {
          output.sql
        }
        expected match {
          // Skip desc command, see HiveResult.hiveResultString
          case d if d.sql.toUpperCase(Locale.ROOT).startsWith("DESC ")
            || d.sql.toUpperCase(Locale.ROOT).startsWith("DESC\\n")
            || d.sql.toUpperCase(Locale.ROOT).startsWith("DESCRIBE ")
            || d.sql.toUpperCase(Locale.ROOT).startsWith("DESCRIBE\\n") =>
          // Skip show command, see HiveResult.hiveResultString
          case s if s.sql.toUpperCase(Locale.ROOT).startsWith("SHOW ")
            || s.sql.toUpperCase(Locale.ROOT).startsWith("SHOW\\n") =>
          // Exceptions cannot be compared verbatim across execution modes, so
          // only their class/keyword signatures are matched below.
          case _ if output.output.startsWith(classOf[NoSuchTableException].getPackage.getName) =>
            assert(expected.output.startsWith(classOf[NoSuchTableException].getPackage.getName),
              s"Exception did not match for query #$i\\n${expected.sql}, " +
                s"expected: ${expected.output}, but got: ${output.output}")
          case _ if output.output.startsWith(classOf[SparkException].getName) &&
            output.output.contains("overflow") =>
            assert(expected.output.contains(classOf[ArithmeticException].getName) &&
              expected.output.contains("overflow"),
              s"Exception did not match for query #$i\\n${expected.sql}, " +
                s"expected: ${expected.output}, but got: ${output.output}")
          case _ if output.output.startsWith(classOf[RuntimeException].getName) =>
            assert(expected.output.contains("Exception"),
              s"Exception did not match for query #$i\\n${expected.sql}, " +
                s"expected: ${expected.output}, but got: ${output.output}")
          case _ if output.output.startsWith(classOf[ArithmeticException].getName) &&
            output.output.contains("causes overflow") =>
            assert(expected.output.contains(classOf[ArithmeticException].getName) &&
              expected.output.contains("causes overflow"),
              s"Exception did not match for query #$i\\n${expected.sql}, " +
                s"expected: ${expected.output}, but got: ${output.output}")
          case _ if output.output.startsWith(classOf[MissingFormatArgumentException].getName) &&
            output.output.contains("Format specifier") =>
            assert(expected.output.contains(classOf[MissingFormatArgumentException].getName) &&
              expected.output.contains("Format specifier"),
              s"Exception did not match for query #$i\\n${expected.sql}, " +
                s"expected: ${expected.output}, but got: ${output.output}")
          case _ =>
            assertResult(expected.output, s"Result did not match for query #$i\\n${expected.sql}") {
              output.output
            }
        }
      }
    }
  }

  // Registers each discovered test case with ScalaTest, ignoring blacklisted ones.
  override def createScalaTestCase(testCase: TestCase): Unit = {
    if (blackList.exists(t =>
      testCase.name.toLowerCase(Locale.ROOT).contains(t.toLowerCase(Locale.ROOT)))) {
      // Create a test case to ignore this case.
      ignore(testCase.name) { /* Do nothing */ }
    } else {
      // Create a test case to run this case.
      test(testCase.name) {
        runTest(testCase)
      }
    }
  }

  // Discovers input SQL files and pairs them with their golden result files;
  // UDF tests are excluded (not supported via Thrift Server yet, see TODO).
  override def listTestCases(): Seq[TestCase] = {
    listFilesRecursively(new File(inputFilePath)).flatMap { file =>
      val resultFile = file.getAbsolutePath.replace(inputFilePath, goldenFilePath) + ".out"
      val absPath = file.getAbsolutePath
      val testCaseName = absPath.stripPrefix(inputFilePath).stripPrefix(File.separator)

      if (file.getAbsolutePath.startsWith(s"$inputFilePath${File.separator}udf")) {
        Seq.empty
      } else if (file.getAbsolutePath.startsWith(s"$inputFilePath${File.separator}postgreSQL")) {
        PgSQLTestCase(testCaseName, absPath, resultFile) :: Nil
      } else {
        RegularTestCase(testCaseName, absPath, resultFile) :: Nil
      }
    }
  }

  // Smoke test: verifies the server answers a trivial query over JDBC.
  test("Check if ThriftServer can work") {
    withJdbcStatement { statement =>
      val rs = statement.executeQuery("select 1L")
      rs.next()
      assert(rs.getLong(1) === 1L)
    }
  }

  // Executes a query and renders each row as a tab-separated line, masking
  // unstable parts (expression IDs, root-cause messages) so results can be
  // diffed against golden files.
  private def getNormalizedResult(statement: Statement, sql: String): Seq[String] = {
    try {
      val rs = statement.executeQuery(sql)
      val cols = rs.getMetaData.getColumnCount
      val buildStr = () => (for (i <- 1 to cols) yield {
        getHiveResult(rs.getObject(i))
      }).mkString("\\t")

      val answer = Iterator.continually(rs.next()).takeWhile(identity).map(_ => buildStr()).toSeq
        .map(replaceNotIncludedMsg)
      if (isNeedSort(sql)) {
        answer.sorted
      } else {
        answer
      }
    } catch {
      case a: AnalysisException =>
        // Do not output the logical plan tree which contains expression IDs.
        // Also implement a crude way of masking expression IDs in the error message
        // with a generic pattern "###".
        val msg = if (a.plan.nonEmpty) a.getSimpleMessage else a.getMessage
        Seq(a.getClass.getName, msg.replaceAll("#\\\\d+", "#x")).sorted
      case NonFatal(e) =>
        val rootCause = ExceptionUtils.getRootCause(e)
        // If there is an exception, put the exception class followed by the message.
        Seq(rootCause.getClass.getName, rootCause.getMessage)
    }
  }

  // Boots HiveThriftServer2 on the given port against a fresh SQL session.
  private def startThriftServer(port: Int, attempt: Int): Unit = {
    logInfo(s"Trying to start HiveThriftServer2: port=$port, attempt=$attempt")
    val sqlContext = spark.newSession().sqlContext
    sqlContext.setConf(ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, port.toString)
    hiveServer2 = HiveThriftServer2.startWithContext(sqlContext)
  }

  // Opens one JDBC connection/statement per function and guarantees cleanup.
  private def withJdbcStatement(fs: (Statement => Unit)*): Unit = {
    val user = System.getProperty("user.name")

    val serverPort = hiveServer2.getHiveConf.get(ConfVars.HIVE_SERVER2_THRIFT_PORT.varname)
    val connections = fs.map { _ =>
      DriverManager.getConnection(s"jdbc:hive2://localhost:$serverPort", user, "")
    }
    val statements = connections.map(_.createStatement())

    try {
      statements.zip(fs).foreach { case (s, f) => f(s) }
    } finally {
      statements.foreach(_.close())
      connections.foreach(_.close())
    }
  }

  /** Load built-in test tables.
   */
  private def loadTestData(statement: Statement): Unit = {
    // Prepare the data
    statement.execute(
      """
        |CREATE OR REPLACE TEMPORARY VIEW testdata as
        |SELECT id AS key, CAST(id AS string) AS value FROM range(1, 101)
      """.stripMargin)
    statement.execute(
      """
        |CREATE OR REPLACE TEMPORARY VIEW arraydata as
        |SELECT * FROM VALUES
        |(ARRAY(1, 2, 3), ARRAY(ARRAY(1, 2, 3))),
        |(ARRAY(2, 3, 4), ARRAY(ARRAY(2, 3, 4))) AS v(arraycol, nestedarraycol)
      """.stripMargin)
    statement.execute(
      """
        |CREATE OR REPLACE TEMPORARY VIEW mapdata as
        |SELECT * FROM VALUES
        |MAP(1, 'a1', 2, 'b1', 3, 'c1', 4, 'd1', 5, 'e1'),
        |MAP(1, 'a2', 2, 'b2', 3, 'c2', 4, 'd2'),
        |MAP(1, 'a3', 2, 'b3', 3, 'c3'),
        |MAP(1, 'a4', 2, 'b4'),
        |MAP(1, 'a5') AS v(mapcol)
      """.stripMargin)
    statement.execute(
      s"""
         |CREATE TEMPORARY VIEW aggtest
         |  (a int, b float)
         |USING csv
         |OPTIONS (path '${baseResourcePath.getParent}/test-data/postgresql/agg.data',
         |  header 'false', delimiter '\\t')
      """.stripMargin)
    statement.execute(
      s"""
         |CREATE OR REPLACE TEMPORARY VIEW onek
         |  (unique1 int, unique2 int, two int, four int, ten int, twenty int, hundred int,
         |  thousand int, twothousand int, fivethous int, tenthous int, odd int, even int,
         |  stringu1 string, stringu2 string, string4 string)
         |USING csv
         |OPTIONS (path '${baseResourcePath.getParent}/test-data/postgresql/onek.data',
         |  header 'false', delimiter '\\t')
      """.stripMargin)
    statement.execute(
      s"""
         |CREATE OR REPLACE TEMPORARY VIEW tenk1
         |  (unique1 int, unique2 int, two int, four int, ten int, twenty int, hundred int,
         |  thousand int, twothousand int, fivethous int, tenthous int, odd int, even int,
         |  stringu1 string, stringu2 string, string4 string)
         |USING csv
         |  OPTIONS (path '${baseResourcePath.getParent}/test-data/postgresql/tenk.data',
         |  header 'false', delimiter '\\t')
      """.stripMargin)
  }

  // Returns true if sql is retrieving data.
  private def isNeedSort(sql: String): Boolean = {
    val upperCase = sql.toUpperCase(Locale.ROOT)
    upperCase.startsWith("SELECT ") || upperCase.startsWith("SELECT\\n") ||
      upperCase.startsWith("WITH ") || upperCase.startsWith("WITH\\n") ||
      upperCase.startsWith("VALUES ") || upperCase.startsWith("VALUES\\n") ||
      // postgreSQL/union.sql
      upperCase.startsWith("(")
  }

  // Converts a single JDBC column value to the Hive textual form used by the
  // golden files.
  private def getHiveResult(obj: Object): String = {
    obj match {
      case null =>
        HiveResult.toHiveString((null, StringType))
      case d: java.sql.Date =>
        HiveResult.toHiveString((d, DateType))
      case t: Timestamp =>
        HiveResult.toHiveString((t, TimestampType))
      case d: java.math.BigDecimal =>
        HiveResult.toHiveString((d, DecimalType.fromBigDecimal(d)))
      case bin: Array[Byte] =>
        HiveResult.toHiveString((bin, BinaryType))
      case other =>
        other.toString
    }
  }
}
rezasafi/spark
sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerQueryTestSuite.scala
Scala
apache-2.0
15,270
package no.nr.edvard.metis

/*~
\\section{Models}
\\subsection{Locations}
Lorem ipsum dolor sit amet. Durante degli Alighieri, mononymously referred to
as Dante, was an Italian poet, prose writer, literary theorist, moral
philosopher, and political thinker. He is best known for the monumental epic
poem La commedia, later named La divina commedia (Divine Comedy), considered
the greatest literary work composed in the Italian language and from
masterpiece of world literature.
*/

// Enumeration of the value types tracked by the analysis.
object types {
  sealed abstract class Type(shortName: String) {
    override def toString = shortName
  }
  case object ValueType extends Type("value")
  case object NullType extends Type("null")
  case object ThisType extends Type("this")
  case object PrimitiveType extends Type("primitive")
  case object IntType extends Type("integer")
  case object BooleanType extends Type("boolean")
  case object ReferenceType extends Type("reference")
  case object VoidType extends Type("void")
  case object BottomType extends Type("bottom")
}

// Fine-grained location model: distinguishes individual parameters, locals,
// fields and per-call-site message locations.
object RichModel {
  import types._

  // A place a value can flow from/to. Extends Product so case-object/class
  // subclasses share the ScalaRunTime hash implementation below.
  sealed abstract class Location extends Product {
    // Whether flow through this location is permitted; NothingLocation opts out.
    def isOpenForFlow(t: FlowSetFacade) = true
    val typeAt: Type
    @inline override final def hashCode = scala.runtime.ScalaRunTime._hashCode(this)
  }

  // Sink that accepts no flow at all.
  case object NothingLocation extends Location {
    override def isOpenForFlow(whatever: FlowSetFacade) = false
    override val typeAt = BottomType
  }

  case object ThisRefLocation extends Location {
    override val typeAt = ThisType
  }

  case class ParamRefLocation(number: Int, typeAt: Type) extends Location
  case class LocalVarLocation(number: Int, typeAt: Type) extends Location
  case class ThisInstanceDataLocation(name: String, typeAt: Type) extends Location
  case class ForeignDataLocation(name: String, typeAt: Type) extends Location
  case class ClassDataLocation(name: String, typeAt: Type) extends Location
  case class ConstPoolLocation(typeAt: Type) extends Location

  case object BitHeavenLocation extends Location {
    override val typeAt = BottomType
  }

  // Groups the receiver/argument/return locations belonging to one call site.
  // Uses reference identity for equals/hashCode so distinct call sites never
  // collide even when structurally equal.
  class MessageLocationGroup(typeAtReturn: Type) {
    val receiverLocation = new MessageReceiverLocation(this)
    val returnValueLocation = new MessageReturnValueLocation(this, typeAtReturn)
    val argumentLocation = new MessageArgumentLocation(this)

    override final def hashCode = System.identityHashCode(this)
    override final def equals(other: Any) = other match {
      case otherMlg: MessageLocationGroup => this.eq(otherMlg)
      case _ => false
    }
    override def toString = "MessageLocationGroup"
  }

  // Hand-rolled Product implementation: the single product element is the
  // owning group, so hashing (via ScalaRunTime._hashCode above) keys on group
  // identity plus the concrete subclass.
  abstract class AbstractMessageLocation private[RichModel](
    val group: MessageLocationGroup
  ) extends Location {
    def productElement(n: Int) = n match { case 0 => group }
    def productArity = 1
    def canEqual(that: Any) = true
  }

  class MessageReturnValueLocation(group: MessageLocationGroup, val typeAt: Type)
    extends AbstractMessageLocation(group)

  class MessageReceiverLocation(group: MessageLocationGroup)
    extends AbstractMessageLocation(group) {
    val typeAt = ValueType
  }

  class MessageArgumentLocation(group: MessageLocationGroup)
    extends AbstractMessageLocation(group) {
    val typeAt = ValueType
  }

  case class ReturnValueLocation(uniqueId: Int, val typeAt: Type) extends Location

  object ReturnValueLocation {
    // NOTE(review): mutable counter makes apply() non-thread-safe and makes
    // ids process-global — presumably instances are created single-threaded;
    // confirm before reusing concurrently.
    private var idCounter = -1
    def apply(typeAt: Type) = {
      idCounter += 1
      new ReturnValueLocation(idCounter, typeAt)
    }
  }

  // Directed flow edge between two locations; hash is precomputed (val).
  case class FlowEdge(from: Location, to: Location) {
    @inline override final val hashCode = scala.runtime.ScalaRunTime._hashCode(this)
  }

  case class FactSet(conditionLocations: Set[Location], edges: Set[FlowEdge])
}

// Coarse location model: collapses locations into category singletons.
object TightModel {
  import types._

  // A set of facts; `+` merges two theories.
  sealed case class Theory(facts: Set[Fact]) {
    def +(other: Theory) = Theory(this.facts ++ other.facts)
    override def toString = facts.mkString("\\n") + "\\n"
  }

  sealed trait Fact

  trait FlowFact extends Fact

  case class MessageFlowFact(
    receiverFlowSources: Set[Location],
    argumentFlowSources: Set[Location],
    returnFlowTargets: Set[Location]
  ) extends FlowFact {
    override def toString = "[%s] <- [%s].([%s])".format(
      returnFlowTargets.mkString(","),
      receiverFlowSources.mkString(","),
      argumentFlowSources.mkString(",")
    )
  }

  case class NonMessageFlowFact(_type: Type, from: Location, to: Location) extends FlowFact {
    override def toString = "%s <- %s : %s".format(to, from, _type)
  }

  // A set of flow facts that only hold when the condition locations are met;
  // both sets must be non-empty by construction.
  case class GuardedFlowFact(conditionLocations: Set[Location], flowFacts: Set[FlowFact]) extends Fact {
    require { !conditionLocations.isEmpty }
    require { !flowFacts.isEmpty }
    override def toString = "if (%s) {\\n%s\\n}".format(
      conditionLocations.mkString(", "),
      flowFacts.map(" " + _).mkString("\\n")
    )
  }

  // Category singletons; note this Location is unrelated to RichModel.Location.
  sealed abstract class Location(shortName: String) {
    override final def toString = shortName
  }
  case object InstanceDataLocation extends Location("instance-data")
  case object ClassDataLocation extends Location("class-data")
  case object ForeignDataLocation extends Location("foreign-data")
  case object ParameterLocation extends Location("parameter")
  case object LocalVarLocation extends Location("local")
  case object ConstPoolLocation extends Location("const-pool")
  case object MessageReceiverLocation extends Location("callee")
  case object MessageArgumentLocation extends Location("call-arg")
  case object MessageReturnValueLocation extends Location("call-return-value")
  case object ReturnValueLocation extends Location("return-value")
  case object ThisRefLocation extends Location("this")
  case object BitHeavenLocation extends Location("bit-heaven")
  case object NothingLocation extends Location("nothing")
}
edwkar/edwbsc
projects/metis/src/main/scala/no/nr/edvard/metis/models.scala
Scala
gpl-2.0
5,839
/*
 * Copyright 2015 - 2016 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ Actor, Props }

import scala.collection.immutable.Seq

private[vertx] object VertxSupervisor {
  /** Props for a supervisor that owns one child actor per adapter Props. */
  def props(adapters: Seq[Props]): Props =
    Props(new VertxSupervisor(adapters))
}

/**
 * Parent actor for the given adapters: every adapter Props is instantiated as
 * a child at construction time, placing the children under this actor's
 * supervision. The supervisor itself handles no messages.
 */
private[vertx] class VertxSupervisor(adapters: Seq[Props]) extends Actor {

  // One child per adapter Props, created eagerly when this actor starts.
  val adapterActors = for (adapterProps <- adapters) yield context.actorOf(adapterProps)

  override def receive: Receive = Actor.emptyBehavior
}
RBMHTechnology/eventuate
eventuate-adapter-vertx/src/main/scala/com/rbmhtechnology/eventuate/adapter/vertx/VertxSupervisor.scala
Scala
apache-2.0
1,114
package pep_065

/**
 * Project Euler 65: sum of the digits of the numerator of the 100th convergent
 * of the continued fraction of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...].
 */
object Solution {

  /**
   * Continued-fraction term used at loop index `i` (i >= 2): every third index
   * (i % 3 == 0) contributes 2 * (i / 3); all other indices contribute 1.
   * Index i corresponds to term a(i-1) of e's expansion, so a(3) = 2, a(6) = 4, ...
   */
  def a(i: Int) = if (i % 3 == 0) 2 * (i / 3) else 1

  /**
   * Computes the digit sum of the numerator of the `max`-th convergent.
   *
   * Uses the standard recurrence h(k) = a(k) * h(k-1) + h(k-2) on BigInt,
   * seeded with (h, h_prev) = (2, 1) — the leading term of e and the implicit
   * h(-1) = 1 — folded immutably instead of mutating three vars.
   *
   * @param max index of the convergent (default 100, the problem's value)
   * @return sum of decimal digits of the numerator
   */
  def solve(max: Int = 100): Int = {
    val (numerator, _) = (2 to max).foldLeft((BigInt(2), BigInt(1))) {
      case ((h, hPrev), i) => (a(i) * h + hPrev, h)
    }
    // asDigit converts '0'..'9' to 0..9, same as subtracting '0'.
    numerator.toString.map(_.asDigit).sum
  }
}
filippovitale/pe
pe-solution/src/main/scala/pep_065/Solution.scala
Scala
mit
1,267
package composition

import com.google.inject.Guice
import filters.ServiceOpenFilter
import play.filters.gzip.GzipFilter
import uk.gov.dvla.vehicles.presentation.common.filters.AccessLoggingFilter
import uk.gov.dvla.vehicles.presentation.common.filters.CsrfPreventionFilter
import uk.gov.dvla.vehicles.presentation.common.filters.EnsureSessionCreatedFilter
import utils.helpers.ErrorStrategy

// Application wiring: builds the Guice injector and exposes the HTTP filter
// chain and error strategy resolved from it. All members are lazy so nothing
// is constructed until first use.
trait Composition {

  // Injector configured from DevModule (bindings defined elsewhere).
  lazy val injector = Guice.createInjector(new DevModule)

  // Filter chain. NOTE(review): array order presumably determines the order
  // filters are applied (session creation first, service-open check last) —
  // confirm against how the Play application consumes this array before
  // reordering.
  lazy val filters = Array(
    injector.getInstance(classOf[EnsureSessionCreatedFilter]),
    new GzipFilter(),
    injector.getInstance(classOf[AccessLoggingFilter]),
    injector.getInstance(classOf[CsrfPreventionFilter]),
    injector.getInstance(classOf[ServiceOpenFilter])
  )

  lazy val errorStrategy = injector.getInstance(classOf[ErrorStrategy])
}
dvla/vehicles-online
app/composition/Composition.scala
Scala
mit
829
/**
 * Copyright 2013 Robert Welin
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package se.weln.noyt.net

import java.io.IOException

import scala.io.Source

class Downloader extends Gatherer {

  /**
   * Downloads the contents of a URL as a UTF-8 string, retrying on I/O errors.
   *
   * @param url   the URL to fetch
   * @param tries attempts remaining, including this one; when it reaches 1
   *              a failure yields None instead of another retry
   * @return Some(body) on success, None once all attempts have failed
   */
  def apply(url: String, tries: Int): Option[String] = {
    try {
      val source = Source.fromURL(url)(scala.io.Codec("UTF-8"))
      // Close the underlying stream even if mkString throws, so repeated
      // downloads do not leak connections / file descriptors. (The original
      // never closed the Source.)
      try Some(source.mkString)
      finally source.close()
    } catch {
      case e: IOException =>
        println("Download Error " + url)
        println(e)
        if (tries <= 1) None else apply(url, tries - 1)
    }
  }

  /** Downloads with the default of 3 attempts. */
  def apply(url: String) = apply(url, 3)
}
nilewapp/NoYt
src/main/scala/se/weln/noyt/net/Downloader.scala
Scala
apache-2.0
1,099
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

package scaps.settings

import java.io.File
import java.util.concurrent.TimeUnit

import scala.annotation.elidable
import scala.annotation.elidable.ASSERTION
import scala.concurrent.duration.Duration
import scala.concurrent.duration.DurationLong

import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory

// Top-level configuration for scaps, split into indexing and querying parts.
case class Settings(
  index: IndexSettings,
  query: QuerySettings) {

  // Functional updaters so callers can tweak one section without rebuilding
  // the whole Settings tree.
  def modQuery(f: QuerySettings => QuerySettings) = this.copy(query = f(query))

  def modIndex(f: IndexSettings => IndexSettings) = this.copy(index = f(index))
}

object Settings {
  // Loads the "scaps" section of the standard Typesafe Config application.conf.
  def fromApplicationConf =
    Settings(ConfigFactory.load().getConfig("scaps"))

  def apply(conf: Config): Settings =
    Settings(
      IndexSettings(conf.getConfig("index")),
      QuerySettings(conf.getConfig("query")))

  // Range check used by the settings case classes below. Note: `assert` is
  // elidable, so these checks disappear when assertions are compiled out.
  def assertDouble(min: Double, max: Double)(value: Double) = {
    assert(value >= min)
    assert(value <= max)
  }

  private[settings] val assertPositive = assertDouble(0d, Double.MaxValue)_
}

// Settings controlling index layout and construction.
case class IndexSettings(
  indexDir: String,
  timeout: Duration,
  typeFrequenciesSampleSize: Int,
  polarizedTypes: Boolean) {

  // Sub-directories of indexDir holding the individual Lucene-style indexes.
  val typeDefsDir = new File(indexDir + "/typeDefs")
  val modulesDir = new File(indexDir + "/modules")
  val valuesDir = new File(indexDir + "/values")
  val viewsDir = new File(indexDir + "/views")

  import Settings._
  assertPositive(typeFrequenciesSampleSize)
}

object IndexSettings {
  def apply(conf: Config): IndexSettings =
    IndexSettings(
      conf.getString("index-dir"),
      conf.getDuration("timeout", TimeUnit.MILLISECONDS).millis,
      conf.getInt("type-frequencies-sample-size"),
      conf.getBoolean("polarized-types"))
}

// Settings controlling query execution and result scoring.
case class QuerySettings(
  maxClauseCount: Int,
  maxResults: Int,
  views: Boolean,
  fractions: Boolean,
  penaltyWeight: Double,
  typeFrequencyWeight: Double,
  distanceWeight: Double,
  docBoost: Double,
  fingerprintFrequencyCutoff: Double,
  explainScores: Boolean) {

  import Settings._

  assertPositive(maxClauseCount)
  assertPositive(maxResults)
  assertPositive(penaltyWeight)
  assertPositive(typeFrequencyWeight)
  assertPositive(distanceWeight)
  assertPositive(docBoost)
  assertPositive(fingerprintFrequencyCutoff)
}

object QuerySettings {
  // The bare identifiers below (views, fractions, ...) are the String config
  // keys defined at the bottom of this object; they are resolved lazily at
  // call time, so the forward references are safe.
  def apply(conf: Config): QuerySettings =
    QuerySettings(
      conf.getInt("max-clause-count"),
      conf.getInt("max-results"),
      conf.getBoolean(views),
      conf.getBoolean(fractions),
      conf.getDouble(penaltyWeight),
      conf.getDouble(typeFrequencyWeight),
      conf.getDouble(distanceWeight),
      conf.getDouble(docBoost),
      conf.getDouble(fingerprintFrequencyCutoff),
      conf.getBoolean("explain-scores"))

  // Config key names, exposed so callers can override settings by key.
  val views = "views"

  val fractions = "fractions"

  val penaltyWeight = "penalty-weight"

  val typeFrequencyWeight = "type-frequency-weight"

  val distanceWeight = "distance-weight"

  val docBoost = "doc-boost"

  val fingerprintFrequencyCutoff = "fingerprint-frequency-cutoff"
}
scala-search/scaps
core/src/main/scala/scaps/settings/Settings.scala
Scala
mpl-2.0
3,209
package assets.mustache.overseas

import uk.gov.gds.ier.transaction.overseas.passport.PassportCheckMustache
import uk.gov.gds.ier.test._

/**
 * Renders the overseas passport-check mustache template from a fully
 * populated model and verifies that every model property appears in the
 * generated markup.
 */
class PassportCheckTemplateTest
  extends TemplateTestSuite
  with PassportCheckMustache {

  // A model with every field populated with distinctive, greppable values.
  val data = new PassportCheckModel(
    question = Question(),
    hasPassport = Field(classes = "hasPassportClasses"),
    hasPassportTrue = Field(
      id = "hasPassportTrueId",
      name = "hasPassportTrueName",
      classes = "hasPassportTrueClasses",
      attributes = "foo=\"foo\""
    ),
    hasPassportFalse = Field(
      id = "hasPassportFalseId",
      name = "hasPassportFalseName",
      classes = "hasPassportFalseClasses",
      attributes = "foo=\"foo\""
    ),
    bornInUk = Field(classes = "bornInUkClasses"),
    bornInUkTrue = Field(
      id = "bornInUkTrueId",
      name = "bornInUkTrueName",
      classes = "bornInUkTrueClasses",
      attributes = "foo=\"foo\""
    ),
    bornInUkFalse = Field(
      id = "bornInUkFalseId",
      name = "bornInUkFalseName",
      classes = "bornInUkFalseClasses",
      attributes = "foo=\"foo\""
    )
  )

  it should "properly render all properties from the model" in {
    running(FakeApplication()) {
      val rendered = Mustache.render("overseas/passportCheck", data)
      val doc = Jsoup.parse(rendered.toString)

      // "Has passport" fieldset: classes, then both radio options.
      val passportGroup = doc.select("fieldset[class*=hasPassportClasses]").first()
      passportGroup.attr("class") should include("hasPassportClasses")

      val passportYesLabel = passportGroup.select("label[for=hasPassportTrueId]").first()
      passportYesLabel.attr("for") should be("hasPassportTrueId")
      val passportYesInput = passportYesLabel.select("input").first()
      passportYesInput.attr("id") should be("hasPassportTrueId")
      passportYesInput.attr("name") should be("hasPassportTrueName")
      passportYesInput.attr("foo") should be("foo")

      val passportNoLabel = passportGroup.select("label[for=hasPassportFalseId]").first()
      passportNoLabel.attr("for") should be("hasPassportFalseId")
      val passportNoInput = passportNoLabel.select("input").first()
      passportNoInput.attr("id") should be("hasPassportFalseId")
      passportNoInput.attr("name") should be("hasPassportFalseName")
      passportNoInput.attr("foo") should be("foo")

      // "Born in UK" fieldset: same shape of checks as above.
      val bornGroup = doc.select("fieldset[class*=bornInUkClasses]").first()
      bornGroup.attr("class") should include("bornInUkClasses")

      val bornYesLabel = bornGroup.select("label[for=bornInUkTrueId]").first()
      bornYesLabel.attr("for") should be("bornInUkTrueId")
      val bornYesInput = bornYesLabel.select("input").first()
      bornYesInput.attr("id") should be("bornInUkTrueId")
      bornYesInput.attr("name") should be("bornInUkTrueName")
      bornYesInput.attr("foo") should be("foo")

      val bornNoLabel = bornGroup.select("label[for=bornInUkFalseId]").first()
      bornNoLabel.attr("for") should be("bornInUkFalseId")
      val bornNoInput = bornNoLabel.select("input").first()
      bornNoInput.attr("id") should be("bornInUkFalseId")
      bornNoInput.attr("name") should be("bornInUkFalseName")
      bornNoInput.attr("foo") should be("foo")
    }
  }
}
michaeldfallen/ier-frontend
test/assets/mustache/overseas/PassportCheckTemplateTest.scala
Scala
mit
3,290
package com.twitter.finagle.factory

import com.twitter.finagle._
import com.twitter.finagle.service.{DelayedFactory, FailingFactory, ServiceFactoryRef}
import com.twitter.finagle.ServiceFactoryProxy
import com.twitter.finagle.stats.{StatsReceiver, NullStatsReceiver}
import com.twitter.finagle.util.{Drv, Rng, OnReady}
import com.twitter.util._
import java.net.SocketAddress

private[finagle] object TrafficDistributor {
  /**
   * A [[ServiceFactory]] and its associated weight. The `closeGate` defers closes
   * to `factory` until it is set.
   */
  case class WeightedFactory[Req, Rep](
    factory: ServiceFactory[Req, Rep],
    closeGate: Promise[Unit],
    weight: Double)

  /**
   * An intermediate representation of the endpoints that a load balancer
   * operates over, capable of being updated.
   */
  type BalancerEndpoints[Req, Rep] =
    Var[Activity.State[Set[ServiceFactory[Req, Rep]]]]
      with Updatable[Activity.State[Set[ServiceFactory[Req, Rep]]]]

  /**
   * Represents cache entries for load balancer instances. Stores both
   * the load balancer instance and its backing updatable collection.
   * Size refers to the number of elements in `endpoints`.
   */
  case class CachedBalancer[Req, Rep](
    balancer: ServiceFactory[Req, Rep],
    endpoints: BalancerEndpoints[Req, Rep],
    size: Int)

  /**
   * A load balancer and its associated weight. Size refers to the
   * size of the balancers backing collection. The [[Distributor]]
   * operates over these.
   */
  case class WeightClass[Req, Rep](
    balancer: ServiceFactory[Req, Rep],
    weight: Double,
    size: Int)

  /**
   * Folds and accumulates over an [[Activity]] based event `stream`.
   * `Activity.Pending` and `Activity.Failed` states from the source
   * `stream` take precedence.
   *
   * Note: when the previous state was Pending or Failed, the fold
   * restarts from `init` rather than from the last accumulated value.
   */
  def scanLeft[T, U](
    init: U,
    stream: Event[Activity.State[T]]
  )(f: (U, T) => U): Event[Activity.State[U]] = {
    val initState: Activity.State[U] = Activity.Ok(init)
    stream.foldLeft(initState) {
      case (Activity.Pending, Activity.Ok(update)) => Activity.Ok(f(init, update))
      case (Activity.Failed(_), Activity.Ok(update)) => Activity.Ok(f(init, update))
      case (Activity.Ok(state), Activity.Ok(update)) => Activity.Ok(f(state, update))
      case (_, failed@Activity.Failed(_)) => failed
      case (_, pending@Activity.Pending) => pending
    }
  }

  /**
   * Distributes requests to `classes` according to their weight and size.
   */
  private class Distributor[Req, Rep](
    classes: Iterable[WeightClass[Req, Rep]],
    rng: Rng = Rng.threadLocal,
    statsReceiver: StatsReceiver = NullStatsReceiver)
    extends ServiceFactory[Req, Rep] {

    // Gauge reporting the average weight across all endpoints (weights
    // contribute proportionally to the number of members in each class).
    private[this] val meanWeight = statsReceiver.addGauge("meanweight") {
      val size = classes.map(_.size).sum
      if (size != 0) classes.map { c => c.weight * c.size }.sum.toFloat / size
      else 0.0F
    }

    // `drv` is built from weight*size per class, so a class's probability of
    // selection is proportional to both its weight and its member count.
    private[this] val (balancers, drv): (IndexedSeq[ServiceFactory[Req, Rep]], Drv) = {
      val tupled = classes.map {
        case WeightClass(b, weight, size) => (b, weight*size)
      }
      val (bs, ws) = tupled.unzip
      (bs.toIndexedSeq, Drv.fromWeights(ws.toSeq))
    }

    // Pick a balancer at random according to `drv` and delegate the request.
    def apply(conn: ClientConnection): Future[Service[Req, Rep]] =
      balancers(drv(rng))(conn)

    def close(deadline: Time): Future[Unit] = {
      meanWeight.remove()
      Closable.all(balancers: _*).close(deadline)
    }

    override def status = Status.worstOf[ServiceFactory[Req, Rep]](balancers, _.status)
    override def toString = s"Distributor($classes)"
  }
}

/**
 * A traffic distributor groups the input `dest` into distinct weight classes and
 * allocates traffic to each class. Classes are encoded in the stream of [[SocketAddress]]
 * instances in `dest`. The class operates with the following regime:
 *
 * 1. For every distinct [[SocketAddress]] observed in `dest`, it creates a `newEndpoint`.
 * Each resulting `newEndpoint` is paired with a weight extracted from the [[SocketAddress]].
 * Calls to `newEndpoint` are assumed to be expensive, so they are cached by input address.
 *
 * 2. The weighted endpoints are partitioned into weight classes and each class is
 * serviced by a distinct `newBalancer` instance. That is, load offered to a weight
 * class is also load balanced across its members. Offered load is distributed according
 * to the classes weight and number of members that belong to the class.
 *
 * @param eagerEviction When set to false, a SocketAddress cache entry is only removed
 * when its associated [[ServiceFactory]] has a status that is not Status.Open. This allows
 * for stale cache entries across updates that are only evicted when a [[ServiceFactory]]
 * is no longer eligible to receive traffic (as indicated by its `status` field).
 */
private[finagle] class TrafficDistributor[Req, Rep](
  dest: Activity[Set[SocketAddress]],
  newEndpoint: SocketAddress => ServiceFactory[Req, Rep],
  newBalancer: Activity[Set[ServiceFactory[Req, Rep]]] => ServiceFactory[Req, Rep],
  eagerEviction: Boolean,
  rng: Rng = Rng.threadLocal,
  statsReceiver: StatsReceiver = NullStatsReceiver)
  extends ServiceFactory[Req, Rep] {
  import TrafficDistributor._

  // Allows per endpoint closes to be overwritten by
  // a call to close on an instance of TrafficDistributor.
  private[this] val outerClose = new Promise[Unit]

  /**
   * Creates a `newEndpoint` for each distinct [[SocketAddress]] in the `sockaddrs`
   * stream. Calls to `newEndpoint` are cached based on the input address. The cache is
   * privy to SocketAddresses of type [[WeightedSocketAddress]] and unwraps them. Weights
   * are extracted and coupled with their respective result from `newEndpoint`. If
   * the SocketAddress does not have a weight, a default weight of 1.0 is used.
   */
  private[this] def weightEndpoints(
    sockaddrs: Event[Activity.State[Set[SocketAddress]]]
  ): Event[Activity.State[Set[WeightedFactory[Req, Rep]]]] = {
    val init = Map.empty[SocketAddress, WeightedFactory[Req, Rep]]
    scanLeft(init, sockaddrs) {
      case (active, addrs) =>
        // Note, if an update contains multiple `WeightedSocketAddress` instances
        // with duplicate `addr` fields, only one of the instances and its associated
        // factory is cached. Last write wins.
        val weightedAddrs = addrs.map(WeightedSocketAddress.extract)
        val merged = weightedAddrs.foldLeft(active) {
          case (cache, (sa, saWeight)) =>
            cache.get(sa) match {
              // An update with an existing SocketAddress that has a new weight
              // results in the weight being overwritten but the [[ServiceFactory]]
              // instance is maintained.
              case Some(wf@WeightedFactory(_, _, w)) if w != saWeight =>
                cache.updated(sa, wf.copy(weight = saWeight))
              case None =>
                // The `closeGate` allows us to defer closing an endpoint service
                // factory until it is removed from this cache. Without it, an endpoint may
                // be closed prematurely when moving across weight classes if the
                // weight class is removed.
                val closeGate = new Promise[Unit]
                val endpoint = new ServiceFactoryProxy(newEndpoint(sa)) {
                  override def close(when: Time) =
                    (closeGate or outerClose).before { super.close(when) }
                }
                cache.updated(sa, WeightedFactory(endpoint, closeGate, saWeight))
              case _ => cache
            }
        }

        // Remove stale cache entries. When `eagerEviction` is false cache
        // entries are only removed in subsequent stream updates.
        val removed = merged.keySet -- weightedAddrs.map(_._1)
        removed.foldLeft(merged) {
          case (cache, sa) =>
            cache.get(sa) match {
              case Some(WeightedFactory(f, g, _)) if eagerEviction || f.status != Status.Open =>
                // Setting the gate releases any deferred close of the endpoint.
                g.setDone()
                f.close()
                cache - sa
              case _ => cache
            }
        }
    }.map {
      case Activity.Ok(cache) => Activity.Ok(cache.values.toSet)
      case pending@Activity.Pending => pending
      case failed@Activity.Failed(_) => failed
    }
  }

  /**
   * Partitions `endpoints` and assigns a `newBalancer` instance to each partition.
   * Because balancer instances are stateful, they need to be cached across updates.
   */
  private[this] def partition(
    endpoints: Event[Activity.State[Set[WeightedFactory[Req, Rep]]]]
  ): Event[Activity.State[Iterable[WeightClass[Req, Rep]]]] = {
    // Cache entries are balancer instances together with their backing collection
    // which is updatable. The entries are keyed by weight class.
    val init = Map.empty[Double, CachedBalancer[Req, Rep]]
    scanLeft(init, endpoints) {
      case (balancers, activeSet) =>
        val weightedGroups: Map[Double, Set[WeightedFactory[Req, Rep]]] =
          activeSet.groupBy(_.weight)
        val merged = weightedGroups.foldLeft(balancers) {
          case (cache, (weight, factories)) =>
            val unweighted = factories.map { case WeightedFactory(f, _, _) => f }
            val newCacheEntry = if (cache.contains(weight)) {
              // an update that contains an existing weight class updates
              // the balancers backing collection.
              val cached = cache(weight)
              cached.endpoints.update(Activity.Ok(unweighted))
              cached.copy(size = unweighted.size)
            } else {
              val endpoints: BalancerEndpoints[Req, Rep] = Var(Activity.Ok(unweighted))
              val lb = newBalancer(Activity(endpoints))
              CachedBalancer(lb, endpoints, unweighted.size)
            }
            cache + (weight -> newCacheEntry)
        }

        // weight classes that no longer exist in the update are removed from
        // the cache and the associated balancer instances are closed.
        val removed = balancers.keySet -- weightedGroups.keySet
        removed.foldLeft(merged) {
          case (cache, weight) =>
            cache.get(weight) match {
              case Some(CachedBalancer(bal, _, _)) =>
                bal.close()
                cache - weight
              case _ => cache
            }
        }
    }.map {
      case Activity.Ok(cache) =>
        Activity.Ok(cache.map {
          case (weight, CachedBalancer(bal, _, size)) => WeightClass(bal, weight, size)
        })
      case pending@Activity.Pending => pending
      case failed@Activity.Failed(_) => failed
    }
  }

  private[this] val weightClasses = partition(weightEndpoints(dest.states))

  // `pending`/`init` let us accept requests before the first resolution:
  // `init` queues them until `pending` is satisfied with a concrete factory.
  private[this] val pending = new Promise[ServiceFactory[Req, Rep]]
  private[this] val init: ServiceFactory[Req, Rep] = new DelayedFactory(pending)

  // Translate the stream of weightClasses into a stream of underlying
  // ServiceFactories that can service requests.
  private[this] val underlying: Event[ServiceFactory[Req, Rep]] =
    weightClasses.foldLeft(init) {
      case (_, Activity.Ok(bals)) if bals.isEmpty =>
        // Defer the handling of an empty destination set to `newBalancer`
        val emptyBal = newBalancer(Activity(Var(Activity.Ok(Set.empty))))
        pending.updateIfEmpty(Return(emptyBal))
        emptyBal
      case (_, Activity.Ok(bals)) =>
        val dist = new Distributor(bals, rng, statsReceiver)
        pending.updateIfEmpty(Return(dist))
        dist
      case (_, Activity.Failed(e)) =>
        val failing = new FailingFactory[Req, Rep](e)
        pending.updateIfEmpty(Return(failing))
        failing
      case (staleState, Activity.Pending) =>
        // This could create a new `DelayedFactory`, however, after an
        // initial resolution, we prefer a stale set instead of queueing
        // for resolution. That is, it's okay to serve requests on an
        // outdated set.
        staleState
    }

  // `ref` always points at the latest factory from `underlying`; `obs` keeps
  // the subscription alive until close.
  private[this] val ref = new ServiceFactoryRef(init)
  private[this] val obs = underlying.register(Witness(ref))

  def apply(conn: ClientConnection): Future[Service[Req, Rep]] = ref(conn)

  def close(deadline: Time): Future[Unit] = {
    // Releases all endpoint close gates (see `outerClose`) before closing
    // the observation and the current factory.
    outerClose.setDone()
    Closable.all(obs, ref).close(deadline)
  }

  override def status: Status = ref.status
}
nkhuyu/finagle
finagle-core/src/main/scala/com/twitter/finagle/factory/TrafficDistributor.scala
Scala
apache-2.0
12,206
object Test { inline def foo(f: Int ?=> Int): AnyRef = f // error inline def bar(f: Int ?=> Int) = f // error def main(args: Array[String]) = { foo(thisTransaction ?=> 43) bar(thisTransaction ?=> 44) } }
som-snytt/dotty
tests/neg/i2006.scala
Scala
apache-2.0
222
/* * Copyright 2012 Twitter Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.twitter.zipkin.hadoop import com.twitter.scalding._ import sources.{PreprocessedSpanSource, PrepNoNamesSpanSource} import com.twitter.zipkin.gen.{SpanServiceName, Span, Constants, Annotation} /** * Obtain the IDs and the durations of the one hundred service calls which take the longest per service */ class WorstRuntimes(args: Args) extends Job(args) with DefaultDateRangeJob { val clientAnnotations = Seq(Constants.CLIENT_RECV, Constants.CLIENT_SEND) val preprocessed = PreprocessedSpanSource() .read .mapTo(0 -> ('service, 'id, 'annotations)) { s : SpanServiceName => (s.service_name, s.id, s.annotations.toList) } val result = preprocessed // let's find those client annotations and convert into service name and duration .flatMap('annotations -> 'duration) { annotations: List[Annotation] => var clientSend: Option[Annotation] = None var clientReceived: Option[Annotation] = None annotations.foreach { a => if (Constants.CLIENT_SEND.equals(a.getValue)) clientSend = Some(a) if (Constants.CLIENT_RECV.equals(a.getValue)) clientReceived = Some(a) } // only return a value if we have both annotations for (cs <- clientSend; cr <- clientReceived) yield (cr.timestamp - cs.timestamp) / 1000 }.discard('annotations) //sort by duration, find the 100 largest .groupBy('service) { _.sortBy('duration).reverse.take(100)} .write(Tsv(args("output"))) }
dsias/zipkin
zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/WorstRuntimes.scala
Scala
apache-2.0
2,070
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.scheduler import java.io.NotSerializableException import java.util.Properties import java.util.concurrent.{ConcurrentHashMap, TimeUnit} import java.util.concurrent.atomic.AtomicInteger import java.util.function.BiFunction import scala.annotation.tailrec import scala.collection.Map import scala.collection.mutable.{ArrayStack, HashMap, HashSet} import scala.concurrent.duration._ import scala.language.existentials import scala.language.postfixOps import scala.util.control.NonFatal import org.apache.commons.lang3.SerializationUtils import org.apache.spark._ import org.apache.spark.broadcast.Broadcast import org.apache.spark.executor.{ExecutorMetrics, TaskMetrics} import org.apache.spark.internal.Logging import org.apache.spark.internal.config import org.apache.spark.internal.config.Tests.TEST_NO_STAGE_RETRY import org.apache.spark.network.util.JavaUtils import org.apache.spark.partial.{ApproximateActionListener, ApproximateEvaluator, PartialResult} import org.apache.spark.rdd.{DeterministicLevel, RDD, RDDCheckpointData} import org.apache.spark.rpc.RpcTimeout import org.apache.spark.storage._ import org.apache.spark.storage.BlockManagerMessages.BlockManagerHeartbeat import 
org.apache.spark.util._ /** * The high-level scheduling layer that implements stage-oriented scheduling. It computes a DAG of * stages for each job, keeps track of which RDDs and stage outputs are materialized, and finds a * minimal schedule to run the job. It then submits stages as TaskSets to an underlying * TaskScheduler implementation that runs them on the cluster. A TaskSet contains fully independent * tasks that can run right away based on the data that's already on the cluster (e.g. map output * files from previous stages), though it may fail if this data becomes unavailable. * * Spark stages are created by breaking the RDD graph at shuffle boundaries. RDD operations with * "narrow" dependencies, like map() and filter(), are pipelined together into one set of tasks * in each stage, but operations with shuffle dependencies require multiple stages (one to write a * set of map output files, and another to read those files after a barrier). In the end, every * stage will have only shuffle dependencies on other stages, and may compute multiple operations * inside it. The actual pipelining of these operations happens in the RDD.compute() functions of * various RDDs * * In addition to coming up with a DAG of stages, the DAGScheduler also determines the preferred * locations to run each task on, based on the current cache status, and passes these to the * low-level TaskScheduler. Furthermore, it handles failures due to shuffle output files being * lost, in which case old stages may need to be resubmitted. Failures *within* a stage that are * not caused by shuffle file loss are handled by the TaskScheduler, which will retry each task * a small number of times before cancelling the whole stage. * * When looking through this code, there are several key concepts: * * - Jobs (represented by [[ActiveJob]]) are the top-level work items submitted to the scheduler. * For example, when the user calls an action, like count(), a job will be submitted through * submitJob. 
Each Job may require the execution of multiple stages to build intermediate data. * * - Stages ([[Stage]]) are sets of tasks that compute intermediate results in jobs, where each * task computes the same function on partitions of the same RDD. Stages are separated at shuffle * boundaries, which introduce a barrier (where we must wait for the previous stage to finish to * fetch outputs). There are two types of stages: [[ResultStage]], for the final stage that * executes an action, and [[ShuffleMapStage]], which writes map output files for a shuffle. * Stages are often shared across multiple jobs, if these jobs reuse the same RDDs. * * - Tasks are individual units of work, each sent to one machine. * * - Cache tracking: the DAGScheduler figures out which RDDs are cached to avoid recomputing them * and likewise remembers which shuffle map stages have already produced output files to avoid * redoing the map side of a shuffle. * * - Preferred locations: the DAGScheduler also computes where to run each task in a stage based * on the preferred locations of its underlying RDDs, or the location of cached or shuffle data. * * - Cleanup: all data structures are cleared when the running jobs that depend on them finish, * to prevent memory leaks in a long-running application. * * To recover from failures, the same stage might need to run multiple times, which are called * "attempts". If the TaskScheduler reports that a task failed because a map output file from a * previous stage was lost, the DAGScheduler resubmits that lost stage. This is detected through a * CompletionEvent with FetchFailed, or an ExecutorLost event. The DAGScheduler will wait a small * amount of time to see whether other nodes or tasks fail, then resubmit TaskSets for any lost * stage(s) that compute the missing tasks. As part of this process, we might also have to create * Stage objects for old (finished) stages where we previously cleaned up the Stage object. 
Since * tasks from the old attempt of a stage could still be running, care must be taken to map any * events received in the correct Stage object. * * Here's a checklist to use when making or reviewing changes to this class: * * - All data structures should be cleared when the jobs involving them end to avoid indefinite * accumulation of state in long-running programs. * * - When adding a new data structure, update `DAGSchedulerSuite.assertDataStructuresEmpty` to * include the new structure. This will help to catch memory leaks. */ private[spark] class DAGScheduler( private[scheduler] val sc: SparkContext, private[scheduler] val taskScheduler: TaskScheduler, listenerBus: LiveListenerBus, mapOutputTracker: MapOutputTrackerMaster, blockManagerMaster: BlockManagerMaster, env: SparkEnv, clock: Clock = new SystemClock()) extends Logging { def this(sc: SparkContext, taskScheduler: TaskScheduler) = { this( sc, taskScheduler, sc.listenerBus, sc.env.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster], sc.env.blockManager.master, sc.env) } def this(sc: SparkContext) = this(sc, sc.taskScheduler) private[spark] val metricsSource: DAGSchedulerSource = new DAGSchedulerSource(this) private[scheduler] val nextJobId = new AtomicInteger(0) private[scheduler] def numTotalJobs: Int = nextJobId.get() private val nextStageId = new AtomicInteger(0) private[scheduler] val jobIdToStageIds = new HashMap[Int, HashSet[Int]] private[scheduler] val stageIdToStage = new HashMap[Int, Stage] /** * Mapping from shuffle dependency ID to the ShuffleMapStage that will generate the data for * that dependency. Only includes stages that are part of currently running job (when the job(s) * that require the shuffle stage complete, the mapping will be removed, and the only record of * the shuffle data will be in the MapOutputTracker). 
*/ private[scheduler] val shuffleIdToMapStage = new HashMap[Int, ShuffleMapStage] private[scheduler] val jobIdToActiveJob = new HashMap[Int, ActiveJob] // Stages we need to run whose parents aren't done private[scheduler] val waitingStages = new HashSet[Stage] // Stages we are running right now private[scheduler] val runningStages = new HashSet[Stage] // Stages that must be resubmitted due to fetch failures private[scheduler] val failedStages = new HashSet[Stage] private[scheduler] val activeJobs = new HashSet[ActiveJob] /** * Contains the locations that each RDD's partitions are cached on. This map's keys are RDD ids * and its values are arrays indexed by partition numbers. Each array value is the set of * locations where that RDD partition is cached. * * All accesses to this map should be guarded by synchronizing on it (see SPARK-4454). */ private val cacheLocs = new HashMap[Int, IndexedSeq[Seq[TaskLocation]]] // For tracking failed nodes, we use the MapOutputTracker's epoch number, which is sent with // every task. When we detect a node failing, we note the current epoch number and failed // executor, increment it for new tasks, and use this to ignore stray ShuffleMapTask results. // // TODO: Garbage collect information about failure epochs when we know there are no more // stray messages to detect. private val failedEpoch = new HashMap[String, Long] private [scheduler] val outputCommitCoordinator = env.outputCommitCoordinator // A closure serializer that we reuse. // This is only safe because DAGScheduler runs in a single thread. private val closureSerializer = SparkEnv.get.closureSerializer.newInstance() /** If enabled, FetchFailed will not cause stage retry, in order to surface the problem. 
*/ private val disallowStageRetryForTest = sc.getConf.get(TEST_NO_STAGE_RETRY) /** * Whether to unregister all the outputs on the host in condition that we receive a FetchFailure, * this is set default to false, which means, we only unregister the outputs related to the exact * executor(instead of the host) on a FetchFailure. */ private[scheduler] val unRegisterOutputOnHostOnFetchFailure = sc.getConf.get(config.UNREGISTER_OUTPUT_ON_HOST_ON_FETCH_FAILURE) /** * Number of consecutive stage attempts allowed before a stage is aborted. */ private[scheduler] val maxConsecutiveStageAttempts = sc.getConf.getInt("spark.stage.maxConsecutiveAttempts", DAGScheduler.DEFAULT_MAX_CONSECUTIVE_STAGE_ATTEMPTS) /** * Number of max concurrent tasks check failures for each barrier job. */ private[scheduler] val barrierJobIdToNumTasksCheckFailures = new ConcurrentHashMap[Int, Int] /** * Time in seconds to wait between a max concurrent tasks check failure and the next check. */ private val timeIntervalNumTasksCheck = sc.getConf .get(config.BARRIER_MAX_CONCURRENT_TASKS_CHECK_INTERVAL) /** * Max number of max concurrent tasks check failures allowed for a job before fail the job * submission. */ private val maxFailureNumTasksCheck = sc.getConf .get(config.BARRIER_MAX_CONCURRENT_TASKS_CHECK_MAX_FAILURES) private val messageScheduler = ThreadUtils.newDaemonSingleThreadScheduledExecutor("dag-scheduler-message") private[spark] val eventProcessLoop = new DAGSchedulerEventProcessLoop(this) taskScheduler.setDAGScheduler(this) /** * Called by the TaskSetManager to report task's starting. */ def taskStarted(task: Task[_], taskInfo: TaskInfo) { eventProcessLoop.post(BeginEvent(task, taskInfo)) } /** * Called by the TaskSetManager to report that a task has completed * and results are being fetched remotely. */ def taskGettingResult(taskInfo: TaskInfo) { eventProcessLoop.post(GettingResultEvent(taskInfo)) } /** * Called by the TaskSetManager to report task completions or failures. 
*/ def taskEnded( task: Task[_], reason: TaskEndReason, result: Any, accumUpdates: Seq[AccumulatorV2[_, _]], taskInfo: TaskInfo): Unit = { eventProcessLoop.post( CompletionEvent(task, reason, result, accumUpdates, taskInfo)) } /** * Update metrics for in-progress tasks and let the master know that the BlockManager is still * alive. Return true if the driver knows about the given block manager. Otherwise, return false, * indicating that the block manager should re-register. */ def executorHeartbeatReceived( execId: String, // (taskId, stageId, stageAttemptId, accumUpdates) accumUpdates: Array[(Long, Int, Int, Seq[AccumulableInfo])], blockManagerId: BlockManagerId, // executor metrics indexed by ExecutorMetricType.values executorUpdates: ExecutorMetrics): Boolean = { listenerBus.post(SparkListenerExecutorMetricsUpdate(execId, accumUpdates, Some(executorUpdates))) blockManagerMaster.driverEndpoint.askSync[Boolean]( BlockManagerHeartbeat(blockManagerId), new RpcTimeout(600 seconds, "BlockManagerHeartbeat")) } /** * Called by TaskScheduler implementation when an executor fails. */ def executorLost(execId: String, reason: ExecutorLossReason): Unit = { eventProcessLoop.post(ExecutorLost(execId, reason)) } /** * Called by TaskScheduler implementation when a worker is removed. */ def workerRemoved(workerId: String, host: String, message: String): Unit = { eventProcessLoop.post(WorkerRemoved(workerId, host, message)) } /** * Called by TaskScheduler implementation when a host is added. */ def executorAdded(execId: String, host: String): Unit = { eventProcessLoop.post(ExecutorAdded(execId, host)) } /** * Called by the TaskSetManager to cancel an entire TaskSet due to either repeated failures or * cancellation of the job itself. */ def taskSetFailed(taskSet: TaskSet, reason: String, exception: Option[Throwable]): Unit = { eventProcessLoop.post(TaskSetFailed(taskSet, reason, exception)) } /** * Called by the TaskSetManager when it decides a speculative task is needed. 
*/ def speculativeTaskSubmitted(task: Task[_]): Unit = { eventProcessLoop.post(SpeculativeTaskSubmitted(task)) } private[scheduler] def getCacheLocs(rdd: RDD[_]): IndexedSeq[Seq[TaskLocation]] = cacheLocs.synchronized { // Note: this doesn't use `getOrElse()` because this method is called O(num tasks) times if (!cacheLocs.contains(rdd.id)) { // Note: if the storage level is NONE, we don't need to get locations from block manager. val locs: IndexedSeq[Seq[TaskLocation]] = if (rdd.getStorageLevel == StorageLevel.NONE) { IndexedSeq.fill(rdd.partitions.length)(Nil) } else { val blockIds = rdd.partitions.indices.map(index => RDDBlockId(rdd.id, index)).toArray[BlockId] blockManagerMaster.getLocations(blockIds).map { bms => bms.map(bm => TaskLocation(bm.host, bm.executorId)) } } cacheLocs(rdd.id) = locs } cacheLocs(rdd.id) } private def clearCacheLocs(): Unit = cacheLocs.synchronized { cacheLocs.clear() } /** * Gets a shuffle map stage if one exists in shuffleIdToMapStage. Otherwise, if the * shuffle map stage doesn't already exist, this method will create the shuffle map stage in * addition to any missing ancestor shuffle map stages. */ private def getOrCreateShuffleMapStage( shuffleDep: ShuffleDependency[_, _, _], firstJobId: Int): ShuffleMapStage = { shuffleIdToMapStage.get(shuffleDep.shuffleId) match { case Some(stage) => stage case None => // Create stages for all missing ancestor shuffle dependencies. getMissingAncestorShuffleDependencies(shuffleDep.rdd).foreach { dep => // Even though getMissingAncestorShuffleDependencies only returns shuffle dependencies // that were not already in shuffleIdToMapStage, it's possible that by the time we // get to a particular dependency in the foreach loop, it's been added to // shuffleIdToMapStage by the stage creation process for an earlier dependency. See // SPARK-13902 for more information. 
if (!shuffleIdToMapStage.contains(dep.shuffleId)) { createShuffleMapStage(dep, firstJobId) } } // Finally, create a stage for the given shuffle dependency. createShuffleMapStage(shuffleDep, firstJobId) } } /** * Check to make sure we don't launch a barrier stage with unsupported RDD chain pattern. The * following patterns are not supported: * 1. Ancestor RDDs that have different number of partitions from the resulting RDD (eg. * union()/coalesce()/first()/take()/PartitionPruningRDD); * 2. An RDD that depends on multiple barrier RDDs (eg. barrierRdd1.zip(barrierRdd2)). */ private def checkBarrierStageWithRDDChainPattern(rdd: RDD[_], numTasksInStage: Int): Unit = { val predicate: RDD[_] => Boolean = (r => r.getNumPartitions == numTasksInStage && r.dependencies.filter(_.rdd.isBarrier()).size <= 1) if (rdd.isBarrier() && !traverseParentRDDsWithinStage(rdd, predicate)) { throw new BarrierJobUnsupportedRDDChainException } } /** * Creates a ShuffleMapStage that generates the given shuffle dependency's partitions. If a * previously run stage generated the same shuffle data, this function will copy the output * locations that are still available from the previous shuffle to avoid unnecessarily * regenerating data. 
   */
  def createShuffleMapStage(shuffleDep: ShuffleDependency[_, _, _], jobId: Int): ShuffleMapStage = {
    val rdd = shuffleDep.rdd
    // Fail fast on unsupported barrier-stage configurations before allocating any stage state.
    checkBarrierStageWithDynamicAllocation(rdd)
    checkBarrierStageWithNumSlots(rdd)
    checkBarrierStageWithRDDChainPattern(rdd, rdd.getNumPartitions)
    val numTasks = rdd.partitions.length
    val parents = getOrCreateParentStages(rdd, jobId)
    val id = nextStageId.getAndIncrement()
    val stage = new ShuffleMapStage(
      id, rdd, numTasks, parents, jobId, rdd.creationSite, shuffleDep, mapOutputTracker)

    // Register the new stage in the scheduler's bookkeeping maps.
    stageIdToStage(id) = stage
    shuffleIdToMapStage(shuffleDep.shuffleId) = stage
    updateJobIdStageIdMaps(jobId, stage)

    if (!mapOutputTracker.containsShuffle(shuffleDep.shuffleId)) {
      // Kind of ugly: need to register RDDs with the cache and map output tracker here
      // since we can't do it in the RDD constructor because # of partitions is unknown
      logInfo("Registering RDD " + rdd.id + " (" + rdd.getCreationSite + ")")
      mapOutputTracker.registerShuffle(shuffleDep.shuffleId, rdd.partitions.length)
    }
    stage
  }

  /**
   * We don't support run a barrier stage with dynamic resource allocation enabled, it shall lead
   * to some confusing behaviors (eg. with dynamic resource allocation enabled, it may happen that
   * we acquire some executors (but not enough to launch all the tasks in a barrier stage) and
   * later release them due to executor idle time expire, and then acquire again).
   *
   * We perform the check on job submit and fail fast if running a barrier stage with dynamic
   * resource allocation enabled.
   *
   * TODO SPARK-24942 Improve cluster resource management with jobs containing barrier stage
   */
  private def checkBarrierStageWithDynamicAllocation(rdd: RDD[_]): Unit = {
    if (rdd.isBarrier() && Utils.isDynamicAllocationEnabled(sc.getConf)) {
      throw new BarrierJobRunWithDynamicAllocationException
    }
  }

  /**
   * Check whether the barrier stage requires more slots (to be able to launch all tasks in the
   * barrier stage together) than the total number of active slots currently. Fail current check
   * if trying to submit a barrier stage that requires more slots than current total number. If
   * the check fails consecutively beyond a configured number for a job, then fail current job
   * submission.
   */
  private def checkBarrierStageWithNumSlots(rdd: RDD[_]): Unit = {
    if (rdd.isBarrier() && rdd.getNumPartitions > sc.maxNumConcurrentTasks) {
      throw new BarrierJobSlotsNumberCheckFailed
    }
  }

  /**
   * Create a ResultStage associated with the provided jobId.
   */
  private def createResultStage(
      rdd: RDD[_],
      func: (TaskContext, Iterator[_]) => _,
      partitions: Array[Int],
      jobId: Int,
      callSite: CallSite): ResultStage = {
    // Same fail-fast barrier checks as for shuffle map stages; the task count here is the
    // number of *distinct* partitions this job computes.
    checkBarrierStageWithDynamicAllocation(rdd)
    checkBarrierStageWithNumSlots(rdd)
    checkBarrierStageWithRDDChainPattern(rdd, partitions.toSet.size)
    val parents = getOrCreateParentStages(rdd, jobId)
    val id = nextStageId.getAndIncrement()
    val stage = new ResultStage(id, rdd, func, partitions, parents, jobId, callSite)
    stageIdToStage(id) = stage
    updateJobIdStageIdMaps(jobId, stage)
    stage
  }

  /**
   * Get or create the list of parent stages for a given RDD.  The new Stages will be created with
   * the provided firstJobId.
   */
  private def getOrCreateParentStages(rdd: RDD[_], firstJobId: Int): List[Stage] = {
    // A parent stage exists for each immediate shuffle dependency of this RDD.
    getShuffleDependencies(rdd).map { shuffleDep =>
      getOrCreateShuffleMapStage(shuffleDep, firstJobId)
    }.toList
  }

  /** Find ancestor shuffle dependencies that are not registered in shuffleToMapStage yet */
  private def getMissingAncestorShuffleDependencies(
      rdd: RDD[_]): ArrayStack[ShuffleDependency[_, _, _]] = {
    val ancestors = new ArrayStack[ShuffleDependency[_, _, _]]
    val visited = new HashSet[RDD[_]]
    // We are manually maintaining a stack here to prevent StackOverflowError
    // caused by recursively visiting
    val waitingForVisit = new ArrayStack[RDD[_]]
    waitingForVisit.push(rdd)
    while (waitingForVisit.nonEmpty) {
      val toVisit = waitingForVisit.pop()
      if (!visited(toVisit)) {
        visited += toVisit
        getShuffleDependencies(toVisit).foreach { shuffleDep =>
          if (!shuffleIdToMapStage.contains(shuffleDep.shuffleId)) {
            ancestors.push(shuffleDep)
            // Continue the traversal past the unregistered shuffle boundary.
            waitingForVisit.push(shuffleDep.rdd)
          } // Otherwise, the dependency and its ancestors have already been registered.
        }
      }
    }
    ancestors
  }

  /**
   * Returns shuffle dependencies that are immediate parents of the given RDD.
   *
   * This function will not return more distant ancestors.  For example, if C has a shuffle
   * dependency on B which has a shuffle dependency on A:
   *
   * A <-- B <-- C
   *
   * calling this function with rdd C will only return the B <-- C dependency.
   *
   * This function is scheduler-visible for the purpose of unit testing.
   */
  private[scheduler] def getShuffleDependencies(
      rdd: RDD[_]): HashSet[ShuffleDependency[_, _, _]] = {
    val parents = new HashSet[ShuffleDependency[_, _, _]]
    val visited = new HashSet[RDD[_]]
    // Iterative traversal (explicit stack) to avoid StackOverflowError on deep lineages.
    val waitingForVisit = new ArrayStack[RDD[_]]
    waitingForVisit.push(rdd)
    while (waitingForVisit.nonEmpty) {
      val toVisit = waitingForVisit.pop()
      if (!visited(toVisit)) {
        visited += toVisit
        toVisit.dependencies.foreach {
          case shuffleDep: ShuffleDependency[_, _, _] =>
            // Stop at the shuffle boundary: record it, do not traverse past it.
            parents += shuffleDep
          case dependency =>
            waitingForVisit.push(dependency.rdd)
        }
      }
    }
    parents
  }

  /**
   * Traverses the given RDD and its ancestors within the same stage and checks whether all of the
   * RDDs satisfy a given predicate.
   */
  private def traverseParentRDDsWithinStage(rdd: RDD[_], predicate: RDD[_] => Boolean): Boolean = {
    val visited = new HashSet[RDD[_]]
    val waitingForVisit = new ArrayStack[RDD[_]]
    waitingForVisit.push(rdd)
    while (waitingForVisit.nonEmpty) {
      val toVisit = waitingForVisit.pop()
      if (!visited(toVisit)) {
        if (!predicate(toVisit)) {
          return false
        }
        visited += toVisit
        toVisit.dependencies.foreach {
          case _: ShuffleDependency[_, _, _] =>
            // Not within the same stage with current rdd, do nothing.
          case dependency =>
            waitingForVisit.push(dependency.rdd)
        }
      }
    }
    true
  }

  /**
   * Returns the parent stages of `stage` that still need to run, i.e. shuffle map stages whose
   * output is not fully available, reachable from `stage.rdd` without crossing a fully-cached RDD.
   */
  private def getMissingParentStages(stage: Stage): List[Stage] = {
    val missing = new HashSet[Stage]
    val visited = new HashSet[RDD[_]]
    // We are manually maintaining a stack here to prevent StackOverflowError
    // caused by recursively visiting
    val waitingForVisit = new ArrayStack[RDD[_]]
    def visit(rdd: RDD[_]) {
      if (!visited(rdd)) {
        visited += rdd
        val rddHasUncachedPartitions = getCacheLocs(rdd).contains(Nil)
        // If every partition is cached we can stop here: ancestors need not be recomputed.
        if (rddHasUncachedPartitions) {
          for (dep <- rdd.dependencies) {
            dep match {
              case shufDep: ShuffleDependency[_, _, _] =>
                val mapStage = getOrCreateShuffleMapStage(shufDep, stage.firstJobId)
                if (!mapStage.isAvailable) {
                  missing += mapStage
                }
              case narrowDep: NarrowDependency[_] =>
                waitingForVisit.push(narrowDep.rdd)
            }
          }
        }
      }
    }
    waitingForVisit.push(stage.rdd)
    while (waitingForVisit.nonEmpty) {
      visit(waitingForVisit.pop())
    }
    missing.toList
  }

  /**
   * Registers the given jobId among the jobs that need the given stage and
   * all of that stage's ancestors.
   */
  private def updateJobIdStageIdMaps(jobId: Int, stage: Stage): Unit = {
    // Tail-recursive worklist walk over the stage's ancestry; stages already tagged with this
    // job id are pruned so each stage is visited at most once per job.
    @tailrec
    def updateJobIdStageIdMapsList(stages: List[Stage]) {
      if (stages.nonEmpty) {
        val s = stages.head
        s.jobIds += jobId
        jobIdToStageIds.getOrElseUpdate(jobId, new HashSet[Int]()) += s.id
        val parentsWithoutThisJobId = s.parents.filter { ! _.jobIds.contains(jobId) }
        updateJobIdStageIdMapsList(parentsWithoutThisJobId ++ stages.tail)
      }
    }
    updateJobIdStageIdMapsList(List(stage))
  }

  /**
   * Removes state for job and any stages that are not needed by any other job.  Does not
   * handle cancelling tasks or notifying the SparkListener about finished jobs/stages/tasks.
   *
   * @param job The job whose state to cleanup.
   */
  private def cleanupStateForJobAndIndependentStages(job: ActiveJob): Unit = {
    val registeredStages = jobIdToStageIds.get(job.jobId)
    if (registeredStages.isEmpty || registeredStages.get.isEmpty) {
      logError("No stages registered for job " + job.jobId)
    } else {
      stageIdToStage.filterKeys(stageId => registeredStages.get.contains(stageId)).foreach {
        case (stageId, stage) =>
          val jobSet = stage.jobIds
          if (!jobSet.contains(job.jobId)) {
            logError(
              "Job %d not registered for stage %d even though that stage was registered for the job"
                .format(job.jobId, stageId))
          } else {
            // Removes `stage` (identified by `stageId`) from every scheduler data structure.
            def removeStage(stageId: Int) {
              // data structures based on Stage
              for (stage <- stageIdToStage.get(stageId)) {
                if (runningStages.contains(stage)) {
                  logDebug("Removing running stage %d".format(stageId))
                  runningStages -= stage
                }
                for ((k, v) <- shuffleIdToMapStage.find(_._2 == stage)) {
                  shuffleIdToMapStage.remove(k)
                }
                if (waitingStages.contains(stage)) {
                  logDebug("Removing stage %d from waiting set.".format(stageId))
                  waitingStages -= stage
                }
                if (failedStages.contains(stage)) {
                  logDebug("Removing stage %d from failed set.".format(stageId))
                  failedStages -= stage
                }
              }
              // data structures based on StageId
              stageIdToStage -= stageId
              logDebug("After removal of stage %d, remaining stages = %d"
                .format(stageId, stageIdToStage.size))
            }

            jobSet -= job.jobId
            if (jobSet.isEmpty) { // no other job needs this stage
              removeStage(stageId)
            }
          }
      }
    }
    jobIdToStageIds -= job.jobId
    jobIdToActiveJob -= job.jobId
    activeJobs -= job
    job.finalStage match {
      case r: ResultStage => r.removeActiveJob()
      case m: ShuffleMapStage => m.removeActiveJob(job)
    }
  }

  /**
   * Submit an action job to the scheduler.
   *
   * @param rdd target RDD to run tasks on
   * @param func a function to run on each partition of the RDD
   * @param partitions set of partitions to run on; some jobs may not want to compute on all
   *   partitions of the target RDD, e.g.
   *   for operations like first()
   * @param callSite where in the user program this job was called
   * @param resultHandler callback to pass each result to
   * @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
   *
   * @return a JobWaiter object that can be used to block until the job finishes executing
   *         or can be used to cancel the job.
   *
   * @throws IllegalArgumentException when partitions ids are illegal
   */
  def submitJob[T, U](
      rdd: RDD[T],
      func: (TaskContext, Iterator[T]) => U,
      partitions: Seq[Int],
      callSite: CallSite,
      resultHandler: (Int, U) => Unit,
      properties: Properties): JobWaiter[U] = {
    // Check to make sure we are not launching a task on a partition that does not exist.
    val maxPartitions = rdd.partitions.length
    partitions.find(p => p >= maxPartitions || p < 0).foreach { p =>
      throw new IllegalArgumentException(
        "Attempting to access a non-existent partition: " + p + ". " +
          "Total number of partitions: " + maxPartitions)
    }

    val jobId = nextJobId.getAndIncrement()
    if (partitions.size == 0) {
      val time = clock.getTimeMillis()
      // A zero-task job is still reported to listeners as started and immediately succeeded.
      listenerBus.post(
        SparkListenerJobStart(jobId, time, Seq[StageInfo](), properties))
      listenerBus.post(
        SparkListenerJobEnd(jobId, time, JobSucceeded))
      // Return immediately if the job is running 0 tasks
      return new JobWaiter[U](this, jobId, 0, resultHandler)
    }

    assert(partitions.size > 0)
    val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _]
    val waiter = new JobWaiter(this, jobId, partitions.size, resultHandler)
    // Clone the properties so later caller-side mutation cannot affect the submitted job.
    eventProcessLoop.post(JobSubmitted(
      jobId, rdd, func2, partitions.toArray, callSite, waiter,
      SerializationUtils.clone(properties)))
    waiter
  }

  /**
   * Run an action job on the given RDD and pass all the results to the resultHandler function as
   * they arrive.
   *
   * @param rdd target RDD to run tasks on
   * @param func a function to run on each partition of the RDD
   * @param partitions set of partitions to run on; some jobs may not want to compute on all
   *   partitions of the target RDD, e.g. for operations like first()
   * @param callSite where in the user program this job was called
   * @param resultHandler callback to pass each result to
   * @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
   *
   * @note Throws `Exception` when the job fails
   */
  def runJob[T, U](
      rdd: RDD[T],
      func: (TaskContext, Iterator[T]) => U,
      partitions: Seq[Int],
      callSite: CallSite,
      resultHandler: (Int, U) => Unit,
      properties: Properties): Unit = {
    val start = System.nanoTime
    val waiter = submitJob(rdd, func, partitions, callSite, resultHandler, properties)
    // Block until the submitted job finishes (success or failure).
    ThreadUtils.awaitReady(waiter.completionFuture, Duration.Inf)
    waiter.completionFuture.value.get match {
      case scala.util.Success(_) =>
        logInfo("Job %d finished: %s, took %f s".format
          (waiter.jobId, callSite.shortForm, (System.nanoTime - start) / 1e9))
      case scala.util.Failure(exception) =>
        logInfo("Job %d failed: %s, took %f s".format
          (waiter.jobId, callSite.shortForm, (System.nanoTime - start) / 1e9))
        // SPARK-8644: Include user stack trace in exceptions coming from DAGScheduler.
        val callerStackTrace = Thread.currentThread().getStackTrace.tail
        exception.setStackTrace(exception.getStackTrace ++ callerStackTrace)
        throw exception
    }
  }

  /**
   * Run an approximate job on the given RDD and pass all the results to an ApproximateEvaluator
   * as they arrive. Returns a partial result object from the evaluator.
   *
   * @param rdd target RDD to run tasks on
   * @param func a function to run on each partition of the RDD
   * @param evaluator `ApproximateEvaluator` to receive the partial results
   * @param callSite where in the user program this job was called
   * @param timeout maximum time to wait for the job, in milliseconds
   * @param properties scheduler properties to attach to this job, e.g.
   *   fair scheduler pool name
   */
  def runApproximateJob[T, U, R](
      rdd: RDD[T],
      func: (TaskContext, Iterator[T]) => U,
      evaluator: ApproximateEvaluator[U, R],
      callSite: CallSite,
      timeout: Long,
      properties: Properties): PartialResult[R] = {
    // Approximate jobs always run over every partition of the target RDD.
    val partitions = (0 until rdd.partitions.length).toArray
    val jobId = nextJobId.getAndIncrement()
    if (partitions.isEmpty) {
      // Return immediately if the job is running 0 tasks
      val time = clock.getTimeMillis()
      listenerBus.post(SparkListenerJobStart(jobId, time, Seq[StageInfo](), properties))
      listenerBus.post(SparkListenerJobEnd(jobId, time, JobSucceeded))
      return new PartialResult(evaluator.currentResult(), true)
    }
    val listener = new ApproximateActionListener(rdd, func, evaluator, timeout)
    val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _]
    eventProcessLoop.post(JobSubmitted(
      jobId, rdd, func2, partitions, callSite, listener, SerializationUtils.clone(properties)))
    listener.awaitResult()    // Will throw an exception if the job fails
  }

  /**
   * Submit a shuffle map stage to run independently and get a JobWaiter object back. The waiter
   * can be used to block until the job finishes executing or can be used to cancel the job.
   * This method is used for adaptive query planning, to run map stages and look at statistics
   * about their outputs before submitting downstream stages.
   *
   * @param dependency the ShuffleDependency to run a map stage for
   * @param callback function called with the result of the job, which in this case will be a
   *   single MapOutputStatistics object showing how much data was produced for each partition
   * @param callSite where in the user program this job was submitted
   * @param properties scheduler properties to attach to this job, e.g.
   *   fair scheduler pool name
   */
  def submitMapStage[K, V, C](
      dependency: ShuffleDependency[K, V, C],
      callback: MapOutputStatistics => Unit,
      callSite: CallSite,
      properties: Properties): JobWaiter[MapOutputStatistics] = {

    val rdd = dependency.rdd
    val jobId = nextJobId.getAndIncrement()
    if (rdd.partitions.length == 0) {
      throw new SparkException("Can't run submitMapStage on RDD with 0 partitions")
    }

    // We create a JobWaiter with only one "task", which will be marked as complete when the whole
    // map stage has completed, and will be passed the MapOutputStatistics for that stage.
    // This makes it easier to avoid race conditions between the user code and the map output
    // tracker that might result if we told the user the stage had finished, but then they queries
    // the map output tracker and some node failures had caused the output statistics to be lost.
    val waiter = new JobWaiter(this, jobId, 1,
      (i: Int, r: MapOutputStatistics) => callback(r))
    eventProcessLoop.post(MapStageSubmitted(
      jobId, dependency, callSite, waiter, SerializationUtils.clone(properties)))
    waiter
  }

  /**
   * Cancel a job that is running or waiting in the queue.
   */
  def cancelJob(jobId: Int, reason: Option[String]): Unit = {
    logInfo("Asked to cancel job " + jobId)
    eventProcessLoop.post(JobCancelled(jobId, reason))
  }

  /**
   * Cancel all jobs in the given job group ID.
   */
  def cancelJobGroup(groupId: String): Unit = {
    logInfo("Asked to cancel job group " + groupId)
    eventProcessLoop.post(JobGroupCancelled(groupId))
  }

  /**
   * Cancel all jobs that are running or waiting in the queue.
   */
  def cancelAllJobs(): Unit = {
    eventProcessLoop.post(AllJobsCancelled)
  }

  // Event-loop handler for AllJobsCancelled: cancels every running job, then resets job maps.
  private[scheduler] def doCancelAllJobs() {
    // Cancel all running jobs.
    runningStages.map(_.firstJobId).foreach(handleJobCancellation(_,
      Option("as part of cancellation of all jobs")))
    activeJobs.clear() // These should already be empty by this point,
    jobIdToActiveJob.clear() // but just in case we lost track of some jobs...
} /** * Cancel all jobs associated with a running or scheduled stage. */ def cancelStage(stageId: Int, reason: Option[String]) { eventProcessLoop.post(StageCancelled(stageId, reason)) } /** * Kill a given task. It will be retried. * * @return Whether the task was successfully killed. */ def killTaskAttempt(taskId: Long, interruptThread: Boolean, reason: String): Boolean = { taskScheduler.killTaskAttempt(taskId, interruptThread, reason) } /** * Resubmit any failed stages. Ordinarily called after a small amount of time has passed since * the last fetch failure. */ private[scheduler] def resubmitFailedStages() { if (failedStages.size > 0) { // Failed stages may be removed by job cancellation, so failed might be empty even if // the ResubmitFailedStages event has been scheduled. logInfo("Resubmitting failed stages") clearCacheLocs() val failedStagesCopy = failedStages.toArray failedStages.clear() for (stage <- failedStagesCopy.sortBy(_.firstJobId)) { submitStage(stage) } } } /** * Check for waiting stages which are now eligible for resubmission. * Submits stages that depend on the given parent stage. Called when the parent stage completes * successfully. */ private def submitWaitingChildStages(parent: Stage) { logTrace(s"Checking if any dependencies of $parent are now runnable") logTrace("running: " + runningStages) logTrace("waiting: " + waitingStages) logTrace("failed: " + failedStages) val childStages = waitingStages.filter(_.parents.contains(parent)).toArray waitingStages --= childStages for (stage <- childStages.sortBy(_.firstJobId)) { submitStage(stage) } } /** Finds the earliest-created active job that needs the stage */ // TODO: Probably should actually find among the active jobs that need this // stage the one with the highest priority (highest-priority pool, earliest created). // That should take care of at least part of the priority inversion problem with // cross-job dependencies. 
  private def activeJobForStage(stage: Stage): Option[Int] = {
    // Job ids are assigned monotonically, so the smallest active id is the earliest-created job.
    val jobsThatUseStage: Array[Int] = stage.jobIds.toArray.sorted
    jobsThatUseStage.find(jobIdToActiveJob.contains)
  }

  /** Event-loop handler: cancels every active job whose properties carry the given group id. */
  private[scheduler] def handleJobGroupCancelled(groupId: String) {
    // Cancel all jobs belonging to this job group.
    // First finds all active jobs with this group id, and then kill stages for them.
    val activeInGroup = activeJobs.filter { activeJob =>
      Option(activeJob.properties).exists {
        _.getProperty(SparkContext.SPARK_JOB_GROUP_ID) == groupId
      }
    }
    val jobIds = activeInGroup.map(_.jobId)
    jobIds.foreach(handleJobCancellation(_,
      Option("part of cancelled job group %s".format(groupId))))
  }

  /** Event-loop handler: forwards a task-start notification to the listener bus. */
  private[scheduler] def handleBeginEvent(task: Task[_], taskInfo: TaskInfo) {
    // Note that there is a chance that this task is launched after the stage is cancelled.
    // In that case, we wouldn't have the stage anymore in stageIdToStage.
    val stageAttemptId =
      stageIdToStage.get(task.stageId).map(_.latestInfo.attemptNumber).getOrElse(-1)
    listenerBus.post(SparkListenerTaskStart(task.stageId, stageAttemptId, taskInfo))
  }

  /** Event-loop handler: forwards a speculative-task-submitted notification to the listener bus. */
  private[scheduler] def handleSpeculativeTaskSubmitted(task: Task[_]): Unit = {
    listenerBus.post(SparkListenerSpeculativeTaskSubmitted(task.stageId))
  }

  /** Event-loop handler: aborts the stage backing a failed TaskSet, if it is still registered. */
  private[scheduler] def handleTaskSetFailed(
      taskSet: TaskSet,
      reason: String,
      exception: Option[Throwable]): Unit = {
    stageIdToStage.get(taskSet.stageId).foreach { abortStage(_, reason, exception) }
  }

  /** Fails all active jobs and ends all running stages when the SparkContext shuts down. */
  private[scheduler] def cleanUpAfterSchedulerStop() {
    for (job <- activeJobs) {
      val error =
        new SparkException(s"Job ${job.jobId} cancelled because SparkContext was shut down")
      job.listener.jobFailed(error)
      // Tell the listeners that all of the running stages have ended.  Don't bother
      // cancelling the stages because if the DAG scheduler is stopped, the entire application
      // is in the process of getting stopped.
      val stageFailedMessage = "Stage cancelled because SparkContext was shut down"
      // The `toArray` here is necessary so that we don't iterate over `runningStages` while
      // mutating it.
      runningStages.toArray.foreach { stage =>
        markStageAsFinished(stage, Some(stageFailedMessage))
      }
      listenerBus.post(SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobFailed(error)))
    }
  }

  /** Event-loop handler: forwards a task-getting-result notification to the listener bus. */
  private[scheduler] def handleGetTaskResult(taskInfo: TaskInfo) {
    listenerBus.post(SparkListenerTaskGettingResult(taskInfo))
  }

  /**
   * Event-loop handler for an action job: creates the ResultStage (and its parents) and submits
   * it. Barrier-stage jobs whose slot requirement cannot be met yet are re-posted with a delay.
   */
  private[scheduler] def handleJobSubmitted(jobId: Int,
      finalRDD: RDD[_],
      func: (TaskContext, Iterator[_]) => _,
      partitions: Array[Int],
      callSite: CallSite,
      listener: JobListener,
      properties: Properties) {
    var finalStage: ResultStage = null
    try {
      // New stage creation may throw an exception if, for example, jobs are run on a
      // HadoopRDD whose underlying HDFS files have been deleted.
      finalStage = createResultStage(finalRDD, func, partitions, jobId, callSite)
    } catch {
      case e: BarrierJobSlotsNumberCheckFailed =>
        logWarning(s"The job $jobId requires to run a barrier stage that requires more slots " +
          "than the total number of slots in the cluster currently.")
        // If jobId doesn't exist in the map, Scala converts its value null to 0: Int automatically.
        val numCheckFailures = barrierJobIdToNumTasksCheckFailures.compute(jobId,
          new BiFunction[Int, Int, Int] {
            override def apply(key: Int, value: Int): Int = value + 1
          })
        if (numCheckFailures <= maxFailureNumTasksCheck) {
          // Retry later: re-post the same submission after timeIntervalNumTasksCheck seconds.
          messageScheduler.schedule(
            new Runnable {
              override def run(): Unit = eventProcessLoop.post(JobSubmitted(jobId, finalRDD, func,
                partitions, callSite, listener, properties))
            },
            timeIntervalNumTasksCheck,
            TimeUnit.SECONDS
          )
          return
        } else {
          // Job failed, clear internal data.
          barrierJobIdToNumTasksCheckFailures.remove(jobId)
          listener.jobFailed(e)
          return
        }

      case e: Exception =>
        logWarning("Creating new stage failed due to exception - job: " + jobId, e)
        listener.jobFailed(e)
        return
    }
    // Job submitted, clear internal data.
    barrierJobIdToNumTasksCheckFailures.remove(jobId)

    val job = new ActiveJob(jobId, finalStage, callSite, listener, properties)
    clearCacheLocs()
    logInfo("Got job %s (%s) with %d output partitions".format(
      job.jobId, callSite.shortForm, partitions.length))
    logInfo("Final stage: " + finalStage + " (" + finalStage.name + ")")
    logInfo("Parents of final stage: " + finalStage.parents)
    logInfo("Missing parents: " + getMissingParentStages(finalStage))

    // Register the job, notify listeners, then kick off stage submission.
    val jobSubmissionTime = clock.getTimeMillis()
    jobIdToActiveJob(jobId) = job
    activeJobs += job
    finalStage.setActiveJob(job)
    val stageIds = jobIdToStageIds(jobId).toArray
    val stageInfos = stageIds.flatMap(id => stageIdToStage.get(id).map(_.latestInfo))
    listenerBus.post(
      SparkListenerJobStart(job.jobId, jobSubmissionTime, stageInfos, properties))
    submitStage(finalStage)
  }

  /**
   * Event-loop handler for a map-stage job (submitMapStage): creates or reuses the
   * ShuffleMapStage for the dependency and submits it.
   */
  private[scheduler] def handleMapStageSubmitted(jobId: Int,
      dependency: ShuffleDependency[_, _, _],
      callSite: CallSite,
      listener: JobListener,
      properties: Properties) {
    // Submitting this map stage might still require the creation of some parent stages, so make
    // sure that happens.
    var finalStage: ShuffleMapStage = null
    try {
      // New stage creation may throw an exception if, for example, jobs are run on a
      // HadoopRDD whose underlying HDFS files have been deleted.
      finalStage = getOrCreateShuffleMapStage(dependency, jobId)
    } catch {
      case e: Exception =>
        logWarning("Creating new stage failed due to exception - job: " + jobId, e)
        listener.jobFailed(e)
        return
    }

    val job = new ActiveJob(jobId, finalStage, callSite, listener, properties)
    clearCacheLocs()
    logInfo("Got map stage job %s (%s) with %d output partitions".format(
      jobId, callSite.shortForm, dependency.rdd.partitions.length))
    logInfo("Final stage: " + finalStage + " (" + finalStage.name + ")")
    logInfo("Parents of final stage: " + finalStage.parents)
    logInfo("Missing parents: " + getMissingParentStages(finalStage))

    // Register the job, notify listeners, then kick off stage submission.
    val jobSubmissionTime = clock.getTimeMillis()
    jobIdToActiveJob(jobId) = job
    activeJobs += job
    finalStage.addActiveJob(job)
    val stageIds = jobIdToStageIds(jobId).toArray
    val stageInfos = stageIds.flatMap(id => stageIdToStage.get(id).map(_.latestInfo))
    listenerBus.post(
      SparkListenerJobStart(job.jobId, jobSubmissionTime, stageInfos, properties))
    submitStage(finalStage)

    // If the whole stage has already finished, tell the listener and remove it
    if (finalStage.isAvailable) {
      markMapStageJobAsFinished(job, mapOutputTracker.getStatistics(dependency))
    }
  }

  /** Submits stage, but first recursively submits any missing parents. */
  private def submitStage(stage: Stage) {
    val jobId = activeJobForStage(stage)
    if (jobId.isDefined) {
      logDebug("submitStage(" + stage + ")")
      // Skip stages that are already queued, running, or known-failed.
      if (!waitingStages(stage) && !runningStages(stage) && !failedStages(stage)) {
        val missing = getMissingParentStages(stage).sortBy(_.id)
        logDebug("missing: " + missing)
        if (missing.isEmpty) {
          logInfo("Submitting " + stage + " (" + stage.rdd + "), which has no missing parents")
          submitMissingTasks(stage, jobId.get)
        } else {
          // Submit parents first; this stage will be resubmitted once they complete.
          for (parent <- missing) {
            submitStage(parent)
          }
          waitingStages += stage
        }
      }
    } else {
      abortStage(stage, "No active job for stage " + stage.id, None)
    }
  }

  /** Called when stage's parents are available and we can now do its task.
   */
  private def submitMissingTasks(stage: Stage, jobId: Int) {
    logDebug("submitMissingTasks(" + stage + ")")

    // First figure out the indexes of partition ids to compute.
    val partitionsToCompute: Seq[Int] = stage.findMissingPartitions()

    // Use the scheduling pool, job group, description, etc. from an ActiveJob associated
    // with this Stage
    val properties = jobIdToActiveJob(jobId).properties

    runningStages += stage
    // SparkListenerStageSubmitted should be posted before testing whether tasks are
    // serializable. If tasks are not serializable, a SparkListenerStageCompleted event
    // will be posted, which should always come after a corresponding SparkListenerStageSubmitted
    // event.
    stage match {
      case s: ShuffleMapStage =>
        outputCommitCoordinator.stageStart(stage = s.id, maxPartitionId = s.numPartitions - 1)
      case s: ResultStage =>
        outputCommitCoordinator.stageStart(
          stage = s.id, maxPartitionId = s.rdd.partitions.length - 1)
    }
    // Preferred executor locations per partition; failure here aborts the stage.
    val taskIdToLocations: Map[Int, Seq[TaskLocation]] = try {
      stage match {
        case s: ShuffleMapStage =>
          partitionsToCompute.map { id => (id, getPreferredLocs(stage.rdd, id))}.toMap
        case s: ResultStage =>
          partitionsToCompute.map { id =>
            val p = s.partitions(id)
            (id, getPreferredLocs(stage.rdd, p))
          }.toMap
      }
    } catch {
      case NonFatal(e) =>
        stage.makeNewStageAttempt(partitionsToCompute.size)
        listenerBus.post(SparkListenerStageSubmitted(stage.latestInfo, properties))
        abortStage(stage, s"Task creation failed: $e\n${Utils.exceptionString(e)}", Some(e))
        runningStages -= stage
        return
    }

    stage.makeNewStageAttempt(partitionsToCompute.size, taskIdToLocations.values.toSeq)

    // If there are tasks to execute, record the submission time of the stage. Otherwise,
    // post the event without the submission time, which indicates that this stage was
    // skipped.
    if (partitionsToCompute.nonEmpty) {
      stage.latestInfo.submissionTime = Some(clock.getTimeMillis())
    }
    listenerBus.post(SparkListenerStageSubmitted(stage.latestInfo, properties))

    // TODO: Maybe we can keep the taskBinary in Stage to avoid serializing it multiple times.
    // Broadcasted binary for the task, used to dispatch tasks to executors. Note that we broadcast
    // the serialized copy of the RDD and for each task we will deserialize it, which means each
    // task gets a different copy of the RDD. This provides stronger isolation between tasks that
    // might modify state of objects referenced in their closures. This is necessary in Hadoop
    // where the JobConf/Configuration object is not thread-safe.
    var taskBinary: Broadcast[Array[Byte]] = null
    var partitions: Array[Partition] = null
    try {
      // For ShuffleMapTask, serialize and broadcast (rdd, shuffleDep).
      // For ResultTask, serialize and broadcast (rdd, func).
      var taskBinaryBytes: Array[Byte] = null
      // taskBinaryBytes and partitions are both affected by the checkpoint status. We need
      // this synchronization in case another concurrent job is checkpointing this RDD, so we get a
      // consistent view of both variables.
      RDDCheckpointData.synchronized {
        taskBinaryBytes = stage match {
          case stage: ShuffleMapStage =>
            JavaUtils.bufferToArray(
              closureSerializer.serialize((stage.rdd, stage.shuffleDep): AnyRef))
          case stage: ResultStage =>
            JavaUtils.bufferToArray(closureSerializer.serialize((stage.rdd, stage.func): AnyRef))
        }

        partitions = stage.rdd.partitions
      }

      if (taskBinaryBytes.length > TaskSetManager.TASK_SIZE_TO_WARN_KIB * 1024) {
        logWarning(s"Broadcasting large task binary with size " +
          s"${Utils.bytesToString(taskBinaryBytes.length)}")
      }
      taskBinary = sc.broadcast(taskBinaryBytes)
    } catch {
      // In the case of a failure during serialization, abort the stage.
      case e: NotSerializableException =>
        abortStage(stage, "Task not serializable: " + e.toString, Some(e))
        runningStages -= stage

        // Abort execution
        return
      case e: Throwable =>
        abortStage(stage, s"Task serialization failed: $e\n${Utils.exceptionString(e)}", Some(e))
        runningStages -= stage

        // Abort execution
        return
    }

    // Build one task per missing partition; failure here aborts the stage.
    val tasks: Seq[Task[_]] = try {
      val serializedTaskMetrics = closureSerializer.serialize(stage.latestInfo.taskMetrics).array()
      stage match {
        case stage: ShuffleMapStage =>
          stage.pendingPartitions.clear()
          partitionsToCompute.map { id =>
            val locs = taskIdToLocations(id)
            val part = partitions(id)
            stage.pendingPartitions += id
            new ShuffleMapTask(stage.id, stage.latestInfo.attemptNumber,
              taskBinary, part, locs, properties, serializedTaskMetrics, Option(jobId),
              Option(sc.applicationId), sc.applicationAttemptId, stage.rdd.isBarrier())
          }

        case stage: ResultStage =>
          partitionsToCompute.map { id =>
            val p: Int = stage.partitions(id)
            val part = partitions(p)
            val locs = taskIdToLocations(id)
            new ResultTask(stage.id, stage.latestInfo.attemptNumber,
              taskBinary, part, locs, id, properties, serializedTaskMetrics,
              Option(jobId), Option(sc.applicationId), sc.applicationAttemptId,
              stage.rdd.isBarrier())
          }
      }
    } catch {
      case NonFatal(e) =>
        abortStage(stage, s"Task creation failed: $e\n${Utils.exceptionString(e)}", Some(e))
        runningStages -= stage
        return
    }

    if (tasks.size > 0) {
      logInfo(s"Submitting ${tasks.size} missing tasks from $stage (${stage.rdd}) (first 15 " +
        s"tasks are for partitions ${tasks.take(15).map(_.partitionId)})")
      taskScheduler.submitTasks(new TaskSet(
        tasks.toArray, stage.id, stage.latestInfo.attemptNumber, jobId, properties))
    } else {
      // Because we posted SparkListenerStageSubmitted earlier, we should mark
      // the stage as completed here in case there are no tasks to run
      markStageAsFinished(stage, None)

      stage match {
        case stage: ShuffleMapStage =>
          logDebug(s"Stage ${stage} is actually done; " +
              s"(available: ${stage.isAvailable}," +
              s"available outputs: ${stage.numAvailableOutputs}," +
              s"partitions: ${stage.numPartitions})")
          markMapStageJobsAsFinished(stage)
        case stage : ResultStage =>
          logDebug(s"Stage ${stage} is actually done; (partitions: ${stage.numPartitions})")
      }
      submitWaitingChildStages(stage)
    }
  }

  /**
   * Merge local values from a task into the corresponding accumulators previously registered
   * here on the driver.
   *
   * Although accumulators themselves are not thread-safe, this method is called only from one
   * thread, the one that runs the scheduling loop. This means we only handle one task
   * completion event at a time so we don't need to worry about locking the accumulators.
   * This still doesn't stop the caller from updating the accumulator outside the scheduler,
   * but that's not our problem since there's nothing we can do about that.
   */
  private def updateAccumulators(event: CompletionEvent): Unit = {
    val task = event.task
    val stage = stageIdToStage(task.stageId)

    event.accumUpdates.foreach { updates =>
      val id = updates.id
      try {
        // Find the corresponding accumulator on the driver and update it
        val acc: AccumulatorV2[Any, Any] = AccumulatorContext.get(id) match {
          case Some(accum) => accum.asInstanceOf[AccumulatorV2[Any, Any]]
          case None =>
            throw new SparkException(s"attempted to access non-existent accumulator $id")
        }
        acc.merge(updates.asInstanceOf[AccumulatorV2[Any, Any]])
        // To avoid UI cruft, ignore cases where value wasn't updated
        if (acc.name.isDefined && !updates.isZero) {
          stage.latestInfo.accumulables(id) = acc.toInfo(None, Some(acc.value))
          event.taskInfo.setAccumulables(
            acc.toInfo(Some(updates.value), Some(acc.value)) +: event.taskInfo.accumulables)
        }
      } catch {
        case NonFatal(e) =>
          // Log the class name to make it easy to find the bad implementation
          val accumClassName = AccumulatorContext.get(id) match {
            case Some(accum) => accum.getClass.getName
            case None => "Unknown class"
          }
          logError(
            s"Failed to update accumulator $id ($accumClassName) for task ${task.partitionId}",
            e)
      }
    }
  }

  private def
postTaskEnd(event: CompletionEvent): Unit = {
    // Reconstruct driver-side TaskMetrics from the task's accumulator updates (null on failure
    // or when there are no updates) and post the TaskEnd event to listeners.
    val taskMetrics: TaskMetrics =
      if (event.accumUpdates.nonEmpty) {
        try {
          TaskMetrics.fromAccumulators(event.accumUpdates)
        } catch {
          case NonFatal(e) =>
            val taskId = event.taskInfo.taskId
            logError(s"Error when attempting to reconstruct metrics for task $taskId", e)
            null
        }
      } else {
        null
      }

    listenerBus.post(SparkListenerTaskEnd(event.task.stageId, event.task.stageAttemptId,
      Utils.getFormattedClassName(event.task), event.reason, event.taskInfo, taskMetrics))
  }

  /**
   * Check [[SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL]] in job properties to see if we should
   * interrupt running tasks. Returns `false` if the property value is not a boolean value
   */
  private def shouldInterruptTaskThread(job: ActiveJob): Boolean = {
    if (job.properties == null) {
      false
    } else {
      val shouldInterruptThread =
        job.properties.getProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, "false")
      try {
        shouldInterruptThread.toBoolean
      } catch {
        case e: IllegalArgumentException =>
          logWarning(s"${SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL} in Job ${job.jobId} " +
            s"is invalid: $shouldInterruptThread. Using 'false' instead", e)
          false
      }
    }
  }

  /**
   * Responds to a task finishing. This is called inside the event loop so it assumes that it can
   * modify the scheduler's internal state. Use taskEnded() to post a task end event from outside.
   */
  private[scheduler] def handleTaskCompletion(event: CompletionEvent) {
    val task = event.task
    val stageId = task.stageId

    outputCommitCoordinator.taskCompleted(
      stageId,
      task.stageAttemptId,
      task.partitionId,
      event.taskInfo.attemptNumber, // this is a task attempt number
      event.reason)

    if (!stageIdToStage.contains(task.stageId)) {
      // The stage may have already finished when we get this event -- eg. maybe it was a
      // speculative task. It is important that we send the TaskEnd event in any case, so listeners
      // are properly notified and can choose to handle it. For instance, some listeners are
      // doing their own accounting and if they don't get the task end event they think
      // tasks are still running when they really aren't.
      postTaskEnd(event)

      // Skip all the actions if the stage has been cancelled.
      return
    }

    val stage = stageIdToStage(task.stageId)

    // Make sure the task's accumulators are updated before any other processing happens, so that
    // we can post a task end event before any jobs or stages are updated. The accumulators are
    // only updated in certain cases.
    event.reason match {
      case Success =>
        task match {
          case rt: ResultTask[_, _] =>
            val resultStage = stage.asInstanceOf[ResultStage]
            resultStage.activeJob match {
              case Some(job) =>
                // Only update the accumulator once for each result task.
                if (!job.finished(rt.outputId)) {
                  updateAccumulators(event)
                }
              case None => // Ignore update if task's job has finished.
            }
          case _ =>
            updateAccumulators(event)
        }
      case _: ExceptionFailure | _: TaskKilled => updateAccumulators(event)
      case _ =>
    }
    postTaskEnd(event)

    event.reason match {
      case Success =>
        task match {
          case rt: ResultTask[_, _] =>
            // Cast to ResultStage here because it's part of the ResultTask
            // TODO Refactor this out to a function that accepts a ResultStage
            val resultStage = stage.asInstanceOf[ResultStage]
            resultStage.activeJob match {
              case Some(job) =>
                if (!job.finished(rt.outputId)) {
                  job.finished(rt.outputId) = true
                  job.numFinished += 1
                  // If the whole job has finished, remove it
                  if (job.numFinished == job.numPartitions) {
                    markStageAsFinished(resultStage)
                    cleanupStateForJobAndIndependentStages(job)
                    try {
                      // killAllTaskAttempts will fail if a SchedulerBackend does not implement
                      // killTask.
                      logInfo(s"Job ${job.jobId} is finished. Cancelling potential speculative " +
                        "or zombie tasks for this job")
                      // ResultStage is only used by this job. It's safe to kill speculative or
                      // zombie tasks in this stage.
                      taskScheduler.killAllTaskAttempts(
                        stageId,
                        shouldInterruptTaskThread(job),
                        reason = "Stage finished")
                    } catch {
                      case e: UnsupportedOperationException =>
                        logWarning(s"Could not cancel tasks for stage $stageId", e)
                    }
                    listenerBus.post(
                      SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobSucceeded))
                  }

                  // taskSucceeded runs some user code that might throw an exception. Make sure
                  // we are resilient against that.
                  try {
                    job.listener.taskSucceeded(rt.outputId, event.result)
                  } catch {
                    case e: Throwable if !Utils.isFatalError(e) =>
                      // TODO: Perhaps we want to mark the resultStage as failed?
                      job.listener.jobFailed(new SparkDriverExecutionException(e))
                  }
                }
              case None =>
                logInfo("Ignoring result from " + rt + " because its job has finished")
            }

          case smt: ShuffleMapTask =>
            val shuffleStage = stage.asInstanceOf[ShuffleMapStage]
            shuffleStage.pendingPartitions -= task.partitionId
            val status = event.result.asInstanceOf[MapStatus]
            val execId = status.location.executorId
            logDebug("ShuffleMapTask finished on " + execId)
            if (failedEpoch.contains(execId) && smt.epoch <= failedEpoch(execId)) {
              logInfo(s"Ignoring possibly bogus $smt completion from executor $execId")
            } else {
              // The epoch of the task is acceptable (i.e., the task was launched after the most
              // recent failure we're aware of for the executor), so mark the task's output as
              // available.
              mapOutputTracker.registerMapOutput(
                shuffleStage.shuffleDep.shuffleId, smt.partitionId, status)
            }

            if (runningStages.contains(shuffleStage) && shuffleStage.pendingPartitions.isEmpty) {
              markStageAsFinished(shuffleStage)
              logInfo("looking for newly runnable stages")
              logInfo("running: " + runningStages)
              logInfo("waiting: " + waitingStages)
              logInfo("failed: " + failedStages)

              // This call to increment the epoch may not be strictly necessary, but it is retained
              // for now in order to minimize the changes in behavior from an earlier version of the
              // code. This existing behavior of always incrementing the epoch following any
              // successful shuffle map stage completion may have benefits by causing unneeded
              // cached map outputs to be cleaned up earlier on executors. In the future we can
              // consider removing this call, but this will require some extra investigation.
              // See https://github.com/apache/spark/pull/17955/files#r117385673 for more details.
              mapOutputTracker.incrementEpoch()

              clearCacheLocs()

              if (!shuffleStage.isAvailable) {
                // Some tasks had failed; let's resubmit this shuffleStage.
                // TODO: Lower-level scheduler should also deal with this
                logInfo("Resubmitting " + shuffleStage + " (" + shuffleStage.name +
                  ") because some of its tasks had failed: " +
                  shuffleStage.findMissingPartitions().mkString(", "))
                submitStage(shuffleStage)
              } else {
                markMapStageJobsAsFinished(shuffleStage)
                submitWaitingChildStages(shuffleStage)
              }
            }
        }

      case FetchFailed(bmAddress, shuffleId, mapId, _, failureMessage) =>
        val failedStage = stageIdToStage(task.stageId)
        val mapStage = shuffleIdToMapStage(shuffleId)

        if (failedStage.latestInfo.attemptNumber != task.stageAttemptId) {
          logInfo(s"Ignoring fetch failure from $task as it's from $failedStage attempt" +
            s" ${task.stageAttemptId} and there is a more recent attempt for that stage " +
            s"(attempt ${failedStage.latestInfo.attemptNumber}) running")
        } else {
          failedStage.failedAttemptIds.add(task.stageAttemptId)
          val shouldAbortStage =
            failedStage.failedAttemptIds.size >= maxConsecutiveStageAttempts ||
            disallowStageRetryForTest

          // It is likely that we receive multiple FetchFailed for a single stage (because we have
          // multiple tasks running concurrently on different executors). In that case, it is
          // possible the fetch failure has already been handled by the scheduler.
          if (runningStages.contains(failedStage)) {
            logInfo(s"Marking $failedStage (${failedStage.name}) as failed " +
              s"due to a fetch failure from $mapStage (${mapStage.name})")
            markStageAsFinished(failedStage, errorMessage = Some(failureMessage),
              willRetry = !shouldAbortStage)
          } else {
            logDebug(s"Received fetch failure from $task, but its from $failedStage which is no " +
              s"longer running")
          }

          if (mapStage.rdd.isBarrier()) {
            // Mark all the map as broken in the map stage, to ensure retry all the tasks on
            // resubmitted stage attempt.
            mapOutputTracker.unregisterAllMapOutput(shuffleId)
          } else if (mapId != -1) {
            // Mark the map whose fetch failed as broken in the map stage
            mapOutputTracker.unregisterMapOutput(shuffleId, mapId, bmAddress)
          }

          if (failedStage.rdd.isBarrier()) {
            failedStage match {
              case failedMapStage: ShuffleMapStage =>
                // Mark all the map as broken in the map stage, to ensure retry all the tasks on
                // resubmitted stage attempt.
                mapOutputTracker.unregisterAllMapOutput(failedMapStage.shuffleDep.shuffleId)

              case failedResultStage: ResultStage =>
                // Abort the failed result stage since we may have committed output for some
                // partitions.
                val reason = "Could not recover from a failed barrier ResultStage. Most recent " +
                  s"failure reason: $failureMessage"
                abortStage(failedResultStage, reason, None)
            }
          }

          if (shouldAbortStage) {
            val abortMessage = if (disallowStageRetryForTest) {
              "Fetch failure will not retry stage due to testing config"
            } else {
              s"""$failedStage (${failedStage.name})
                 |has failed the maximum allowable number of
                 |times: $maxConsecutiveStageAttempts.
                 |Most recent failure reason: $failureMessage""".stripMargin.replaceAll("\n", " ")
            }
            abortStage(failedStage, abortMessage, None)
          } else { // update failedStages and make sure a ResubmitFailedStages event is enqueued
            // TODO: Cancel running tasks in the failed stage -- cf. SPARK-17064
            val noResubmitEnqueued = !failedStages.contains(failedStage)
            failedStages += failedStage
            failedStages += mapStage
            if (noResubmitEnqueued) {
              // If the map stage is INDETERMINATE, which means the map tasks may return
              // different result when re-try, we need to re-try all the tasks of the failed
              // stage and its succeeding stages, because the input data will be changed after the
              // map tasks are re-tried.
              // Note that, if map stage is UNORDERED, we are fine. The shuffle partitioner is
              // guaranteed to be determinate, so the input data of the reducers will not change
              // even if the map tasks are re-tried.
              if (mapStage.rdd.outputDeterministicLevel == DeterministicLevel.INDETERMINATE) {
                // It's a little tricky to find all the succeeding stages of `failedStage`, because
                // each stage only know its parents not children. Here we traverse the stages from
                // the leaf nodes (the result stages of active jobs), and rollback all the stages
                // in the stage chains that connect to the `failedStage`. To speed up the stage
                // traversing, we collect the stages to rollback first. If a stage needs to
                // rollback, all its succeeding stages need to rollback to.
                val stagesToRollback = scala.collection.mutable.HashSet(failedStage)

                def collectStagesToRollback(stageChain: List[Stage]): Unit = {
                  if (stagesToRollback.contains(stageChain.head)) {
                    stageChain.drop(1).foreach(s => stagesToRollback += s)
                  } else {
                    stageChain.head.parents.foreach { s =>
                      collectStagesToRollback(s :: stageChain)
                    }
                  }
                }

                def generateErrorMessage(stage: Stage): String = {
                  "A shuffle map stage with indeterminate output was failed and retried. " +
                    s"However, Spark cannot rollback the $stage to re-process the input data, " +
                    "and has to fail this job. Please eliminate the indeterminacy by " +
                    "checkpointing the RDD before repartition and try again."
                }

                activeJobs.foreach(job => collectStagesToRollback(job.finalStage :: Nil))

                stagesToRollback.foreach {
                  case mapStage: ShuffleMapStage =>
                    val numMissingPartitions = mapStage.findMissingPartitions().length
                    if (numMissingPartitions < mapStage.numTasks) {
                      // TODO: support to rollback shuffle files.
                      // Currently the shuffle writing is "first write wins", so we can't re-run a
                      // shuffle map stage and overwrite existing shuffle files. We have to finish
                      // SPARK-8029 first.
                      abortStage(mapStage, generateErrorMessage(mapStage), None)
                    }

                  case resultStage: ResultStage if resultStage.activeJob.isDefined =>
                    val numMissingPartitions = resultStage.findMissingPartitions().length
                    if (numMissingPartitions < resultStage.numTasks) {
                      // TODO: support to rollback result tasks.
                      abortStage(resultStage, generateErrorMessage(resultStage), None)
                    }

                  case _ =>
                }
              }

              // We expect one executor failure to trigger many FetchFailures in rapid succession,
              // but all of those task failures can typically be handled by a single resubmission of
              // the failed stage. We avoid flooding the scheduler's event queue with resubmit
              // messages by checking whether a resubmit is already in the event queue for the
              // failed stage. If there is already a resubmit enqueued for a different failed
              // stage, that event would also be sufficient to handle the current failed stage, but
              // producing a resubmit for each failed stage makes debugging and logging a little
              // simpler while not producing an overwhelming number of scheduler events.
              logInfo(
                s"Resubmitting $mapStage (${mapStage.name}) and " +
                s"$failedStage (${failedStage.name}) due to fetch failure"
              )
              // Delay the resubmit so further fetch failures for the same stage can be coalesced.
              messageScheduler.schedule(
                new Runnable {
                  override def run(): Unit = eventProcessLoop.post(ResubmitFailedStages)
                },
                DAGScheduler.RESUBMIT_TIMEOUT,
                TimeUnit.MILLISECONDS
              )
            }
          }

          // TODO: mark the executor as failed only if there were lots of fetch failures on it
          if (bmAddress != null) {
            val hostToUnregisterOutputs = if (env.blockManager.externalShuffleServiceEnabled &&
              unRegisterOutputOnHostOnFetchFailure) {
              // We had a fetch failure with the external shuffle service, so we
              // assume all shuffle data on the node is bad.
              Some(bmAddress.host)
            } else {
              // Unregister shuffle data just for one executor (we don't have any
              // reason to believe shuffle data has been lost for the entire host).
              None
            }
            removeExecutorAndUnregisterOutputs(
              execId = bmAddress.executorId,
              fileLost = true,
              hostToUnregisterOutputs = hostToUnregisterOutputs,
              maybeEpoch = Some(task.epoch))
          }
        }

      case failure: TaskFailedReason if task.isBarrier =>
        // Also handle the task failed reasons here.
        failure match {
          case Resubmitted =>
            handleResubmittedFailure(task, stage)

          case _ => // Do nothing.
        }

        // Always fail the current stage and retry all the tasks when a barrier task fail.
        val failedStage = stageIdToStage(task.stageId)
        if (failedStage.latestInfo.attemptNumber != task.stageAttemptId) {
          logInfo(s"Ignoring task failure from $task as it's from $failedStage attempt" +
            s" ${task.stageAttemptId} and there is a more recent attempt for that stage " +
            s"(attempt ${failedStage.latestInfo.attemptNumber}) running")
        } else {
          logInfo(s"Marking $failedStage (${failedStage.name}) as failed due to a barrier task " +
            "failed.")
          val message = s"Stage failed because barrier task $task finished unsuccessfully.\n" +
            failure.toErrorString
          try {
            // killAllTaskAttempts will fail if a SchedulerBackend does not implement killTask.
            val reason = s"Task $task from barrier stage $failedStage (${failedStage.name}) " +
              "failed."
            taskScheduler.killAllTaskAttempts(stageId, interruptThread = false, reason)
          } catch {
            case e: UnsupportedOperationException =>
              // Cannot continue with barrier stage if failed to cancel zombie barrier tasks.
              // TODO SPARK-24877 leave the zombie tasks and ignore their completion events.
              logWarning(s"Could not kill all tasks for stage $stageId", e)
              abortStage(failedStage, "Could not kill zombie barrier tasks for stage " +
                s"$failedStage (${failedStage.name})", Some(e))
          }
          markStageAsFinished(failedStage, Some(message))

          failedStage.failedAttemptIds.add(task.stageAttemptId)
          // TODO Refactor the failure handling logic to combine similar code with that of
          // FetchFailed.
          val shouldAbortStage =
            failedStage.failedAttemptIds.size >= maxConsecutiveStageAttempts ||
              disallowStageRetryForTest

          if (shouldAbortStage) {
            val abortMessage = if (disallowStageRetryForTest) {
              "Barrier stage will not retry stage due to testing config. Most recent failure " +
                s"reason: $message"
            } else {
              s"""$failedStage (${failedStage.name})
                 |has failed the maximum allowable number of
                 |times: $maxConsecutiveStageAttempts.
                 |Most recent failure reason: $message
               """.stripMargin.replaceAll("\n", " ")
            }
            abortStage(failedStage, abortMessage, None)
          } else {
            failedStage match {
              case failedMapStage: ShuffleMapStage =>
                // Mark all the map as broken in the map stage, to ensure retry all the tasks on
                // resubmitted stage attempt.
                mapOutputTracker.unregisterAllMapOutput(failedMapStage.shuffleDep.shuffleId)

              case failedResultStage: ResultStage =>
                // Abort the failed result stage since we may have committed output for some
                // partitions.
                val reason = "Could not recover from a failed barrier ResultStage. Most recent " +
                  s"failure reason: $message"
                abortStage(failedResultStage, reason, None)
            }
          }
          // In case multiple task failures triggered for a single stage attempt, ensure we only
          // resubmit the failed stage once.
          val noResubmitEnqueued = !failedStages.contains(failedStage)
          failedStages += failedStage
          if (noResubmitEnqueued) {
            logInfo(s"Resubmitting $failedStage (${failedStage.name}) due to barrier stage " +
              "failure.")
            messageScheduler.schedule(new Runnable {
              override def run(): Unit = eventProcessLoop.post(ResubmitFailedStages)
            }, DAGScheduler.RESUBMIT_TIMEOUT, TimeUnit.MILLISECONDS)
          }
        }

      case Resubmitted =>
        handleResubmittedFailure(task, stage)

      case _: TaskCommitDenied =>
        // Do nothing here, left up to the TaskScheduler to decide how to handle denied commits

      case _: ExceptionFailure | _: TaskKilled =>
        // Nothing left to do, already handled above for accumulator updates.

      case TaskResultLost =>
        // Do nothing here; the TaskScheduler handles these failures and resubmits the task.

      case _: ExecutorLostFailure | UnknownReason =>
        // Unrecognized failure - also do nothing. If the task fails repeatedly, the TaskScheduler
        // will abort the job.
    }
  }

  /** Re-marks a resubmitted shuffle-map task's partition as pending on its stage. */
  private def handleResubmittedFailure(task: Task[_], stage: Stage): Unit = {
    logInfo(s"Resubmitted $task, so marking it as still running.")
    stage match {
      case sms: ShuffleMapStage =>
        sms.pendingPartitions += task.partitionId

      case _ =>
        throw new SparkException("TaskSetManagers should only send Resubmitted task " +
          "statuses for tasks in ShuffleMapStages.")
    }
  }

  private[scheduler] def markMapStageJobsAsFinished(shuffleStage: ShuffleMapStage): Unit = {
    // Mark any map-stage jobs waiting on this stage as finished
    if (shuffleStage.isAvailable && shuffleStage.mapStageJobs.nonEmpty) {
      val stats = mapOutputTracker.getStatistics(shuffleStage.shuffleDep)
      for (job <- shuffleStage.mapStageJobs) {
        markMapStageJobAsFinished(job, stats)
      }
    }
  }

  /**
   * Responds to an executor being lost. This is called inside the event loop, so it assumes it can
   * modify the scheduler's internal state. Use executorLost() to post a loss event from outside.
* * We will also assume that we've lost all shuffle blocks associated with the executor if the * executor serves its own blocks (i.e., we're not using external shuffle), the entire slave * is lost (likely including the shuffle service), or a FetchFailed occurred, in which case we * presume all shuffle data related to this executor to be lost. * * Optionally the epoch during which the failure was caught can be passed to avoid allowing * stray fetch failures from possibly retriggering the detection of a node as lost. */ private[scheduler] def handleExecutorLost( execId: String, workerLost: Boolean): Unit = { // if the cluster manager explicitly tells us that the entire worker was lost, then // we know to unregister shuffle output. (Note that "worker" specifically refers to the process // from a Standalone cluster, where the shuffle service lives in the Worker.) val fileLost = workerLost || !env.blockManager.externalShuffleServiceEnabled removeExecutorAndUnregisterOutputs( execId = execId, fileLost = fileLost, hostToUnregisterOutputs = None, maybeEpoch = None) } private def removeExecutorAndUnregisterOutputs( execId: String, fileLost: Boolean, hostToUnregisterOutputs: Option[String], maybeEpoch: Option[Long] = None): Unit = { val currentEpoch = maybeEpoch.getOrElse(mapOutputTracker.getEpoch) if (!failedEpoch.contains(execId) || failedEpoch(execId) < currentEpoch) { failedEpoch(execId) = currentEpoch logInfo("Executor lost: %s (epoch %d)".format(execId, currentEpoch)) blockManagerMaster.removeExecutor(execId) if (fileLost) { hostToUnregisterOutputs match { case Some(host) => logInfo("Shuffle files lost for host: %s (epoch %d)".format(host, currentEpoch)) mapOutputTracker.removeOutputsOnHost(host) case None => logInfo("Shuffle files lost for executor: %s (epoch %d)".format(execId, currentEpoch)) mapOutputTracker.removeOutputsOnExecutor(execId) } clearCacheLocs() } else { logDebug("Additional executor lost message for %s (epoch %d)".format(execId, currentEpoch)) } } } 
/** * Responds to a worker being removed. This is called inside the event loop, so it assumes it can * modify the scheduler's internal state. Use workerRemoved() to post a loss event from outside. * * We will assume that we've lost all shuffle blocks associated with the host if a worker is * removed, so we will remove them all from MapStatus. * * @param workerId identifier of the worker that is removed. * @param host host of the worker that is removed. * @param message the reason why the worker is removed. */ private[scheduler] def handleWorkerRemoved( workerId: String, host: String, message: String): Unit = { logInfo("Shuffle files lost for worker %s on host %s".format(workerId, host)) mapOutputTracker.removeOutputsOnHost(host) clearCacheLocs() } private[scheduler] def handleExecutorAdded(execId: String, host: String) { // remove from failedEpoch(execId) ? if (failedEpoch.contains(execId)) { logInfo("Host added was in lost list earlier: " + host) failedEpoch -= execId } } private[scheduler] def handleStageCancellation(stageId: Int, reason: Option[String]) { stageIdToStage.get(stageId) match { case Some(stage) => val jobsThatUseStage: Array[Int] = stage.jobIds.toArray jobsThatUseStage.foreach { jobId => val reasonStr = reason match { case Some(originalReason) => s"because $originalReason" case None => s"because Stage $stageId was cancelled" } handleJobCancellation(jobId, Option(reasonStr)) } case None => logInfo("No active jobs to kill for Stage " + stageId) } } private[scheduler] def handleJobCancellation(jobId: Int, reason: Option[String]) { if (!jobIdToStageIds.contains(jobId)) { logDebug("Trying to cancel unregistered job " + jobId) } else { failJobAndIndependentStages( jobIdToActiveJob(jobId), "Job %d cancelled %s".format(jobId, reason.getOrElse(""))) } } /** * Marks a stage as finished and removes it from the list of running stages. 
*/ private def markStageAsFinished( stage: Stage, errorMessage: Option[String] = None, willRetry: Boolean = false): Unit = { val serviceTime = stage.latestInfo.submissionTime match { case Some(t) => "%.03f".format((clock.getTimeMillis() - t) / 1000.0) case _ => "Unknown" } if (errorMessage.isEmpty) { logInfo("%s (%s) finished in %s s".format(stage, stage.name, serviceTime)) stage.latestInfo.completionTime = Some(clock.getTimeMillis()) // Clear failure count for this stage, now that it's succeeded. // We only limit consecutive failures of stage attempts,so that if a stage is // re-used many times in a long-running job, unrelated failures don't eventually cause the // stage to be aborted. stage.clearFailures() } else { stage.latestInfo.stageFailed(errorMessage.get) logInfo(s"$stage (${stage.name}) failed in $serviceTime s due to ${errorMessage.get}") } if (!willRetry) { outputCommitCoordinator.stageEnd(stage.id) } listenerBus.post(SparkListenerStageCompleted(stage.latestInfo)) runningStages -= stage } /** * Aborts all jobs depending on a particular Stage. This is called in response to a task set * being canceled by the TaskScheduler. Use taskSetFailed() to inject this event from outside. */ private[scheduler] def abortStage( failedStage: Stage, reason: String, exception: Option[Throwable]): Unit = { if (!stageIdToStage.contains(failedStage.id)) { // Skip all the actions if the stage has been removed. return } val dependentJobs: Seq[ActiveJob] = activeJobs.filter(job => stageDependsOn(job.finalStage, failedStage)).toSeq failedStage.latestInfo.completionTime = Some(clock.getTimeMillis()) for (job <- dependentJobs) { failJobAndIndependentStages(job, s"Job aborted due to stage failure: $reason", exception) } if (dependentJobs.isEmpty) { logInfo("Ignoring failure of " + failedStage + " because all jobs depending on it are done") } } /** Fails a job and all stages that are only used by that job, and cleans up relevant state. 
*/ private def failJobAndIndependentStages( job: ActiveJob, failureReason: String, exception: Option[Throwable] = None): Unit = { val error = new SparkException(failureReason, exception.getOrElse(null)) var ableToCancelStages = true // Cancel all independent, running stages. val stages = jobIdToStageIds(job.jobId) if (stages.isEmpty) { logError("No stages registered for job " + job.jobId) } stages.foreach { stageId => val jobsForStage: Option[HashSet[Int]] = stageIdToStage.get(stageId).map(_.jobIds) if (jobsForStage.isEmpty || !jobsForStage.get.contains(job.jobId)) { logError( "Job %d not registered for stage %d even though that stage was registered for the job" .format(job.jobId, stageId)) } else if (jobsForStage.get.size == 1) { if (!stageIdToStage.contains(stageId)) { logError(s"Missing Stage for stage with id $stageId") } else { // This is the only job that uses this stage, so fail the stage if it is running. val stage = stageIdToStage(stageId) if (runningStages.contains(stage)) { try { // cancelTasks will fail if a SchedulerBackend does not implement killTask taskScheduler.cancelTasks(stageId, shouldInterruptTaskThread(job)) markStageAsFinished(stage, Some(failureReason)) } catch { case e: UnsupportedOperationException => logWarning(s"Could not cancel tasks for stage $stageId", e) ableToCancelStages = false } } } } } if (ableToCancelStages) { // SPARK-15783 important to cleanup state first, just for tests where we have some asserts // against the state. Otherwise we have a *little* bit of flakiness in the tests. cleanupStateForJobAndIndependentStages(job) job.listener.jobFailed(error) listenerBus.post(SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobFailed(error))) } } /** Return true if one of stage's ancestors is target. 
*/ private def stageDependsOn(stage: Stage, target: Stage): Boolean = { if (stage == target) { return true } val visitedRdds = new HashSet[RDD[_]] // We are manually maintaining a stack here to prevent StackOverflowError // caused by recursively visiting val waitingForVisit = new ArrayStack[RDD[_]] def visit(rdd: RDD[_]) { if (!visitedRdds(rdd)) { visitedRdds += rdd for (dep <- rdd.dependencies) { dep match { case shufDep: ShuffleDependency[_, _, _] => val mapStage = getOrCreateShuffleMapStage(shufDep, stage.firstJobId) if (!mapStage.isAvailable) { waitingForVisit.push(mapStage.rdd) } // Otherwise there's no need to follow the dependency back case narrowDep: NarrowDependency[_] => waitingForVisit.push(narrowDep.rdd) } } } } waitingForVisit.push(stage.rdd) while (waitingForVisit.nonEmpty) { visit(waitingForVisit.pop()) } visitedRdds.contains(target.rdd) } /** * Gets the locality information associated with a partition of a particular RDD. * * This method is thread-safe and is called from both DAGScheduler and SparkContext. * * @param rdd whose partitions are to be looked at * @param partition to lookup locality information for * @return list of machines that are preferred by the partition */ private[spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = { getPreferredLocsInternal(rdd, partition, new HashSet) } /** * Recursive implementation for getPreferredLocs. * * This method is thread-safe because it only accesses DAGScheduler state through thread-safe * methods (getCacheLocs()); please be careful when modifying this method, because any new * DAGScheduler state accessed by it may require additional synchronization. */ private def getPreferredLocsInternal( rdd: RDD[_], partition: Int, visited: HashSet[(RDD[_], Int)]): Seq[TaskLocation] = { // If the partition has already been visited, no need to re-visit. // This avoids exponential path exploration. 
SPARK-695 if (!visited.add((rdd, partition))) { // Nil has already been returned for previously visited partitions. return Nil } // If the partition is cached, return the cache locations val cached = getCacheLocs(rdd)(partition) if (cached.nonEmpty) { return cached } // If the RDD has some placement preferences (as is the case for input RDDs), get those val rddPrefs = rdd.preferredLocations(rdd.partitions(partition)).toList if (rddPrefs.nonEmpty) { return rddPrefs.map(TaskLocation(_)) } // If the RDD has narrow dependencies, pick the first partition of the first narrow dependency // that has any placement preferences. Ideally we would choose based on transfer sizes, // but this will do for now. rdd.dependencies.foreach { case n: NarrowDependency[_] => for (inPart <- n.getParents(partition)) { val locs = getPreferredLocsInternal(n.rdd, inPart, visited) if (locs != Nil) { return locs } } case _ => } Nil } /** Mark a map stage job as finished with the given output stats, and report to its listener. */ def markMapStageJobAsFinished(job: ActiveJob, stats: MapOutputStatistics): Unit = { // In map stage jobs, we only create a single "task", which is to finish all of the stage // (including reusing any previous map outputs, etc); so we just mark task 0 as done job.finished(0) = true job.numFinished += 1 job.listener.taskSucceeded(0, stats) cleanupStateForJobAndIndependentStages(job) listenerBus.post(SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobSucceeded)) } def stop() { messageScheduler.shutdownNow() eventProcessLoop.stop() taskScheduler.stop() } eventProcessLoop.start() } private[scheduler] class DAGSchedulerEventProcessLoop(dagScheduler: DAGScheduler) extends EventLoop[DAGSchedulerEvent]("dag-scheduler-event-loop") with Logging { private[this] val timer = dagScheduler.metricsSource.messageProcessingTimer /** * The main event loop of the DAG scheduler. 
*/ override def onReceive(event: DAGSchedulerEvent): Unit = { val timerContext = timer.time() try { doOnReceive(event) } finally { timerContext.stop() } } private def doOnReceive(event: DAGSchedulerEvent): Unit = event match { case JobSubmitted(jobId, rdd, func, partitions, callSite, listener, properties) => dagScheduler.handleJobSubmitted(jobId, rdd, func, partitions, callSite, listener, properties) case MapStageSubmitted(jobId, dependency, callSite, listener, properties) => dagScheduler.handleMapStageSubmitted(jobId, dependency, callSite, listener, properties) case StageCancelled(stageId, reason) => dagScheduler.handleStageCancellation(stageId, reason) case JobCancelled(jobId, reason) => dagScheduler.handleJobCancellation(jobId, reason) case JobGroupCancelled(groupId) => dagScheduler.handleJobGroupCancelled(groupId) case AllJobsCancelled => dagScheduler.doCancelAllJobs() case ExecutorAdded(execId, host) => dagScheduler.handleExecutorAdded(execId, host) case ExecutorLost(execId, reason) => val workerLost = reason match { case SlaveLost(_, true) => true case _ => false } dagScheduler.handleExecutorLost(execId, workerLost) case WorkerRemoved(workerId, host, message) => dagScheduler.handleWorkerRemoved(workerId, host, message) case BeginEvent(task, taskInfo) => dagScheduler.handleBeginEvent(task, taskInfo) case SpeculativeTaskSubmitted(task) => dagScheduler.handleSpeculativeTaskSubmitted(task) case GettingResultEvent(taskInfo) => dagScheduler.handleGetTaskResult(taskInfo) case completion: CompletionEvent => dagScheduler.handleTaskCompletion(completion) case TaskSetFailed(taskSet, reason, exception) => dagScheduler.handleTaskSetFailed(taskSet, reason, exception) case ResubmitFailedStages => dagScheduler.resubmitFailedStages() } override def onError(e: Throwable): Unit = { logError("DAGSchedulerEventProcessLoop failed; shutting down SparkContext", e) try { dagScheduler.doCancelAllJobs() } catch { case t: Throwable => logError("DAGScheduler failed to cancel all jobs.", 
t) } dagScheduler.sc.stopInNewThread() } override def onStop(): Unit = { // Cancel any active jobs in postStop hook dagScheduler.cleanUpAfterSchedulerStop() } } private[spark] object DAGScheduler { // The time, in millis, to wait for fetch failure events to stop coming in after one is detected; // this is a simplistic way to avoid resubmitting tasks in the non-fetchable map stage one by one // as more failure events come in val RESUBMIT_TIMEOUT = 200 // Number of consecutive stage attempts allowed before a stage is aborted val DEFAULT_MAX_CONSECUTIVE_STAGE_ATTEMPTS = 4 }
Aegeaner/spark
core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
Scala
apache-2.0
95,541
package net.liftweb.util /* * Copyright 2006-2008 WorldWide Conferencing, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import _root_.org.apache.commons.collections.map.{LRUMap, AbstractLinkedMap} import _root_.org.apache.commons.collections.map.AbstractLinkedMap.LinkEntry import _root_.scala.collection.jcl.Conversions._ /** * LRU Cache wrapping {@link org.apache.commons.collections.map.LRUMap} * * @param size the maximum number of Elements allowed in the LRU map * @param loadFactor the Load Factor to construct our LRU with. */ class LRU[KeyType, ValueType](size: Int, loadFactor: Box[Float]) { // Alternate constructor that gives you no load factor. def this(size: Int) = this(size, Empty) private val map = loadFactor match { case Full(lf) => new LRUMap(size, lf) case _ => new LRUMap(size) } def update(k: KeyType, v: ValueType) { map.put(k, v) } def remove(k: KeyType) = map.remove(k) def apply(k: KeyType): ValueType = map.get(k).asInstanceOf[ValueType] def contains(k: KeyType): Boolean = map.containsKey(k) def keys: List[KeyType] = map.keySet().toList.map(_.asInstanceOf[KeyType]) }
beni55/liftweb
lift-util/src/main/scala/net/liftweb/util/LRU.scala
Scala
apache-2.0
1,649
package suggestions package gui import scala.language.reflectiveCalls import scala.collection.mutable.ListBuffer import scala.collection.JavaConverters._ import scala.concurrent._ import scala.concurrent.ExecutionContext.Implicits.global import scala.util.{ Try, Success, Failure } import scala.swing.Reactions.Reaction import scala.swing.event.Event import rx.lang.scala.Observable import rx.lang.scala.subscriptions.Subscription /** Basic facilities for dealing with Swing-like components. * * Instead of committing to a particular widget implementation * functionality has been factored out here to deal only with * abstract types like `ValueChanged` or `TextField`. * Extractors for abstract events like `ValueChanged` have also * been factored out into corresponding abstract `val`s. */ trait SwingApi { type ValueChanged <: Event val ValueChanged: { def unapply(x: Event): Option[TextField] } type ButtonClicked <: Event val ButtonClicked: { def unapply(x: Event): Option[Button] } type TextField <: { def text: String def subscribe(r: Reaction): Unit def unsubscribe(r: Reaction): Unit } type Button <: { def subscribe(r: Reaction): Unit def unsubscribe(r: Reaction): Unit } implicit class TextFieldOps(field: TextField) { /** Returns a stream of text field values entered in the given text field. * * @param field the text field * @return an observable with a stream of text field updates */ def textValues: Observable[String] = Observable(observer => { field subscribe { case ValueChanged(field) => observer.onNext(field.text) } Subscription() }) } implicit class ButtonOps(button: Button) { /** Returns a stream of button clicks. * * @param field the button * @return an observable with a stream of buttons that have been clicked */ def clicks: Observable[Button] = Observable(observer => { button subscribe { case ButtonClicked(button) => observer.onNext(button) } Subscription() }) } }
albertpastrana/reactive
suggestions/src/main/scala/suggestions/gui/SwingApi.scala
Scala
mit
2,087
package ml.combust.mleap.runtime.serialization import ml.combust.mleap.core.types._ import ml.combust.mleap.tensor.{ByteString, Tensor} import ml.combust.mleap.runtime.MleapSupport._ import ml.combust.mleap.runtime.frame.{DefaultLeapFrame, Row} import org.scalatest.FunSpec /** * Created by hollinwilkins on 11/1/16. */ class FrameSerializerSpec extends FunSpec { private val schema = StructType(StructField("features", TensorType(BasicType.Double)), StructField("name", ScalarType.String), StructField("list_data", ListType(BasicType.String)), StructField("nullable_double", ScalarType.Double), StructField("float", ScalarType.Float), StructField("byte_tensor", TensorType(BasicType.Byte)), StructField("short_list", ListType(BasicType.Short)), StructField("byte_string", ScalarType.ByteString), StructField("nullable_string", ScalarType.String)).get private val dataset = Seq(Row(Tensor.denseVector(Array(20.0, 10.0, 5.0)), "hello", Seq("hello", "there"), 56.7d, 32.4f, Tensor.denseVector(Array[Byte](1, 2, 3, 4)), Seq[Short](99, 12, 45), ByteString(Array[Byte](32, 4, 55, 67)), null)) private val frame = DefaultLeapFrame(schema, dataset) describe("with format ml.combust.mleap.json") { it("serializes the leap frame as JSON") { val bytes = frame.writer("ml.combust.mleap.json").toBytes().get val dFrame = FrameReader("ml.combust.mleap.json").fromBytes(bytes).get frame.writer("ml.combust.mleap.json") assert(dFrame.schema == frame.schema) assert(dFrame.dataset == frame.dataset) } describe("row serializer") { it("serializes rows as JSON") { val writer = frame.schema.rowWriter("ml.combust.mleap.json") val reader = frame.schema.rowReader("ml.combust.mleap.json") val row = frame.dataset(0) val bytes = writer.toBytes(row).get val dRow = reader.fromBytes(bytes).get assert(row == dRow) } } } describe("with format ml.combust.mleap.binary") { it("serializes the leap frame as binary") { val bytes = frame.writer("ml.combust.mleap.binary").toBytes().get val dFrame = 
FrameReader("ml.combust.mleap.binary").fromBytes(bytes).get assert(dFrame.schema == frame.schema) assert(dFrame.dataset == frame.dataset) } describe("row serializer") { it("serializes rows as binary") { val writer = frame.schema.rowWriter("ml.combust.mleap.binary") val reader = frame.schema.rowReader("ml.combust.mleap.binary") val row = frame.dataset(0) val bytes = writer.toBytes(row).get val dRow = reader.fromBytes(bytes).get assert(row == dRow) } } } }
combust-ml/mleap
mleap-runtime/src/test/scala/ml/combust/mleap/runtime/serialization/FrameSerializerSpec.scala
Scala
apache-2.0
2,718
package me.invkrh.raft.core import scala.concurrent.duration._ import scala.language.postfixOps import com.typesafe.config.ConfigFactory import org.scalatest.FlatSpecLike import me.invkrh.raft.storage.DataStore class ServerConfTest extends FlatSpecLike { "ServerConf" should "be built from configure file" in { val confStr = """ |election.timeout.min.ms = 150 |election.timeout.max.ms = 150 |heartbeat.interval.ms = 100 |rpc.retries = 1 |datastore.type = memory """.stripMargin val config = ConfigFactory.parseString(confStr) val result = ServerConf(config) val expected = ServerConf(150 millis, 150 millis, 100 millis, 1, DataStore(config.getConfig("datastore"))) assertResult(expected) { result } } }
invkrh/akka-raft
src/test/scala/me/invkrh/raft/core/ServerConfTest.scala
Scala
mit
791
package WorkDayLength.Reports import java.time.format.DateTimeFormatter import java.time.{Duration, LocalTime} import WorkDayLength.{TimeEntry, TimeEntryHelper} /** * Created by dyahofarov on 07/11/2016. */ object DayLength { def result(entries: List[TimeEntry], dayStartsAt: LocalTime, dayEndsAt: LocalTime): String = { def closestIndex(entries: List[TimeEntry], time: LocalTime): Int = { entries. zipWithIndex. map(pair => { val distance = Duration.between(time, pair._1.startsAt.toLocalTime).getSeconds val index = pair._2 (Math.abs(distance), index) }). minBy(_._1). _2 } val closestIndexToStart = closestIndex(entries, dayStartsAt) val closestIndexToEnd = closestIndex(entries, dayEndsAt) val workEntries = entries.slice(closestIndexToStart, closestIndexToEnd + 1) // TODO: Summerize properly val startAt = TimeEntryHelper.startsEarliest(workEntries).startsAt val endAt = TimeEntryHelper.endsLatest(workEntries).endsAt val overallDuration = TimeEntryHelper.addDurations(workEntries) startAt.toLocalDate + ": Worked from " + startAt.toLocalTime + " till " + endAt.toLocalTime + " for " + TimeEntryHelper.DurationParts(overallDuration).toString } }
denyago/work_day_length
src/main/scala/WorkDayLength/Reports/DayLength.scala
Scala
mit
1,280
/** * Copyright (C) 2010 Orbeon, Inc. * * This program is free software; you can redistribute it and/or modify it under the terms of the * GNU Lesser General Public License as published by the Free Software Foundation; either version * 2.1 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Lesser General Public License for more details. * * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html */ package org.orbeon.oxf.xforms.function import org.orbeon.oxf.xforms.analysis.{ConstantXPathAnalysis, ElementAnalysis, PathMapXPathAnalysis} import org.orbeon.saxon.expr.PathMap import org.orbeon.saxon.expr.PathMap.PathMapNodeSet trait MatchSimpleAnalysis { def matchSimpleAnalysis(pathMap: PathMap, analysisOption: Option[ElementAnalysis]): PathMapNodeSet = analysisOption match { case Some(element) if element.bindingAnalysis.isDefined && element.bindingAnalysis.get.figuredOutDependencies => // Clone the PathMap first because the nodes returned must belong to this PathMap element.bindingAnalysis.get match { case bindingAnalysis: PathMapXPathAnalysis => val clonedContextPathMap = bindingAnalysis.pathmap.get.clone pathMap.addRoots(clonedContextPathMap.getPathMapRoots) clonedContextPathMap.findFinalNodes case bindingAnalysis: ConstantXPathAnalysis if bindingAnalysis.figuredOutDependencies => null case _ => pathMap.setInvalidated(true) null } case _ => // Either there is no analysis at all or we couldn't figure out binding analysis pathMap.setInvalidated(true) null } }
orbeon/orbeon-forms
xforms-runtime/jvm/src/main/scala/org/orbeon/oxf/xforms/function/MatchSimpleAnalysis.scala
Scala
lgpl-2.1
1,871
package model.battle /** * Created by salim on 12/09/2016. */ object BattleTactic extends Enumeration { val Fight, ChangePokemon, UseItem, Flee = Value }
salimfadhley/scalamoo
src/main/scala/model/battle/BattleTactic.scala
Scala
mit
161
/* * Copyright 2009-2010 WorldWide Conferencing, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.liftweb package json import scala.annotation.switch /** JSON parser. */ object JsonParser { import java.io._ class ParseException(message: String, cause: Exception) extends Exception(message, cause) /** Parsed tokens from low level pull parser. */ sealed abstract class Token case object OpenObj extends Token case object CloseObj extends Token case class FieldStart(name: String) extends Token case object End extends Token case class StringVal(value: String) extends Token case class IntVal(value: BigInt) extends Token case class DoubleVal(value: Double) extends Token case class BoolVal(value: Boolean) extends Token case object NullVal extends Token case object OpenArr extends Token case object CloseArr extends Token /** Return parsed JSON. * @throws ParseException is thrown if parsing fails */ def parse(s: String): JValue = parse(new Buffer(new StringReader(s), false)) /** Return parsed JSON. * @param closeAutomatically true (default) if the Reader is automatically closed on EOF * @throws ParseException is thrown if parsing fails */ def parse(s: Reader, closeAutomatically: Boolean = true): JValue = parse(new Buffer(s, closeAutomatically)) /** Return parsed JSON. */ def parseOpt(s: String): Option[JValue] = try { parse(s).toOpt } catch { case e: Exception => None } /** Return parsed JSON. 
* @param closeAutomatically true (default) if the Reader is automatically closed on EOF */ def parseOpt(s: Reader, closeAutomatically: Boolean = true): Option[JValue] = try { parse(s, closeAutomatically).toOpt } catch { case e: Exception => None } /** Parse in pull parsing style. * Use <code>p.nextToken</code> to parse tokens one by one from a string. * @see net.liftweb.json.JsonParser.Token */ def parse[A](s: String, p: Parser => A): A = parse(new StringReader(s), p) /** Parse in pull parsing style. * Use <code>p.nextToken</code> to parse tokens one by one from a stream. * The Reader must be closed when parsing is stopped. * @see net.liftweb.json.JsonParser.Token */ def parse[A](s: Reader, p: Parser => A): A = p(new Parser(new Buffer(s, false))) private[json] def parse(buf: Buffer): JValue = { try { astParser(new Parser(buf)) } catch { case e: ParseException => throw e case e: Exception => throw new ParseException("parsing failed", e) } finally { buf.release } } // JSON hex unicode strings (\\u12AF) are translated into characters through // this array. Each number in the array corresponds to the 4-bit value that // one number in the hex string will represent. These are combined when // reading the unicode string. private[this] final val HexChars: Array[Int] = { val chars = new Array[Int](128) var i = 0 while (i < 10) { chars(i + '0') = i i += 1 } i = 0 while (i < 16) { chars(i + 'a') = 10 + i chars(i + 'A') = 10 + i i += 1 } chars } // The size of one hex character in bits. 
private[this] final val hexCharSize = 4 // in bits private[json] def unquote(string: String): String = unquote(new JsonParser.Buffer(new java.io.StringReader(string), false)) private[this] def unquote(buf: JsonParser.Buffer): String = { def unquote0(buf: JsonParser.Buffer): String = { val builder = buf.builder builder.delete(0, builder.length()) var c = '\\\\' while (c != '"') { if (c == '\\\\') { buf.substring(intoBuilder = true) (buf.next: @switch) match { case '"' => builder.append('"') case '\\\\' => builder.append('\\\\') case '/' => builder.append('/') case 'b' => builder.append('\\b') case 'f' => builder.append('\\f') case 'n' => builder.append('\\n') case 'r' => builder.append('\\r') case 't' => builder.append('\\t') case 'u' => var byte = 0 var finalChar = 0 val chars = Array(buf.next, buf.next, buf.next, buf.next) while (byte < chars.length) { finalChar = (finalChar << hexCharSize) | HexChars(chars(byte).toInt) byte += 1 } builder.appendCodePoint(finalChar.toChar) case _ => builder.append('\\\\') } buf.mark } c = buf.next } buf.substring(intoBuilder = true) builder.toString } buf.eofIsFailure = true buf.mark var c = buf.next var forcedReturn: String = null while (c != '"') { (c: @switch) match { case '\\\\' => forcedReturn = unquote0(buf) c = '"' case _ => c = buf.next } } buf.eofIsFailure = false if (forcedReturn == null) { new String(buf.substring()) } else { forcedReturn } } private[json] def parseDouble(s: String) = { s.toDouble } // Intermediate objects and arrays which can be grown mutably for performance. // These are finalized into immutable JObject and JArray. 
private[this] case class IntermediateJObject(fields: scala.collection.mutable.ListBuffer[JField]) private[this] case class IntermediateJArray(bits: scala.collection.mutable.ListBuffer[JValue]) private val astParser = (p: Parser) => { val vals = new ValStack(p) var token: Token = null var root: Option[JValue] = None // At the end of an object, if we're looking at an intermediate form of an // object or array, gather up all their component parts and create the final // object or array. def closeBlock(v: Any) { def toJValue(x: Any) = x match { case json: JValue => json case other: IntermediateJObject => JObject(other.fields.result) case other: IntermediateJArray => JArray(other.bits.result) case _ => p.fail("unexpected field " + x) } vals.peekOption match { case Some(JField(name: String, value)) => vals.pop(classOf[JField]) val obj = vals.peek(classOf[IntermediateJObject]) obj.fields.append(JField(name, toJValue(v))) case Some(o: IntermediateJObject) => o.fields.append(vals.peek(classOf[JField])) case Some(a: IntermediateJArray) => a.bits.append(toJValue(v)) case Some(x) => p.fail("expected field, array or object but got " + x) case None => root = Some(toJValue(v)) } } def newValue(v: JValue) { if (!vals.isEmpty) vals.peekAny match { case JField(name, value) => vals.pop(classOf[JField]) val obj = vals.peek(classOf[IntermediateJObject]) obj.fields += (JField(name,v)) case a: IntermediateJArray => a.bits += v case other => p.fail("expected field or array but got " + other) } else { vals.push(v) root = Some(v) } } do { token = p.nextToken token match { case OpenObj => vals.push(IntermediateJObject(scala.collection.mutable.ListBuffer())) case FieldStart(name) => vals.push(JField(name, null)) case StringVal(x) => newValue(JString(x)) case IntVal(x) => newValue(JInt(x)) case DoubleVal(x) => newValue(JDouble(x)) case BoolVal(x) => newValue(JBool(x)) case NullVal => newValue(JNull) case CloseObj => closeBlock(vals.popAny) case OpenArr => 
vals.push(IntermediateJArray(scala.collection.mutable.ListBuffer())) case CloseArr => closeBlock(vals.popAny) case End => } } while (token != End) root getOrElse JNothing } private[this] final val EOF: Char = (-1).asInstanceOf[Char] private class ValStack(parser: Parser) { import java.util.ArrayDeque private[this] val stack = new ArrayDeque[Any](32) def popAny = stack.poll def pop[A](expectedType: Class[A]) = convert(stack.poll, expectedType) def push(v: Any) = stack.addFirst(v) def peekAny = stack.peek def peek[A](expectedType: Class[A]) = convert(stack.peek, expectedType) def replace[A](newTop: Any) = { stack.pop stack.push(newTop) } private def convert[A](x: Any, expectedType: Class[A]): A = { if (x == null) parser.fail("expected object or array") try { x.asInstanceOf[A] } catch { case cce: ClassCastException => parser.fail(s"failure during class conversion. I got $x but needed a type of $expectedType", cce) } } def peekOption = if (stack.isEmpty) None else Some(stack.peek) def isEmpty = stack.isEmpty } class Parser(buf: Buffer) { import java.util.ArrayDeque // Maintains our current nesting context in the form of BlockMode, which // indicates if each context is an array or object. private[this] val blocks = new ArrayDeque[BlockMode](32) private[this] var fieldNameMode = true def fail(msg: String, cause: Exception = null) = throw new ParseException(msg + "\\nNear: " + buf.near, cause) /** Parse next Token from stream. */ def nextToken: Token = { def parseString: String = try { unquote(buf) } catch { case p: ParseException => throw p case cause: Exception => fail("unexpected string end", cause) } def parseValue(first: Char) = { var wasInt = true var doubleVal = false val buf = this.buf // Back up and mark the buffer so that we can extract a substring after // that contains the whole value. buf.back buf.mark while (wasInt) { val c = buf.next (c: @switch) match { case '.' 
| 'e' | 'E' => doubleVal = true case '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | '-' | '+' => // continue case _ => wasInt = false if (c != EOF) { buf.back // don't include the last character } } } buf.forward // substring is exclusive to the last index val value = buf.substring() buf.back // back up so our current pointer is in the right place (doubleVal: @switch) match { case true => DoubleVal(parseDouble(new String(value))) case false => IntVal(BigInt(new String(value))) } } while (true) { (buf.next: @switch) match { case '{' => blocks.addFirst(OBJECT) fieldNameMode = true return OpenObj case '}' => blocks.poll return CloseObj case '"' => if (fieldNameMode && blocks.peek == OBJECT) return FieldStart(parseString) else { fieldNameMode = true return StringVal(parseString) } case 't' => fieldNameMode = true if (buf.next == 'r' && buf.next == 'u' && buf.next == 'e') { return BoolVal(true) } fail("expected boolean") case 'f' => fieldNameMode = true if (buf.next == 'a' && buf.next == 'l' && buf.next == 's' && buf.next == 'e') { return BoolVal(false) } fail("expected boolean") case 'n' => fieldNameMode = true if (buf.next == 'u' && buf.next == 'l' && buf.next == 'l') { return NullVal } fail("expected null") case ':' => if (blocks.peek == ARRAY) fail("Colon in an invalid position") fieldNameMode = false case '[' => blocks.addFirst(ARRAY) return OpenArr case ']' => fieldNameMode = true blocks.poll return CloseArr case c @ ('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | '-') => fieldNameMode = true return parseValue(c) case ' ' | '\\n' | ',' | '\\r' | '\\t' => // ignore case c => c match { case `EOF` => buf.automaticClose return End case _ => fail("unknown token " + c) } } } buf.automaticClose End } sealed abstract class BlockMode case object ARRAY extends BlockMode case object OBJECT extends BlockMode } /* Buffer used to parse JSON. * Buffer is divided to one or more segments (preallocated in segmentPool). 
*/ private[json] final class Buffer(in: Reader, closeAutomatically: Boolean, segmentPool: SegmentPool = Segments) { // Reused by the parser when appropriate, allows for a single builder to be // used throughout the parse process, and to be written to directly from the // substring method, so as to avoid allocating new builders when avoidable. private[json] final val builder = new java.lang.StringBuilder(32) var offset = 0 // how far into the current segment we've read data var curMark = -1 var curMarkSegment = -1 var eofIsFailure = false private[this] var segments = scala.collection.mutable.ArrayBuffer(segmentPool.apply()) private[this] var segment: Array[Char] = segments.head.seg private[this] var cur = 0 // Pointer which points current parsing location private[this] var curSegmentIdx = 0 // Pointer which points current segment // Mark the current point so that future substring calls will extract the // value from this point to whatever point the buffer has advanced to. def mark = { if (curSegmentIdx > 0) { segments(0) = segments.remove(curSegmentIdx) curSegmentIdx = 0 } curMark = cur curMarkSegment = curSegmentIdx } def back = cur = cur-1 def forward = cur = cur+1 // Read the next character; reads new data from the reader if necessary. def next: Char = { if (cur >= offset && read < 0) { if (eofIsFailure) throw new ParseException("unexpected eof", null) else EOF } else { val c = segment(cur) cur += 1 c } } private[this] final val emptyArray = new Array[Char](0) // Slices from the last marked point to the current index. If intoBuilder is // true, appends it to the buffer's builder and returns an empty array. If // false, slices it into a new array and returns that array. 
final def substring(intoBuilder: Boolean = false) = { if (curSegmentIdx == curMarkSegment) { val substringLength = cur - curMark - 1 if (intoBuilder) { builder.append(segment, curMark, substringLength) emptyArray } else if (substringLength == 0) { emptyArray } else { val array = new Array[Char](substringLength) System.arraycopy(segment, curMark, array, 0, substringLength) array } } else { // slower path for case when string is in two or more segments val segmentCount = curSegmentIdx - curMarkSegment + 1 val substringLength = segmentCount * segmentPool.segmentSize - curMark - (segmentPool.segmentSize - cur) - 1 val chars = if (intoBuilder) { emptyArray } else { new Array[Char](substringLength) } var i = curMarkSegment var offset = 0 while (i <= curSegmentIdx) { val s = segments(i).seg val start = if (i == curMarkSegment) curMark else 0 val end = if (i == curSegmentIdx) cur else s.length+1 val partLen = end-start-1 if (intoBuilder) { builder.append(s, start, partLen) } else { System.arraycopy(s, start, chars, offset, partLen) } offset += partLen i = i+1 } curMarkSegment = -1 curMark = -1 chars } } def near = { val start = (cur - 20) max 0 val len = ((cur + 1) min segmentPool.segmentSize) - start new String(segment, start, len) } def release = segments.foreach(segmentPool.release) private[JsonParser] def automaticClose = if (closeAutomatically) in.close // Reads the next available block from the reader. Returns -1 if there's // nothing more to read. 
private[this] def read = { if (offset >= segment.length) { offset = 0 val segmentToUse = (curMarkSegment: @scala.annotation.switch) match { case -1 => curSegmentIdx = 0 segments(0) case _ => curSegmentIdx += 1 if (curSegmentIdx < segments.length) { segments(curSegmentIdx) } else { val segment = segmentPool.apply() segments.append(segment) segment } } segment = segmentToUse.seg } val length = in.read(segment, offset, segment.length-offset) if (length != -1) { cur = offset offset += length length } else -1 } } private[json] trait SegmentPool { def apply(): Segment def release(segment: Segment): Unit def segmentSize: Int } private[json] class ArrayBlockingSegmentPool(override val segmentSize: Int) extends SegmentPool { import java.util.concurrent.ArrayBlockingQueue import java.util.concurrent.atomic.AtomicInteger private[this] val maxNumOfSegments = 10000 private[this] var segmentCount = new AtomicInteger(0) private[this] val segments = new ArrayBlockingQueue[Segment](maxNumOfSegments) private[json] def clear = segments.clear def apply(): Segment = { val s = acquire // Give back a disposable segment if pool is exhausted. if (s != null) s else DisposableSegment(new Array(segmentSize)) } private[this] def acquire: Segment = { val curCount = segmentCount.get val createNew = if (segments.size == 0 && curCount < maxNumOfSegments) segmentCount.compareAndSet(curCount, curCount + 1) else false if (createNew) RecycledSegment(new Array(segmentSize)) else segments.poll } def release(s: Segment) = s match { case _: RecycledSegment => segments.offer(s) case _ => } } /* * A pool of preallocated char arrays. */ private object Segments extends ArrayBlockingSegmentPool(1000) sealed trait Segment { val seg: Array[Char] } case class RecycledSegment(seg: Array[Char]) extends Segment case class DisposableSegment(seg: Array[Char]) extends Segment }
lift/framework
core/json/src/main/scala/net/liftweb/json/JsonParser.scala
Scala
apache-2.0
19,462
import sbt._
import Keys._
import play.Project._

/** sbt build definition for the play-example-bootstrap application. */
object ApplicationBuild extends Build {

  val appName    = "play-example-bootstrap"
  val appVersion = "1.0-SNAPSHOT"

  // Managed dependencies: Play's Java support plus WebJars for client-side assets.
  val appDependencies = Seq(
    javaCore,
    javaJdbc,
    javaEbean,
    "org.webjars" % "webjars-play" % "2.1.0",
    // Bootstrap bundles its own jQuery; exclude it so the version pinned below wins.
    ("org.webjars" % "bootstrap" % "2.3.2").exclude("org.webjars", "jquery"),
    "org.webjars" % "jquery" % "1.8.3" // use 1.8.3 so that integration tests with HtmlUnit work.
  )

  // Root Play project; extra settings may be appended here.
  val main = play.Project(appName, appVersion, appDependencies).settings(
  )
}
ics-software-engineering/play-example-bootstrap
project/Build.scala
Scala
mit
649
package config

import javax.inject._

import akka.actor._
import dao.{ComputerStateDAO, ConnectedUserDAO}
import model.{ComputerState, ConnectedUser}
import services.{ComputerService, SSHOrderService, Timer}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

/**
  * This has the task of checking periodically the computers' state, without blocking the main thread.
  *
  * @author Camilo Sampedro <camilo.sampedro@udea.edu.co>
  * @param connectedUserDAO Injected.
  * @param computerStateDAO Injected.
  * @param computerService  Injected.
  * @param sSHOrderService  Injected.
  * @param actorSystem      Injected.
  * @param executionContext Injected.
  */
class ComputerChecker @Inject()(connectedUserDAO: ConnectedUserDAO,
                                computerStateDAO: ComputerStateDAO,
                                computerService: ComputerService,
                                sSHOrderService: SSHOrderService,
                                actorSystem: ActorSystem,
                                executionContext: ExecutionContext) extends UntypedActor with Timer {

  // Guards against overlapping runs. The actor processes one message at a time,
  // so a plain var is sufficient here.
  var isExecuting = false

  /**
    * Execute the checker task: check every computer, then persist each
    * resulting state and its connected users.
    *
    * @param message Not needed.
    */
  @scala.throws[Exception](classOf[Exception])
  override def onReceive(message: Any): Unit = {
    // Only execute when it's not already executing.
    if (!isExecuting) {
      isExecuting = true
      play.Logger.info("Executing computer checker.")
      // BUG FIX: previously an exception thrown by any Await below left
      // `isExecuting` stuck at true, disabling the checker until a JVM
      // restart. The try/finally guarantees the flag is always cleared.
      try {
        // Create a checker task for every computer on the database.
        val task = computerService.listAllSimple.map { computers =>
          time {
            computers.map { computer =>
              play.Logger.debug("Checking: " + computer)
              sSHOrderService.check(computer)("Scheduled Checker")
            }
          }
        }
        // Execute all the tasks collected in the last step.
        val results: Seq[(ComputerState, Seq[ConnectedUser])] = Await.result(task, Duration.Inf)
        play.Logger.debug(s"""Computers checked, proceeding to save: $results""")
        // Save every result on the database, one computer at a time.
        for ((computerState, connectedUsers) <- results) {
          Await.result(computerStateDAO.add(computerState), Duration.Inf)
          val addConnectedUsersTasks = connectedUsers.map {
            connectedUserDAO.add
          }
          Await.result(Future.sequence(addConnectedUsersTasks.toList), Duration.Inf)
        }
      } finally {
        // Reset the execution flag even on failure.
        isExecuting = false
      }
    } else {
      // It is now executing
      play.Logger.debug("Already executing computer checker. Omitting")
    }
  }
}
ProjectAton/AtonLab
app/config/ComputerChecker.scala
Scala
gpl-3.0
2,731
import scala.compiletime.S

// Exercises the type-level successor operation `S` on Int singleton types:
// `S[I]` is the singleton type `I + 1`, and `ValueOf` materialises its value.
// NOTE(review): this uses the pre-3.0 location `scala.compiletime.S`; newer
// compilers expose it as `scala.compiletime.ops.int.S` — confirm against the
// compiler version this test targets.
object Test extends App {
  // The given `ValueOf[S[I]]` is synthesised by the compiler; `.value` yields I + 1.
  def plusOne[I <: Int](using x: ValueOf[S[I]]): S[I] = x.value
  // Two nested applications of S: yields I + 2.
  def plusTwo[I <: Int](using x: ValueOf[S[S[I]]]): S[S[I]] = x.value

  assert(plusOne[0] == 1)
  assert(plusTwo[0] == 2)
}
som-snytt/dotty
tests/run/i7868.scala
Scala
apache-2.0
250
package org.jetbrains.plugins.scala
package lang.refactoring.extractTrait

import com.intellij.refactoring.classMembers.{MemberInfoChange, AbstractMemberInfoModel}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScMember
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScDeclaration

/**
 * Member-info model for the "extract trait" refactoring: members that are
 * declarations (no body) are inherently abstract, while members with a body
 * may optionally be extracted as abstract.
 *
 * Nikolay.Tropin
 * 2014-05-23
 */
object ExtractTraitInfoModel extends AbstractMemberInfoModel[ScMember, ScalaExtractMemberInfo] {

  // A declaration has no body and is therefore abstract by nature.
  private def isDeclaration(member: ScalaExtractMemberInfo): Boolean =
    member.getMember match {
      case _: ScDeclaration => true
      case _ => false
    }

  // The "make abstract" checkbox only makes sense for members that have a body.
  override def isAbstractEnabled(member: ScalaExtractMemberInfo) = !isDeclaration(member)

  override def memberInfoChanged(event: MemberInfoChange[ScMember, ScalaExtractMemberInfo]) =
    super.memberInfoChanged(event)

  // Declarations are pinned as abstract; null signals "not fixed" for the rest.
  override def isFixedAbstract(member: ScalaExtractMemberInfo) =
    if (isDeclaration(member)) true else null

  // Even with the checkbox disabled, a declaration is still extracted as abstract.
  override def isAbstractWhenDisabled(member: ScalaExtractMemberInfo) =
    isDeclaration(member)
}
consulo/consulo-scala
src/org/jetbrains/plugins/scala/lang/refactoring/extractTrait/ExtractTraitInfoModel.scala
Scala
apache-2.0
1,056
package io.buoyant.transformer
package perHost

import com.fasterxml.jackson.annotation.JsonIgnore
import com.twitter.finagle.Path
import io.buoyant.namer.{NameTreeTransformer, TransformerConfig, TransformerInitializer}
import java.net.NetworkInterface
import scala.collection.JavaConverters._

/** Registers the `io.l5d.localhost` transformer kind with the config loader. */
class LocalhostTransformerInitializer extends TransformerInitializer {
  val configClass = classOf[LocalhostTransformerConfig]
  override val configId = "io.l5d.localhost"
}

/** Configuration for a transformer built from this host's own network addresses. */
class LocalhostTransformerConfig extends TransformerConfig {

  @JsonIgnore
  val defaultPrefix = Path.read("/io.l5d.localhost")

  /**
   * Builds the transformer from the addresses of every network interface that
   * is currently up. Uses the host-only netmask 255.255.255.255 — presumably
   * so only exact local-address matches pass; confirm in SubnetLocalTransformer.
   */
  @JsonIgnore
  override def mk(): NameTreeTransformer = {
    val localIPs = NetworkInterface.getNetworkInterfaces.asScala
      .filter(_.isUp)
      .flatMap(_.getInetAddresses.asScala)
      .toSeq
    new SubnetLocalTransformer(prefix, localIPs, Netmask("255.255.255.255"))
  }
}
hhtpcd/linkerd
interpreter/per-host/src/main/scala/io/buoyant/transformer/perHost/LocalhostTransformerInitializer.scala
Scala
apache-2.0
929
package org.scalatest.tools

import org.scalatest.Tracker
import org.scalatest.events.Summary
import sbt.testing.{Framework => BaseFramework, Event => SbtEvent, Status => SbtStatus, _}
import scala.compat.Platform

/**
 * sbt test-interface `Runner` on the master side of a distributed ScalaTest
 * run: creates one `TaskRunner` per discovered `TaskDef`, accumulates event
 * counts reported by slaves via `receiveMessage`, and renders the final
 * summary in `done()`.
 */
class MasterRunner(theArgs: Array[String], theRemoteArgs: Array[String], testClassLoader: ClassLoader) extends Runner {

  // TODO: To take these from test arguments
  // Reporter presentation flags; currently hard-coded instead of parsed from `theArgs`.
  val presentAllDurations: Boolean = true
  val presentInColor: Boolean = true
  val presentShortStackTraces: Boolean = true
  val presentFullStackTraces: Boolean = true
  val presentUnformatted: Boolean = false
  val presentReminder: Boolean = false
  val presentReminderWithShortStackTraces: Boolean = false
  val presentReminderWithFullStackTraces: Boolean = false
  val presentReminderWithoutCanceledTests: Boolean = false

  // Wall-clock start, used by done() to compute the total run duration.
  val runStartTime = Platform.currentTime

  val tracker = new Tracker
  // Shared counters; incremented from receiveMessage as slaves report events.
  val summaryCounter = new SummaryCounter

  /**
   * Called by sbt when the run completes; renders the accumulated counters
   * into the familiar end-of-run summary text (one fragment per line).
   */
  def done(): String = {
    val duration = Platform.currentTime - runStartTime
    val summary = new Summary(summaryCounter.testsSucceededCount, summaryCounter.testsFailedCount, summaryCounter.testsIgnoredCount, summaryCounter.testsPendingCount,
      summaryCounter.testsCanceledCount, summaryCounter.suitesCompletedCount, summaryCounter.suitesAbortedCount, summaryCounter.scopesPendingCount)
    val fragments: Vector[Fragment] =
      StringReporter.summaryFragments(
        true,
        Some(duration),
        Some(summary),
        Vector.empty ++ summaryCounter.reminderEventsQueue,
        presentAllDurations,
        presentReminder,
        presentReminderWithShortStackTraces,
        presentReminderWithFullStackTraces,
        presentReminderWithoutCanceledTests
      )
    // Join the fragments, applying ANSI color only when configured.
    fragments.map(_.toPossiblyColoredText(presentInColor)).mkString("\n")
  }

  def remoteArgs(): Array[String] = {
    theRemoteArgs
  }

  def args: Array[String] = {
    theArgs
  }

  /** Wraps each discovered TaskDef in a TaskRunner carrying the presentation flags. */
  def tasks(list: Array[TaskDef]): Array[Task] = {
    list.map(t => new TaskRunner(t, testClassLoader, tracker, presentAllDurations, presentInColor, presentShortStackTraces, presentFullStackTraces, presentUnformatted, presentReminder,
      presentReminderWithShortStackTraces, presentReminderWithFullStackTraces, presentReminderWithoutCanceledTests, None))
  }

  /**
   * Slaves report lifecycle events as fully-qualified ScalaTest event class
   * names; fold each into the shared counters. Unknown messages are ignored.
   * Always returns None — no reply is sent back.
   */
  def receiveMessage(msg: String): Option[String] = {
    msg match {
      case "org.scalatest.events.TestPending" =>
        summaryCounter.incrementTestsPendingCount()
      case "org.scalatest.events.TestFailed" =>
        summaryCounter.incrementTestsFailedCount()
      case "org.scalatest.events.TestSucceeded" =>
        summaryCounter.incrementTestsSucceededCount()
      case "org.scalatest.events.TestIgnored" =>
        summaryCounter.incrementTestsIgnoredCount()
      case "org.scalatest.events.TestCanceled" =>
        summaryCounter.incrementTestsCanceledCount()
      case "org.scalatest.events.SuiteCompleted" =>
        summaryCounter.incrementSuitesCompletedCount()
      case "org.scalatest.events.SuiteAborted" =>
        summaryCounter.incrementSuitesAbortedCount()
      case "org.scalatest.events.ScopePending" =>
        summaryCounter.incrementScopesPendingCount()
      case _ =>
    }
    None
  }

  // Tasks travel between master and slaves as serialized TaskDefs.
  def serializeTask(task: Task, serializer: (TaskDef) => String): String =
    serializer(task.taskDef())

  def deserializeTask(task: String, deserializer: (String) => TaskDef): Task =
    new TaskRunner(deserializer(task), testClassLoader, tracker, presentAllDurations, presentInColor, presentShortStackTraces, presentFullStackTraces, presentUnformatted, presentReminder,
      presentReminderWithShortStackTraces, presentReminderWithFullStackTraces, presentReminderWithoutCanceledTests, None)
}
SRGOM/scalatest
scalatest.js/src/main/scala/org/scalatest/tools/MasterRunner.scala
Scala
apache-2.0
3,689
package com.github.akovari.rdfp.api.ql

import org.parboiled.errors.ErrorUtils
import org.parboiled.scala._

/**
 * AST node definitions for the query-ordering language (QOL).
 *
 * Created by akovari on 03/07/15.
 */
object QOLParser {

  abstract class AstNode

  sealed abstract class AstValue[+T](value: T) extends AstNode

  sealed abstract class FieldOrder extends AstNode

  case class Field(value: String) extends AstValue(value)

  case class AscFieldOrder(field: Field) extends FieldOrder

  case class DescFieldOrder(field: Field) extends FieldOrder

  case class TableOrder(fieldOrders: Seq[FieldOrder]) extends AstNode
}

/**
 * Parboiled parser for comma-separated `<field> ASC|DESC` ordering clauses,
 * e.g. "name ASC, created DESC".
 */
class QOLParser extends CommonParser {

  import QOLParser._

  /** Entry rule: one or more orderings separated by commas, consuming the full input. */
  def QOLTableOrder: Rule1[TableOrder] = rule {
    QOLFieldOrder ~ zeroOrMore(WhiteSpace ~ "," ~ WhiteSpace ~ QOLFieldOrder) ~~> { (head, tail) =>
      TableOrder(head :: tail)
    } ~ EOI
  }

  /** A single ordering clause: ascending is tried first, then descending. */
  def QOLFieldOrder: Rule1[FieldOrder] = rule(QOLAscFieldOrder | QOLDescFieldOrder)

  /** `<field> ASC` with a case-insensitive keyword. */
  def QOLAscFieldOrder: Rule1[AscFieldOrder] = rule {
    (QOLField ~ WhiteSpace ~ ignoreCase("ASC")) ~~> AscFieldOrder
  }

  /** `<field> DESC` with a case-insensitive keyword. */
  def QOLDescFieldOrder: Rule1[DescFieldOrder] = rule {
    (QOLField ~ WhiteSpace ~ ignoreCase("DESC")) ~~> DescFieldOrder
  }

  /** An identifier wrapped into a Field AST node. */
  def QOLField: Rule1[Field] = rule {
    Ident ~> Field
  }

  /**
   * Parses `qol` into a TableOrder, or throws ParsingException carrying the
   * collected parse errors when the input is invalid.
   */
  def parseQOL(qol: String): TableOrder = {
    val run = ReportingParseRunner(QOLTableOrder).run(qol)
    run.result.getOrElse {
      throw new ParsingException(s"""Invalid QOL order: \n${ErrorUtils.printParseErrors(run)}""")
    }
  }
}
akovari/reactive-data-federation-poc
src/main/scala/com/github/akovari/rdfp/api/ql/QOLParser.scala
Scala
apache-2.0
1,593
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.cg.monadic.transformer

import com.typesafe.scalalogging.slf4j.LazyLogging
import org.slf4j.LoggerFactory

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global

import com.typesafe.scalalogging.slf4j.Logger

/**
 * A monadic type transformer that can be used to chain in for-comprehension.
 * Implementations can be Case class so that parameters can be passed in class level.
 *
 * Evaluation model (important): `flatMap`/`map` call `validateInput` EAGERLY at
 * composition time, but `transform` itself runs lazily, only when the final
 * chained transformer's `transform()` is invoked.
 *
 * @author WZ
 */
trait Transformer[OUT] extends LazyLogging {
  self: Transformer[OUT] =>

  // Re-created lazily per class; @transient so serialized instances don't drag the logger along.
  @transient
  override protected lazy val logger: Logger = Logger(LoggerFactory getLogger getClass.getName)

  /**
   * Override to perform transformation
   * and return the desired type
   */
  def transform(): OUT

  /**
   * monadic operation that can chain another transformer
   * (will be called implicitly in for-comprehension ).
   * Note: validateInput runs now; self.transform runs only when the result is forced.
   */
  def flatMap[NEXT](f: OUT => Transformer[NEXT]): Transformer[NEXT] = {
    this.validateInput
    new Transformer[NEXT] {
      def transform(): NEXT = {
        val nextTransformer: Transformer[NEXT] = f(self.transform)
        nextTransformer.transform
      }
    }
  }

  /**
   * monadic operation that can yield the last transformer
   * (will be called implicitly at the end of for-comprehension )
   */
  def map[NEXT](f: OUT => NEXT): Transformer[NEXT] = {
    this.validateInput
    new Transformer[NEXT] {
      def transform(): NEXT = {
        f(self.transform)
      }
    }
  }

  /**
   * Shouldn't be required in for-comprehension.
   * Throws NoSuchElementException when the predicate fails — see known issue:
   * https://issues.scala-lang.org/browse/SI-1336
   */
  def filter(p: OUT => Boolean): Transformer[OUT] = {
    map { r =>
      if (p(r)) r
      else throw new NoSuchElementException("not satisfied")
    }
  }

  /**
   * can be used to chain transformers that can run in parallel.
   * Runs both sides as Futures on the imported global ExecutionContext and
   * BLOCKS (Await with an infinite timeout) until both complete.
   */
  def zip[ANOTHER_OUT](that: Transformer[ANOTHER_OUT]): Transformer[(OUT, ANOTHER_OUT)] = {
    new Transformer[(OUT, ANOTHER_OUT)] {
      def transform(): (OUT, ANOTHER_OUT) = {
        val transformer1 = Future {
          self.validateInput
          self.transform
        }
        val transformer2 = Future {
          that.validateInput
          that.transform
        }
        val result = transformer1 zip transformer2
        Await.result(result, Duration.Inf)
      }
    }
  }

  /**
   * Override this to do input validation
   */
  protected def validateInput(): Unit = {}
}
CodeGerm/monadic-lib
src/main/scala/org/cg/monadic/transformer/Transformer.scala
Scala
apache-2.0
3,302
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql

import org.apache.spark.{SparkThrowable, SparkThrowableHelper}
import org.apache.spark.annotation.Stable
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.trees.Origin

/**
 * Thrown when a query fails to analyze, usually because the query itself is invalid.
 *
 * @param message          human-readable error text
 * @param line             1-based line of the offending query fragment, when known
 * @param startPosition    column of the offending fragment, when known
 * @param plan             the logical plan under analysis (transient — see note below)
 * @param cause            underlying exception, if any
 * @param errorClass       stable error-class identifier for SparkThrowable
 * @param messageParameters arguments interpolated into the error-class template
 * @since 1.3.0
 */
@Stable
class AnalysisException protected[sql] (
    val message: String,
    val line: Option[Int] = None,
    val startPosition: Option[Int] = None,
    // Some plans fail to serialize due to bugs in scala collections.
    @transient val plan: Option[LogicalPlan] = None,
    val cause: Option[Throwable] = None,
    val errorClass: Option[String] = None,
    val messageParameters: Array[String] = Array.empty)
  extends Exception(message, cause.orNull) with SparkThrowable with Serializable {

  // Builds the message text from the error class and its parameters.
  def this(errorClass: String, messageParameters: Array[String], cause: Option[Throwable]) =
    this(
      SparkThrowableHelper.getMessage(errorClass, messageParameters),
      errorClass = Some(errorClass),
      messageParameters = messageParameters,
      cause = cause)

  def this(errorClass: String, messageParameters: Array[String]) =
    this(errorClass = errorClass, messageParameters = messageParameters, cause = None)

  // Positions the error at the given parse-tree origin (line / start column).
  def this(
      errorClass: String,
      messageParameters: Array[String],
      origin: Origin) =
    this(
      SparkThrowableHelper.getMessage(errorClass, messageParameters),
      line = origin.line,
      startPosition = origin.startPosition,
      errorClass = Some(errorClass),
      messageParameters = messageParameters)

  def copy(
      message: String = this.message,
      line: Option[Int] = this.line,
      startPosition: Option[Int] = this.startPosition,
      plan: Option[LogicalPlan] = this.plan,
      cause: Option[Throwable] = this.cause,
      errorClass: Option[String] = this.errorClass,
      messageParameters: Array[String] = this.messageParameters): AnalysisException =
    new AnalysisException(message, line, startPosition, plan, cause, errorClass, messageParameters)

  // Re-targets the exception to a new position while preserving the original stack trace.
  def withPosition(line: Option[Int], startPosition: Option[Int]): AnalysisException = {
    val newException = this.copy(line = line, startPosition = startPosition)
    newException.setStackTrace(getStackTrace)
    newException
  }

  override def getMessage: String = {
    // `plan` is @transient, so it can be null after deserialization;
    // Option(plan).flatten guards against both null and None.
    val planAnnotation = Option(plan).flatten.map(p => s";\n$p").getOrElse("")
    getSimpleMessage + planAnnotation
  }

  // Outputs an exception without the logical plan.
  // For testing only
  def getSimpleMessage: String = if (line.isDefined || startPosition.isDefined) {
    val lineAnnotation = line.map(l => s" line $l").getOrElse("")
    val positionAnnotation = startPosition.map(p => s" pos $p").getOrElse("")
    s"$message;$lineAnnotation$positionAnnotation"
  } else {
    message
  }

  override def getErrorClass: String = errorClass.orNull
}
mahak/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/AnalysisException.scala
Scala
apache-2.0
3,732
/*
 * Copyright (C) 2017 Stratio (http://stratio.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.stratio.tikitakka.columbus

import com.stratio.tikitakka.columbus.test.utils.DummyDiscoveryComponent

import scala.concurrent.Await
import scala.concurrent.duration._

import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
import org.scalatest.ShouldMatchers

/** Unit tests for DiscoveryComponent's availability check against a dummy service. */
@RunWith(classOf[JUnitRunner])
class DiscoveryComponentUnitTest extends WordSpec with ShouldMatchers {

  // Upper bound for each availability check. Dot syntax (`3.seconds`) is used
  // instead of the postfix form `3 seconds`, which needs scala.language.postfixOps.
  val timeout: FiniteDuration = 3.seconds

  "DiscoveryComponent" should {

    "know if the discovery service is up" in new DummyDiscoveryComponent {
      val uri = upHost
      Await.result(isUp, timeout) should be(true)
    }

    "know if the discovery service is down" in new DummyDiscoveryComponent {
      val uri = "fakeHost"
      Await.result(isUp, timeout) should be(false)
    }
  }
}
compae/tiki-takka
discovery/src/test/scala/com/stratio/tikitakka/columbus/DiscoveryComponentUnitTest.scala
Scala
apache-2.0
1,430
/*
 * Copyright (C) 2014 Cathal Mc Ginley
 *
 * This file is part of TomatoJuice, a Pomodoro timer-tracker for GNOME.
 *
 * TomatoJuice is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 3 of the License,
 * or (at your option) any later version.
 *
 * TomatoJuice is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with TomatoJuice; see the file COPYING. If not, write to the
 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301 USA.
 */

package org.gnostai.tomatojuice.db

import java.sql.ResultSet
import java.sql.Connection

import org.gnostai.tomatojuice.core.ProductionCoreModule

/** Adapts a JDBC ResultSet into a lazily-evaluated Stream of mapped rows. */
trait QueryToStream {

  /**
   * Lazily maps each remaining row of `rslt` through `thunk`.
   * The cursor is advanced only as the stream is forced, so the ResultSet
   * (and its statement/connection) must stay open while the stream is consumed.
   *
   * @param thunk extracts one value from the cursor's current row
   * @param rslt  an open, positioned-before-first-row result set
   */
  def makeStream[T](thunk: ResultSet => T)(rslt: ResultSet): Stream[T] = {

    // Advance the cursor; None signals exhaustion.
    def rsltToOptionT(): Option[T] = {
      if (rslt.next()) {
        Some(thunk(rslt))
      } else {
        None
      }
    }

    def mkStream: Stream[T] = {
      val next = rsltToOptionT()
      next map { x: T =>
        Stream.cons(x, mkStream)
      } getOrElse (Stream.empty)
    }

    mkStream
  }
}

/** Ad-hoc manual driver for QueryToStream against the real project database. */
object QueryMainX extends App with BoneConnectionPool with ProductionCoreModule {

  class ProjectDBUtilX extends QueryToStream {

    // Maps one row of the `project` table to a Project value.
    private def rsltToProject(rslt: ResultSet): Project = {
      val id = rslt.getInt(1)
      val name = rslt.getString(2)
      val descr = rslt.getString(3)
      val icon = rslt.getBlob(4)
      val iconBytes: Option[Array[Byte]] = if (icon == null) {
        None
      } else {
        // BUG FIX: java.sql.Blob positions are 1-based, so getBytes(0, ...) is
        // invalid. Also read the whole blob instead of an arbitrary 100 bytes,
        // which silently truncated the icon.
        Some(icon.getBytes(1, icon.length.toInt))
      }
      Project(Some(id), name, descr, iconBytes)
    }

    val makeProjectStream = makeStream(rsltToProject)(_)

    def loadAll(conn: Connection): Seq[Project] = {
      val stmt = conn.createStatement()
      val rslt =
        stmt.executeQuery("SELECT id, name, description, icon_png FROM project")
      val valueStream = makeProjectStream(rslt)
      valueStream.toSeq
    }
  }

  val conn = pool.getConnection()
  val util = new ProjectDBUtilX()

  val stmt = conn.createStatement()
  val rslt = stmt.executeQuery("SELECT id, name, description, icon_png FROM project")

  val stream = util.makeProjectStream(rslt)
  val xeq = stream.toArray.toSeq
  println(xeq)
}
cathalmcginley/tomatojuice
src/main/scala/org/gnostai/tomatojuice/db/QueryToStream.scala
Scala
gpl-3.0
2,810
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package iht.views.filter import iht.constants.Constants._ import iht.testhelpers.UseService import iht.views.ViewTestHelper import iht.views.html.filter.use_service import play.api.test.Helpers._ class UseServiceViewTest extends ViewTestHelper with UseService { val fakeRequest = createFakeRequest(isAuthorised = false) val applicationMessages = messages lazy val useServiceView: use_service = app.injector.instanceOf[use_service] "use_service" must { "have no message keys in html" in { val result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val view = asDocument(contentAsString(result)).toString noMessageKeysShouldBePresent(view) } "generate appropriate content for the title" in { val result = useServiceView(under325000, false, "iht.mustUseOnlineService")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val titleElement = doc.getElementsByTag("h1").first titleElement.text must include(messagesApi("iht.mustUseOnlineService")) } "generate appropriate content for the browser title" in { val result = useServiceView(under325000, false, "iht.mustUseOnlineService")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val browserTitleElement = doc.getElementsByTag("title").first browserTitleElement.text must include(messagesApi("iht.mustUseOnlineService")) } "generate appropriate content for under 325000" in { val 
result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val content = contentAsString(result) content must include(messagesApi("page.iht.filter.useService.under325000.paragraph0")) content must include(messagesApi("page.iht.filter.useService.paragraphFinal")) content mustNot include(messagesApi("page.iht.filter.useService.between325000And1Million.section1.title")) content mustNot include(messagesApi("page.iht.filter.useService.between325000And1Million.section2.title")) content mustNot include(messagesApi("page.iht.filter.useService.between325000And1Million.section3.title")) } "generate appropriate content for between 325000 and 1 million" in { val result = useServiceView(between325000and1million, false, "")(fakeRequest, applicationMessages) val content = contentAsString(result) content must include(messagesApi("page.iht.filter.useService.between325000And1Million.section1.title")) content must include(messagesApi("page.iht.filter.useService.between325000And1Million.section2.title")) content must include(messagesApi("page.iht.filter.useService.between325000And1Million.section3.title")) } "display content about other ways to report the value of the estate when value is under 325000" in { val result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val h2 = doc.getElementById("other-ways-to-report") h2.text() must be(messagesApi("page.iht.filter.useService.under325000.otherWaysToReportValue")) } "does not display content about other ways to report the value of the estate when value is between 325000 and 1 million" in { val result = useServiceView(between325000and1million, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val h2 = doc.getElementById("other-ways-to-reportOver") h2.text() mustBe pageIHTFilterUseServiceBetween325000And1MillionSection4P1 } "contain a link with the button class with the text 'Continue' for values under 
325000" in { val result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val button = doc.select("a.button").first } "generate content for the final paragraph when given the under 325 parameter" in { val result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val paragraph0 = doc.getElementById("paragraph-final") paragraph0.text() must be(messagesApi("page.iht.filter.useService.paragraphFinal")) } "contain a link with the button class with the text 'Continue to online service' " + "for values between 325000 and 1 million" in { val result = useServiceView(between325000and1million, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val button = doc.select("a.button").first button.text() mustBe pageIHTFilterUseServiceBetween325000And1MillionReport } "contain a link to the TNRB and RNRB guidance, and IHT400, when value is between 325 and 1 million" in { val result = useServiceView(between325000and1million, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val tnrb = doc.getElementById("tnrb-link") val rnrb = doc.getElementById("rnrb-link") val iht400 = doc.getElementById("IHT400-form") tnrb.attr("href") must be("https://www.gov.uk/guidance/inheritance-tax-transfer-of-threshold") rnrb.attr("href") must be("https://www.gov.uk/guidance/check-if-you-can-get-an-additional-inheritance-tax-threshold") iht400.attr("href") must be("https://www.gov.uk/government/publications/inheritance-tax-inheritance-tax-account-iht400") } "contain a link with the button class with href attribute pointing to the start pages" in { val result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val button = doc.select("a.button").first button.attr("href") must 
be(iht.controllers.registration.routes.RegistrationChecklistController.onPageLoad().url) } "contain a 'Previous answers' section" in { val result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) assertRenderedById(doc, "previous-answers") } "contain a 'Start again' link to go back to the domicile page" in { val result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val link = doc.getElementById("start-again") link.text() must be(messagesApi("iht.startAgain")) link.attr("href") must be(iht.controllers.filter.routes.DomicileController.onPageLoad().url) } "contain a row showing the user's answer to the previous domicile question" in { val result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val row = doc.getElementById("domicile-row") row.text() must include(messagesApi("page.iht.registration.deceasedPermanentHome.title")) row.text() must include(messagesApi("iht.countries.englandOrWales")) } "contain a 'Change' link to go back to the domicile page" in { val result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val link = doc.getElementById("change-domicile") link.text() must include(messagesApi("iht.change")) link.attr("href") must be(iht.controllers.filter.routes.DomicileController.onPageLoad().url) } "contain a row showing the user's answer to the previous estimate question when given the under 32500 parameter" in { val result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val row = doc.getElementById("estimate-row") row.text() must include(messagesApi("iht.roughEstimateEstateWorth")) row.text() must include(messagesApi("page.iht.filter.estimate.choice.under")) } "contain a row showing the user's answer 
to the previous estimate question when given the between parameter" in { val result = useServiceView(between325000and1million, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val rows = doc.getElementsByAttributeValue("id","estimate-row") rows.size() mustEqual 1 } "contain a 'Change' link to go back to the estimate page" in { val result = useServiceView(under325000, false, "")(fakeRequest, applicationMessages) val doc = asDocument(contentAsString(result)) val link = doc.getElementById("change-estimate") link.text() must include(messagesApi("iht.change")) link.attr("href") must be(iht.controllers.filter.routes.EstimateController.onPageLoadWithoutJointAssets().url) } } }
hmrc/iht-frontend
test/iht/views/filter/UseServiceViewTest.scala
Scala
apache-2.0
9,400
package se.lu.nateko.cp.meta.utils import org.semanticweb.owlapi.model._ import org.semanticweb.owlapi.io.XMLUtils import java.util.Optional import java.util.stream.{Stream => JavaStream} import scala.reflect.ClassTag package object owlapi { // implicit class GoogleScalaOptionable[T](val opt: Optional[T]) extends AnyVal{ // def toOption: Option[T] = if(opt.isPresent) Some(opt.get) else None // } implicit class JavaUtilOptionable[T](val opt: Optional[T]) extends AnyVal{ def toOption: Option[T] = if(opt.isPresent) Some(opt.get) else None } implicit class JavaStreamToScalaConverter[T <: AnyRef](val stream: JavaStream[T]) extends AnyVal { def toIndexedSeq(implicit ev: ClassTag[T]): IndexedSeq[T] = stream.toArray[T](Array.ofDim[T]).toIndexedSeq } def getOntologyFromJarResourceFile( resourcePath: String, manager: OWLOntologyManager): OWLOntology = { val stream = getClass.getResourceAsStream(resourcePath) manager.loadOntologyFromOntologyDocument(stream) } def getLastFragment(iri: IRI): String = { XMLUtils.getNCNameSuffix(iri.toString) } }
ICOS-Carbon-Portal/meta
src/main/scala/se/lu/nateko/cp/meta/utils/owlapi/package.scala
Scala
gpl-3.0
1,081
/* * Copyright 2018 Analytics Zoo Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.zoo.pipeline.nnframes import java.io.File import com.intel.analytics.bigdl.models.inception.Inception_v1 import com.intel.analytics.bigdl.models.lenet.LeNet5 import com.intel.analytics.bigdl.nn._ import com.intel.analytics.bigdl.optim.{Adam, LBFGS, Loss, Trigger} import com.intel.analytics.bigdl.tensor.Tensor import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric.NumericFloat import com.intel.analytics.bigdl.utils.Engine import com.intel.analytics.bigdl.utils.RandomGenerator.RNG import com.intel.analytics.bigdl.visualization.{TrainSummary, ValidationSummary} import com.intel.analytics.zoo.feature.common._ import com.intel.analytics.zoo.feature.image._ import com.intel.analytics.zoo.pipeline.api.keras.ZooSpecHelper import org.apache.logging.log4j.{Level, LogManager} import org.apache.logging.log4j.core.config.Configurator import org.apache.spark.SparkContext import org.apache.spark.ml.feature.{MinMaxScaler, VectorAssembler} import org.apache.spark.ml.linalg.Vector import org.apache.spark.ml.param.ParamMap import org.apache.spark.ml.{Pipeline, PipelineModel} import org.apache.spark.mllib.linalg.Vectors import org.apache.spark.sql.functions._ import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType} import org.apache.spark.sql.{DataFrame, SQLContext, SparkSession} import scala.collection.mutable.ArrayBuffer import 
scala.reflect.io.Path class NNClassifierSpec extends ZooSpecHelper { var sc : SparkContext = _ var sqlContext : SQLContext = _ var smallData: Seq[(Array[Double], Double)] = _ val nRecords = 100 val maxEpoch = 20 override def doBefore(): Unit = { val conf = Engine.createSparkConf().setAppName("Test NNClassifier").setMaster("local[1]") sc = SparkContext.getOrCreate(conf) sqlContext = new SQLContext(sc) smallData = NNEstimatorSpec.generateTestInput( nRecords, Array(1.0, 2.0, 3.0, 4.0, 5.0, 6.0), -1.0, 42L) val seed = System.currentTimeMillis() RNG.setSeed(seed) Engine.init } override def doAfter(): Unit = { if (sc != null) { sc.stop() } } "NNClassifier" should "has correct default params" in { val model = Linear[Float](10, 1) val criterion = ZooClassNLLCriterion[Float]() val estimator = NNClassifier(model, criterion, Array(10)) assert(estimator.getFeaturesCol == "features") assert(estimator.getLabelCol == "label") assert(estimator.getMaxEpoch == 50) assert(estimator.getBatchSize == 1) assert(estimator.getLearningRate == 1e-3) assert(estimator.getLearningRateDecay == 0) } "NNClassifier" should "apply with differnt params" in { val model = Linear[Float](6, 2) val criterion = ZooClassNLLCriterion[Float]() val data = sc.parallelize(smallData) val df = sqlContext.createDataFrame(data).toDF("features", "label") Seq( NNClassifier(model, criterion), NNClassifier(model, criterion, Array(6)), NNClassifier(model, criterion, SeqToTensor(Array(6))) ).foreach(c => c.setMaxEpoch(1).fit(df)) } "NNClassifier" should "get reasonable accuracy" in { val model = new Sequential().add(Linear[Float](6, 2)).add(LogSoftMax[Float]) val criterion = ZooClassNLLCriterion[Float]() val classifier = NNClassifier(model, criterion, Array(6)) .setOptimMethod(new LBFGS[Float]()) .setLearningRate(0.1) .setBatchSize(nRecords) .setMaxEpoch(maxEpoch) val data = sc.parallelize(smallData) val df = sqlContext.createDataFrame(data).toDF("features", "label") val nnModel = classifier.fit(df) 
nnModel.isInstanceOf[NNClassifierModel[_]] should be(true) assert(nnModel.transform(df).where("prediction=label").count() > nRecords * 0.8) } "NNClassifier" should "support model with Sigmoid" in { val model = new Sequential().add(Linear[Float](6, 10)).add(Linear[Float](10, 1)) .add(Sigmoid[Float]) val criterion = BCECriterion[Float]() val classifier = NNClassifier(model, criterion, Array(6)) .setOptimMethod(new Adam[Float]()) .setLearningRate(0.01) .setBatchSize(10) .setMaxEpoch(10) val data = sc.parallelize(smallData.map(t => (t._1, t._2 - 1.0))) val df = sqlContext.createDataFrame(data).toDF("features", "label") val nnModel = classifier.fit(df) nnModel.isInstanceOf[NNClassifierModel[_]] should be(true) val correctCount = nnModel.transform(df).where("prediction=label").count() assert(correctCount > nRecords * 0.8) } "NNClassifier" should "apply with size support different FEATURE types" in { val model = new Sequential().add(Linear[Float](6, 2)).add(LogSoftMax[Float]) val criterion = ZooClassNLLCriterion[Float]() val classifier = NNClassifier(model, criterion, Array(6)) .setLearningRate(0.1) .setBatchSize(2) .setEndWhen(Trigger.maxIteration(2)) Array( sqlContext.createDataFrame(sc.parallelize(smallData.map(p => (p._1, p._2)))) .toDF("features", "label"), // Array[Double] sqlContext.createDataFrame(sc.parallelize(smallData.map(p => (p._1.map(_.toFloat), p._2)))) .toDF("features", "label") // Array[Float] // TODO: add ML Vector when ut for Spark 2.0+ is ready ).foreach { df => val nnModel = classifier.fit(df) nnModel.transform(df).collect() } } "NNClassifier" should "support scalar FEATURE" in { val model = new Sequential().add(Linear[Float](1, 2)).add(LogSoftMax[Float]) val criterion = ZooClassNLLCriterion[Float]() val classifier = NNClassifier(model, criterion, Array(1)) .setLearningRate(0.1) .setBatchSize(2) .setEndWhen(Trigger.maxIteration(2)) Array( sqlContext.createDataFrame(sc.parallelize(smallData.map(p => (p._1.head.toFloat, p._2)))) .toDF("features", 
"label"), // Float sqlContext.createDataFrame(sc.parallelize(smallData.map(p => (p._1.head, p._2)))) .toDF("features", "label") // Double // TODO: add ML Vector when ut for Spark 2.0+ is ready ).foreach { df => val nnModel = classifier.fit(df) nnModel.transform(df).collect() } } "NNClassifier" should "fit with adam and LBFGS" in { val model = new Sequential().add(Linear[Float](6, 2)).add(LogSoftMax[Float]) val criterion = ZooClassNLLCriterion[Float]() Seq(new LBFGS[Float], new Adam[Float]).foreach { optimMethod => val classifier = NNClassifier(model, criterion, Array(6)) .setBatchSize(nRecords) .setMaxEpoch(2) .setOptimMethod(optimMethod) .setLearningRate(0.1) val data = sc.parallelize(smallData) val df = sqlContext.createDataFrame(data).toDF("features", "label") val nnModel = classifier.fit(df) nnModel.isInstanceOf[NNClassifierModel[_]] should be(true) } } "NNClassifier" should "supports validation data and summary" in { val data = sc.parallelize(smallData) val df = sqlContext.createDataFrame(data).toDF("features", "label") val logdir = createTmpDir() val model = new Sequential().add(Linear[Float](6, 2)).add(LogSoftMax[Float]) val criterion = ZooClassNLLCriterion[Float]() val classifier = NNClassifier(model, criterion, Array(6)) .setBatchSize(nRecords) .setEndWhen(Trigger.maxIteration(5)) .setOptimMethod(new Adam[Float]) .setLearningRate(0.1) .setValidation(Trigger.severalIteration(1), df, Array(new Loss[Float]()), 2) .setValidationSummary(ValidationSummary(logdir.getPath, "NNEstimatorValidation")) classifier.fit(df) val validationSummary = classifier.getValidationSummary.get val losses = validationSummary.readScalar("Loss") validationSummary.close() logdir.deleteOnExit() } "NNClassifier" should "get the same classification result with BigDL model" in { Configurator.setLevel("org", Level.WARN) Configurator.setLevel("akka", Level.WARN) val model = LeNet5(10) // init val valTrans = NNClassifierModel(model, Array(28, 28)) .setBatchSize(4) val tensorBuffer = new 
ArrayBuffer[Data]() val input = Tensor[Float](10, 28, 28).rand() val target = model.forward(input).toTensor[Float] // test against NNClassifierModel val inputArr = input.storage().array() val targetArr = target.max(2)._2.squeeze().storage().array() (0 until 10).foreach(i => tensorBuffer.append( Data(targetArr(i), inputArr.slice(i * 28 * 28, (i + 1) * 28 * 28).map(_.toDouble)))) val rowRDD = sc.parallelize(tensorBuffer) val testData = sqlContext.createDataFrame(rowRDD) assert(valTrans.transform(testData).where("prediction=label").count() == testData.count()) tensorBuffer.clear() } "NNClassifier" should "works in ML pipeline" in { val appSparkVersion = org.apache.spark.SPARK_VERSION if (appSparkVersion.trim.startsWith("1")) { val data = sc.parallelize( smallData.map(p => (org.apache.spark.mllib.linalg.Vectors.dense(p._1), p._2))) val df: DataFrame = sqlContext.createDataFrame(data).toDF("features", "label") val scaler = new MinMaxScaler().setInputCol("features").setOutputCol("scaled") .setMax(1).setMin(-1) val model = new Sequential().add(Linear[Float](6, 2)).add(LogSoftMax[Float]) val criterion = ZooClassNLLCriterion[Float]() val estimator = NNClassifier(model, criterion) .setBatchSize(nRecords) .setOptimMethod(new LBFGS[Float]()) .setLearningRate(0.1) .setMaxEpoch(maxEpoch) .setFeaturesCol("scaled") val pipeline = new Pipeline().setStages(Array(scaler, estimator)) val pipelineModel = pipeline.fit(df) pipelineModel.isInstanceOf[PipelineModel] should be(true) assert(pipelineModel.transform(df).where("prediction=label").count() > nRecords * 0.8) } } "NNClasifier" should "support image FEATURE types" in { val pascalResource = getClass.getClassLoader.getResource("pascal/") val imageDF = NNImageReader.readImages(pascalResource.getFile, sc) .withColumn("label", lit(2.0f)) assert(imageDF.count() == 1) val transformer = RowToImageFeature() -> ImageResize(256, 256) -> ImageCenterCrop(224, 224) -> ImageChannelNormalize(123, 117, 104, 1, 1, 1) -> ImageMatToTensor() -> 
ImageFeatureToTensor() val estimator = NNClassifier(Inception_v1(1000), ZooClassNLLCriterion[Float](), transformer) .setBatchSize(1) .setEndWhen(Trigger.maxIteration(1)) .setFeaturesCol("image") estimator.fit(imageDF) } "NNClasifierModel" should "has default batchperthread as 4" in { val model = new Sequential().add(Linear[Float](6, 2)).add(LogSoftMax[Float]) val criterion = ZooClassNLLCriterion[Float]() Seq(new LBFGS[Float], new Adam[Float]).foreach { optimMethod => val classifier = NNClassifier(model, criterion, Array(6)) .setBatchSize(nRecords) .setMaxEpoch(2) .setOptimMethod(optimMethod) .setLearningRate(0.1) val data = sc.parallelize(smallData) val df = sqlContext.createDataFrame(data).toDF("features", "label") val nnModel = classifier.fit(df) nnModel.isInstanceOf[NNClassifierModel[_]] should be(true) nnModel.getBatchSize should be(4) } } "NNClasifierModel" should "return same results after saving and loading" in { val data = sqlContext.createDataFrame(smallData).toDF("features", "label") val module = new Sequential[Double]().add(Linear[Double](6, 2)).add(LogSoftMax[Double]) val nnModel = NNClassifierModel(module) val result = nnModel.transform(data).rdd.map(_.getAs[Double](2)).collect().sorted val filePath = createTmpFile().getPath nnModel.setBatchSize(10).setFeaturesCol("test123").setPredictionCol("predict123") nnModel.write.overwrite().save(filePath) val nnModel2 = NNClassifierModel.load(filePath) nnModel2.uid shouldEqual nnModel.uid nnModel2.getBatchSize shouldEqual nnModel.getBatchSize nnModel2.getFeaturesCol shouldEqual nnModel.getFeaturesCol nnModel2.getPredictionCol shouldEqual nnModel.getPredictionCol nnModel2.setFeaturesCol("features").setPredictionCol("prediction") val result2 = nnModel2.transform(data).rdd.map(_.getAs[Double](2)).collect().sorted result2 shouldEqual result } "NNClassifierModel" should "apply with differnt params" in { val model = Linear[Float](6, 2) val data = sc.parallelize(smallData) val df = 
sqlContext.createDataFrame(data).toDF("features", "label") Seq( NNClassifierModel(model), NNClassifierModel(model, Array(6)), NNClassifierModel(model, SeqToTensor(Array(6))) ).foreach { e => e.transform(df).count() assert(e.getBatchSize == 4) } } "NNClassifier" should "supports deep copy" in { val model = new Sequential().add(Linear[Float](6, 2)).add(LogSoftMax[Float]) val criterion = ZooClassNLLCriterion[Float]() val data = sc.parallelize( smallData.map(p => (org.apache.spark.mllib.linalg.Vectors.dense(p._1), p._2))) val df: DataFrame = sqlContext.createDataFrame(data).toDF("features", "label") val appName = System.nanoTime().toString val classifier = NNClassifier(model, criterion) .setBatchSize(31) .setOptimMethod(new LBFGS[Float]()) .setLearningRate(0.123) .setLearningRateDecay(0.432) .setMaxEpoch(13) .setFeaturesCol("abc") .setTrainSummary(new TrainSummary("/tmp", appName)) .setValidationSummary(new ValidationSummary("/tmp", appName)) .setValidation(Trigger.maxIteration(3), df, Array(new Loss[Float]()), 2) val copied = classifier.copy(ParamMap.empty) assert(classifier.model ne copied.model) assert(classifier.criterion ne copied.criterion) assert(classifier.model == copied.model) assert(classifier.criterion == copied.criterion) NNEstimatorSpec.compareParams(classifier, copied) val estVal = classifier.getValidation.get val copiedVal = copied.getValidation.get assert(estVal._1 == copiedVal._1) assert(estVal._2 == copiedVal._2) assert(estVal._3.deep == copiedVal._3.deep) assert(estVal._4 == copiedVal._4) // train Summary and validation Summary are not copied since they are not thread-safe and cannot // be shared among estimators assert(copied.getTrainSummary.isEmpty) assert(copied.getValidationSummary.isEmpty) } "NNClassifierModel" should "construct with sampleTransformer" in { val model = new Sequential().add(Linear[Float](6, 2)).add(LogSoftMax[Float]) val sampleTransformer = SeqToTensor(Array(6)) -> TensorToSample() val nnModel = 
NNClassifierModel(model).setBatchSize(nRecords) val data = sc.parallelize(smallData) val df = sqlContext.createDataFrame(data).toDF("features", "label") assert(nnModel.transform(df).count() == nRecords) } "NNClassifierModel" should "supports deep copy" in { val model = new Sequential().add(Linear[Float](6, 2)).add(LogSoftMax[Float]) val criterion = ZooClassNLLCriterion[Float]() val data = sc.parallelize( smallData.map(p => (org.apache.spark.mllib.linalg.Vectors.dense(p._1), p._2))) val df: DataFrame = sqlContext.createDataFrame(data).toDF("abc", "la") val classifier = NNClassifier(model, criterion) .setBatchSize(31) .setOptimMethod(new LBFGS[Float]()) .setLearningRate(0.123) .setLearningRateDecay(0.432) .setMaxEpoch(3) .setFeaturesCol("abc") .setLabelCol("la") val nnModel = classifier.fit(df) val copied = nnModel.copy(ParamMap.empty) assert(copied.isInstanceOf[NNClassifierModel[_]]) assert(nnModel.model ne copied.model) assert(nnModel.model == copied.model) NNEstimatorSpec.compareParams(nnModel, copied) } "NNClassifierModel" should "supports set Preprocessing" in { val model = new Sequential().add(Linear[Float](6, 2)).add(LogSoftMax[Float]) val criterion = ZooClassNLLCriterion[Float]() val data = sc.parallelize(smallData) val df = sqlContext.createDataFrame(data).toDF("features", "label") val classifier = NNClassifier(model, criterion) .setBatchSize(31) .setMaxEpoch(1) val nnModel = classifier.fit(df) val newPreprocessing = ArrayToTensor(Array(6)) -> TensorToSample() nnModel.setSamplePreprocessing(newPreprocessing) assert(df.count() == nnModel.transform(df).count()) } "XGBClassifierModel" should "work with sparse features" in { if (!(scala.util.Properties.isMac || scala.util.Properties.isWin)) { val path = getClass.getClassLoader.getResource("XGBClassifier").getPath val filePath = path + "/test.csv" val modelPath = path + "/XGBClassifer.bin" val spark = SparkSession.builder().getOrCreate() val df = spark.read.format("csv") .option("sep", ",") .option("inferSchema", 
true) .option("header", true) .load(filePath) val model = XGBClassifierModel.load(modelPath, 2) model.setFeaturesCol(Array("age", "gender", "jointime", "star")) model.transform(df).count() } } "XGBClassifierModel" should "work with dense features" in { if (!(scala.util.Properties.isMac || scala.util.Properties.isWin)) { val path = getClass.getClassLoader.getResource("XGBClassifier").getPath val filePath = path + "/iris.data" val modelPath = path + "/XGBClassifer.bin" val spark = SparkSession.builder().getOrCreate() val schema = new StructType(Array( StructField("sepal length", DoubleType, true), StructField("sepal width", DoubleType, true), StructField("petal length", DoubleType, true), StructField("petal width", DoubleType, true), StructField("class", StringType, true))) val df = spark.read.schema(schema).csv(filePath) val model = XGBClassifierModel.load(modelPath, 2) model.setFeaturesCol(Array("sepal length", "sepal width", "petal length", "petal width")) model.transform(df).count() } } "XGBRegressorModel" should "work" in { if (!(scala.util.Properties.isMac || scala.util.Properties.isWin)) { val path = getClass.getClassLoader.getResource("XGBClassifier").getPath val filePath = path + "/regressor.csv" val modelPath = path + "/xgbregressor0.model" val spark = SparkSession.builder().getOrCreate() val df = spark.read.format("csv") .option("sep", ",") .option("inferSchema", true) .option("header", true) .load(filePath) val vectorAssembler = new VectorAssembler() .setInputCols(Array("0", "1", "2", "3", "4", "5", "6", "7", "8", "9")) .setOutputCol("features_vec") val data = vectorAssembler.transform(df) val asDense = udf((v: Vector) => v.toDense) val xgbInput = data.withColumn("features", asDense(col("features_vec"))) val model = XGBRegressorModel.loadFromXGB(modelPath) model.transform(xgbInput).count() } } } private case class Data(label: Double, features: Array[Double])
intel-analytics/analytics-zoo
zoo/src/test/scala/com/intel/analytics/zoo/pipeline/nnframes/NNClassifierSpec.scala
Scala
apache-2.0
19,625
package org.jetbrains.plugins.scala.lang.psi.stubs.impl import com.intellij.psi.stubs._ import com.intellij.psi.{PsiElement, PsiNamedElement} /** * @author adkozlov */ abstract class ScNamedStubBase[E <: PsiNamedElement] protected[impl](parent: StubElement[_ <: PsiElement], elementType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement], name: String) extends StubBase[E](parent, elementType) with NamedStub[E] { override final def getName: String = name }
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/stubs/impl/ScNamedStubBase.scala
Scala
apache-2.0
626
package picasso.math.hol sealed abstract class Type { def freeParameters: Set[TypeVariable] def alpha(subst: Map[TypeVariable, Type]): Type //syntactic sugar def ~>(that: Type): Function = this match { case Function(args, ret) => Function(args ::: List(ret), that) case other => Function(List(this), that) } } object Type { var cnt = 0 //TODO synchronise def freshTypeVar = { cnt += 1 TypeVariable("_" + cnt) } def freshParams(tpe: Type): (Map[TypeVariable,TypeVariable], Type) = { var oldParams = tpe.freeParameters var subst: Map[TypeVariable,TypeVariable] = (for (t <- oldParams.toSeq) yield (t, freshTypeVar)).toMap (subst, tpe alpha subst) } /** Are there some object which are in the intersection of the two types ? * This is not exact but a good first approximation. * To make thing better we should keep some information about the subtyping of ClassTypes. */ def nonEmptyIntersection(tp1: Type, tp2: Type): Boolean = (tp1, tp2) match { case (Bool, Bool) | (Int, Int) | (String, String) | (Wildcard, _) | (_, Wildcard) | (_, TypeVariable(_)) | (TypeVariable(_), _) => true case (Product(lst1), Product(lst2)) => (lst1 zip lst2) forall { case (t1, t2) => nonEmptyIntersection(t1, t2) } case (Function(a1, r1), Function(a2, r2)) => nonEmptyIntersection(r1,r2) && ((a1 zip a2) forall { case (t1, t2) => nonEmptyIntersection(t1, t2) }) case (FiniteValues(lst1), FiniteValues(lst2)) => lst1 exists (lst2 contains _) case (UnInterpreted(n1), UnInterpreted(n2)) => n1 == n2 case (c1 @ ClassType(n1, a1), c2 @ ClassType(n2, a2)) => c1.isActor == c2.isActor && c1.isCollection == c2.isCollection && c1.isCase == c2.isCase && c1.isModule == c2.isModule && ((a1 zip a2) forall { case (t1, t2) => nonEmptyIntersection(t1, t2) }) case (_, _) => false } } case object Bool extends Type { override def toString = "Bool" def freeParameters = Set[TypeVariable]() def alpha(subst: Map[TypeVariable, Type]) = this } case object Int extends Type { override def toString = "Int" def freeParameters = 
Set[TypeVariable]() def alpha(subst: Map[TypeVariable, Type]) = this } case object String extends Type { override def toString = "String" def freeParameters = Set[TypeVariable]() def alpha(subst: Map[TypeVariable, Type]) = this } case object Wildcard extends Type { override def toString = "_" def freeParameters = Set[TypeVariable]() def alpha(subst: Map[TypeVariable, Type]) = this } case class Product(cmpts: List[Type]) extends Type { override def toString = cmpts.mkString("","*","") def freeParameters = (Set[TypeVariable]() /: cmpts)(_ ++ _.freeParameters) def alpha(subst: Map[TypeVariable, Type]) = Product(cmpts.map(_.alpha(subst))) } case class Function(args: List[Type], returns: Type) extends Type { override def toString = args.mkString("(","->","->") + returns + ")" def freeParameters = (returns.freeParameters /: args)(_ ++ _.freeParameters) def alpha(subst: Map[TypeVariable, Type]) = Function(args.map(_.alpha(subst)), returns.alpha(subst)) } case class FiniteValues[T](values: List[T]) extends Type { override def toString = values.mkString("{",",","}") def freeParameters = Set[TypeVariable]() def alpha(subst: Map[TypeVariable, Type]) = this } case class UnInterpreted(id: String) extends Type { override def toString = id def freeParameters = Set[TypeVariable]() def alpha(subst: Map[TypeVariable, Type]) = this } case class TypeVariable(name: String) extends Type { override def toString = "'"+name def freeParameters = Set[TypeVariable](this) def alpha(subst: Map[TypeVariable, Type]) = subst.getOrElse(this, this) } case class ClassType( name: String, tparams: List[Type]) extends Type { override def toString = name + (if (tparams.isEmpty) "" else tparams.mkString("[",",","]")) def freeParameters = (Set[TypeVariable]() /: tparams.map(_.freeParameters))(_ ++ _) def alpha(subst: Map[TypeVariable, Type]) = ClassType(name, tparams.map(_.alpha(subst))).copyAttr(this) //a series of flags that gives additional info var isActor = false var isCollection = false var isCase = 
false var isModule = false //unique global reference def copyAttr(from: ClassType): this.type = { isActor = from.isActor isCollection = from.isCollection isCase = from.isCase isModule = from.isModule this } } //TODO copier for Type //TODO accessor for tuples //TODO Nothing types ? object UnitT { private val instance = FiniteValues(List( () )) def apply(): FiniteValues[Unit] = instance def unapply(tpe: FiniteValues[Unit]) = tpe match { case FiniteValues(List( () )) => true case _ => false } } object Collection { def apply(name: String, t: Type): Type = { val ct = ClassType(name, t :: Nil) ct.isCollection = true ct } def unapply(tpe: ClassType): Option[(String, Type)] = tpe match { case ct @ ClassType(name, t :: Nil) if ct.isCollection => Some((name, t)) case _ => None } } object ActorType { def apply(name: String, lst: List[Type]): Type = { val ct = ClassType(name, lst) ct.isActor = true ct } def unapply(tpe: ClassType): Option[(String, List[Type])] = tpe match { case ct @ ClassType(a, b) if ct.isActor => Some((a,b)) case _ => None } } object CaseType { def apply(name: String, lst: List[Type]): Type = { val ct = ClassType(name, lst) ct.isCase = true ct } def unapply(tpe: ClassType): Option[(String, List[Type])] = tpe match { case ct @ ClassType(a, b) if ct.isCase => Some((a,b)) case _ => None } } /** Channel/name in the pi-calculus sense */ object Channel { //not really uninterpreted, but actually pretty much interpreted def apply() = UnInterpreted("name") def unapply(tpe: UnInterpreted): Boolean = tpe match { case UnInterpreted("name") => true case _ => false } }
dzufferey/picasso
core/src/main/scala/picasso/math/hol/Types.scala
Scala
bsd-2-clause
5,962
/* * Copyright (c) 2012-2013 SnowPlow Analytics Ltd. All rights reserved. * * This program is licensed to you under the Apache License Version 2.0, * and you may not use this file except in compliance with the Apache License Version 2.0. * You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, * software distributed under the Apache License Version 2.0 is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. */ package com.snowplowanalytics.snowplow.enrich package hadoop package good // Scala import scala.collection.mutable.Buffer // Specs2 import org.specs2.mutable.Specification // Scalding import com.twitter.scalding._ // Cascading import cascading.tuple.TupleEntry // This project import JobSpecHelpers._ /** * Holds the input and expected data * for the test. 
*/ object UnstructEventCfLineSpec { val lines = Lines( "2012-05-27 11:35:53 DFW3 3343 70.46.123.145 GET d3gs014xn8p70.cloudfront.net /ice.png 200 http://www.psychicbazaar.com/oracles/119-psycards-book-and-deck-starter-pack.html?view=print#detail Mozilla/5.0%20(Windows%20NT%206.1;%20WOW64;%20rv:12.0)%20Gecko/20100101%20Firefox/12.0 &e=ue&ue_px=eyJzY2hlbWEiOiJpZ2x1OmNvbS5zbm93cGxvd2FuYWx5dGljcy5zbm93cGxvdy91bnN0cnVjdF9ldmVudC9qc29uc2NoZW1hLzEtMC0wIiwiZGF0YSI6eyJzY2hlbWEiOiJpZ2x1OmNvbS5hY21lL3ZpZXdlZF9wcm9kdWN0L2pzb25zY2hlbWEvMi0wLTAiLCJkYXRhIjp7ImFnZSI6MjMsIm5hbWUiOiJKb2huIn19fQ==&dtm=1364230969450&evn=com.acme&tid=598951&vp=2560x934&ds=2543x1420&vid=43&duid=9795bd0203804cd1&p=web&tv=js-0.11.1&fp=2876815413&aid=pbzsite&lang=en-GB&cs=UTF-8&tz=Europe%2FLondon&refr=http%3A%2F%2Fwww.psychicbazaar.com%2F&f_pdf=1&f_qt=0&f_realp=0&f_wma=0&f_dir=0&f_fla=1&f_java=1&f_gears=0&f_ag=1&res=2560x1440&cd=32&cookie=1&url=http%3A%2F%2Fwww.psychicbazaar.com%2F2-tarot-cards" ) val expected = List( "pbzsite", "web", EtlTimestamp, "2012-05-27 11:35:53.000", "2013-03-25 17:02:49.450", "unstruct", null, // We can't predict the event_id "598951", null, // No tracker namespace "js-0.11.1", "cloudfront", EtlVersion, null, // No user_id set "70.46.123.145", "2876815413", "9795bd0203804cd1", "43", null, // No network_userid set "US", // US geolocation "FL", "Delray Beach", null, "26.461502", "-80.0728", "Florida", null, null, "nuvox.net", // Using the MaxMind domain lookup service null, "http://www.psychicbazaar.com/2-tarot-cards", null, // No page title for events "http://www.psychicbazaar.com/", "http", "www.psychicbazaar.com", "80", "/2-tarot-cards", null, null, "http", "www.psychicbazaar.com", "80", "/", null, null, "internal", // Internal referer null, null, null, // No marketing campaign info null, // null, // null, // null, // null, // No custom contexts null, // Structured event fields empty null, // null, // null, // null, // 
"""{"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0","data":{"schema":"iglu:com.acme/viewed_product/jsonschema/2-0-0","data":{"age":23,"name":"John"}}}""", // Unstructured event field set null, // Transaction fields empty null, // null, // null, // null, // null, // null, // null, // null, // Transaction item fields empty null, // null, // null, // null, // null, // null, // Page ping fields are empty null, // null, // null, // "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0", "Firefox 12", "Firefox", "12.0", "Browser", "GECKO", "en-GB", "1", "1", "1", "0", "0", "0", "0", "0", "1", "1", "32", "2560", "934", "Windows 7", "Windows", "Microsoft Corporation", "Europe/London", "Computer", "0", "2560", "1440", "UTF-8", "2543", "1420" ) } /** * Integration test for the EtlJob: * * Check that all tuples in a custom unstructured event * (CloudFront format) are successfully extracted. */ class UnstructEventCfLineSpec extends Specification { "A job which processes a CloudFront file containing 1 valid custom unstructured event" should { EtlJobSpec("cloudfront", "1", false, List("geo", "domain")). source(MultipleTextLineFiles("inputFolder"), UnstructEventCfLineSpec.lines). sink[TupleEntry](Tsv("outputFolder")){ buf : Buffer[TupleEntry] => "correctly output 1 custom unstructured event" in { buf.size must_== 1 val actual = buf.head for (idx <- UnstructEventCfLineSpec.expected.indices) { actual.getString(idx) must beFieldEqualTo(UnstructEventCfLineSpec.expected(idx), withIndex = idx) } } }. sink[TupleEntry](Tsv("exceptionsFolder")){ trap => "not trap any exceptions" in { trap must beEmpty } }. sink[String](Tsv("badFolder")){ error => "not write any bad rows" in { error must beEmpty } }. run. finish } }
guardian/snowplow
3-enrich/scala-hadoop-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.hadoop/good/UnstructEventCfLineSpec.scala
Scala
apache-2.0
5,428
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.server import java.util.{Collections, Properties} import kafka.admin.{AdminOperationException, AdminUtils} import kafka.common.TopicAlreadyMarkedForDeletionException import kafka.log.LogConfig import kafka.utils.Log4jController import kafka.metrics.KafkaMetricsGroup import kafka.utils._ import kafka.zk.{AdminZkClient, KafkaZkClient} import org.apache.kafka.clients.admin.AlterConfigOp import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.common.config.ConfigDef.ConfigKey import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, ConfigException, ConfigResource, LogLevelConfig} import org.apache.kafka.common.errors.{ApiException, InvalidConfigurationException, InvalidPartitionsException, InvalidReplicaAssignmentException, InvalidRequestException, ReassignmentInProgressException, TopicExistsException, UnknownTopicOrPartitionException} import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic import org.apache.kafka.common.message.CreateTopicsResponseData.{CreatableTopicConfigs, CreatableTopicResult} import org.apache.kafka.common.metrics.Metrics import 
org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.CreatePartitionsRequest.PartitionDetails import org.apache.kafka.common.requests.CreateTopicsRequest._ import org.apache.kafka.common.requests.DescribeConfigsResponse.ConfigSource import org.apache.kafka.common.requests.{AlterConfigsRequest, ApiError, DescribeConfigsResponse} import org.apache.kafka.server.policy.{AlterConfigPolicy, CreateTopicPolicy} import org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata import scala.collection.{Map, mutable, _} import scala.collection.JavaConverters._ class AdminManager(val config: KafkaConfig, val metrics: Metrics, val metadataCache: MetadataCache, val zkClient: KafkaZkClient) extends Logging with KafkaMetricsGroup { this.logIdent = "[Admin Manager on Broker " + config.brokerId + "]: " private val topicPurgatory = DelayedOperationPurgatory[DelayedOperation]("topic", config.brokerId) private val adminZkClient = new AdminZkClient(zkClient) private val createTopicPolicy = Option(config.getConfiguredInstance(KafkaConfig.CreateTopicPolicyClassNameProp, classOf[CreateTopicPolicy])) private val alterConfigPolicy = Option(config.getConfiguredInstance(KafkaConfig.AlterConfigPolicyClassNameProp, classOf[AlterConfigPolicy])) def hasDelayedTopicOperations = topicPurgatory.numDelayed != 0 private val defaultNumPartitions = config.numPartitions.intValue() private val defaultReplicationFactor = config.defaultReplicationFactor.shortValue() /** * Try to complete delayed topic operations with the request key */ def tryCompleteDelayedTopicOperations(topic: String): Unit = { val key = TopicKey(topic) val completed = topicPurgatory.checkAndComplete(key) debug(s"Request key ${key.keyLabel} unblocked $completed topic requests.") } /** * Create topics and wait until the topics have been completely created. * The callback function will be triggered either when timeout, error or the topics are created. 
*/ def createTopics(timeout: Int, validateOnly: Boolean, toCreate: Map[String, CreatableTopic], includeConfigsAndMetatadata: Map[String, CreatableTopicResult], responseCallback: Map[String, ApiError] => Unit): Unit = { // 1. map over topics creating assignment and calling zookeeper val brokers = metadataCache.getAliveBrokers.map { b => kafka.admin.BrokerMetadata(b.id, b.rack) } val metadata = toCreate.values.map(topic => try { if (metadataCache.contains(topic.name)) throw new TopicExistsException(s"Topic '${topic.name}' already exists.") val configs = new Properties() topic.configs.asScala.foreach { entry => configs.setProperty(entry.name, entry.value) } LogConfig.validate(configs) if ((topic.numPartitions != NO_NUM_PARTITIONS || topic.replicationFactor != NO_REPLICATION_FACTOR) && !topic.assignments().isEmpty) { throw new InvalidRequestException("Both numPartitions or replicationFactor and replicasAssignments were set. " + "Both cannot be used at the same time.") } val resolvedNumPartitions = if (topic.numPartitions == NO_NUM_PARTITIONS) defaultNumPartitions else topic.numPartitions val resolvedReplicationFactor = if (topic.replicationFactor == NO_REPLICATION_FACTOR) defaultReplicationFactor else topic.replicationFactor val assignments = if (topic.assignments().isEmpty) { AdminUtils.assignReplicasToBrokers( brokers, resolvedNumPartitions, resolvedReplicationFactor) } else { val assignments = new mutable.HashMap[Int, Seq[Int]] // Note: we don't check that replicaAssignment contains unknown brokers - unlike in add-partitions case, // this follows the existing logic in TopicCommand topic.assignments.asScala.foreach { case assignment => assignments(assignment.partitionIndex()) = assignment.brokerIds().asScala.map(a => a: Int) } assignments } trace(s"Assignments for topic $topic are $assignments ") createTopicPolicy match { case Some(policy) => adminZkClient.validateTopicCreate(topic.name(), assignments, configs) // Use `null` for unset fields in the public API val 
numPartitions: java.lang.Integer = if (topic.assignments().isEmpty) resolvedNumPartitions else null val replicationFactor: java.lang.Short = if (topic.assignments().isEmpty) resolvedReplicationFactor else null val javaAssignments = if (topic.assignments().isEmpty) { null } else { assignments.map { case (k, v) => (k: java.lang.Integer) -> v.map(i => i: java.lang.Integer).asJava }.asJava } val javaConfigs = new java.util.HashMap[String, String] topic.configs().asScala.foreach(config => javaConfigs.put(config.name(), config.value())) policy.validate(new RequestMetadata(topic.name, numPartitions, replicationFactor, javaAssignments, javaConfigs)) if (!validateOnly) adminZkClient.createTopicWithAssignment(topic.name, configs, assignments) case None => if (validateOnly) adminZkClient.validateTopicCreate(topic.name, assignments, configs) else adminZkClient.createTopicWithAssignment(topic.name, configs, assignments) } // For responses with DescribeConfigs permission, populate metadata and configs includeConfigsAndMetatadata.get(topic.name).foreach { result => val logConfig = LogConfig.fromProps(KafkaServer.copyKafkaConfigToLog(config), configs) val createEntry = createTopicConfigEntry(logConfig, configs, includeSynonyms = false)(_, _) val topicConfigs = logConfig.values.asScala.map { case (k, v) => val entry = createEntry(k, v) val source = ConfigSource.values.indices.map(_.toByte) .find(i => ConfigSource.forId(i.toByte) == entry.source) .getOrElse(0.toByte) new CreatableTopicConfigs() .setName(k) .setValue(entry.value) .setIsSensitive(entry.isSensitive) .setReadOnly(entry.isReadOnly) .setConfigSource(source) }.toList.asJava result.setConfigs(topicConfigs) result.setNumPartitions(assignments.size) result.setReplicationFactor(assignments(0).size.toShort) } CreatePartitionsMetadata(topic.name, assignments, ApiError.NONE) } catch { // Log client errors at a lower level than unexpected exceptions case e: TopicExistsException => debug(s"Topic creation failed since topic 
'${topic.name}' already exists.", e) CreatePartitionsMetadata(topic.name, Map(), ApiError.fromThrowable(e)) case e: ApiException => info(s"Error processing create topic request $topic", e) CreatePartitionsMetadata(topic.name, Map(), ApiError.fromThrowable(e)) case e: ConfigException => info(s"Error processing create topic request $topic", e) CreatePartitionsMetadata(topic.name, Map(), ApiError.fromThrowable(new InvalidConfigurationException(e.getMessage, e.getCause))) case e: Throwable => error(s"Error processing create topic request $topic", e) CreatePartitionsMetadata(topic.name, Map(), ApiError.fromThrowable(e)) }).toBuffer // 2. if timeout <= 0, validateOnly or no topics can proceed return immediately if (timeout <= 0 || validateOnly || !metadata.exists(_.error.is(Errors.NONE))) { val results = metadata.map { createTopicMetadata => // ignore topics that already have errors if (createTopicMetadata.error.isSuccess() && !validateOnly) { (createTopicMetadata.topic, new ApiError(Errors.REQUEST_TIMED_OUT, null)) } else { (createTopicMetadata.topic, createTopicMetadata.error) } }.toMap responseCallback(results) } else { // 3. else pass the assignments and errors to the delayed operation and set the keys val delayedCreate = new DelayedCreatePartitions(timeout, metadata, this, responseCallback) val delayedCreateKeys = toCreate.values.map(topic => new TopicKey(topic.name)).toBuffer // try to complete the request immediately, otherwise put it into the purgatory topicPurgatory.tryCompleteElseWatch(delayedCreate, delayedCreateKeys) } } /** * Delete topics and wait until the topics have been completely deleted. * The callback function will be triggered either when timeout, error or the topics are deleted. */ def deleteTopics(timeout: Int, topics: Set[String], responseCallback: Map[String, Errors] => Unit): Unit = { // 1. 
map over topics calling the asynchronous delete val metadata = topics.map { topic => try { adminZkClient.deleteTopic(topic) DeleteTopicMetadata(topic, Errors.NONE) } catch { case _: TopicAlreadyMarkedForDeletionException => // swallow the exception, and still track deletion allowing multiple calls to wait for deletion DeleteTopicMetadata(topic, Errors.NONE) case e: Throwable => error(s"Error processing delete topic request for topic $topic", e) DeleteTopicMetadata(topic, Errors.forException(e)) } } // 2. if timeout <= 0 or no topics can proceed return immediately if (timeout <= 0 || !metadata.exists(_.error == Errors.NONE)) { val results = metadata.map { deleteTopicMetadata => // ignore topics that already have errors if (deleteTopicMetadata.error == Errors.NONE) { (deleteTopicMetadata.topic, Errors.REQUEST_TIMED_OUT) } else { (deleteTopicMetadata.topic, deleteTopicMetadata.error) } }.toMap responseCallback(results) } else { // 3. else pass the topics and errors to the delayed operation and set the keys val delayedDelete = new DelayedDeleteTopics(timeout, metadata.toSeq, this, responseCallback) val delayedDeleteKeys = topics.map(new TopicKey(_)).toSeq // try to complete the request immediately, otherwise put it into the purgatory topicPurgatory.tryCompleteElseWatch(delayedDelete, delayedDeleteKeys) } } def createPartitions(timeout: Int, newPartitions: Map[String, PartitionDetails], validateOnly: Boolean, listenerName: ListenerName, callback: Map[String, ApiError] => Unit): Unit = { val allBrokers = adminZkClient.getBrokerMetadatas() val allBrokerIds = allBrokers.map(_.id) // 1. 
map over topics creating assignment and calling AdminUtils val metadata = newPartitions.map { case (topic, newPartition) => try { val existingAssignment = zkClient.getFullReplicaAssignmentForTopics(immutable.Set(topic)).map { case (topicPartition, assignment) => if (assignment.isBeingReassigned) { // We prevent adding partitions while topic reassignment is in progress, to protect from a race condition // between the controller thread processing reassignment update and createPartitions(this) request. throw new ReassignmentInProgressException(s"A partition reassignment is in progress for the topic '$topic'.") } topicPartition.partition -> assignment } if (existingAssignment.isEmpty) throw new UnknownTopicOrPartitionException(s"The topic '$topic' does not exist.") val oldNumPartitions = existingAssignment.size val newNumPartitions = newPartition.totalCount val numPartitionsIncrement = newNumPartitions - oldNumPartitions if (numPartitionsIncrement < 0) { throw new InvalidPartitionsException( s"Topic currently has $oldNumPartitions partitions, which is higher than the requested $newNumPartitions.") } else if (numPartitionsIncrement == 0) { throw new InvalidPartitionsException(s"Topic already has $oldNumPartitions partitions.") } val newPartitionsAssignment = Option(newPartition.newAssignments).map(_.asScala.map(_.asScala.map(_.toInt))).map { assignments => val unknownBrokers = assignments.flatten.toSet -- allBrokerIds if (unknownBrokers.nonEmpty) throw new InvalidReplicaAssignmentException( s"Unknown broker(s) in replica assignment: ${unknownBrokers.mkString(", ")}.") if (assignments.size != numPartitionsIncrement) throw new InvalidReplicaAssignmentException( s"Increasing the number of partitions by $numPartitionsIncrement " + s"but ${assignments.size} assignments provided.") assignments.zipWithIndex.map { case (replicas, index) => existingAssignment.size + index -> replicas }.toMap } val updatedReplicaAssignment = adminZkClient.addPartitions(topic, existingAssignment, 
allBrokers, newPartition.totalCount, newPartitionsAssignment, validateOnly = validateOnly) CreatePartitionsMetadata(topic, updatedReplicaAssignment, ApiError.NONE) } catch { case e: AdminOperationException => CreatePartitionsMetadata(topic, Map.empty, ApiError.fromThrowable(e)) case e: ApiException => CreatePartitionsMetadata(topic, Map.empty, ApiError.fromThrowable(e)) } } // 2. if timeout <= 0, validateOnly or no topics can proceed return immediately if (timeout <= 0 || validateOnly || !metadata.exists(_.error.is(Errors.NONE))) { val results = metadata.map { createPartitionMetadata => // ignore topics that already have errors if (createPartitionMetadata.error.isSuccess() && !validateOnly) { (createPartitionMetadata.topic, new ApiError(Errors.REQUEST_TIMED_OUT, null)) } else { (createPartitionMetadata.topic, createPartitionMetadata.error) } }.toMap callback(results) } else { // 3. else pass the assignments and errors to the delayed operation and set the keys val delayedCreate = new DelayedCreatePartitions(timeout, metadata.toSeq, this, callback) val delayedCreateKeys = newPartitions.keySet.map(new TopicKey(_)).toSeq // try to complete the request immediately, otherwise put it into the purgatory topicPurgatory.tryCompleteElseWatch(delayedCreate, delayedCreateKeys) } } def describeConfigs(resourceToConfigNames: Map[ConfigResource, Option[Set[String]]], includeSynonyms: Boolean): Map[ConfigResource, DescribeConfigsResponse.Config] = { resourceToConfigNames.map { case (resource, configNames) => def allConfigs(config: AbstractConfig) = { config.originals.asScala.filter(_._2 != null) ++ config.values.asScala } def createResponseConfig(configs: Map[String, Any], createConfigEntry: (String, Any) => DescribeConfigsResponse.ConfigEntry): DescribeConfigsResponse.Config = { val filteredConfigPairs = configs.filter { case (configName, _) => /* Always returns true if configNames is None */ configNames.forall(_.contains(configName)) }.toBuffer val configEntries = 
filteredConfigPairs.map { case (name, value) => createConfigEntry(name, value) } new DescribeConfigsResponse.Config(ApiError.NONE, configEntries.asJava) } try { val resourceConfig = resource.`type` match { case ConfigResource.Type.TOPIC => val topic = resource.name Topic.validate(topic) if (metadataCache.contains(topic)) { // Consider optimizing this by caching the configs or retrieving them from the `Log` when possible val topicProps = adminZkClient.fetchEntityConfig(ConfigType.Topic, topic) val logConfig = LogConfig.fromProps(KafkaServer.copyKafkaConfigToLog(config), topicProps) createResponseConfig(allConfigs(logConfig), createTopicConfigEntry(logConfig, topicProps, includeSynonyms)) } else { new DescribeConfigsResponse.Config(new ApiError(Errors.UNKNOWN_TOPIC_OR_PARTITION, null), Collections.emptyList[DescribeConfigsResponse.ConfigEntry]) } case ConfigResource.Type.BROKER => if (resource.name == null || resource.name.isEmpty) createResponseConfig(config.dynamicConfig.currentDynamicDefaultConfigs, createBrokerConfigEntry(perBrokerConfig = false, includeSynonyms)) else if (resourceNameToBrokerId(resource.name) == config.brokerId) createResponseConfig(allConfigs(config), createBrokerConfigEntry(perBrokerConfig = true, includeSynonyms)) else throw new InvalidRequestException(s"Unexpected broker id, expected ${config.brokerId} or empty string, but received ${resource.name}") case ConfigResource.Type.BROKER_LOGGER => if (resource.name == null || resource.name.isEmpty) throw new InvalidRequestException("Broker id must not be empty") else if (resourceNameToBrokerId(resource.name) != config.brokerId) throw new InvalidRequestException(s"Unexpected broker id, expected ${config.brokerId} but received ${resource.name}") else createResponseConfig(Log4jController.loggers, (name, value) => new DescribeConfigsResponse.ConfigEntry(name, value.toString, ConfigSource.DYNAMIC_BROKER_LOGGER_CONFIG, false, false, List.empty.asJava)) case resourceType => throw new 
InvalidRequestException(s"Unsupported resource type: $resourceType") } resource -> resourceConfig } catch { case e: Throwable => // Log client errors at a lower level than unexpected exceptions val message = s"Error processing describe configs request for resource $resource" if (e.isInstanceOf[ApiException]) info(message, e) else error(message, e) resource -> new DescribeConfigsResponse.Config(ApiError.fromThrowable(e), Collections.emptyList[DescribeConfigsResponse.ConfigEntry]) } }.toMap } def alterConfigs(configs: Map[ConfigResource, AlterConfigsRequest.Config], validateOnly: Boolean): Map[ConfigResource, ApiError] = { configs.map { case (resource, config) => try { val configEntriesMap = config.entries.asScala.map(entry => (entry.name, entry.value)).toMap val configProps = new Properties config.entries.asScala.foreach { configEntry => configProps.setProperty(configEntry.name, configEntry.value) } resource.`type` match { case ConfigResource.Type.TOPIC => alterTopicConfigs(resource, validateOnly, configProps, configEntriesMap) case ConfigResource.Type.BROKER => alterBrokerConfigs(resource, validateOnly, configProps, configEntriesMap) case resourceType => throw new InvalidRequestException(s"AlterConfigs is only supported for topics and brokers, but resource type is $resourceType") } } catch { case e @ (_: ConfigException | _: IllegalArgumentException) => val message = s"Invalid config value for resource $resource: ${e.getMessage}" info(message) resource -> ApiError.fromThrowable(new InvalidRequestException(message, e)) case e: Throwable => // Log client errors at a lower level than unexpected exceptions val message = s"Error processing alter configs request for resource $resource, config $config" if (e.isInstanceOf[ApiException]) info(message, e) else error(message, e) resource -> ApiError.fromThrowable(e) } }.toMap } private def alterTopicConfigs(resource: ConfigResource, validateOnly: Boolean, configProps: Properties, configEntriesMap: Map[String, String]): 
// Tail of a private alter-topic-configs method whose signature is cut off at the
// chunk boundary: validates the merged topic config, runs the pluggable policy,
// and (unless validateOnly) persists the change, returning (resource, ApiError.NONE).
(ConfigResource, ApiError) = {
    val topic = resource.name
    // Broker-side validation of the full merged topic config before any write.
    adminZkClient.validateTopicConfig(topic, configProps)
    validateConfigPolicy(resource, configEntriesMap)
    if (!validateOnly) {
      info(s"Updating topic $topic with new configuration $config")
      adminZkClient.changeTopicConfig(topic, configProps)
    }
    resource -> ApiError.NONE
  }

  /**
   * Validates and (unless validateOnly) applies a broker config change.
   * An empty broker id in the resource name means the cluster-wide default entity.
   */
  private def alterBrokerConfigs(resource: ConfigResource, validateOnly: Boolean,
                                 configProps: Properties, configEntriesMap: Map[String, String]): (ConfigResource, ApiError) = {
    val brokerId = getBrokerId(resource)
    val perBrokerConfig = brokerId.nonEmpty
    this.config.dynamicConfig.validate(configProps, perBrokerConfig)
    validateConfigPolicy(resource, configEntriesMap)
    if (!validateOnly) {
      // Per-broker updates may reference files (e.g. keystores) that changed on disk.
      if (perBrokerConfig)
        this.config.dynamicConfig.reloadUpdatedFilesWithoutConfigChange(configProps)
      adminZkClient.changeBrokerConfig(brokerId,
        this.config.dynamicConfig.toPersistentProps(configProps, perBrokerConfig))
    }
    resource -> ApiError.NONE
  }

  /** Applies SET/DELETE log-level operations via Log4jController (ops validated beforehand). */
  private def alterLogLevelConfigs(alterConfigOps: List[AlterConfigOp]): Unit = {
    alterConfigOps.foreach { alterConfigOp =>
      val loggerName = alterConfigOp.configEntry().name()
      val logLevel = alterConfigOp.configEntry().value()
      alterConfigOp.opType() match {
        case OpType.SET => Log4jController.logLevel(loggerName, logLevel)
        case OpType.DELETE => Log4jController.unsetLogLevel(loggerName)
      }
    }
  }

  /**
   * Resolves the broker id encoded in the resource name.
   * Returns None for an empty name (cluster default); throws InvalidRequestException
   * if the name does not match this broker's id.
   */
  private def getBrokerId(resource: ConfigResource) = {
    if (resource.name == null || resource.name.isEmpty)
      None
    else {
      val id = resourceNameToBrokerId(resource.name)
      if (id != this.config.brokerId)
        throw new InvalidRequestException(s"Unexpected broker id, expected ${this.config.brokerId}, but received ${resource.name}")
      Some(id)
    }
  }

  /** Runs the configured AlterConfigPolicy, if any, against the requested entries. */
  private def validateConfigPolicy(resource: ConfigResource, configEntriesMap: Map[String, String]): Unit = {
    alterConfigPolicy match {
      case Some(policy) =>
        policy.validate(new AlterConfigPolicy.RequestMetadata(
          new ConfigResource(resource.`type`(), resource.name), configEntriesMap.asJava))
      case None =>
    }
  }

  /**
   * Handles an IncrementalAlterConfigs request: for each resource, merges the
   * requested ops into the currently persisted config and applies the result.
   * Per-resource failures are converted to ApiError values rather than propagated,
   * so one bad resource does not fail the whole batch.
   */
  def incrementalAlterConfigs(configs: Map[ConfigResource, List[AlterConfigOp]], validateOnly: Boolean): Map[ConfigResource, ApiError] = {
    configs.map { case (resource, alterConfigOps) =>
      try {
        // throw InvalidRequestException if any duplicate keys
        val duplicateKeys = alterConfigOps.groupBy(config => config.configEntry().name())
          .mapValues(_.size).filter(_._2 > 1).keys.toSet
        if (duplicateKeys.nonEmpty)
          throw new InvalidRequestException(s"Error due to duplicate config keys : ${duplicateKeys.mkString(",")}")
        val configEntriesMap = alterConfigOps.map(entry => (entry.configEntry().name(), entry.configEntry().value())).toMap

        resource.`type` match {
          case ConfigResource.Type.TOPIC =>
            // Merge ops into the persisted topic config, then validate and apply.
            val configProps = adminZkClient.fetchEntityConfig(ConfigType.Topic, resource.name)
            prepareIncrementalConfigs(alterConfigOps, configProps, LogConfig.configKeys)
            alterTopicConfigs(resource, validateOnly, configProps, configEntriesMap)

          case ConfigResource.Type.BROKER =>
            val brokerId = getBrokerId(resource)
            val perBrokerConfig = brokerId.nonEmpty

            // Empty resource name targets the cluster-wide default config entity.
            val persistentProps = if (perBrokerConfig) adminZkClient.fetchEntityConfig(ConfigType.Broker, brokerId.get.toString)
            else adminZkClient.fetchEntityConfig(ConfigType.Broker, ConfigEntityName.Default)

            val configProps = this.config.dynamicConfig.fromPersistentProps(persistentProps, perBrokerConfig)
            prepareIncrementalConfigs(alterConfigOps, configProps, KafkaConfig.configKeys)
            alterBrokerConfigs(resource, validateOnly, configProps, configEntriesMap)

          case ConfigResource.Type.BROKER_LOGGER =>
            // getBrokerId is called for its validation side effect only.
            getBrokerId(resource)
            validateLogLevelConfigs(alterConfigOps)
            if (!validateOnly)
              alterLogLevelConfigs(alterConfigOps)
            resource -> ApiError.NONE
          case resourceType =>
            throw new InvalidRequestException(s"AlterConfigs is only supported for topics and brokers, but resource type is $resourceType")
        }
      } catch {
        case e @ (_: ConfigException | _: IllegalArgumentException) =>
          val message = s"Invalid config value for resource $resource: ${e.getMessage}"
          info(message)
          resource -> ApiError.fromThrowable(new InvalidRequestException(message, e))
        case e: Throwable =>
          // Log client errors at a lower level than unexpected exceptions.
          // All errors are deliberately mapped to an ApiError for this resource
          // so the rest of the batch is still processed.
          val message = s"Error processing alter configs request for resource $resource, config $alterConfigOps"
          if (e.isInstanceOf[ApiException])
            info(message, e)
          else
            error(message, e)
          resource -> ApiError.fromThrowable(e)
      }
    }.toMap
  }

  /**
   * Validates BROKER_LOGGER ops: SET requires an existing logger and a supported
   * level; DELETE requires an existing, non-root logger; APPEND/SUBTRACT are rejected.
   */
  private def validateLogLevelConfigs(alterConfigOps: List[AlterConfigOp]): Unit = {
    def validateLoggerNameExists(loggerName: String): Unit = {
      if (!Log4jController.loggerExists(loggerName))
        throw new ConfigException(s"Logger $loggerName does not exist!")
    }

    alterConfigOps.foreach { alterConfigOp =>
      val loggerName = alterConfigOp.configEntry().name()
      alterConfigOp.opType() match {
        case OpType.SET =>
          validateLoggerNameExists(loggerName)
          val logLevel = alterConfigOp.configEntry().value()
          if (!LogLevelConfig.VALID_LOG_LEVELS.contains(logLevel)) {
            val validLevelsStr = LogLevelConfig.VALID_LOG_LEVELS.asScala.mkString(", ")
            throw new ConfigException(
              s"Cannot set the log level of $loggerName to $logLevel as it is not a supported log level. " +
              s"Valid log levels are $validLevelsStr"
            )
          }
        case OpType.DELETE =>
          validateLoggerNameExists(loggerName)
          // Unsetting the root logger would leave log4j without an effective level.
          if (loggerName == Log4jController.ROOT_LOGGER)
            throw new InvalidRequestException(s"Removing the log level of the ${Log4jController.ROOT_LOGGER} logger is not allowed")
        case OpType.APPEND => throw new InvalidRequestException(s"${OpType.APPEND} operation is not allowed for the ${ConfigResource.Type.BROKER_LOGGER} resource")
        case OpType.SUBTRACT => throw new InvalidRequestException(s"${OpType.SUBTRACT} operation is not allowed for the ${ConfigResource.Type.BROKER_LOGGER} resource")
      }
    }
  }

  /**
   * Mutates configProps in place, applying SET/DELETE/APPEND/SUBTRACT ops.
   * APPEND and SUBTRACT are only valid for LIST-typed configs and operate on
   * the comma-separated value.
   */
  private def prepareIncrementalConfigs(alterConfigOps: List[AlterConfigOp], configProps: Properties, configKeys: Map[String, ConfigKey]): Unit = {

    def listType(configName: String, configKeys: Map[String, ConfigKey]): Boolean = {
      // NOTE(review): configKeys(configName) on a Scala Map throws
      // NoSuchElementException for an unknown name, so the null check below
      // looks unreachable and the intended InvalidConfigurationException would
      // never be thrown — verify against the actual configKeys implementation.
      val configKey = configKeys(configName)
      if (configKey == null)
        throw new InvalidConfigurationException(s"Unknown topic config name: $configName")
      configKey.`type` == ConfigDef.Type.LIST
    }

    alterConfigOps.foreach { alterConfigOp =>
      alterConfigOp.opType() match {
        case OpType.SET => configProps.setProperty(alterConfigOp.configEntry().name(), alterConfigOp.configEntry().value())
        case OpType.DELETE => configProps.remove(alterConfigOp.configEntry().name())
        case OpType.APPEND => {
          if (!listType(alterConfigOp.configEntry().name(), configKeys))
            throw new InvalidRequestException(s"Config value append is not allowed for config key: ${alterConfigOp.configEntry().name()}")
          // NOTE(review): getProperty returns null when the key is absent, which
          // would NPE on split — confirm APPEND is only issued for keys that
          // already have a value, or that callers guarantee a default.
          val oldValueList = configProps.getProperty(alterConfigOp.configEntry().name()).split(",").toList
          val newValueList = oldValueList ::: alterConfigOp.configEntry().value().split(",").toList
          configProps.setProperty(alterConfigOp.configEntry().name(), newValueList.mkString(","))
        }
        case OpType.SUBTRACT => {
          if (!listType(alterConfigOp.configEntry().name(), configKeys))
            throw new InvalidRequestException(s"Config value subtract is not allowed for config key: ${alterConfigOp.configEntry().name()}")
          // NOTE(review): same possible null getProperty as APPEND above.
          val oldValueList = configProps.getProperty(alterConfigOp.configEntry().name()).split(",").toList
          val newValueList = oldValueList.diff(alterConfigOp.configEntry().value().split(",").toList)
          configProps.setProperty(alterConfigOp.configEntry().name(), newValueList.mkString(","))
        }
      }
    }
  }

  /** Shuts down the purgatory and closes any configured policies, swallowing close errors. */
  def shutdown(): Unit = {
    topicPurgatory.shutdown()
    CoreUtils.swallow(createTopicPolicy.foreach(_.close()), this)
    CoreUtils.swallow(alterConfigPolicy.foreach(_.close()), this)
  }

  /** Parses the resource name as an integer broker id or fails with InvalidRequestException. */
  private def resourceNameToBrokerId(resourceName: String): Int = {
    try resourceName.toInt catch {
      case _: NumberFormatException =>
        throw new InvalidRequestException(s"Broker id must be an integer, but it is: $resourceName")
    }
  }

  /** All config names (including listener overrides) that act as synonyms of `name`. */
  private def brokerSynonyms(name: String): List[String] = {
    DynamicBrokerConfig.brokerConfigSynonyms(name, matchListenerOverride = true)
  }

  /** Type of `name`, falling back to the first synonym with a known type; null if none. */
  private def configType(name: String, synonyms: List[String]): ConfigDef.Type = {
    val configType = config.typeOf(name)
    if (configType != null)
      configType
    else
      synonyms.iterator.map(config.typeOf).find(_ != null).orNull
  }

  /**
   * Collects the synonym chain for `name` across the config sources in
   * precedence order (dynamic per-broker, dynamic default, static, static default).
   * Sensitive values are replaced with null.
   */
  private def configSynonyms(name: String, synonyms: List[String], isSensitive: Boolean): List[DescribeConfigsResponse.ConfigSynonym] = {
    val dynamicConfig = config.dynamicConfig
    val allSynonyms = mutable.Buffer[DescribeConfigsResponse.ConfigSynonym]()

    def maybeAddSynonym(map: Map[String, String], source: ConfigSource)(name: String): Unit = {
      map.get(name).map { value =>
        val configValue = if (isSensitive) null else value
        allSynonyms += new DescribeConfigsResponse.ConfigSynonym(name, configValue, source)
      }
    }

    synonyms.foreach(maybeAddSynonym(dynamicConfig.currentDynamicBrokerConfigs, ConfigSource.DYNAMIC_BROKER_CONFIG))
    synonyms.foreach(maybeAddSynonym(dynamicConfig.currentDynamicDefaultConfigs, ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG))
    synonyms.foreach(maybeAddSynonym(dynamicConfig.staticBrokerConfigs, ConfigSource.STATIC_BROKER_CONFIG))
    synonyms.foreach(maybeAddSynonym(dynamicConfig.staticDefaultConfigs, ConfigSource.DEFAULT_CONFIG))
    allSynonyms.dropWhile(s => s.name != name).toList // e.g. drop listener overrides when describing base config
  }

  /**
   * Builds a DescribeConfigs entry for a topic config, resolving its effective
   * source and synonym chain. PASSWORD-typed values are masked.
   */
  private def createTopicConfigEntry(logConfig: LogConfig, topicProps: Properties, includeSynonyms: Boolean)
                                    (name: String, value: Any): DescribeConfigsResponse.ConfigEntry = {
    val configEntryType = logConfig.typeOf(name)
    val isSensitive = configEntryType == ConfigDef.Type.PASSWORD
    val valueAsString = if (isSensitive) null else ConfigDef.convertToString(value, configEntryType)
    val allSynonyms = {
      val list = LogConfig.TopicConfigSynonyms.get(name)
        .map(s => configSynonyms(s, brokerSynonyms(s), isSensitive))
        .getOrElse(List.empty)
      // A topic-level override, when present, takes precedence over broker synonyms.
      if (!topicProps.containsKey(name))
        list
      else
        new DescribeConfigsResponse.ConfigSynonym(name, valueAsString, ConfigSource.TOPIC_CONFIG) +: list
    }
    val source = if (allSynonyms.isEmpty) ConfigSource.DEFAULT_CONFIG else allSynonyms.head.source
    val synonyms = if (!includeSynonyms) List.empty else allSynonyms
    new DescribeConfigsResponse.ConfigEntry(name, valueAsString, source, isSensitive, false, synonyms.asJava)
  }

  /**
   * Builds a DescribeConfigs entry for a broker config. Unknown-typed configs
   * are treated as sensitive; read-only is derived from the dynamic config set.
   */
  private def createBrokerConfigEntry(perBrokerConfig: Boolean, includeSynonyms: Boolean)
                                     (name: String, value: Any): DescribeConfigsResponse.ConfigEntry = {
    val allNames = brokerSynonyms(name)
    val configEntryType = configType(name, allNames)
    // If we can't determine the config entry type, treat it as a sensitive config to be safe
    val isSensitive = configEntryType == ConfigDef.Type.PASSWORD || configEntryType == null
    val valueAsString = if (isSensitive)
      null
    else value match {
      case v: String => v
      case _ => ConfigDef.convertToString(value, configEntryType)
    }
    // When describing the cluster default, only dynamic default synonyms apply.
    val allSynonyms = configSynonyms(name, allNames, isSensitive)
      .filter(perBrokerConfig || _.source == ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG)
    val synonyms = if (!includeSynonyms) List.empty else allSynonyms
    val source = if (allSynonyms.isEmpty) ConfigSource.DEFAULT_CONFIG else allSynonyms.head.source
    val readOnly = !DynamicBrokerConfig.AllDynamicConfigs.contains(name)
    new DescribeConfigsResponse.ConfigEntry(name, valueAsString, source, isSensitive, readOnly, synonyms.asJava)
  }
}
noslowerdna/kafka
core/src/main/scala/kafka/server/AdminManager.scala
Scala
apache-2.0
35,388
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.mllib.linalg

import breeze.linalg.{DenseMatrix => BDM, DenseVector => BDV}
import com.github.fommil.netlib.ARPACK
import org.netlib.util.{doubleW, intW}

/**
 * Compute eigen-decomposition.
 */
private[mllib] object EigenValueDecomposition {
  /**
   * Compute the leading k eigenvalues and eigenvectors on a symmetric square matrix using ARPACK.
   * The caller needs to ensure that the input matrix is real symmetric. This function requires
   * memory for `n*(4*k+4)` doubles.
   *
   * @param mul a function that multiplies the symmetric matrix with a DenseVector.
   * @param n dimension of the square matrix (maximum Int.MaxValue).
   * @param k number of leading eigenvalues required, where k must be positive and less than n.
   * @param tol tolerance of the eigs computation.
   * @param maxIterations the maximum number of Arnoldi update iterations.
   * @return a dense vector of eigenvalues in descending order and a dense matrix of eigenvectors
   *         (columns of the matrix).
   * @note The number of computed eigenvalues might be smaller than k when some Ritz values do not
   *       satisfy the convergence criterion specified by tol (see ARPACK Users Guide, Chapter 4.6
   *       for more details). The maximum number of Arnoldi update iterations is bounded by the
   *       `maxIterations` parameter.
   */
  def symmetricEigs(
      mul: BDV[Double] => BDV[Double],
      n: Int,
      k: Int,
      tol: Double,
      maxIterations: Int): (BDV[Double], BDM[Double]) = {
    // TODO: remove this function and use eigs in breeze when switching breeze version
    require(n > k, s"Number of required eigenvalues $k must be smaller than matrix dimension $n")

    val arpack = ARPACK.getInstance()

    // tolerance used in stopping criterion
    val tolW = new doubleW(tol)
    // number of desired eigenvalues, 0 < nev < n
    val nev = new intW(k)
    // nev Lanczos vectors are generated in the first iteration
    // ncv-nev Lanczos vectors are generated in each subsequent iteration
    // ncv must be smaller than n
    val ncv = math.min(2 * k, n)

    // "I" for standard eigenvalue problem, "G" for generalized eigenvalue problem
    val bmat = "I"
    // "LM" : compute the NEV largest (in magnitude) eigenvalues
    val which = "LM"

    val iparam = new Array[Int](11)
    // use exact shift in each iteration
    iparam(0) = 1
    // maximum number of Arnoldi update iterations, or the actual number of iterations on output
    iparam(2) = maxIterations
    // Mode 1: A*x = lambda*x, A symmetric
    iparam(6) = 1

    // ARPACK workspace sizes are computed with Int arithmetic; guard against overflow.
    require(n * ncv.toLong <= Integer.MAX_VALUE && ncv * (ncv.toLong + 8) <= Integer.MAX_VALUE,
      s"k = $k and/or n = $n are too large to compute an eigendecomposition")

    val ido = new intW(0)
    val info = new intW(0)
    val resid = new Array[Double](n)
    val v = new Array[Double](n * ncv)
    val workd = new Array[Double](n * 3)
    val workl = new Array[Double](ncv * (ncv + 8))
    val ipntr = new Array[Int](11)

    // call ARPACK's reverse communication, first iteration with ido = 0
    arpack.dsaupd(ido, bmat, n, which, nev.`val`, tolW, resid, ncv, v, n, iparam, ipntr, workd,
      workl, workl.length, info)

    // Breeze view over the shared workd buffer; slices below alias this storage.
    val w = BDV(workd)

    // ido = 99 : done flag in reverse communication
    while (ido.`val` != 99) {
      if (ido.`val` != -1 && ido.`val` != 1) {
        throw new IllegalStateException("ARPACK returns ido = " + ido.`val` +
          " This flag is not compatible with Mode 1: A*x = lambda*x, A symmetric.")
      }
      // multiply working vector with the matrix
      // ipntr holds 1-based Fortran offsets into workd; convert to 0-based.
      val inputOffset = ipntr(0) - 1
      val outputOffset = ipntr(1) - 1
      val x = w.slice(inputOffset, inputOffset + n)
      val y = w.slice(outputOffset, outputOffset + n)
      y := mul(x)
      // call ARPACK's reverse communication
      arpack.dsaupd(ido, bmat, n, which, nev.`val`, tolW, resid, ncv, v, n, iparam, ipntr,
        workd, workl, workl.length, info)
    }

    if (info.`val` != 0) {
      info.`val` match {
        case 1 => throw new IllegalStateException("ARPACK returns non-zero info = " + info.`val` +
            " Maximum number of iterations taken. (Refer ARPACK user guide for details)")
        case 3 => throw new IllegalStateException("ARPACK returns non-zero info = " + info.`val` +
            " No shifts could be applied. Try to increase NCV. " +
            "(Refer ARPACK user guide for details)")
        case _ => throw new IllegalStateException("ARPACK returns non-zero info = " + info.`val` +
            " Please refer ARPACK user guide for error message.")
      }
    }

    val d = new Array[Double](nev.`val`)
    val select = new Array[Boolean](ncv)
    // copy the Ritz vectors
    val z = java.util.Arrays.copyOfRange(v, 0, nev.`val` * n)

    // call ARPACK's post-processing for eigenvectors
    arpack.dseupd(true, "A", select, d, z, n, 0.0, bmat, n, which, nev, tol, resid, ncv, v, n,
      iparam, ipntr, workd, workl, workl.length, info)

    // number of computed eigenvalues, might be smaller than k
    val computed = iparam(4)

    // Pair each converged eigenvalue with its eigenvector (a length-n slice of z).
    val eigenPairs = java.util.Arrays.copyOfRange(d, 0, computed).zipWithIndex.map { r =>
      (r._1, java.util.Arrays.copyOfRange(z, r._2 * n, r._2 * n + n))
    }

    // sort the eigen-pairs in descending order
    val sortedEigenPairs = eigenPairs.sortBy(- _._1)

    // copy eigenvectors in descending order of eigenvalues
    val sortedU = BDM.zeros[Double](n, computed)
    sortedEigenPairs.zipWithIndex.foreach { r =>
      val b = r._2 * n
      var i = 0
      while (i < n) {
        // Column-major copy: column r._2 of sortedU receives eigenvector r._1._2.
        sortedU.data(b + i) = r._1._2(i)
        i += 1
      }
    }

    (BDV[Double](sortedEigenPairs.map(_._1)), sortedU)
  }
}
witgo/spark
mllib/src/main/scala/org/apache/spark/mllib/linalg/EigenValueDecomposition.scala
Scala
apache-2.0
6,496
package com.pharmpress.dojo.currency

import java.util.Locale

/**
 * A currency converter holding a lookup table of exchange rates keyed by
 * (from, to) currency pairs, all expressed against a single base currency.
 *
 * @param locale    locale associated with this converter
 * @param base      the base currency every rate is expressed against
 * @param converter lookup table from a (from, to) currency pair to its rate
 */
class CurrencyConverter(val locale: Locale, val base: Currency, val converter: ConverterMethod)

object CurrencyConverter {

  /**
   * Builds a converter from the given exchange rates. For every supplied
   * (currency, rate) pair, both the base->currency rate and the inverse
   * currency->base rate (1.0 / rate) are registered.
   */
  def apply(base: Currency, rates: (Currency, AmountType)*): ConverterType = {
    val directRates = rates.map { case (currency, rate) => (base -> currency) -> rate }
    val inverseRates = rates.map { case (currency, rate) => (currency -> base) -> 1.0 / rate }
    new CurrencyConverter(
      locale = Locale.getDefault,
      base = base,
      converter = (directRates ++ inverseRates).toMap
    )
  }
}
pharmpress/codingdojo
currency/src/main/scala/com/pharmpress/dojo/currency/CurrencyConverter.scala
Scala
apache-2.0
535
package scala.collection

import org.junit.Assert._
import org.junit.Test

/** Unit tests for `scala.collection.Map` and its concrete implementations. */
class MapTest {

  /** `--` removes all listed keys; keys absent from the map are ignored. */
  @deprecated("Tests deprecated API", since="2.13")
  @Test
  def test(): Unit = {
    val map = collection.Map(1 -> 1, 2 -> 2, 4 -> 4, 5 -> 5)
    val actual = map -- List(1, 2, 3)
    val expected = collection.Map(4 -> 4, 5 -> 5)
    assertEquals(expected, actual)
  }

  /** mkString renders entries as "k -> v" joined by the separator. */
  @Test
  def mkString(): Unit = {
    assert(Map().mkString == "")
    assert(Map(1 -> 1).mkString(",") == "1 -> 1")
    assert(Map(1 -> 1, 2 -> 2).mkString(",") == "1 -> 1,2 -> 2")
  }

  /** addString appends the same rendering into a StringBuilder. */
  @Test
  def addString(): Unit = {
    assert(Map().addString(new StringBuilder).toString == "")
    assert(Map(1 -> 1).addString(new StringBuilder).toString == "1 -> 1")
    // Fixed: this assertion previously called mkString (with a redundant
    // .toString on the resulting String), so the start/sep/end overload of
    // addString itself was never exercised.
    assert(Map(1 -> 1, 2 -> 2).addString(new StringBuilder, "foo [", ", ", "] bar").toString == "foo [1 -> 1, 2 -> 2] bar")
  }

  /** `++:` on a ListMap must preserve the ListMap type and insertion order. */
  @deprecated("Tests deprecated API", since="2.13")
  @Test
  def t11188(): Unit = {
    import scala.collection.immutable.ListMap
    val m = ListMap(1 -> "one")
    val mm = Map(2 -> "two") ++: m
    assert(mm.isInstanceOf[ListMap[Int,String]])
    assertEquals(mm.mkString("[", ", ", "]"), "[2 -> two, 1 -> one]")
  }

  /** Deprecated `++:` keeps Map results for map operands, widens for others. */
  @deprecated("Tests deprecated API", since="2.13")
  @Test
  def deprecatedPPE(): Unit = {
    val m = (1 to 10).map(x => (x, x)).toMap
    val m1 = m ++: m
    assertEquals(m.toList.sorted, (m1: Map[Int, Int]).toList.sorted)
    val s1: Iterable[Any] = List(1) ++: m
    assertEquals(1 :: m.toList.sorted, s1.toList.sortBy { case (x: Int, _) => x ; case x: Int => x })
  }

  /** flatMap keeps Map results for pair-valued functions, widens otherwise. */
  @Test
  def flatMapOption(): Unit = {
    def f(p: (Int, Int)) = if (p._1 < p._2) Some((p._1, p._2)) else None
    val m = (1 to 10).zip(11 to 20).toMap
    val m2 = m.flatMap(f)
    (m2: Map[Int, Int]).head
    val m3 = m.flatMap(p => Some(p))
    (m3: Map[Int, Int]).head
    val m4 = m.flatMap(_ => Some(3))
    (m4: Iterable[Int]).head
  }

  @deprecated("Tests deprecated API", since="2.13")
  @Test
  def t11589(): Unit = {
    // tests the strictness of Map#values: mapping over `values` must run the
    // function eagerly (i == 1 afterwards), including for default-wrapped maps.
    def check(m: collection.Map[Int, Int]): Unit = {
      // Fixed: dropped the unused type parameters [K, V] that previously
      // decorated this helper despite its Map[Int, Int] parameter.
      def checkImmutable(m: immutable.Map[Int, Int]): Unit = {
        var i = 0
        m.withDefault(_ => -1).values.map { v => i = 1; v }
        assertEquals(1, i)
        i = 0
        m.withDefaultValue(-1).values.map { v => i = 1; v }
        assertEquals(1, i)
      }
      var i = 0
      m.values.map { v => i = 1; v }
      assertEquals(1, i)
      m match {
        case im: immutable.Map[Int, Int] => checkImmutable(im)
        case _ => ()
      }
    }
    check(collection.Map(1 -> 1))
    check(immutable.Map(1 -> 1))
    check(mutable.Map(1 -> 1))
    check(collection.SortedMap(1 -> 1))
    check(immutable.SortedMap(1 -> 1))
    check(mutable.SortedMap(1 -> 1))
    check(immutable.HashMap(1 -> 1))
    check(mutable.HashMap(1 -> 1))
    check(immutable.TreeMap(1 -> 1))
    check(mutable.TreeMap(1 -> 1))
    check(immutable.SeqMap(1 -> 1))
    check(mutable.SeqMap(1 -> 1))
    check(immutable.ListMap(1 -> 1))
    check(mutable.ListMap(1 -> 1))
    check(immutable.VectorMap(1 -> 1))
    check(immutable.TreeSeqMap(1 -> 1))
    check(mutable.LinkedHashMap(1 -> 1))
    check(mutable.OpenHashMap(1 -> 1))
    check(mutable.CollisionProofHashMap(1 -> 1))
  }

  /** Equality across unrelated specialized collections must be false. */
  @Test
  def t12228(): Unit = {
    assertFalse(Set("") == immutable.BitSet(1))
    assertFalse(Map("" -> 2) == scala.collection.immutable.LongMap(1L -> 2))
  }
}
scala/scala
test/junit/scala/collection/MapTest.scala
Scala
apache-2.0
3,480
package com.typesafe.slick.testkit.tests

import com.typesafe.slick.testkit.util.{AsyncTest, JdbcTestDB}

/**
 * Additional union tests: verifies that per-branch `drop`/`take` and
 * per-branch `sortBy` are honoured when the branches are combined with
 * `union` / `++` and the combined query is sorted again.
 */
class UnionAdditionalTest extends AsyncTest[JdbcTestDB] {
  import tdb.profile.api._

  class Managers(tag: Tag) extends Table[(Int, String, String)](tag, "managers") {
    def id = column[Int]("id")
    def name = column[String]("name")
    def department = column[String]("department")
    def * = (id, name, department)
  }
  lazy val managers = TableQuery[Managers]

  class Employees(tag: Tag) extends Table[(Int, String, Int)](tag, "employees") {
    def id = column[Int]("id")
    // Deliberately distinct column name from Managers.name ("name2").
    def name = column[String]("name2")
    def manager = column[Int]("manager")
    def * = (id, name, manager)

    // A convenience method for selecting employees by department
    def departmentIs(dept: String) = manager in managers.filter(_.department === dept).map(_.id)
  }
  lazy val employees = TableQuery[Employees]

  /** Each union branch applies its own drop/take before the union and final sort. */
  def testLimitWithUnion = {
    val q1 = for (m <- managers drop 1 take 2) yield (m.id, m.name)
    val q2 = for (e <- employees drop 1 take 3) yield (e.id, e.name)
    val q3 = (q1 union q2) sortBy (_._2)

    (for {
      _ <- (managers.schema ++ employees.schema).create
      _ <- managers ++= Seq((1, "Peter", "HR"), (2, "Amy", "IT"), (3, "Steve", "IT"))
      _ <- employees ++= Seq((4, "Leonard", 2), (5, "Jennifer", 1), (6, "Tom", 1), (7, "Ben", 1), (8, "Greg", 3))
      // Branch results are order-insensitive (sets); the final query is ordered.
      _ <- mark("q1", q1.result).map(r => r.toSet shouldBe Set((2, "Amy"), (3, "Steve")))
      _ <- mark("q2", q2.result).map(r => r.toSet shouldBe Set((5, "Jennifer"), (6, "Tom"), (7, "Ben")))
      _ <- mark("q3", q3.result).map(_ shouldBe Vector((2, "Amy"), (7, "Ben"), (5, "Jennifer"), (3, "Steve"), (6, "Tom")))
    } yield ()) andFinally (managers.schema ++ employees.schema).drop
  }

  /** Sorting inside each branch is permitted; the outer sortBy defines the result order. */
  def testOrderByWithUnion = {
    val q1 = for (m <- managers sortBy (_.name)) yield (m.id, m.name)
    val q2 = for (e <- employees sortBy (_.name)) yield (e.id, e.name)
    val q3 = (q1 ++ q2) sortBy (_._2)

    (for {
      _ <- (managers.schema ++ employees.schema).create
      _ <- managers ++= Seq((1, "Peter", "HR"), (2, "Amy", "IT"), (3, "Steve", "IT"))
      _ <- employees ++= Seq((4, "Leonard", 2), (5, "Jennifer", 1), (6, "Tom", 1), (7, "Ben", 1), (8, "Greg", 3))
      _ <- mark("q1", q1.result).map(r => r.toSet shouldBe Set((2, "Amy"), (1, "Peter"), (3, "Steve")))
      _ <- mark("q2", q2.result).map(r => r.toSet shouldBe Set((7, "Ben"), (8, "Greg"), (5, "Jennifer"), (4, "Leonard"), (6, "Tom")))
      _ <- mark("q3", q3.result).map(_ shouldBe Vector((2, "Amy"), (7, "Ben"), (8, "Greg"), (5, "Jennifer"), (4, "Leonard"), (1, "Peter"), (3, "Steve"), (6, "Tom")))
    } yield ()) andFinally (managers.schema ++ employees.schema).drop
  }
}
nafg/slick
slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/UnionAdditionalTest.scala
Scala
bsd-2-clause
2,764
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied.  See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.tailhq.dynaml.models.lm

import breeze.linalg.DenseVector
import breeze.numerics._
import breeze.stats.distributions.Gaussian
import io.github.tailhq.dynaml.optimization._

/**
 * @author tailhq date: 31/3/16.
 *
 * Logistic model for binary classification.
 * @param data The training data as a stream of tuples
 * @param numPoints The number of training data points
 * @param map The basis functions used to map the input
 *            features to a possible higher dimensional space
 */
class LogisticGLM(data: Stream[(DenseVector[Double], Double)],
                  numPoints: Int,
                  map: (DenseVector[Double]) => DenseVector[Double] =
                  identity[DenseVector[Double]])
  extends GeneralizedLinearModel[
    Stream[(DenseVector[Double], Double)]
    ](data, numPoints, map) {

  // Link function: the logistic sigmoid maps the linear predictor to (0, 1).
  override val h: (Double) => Double = (x) => sigmoid(x)

  override val task = "classification"

  // Trained via gradient descent on the logistic loss with L2 regularization.
  override protected val optimizer: RegularizedOptimizer[DenseVector[Double],
    DenseVector[Double], Double,
    Stream[(DenseVector[Double], Double)]] =
    new GradientDescent(new LogisticGradient, new SquaredL2Updater)

  // Apply the basis-function feature map to every training input up front.
  override def prepareData(d: Stream[(DenseVector[Double], Double)]) =
    d.map(point => (featureMap(point._1), point._2))

}

/**
 * Probit model for binary classification.
 * Identical to [[LogisticGLM]] except for the link function (standard normal
 * CDF instead of the sigmoid) and the corresponding gradient.
 *
 * @param data The training data as a stream of tuples
 * @param numPoints The number of training data points
 * @param map The basis functions used to map the input
 *            features to a possible higher dimensional space
 */
class ProbitGLM(data: Stream[(DenseVector[Double], Double)],
                numPoints: Int,
                map: (DenseVector[Double]) => DenseVector[Double] =
                identity[DenseVector[Double]])
  extends LogisticGLM(data, numPoints, map) {

  private val standardGaussian = new Gaussian(0, 1.0)

  // Probit link: CDF of the standard normal distribution.
  override val h = (x: Double) =>
    standardGaussian.cdf(x)

  override protected val optimizer =
    new GradientDescent(
      new ProbitGradient,
      new SquaredL2Updater)
}
mandar2812/DynaML
dynaml-core/src/main/scala/io/github/tailhq/dynaml/models/lm/LogisticGLM.scala
Scala
apache-2.0
2,830
package ch.bsisa.hyperbird

import play.api.Play

/**
 * Api configuration object.
 *
 * Provides JavaScript client with dynamic configurations.
 *
 * Mandatory settings throw an [[ApiConfigException]] at construction time when
 * missing; optional settings either fall back to a default or remain Option.
 *
 * @author Patrick Refondini
 */
class ApiConfig {

  /** Reads a mandatory string setting or fails with a descriptive ApiConfigException. */
  private def requiredString(description: String, key: String): String =
    Play.current.configuration.getString(key).getOrElse(
      throw ApiConfigException(s"ApiConfig $description information $key missing"))

  /** Reads a mandatory boolean setting or fails with a descriptive ApiConfigException. */
  private def requiredBoolean(description: String, key: String): Boolean =
    Play.current.configuration.getBoolean(key).getOrElse(
      throw ApiConfigException(s"ApiConfig $description information $key missing"))

  /**
   * Used by Restangular to configure required baseUrl.
   */
  val baseUrl: String = requiredString("base URL", ApiConfig.BaseUrlKey)

  /**
   * Used for ELFIN Annexes flat file storage.
   */
  val annexesRootFolder: String = requiredString("annexes root folder", ApiConfig.AnnexesRootFolderKey)

  /**
   * Used for ELFIN Annexes flat file temporary upload.
   */
  val temporaryUploadFolder: String = requiredString("temporary upload folder", ApiConfig.TemporaryUploadFolderKey)

  /**
   * Used by Angular logProvider service to enable or disable debug log.
   */
  val clientDebugEnabled: Boolean = requiredBoolean("client debug enabled", ApiConfig.ClientDebugEnabledUrlKey)

  /**
   * Used by Api service to enable or disable queries cache feature.
   * This property is optional, falls back to false without requiring configuration.
   */
  val queryCacheEnabled: Boolean =
    Play.current.configuration.getBoolean(ApiConfig.QueryCacheEnabledUrlKey).getOrElse(false)

  /**
   * Used by Api service to enable or disable data manager based security feature.
   * This property is optional, falls back to false without requiring configuration.
   */
  val dataManagerSecurityEnabled: Boolean =
    Play.current.configuration.getBoolean(ApiConfig.DataManagerSecurityEnabledUrlKey).getOrElse(false)

  /**
   * Used by Api service to enable or disable serverSideNotification service.
   * Currently used to provide automatic offline detection feature.
   */
  val serverSideNotificationEnabled: Option[Int] =
    Play.current.configuration.getInt(ApiConfig.ServerSideNotificationEnabledUrlKey)

  /**
   * Used by Api service to enable or disable services related to orders statistics
   * module, such as orders id service (`OrdersIdActor`).
   * Note: the name keeps the historical "Statitics" spelling for API compatibility.
   */
  val ordersStatiticsModuleEnabled: Option[Boolean] =
    Play.current.configuration.getBoolean(ApiConfig.OrdersStatiticsModuleEnabledKey)

  /**
   * Used for configuring `hbGeoApiUrl` in conf.js to provide client side restangular
   * dynamic configuration to access hb-geo-api service.
   */
  val HbGeoApiProxyUrl: Option[String] =
    Play.current.configuration.getString(ApiConfig.HbGeoApiProxyUrlKey)

  /**
   * Used for configuring `protocol` while forwarding requests to hb-geo-api service.
   */
  val hbGeoApiProtocol: Option[String] =
    Play.current.configuration.getString(ApiConfig.HbGeoApiProtocolKey)

  /**
   * Used for configuring `host` while forwarding requests to hb-geo-api service.
   */
  val hbGeoApiHost: Option[String] =
    Play.current.configuration.getString(ApiConfig.HbGeoApiHostKey)

  /**
   * Used for configuring `port` while forwarding requests to hb-geo-api service.
   */
  val hbGeoApiPort: Option[String] =
    Play.current.configuration.getString(ApiConfig.HbGeoApiPortKey)
}

/**
 * ApiConfig exception class, raised when a mandatory configuration entry is missing.
 */
case class ApiConfigException(message: String = null, cause: Throwable = null) extends Exception(message, cause)

/**
 * Companion object containing configuration key constants.
 */
object ApiConfig {
  private val TemporaryUploadFolderKey = "hb.api.temporaryUploadFolder"
  private val AnnexesRootFolderKey = "hb.api.annexesRootFolder"
  private val BaseUrlKey = "hb.api.baseUrl"
  private val ClientDebugEnabledUrlKey = "hb.api.clientDebugEnabled"
  private val QueryCacheEnabledUrlKey = "hb.api.queryCacheEnabled"
  private val DataManagerSecurityEnabledUrlKey = "hb.api.dataManagerSecurityEnabled"
  private val ServerSideNotificationEnabledUrlKey = "hb.api.serverSideNotificationEnabled"
  private val OrdersStatiticsModuleEnabledKey = "hb.modules.ordersStatistics.enabled"
  private val HbGeoApiProxyUrlKey = "hb.modules.hbGeoApi.proxyBaseUrl"
  private val HbGeoApiProtocolKey = "hb.modules.hbGeoApi.protocol"
  private val HbGeoApiHostKey = "hb.modules.hbGeoApi.host"
  private val HbGeoApiPortKey = "hb.modules.hbGeoApi.port"
}
bsisa/hb-api
app/ch/bsisa/hyperbird/ApiConfig.scala
Scala
gpl-2.0
5,009
/*
** Copyright [2013-2016] [Megam Systems]
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
package models.json.analytics

import scalaz._
import scalaz.NonEmptyList._
import Scalaz._

import net.liftweb.json._
import net.liftweb.json.scalaz.JsonScalaz._
import models.analytics._

import java.nio.charset.Charset
import models.analytics.{YonpiConnectorsList, YonpiConnector}

/**
 * JSON (de)serialization for [[YonpiConnectorsList]] values.
 *
 * @author ranjitha
 */
object YonpiConnectorsListSerialization extends io.megam.json.SerializationBase[YonpiConnectorsList] {

  implicit override val writer = new JSONW[YonpiConnectorsList] {

    // Serialize every connector in the list and wrap the results in a JSON array.
    override def write(h: YonpiConnectorsList): JValue = {
      val serialized: List[JValue] = h.map { connector: YonpiConnector => connector.toJValue }
      JArray(serialized)
    }
  }

  implicit override val reader = new JSONR[YonpiConnectorsList] {

    // Parse each array element into a connector, silently dropping elements
    // that fail to parse; a non-array input is an UnexpectedJSONError.
    override def read(json: JValue): Result[YonpiConnectorsList] = {
      json match {
        case JArray(jObjectList) =>
          val connectors = jObjectList.flatMap { jValue: JValue =>
            YonpiConnector.fromJValue(jValue) match {
              case Success(connector) => connector :: Nil
              case Failure(_) => Nil
            }
          }
          YonpiConnectorsList(connectors).successNel[Error]
        case j => UnexpectedJSONError(j, classOf[JArray]).failureNel[YonpiConnectorsList]
      }
    }
  }
}
meglytics/bidi
app/models/json/analytics/YonpiConnectorsListSerialization.scala
Scala
mit
2,016
package collectionjson

import java.net.{URISyntaxException, URI}
import jsonpicklers._
import Picklers._

/**
 * Picklers for the Collection+JSON hypermedia type
 * (application/vnd.collection+json): case-class model plus a bidirectional
 * JSON pickler for each document part.
 */
object CollectionJson {

  // Top-level document; optional sections default to empty lists / None.
  case class CollectionJson(version: String, href: URI, links: List[Link], items: List[Item], queries: List[Query], template: Option[Template], error: Option[Error])
  case class Error(title: Option[String], code: Option[String], message: Option[String])
  case class Template(data: List[Data])
  case class Item(href: URI, data: List[Data], links: List[Link])
  case class Data(name: Option[String], value: Option[String], prompt: Option[String])
  case class Query(href: URI, rel: String, name: Option[String], prompt: Option[String], data: List[Data])
  case class Link(href: URI, rel: String, name: Option[String], render: Option[String], prompt: Option[String])

  // The whole document lives under a mandatory "collection" wrapper object.
  // `.?(Nil)` makes a field optional with an empty-list default; `.?` yields Option.
  lazy val collection = "collection" :: {
    version ~ href ~ links.?(Nil) ~ items.?(Nil) ~ queries.?(Nil) ~ template.? ~ error.?
  }.wrap(CollectionJson)(CollectionJson.unapply(_).get)

  lazy val error = "error" :: {
    title.? ~ code.? ~ message.?
  }.wrap(Error)(Error.unapply(_).get)

  lazy val template = "template" :: data.wrap(Template)(Template.unapply(_).get)

  lazy val items = "items" :: array({
    href ~ data.?(Nil) ~ links.?(Nil)
  }.wrap(Item)(Item.unapply(_).get))

  lazy val data = "data" :: array({
    name.? ~ value.? ~ prompt.?
  }.wrap(Data)(Data.unapply(_).get))

  lazy val queries = "queries" :: array({
    href ~ rel ~ name.? ~ prompt.? ~ data.?(Nil)
  }.wrap(Query)(Query.unapply(_).get))

  lazy val links = "links" :: array({
    href ~ rel ~ name.? ~ render.? ~ prompt.?
  }.wrap(Link)(Link.unapply(_).get))

  // Leaf field picklers: each binds a JSON property name to a string value.
  lazy val code = "code" :: string
  lazy val href = "href" :: uri
  lazy val message = "message" :: string
  lazy val name = "name" :: string
  lazy val prompt = "prompt" :: string
  lazy val rel = "rel" :: string
  lazy val render = "render" :: string
  lazy val title = "title" :: string
  lazy val value = "value" :: string
  lazy val version = "version" :: string

  // String <-> URI conversion. NOTE(review): `new URI(a)` throws
  // URISyntaxException for malformed input (hence the import above); it is
  // assumed the `trying` combinator converts that exception into a parse
  // failure — confirm against the jsonpicklers `trying` implementation.
  lazy val uri = string.trying(a => Parsers.success(new URI(a)))(u => Some(u.toString))
}
teigen/jsonpicklers
src/test/scala/collectionjson/CollectionJson.scala
Scala
apache-2.0
2,307
package com.socrata.querycoordinator.caching.cache.config

import java.io.File

import com.socrata.thirdparty.typesafeconfig.ConfigClass
import com.typesafe.config.Config

/**
 * Top-level cache configuration. Reads general windowing/cleanup settings and
 * dispatches on the optional "type" key to a concrete implementation config.
 */
class CacheConfig(config: Config, root: String) extends ConfigClass(config, root) {
  // "type" selects the backend; an absent key and an explicit "noop" both
  // resolve to the no-op cache. Any other value is a hard configuration error.
  val realConfig: CacheImplConfig =
    optionally(getString("type")).fold[CacheImplConfig](NoopCacheConfig) {
      case "filesystem" => new FilesystemCacheConfig(config, root)
      case "postgresql" => new PostgresqlCacheConfig(config, root)
      case "noop"       => NoopCacheConfig
      case other        => sys.error("Unknown cache configuration type " + other)
    }

  val rowsPerWindow = getInt("rows-per-window")
  val maxWindows = getInt("max-windows")
  val cleanInterval = getDuration("clean-interval")
  // Stored as milliseconds; queries faster than this are not worth caching.
  val minQueryTime = getDuration("min-query-time").toMillis
}

/** Marker for any cache implementation configuration. */
sealed trait CacheImplConfig

/**
 * Settings shared by every real (non-noop) backend: access-time refresh rate
 * and the cutoffs used by the cleaner to decide what is dead.
 */
sealed trait ConcreteCacheImplConfig extends CacheImplConfig { this: ConfigClass =>
  val atimeUpdateInterval = getDuration("atime-update-interval")
  val survivorCutoff = getDuration("survivor-cutoff")
  val assumeDeadCreateCutoff = getDuration("assume-dead-create-cutoff")
}

/** Configuration for the disabled cache; carries no settings. */
object NoopCacheConfig extends CacheImplConfig

/** Filesystem-backed cache: only needs the directory to store entries in. */
class FilesystemCacheConfig(config: Config, rootPath: String) extends ConfigClass(config, rootPath) with ConcreteCacheImplConfig {
  val root = new File(getString("root"))
}

/** PostgreSQL-backed cache: deletion pacing plus connection settings. */
class PostgresqlCacheConfig(config: Config, root: String) extends ConfigClass(config, root) with ConcreteCacheImplConfig {
  val deleteDelay = getDuration("delete-delay")
  val deleteChunkSize = getInt("delete-chunk-size")
  val db = getConfig("database", new DatabaseConfig(_, _))
}

/** Standard connection-pool settings for the cache database. */
class DatabaseConfig(config: Config, root: String) extends ConfigClass(config, root) {
  val host = getString("host")
  val port = getInt("port")
  val database = getString("database")
  val username = getString("username")
  val password = getString("password")
  val minPoolSize = getInt("min-pool-size")
  val maxPoolSize = getInt("max-pool-size")
}
socrata-platform/query-coordinator
query-coordinator/src/main/scala/com/socrata/querycoordinator/caching/cache/config/CacheConfig.scala
Scala
apache-2.0
2,019
package glasskey.spray.resource

import glasskey.model.{ValidatedToken, ValidatedData, OAuthAccessToken}
import glasskey.resource.{ProtectedResourceHandler, ProtectedResource, OIDCTokenData}

/**
 * Authenticates spray requests by validating an OAuth access token (plus an
 * optional OIDC token) against a [[ProtectedResourceHandler]].
 */
trait DefaultAccessTokenAuthenticator {

  import glasskey.model.ValidatedData

  import scala.concurrent.{ExecutionContext, Future}
  import scala.language.implicitConversions

  val resource = ProtectedResource.apply

  /** Concrete validator supplied by the implementor (see companion `apply`). */
  val handler: ProtectedResourceHandler[ValidatedData, ValidatedToken]

  /**
   * Validates the token pair; a successful validation yields `Some(data)`.
   * A failed underlying Future still propagates as a failure — this only
   * wraps the success path in an Option.
   */
  def attemptAuth(accessToken: (OAuthAccessToken, Option[OIDCTokenData]))(implicit ec: ExecutionContext): Future[Option[ValidatedData]] =
    // was: flatMap { authInfo => Future.successful(Some(authInfo)) } — an
    // unnecessary extra Future; `map` expresses the same transformation.
    resource.handleRequest[(OAuthAccessToken, Option[OIDCTokenData])](accessToken, handler).map(Some(_))

  /**
   * Exposes the authenticator as the `AccessTokenAuthenticator` function type.
   * was: `attemptAuth(accessToken) flatMap Future.successful` — a redundant
   * identity bind; delegating directly is equivalent.
   */
  def validate(implicit ec: ExecutionContext): AccessTokenAuthenticator =
    (accessToken) => attemptAuth(accessToken)
}

object DefaultAccessTokenAuthenticator {

  import glasskey.resource.ProtectedResourceHandler

  /** Builds an authenticator around the given handler. */
  def apply(resourceHandler: ProtectedResourceHandler[ValidatedData, ValidatedToken]): DefaultAccessTokenAuthenticator =
    new DefaultAccessTokenAuthenticator {
      override val handler: ProtectedResourceHandler[ValidatedData, ValidatedToken] = resourceHandler
    }
}
MonsantoCo/glass-key
glass-key-spray/src/main/scala/glasskey/spray/resource/DefaultAccessTokenAuthenticator.scala
Scala
bsd-3-clause
1,306
package com.btcontract.wallet

import fr.acinq.bitcoin.{ByteVector32, Crypto}
import androidx.test.ext.junit.runners.AndroidJUnit4
import immortan.LightningNodeKeys
import org.junit.runner.RunWith
import scodec.bits.ByteVector
import org.junit.Test

/** Verifies LNURL-auth key derivation and signing against fixtures produced by BLW v1. */
@RunWith(classOf[AndroidJUnit4])
class LnUrlAuthSpec {

  @Test
  def backwardCompatWithBLWv1: Unit = {
    // Fixed wallet seed used to generate the reference vectors.
    val seedBytes = ByteVector.fromValidHex("1b51da6b34675f0aa4bb0b5f0af193ef75294cf6a28d1b2541304e486b76b2538bd680b66f618faf0e9cd4fd98823b5439fc89c242102387eb1bc5e29a052007")
    val nodeKeys = LightningNodeKeys.makeFromSeed(seedBytes.toArray)

    // Per-domain linking key must match the value BLW v1 derived for this host.
    val domain = "www.site.com"
    val derivedLinkingKey = nodeKeys.makeLinkingKey(domain)
    val expectedLinkingKey = ByteVector.fromValidHex("4116700cd356bcc94f2de10b328a41e2e2932152ec7ba720e6d12b4f54eee134")
    assert(derivedLinkingKey.value.bytes == expectedLinkingKey)

    // Signing the reference k1 challenge must reproduce BLW v1's DER signature.
    val challengeK1 = ByteVector32.fromValidHex("7f9d3443b2ece9485a7a8d98df6a79fd269ce938880266769bfb4ba511ff8761")
    val expectedDerSig = ByteVector.fromValidHex("304402205757f98d52e0d3b72d4be1a46d7d62c0f7d8caf0987707352089d8ec29b8f238022019c7f508c1606234d7688116a14d812bc5e8d699c93f5b7cfa1de17b1ef523e1")
    assert(Crypto.compact2der(Crypto.sign(challengeK1, derivedLinkingKey)) == expectedDerSig)
  }
}
btcontract/wallet
app/src/androidTest/java/com/btcontract/wallet/LnUrlAuthSpec.scala
Scala
apache-2.0
1,249
package org.jetbrains.bsp.settings

import com.intellij.openapi.fileChooser.FileChooserDescriptorFactory

/** Settings pane for the BSP system; extends the UI-designer-generated form. */
class BspSystemSettingsPane extends BspSystemSettingsForm {

  /**
   * Wires a browse-folder listener onto the Bloop executable path field so
   * the user can pick the binary via a single-local-file chooser.
   *
   * Fix: public side-effecting method now declares its `Unit` result type
   * explicitly instead of relying on inference.
   */
  def setPathListeners(): Unit = {
    bloopExecutablePath.addBrowseFolderListener(
      "Bloop executable",
      "select bloop executable",
      null, // no project context for the chooser
      FileChooserDescriptorFactory.createSingleLocalFileDescriptor
    )
  }
}
jastice/intellij-scala
bsp/src/org/jetbrains/bsp/settings/BspSystemSettingsPane.scala
Scala
apache-2.0
366
/*
Copyright 2013 Twitter, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter

import org.apache.hadoop.fs.{ Path, PathFilter }

package object scalding {
  /**
   * The objects for the Typed-API live in the scalding.typed package
   * but are aliased here.
   */
  val TDsl = com.twitter.scalding.typed.TDsl
  val TypedPipe = com.twitter.scalding.typed.TypedPipe

  type TypedPipe[+T] = com.twitter.scalding.typed.TypedPipe[T]
  type TypedSink[-T] = com.twitter.scalding.typed.TypedSink[T]
  type TypedSource[+T] = com.twitter.scalding.typed.TypedSource[T]
  type KeyedList[K, +V] = com.twitter.scalding.typed.KeyedList[K, V]
  type ValuePipe[+T] = com.twitter.scalding.typed.ValuePipe[T]
  type Grouped[K, +V] = com.twitter.scalding.typed.Grouped[K, V]

  /**
   * Make sure this is in sync with version.sbt
   */
  val scaldingVersion: String = "0.11.1"

  /** Implicit entry point: pimps boolean combinators onto any PathFilter. */
  object RichPathFilter {
    implicit def toRichPathFilter(f: PathFilter) = new RichPathFilter(f)
  }

  /** Boolean algebra over Hadoop PathFilters: and / or / not. */
  class RichPathFilter(f: PathFilter) {
    /** Accepts a path only when this filter and all given filters accept it. */
    def and(filters: PathFilter*): PathFilter = new AndPathFilter(f +: filters)

    /** Accepts a path when this filter or any of the given filters accepts it. */
    def or(filters: PathFilter*): PathFilter = new OrPathFilter(f +: filters)

    /** Accepts exactly the paths this filter rejects. */
    def not: PathFilter = new NotPathFilter(f)
  }

  // Conjunction of filters: every filter must accept the path.
  private[this] class AndPathFilter(filters: Seq[PathFilter]) extends PathFilter {
    override def accept(p: Path): Boolean = filters.forall(_.accept(p))
  }

  // Disjunction of filters: at least one filter must accept the path.
  private[this] class OrPathFilter(filters: Seq[PathFilter]) extends PathFilter {
    override def accept(p: Path): Boolean = filters.exists(_.accept(p))
  }

  // Negation of a single filter.
  private[this] class NotPathFilter(filter: PathFilter) extends PathFilter {
    override def accept(p: Path): Boolean = !filter.accept(p)
  }
}
lucamilanesio/scalding
scalding-core/src/main/scala/com/twitter/package.scala
Scala
apache-2.0
2,270